| .. | .. |
|---|
| 1 | 1 | /* |
|---|
| 2 | | - * Copyright(c) 2015 - 2018 Intel Corporation. |
|---|
| 2 | + * Copyright(c) 2015 - 2020 Intel Corporation. |
|---|
| 3 | 3 | * |
|---|
| 4 | 4 | * This file is provided under a dual BSD/GPLv2 license. When using or |
|---|
| 5 | 5 | * redistributing this file, you may do so under either license. |
|---|
| .. | .. |
|---|
| 66 | 66 | #include "affinity.h" |
|---|
| 67 | 67 | #include "debugfs.h" |
|---|
| 68 | 68 | #include "fault.h" |
|---|
| 69 | | - |
|---|
| 70 | | -#define NUM_IB_PORTS 1 |
|---|
| 71 | | - |
|---|
| 72 | | -uint kdeth_qp; |
|---|
| 73 | | -module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO); |
|---|
| 74 | | -MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix"); |
|---|
| 69 | +#include "netdev.h" |
|---|
| 75 | 70 | |
|---|
| 76 | 71 | uint num_vls = HFI1_MAX_VLS_SUPPORTED; |
|---|
| 77 | 72 | module_param(num_vls, uint, S_IRUGO); |
|---|
| .. | .. |
|---|
| 130 | 125 | |
|---|
| 131 | 126 | /* |
|---|
| 132 | 127 | * RSM instance allocation |
|---|
| 133 | | - * 0 - Verbs |
|---|
| 134 | | - * 1 - User Fecn Handling |
|---|
| 135 | | - * 2 - Vnic |
|---|
| 128 | + * 0 - User Fecn Handling |
|---|
| 129 | + * 1 - Vnic |
|---|
| 130 | + * 2 - AIP |
|---|
| 131 | + * 3 - Verbs |
|---|
| 136 | 132 | */ |
|---|
| 137 | | -#define RSM_INS_VERBS 0 |
|---|
| 138 | | -#define RSM_INS_FECN 1 |
|---|
| 139 | | -#define RSM_INS_VNIC 2 |
|---|
| 133 | +#define RSM_INS_FECN 0 |
|---|
| 134 | +#define RSM_INS_VNIC 1 |
|---|
| 135 | +#define RSM_INS_AIP 2 |
|---|
| 136 | +#define RSM_INS_VERBS 3 |
|---|
| 140 | 137 | |
|---|
| 141 | 138 | /* Bit offset into the GUID which carries HFI id information */ |
|---|
| 142 | 139 | #define GUID_HFI_INDEX_SHIFT 39 |
|---|
| .. | .. |
|---|
| 176 | 173 | |
|---|
| 177 | 174 | /* QPN[m+n:1] QW 1, OFFSET 1 */ |
|---|
| 178 | 175 | #define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull)) |
|---|
| 176 | + |
|---|
| 177 | +/* RSM fields for AIP */ |
|---|
| 178 | +/* LRH.BTH above is reused for this rule */ |
|---|
| 179 | + |
|---|
| 180 | +/* BTH.DESTQP: QW 1, OFFSET 16 for match */ |
|---|
| 181 | +#define BTH_DESTQP_QW 1ull |
|---|
| 182 | +#define BTH_DESTQP_BIT_OFFSET 16ull |
|---|
| 183 | +#define BTH_DESTQP_OFFSET(off) ((BTH_DESTQP_QW << QW_SHIFT) | (off)) |
|---|
| 184 | +#define BTH_DESTQP_MATCH_OFFSET BTH_DESTQP_OFFSET(BTH_DESTQP_BIT_OFFSET) |
|---|
| 185 | +#define BTH_DESTQP_MASK 0xFFull |
|---|
| 186 | +#define BTH_DESTQP_VALUE 0x81ull |
|---|
| 187 | + |
|---|
| 188 | +/* DETH.SQPN: QW 3 Offset 56 for select */ |
|---|
| 189 | +/* We use 8 most significant Source QPN bits as entropy for AIP */ |
|---|
| 190 | +#define DETH_AIP_SQPN_QW 3ull |
|---|
| 191 | +#define DETH_AIP_SQPN_BIT_OFFSET 56ull |
|---|
| 192 | +#define DETH_AIP_SQPN_OFFSET(off) ((DETH_AIP_SQPN_QW << QW_SHIFT) | (off)) |
|---|
| 193 | +#define DETH_AIP_SQPN_SELECT_OFFSET \ |
|---|
| 194 | + DETH_AIP_SQPN_OFFSET(DETH_AIP_SQPN_BIT_OFFSET) |
|---|
| 179 | 195 | |
|---|
| 180 | 196 | /* RSM fields for Vnic */ |
|---|
| 181 | 197 | /* L2_TYPE: QW 0, OFFSET 61 - for match */ |
|---|
| .. | .. |
|---|
| 1081 | 1097 | static void handle_temp_err(struct hfi1_devdata *dd); |
|---|
| 1082 | 1098 | static void dc_shutdown(struct hfi1_devdata *dd); |
|---|
| 1083 | 1099 | static void dc_start(struct hfi1_devdata *dd); |
|---|
| 1084 | | -static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, |
|---|
| 1100 | +static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp, |
|---|
| 1085 | 1101 | unsigned int *np); |
|---|
| 1086 | 1102 | static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd); |
|---|
| 1087 | 1103 | static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms); |
|---|
| .. | .. |
|---|
| 1102 | 1118 | const char *desc; |
|---|
| 1103 | 1119 | }; |
|---|
| 1104 | 1120 | |
|---|
| 1105 | | -#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START) |
|---|
| 1106 | | -#define NUM_DC_ERRS (IS_DC_END - IS_DC_START) |
|---|
| 1107 | | -#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START) |
|---|
| 1121 | +#define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START) |
|---|
| 1122 | +#define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START) |
|---|
| 1123 | +#define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START) |
|---|
| 1108 | 1124 | |
|---|
| 1109 | 1125 | /* |
|---|
| 1110 | 1126 | * Helpers for building HFI and DC error interrupt table entries. Different |
|---|
| .. | .. |
|---|
| 4111 | 4127 | def_access_ibp_counter(rdma_seq); |
|---|
| 4112 | 4128 | def_access_ibp_counter(unaligned); |
|---|
| 4113 | 4129 | def_access_ibp_counter(seq_naks); |
|---|
| 4130 | +def_access_ibp_counter(rc_crwaits); |
|---|
| 4114 | 4131 | |
|---|
| 4115 | 4132 | static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = { |
|---|
| 4116 | 4133 | [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH), |
|---|
| 4134 | +[C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH), |
|---|
| 4135 | +[C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH), |
|---|
| 4136 | +[C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH), |
|---|
| 4137 | +[C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH), |
|---|
| 4117 | 4138 | [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT, |
|---|
| 4118 | 4139 | CNTR_NORMAL), |
|---|
| 4119 | 4140 | [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT, |
|---|
| .. | .. |
|---|
| 4265 | 4286 | access_sw_pio_drain), |
|---|
| 4266 | 4287 | [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL, |
|---|
| 4267 | 4288 | access_sw_kmem_wait), |
|---|
| 4289 | +[C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL, |
|---|
| 4290 | + hfi1_access_sw_tid_wait), |
|---|
| 4268 | 4291 | [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL, |
|---|
| 4269 | 4292 | access_sw_send_schedule), |
|---|
| 4270 | 4293 | [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn", |
|---|
| .. | .. |
|---|
| 5126 | 5149 | [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq), |
|---|
| 5127 | 5150 | [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned), |
|---|
| 5128 | 5151 | [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks), |
|---|
| 5152 | +[C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits), |
|---|
| 5129 | 5153 | [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL, |
|---|
| 5130 | 5154 | access_sw_cpu_rc_acks), |
|---|
| 5131 | 5155 | [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL, |
|---|
| .. | .. |
|---|
| 5232 | 5256 | dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT |
|---|
| 5233 | 5257 | & CCE_REVISION_CHIP_REV_MINOR_MASK; |
|---|
| 5234 | 5258 | return (chip_rev_minor & 0xF0) == 0x10; |
|---|
| 5259 | +} |
|---|
| 5260 | + |
|---|
| 5261 | +/* return true if kernel urg is disabled for rcd */ |
|---|
| 5262 | +bool is_urg_masked(struct hfi1_ctxtdata *rcd) |
|---|
| 5263 | +{ |
|---|
| 5264 | + u64 mask; |
|---|
| 5265 | + u32 is = IS_RCVURGENT_START + rcd->ctxt; |
|---|
| 5266 | + u8 bit = is % 64; |
|---|
| 5267 | + |
|---|
| 5268 | + mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64))); |
|---|
| 5269 | + return !(mask & BIT_ULL(bit)); |
|---|
| 5235 | 5270 | } |
|---|
| 5236 | 5271 | |
|---|
| 5237 | 5272 | /* |
|---|
| .. | .. |
|---|
| 6856 | 6891 | } |
|---|
| 6857 | 6892 | rcvmask = HFI1_RCVCTRL_CTXT_ENB; |
|---|
| 6858 | 6893 | /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */ |
|---|
| 6859 | | - rcvmask |= rcd->rcvhdrtail_kvaddr ? |
|---|
| 6894 | + rcvmask |= hfi1_rcvhdrtail_kvaddr(rcd) ? |
|---|
| 6860 | 6895 | HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS; |
|---|
| 6861 | 6896 | hfi1_rcvctrl(dd, rcvmask, rcd); |
|---|
| 6862 | 6897 | hfi1_rcd_put(rcd); |
|---|
| .. | .. |
|---|
| 7282 | 7317 | case 1: return OPA_LINK_WIDTH_1X; |
|---|
| 7283 | 7318 | case 2: return OPA_LINK_WIDTH_2X; |
|---|
| 7284 | 7319 | case 3: return OPA_LINK_WIDTH_3X; |
|---|
| 7320 | + case 4: return OPA_LINK_WIDTH_4X; |
|---|
| 7285 | 7321 | default: |
|---|
| 7286 | 7322 | dd_dev_info(dd, "%s: invalid width %d, using 4\n", |
|---|
| 7287 | 7323 | __func__, width); |
|---|
| 7288 | | - /* fall through */ |
|---|
| 7289 | | - case 4: return OPA_LINK_WIDTH_4X; |
|---|
| 7324 | + return OPA_LINK_WIDTH_4X; |
|---|
| 7290 | 7325 | } |
|---|
| 7291 | 7326 | } |
|---|
| 7292 | 7327 | |
|---|
| .. | .. |
|---|
| 7341 | 7376 | case 0: |
|---|
| 7342 | 7377 | dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G; |
|---|
| 7343 | 7378 | break; |
|---|
| 7379 | + case 1: |
|---|
| 7380 | + dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; |
|---|
| 7381 | + break; |
|---|
| 7344 | 7382 | default: |
|---|
| 7345 | 7383 | dd_dev_err(dd, |
|---|
| 7346 | 7384 | "%s: unexpected max rate %d, using 25Gb\n", |
|---|
| 7347 | 7385 | __func__, (int)max_rate); |
|---|
| 7348 | | - /* fall through */ |
|---|
| 7349 | | - case 1: |
|---|
| 7350 | 7386 | dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; |
|---|
| 7351 | 7387 | break; |
|---|
| 7352 | 7388 | } |
|---|
| .. | .. |
|---|
| 8193 | 8229 | /** |
|---|
| 8194 | 8230 | * is_rcv_urgent_int() - User receive context urgent IRQ handler |
|---|
| 8195 | 8231 | * @dd: valid dd |
|---|
| 8196 | | - * @source: logical IRQ source (ofse from IS_RCVURGENT_START) |
|---|
| 8232 | + * @source: logical IRQ source (offset from IS_RCVURGENT_START) |
|---|
| 8197 | 8233 | * |
|---|
| 8198 | 8234 | * RX block receive urgent interrupt. Source is < 160. |
|---|
| 8199 | 8235 | * |
|---|
| .. | .. |
|---|
| 8243 | 8279 | is_sdma_eng_err_name, is_sdma_eng_err_int }, |
|---|
| 8244 | 8280 | { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, |
|---|
| 8245 | 8281 | is_sendctxt_err_name, is_sendctxt_err_int }, |
|---|
| 8246 | | -{ IS_SDMA_START, IS_SDMA_END, |
|---|
| 8282 | +{ IS_SDMA_START, IS_SDMA_IDLE_END, |
|---|
| 8247 | 8283 | is_sdma_eng_name, is_sdma_eng_int }, |
|---|
| 8248 | 8284 | { IS_VARIOUS_START, IS_VARIOUS_END, |
|---|
| 8249 | 8285 | is_various_name, is_various_int }, |
|---|
| .. | .. |
|---|
| 8269 | 8305 | |
|---|
| 8270 | 8306 | /* avoids a double compare by walking the table in-order */ |
|---|
| 8271 | 8307 | for (entry = &is_table[0]; entry->is_name; entry++) { |
|---|
| 8272 | | - if (source < entry->end) { |
|---|
| 8308 | + if (source <= entry->end) { |
|---|
| 8273 | 8309 | trace_hfi1_interrupt(dd, entry, source); |
|---|
| 8274 | 8310 | entry->is_int(dd, source - entry->start); |
|---|
| 8275 | 8311 | return; |
|---|
| .. | .. |
|---|
| 8288 | 8324 | * context DATA IRQs are threaded and are not supported by this handler. |
|---|
| 8289 | 8325 | * |
|---|
| 8290 | 8326 | */ |
|---|
| 8291 | | -static irqreturn_t general_interrupt(int irq, void *data) |
|---|
| 8327 | +irqreturn_t general_interrupt(int irq, void *data) |
|---|
| 8292 | 8328 | { |
|---|
| 8293 | 8329 | struct hfi1_devdata *dd = data; |
|---|
| 8294 | 8330 | u64 regs[CCE_NUM_INT_CSRS]; |
|---|
| .. | .. |
|---|
| 8321 | 8357 | return handled; |
|---|
| 8322 | 8358 | } |
|---|
| 8323 | 8359 | |
|---|
| 8324 | | -static irqreturn_t sdma_interrupt(int irq, void *data) |
|---|
| 8360 | +irqreturn_t sdma_interrupt(int irq, void *data) |
|---|
| 8325 | 8361 | { |
|---|
| 8326 | 8362 | struct sdma_engine *sde = data; |
|---|
| 8327 | 8363 | struct hfi1_devdata *dd = sde->dd; |
|---|
| .. | .. |
|---|
| 8364 | 8400 | struct hfi1_devdata *dd = rcd->dd; |
|---|
| 8365 | 8401 | u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg); |
|---|
| 8366 | 8402 | |
|---|
| 8367 | | - mmiowb(); /* make sure everything before is written */ |
|---|
| 8368 | 8403 | write_csr(dd, addr, rcd->imask); |
|---|
| 8369 | 8404 | /* force the above write on the chip and get a value back */ |
|---|
| 8370 | 8405 | (void)read_csr(dd, addr); |
|---|
| .. | .. |
|---|
| 8389 | 8424 | static inline int check_packet_present(struct hfi1_ctxtdata *rcd) |
|---|
| 8390 | 8425 | { |
|---|
| 8391 | 8426 | u32 tail; |
|---|
| 8392 | | - int present; |
|---|
| 8393 | 8427 | |
|---|
| 8394 | | - if (!rcd->rcvhdrtail_kvaddr) |
|---|
| 8395 | | - present = (rcd->seq_cnt == |
|---|
| 8396 | | - rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd)))); |
|---|
| 8397 | | - else /* is RDMA rtail */ |
|---|
| 8398 | | - present = (rcd->head != get_rcvhdrtail(rcd)); |
|---|
| 8399 | | - |
|---|
| 8400 | | - if (present) |
|---|
| 8428 | + if (hfi1_packet_present(rcd)) |
|---|
| 8401 | 8429 | return 1; |
|---|
| 8402 | 8430 | |
|---|
| 8403 | 8431 | /* fall back to a CSR read, correct indpendent of DMA_RTAIL */ |
|---|
| 8404 | 8432 | tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); |
|---|
| 8405 | | - return rcd->head != tail; |
|---|
| 8433 | + return hfi1_rcd_head(rcd) != tail; |
|---|
| 8434 | +} |
|---|
| 8435 | + |
|---|
| 8436 | +/** |
|---|
| 8437 | + * Common code for receive contexts interrupt handlers. |
|---|
| 8438 | + * Update traces, increment kernel IRQ counter and |
|---|
| 8439 | + * setup ASPM when needed. |
|---|
| 8440 | + */ |
|---|
| 8441 | +static void receive_interrupt_common(struct hfi1_ctxtdata *rcd) |
|---|
| 8442 | +{ |
|---|
| 8443 | + struct hfi1_devdata *dd = rcd->dd; |
|---|
| 8444 | + |
|---|
| 8445 | + trace_hfi1_receive_interrupt(dd, rcd); |
|---|
| 8446 | + this_cpu_inc(*dd->int_counter); |
|---|
| 8447 | + aspm_ctx_disable(rcd); |
|---|
| 8448 | +} |
|---|
| 8449 | + |
|---|
| 8450 | +/** |
|---|
| 8451 | + * __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt |
|---|
| 8452 | + * when there are packets present in the queue. When calling |
|---|
| 8453 | + * with interrupts enabled please use hfi1_rcd_eoi_intr. |
|---|
| 8454 | + * |
|---|
| 8455 | + * @rcd: valid receive context |
|---|
| 8456 | + */ |
|---|
| 8457 | +static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd) |
|---|
| 8458 | +{ |
|---|
| 8459 | + if (!rcd->rcvhdrq) |
|---|
| 8460 | + return; |
|---|
| 8461 | + clear_recv_intr(rcd); |
|---|
| 8462 | + if (check_packet_present(rcd)) |
|---|
| 8463 | + force_recv_intr(rcd); |
|---|
| 8464 | +} |
|---|
| 8465 | + |
|---|
| 8466 | +/** |
|---|
| 8467 | + * hfi1_rcd_eoi_intr() - End of Interrupt processing action |
|---|
| 8468 | + * |
|---|
| 8469 | + * @rcd: Ptr to hfi1_ctxtdata of receive context |
|---|
| 8470 | + * |
|---|
| 8471 | + * Hold IRQs so we can safely clear the interrupt and |
|---|
| 8472 | + * recheck for a packet that may have arrived after the previous |
|---|
| 8473 | + * check and the interrupt clear. If a packet arrived, force another |
|---|
| 8474 | + * interrupt. This routine can be called at the end of receive packet |
|---|
| 8475 | + * processing in interrupt service routines, interrupt service thread |
|---|
| 8476 | + * and softirqs |
|---|
| 8477 | + */ |
|---|
| 8478 | +static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd) |
|---|
| 8479 | +{ |
|---|
| 8480 | + unsigned long flags; |
|---|
| 8481 | + |
|---|
| 8482 | + local_irq_save(flags); |
|---|
| 8483 | + __hfi1_rcd_eoi_intr(rcd); |
|---|
| 8484 | + local_irq_restore(flags); |
|---|
| 8485 | +} |
|---|
| 8486 | + |
|---|
| 8487 | +/** |
|---|
| 8488 | + * hfi1_netdev_rx_napi - napi poll function to move eoi inline |
|---|
| 8489 | + * @napi - pointer to napi object |
|---|
| 8490 | + * @budget - netdev budget |
|---|
| 8491 | + */ |
|---|
| 8492 | +int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget) |
|---|
| 8493 | +{ |
|---|
| 8494 | + struct hfi1_netdev_rxq *rxq = container_of(napi, |
|---|
| 8495 | + struct hfi1_netdev_rxq, napi); |
|---|
| 8496 | + struct hfi1_ctxtdata *rcd = rxq->rcd; |
|---|
| 8497 | + int work_done = 0; |
|---|
| 8498 | + |
|---|
| 8499 | + work_done = rcd->do_interrupt(rcd, budget); |
|---|
| 8500 | + |
|---|
| 8501 | + if (work_done < budget) { |
|---|
| 8502 | + napi_complete_done(napi, work_done); |
|---|
| 8503 | + hfi1_rcd_eoi_intr(rcd); |
|---|
| 8504 | + } |
|---|
| 8505 | + |
|---|
| 8506 | + return work_done; |
|---|
| 8507 | +} |
|---|
| 8508 | + |
|---|
| 8509 | +/* Receive packet napi handler for netdevs VNIC and AIP */ |
|---|
| 8510 | +irqreturn_t receive_context_interrupt_napi(int irq, void *data) |
|---|
| 8511 | +{ |
|---|
| 8512 | + struct hfi1_ctxtdata *rcd = data; |
|---|
| 8513 | + |
|---|
| 8514 | + receive_interrupt_common(rcd); |
|---|
| 8515 | + |
|---|
| 8516 | + if (likely(rcd->napi)) { |
|---|
| 8517 | + if (likely(napi_schedule_prep(rcd->napi))) |
|---|
| 8518 | + __napi_schedule_irqoff(rcd->napi); |
|---|
| 8519 | + else |
|---|
| 8520 | + __hfi1_rcd_eoi_intr(rcd); |
|---|
| 8521 | + } else { |
|---|
| 8522 | + WARN_ONCE(1, "Napi IRQ handler without napi set up ctxt=%d\n", |
|---|
| 8523 | + rcd->ctxt); |
|---|
| 8524 | + __hfi1_rcd_eoi_intr(rcd); |
|---|
| 8525 | + } |
|---|
| 8526 | + |
|---|
| 8527 | + return IRQ_HANDLED; |
|---|
| 8406 | 8528 | } |
|---|
| 8407 | 8529 | |
|---|
| 8408 | 8530 | /* |
|---|
| .. | .. |
|---|
| 8413 | 8535 | * invoked) is finished. The intent is to avoid extra interrupts while we |
|---|
| 8414 | 8536 | * are processing packets anyway. |
|---|
| 8415 | 8537 | */ |
|---|
| 8416 | | -static irqreturn_t receive_context_interrupt(int irq, void *data) |
|---|
| 8538 | +irqreturn_t receive_context_interrupt(int irq, void *data) |
|---|
| 8417 | 8539 | { |
|---|
| 8418 | 8540 | struct hfi1_ctxtdata *rcd = data; |
|---|
| 8419 | | - struct hfi1_devdata *dd = rcd->dd; |
|---|
| 8420 | 8541 | int disposition; |
|---|
| 8421 | | - int present; |
|---|
| 8422 | 8542 | |
|---|
| 8423 | | - trace_hfi1_receive_interrupt(dd, rcd); |
|---|
| 8424 | | - this_cpu_inc(*dd->int_counter); |
|---|
| 8425 | | - aspm_ctx_disable(rcd); |
|---|
| 8543 | + receive_interrupt_common(rcd); |
|---|
| 8426 | 8544 | |
|---|
| 8427 | 8545 | /* receive interrupt remains blocked while processing packets */ |
|---|
| 8428 | 8546 | disposition = rcd->do_interrupt(rcd, 0); |
|---|
| .. | .. |
|---|
| 8435 | 8553 | if (disposition == RCV_PKT_LIMIT) |
|---|
| 8436 | 8554 | return IRQ_WAKE_THREAD; |
|---|
| 8437 | 8555 | |
|---|
| 8438 | | - /* |
|---|
| 8439 | | - * The packet processor detected no more packets. Clear the receive |
|---|
| 8440 | | - * interrupt and recheck for a packet packet that may have arrived |
|---|
| 8441 | | - * after the previous check and interrupt clear. If a packet arrived, |
|---|
| 8442 | | - * force another interrupt. |
|---|
| 8443 | | - */ |
|---|
| 8444 | | - clear_recv_intr(rcd); |
|---|
| 8445 | | - present = check_packet_present(rcd); |
|---|
| 8446 | | - if (present) |
|---|
| 8447 | | - force_recv_intr(rcd); |
|---|
| 8448 | | - |
|---|
| 8556 | + __hfi1_rcd_eoi_intr(rcd); |
|---|
| 8449 | 8557 | return IRQ_HANDLED; |
|---|
| 8450 | 8558 | } |
|---|
| 8451 | 8559 | |
|---|
| .. | .. |
|---|
| 8453 | 8561 | * Receive packet thread handler. This expects to be invoked with the |
|---|
| 8454 | 8562 | * receive interrupt still blocked. |
|---|
| 8455 | 8563 | */ |
|---|
| 8456 | | -static irqreturn_t receive_context_thread(int irq, void *data) |
|---|
| 8564 | +irqreturn_t receive_context_thread(int irq, void *data) |
|---|
| 8457 | 8565 | { |
|---|
| 8458 | 8566 | struct hfi1_ctxtdata *rcd = data; |
|---|
| 8459 | | - int present; |
|---|
| 8460 | 8567 | |
|---|
| 8461 | 8568 | /* receive interrupt is still blocked from the IRQ handler */ |
|---|
| 8462 | 8569 | (void)rcd->do_interrupt(rcd, 1); |
|---|
| 8463 | 8570 | |
|---|
| 8464 | | - /* |
|---|
| 8465 | | - * The packet processor will only return if it detected no more |
|---|
| 8466 | | - * packets. Hold IRQs here so we can safely clear the interrupt and |
|---|
| 8467 | | - * recheck for a packet that may have arrived after the previous |
|---|
| 8468 | | - * check and the interrupt clear. If a packet arrived, force another |
|---|
| 8469 | | - * interrupt. |
|---|
| 8470 | | - */ |
|---|
| 8471 | | - local_irq_disable(); |
|---|
| 8472 | | - clear_recv_intr(rcd); |
|---|
| 8473 | | - present = check_packet_present(rcd); |
|---|
| 8474 | | - if (present) |
|---|
| 8475 | | - force_recv_intr(rcd); |
|---|
| 8476 | | - local_irq_enable(); |
|---|
| 8571 | + hfi1_rcd_eoi_intr(rcd); |
|---|
| 8477 | 8572 | |
|---|
| 8478 | 8573 | return IRQ_HANDLED; |
|---|
| 8479 | 8574 | } |
|---|
| .. | .. |
|---|
| 9663 | 9758 | } |
|---|
| 9664 | 9759 | } |
|---|
| 9665 | 9760 | |
|---|
| 9666 | | -static void init_qsfp_int(struct hfi1_devdata *dd) |
|---|
| 9761 | +void init_qsfp_int(struct hfi1_devdata *dd) |
|---|
| 9667 | 9762 | { |
|---|
| 9668 | 9763 | struct hfi1_pportdata *ppd = dd->pport; |
|---|
| 9669 | | - u64 qsfp_mask, cce_int_mask; |
|---|
| 9670 | | - const int qsfp1_int_smask = QSFP1_INT % 64; |
|---|
| 9671 | | - const int qsfp2_int_smask = QSFP2_INT % 64; |
|---|
| 9672 | | - |
|---|
| 9673 | | - /* |
|---|
| 9674 | | - * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0 |
|---|
| 9675 | | - * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR, |
|---|
| 9676 | | - * therefore just one of QSFP1_INT/QSFP2_INT can be used to find |
|---|
| 9677 | | - * the index of the appropriate CSR in the CCEIntMask CSR array |
|---|
| 9678 | | - */ |
|---|
| 9679 | | - cce_int_mask = read_csr(dd, CCE_INT_MASK + |
|---|
| 9680 | | - (8 * (QSFP1_INT / 64))); |
|---|
| 9681 | | - if (dd->hfi1_id) { |
|---|
| 9682 | | - cce_int_mask &= ~((u64)1 << qsfp1_int_smask); |
|---|
| 9683 | | - write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)), |
|---|
| 9684 | | - cce_int_mask); |
|---|
| 9685 | | - } else { |
|---|
| 9686 | | - cce_int_mask &= ~((u64)1 << qsfp2_int_smask); |
|---|
| 9687 | | - write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)), |
|---|
| 9688 | | - cce_int_mask); |
|---|
| 9689 | | - } |
|---|
| 9764 | + u64 qsfp_mask; |
|---|
| 9690 | 9765 | |
|---|
| 9691 | 9766 | qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N); |
|---|
| 9692 | 9767 | /* Clear current status to avoid spurious interrupts */ |
|---|
| .. | .. |
|---|
| 9703 | 9778 | write_csr(dd, |
|---|
| 9704 | 9779 | dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT, |
|---|
| 9705 | 9780 | qsfp_mask); |
|---|
| 9781 | + |
|---|
| 9782 | + /* Enable the appropriate QSFP IRQ source */ |
|---|
| 9783 | + if (!dd->hfi1_id) |
|---|
| 9784 | + set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true); |
|---|
| 9785 | + else |
|---|
| 9786 | + set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true); |
|---|
| 9706 | 9787 | } |
|---|
| 9707 | 9788 | |
|---|
| 9708 | 9789 | /* |
|---|
| .. | .. |
|---|
| 10058 | 10139 | * the first kernel context would have been allocated by now so |
|---|
| 10059 | 10140 | * we are guaranteed a valid value. |
|---|
| 10060 | 10141 | */ |
|---|
| 10061 | | - return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2; |
|---|
| 10142 | + return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2; |
|---|
| 10062 | 10143 | } |
|---|
| 10063 | 10144 | |
|---|
| 10064 | 10145 | /* |
|---|
| .. | .. |
|---|
| 10103 | 10184 | thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50), |
|---|
| 10104 | 10185 | sc_mtu_to_threshold(dd->vld[i].sc, |
|---|
| 10105 | 10186 | dd->vld[i].mtu, |
|---|
| 10106 | | - dd->rcd[0]->rcvhdrqentsize)); |
|---|
| 10187 | + get_hdrqentsize(dd->rcd[0]))); |
|---|
| 10107 | 10188 | for (j = 0; j < INIT_SC_PER_VL; j++) |
|---|
| 10108 | 10189 | sc_set_cr_threshold( |
|---|
| 10109 | 10190 | pio_select_send_context_vl(dd, j, i), |
|---|
| .. | .. |
|---|
| 11817 | 11898 | << RCV_EGR_INDEX_HEAD_HEAD_SHIFT; |
|---|
| 11818 | 11899 | write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg); |
|---|
| 11819 | 11900 | } |
|---|
| 11820 | | - mmiowb(); |
|---|
| 11821 | 11901 | reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) | |
|---|
| 11822 | 11902 | (((u64)hd & RCV_HDR_HEAD_HEAD_MASK) |
|---|
| 11823 | 11903 | << RCV_HDR_HEAD_HEAD_SHIFT); |
|---|
| 11824 | 11904 | write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg); |
|---|
| 11825 | | - mmiowb(); |
|---|
| 11826 | 11905 | } |
|---|
| 11827 | 11906 | |
|---|
| 11828 | 11907 | u32 hdrqempty(struct hfi1_ctxtdata *rcd) |
|---|
| .. | .. |
|---|
| 11832 | 11911 | head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) |
|---|
| 11833 | 11912 | & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT; |
|---|
| 11834 | 11913 | |
|---|
| 11835 | | - if (rcd->rcvhdrtail_kvaddr) |
|---|
| 11914 | + if (hfi1_rcvhdrtail_kvaddr(rcd)) |
|---|
| 11836 | 11915 | tail = get_rcvhdrtail(rcd); |
|---|
| 11837 | 11916 | else |
|---|
| 11838 | 11917 | tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); |
|---|
| .. | .. |
|---|
| 11876 | 11955 | return 0x1; /* if invalid, go with the minimum size */ |
|---|
| 11877 | 11956 | } |
|---|
| 11878 | 11957 | |
|---|
| 11958 | +/** |
|---|
| 11959 | + * encode_rcv_header_entry_size - return chip specific encoding for size |
|---|
| 11960 | + * @size: size in dwords |
|---|
| 11961 | + * |
|---|
| 11962 | + * Convert a receive header entry size to the encoding used in the CSR. |
|---|
| 11963 | + * |
|---|
| 11964 | + * Return a zero if the given size is invalid, otherwise the encoding. |
|---|
| 11965 | + */ |
|---|
| 11966 | +u8 encode_rcv_header_entry_size(u8 size) |
|---|
| 11967 | +{ |
|---|
| 11968 | + /* there are only 3 valid receive header entry sizes */ |
|---|
| 11969 | + if (size == 2) |
|---|
| 11970 | + return 1; |
|---|
| 11971 | + if (size == 16) |
|---|
| 11972 | + return 2; |
|---|
| 11973 | + if (size == 32) |
|---|
| 11974 | + return 4; |
|---|
| 11975 | + return 0; /* invalid */ |
|---|
| 11976 | +} |
|---|
| 11977 | + |
|---|
| 11978 | +/** |
|---|
| 11979 | + * hfi1_validate_rcvhdrcnt - validate hdrcnt |
|---|
| 11980 | + * @dd: the device data |
|---|
| 11981 | + * @thecnt: the header count |
|---|
| 11982 | + */ |
|---|
| 11983 | +int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt) |
|---|
| 11984 | +{ |
|---|
| 11985 | + if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { |
|---|
| 11986 | + dd_dev_err(dd, "Receive header queue count too small\n"); |
|---|
| 11987 | + return -EINVAL; |
|---|
| 11988 | + } |
|---|
| 11989 | + |
|---|
| 11990 | + if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { |
|---|
| 11991 | + dd_dev_err(dd, |
|---|
| 11992 | + "Receive header queue count cannot be greater than %u\n", |
|---|
| 11993 | + HFI1_MAX_HDRQ_EGRBUF_CNT); |
|---|
| 11994 | + return -EINVAL; |
|---|
| 11995 | + } |
|---|
| 11996 | + |
|---|
| 11997 | + if (thecnt % HDRQ_INCREMENT) { |
|---|
| 11998 | + dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n", |
|---|
| 11999 | + thecnt, HDRQ_INCREMENT); |
|---|
| 12000 | + return -EINVAL; |
|---|
| 12001 | + } |
|---|
| 12002 | + |
|---|
| 12003 | + return 0; |
|---|
| 12004 | +} |
|---|
| 12005 | + |
|---|
| 12006 | +/** |
|---|
| 12007 | + * set_hdrq_regs - set header queue registers for context |
|---|
| 12008 | + * @dd: the device data |
|---|
| 12009 | + * @ctxt: the context |
|---|
| 12010 | + * @entsize: the dword entry size |
|---|
| 12011 | + * @hdrcnt: the number of header entries |
|---|
| 12012 | + */ |
|---|
| 12013 | +void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt) |
|---|
| 12014 | +{ |
|---|
| 12015 | + u64 reg; |
|---|
| 12016 | + |
|---|
| 12017 | + reg = (((u64)hdrcnt >> HDRQ_SIZE_SHIFT) & RCV_HDR_CNT_CNT_MASK) << |
|---|
| 12018 | + RCV_HDR_CNT_CNT_SHIFT; |
|---|
| 12019 | + write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, reg); |
|---|
| 12020 | + reg = ((u64)encode_rcv_header_entry_size(entsize) & |
|---|
| 12021 | + RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) << |
|---|
| 12022 | + RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT; |
|---|
| 12023 | + write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, reg); |
|---|
| 12024 | + reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) << |
|---|
| 12025 | + RCV_HDR_SIZE_HDR_SIZE_SHIFT; |
|---|
| 12026 | + write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, reg); |
|---|
| 12027 | + |
|---|
| 12028 | + /* |
|---|
| 12029 | + * Program dummy tail address for every receive context |
|---|
| 12030 | + * before enabling any receive context |
|---|
| 12031 | + */ |
|---|
| 12032 | + write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, |
|---|
| 12033 | + dd->rcvhdrtail_dummy_dma); |
|---|
| 12034 | +} |
|---|
| 12035 | + |
|---|
| 11879 | 12036 | void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, |
|---|
| 11880 | 12037 | struct hfi1_ctxtdata *rcd) |
|---|
| 11881 | 12038 | { |
|---|
| .. | .. |
|---|
| 11897 | 12054 | /* reset the tail and hdr addresses, and sequence count */ |
|---|
| 11898 | 12055 | write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR, |
|---|
| 11899 | 12056 | rcd->rcvhdrq_dma); |
|---|
| 11900 | | - if (rcd->rcvhdrtail_kvaddr) |
|---|
| 12057 | + if (hfi1_rcvhdrtail_kvaddr(rcd)) |
|---|
| 11901 | 12058 | write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, |
|---|
| 11902 | 12059 | rcd->rcvhdrqtailaddr_dma); |
|---|
| 11903 | | - rcd->seq_cnt = 1; |
|---|
| 12060 | + hfi1_set_seq_cnt(rcd, 1); |
|---|
| 11904 | 12061 | |
|---|
| 11905 | 12062 | /* reset the cached receive header queue head value */ |
|---|
| 11906 | | - rcd->head = 0; |
|---|
| 12063 | + hfi1_set_rcd_head(rcd, 0); |
|---|
| 11907 | 12064 | |
|---|
| 11908 | 12065 | /* |
|---|
| 11909 | 12066 | * Zero the receive header queue so we don't get false |
|---|
| .. | .. |
|---|
| 11973 | 12130 | |
|---|
| 11974 | 12131 | rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK; |
|---|
| 11975 | 12132 | } |
|---|
| 11976 | | - if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) |
|---|
| 12133 | + if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) { |
|---|
| 12134 | + set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, |
|---|
| 12135 | + IS_RCVAVAIL_START + rcd->ctxt, true); |
|---|
| 11977 | 12136 | rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK; |
|---|
| 11978 | | - if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) |
|---|
| 12137 | + } |
|---|
| 12138 | + if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) { |
|---|
| 12139 | + set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, |
|---|
| 12140 | + IS_RCVAVAIL_START + rcd->ctxt, false); |
|---|
| 11979 | 12141 | rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK; |
|---|
| 11980 | | - if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr) |
|---|
| 12142 | + } |
|---|
| 12143 | + if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && hfi1_rcvhdrtail_kvaddr(rcd)) |
|---|
| 11981 | 12144 | rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK; |
|---|
| 11982 | 12145 | if (op & HFI1_RCVCTRL_TAILUPD_DIS) { |
|---|
| 11983 | 12146 | /* See comment on RcvCtxtCtrl.TailUpd above */ |
|---|
| .. | .. |
|---|
| 12006 | 12169 | rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK; |
|---|
| 12007 | 12170 | if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS) |
|---|
| 12008 | 12171 | rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK; |
|---|
| 12172 | + if (op & HFI1_RCVCTRL_URGENT_ENB) |
|---|
| 12173 | + set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, |
|---|
| 12174 | + IS_RCVURGENT_START + rcd->ctxt, true); |
|---|
| 12175 | + if (op & HFI1_RCVCTRL_URGENT_DIS) |
|---|
| 12176 | + set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, |
|---|
| 12177 | + IS_RCVURGENT_START + rcd->ctxt, false); |
|---|
| 12178 | + |
|---|
| 12009 | 12179 | hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl); |
|---|
| 12010 | 12180 | write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl); |
|---|
| 12011 | 12181 | |
|---|
| .. | .. |
|---|
| 12178 | 12348 | |
|---|
| 12179 | 12349 | if (dd->synth_stats_timer.function) |
|---|
| 12180 | 12350 | del_timer_sync(&dd->synth_stats_timer); |
|---|
| 12351 | + cancel_work_sync(&dd->update_cntr_work); |
|---|
| 12181 | 12352 | ppd = (struct hfi1_pportdata *)(dd + 1); |
|---|
| 12182 | 12353 | for (i = 0; i < dd->num_pports; i++, ppd++) { |
|---|
| 12183 | 12354 | kfree(ppd->cntrs); |
|---|
| .. | .. |
|---|
| 12711 | 12882 | static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate) |
|---|
| 12712 | 12883 | { |
|---|
| 12713 | 12884 | switch (chip_lstate) { |
|---|
| 12714 | | - default: |
|---|
| 12715 | | - dd_dev_err(dd, |
|---|
| 12716 | | - "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n", |
|---|
| 12717 | | - chip_lstate); |
|---|
| 12718 | | - /* fall through */ |
|---|
| 12719 | 12885 | case LSTATE_DOWN: |
|---|
| 12720 | 12886 | return IB_PORT_DOWN; |
|---|
| 12721 | 12887 | case LSTATE_INIT: |
|---|
| .. | .. |
|---|
| 12724 | 12890 | return IB_PORT_ARMED; |
|---|
| 12725 | 12891 | case LSTATE_ACTIVE: |
|---|
| 12726 | 12892 | return IB_PORT_ACTIVE; |
|---|
| 12893 | + default: |
|---|
| 12894 | + dd_dev_err(dd, |
|---|
| 12895 | + "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n", |
|---|
| 12896 | + chip_lstate); |
|---|
| 12897 | + return IB_PORT_DOWN; |
|---|
| 12727 | 12898 | } |
|---|
| 12728 | 12899 | } |
|---|
| 12729 | 12900 | |
|---|
| .. | .. |
|---|
| 12731 | 12902 | { |
|---|
| 12732 | 12903 | /* look at the HFI meta-states only */ |
|---|
| 12733 | 12904 | switch (chip_pstate & 0xf0) { |
|---|
| 12734 | | - default: |
|---|
| 12735 | | - dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n", |
|---|
| 12736 | | - chip_pstate); |
|---|
| 12737 | | - /* fall through */ |
|---|
| 12738 | 12905 | case PLS_DISABLED: |
|---|
| 12739 | 12906 | return IB_PORTPHYSSTATE_DISABLED; |
|---|
| 12740 | 12907 | case PLS_OFFLINE: |
|---|
| .. | .. |
|---|
| 12747 | 12914 | return IB_PORTPHYSSTATE_LINKUP; |
|---|
| 12748 | 12915 | case PLS_PHYTEST: |
|---|
| 12749 | 12916 | return IB_PORTPHYSSTATE_PHY_TEST; |
|---|
| 12917 | + default: |
|---|
| 12918 | + dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n", |
|---|
| 12919 | + chip_pstate); |
|---|
| 12920 | + return IB_PORTPHYSSTATE_DISABLED; |
|---|
| 12750 | 12921 | } |
|---|
| 12751 | 12922 | } |
|---|
| 12752 | 12923 | |
|---|
| .. | .. |
|---|
| 13038 | 13209 | return ret; |
|---|
| 13039 | 13210 | } |
|---|
| 13040 | 13211 | |
|---|
| 13041 | | -/** |
|---|
| 13042 | | - * get_int_mask - get 64 bit int mask |
|---|
| 13043 | | - * @dd - the devdata |
|---|
| 13044 | | - * @i - the csr (relative to CCE_INT_MASK) |
|---|
| 13045 | | - * |
|---|
| 13046 | | - * Returns the mask with the urgent interrupt mask |
|---|
| 13047 | | - * bit clear for kernel receive contexts. |
|---|
| 13048 | | - */ |
|---|
| 13049 | | -static u64 get_int_mask(struct hfi1_devdata *dd, u32 i) |
|---|
| 13050 | | -{ |
|---|
| 13051 | | - u64 mask = U64_MAX; /* default to no change */ |
|---|
| 13052 | | - |
|---|
| 13053 | | - if (i >= (IS_RCVURGENT_START / 64) && i < (IS_RCVURGENT_END / 64)) { |
|---|
| 13054 | | - int j = (i - (IS_RCVURGENT_START / 64)) * 64; |
|---|
| 13055 | | - int k = !j ? IS_RCVURGENT_START % 64 : 0; |
|---|
| 13056 | | - |
|---|
| 13057 | | - if (j) |
|---|
| 13058 | | - j -= IS_RCVURGENT_START % 64; |
|---|
| 13059 | | - /* j = 0..dd->first_dyn_alloc_ctxt - 1,k = 0..63 */ |
|---|
| 13060 | | - for (; j < dd->first_dyn_alloc_ctxt && k < 64; j++, k++) |
|---|
| 13061 | | - /* convert to bit in mask and clear */ |
|---|
| 13062 | | - mask &= ~BIT_ULL(k); |
|---|
| 13063 | | - } |
|---|
| 13064 | | - return mask; |
|---|
| 13065 | | -} |
|---|
| 13066 | | - |
|---|
| 13067 | 13212 | /* ========================================================================= */ |
|---|
| 13068 | 13213 | |
|---|
| 13069 | | -/* |
|---|
| 13070 | | - * Enable/disable chip from delivering interrupts. |
|---|
| 13214 | +/** |
|---|
| 13215 | + * read_mod_write() - Calculate the IRQ register index and set/clear the bits |
|---|
| 13216 | + * @dd: valid devdata |
|---|
| 13217 | + * @src: IRQ source to determine register index from |
|---|
| 13218 | + * @bits: the bits to set or clear |
|---|
| 13219 | + * @set: true == set the bits, false == clear the bits |
|---|
| 13220 | + * |
|---|
| 13071 | 13221 | */ |
|---|
| 13072 | | -void set_intr_state(struct hfi1_devdata *dd, u32 enable) |
|---|
| 13222 | +static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits, |
|---|
| 13223 | + bool set) |
|---|
| 13073 | 13224 | { |
|---|
| 13074 | | - int i; |
|---|
| 13225 | + u64 reg; |
|---|
| 13226 | + u16 idx = src / BITS_PER_REGISTER; |
|---|
| 13075 | 13227 | |
|---|
| 13076 | | - /* |
|---|
| 13077 | | - * In HFI, the mask needs to be 1 to allow interrupts. |
|---|
| 13078 | | - */ |
|---|
| 13079 | | - if (enable) { |
|---|
| 13080 | | - /* enable all interrupts but urgent on kernel contexts */ |
|---|
| 13081 | | - for (i = 0; i < CCE_NUM_INT_CSRS; i++) { |
|---|
| 13082 | | - u64 mask = get_int_mask(dd, i); |
|---|
| 13228 | + spin_lock(&dd->irq_src_lock); |
|---|
| 13229 | + reg = read_csr(dd, CCE_INT_MASK + (8 * idx)); |
|---|
| 13230 | + if (set) |
|---|
| 13231 | + reg |= bits; |
|---|
| 13232 | + else |
|---|
| 13233 | + reg &= ~bits; |
|---|
| 13234 | + write_csr(dd, CCE_INT_MASK + (8 * idx), reg); |
|---|
| 13235 | + spin_unlock(&dd->irq_src_lock); |
|---|
| 13236 | +} |
|---|
| 13083 | 13237 | |
|---|
| 13084 | | - write_csr(dd, CCE_INT_MASK + (8 * i), mask); |
|---|
| 13238 | +/** |
|---|
| 13239 | + * set_intr_bits() - Enable/disable a range (one or more) IRQ sources |
|---|
| 13240 | + * @dd: valid devdata |
|---|
| 13241 | + * @first: first IRQ source to set/clear |
|---|
| 13242 | + * @last: last IRQ source (inclusive) to set/clear |
|---|
| 13243 | + * @set: true == set the bits, false == clear the bits |
|---|
| 13244 | + * |
|---|
| 13245 | + * If first == last, set the exact source. |
|---|
| 13246 | + */ |
|---|
| 13247 | +int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set) |
|---|
| 13248 | +{ |
|---|
| 13249 | + u64 bits = 0; |
|---|
| 13250 | + u64 bit; |
|---|
| 13251 | + u16 src; |
|---|
| 13252 | + |
|---|
| 13253 | + if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES) |
|---|
| 13254 | + return -EINVAL; |
|---|
| 13255 | + |
|---|
| 13256 | + if (last < first) |
|---|
| 13257 | + return -ERANGE; |
|---|
| 13258 | + |
|---|
| 13259 | + for (src = first; src <= last; src++) { |
|---|
| 13260 | + bit = src % BITS_PER_REGISTER; |
|---|
| 13261 | + /* wrapped to next register? */ |
|---|
| 13262 | + if (!bit && bits) { |
|---|
| 13263 | + read_mod_write(dd, src - 1, bits, set); |
|---|
| 13264 | + bits = 0; |
|---|
| 13085 | 13265 | } |
|---|
| 13086 | | - |
|---|
| 13087 | | - init_qsfp_int(dd); |
|---|
| 13088 | | - } else { |
|---|
| 13089 | | - for (i = 0; i < CCE_NUM_INT_CSRS; i++) |
|---|
| 13090 | | - write_csr(dd, CCE_INT_MASK + (8 * i), 0ull); |
|---|
| 13266 | + bits |= BIT_ULL(bit); |
|---|
| 13091 | 13267 | } |
|---|
| 13268 | + read_mod_write(dd, last, bits, set); |
|---|
| 13269 | + |
|---|
| 13270 | + return 0; |
|---|
| 13092 | 13271 | } |
|---|
| 13093 | 13272 | |
|---|
| 13094 | 13273 | /* |
|---|
| 13095 | 13274 | * Clear all interrupt sources on the chip. |
|---|
| 13096 | 13275 | */ |
|---|
| 13097 | | -static void clear_all_interrupts(struct hfi1_devdata *dd) |
|---|
| 13276 | +void clear_all_interrupts(struct hfi1_devdata *dd) |
|---|
| 13098 | 13277 | { |
|---|
| 13099 | 13278 | int i; |
|---|
| 13100 | 13279 | |
|---|
| .. | .. |
|---|
| 13118 | 13297 | write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0); |
|---|
| 13119 | 13298 | } |
|---|
| 13120 | 13299 | |
|---|
| 13121 | | -/** |
|---|
| 13122 | | - * hfi1_clean_up_interrupts() - Free all IRQ resources |
|---|
| 13123 | | - * @dd: valid device data data structure |
|---|
| 13124 | | - * |
|---|
| 13125 | | - * Free the MSIx and assoicated PCI resources, if they have been allocated. |
|---|
| 13126 | | - */ |
|---|
| 13127 | | -void hfi1_clean_up_interrupts(struct hfi1_devdata *dd) |
|---|
| 13128 | | -{ |
|---|
| 13129 | | - int i; |
|---|
| 13130 | | - struct hfi1_msix_entry *me = dd->msix_entries; |
|---|
| 13131 | | - |
|---|
| 13132 | | - /* remove irqs - must happen before disabling/turning off */ |
|---|
| 13133 | | - for (i = 0; i < dd->num_msix_entries; i++, me++) { |
|---|
| 13134 | | - if (!me->arg) /* => no irq, no affinity */ |
|---|
| 13135 | | - continue; |
|---|
| 13136 | | - hfi1_put_irq_affinity(dd, me); |
|---|
| 13137 | | - pci_free_irq(dd->pcidev, i, me->arg); |
|---|
| 13138 | | - } |
|---|
| 13139 | | - |
|---|
| 13140 | | - /* clean structures */ |
|---|
| 13141 | | - kfree(dd->msix_entries); |
|---|
| 13142 | | - dd->msix_entries = NULL; |
|---|
| 13143 | | - dd->num_msix_entries = 0; |
|---|
| 13144 | | - |
|---|
| 13145 | | - pci_free_irq_vectors(dd->pcidev); |
|---|
| 13146 | | -} |
|---|
| 13147 | | - |
|---|
| 13148 | 13300 | /* |
|---|
| 13149 | 13301 | * Remap the interrupt source from the general handler to the given MSI-X |
|---|
| 13150 | 13302 | * interrupt. |
|---|
| 13151 | 13303 | */ |
|---|
| 13152 | | -static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr) |
|---|
| 13304 | +void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr) |
|---|
| 13153 | 13305 | { |
|---|
| 13154 | 13306 | u64 reg; |
|---|
| 13155 | 13307 | int m, n; |
|---|
| .. | .. |
|---|
| 13173 | 13325 | write_csr(dd, CCE_INT_MAP + (8 * m), reg); |
|---|
| 13174 | 13326 | } |
|---|
| 13175 | 13327 | |
|---|
| 13176 | | -static void remap_sdma_interrupts(struct hfi1_devdata *dd, |
|---|
| 13177 | | - int engine, int msix_intr) |
|---|
| 13328 | +void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr) |
|---|
| 13178 | 13329 | { |
|---|
| 13179 | 13330 | /* |
|---|
| 13180 | 13331 | * SDMA engine interrupt sources grouped by type, rather than |
|---|
| .. | .. |
|---|
| 13183 | 13334 | * SDMAProgress |
|---|
| 13184 | 13335 | * SDMAIdle |
|---|
| 13185 | 13336 | */ |
|---|
| 13186 | | - remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine, |
|---|
| 13187 | | - msix_intr); |
|---|
| 13188 | | - remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine, |
|---|
| 13189 | | - msix_intr); |
|---|
| 13190 | | - remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine, |
|---|
| 13191 | | - msix_intr); |
|---|
| 13192 | | -} |
|---|
| 13193 | | - |
|---|
| 13194 | | -static int request_msix_irqs(struct hfi1_devdata *dd) |
|---|
| 13195 | | -{ |
|---|
| 13196 | | - int first_general, last_general; |
|---|
| 13197 | | - int first_sdma, last_sdma; |
|---|
| 13198 | | - int first_rx, last_rx; |
|---|
| 13199 | | - int i, ret = 0; |
|---|
| 13200 | | - |
|---|
| 13201 | | - /* calculate the ranges we are going to use */ |
|---|
| 13202 | | - first_general = 0; |
|---|
| 13203 | | - last_general = first_general + 1; |
|---|
| 13204 | | - first_sdma = last_general; |
|---|
| 13205 | | - last_sdma = first_sdma + dd->num_sdma; |
|---|
| 13206 | | - first_rx = last_sdma; |
|---|
| 13207 | | - last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts; |
|---|
| 13208 | | - |
|---|
| 13209 | | - /* VNIC MSIx interrupts get mapped when VNIC contexts are created */ |
|---|
| 13210 | | - dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues; |
|---|
| 13211 | | - |
|---|
| 13212 | | - /* |
|---|
| 13213 | | - * Sanity check - the code expects all SDMA chip source |
|---|
| 13214 | | - * interrupts to be in the same CSR, starting at bit 0. Verify |
|---|
| 13215 | | - * that this is true by checking the bit location of the start. |
|---|
| 13216 | | - */ |
|---|
| 13217 | | - BUILD_BUG_ON(IS_SDMA_START % 64); |
|---|
| 13218 | | - |
|---|
| 13219 | | - for (i = 0; i < dd->num_msix_entries; i++) { |
|---|
| 13220 | | - struct hfi1_msix_entry *me = &dd->msix_entries[i]; |
|---|
| 13221 | | - const char *err_info; |
|---|
| 13222 | | - irq_handler_t handler; |
|---|
| 13223 | | - irq_handler_t thread = NULL; |
|---|
| 13224 | | - void *arg = NULL; |
|---|
| 13225 | | - int idx; |
|---|
| 13226 | | - struct hfi1_ctxtdata *rcd = NULL; |
|---|
| 13227 | | - struct sdma_engine *sde = NULL; |
|---|
| 13228 | | - char name[MAX_NAME_SIZE]; |
|---|
| 13229 | | - |
|---|
| 13230 | | - /* obtain the arguments to pci_request_irq */ |
|---|
| 13231 | | - if (first_general <= i && i < last_general) { |
|---|
| 13232 | | - idx = i - first_general; |
|---|
| 13233 | | - handler = general_interrupt; |
|---|
| 13234 | | - arg = dd; |
|---|
| 13235 | | - snprintf(name, sizeof(name), |
|---|
| 13236 | | - DRIVER_NAME "_%d", dd->unit); |
|---|
| 13237 | | - err_info = "general"; |
|---|
| 13238 | | - me->type = IRQ_GENERAL; |
|---|
| 13239 | | - } else if (first_sdma <= i && i < last_sdma) { |
|---|
| 13240 | | - idx = i - first_sdma; |
|---|
| 13241 | | - sde = &dd->per_sdma[idx]; |
|---|
| 13242 | | - handler = sdma_interrupt; |
|---|
| 13243 | | - arg = sde; |
|---|
| 13244 | | - snprintf(name, sizeof(name), |
|---|
| 13245 | | - DRIVER_NAME "_%d sdma%d", dd->unit, idx); |
|---|
| 13246 | | - err_info = "sdma"; |
|---|
| 13247 | | - remap_sdma_interrupts(dd, idx, i); |
|---|
| 13248 | | - me->type = IRQ_SDMA; |
|---|
| 13249 | | - } else if (first_rx <= i && i < last_rx) { |
|---|
| 13250 | | - idx = i - first_rx; |
|---|
| 13251 | | - rcd = hfi1_rcd_get_by_index_safe(dd, idx); |
|---|
| 13252 | | - if (rcd) { |
|---|
| 13253 | | - /* |
|---|
| 13254 | | - * Set the interrupt register and mask for this |
|---|
| 13255 | | - * context's interrupt. |
|---|
| 13256 | | - */ |
|---|
| 13257 | | - rcd->ireg = (IS_RCVAVAIL_START + idx) / 64; |
|---|
| 13258 | | - rcd->imask = ((u64)1) << |
|---|
| 13259 | | - ((IS_RCVAVAIL_START + idx) % 64); |
|---|
| 13260 | | - handler = receive_context_interrupt; |
|---|
| 13261 | | - thread = receive_context_thread; |
|---|
| 13262 | | - arg = rcd; |
|---|
| 13263 | | - snprintf(name, sizeof(name), |
|---|
| 13264 | | - DRIVER_NAME "_%d kctxt%d", |
|---|
| 13265 | | - dd->unit, idx); |
|---|
| 13266 | | - err_info = "receive context"; |
|---|
| 13267 | | - remap_intr(dd, IS_RCVAVAIL_START + idx, i); |
|---|
| 13268 | | - me->type = IRQ_RCVCTXT; |
|---|
| 13269 | | - rcd->msix_intr = i; |
|---|
| 13270 | | - hfi1_rcd_put(rcd); |
|---|
| 13271 | | - } |
|---|
| 13272 | | - } else { |
|---|
| 13273 | | - /* not in our expected range - complain, then |
|---|
| 13274 | | - * ignore it |
|---|
| 13275 | | - */ |
|---|
| 13276 | | - dd_dev_err(dd, |
|---|
| 13277 | | - "Unexpected extra MSI-X interrupt %d\n", i); |
|---|
| 13278 | | - continue; |
|---|
| 13279 | | - } |
|---|
| 13280 | | - /* no argument, no interrupt */ |
|---|
| 13281 | | - if (!arg) |
|---|
| 13282 | | - continue; |
|---|
| 13283 | | - /* make sure the name is terminated */ |
|---|
| 13284 | | - name[sizeof(name) - 1] = 0; |
|---|
| 13285 | | - me->irq = pci_irq_vector(dd->pcidev, i); |
|---|
| 13286 | | - ret = pci_request_irq(dd->pcidev, i, handler, thread, arg, |
|---|
| 13287 | | - name); |
|---|
| 13288 | | - if (ret) { |
|---|
| 13289 | | - dd_dev_err(dd, |
|---|
| 13290 | | - "unable to allocate %s interrupt, irq %d, index %d, err %d\n", |
|---|
| 13291 | | - err_info, me->irq, idx, ret); |
|---|
| 13292 | | - return ret; |
|---|
| 13293 | | - } |
|---|
| 13294 | | - /* |
|---|
| 13295 | | - * assign arg after pci_request_irq call, so it will be |
|---|
| 13296 | | - * cleaned up |
|---|
| 13297 | | - */ |
|---|
| 13298 | | - me->arg = arg; |
|---|
| 13299 | | - |
|---|
| 13300 | | - ret = hfi1_get_irq_affinity(dd, me); |
|---|
| 13301 | | - if (ret) |
|---|
| 13302 | | - dd_dev_err(dd, "unable to pin IRQ %d\n", ret); |
|---|
| 13303 | | - } |
|---|
| 13304 | | - |
|---|
| 13305 | | - return ret; |
|---|
| 13306 | | -} |
|---|
| 13307 | | - |
|---|
| 13308 | | -void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd) |
|---|
| 13309 | | -{ |
|---|
| 13310 | | - int i; |
|---|
| 13311 | | - |
|---|
| 13312 | | - for (i = 0; i < dd->vnic.num_ctxt; i++) { |
|---|
| 13313 | | - struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i]; |
|---|
| 13314 | | - struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr]; |
|---|
| 13315 | | - |
|---|
| 13316 | | - synchronize_irq(me->irq); |
|---|
| 13317 | | - } |
|---|
| 13318 | | -} |
|---|
| 13319 | | - |
|---|
| 13320 | | -void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd) |
|---|
| 13321 | | -{ |
|---|
| 13322 | | - struct hfi1_devdata *dd = rcd->dd; |
|---|
| 13323 | | - struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr]; |
|---|
| 13324 | | - |
|---|
| 13325 | | - if (!me->arg) /* => no irq, no affinity */ |
|---|
| 13326 | | - return; |
|---|
| 13327 | | - |
|---|
| 13328 | | - hfi1_put_irq_affinity(dd, me); |
|---|
| 13329 | | - pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg); |
|---|
| 13330 | | - |
|---|
| 13331 | | - me->arg = NULL; |
|---|
| 13332 | | -} |
|---|
| 13333 | | - |
|---|
| 13334 | | -void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd) |
|---|
| 13335 | | -{ |
|---|
| 13336 | | - struct hfi1_devdata *dd = rcd->dd; |
|---|
| 13337 | | - struct hfi1_msix_entry *me; |
|---|
| 13338 | | - int idx = rcd->ctxt; |
|---|
| 13339 | | - void *arg = rcd; |
|---|
| 13340 | | - int ret; |
|---|
| 13341 | | - |
|---|
| 13342 | | - rcd->msix_intr = dd->vnic.msix_idx++; |
|---|
| 13343 | | - me = &dd->msix_entries[rcd->msix_intr]; |
|---|
| 13344 | | - |
|---|
| 13345 | | - /* |
|---|
| 13346 | | - * Set the interrupt register and mask for this |
|---|
| 13347 | | - * context's interrupt. |
|---|
| 13348 | | - */ |
|---|
| 13349 | | - rcd->ireg = (IS_RCVAVAIL_START + idx) / 64; |
|---|
| 13350 | | - rcd->imask = ((u64)1) << |
|---|
| 13351 | | - ((IS_RCVAVAIL_START + idx) % 64); |
|---|
| 13352 | | - me->type = IRQ_RCVCTXT; |
|---|
| 13353 | | - me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr); |
|---|
| 13354 | | - remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr); |
|---|
| 13355 | | - |
|---|
| 13356 | | - ret = pci_request_irq(dd->pcidev, rcd->msix_intr, |
|---|
| 13357 | | - receive_context_interrupt, |
|---|
| 13358 | | - receive_context_thread, arg, |
|---|
| 13359 | | - DRIVER_NAME "_%d kctxt%d", dd->unit, idx); |
|---|
| 13360 | | - if (ret) { |
|---|
| 13361 | | - dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n", |
|---|
| 13362 | | - me->irq, idx, ret); |
|---|
| 13363 | | - return; |
|---|
| 13364 | | - } |
|---|
| 13365 | | - /* |
|---|
| 13366 | | - * assign arg after pci_request_irq call, so it will be |
|---|
| 13367 | | - * cleaned up |
|---|
| 13368 | | - */ |
|---|
| 13369 | | - me->arg = arg; |
|---|
| 13370 | | - |
|---|
| 13371 | | - ret = hfi1_get_irq_affinity(dd, me); |
|---|
| 13372 | | - if (ret) { |
|---|
| 13373 | | - dd_dev_err(dd, |
|---|
| 13374 | | - "unable to pin IRQ %d\n", ret); |
|---|
| 13375 | | - pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg); |
|---|
| 13376 | | - } |
|---|
| 13337 | + remap_intr(dd, IS_SDMA_START + engine, msix_intr); |
|---|
| 13338 | + remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr); |
|---|
| 13339 | + remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr); |
|---|
| 13377 | 13340 | } |
|---|
| 13378 | 13341 | |
|---|
| 13379 | 13342 | /* |
|---|
| 13380 | 13343 | * Set the general handler to accept all interrupts, remap all |
|---|
| 13381 | 13344 | * chip interrupts back to MSI-X 0. |
|---|
| 13382 | 13345 | */ |
|---|
| 13383 | | -static void reset_interrupts(struct hfi1_devdata *dd) |
|---|
| 13346 | +void reset_interrupts(struct hfi1_devdata *dd) |
|---|
| 13384 | 13347 | { |
|---|
| 13385 | 13348 | int i; |
|---|
| 13386 | 13349 | |
|---|
| .. | .. |
|---|
| 13393 | 13356 | write_csr(dd, CCE_INT_MAP + (8 * i), 0); |
|---|
| 13394 | 13357 | } |
|---|
| 13395 | 13358 | |
|---|
| 13359 | +/** |
|---|
| 13360 | + * set_up_interrupts() - Initialize the IRQ resources and state |
|---|
| 13361 | + * @dd: valid devdata |
|---|
| 13362 | + * |
|---|
| 13363 | + */ |
|---|
| 13396 | 13364 | static int set_up_interrupts(struct hfi1_devdata *dd) |
|---|
| 13397 | 13365 | { |
|---|
| 13398 | | - u32 total; |
|---|
| 13399 | | - int ret, request; |
|---|
| 13400 | | - |
|---|
| 13401 | | - /* |
|---|
| 13402 | | - * Interrupt count: |
|---|
| 13403 | | - * 1 general, "slow path" interrupt (includes the SDMA engines |
|---|
| 13404 | | - * slow source, SDMACleanupDone) |
|---|
| 13405 | | - * N interrupts - one per used SDMA engine |
|---|
| 13406 | | - * M interrupt - one per kernel receive context |
|---|
| 13407 | | - * V interrupt - one for each VNIC context |
|---|
| 13408 | | - */ |
|---|
| 13409 | | - total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts; |
|---|
| 13410 | | - |
|---|
| 13411 | | - /* ask for MSI-X interrupts */ |
|---|
| 13412 | | - request = request_msix(dd, total); |
|---|
| 13413 | | - if (request < 0) { |
|---|
| 13414 | | - ret = request; |
|---|
| 13415 | | - goto fail; |
|---|
| 13416 | | - } else { |
|---|
| 13417 | | - dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries), |
|---|
| 13418 | | - GFP_KERNEL); |
|---|
| 13419 | | - if (!dd->msix_entries) { |
|---|
| 13420 | | - ret = -ENOMEM; |
|---|
| 13421 | | - goto fail; |
|---|
| 13422 | | - } |
|---|
| 13423 | | - /* using MSI-X */ |
|---|
| 13424 | | - dd->num_msix_entries = total; |
|---|
| 13425 | | - dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total); |
|---|
| 13426 | | - } |
|---|
| 13366 | + int ret; |
|---|
| 13427 | 13367 | |
|---|
| 13428 | 13368 | /* mask all interrupts */ |
|---|
| 13429 | | - set_intr_state(dd, 0); |
|---|
| 13369 | + set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false); |
|---|
| 13370 | + |
|---|
| 13430 | 13371 | /* clear all pending interrupts */ |
|---|
| 13431 | 13372 | clear_all_interrupts(dd); |
|---|
| 13432 | 13373 | |
|---|
| 13433 | 13374 | /* reset general handler mask, chip MSI-X mappings */ |
|---|
| 13434 | 13375 | reset_interrupts(dd); |
|---|
| 13435 | 13376 | |
|---|
| 13436 | | - ret = request_msix_irqs(dd); |
|---|
| 13377 | + /* ask for MSI-X interrupts */ |
|---|
| 13378 | + ret = msix_initialize(dd); |
|---|
| 13437 | 13379 | if (ret) |
|---|
| 13438 | | - goto fail; |
|---|
| 13380 | + return ret; |
|---|
| 13439 | 13381 | |
|---|
| 13440 | | - return 0; |
|---|
| 13382 | + ret = msix_request_irqs(dd); |
|---|
| 13383 | + if (ret) |
|---|
| 13384 | + msix_clean_up_interrupts(dd); |
|---|
| 13441 | 13385 | |
|---|
| 13442 | | -fail: |
|---|
| 13443 | | - hfi1_clean_up_interrupts(dd); |
|---|
| 13444 | 13386 | return ret; |
|---|
| 13445 | 13387 | } |
|---|
| 13446 | 13388 | |
|---|
| .. | .. |
|---|
| 13453 | 13395 | * in array of contexts |
|---|
| 13454 | 13396 | * freectxts - number of free user contexts |
|---|
| 13455 | 13397 | * num_send_contexts - number of PIO send contexts being used |
|---|
| 13456 | | - * num_vnic_contexts - number of contexts reserved for VNIC |
|---|
| 13398 | + * num_netdev_contexts - number of contexts reserved for netdev |
|---|
| 13457 | 13399 | */ |
|---|
| 13458 | 13400 | static int set_up_context_variables(struct hfi1_devdata *dd) |
|---|
| 13459 | 13401 | { |
|---|
| 13460 | 13402 | unsigned long num_kernel_contexts; |
|---|
| 13461 | | - u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT; |
|---|
| 13462 | | - int total_contexts; |
|---|
| 13403 | + u16 num_netdev_contexts; |
|---|
| 13463 | 13404 | int ret; |
|---|
| 13464 | 13405 | unsigned ngroups; |
|---|
| 13465 | 13406 | int rmt_count; |
|---|
| 13466 | | - int user_rmt_reduced; |
|---|
| 13467 | 13407 | u32 n_usr_ctxts; |
|---|
| 13468 | 13408 | u32 send_contexts = chip_send_contexts(dd); |
|---|
| 13469 | 13409 | u32 rcv_contexts = chip_rcv_contexts(dd); |
|---|
| .. | .. |
|---|
| 13496 | 13436 | num_kernel_contexts = send_contexts - num_vls - 1; |
|---|
| 13497 | 13437 | } |
|---|
| 13498 | 13438 | |
|---|
| 13499 | | - /* Accommodate VNIC contexts if possible */ |
|---|
| 13500 | | - if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) { |
|---|
| 13501 | | - dd_dev_err(dd, "No receive contexts available for VNIC\n"); |
|---|
| 13502 | | - num_vnic_contexts = 0; |
|---|
| 13503 | | - } |
|---|
| 13504 | | - total_contexts = num_kernel_contexts + num_vnic_contexts; |
|---|
| 13505 | | - |
|---|
| 13506 | 13439 | /* |
|---|
| 13507 | 13440 | * User contexts: |
|---|
| 13508 | 13441 | * - default to 1 user context per real (non-HT) CPU core if |
|---|
| .. | .. |
|---|
| 13515 | 13448 | /* |
|---|
| 13516 | 13449 | * Adjust the counts given a global max. |
|---|
| 13517 | 13450 | */ |
|---|
| 13518 | | - if (total_contexts + n_usr_ctxts > rcv_contexts) { |
|---|
| 13451 | + if (num_kernel_contexts + n_usr_ctxts > rcv_contexts) { |
|---|
| 13519 | 13452 | dd_dev_err(dd, |
|---|
| 13520 | | - "Reducing # user receive contexts to: %d, from %u\n", |
|---|
| 13521 | | - rcv_contexts - total_contexts, |
|---|
| 13453 | + "Reducing # user receive contexts to: %u, from %u\n", |
|---|
| 13454 | + (u32)(rcv_contexts - num_kernel_contexts), |
|---|
| 13522 | 13455 | n_usr_ctxts); |
|---|
| 13523 | 13456 | /* recalculate */ |
|---|
| 13524 | | - n_usr_ctxts = rcv_contexts - total_contexts; |
|---|
| 13457 | + n_usr_ctxts = rcv_contexts - num_kernel_contexts; |
|---|
| 13525 | 13458 | } |
|---|
| 13526 | 13459 | |
|---|
| 13460 | + num_netdev_contexts = |
|---|
| 13461 | + hfi1_num_netdev_contexts(dd, rcv_contexts - |
|---|
| 13462 | + (num_kernel_contexts + n_usr_ctxts), |
|---|
| 13463 | + &node_affinity.real_cpu_mask); |
|---|
| 13527 | 13464 | /* |
|---|
| 13528 | | - * The RMT entries are currently allocated as shown below: |
|---|
| 13529 | | - * 1. QOS (0 to 128 entries); |
|---|
| 13530 | | - * 2. FECN for PSM (num_user_contexts + num_vnic_contexts); |
|---|
| 13531 | | - * 3. VNIC (num_vnic_contexts). |
|---|
| 13532 | | - * It should be noted that PSM FECN oversubscribe num_vnic_contexts |
|---|
| 13533 | | - * entries of RMT because both VNIC and PSM could allocate any receive |
|---|
| 13534 | | - * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts, |
|---|
| 13535 | | - * and PSM FECN must reserve an RMT entry for each possible PSM receive |
|---|
| 13536 | | - * context. |
|---|
| 13465 | + * RMT entries are allocated as follows: |
|---|
| 13466 | + * 1. QOS (0 to 128 entries) |
|---|
| 13467 | + * 2. FECN (num_kernel_context - 1 [a] + num_user_contexts + |
|---|
| 13468 | + * num_netdev_contexts [b]) |
|---|
| 13469 | + * 3. netdev (NUM_NETDEV_MAP_ENTRIES) |
|---|
| 13470 | + * |
|---|
| 13471 | + * Notes: |
|---|
| 13472 | + * [a] Kernel contexts (except control) are included in FECN if kernel |
|---|
| 13473 | + * TID_RDMA is active. |
|---|
| 13474 | + * [b] Netdev and user contexts are randomly allocated from the same |
|---|
| 13475 | + * context pool, so FECN must cover all contexts in the pool. |
|---|
| 13537 | 13476 | */ |
|---|
| 13538 | | - rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2); |
|---|
| 13539 | | - if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) { |
|---|
| 13540 | | - user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count; |
|---|
| 13541 | | - dd_dev_err(dd, |
|---|
| 13542 | | - "RMT size is reducing the number of user receive contexts from %u to %d\n", |
|---|
| 13543 | | - n_usr_ctxts, |
|---|
| 13544 | | - user_rmt_reduced); |
|---|
| 13545 | | - /* recalculate */ |
|---|
| 13546 | | - n_usr_ctxts = user_rmt_reduced; |
|---|
| 13477 | + rmt_count = qos_rmt_entries(num_kernel_contexts - 1, NULL, NULL) |
|---|
| 13478 | + + (HFI1_CAP_IS_KSET(TID_RDMA) ? num_kernel_contexts - 1 |
|---|
| 13479 | + : 0) |
|---|
| 13480 | + + n_usr_ctxts |
|---|
| 13481 | + + num_netdev_contexts |
|---|
| 13482 | + + NUM_NETDEV_MAP_ENTRIES; |
|---|
| 13483 | + if (rmt_count > NUM_MAP_ENTRIES) { |
|---|
| 13484 | + int over = rmt_count - NUM_MAP_ENTRIES; |
|---|
| 13485 | + /* try to squish user contexts, minimum of 1 */ |
|---|
| 13486 | + if (over >= n_usr_ctxts) { |
|---|
| 13487 | + dd_dev_err(dd, "RMT overflow: reduce the requested number of contexts\n"); |
|---|
| 13488 | + return -EINVAL; |
|---|
| 13489 | + } |
|---|
| 13490 | + dd_dev_err(dd, "RMT overflow: reducing # user contexts from %u to %u\n", |
|---|
| 13491 | + n_usr_ctxts, n_usr_ctxts - over); |
|---|
| 13492 | + n_usr_ctxts -= over; |
|---|
| 13547 | 13493 | } |
|---|
| 13548 | 13494 | |
|---|
| 13549 | | - total_contexts += n_usr_ctxts; |
|---|
| 13550 | | - |
|---|
| 13551 | | - /* the first N are kernel contexts, the rest are user/vnic contexts */ |
|---|
| 13552 | | - dd->num_rcv_contexts = total_contexts; |
|---|
| 13495 | + /* the first N are kernel contexts, the rest are user/netdev contexts */ |
|---|
| 13496 | + dd->num_rcv_contexts = |
|---|
| 13497 | + num_kernel_contexts + n_usr_ctxts + num_netdev_contexts; |
|---|
| 13553 | 13498 | dd->n_krcv_queues = num_kernel_contexts; |
|---|
| 13554 | 13499 | dd->first_dyn_alloc_ctxt = num_kernel_contexts; |
|---|
| 13555 | | - dd->num_vnic_contexts = num_vnic_contexts; |
|---|
| 13500 | + dd->num_netdev_contexts = num_netdev_contexts; |
|---|
| 13556 | 13501 | dd->num_user_contexts = n_usr_ctxts; |
|---|
| 13557 | 13502 | dd->freectxts = n_usr_ctxts; |
|---|
| 13558 | 13503 | dd_dev_info(dd, |
|---|
| 13559 | | - "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n", |
|---|
| 13504 | + "rcv contexts: chip %d, used %d (kernel %d, netdev %u, user %u)\n", |
|---|
| 13560 | 13505 | rcv_contexts, |
|---|
| 13561 | 13506 | (int)dd->num_rcv_contexts, |
|---|
| 13562 | 13507 | (int)dd->n_krcv_queues, |
|---|
| 13563 | | - dd->num_vnic_contexts, |
|---|
| 13508 | + dd->num_netdev_contexts, |
|---|
| 13564 | 13509 | dd->num_user_contexts); |
|---|
| 13565 | 13510 | |
|---|
| 13566 | 13511 | /* |
|---|
| .. | .. |
|---|
| 14239 | 14184 | |
|---|
| 14240 | 14185 | static void init_kdeth_qp(struct hfi1_devdata *dd) |
|---|
| 14241 | 14186 | { |
|---|
| 14242 | | - /* user changed the KDETH_QP */ |
|---|
| 14243 | | - if (kdeth_qp != 0 && kdeth_qp >= 0xff) { |
|---|
| 14244 | | - /* out of range or illegal value */ |
|---|
| 14245 | | - dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring"); |
|---|
| 14246 | | - kdeth_qp = 0; |
|---|
| 14247 | | - } |
|---|
| 14248 | | - if (kdeth_qp == 0) /* not set, or failed range check */ |
|---|
| 14249 | | - kdeth_qp = DEFAULT_KDETH_QP; |
|---|
| 14250 | | - |
|---|
| 14251 | 14187 | write_csr(dd, SEND_BTH_QP, |
|---|
| 14252 | | - (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) << |
|---|
| 14188 | + (RVT_KDETH_QP_PREFIX & SEND_BTH_QP_KDETH_QP_MASK) << |
|---|
| 14253 | 14189 | SEND_BTH_QP_KDETH_QP_SHIFT); |
|---|
| 14254 | 14190 | |
|---|
| 14255 | 14191 | write_csr(dd, RCV_BTH_QP, |
|---|
| 14256 | | - (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) << |
|---|
| 14192 | + (RVT_KDETH_QP_PREFIX & RCV_BTH_QP_KDETH_QP_MASK) << |
|---|
| 14257 | 14193 | RCV_BTH_QP_KDETH_QP_SHIFT); |
|---|
| 14194 | +} |
|---|
| 14195 | + |
|---|
| 14196 | +/** |
|---|
| 14197 | + * hfi1_get_qp_map |
|---|
| 14198 | + * @dd: device data |
|---|
| 14199 | + * @idx: index to read |
|---|
| 14200 | + */ |
|---|
| 14201 | +u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx) |
|---|
| 14202 | +{ |
|---|
| 14203 | + u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8); |
|---|
| 14204 | + |
|---|
| 14205 | + reg >>= (idx % 8) * 8; |
|---|
| 14206 | + return reg; |
|---|
| 14258 | 14207 | } |
|---|
| 14259 | 14208 | |
|---|
| 14260 | 14209 | /** |
|---|
| .. | .. |
|---|
| 14356 | 14305 | } |
|---|
| 14357 | 14306 | } |
|---|
| 14358 | 14307 | |
|---|
| 14308 | +/* Is a receive side mapping rule */ |
|---|
| 14309 | +static bool has_rsm_rule(struct hfi1_devdata *dd, u8 rule_index) |
|---|
| 14310 | +{ |
|---|
| 14311 | + return read_csr(dd, RCV_RSM_CFG + (8 * rule_index)) != 0; |
|---|
| 14312 | +} |
|---|
| 14313 | + |
|---|
| 14359 | 14314 | /* |
|---|
| 14360 | 14315 | * Add a receive side mapping rule. |
|---|
| 14361 | 14316 | */ |
|---|
| .. | .. |
|---|
| 14391 | 14346 | } |
|---|
| 14392 | 14347 | |
|---|
| 14393 | 14348 | /* return the number of RSM map table entries that will be used for QOS */ |
|---|
| 14394 | | -static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, |
|---|
| 14349 | +static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp, |
|---|
| 14395 | 14350 | unsigned int *np) |
|---|
| 14396 | 14351 | { |
|---|
| 14397 | 14352 | int i; |
|---|
| 14398 | 14353 | unsigned int m, n; |
|---|
| 14399 | | - u8 max_by_vl = 0; |
|---|
| 14354 | + uint max_by_vl = 0; |
|---|
| 14400 | 14355 | |
|---|
| 14401 | 14356 | /* is QOS active at all? */ |
|---|
| 14402 | | - if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS || |
|---|
| 14357 | + if (n_krcv_queues < MIN_KERNEL_KCTXTS || |
|---|
| 14403 | 14358 | num_vls == 1 || |
|---|
| 14404 | 14359 | krcvqsset <= 1) |
|---|
| 14405 | 14360 | goto no_qos; |
|---|
| .. | .. |
|---|
| 14457 | 14412 | |
|---|
| 14458 | 14413 | if (!rmt) |
|---|
| 14459 | 14414 | goto bail; |
|---|
| 14460 | | - rmt_entries = qos_rmt_entries(dd, &m, &n); |
|---|
| 14415 | + rmt_entries = qos_rmt_entries(dd->n_krcv_queues - 1, &m, &n); |
|---|
| 14461 | 14416 | if (rmt_entries == 0) |
|---|
| 14462 | 14417 | goto bail; |
|---|
| 14463 | 14418 | qpns_per_vl = 1 << m; |
|---|
| .. | .. |
|---|
| 14518 | 14473 | init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1); |
|---|
| 14519 | 14474 | } |
|---|
| 14520 | 14475 | |
|---|
| 14521 | | -static void init_user_fecn_handling(struct hfi1_devdata *dd, |
|---|
| 14522 | | - struct rsm_map_table *rmt) |
|---|
| 14476 | +static void init_fecn_handling(struct hfi1_devdata *dd, |
|---|
| 14477 | + struct rsm_map_table *rmt) |
|---|
| 14523 | 14478 | { |
|---|
| 14524 | 14479 | struct rsm_rule_data rrd; |
|---|
| 14525 | 14480 | u64 reg; |
|---|
| 14526 | | - int i, idx, regoff, regidx; |
|---|
| 14481 | + int i, idx, regoff, regidx, start; |
|---|
| 14527 | 14482 | u8 offset; |
|---|
| 14528 | 14483 | u32 total_cnt; |
|---|
| 14529 | 14484 | |
|---|
| 14485 | + if (HFI1_CAP_IS_KSET(TID_RDMA)) |
|---|
| 14486 | + /* Exclude context 0 */ |
|---|
| 14487 | + start = 1; |
|---|
| 14488 | + else |
|---|
| 14489 | + start = dd->first_dyn_alloc_ctxt; |
|---|
| 14490 | + |
|---|
| 14491 | + total_cnt = dd->num_rcv_contexts - start; |
|---|
| 14492 | + |
|---|
| 14530 | 14493 | /* there needs to be enough room in the map table */ |
|---|
| 14531 | | - total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt; |
|---|
| 14532 | 14494 | if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) { |
|---|
| 14533 | | - dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n"); |
|---|
| 14495 | + dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n"); |
|---|
| 14534 | 14496 | return; |
|---|
| 14535 | 14497 | } |
|---|
| 14536 | 14498 | |
|---|
| 14537 | 14499 | /* |
|---|
| 14538 | 14500 | * RSM will extract the destination context as an index into the |
|---|
| 14539 | 14501 | * map table. The destination contexts are a sequential block |
|---|
| 14540 | | - * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive). |
|---|
| 14502 | + * in the range start...num_rcv_contexts-1 (inclusive). |
|---|
| 14541 | 14503 | * Map entries are accessed as offset + extracted value. Adjust |
|---|
| 14542 | 14504 | * the added offset so this sequence can be placed anywhere in |
|---|
| 14543 | 14505 | * the table - as long as the entries themselves do not wrap. |
|---|
| 14544 | 14506 | * There are only enough bits in offset for the table size, so |
|---|
| 14545 | 14507 | * start with that to allow for a "negative" offset. |
|---|
| 14546 | 14508 | */ |
|---|
| 14547 | | - offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used - |
|---|
| 14548 | | - (int)dd->first_dyn_alloc_ctxt); |
|---|
| 14509 | + offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start); |
|---|
| 14549 | 14510 | |
|---|
| 14550 | | - for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used; |
|---|
| 14551 | | - i < dd->num_rcv_contexts; i++, idx++) { |
|---|
| 14511 | + for (i = start, idx = rmt->used; i < dd->num_rcv_contexts; |
|---|
| 14512 | + i++, idx++) { |
|---|
| 14552 | 14513 | /* replace with identity mapping */ |
|---|
| 14553 | 14514 | regoff = (idx % 8) * 8; |
|---|
| 14554 | 14515 | regidx = idx / 8; |
|---|
| .. | .. |
|---|
| 14586 | 14547 | rmt->used += total_cnt; |
|---|
| 14587 | 14548 | } |
|---|
| 14588 | 14549 | |
|---|
| 14589 | | -/* Initialize RSM for VNIC */ |
|---|
| 14590 | | -void hfi1_init_vnic_rsm(struct hfi1_devdata *dd) |
|---|
| 14550 | +static inline bool hfi1_is_rmt_full(int start, int spare) |
|---|
| 14551 | +{ |
|---|
| 14552 | + return (start + spare) > NUM_MAP_ENTRIES; |
|---|
| 14553 | +} |
|---|
| 14554 | + |
|---|
| 14555 | +static bool hfi1_netdev_update_rmt(struct hfi1_devdata *dd) |
|---|
| 14591 | 14556 | { |
|---|
| 14592 | 14557 | u8 i, j; |
|---|
| 14593 | 14558 | u8 ctx_id = 0; |
|---|
| 14594 | 14559 | u64 reg; |
|---|
| 14595 | 14560 | u32 regoff; |
|---|
| 14596 | | - struct rsm_rule_data rrd; |
|---|
| 14561 | + int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); |
|---|
| 14562 | + int ctxt_count = hfi1_netdev_ctxt_count(dd); |
|---|
| 14597 | 14563 | |
|---|
| 14598 | | - if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) { |
|---|
| 14599 | | - dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n", |
|---|
| 14600 | | - dd->vnic.rmt_start); |
|---|
| 14601 | | - return; |
|---|
| 14564 | + /* We already have contexts mapped in RMT */ |
|---|
| 14565 | + if (has_rsm_rule(dd, RSM_INS_VNIC) || has_rsm_rule(dd, RSM_INS_AIP)) { |
|---|
| 14566 | + dd_dev_info(dd, "Contexts are already mapped in RMT\n"); |
|---|
| 14567 | + return true; |
|---|
| 14602 | 14568 | } |
|---|
| 14603 | 14569 | |
|---|
| 14604 | | - dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n", |
|---|
| 14605 | | - dd->vnic.rmt_start, |
|---|
| 14606 | | - dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES); |
|---|
| 14570 | + if (hfi1_is_rmt_full(rmt_start, NUM_NETDEV_MAP_ENTRIES)) { |
|---|
| 14571 | + dd_dev_err(dd, "Not enough RMT entries used = %d\n", |
|---|
| 14572 | + rmt_start); |
|---|
| 14573 | + return false; |
|---|
| 14574 | + } |
|---|
| 14575 | + |
|---|
| 14576 | + dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n", |
|---|
| 14577 | + rmt_start, |
|---|
| 14578 | + rmt_start + NUM_NETDEV_MAP_ENTRIES); |
|---|
| 14607 | 14579 | |
|---|
| 14608 | 14580 | /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */ |
|---|
| 14609 | | - regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8; |
|---|
| 14581 | + regoff = RCV_RSM_MAP_TABLE + (rmt_start / 8) * 8; |
|---|
| 14610 | 14582 | reg = read_csr(dd, regoff); |
|---|
| 14611 | | - for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) { |
|---|
| 14612 | | - /* Update map register with vnic context */ |
|---|
| 14613 | | - j = (dd->vnic.rmt_start + i) % 8; |
|---|
| 14583 | + for (i = 0; i < NUM_NETDEV_MAP_ENTRIES; i++) { |
|---|
| 14584 | + /* Update map register with netdev context */ |
|---|
| 14585 | + j = (rmt_start + i) % 8; |
|---|
| 14614 | 14586 | reg &= ~(0xffllu << (j * 8)); |
|---|
| 14615 | | - reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8); |
|---|
| 14616 | | - /* Wrap up vnic ctx index */ |
|---|
| 14617 | | - ctx_id %= dd->vnic.num_ctxt; |
|---|
| 14587 | + reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8); |
|---|
| 14588 | + /* Wrap up netdev ctx index */ |
|---|
| 14589 | + ctx_id %= ctxt_count; |
|---|
| 14618 | 14590 | /* Write back map register */ |
|---|
| 14619 | | - if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) { |
|---|
| 14591 | + if (j == 7 || ((i + 1) == NUM_NETDEV_MAP_ENTRIES)) { |
|---|
| 14620 | 14592 | dev_dbg(&(dd)->pcidev->dev, |
|---|
| 14621 | | - "Vnic rsm map reg[%d] =0x%llx\n", |
|---|
| 14593 | + "RMT[%d] =0x%llx\n", |
|---|
| 14622 | 14594 | regoff - RCV_RSM_MAP_TABLE, reg); |
|---|
| 14623 | 14595 | |
|---|
| 14624 | 14596 | write_csr(dd, regoff, reg); |
|---|
| 14625 | 14597 | regoff += 8; |
|---|
| 14626 | | - if (i < (NUM_VNIC_MAP_ENTRIES - 1)) |
|---|
| 14598 | + if (i < (NUM_NETDEV_MAP_ENTRIES - 1)) |
|---|
| 14627 | 14599 | reg = read_csr(dd, regoff); |
|---|
| 14628 | 14600 | } |
|---|
| 14629 | 14601 | } |
|---|
| 14630 | 14602 | |
|---|
| 14631 | | - /* Add rule for vnic */ |
|---|
| 14632 | | - rrd.offset = dd->vnic.rmt_start; |
|---|
| 14633 | | - rrd.pkt_type = 4; |
|---|
| 14634 | | - /* Match 16B packets */ |
|---|
| 14635 | | - rrd.field1_off = L2_TYPE_MATCH_OFFSET; |
|---|
| 14636 | | - rrd.mask1 = L2_TYPE_MASK; |
|---|
| 14637 | | - rrd.value1 = L2_16B_VALUE; |
|---|
| 14638 | | - /* Match ETH L4 packets */ |
|---|
| 14639 | | - rrd.field2_off = L4_TYPE_MATCH_OFFSET; |
|---|
| 14640 | | - rrd.mask2 = L4_16B_TYPE_MASK; |
|---|
| 14641 | | - rrd.value2 = L4_16B_ETH_VALUE; |
|---|
| 14642 | | - /* Calc context from veswid and entropy */ |
|---|
| 14643 | | - rrd.index1_off = L4_16B_HDR_VESWID_OFFSET; |
|---|
| 14644 | | - rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES); |
|---|
| 14645 | | - rrd.index2_off = L2_16B_ENTROPY_OFFSET; |
|---|
| 14646 | | - rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES); |
|---|
| 14647 | | - add_rsm_rule(dd, RSM_INS_VNIC, &rrd); |
|---|
| 14603 | + return true; |
|---|
| 14604 | +} |
|---|
| 14648 | 14605 | |
|---|
| 14649 | | - /* Enable RSM if not already enabled */ |
|---|
| 14606 | +static void hfi1_enable_rsm_rule(struct hfi1_devdata *dd, |
|---|
| 14607 | + int rule, struct rsm_rule_data *rrd) |
|---|
| 14608 | +{ |
|---|
| 14609 | + if (!hfi1_netdev_update_rmt(dd)) { |
|---|
| 14610 | + dd_dev_err(dd, "Failed to update RMT for RSM%d rule\n", rule); |
|---|
| 14611 | + return; |
|---|
| 14612 | + } |
|---|
| 14613 | + |
|---|
| 14614 | + add_rsm_rule(dd, rule, rrd); |
|---|
| 14650 | 14615 | add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); |
|---|
| 14616 | +} |
|---|
| 14617 | + |
|---|
| 14618 | +void hfi1_init_aip_rsm(struct hfi1_devdata *dd) |
|---|
| 14619 | +{ |
|---|
| 14620 | + /* |
|---|
| 14621 | + * go through with the initialisation only if this rule actually doesn't |
|---|
| 14622 | + * exist yet |
|---|
| 14623 | + */ |
|---|
| 14624 | + if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) { |
|---|
| 14625 | + int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); |
|---|
| 14626 | + struct rsm_rule_data rrd = { |
|---|
| 14627 | + .offset = rmt_start, |
|---|
| 14628 | + .pkt_type = IB_PACKET_TYPE, |
|---|
| 14629 | + .field1_off = LRH_BTH_MATCH_OFFSET, |
|---|
| 14630 | + .mask1 = LRH_BTH_MASK, |
|---|
| 14631 | + .value1 = LRH_BTH_VALUE, |
|---|
| 14632 | + .field2_off = BTH_DESTQP_MATCH_OFFSET, |
|---|
| 14633 | + .mask2 = BTH_DESTQP_MASK, |
|---|
| 14634 | + .value2 = BTH_DESTQP_VALUE, |
|---|
| 14635 | + .index1_off = DETH_AIP_SQPN_SELECT_OFFSET + |
|---|
| 14636 | + ilog2(NUM_NETDEV_MAP_ENTRIES), |
|---|
| 14637 | + .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES), |
|---|
| 14638 | + .index2_off = DETH_AIP_SQPN_SELECT_OFFSET, |
|---|
| 14639 | + .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES) |
|---|
| 14640 | + }; |
|---|
| 14641 | + |
|---|
| 14642 | + hfi1_enable_rsm_rule(dd, RSM_INS_AIP, &rrd); |
|---|
| 14643 | + } |
|---|
| 14644 | +} |
|---|
| 14645 | + |
|---|
| 14646 | +/* Initialize RSM for VNIC */ |
|---|
| 14647 | +void hfi1_init_vnic_rsm(struct hfi1_devdata *dd) |
|---|
| 14648 | +{ |
|---|
| 14649 | + int rmt_start = hfi1_netdev_get_free_rmt_idx(dd); |
|---|
| 14650 | + struct rsm_rule_data rrd = { |
|---|
| 14651 | + /* Add rule for vnic */ |
|---|
| 14652 | + .offset = rmt_start, |
|---|
| 14653 | + .pkt_type = 4, |
|---|
| 14654 | + /* Match 16B packets */ |
|---|
| 14655 | + .field1_off = L2_TYPE_MATCH_OFFSET, |
|---|
| 14656 | + .mask1 = L2_TYPE_MASK, |
|---|
| 14657 | + .value1 = L2_16B_VALUE, |
|---|
| 14658 | + /* Match ETH L4 packets */ |
|---|
| 14659 | + .field2_off = L4_TYPE_MATCH_OFFSET, |
|---|
| 14660 | + .mask2 = L4_16B_TYPE_MASK, |
|---|
| 14661 | + .value2 = L4_16B_ETH_VALUE, |
|---|
| 14662 | + /* Calc context from veswid and entropy */ |
|---|
| 14663 | + .index1_off = L4_16B_HDR_VESWID_OFFSET, |
|---|
| 14664 | + .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES), |
|---|
| 14665 | + .index2_off = L2_16B_ENTROPY_OFFSET, |
|---|
| 14666 | + .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES) |
|---|
| 14667 | + }; |
|---|
| 14668 | + |
|---|
| 14669 | + hfi1_enable_rsm_rule(dd, RSM_INS_VNIC, &rrd); |
|---|
| 14651 | 14670 | } |
|---|
| 14652 | 14671 | |
|---|
| 14653 | 14672 | void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd) |
|---|
| 14654 | 14673 | { |
|---|
| 14655 | 14674 | clear_rsm_rule(dd, RSM_INS_VNIC); |
|---|
| 14675 | +} |
|---|
| 14656 | 14676 | |
|---|
| 14657 | | - /* Disable RSM if used only by vnic */ |
|---|
| 14658 | | - if (dd->vnic.rmt_start == 0) |
|---|
| 14659 | | - clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); |
|---|
| 14677 | +void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd) |
|---|
| 14678 | +{ |
|---|
| 14679 | + /* only actually clear the rule if it's the last user asking to do so */ |
|---|
| 14680 | + if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1) |
|---|
| 14681 | + clear_rsm_rule(dd, RSM_INS_AIP); |
|---|
| 14660 | 14682 | } |
|---|
| 14661 | 14683 | |
|---|
| 14662 | 14684 | static int init_rxe(struct hfi1_devdata *dd) |
|---|
| .. | .. |
|---|
| 14673 | 14695 | |
|---|
| 14674 | 14696 | /* set up QOS, including the QPN map table */ |
|---|
| 14675 | 14697 | init_qos(dd, rmt); |
|---|
| 14676 | | - init_user_fecn_handling(dd, rmt); |
|---|
| 14698 | + init_fecn_handling(dd, rmt); |
|---|
| 14677 | 14699 | complete_rsm_map_table(dd, rmt); |
|---|
| 14678 | | - /* record number of used rsm map entries for vnic */ |
|---|
| 14679 | | - dd->vnic.rmt_start = rmt->used; |
|---|
| 14700 | + /* record number of used rsm map entries for netdev */ |
|---|
| 14701 | + hfi1_netdev_set_free_rmt_idx(dd, rmt->used); |
|---|
| 14680 | 14702 | kfree(rmt); |
|---|
| 14681 | 14703 | |
|---|
| 14682 | 14704 | /* |
|---|
| .. | .. |
|---|
| 14900 | 14922 | */ |
|---|
| 14901 | 14923 | static int init_asic_data(struct hfi1_devdata *dd) |
|---|
| 14902 | 14924 | { |
|---|
| 14903 | | - unsigned long flags; |
|---|
| 14904 | | - struct hfi1_devdata *tmp, *peer = NULL; |
|---|
| 14925 | + unsigned long index; |
|---|
| 14926 | + struct hfi1_devdata *peer; |
|---|
| 14905 | 14927 | struct hfi1_asic_data *asic_data; |
|---|
| 14906 | 14928 | int ret = 0; |
|---|
| 14907 | 14929 | |
|---|
| .. | .. |
|---|
| 14910 | 14932 | if (!asic_data) |
|---|
| 14911 | 14933 | return -ENOMEM; |
|---|
| 14912 | 14934 | |
|---|
| 14913 | | - spin_lock_irqsave(&hfi1_devs_lock, flags); |
|---|
| 14935 | + xa_lock_irq(&hfi1_dev_table); |
|---|
| 14914 | 14936 | /* Find our peer device */ |
|---|
| 14915 | | - list_for_each_entry(tmp, &hfi1_dev_list, list) { |
|---|
| 14916 | | - if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) && |
|---|
| 14917 | | - dd->unit != tmp->unit) { |
|---|
| 14918 | | - peer = tmp; |
|---|
| 14937 | + xa_for_each(&hfi1_dev_table, index, peer) { |
|---|
| 14938 | + if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) && |
|---|
| 14939 | + dd->unit != peer->unit) |
|---|
| 14919 | 14940 | break; |
|---|
| 14920 | | - } |
|---|
| 14921 | 14941 | } |
|---|
| 14922 | 14942 | |
|---|
| 14923 | 14943 | if (peer) { |
|---|
| .. | .. |
|---|
| 14929 | 14949 | mutex_init(&dd->asic_data->asic_resource_mutex); |
|---|
| 14930 | 14950 | } |
|---|
| 14931 | 14951 | dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */ |
|---|
| 14932 | | - spin_unlock_irqrestore(&hfi1_devs_lock, flags); |
|---|
| 14952 | + xa_unlock_irq(&hfi1_dev_table); |
|---|
| 14933 | 14953 | |
|---|
| 14934 | 14954 | /* first one through - set up i2c devices */ |
|---|
| 14935 | 14955 | if (!peer) |
|---|
| .. | .. |
|---|
| 15009 | 15029 | } |
|---|
| 15010 | 15030 | |
|---|
| 15011 | 15031 | /** |
|---|
| 15012 | | - * Allocate and initialize the device structure for the hfi. |
|---|
| 15032 | + * hfi1_init_dd() - Initialize most of the dd structure. |
|---|
| 15013 | 15033 | * @dev: the pci_dev for hfi1_ib device |
|---|
| 15014 | 15034 | * @ent: pci_device_id struct for this dev |
|---|
| 15015 | | - * |
|---|
| 15016 | | - * Also allocates, initializes, and returns the devdata struct for this |
|---|
| 15017 | | - * device instance |
|---|
| 15018 | 15035 | * |
|---|
| 15019 | 15036 | * This is global, and is called directly at init to set up the |
|---|
| 15020 | 15037 | * chip-specific function pointers for later use. |
|---|
| 15021 | 15038 | */ |
|---|
| 15022 | | -struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, |
|---|
| 15023 | | - const struct pci_device_id *ent) |
|---|
| 15039 | +int hfi1_init_dd(struct hfi1_devdata *dd) |
|---|
| 15024 | 15040 | { |
|---|
| 15025 | | - struct hfi1_devdata *dd; |
|---|
| 15041 | + struct pci_dev *pdev = dd->pcidev; |
|---|
| 15026 | 15042 | struct hfi1_pportdata *ppd; |
|---|
| 15027 | 15043 | u64 reg; |
|---|
| 15028 | 15044 | int i, ret; |
|---|
| .. | .. |
|---|
| 15033 | 15049 | "Functional simulator" |
|---|
| 15034 | 15050 | }; |
|---|
| 15035 | 15051 | struct pci_dev *parent = pdev->bus->self; |
|---|
| 15036 | | - u32 sdma_engines; |
|---|
| 15052 | + u32 sdma_engines = chip_sdma_engines(dd); |
|---|
| 15037 | 15053 | |
|---|
| 15038 | | - dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS * |
|---|
| 15039 | | - sizeof(struct hfi1_pportdata)); |
|---|
| 15040 | | - if (IS_ERR(dd)) |
|---|
| 15041 | | - goto bail; |
|---|
| 15042 | | - sdma_engines = chip_sdma_engines(dd); |
|---|
| 15043 | 15054 | ppd = dd->pport; |
|---|
| 15044 | 15055 | for (i = 0; i < dd->num_pports; i++, ppd++) { |
|---|
| 15045 | 15056 | int vl; |
|---|
| .. | .. |
|---|
| 15218 | 15229 | if (ret) |
|---|
| 15219 | 15230 | goto bail_cleanup; |
|---|
| 15220 | 15231 | |
|---|
| 15232 | + /* |
|---|
| 15233 | + * This should probably occur in hfi1_pcie_init(), but historically |
|---|
| 15234 | + * occurs after the do_pcie_gen3_transition() code. |
|---|
| 15235 | + */ |
|---|
| 15236 | + tune_pcie_caps(dd); |
|---|
| 15237 | + |
|---|
| 15221 | 15238 | /* start setting dd values and adjusting CSRs */ |
|---|
| 15222 | 15239 | init_early_variables(dd); |
|---|
| 15223 | 15240 | |
|---|
| .. | .. |
|---|
| 15234 | 15251 | (u32)dd->minrev, |
|---|
| 15235 | 15252 | (dd->revision >> CCE_REVISION_SW_SHIFT) |
|---|
| 15236 | 15253 | & CCE_REVISION_SW_MASK); |
|---|
| 15254 | + |
|---|
| 15255 | + /* alloc netdev data */ |
|---|
| 15256 | + ret = hfi1_netdev_alloc(dd); |
|---|
| 15257 | + if (ret) |
|---|
| 15258 | + goto bail_cleanup; |
|---|
| 15237 | 15259 | |
|---|
| 15238 | 15260 | ret = set_up_context_variables(dd); |
|---|
| 15239 | 15261 | if (ret) |
|---|
| .. | .. |
|---|
| 15333 | 15355 | free_cntrs(dd); |
|---|
| 15334 | 15356 | bail_clear_intr: |
|---|
| 15335 | 15357 | hfi1_comp_vectors_clean_up(dd); |
|---|
| 15336 | | - hfi1_clean_up_interrupts(dd); |
|---|
| 15358 | + msix_clean_up_interrupts(dd); |
|---|
| 15337 | 15359 | bail_cleanup: |
|---|
| 15360 | + hfi1_netdev_free(dd); |
|---|
| 15338 | 15361 | hfi1_pcie_ddcleanup(dd); |
|---|
| 15339 | 15362 | bail_free: |
|---|
| 15340 | 15363 | hfi1_free_devdata(dd); |
|---|
| 15341 | | - dd = ERR_PTR(ret); |
|---|
| 15342 | 15364 | bail: |
|---|
| 15343 | | - return dd; |
|---|
| 15365 | + return ret; |
|---|
| 15344 | 15366 | } |
|---|
| 15345 | 15367 | |
|---|
| 15346 | 15368 | static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate, |
|---|