@@ -71,14 +71,6 @@
         }
 }
 
-/* defined in header release 48 and higher */
-#ifndef SEND_CTRL_UNSUPPORTED_VL_SHIFT
-#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
-#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xffull
-#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
-                << SEND_CTRL_UNSUPPORTED_VL_SHIFT)
-#endif
-
 /* global control of PIO send */
 void pio_send_control(struct hfi1_devdata *dd, int op)
 {
@@ -94,7 +86,7 @@
         switch (op) {
         case PSC_GLOBAL_ENABLE:
                 reg |= SEND_CTRL_SEND_ENABLE_SMASK;
-                /* Fall through */
+                fallthrough;
         case PSC_DATA_VL_ENABLE:
                 mask = 0;
                 for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
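The hunk above swaps the old `/* Fall through */` comment for the kernel's `fallthrough` pseudo-keyword, which expands to the compiler's fall-through statement attribute where supported, so `-Wimplicit-fallthrough` can tell intentional fall-through from a missing `break`. Below is a minimal stand-alone sketch of the same idiom; the `FALLTHROUGH` macro and the `classify()` function are illustrative stand-ins, not the kernel's definitions.

```c
#include <stdio.h>

/* Local stand-in for the kernel's "fallthrough" pseudo-keyword. */
#if defined(__GNUC__) && __GNUC__ >= 7
#define FALLTHROUGH __attribute__((__fallthrough__))
#else
#define FALLTHROUGH do {} while (0)     /* no annotation available */
#endif

static int classify(int op)
{
        int flags = 0;

        switch (op) {
        case 2:
                flags |= 0x4;   /* op 2 does extra setup ... */
                FALLTHROUGH;    /* ... then shares op 1's path */
        case 1:
                flags |= 0x1;
                break;
        default:
                break;
        }
        return flags;
}

int main(void)
{
        printf("%#x %#x\n", classify(1), classify(2));
        return 0;
}
```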
@@ -750,6 +742,7 @@
         spin_lock_init(&sc->alloc_lock);
         spin_lock_init(&sc->release_lock);
         spin_lock_init(&sc->credit_ctrl_lock);
+        seqlock_init(&sc->waitlock);
         INIT_LIST_HEAD(&sc->piowait);
         INIT_WORK(&sc->halt_work, sc_halted);
         init_waitqueue_head(&sc->halt_wait);
@@ -927,6 +920,7 @@
 {
         u64 reg;
         struct pio_buf *pbuf;
+        LIST_HEAD(wake_list);
 
         if (!sc)
                 return;
@@ -959,6 +953,23 @@
                 }
         }
         spin_unlock(&sc->release_lock);
+
+        write_seqlock(&sc->waitlock);
+        list_splice_init(&sc->piowait, &wake_list);
+        write_sequnlock(&sc->waitlock);
+        while (!list_empty(&wake_list)) {
+                struct iowait *wait;
+                struct rvt_qp *qp;
+                struct hfi1_qp_priv *priv;
+
+                wait = list_first_entry(&wake_list, struct iowait, list);
+                qp = iowait_to_qp(wait);
+                priv = qp->priv;
+                list_del_init(&priv->s_iowait.list);
+                priv->s_iowait.lock = NULL;
+                hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
+        }
+
         spin_unlock_irq(&sc->alloc_lock);
 }
 
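The added block drains sc->piowait by splicing it onto the private wake_list while sc->waitlock is held, and only issues the hfi1_qp_wakeup() calls after the lock is dropped. The user-space sketch below shows just that splice-under-lock / wake-outside-lock shape; the pthread mutex and the hand-rolled waiter list are stand-ins for the driver's seqlock and iowait list, not the actual kernel API.

```c
#include <pthread.h>
#include <stdio.h>

struct waiter {
        struct waiter *next;
        int id;
};

static pthread_mutex_t waitlock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *piowait;          /* list of blocked waiters */

static void wake(struct waiter *w)
{
        printf("waking %d\n", w->id);   /* real code would wake a QP */
}

static void flush_waiters(void)
{
        struct waiter *wake_list;

        /* Hold the lock only long enough to detach the whole list. */
        pthread_mutex_lock(&waitlock);
        wake_list = piowait;
        piowait = NULL;
        pthread_mutex_unlock(&waitlock);

        /* Wake-ups run without the lock held. */
        while (wake_list) {
                struct waiter *w = wake_list;

                wake_list = w->next;
                wake(w);
        }
}

int main(void)
{
        struct waiter a = { NULL, 1 };
        struct waiter b = { &a, 2 };

        piowait = &b;
        flush_waiters();
        return 0;
}
```

Keeping the critical section down to a pointer swap means the wake-up callbacks, which may take other locks, never run under the wait-list lock.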
@@ -1585,10 +1596,8 @@
         else
                 sc_del_credit_return_intr(sc);
         trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
-        if (needint) {
-                mmiowb();
+        if (needint)
                 sc_return_credits(sc);
-        }
 }
 
 /**
@@ -1602,14 +1611,12 @@
 static void sc_piobufavail(struct send_context *sc)
 {
         struct hfi1_devdata *dd = sc->dd;
-        struct hfi1_ibdev *dev = &dd->verbs_dev;
         struct list_head *list;
         struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
         struct rvt_qp *qp;
         struct hfi1_qp_priv *priv;
         unsigned long flags;
-        uint i, n = 0, max_idx = 0;
-        u8 max_starved_cnt = 0;
+        uint i, n = 0, top_idx = 0;
 
         if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
             dd->send_contexts[sc->sw_index].type != SC_VL15)
@@ -1621,18 +1628,25 @@
          * could end up with QPs on the wait list with the interrupt
          * disabled.
          */
-        write_seqlock_irqsave(&dev->iowait_lock, flags);
+        write_seqlock_irqsave(&sc->waitlock, flags);
         while (!list_empty(list)) {
                 struct iowait *wait;
 
                 if (n == ARRAY_SIZE(qps))
                         break;
                 wait = list_first_entry(list, struct iowait, list);
+                iowait_get_priority(wait);
                 qp = iowait_to_qp(wait);
                 priv = qp->priv;
                 list_del_init(&priv->s_iowait.list);
                 priv->s_iowait.lock = NULL;
-                iowait_starve_find_max(wait, &max_starved_cnt, n, &max_idx);
+                if (n) {
+                        priv = qps[top_idx]->priv;
+                        top_idx = iowait_priority_update_top(wait,
+                                                             &priv->s_iowait,
+                                                             n, top_idx);
+                }
+
                 /* refcount held until actual wake up */
                 qps[n++] = qp;
         }
@@ -1645,14 +1659,14 @@
                 if (!list_empty(list))
                         hfi1_sc_wantpiobuf_intr(sc, 1);
         }
-        write_sequnlock_irqrestore(&dev->iowait_lock, flags);
+        write_sequnlock_irqrestore(&sc->waitlock, flags);
 
-        /* Wake up the most starved one first */
+        /* Wake up the top-priority one first */
         if (n)
-                hfi1_qp_wakeup(qps[max_idx],
+                hfi1_qp_wakeup(qps[top_idx],
                                RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
         for (i = 0; i < n; i++)
-                if (i != max_idx)
+                if (i != top_idx)
                         hfi1_qp_wakeup(qps[i],
                                        RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
 }
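The two hunks above replace the starvation counter (max_starved_cnt/max_idx) with a priority scheme: iowait_get_priority() is called as each waiter is pulled off the list, iowait_priority_update_top() keeps top_idx pointing at the highest-priority entry collected so far, and that entry is woken before the rest of the batch. The stand-alone sketch below mirrors only that batch-and-wake-order logic; the priority field and its comparison are illustrative stand-ins for the iowait priority mechanics, not the driver's implementation.

```c
#include <stdio.h>

#define BATCH 8

struct entry {
        int id;
        int priority;           /* stand-in for the iowait priority */
};

static void wake_entry(const struct entry *e)
{
        printf("servicing %d (prio %d)\n", e->id, e->priority);
}

static void drain(struct entry *pending, int npending)
{
        struct entry *batch[BATCH];
        int i, n = 0, top_idx = 0;

        for (i = 0; i < npending && n < BATCH; i++) {
                /* Track the best entry seen so far, in the spirit of
                 * iowait_priority_update_top() in the hunk above. */
                if (n && pending[i].priority > batch[top_idx]->priority)
                        top_idx = n;
                batch[n++] = &pending[i];
        }

        if (n)
                wake_entry(batch[top_idx]);     /* top-priority first */
        for (i = 0; i < n; i++)
                if (i != top_idx)
                        wake_entry(batch[i]);
}

int main(void)
{
        struct entry pending[] = { { 1, 0 }, { 2, 5 }, { 3, 2 } };

        drain(pending, 3);
        return 0;
}
```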
@@ -2107,11 +2121,10 @@
                 int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
 
                 set_dev_node(&dd->pcidev->dev, i);
-                dd->cr_base[i].va = dma_zalloc_coherent(
-                                        &dd->pcidev->dev,
-                                        bytes,
-                                        &dd->cr_base[i].dma,
-                                        GFP_KERNEL);
+                dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
+                                                       bytes,
+                                                       &dd->cr_base[i].dma,
+                                                       GFP_KERNEL);
                 if (!dd->cr_base[i].va) {
                         set_dev_node(&dd->pcidev->dev, dd->node);
                         dd_dev_err(dd,
@@ -2146,3 +2159,28 @@
         kfree(dd->cr_base);
         dd->cr_base = NULL;
 }
+
+void seqfile_dump_sci(struct seq_file *s, u32 i,
+                      struct send_context_info *sci)
+{
+        struct send_context *sc = sci->sc;
+        u64 reg;
+
+        seq_printf(s, "SCI %u: type %u base %u credits %u\n",
+                   i, sci->type, sci->base, sci->credits);
+        seq_printf(s, " flags 0x%x sw_inx %u hw_ctxt %u grp %u\n",
+                   sc->flags, sc->sw_index, sc->hw_context, sc->group);
+        seq_printf(s, " sr_size %u credits %u sr_head %u sr_tail %u\n",
+                   sc->sr_size, sc->credits, sc->sr_head, sc->sr_tail);
+        seq_printf(s, " fill %lu free %lu fill_wrap %u alloc_free %lu\n",
+                   sc->fill, sc->free, sc->fill_wrap, sc->alloc_free);
+        seq_printf(s, " credit_intr_count %u credit_ctrl 0x%llx\n",
+                   sc->credit_intr_count, sc->credit_ctrl);
+        reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS));
+        seq_printf(s, " *hw_free %llu CurrentFree %llu LastReturned %llu\n",
+                   (le64_to_cpu(*sc->hw_free) & CR_COUNTER_SMASK) >>
+                   CR_COUNTER_SHIFT,
+                   (reg >> SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_SHIFT)) &
+                   SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_MASK),
+                   reg & SC(CREDIT_STATUS_LAST_RETURNED_COUNTER_SMASK));
+}