@@ -239,8 +239,10 @@
 	unsigned int num;	/* num skbs in per port queues */
 	struct sched_port p[MAX_NPORTS];
 	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
+	struct sge *sge;
 };
-static void restart_sched(unsigned long);
+
+static void restart_sched(struct tasklet_struct *t);
 
 
 /*
@@ -378,7 +380,8 @@
 		return -ENOMEM;
 
 	pr_debug("tx_sched_init\n");
-	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
+	tasklet_setup(&s->sched_tsk, restart_sched);
+	s->sge = sge;
 	sge->tx_sched = s;
 
 	for (i = 0; i < MAX_NPORTS; i++) {
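These two hunks are the standard `tasklet_setup()` conversion: the callback now receives the `struct tasklet_struct` itself rather than an opaque `unsigned long`, so the driver gains a `struct sge *` back-pointer in `struct sched` for the callback to recover its context (the tasklet is embedded in `struct sched`, not in `struct sge`). A minimal sketch of the pattern, with hypothetical names (`my_ctx`, `my_ctx_run`):

```c
#include <linux/interrupt.h>

/* Hypothetical context structure embedding a tasklet. */
struct my_ctx {
	struct tasklet_struct tsk;
	int pending;
};

static void my_ctx_run(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of(): it maps the tasklet_struct
	 * argument back to the structure that embeds it. */
	struct my_ctx *ctx = from_tasklet(ctx, t, tsk);

	ctx->pending = 0;
}

static void my_ctx_init(struct my_ctx *ctx)
{
	tasklet_setup(&ctx->tsk, my_ctx_run);
}
```

`restart_sched()` further down uses exactly this recovery step, which is why `struct sched` needs the new `sge` member: `from_tasklet()` yields the `struct sched`, and the scheduler still needs the owning `struct sge`.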
@@ -509,9 +512,8 @@
 	while (q->credits--) {
 		struct freelQ_ce *ce = &q->centries[cidx];
 
-		pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
-				 dma_unmap_len(ce, dma_len),
-				 PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
+				 dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
 		dev_kfree_skb(ce->skb);
 		ce->skb = NULL;
 		if (++cidx == q->size)
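This and the following hunks are a mechanical migration off the deprecated `pci_*` DMA wrappers (thin macros from `include/linux/pci-dma-compat.h`, since removed from the kernel) onto the generic DMA API. The correspondence applied throughout the patch:

| Legacy PCI wrapper | Generic DMA API |
|---|---|
| `pci_map_single(pdev, ...)` | `dma_map_single(&pdev->dev, ...)` |
| `pci_unmap_single(pdev, ...)` | `dma_unmap_single(&pdev->dev, ...)` |
| `pci_alloc_consistent(pdev, size, &h)` | `dma_alloc_coherent(&pdev->dev, size, &h, gfp)` |
| `pci_free_consistent(pdev, ...)` | `dma_free_coherent(&pdev->dev, ...)` |
| `pci_dma_sync_single_for_{cpu,device}` | `dma_sync_single_for_{cpu,device}` |
| `PCI_DMA_TODEVICE` / `PCI_DMA_FROMDEVICE` | `DMA_TO_DEVICE` / `DMA_FROM_DEVICE` |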
@@ -529,8 +531,8 @@
 
 	if (sge->respQ.entries) {
 		size = sizeof(struct respQ_e) * sge->respQ.size;
-		pci_free_consistent(pdev, size, sge->respQ.entries,
-				    sge->respQ.dma_addr);
+		dma_free_coherent(&pdev->dev, size, sge->respQ.entries,
+				  sge->respQ.dma_addr);
 	}
 
 	for (i = 0; i < SGE_FREELQ_N; i++) {
@@ -542,8 +544,8 @@
 		}
 		if (q->entries) {
 			size = sizeof(struct freelQ_e) * q->size;
-			pci_free_consistent(pdev, size, q->entries,
-					    q->dma_addr);
+			dma_free_coherent(&pdev->dev, size, q->entries,
+					  q->dma_addr);
 		}
 	}
 }
@@ -564,7 +566,8 @@
 		q->size = p->freelQ_size[i];
 		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
 		size = sizeof(struct freelQ_e) * q->size;
-		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+		q->entries = dma_alloc_coherent(&pdev->dev, size,
+						&q->dma_addr, GFP_KERNEL);
 		if (!q->entries)
 			goto err_no_mem;
 
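One behavioural nuance in the `pci_alloc_consistent()` hunks: the old wrapper hard-coded `GFP_ATOMIC`, whereas the conversion passes `GFP_KERNEL`, which is safe here because these rings are allocated from process context at setup time. A self-contained sketch of the alloc/free pairing, with illustrative names (`ring`, `ring_alloc`, `ring_free`):

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Illustrative descriptor ring kept in coherent (consistent) memory. */
struct ring {
	void *entries;
	dma_addr_t dma_addr;
	size_t bytes;
};

static int ring_alloc(struct pci_dev *pdev, struct ring *r,
		      size_t nelem, size_t esize)
{
	r->bytes = nelem * esize;
	/* Unlike pci_alloc_consistent(), the gfp mask is explicit;
	 * GFP_KERNEL is fine in sleepable setup paths like this. */
	r->entries = dma_alloc_coherent(&pdev->dev, r->bytes,
					&r->dma_addr, GFP_KERNEL);
	return r->entries ? 0 : -ENOMEM;
}

static void ring_free(struct pci_dev *pdev, struct ring *r)
{
	if (r->entries)
		dma_free_coherent(&pdev->dev, r->bytes, r->entries,
				  r->dma_addr);
}
```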
@@ -585,8 +588,7 @@
 		sizeof(struct cpl_rx_data) +
 		sge->freelQ[!sge->jumbo_fl].dma_offset;
 
-	size = (16 * 1024) -
-	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size = (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
 
@@ -602,7 +604,8 @@
 	sge->respQ.credits = 0;
 	size = sizeof(struct respQ_e) * sge->respQ.size;
 	sge->respQ.entries =
-		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
+		dma_alloc_coherent(&pdev->dev, size, &sge->respQ.dma_addr,
+				   GFP_KERNEL);
 	if (!sge->respQ.entries)
 		goto err_no_mem;
 	return 0;
@@ -625,9 +628,10 @@
 	ce = &q->centries[cidx];
 	while (n--) {
 		if (likely(dma_unmap_len(ce, dma_len))) {
-			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+			dma_unmap_single(&pdev->dev,
+					 dma_unmap_addr(ce, dma_addr),
 					 dma_unmap_len(ce, dma_len),
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 			if (q->sop)
 				q->sop = 0;
 		}
@@ -664,8 +668,8 @@
 		}
 		if (q->entries) {
 			size = sizeof(struct cmdQ_e) * q->size;
-			pci_free_consistent(pdev, size, q->entries,
-					    q->dma_addr);
+			dma_free_coherent(&pdev->dev, size, q->entries,
+					  q->dma_addr);
 		}
 	}
 }
@@ -690,7 +694,8 @@
 		q->stop_thres = 0;
 		spin_lock_init(&q->lock);
 		size = sizeof(struct cmdQ_e) * q->size;
-		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+		q->entries = dma_alloc_coherent(&pdev->dev, size,
+						&q->dma_addr, GFP_KERNEL);
 		if (!q->entries)
 			goto err_no_mem;
 
@@ -838,8 +843,8 @@
 			break;
 
 		skb_reserve(skb, q->dma_offset);
-		mapping = pci_map_single(pdev, skb->data, dma_len,
-					 PCI_DMA_FROMDEVICE);
+		mapping = dma_map_single(&pdev->dev, skb->data, dma_len,
+					 DMA_FROM_DEVICE);
 		skb_reserve(skb, sge->rx_pkt_pad);
 
 		ce->skb = skb;
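The conversion above is deliberately one-to-one: like the old `pci_map_single()` code, it does not check the mapping for failure. Under the generic API the usual idiom is `dma_mapping_error()`; a hedged sketch of what such a check could look like (not part of this patch, names illustrative):

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Not part of this patch: map an RX skb and report mapping failure
 * so a refill loop could stop and retry on its next pass. */
static int map_rx_buf(struct pci_dev *pdev, struct sk_buff *skb,
		      unsigned int dma_len, dma_addr_t *mapping)
{
	*mapping = dma_map_single(&pdev->dev, skb->data, dma_len,
				  DMA_FROM_DEVICE);
	return dma_mapping_error(&pdev->dev, *mapping) ? -ENOMEM : 0;
}
```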
@@ -935,10 +940,11 @@
 /*
  * SGE 'Error' interrupt handler
  */
-int t1_sge_intr_error_handler(struct sge *sge)
+bool t1_sge_intr_error_handler(struct sge *sge)
 {
 	struct adapter *adapter = sge->adapter;
 	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
+	bool wake = false;
 
 	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
 		cause &= ~F_PACKET_TOO_BIG;
@@ -962,11 +968,14 @@
 		sge->stats.pkt_mismatch++;
 		pr_alert("%s: SGE packet mismatch\n", adapter->name);
 	}
-	if (cause & SGE_INT_FATAL)
-		t1_fatal_err(adapter);
+	if (cause & SGE_INT_FATAL) {
+		t1_interrupts_disable(adapter);
+		adapter->pending_thread_intr |= F_PL_INTR_SGE_ERR;
+		wake = true;
+	}
 
 	writel(cause, adapter->regs + A_SG_INT_CAUSE);
-	return 0;
+	return wake;
 }
 
 const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
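`t1_sge_intr_error_handler()` now returns `bool` instead of an unconditional `0`: on `SGE_INT_FATAL` it no longer handles the fatal error in hard-IRQ context, but masks all interrupts, records the event in `adapter->pending_thread_intr`, and asks the caller to wake the interrupt thread. A sketch of how such a `wake` result is typically folded into the top half's return value; the real plumbing lives in `t1_slow_intr_handler()`, which is not part of this diff, so names and structure here are illustrative:

```c
/* Illustrative only: fold a "wake the thread" result into the
 * return value expected from a threaded interrupt's hard handler. */
static irqreturn_t slow_intr(struct adapter *adapter, u32 cause)
{
	bool wake = false;

	if (cause & F_PL_INTR_SGE_ERR)
		wake = t1_sge_intr_error_handler(adapter->sge);

	return wake ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}
```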
@@ -1050,15 +1059,15 @@
 			goto use_orig_buf;
 
 		skb_put(skb, len);
-		pci_dma_sync_single_for_cpu(pdev,
-					    dma_unmap_addr(ce, dma_addr),
-					    dma_unmap_len(ce, dma_len),
-					    PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&pdev->dev,
+					dma_unmap_addr(ce, dma_addr),
+					dma_unmap_len(ce, dma_len),
+					DMA_FROM_DEVICE);
 		skb_copy_from_linear_data(ce->skb, skb->data, len);
-		pci_dma_sync_single_for_device(pdev,
-					       dma_unmap_addr(ce, dma_addr),
-					       dma_unmap_len(ce, dma_len),
-					       PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_device(&pdev->dev,
+					   dma_unmap_addr(ce, dma_addr),
+					   dma_unmap_len(ce, dma_len),
+					   DMA_FROM_DEVICE);
 		recycle_fl_buf(fl, fl->cidx);
 		return skb;
 	}
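The copybreak path above leaves the buffer mapped `DMA_FROM_DEVICE` and only moves ownership: sync to the CPU, copy the payload out, sync back to the device so the buffer can be recycled into the free list. The pattern in isolation (a sketch; `dev`, `handle`, `src`, `dst` are stand-ins, not names from this driver):

```c
#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Sketch of the copybreak sync pair: the mapping stays alive, only
 * buffer ownership bounces between CPU and device. */
static void copybreak(struct device *dev, dma_addr_t handle,
		      const void *src, void *dst, size_t len)
{
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	memcpy(dst, src, len);	/* CPU owns the buffer here */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
```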
@@ -1069,8 +1078,8 @@
 		return NULL;
 	}
 
-	pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
-			 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
+			 dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
 	skb = ce->skb;
 	prefetch(skb->data);
 
@@ -1092,8 +1101,9 @@
 	struct freelQ_ce *ce = &fl->centries[fl->cidx];
 	struct sk_buff *skb = ce->skb;
 
-	pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
-			    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_cpu(&adapter->pdev->dev,
+				dma_unmap_addr(ce, dma_addr),
+				dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
 	pr_err("%s: unexpected offload packet, cmd %u\n",
 	       adapter->name, *skb->data);
 	recycle_fl_buf(fl, fl->cidx);
@@ -1210,8 +1220,8 @@
 	e = e1 = &q->entries[pidx];
 	ce = &q->centries[pidx];
 
-	mapping = pci_map_single(adapter->pdev, skb->data,
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
+	mapping = dma_map_single(&adapter->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
 
 	desc_mapping = mapping;
 	desc_len = skb_headlen(skb);
@@ -1302,9 +1312,10 @@
  * Called from tasklet. Checks the scheduler for any
  * pending skbs that can be sent.
  */
-static void restart_sched(unsigned long arg)
+static void restart_sched(struct tasklet_struct *t)
 {
-	struct sge *sge = (struct sge *) arg;
+	struct sched *s = from_tasklet(s, t, sched_tsk);
+	struct sge *sge = s->sge;
 	struct adapter *adapter = sge->adapter;
 	struct cmdQ *q = &sge->cmdQ[0];
 	struct sk_buff *skb;
@@ -1612,11 +1623,46 @@
 	return work_done;
 }
 
+irqreturn_t t1_interrupt_thread(int irq, void *data)
+{
+	struct adapter *adapter = data;
+	u32 pending_thread_intr;
+
+	spin_lock_irq(&adapter->async_lock);
+	pending_thread_intr = adapter->pending_thread_intr;
+	adapter->pending_thread_intr = 0;
+	spin_unlock_irq(&adapter->async_lock);
+
+	if (!pending_thread_intr)
+		return IRQ_NONE;
+
+	if (pending_thread_intr & F_PL_INTR_EXT)
+		t1_elmer0_ext_intr_handler(adapter);
+
+	/* This error is fatal, interrupts remain off */
+	if (pending_thread_intr & F_PL_INTR_SGE_ERR) {
+		pr_alert("%s: encountered fatal error, operation suspended\n",
+			 adapter->name);
+		t1_sge_stop(adapter->sge);
+		return IRQ_HANDLED;
+	}
+
+	spin_lock_irq(&adapter->async_lock);
+	adapter->slow_intr_mask |= F_PL_INTR_EXT;
+
+	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+	       adapter->regs + A_PL_ENABLE);
+	spin_unlock_irq(&adapter->async_lock);
+
+	return IRQ_HANDLED;
+}
+
 irqreturn_t t1_interrupt(int irq, void *data)
 {
 	struct adapter *adapter = data;
 	struct sge *sge = adapter->sge;
-	int handled;
+	irqreturn_t handled;
 
 	if (likely(responses_pending(adapter))) {
 		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
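`t1_interrupt_thread()` is the sleeping bottom half of a threaded interrupt: the hard handler returns `IRQ_WAKE_THREAD` when slow work is pending, and the thread services ELMER0 external interrupts and fatal SGE errors, re-enabling `F_PL_INTR_EXT` only on the non-fatal path (after `F_PL_INTR_SGE_ERR` the adapter stays quiesced via `t1_sge_stop()`, matching the "interrupts remain off" comment). The registration is not part of this diff; presumably it pairs the two handlers along these lines:

```c
#include <linux/interrupt.h>

/* Sketch (assumed registration, not shown in this diff): the hard
 * and threaded handlers are paired via request_threaded_irq(). */
static int cxgb_request_irq(struct adapter *adapter, int irq)
{
	return request_threaded_irq(irq, t1_interrupt,
				    t1_interrupt_thread, IRQF_SHARED,
				    adapter->name, adapter);
}
```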
@@ -1638,10 +1684,10 @@
 	handled = t1_slow_intr_handler(adapter);
 	spin_unlock(&adapter->async_lock);
 
-	if (!handled)
+	if (handled == IRQ_NONE)
 		sge->stats.unhandled_irqs++;
 
-	return IRQ_RETVAL(handled != 0);
+	return handled;
 }
 
 /*
|---|