@@ -344,7 +344,7 @@
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static int yellowfin_open(struct net_device *dev);
 static void yellowfin_timer(struct timer_list *t);
-static void yellowfin_tx_timeout(struct net_device *dev);
+static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue);
 static int yellowfin_init_ring(struct net_device *dev);
 static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 					struct net_device *dev);
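The prototype change follows the updated `ndo_tx_timeout` signature: the core now passes the index of the stuck TX queue to the handler. Yellowfin is single-queue, so the new argument is accepted and ignored. A minimal sketch of the modern callback shape (the body is illustrative, not this driver's actual recovery logic):

```c
#include <linux/netdevice.h>

/* Sketch: shape of a .ndo_tx_timeout handler after the signature change.
 * A single-queue driver can ignore txqueue entirely. */
static void example_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_warn(dev, "transmit timed out on queue %u\n", txqueue);
	/* ... reset hardware / ring state here ... */
	netif_wake_queue(dev);	/* let the stack resubmit packets */
}
```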
@@ -434,19 +434,22 @@
 	np->drv_flags = drv_flags;
 	np->base = ioaddr;
 
-	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+					GFP_KERNEL);
 	if (!ring_space)
 		goto err_out_cleardev;
 	np->tx_ring = ring_space;
 	np->tx_ring_dma = ring_dma;
 
-	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+					GFP_KERNEL);
 	if (!ring_space)
 		goto err_out_unmap_tx;
 	np->rx_ring = ring_space;
 	np->rx_ring_dma = ring_dma;
 
-	ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
+	ring_space = dma_alloc_coherent(&pdev->dev, STATUS_TOTAL_SIZE,
+					&ring_dma, GFP_KERNEL);
 	if (!ring_space)
 		goto err_out_unmap_rx;
 	np->tx_status = ring_space;
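The legacy `pci_alloc_consistent()` wrapper always allocated with `GFP_ATOMIC`; the generic `dma_alloc_coherent()` takes the underlying `struct device` plus an explicit `gfp_t`, so a sleepable probe path like this one can use `GFP_KERNEL`. A condensed sketch of the pattern, with hypothetical names (`my_priv`, `MY_RING_BYTES`):

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

#define MY_RING_BYTES 4096	/* hypothetical ring size */

struct my_priv {
	void *ring;		/* CPU address of the descriptor ring */
	dma_addr_t ring_dma;	/* bus address handed to the NIC */
};

static int my_alloc_ring(struct pci_dev *pdev, struct my_priv *np)
{
	np->ring = dma_alloc_coherent(&pdev->dev, MY_RING_BYTES,
				      &np->ring_dma, GFP_KERNEL);
	return np->ring ? 0 : -ENOMEM;
}
```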
@@ -505,12 +508,14 @@
 	return 0;
 
 err_out_unmap_status:
-	pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
-		np->tx_status_dma);
+	dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
+			  np->tx_status_dma);
 err_out_unmap_rx:
-	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+			  np->rx_ring_dma);
 err_out_unmap_tx:
-	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+			  np->tx_ring_dma);
 err_out_cleardev:
 	pci_iounmap(pdev, ioaddr);
 err_out_free_res:
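The error labels unwind in reverse allocation order, each `dma_free_coherent()` undoing exactly one successful `dma_alloc_coherent()` from the probe path above. A stripped-down sketch of that goto-unwind idiom, reusing the hypothetical `my_priv` (here assumed to hold `tx`/`rx` ring pairs) and `MY_RING_BYTES`:

```c
/* Sketch: reverse-order unwind, as the labels above implement. */
static int my_alloc_rings(struct pci_dev *pdev, struct my_priv *np)
{
	np->tx = dma_alloc_coherent(&pdev->dev, MY_RING_BYTES,
				    &np->tx_dma, GFP_KERNEL);
	if (!np->tx)
		return -ENOMEM;

	np->rx = dma_alloc_coherent(&pdev->dev, MY_RING_BYTES,
				    &np->rx_dma, GFP_KERNEL);
	if (!np->rx)
		goto err_free_tx;
	return 0;

err_free_tx:
	dma_free_coherent(&pdev->dev, MY_RING_BYTES, np->tx, np->tx_dma);
	return -ENOMEM;
}
```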
@@ -677,7 +682,7 @@
 	add_timer(&yp->timer);
 }
 
-static void yellowfin_tx_timeout(struct net_device *dev)
+static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
 	struct yellowfin_private *yp = netdev_priv(dev);
 	void __iomem *ioaddr = yp->base;
@@ -740,8 +745,10 @@
 		if (skb == NULL)
 			break;
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
-		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+		yp->rx_ring[i].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+								 skb->data,
+								 yp->rx_buf_sz,
+								 DMA_FROM_DEVICE));
 	}
 	if (i != RX_RING_SIZE) {
 		for (j = 0; j < i; j++)
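For per-buffer streaming mappings the conversion is mechanical: `pci_map_single(pdev, ...)` becomes `dma_map_single(&pdev->dev, ...)` and `PCI_DMA_FROMDEVICE` becomes `DMA_FROM_DEVICE`. The generic API also exposes `dma_mapping_error()` for failure checking, which this vintage driver never performs; a hedged sketch of the fuller RX-mapping pattern:

```c
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Sketch: map a freshly allocated RX buffer and verify the mapping.
 * The yellowfin hunk above omits the error check. */
static dma_addr_t map_rx_buf(struct device *dev, struct sk_buff *skb,
			     unsigned int buf_sz)
{
	dma_addr_t addr = dma_map_single(dev, skb->data, buf_sz,
					 DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, addr))
		return DMA_MAPPING_ERROR;	/* caller should free the skb */
	return addr;
}
```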
@@ -831,8 +838,9 @@
 	yp->tx_skbuff[entry] = skb;
 
 #ifdef NO_TXSTATS
-	yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-		skb->data, len, PCI_DMA_TODEVICE));
+	yp->tx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+							     skb->data,
+							     len, DMA_TO_DEVICE));
 	yp->tx_ring[entry].result_status = 0;
 	if (entry >= TX_RING_SIZE-1) {
 		/* New stop command. */
@@ -847,8 +855,9 @@
 	yp->cur_tx++;
 #else
 	yp->tx_ring[entry<<1].request_cnt = len;
-	yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-		skb->data, len, PCI_DMA_TODEVICE));
+	yp->tx_ring[entry<<1].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+								skb->data,
+								len, DMA_TO_DEVICE));
 	/* The input_last (status-write) command is constant, but we must
 	   rewrite the subsequent 'stop' command. */
 
@@ -923,9 +932,10 @@
 			dev->stats.tx_packets++;
 			dev->stats.tx_bytes += skb->len;
 			/* Free the original skb. */
-			pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
-				skb->len, PCI_DMA_TODEVICE);
-			dev_kfree_skb_irq(skb);
+			dma_unmap_single(&yp->pci_dev->dev,
+					 le32_to_cpu(yp->tx_ring[entry].addr),
+					 skb->len, DMA_TO_DEVICE);
+			dev_consume_skb_irq(skb);
 			yp->tx_skbuff[entry] = NULL;
 		}
 		if (yp->tx_full &&
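Besides the DMA conversion, this hunk swaps `dev_kfree_skb_irq()` for `dev_consume_skb_irq()`. Both are legal from hardirq context; the distinction is for tracing: the consume variant marks a successfully transmitted skb, while the kfree variant registers as a drop in tooling that watches the `kfree_skb` tracepoint. A sketch of the convention:

```c
#include <linux/skbuff.h>

/* Sketch: pick the release helper by outcome so drop monitoring
 * stays meaningful. */
static void complete_tx_skb(struct sk_buff *skb, bool transmitted)
{
	if (transmitted)
		dev_consume_skb_irq(skb);	/* normal TX completion */
	else
		dev_kfree_skb_irq(skb);		/* counts as a drop */
}
```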
@@ -980,10 +990,10 @@
 				dev->stats.tx_packets++;
 			}
 			/* Free the original skb. */
-			pci_unmap_single(yp->pci_dev,
-				yp->tx_ring[entry<<1].addr, skb->len,
-				PCI_DMA_TODEVICE);
-			dev_kfree_skb_irq(skb);
+			dma_unmap_single(&yp->pci_dev->dev,
+					 yp->tx_ring[entry << 1].addr,
+					 skb->len, DMA_TO_DEVICE);
+			dev_consume_skb_irq(skb);
 			yp->tx_skbuff[entry] = 0;
 			/* Mark status as empty. */
 			yp->tx_status[entry].tx_errs = 0;
@@ -1050,13 +1060,14 @@
 		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
 		s16 frame_status;
 		u16 desc_status;
-		int data_size, yf_size;
+		int data_size, __maybe_unused yf_size;
 		u8 *buf_addr;
 
 		if(!desc->result_status)
 			break;
-		pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
-			yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&yp->pci_dev->dev,
+					le32_to_cpu(desc->addr),
+					yp->rx_buf_sz, DMA_FROM_DEVICE);
 		desc_status = le32_to_cpu(desc->result_status) >> 16;
 		buf_addr = rx_skb->data;
 		data_size = (le32_to_cpu(desc->dbdma_cmd) -
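RX buffers stay mapped across receives here, so before the CPU reads what the NIC wrote it must claim the buffer with `dma_sync_single_for_cpu()`; when a small packet is copied out and the buffer re-armed, ownership returns to the device via `dma_sync_single_for_device()` (the copybreak hunk below). A condensed sketch of that handshake:

```c
#include <linux/dma-mapping.h>

/* Sketch: sync handshake around a long-lived DMA_FROM_DEVICE mapping. */
static void peek_then_recycle(struct device *dev, dma_addr_t addr,
			      size_t len)
{
	/* Make the device's writes visible to the CPU. */
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

	/* ... inspect or copy the packet data here ... */

	/* Hand the buffer back to the device for the next packet. */
	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}
```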
@@ -1121,10 +1132,10 @@
 			   without copying to a properly sized skbuff. */
 			if (pkt_len > rx_copybreak) {
 				skb_put(skb = rx_skb, pkt_len);
-				pci_unmap_single(yp->pci_dev,
-					le32_to_cpu(yp->rx_ring[entry].addr),
-					yp->rx_buf_sz,
-					PCI_DMA_FROMDEVICE);
+				dma_unmap_single(&yp->pci_dev->dev,
+						 le32_to_cpu(yp->rx_ring[entry].addr),
+						 yp->rx_buf_sz,
+						 DMA_FROM_DEVICE);
 				yp->rx_skbuff[entry] = NULL;
 			} else {
 				skb = netdev_alloc_skb(dev, pkt_len + 2);
@@ -1133,10 +1144,10 @@
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
 				skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
 				skb_put(skb, pkt_len);
-				pci_dma_sync_single_for_device(yp->pci_dev,
-					le32_to_cpu(desc->addr),
-					yp->rx_buf_sz,
-					PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_device(&yp->pci_dev->dev,
+							   le32_to_cpu(desc->addr),
+							   yp->rx_buf_sz,
+							   DMA_FROM_DEVICE);
 			}
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_rx(skb);
@@ -1155,8 +1166,10 @@
 				break;			/* Better luck next round. */
 			yp->rx_skbuff[entry] = skb;
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
-			yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-				skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+			yp->rx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+									     skb->data,
+									     yp->rx_buf_sz,
+									     DMA_FROM_DEVICE));
 		}
 		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
 		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
@@ -1258,8 +1271,7 @@
 		yp->rx_skbuff[i] = NULL;
 	}
 	for (i = 0; i < TX_RING_SIZE; i++) {
-		if (yp->tx_skbuff[i])
-			dev_kfree_skb(yp->tx_skbuff[i]);
+		dev_kfree_skb(yp->tx_skbuff[i]);
 		yp->tx_skbuff[i] = NULL;
 	}
 
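The NULL test disappears because `dev_kfree_skb()` (which maps to `consume_skb()`) tolerates a NULL pointer; its free path bails out early, conceptually:

```c
#include <linux/skbuff.h>

/* Sketch (not the literal kernel source): the free path starts with a
 * NULL check, which is what makes the removed guard redundant. */
static void my_kfree_skb(struct sk_buff *skb)
{
	if (!skb)
		return;
	/* ... drop the refcount and free when it reaches zero ... */
}
```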
@@ -1344,7 +1356,7 @@
 	switch(cmd) {
 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
 		data->phy_id = np->phys[0] & 0x1f;
-		/* Fall Through */
+		fallthrough;
 
 	case SIOCGMIIREG:		/* Read MII PHY register. */
 		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
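The `/* Fall Through */` comment becomes the `fallthrough;` pseudo-keyword, a macro that expands to `__attribute__((__fallthrough__))` on compilers that support it, so `-Wimplicit-fallthrough` can verify intent instead of trusting a comment. A minimal illustration with hypothetical states and helpers:

```c
/* Sketch: an annotated fall-through the compiler can check. */
switch (state) {
case STATE_INIT:
	setup();		/* hypothetical helper */
	fallthrough;		/* deliberate: INIT also does RUN's work */
case STATE_RUN:
	run();			/* hypothetical helper */
	break;
}
```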
@@ -1380,10 +1392,12 @@
 	BUG_ON(!dev);
 	np = netdev_priv(dev);
 
-	pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
-		np->tx_status_dma);
-	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
-	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+	dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
+			  np->tx_status_dma);
+	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+			  np->rx_ring_dma);
+	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+			  np->tx_ring_dma);
 	unregister_netdev (dev);
 
 	pci_iounmap(pdev, np->base);
---|