@@ -74,8 +74,8 @@
 			if (skb == NULL)
 				break;

-			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
-						 PCI_DMA_FROMDEVICE);
+			mapping = dma_map_single(&tp->pdev->dev, skb->data,
+						 PKT_BUF_SZ, DMA_FROM_DEVICE);
 			if (dma_mapping_error(&tp->pdev->dev, mapping)) {
 				dev_kfree_skb(skb);
 				tp->rx_buffers[entry].skb = NULL;
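The conversion is the same throughout the patch: the legacy pci_* wrappers took the struct pci_dev * and PCI_DMA_* direction flags, while the generic API takes the embedded struct device (&tp->pdev->dev) and enum dma_data_direction values. A minimal, self-contained sketch of the refill mapping above; the helper name and the local PKT_BUF_SZ define are illustrative, but the dma_* calls are the real <linux/dma-mapping.h> API:

#include <linux/dma-mapping.h>	/* dma_map_single(), DMA_FROM_DEVICE */
#include <linux/pci.h>		/* struct pci_dev */
#include <linux/skbuff.h>	/* struct sk_buff */

#define PKT_BUF_SZ 1536		/* stand-in for the driver's buffer size */

/* Hypothetical helper mirroring the refill hunk: map one Rx buffer. */
static dma_addr_t map_rx_buffer(struct pci_dev *pdev, struct sk_buff *skb)
{
	/* The generic API wants the underlying struct device, not the
	 * struct pci_dev that pci_map_single() took. */
	dma_addr_t mapping = dma_map_single(&pdev->dev, skb->data,
					    PKT_BUF_SZ, DMA_FROM_DEVICE);

	/* dma_mapping_error() is unchanged between the two APIs. */
	if (dma_mapping_error(&pdev->dev, mapping))
		return 0;	/* 0 doubles as "unmapped" in this driver */

	return mapping;
}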
@@ -210,9 +210,10 @@
 			if (pkt_len < tulip_rx_copybreak &&
 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
-				pci_dma_sync_single_for_cpu(tp->pdev,
-							    tp->rx_buffers[entry].mapping,
-							    pkt_len, PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_cpu(&tp->pdev->dev,
+							tp->rx_buffers[entry].mapping,
+							pkt_len,
+							DMA_FROM_DEVICE);
 #if ! defined(__alpha__)
 				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
 							pkt_len);
@@ -222,9 +223,10 @@
 						tp->rx_buffers[entry].skb->data,
 						pkt_len);
 #endif
-				pci_dma_sync_single_for_device(tp->pdev,
-							       tp->rx_buffers[entry].mapping,
-							       pkt_len, PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_device(&tp->pdev->dev,
+							   tp->rx_buffers[entry].mapping,
+							   pkt_len,
+							   DMA_FROM_DEVICE);
 			} else {	/* Pass up the skb already on the Rx ring. */
 				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
 						     pkt_len);
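The two hunks above are the copybreak path: packets below tulip_rx_copybreak are copied into a freshly allocated skb so the DMA buffer can stay on the ring. Because that buffer keeps its streaming mapping, it must be synced to the CPU before the copy and handed back to the device afterwards. A hedged sketch of the pattern, with illustrative names ("dev" would be &tp->pdev->dev):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative copybreak helper for a small received packet. */
static void copybreak_rx(struct device *dev, struct sk_buff *copy,
			 struct sk_buff *ring_skb, dma_addr_t mapping,
			 unsigned int pkt_len)
{
	/* Make the received bytes visible to the CPU (a cache invalidate
	 * on non-coherent platforms); only pkt_len bytes are needed. */
	dma_sync_single_for_cpu(dev, mapping, pkt_len, DMA_FROM_DEVICE);

	skb_copy_to_linear_data(copy, ring_skb->data, pkt_len);

	/* Hand ownership back so the device can reuse the ring buffer
	 * for the next packet without a fresh dma_map_single(). */
	dma_sync_single_for_device(dev, mapping, pkt_len, DMA_FROM_DEVICE);
}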
@@ -240,8 +242,10 @@
 				}
 #endif

-				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
-						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				dma_unmap_single(&tp->pdev->dev,
+						 tp->rx_buffers[entry].mapping,
+						 PKT_BUF_SZ,
+						 DMA_FROM_DEVICE);

 				tp->rx_buffers[entry].skb = NULL;
 				tp->rx_buffers[entry].mapping = 0;
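Note the asymmetry with the syncs: they covered only pkt_len, but the unmap in this hunk uses PKT_BUF_SZ. dma_unmap_single() must be called with the same size and direction as the dma_map_single() that created the mapping (CONFIG_DMA_API_DEBUG warns on a mismatch), so the full buffer size is correct here even though only part of it was synced:

	/* Size/direction must match the original mapping, not the packet: */
	dma_unmap_single(&tp->pdev->dev, tp->rx_buffers[entry].mapping,
			 PKT_BUF_SZ,		/* mapped size, not pkt_len */
			 DMA_FROM_DEVICE);	/* same direction as the map */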
@@ -436,9 +440,10 @@
 		if (pkt_len < tulip_rx_copybreak &&
 		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
-			pci_dma_sync_single_for_cpu(tp->pdev,
-						    tp->rx_buffers[entry].mapping,
-						    pkt_len, PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(&tp->pdev->dev,
+						tp->rx_buffers[entry].mapping,
+						pkt_len,
+						DMA_FROM_DEVICE);
 #if ! defined(__alpha__)
 			skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
 						pkt_len);
@@ -448,9 +453,10 @@
 					tp->rx_buffers[entry].skb->data,
 					pkt_len);
 #endif
-			pci_dma_sync_single_for_device(tp->pdev,
-						       tp->rx_buffers[entry].mapping,
-						       pkt_len, PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&tp->pdev->dev,
+						   tp->rx_buffers[entry].mapping,
+						   pkt_len,
+						   DMA_FROM_DEVICE);
 		} else {	/* Pass up the skb already on the Rx ring. */
 			char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
 					     pkt_len);
@@ -466,8 +472,9 @@
 			}
 #endif

-			pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
-					 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&tp->pdev->dev,
+					 tp->rx_buffers[entry].mapping,
+					 PKT_BUF_SZ, DMA_FROM_DEVICE);

 			tp->rx_buffers[entry].skb = NULL;
 			tp->rx_buffers[entry].mapping = 0;
@@ -597,10 +604,10 @@
 		if (tp->tx_buffers[entry].skb == NULL) {
 			/* test because dummy frames not mapped */
 			if (tp->tx_buffers[entry].mapping)
-				pci_unmap_single(tp->pdev,
-						 tp->tx_buffers[entry].mapping,
-						 sizeof(tp->setup_frame),
-						 PCI_DMA_TODEVICE);
+				dma_unmap_single(&tp->pdev->dev,
+						 tp->tx_buffers[entry].mapping,
+						 sizeof(tp->setup_frame),
+						 DMA_TO_DEVICE);
 			continue;
 		}

@@ -629,9 +636,10 @@
 			dev->stats.tx_packets++;
 		}

-		pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
+		dma_unmap_single(&tp->pdev->dev,
+				 tp->tx_buffers[entry].mapping,
 				 tp->tx_buffers[entry].skb->len,
-				 PCI_DMA_TODEVICE);
+				 DMA_TO_DEVICE);

 		/* Free the original skb. */
 		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
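On the transmit side the direction flips to DMA_TO_DEVICE, and the two hunks above cover two cases: setup/dummy frames (no skb; dummy frames are queued unmapped, hence the guard on the mapping) and real packets, which are unmapped with the skb's own length. A sketch of that completion logic, assuming an skb/mapping pair like the one these hunks index; the helper itself is hypothetical:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct ring_info {		/* skb/mapping pair as the hunks use it */
	struct sk_buff *skb;
	dma_addr_t mapping;
};

/* Hypothetical TX-completion helper; "dev" would be &tp->pdev->dev. */
static void tx_complete_one(struct device *dev, struct ring_info *ri,
			    size_t setup_frame_sz)
{
	if (!ri->skb) {
		/* Setup frames carry no skb, and dummy frames were never
		 * mapped at all, hence the check on ri->mapping. */
		if (ri->mapping)
			dma_unmap_single(dev, ri->mapping, setup_frame_sz,
					 DMA_TO_DEVICE);
		return;
	}

	/* Real packets were mapped with the skb's length, so unmap with
	 * that same length before freeing the skb from IRQ context. */
	dma_unmap_single(dev, ri->mapping, ri->skb->len, DMA_TO_DEVICE);
	dev_kfree_skb_irq(ri->skb);
	ri->skb = NULL;
	ri->mapping = 0;
}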