| .. | .. |
| 98 | 98 | { |
| 99 | 99 | struct sk_buff **skb_ptr = NULL; |
| 100 | 100 | struct sk_buff **temp; |
| 101 | | -#define NR_SKB_COMPLETED 128 |
| 101 | +#define NR_SKB_COMPLETED 16 |
| 102 | 102 | struct sk_buff *completed[NR_SKB_COMPLETED]; |
| 103 | 103 | int more; |
| 104 | 104 | |
| .. | .. |
| 114 | 114 | |
| 115 | 115 | /* free SKBs */ |
| 116 | 116 | for (temp = completed; temp != skb_ptr; temp++) |
| 117 | | - dev_kfree_skb_irq(*temp); |
| 117 | + dev_consume_skb_irq(*temp); |
| 118 | 118 | } while (more); |
| 119 | 119 | } |
| 120 | 120 | |
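Two things happen in this hunk: the on-stack completion array shrinks from 128 to 16 pointers (likely to trim the function's stack frame), and dev_kfree_skb_irq() becomes dev_consume_skb_irq(). Both calls are safe from hardirq context; the difference is tracing semantics. dev_consume_skb_irq() fires the consume_skb tracepoint (normal completion), while dev_kfree_skb_irq() fires kfree_skb (a drop), so drop monitors such as dropwatch stop reporting false positives on healthy TX completions. A minimal sketch of the distinction, using a hypothetical helper rather than driver code:

```c
/* Sketch: freeing skbs from a TX-completion handler (hardirq context). */
static void tx_complete_one(struct sk_buff *skb, bool sent_ok)
{
	if (sent_ok)
		dev_consume_skb_irq(skb);  /* successful TX: consume_skb trace */
	else
		dev_kfree_skb_irq(skb);    /* failed TX: kfree_skb (drop) trace */
}
```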
|---|
| .. | .. |
| 241 | 241 | rx_priv = vxge_hw_ring_rxd_private_get(dtrh); |
| 242 | 242 | |
| 243 | 243 | rx_priv->skb_data = rx_priv->skb->data; |
| 244 | | - dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data, |
| 245 | | - rx_priv->data_size, PCI_DMA_FROMDEVICE); |
| 244 | + dma_addr = dma_map_single(&ring->pdev->dev, rx_priv->skb_data, |
| 245 | + rx_priv->data_size, DMA_FROM_DEVICE); |
| 246 | 246 | |
| 247 | | - if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) { |
| 247 | + if (unlikely(dma_mapping_error(&ring->pdev->dev, dma_addr))) { |
| 248 | 248 | ring->stats.pci_map_fail++; |
| 249 | 249 | return -EIO; |
| 250 | 250 | } |
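This hunk shows the patch's core transformation: the legacy PCI DMA wrappers are replaced by the generic DMA API, which takes a struct device * instead of a struct pci_dev * and uses enum dma_data_direction constants. The conversion is mechanical; a sketch of the correspondences and of the converted RX-map pattern, using the driver's names:

```c
/* Old PCI wrapper              ->  Generic DMA API equivalent
 * pci_map_single(pdev, ...)        dma_map_single(&pdev->dev, ...)
 * pci_unmap_single(pdev, ...)      dma_unmap_single(&pdev->dev, ...)
 * pci_dma_mapping_error(...)       dma_mapping_error(&pdev->dev, ...)
 * PCI_DMA_FROMDEVICE               DMA_FROM_DEVICE
 * PCI_DMA_TODEVICE                 DMA_TO_DEVICE
 */
dma_addr_t dma_addr = dma_map_single(&ring->pdev->dev, rx_priv->skb_data,
				     rx_priv->data_size, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&ring->pdev->dev, dma_addr)))
	return -EIO;	/* never hand a failed mapping to the hardware */
```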
|---|
| .. | .. |
|---|
| 323 | 323 | static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring, |
|---|
| 324 | 324 | struct vxge_rx_priv *rx_priv) |
|---|
| 325 | 325 | { |
|---|
| 326 | | - pci_dma_sync_single_for_device(ring->pdev, |
|---|
| 327 | | - rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE); |
|---|
| 326 | + dma_sync_single_for_device(&ring->pdev->dev, rx_priv->data_dma, |
|---|
| 327 | + rx_priv->data_size, DMA_FROM_DEVICE); |
|---|
| 328 | 328 | |
|---|
| 329 | 329 | vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size); |
|---|
| 330 | 330 | vxge_hw_ring_rxd_pre_post(ring->handle, dtr); |
|---|
| .. | .. |
|---|
| 425 | 425 | if (!vxge_rx_map(dtr, ring)) { |
|---|
| 426 | 426 | skb_put(skb, pkt_length); |
|---|
| 427 | 427 | |
|---|
| 428 | | - pci_unmap_single(ring->pdev, data_dma, |
|---|
| 429 | | - data_size, PCI_DMA_FROMDEVICE); |
|---|
| 428 | + dma_unmap_single(&ring->pdev->dev, |
|---|
| 429 | + data_dma, data_size, |
|---|
| 430 | + DMA_FROM_DEVICE); |
|---|
| 430 | 431 | |
|---|
| 431 | 432 | vxge_hw_ring_rxd_pre_post(ringh, dtr); |
|---|
| 432 | 433 | vxge_post(&dtr_cnt, &first_dtr, dtr, |
|---|
| .. | .. |
|---|
| 458 | 459 | skb_reserve(skb_up, |
|---|
| 459 | 460 | VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); |
|---|
| 460 | 461 | |
|---|
| 461 | | - pci_dma_sync_single_for_cpu(ring->pdev, |
|---|
| 462 | | - data_dma, data_size, |
|---|
| 463 | | - PCI_DMA_FROMDEVICE); |
|---|
| 462 | + dma_sync_single_for_cpu(&ring->pdev->dev, |
|---|
| 463 | + data_dma, data_size, |
|---|
| 464 | + DMA_FROM_DEVICE); |
|---|
| 464 | 465 | |
|---|
| 465 | 466 | vxge_debug_mem(VXGE_TRACE, |
|---|
| 466 | 467 | "%s: %s:%d skb_up = %p", |
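The two sync hunks (vxge_re_pre_post() at line 326 above and the copy path here) follow the streaming-DMA ownership rule: after dma_map_single() the buffer belongs to the device; dma_sync_single_for_cpu() hands it to the CPU for reading, and dma_sync_single_for_device() must hand it back before the descriptor is re-posted. A sketch of the round trip on the copy path, assuming the driver's names (the skb_put_data() copy is illustrative):

```c
/* Take CPU ownership of a DMA_FROM_DEVICE buffer. */
dma_sync_single_for_cpu(&ring->pdev->dev, data_dma, data_size,
			DMA_FROM_DEVICE);

/* Buffer is now CPU-owned: safe to read the received frame. */
skb_put_data(skb_up, rx_priv->skb_data, pkt_length);

/* Hand ownership back before the descriptor is posted again. */
dma_sync_single_for_device(&ring->pdev->dev, data_dma, data_size,
			   DMA_FROM_DEVICE);
```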
|---|
| .. | .. |
| 585 | 586 | } |
| 586 | 587 | |
| 587 | 588 | /* for unfragmented skb */ |
| 588 | | - pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++], |
| 589 | | - skb_headlen(skb), PCI_DMA_TODEVICE); |
| 589 | + dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++], |
| 590 | + skb_headlen(skb), DMA_TO_DEVICE); |
| 590 | 591 | |
| 591 | 592 | for (j = 0; j < frg_cnt; j++) { |
| 592 | | - pci_unmap_page(fifo->pdev, |
| 593 | | - txd_priv->dma_buffers[i++], |
| 594 | | - skb_frag_size(frag), PCI_DMA_TODEVICE); |
| 593 | + dma_unmap_page(&fifo->pdev->dev, |
| 594 | + txd_priv->dma_buffers[i++], |
| 595 | + skb_frag_size(frag), DMA_TO_DEVICE); |
| 595 | 596 | frag += 1; |
| 596 | 597 | } |
| 597 | 598 | |
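Unmapping a completed TX skb mirrors how it was mapped: the linear head came from dma_map_single(), so it is released with dma_unmap_single(); each page fragment is released with dma_unmap_page(). The same loop appears three times in this patch (TX completion here, the TX error unwind at line 980, and fifo termination at line 1051). A condensed sketch of the pattern:

```c
/* Sketch: release all DMA mappings held for one TX skb. */
int i = 0, j;
skb_frag_t *frag = &skb_shinfo(skb)->frags[0];

dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
		 skb_headlen(skb), DMA_TO_DEVICE);	/* linear head */

for (j = 0; j < skb_shinfo(skb)->nr_frags; j++, frag++)
	dma_unmap_page(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
		       skb_frag_size(frag), DMA_TO_DEVICE); /* one per frag */
```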
|---|
| .. | .. |
| 897 | 898 | |
| 898 | 899 | first_frg_len = skb_headlen(skb); |
| 899 | 900 | |
| 900 | | - dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len, |
| 901 | | - PCI_DMA_TODEVICE); |
| 901 | + dma_pointer = dma_map_single(&fifo->pdev->dev, skb->data, |
| 902 | + first_frg_len, DMA_TO_DEVICE); |
| 902 | 903 | |
| 903 | | - if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) { |
| 904 | + if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer))) { |
| 904 | 905 | vxge_hw_fifo_txdl_free(fifo_hw, dtr); |
| 905 | 906 | fifo->stats.pci_map_fail++; |
| 906 | 907 | goto _exit0; |
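Only the linear-head mapping is visible in this hunk; the page fragments of a nonlinear skb are converted the same way elsewhere in the patch. For reference, the generic API has a dedicated helper for fragments, skb_frag_dma_map(); a sketch under the driver's naming, with a hypothetical unwind label:

```c
/* Sketch: map each page fragment for transmit. */
for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];

	txdl_priv->dma_buffers[i] =
		skb_frag_dma_map(&fifo->pdev->dev, frag, 0,
				 skb_frag_size(frag), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&fifo->pdev->dev,
				       txdl_priv->dma_buffers[i])))
		goto _exit_unmap;	/* hypothetical: unwind earlier mappings */
	i++;
}
```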
|---|
| .. | .. |
| 977 | 978 | j = 0; |
| 978 | 979 | frag = &skb_shinfo(skb)->frags[0]; |
| 979 | 980 | |
| 980 | | - pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++], |
| 981 | | - skb_headlen(skb), PCI_DMA_TODEVICE); |
| 981 | + dma_unmap_single(&fifo->pdev->dev, txdl_priv->dma_buffers[j++], |
| 982 | + skb_headlen(skb), DMA_TO_DEVICE); |
| 982 | 983 | |
| 983 | 984 | for (; j < i; j++) { |
| 984 | | - pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j], |
| 985 | | - skb_frag_size(frag), PCI_DMA_TODEVICE); |
| 985 | + dma_unmap_page(&fifo->pdev->dev, txdl_priv->dma_buffers[j], |
| 986 | + skb_frag_size(frag), DMA_TO_DEVICE); |
| 986 | 987 | frag += 1; |
| 987 | 988 | } |
| 988 | 989 | |
| .. | .. |
| 1012 | 1013 | if (state != VXGE_HW_RXD_STATE_POSTED) |
| 1013 | 1014 | return; |
| 1014 | 1015 | |
| 1015 | | - pci_unmap_single(ring->pdev, rx_priv->data_dma, |
| 1016 | | - rx_priv->data_size, PCI_DMA_FROMDEVICE); |
| 1016 | + dma_unmap_single(&ring->pdev->dev, rx_priv->data_dma, |
| 1017 | + rx_priv->data_size, DMA_FROM_DEVICE); |
| 1017 | 1018 | |
| 1018 | 1019 | dev_kfree_skb(rx_priv->skb); |
| 1019 | 1020 | rx_priv->skb_data = NULL; |
| .. | .. |
| 1048 | 1049 | frag = &skb_shinfo(skb)->frags[0]; |
| 1049 | 1050 | |
| 1050 | 1051 | /* for unfragmented skb */ |
| 1051 | | - pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++], |
| 1052 | | - skb_headlen(skb), PCI_DMA_TODEVICE); |
| 1052 | + dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++], |
| 1053 | + skb_headlen(skb), DMA_TO_DEVICE); |
| 1053 | 1054 | |
| 1054 | 1055 | for (j = 0; j < frg_cnt; j++) { |
| 1055 | | - pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++], |
| 1056 | | - skb_frag_size(frag), PCI_DMA_TODEVICE); |
| 1056 | + dma_unmap_page(&fifo->pdev->dev, txd_priv->dma_buffers[i++], |
| 1057 | + skb_frag_size(frag), DMA_TO_DEVICE); |
| 1057 | 1058 | frag += 1; |
| 1058 | 1059 | } |
| 1059 | 1060 | |
| .. | .. |
| 1075 | 1076 | list_for_each_safe(entry, next, &vpath->mac_addr_list) { |
| 1076 | 1077 | if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) { |
| 1077 | 1078 | list_del(entry); |
| 1078 | | - kfree((struct vxge_mac_addrs *)entry); |
| 1079 | + kfree(entry); |
| 1079 | 1080 | vpath->mac_addr_cnt--; |
| 1080 | 1081 | |
| 1081 | 1082 | if (is_multicast_ether_addr(mac->macaddr)) |
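kfree() takes a void *, so the (struct vxge_mac_addrs *) cast adds nothing and is dropped here and in the identical loop at line 2920 below. Note that passing the list_head pointer straight to kfree() only works because the embedded list_head sits at the start of the containing struct; the layout-independent idiom would use list_entry(), sketched here with an assumed member name `item`:

```c
/* Sketch: layout-independent removal; 'item' is the assumed name of
 * the list_head embedded in struct vxge_mac_addrs.
 */
struct vxge_mac_addrs *mac_entry =
	list_entry(entry, struct vxge_mac_addrs, item);

list_del(entry);
kfree(mac_entry);	/* frees the whole containing struct */
```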
|---|
| .. | .. |
| 1274 | 1275 | /** |
| 1275 | 1276 | * vxge_set_mac_addr |
| 1276 | 1277 | * @dev: pointer to the device structure |
| 1278 | + * @p: socket info |
| 1277 | 1279 | * |
| 1278 | 1280 | * Update entry "0" (default MAC addr) |
| 1279 | 1281 | */ |
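Several hunks in this patch are pure kernel-doc repair: every parameter in a `/** ... */` block must have a matching `@name:` line that tracks the real function signature, or scripts/kernel-doc warns under `make W=1`. A sketch of the corrected block for this function (the @p description is slightly expanded; ndo_set_mac_address handlers receive a struct sockaddr through the void * argument):

```c
/**
 * vxge_set_mac_addr - update entry "0" (default MAC addr)
 * @dev: pointer to the device structure
 * @p: socket info (a struct sockaddr carrying the new address)
 */
```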
|---|
| .. | .. |
| 1798 | 1800 | |
| 1799 | 1801 | /** |
| 1800 | 1802 | * vxge_poll - Receive handler when Receive Polling is used. |
| 1801 | | - * @dev: pointer to the device structure. |
| 1803 | + * @napi: pointer to the napi structure. |
| 1802 | 1804 | * @budget: Number of packets budgeted to be processed in this iteration. |
| 1803 | 1805 | * |
| 1804 | 1806 | * This function comes into picture only if Receive side is being handled |
| .. | .. |
| 1826 | 1828 | vxge_hw_channel_msix_unmask( |
| 1827 | 1829 | (struct __vxge_hw_channel *)ring->handle, |
| 1828 | 1830 | ring->rx_vector_no); |
| 1829 | | - mmiowb(); |
| 1830 | 1831 | } |
| 1831 | 1832 | |
| 1832 | 1833 | /* We are copying and returning the local variable, in case if after |
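The deleted mmiowb() calls (here and in the interrupt handlers below) are not replaced by anything. The upstream rationale: since kernel 5.2, the ordering mmiowb() provided is issued implicitly by spin_unlock() on the architectures that need it, via per-CPU mmiowb tracking, so drivers no longer call it explicitly. A sketch of what the guarantee now covers, with hypothetical names:

```c
/* Sketch: posting a doorbell under a lock, post-5.2 semantics. */
spin_lock(&chan->lock);
writel(val, chan->regs + DOORBELL_OFF);	/* MMIO write inside the lock */
spin_unlock(&chan->lock);	/* implies the old mmiowb() ordering */
```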
|---|
| .. | .. |
| 2234 | 2235 | vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle, |
| 2235 | 2236 | fifo->tx_vector_no); |
| 2236 | 2237 | |
| 2237 | | - mmiowb(); |
| 2238 | | - |
| 2239 | 2238 | return IRQ_HANDLED; |
| 2240 | 2239 | } |
| 2241 | 2240 | |
| .. | .. |
| 2272 | 2271 | */ |
| 2273 | 2272 | vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); |
| 2274 | 2273 | vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id); |
| 2275 | | - mmiowb(); |
| 2276 | 2274 | |
| 2277 | 2275 | status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, |
| 2278 | 2276 | vdev->exec_mode); |
| 2279 | 2277 | if (status == VXGE_HW_OK) { |
| 2280 | 2278 | vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, |
| 2281 | 2279 | msix_id); |
| 2282 | | - mmiowb(); |
| 2283 | 2280 | continue; |
| 2284 | 2281 | } |
| 2285 | 2282 | vxge_debug_intr(VXGE_ERR, |
| .. | .. |
| 2553 | 2550 | vxge_debug_init(VXGE_ERR, |
| 2554 | 2551 | "%s: Defaulting to INTA", |
| 2555 | 2552 | vdev->ndev->name); |
| 2556 | | - goto INTA_MODE; |
| 2553 | + goto INTA_MODE; |
| 2557 | 2554 | } |
| 2558 | 2555 | |
| 2559 | 2556 | msix_idx = (vdev->vpaths[0].handle->vpath->vp_id * |
| .. | .. |
| 2917 | 2914 | |
| 2918 | 2915 | list_for_each_safe(entry, next, &vpath->mac_addr_list) { |
| 2919 | 2916 | list_del(entry); |
| 2920 | | - kfree((struct vxge_mac_addrs *)entry); |
| 2917 | + kfree(entry); |
| 2921 | 2918 | } |
| 2922 | 2919 | } |
| 2923 | 2920 | |
| .. | .. |
| 3100 | 3097 | /** |
| 3101 | 3098 | * vxge_get_stats64 |
| 3102 | 3099 | * @dev: pointer to the device structure |
| 3103 | | - * @stats: pointer to struct rtnl_link_stats64 |
| 3100 | + * @net_stats: pointer to struct rtnl_link_stats64 |
| 3104 | 3101 | * |
| 3105 | 3102 | */ |
| 3106 | 3103 | static void |
| .. | .. |
| 3249 | 3246 | /** |
| 3250 | 3247 | * vxge_ioctl |
| 3251 | 3248 | * @dev: Device pointer. |
| 3252 | | - * @ifr: An IOCTL specific structure, that can contain a pointer to |
| 3249 | + * @rq: An IOCTL specific structure, that can contain a pointer to |
| 3253 | 3250 | * a proprietary structure used to pass information to the driver. |
| 3254 | 3251 | * @cmd: This is used to distinguish between the different commands that |
| 3255 | 3252 | * can be passed to the IOCTL functions. |
| .. | .. |
| 3273 | 3270 | /** |
| 3274 | 3271 | * vxge_tx_watchdog |
| 3275 | 3272 | * @dev: pointer to net device structure |
| 3273 | + * @txqueue: index of the hanging queue |
| 3276 | 3274 | * |
| 3277 | 3275 | * Watchdog for transmit side. |
| 3278 | 3276 | * This function is triggered if the Tx Queue is stopped |
| 3279 | 3277 | * for a pre-defined amount of time when the Interface is still up. |
| 3280 | 3278 | */ |
| 3281 | | -static void vxge_tx_watchdog(struct net_device *dev) |
| 3279 | +static void vxge_tx_watchdog(struct net_device *dev, unsigned int txqueue) |
| 3282 | 3280 | { |
| 3283 | 3281 | struct vxgedev *vdev; |
| 3284 | 3282 | |
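The extra txqueue argument follows the kernel 5.6 change to the ndo_tx_timeout prototype, which passes the index of the stalled queue so drivers no longer have to scan every queue themselves. A sketch of the matching net_device_ops wiring (the ops-struct name follows the driver's convention; other callbacks elided):

```c
/* Sketch: hooking the two-argument watchdog into net_device_ops. */
static void vxge_tx_watchdog(struct net_device *dev, unsigned int txqueue);

static const struct net_device_ops vxge_netdev_ops = {
	/* ...other callbacks... */
	.ndo_tx_timeout	= vxge_tx_watchdog,  /* called with the stuck queue */
};
```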
|---|
| .. | .. |
| 4004 | 4002 | } |
| 4005 | 4003 | } |
| 4006 | 4004 | |
| 4007 | | -#ifdef CONFIG_PM |
| 4008 | 4005 | /** |
| 4009 | 4006 | * vxge_pm_suspend - vxge power management suspend entry point |
| 4007 | + * @dev_d: device pointer |
| 4010 | 4008 | * |
| 4011 | 4009 | */ |
| 4012 | | -static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state) |
| 4010 | +static int __maybe_unused vxge_pm_suspend(struct device *dev_d) |
| 4013 | 4011 | { |
| 4014 | 4012 | return -ENOSYS; |
| 4015 | 4013 | } |
| 4016 | 4014 | /** |
| 4017 | 4015 | * vxge_pm_resume - vxge power management resume entry point |
| 4016 | + * @dev_d: device pointer |
| 4018 | 4017 | * |
| 4019 | 4018 | */ |
| 4020 | | -static int vxge_pm_resume(struct pci_dev *pdev) |
| 4019 | +static int __maybe_unused vxge_pm_resume(struct device *dev_d) |
| 4021 | 4020 | { |
| 4022 | 4021 | return -ENOSYS; |
| 4023 | 4022 | } |
| 4024 | | - |
| 4025 | | -#endif |
| 4026 | 4023 | |
| 4027 | 4024 | /** |
| 4028 | 4025 | * vxge_io_error_detected - called when PCI error is detected |
| .. | .. |
| 4395 | 4392 | goto _exit0; |
| 4396 | 4393 | } |
| 4397 | 4394 | |
| 4398 | | - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { |
| 4395 | + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { |
| 4399 | 4396 | vxge_debug_ll_config(VXGE_TRACE, |
| 4400 | 4397 | "%s : using 64bit DMA", __func__); |
| 4401 | 4398 | |
| 4402 | 4399 | high_dma = 1; |
| 4403 | 4400 | |
| 4404 | | - if (pci_set_consistent_dma_mask(pdev, |
| 4405 | | - DMA_BIT_MASK(64))) { |
| 4401 | + if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { |
| 4406 | 4402 | vxge_debug_init(VXGE_ERR, |
| 4407 | 4403 | "%s : unable to obtain 64bit DMA for " |
| 4408 | 4404 | "consistent allocations", __func__); |
| 4409 | 4405 | ret = -ENOMEM; |
| 4410 | 4406 | goto _exit1; |
| 4411 | 4407 | } |
| 4412 | | - } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { |
| 4408 | + } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { |
| 4413 | 4409 | vxge_debug_ll_config(VXGE_TRACE, |
| 4414 | 4410 | "%s : using 32bit DMA", __func__); |
| 4415 | 4411 | } else { |
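The mask-setting calls also convert one-for-one (pci_set_dma_mask → dma_set_mask, pci_set_consistent_dma_mask → dma_set_coherent_mask), deliberately preserving the driver's exact fallback behavior. New code would usually collapse the pair with dma_set_mask_and_coherent(), which sets the streaming and coherent masks together; a sketch of that simpler form (note it changes the failure behavior slightly, since a coherent-mask failure would fall through to 32-bit rather than aborting):

```c
/* Sketch: prefer 64-bit DMA, fall back to 32-bit, else fail the probe. */
if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
	high_dma = 1;	/* device can reach all of physical memory */
} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
	dev_err(&pdev->dev, "no usable DMA configuration\n");
	ret = -ENOMEM;
	goto _exit1;
}
```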
|---|
| .. | .. |
| 4547 | 4543 | * due to the fact that HWTS is using the FCS as the location of the |
| 4548 | 4544 | * timestamp. The HW FCS checking will still correctly determine if |
| 4549 | 4545 | * there is a valid checksum, and the FCS is being removed by the driver |
| 4550 | | - * anyway. So no fucntionality is being lost. Since it is always |
| 4546 | + * anyway. So no functionality is being lost. Since it is always |
| 4551 | 4547 | * enabled, we now simply use the ioctl call to set whether or not the |
| 4552 | 4548 | * driver should be paying attention to the HWTS. |
| 4553 | 4549 | */ |
| .. | .. |
| 4801 | 4797 | .resume = vxge_io_resume, |
| 4802 | 4798 | }; |
| 4803 | 4799 | |
| 4800 | +static SIMPLE_DEV_PM_OPS(vxge_pm_ops, vxge_pm_suspend, vxge_pm_resume); |
| 4801 | + |
| 4804 | 4802 | static struct pci_driver vxge_driver = { |
| 4805 | 4803 | .name = VXGE_DRIVER_NAME, |
| 4806 | 4804 | .id_table = vxge_id_table, |
| 4807 | 4805 | .probe = vxge_probe, |
| 4808 | 4806 | .remove = vxge_remove, |
| 4809 | | -#ifdef CONFIG_PM |
| 4810 | | - .suspend = vxge_pm_suspend, |
| 4811 | | - .resume = vxge_pm_resume, |
| 4812 | | -#endif |
| 4807 | + .driver.pm = &vxge_pm_ops, |
| 4813 | 4808 | .err_handler = &vxge_err_handler, |
| 4814 | 4809 | }; |
| 4815 | 4810 | |
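The legacy .suspend/.resume members of struct pci_driver take a pci_dev and needed #ifdef CONFIG_PM guards; the dev_pm_ops conversion above replaces both at once. SIMPLE_DEV_PM_OPS() expands to an empty ops table when CONFIG_PM_SLEEP is off, which is why the callbacks at line 4010/4019 are marked __maybe_unused instead of being wrapped in preprocessor conditionals. The whole pattern, sketched with hypothetical names:

```c
/* Sketch: device-centric power management for a PCI driver. */
static int __maybe_unused foo_suspend(struct device *dev_d)
{
	/* quiesce the hardware; dev_d is the generic device view */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev_d)
{
	/* reprogram the hardware after resume */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.driver.pm	= &foo_pm_ops,	/* replaces .suspend/.resume */
	/* .probe, .remove, .err_handler, ... */
};
```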
|---|