| .. | .. |
| 13 | 13 | ******************************************************************************/ |
| 14 | 14 | #include <linux/vmalloc.h> |
| 15 | 15 | #include <linux/etherdevice.h> |
| | 16 | +#include <linux/io-64-nonatomic-lo-hi.h> |
| 16 | 17 | #include <linux/pci.h> |
| 17 | 18 | #include <linux/slab.h> |
| 18 | 19 | |
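The only change in this hunk is the new `<linux/io-64-nonatomic-lo-hi.h>` include. That header supplies `readq()`/`writeq()` on 32-bit builds, where a 64-bit MMIO access is emulated as two 32-bit accesses, low word first; 64-bit builds keep their native accessors. A minimal sketch of the idea, using a hypothetical device struct and register offset rather than the vxge BAR0 layout:

```c
/* Illustrative only: the struct and register offset below are hypothetical. */
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>	/* readq()/writeq() fallback on 32-bit */

struct example_device {
	void __iomem *bar0;	/* obtained from pci_iomap() or ioremap() */
};

static u64 example_read_counter(struct example_device *d)
{
	/*
	 * The same source compiles on 32-bit and 64-bit kernels; on 32-bit
	 * this readq() expands to two 32-bit reads, low word first.
	 */
	return readq(d->bar0 + 0x0100);	/* hypothetical register offset */
}
```

The companion `<linux/io-64-nonatomic-hi-lo.h>` header does the same with the high word read first, for devices that latch on that access.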
| .. | .. |
| 987 | 988 | |
| 988 | 989 | /** |
| 989 | 990 | * vxge_hw_device_hw_info_get - Get the hw information |
| | 991 | + * @bar0: the bar |
| | 992 | + * @hw_info: the hw_info struct |
| | 993 | + * |
| 990 | 994 | * Returns the vpath mask that has the bits set for each vpath allocated |
| 991 | 995 | * for the driver, FW version information, and the first mac address for |
| 992 | 996 | * each vpath |
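This hunk only touches documentation: the comment gains `@bar0` and `@hw_info` lines so every parameter of `vxge_hw_device_hw_info_get()` is described and the block parses cleanly as kernel-doc (missing parameter descriptions trigger warnings from `scripts/kernel-doc` and `W=1` builds). For reference, the general shape such a comment is expected to have, shown on a hypothetical function rather than the driver's:

```c
#include <linux/io.h>

struct example_hw_info;	/* hypothetical type, for illustration only */

/**
 * example_hw_info_get - read adapter information from BAR0
 * @bar0: mapped BAR0 register space of the adapter
 * @hw_info: buffer that receives the decoded hardware information
 *
 * Every parameter needs its own "@name:" line; a blank "*" line then
 * separates the parameter list from the long description.
 *
 * Returns 0 on success, a negative errno value otherwise.
 */
int example_hw_info_get(void __iomem *bar0, struct example_hw_info *hw_info);
```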
| .. | .. |
| 1101 | 1105 | hldev = blockpool->hldev; |
| 1102 | 1106 | |
| 1103 | 1107 | list_for_each_safe(p, n, &blockpool->free_block_list) { |
| 1104 | | - pci_unmap_single(hldev->pdev, |
| 1105 | | - ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, |
| 1106 | | - ((struct __vxge_hw_blockpool_entry *)p)->length, |
| 1107 | | - PCI_DMA_BIDIRECTIONAL); |
| | 1108 | + dma_unmap_single(&hldev->pdev->dev, |
| | 1109 | + ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, |
| | 1110 | + ((struct __vxge_hw_blockpool_entry *)p)->length, |
| | 1111 | + DMA_BIDIRECTIONAL); |
| 1108 | 1112 | |
| 1109 | 1113 | vxge_os_dma_free(hldev->pdev, |
| 1110 | 1114 | ((struct __vxge_hw_blockpool_entry *)p)->memblock, |
| .. | .. |
| 1177 | 1181 | goto blockpool_create_exit; |
| 1178 | 1182 | } |
| 1179 | 1183 | |
| 1180 | | - dma_addr = pci_map_single(hldev->pdev, memblock, |
| 1181 | | - VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); |
| 1182 | | - if (unlikely(pci_dma_mapping_error(hldev->pdev, |
| 1183 | | - dma_addr))) { |
| | 1184 | + dma_addr = dma_map_single(&hldev->pdev->dev, memblock, |
| | 1185 | + VXGE_HW_BLOCK_SIZE, |
| | 1186 | + DMA_BIDIRECTIONAL); |
| | 1187 | + if (unlikely(dma_mapping_error(&hldev->pdev->dev, dma_addr))) { |
| 1184 | 1188 | vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); |
| 1185 | 1189 | __vxge_hw_blockpool_destroy(blockpool); |
| 1186 | 1190 | status = VXGE_HW_ERR_OUT_OF_MEMORY; |
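This hunk, together with the similar ones before and after it, is the mechanical part of the conversion from the legacy PCI DMA wrappers to the generic DMA API in `<linux/dma-mapping.h>`: `pci_map_single(pdev, ...)` becomes `dma_map_single(&pdev->dev, ...)`, `PCI_DMA_BIDIRECTIONAL` becomes `DMA_BIDIRECTIONAL`, and the returned handle is validated with `dma_mapping_error()` instead of `pci_dma_mapping_error()`. A hedged sketch of the streaming-DMA lifecycle the driver now follows; the function and buffer names are made up for illustration, not taken from vxge:

```c
/* Sketch of the map / check / unmap pattern; not vxge code. */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_map_block(struct pci_dev *pdev, void *buf, size_t len)
{
	struct device *dev = &pdev->dev;	/* generic DMA API takes a struct device */
	dma_addr_t dma;

	dma = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma))	/* check before handing "dma" to hardware */
		return -ENOMEM;

	/* ... program the device with "dma" and wait for it to finish ... */

	dma_unmap_single(dev, dma, len, DMA_BIDIRECTIONAL);
	return 0;
}
```

The same substitution covers the unmap sites earlier and later in the patch; only the mapping sites additionally need the `dma_mapping_error()` check.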
| .. | .. |
| 2263 | 2267 | goto exit; |
| 2264 | 2268 | } |
| 2265 | 2269 | |
| 2266 | | - dma_addr = pci_map_single(devh->pdev, block_addr, length, |
| 2267 | | - PCI_DMA_BIDIRECTIONAL); |
| | 2270 | + dma_addr = dma_map_single(&devh->pdev->dev, block_addr, length, |
| | 2271 | + DMA_BIDIRECTIONAL); |
| 2268 | 2272 | |
| 2269 | | - if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { |
| | 2273 | + if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_addr))) { |
| 2270 | 2274 | vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); |
| 2271 | 2275 | blockpool->req_out--; |
| 2272 | 2276 | goto exit; |
| .. | .. |
| 2302 | 2306 | static inline void |
| 2303 | 2307 | vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size) |
| 2304 | 2308 | { |
| 2305 | | - gfp_t flags; |
| 2306 | 2309 | void *vaddr; |
| 2307 | 2310 | |
| 2308 | | - if (in_interrupt()) |
| 2309 | | - flags = GFP_ATOMIC | GFP_DMA; |
| 2310 | | - else |
| 2311 | | - flags = GFP_KERNEL | GFP_DMA; |
| 2312 | | - |
| 2313 | | - vaddr = kmalloc((size), flags); |
| 2314 | | - |
| | 2311 | + vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA); |
| 2315 | 2312 | vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev); |
| 2316 | 2313 | } |
| 2317 | 2314 | |
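Besides simplifying the allocation, this hunk drops the `in_interrupt()` test. The rewritten `vxge_os_dma_malloc_async()` always uses `GFP_KERNEL | GFP_DMA`, which is only safe if, as the patch assumes, the function is reached from sleepable process context. Choosing flags with `in_interrupt()` inside an allocator is discouraged anyway: it does not cover every atomic context (for example, a caller that merely holds a spinlock), so the callee cannot reliably infer whether it may sleep. Where a helper genuinely can be called from both contexts, the usual pattern is to let the caller pass the `gfp_t`, roughly as in this hypothetical sketch:

```c
#include <linux/gfp.h>
#include <linux/slab.h>

/* Hypothetical helper: the caller, which knows its own context, picks the flags. */
static void *example_alloc_dma_block(size_t size, gfp_t gfp)
{
	return kmalloc(size, gfp | GFP_DMA);
}

/*
 * Process context:  example_alloc_dma_block(sz, GFP_KERNEL);
 * Atomic context:   example_alloc_dma_block(sz, GFP_ATOMIC);
 */
```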
| .. | .. |
| 2358 | 2355 | if (!memblock) |
| 2359 | 2356 | goto exit; |
| 2360 | 2357 | |
| 2361 | | - dma_object->addr = pci_map_single(devh->pdev, memblock, size, |
| 2362 | | - PCI_DMA_BIDIRECTIONAL); |
| | 2358 | + dma_object->addr = dma_map_single(&devh->pdev->dev, memblock, |
| | 2359 | + size, DMA_BIDIRECTIONAL); |
| 2363 | 2360 | |
| 2364 | | - if (unlikely(pci_dma_mapping_error(devh->pdev, |
| 2365 | | - dma_object->addr))) { |
| | 2361 | + if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_object->addr))) { |
| 2366 | 2362 | vxge_os_dma_free(devh->pdev, memblock, |
| 2367 | 2363 | &dma_object->acc_handle); |
| 2368 | 2364 | memblock = NULL; |
| .. | .. |
| 2409 | 2405 | if (blockpool->pool_size < blockpool->pool_max) |
| 2410 | 2406 | break; |
| 2411 | 2407 | |
| 2412 | | - pci_unmap_single( |
| 2413 | | - (blockpool->hldev)->pdev, |
| 2414 | | - ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, |
| 2415 | | - ((struct __vxge_hw_blockpool_entry *)p)->length, |
| 2416 | | - PCI_DMA_BIDIRECTIONAL); |
| | 2408 | + dma_unmap_single(&(blockpool->hldev)->pdev->dev, |
| | 2409 | + ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, |
| | 2410 | + ((struct __vxge_hw_blockpool_entry *)p)->length, |
| | 2411 | + DMA_BIDIRECTIONAL); |
| 2417 | 2412 | |
| 2418 | 2413 | vxge_os_dma_free( |
| 2419 | 2414 | (blockpool->hldev)->pdev, |
| .. | .. |
| 2444 | 2439 | blockpool = &devh->block_pool; |
| 2445 | 2440 | |
| 2446 | 2441 | if (size != blockpool->block_size) { |
| 2447 | | - pci_unmap_single(devh->pdev, dma_object->addr, size, |
| 2448 | | - PCI_DMA_BIDIRECTIONAL); |
| | 2442 | + dma_unmap_single(&devh->pdev->dev, dma_object->addr, size, |
| | 2443 | + DMA_BIDIRECTIONAL); |
| 2449 | 2444 | vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); |
| 2450 | 2445 | } else { |
| 2451 | 2446 | |
| .. | .. |
| 3769 | 3764 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN | |
| 3770 | 3765 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA( |
| 3771 | 3766 | itable[j]); |
| 3772 | | - /* fall through */ |
| | 3767 | + fallthrough; |
| 3773 | 3768 | case 2: |
| 3774 | 3769 | *data0 |= |
| 3775 | 3770 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)| |
| 3776 | 3771 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN | |
| 3777 | 3772 | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA( |
| 3778 | 3773 | itable[j]); |
| 3779 | | - /* fall through */ |
| | 3774 | + fallthrough; |
| 3780 | 3775 | case 3: |
| 3781 | 3776 | *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)| |
| 3782 | 3777 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN | |
| 3783 | 3778 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA( |
| 3784 | 3779 | itable[j]); |
| 3785 | | - /* fall through */ |
| | 3780 | + fallthrough; |
| 3786 | 3781 | case 4: |
| 3787 | 3782 | *data1 |= |
| 3788 | 3783 | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)| |
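These hunks replace the `/* fall through */` comments with the `fallthrough;` pseudo-keyword, which the kernel defines in `<linux/compiler_attributes.h>` on top of `__attribute__((__fallthrough__))` where the compiler supports it. Unlike a comment, the statement is recognized by both GCC and Clang for `-Wimplicit-fallthrough` and cannot be lost to reformatting or preprocessing. A minimal illustration on a hypothetical switch, not the RTH itable code above:

```c
#include <linux/compiler_attributes.h>	/* defines the fallthrough pseudo-keyword */

static unsigned int example_accumulate(unsigned int x, int stage)
{
	switch (stage) {
	case 1:
		x |= 0x1;
		fallthrough;	/* intentional: stage 1 also applies stage 2's bit */
	case 2:
		x |= 0x2;
		break;
	default:
		break;
	}
	return x;
}
```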
| .. | .. |
| 3927 | 3922 | |
| 3928 | 3923 | /** |
| 3929 | 3924 | * vxge_hw_vpath_check_leak - Check for memory leak |
| 3930 | | - * @ringh: Handle to the ring object used for receive |
| | 3925 | + * @ring: Handle to the ring object used for receive |
| 3931 | 3926 | * |
| 3932 | 3927 | * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger or equal to |
| 3933 | 3928 | * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred. |
|---|