@@ -533,9 +533,10 @@
 
 static int he_init_tpdrq(struct he_dev *he_dev)
 {
-	he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-						 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
-						 &he_dev->tpdrq_phys, GFP_KERNEL);
+	he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+						CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
+						&he_dev->tpdrq_phys,
+						GFP_KERNEL);
 	if (he_dev->tpdrq_base == NULL) {
 		hprintk("failed to alloc tpdrq\n");
 		return -ENOMEM;
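Note: dma_zalloc_coherent() was removed tree-wide once dma_alloc_coherent() itself began returning zeroed memory, so the conversions in this patch do not change behaviour: the rings still come back zero-filled. A minimal sketch of the resulting allocate/free pattern, assuming kernel 5.0+ semantics; alloc_ring()/free_ring() are illustrative names, not helpers from he.c:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative sketch: allocate a zero-filled, coherent descriptor ring
 * and remember its bus address for the hardware. */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			dma_addr_t *ring_phys)
{
	/* dma_alloc_coherent() already zeroes the buffer, so no memset()
	 * and no dma_zalloc_coherent() wrapper is needed. */
	return dma_alloc_coherent(dev, nelem * elem_size, ring_phys, GFP_KERNEL);
}

static void free_ring(struct device *dev, size_t nelem, size_t elem_size,
		      void *ring, dma_addr_t ring_phys)
{
	dma_free_coherent(dev, nelem * elem_size, ring, ring_phys);
}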
@@ -805,9 +806,9 @@
 		goto out_free_rbpl_virt;
 	}
 
-	he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-						CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
-						&he_dev->rbpl_phys, GFP_KERNEL);
+	he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					       CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
+					       &he_dev->rbpl_phys, GFP_KERNEL);
 	if (he_dev->rbpl_base == NULL) {
 		hprintk("failed to alloc rbpl_base\n");
 		goto out_destroy_rbpl_pool;
@@ -844,9 +845,9 @@
 
 	/* rx buffer ready queue */
 
-	he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-						CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
-						&he_dev->rbrq_phys, GFP_KERNEL);
+	he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					       CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
+					       &he_dev->rbrq_phys, GFP_KERNEL);
 	if (he_dev->rbrq_base == NULL) {
 		hprintk("failed to allocate rbrq\n");
 		goto out_free_rbpl;
@@ -868,9 +869,9 @@
 
 	/* tx buffer ready queue */
 
-	he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-						CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
-						&he_dev->tbrq_phys, GFP_KERNEL);
+	he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					       CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+					       &he_dev->tbrq_phys, GFP_KERNEL);
 	if (he_dev->tbrq_base == NULL) {
 		hprintk("failed to allocate tbrq\n");
 		goto out_free_rbpq_base;
@@ -913,11 +914,9 @@
 	/* 2.9.3.5 tail offset for each interrupt queue is located after the
 	   end of the interrupt queue */
 
-	he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-					       (CONFIG_IRQ_SIZE + 1)
-					       * sizeof(struct he_irq),
-					       &he_dev->irq_phys,
-					       GFP_KERNEL);
+	he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					      (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
+					      &he_dev->irq_phys, GFP_KERNEL);
 	if (he_dev->irq_base == NULL) {
 		hprintk("failed to allocate irq\n");
 		return -ENOMEM;
@@ -1464,9 +1463,9 @@
 
 	/* host status page */
 
-	he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-					  sizeof(struct he_hsp),
-					  &he_dev->hsp_phys, GFP_KERNEL);
+	he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					 sizeof(struct he_hsp),
+					 &he_dev->hsp_phys, GFP_KERNEL);
 	if (he_dev->hsp == NULL) {
 		hprintk("failed to allocate host status page\n");
 		return -ENOMEM;
@@ -1691,7 +1690,7 @@
 
 		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
 			hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
-				atomic_inc(&vcc->stats->rx_drop);
+			atomic_inc(&vcc->stats->rx_drop);
 			goto return_host_buffers;
 		}
 
@@ -1945,14 +1944,14 @@
 		switch (type) {
 		case ITYPE_RBRQ_THRESH:
 			HPRINTK("rbrq%d threshold\n", group);
-			/* fall through */
+			fallthrough;
 		case ITYPE_RBRQ_TIMER:
 			if (he_service_rbrq(he_dev, group))
 				he_service_rbpl(he_dev, group);
 			break;
 		case ITYPE_TBRQ_THRESH:
 			HPRINTK("tbrq%d threshold\n", group);
-			/* fall through */
+			fallthrough;
 		case ITYPE_TPD_COMPLETE:
 			he_service_tbrq(he_dev, group);
 			break;
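Note: fallthrough; is the pseudo-keyword from <linux/compiler_attributes.h>. Unlike the old /* fall through */ comment it survives preprocessing, so -Wimplicit-fallthrough and static checkers can verify that the drop into the next case is intentional. A simplified sketch of what the header provides; the exact definition varies by kernel and compiler version:

/* Simplified sketch of the kernel's definition, not a verbatim copy. */
#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif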
@@ -2581,10 +2580,9 @@
 			slot = 0;
 		}
 
-		tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev,
-			(void *) page_address(frag->page) + frag->page_offset,
-			frag->size, DMA_TO_DEVICE);
-		tpd->iovec[slot].len = frag->size;
+		tpd->iovec[slot].addr = skb_frag_dma_map(&he_dev->pci_dev->dev,
+				frag, 0, skb_frag_size(frag), DMA_TO_DEVICE);
+		tpd->iovec[slot].len = skb_frag_size(frag);
 		++slot;
 
 	}
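Note: skb_frag_dma_map() and skb_frag_size() go through the skb frag accessors instead of touching frag->page, frag->page_offset and frag->size directly, which is why the open-coded page_address() arithmetic disappears above. A minimal sketch of mapping an skb's paged fragments with these helpers; map_skb_frags() is an illustrative name, not a he.c function, and unwinding of already-mapped frags on error is omitted:

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Illustrative sketch: DMA-map every paged fragment of an skb for TX. */
static int map_skb_frags(struct device *dev, struct sk_buff *skb)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t addr;

		addr = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
					DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addr))
			return -ENOMEM;	/* caller must unmap earlier frags */

		/* hand addr and skb_frag_size(frag) to the hardware here */
	}

	return 0;
}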