@@ -57,8 +57,8 @@
 	int i;
 
 	if (chunk->nsg > 0)
-		pci_unmap_sg(dev->persist->pdev, chunk->sg, chunk->npages,
-			     PCI_DMA_BIDIRECTIONAL);
+		dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
+			     DMA_BIDIRECTIONAL);
 
 	for (i = 0; i < chunk->npages; ++i)
 		__free_pages(sg_page(&chunk->sg[i]),
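The first hunk drops the legacy pci_unmap_sg() compat wrapper in favour of the generic DMA API. pci_unmap_sg(pdev, ...) has long been a thin inline around dma_unmap_sg(&pdev->dev, ...), so the change is mechanical: pass the struct device embedded in the pci_dev and use the DMA_* direction constant instead of the PCI_DMA_* one. A minimal sketch of the pattern, assuming a mapped scatterlist; the helper name below is illustrative and not part of the driver:

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void mlx4_unmap_chunk_sketch(struct pci_dev *pdev,
				    struct scatterlist *sg, int npages)
{
	/* Legacy PCI-DMA compat form (wrapper being removed tree-wide):
	 *   pci_unmap_sg(pdev, sg, npages, PCI_DMA_BIDIRECTIONAL);
	 * Generic DMA API equivalent, operating on the embedded device:
	 */
	dma_unmap_sg(&pdev->dev, sg, npages, DMA_BIDIRECTIONAL);
}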
@@ -204,9 +204,9 @@
 		if (coherent)
 			++chunk->nsg;
 		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
-			chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
-						chunk->npages,
-						PCI_DMA_BIDIRECTIONAL);
+			chunk->nsg = dma_map_sg(&dev->persist->pdev->dev,
+						chunk->sg, chunk->npages,
+						DMA_BIDIRECTIONAL);
 
 			if (chunk->nsg <= 0)
 				goto fail;
@@ -219,9 +219,8 @@
 	}
 
 	if (!coherent && chunk) {
-		chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
-					chunk->npages,
-					PCI_DMA_BIDIRECTIONAL);
+		chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
+					chunk->npages, DMA_BIDIRECTIONAL);
 
 		if (chunk->nsg <= 0)
 			goto fail;
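The two map-side hunks follow the same substitution. dma_map_sg() returns the number of mapped entries and 0 on failure, so the existing "nsg <= 0" checks and the fail path are left untouched. A sketch of the map-and-check pattern, using the same headers as the sketch above; the helper name and the -ENOMEM return stand in for the driver's goto fail handling:

static int mlx4_map_chunk_sketch(struct pci_dev *pdev,
				 struct scatterlist *sg, int npages)
{
	int nsg;

	/* dma_map_sg() returns the number of mapped entries, 0 on error */
	nsg = dma_map_sg(&pdev->dev, sg, npages, DMA_BIDIRECTIONAL);
	if (nsg <= 0)
		return -ENOMEM;

	return nsg;
}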
@@ -426,7 +425,7 @@
 	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
 	if (WARN_ON(!obj_per_chunk))
 		return -EINVAL;
-	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+	num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
 
 	table->icm = kvcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL);
 	if (!table->icm)
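The last hunk replaces the open-coded ceiling division with the kernel's DIV_ROUND_UP() macro, which expands to the same expression, (((n) + (d) - 1) / (d)), but states the intent directly. A small sketch; the function name is illustrative only:

#include <linux/kernel.h>	/* pulls in DIV_ROUND_UP() */

static unsigned int icm_chunks_needed(unsigned int nobj,
				      unsigned int obj_per_chunk)
{
	/* e.g. nobj = 10, obj_per_chunk = 4  ->  3 chunks */
	return DIV_ROUND_UP(nobj, obj_per_chunk);
}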