 .. |  .. |
239 | 239 | unsigned int num; /* num skbs in per port queues */
240 | 240 | struct sched_port p[MAX_NPORTS];
241 | 241 | struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
    | 242 | + struct sge *sge;
242 | 243 | };
243 |     | -static void restart_sched(unsigned long);
    | 244 | +
    | 245 | +static void restart_sched(struct tasklet_struct *t);
244 | 246 |
245 | 247 |
246 | 248 | /*
 .. |  .. |
378 | 380 | return -ENOMEM;
379 | 381 |
380 | 382 | pr_debug("tx_sched_init\n");
381 |     | - tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
    | 383 | + tasklet_setup(&s->sched_tsk, restart_sched);
    | 384 | + s->sge = sge;
382 | 385 | sge->tx_sched = s;
383 | 386 |
384 | 387 | for (i = 0; i < MAX_NPORTS; i++) {
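The tasklet change above follows the tasklet_setup()/from_tasklet() convention: the callback now receives the tasklet pointer rather than an opaque unsigned long, so the scheduler grows a struct sge back-pointer (the new line 242 above) to carry the context that used to be smuggled through the cast. A minimal sketch of the pattern, using hypothetical names (my_ctx, my_work) rather than the driver's own:

```c
#include <linux/interrupt.h>

/* Hypothetical container, standing in for struct sched in this driver. */
struct my_ctx {
	struct tasklet_struct tsk;
	void *extra;		/* whatever the callback needs, e.g. a back-pointer */
};

/* New-style callback: the argument is the tasklet itself, not an unsigned long. */
static void my_work(struct tasklet_struct *t)
{
	struct my_ctx *ctx = from_tasklet(ctx, t, tsk);	/* container_of() under the hood */

	/* ... use ctx->extra ... */
}

static void my_ctx_init(struct my_ctx *ctx)
{
	/* Old: tasklet_init(&ctx->tsk, my_work_old, (unsigned long)ctx); */
	tasklet_setup(&ctx->tsk, my_work);
}
```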
 .. |  .. |
509 | 512 | while (q->credits--) {
510 | 513 | struct freelQ_ce *ce = &q->centries[cidx];
511 | 514 |
512 |     | - pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
513 |     | - dma_unmap_len(ce, dma_len),
514 |     | - PCI_DMA_FROMDEVICE);
    | 515 | + dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
    | 516 | + dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
515 | 517 | dev_kfree_skb(ce->skb);
516 | 518 | ce->skb = NULL;
517 | 519 | if (++cidx == q->size)
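The streaming-DMA conversions in this patch are mechanical: the legacy pci_* wrappers forwarded to the generic DMA API on the PCI device's embedded struct device, so pdev becomes &pdev->dev and the PCI_DMA_* direction constants become their DMA_* counterparts. An illustrative equivalence, not driver code:

```c
/* Legacy wrapper and its generic equivalent (illustrative): */
pci_unmap_single(pdev, dma_addr, len, PCI_DMA_FROMDEVICE);	/* old, pci-dma-compat */
dma_unmap_single(&pdev->dev, dma_addr, len, DMA_FROM_DEVICE);	/* new, generic DMA API */

/*
 * Direction constants map one to one:
 *   PCI_DMA_TODEVICE      -> DMA_TO_DEVICE
 *   PCI_DMA_FROMDEVICE    -> DMA_FROM_DEVICE
 *   PCI_DMA_BIDIRECTIONAL -> DMA_BIDIRECTIONAL
 */
```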
 .. |  .. |
529 | 531 |
530 | 532 | if (sge->respQ.entries) {
531 | 533 | size = sizeof(struct respQ_e) * sge->respQ.size;
532 |     | - pci_free_consistent(pdev, size, sge->respQ.entries,
533 |     | - sge->respQ.dma_addr);
    | 534 | + dma_free_coherent(&pdev->dev, size, sge->respQ.entries,
    | 535 | + sge->respQ.dma_addr);
534 | 536 | }
535 | 537 |
536 | 538 | for (i = 0; i < SGE_FREELQ_N; i++) {
 .. |  .. |
542 | 544 | }
543 | 545 | if (q->entries) {
544 | 546 | size = sizeof(struct freelQ_e) * q->size;
545 |     | - pci_free_consistent(pdev, size, q->entries,
546 |     | - q->dma_addr);
    | 547 | + dma_free_coherent(&pdev->dev, size, q->entries,
    | 548 | + q->dma_addr);
547 | 549 | }
548 | 550 | }
549 | 551 | }
 .. |  .. |
564 | 566 | q->size = p->freelQ_size[i];
565 | 567 | q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
566 | 568 | size = sizeof(struct freelQ_e) * q->size;
567 |     | - q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
    | 569 | + q->entries = dma_alloc_coherent(&pdev->dev, size,
    | 570 | + &q->dma_addr, GFP_KERNEL);
568 | 571 | if (!q->entries)
569 | 572 | goto err_no_mem;
570 | 573 |
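The coherent allocations change the same way, with one visible difference: the old pci_alloc_consistent() always allocated atomically, whereas dma_alloc_coherent() takes an explicit gfp_t, so the conversion picks GFP_KERNEL, which looks safe here because these rings are set up from process context. A hedged sketch of the pattern with hypothetical names (ring, ring_dma, ring_size):

```c
/* Hypothetical ring allocation, mirroring the freelist/respQ/cmdQ changes above. */
ring = dma_alloc_coherent(&pdev->dev, ring_size, &ring_dma, GFP_KERNEL);
if (!ring)
	return -ENOMEM;

/* ... the device uses ring_dma, the CPU uses ring ... */

dma_free_coherent(&pdev->dev, ring_size, ring, ring_dma);	/* teardown mirrors setup */
```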
 .. |  .. |
585 | 588 | sizeof(struct cpl_rx_data) +
586 | 589 | sge->freelQ[!sge->jumbo_fl].dma_offset;
587 | 590 |
588 |     | - size = (16 * 1024) -
589 |     | - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
    | 591 | + size = (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
590 | 592 |
591 | 593 | sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
592 | 594 |
 .. |  .. |
602 | 604 | sge->respQ.credits = 0;
603 | 605 | size = sizeof(struct respQ_e) * sge->respQ.size;
604 | 606 | sge->respQ.entries =
605 |     | - pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
    | 607 | + dma_alloc_coherent(&pdev->dev, size, &sge->respQ.dma_addr,
    | 608 | + GFP_KERNEL);
606 | 609 | if (!sge->respQ.entries)
607 | 610 | goto err_no_mem;
608 | 611 | return 0;
 .. |  .. |
625 | 628 | ce = &q->centries[cidx];
626 | 629 | while (n--) {
627 | 630 | if (likely(dma_unmap_len(ce, dma_len))) {
628 |     | - pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
    | 631 | + dma_unmap_single(&pdev->dev,
    | 632 | + dma_unmap_addr(ce, dma_addr),
629 | 633 | dma_unmap_len(ce, dma_len),
630 |     | - PCI_DMA_TODEVICE);
    | 634 | + DMA_TO_DEVICE);
631 | 635 | if (q->sop)
632 | 636 | q->sop = 0;
633 | 637 | }
 .. |  .. |
664 | 668 | }
665 | 669 | if (q->entries) {
666 | 670 | size = sizeof(struct cmdQ_e) * q->size;
667 |     | - pci_free_consistent(pdev, size, q->entries,
668 |     | - q->dma_addr);
    | 671 | + dma_free_coherent(&pdev->dev, size, q->entries,
    | 672 | + q->dma_addr);
669 | 673 | }
670 | 674 | }
671 | 675 | }
 .. |  .. |
690 | 694 | q->stop_thres = 0;
691 | 695 | spin_lock_init(&q->lock);
692 | 696 | size = sizeof(struct cmdQ_e) * q->size;
693 |     | - q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
    | 697 | + q->entries = dma_alloc_coherent(&pdev->dev, size,
    | 698 | + &q->dma_addr, GFP_KERNEL);
694 | 699 | if (!q->entries)
695 | 700 | goto err_no_mem;
696 | 701 |
 .. |  .. |
838 | 843 | break;
839 | 844 |
840 | 845 | skb_reserve(skb, q->dma_offset);
841 |     | - mapping = pci_map_single(pdev, skb->data, dma_len,
842 |     | - PCI_DMA_FROMDEVICE);
    | 846 | + mapping = dma_map_single(&pdev->dev, skb->data, dma_len,
    | 847 | + DMA_FROM_DEVICE);
843 | 848 | skb_reserve(skb, sge->rx_pkt_pad);
844 | 849 |
845 | 850 | ce->skb = skb;
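Mapping receive buffers is the same substitution. As a general DMA-API note, and not something this hunk adds, dma_map_single() can fail, and callers are expected to check the returned handle with dma_mapping_error(); a sketch of that check with hypothetical names:

```c
/* General DMA-API point (not part of this patch): check the mapping for errors. */
dma_addr_t mapping;

mapping = dma_map_single(&pdev->dev, skb->data, dma_len, DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, mapping)) {
	dev_kfree_skb(skb);	/* hypothetical error path: drop and stop refilling */
	break;
}
```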
  .. |   .. |
1050 | 1055 | goto use_orig_buf;
1051 | 1056 |
1052 | 1057 | skb_put(skb, len);
1053 |      | - pci_dma_sync_single_for_cpu(pdev,
1054 |      | - dma_unmap_addr(ce, dma_addr),
1055 |      | - dma_unmap_len(ce, dma_len),
1056 |      | - PCI_DMA_FROMDEVICE);
     | 1058 | + dma_sync_single_for_cpu(&pdev->dev,
     | 1059 | + dma_unmap_addr(ce, dma_addr),
     | 1060 | + dma_unmap_len(ce, dma_len),
     | 1061 | + DMA_FROM_DEVICE);
1057 | 1062 | skb_copy_from_linear_data(ce->skb, skb->data, len);
1058 |      | - pci_dma_sync_single_for_device(pdev,
1059 |      | - dma_unmap_addr(ce, dma_addr),
1060 |      | - dma_unmap_len(ce, dma_len),
1061 |      | - PCI_DMA_FROMDEVICE);
     | 1063 | + dma_sync_single_for_device(&pdev->dev,
     | 1064 | + dma_unmap_addr(ce, dma_addr),
     | 1065 | + dma_unmap_len(ce, dma_len),
     | 1066 | + DMA_FROM_DEVICE);
1062 | 1067 | recycle_fl_buf(fl, fl->cidx);
1063 | 1068 | return skb;
1064 | 1069 | }
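The copy-break path above shows the streaming-DMA ownership rule: sync the mapping to the CPU before reading the received data, then sync it back to the device before the buffer is recycled onto the free list for further RX. In outline, with hypothetical names:

```c
/* Hypothetical copy-break outline; buf_dma, buf_len, small_skb, big_skb, len are illustrative. */
dma_sync_single_for_cpu(&pdev->dev, buf_dma, buf_len, DMA_FROM_DEVICE);
skb_copy_from_linear_data(big_skb, small_skb->data, len);	/* CPU may now read the RX buffer */
dma_sync_single_for_device(&pdev->dev, buf_dma, buf_len, DMA_FROM_DEVICE);
/* big_skb stays mapped and goes back on the free list for the NIC to refill. */
```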
  .. |   .. |
1069 | 1074 | return NULL;
1070 | 1075 | }
1071 | 1076 |
1072 |      | - pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
1073 |      | - dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
     | 1077 | + dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
     | 1078 | + dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
1074 | 1079 | skb = ce->skb;
1075 | 1080 | prefetch(skb->data);
1076 | 1081 |
  .. |   .. |
1092 | 1097 | struct freelQ_ce *ce = &fl->centries[fl->cidx];
1093 | 1098 | struct sk_buff *skb = ce->skb;
1094 | 1099 |
1095 |      | - pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
1096 |      | - dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
     | 1100 | + dma_sync_single_for_cpu(&adapter->pdev->dev,
     | 1101 | + dma_unmap_addr(ce, dma_addr),
     | 1102 | + dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
1097 | 1103 | pr_err("%s: unexpected offload packet, cmd %u\n",
1098 | 1104 | adapter->name, *skb->data);
1099 | 1105 | recycle_fl_buf(fl, fl->cidx);
  .. |   .. |
1210 | 1216 | e = e1 = &q->entries[pidx];
1211 | 1217 | ce = &q->centries[pidx];
1212 | 1218 |
1213 |      | - mapping = pci_map_single(adapter->pdev, skb->data,
1214 |      | - skb_headlen(skb), PCI_DMA_TODEVICE);
     | 1219 | + mapping = dma_map_single(&adapter->pdev->dev, skb->data,
     | 1220 | + skb_headlen(skb), DMA_TO_DEVICE);
1215 | 1221 |
1216 | 1222 | desc_mapping = mapping;
1217 | 1223 | desc_len = skb_headlen(skb);
  .. |   .. |
1302 | 1308 | * Called from tasklet. Checks the scheduler for any
1303 | 1309 | * pending skbs that can be sent.
1304 | 1310 | */
1305 |      | -static void restart_sched(unsigned long arg)
     | 1311 | +static void restart_sched(struct tasklet_struct *t)
1306 | 1312 | {
1307 |      | - struct sge *sge = (struct sge *) arg;
     | 1313 | + struct sched *s = from_tasklet(s, t, sched_tsk);
     | 1314 | + struct sge *sge = s->sge;
1308 | 1315 | struct adapter *adapter = sge->adapter;
1309 | 1316 | struct cmdQ *q = &sge->cmdQ[0];
1310 | 1317 | struct sk_buff *skb;
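On the callback side, from_tasklet() is a thin wrapper around container_of(): it recovers the enclosing struct sched from the tasklet pointer, and the new sge back-pointer supplies what used to arrive as the unsigned long argument. Roughly:

```c
/* from_tasklet(s, t, sched_tsk) expands to approximately: */
struct sched *s = container_of(t, struct sched, sched_tsk);
struct sge *sge = s->sge;	/* replaces: (struct sge *)arg */
```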
---|