```
  ..    ..
 961   961      /* We can handle large RDMA requests, so allow larger segments. */
 962   962      dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
 963   963
 964        -   mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev);
       964  +   mdev = ib_alloc_device(mthca_dev, ib_dev);
 965   965      if (!mdev) {
 966   966              dev_err(&pdev->dev, "Device struct alloc failed, "
 967   967                      "aborting.\n");
```
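The first hunk drops the explicit `(struct mthca_dev *)` cast in favour of the RDMA core's type-safe `ib_alloc_device(drv_struct, member)` macro, which derives both the allocation size and the return type from the driver structure and its embedded `struct ib_device` member. Below is a minimal user-space sketch of how such a macro can be built with `container_of()`; the kernel's actual macro (in `include/rdma/ib_verbs.h`) wraps the core allocator and adds build-time checks, so the helpers and structs here are simplified stand-ins rather than the real kernel code.

```c
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified container_of(): recover the enclosing struct from a member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_device {              /* stand-in for the RDMA core's device struct */
	int dummy;
};

struct mthca_dev {              /* driver struct embedding ib_device first */
	struct ib_device ib_dev;
	int mthca_flags;
};

/* Stand-in core helper: allocate and zero 'size' bytes, return the embedded device. */
static struct ib_device *_ib_alloc_device(size_t size)
{
	return calloc(1, size);
}

/*
 * Type-safe wrapper: the caller names the driver struct and the embedded
 * member, so no cast is needed at the call site and the size cannot go stale.
 */
#define ib_alloc_device(drv_struct, member)                           \
	container_of(_ib_alloc_device(sizeof(struct drv_struct)),     \
		     struct drv_struct, member)

int main(void)
{
	struct mthca_dev *mdev = ib_alloc_device(mthca_dev, ib_dev);

	if (!mdev)
		return 1;
	printf("allocated %zu bytes for struct mthca_dev\n", sizeof(*mdev));
	free(mdev);
	return 0;
}
```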
```
  ..     ..
1015  1015
1016  1016      err = mthca_setup_hca(mdev);
1017  1017      if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
1018        -           if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
1019        -                   pci_free_irq_vectors(pdev);
      1018  +           pci_free_irq_vectors(pdev);
1020  1019              mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;
1021  1020
1022  1021              err = mthca_setup_hca(mdev);
```