.. | .. |
214 | 214 | "CXN_KILLED_IMM_DATA_RCVD" |
215 | 215 | }; |
216 | 216 | |
217 | | -static int beiscsi_slave_configure(struct scsi_device *sdev) |
218 | | -{ |
219 | | - blk_queue_max_segment_size(sdev->request_queue, 65536); |
220 | | - return 0; |
221 | | -} |
222 | | - |
223 | 217 | static int beiscsi_eh_abort(struct scsi_cmnd *sc) |
224 | 218 | { |
225 | 219 | struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr; |
.. | .. |
393 | 387 | .proc_name = DRV_NAME, |
394 | 388 | .queuecommand = iscsi_queuecommand, |
395 | 389 | .change_queue_depth = scsi_change_queue_depth, |
396 | | - .slave_configure = beiscsi_slave_configure, |
397 | 390 | .target_alloc = iscsi_target_alloc, |
398 | 391 | .eh_timed_out = iscsi_eh_cmd_timed_out, |
399 | 392 | .eh_abort_handler = beiscsi_eh_abort, |
.. | .. |
404 | 397 | .can_queue = BE2_IO_DEPTH, |
405 | 398 | .this_id = -1, |
406 | 399 | .max_sectors = BEISCSI_MAX_SECTORS, |
| 400 | + .max_segment_size = 65536, |
407 | 401 | .cmd_per_lun = BEISCSI_CMD_PER_LUN, |
408 | | - .use_clustering = ENABLE_CLUSTERING, |
409 | 402 | .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID, |
410 | 403 | .track_queue_depth = 1, |
411 | 404 | }; |
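Note on the hunks above: the driver-private slave_configure callback existed only to call blk_queue_max_segment_size(), and the host template gained a max_segment_size field that the SCSI midlayer applies when it sets up each device's request queue, so the callback can be dropped. The use_clustering/ENABLE_CLUSTERING field was removed from scsi_host_template in the same timeframe; drivers that relied on disabled clustering switched to virt_boundary_mask/dma_boundary instead, which does not apply here. A minimal sketch of the template-field approach (example_sht and its other values are illustrative, not from this patch):

```c
#include <scsi/scsi_host.h>

/* Hypothetical host template: the limit that beiscsi_slave_configure()
 * used to set per device via blk_queue_max_segment_size() is now a
 * template field the midlayer applies to every request queue. */
static struct scsi_host_template example_sht = {
	.name			= "example",
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_segment_size	= 65536,
	/* .use_clustering no longer exists; segment merging is governed by
	 * max_segment_size and virt_boundary_mask instead. */
};
```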
.. | .. |
460 | 453 | u8 __iomem *addr; |
461 | 454 | int pcicfg_reg; |
462 | 455 | |
463 | | - addr = ioremap_nocache(pci_resource_start(pcidev, 2), |
| 456 | + addr = ioremap(pci_resource_start(pcidev, 2), |
464 | 457 | pci_resource_len(pcidev, 2)); |
465 | 458 | if (addr == NULL) |
466 | 459 | return -ENOMEM; |
467 | 460 | phba->ctrl.csr = addr; |
468 | 461 | phba->csr_va = addr; |
469 | 462 | |
470 | | - addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024); |
| 463 | + addr = ioremap(pci_resource_start(pcidev, 4), 128 * 1024); |
471 | 464 | if (addr == NULL) |
472 | 465 | goto pci_map_err; |
473 | 466 | phba->ctrl.db = addr; |
.. | .. |
478 | 471 | else |
479 | 472 | pcicfg_reg = 0; |
480 | 473 | |
481 | | - addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg), |
| 474 | + addr = ioremap(pci_resource_start(pcidev, pcicfg_reg), |
482 | 475 | pci_resource_len(pcidev, pcicfg_reg)); |
483 | 476 | |
484 | 477 | if (addr == NULL) |
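The ioremap_nocache() conversions above are mechanical: ioremap_nocache() had long been an alias for ioremap() (both return an uncached MMIO mapping), and the alias was eventually removed, so this is a one-for-one rename. A small sketch; example_map_bar2() and the BAR choice are illustrative:

```c
#include <linux/io.h>
#include <linux/pci.h>

/* Hypothetical BAR-mapping helper: ioremap() already returns an uncached
 * mapping, which is exactly what ioremap_nocache() provided. */
static void __iomem *example_map_bar2(struct pci_dev *pdev)
{
	return ioremap(pci_resource_start(pdev, 2),
		       pci_resource_len(pdev, 2));
}
```

For whole-BAR mappings, pci_ioremap_bar(pdev, 2) is an equivalent convenience helper, assuming the regions were requested beforehand.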
.. | .. |
511 | 504 | } |
512 | 505 | |
513 | 506 | pci_set_master(pcidev); |
514 | | - ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); |
| 507 | + ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64)); |
515 | 508 | if (ret) { |
516 | | - ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); |
517 | | - if (ret) { |
518 | | - dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); |
519 | | - goto pci_region_release; |
520 | | - } else { |
521 | | - ret = pci_set_consistent_dma_mask(pcidev, |
522 | | - DMA_BIT_MASK(32)); |
523 | | - } |
524 | | - } else { |
525 | | - ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64)); |
| 509 | + ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)); |
526 | 510 | if (ret) { |
527 | 511 | dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); |
528 | 512 | goto pci_region_release; |
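dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call, which is why the separate pci_set_dma_mask()/pci_set_consistent_dma_mask() branches collapse into the simpler fallback above. A sketch of the usual probe-time pattern (example_set_dma_masks is illustrative):

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical probe-time helper: try a 64-bit mask first, fall back to
 * 32-bit; each call covers both the streaming and the coherent mask. */
static int example_set_dma_masks(struct pci_dev *pdev)
{
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		dev_err(&pdev->dev, "no usable DMA configuration\n");
	return ret;
}
```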
.. | .. |
550 | 534 | if (status) |
551 | 535 | return status; |
552 | 536 | mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; |
553 | | - mbox_mem_alloc->va = pci_alloc_consistent(pdev, |
554 | | - mbox_mem_alloc->size, |
555 | | - &mbox_mem_alloc->dma); |
| 537 | + mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev, |
| 538 | + mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL); |
556 | 539 | if (!mbox_mem_alloc->va) { |
557 | 540 | beiscsi_unmap_pci_function(phba); |
558 | 541 | return -ENOMEM; |
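pci_alloc_consistent() was a thin wrapper that called dma_alloc_coherent() with GFP_ATOMIC; converting to the dma_* API directly lets sleepable paths such as this probe code pass GFP_KERNEL explicitly. A sketch of the alloc/free pairing (struct example_buf and the helpers are illustrative):

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical mailbox-style coherent buffer. */
struct example_buf {
	void		*va;
	dma_addr_t	dma;
	size_t		size;
};

static int example_buf_alloc(struct pci_dev *pdev, struct example_buf *buf,
			     size_t size)
{
	buf->size = size;
	/* GFP_KERNEL is fine here because probe context may sleep. */
	buf->va = dma_alloc_coherent(&pdev->dev, size, &buf->dma, GFP_KERNEL);
	return buf->va ? 0 : -ENOMEM;
}

static void example_buf_free(struct pci_dev *pdev, struct example_buf *buf)
{
	dma_free_coherent(&pdev->dev, buf->size, buf->va, buf->dma);
}
```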
.. | .. |
994 | 977 | * alloc_wrb_handle - To allocate a wrb handle |
995 | 978 | * @phba: The hba pointer |
996 | 979 | * @cid: The cid to use for allocation |
997 | | - * @pwrb_context: ptr to ptr to wrb context |
| 980 | + * @pcontext: ptr to ptr to wrb context |
998 | 981 | * |
999 | 982 | * This happens under session_lock until submission to chip |
1000 | 983 | */ |
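The @pwrb_context to @pcontext rename above keeps the kernel-doc block in sync with the function's actual parameter name; scripts/kernel-doc (and W=1 builds) warn when a documented name does not match the prototype. Assuming a prototype along these lines (inferred from the surrounding driver code, so treat it as illustrative):

```c
/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 * @pcontext: ptr to ptr to wrb context
 *
 * Each @name above must match a parameter name below, or kernel-doc warns.
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
				    struct hwi_wrb_context **pcontext);
```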
.. | .. |
1411 | 1394 | spin_unlock_bh(&session->back_lock); |
1412 | 1395 | } |
1413 | 1396 | |
1414 | | -/** |
| 1397 | +/* |
1415 | 1398 | * ASYNC PDUs include |
1416 | 1399 | * a. Unsolicited NOP-In (target initiated NOP-In) |
1417 | 1400 | * b. ASYNC Messages |
.. | .. |
1549 | 1532 | break; |
1550 | 1533 | case UNSOL_DATA_DIGEST_ERROR_NOTIFY: |
1551 | 1534 | error = 1; |
| 1535 | + fallthrough; |
1552 | 1536 | case UNSOL_DATA_NOTIFY: |
1553 | 1537 | pasync_handle = pasync_ctx->async_entry[ci].data; |
1554 | 1538 | break; |
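The new fallthrough; statement above documents that dropping from the digest-error case into UNSOL_DATA_NOTIFY is intentional; the macro expands to the compiler's fallthrough attribute where available and keeps -Wimplicit-fallthrough quiet. A reduced, self-contained illustration (classify() and its case values are made up):

```c
#include <linux/compiler_attributes.h>	/* fallthrough */
#include <linux/errno.h>

/* Hypothetical reduced example of the annotated fall-through pattern. */
static int classify(int code)
{
	int error = 0;

	switch (code) {
	case 1:			/* e.g. a digest-error notification */
		error = 1;
		fallthrough;	/* the data is still consumed below */
	case 2:			/* e.g. a plain data notification */
		return error ? -EIO : 0;
	default:
		return -EINVAL;
	}
}
```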
.. | .. |
1866 | 1850 | { |
1867 | 1851 | struct be_queue_info *cq; |
1868 | 1852 | struct sol_cqe *sol; |
1869 | | - struct dmsg_cqe *dmsg; |
1870 | 1853 | unsigned int total = 0; |
1871 | 1854 | unsigned int num_processed = 0; |
1872 | 1855 | unsigned short code = 0, cid = 0; |
.. | .. |
1939 | 1922 | "BM_%d : Received %s[%d] on CID : %d\n", |
1940 | 1923 | cqe_desc[code], code, cid); |
1941 | 1924 | |
1942 | | - dmsg = (struct dmsg_cqe *)sol; |
1943 | 1925 | hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); |
1944 | 1926 | break; |
1945 | 1927 | case UNSOL_HDR_NOTIFY: |
.. | .. |
2304 | 2286 | |
2305 | 2287 | /* Map addr only if there is data_count */ |
2306 | 2288 | if (dsp_value) { |
2307 | | - io_task->mtask_addr = pci_map_single(phba->pcidev, |
| 2289 | + io_task->mtask_addr = dma_map_single(&phba->pcidev->dev, |
2308 | 2290 | task->data, |
2309 | 2291 | task->data_count, |
2310 | | - PCI_DMA_TODEVICE); |
2311 | | - if (pci_dma_mapping_error(phba->pcidev, |
| 2292 | + DMA_TO_DEVICE); |
| 2293 | + if (dma_mapping_error(&phba->pcidev->dev, |
2312 | 2294 | io_task->mtask_addr)) |
2313 | 2295 | return -ENOMEM; |
2314 | 2296 | io_task->mtask_data_count = task->data_count; |
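pci_map_single()/PCI_DMA_TODEVICE map one-for-one onto dma_map_single()/DMA_TO_DEVICE on the underlying struct device, and pci_dma_mapping_error() onto dma_mapping_error(); the matching unmap is converted in a later hunk (the pci_unmap_single() one). A sketch of the streaming-mapping pattern (the helpers are illustrative):

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

/* Hypothetical streaming mapping of a CPU buffer the device will read;
 * DMA_TO_DEVICE replaces the old PCI_DMA_TODEVICE direction flag. */
static int example_map_payload(struct pci_dev *pdev, void *buf, size_t len,
			       dma_addr_t *addr)
{
	*addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *addr))
		return -ENOMEM;
	return 0;
}

static void example_unmap_payload(struct pci_dev *pdev, dma_addr_t addr,
				  size_t len)
{
	dma_unmap_single(&pdev->dev, addr, len, DMA_TO_DEVICE);
}
```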
.. | .. |
2519 | 2501 | BEISCSI_MAX_FRAGS_INIT); |
2520 | 2502 | curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); |
2521 | 2503 | do { |
2522 | | - mem_arr->virtual_address = pci_alloc_consistent( |
2523 | | - phba->pcidev, |
2524 | | - curr_alloc_size, |
2525 | | - &bus_add); |
| 2504 | + mem_arr->virtual_address = |
| 2505 | + dma_alloc_coherent(&phba->pcidev->dev, |
| 2506 | + curr_alloc_size, &bus_add, GFP_KERNEL); |
2526 | 2507 | if (!mem_arr->virtual_address) { |
2527 | 2508 | if (curr_alloc_size <= BE_MIN_MEM_SIZE) |
2528 | 2509 | goto free_mem; |
.. | .. |
2560 | 2541 | mem_descr->num_elements = j; |
2561 | 2542 | while ((i) || (j)) { |
2562 | 2543 | for (j = mem_descr->num_elements; j > 0; j--) { |
2563 | | - pci_free_consistent(phba->pcidev, |
| 2544 | + dma_free_coherent(&phba->pcidev->dev, |
2564 | 2545 | mem_descr->mem_array[j - 1].size, |
2565 | 2546 | mem_descr->mem_array[j - 1]. |
2566 | 2547 | virtual_address, |
.. | .. |
3031 | 3012 | eq = &phwi_context->be_eq[i].q; |
3032 | 3013 | mem = &eq->dma_mem; |
3033 | 3014 | phwi_context->be_eq[i].phba = phba; |
3034 | | - eq_vaddress = pci_alloc_consistent(phba->pcidev, |
| 3015 | + eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev, |
3035 | 3016 | num_eq_pages * PAGE_SIZE, |
3036 | | - &paddr); |
| 3017 | + &paddr, GFP_KERNEL); |
3037 | 3018 | if (!eq_vaddress) { |
3038 | 3019 | ret = -ENOMEM; |
3039 | 3020 | goto create_eq_error; |
.. | .. |
3069 | 3050 | eq = &phwi_context->be_eq[i].q; |
3070 | 3051 | mem = &eq->dma_mem; |
3071 | 3052 | if (mem->va) |
3072 | | - pci_free_consistent(phba->pcidev, num_eq_pages |
| 3053 | + dma_free_coherent(&phba->pcidev->dev, num_eq_pages |
3073 | 3054 | * PAGE_SIZE, |
3074 | 3055 | mem->va, mem->dma); |
3075 | 3056 | } |
.. | .. |
3097 | 3078 | pbe_eq->cq = cq; |
3098 | 3079 | pbe_eq->phba = phba; |
3099 | 3080 | mem = &cq->dma_mem; |
3100 | | - cq_vaddress = pci_alloc_consistent(phba->pcidev, |
| 3081 | + cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev, |
3101 | 3082 | num_cq_pages * PAGE_SIZE, |
3102 | | - &paddr); |
| 3083 | + &paddr, GFP_KERNEL); |
3103 | 3084 | if (!cq_vaddress) { |
3104 | 3085 | ret = -ENOMEM; |
3105 | 3086 | goto create_cq_error; |
.. | .. |
3134 | 3115 | cq = &phwi_context->be_cq[i]; |
3135 | 3116 | mem = &cq->dma_mem; |
3136 | 3117 | if (mem->va) |
3137 | | - pci_free_consistent(phba->pcidev, num_cq_pages |
| 3118 | + dma_free_coherent(&phba->pcidev->dev, num_cq_pages |
3138 | 3119 | * PAGE_SIZE, |
3139 | 3120 | mem->va, mem->dma); |
3140 | 3121 | } |
.. | .. |
3326 | 3307 | { |
3327 | 3308 | struct be_dma_mem *mem = &q->dma_mem; |
3328 | 3309 | if (mem->va) { |
3329 | | - pci_free_consistent(phba->pcidev, mem->size, |
| 3310 | + dma_free_coherent(&phba->pcidev->dev, mem->size, |
3330 | 3311 | mem->va, mem->dma); |
3331 | 3312 | mem->va = NULL; |
3332 | 3313 | } |
.. | .. |
3341 | 3322 | q->len = len; |
3342 | 3323 | q->entry_size = entry_size; |
3343 | 3324 | mem->size = len * entry_size; |
3344 | | - mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma); |
| 3325 | + mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma, |
| 3326 | + GFP_KERNEL); |
3345 | 3327 | if (!mem->va) |
3346 | 3328 | return -ENOMEM; |
3347 | 3329 | return 0; |
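One detail worth noting for the pci_zalloc_consistent() hunk above: dma_alloc_coherent() has returned zeroed memory for a while now (since roughly v5.0), so dropping the "z" variant does not lose the zero-initialisation. A tiny illustrative helper under that assumption:

```c
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical queue-buffer allocation: no memset() is needed because
 * dma_alloc_coherent() already hands back zeroed memory. */
static void *example_alloc_queue(struct device *dev, size_t len,
				 size_t entry_size, dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, len * entry_size, dma, GFP_KERNEL);
}
```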
.. | .. |
3479 | 3461 | &ctrl->ptag_state[tag].tag_state)) { |
3480 | 3462 | ptag_mem = &ctrl->ptag_state[tag].tag_mem_state; |
3481 | 3463 | if (ptag_mem->size) { |
3482 | | - pci_free_consistent(ctrl->pdev, |
| 3464 | + dma_free_coherent(&ctrl->pdev->dev, |
3483 | 3465 | ptag_mem->size, |
3484 | 3466 | ptag_mem->va, |
3485 | 3467 | ptag_mem->dma); |
.. | .. |
3585 | 3567 | |
3586 | 3568 | /* if eqid_count == 1 fall back to INTX */ |
3587 | 3569 | if (enable_msix && nvec > 1) { |
3588 | | - const struct irq_affinity desc = { .post_vectors = 1 }; |
| 3570 | + struct irq_affinity desc = { .post_vectors = 1 }; |
3589 | 3571 | |
3590 | 3572 | if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec, |
3591 | 3573 | PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) { |
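The const drop on struct irq_affinity above matches a signature change: pci_alloc_irq_vectors_affinity() now takes a non-const struct irq_affinity * because the IRQ core may update the descriptor (for example when recalculating interrupt sets), so a const local no longer compiles cleanly. A sketch of the call (example_setup_msix is illustrative):

```c
#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical MSI-X setup: one vector reserved after the affinity-spread
 * set, mirroring the driver's .post_vectors = 1. */
static int example_setup_msix(struct pci_dev *pdev, unsigned int nvec)
{
	struct irq_affinity desc = { .post_vectors = 1 };
	int ret;

	ret = pci_alloc_irq_vectors_affinity(pdev, 2, nvec,
					     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					     &desc);
	return ret < 0 ? ret : 0;
}
```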
.. | .. |
3880 | 3862 | j = 0; |
3881 | 3863 | for (i = 0; i < SE_MEM_MAX; i++) { |
3882 | 3864 | for (j = mem_descr->num_elements; j > 0; j--) { |
3883 | | - pci_free_consistent(phba->pcidev, |
| 3865 | + dma_free_coherent(&phba->pcidev->dev, |
3884 | 3866 | mem_descr->mem_array[j - 1].size, |
3885 | 3867 | mem_descr->mem_array[j - 1].virtual_address, |
3886 | 3868 | (unsigned long)mem_descr->mem_array[j - 1]. |
.. | .. |
4255 | 4237 | } |
4256 | 4238 | |
4257 | 4239 | if (io_task->mtask_addr) { |
4258 | | - pci_unmap_single(phba->pcidev, |
| 4240 | + dma_unmap_single(&phba->pcidev->dev, |
4259 | 4241 | io_task->mtask_addr, |
4260 | 4242 | io_task->mtask_data_count, |
4261 | | - PCI_DMA_TODEVICE); |
| 4243 | + DMA_TO_DEVICE); |
4262 | 4244 | io_task->mtask_addr = 0; |
4263 | 4245 | } |
4264 | 4246 | } |
.. | .. |
4852 | 4834 | |
4853 | 4835 | switch (bsg_req->msgcode) { |
4854 | 4836 | case ISCSI_BSG_HST_VENDOR: |
4855 | | - nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, |
| 4837 | + nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev, |
4856 | 4838 | job->request_payload.payload_len, |
4857 | | - &nonemb_cmd.dma); |
| 4839 | + &nonemb_cmd.dma, GFP_KERNEL); |
4858 | 4840 | if (nonemb_cmd.va == NULL) { |
4859 | 4841 | beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, |
4860 | 4842 | "BM_%d : Failed to allocate memory for " |
.. | .. |
4867 | 4849 | beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, |
4868 | 4850 | "BM_%d : MBX Tag Allocation Failed\n"); |
4869 | 4851 | |
4870 | | - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, |
| 4852 | + dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, |
4871 | 4853 | nonemb_cmd.va, nonemb_cmd.dma); |
4872 | 4854 | return -EAGAIN; |
4873 | 4855 | } |
.. | .. |
4881 | 4863 | if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { |
4882 | 4864 | clear_bit(MCC_TAG_STATE_RUNNING, |
4883 | 4865 | &phba->ctrl.ptag_state[tag].tag_state); |
4884 | | - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, |
| 4866 | + dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, |
4885 | 4867 | nonemb_cmd.va, nonemb_cmd.dma); |
4886 | 4868 | return -EIO; |
4887 | 4869 | } |
.. | .. |
4898 | 4880 | bsg_reply->result = status; |
4899 | 4881 | bsg_job_done(job, bsg_reply->result, |
4900 | 4882 | bsg_reply->reply_payload_rcv_len); |
4901 | | - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, |
| 4883 | + dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, |
4902 | 4884 | nonemb_cmd.va, nonemb_cmd.dma); |
4903 | 4885 | if (status || extd_status) { |
4904 | 4886 | beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, |
.. | .. |
5529 | 5511 | return PCI_ERS_RESULT_DISCONNECT; |
5530 | 5512 | } |
5531 | 5513 | |
5532 | | - pci_cleanup_aer_uncorrect_error_status(pdev); |
5533 | 5514 | return PCI_ERS_RESULT_RECOVERED; |
5534 | 5515 | } |
5535 | 5516 | |
.. | .. |
5755 | 5736 | beiscsi_cleanup_port(phba); |
5756 | 5737 | beiscsi_free_mem(phba); |
5757 | 5738 | free_port: |
5758 | | - pci_free_consistent(phba->pcidev, |
| 5739 | + dma_free_coherent(&phba->pcidev->dev, |
5759 | 5740 | phba->ctrl.mbox_mem_alloced.size, |
5760 | 5741 | phba->ctrl.mbox_mem_alloced.va, |
5761 | 5742 | phba->ctrl.mbox_mem_alloced.dma); |
.. | .. |
5800 | 5781 | |
5801 | 5782 | /* ctrl uninit */ |
5802 | 5783 | beiscsi_unmap_pci_function(phba); |
5803 | | - pci_free_consistent(phba->pcidev, |
| 5784 | + dma_free_coherent(&phba->pcidev->dev, |
5804 | 5785 | phba->ctrl.mbox_mem_alloced.size, |
5805 | 5786 | phba->ctrl.mbox_mem_alloced.va, |
5806 | 5787 | phba->ctrl.mbox_mem_alloced.dma); |
.. | .. |
5829 | 5810 | .destroy_session = beiscsi_session_destroy, |
5830 | 5811 | .create_conn = beiscsi_conn_create, |
5831 | 5812 | .bind_conn = beiscsi_conn_bind, |
| 5813 | + .unbind_conn = iscsi_conn_unbind, |
5832 | 5814 | .destroy_conn = iscsi_conn_teardown, |
5833 | 5815 | .attr_is_visible = beiscsi_attr_is_visible, |
5834 | 5816 | .set_iface_param = beiscsi_iface_set_param, |