2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/scsi/vmw_pvscsi.c
@@ -335,7 +335,7 @@
 	BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
 
 	sge = &ctx->sgl->sge[0];
-	for (i = 0; i < count; i++, sg++) {
+	for (i = 0; i < count; i++, sg = sg_next(sg)) {
 		sge[i].addr = sg_dma_address(sg);
 		sge[i].length = sg_dma_len(sg);
 		sge[i].flags = 0;
@@ -365,16 +365,16 @@
 		int segs = scsi_dma_map(cmd);
 
 		if (segs == -ENOMEM) {
-			scmd_printk(KERN_ERR, cmd,
+			scmd_printk(KERN_DEBUG, cmd,
 				    "vmw_pvscsi: Failed to map cmd sglist for DMA.\n");
 			return -ENOMEM;
 		} else if (segs > 1) {
 			pvscsi_create_sg(ctx, sg, segs);
 
 			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
-			ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
-						    SGL_SIZE, PCI_DMA_TODEVICE);
-			if (pci_dma_mapping_error(adapter->dev, ctx->sglPA)) {
+			ctx->sglPA = dma_map_single(&adapter->dev->dev,
+					ctx->sgl, SGL_SIZE, DMA_TO_DEVICE);
+			if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) {
 				scmd_printk(KERN_ERR, cmd,
 					    "vmw_pvscsi: Failed to map ctx sglist for DMA.\n");
 				scsi_dma_unmap(cmd);
@@ -389,10 +389,10 @@
 		 * In case there is no S/G list, scsi_sglist points
 		 * directly to the buffer.
 		 */
-		ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
+		ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
					     cmd->sc_data_direction);
-		if (pci_dma_mapping_error(adapter->dev, ctx->dataPA)) {
-			scmd_printk(KERN_ERR, cmd,
+		if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
+			scmd_printk(KERN_DEBUG, cmd,
 				    "vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
 			return -ENOMEM;
 		}
@@ -400,6 +400,17 @@
 	}
 
 	return 0;
+}
+
+/*
+ * The device incorrectly doesn't clear the first byte of the sense
+ * buffer in some cases. We have to do it ourselves.
+ * Otherwise we run into trouble when SWIOTLB is forced.
+ */
+static void pvscsi_patch_sense(struct scsi_cmnd *cmd)
+{
+	if (cmd->sense_buffer)
+		cmd->sense_buffer[0] = 0;
 }
 
 static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
@@ -417,23 +428,23 @@
 		if (count != 0) {
 			scsi_dma_unmap(cmd);
 			if (ctx->sglPA) {
-				pci_unmap_single(adapter->dev, ctx->sglPA,
-						 SGL_SIZE, PCI_DMA_TODEVICE);
+				dma_unmap_single(&adapter->dev->dev, ctx->sglPA,
+						 SGL_SIZE, DMA_TO_DEVICE);
 				ctx->sglPA = 0;
 			}
 		} else
-			pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
-					 cmd->sc_data_direction);
+			dma_unmap_single(&adapter->dev->dev, ctx->dataPA,
+					 bufflen, cmd->sc_data_direction);
 	}
 	if (cmd->sense_buffer)
-		pci_unmap_single(adapter->dev, ctx->sensePA,
-				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
+				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 }
 
 static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
 {
-	adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
-						    &adapter->ringStatePA);
+	adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
+			&adapter->ringStatePA, GFP_KERNEL);
 	if (!adapter->rings_state)
 		return -ENOMEM;
 
@@ -441,17 +452,17 @@
 				 pvscsi_ring_pages);
 	adapter->req_depth = adapter->req_pages
 				* PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
-	adapter->req_ring = pci_alloc_consistent(adapter->dev,
-						 adapter->req_pages * PAGE_SIZE,
-						 &adapter->reqRingPA);
+	adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev,
+			adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA,
+			GFP_KERNEL);
 	if (!adapter->req_ring)
 		return -ENOMEM;
 
 	adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
 				 pvscsi_ring_pages);
-	adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
-						 adapter->cmp_pages * PAGE_SIZE,
-						 &adapter->cmpRingPA);
+	adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev,
+			adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA,
+			GFP_KERNEL);
 	if (!adapter->cmp_ring)
 		return -ENOMEM;
 
@@ -464,9 +475,9 @@
 
 	adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
 				 pvscsi_msg_ring_pages);
-	adapter->msg_ring = pci_alloc_consistent(adapter->dev,
-						 adapter->msg_pages * PAGE_SIZE,
-						 &adapter->msgRingPA);
+	adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev,
+			adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA,
+			GFP_KERNEL);
 	if (!adapter->msg_ring)
 		return -ENOMEM;
 	BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
@@ -544,6 +555,8 @@
 	cmd = ctx->cmd;
 	abort_cmp = ctx->abort_cmp;
 	pvscsi_unmap_buffers(adapter, ctx);
+	if (sdstat != SAM_STAT_CHECK_CONDITION)
+		pvscsi_patch_sense(cmd);
 	pvscsi_release_context(adapter, ctx);
 	if (abort_cmp) {
 		/*
@@ -603,7 +616,7 @@
 	case BTSTAT_TAGREJECT:
 	case BTSTAT_BADMSG:
 		cmd->result = (DRIVER_INVALID << 24);
-		/* fall through */
+		fallthrough;
 
 	case BTSTAT_HAHARDWARE:
 	case BTSTAT_INVPHASE:
@@ -717,11 +730,11 @@
 	e->lun[1] = sdev->lun;
 
 	if (cmd->sense_buffer) {
-		ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
-					      SCSI_SENSE_BUFFERSIZE,
-					      PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(adapter->dev, ctx->sensePA)) {
-			scmd_printk(KERN_ERR, cmd,
+		ctx->sensePA = dma_map_single(&adapter->dev->dev,
+				cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+				DMA_FROM_DEVICE);
+		if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
+			scmd_printk(KERN_DEBUG, cmd,
 				    "vmw_pvscsi: Failed to map sense buffer for DMA.\n");
 			ctx->sensePA = 0;
 			return -ENOMEM;
@@ -749,9 +762,9 @@
 
 	if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) {
 		if (cmd->sense_buffer) {
-			pci_unmap_single(adapter->dev, ctx->sensePA,
+			dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
 					 SCSI_SENSE_BUFFERSIZE,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			ctx->sensePA = 0;
 		}
 		return -ENOMEM;
@@ -882,6 +895,7 @@
 			scmd_printk(KERN_ERR, cmd,
 				    "Forced reset on cmd %p\n", cmd);
 			pvscsi_unmap_buffers(adapter, ctx);
+			pvscsi_patch_sense(cmd);
 			pvscsi_release_context(adapter, ctx);
 			cmd->result = (DID_RESET << 16);
 			cmd->scsi_done(cmd);
@@ -903,7 +917,7 @@
 	use_msg = adapter->use_msg;
 
 	if (use_msg) {
-		adapter->use_msg = 0;
+		adapter->use_msg = false;
 		spin_unlock_irqrestore(&adapter->hw_lock, flags);
 
 		/*
@@ -1018,7 +1032,6 @@
 	.sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
 	.dma_boundary = UINT_MAX,
 	.max_sectors = 0xffff,
-	.use_clustering = ENABLE_CLUSTERING,
 	.change_queue_depth = pvscsi_change_queue_depth,
 	.eh_abort_handler = pvscsi_abort,
 	.eh_device_reset_handler = pvscsi_device_reset,
@@ -1227,21 +1240,21 @@
 	}
 
 	if (adapter->rings_state)
-		pci_free_consistent(adapter->dev, PAGE_SIZE,
+		dma_free_coherent(&adapter->dev->dev, PAGE_SIZE,
 				    adapter->rings_state, adapter->ringStatePA);
 
 	if (adapter->req_ring)
-		pci_free_consistent(adapter->dev,
+		dma_free_coherent(&adapter->dev->dev,
 				    adapter->req_pages * PAGE_SIZE,
 				    adapter->req_ring, adapter->reqRingPA);
 
 	if (adapter->cmp_ring)
-		pci_free_consistent(adapter->dev,
+		dma_free_coherent(&adapter->dev->dev,
 				    adapter->cmp_pages * PAGE_SIZE,
 				    adapter->cmp_ring, adapter->cmpRingPA);
 
 	if (adapter->msg_ring)
-		pci_free_consistent(adapter->dev,
+		dma_free_coherent(&adapter->dev->dev,
 				    adapter->msg_pages * PAGE_SIZE,
 				    adapter->msg_ring, adapter->msgRingPA);
 }
@@ -1300,8 +1313,8 @@
 	u32 numPhys = 16;
 
 	dev = pvscsi_dev(adapter);
-	config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
-					   &configPagePA);
+	config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
+			&configPagePA, GFP_KERNEL);
 	if (!config_page) {
 		dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
 		goto exit;
@@ -1335,7 +1348,8 @@
 	} else
 		dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
 			 header->hostStatus, header->scsiStatus);
-	pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
+	dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page,
+			  configPagePA);
 exit:
 	return numPhys;
 }
@@ -1355,11 +1369,9 @@
 	if (pci_enable_device(pdev))
 		return error;
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
-	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 		printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
-	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
-		   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
+	} else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
 		printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
 	} else {
 		printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");