2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/drivers/nvme/host/pci.c
+++ b/kernel/drivers/nvme/host/pci.c
@@ -33,7 +33,7 @@
 #define SQ_SIZE(q)      ((q)->q_depth << (q)->sqes)
 #define CQ_SIZE(q)      ((q)->q_depth * sizeof(struct nvme_completion))
 
-#define SGES_PER_PAGE   (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
+#define SGES_PER_PAGE   (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
 /*
  * These can be higher, but we need to ensure that any command doesn't
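Review note: SGL segment lists are carved out of controller pages, not host
pages, so the per-page descriptor count must use NVME_CTRL_PAGE_SIZE; on a
64 KiB-page host the old macro claimed 16x more descriptors per NVMe page
than actually fit. A standalone sketch of the arithmetic, assuming the
16-byte SGL descriptor layout from the NVMe spec and a 4 KiB controller
page:

#include <stdint.h>
#include <stdio.h>

/* 16 bytes, per the NVMe spec SGL descriptor format (assumed here) */
struct nvme_sgl_desc {
        uint64_t addr;
        uint32_t length;
        uint8_t  rsvd[3];
        uint8_t  type;
};

#define NVME_CTRL_PAGE_SIZE 4096        /* assumed, as in pci.c */
#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))

int main(void)
{
        /* 4096 / 16 = 256; a 64 KiB PAGE_SIZE made the old macro say 4096 */
        printf("SGES_PER_PAGE = %zu\n", SGES_PER_PAGE);
        return 0;
}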
@@ -139,9 +139,9 @@
         mempool_t *iod_mempool;
 
         /* shadow doorbell buffer support: */
-        u32 *dbbuf_dbs;
+        __le32 *dbbuf_dbs;
         dma_addr_t dbbuf_dbs_dma_addr;
-        u32 *dbbuf_eis;
+        __le32 *dbbuf_eis;
         dma_addr_t dbbuf_eis_dma_addr;
 
         /* host memory buffer support: */
@@ -209,10 +209,10 @@
 #define NVMEQ_SQ_CMB            1
 #define NVMEQ_DELETE_ERROR      2
 #define NVMEQ_POLLED            3
-        u32 *dbbuf_sq_db;
-        u32 *dbbuf_cq_db;
-        u32 *dbbuf_sq_ei;
-        u32 *dbbuf_cq_ei;
+        __le32 *dbbuf_sq_db;
+        __le32 *dbbuf_cq_db;
+        __le32 *dbbuf_sq_ei;
+        __le32 *dbbuf_cq_ei;
         struct completion delete_done;
 };
 
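Review note: the shadow doorbell and event-index buffers live in host memory
but are read by the controller, and the spec defines them as little-endian,
so typing them __le32 lets sparse flag any access that bypasses the
byte-swap helpers. A simplified model of that annotation (not the literal
kernel definitions from include/uapi/linux/types.h):

#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#else
#define __bitwise
#endif

typedef unsigned int u32;
typedef u32 __bitwise le32;     /* stands in for the kernel's __le32 */

static le32 shadow_db;          /* device-visible: fixed little-endian */

int main(void)
{
        u32 value = 42;

        shadow_db = value;      /* sparse would warn: bare store into le32 */
        return 0;
}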
@@ -334,11 +334,11 @@
 }
 
 /* Update dbbuf and return true if an MMIO is required */
-static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
-                                              volatile u32 *dbbuf_ei)
+static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
+                                              volatile __le32 *dbbuf_ei)
 {
         if (dbbuf_db) {
-                u16 old_value;
+                u16 old_value, event_idx;
 
                 /*
                  * Ensure that the queue is written before updating
@@ -346,8 +346,8 @@
                  */
                 wmb();
 
-                old_value = *dbbuf_db;
-                *dbbuf_db = value;
+                old_value = le32_to_cpu(*dbbuf_db);
+                *dbbuf_db = cpu_to_le32(value);
 
                 /*
                  * Ensure that the doorbell is updated before reading the event
@@ -357,7 +357,8 @@
                  */
                 mb();
 
-                if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
+                event_idx = le32_to_cpu(*dbbuf_ei);
+                if (!nvme_dbbuf_need_event(event_idx, value, old_value))
                         return false;
         }
 
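Review note: all three operands of the event-index test are now CPU-endian
u16 values, while the shared buffer itself stays little-endian as the spec
requires. A runnable demo of the wraparound-safe comparison used by
nvme_dbbuf_need_event(), which is defined just above this function in
pci.c:

#include <stdint.h>
#include <stdio.h>

/* True when event_idx falls in (old, new_idx], modulo 2^16 */
static int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
        printf("%d\n", need_event(5, 6, 4));     /* 1: doorbell crossed 5 */
        printf("%d\n", need_event(100, 6, 4));   /* 0: 100 not in (4, 6] */
        printf("%d\n", need_event(2, 3, 65534)); /* 1: crossed at wraparound */
        return 0;
}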
@@ -371,9 +372,9 @@
  */
 static int nvme_pci_npages_prp(void)
 {
-        unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
-                                      NVME_CTRL_PAGE_SIZE);
-        return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+        unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
+        unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
+        return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
 }
 
 /*
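Review note: the old code dropped the KiB-to-bytes conversion on
NVME_MAX_KB_SZ and sized the PRP-list pages by the host PAGE_SIZE; both
mistakes undersized the worst-case allocation. Worked numbers, assuming
NVME_MAX_KB_SZ == 4096 and NVME_CTRL_PAGE_SIZE == 4096 as defined in this
file:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define NVME_MAX_KB_SZ      4096        /* assumed, as in pci.c */
#define NVME_CTRL_PAGE_SIZE 4096        /* assumed, as in pci.c */

int main(void)
{
        unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
        unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);

        /* 4 MiB + one page => 1025 PRP entries; each 4 KiB list page holds
         * 511 entries plus a chain pointer => 3 list pages, not 1. */
        printf("nprps=%u pages=%u\n", nprps,
               DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8));
        return 0;
}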
@@ -383,15 +384,7 @@
 static int nvme_pci_npages_sgl(void)
 {
         return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
-                        PAGE_SIZE);
-}
-
-static size_t nvme_pci_iod_alloc_size(void)
-{
-        size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());
-
-        return sizeof(__le64 *) * npages +
-                sizeof(struct scatterlist) * NVME_MAX_SEGS;
+                        NVME_CTRL_PAGE_SIZE);
 }
 
 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -734,7 +727,7 @@
                 sge->length = cpu_to_le32(entries * sizeof(*sge));
                 sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
         } else {
-                sge->length = cpu_to_le32(PAGE_SIZE);
+                sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE);
                 sge->type = NVME_SGL_FMT_SEG_DESC << 4;
         }
 }
@@ -967,7 +960,8 @@
 
         if (blk_integrity_rq(req))
                 dma_unmap_page(dev->dev, iod->meta_dma,
-                        rq_integrity_vec(req)->bv_len, rq_data_dir(req));
+                        rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
+
         if (blk_rq_nr_phys_segments(req))
                 nvme_unmap_data(dev, req);
         nvme_complete_rq(req);
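Review note: dma_unmap_page() takes an enum dma_data_direction, but
rq_data_dir() returns READ/WRITE (0/1), so a read request was unmapped as
DMA_BIDIRECTIONAL (0) rather than the DMA_FROM_DEVICE it was mapped with;
rq_dma_dir() performs the proper translation. A standalone illustration,
mirroring the kernel's constant values:

#include <stdio.h>

enum dma_data_direction {       /* as in include/linux/dma-direction.h */
        DMA_BIDIRECTIONAL = 0,
        DMA_TO_DEVICE = 1,
        DMA_FROM_DEVICE = 2,
};

#define READ    0
#define WRITE   1

int main(void)
{
        int is_write = 0;       /* a read request */

        /* what rq_data_dir() hands to dma_unmap_page(): 0 == BIDIRECTIONAL */
        int data_dir = is_write ? WRITE : READ;
        /* what rq_dma_dir() hands over: the correct DMA enum value */
        enum dma_data_direction dma_dir =
                is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

        printf("rq_data_dir=%d rq_dma_dir=%d\n", data_dir, dma_dir);
        return 0;
}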
@@ -1291,7 +1285,7 @@
         else
                 nvme_poll_irqdisable(nvmeq);
 
-        if (blk_mq_request_completed(req)) {
+        if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
                 dev_warn(dev->ctrl.device,
                          "I/O %d QID %d timeout, completion polled\n",
                          req->tag, nvmeq->qid);
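Review note: blk_mq_request_completed() is true only while the request sits
in MQ_RQ_COMPLETE, so it misses one that the polled completion has already
recycled to MQ_RQ_IDLE; comparing against MQ_RQ_IN_FLIGHT covers both
terminal cases. The state enum, as declared in include/linux/blk-mq.h:

enum mq_rq_state {
        MQ_RQ_IDLE              = 0,
        MQ_RQ_IN_FLIGHT         = 1,
        MQ_RQ_COMPLETE          = 2,
};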
@@ -2394,6 +2388,9 @@
 
         dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
 
+        if (dev->ctrl.quirks & NVME_QUIRK_LIMIT_IOQD32)
+                io_queue_depth = 32;
+
         dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
                                 io_queue_depth);
         dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
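Review note: forcing io_queue_depth down before the min_t() clamp caps
quirked controllers at 32 entries regardless of the larger CAP.MQES they
advertise. A sketch of the 0's-based math, using hypothetical values:

#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
        unsigned int mqes = 1023;         /* hypothetical NVME_CAP_MQES(cap) */
        unsigned int io_queue_depth = 32; /* NVME_QUIRK_LIMIT_IOQD32 in effect */
        unsigned int q_depth = min_t(unsigned int, mqes + 1, io_queue_depth);

        /* prints q_depth=32 sqsize=31 (sqsize is 0's based) */
        printf("q_depth=%u sqsize=%u\n", q_depth, q_depth - 1);
        return 0;
}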
@@ -2555,6 +2552,22 @@
         dma_pool_destroy(dev->prp_small_pool);
 }
 
+static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
+{
+        size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());
+        size_t alloc_size = sizeof(__le64 *) * npages +
+                            sizeof(struct scatterlist) * NVME_MAX_SEGS;
+
+        WARN_ON_ONCE(alloc_size > PAGE_SIZE);
+        dev->iod_mempool = mempool_create_node(1,
+                        mempool_kmalloc, mempool_kfree,
+                        (void *)alloc_size, GFP_KERNEL,
+                        dev_to_node(dev->dev));
+        if (!dev->iod_mempool)
+                return -ENOMEM;
+        return 0;
+}
+
 static void nvme_free_tagset(struct nvme_dev *dev)
 {
         if (dev->tagset.tags)
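Review note: this moves the worst-case per-command allocation (PRP-list
pointers plus the scatterlist) next to its size guard and allocates it on
the device's NUMA node. Rough numbers behind the WARN_ON_ONCE, assuming
NVME_MAX_SEGS == 127, a 32-byte struct scatterlist, and the three PRP-list
pages worked out earlier:

#include <stdio.h>

int main(void)
{
        unsigned npages = 3;    /* max(npages_prp() = 3, npages_sgl() = 1) */
        unsigned alloc_size = sizeof(void *) * npages + 32 * 127;

        /* 24 + 4064 = 4088 bytes on a 64-bit host: just under 4 KiB */
        printf("alloc_size=%u\n", alloc_size);
        return 0;
}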
@@ -2562,6 +2575,7 @@
         dev->ctrl.tagset = NULL;
 }
 
+/* pairs with nvme_pci_alloc_dev */
 static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
 {
         struct nvme_dev *dev = to_nvme_dev(ctrl);
@@ -2838,32 +2852,6 @@
         return 0;
 }
 
-#ifdef CONFIG_ACPI
-static bool nvme_acpi_storage_d3(struct pci_dev *dev)
-{
-        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
-        u8 val;
-
-        /*
-         * Look for _DSD property specifying that the storage device on the port
-         * must use D3 to support deep platform power savings during
-         * suspend-to-idle.
-         */
-
-        if (!adev)
-                return false;
-        if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
-                        &val))
-                return false;
-        return val == 1;
-}
-#else
-static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
-{
-        return false;
-}
-#endif /* CONFIG_ACPI */
-
 static void nvme_async_probe(void *data, async_cookie_t cookie)
 {
         struct nvme_dev *dev = data;
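Review note: the driver-local _DSD lookup is dropped in favor of the
generic helper in the ACPI core, which performs the same "StorageD3Enable"
check for any device; its !CONFIG_ACPI stub returns false, so the call site
in nvme_pci_alloc_dev() needs no #ifdef. The interface this hunk now relies
on, paraphrased from include/linux/acpi.h:

#ifdef CONFIG_ACPI
bool acpi_storage_d3(struct device *dev);       /* reads "StorageD3Enable" */
#else
static inline bool acpi_storage_d3(struct device *dev)
{
        return false;
}
#endif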
@@ -2873,20 +2861,20 @@
         nvme_put_ctrl(&dev->ctrl);
 }
 
-static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
+                const struct pci_device_id *id)
 {
-        int node, result = -ENOMEM;
-        struct nvme_dev *dev;
         unsigned long quirks = id->driver_data;
-        size_t alloc_size;
-
-        node = dev_to_node(&pdev->dev);
-        if (node == NUMA_NO_NODE)
-                set_dev_node(&pdev->dev, first_memory_node);
+        int node = dev_to_node(&pdev->dev);
+        struct nvme_dev *dev;
+        int ret = -ENOMEM;
 
         dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
         if (!dev)
-                return -ENOMEM;
+                return ERR_PTR(-ENOMEM);
+        INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
+        INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
+        mutex_init(&dev->shutdown_lock);
 
         dev->nr_write_queues = write_queues;
         dev->nr_poll_queues = poll_queues;
@@ -2894,26 +2882,12 @@
         dev->queues = kcalloc_node(dev->nr_allocated_queues,
                         sizeof(struct nvme_queue), GFP_KERNEL, node);
         if (!dev->queues)
-                goto free;
+                goto out_free_dev;
 
         dev->dev = get_device(&pdev->dev);
-        pci_set_drvdata(pdev, dev);
-
-        result = nvme_dev_map(dev);
-        if (result)
-                goto put_pci;
-
-        INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
-        INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
-        mutex_init(&dev->shutdown_lock);
-
-        result = nvme_setup_prp_pools(dev);
-        if (result)
-                goto unmap;
 
         quirks |= check_vendor_combination_bug(pdev);
-
-        if (!noacpi && nvme_acpi_storage_d3(pdev)) {
+        if (!noacpi && acpi_storage_d3(&pdev->dev)) {
                 /*
                  * Some systems use a bios work around to ask for D3 on
                  * platforms that support kernel managed suspend.
@@ -2922,46 +2896,54 @@
                          "platform quirk: setting simple suspend\n");
                 quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
         }
+        ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
+                        quirks);
+        if (ret)
+                goto out_put_device;
+        return dev;
 
-        /*
-         * Double check that our mempool alloc size will cover the biggest
-         * command we support.
-         */
-        alloc_size = nvme_pci_iod_alloc_size();
-        WARN_ON_ONCE(alloc_size > PAGE_SIZE);
+out_put_device:
+        put_device(dev->dev);
+        kfree(dev->queues);
+out_free_dev:
+        kfree(dev);
+        return ERR_PTR(ret);
+}
 
-        dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
-                                                mempool_kfree,
-                                                (void *) alloc_size,
-                                                GFP_KERNEL, node);
-        if (!dev->iod_mempool) {
-                result = -ENOMEM;
-                goto release_pools;
-        }
+static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+        struct nvme_dev *dev;
+        int result = -ENOMEM;
 
-        result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
-                        quirks);
+        dev = nvme_pci_alloc_dev(pdev, id);
+        if (IS_ERR(dev))
+                return PTR_ERR(dev);
+
+        result = nvme_dev_map(dev);
         if (result)
-                goto release_mempool;
+                goto out_uninit_ctrl;
+
+        result = nvme_setup_prp_pools(dev);
+        if (result)
+                goto out_dev_unmap;
+
+        result = nvme_pci_alloc_iod_mempool(dev);
+        if (result)
+                goto out_release_prp_pools;
 
         dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
+        pci_set_drvdata(pdev, dev);
 
         nvme_reset_ctrl(&dev->ctrl);
         async_schedule(nvme_async_probe, dev);
-
         return 0;
 
- release_mempool:
-        mempool_destroy(dev->iod_mempool);
- release_pools:
+out_release_prp_pools:
         nvme_release_prp_pools(dev);
- unmap:
+out_dev_unmap:
         nvme_dev_unmap(dev);
- put_pci:
-        put_device(dev->dev);
- free:
-        kfree(dev->queues);
-        kfree(dev);
+out_uninit_ctrl:
+        nvme_uninit_ctrl(&dev->ctrl);
         return result;
 }
 
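Review note: after the split, nvme_pci_alloc_dev() owns exactly what it
allocates, nvme_probe() unwinds its own steps in reverse through the out_*
ladder, and the reference taken by nvme_init_ctrl() is now dropped via
nvme_uninit_ctrl() on every probe failure. A condensed view of the pairing
(a summary of this patch, not additional kernel code):

/*
 * nvme_pci_alloc_dev()              error unwind / release path
 *   kzalloc_node(dev)          <->    kfree(dev)
 *   kcalloc_node(dev->queues)  <->    kfree(dev->queues)
 *   get_device(&pdev->dev)     <->    put_device(dev->dev)
 *   nvme_init_ctrl()           <->    nvme_uninit_ctrl() (probe unwind)
 *
 * nvme_probe()
 *   nvme_dev_map()             <->    nvme_dev_unmap()
 *   nvme_setup_prp_pools()     <->    nvme_release_prp_pools()
 *   nvme_pci_alloc_iod_mempool()      (freed on the ctrl release path)
 */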
@@ -3236,6 +3218,8 @@
                 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
                                 NVME_QUIRK_DISABLE_WRITE_ZEROES |
                                 NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+        { PCI_DEVICE(0x1987, 0x5013),   /* Phison E13 */
+                .driver_data = NVME_QUIRK_LIMIT_IOQD32, },
         { PCI_DEVICE(0x1987, 0x5016),   /* Phison E16 */
                 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
                                 NVME_QUIRK_BOGUS_NID, },