```
  33   33    #define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
  34   34    #define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
  35   35
  36        - #define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
       36   + #define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
  37   37
  38   38    /*
  39   39     * These can be higher, but we need to ensure that any command doesn't
```
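SGL descriptor pages come from pools sized by the controller page size, which the driver fixes at `NVME_CTRL_PAGE_SIZE` independent of the host's `PAGE_SIZE`; on a 64 KiB-page host the old macro claimed sixteen times more descriptors per segment than a pool page actually holds. A minimal sanity check, assuming the mainline values of a 4 KiB controller page and a 16-byte descriptor (the constants are assumptions, not from the diff):

```c
#include <linux/build_bug.h>

/* Sketch only: 4096 / 16 == 256 descriptors per segment page, whatever
 * the host PAGE_SIZE is (16 KiB and 64 KiB pages are common on arm64
 * and ppc64 configurations). */
static_assert(SGES_PER_PAGE == 256,
	      "expects NVME_CTRL_PAGE_SIZE == 4096 and a 16-byte nvme_sgl_desc");
```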
```
 139  139            mempool_t *iod_mempool;
 140  140
 141  141            /* shadow doorbell buffer support: */
 142        -        u32 *dbbuf_dbs;
      142   +        __le32 *dbbuf_dbs;
 143  143            dma_addr_t dbbuf_dbs_dma_addr;
 144        -        u32 *dbbuf_eis;
      144   +        __le32 *dbbuf_eis;
 145  145            dma_addr_t dbbuf_eis_dma_addr;
 146  146
 147  147            /* host memory buffer support: */
  ..   ..
 209  209    #define NVMEQ_SQ_CMB 1
 210  210    #define NVMEQ_DELETE_ERROR 2
 211  211    #define NVMEQ_POLLED 3
 212        -        u32 *dbbuf_sq_db;
 213        -        u32 *dbbuf_cq_db;
 214        -        u32 *dbbuf_sq_ei;
 215        -        u32 *dbbuf_cq_ei;
      212   +        __le32 *dbbuf_sq_db;
      213   +        __le32 *dbbuf_cq_db;
      214   +        __le32 *dbbuf_sq_ei;
      215   +        __le32 *dbbuf_cq_ei;
 216  216            struct completion delete_done;
 217  217    };
 218  218
```
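Typing the shadow doorbell and event-index buffers `__le32` matters because they are shared with the controller, and the spec defines them as little-endian; with plain `u32` a big-endian host silently exchanged CPU-endian values. The annotation turns any unconverted access into a sparse warning. A sketch of the access pattern the typing enforces (the helper names are illustrative, not from the patch):

```c
/* With __le32 pointers, sparse rejects plain loads and stores; every
 * access is forced through the byte-order conversion helpers. */
static inline u32 nvme_dbbuf_read(const volatile __le32 *p)
{
	return le32_to_cpu(*p);		/* buffer holds LE32 on the wire */
}

static inline void nvme_dbbuf_write(__le32 *p, u32 val)
{
	*p = cpu_to_le32(val);		/* store back as LE32 */
}
```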
```
 334  334    }
 335  335
 336  336    /* Update dbbuf and return true if an MMIO is required */
 337        - static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
 338        -                volatile u32 *dbbuf_ei)
      337   + static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
      338   +                volatile __le32 *dbbuf_ei)
 339  339    {
 340  340            if (dbbuf_db) {
 341        -                u16 old_value;
      341   +                u16 old_value, event_idx;
 342  342
 343  343                    /*
 344  344                     * Ensure that the queue is written before updating
  ..   ..
 346  346                     */
 347  347                    wmb();
 348  348
 349        -                old_value = *dbbuf_db;
 350        -                *dbbuf_db = value;
      349   +                old_value = le32_to_cpu(*dbbuf_db);
      350   +                *dbbuf_db = cpu_to_le32(value);
 351  351
 352  352                    /*
 353  353                     * Ensure that the doorbell is updated before reading the event
  ..   ..
 357  357                     */
 358  358                    mb();
 359  359
 360        -                if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
      360   +                event_idx = le32_to_cpu(*dbbuf_ei);
      361   +                if (!nvme_dbbuf_need_event(event_idx, value, old_value))
 361  362                            return false;
 362  363            }
 363  364
```
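For context, `nvme_dbbuf_need_event()` (not touched by this diff) is the same window test virtio uses for event indexes: ring the hardware doorbell only when the controller's advertised event index falls inside the half-open window (old, new], computed with unsigned wrap-around. The in-tree helper reads essentially as follows, which is why feeding it a raw, unswapped `*dbbuf_ei` broke big-endian hosts:

```c
/* True iff event_idx lies in (old, new] modulo 2^16; relies on
 * well-defined unsigned wrap-around arithmetic. */
static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}
```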
```
 371  372     */
 372  373    static int nvme_pci_npages_prp(void)
 373  374    {
 374        -        unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
 375        -                        NVME_CTRL_PAGE_SIZE);
 376        -        return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
      375   +        unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
      376   +        unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
      377   +        return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
 377  378    }
 378  379
 379  380    /*
```
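Two bugs fall out of the old version: `NVME_MAX_KB_SZ` is in kilobytes but was added as if it were bytes, and a PRP list page is sized by the controller's page size, not the host's `PAGE_SIZE`. Working the numbers under common assumptions (`NVME_MAX_KB_SZ == 4096` and a 4 KiB controller page; the constants vary by kernel version):

```c
/*
 * max_bytes = 4096 * 1024 + 4096 = 4198400   worst case, including one
 *                                            extra page of slack for an
 *                                            unaligned starting offset
 * nprps     = 4198400 / 4096     = 1025      PRP entries required
 * npages    = DIV_ROUND_UP(8 * 1025, 4096 - 8) = 3   PRP list pages
 *
 * Each PRP entry is 8 bytes, and the last slot of every list page is a
 * chain pointer to the next page, hence the "- 8" in the divisor.
 */
```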
```
 383  384    static int nvme_pci_npages_sgl(void)
 384  385    {
 385  386            return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
 386        -                        PAGE_SIZE);
 387        - }
 388        -
 389        - static size_t nvme_pci_iod_alloc_size(void)
 390        - {
 391        -        size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());
 392        -
 393        -        return sizeof(__le64 *) * npages +
 394        -                sizeof(struct scatterlist) * NVME_MAX_SEGS;
      387   +                        NVME_CTRL_PAGE_SIZE);
 395  388    }
 396  389
 397  390    static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
```
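The sizing here depends only on the 16-byte wire format of an SGL descriptor, defined in include/linux/nvme.h:

```c
struct nvme_sgl_desc {
	__le64	addr;		/* data buffer or next-segment address */
	__le32	length;		/* byte count */
	__u8	rsvd[3];
	__u8	type;		/* descriptor type code in the high nibble */
};
```

Note that `nvme_pci_iod_alloc_size()` is not gone; its body reappears below in `nvme_pci_alloc_iod_mempool()`, closer to its only user.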
```
 734  727                    sge->length = cpu_to_le32(entries * sizeof(*sge));
 735  728                    sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
 736  729            } else {
 737        -                sge->length = cpu_to_le32(PAGE_SIZE);
      730   +                sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE);
 738  731                    sge->type = NVME_SGL_FMT_SEG_DESC << 4;
 739  732            }
 740  733    }
```
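The `<< 4` shifts place the descriptor format code in the upper nibble of the type byte (the lower nibble is a sub-type). The relevant codes from include/linux/nvme.h are below; a Segment descriptor points at a further page of descriptors, and since that page comes from a pool sized by `NVME_CTRL_PAGE_SIZE`, the advertised length has to match:

```c
enum {
	NVME_SGL_FMT_DATA_DESC		= 0x00,	/* plain data buffer */
	NVME_SGL_FMT_SEG_DESC		= 0x02,	/* another segment follows */
	NVME_SGL_FMT_LAST_SEG_DESC	= 0x03,	/* final segment in the chain */
};
```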
```
 967  960
 968  961            if (blk_integrity_rq(req))
 969  962                    dma_unmap_page(dev->dev, iod->meta_dma,
 970        -                        rq_integrity_vec(req)->bv_len, rq_data_dir(req));
      963   +                        rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
      964   +
 971  965            if (blk_rq_nr_phys_segments(req))
 972  966                    nvme_unmap_data(dev, req);
 973  967            nvme_complete_rq(req);
```
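`dma_unmap_page()` expects an `enum dma_data_direction`, while `rq_data_dir()` returns the block layer's READ/WRITE values (0/1). Passing the latter happened to work for writes (`WRITE == 1 == DMA_TO_DEVICE`) but unmapped reads as `DMA_BIDIRECTIONAL` (0) rather than `DMA_FROM_DEVICE`. The blk-mq helper does the correct translation:

```c
/* include/linux/blk-mq.h */
static inline enum dma_data_direction rq_dma_dir(struct request *rq)
{
	return rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
```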
```
1291 1285            else
1292 1286                    nvme_poll_irqdisable(nvmeq);
1293 1287
1294        -        if (blk_mq_request_completed(req)) {
     1288   +        if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
1295 1289                    dev_warn(dev->ctrl.device,
1296 1290                             "I/O %d QID %d timeout, completion polled\n",
1297 1291                             req->tag, nvmeq->qid);
```
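`blk_mq_request_completed()` tests only for `MQ_RQ_COMPLETE`, but when the timeout handler polls the queue itself, the completion can run inline and the request may already have been recycled to `MQ_RQ_IDLE` by the time of the check; comparing against `MQ_RQ_IN_FLIGHT` covers both cases. The states, from include/linux/blk-mq.h:

```c
enum mq_rq_state {
	MQ_RQ_IDLE		= 0,	/* free, or already recycled */
	MQ_RQ_IN_FLIGHT		= 1,	/* issued, not yet completed */
	MQ_RQ_COMPLETE		= 2,	/* completion in progress */
};
```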
```
2394 2388
2395 2389            dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
2396 2390
     2391   +        if (dev->ctrl.quirks & NVME_QUIRK_LIMIT_IOQD32)
     2392   +                io_queue_depth = 32;
     2393   +
2397 2394            dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
2398 2395                                    io_queue_depth);
2399 2396            dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
```
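For reference, CAP.MQES lives in the low 16 bits of the capability register and is zero's-based, hence the `+ 1`; the new quirk clamps the module-parameter depth to 32 before that `min_t()` is applied:

```c
/* include/linux/nvme.h: maximum queue entries supported, 0's based */
#define NVME_CAP_MQES(cap)	((cap) & 0xffff)
```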
```
2555 2552            dma_pool_destroy(dev->prp_small_pool);
2556 2553    }
2557 2554
     2555   + static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
     2556   + {
     2557   +        size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());
     2558   +        size_t alloc_size = sizeof(__le64 *) * npages +
     2559   +                            sizeof(struct scatterlist) * NVME_MAX_SEGS;
     2560   +
     2561   +        WARN_ON_ONCE(alloc_size > PAGE_SIZE);
     2562   +        dev->iod_mempool = mempool_create_node(1,
     2563   +                        mempool_kmalloc, mempool_kfree,
     2564   +                        (void *)alloc_size, GFP_KERNEL,
     2565   +                        dev_to_node(dev->dev));
     2566   +        if (!dev->iod_mempool)
     2567   +                return -ENOMEM;
     2568   +        return 0;
     2569   + }
     2570   +
2558 2571    static void nvme_free_tagset(struct nvme_dev *dev)
2559 2572    {
2560 2573            if (dev->tagset.tags)
```
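`mempool_kmalloc()`/`mempool_kfree()` are the stock kmalloc-backed element hooks: the element size is smuggled through the `pool_data` cookie, and the first argument reserves one pre-allocated element so an I/O descriptor can always be obtained under memory pressure. From mm/mempool.c, the hooks are essentially:

```c
/* mm/mempool.c */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;	/* element size via pool_data */

	return kmalloc(size, gfp_mask);
}

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
```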
```
2562 2575            dev->ctrl.tagset = NULL;
2563 2576    }
2564 2577
     2578   + /* pairs with nvme_pci_alloc_dev */
2565 2579    static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
2566 2580    {
2567 2581            struct nvme_dev *dev = to_nvme_dev(ctrl);
  ..   ..
2838 2852            return 0;
2839 2853    }
2840 2854
2841        - #ifdef CONFIG_ACPI
2842        - static bool nvme_acpi_storage_d3(struct pci_dev *dev)
2843        - {
2844        -        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
2845        -        u8 val;
2846        -
2847        -        /*
2848        -         * Look for _DSD property specifying that the storage device on the port
2849        -         * must use D3 to support deep platform power savings during
2850        -         * suspend-to-idle.
2851        -         */
2852        -
2853        -        if (!adev)
2854        -                return false;
2855        -        if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
2856        -                        &val))
2857        -                return false;
2858        -        return val == 1;
2859        - }
2860        - #else
2861        - static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
2862        - {
2863        -        return false;
2864        - }
2865        - #endif /* CONFIG_ACPI */
2866        -
```
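The open-coded `_DSD` lookup is removed in favor of the generic `acpi_storage_d3()` from drivers/acpi/device_pm.c, shared with other storage drivers, and it also compiles away cleanly when CONFIG_ACPI is off. Modulo later refinements upstream, the shared helper carries the same logic deleted above; roughly:

```c
/* Sketch of the generic replacement (drivers/acpi/device_pm.c): look for
 * the _DSD "StorageD3Enable" property saying the device on this port must
 * use D3 for deep platform power savings during suspend-to-idle. */
bool acpi_storage_d3(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	u8 val;

	if (!adev)
		return false;
	if (fwnode_property_read_u8(acpi_fwnode_handle(adev),
				    "StorageD3Enable", &val))
		return false;
	return val == 1;
}
```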
```
2867 2855    static void nvme_async_probe(void *data, async_cookie_t cookie)
2868 2856    {
2869 2857            struct nvme_dev *dev = data;
  ..   ..
2873 2861            nvme_put_ctrl(&dev->ctrl);
2874 2862    }
2875 2863
2876        - static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
     2864   + static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
     2865   +                const struct pci_device_id *id)
2877 2866    {
2878        -        int node, result = -ENOMEM;
2879        -        struct nvme_dev *dev;
2880 2867            unsigned long quirks = id->driver_data;
2881        -        size_t alloc_size;
2882        -
2883        -        node = dev_to_node(&pdev->dev);
2884        -        if (node == NUMA_NO_NODE)
2885        -                set_dev_node(&pdev->dev, first_memory_node);
     2868   +        int node = dev_to_node(&pdev->dev);
     2869   +        struct nvme_dev *dev;
     2870   +        int ret = -ENOMEM;
2886 2871
2887 2872            dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
2888 2873            if (!dev)
2889        -                return -ENOMEM;
     2874   +                return ERR_PTR(-ENOMEM);
     2875   +        INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
     2876   +        INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
     2877   +        mutex_init(&dev->shutdown_lock);
2890 2878
2891 2879            dev->nr_write_queues = write_queues;
2892 2880            dev->nr_poll_queues = poll_queues;
  ..   ..
2894 2882            dev->queues = kcalloc_node(dev->nr_allocated_queues,
2895 2883                            sizeof(struct nvme_queue), GFP_KERNEL, node);
2896 2884            if (!dev->queues)
2897        -                goto free;
     2885   +                goto out_free_dev;
2898 2886
2899 2887            dev->dev = get_device(&pdev->dev);
2900        -        pci_set_drvdata(pdev, dev);
2901        -
2902        -        result = nvme_dev_map(dev);
2903        -        if (result)
2904        -                goto put_pci;
2905        -
2906        -        INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
2907        -        INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
2908        -        mutex_init(&dev->shutdown_lock);
2909        -
2910        -        result = nvme_setup_prp_pools(dev);
2911        -        if (result)
2912        -                goto unmap;
2913 2888
2914 2889            quirks |= check_vendor_combination_bug(pdev);
2915        -
2916        -        if (!noacpi && nvme_acpi_storage_d3(pdev)) {
     2890   +        if (!noacpi && acpi_storage_d3(&pdev->dev)) {
2917 2891                    /*
2918 2892                     * Some systems use a bios work around to ask for D3 on
2919 2893                     * platforms that support kernel managed suspend.
  ..   ..
2922 2896                            "platform quirk: setting simple suspend\n");
2923 2897                    quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
2924 2898            }
     2899   +        ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
     2900   +                        quirks);
     2901   +        if (ret)
     2902   +                goto out_put_device;
     2903   +        return dev;
2925 2904
2926        -        /*
2927        -         * Double check that our mempool alloc size will cover the biggest
2928        -         * command we support.
2929        -         */
2930        -        alloc_size = nvme_pci_iod_alloc_size();
2931        -        WARN_ON_ONCE(alloc_size > PAGE_SIZE);
     2905   + out_put_device:
     2906   +        put_device(dev->dev);
     2907   +        kfree(dev->queues);
     2908   + out_free_dev:
     2909   +        kfree(dev);
     2910   +        return ERR_PTR(ret);
     2911   + }
2932 2912
```
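The split gives `nvme_probe()` a constructor with the usual ERR_PTR contract: `nvme_pci_alloc_dev()` unwinds everything it allocated itself on failure, and once `nvme_init_ctrl()` has succeeded the object is refcounted, so later teardown must go through the ctrl reference rather than a raw `kfree()`. Note also that the work structs and mutex are now initialized before any failure point, so the refcounted free path is always safe. A minimal sketch of the convention (hypothetical `foo` names, not from the patch):

```c
/* An ERR_PTR-returning constructor either hands back a fully built
 * object or cleans up after itself and encodes the errno. */
static int foo_probe(struct device *parent)
{
	struct foo *foo = foo_alloc(parent);	/* hypothetical helper */

	if (IS_ERR(foo))
		return PTR_ERR(foo);	/* foo_alloc() already unwound */

	/* foo is complete and refcounted from here on */
	return 0;
}
```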
```
2933        -        dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
2934        -                        mempool_kfree,
2935        -                        (void *) alloc_size,
2936        -                        GFP_KERNEL, node);
2937        -        if (!dev->iod_mempool) {
2938        -                result = -ENOMEM;
2939        -                goto release_pools;
2940        -        }
     2913   + static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
     2914   + {
     2915   +        struct nvme_dev *dev;
     2916   +        int result = -ENOMEM;
2941 2917
2942        -        result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
2943        -                        quirks);
     2918   +        dev = nvme_pci_alloc_dev(pdev, id);
     2919   +        if (IS_ERR(dev))
     2920   +                return PTR_ERR(dev);
     2921   +
     2922   +        result = nvme_dev_map(dev);
2944 2923            if (result)
2945        -                goto release_mempool;
     2924   +                goto out_uninit_ctrl;
     2925   +
     2926   +        result = nvme_setup_prp_pools(dev);
     2927   +        if (result)
     2928   +                goto out_dev_unmap;
     2929   +
     2930   +        result = nvme_pci_alloc_iod_mempool(dev);
     2931   +        if (result)
     2932   +                goto out_release_prp_pools;
2946 2933
2947 2934            dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
     2935   +        pci_set_drvdata(pdev, dev);
2948 2936
2949 2937            nvme_reset_ctrl(&dev->ctrl);
2950 2938            async_schedule(nvme_async_probe, dev);
2951        -
2952 2939            return 0;
2953 2940
2954        - release_mempool:
2955        -        mempool_destroy(dev->iod_mempool);
2956        - release_pools:
     2941   + out_release_prp_pools:
2957 2942            nvme_release_prp_pools(dev);
2958        - unmap:
     2943   + out_dev_unmap:
2959 2944            nvme_dev_unmap(dev);
2960        - put_pci:
2961        -        put_device(dev->dev);
2962        - free:
2963        -        kfree(dev->queues);
2964        -        kfree(dev);
     2945   + out_uninit_ctrl:
     2946   +        nvme_uninit_ctrl(&dev->ctrl);
2965 2947            return result;
2966 2948    }
2967 2949
```
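With the constructor split, every acquisition in `nvme_pci_alloc_dev()` needs a release on the far side of the ctrl refcount, which is what the `/* pairs with nvme_pci_alloc_dev */` comment above documents. A sketch of the pairing (the free side is paraphrased from memory, so treat it as illustrative):

```c
/*
 *   nvme_pci_alloc_dev()            nvme_pci_free_ctrl()
 *   --------------------            --------------------
 *   kzalloc_node(dev)          <->  kfree(dev)
 *   kcalloc_node(dev->queues)  <->  kfree(dev->queues)
 *   get_device(&pdev->dev)     <->  put_device(dev->dev)
 *   nvme_init_ctrl()           <->  nvme_uninit_ctrl() in the probe
 *                                   unwind; the final reference drops
 *                                   via nvme_put_ctrl()
 */
```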
```
3236 3218                    .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
3237 3219                            NVME_QUIRK_DISABLE_WRITE_ZEROES|
3238 3220                            NVME_QUIRK_IGNORE_DEV_SUBNQN, },
     3221   +        { PCI_DEVICE(0x1987, 0x5013),   /* Phison E13 */
     3222   +                .driver_data = NVME_QUIRK_LIMIT_IOQD32},
3239 3223            { PCI_DEVICE(0x1987, 0x5016),   /* Phison E16 */
3240 3224                    .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
3241 3225                            NVME_QUIRK_BOGUS_NID, },
```
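The quirk table routes per-device flags through `pci_device_id.driver_data`; `nvme_pci_alloc_dev()` picks them up via `quirks = id->driver_data`, and the earlier hunk in the controller-enable path applies the depth clamp when the flag is set. The flag itself would be declared alongside the other `NVME_QUIRK_*` bits in drivers/nvme/host/nvme.h; a hypothetical sketch (the bit position is an assumption, not taken from the patch):

```c
/* Hypothetical: the device cannot handle I/O queues deeper than 32
 * entries, so clamp io_queue_depth before sizing the queues. */
enum {
	NVME_QUIRK_LIMIT_IOQD32 = (1 << 21),
};
```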
|---|