2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
--- a/kernel/drivers/nvme/host/core.c
+++ b/kernel/drivers/nvme/host/core.c
@@ -723,16 +723,26 @@
 		range = page_address(ns->ctrl->discard_page);
 	}
 
-	__rq_for_each_bio(bio, req) {
-		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
-		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+	if (queue_max_discard_segments(req->q) == 1) {
+		u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
+		u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
 
-		if (n < segments) {
-			range[n].cattr = cpu_to_le32(0);
-			range[n].nlb = cpu_to_le32(nlb);
-			range[n].slba = cpu_to_le64(slba);
+		range[0].cattr = cpu_to_le32(0);
+		range[0].nlb = cpu_to_le32(nlb);
+		range[0].slba = cpu_to_le64(slba);
+		n = 1;
+	} else {
+		__rq_for_each_bio(bio, req) {
+			u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+			u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+
+			if (n < segments) {
+				range[n].cattr = cpu_to_le32(0);
+				range[n].nlb = cpu_to_le32(nlb);
+				range[n].slba = cpu_to_le64(slba);
+			}
+			n++;
 		}
-		n++;
 	}
 
 	if (WARN_ON_ONCE(n != segments)) {
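
The new fast path handles the common case where the queue advertises a single discard segment: such a request is one contiguous range, so it can be described straight from blk_rq_pos()/blk_rq_sectors() without walking bios. Both helpers report 512-byte sector units, while the NVMe range wants device LBAs, hence the shift by (ns->lba_shift - 9), the same conversion nvme_sect_to_lba() performs. A standalone sketch of that arithmetic, with made-up values (userspace C, not the kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrates the 512-byte-sector to device-LBA conversion used by the
     * single-segment discard path above; all values are hypothetical. */
    int main(void)
    {
            uint32_t lba_shift = 12;    /* assumed 4096-byte LBA format */
            uint64_t rq_pos = 2048;     /* request start in 512-byte sectors */
            uint32_t rq_sectors = 1024; /* request length in 512-byte sectors */

            /* sectors >> (lba_shift - 9), as in nvme_sect_to_lba() */
            uint64_t slba = rq_pos >> (lba_shift - 9);
            uint32_t nlb = rq_sectors >> (lba_shift - 9);

            printf("slba=%llu nlb=%u\n", (unsigned long long)slba, nlb);
            /* prints: slba=256 nlb=128 (512 KiB expressed as 4 KiB LBAs) */
            return 0;
    }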
@@ -4406,11 +4416,19 @@
 	nvme_get_fw_slot_info(ctrl);
 }
 
+static u32 nvme_aer_type(u32 result)
+{
+	return result & 0x7;
+}
+
+static u32 nvme_aer_subtype(u32 result)
+{
+	return (result & 0xff00) >> 8;
+}
+
 static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
 {
-	u32 aer_notice_type = (result & 0xff00) >> 8;
-
-	trace_nvme_async_event(ctrl, aer_notice_type);
+	u32 aer_notice_type = nvme_aer_subtype(result);
 
 	switch (aer_notice_type) {
 	case NVME_AER_NOTICE_NS_CHANGED:
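
The two new helpers name the bit twiddling that nvme_complete_async_event() and nvme_handle_aen_notice() previously open-coded: the asynchronous event type sits in the low three bits of the completion result dword, and the event-information byte in bits 15:8. A standalone sketch of the decode (the result value is a made-up example):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors nvme_aer_type()/nvme_aer_subtype() on an example dword. */
    static uint32_t aer_type(uint32_t result)    { return result & 0x7; }
    static uint32_t aer_subtype(uint32_t result) { return (result & 0xff00) >> 8; }

    int main(void)
    {
            uint32_t result = 0x0302; /* hypothetical completion result */

            printf("type=%u subtype=%u\n", aer_type(result), aer_subtype(result));
            /* prints: type=2 subtype=3 */
            return 0;
    }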
@@ -4441,24 +4459,40 @@
 	}
 }
 
+static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
+{
+	dev_warn(ctrl->device, "resetting controller due to AER\n");
+	nvme_reset_ctrl(ctrl);
+}
+
 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 		volatile union nvme_result *res)
 {
 	u32 result = le32_to_cpu(res->u32);
-	u32 aer_type = result & 0x07;
+	u32 aer_type = nvme_aer_type(result);
+	u32 aer_subtype = nvme_aer_subtype(result);
 
 	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
 		return;
 
+	trace_nvme_async_event(ctrl, result);
 	switch (aer_type) {
 	case NVME_AER_NOTICE:
 		nvme_handle_aen_notice(ctrl, result);
 		break;
 	case NVME_AER_ERROR:
+		/*
+		 * For a persistent internal error, don't run async_event_work
+		 * to submit a new AER. The controller reset will do it.
+		 */
+		if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
+			nvme_handle_aer_persistent_error(ctrl);
+			return;
+		}
+		fallthrough;
 	case NVME_AER_SMART:
 	case NVME_AER_CSS:
 	case NVME_AER_VS:
-		trace_nvme_async_event(ctrl, aer_type);
 		ctrl->aen_result = result;
 		break;
 	default:
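
Two behavioral changes ride along with the new error handling: trace_nvme_async_event() now fires once, before the switch, with the full result dword (previously it was called per-branch with an already-masked value), and an AER reporting a persistent internal error resets the controller instead of stashing the result and re-arming a new AER, since the reset path will submit one itself. A toy model of the resulting dispatch, with stub functions and an assumed value of 0x03 for NVME_AER_ERROR_PERSIST_INT_ERR:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of the dispatch flow above; constants and stubs are
     * illustrative, not the kernel's. */
    enum { AER_ERROR = 0, AER_SMART = 1 };
    enum { ERR_PERSIST_INTERNAL = 0x03 }; /* assumed spec encoding */

    static void reset_controller(void)   { puts("resetting controller due to AER"); }
    static void stash_result(uint32_t r) { printf("aen_result=0x%x, re-arm AER\n", r); }

    static void complete_async_event(uint32_t result)
    {
            uint32_t type = result & 0x7;
            uint32_t subtype = (result & 0xff00) >> 8;

            switch (type) {
            case AER_ERROR:
                    if (subtype == ERR_PERSIST_INTERNAL) {
                            reset_controller(); /* no new AER submitted here */
                            return;
                    }
                    /* fall through: other errors behave like SMART/VS events */
            case AER_SMART:
            default:
                    stash_result(result);
                    break;
            }
    }

    int main(void)
    {
            complete_async_event(0x0300); /* persistent internal error -> reset */
            complete_async_event(0x0001); /* SMART event -> stash and re-arm */
            return 0;
    }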