2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/dma/ioat/dma.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Intel I/OAT DMA Linux driver
  * Copyright(c) 2004 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
  */
 
 /*
@@ -38,11 +26,11 @@
 
 #include "../dmaengine.h"
 
-int completion_timeout = 200;
+static int completion_timeout = 200;
 module_param(completion_timeout, int, 0644);
 MODULE_PARM_DESC(completion_timeout,
 		"set ioat completion timeout [msec] (default 200 [msec])");
-int idle_timeout = 2000;
+static int idle_timeout = 2000;
 module_param(idle_timeout, int, 0644);
 MODULE_PARM_DESC(idle_timeout,
 		"set ioat idel timeout [msec] (default 2000 [msec])");
@@ -177,7 +165,7 @@
 	tasklet_kill(&ioat_chan->cleanup_task);
 
 	/* final cleanup now that everything is quiesced and can't re-arm */
-	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
+	ioat_cleanup_event(&ioat_chan->cleanup_task);
 }
 
 static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
@@ -205,7 +193,7 @@
 
 /**
  * ioat_update_pending - log pending descriptors
- * @ioat: ioat+ channel
+ * @ioat_chan: ioat+ channel
  *
  * Check if the number of unsubmitted descriptors has exceeded the
  * watermark. Called with prep_lock held
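
The kernel-doc tag is renamed because scripts/kernel-doc cross-checks every "@name:" against the function's actual parameter list; the parameter here is ioat_chan, so "@ioat:" draws a "function parameter not described" warning. A minimal illustration with a hypothetical helper:

/**
 * demo_clamp_timeout - clamp a timeout to a sane minimum
 * @timeout_ms: requested timeout in milliseconds; the tag must be
 *              spelled exactly like the parameter, or kernel-doc warns
 */
static int demo_clamp_timeout(int timeout_ms)
{
	return timeout_ms < 1 ? 1 : timeout_ms;
}

The same rule drives the @ioat -> @ioat_chan renames in the hunks below.
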
@@ -356,8 +344,8 @@
 	u8 *pos;
 	off_t offs;
 
-	chunk = idx / IOAT_DESCS_PER_2M;
-	idx &= (IOAT_DESCS_PER_2M - 1);
+	chunk = idx / IOAT_DESCS_PER_CHUNK;
+	idx &= (IOAT_DESCS_PER_CHUNK - 1);
 	offs = idx * IOAT_DESC_SZ;
 	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
 	phys = ioat_chan->descs[chunk].hw + offs;
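
The rename from IOAT_DESCS_PER_2M to IOAT_DESCS_PER_CHUNK tracks the new IOAT_CHUNK_SIZE constant, and the index math relies on the per-chunk descriptor count being a power of two, so the & is an exact modulo. A standalone sketch of the arithmetic, assuming the driver's IOAT_DESC_SZ of 64 bytes and a 2 MiB chunk (both defined in dma.h; verify against your tree):

/* chunk/offset math behind descriptor lookup, hypothetical names */
#define DESC_SZ		64			/* IOAT_DESC_SZ */
#define CHUNK_SIZE	(2 * 1024 * 1024)	/* IOAT_CHUNK_SIZE */
#define DESCS_PER_CHUNK	(CHUNK_SIZE / DESC_SZ)	/* 32768, a power of 2 */

static void locate_desc(int idx, int *chunk, long *offs)
{
	*chunk = idx / DESCS_PER_CHUNK;	/* which coherent allocation */
	idx &= DESCS_PER_CHUNK - 1;	/* == idx % DESCS_PER_CHUNK */
	*offs = (long)idx * DESC_SZ;	/* byte offset inside the chunk */
}

/* e.g. idx = 40000: chunk 1, offs = (40000 - 32768) * 64 = 462848 */
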
@@ -384,6 +372,7 @@
 ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
 {
 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
 	struct ioat_ring_ent **ring;
 	int total_descs = 1 << order;
 	int i, chunks;
@@ -393,20 +382,22 @@
 	if (!ring)
 		return NULL;
 
-	ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
+	chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
+	ioat_chan->desc_chunks = chunks;
 
 	for (i = 0; i < chunks; i++) {
 		struct ioat_descs *descs = &ioat_chan->descs[i];
 
 		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
-						 SZ_2M, &descs->hw, flags);
+						 IOAT_CHUNK_SIZE, &descs->hw, flags);
 		if (!descs->virt) {
 			int idx;
 
 			for (idx = 0; idx < i; idx++) {
 				descs = &ioat_chan->descs[idx];
-				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
-						  descs->virt, descs->hw);
+				dma_free_coherent(to_dev(ioat_chan),
+						  IOAT_CHUNK_SIZE,
+						  descs->virt, descs->hw);
 				descs->virt = NULL;
 				descs->hw = 0;
 			}
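
Replacing the literal SZ_2M with IOAT_CHUNK_SIZE keeps the allocation size, the error-path free, and the teardown free (next hunk) from drifting apart. The error path is the usual allocate-all-or-unwind idiom; condensed into a self-contained sketch with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct demo_descs {
	void *virt;
	dma_addr_t hw;
};

/* allocate 'chunks' coherent blocks, freeing everything on failure */
static int demo_alloc_chunks(struct device *dev, struct demo_descs *d,
			     int chunks, size_t chunk_size, gfp_t flags)
{
	int i;

	for (i = 0; i < chunks; i++) {
		d[i].virt = dma_alloc_coherent(dev, chunk_size, &d[i].hw,
					       flags);
		if (!d[i].virt)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0) {	/* free only what was actually allocated */
		dma_free_coherent(dev, chunk_size, d[i].virt, d[i].hw);
		d[i].virt = NULL;
		d[i].hw = 0;
	}
	return -ENOMEM;
}
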
@@ -427,7 +418,7 @@
 
 	for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
 		dma_free_coherent(to_dev(ioat_chan),
-				  SZ_2M,
+				  IOAT_CHUNK_SIZE,
 				  ioat_chan->descs[idx].virt,
 				  ioat_chan->descs[idx].hw);
 		ioat_chan->descs[idx].virt = NULL;
@@ -450,12 +441,23 @@
 	}
 	ring[i]->hw->next = ring[0]->txd.phys;
 
+	/* setup descriptor pre-fetching for v3.4 */
+	if (ioat_dma->cap & IOAT_CAP_DPS) {
+		u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;
+
+		if (chunks == 1)
+			drsctl |= IOAT_CHAN_DRS_AUTOWRAP;
+
+		writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
+
+	}
+
 	return ring;
 }
 
 /**
  * ioat_check_space_lock - verify space and grab ring producer lock
- * @ioat: ioat,3 channel (ring) to operate on
+ * @ioat_chan: ioat,3 channel (ring) to operate on
  * @num_descs: allocation length
  */
 int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
@@ -583,7 +585,8 @@
 
 /**
  * __cleanup - reclaim used descriptors
- * @ioat: channel (ring) to clean
+ * @ioat_chan: channel (ring) to clean
+ * @phys_complete: zeroed (or not) completion address (from status)
  */
 static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
 {
@@ -653,7 +656,7 @@
 	if (active - i == 0) {
 		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
 			__func__);
-		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+		mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
 	}
 
 	/* microsecond delay by sysfs variable per pending descriptor */
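
This hunk and the two like it below swap mod_timer() for mod_timer_pending(). The difference: mod_timer() arms the timer unconditionally, while mod_timer_pending() only updates a timer that is still pending and is a no-op for an inactive one, so a racing cleanup path can no longer re-arm the watchdog after teardown has already deleted it. The safe variant, sketched with a hypothetical channel type:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_chan {
	struct timer_list timer;
};

/* push out the deadline only if the watchdog is still armed; after
 * teardown has run del_timer_sync() this does nothing, whereas
 * mod_timer() would re-arm it and let it fire on freed channel state */
static void demo_extend_deadline(struct demo_chan *chan, unsigned int ms)
{
	mod_timer_pending(&chan->timer, jiffies + msecs_to_jiffies(ms));
}
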
@@ -679,7 +682,7 @@
 
 	if (chanerr &
 	    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
-		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+		mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
 		ioat_eh(ioat_chan);
 	}
 }
@@ -687,9 +690,9 @@
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
 }
 
-void ioat_cleanup_event(unsigned long data)
+void ioat_cleanup_event(struct tasklet_struct *t)
 {
-	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
+	struct ioatdma_chan *ioat_chan = from_tasklet(ioat_chan, t, cleanup_task);
 
 	ioat_cleanup(ioat_chan);
 	if (!test_bit(IOAT_RUN, &ioat_chan->state))
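
This is the mechanical half of the kernel-wide tasklet API conversion: the callback now receives the tasklet_struct itself instead of an opaque unsigned long, and from_tasklet() (a container_of() wrapper) recovers the enclosing channel. It also explains the teardown hunk earlier, which now passes &ioat_chan->cleanup_task. Both halves of the pattern, sketched with hypothetical names:

#include <linux/interrupt.h>

struct demo_chan {
	struct tasklet_struct cleanup_task;
	/* ... channel state ... */
};

/* new-style callback: typed argument, container_of() under the hood */
static void demo_cleanup_event(struct tasklet_struct *t)
{
	struct demo_chan *chan = from_tasklet(chan, t, cleanup_task);

	(void)chan;	/* stand-in for real descriptor reclaim */
}

static void demo_chan_init(struct demo_chan *chan)
{
	/* replaces tasklet_init(&t, fn, (unsigned long)chan) */
	tasklet_setup(&chan->cleanup_task, demo_cleanup_event);
}
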
@@ -876,7 +879,24 @@
 	}
 
 	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
-		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+		mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+}
+
+static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
+{
+	spin_lock_bh(&ioat_chan->prep_lock);
+	set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+
+	ioat_abort_descs(ioat_chan);
+	dev_warn(to_dev(ioat_chan), "Reset channel...\n");
+	ioat_reset_hw(ioat_chan);
+	dev_warn(to_dev(ioat_chan), "Restart channel...\n");
+	ioat_restart_channel(ioat_chan);
+
+	spin_lock_bh(&ioat_chan->prep_lock);
+	clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
 }
 
 void ioat_timer_event(struct timer_list *t)
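
Factoring out ioat_reboot_chan() deduplicates the reset sequence used twice in ioat_timer_event(): mark the channel down under prep_lock so producers stop submitting, abort and reset with the ring quiesced, then lift the flag. The producer side of that handshake, schematically (hypothetical names; in this driver the prep routines perform the equivalent IOAT_CHAN_DOWN check):

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/types.h>

enum { DEMO_CHAN_DOWN = 0 };

struct demo_chan {
	unsigned long state;
	spinlock_t prep_lock;
};

/* refuse new work while the reset sequence holds the bit set */
static bool demo_chan_may_prep(struct demo_chan *chan)
{
	bool up;

	spin_lock_bh(&chan->prep_lock);
	up = !test_bit(DEMO_CHAN_DOWN, &chan->state);
	spin_unlock_bh(&chan->prep_lock);
	return up;
}
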
@@ -901,19 +921,7 @@
 
 	if (test_bit(IOAT_RUN, &ioat_chan->state)) {
 		spin_lock_bh(&ioat_chan->cleanup_lock);
-		spin_lock_bh(&ioat_chan->prep_lock);
-		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-		spin_unlock_bh(&ioat_chan->prep_lock);
-
-		ioat_abort_descs(ioat_chan);
-		dev_warn(to_dev(ioat_chan), "Reset channel...\n");
-		ioat_reset_hw(ioat_chan);
-		dev_warn(to_dev(ioat_chan), "Restart channel...\n");
-		ioat_restart_channel(ioat_chan);
-
-		spin_lock_bh(&ioat_chan->prep_lock);
-		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-		spin_unlock_bh(&ioat_chan->prep_lock);
+		ioat_reboot_chan(ioat_chan);
 		spin_unlock_bh(&ioat_chan->cleanup_lock);
 	}
 
@@ -927,17 +935,23 @@
 		spin_lock_bh(&ioat_chan->prep_lock);
 		check_active(ioat_chan);
 		spin_unlock_bh(&ioat_chan->prep_lock);
-		spin_unlock_bh(&ioat_chan->cleanup_lock);
-		return;
+		goto unlock_out;
+	}
+
+	/* handle the missed cleanup case */
+	if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) {
+		/* timer restarted in ioat_cleanup_preamble
+		 * and IOAT_COMPLETION_ACK cleared
+		 */
+		__cleanup(ioat_chan, phys_complete);
+		goto unlock_out;
 	}
 
 	/* if we haven't made progress and we have already
 	 * acknowledged a pending completion once, then be more
 	 * forceful with a restart
 	 */
-	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
-		__cleanup(ioat_chan, phys_complete);
-	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+	if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
 		u32 chanerr;
 
 		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -949,25 +963,23 @@
 		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
 			ioat_ring_active(ioat_chan));
 
+		ioat_reboot_chan(ioat_chan);
+
+		goto unlock_out;
+	}
+
+	/* handle missed issue pending case */
+	if (ioat_ring_pending(ioat_chan)) {
+		dev_warn(to_dev(ioat_chan),
+			 "Completion timeout with pending descriptors\n");
 		spin_lock_bh(&ioat_chan->prep_lock);
-		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+		__ioat_issue_pending(ioat_chan);
 		spin_unlock_bh(&ioat_chan->prep_lock);
+	}
 
-		ioat_abort_descs(ioat_chan);
-		dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
-		ioat_reset_hw(ioat_chan);
-		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
-		ioat_restart_channel(ioat_chan);
-
-		spin_lock_bh(&ioat_chan->prep_lock);
-		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-		spin_unlock_bh(&ioat_chan->prep_lock);
-		spin_unlock_bh(&ioat_chan->cleanup_lock);
-		return;
-	} else
-		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
-
+	set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+unlock_out:
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
 }
 
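
The net effect of the last two hunks is control flow, not behavior: the missed-cleanup check moves ahead of the restart logic, a missed-issue-pending case is added, and every early "unlock + return" collapses into one unlock_out label, the usual kernel idiom for keeping lock/unlock pairing auditable. The single-exit shape, reduced to a skeleton with hypothetical stubs:

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_chan {
	spinlock_t cleanup_lock;
};

static bool demo_chan_halted(struct demo_chan *c)   { return false; }
static bool demo_cleanup_ready(struct demo_chan *c) { return false; }
static void demo_handle_halt(struct demo_chan *c)   { }
static void demo_cleanup(struct demo_chan *c)       { }
static void demo_rearm(struct demo_chan *c)         { }

static void demo_timer_event(struct demo_chan *chan)
{
	spin_lock_bh(&chan->cleanup_lock);

	if (demo_chan_halted(chan)) {
		demo_handle_halt(chan);		/* reset + restart */
		goto unlock_out;
	}

	if (demo_cleanup_ready(chan)) {
		demo_cleanup(chan);		/* missed cleanup case */
		goto unlock_out;
	}

	demo_rearm(chan);			/* normal re-arm path */
unlock_out:
	spin_unlock_bh(&chan->cleanup_lock);	/* single unlock site */
}
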