+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Intel I/OAT DMA Linux driver
  * Copyright(c) 2004 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
 */

 /*
...
 #include "../dmaengine.h"

-int completion_timeout = 200;
+static int completion_timeout = 200;
 module_param(completion_timeout, int, 0644);
 MODULE_PARM_DESC(completion_timeout,
                 "set ioat completion timeout [msec] (default 200 [msec])");
-int idle_timeout = 2000;
+static int idle_timeout = 2000;
 module_param(idle_timeout, int, 0644);
 MODULE_PARM_DESC(idle_timeout,
                 "set ioat idel timeout [msec] (default 2000 [msec])");
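Marking the two parameters static removes them from the kernel's global symbol namespace without changing their behavior: module_param() still publishes them under /sys/module/. A minimal sketch of the pattern, using a hypothetical demo_timeout parameter (not part of this patch):

    /* Hypothetical stand-alone module illustrating the pattern above. */
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* static: no symbol is exported to the rest of the kernel, yet
     * module_param() still exposes the value at
     * /sys/module/<module>/parameters/demo_timeout
     * (mode 0644: root may write, everyone may read). */
    static int demo_timeout = 200;
    module_param(demo_timeout, int, 0644);
    MODULE_PARM_DESC(demo_timeout, "demo timeout [msec] (default 200 [msec])");

    MODULE_LICENSE("GPL");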
...
         tasklet_kill(&ioat_chan->cleanup_task);

         /* final cleanup now that everything is quiesced and can't re-arm */
-        ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
+        ioat_cleanup_event(&ioat_chan->cleanup_task);
 }

 static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
...

 /**
  * ioat_update_pending - log pending descriptors
- * @ioat: ioat+ channel
+ * @ioat_chan: ioat+ channel
  *
  * Check if the number of unsubmitted descriptors has exceeded the
  * watermark. Called with prep_lock held
...
         u8 *pos;
         off_t offs;

-        chunk = idx / IOAT_DESCS_PER_2M;
-        idx &= (IOAT_DESCS_PER_2M - 1);
+        chunk = idx / IOAT_DESCS_PER_CHUNK;
+        idx &= (IOAT_DESCS_PER_CHUNK - 1);
         offs = idx * IOAT_DESC_SZ;
         pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
         phys = ioat_chan->descs[chunk].hw + offs;
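The rename from IOAT_DESCS_PER_2M to IOAT_DESCS_PER_CHUNK makes the indexing arithmetic generic over the chunk size: a ring index is split into a chunk number by division and an intra-chunk byte offset by masking, which is only valid because the descriptor count per chunk is a power of two. A user-space sketch of the same math, with illustrative sizes (the actual descriptor and chunk sizes here are assumptions, not taken from the patch):

    /* User-space sketch of the chunk/offset split, illustrative sizes. */
    #include <stdio.h>

    #define DESC_SZ         64                      /* bytes per descriptor (assumed) */
    #define CHUNK_SIZE      (2 * 1024 * 1024)       /* bytes per chunk (assumed 2 MB) */
    #define DESCS_PER_CHUNK (CHUNK_SIZE / DESC_SZ)  /* 32768, a power of two */

    int main(void)
    {
            unsigned int idx = 40000; /* a ring index past the first chunk */
            unsigned int chunk = idx / DESCS_PER_CHUNK;           /* -> 1 */
            unsigned int offs = (idx & (DESCS_PER_CHUNK - 1)) * DESC_SZ;

            printf("chunk=%u offset=%u\n", chunk, offs); /* chunk=1 offset=462848 */
            return 0;
    }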
...
 ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
 {
         struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
         struct ioat_ring_ent **ring;
         int total_descs = 1 << order;
         int i, chunks;
...
         if (!ring)
                 return NULL;

-        ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
+        chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
+        ioat_chan->desc_chunks = chunks;

         for (i = 0; i < chunks; i++) {
                 struct ioat_descs *descs = &ioat_chan->descs[i];

                 descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
-                                                 SZ_2M, &descs->hw, flags);
+                                                 IOAT_CHUNK_SIZE, &descs->hw, flags);
                 if (!descs->virt) {
                         int idx;

                         for (idx = 0; idx < i; idx++) {
                                 descs = &ioat_chan->descs[idx];
-                                dma_free_coherent(to_dev(ioat_chan), SZ_2M,
-                                                  descs->virt, descs->hw);
+                                dma_free_coherent(to_dev(ioat_chan),
+                                                  IOAT_CHUNK_SIZE,
+                                                  descs->virt, descs->hw);
                                 descs->virt = NULL;
                                 descs->hw = 0;
                         }
...

         for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
                 dma_free_coherent(to_dev(ioat_chan),
-                                  SZ_2M,
+                                  IOAT_CHUNK_SIZE,
                                   ioat_chan->descs[idx].virt,
                                   ioat_chan->descs[idx].hw);
                 ioat_chan->descs[idx].virt = NULL;
...
         }
         ring[i]->hw->next = ring[0]->txd.phys;

+        /* setup descriptor pre-fetching for v3.4 */
+        if (ioat_dma->cap & IOAT_CAP_DPS) {
+                u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;
+
+                if (chunks == 1)
+                        drsctl |= IOAT_CHAN_DRS_AUTOWRAP;
+
+                writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
+
+        }
+
         return ring;
 }

 /**
  * ioat_check_space_lock - verify space and grab ring producer lock
- * @ioat: ioat,3 channel (ring) to operate on
+ * @ioat_chan: ioat,3 channel (ring) to operate on
  * @num_descs: allocation length
  */
 int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
...

 /**
  * __cleanup - reclaim used descriptors
- * @ioat: channel (ring) to clean
+ * @ioat_chan: channel (ring) to clean
+ * @phys_complete: zeroed (or not) completion address (from status)
  */
 static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
 {
...
         if (active - i == 0) {
                 dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
                         __func__);
-                mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+                mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
         }

         /* microsecond delay by sysfs variable per pending descriptor */
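The mod_timer() -> mod_timer_pending() conversions throughout this patch are a teardown-safety fix: mod_timer() arms the timer even when it is inactive, so a late cleanup pass could re-arm a timer that the shutdown path has already deleted, whereas mod_timer_pending() only adjusts a timer that is still queued and is a no-op otherwise. A minimal sketch of the distinction, using a hypothetical demo_chan:

    /* Sketch of mod_timer() vs. mod_timer_pending(), illustrative only. */
    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct demo_chan {
            struct timer_list timer;
    };

    static void demo_rearm(struct demo_chan *chan, unsigned long timeout)
    {
            /* mod_timer(): (re)activates the timer even if it is not
             * pending -- dangerous after del_timer_sync() in teardown. */
            mod_timer(&chan->timer, jiffies + timeout);

            /* mod_timer_pending(): only pushes out a timer that is still
             * queued; if teardown already deleted it, nothing happens. */
            mod_timer_pending(&chan->timer, jiffies + timeout);
    }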
...

                 if (chanerr &
                     (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
-                        mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+                        mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
                         ioat_eh(ioat_chan);
                 }
         }
...
         spin_unlock_bh(&ioat_chan->cleanup_lock);
 }

-void ioat_cleanup_event(unsigned long data)
+void ioat_cleanup_event(struct tasklet_struct *t)
 {
-        struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
+        struct ioatdma_chan *ioat_chan = from_tasklet(ioat_chan, t, cleanup_task);

         ioat_cleanup(ioat_chan);
         if (!test_bit(IOAT_RUN, &ioat_chan->state))
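This hunk is the standard conversion to the typed tasklet API: the callback now receives the tasklet_struct pointer and recovers its containing structure with from_tasklet(), a container_of() wrapper, instead of casting an unsigned long back to a pointer. A minimal sketch of the pattern with a hypothetical demo_chan:

    /* Sketch of the tasklet_setup()/from_tasklet() callback pattern. */
    #include <linux/interrupt.h>

    struct demo_chan {
            struct tasklet_struct cleanup_task;
            /* ... channel state ... */
    };

    /* from_tasklet() maps the tasklet_struct pointer back to the
     * demo_chan that embeds it as its cleanup_task member. */
    static void demo_cleanup_event(struct tasklet_struct *t)
    {
            struct demo_chan *chan = from_tasklet(chan, t, cleanup_task);

            /* ... clean up chan ... */
    }

    static void demo_chan_init(struct demo_chan *chan)
    {
            /* tasklet_setup() registers the typed callback; no data
             * argument needs to be smuggled through an unsigned long. */
            tasklet_setup(&chan->cleanup_task, demo_cleanup_event);
    }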
...
         }

         if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
-                mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+                mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+}
+
+static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
+{
+        spin_lock_bh(&ioat_chan->prep_lock);
+        set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+        spin_unlock_bh(&ioat_chan->prep_lock);
+
+        ioat_abort_descs(ioat_chan);
+        dev_warn(to_dev(ioat_chan), "Reset channel...\n");
+        ioat_reset_hw(ioat_chan);
+        dev_warn(to_dev(ioat_chan), "Restart channel...\n");
+        ioat_restart_channel(ioat_chan);
+
+        spin_lock_bh(&ioat_chan->prep_lock);
+        clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+        spin_unlock_bh(&ioat_chan->prep_lock);
 }

 void ioat_timer_event(struct timer_list *t)
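The new ioat_reboot_chan() helper centralizes the recovery sequence: raise IOAT_CHAN_DOWN under prep_lock so submission paths back off, abort and reset the quiesced channel, then clear the bit the same way; callers (see the following hunks) keep holding cleanup_lock across the whole call. A sketch of how a submission path can honor that gate -- illustrative only, since the driver's actual prep functions may differ in detail:

    /* Illustrative: reject new work while the channel is marked down. */
    static struct dma_async_tx_descriptor *
    demo_prep(struct ioatdma_chan *ioat_chan)
    {
            if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
                    return NULL;   /* channel is being rebooted */

            /* ... normal descriptor preparation ... */
            return NULL;           /* placeholder */
    }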
...

         if (test_bit(IOAT_RUN, &ioat_chan->state)) {
                 spin_lock_bh(&ioat_chan->cleanup_lock);
-                spin_lock_bh(&ioat_chan->prep_lock);
-                set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-                spin_unlock_bh(&ioat_chan->prep_lock);
-
-                ioat_abort_descs(ioat_chan);
-                dev_warn(to_dev(ioat_chan), "Reset channel...\n");
-                ioat_reset_hw(ioat_chan);
-                dev_warn(to_dev(ioat_chan), "Restart channel...\n");
-                ioat_restart_channel(ioat_chan);
-
-                spin_lock_bh(&ioat_chan->prep_lock);
-                clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-                spin_unlock_bh(&ioat_chan->prep_lock);
+                ioat_reboot_chan(ioat_chan);
                 spin_unlock_bh(&ioat_chan->cleanup_lock);
         }

...
                 spin_lock_bh(&ioat_chan->prep_lock);
                 check_active(ioat_chan);
                 spin_unlock_bh(&ioat_chan->prep_lock);
-                spin_unlock_bh(&ioat_chan->cleanup_lock);
-                return;
+                goto unlock_out;
+        }
+
+        /* handle the missed cleanup case */
+        if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) {
+                /* timer restarted in ioat_cleanup_preamble
+                 * and IOAT_COMPLETION_ACK cleared
+                 */
+                __cleanup(ioat_chan, phys_complete);
+                goto unlock_out;
         }

         /* if we haven't made progress and we have already
          * acknowledged a pending completion once, then be more
          * forceful with a restart
          */
-        if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
-                __cleanup(ioat_chan, phys_complete);
-        else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+        if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
                 u32 chanerr;

                 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
...
                 dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
                         ioat_ring_active(ioat_chan));

+                ioat_reboot_chan(ioat_chan);
+
+                goto unlock_out;
+        }
+
+        /* handle missed issue pending case */
+        if (ioat_ring_pending(ioat_chan)) {
+                dev_warn(to_dev(ioat_chan),
+                         "Completion timeout with pending descriptors\n");
                 spin_lock_bh(&ioat_chan->prep_lock);
-                set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+                __ioat_issue_pending(ioat_chan);
                 spin_unlock_bh(&ioat_chan->prep_lock);
+        }

-                ioat_abort_descs(ioat_chan);
-                dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
-                ioat_reset_hw(ioat_chan);
-                dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
-                ioat_restart_channel(ioat_chan);
-
-                spin_lock_bh(&ioat_chan->prep_lock);
-                clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
-                spin_unlock_bh(&ioat_chan->prep_lock);
-                spin_unlock_bh(&ioat_chan->cleanup_lock);
-                return;
-        } else
-                set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
-
+        set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
         mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+unlock_out:
         spin_unlock_bh(&ioat_chan->cleanup_lock);
 }

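The restructured ioat_timer_event() replaces the old if / else-if / else chain with a sequence of independent checks that each bail out through the shared unlock_out label, so cleanup_lock is released in exactly one place. A generic sketch of this single-exit idiom:

    /* Generic sketch of the "goto unlock" single-exit idiom used above. */
    #include <linux/spinlock.h>

    static void demo_event(spinlock_t *lock, bool done, bool handled)
    {
            spin_lock_bh(lock);

            if (done)           /* first case: leave via the common exit */
                    goto unlock_out;

            if (handled)        /* second case: same exit path */
                    goto unlock_out;

            /* ... default work, still under the lock ... */

    unlock_out:
            spin_unlock_bh(lock);
    }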