.. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
1 | 2 | /* |
2 | 3 | * DMA driver for Xilinx Video DMA Engine |
3 | 4 | * |
.. | .. |
25 | 26 | * Access (DMA) between a memory-mapped source address and a memory-mapped |
26 | 27 | * destination address. |
27 | 28 | * |
28 | | - * This program is free software: you can redistribute it and/or modify |
29 | | - * it under the terms of the GNU General Public License as published by |
30 | | - * the Free Software Foundation, either version 2 of the License, or |
31 | | - * (at your option) any later version. |
| 29 | + * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft |
| 30 | + * Xilinx IP that provides high-bandwidth direct memory access between |
| 31 | + * memory and AXI4-Stream target peripherals. It provides a scatter-gather |
| 32 | + * (SG) interface and supports independent configuration of multiple channels. |
| 33 | + * |
32 | 34 | */ |
33 | 35 | |
34 | 36 | #include <linux/bitops.h> |
.. | .. |
89 | 91 | #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) |
90 | 92 | #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) |
91 | 93 | #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) |
| 94 | +#define XILINX_DMA_DMASR_SG_MASK BIT(3) |
92 | 95 | #define XILINX_DMA_DMASR_IDLE BIT(1) |
93 | 96 | #define XILINX_DMA_DMASR_HALTED BIT(0) |
94 | 97 | #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) |
.. | .. |
122 | 125 | #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0) |
123 | 126 | |
124 | 127 | /* HW specific definitions */ |
125 | | -#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 |
| 128 | +#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20 |
| 129 | +#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2 |
| 130 | +#define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1 |
126 | 131 | |
127 | 132 | #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ |
128 | 133 | (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ |
.. | .. |
164 | 169 | #define XILINX_DMA_REG_BTT 0x28 |
165 | 170 | |
166 | 171 | /* AXI DMA Specific Masks/Bit fields */ |
167 | | -#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0) |
| 172 | +#define XILINX_DMA_MAX_TRANS_LEN_MIN 8 |
| 173 | +#define XILINX_DMA_MAX_TRANS_LEN_MAX 23 |
| 174 | +#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26 |
168 | 175 | #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) |
169 | 176 | #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) |
170 | 177 | #define XILINX_DMA_CR_COALESCE_SHIFT 16 |
.. | .. |
174 | 181 | #define XILINX_DMA_NUM_DESCS 255 |
175 | 182 | #define XILINX_DMA_NUM_APP_WORDS 5 |
176 | 183 | |
177 | | -/* Multi-Channel DMA Descriptor offsets*/ |
178 | | -#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20) |
179 | | -#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20) |
180 | | - |
181 | | -/* Multi-Channel DMA Masks/Shifts */ |
182 | | -#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0) |
183 | | -#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0) |
184 | | -#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19) |
185 | | -#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0) |
186 | | -#define XILINX_DMA_BD_STRIDE_SHIFT 0 |
187 | | -#define XILINX_DMA_BD_VSIZE_SHIFT 19 |
188 | | - |
189 | 184 | /* AXI CDMA Specific Registers/Offsets */ |
190 | 185 | #define XILINX_CDMA_REG_SRCADDR 0x18 |
191 | 186 | #define XILINX_CDMA_REG_DSTADDR 0x20 |
192 | 187 | |
193 | 188 | /* AXI CDMA Specific Masks */ |
194 | 189 | #define XILINX_CDMA_CR_SGMODE BIT(3) |
| 190 | + |
| 191 | +#define xilinx_prep_dma_addr_t(addr) \ |
| 192 | + ((dma_addr_t)((u64)addr##_##msb << 32 | (addr))) |
| 193 | + |
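The new `xilinx_prep_dma_addr_t()` macro works by token pasting: `addr##_##msb` appends `_msb` to the final token of the argument, so `xilinx_prep_dma_addr_t(hw->src_addr)` reads both `hw->src_addr` and `hw->src_addr_msb` and joins them into one 64-bit bus address. A minimal user-space sketch of the same trick (local type and field names, not the kernel's):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* Same shape as the kernel macro: addr##_##msb pastes "_msb" onto the
 * last token of the argument, so prep_dma_addr_t(d.buf_addr) also
 * reads d.buf_addr_msb. */
#define prep_dma_addr_t(addr) \
	((dma_addr_t)((uint64_t)addr##_##msb << 32 | (addr)))

struct desc {
	uint32_t buf_addr;     /* low 32 bits  */
	uint32_t buf_addr_msb; /* high 32 bits */
};

int main(void)
{
	struct desc d = { .buf_addr = 0xdeadbeef, .buf_addr_msb = 0x1 };

	/* Expands to ((dma_addr_t)((uint64_t)d.buf_addr_msb << 32 | (d.buf_addr))) */
	printf("0x%llx\n", (unsigned long long)prep_dma_addr_t(d.buf_addr));
	return 0; /* prints 0x1deadbeef */
}
```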
| 194 | +/* AXI MCDMA Specific Registers/Offsets */ |
| 195 | +#define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000 |
| 196 | +#define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500 |
| 197 | +#define XILINX_MCDMA_CHEN_OFFSET 0x0008 |
| 198 | +#define XILINX_MCDMA_CH_ERR_OFFSET 0x0010 |
| 199 | +#define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020 |
| 200 | +#define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028 |
| 201 | +#define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40) |
| 202 | +#define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40) |
| 203 | +#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40) |
| 204 | +#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40) |
| 205 | + |
| 206 | +/* AXI MCDMA Specific Masks/Shifts */ |
| 207 | +#define XILINX_MCDMA_COALESCE_SHIFT 16 |
| 208 | +#define XILINX_MCDMA_COALESCE_MAX 24 |
| 209 | +#define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5) |
| 210 | +#define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16) |
| 211 | +#define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0) |
| 212 | +#define XILINX_MCDMA_IRQ_IOC_MASK BIT(5) |
| 213 | +#define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6) |
| 214 | +#define XILINX_MCDMA_IRQ_ERR_MASK BIT(7) |
| 215 | +#define XILINX_MCDMA_BD_EOP BIT(30) |
| 216 | +#define XILINX_MCDMA_BD_SOP BIT(31) |
195 | 217 | |
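These offsets encode the MCDMA register map: one control block per direction (MM2S at 0x0000, S2MM at 0x0500), a shared channel-enable and channel-error block, and then a 0x40-byte stride of CR/SR/CDESC/TDESC registers per channel. A small sketch of the address arithmetic (the helper and the `s2mm` flag are illustrative, not driver code):

```c
#include <stdint.h>
#include <stdio.h>

#define MCDMA_MM2S_CTRL_OFFSET 0x0000
#define MCDMA_S2MM_CTRL_OFFSET 0x0500
#define MCDMA_CHAN_CR_OFFSET(x)    (0x40 + (x) * 0x40)
#define MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40)

/* Hypothetical helper: offset of a per-channel register from the MCDMA
 * core's base; reg_off already encodes the 0x40-byte channel stride. */
static uint32_t chan_reg(int s2mm, uint32_t reg_off)
{
	return (s2mm ? MCDMA_S2MM_CTRL_OFFSET : MCDMA_MM2S_CTRL_OFFSET) + reg_off;
}

int main(void)
{
	/* S2MM channel 2 tail descriptor: 0x500 + 0x50 + 2 * 0x40 = 0x5d0 */
	printf("0x%x\n", chan_reg(1, MCDMA_CHAN_TDESC_OFFSET(2)));
	return 0;
}
```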
196 | 218 | /** |
197 | 219 | * struct xilinx_vdma_desc_hw - Hardware Descriptor |
.. | .. |
220 | 242 | * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 |
221 | 243 | * @buf_addr: Buffer address @0x08 |
222 | 244 | * @buf_addr_msb: MSB of Buffer address @0x0C |
223 | | - * @mcdma_control: Control field for mcdma @0x10 |
224 | | - * @vsize_stride: Vsize and Stride field for mcdma @0x14 |
| 245 | + * @reserved1: Reserved @0x10 |
| 246 | + * @reserved2: Reserved @0x14 |
225 | 247 | * @control: Control field @0x18 |
226 | 248 | * @status: Status field @0x1C |
227 | 249 | * @app: APP Fields @0x20 - 0x30 |
.. | .. |
231 | 253 | u32 next_desc_msb; |
232 | 254 | u32 buf_addr; |
233 | 255 | u32 buf_addr_msb; |
234 | | - u32 mcdma_control; |
235 | | - u32 vsize_stride; |
| 256 | + u32 reserved1; |
| 257 | + u32 reserved2; |
236 | 258 | u32 control; |
237 | 259 | u32 status; |
| 260 | + u32 app[XILINX_DMA_NUM_APP_WORDS]; |
| 261 | +} __aligned(64); |
| 262 | + |
| 263 | +/** |
| 264 | + * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA |
| 265 | + * @next_desc: Next Descriptor Pointer @0x00 |
| 266 | + * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 |
| 267 | + * @buf_addr: Buffer address @0x08 |
| 268 | + * @buf_addr_msb: MSB of Buffer address @0x0C |
| 269 | + * @rsvd: Reserved field @0x10 |
| 270 | + * @control: Control Information field @0x14 |
| 271 | + * @status: Status field @0x18 |
| 272 | + * @sideband_status: Status of sideband signals @0x1C |
| 273 | + * @app: APP Fields @0x20 - 0x30 |
| 274 | + */ |
| 275 | +struct xilinx_aximcdma_desc_hw { |
| 276 | + u32 next_desc; |
| 277 | + u32 next_desc_msb; |
| 278 | + u32 buf_addr; |
| 279 | + u32 buf_addr_msb; |
| 280 | + u32 rsvd; |
| 281 | + u32 control; |
| 282 | + u32 status; |
| 283 | + u32 sideband_status; |
238 | 284 | u32 app[XILINX_DMA_NUM_APP_WORDS]; |
239 | 285 | } __aligned(64); |
240 | 286 | |
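Each `@0xNN` annotation in the kernel-doc above is a hardware-mandated byte offset; it is the field order in the C struct that guarantees it. A standalone mirror of the MCDMA descriptor with compile-time checks (field names copied from the struct above, the scaffolding is mine):

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define NUM_APP_WORDS 5

struct aximcdma_desc_hw {
	uint32_t next_desc;
	uint32_t next_desc_msb;
	uint32_t buf_addr;
	uint32_t buf_addr_msb;
	uint32_t rsvd;
	uint32_t control;
	uint32_t status;
	uint32_t sideband_status;
	uint32_t app[NUM_APP_WORDS];
} __attribute__((aligned(64)));

/* The @0xNN offsets documented in the kernel-doc, checked at compile time */
static_assert(offsetof(struct aximcdma_desc_hw, buf_addr) == 0x08, "");
static_assert(offsetof(struct aximcdma_desc_hw, control) == 0x14, "");
static_assert(offsetof(struct aximcdma_desc_hw, sideband_status) == 0x1C, "");
static_assert(offsetof(struct aximcdma_desc_hw, app) == 0x20, "");
/* aligned(64) pads the 52 data bytes out to one 64-byte BD */
static_assert(sizeof(struct aximcdma_desc_hw) == 64, "");

int main(void) { return 0; }
```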
.. | .. |
285 | 331 | } __aligned(64); |
286 | 332 | |
287 | 333 | /** |
| 334 | + * struct xilinx_aximcdma_tx_segment - Descriptor segment |
| 335 | + * @hw: Hardware descriptor |
| 336 | + * @node: Node in the descriptor segments list |
| 337 | + * @phys: Physical address of segment |
| 338 | + */ |
| 339 | +struct xilinx_aximcdma_tx_segment { |
| 340 | + struct xilinx_aximcdma_desc_hw hw; |
| 341 | + struct list_head node; |
| 342 | + dma_addr_t phys; |
| 343 | +} __aligned(64); |
| 344 | + |
| 345 | +/** |
288 | 346 | * struct xilinx_cdma_tx_segment - Descriptor segment |
289 | 347 | * @hw: Hardware descriptor |
290 | 348 | * @node: Node in the descriptor segments list |
.. | .. |
302 | 360 | * @segments: TX segments list |
303 | 361 | * @node: Node in the channel descriptors list |
304 | 362 | * @cyclic: Check for cyclic transfers. |
| 363 | + * @err: Whether the descriptor has an error. |
| 364 | + * @residue: Residue of the completed descriptor |
305 | 365 | */ |
306 | 366 | struct xilinx_dma_tx_descriptor { |
307 | 367 | struct dma_async_tx_descriptor async_tx; |
308 | 368 | struct list_head segments; |
309 | 369 | struct list_head node; |
310 | 370 | bool cyclic; |
| 371 | + bool err; |
| 372 | + u32 residue; |
311 | 373 | }; |
312 | 374 | |
313 | 375 | /** |
.. | .. |
339 | 401 | * @desc_pendingcount: Descriptor pending count |
340 | 402 | * @ext_addr: Indicates 64 bit addressing is supported by dma channel |
341 | 403 | * @desc_submitcount: Descriptor h/w submitted count |
342 | | - * @residue: Residue for AXI DMA |
343 | 404 | * @seg_v: Statically allocated segments base |
| 405 | + * @seg_mv: Statically allocated segments base for MCDMA |
344 | 406 | * @seg_p: Physical allocated segments base |
345 | 407 | * @cyclic_seg_v: Statically allocated segment base for cyclic transfers |
346 | 408 | * @cyclic_seg_p: Physical allocated segments base for cyclic dma |
.. | .. |
377 | 439 | u32 desc_pendingcount; |
378 | 440 | bool ext_addr; |
379 | 441 | u32 desc_submitcount; |
380 | | - u32 residue; |
381 | 442 | struct xilinx_axidma_tx_segment *seg_v; |
| 443 | + struct xilinx_aximcdma_tx_segment *seg_mv; |
382 | 444 | dma_addr_t seg_p; |
383 | 445 | struct xilinx_axidma_tx_segment *cyclic_seg_v; |
384 | 446 | dma_addr_t cyclic_seg_p; |
.. | .. |
394 | 456 | * @XDMA_TYPE_AXIDMA: Axi dma ip. |
395 | 457 | * @XDMA_TYPE_CDMA: Axi cdma ip. |
396 | 458 | * @XDMA_TYPE_VDMA: Axi vdma ip. |
| 459 | + * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip. |
397 | 460 | * |
398 | 461 | */ |
399 | 462 | enum xdma_ip_type { |
400 | 463 | XDMA_TYPE_AXIDMA = 0, |
401 | 464 | XDMA_TYPE_CDMA, |
402 | 465 | XDMA_TYPE_VDMA, |
| 466 | + XDMA_TYPE_AXIMCDMA |
403 | 467 | }; |
404 | 468 | |
405 | 469 | struct xilinx_dma_config { |
.. | .. |
407 | 471 | int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk, |
408 | 472 | struct clk **tx_clk, struct clk **txs_clk, |
409 | 473 | struct clk **rx_clk, struct clk **rxs_clk); |
| 474 | + irqreturn_t (*irq_handler)(int irq, void *data); |
| 475 | + const int max_channels; |
410 | 476 | }; |
411 | 477 | |
412 | 478 | /** |
.. | .. |
415 | 481 | * @dev: Device Structure |
416 | 482 | * @common: DMA device structure |
417 | 483 | * @chan: Driver specific DMA channel |
418 | | - * @has_sg: Specifies whether Scatter-Gather is present or not |
419 | | - * @mcdma: Specifies whether Multi-Channel is present or not |
420 | 484 | * @flush_on_fsync: Flush on frame sync |
421 | 485 | * @ext_addr: Indicates 64 bit addressing is supported by dma device |
422 | 486 | * @pdev: Platform device structure pointer |
.. | .. |
426 | 490 | * @txs_clk: DMA mm2s stream clock |
427 | 491 | * @rx_clk: DMA s2mm clock |
428 | 492 | * @rxs_clk: DMA s2mm stream clock |
429 | | - * @nr_channels: Number of channels DMA device supports |
430 | | - * @chan_id: DMA channel identifier |
| 493 | + * @s2mm_chan_id: DMA s2mm channel identifier |
| 494 | + * @mm2s_chan_id: DMA mm2s channel identifier |
| 495 | + * @max_buffer_len: Max buffer length |
431 | 496 | */ |
432 | 497 | struct xilinx_dma_device { |
433 | 498 | void __iomem *regs; |
434 | 499 | struct device *dev; |
435 | 500 | struct dma_device common; |
436 | | - struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; |
437 | | - bool has_sg; |
438 | | - bool mcdma; |
| 501 | + struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE]; |
439 | 502 | u32 flush_on_fsync; |
440 | 503 | bool ext_addr; |
441 | 504 | struct platform_device *pdev; |
.. | .. |
445 | 508 | struct clk *txs_clk; |
446 | 509 | struct clk *rx_clk; |
447 | 510 | struct clk *rxs_clk; |
448 | | - u32 nr_channels; |
449 | | - u32 chan_id; |
| 511 | + u32 s2mm_chan_id; |
| 512 | + u32 mm2s_chan_id; |
| 513 | + u32 max_buffer_len; |
450 | 514 | }; |
451 | 515 | |
452 | 516 | /* Macros */ |
.. | .. |
547 | 611 | } |
548 | 612 | } |
549 | 613 | |
| 614 | +static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan, |
| 615 | + struct xilinx_aximcdma_desc_hw *hw, |
| 616 | + dma_addr_t buf_addr, size_t sg_used) |
| 617 | +{ |
| 618 | + if (chan->ext_addr) { |
| 619 | + hw->buf_addr = lower_32_bits(buf_addr + sg_used); |
| 620 | + hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used); |
| 621 | + } else { |
| 622 | + hw->buf_addr = buf_addr + sg_used; |
| 623 | + } |
| 624 | +} |
| 625 | + |
550 | 626 | /* ----------------------------------------------------------------------------- |
551 | 627 | * Descriptors and segments alloc and free |
552 | 628 | */ |
.. | .. |
614 | 690 | } |
615 | 691 | spin_unlock_irqrestore(&chan->lock, flags); |
616 | 692 | |
| 693 | + if (!segment) |
| 694 | + dev_dbg(chan->dev, "Could not find free tx segment\n"); |
| 695 | + |
| 696 | + return segment; |
| 697 | +} |
| 698 | + |
| 699 | +/** |
| 700 | + * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment |
| 701 | + * @chan: Driver specific DMA channel |
| 702 | + * |
| 703 | + * Return: The allocated segment on success and NULL on failure. |
| 704 | + */ |
| 705 | +static struct xilinx_aximcdma_tx_segment * |
| 706 | +xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan) |
| 707 | +{ |
| 708 | + struct xilinx_aximcdma_tx_segment *segment = NULL; |
| 709 | + unsigned long flags; |
| 710 | + |
| 711 | + spin_lock_irqsave(&chan->lock, flags); |
| 712 | + if (!list_empty(&chan->free_seg_list)) { |
| 713 | + segment = list_first_entry(&chan->free_seg_list, |
| 714 | + struct xilinx_aximcdma_tx_segment, |
| 715 | + node); |
| 716 | + list_del(&segment->node); |
| 717 | + } |
| 718 | + spin_unlock_irqrestore(&chan->lock, flags); |
| 719 | + |
617 | 720 | return segment; |
618 | 721 | } |
619 | 722 | |
.. | .. |
628 | 731 | hw->next_desc_msb = next_desc_msb; |
629 | 732 | } |
630 | 733 | |
| 734 | +static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw) |
| 735 | +{ |
| 736 | + u32 next_desc = hw->next_desc; |
| 737 | + u32 next_desc_msb = hw->next_desc_msb; |
| 738 | + |
| 739 | + memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw)); |
| 740 | + |
| 741 | + hw->next_desc = next_desc; |
| 742 | + hw->next_desc_msb = next_desc_msb; |
| 743 | +} |
| 744 | + |
631 | 745 | /** |
632 | 746 | * xilinx_dma_free_tx_segment - Free transaction segment |
633 | 747 | * @chan: Driver specific DMA channel |
.. | .. |
637 | 751 | struct xilinx_axidma_tx_segment *segment) |
638 | 752 | { |
639 | 753 | xilinx_dma_clean_hw_desc(&segment->hw); |
| 754 | + |
| 755 | + list_add_tail(&segment->node, &chan->free_seg_list); |
| 756 | +} |
| 757 | + |
| 758 | +/** |
| 759 | + * xilinx_mcdma_free_tx_segment - Free transaction segment |
| 760 | + * @chan: Driver specific DMA channel |
| 761 | + * @segment: DMA transaction segment |
| 762 | + */ |
| 763 | +static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan, |
| 764 | + struct xilinx_aximcdma_tx_segment * |
| 765 | + segment) |
| 766 | +{ |
| 767 | + xilinx_mcdma_clean_hw_desc(&segment->hw); |
640 | 768 | |
641 | 769 | list_add_tail(&segment->node, &chan->free_seg_list); |
642 | 770 | } |
.. | .. |
695 | 823 | struct xilinx_vdma_tx_segment *segment, *next; |
696 | 824 | struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next; |
697 | 825 | struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next; |
| 826 | + struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next; |
698 | 827 | |
699 | 828 | if (!desc) |
700 | 829 | return; |
.. | .. |
710 | 839 | list_del(&cdma_segment->node); |
711 | 840 | xilinx_cdma_free_tx_segment(chan, cdma_segment); |
712 | 841 | } |
713 | | - } else { |
| 842 | + } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { |
714 | 843 | list_for_each_entry_safe(axidma_segment, axidma_next, |
715 | 844 | &desc->segments, node) { |
716 | 845 | list_del(&axidma_segment->node); |
717 | 846 | xilinx_dma_free_tx_segment(chan, axidma_segment); |
| 847 | + } |
| 848 | + } else { |
| 849 | + list_for_each_entry_safe(aximcdma_segment, aximcdma_next, |
| 850 | + &desc->segments, node) { |
| 851 | + list_del(&aximcdma_segment->node); |
| 852 | + xilinx_mcdma_free_tx_segment(chan, aximcdma_segment); |
718 | 853 | } |
719 | 854 | } |
720 | 855 | |
.. | .. |
784 | 919 | chan->cyclic_seg_v, chan->cyclic_seg_p); |
785 | 920 | } |
786 | 921 | |
787 | | - if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) { |
| 922 | + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { |
| 923 | + spin_lock_irqsave(&chan->lock, flags); |
| 924 | + INIT_LIST_HEAD(&chan->free_seg_list); |
| 925 | + spin_unlock_irqrestore(&chan->lock, flags); |
| 926 | + |
| 927 | + /* Free memory that is allocated for BD */ |
| 928 | + dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) * |
| 929 | + XILINX_DMA_NUM_DESCS, chan->seg_mv, |
| 930 | + chan->seg_p); |
| 931 | + } |
| 932 | + |
| 933 | + if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA && |
| 934 | + chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) { |
788 | 935 | dma_pool_destroy(chan->desc_pool); |
789 | 936 | chan->desc_pool = NULL; |
790 | 937 | } |
| 938 | + |
| 939 | +} |
| 940 | + |
| 941 | +/** |
| 942 | + * xilinx_dma_get_residue - Compute residue for a given descriptor |
| 943 | + * @chan: Driver specific dma channel |
| 944 | + * @desc: dma transaction descriptor |
| 945 | + * |
| 946 | + * Return: The number of residue bytes for the descriptor. |
| 947 | + */ |
| 948 | +static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan, |
| 949 | + struct xilinx_dma_tx_descriptor *desc) |
| 950 | +{ |
| 951 | + struct xilinx_cdma_tx_segment *cdma_seg; |
| 952 | + struct xilinx_axidma_tx_segment *axidma_seg; |
| 953 | + struct xilinx_aximcdma_tx_segment *aximcdma_seg; |
| 954 | + struct xilinx_cdma_desc_hw *cdma_hw; |
| 955 | + struct xilinx_axidma_desc_hw *axidma_hw; |
| 956 | + struct xilinx_aximcdma_desc_hw *aximcdma_hw; |
| 957 | + struct list_head *entry; |
| 958 | + u32 residue = 0; |
| 959 | + |
| 960 | + list_for_each(entry, &desc->segments) { |
| 961 | + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { |
| 962 | + cdma_seg = list_entry(entry, |
| 963 | + struct xilinx_cdma_tx_segment, |
| 964 | + node); |
| 965 | + cdma_hw = &cdma_seg->hw; |
| 966 | + residue += (cdma_hw->control - cdma_hw->status) & |
| 967 | + chan->xdev->max_buffer_len; |
| 968 | + } else if (chan->xdev->dma_config->dmatype == |
| 969 | + XDMA_TYPE_AXIDMA) { |
| 970 | + axidma_seg = list_entry(entry, |
| 971 | + struct xilinx_axidma_tx_segment, |
| 972 | + node); |
| 973 | + axidma_hw = &axidma_seg->hw; |
| 974 | + residue += (axidma_hw->control - axidma_hw->status) & |
| 975 | + chan->xdev->max_buffer_len; |
| 976 | + } else { |
| 977 | + aximcdma_seg = |
| 978 | + list_entry(entry, |
| 979 | + struct xilinx_aximcdma_tx_segment, |
| 980 | + node); |
| 981 | + aximcdma_hw = &aximcdma_seg->hw; |
| 982 | + residue += |
| 983 | + (aximcdma_hw->control - aximcdma_hw->status) & |
| 984 | + chan->xdev->max_buffer_len; |
| 985 | + } |
| 986 | + } |
| 987 | + |
| 988 | + return residue; |
791 | 989 | } |
792 | 990 | |
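`xilinx_dma_get_residue()` exploits the way the hardware completes a buffer descriptor: `control` carries the requested transfer length in its low bits and `status` reports the bytes actually transferred, so `(control - status) & max_buffer_len` is the per-segment shortfall, summed over the descriptor. A worked example with made-up register values, assuming the default 23-bit AXI DMA length field:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed IP configuration: 23-bit length field, i.e.
	 * max_buffer_len = GENMASK(22, 0). */
	uint32_t max_buffer_len = (1u << 23) - 1;

	/* Three segments: requested length (control) vs. bytes actually
	 * transferred (status). Upper control/status bits carry flags,
	 * which the mask strips out. */
	uint32_t control[] = { 0x1000, 0x1000, 0x80000800 };
	uint32_t status[]  = { 0x1000, 0x0c00, 0x80000000 };
	uint32_t residue = 0;

	for (int i = 0; i < 3; i++)
		residue += (control[i] - status[i]) & max_buffer_len;

	printf("residue = 0x%x bytes\n", residue); /* 0x400 + 0x800 = 0xc00 */
	return 0;
}
```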
793 | 991 | /** |
.. | .. |
824 | 1022 | spin_lock_irqsave(&chan->lock, flags); |
825 | 1023 | |
826 | 1024 | list_for_each_entry_safe(desc, next, &chan->done_list, node) { |
827 | | - struct dmaengine_desc_callback cb; |
| 1025 | + struct dmaengine_result result; |
828 | 1026 | |
829 | 1027 | if (desc->cyclic) { |
830 | 1028 | xilinx_dma_chan_handle_cyclic(chan, desc, &flags); |
.. | .. |
834 | 1032 | /* Remove from the list of running transactions */ |
835 | 1033 | list_del(&desc->node); |
836 | 1034 | |
837 | | - /* Run the link descriptor callback function */ |
838 | | - dmaengine_desc_get_callback(&desc->async_tx, &cb); |
839 | | - if (dmaengine_desc_callback_valid(&cb)) { |
840 | | - spin_unlock_irqrestore(&chan->lock, flags); |
841 | | - dmaengine_desc_callback_invoke(&cb, NULL); |
842 | | - spin_lock_irqsave(&chan->lock, flags); |
| 1035 | + if (unlikely(desc->err)) { |
| 1036 | + if (chan->direction == DMA_DEV_TO_MEM) |
| 1037 | + result.result = DMA_TRANS_READ_FAILED; |
| 1038 | + else |
| 1039 | + result.result = DMA_TRANS_WRITE_FAILED; |
| 1040 | + } else { |
| 1041 | + result.result = DMA_TRANS_NOERROR; |
843 | 1042 | } |
| 1043 | + |
| 1044 | + result.residue = desc->residue; |
| 1045 | + |
| 1046 | + /* Run the link descriptor callback function */ |
| 1047 | + spin_unlock_irqrestore(&chan->lock, flags); |
| 1048 | + dmaengine_desc_get_callback_invoke(&desc->async_tx, &result); |
| 1049 | + spin_lock_irqsave(&chan->lock, flags); |
844 | 1050 | |
845 | 1051 | /* Run any dependencies, then free the descriptor */ |
846 | 1052 | dma_run_dependencies(&desc->async_tx); |
.. | .. |
859 | 1065 | |
860 | 1066 | /** |
861 | 1067 | * xilinx_dma_do_tasklet - Schedule completion tasklet |
862 | | - * @data: Pointer to the Xilinx DMA channel structure |
| 1068 | + * @t: Pointer to the tasklet struct embedded in the Xilinx DMA channel structure |
863 | 1069 | */ |
864 | | -static void xilinx_dma_do_tasklet(unsigned long data) |
| 1070 | +static void xilinx_dma_do_tasklet(struct tasklet_struct *t) |
865 | 1071 | { |
866 | | - struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data; |
| 1072 | + struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet); |
867 | 1073 | |
868 | 1074 | xilinx_dma_chan_desc_cleanup(chan); |
869 | 1075 | } |
.. | .. |
889 | 1095 | */ |
890 | 1096 | if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { |
891 | 1097 | /* Allocate the buffer descriptors. */ |
892 | | - chan->seg_v = dma_zalloc_coherent(chan->dev, |
893 | | - sizeof(*chan->seg_v) * |
894 | | - XILINX_DMA_NUM_DESCS, |
895 | | - &chan->seg_p, GFP_KERNEL); |
| 1098 | + chan->seg_v = dma_alloc_coherent(chan->dev, |
| 1099 | + sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS, |
| 1100 | + &chan->seg_p, GFP_KERNEL); |
896 | 1101 | if (!chan->seg_v) { |
897 | 1102 | dev_err(chan->dev, |
898 | 1103 | "unable to allocate channel %d descriptors\n", |
899 | 1104 | chan->id); |
900 | 1105 | return -ENOMEM; |
901 | 1106 | } |
| 1107 | + /* |
| 1108 | + * For cyclic DMA mode we need to program the tail Descriptor |
| 1109 | + * register with a value which is not a part of the BD chain |
| 1110 | + * so allocating a desc segment during channel allocation for |
| 1111 | + * programming tail descriptor. |
| 1112 | + */ |
| 1113 | + chan->cyclic_seg_v = dma_alloc_coherent(chan->dev, |
| 1114 | + sizeof(*chan->cyclic_seg_v), |
| 1115 | + &chan->cyclic_seg_p, |
| 1116 | + GFP_KERNEL); |
| 1117 | + if (!chan->cyclic_seg_v) { |
| 1118 | + dev_err(chan->dev, |
| 1119 | + "unable to allocate desc segment for cyclic DMA\n"); |
| 1120 | + dma_free_coherent(chan->dev, sizeof(*chan->seg_v) * |
| 1121 | + XILINX_DMA_NUM_DESCS, chan->seg_v, |
| 1122 | + chan->seg_p); |
| 1123 | + return -ENOMEM; |
| 1124 | + } |
| 1125 | + chan->cyclic_seg_v->phys = chan->cyclic_seg_p; |
902 | 1126 | |
903 | 1127 | for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { |
904 | 1128 | chan->seg_v[i].hw.next_desc = |
.. | .. |
910 | 1134 | chan->seg_v[i].phys = chan->seg_p + |
911 | 1135 | sizeof(*chan->seg_v) * i; |
912 | 1136 | list_add_tail(&chan->seg_v[i].node, |
| 1137 | + &chan->free_seg_list); |
| 1138 | + } |
| 1139 | + } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { |
| 1140 | + /* Allocate the buffer descriptors. */ |
| 1141 | + chan->seg_mv = dma_alloc_coherent(chan->dev, |
| 1142 | + sizeof(*chan->seg_mv) * |
| 1143 | + XILINX_DMA_NUM_DESCS, |
| 1144 | + &chan->seg_p, GFP_KERNEL); |
| 1145 | + if (!chan->seg_mv) { |
| 1146 | + dev_err(chan->dev, |
| 1147 | + "unable to allocate channel %d descriptors\n", |
| 1148 | + chan->id); |
| 1149 | + return -ENOMEM; |
| 1150 | + } |
| 1151 | + for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { |
| 1152 | + chan->seg_mv[i].hw.next_desc = |
| 1153 | + lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) * |
| 1154 | + ((i + 1) % XILINX_DMA_NUM_DESCS)); |
| 1155 | + chan->seg_mv[i].hw.next_desc_msb = |
| 1156 | + upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) * |
| 1157 | + ((i + 1) % XILINX_DMA_NUM_DESCS)); |
| 1158 | + chan->seg_mv[i].phys = chan->seg_p + |
| 1159 | + sizeof(*chan->seg_mv) * i; |
| 1160 | + list_add_tail(&chan->seg_mv[i].node, |
913 | 1161 | &chan->free_seg_list); |
914 | 1162 | } |
915 | 1163 | } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { |
.. | .. |
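The MCDMA branch above pre-links all XILINX_DMA_NUM_DESCS descriptors into a circular chain at allocation time: entry i points at entry (i + 1) % 255, with the 64-bit bus address split across `next_desc` and `next_desc_msb`. The same arithmetic as a standalone sketch (plain calloc and a made-up bus address stand in for dma_alloc_coherent):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_DESCS 255

struct bd {
	uint32_t next_desc;     /* low 32 bits of next BD's bus address  */
	uint32_t next_desc_msb; /* high 32 bits of next BD's bus address */
};

int main(void)
{
	/* Pretend the coherent pool was handed out at this bus address. */
	uint64_t seg_p = 0x100000000ull;
	struct bd *ring = calloc(NUM_DESCS, sizeof(*ring));

	if (!ring)
		return 1;

	for (int i = 0; i < NUM_DESCS; i++) {
		uint64_t next = seg_p + sizeof(struct bd) *
				((i + 1) % NUM_DESCS); /* wraps to entry 0 */
		ring[i].next_desc = (uint32_t)next;
		ring[i].next_desc_msb = (uint32_t)(next >> 32);
	}

	/* Last entry closes the ring back onto the first descriptor. */
	printf("bd[254].next -> 0x%x%08x\n",
	       ring[NUM_DESCS - 1].next_desc_msb, ring[NUM_DESCS - 1].next_desc);
	free(ring);
	return 0;
}
```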
927 | 1175 | } |
928 | 1176 | |
929 | 1177 | if (!chan->desc_pool && |
930 | | - (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) { |
| 1178 | + ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) && |
| 1179 | + chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) { |
931 | 1180 | dev_err(chan->dev, |
932 | 1181 | "unable to allocate channel %d descriptor pool\n", |
933 | 1182 | chan->id); |
934 | 1183 | return -ENOMEM; |
935 | | - } |
936 | | - |
937 | | - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { |
938 | | - /* |
939 | | - * For cyclic DMA mode we need to program the tail Descriptor |
940 | | - * register with a value which is not a part of the BD chain |
941 | | - * so allocating a desc segment during channel allocation for |
942 | | - * programming tail descriptor. |
943 | | - */ |
944 | | - chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, |
945 | | - sizeof(*chan->cyclic_seg_v), |
946 | | - &chan->cyclic_seg_p, GFP_KERNEL); |
947 | | - if (!chan->cyclic_seg_v) { |
948 | | - dev_err(chan->dev, |
949 | | - "unable to allocate desc segment for cyclic DMA\n"); |
950 | | - return -ENOMEM; |
951 | | - } |
952 | | - chan->cyclic_seg_v->phys = chan->cyclic_seg_p; |
953 | 1184 | } |
954 | 1185 | |
955 | 1186 | dma_cookie_init(dchan); |
.. | .. |
970 | 1201 | } |
971 | 1202 | |
972 | 1203 | /** |
| 1204 | + * xilinx_dma_calc_copysize - Calculate the amount of data to copy |
| 1205 | + * @chan: Driver specific DMA channel |
| 1206 | + * @size: Total data that needs to be copied |
| 1207 | + * @done: Amount of data that has been already copied |
| 1208 | + * |
| 1209 | + * Return: Amount of data that has to be copied |
| 1210 | + */ |
| 1211 | +static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan, |
| 1212 | + int size, int done) |
| 1213 | +{ |
| 1214 | + size_t copy; |
| 1215 | + |
| 1216 | + copy = min_t(size_t, size - done, |
| 1217 | + chan->xdev->max_buffer_len); |
| 1218 | + |
| 1219 | + if ((copy + done < size) && |
| 1220 | + chan->xdev->common.copy_align) { |
| 1221 | + /* |
| 1222 | + * If this is not the last descriptor, make sure |
| 1223 | + * the next one will be properly aligned |
| 1224 | + */ |
| 1225 | + copy = rounddown(copy, |
| 1226 | + (1 << chan->xdev->common.copy_align)); |
| 1227 | + } |
| 1228 | + return copy; |
| 1229 | +} |
| 1230 | + |
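`xilinx_dma_calc_copysize()` caps each chunk at the IP's programmed maximum and, for every chunk except the last, rounds the size down to the `copy_align` granularity so the next chunk starts on an aligned boundary. The same logic as a standalone function (the limit and alignment values here are illustrative):

```c
#include <stddef.h>
#include <stdio.h>

/* Mirror of the driver helper; max_len and copy_align would come from
 * chan->xdev in the real code. */
static size_t calc_copysize(size_t size, size_t done,
			    size_t max_len, unsigned int copy_align)
{
	size_t copy = size - done;

	if (copy > max_len)
		copy = max_len;

	/* Not the last chunk: keep the next one aligned. */
	if (copy + done < size && copy_align)
		copy -= copy % ((size_t)1 << copy_align);

	return copy;
}

int main(void)
{
	/* 10000 bytes, 4095-byte hw limit, 64-byte (2^6) alignment:
	 * chunks of 4032, 4032, then the 1936-byte remainder. */
	size_t size = 10000, done = 0;

	while (done < size) {
		size_t copy = calc_copysize(size, done, 4095, 6);
		printf("chunk %zu\n", copy);
		done += copy;
	}
	return 0;
}
```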
| 1231 | +/** |
973 | 1232 | * xilinx_dma_tx_status - Get DMA transaction status |
974 | 1233 | * @dchan: DMA channel |
975 | 1234 | * @cookie: Transaction identifier |
.. | .. |
983 | 1242 | { |
984 | 1243 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
985 | 1244 | struct xilinx_dma_tx_descriptor *desc; |
986 | | - struct xilinx_axidma_tx_segment *segment; |
987 | | - struct xilinx_axidma_desc_hw *hw; |
988 | 1245 | enum dma_status ret; |
989 | 1246 | unsigned long flags; |
990 | 1247 | u32 residue = 0; |
.. | .. |
993 | 1250 | if (ret == DMA_COMPLETE || !txstate) |
994 | 1251 | return ret; |
995 | 1252 | |
996 | | - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { |
997 | | - spin_lock_irqsave(&chan->lock, flags); |
998 | | - |
| 1253 | + spin_lock_irqsave(&chan->lock, flags); |
| 1254 | + if (!list_empty(&chan->active_list)) { |
999 | 1255 | desc = list_last_entry(&chan->active_list, |
1000 | 1256 | struct xilinx_dma_tx_descriptor, node); |
1001 | | - if (chan->has_sg) { |
1002 | | - list_for_each_entry(segment, &desc->segments, node) { |
1003 | | - hw = &segment->hw; |
1004 | | - residue += (hw->control - hw->status) & |
1005 | | - XILINX_DMA_MAX_TRANS_LEN; |
1006 | | - } |
1007 | | - } |
1008 | | - spin_unlock_irqrestore(&chan->lock, flags); |
1009 | | - |
1010 | | - chan->residue = residue; |
1011 | | - dma_set_residue(txstate, chan->residue); |
| 1257 | + /* |
| 1258 | + * VDMA and simple mode do not support residue reporting, so the |
| 1259 | + * residue field will always be 0. |
| 1260 | + */ |
| 1261 | + if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA) |
| 1262 | + residue = xilinx_dma_get_residue(chan, desc); |
1012 | 1263 | } |
| 1264 | + spin_unlock_irqrestore(&chan->lock, flags); |
| 1265 | + |
| 1266 | + dma_set_residue(txstate, residue); |
1013 | 1267 | |
1014 | 1268 | return ret; |
1015 | 1269 | } |
.. | .. |
1078 | 1332 | static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) |
1079 | 1333 | { |
1080 | 1334 | struct xilinx_vdma_config *config = &chan->config; |
1081 | | - struct xilinx_dma_tx_descriptor *desc, *tail_desc; |
| 1335 | + struct xilinx_dma_tx_descriptor *desc; |
1082 | 1336 | u32 reg, j; |
1083 | | - struct xilinx_vdma_tx_segment *tail_segment; |
| 1337 | + struct xilinx_vdma_tx_segment *segment, *last = NULL; |
| 1338 | + int i = 0; |
1084 | 1339 | |
1085 | 1340 | /* This function was invoked with lock held */ |
1086 | 1341 | if (chan->err) |
.. | .. |
1094 | 1349 | |
1095 | 1350 | desc = list_first_entry(&chan->pending_list, |
1096 | 1351 | struct xilinx_dma_tx_descriptor, node); |
1097 | | - tail_desc = list_last_entry(&chan->pending_list, |
1098 | | - struct xilinx_dma_tx_descriptor, node); |
1099 | | - |
1100 | | - tail_segment = list_last_entry(&tail_desc->segments, |
1101 | | - struct xilinx_vdma_tx_segment, node); |
1102 | | - |
1103 | | - /* |
1104 | | - * If hardware is idle, then all descriptors on the running lists are |
1105 | | - * done, start new transfers |
1106 | | - */ |
1107 | | - if (chan->has_sg) |
1108 | | - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, |
1109 | | - desc->async_tx.phys); |
1110 | 1352 | |
1111 | 1353 | /* Configure the hardware using info in the config structure */ |
1112 | 1354 | if (chan->has_vflip) { |
.. | .. |
1124 | 1366 | else |
1125 | 1367 | reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; |
1126 | 1368 | |
1127 | | - /* |
1128 | | - * With SG, start with circular mode, so that BDs can be fetched. |
1129 | | - * In direct register mode, if not parking, enable circular mode |
1130 | | - */ |
1131 | | - if (chan->has_sg || !config->park) |
1132 | | - reg |= XILINX_DMA_DMACR_CIRC_EN; |
1133 | | - |
| 1369 | + /* If not parking, enable circular mode */ |
1134 | 1370 | if (config->park) |
1135 | 1371 | reg &= ~XILINX_DMA_DMACR_CIRC_EN; |
| 1372 | + else |
| 1373 | + reg |= XILINX_DMA_DMACR_CIRC_EN; |
1136 | 1374 | |
1137 | 1375 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); |
1138 | 1376 | |
.. | .. |
1154 | 1392 | return; |
1155 | 1393 | |
1156 | 1394 | /* Start the transfer */ |
1157 | | - if (chan->has_sg) { |
1158 | | - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, |
1159 | | - tail_segment->phys); |
1160 | | - list_splice_tail_init(&chan->pending_list, &chan->active_list); |
1161 | | - chan->desc_pendingcount = 0; |
1162 | | - } else { |
1163 | | - struct xilinx_vdma_tx_segment *segment, *last = NULL; |
1164 | | - int i = 0; |
| 1395 | + if (chan->desc_submitcount < chan->num_frms) |
| 1396 | + i = chan->desc_submitcount; |
1165 | 1397 | |
1166 | | - if (chan->desc_submitcount < chan->num_frms) |
1167 | | - i = chan->desc_submitcount; |
1168 | | - |
1169 | | - list_for_each_entry(segment, &desc->segments, node) { |
1170 | | - if (chan->ext_addr) |
1171 | | - vdma_desc_write_64(chan, |
1172 | | - XILINX_VDMA_REG_START_ADDRESS_64(i++), |
1173 | | - segment->hw.buf_addr, |
1174 | | - segment->hw.buf_addr_msb); |
1175 | | - else |
1176 | | - vdma_desc_write(chan, |
| 1398 | + list_for_each_entry(segment, &desc->segments, node) { |
| 1399 | + if (chan->ext_addr) |
| 1400 | + vdma_desc_write_64(chan, |
| 1401 | + XILINX_VDMA_REG_START_ADDRESS_64(i++), |
| 1402 | + segment->hw.buf_addr, |
| 1403 | + segment->hw.buf_addr_msb); |
| 1404 | + else |
| 1405 | + vdma_desc_write(chan, |
1177 | 1406 | XILINX_VDMA_REG_START_ADDRESS(i++), |
1178 | 1407 | segment->hw.buf_addr); |
1179 | 1408 | |
1180 | | - last = segment; |
1181 | | - } |
1182 | | - |
1183 | | - if (!last) |
1184 | | - return; |
1185 | | - |
1186 | | - /* HW expects these parameters to be same for one transaction */ |
1187 | | - vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); |
1188 | | - vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, |
1189 | | - last->hw.stride); |
1190 | | - vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); |
1191 | | - |
1192 | | - chan->desc_submitcount++; |
1193 | | - chan->desc_pendingcount--; |
1194 | | - list_del(&desc->node); |
1195 | | - list_add_tail(&desc->node, &chan->active_list); |
1196 | | - if (chan->desc_submitcount == chan->num_frms) |
1197 | | - chan->desc_submitcount = 0; |
| 1409 | + last = segment; |
1198 | 1410 | } |
| 1411 | + |
| 1412 | + if (!last) |
| 1413 | + return; |
| 1414 | + |
| 1415 | + /* HW expects these parameters to be same for one transaction */ |
| 1416 | + vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); |
| 1417 | + vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, |
| 1418 | + last->hw.stride); |
| 1419 | + vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); |
| 1420 | + |
| 1421 | + chan->desc_submitcount++; |
| 1422 | + chan->desc_pendingcount--; |
| 1423 | + list_del(&desc->node); |
| 1424 | + list_add_tail(&desc->node, &chan->active_list); |
| 1425 | + if (chan->desc_submitcount == chan->num_frms) |
| 1426 | + chan->desc_submitcount = 0; |
1199 | 1427 | |
1200 | 1428 | chan->idle = false; |
1201 | 1429 | } |
.. | .. |
1257 | 1485 | |
1258 | 1486 | hw = &segment->hw; |
1259 | 1487 | |
1260 | | - xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); |
1261 | | - xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); |
| 1488 | + xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, |
| 1489 | + xilinx_prep_dma_addr_t(hw->src_addr)); |
| 1490 | + xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, |
| 1491 | + xilinx_prep_dma_addr_t(hw->dest_addr)); |
1262 | 1492 | |
1263 | 1493 | /* Start the transfer */ |
1264 | 1494 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, |
1265 | | - hw->control & XILINX_DMA_MAX_TRANS_LEN); |
| 1495 | + hw->control & chan->xdev->max_buffer_len); |
1266 | 1496 | } |
1267 | 1497 | |
1268 | 1498 | list_splice_tail_init(&chan->pending_list, &chan->active_list); |
.. | .. |
1305 | 1535 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); |
1306 | 1536 | } |
1307 | 1537 | |
1308 | | - if (chan->has_sg && !chan->xdev->mcdma) |
| 1538 | + if (chan->has_sg) |
1309 | 1539 | xilinx_write(chan, XILINX_DMA_REG_CURDESC, |
1310 | 1540 | head_desc->async_tx.phys); |
1311 | | - |
1312 | | - if (chan->has_sg && chan->xdev->mcdma) { |
1313 | | - if (chan->direction == DMA_MEM_TO_DEV) { |
1314 | | - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, |
1315 | | - head_desc->async_tx.phys); |
1316 | | - } else { |
1317 | | - if (!chan->tdest) { |
1318 | | - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, |
1319 | | - head_desc->async_tx.phys); |
1320 | | - } else { |
1321 | | - dma_ctrl_write(chan, |
1322 | | - XILINX_DMA_MCRX_CDESC(chan->tdest), |
1323 | | - head_desc->async_tx.phys); |
1324 | | - } |
1325 | | - } |
1326 | | - } |
1327 | 1541 | |
1328 | 1542 | xilinx_dma_start(chan); |
1329 | 1543 | |
.. | .. |
1331 | 1545 | return; |
1332 | 1546 | |
1333 | 1547 | /* Start the transfer */ |
1334 | | - if (chan->has_sg && !chan->xdev->mcdma) { |
| 1548 | + if (chan->has_sg) { |
1335 | 1549 | if (chan->cyclic) |
1336 | 1550 | xilinx_write(chan, XILINX_DMA_REG_TAILDESC, |
1337 | 1551 | chan->cyclic_seg_v->phys); |
1338 | 1552 | else |
1339 | 1553 | xilinx_write(chan, XILINX_DMA_REG_TAILDESC, |
1340 | 1554 | tail_segment->phys); |
1341 | | - } else if (chan->has_sg && chan->xdev->mcdma) { |
1342 | | - if (chan->direction == DMA_MEM_TO_DEV) { |
1343 | | - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, |
1344 | | - tail_segment->phys); |
1345 | | - } else { |
1346 | | - if (!chan->tdest) { |
1347 | | - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, |
1348 | | - tail_segment->phys); |
1349 | | - } else { |
1350 | | - dma_ctrl_write(chan, |
1351 | | - XILINX_DMA_MCRX_TDESC(chan->tdest), |
1352 | | - tail_segment->phys); |
1353 | | - } |
1354 | | - } |
1355 | 1555 | } else { |
1356 | 1556 | struct xilinx_axidma_tx_segment *segment; |
1357 | 1557 | struct xilinx_axidma_desc_hw *hw; |
.. | .. |
1361 | 1561 | node); |
1362 | 1562 | hw = &segment->hw; |
1363 | 1563 | |
1364 | | - xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); |
| 1564 | + xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, |
| 1565 | + xilinx_prep_dma_addr_t(hw->buf_addr)); |
1365 | 1566 | |
1366 | 1567 | /* Start the transfer */ |
1367 | 1568 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, |
1368 | | - hw->control & XILINX_DMA_MAX_TRANS_LEN); |
| 1569 | + hw->control & chan->xdev->max_buffer_len); |
1369 | 1570 | } |
| 1571 | + |
| 1572 | + list_splice_tail_init(&chan->pending_list, &chan->active_list); |
| 1573 | + chan->desc_pendingcount = 0; |
| 1574 | + chan->idle = false; |
| 1575 | +} |
| 1576 | + |
| 1577 | +/** |
| 1578 | + * xilinx_mcdma_start_transfer - Starts MCDMA transfer |
| 1579 | + * @chan: Driver specific channel struct pointer |
| 1580 | + */ |
| 1581 | +static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan) |
| 1582 | +{ |
| 1583 | + struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; |
| 1584 | + struct xilinx_aximcdma_tx_segment *tail_segment; |
| 1585 | + u32 reg; |
| 1586 | + |
| 1587 | + /* |
| 1588 | + * The lock has already been taken by the calling functions, so we |
| 1589 | + * don't need to take it here again. |
| 1590 | + */ |
| 1591 | + |
| 1592 | + if (chan->err) |
| 1593 | + return; |
| 1594 | + |
| 1595 | + if (!chan->idle) |
| 1596 | + return; |
| 1597 | + |
| 1598 | + if (list_empty(&chan->pending_list)) |
| 1599 | + return; |
| 1600 | + |
| 1601 | + head_desc = list_first_entry(&chan->pending_list, |
| 1602 | + struct xilinx_dma_tx_descriptor, node); |
| 1603 | + tail_desc = list_last_entry(&chan->pending_list, |
| 1604 | + struct xilinx_dma_tx_descriptor, node); |
| 1605 | + tail_segment = list_last_entry(&tail_desc->segments, |
| 1606 | + struct xilinx_aximcdma_tx_segment, node); |
| 1607 | + |
| 1608 | + reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest)); |
| 1609 | + |
| 1610 | + if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) { |
| 1611 | + reg &= ~XILINX_MCDMA_COALESCE_MASK; |
| 1612 | + reg |= chan->desc_pendingcount << |
| 1613 | + XILINX_MCDMA_COALESCE_SHIFT; |
| 1614 | + } |
| 1615 | + |
| 1616 | + reg |= XILINX_MCDMA_IRQ_ALL_MASK; |
| 1617 | + dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg); |
| 1618 | + |
| 1619 | + /* Program current descriptor */ |
| 1620 | + xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest), |
| 1621 | + head_desc->async_tx.phys); |
| 1622 | + |
| 1623 | + /* Program channel enable register */ |
| 1624 | + reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET); |
| 1625 | + reg |= BIT(chan->tdest); |
| 1626 | + dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg); |
| 1627 | + |
| 1628 | + /* Start the fetch of BDs for the channel */ |
| 1629 | + reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest)); |
| 1630 | + reg |= XILINX_MCDMA_CR_RUNSTOP_MASK; |
| 1631 | + dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg); |
| 1632 | + |
| 1633 | + xilinx_dma_start(chan); |
| 1634 | + |
| 1635 | + if (chan->err) |
| 1636 | + return; |
| 1637 | + |
| 1638 | + /* Start the transfer */ |
| 1639 | + xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest), |
| 1640 | + tail_segment->phys); |
1370 | 1641 | |
1371 | 1642 | list_splice_tail_init(&chan->pending_list, &chan->active_list); |
1372 | 1643 | chan->desc_pendingcount = 0; |
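Programming the interrupt-coalescing threshold above is a classic read-modify-write on a packed bitfield: clear bits 23:16 of the per-channel control register, then shift the pending-descriptor count into place (only when it fits under XILINX_MCDMA_COALESCE_MAX), and finally enable the IOC/delay/error interrupt bits. A sketch of the same bit manipulation on a plain variable:

```c
#include <stdint.h>
#include <stdio.h>

#define COALESCE_SHIFT 16
#define COALESCE_MAX   24
#define COALESCE_MASK  (0xffu << COALESCE_SHIFT) /* GENMASK(23, 16) */
#define IRQ_ALL_MASK   (0x7u << 5)               /* GENMASK(7, 5)   */

int main(void)
{
	uint32_t reg = 0x00aa0013;	/* pretend value read back from CHAN_CR */
	uint32_t pendingcount = 8;

	if (pendingcount <= COALESCE_MAX) {
		reg &= ~COALESCE_MASK;			/* clear the old threshold */
		reg |= pendingcount << COALESCE_SHIFT;	/* install the new one */
	}
	reg |= IRQ_ALL_MASK;				/* enable IOC/delay/error irqs */

	printf("CHAN_CR = 0x%08x\n", reg);		/* 0x000800f3 */
	return 0;
}
```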
.. | .. |
1402 | 1673 | return; |
1403 | 1674 | |
1404 | 1675 | list_for_each_entry_safe(desc, next, &chan->active_list, node) { |
| 1676 | + if (chan->has_sg && chan->xdev->dma_config->dmatype != |
| 1677 | + XDMA_TYPE_VDMA) |
| 1678 | + desc->residue = xilinx_dma_get_residue(chan, desc); |
| 1679 | + else |
| 1680 | + desc->residue = 0; |
| 1681 | + desc->err = chan->err; |
| 1682 | + |
1405 | 1683 | list_del(&desc->node); |
1406 | 1684 | if (!desc->cyclic) |
1407 | 1685 | dma_cookie_complete(&desc->async_tx); |
.. | .. |
1462 | 1740 | XILINX_DMA_DMAXR_ALL_IRQ_MASK); |
1463 | 1741 | |
1464 | 1742 | return 0; |
| 1743 | +} |
| 1744 | + |
| 1745 | +/** |
| 1746 | + * xilinx_mcdma_irq_handler - MCDMA Interrupt handler |
| 1747 | + * @irq: IRQ number |
| 1748 | + * @data: Pointer to the Xilinx MCDMA channel structure |
| 1749 | + * |
| 1750 | + * Return: IRQ_HANDLED/IRQ_NONE |
| 1751 | + */ |
| 1752 | +static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data) |
| 1753 | +{ |
| 1754 | + struct xilinx_dma_chan *chan = data; |
| 1755 | + u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id; |
| 1756 | + |
| 1757 | + if (chan->direction == DMA_DEV_TO_MEM) |
| 1758 | + ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET; |
| 1759 | + else |
| 1760 | + ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET; |
| 1761 | + |
| 1762 | + /* Read the channel id raising the interrupt*/ |
| 1763 | + chan_sermask = dma_ctrl_read(chan, ser_offset); |
| 1764 | + chan_id = ffs(chan_sermask); |
| 1765 | + |
| 1766 | + if (!chan_id) |
| 1767 | + return IRQ_NONE; |
| 1768 | + |
| 1769 | + if (chan->direction == DMA_DEV_TO_MEM) |
| 1770 | + chan_offset = chan->xdev->dma_config->max_channels / 2; |
| 1771 | + |
| 1772 | + chan_offset = chan_offset + (chan_id - 1); |
| 1773 | + chan = chan->xdev->chan[chan_offset]; |
| 1774 | + /* Read the status and ack the interrupts. */ |
| 1775 | + status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest)); |
| 1776 | + if (!(status & XILINX_MCDMA_IRQ_ALL_MASK)) |
| 1777 | + return IRQ_NONE; |
| 1778 | + |
| 1779 | + dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest), |
| 1780 | + status & XILINX_MCDMA_IRQ_ALL_MASK); |
| 1781 | + |
| 1782 | + if (status & XILINX_MCDMA_IRQ_ERR_MASK) { |
| 1783 | + dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n", |
| 1784 | + chan, |
| 1785 | + dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET), |
| 1786 | + dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET |
| 1787 | + (chan->tdest)), |
| 1788 | + dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET |
| 1789 | + (chan->tdest))); |
| 1790 | + chan->err = true; |
| 1791 | + } |
| 1792 | + |
| 1793 | + if (status & XILINX_MCDMA_IRQ_DELAY_MASK) { |
| 1794 | + /* |
| 1795 | + * Device takes too long to do the transfer when user requires |
| 1796 | + * responsiveness. |
| 1797 | + */ |
| 1798 | + dev_dbg(chan->dev, "Inter-packet latency too long\n"); |
| 1799 | + } |
| 1800 | + |
| 1801 | + if (status & XILINX_MCDMA_IRQ_IOC_MASK) { |
| 1802 | + spin_lock(&chan->lock); |
| 1803 | + xilinx_dma_complete_descriptor(chan); |
| 1804 | + chan->idle = true; |
| 1805 | + chan->start_transfer(chan); |
| 1806 | + spin_unlock(&chan->lock); |
| 1807 | + } |
| 1808 | + |
| 1809 | + tasklet_schedule(&chan->tasklet); |
| 1810 | + return IRQ_HANDLED; |
1465 | 1811 | } |
1466 | 1812 | |
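One interrupt line serves all channels of a direction, so the handler above first reads the serviced-interrupt register and uses `ffs()` (find first set, 1-based, 0 when no bit is set) to recover the channel number, then indexes the device's channel array with an S2MM offset of max_channels / 2. A user-space sketch of that decode using the GCC builtin in place of the kernel's `ffs()`:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend MCDMA core: 16 channels per direction (max_channels = 32). */
	int max_channels = 32;
	int is_s2mm = 1;

	/* Serviced-interrupt register: bit n set => channel n+1 raised it. */
	uint32_t chan_sermask = 0x00000024; /* channels 3 and 6 pending */

	int chan_id = __builtin_ffs(chan_sermask);
	if (!chan_id) {
		puts("spurious interrupt");	/* kernel returns IRQ_NONE */
		return 0;
	}

	int chan_offset = (is_s2mm ? max_channels / 2 : 0) + (chan_id - 1);
	printf("servicing xdev->chan[%d]\n", chan_offset); /* 16 + 2 = 18 */
	return 0;
}
```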
---|
1467 | 1813 | /** |
---|
.. | .. |
---|
1539 | 1885 | struct xilinx_vdma_tx_segment *tail_segment; |
---|
1540 | 1886 | struct xilinx_dma_tx_descriptor *tail_desc; |
---|
1541 | 1887 | struct xilinx_axidma_tx_segment *axidma_tail_segment; |
---|
| 1888 | + struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment; |
---|
1542 | 1889 | struct xilinx_cdma_tx_segment *cdma_tail_segment; |
---|
1543 | 1890 | |
---|
1544 | 1891 | if (list_empty(&chan->pending_list)) |
---|
.. | .. |
---|
1560 | 1907 | struct xilinx_cdma_tx_segment, |
---|
1561 | 1908 | node); |
---|
1562 | 1909 | cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; |
---|
1563 | | - } else { |
---|
| 1910 | + } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { |
---|
1564 | 1911 | axidma_tail_segment = list_last_entry(&tail_desc->segments, |
---|
1565 | 1912 | struct xilinx_axidma_tx_segment, |
---|
1566 | 1913 | node); |
---|
1567 | 1914 | axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; |
---|
| 1915 | + } else { |
---|
| 1916 | + aximcdma_tail_segment = |
---|
| 1917 | + list_last_entry(&tail_desc->segments, |
---|
| 1918 | + struct xilinx_aximcdma_tx_segment, |
---|
| 1919 | + node); |
---|
| 1920 | + aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; |
---|
1568 | 1921 | } |
---|
1569 | 1922 | |
---|
1570 | 1923 | /* |
---|
.. | .. |
---|
1729 | 2082 | struct xilinx_cdma_tx_segment *segment; |
---|
1730 | 2083 | struct xilinx_cdma_desc_hw *hw; |
---|
1731 | 2084 | |
---|
1732 | | - if (!len || len > XILINX_DMA_MAX_TRANS_LEN) |
---|
| 2085 | + if (!len || len > chan->xdev->max_buffer_len) |
---|
1733 | 2086 | return NULL; |
---|
1734 | 2087 | |
---|
1735 | 2088 | desc = xilinx_dma_alloc_tx_descriptor(chan); |
---|
.. | .. |
---|
1819 | 2172 | * Calculate the maximum number of bytes to transfer, |
---|
1820 | 2173 | * making sure it is less than the hw limit |
---|
1821 | 2174 | */ |
---|
1822 | | - copy = min_t(size_t, sg_dma_len(sg) - sg_used, |
---|
1823 | | - XILINX_DMA_MAX_TRANS_LEN); |
---|
| 2175 | + copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg), |
---|
| 2176 | + sg_used); |
---|
1824 | 2177 | hw = &segment->hw; |
---|
1825 | 2178 | |
---|
1826 | 2179 | /* Fill in the descriptor */ |
---|
.. | .. |
---|
1924 | 2277 | * Calculate the maximum number of bytes to transfer, |
---|
1925 | 2278 | * making sure it is less than the hw limit |
---|
1926 | 2279 | */ |
---|
1927 | | - copy = min_t(size_t, period_len - sg_used, |
---|
1928 | | - XILINX_DMA_MAX_TRANS_LEN); |
---|
| 2280 | + copy = xilinx_dma_calc_copysize(chan, period_len, |
---|
| 2281 | + sg_used); |
---|
1929 | 2282 | hw = &segment->hw; |
---|
1930 | 2283 | xilinx_axidma_buf(chan, hw, buf_addr, sg_used, |
---|
1931 | 2284 | period_len * i); |
---|
.. | .. |
---|
1973 | 2326 | } |
---|
1974 | 2327 | |
---|
1975 | 2328 | /** |
---|
1976 | | - * xilinx_dma_prep_interleaved - prepare a descriptor for a |
---|
1977 | | - * DMA_SLAVE transaction |
---|
| 2329 | + * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction |
---|
1978 | 2330 | * @dchan: DMA channel |
---|
1979 | | - * @xt: Interleaved template pointer |
---|
| 2331 | + * @sgl: scatterlist to transfer to/from |
---|
| 2332 | + * @sg_len: number of entries in @scatterlist |
---|
| 2333 | + * @direction: DMA direction |
---|
1980 | 2334 | * @flags: transfer ack flags |
---|
| 2335 | + * @context: APP words of the descriptor |
---|
1981 | 2336 | * |
---|
1982 | 2337 | * Return: Async transaction descriptor on success and NULL on failure |
---|
1983 | 2338 | */ |
---|
1984 | 2339 | static struct dma_async_tx_descriptor * |
---|
1985 | | -xilinx_dma_prep_interleaved(struct dma_chan *dchan, |
---|
1986 | | - struct dma_interleaved_template *xt, |
---|
1987 | | - unsigned long flags) |
---|
| 2340 | +xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, |
---|
| 2341 | + unsigned int sg_len, |
---|
| 2342 | + enum dma_transfer_direction direction, |
---|
| 2343 | + unsigned long flags, void *context) |
---|
1988 | 2344 | { |
---|
1989 | 2345 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
---|
1990 | 2346 | struct xilinx_dma_tx_descriptor *desc; |
---|
1991 | | - struct xilinx_axidma_tx_segment *segment; |
---|
1992 | | - struct xilinx_axidma_desc_hw *hw; |
---|
| 2347 | + struct xilinx_aximcdma_tx_segment *segment = NULL; |
---|
| 2348 | + u32 *app_w = (u32 *)context; |
---|
| 2349 | + struct scatterlist *sg; |
---|
| 2350 | + size_t copy; |
---|
| 2351 | + size_t sg_used; |
---|
| 2352 | + unsigned int i; |
---|
1993 | 2353 | |
---|
1994 | | - if (!is_slave_direction(xt->dir)) |
---|
1995 | | - return NULL; |
---|
1996 | | - |
---|
1997 | | - if (!xt->numf || !xt->sgl[0].size) |
---|
1998 | | - return NULL; |
---|
1999 | | - |
---|
2000 | | - if (xt->frame_size != 1) |
---|
| 2354 | + if (!is_slave_direction(direction)) |
---|
2001 | 2355 | return NULL; |
---|
2002 | 2356 | |
---|
2003 | 2357 | /* Allocate a transaction descriptor. */ |
---|
.. | .. |
---|
2005 | 2359 | if (!desc) |
---|
2006 | 2360 | return NULL; |
---|
2007 | 2361 | |
---|
2008 | | - chan->direction = xt->dir; |
---|
2009 | 2362 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); |
---|
2010 | 2363 | desc->async_tx.tx_submit = xilinx_dma_tx_submit; |
---|
2011 | 2364 | |
---|
2012 | | - /* Get a free segment */ |
---|
2013 | | - segment = xilinx_axidma_alloc_tx_segment(chan); |
---|
2014 | | - if (!segment) |
---|
2015 | | - goto error; |
---|
| 2365 | + /* Build transactions using information in the scatter gather list */ |
---|
| 2366 | + for_each_sg(sgl, sg, sg_len, i) { |
---|
| 2367 | + sg_used = 0; |
---|
2016 | 2368 | |
---|
2017 | | - hw = &segment->hw; |
---|
| 2369 | + /* Loop until the entire scatterlist entry is used */ |
---|
| 2370 | + while (sg_used < sg_dma_len(sg)) { |
---|
| 2371 | + struct xilinx_aximcdma_desc_hw *hw; |
---|
2018 | 2372 | |
---|
2019 | | - /* Fill in the descriptor */ |
---|
2020 | | - if (xt->dir != DMA_MEM_TO_DEV) |
---|
2021 | | - hw->buf_addr = xt->dst_start; |
---|
2022 | | - else |
---|
2023 | | - hw->buf_addr = xt->src_start; |
---|
| 2373 | + /* Get a free segment */ |
---|
| 2374 | + segment = xilinx_aximcdma_alloc_tx_segment(chan); |
---|
| 2375 | + if (!segment) |
---|
| 2376 | + goto error; |
---|
2024 | 2377 | |
---|
2025 | | - hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; |
---|
2026 | | - hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & |
---|
2027 | | - XILINX_DMA_BD_VSIZE_MASK; |
---|
2028 | | - hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & |
---|
2029 | | - XILINX_DMA_BD_STRIDE_MASK; |
---|
2030 | | - hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; |
---|
| 2378 | + /* |
---|
| 2379 | + * Calculate the maximum number of bytes to transfer, |
---|
| 2380 | + * making sure it is less than the hw limit |
---|
| 2381 | + */ |
---|
| 2382 | + copy = min_t(size_t, sg_dma_len(sg) - sg_used, |
---|
| 2383 | + chan->xdev->max_buffer_len); |
---|
| 2384 | + hw = &segment->hw; |
---|
2031 | 2385 | |
---|
2032 | | - /* |
---|
2033 | | - * Insert the segment into the descriptor segments |
---|
2034 | | - * list. |
---|
2035 | | - */ |
---|
2036 | | - list_add_tail(&segment->node, &desc->segments); |
---|
| 2386 | + /* Fill in the descriptor */ |
---|
| 2387 | + xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg), |
---|
| 2388 | + sg_used); |
---|
| 2389 | + hw->control = copy; |
---|
2037 | 2390 | |
---|
| 2391 | + if (chan->direction == DMA_MEM_TO_DEV && app_w) { |
---|
| 2392 | + memcpy(hw->app, app_w, sizeof(u32) * |
---|
| 2393 | + XILINX_DMA_NUM_APP_WORDS); |
---|
| 2394 | + } |
---|
| 2395 | + |
---|
| 2396 | + sg_used += copy; |
---|
| 2397 | + /* |
---|
| 2398 | + * Insert the segment into the descriptor segments |
---|
| 2399 | + * list. |
---|
| 2400 | + */ |
---|
| 2401 | + list_add_tail(&segment->node, &desc->segments); |
---|
| 2402 | + } |
---|
| 2403 | + } |
---|
2038 | 2404 | |
---|
2039 | 2405 | segment = list_first_entry(&desc->segments, |
---|
2040 | | - struct xilinx_axidma_tx_segment, node); |
---|
| 2406 | + struct xilinx_aximcdma_tx_segment, node); |
---|
2041 | 2407 | desc->async_tx.phys = segment->phys; |
---|
2042 | 2408 | |
---|
2043 | 2409 | /* For the last DMA_MEM_TO_DEV transfer, set EOP */ |
---|
2044 | | - if (xt->dir == DMA_MEM_TO_DEV) { |
---|
2045 | | - segment->hw.control |= XILINX_DMA_BD_SOP; |
---|
| 2410 | + if (chan->direction == DMA_MEM_TO_DEV) { |
---|
| 2411 | + segment->hw.control |= XILINX_MCDMA_BD_SOP; |
---|
2046 | 2412 | segment = list_last_entry(&desc->segments, |
---|
2047 | | - struct xilinx_axidma_tx_segment, |
---|
| 2413 | + struct xilinx_aximcdma_tx_segment, |
---|
2048 | 2414 | node); |
---|
2049 | | - segment->hw.control |= XILINX_DMA_BD_EOP; |
---|
| 2415 | + segment->hw.control |= XILINX_MCDMA_BD_EOP; |
---|
2050 | 2416 | } |
---|
2051 | 2417 | |
---|
2052 | 2418 | return &desc->async_tx; |
---|
2053 | 2419 | |
---|
2054 | 2420 | error: |
---|
2055 | 2421 | xilinx_dma_free_tx_descriptor(chan, desc); |
---|
| 2422 | + |
---|
2056 | 2423 | return NULL; |
---|
2057 | 2424 | } |
---|
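
For readers coming from the consumer side, here is a minimal client sketch of how the MCDMA prep callback above is reached through the generic dmaengine API. The helper name, the "rx" channel name, and the assumption that the scatterlist is already DMA-mapped are illustrative, not taken from this driver.

```c
#include <linux/dmaengine.h>

/* Hypothetical consumer: queue an already DMA-mapped scatterlist on S2MM */
static int example_start_rx(struct device *dev, struct scatterlist *sgl,
			    unsigned int nents)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx");	/* resolved via dmas/dma-names */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	dmaengine_submit(tx);		/* only queues the descriptor */
	dma_async_issue_pending(chan);	/* kicks the hardware */
	return 0;
}
```

Note that dmaengine_submit() only books the descriptor into the pending list; the engine is not started until dma_async_issue_pending().
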
2058 | 2425 | |
---|
.. | .. |
---|
2068 | 2435 | u32 reg; |
---|
2069 | 2436 | int err; |
---|
2070 | 2437 | |
---|
2071 | | - if (chan->cyclic) |
---|
2072 | | - xilinx_dma_chan_reset(chan); |
---|
2073 | | - |
---|
2074 | | - err = chan->stop_transfer(chan); |
---|
2075 | | - if (err) { |
---|
2076 | | - dev_err(chan->dev, "Cannot stop channel %p: %x\n", |
---|
2077 | | - chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); |
---|
2078 | | - chan->err = true; |
---|
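| | + /* Only non-cyclic transfers can be stopped gracefully; cyclic channels rely on the reset below */
---|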
| 2438 | + if (!chan->cyclic) { |
---|
| 2439 | + err = chan->stop_transfer(chan); |
---|
| 2440 | + if (err) { |
---|
| 2441 | + dev_err(chan->dev, "Cannot stop channel %p: %x\n", |
---|
| 2442 | + chan, dma_ctrl_read(chan, |
---|
| 2443 | + XILINX_DMA_REG_DMASR)); |
---|
| 2444 | + chan->err = true; |
---|
| 2445 | + } |
---|
2079 | 2446 | } |
---|
2080 | 2447 | |
---|
| 2448 | + xilinx_dma_chan_reset(chan); |
---|
2081 | 2449 | /* Remove and free all of the descriptors in the lists */ |
---|
2082 | 2450 | chan->terminating = true; |
---|
2083 | 2451 | xilinx_dma_free_descriptors(chan); |
---|
.. | .. |
---|
2199 | 2567 | *tmp_clk = NULL; |
---|
2200 | 2568 | |
---|
2201 | 2569 | *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); |
---|
2202 | | - if (IS_ERR(*axi_clk)) { |
---|
2203 | | - err = PTR_ERR(*axi_clk); |
---|
2204 | | - dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); |
---|
2205 | | - return err; |
---|
2206 | | - } |
---|
| 2570 | + if (IS_ERR(*axi_clk)) |
---|
| 2571 | + return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n"); |
---|
2207 | 2572 | |
---|
2208 | 2573 | *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); |
---|
2209 | 2574 | if (IS_ERR(*tx_clk)) |
---|
.. | .. |
---|
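
The devm_clk_get() conversions in these clk_init hunks all follow the same dev_err_probe() pattern; a condensed sketch, with an illustrative wrapper function that is not part of the driver:

```c
#include <linux/clk.h>
#include <linux/platform_device.h>

/*
 * dev_err_probe() returns the error it is given; for -EPROBE_DEFER it
 * logs at debug level and records the deferral reason instead of
 * printing to the console, so no explicit error print is needed.
 */
static int example_get_lite_clk(struct platform_device *pdev, struct clk **clk)
{
	*clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(*clk),
				     "failed to get axi_aclk\n");

	return 0;
}
```
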
2264 | 2629 | *tmp2_clk = NULL; |
---|
2265 | 2630 | |
---|
2266 | 2631 | *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); |
---|
2267 | | - if (IS_ERR(*axi_clk)) { |
---|
2268 | | - err = PTR_ERR(*axi_clk); |
---|
2269 | | - dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err); |
---|
2270 | | - return err; |
---|
2271 | | - } |
---|
| 2632 | + if (IS_ERR(*axi_clk)) |
---|
| 2633 | + return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n"); |
---|
2272 | 2634 | |
---|
2273 | 2635 | *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); |
---|
2274 | | - if (IS_ERR(*dev_clk)) { |
---|
2275 | | - err = PTR_ERR(*dev_clk); |
---|
2276 | | - dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err); |
---|
2277 | | - return err; |
---|
2278 | | - } |
---|
| 2636 | + if (IS_ERR(*dev_clk)) |
---|
| 2637 | + return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n"); |
---|
2279 | 2638 | |
---|
2280 | 2639 | err = clk_prepare_enable(*axi_clk); |
---|
2281 | 2640 | if (err) { |
---|
.. | .. |
---|
2304 | 2663 | int err; |
---|
2305 | 2664 | |
---|
2306 | 2665 | *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); |
---|
2307 | | - if (IS_ERR(*axi_clk)) { |
---|
2308 | | - err = PTR_ERR(*axi_clk); |
---|
2309 | | - dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); |
---|
2310 | | - return err; |
---|
2311 | | - } |
---|
| 2666 | + if (IS_ERR(*axi_clk)) |
---|
| 2667 | + return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n"); |
---|
2312 | 2668 | |
---|
2313 | 2669 | *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); |
---|
2314 | 2670 | if (IS_ERR(*tx_clk)) |
---|
.. | .. |
---|
2328 | 2684 | |
---|
2329 | 2685 | err = clk_prepare_enable(*axi_clk); |
---|
2330 | 2686 | if (err) { |
---|
2331 | 2687 | dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
---|
2332 | 2689 | return err; |
---|
2333 | 2690 | } |
---|
2334 | 2691 | |
---|
.. | .. |
---|
2386 | 2743 | * |
---|
2387 | 2744 | * @xdev: Driver specific device structure |
---|
2388 | 2745 | * @node: Device node |
---|
2389 | | - * @chan_id: DMA Channel id |
---|
2390 | 2746 | * |
---|
2391 | 2747 | * Return: '0' on success and failure value on error |
---|
2392 | 2748 | */ |
---|
2393 | 2749 | static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, |
---|
2394 | | - struct device_node *node, int chan_id) |
---|
| 2750 | + struct device_node *node) |
---|
2395 | 2751 | { |
---|
2396 | 2752 | struct xilinx_dma_chan *chan; |
---|
2397 | 2753 | bool has_dre = false; |
---|
.. | .. |
---|
2405 | 2761 | |
---|
2406 | 2762 | chan->dev = xdev->dev; |
---|
2407 | 2763 | chan->xdev = xdev; |
---|
2408 | | - chan->has_sg = xdev->has_sg; |
---|
2409 | 2764 | chan->desc_pendingcount = 0x0; |
---|
2410 | 2765 | chan->ext_addr = xdev->ext_addr; |
---|
2411 | 2766 | /* This variable ensures that descriptors are not |
---|
.. | .. |
---|
2444 | 2799 | of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || |
---|
2445 | 2800 | of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { |
---|
2446 | 2801 | chan->direction = DMA_MEM_TO_DEV; |
---|
2447 | | - chan->id = chan_id; |
---|
2448 | | - chan->tdest = chan_id; |
---|
| 2802 | + chan->id = xdev->mm2s_chan_id++; |
---|
| 2803 | + chan->tdest = chan->id; |
---|
2449 | 2804 | |
---|
2450 | 2805 | chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; |
---|
2451 | 2806 | if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
---|
.. | .. |
---|
2461 | 2816 | of_device_is_compatible(node, |
---|
2462 | 2817 | "xlnx,axi-dma-s2mm-channel")) { |
---|
2463 | 2818 | chan->direction = DMA_DEV_TO_MEM; |
---|
2464 | | - chan->id = chan_id; |
---|
2465 | | - chan->tdest = chan_id - xdev->nr_channels; |
---|
| 2819 | + chan->id = xdev->s2mm_chan_id++; |
---|
| 2820 | + chan->tdest = chan->id - xdev->dma_config->max_channels / 2; |
---|
2466 | 2821 | chan->has_vflip = of_property_read_bool(node, |
---|
2467 | 2822 | "xlnx,enable-vert-flip"); |
---|
2468 | 2823 | if (chan->has_vflip) { |
---|
.. | .. |
---|
2471 | 2826 | XILINX_VDMA_ENABLE_VERTICAL_FLIP; |
---|
2472 | 2827 | } |
---|
2473 | 2828 | |
---|
2474 | | - chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; |
---|
| 2829 | + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) |
---|
| 2830 | + chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET; |
---|
| 2831 | + else |
---|
| 2832 | + chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; |
---|
| 2833 | + |
---|
2475 | 2834 | if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
---|
2476 | 2835 | chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; |
---|
2477 | 2836 | chan->config.park = 1; |
---|
.. | .. |
---|
2486 | 2845 | } |
---|
2487 | 2846 | |
---|
2488 | 2847 | /* Request the interrupt */ |
---|
2489 | | - chan->irq = irq_of_parse_and_map(node, 0); |
---|
2490 | | - err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED, |
---|
2491 | | - "xilinx-dma-controller", chan); |
---|
| 2848 | + chan->irq = irq_of_parse_and_map(node, chan->tdest); |
---|
| 2849 | + err = request_irq(chan->irq, xdev->dma_config->irq_handler, |
---|
| 2850 | + IRQF_SHARED, "xilinx-dma-controller", chan); |
---|
2492 | 2851 | if (err) { |
---|
2493 | 2852 | dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); |
---|
2494 | 2853 | return err; |
---|
.. | .. |
---|
2496 | 2855 | |
---|
2497 | 2856 | if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { |
---|
2498 | 2857 | chan->start_transfer = xilinx_dma_start_transfer; |
---|
| 2858 | + chan->stop_transfer = xilinx_dma_stop_transfer; |
---|
| 2859 | + } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { |
---|
| 2860 | + chan->start_transfer = xilinx_mcdma_start_transfer; |
---|
2499 | 2861 | chan->stop_transfer = xilinx_dma_stop_transfer; |
---|
2500 | 2862 | } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { |
---|
2501 | 2863 | chan->start_transfer = xilinx_cdma_start_transfer; |
---|
.. | .. |
---|
2505 | 2867 | chan->stop_transfer = xilinx_dma_stop_transfer; |
---|
2506 | 2868 | } |
---|
2507 | 2869 | |
---|
| 2870 | + /* Check if SG is enabled (MCDMA is always SG; AXI DMA and CDMA report it in DMASR) */
---|
| 2871 | + if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) { |
---|
| 2872 | + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA || |
---|
| 2873 | + dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & |
---|
| 2874 | + XILINX_DMA_DMASR_SG_MASK) |
---|
| 2875 | + chan->has_sg = true; |
---|
| 2876 | + dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id, |
---|
| 2877 | + chan->has_sg ? "enabled" : "disabled"); |
---|
| 2878 | + } |
---|
| 2879 | + |
---|
2508 | 2880 | /* Initialize the tasklet */ |
---|
2509 | | - tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, |
---|
2510 | | - (unsigned long)chan); |
---|
| 2881 | + tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet); |
---|
2511 | 2882 | |
---|
2512 | 2883 | /* |
---|
2513 | 2884 | * Initialize the DMA channel and add it to the DMA engine channels |
---|
.. | .. |
---|
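
The tasklet_setup() switch above implies a matching change to the callback signature; a sketch of the expected shape (the body is a placeholder, the from_tasklet() idiom is the point):

```c
static void xilinx_dma_do_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet() is a container_of() wrapper: recover the channel
	 * that embeds this tasklet instead of casting an unsigned long.
	 */
	struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet);

	/* ... clean up completed descriptors on @chan ... */
}
```
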
2545 | 2916 | u32 nr_channels = 1; |
---|
2546 | 2917 | |
---|
2547 | 2918 | ret = of_property_read_u32(node, "dma-channels", &nr_channels); |
---|
2548 | | - if ((ret < 0) && xdev->mcdma) |
---|
| 2919 | + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0) |
---|
2549 | 2920 | dev_warn(xdev->dev, "missing dma-channels property\n"); |
---|
2550 | 2921 | |
---|
2551 | 2922 | for (i = 0; i < nr_channels; i++) |
---|
2552 | | - xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); |
---|
2553 | | - |
---|
2554 | | - xdev->nr_channels += nr_channels; |
---|
| 2923 | + xilinx_dma_chan_probe(xdev, node); |
---|
2555 | 2924 | |
---|
2556 | 2925 | return 0; |
---|
2557 | 2926 | } |
---|
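
To make the id/tdest arithmetic in xilinx_dma_chan_probe() concrete: MM2S channels take ids [0, max_channels/2), S2MM channels take [max_channels/2, max_channels), and tdest rebases an S2MM id to zero. A one-line sketch (example_tdest() is illustrative, not driver API):

```c
static inline u32 example_tdest(u32 chan_id, u32 max_channels, bool is_s2mm)
{
	return is_s2mm ? chan_id - max_channels / 2 : chan_id;
}
```

For MCDMA (max_channels = 0x20) the first S2MM channel therefore gets id 16 and tdest 0, and tdest doubles as the per-node interrupt index passed to irq_of_parse_and_map().
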
.. | .. |
---|
2569 | 2938 | struct xilinx_dma_device *xdev = ofdma->of_dma_data; |
---|
2570 | 2939 | int chan_id = dma_spec->args[0]; |
---|
2571 | 2940 | |
---|
2572 | | - if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id]) |
---|
| 2941 | + if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id]) |
---|
2573 | 2942 | return NULL; |
---|
2574 | 2943 | |
---|
2575 | 2944 | return dma_get_slave_channel(&xdev->chan[chan_id]->common); |
---|
.. | .. |
---|
2578 | 2947 | static const struct xilinx_dma_config axidma_config = { |
---|
2579 | 2948 | .dmatype = XDMA_TYPE_AXIDMA, |
---|
2580 | 2949 | .clk_init = axidma_clk_init, |
---|
| 2950 | + .irq_handler = xilinx_dma_irq_handler, |
---|
| 2951 | + .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE, |
---|
2581 | 2952 | }; |
---|
2582 | 2953 | |
---|
| 2954 | +static const struct xilinx_dma_config aximcdma_config = { |
---|
| 2955 | + .dmatype = XDMA_TYPE_AXIMCDMA, |
---|
| 2956 | + .clk_init = axidma_clk_init, |
---|
| 2957 | + .irq_handler = xilinx_mcdma_irq_handler, |
---|
| 2958 | + .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE, |
---|
| 2959 | +};
---|
| | +
---|
2583 | 2960 | static const struct xilinx_dma_config axicdma_config = { |
---|
2584 | 2961 | .dmatype = XDMA_TYPE_CDMA, |
---|
2585 | 2962 | .clk_init = axicdma_clk_init, |
---|
| 2963 | + .irq_handler = xilinx_dma_irq_handler, |
---|
| 2964 | + .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE, |
---|
2586 | 2965 | }; |
---|
2587 | 2966 | |
---|
2588 | 2967 | static const struct xilinx_dma_config axivdma_config = { |
---|
2589 | 2968 | .dmatype = XDMA_TYPE_VDMA, |
---|
2590 | 2969 | .clk_init = axivdma_clk_init, |
---|
| 2970 | + .irq_handler = xilinx_dma_irq_handler, |
---|
| 2971 | + .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE, |
---|
2591 | 2972 | }; |
---|
2592 | 2973 | |
---|
2593 | 2974 | static const struct of_device_id xilinx_dma_of_ids[] = { |
---|
2594 | 2975 | { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, |
---|
2595 | 2976 | { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, |
---|
2596 | 2977 | { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, |
---|
| 2978 | + { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config }, |
---|
2597 | 2979 | {} |
---|
2598 | 2980 | }; |
---|
2599 | 2981 | MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); |
---|
.. | .. |
---|
2612 | 2994 | struct device_node *node = pdev->dev.of_node; |
---|
2613 | 2995 | struct xilinx_dma_device *xdev; |
---|
2614 | 2996 | struct device_node *child, *np = pdev->dev.of_node; |
---|
2615 | | - struct resource *io; |
---|
2616 | | - u32 num_frames, addr_width; |
---|
| 2997 | + u32 num_frames, addr_width, len_width; |
---|
2617 | 2998 | int i, err; |
---|
2618 | 2999 | |
---|
2619 | 3000 | /* Allocate and initialize the DMA engine structure */ |
---|
.. | .. |
---|
2638 | 3019 | return err; |
---|
2639 | 3020 | |
---|
2640 | 3021 | /* Request and map I/O memory */ |
---|
2641 | | - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
---|
2642 | | - xdev->regs = devm_ioremap_resource(&pdev->dev, io); |
---|
2643 | | - if (IS_ERR(xdev->regs)) |
---|
2644 | | - return PTR_ERR(xdev->regs); |
---|
2645 | | - |
---|
| 3022 | + xdev->regs = devm_platform_ioremap_resource(pdev, 0); |
---|
| 3023 | + if (IS_ERR(xdev->regs)) { |
---|
| 3024 | + err = PTR_ERR(xdev->regs); |
---|
| 3025 | + goto disable_clks; |
---|
| 3026 | + }
---|
| | +
---|
2646 | 3027 | /* Retrieve the DMA engine properties from the device tree */ |
---|
2647 | | - xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); |
---|
2648 | | - if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) |
---|
2649 | | - xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); |
---|
| 3028 | + xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0); |
---|
| 3029 | + xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2; |
---|
| 3030 | + |
---|
| 3031 | + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA || |
---|
| 3032 | + xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { |
---|
| 3033 | + if (!of_property_read_u32(node, "xlnx,sg-length-width", |
---|
| 3034 | + &len_width)) { |
---|
| 3035 | + if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN || |
---|
| 3036 | + len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) { |
---|
| 3037 | + dev_warn(xdev->dev, |
---|
| 3038 | + "invalid xlnx,sg-length-width property value. Using default width\n"); |
---|
| 3039 | + } else { |
---|
| 3040 | + if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX) |
---|
| 3041 | + dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n"); |
---|
| 3042 | + xdev->max_buffer_len = |
---|
| 3043 | + GENMASK(len_width - 1, 0); |
---|
| 3044 | + } |
---|
| 3045 | + } |
---|
| 3046 | + } |
---|
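
A worked example of the mask computed above (pure arithmetic, not driver code): an n-bit length field caps a single descriptor at 2^n - 1 bytes.

```c
#include <linux/bits.h>
#include <linux/types.h>

/* Default 23-bit length field: at most 0x007fffff bytes (~8 MiB) per BD */
static const u32 default_len_mask = GENMASK(23 - 1, 0);
/* xlnx,sg-length-width = <26> (needs IP support): 0x03ffffff (~64 MiB) */
static const u32 wide_len_mask = GENMASK(26 - 1, 0);
```
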
2650 | 3047 | |
---|
2651 | 3048 | if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
---|
2652 | 3049 | err = of_property_read_u32(node, "xlnx,num-fstores", |
---|
.. | .. |
---|
2654 | 3051 | if (err < 0) { |
---|
2655 | 3052 | dev_err(xdev->dev, |
---|
2656 | 3053 | "missing xlnx,num-fstores property\n"); |
---|
2657 | | - return err; |
---|
| 3054 | + goto disable_clks; |
---|
2658 | 3055 | } |
---|
2659 | 3056 | |
---|
2660 | 3057 | err = of_property_read_u32(node, "xlnx,flush-fsync", |
---|
.. | .. |
---|
2674 | 3071 | xdev->ext_addr = false; |
---|
2675 | 3072 | |
---|
2676 | 3073 | /* Set the dma mask bits */ |
---|
2677 | | - dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width)); |
---|
| 3074 | + err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width)); |
---|
| 3075 | + if (err < 0) { |
---|
| 3076 | + dev_err(xdev->dev, "DMA mask error %d\n", err); |
---|
| 3077 | + goto disable_clks; |
---|
| 3078 | + } |
---|
2678 | 3079 | |
---|
2679 | 3080 | /* Initialize the DMA engine */ |
---|
2680 | 3081 | xdev->common.dev = &pdev->dev; |
---|
.. | .. |
---|
2697 | 3098 | xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; |
---|
2698 | 3099 | xdev->common.device_prep_dma_cyclic = |
---|
2699 | 3100 | xilinx_dma_prep_dma_cyclic; |
---|
2700 | | - xdev->common.device_prep_interleaved_dma = |
---|
2701 | | - xilinx_dma_prep_interleaved; |
---|
2702 | | - /* Residue calculation is supported by only AXI DMA */ |
---|
| 3101 | + /* Residue calculation is supported only by AXI DMA and CDMA */
---|
2703 | 3102 | xdev->common.residue_granularity = |
---|
2704 | 3103 | DMA_RESIDUE_GRANULARITY_SEGMENT; |
---|
2705 | 3104 | } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { |
---|
2706 | 3105 | dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); |
---|
2707 | 3106 | xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; |
---|
| 3107 | + /* Residue calculation is supported only by AXI DMA and CDMA */
---|
| 3108 | + xdev->common.residue_granularity = |
---|
| 3109 | + DMA_RESIDUE_GRANULARITY_SEGMENT; |
---|
| 3110 | + } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { |
---|
| 3111 | + xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg; |
---|
2708 | 3112 | } else { |
---|
2709 | 3113 | xdev->common.device_prep_interleaved_dma = |
---|
2710 | 3114 | xilinx_vdma_dma_prep_interleaved; |
---|
.. | .. |
---|
2715 | 3119 | /* Initialize the channels */ |
---|
2716 | 3120 | for_each_child_of_node(node, child) { |
---|
2717 | 3121 | err = xilinx_dma_child_probe(xdev, child); |
---|
2718 | | - if (err < 0) |
---|
2719 | | - goto disable_clks; |
---|
| 3122 | + if (err < 0) { |
---|
| 3123 | + of_node_put(child); |
---|
| 3124 | + goto error; |
---|
| 3125 | + } |
---|
2720 | 3126 | } |
---|
2721 | 3127 | |
---|
2722 | 3128 | if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
---|
2723 | | - for (i = 0; i < xdev->nr_channels; i++) |
---|
| 3129 | + for (i = 0; i < xdev->dma_config->max_channels; i++) |
---|
2724 | 3130 | if (xdev->chan[i]) |
---|
2725 | 3131 | xdev->chan[i]->num_frms = num_frames; |
---|
2726 | 3132 | } |
---|
.. | .. |
---|
2744 | 3150 | dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n"); |
---|
2745 | 3151 | else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) |
---|
2746 | 3152 | dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n"); |
---|
| 3153 | + else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) |
---|
| 3154 | + dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n"); |
---|
2747 | 3155 | else |
---|
2748 | 3156 | dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); |
---|
2749 | 3157 | |
---|
2750 | 3158 | return 0; |
---|
2751 | 3159 | |
---|
2752 | | -disable_clks: |
---|
2753 | | - xdma_disable_allclks(xdev); |
---|
2754 | 3160 | error: |
---|
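| | + /* Tear the channels down first; the clocks stay enabled until disable_clks below */
---|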
2755 | | - for (i = 0; i < xdev->nr_channels; i++) |
---|
| 3161 | + for (i = 0; i < xdev->dma_config->max_channels; i++) |
---|
2756 | 3162 | if (xdev->chan[i]) |
---|
2757 | 3163 | xilinx_dma_chan_remove(xdev->chan[i]); |
---|
| 3164 | +disable_clks: |
---|
| 3165 | + xdma_disable_allclks(xdev); |
---|
2758 | 3166 | |
---|
2759 | 3167 | return err; |
---|
2760 | 3168 | } |
---|
.. | .. |
---|
2774 | 3182 | |
---|
2775 | 3183 | dma_async_device_unregister(&xdev->common); |
---|
2776 | 3184 | |
---|
2777 | | - for (i = 0; i < xdev->nr_channels; i++) |
---|
| 3185 | + for (i = 0; i < xdev->dma_config->max_channels; i++) |
---|
2778 | 3186 | if (xdev->chan[i]) |
---|
2779 | 3187 | xilinx_dma_chan_remove(xdev->chan[i]); |
---|
2780 | 3188 | |
---|