| .. | .. |
|---|
| 15 | 15 | |
|---|
| 16 | 16 | #include <linux/dmaengine.h> |
|---|
| 17 | 17 | #include <linux/dma-mapping.h> |
|---|
| 18 | | -#include <linux/edma.h> |
|---|
| 18 | +#include <linux/bitmap.h> |
|---|
| 19 | 19 | #include <linux/err.h> |
|---|
| 20 | 20 | #include <linux/init.h> |
|---|
| 21 | 21 | #include <linux/interrupt.h> |
|---|
| .. | .. |
|---|
| 133 | 133 | #define EDMA_CONT_PARAMS_FIXED_EXACT 1002 |
|---|
| 134 | 134 | #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003 |
|---|
| 135 | 135 | |
|---|
| 136 | +/* |
|---|
| 137 | + * 64bit array registers are split into two 32bit registers: |
|---|
| 138 | + * reg0: channel/event 0-31 |
|---|
| 139 | + * reg1: channel/event 32-63 |
|---|
| 140 | + * |
|---|
| 141 | + * bit 5 in the channel number tells the array index (0/1) |
|---|
| 142 | + * bit 0-4 (0x1f) is the bit offset within the register |
|---|
| 143 | + */ |
|---|
| 144 | +#define EDMA_REG_ARRAY_INDEX(channel) ((channel) >> 5) |
|---|
| 145 | +#define EDMA_CHANNEL_BIT(channel) (BIT((channel) & 0x1f)) |
|---|
| 146 | + |
|---|
| 136 | 147 | /* PaRAM slots are laid out like this */ |
|---|
| 137 | 148 | struct edmacc_param { |
|---|
| 138 | 149 | u32 opt; |
|---|
| .. | .. |
|---|
| 169 | 180 | struct list_head node; |
|---|
| 170 | 181 | enum dma_transfer_direction direction; |
|---|
| 171 | 182 | int cyclic; |
|---|
| 183 | + bool polled; |
|---|
| 172 | 184 | int absync; |
|---|
| 173 | 185 | int pset_nr; |
|---|
| 174 | 186 | struct edma_chan *echan; |
|---|
| .. | .. |
|---|
| 199 | 211 | u32 residue; |
|---|
| 200 | 212 | u32 residue_stat; |
|---|
| 201 | 213 | |
|---|
| 202 | | - struct edma_pset pset[0]; |
|---|
| 214 | + struct edma_pset pset[]; |
|---|
| 203 | 215 | }; |
|---|
| 204 | 216 | |
|---|
| 205 | 217 | struct edma_cc; |
|---|
| .. | .. |
|---|
| 247 | 259 | * in use by Linux or if it is allocated to be used by DSP. |
|---|
| 248 | 260 | */ |
|---|
| 249 | 261 | unsigned long *slot_inuse; |
|---|
| 262 | + |
|---|
| 263 | + /* |
|---|
| 264 | + * For tracking reserved channels used by DSP. |
|---|
| 265 | + * If the bit is cleared, the channel is allocated to be used by DSP |
|---|
| 266 | + * and Linux must not touch it. |
|---|
| 267 | + */ |
|---|
| 268 | + unsigned long *channels_mask; |
|---|
| 250 | 269 | |
|---|
| 251 | 270 | struct dma_device dma_slave; |
|---|
| 252 | 271 | struct dma_device *dma_memcpy; |
|---|
| .. | .. |
|---|
| 412 | 431 | edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or); |
|---|
| 413 | 432 | } |
|---|
| 414 | 433 | |
|---|
| 415 | | -static inline void edma_set_bits(int offset, int len, unsigned long *p) |
|---|
| 416 | | -{ |
|---|
| 417 | | - for (; len > 0; len--) |
|---|
| 418 | | - set_bit(offset + (len - 1), p); |
|---|
| 419 | | -} |
|---|
| 420 | | - |
|---|
| 421 | 434 | static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no, |
|---|
| 422 | 435 | int priority) |
|---|
| 423 | 436 | { |
|---|
| .. | .. |
|---|
| 441 | 454 | { |
|---|
| 442 | 455 | struct edma_cc *ecc = echan->ecc; |
|---|
| 443 | 456 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
|---|
| 457 | + int idx = EDMA_REG_ARRAY_INDEX(channel); |
|---|
| 458 | + int ch_bit = EDMA_CHANNEL_BIT(channel); |
|---|
| 444 | 459 | |
|---|
| 445 | 460 | if (enable) { |
|---|
| 446 | | - edma_shadow0_write_array(ecc, SH_ICR, channel >> 5, |
|---|
| 447 | | - BIT(channel & 0x1f)); |
|---|
| 448 | | - edma_shadow0_write_array(ecc, SH_IESR, channel >> 5, |
|---|
| 449 | | - BIT(channel & 0x1f)); |
|---|
| 461 | + edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit); |
|---|
| 462 | + edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit); |
|---|
| 450 | 463 | } else { |
|---|
| 451 | | - edma_shadow0_write_array(ecc, SH_IECR, channel >> 5, |
|---|
| 452 | | - BIT(channel & 0x1f)); |
|---|
| 464 | + edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit); |
|---|
| 453 | 465 | } |
|---|
| 454 | 466 | } |
|---|
| 455 | 467 | |
|---|
| .. | .. |
|---|
| 587 | 599 | { |
|---|
| 588 | 600 | struct edma_cc *ecc = echan->ecc; |
|---|
| 589 | 601 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
|---|
| 590 | | - int j = (channel >> 5); |
|---|
| 591 | | - unsigned int mask = BIT(channel & 0x1f); |
|---|
| 602 | + int idx = EDMA_REG_ARRAY_INDEX(channel); |
|---|
| 603 | + int ch_bit = EDMA_CHANNEL_BIT(channel); |
|---|
| 592 | 604 | |
|---|
| 593 | 605 | if (!echan->hw_triggered) { |
|---|
| 594 | 606 | /* EDMA channels without event association */ |
|---|
| 595 | | - dev_dbg(ecc->dev, "ESR%d %08x\n", j, |
|---|
| 596 | | - edma_shadow0_read_array(ecc, SH_ESR, j)); |
|---|
| 597 | | - edma_shadow0_write_array(ecc, SH_ESR, j, mask); |
|---|
| 607 | + dev_dbg(ecc->dev, "ESR%d %08x\n", idx, |
|---|
| 608 | + edma_shadow0_read_array(ecc, SH_ESR, idx)); |
|---|
| 609 | + edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit); |
|---|
| 598 | 610 | } else { |
|---|
| 599 | 611 | /* EDMA channel with event association */ |
|---|
| 600 | | - dev_dbg(ecc->dev, "ER%d %08x\n", j, |
|---|
| 601 | | - edma_shadow0_read_array(ecc, SH_ER, j)); |
|---|
| 612 | + dev_dbg(ecc->dev, "ER%d %08x\n", idx, |
|---|
| 613 | + edma_shadow0_read_array(ecc, SH_ER, idx)); |
|---|
| 602 | 614 | /* Clear any pending event or error */ |
|---|
| 603 | | - edma_write_array(ecc, EDMA_ECR, j, mask); |
|---|
| 604 | | - edma_write_array(ecc, EDMA_EMCR, j, mask); |
|---|
| 615 | + edma_write_array(ecc, EDMA_ECR, idx, ch_bit); |
|---|
| 616 | + edma_write_array(ecc, EDMA_EMCR, idx, ch_bit); |
|---|
| 605 | 617 | /* Clear any SER */ |
|---|
| 606 | | - edma_shadow0_write_array(ecc, SH_SECR, j, mask); |
|---|
| 607 | | - edma_shadow0_write_array(ecc, SH_EESR, j, mask); |
|---|
| 608 | | - dev_dbg(ecc->dev, "EER%d %08x\n", j, |
|---|
| 609 | | - edma_shadow0_read_array(ecc, SH_EER, j)); |
|---|
| 618 | + edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit); |
|---|
| 619 | + edma_shadow0_write_array(ecc, SH_EESR, idx, ch_bit); |
|---|
| 620 | + dev_dbg(ecc->dev, "EER%d %08x\n", idx, |
|---|
| 621 | + edma_shadow0_read_array(ecc, SH_EER, idx)); |
|---|
| 610 | 622 | } |
|---|
| 611 | 623 | } |
|---|
| 612 | 624 | |
|---|
| .. | .. |
|---|
| 614 | 626 | { |
|---|
| 615 | 627 | struct edma_cc *ecc = echan->ecc; |
|---|
| 616 | 628 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
|---|
| 617 | | - int j = (channel >> 5); |
|---|
| 618 | | - unsigned int mask = BIT(channel & 0x1f); |
|---|
| 629 | + int idx = EDMA_REG_ARRAY_INDEX(channel); |
|---|
| 630 | + int ch_bit = EDMA_CHANNEL_BIT(channel); |
|---|
| 619 | 631 | |
|---|
| 620 | | - edma_shadow0_write_array(ecc, SH_EECR, j, mask); |
|---|
| 621 | | - edma_shadow0_write_array(ecc, SH_ECR, j, mask); |
|---|
| 622 | | - edma_shadow0_write_array(ecc, SH_SECR, j, mask); |
|---|
| 623 | | - edma_write_array(ecc, EDMA_EMCR, j, mask); |
|---|
| 632 | + edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit); |
|---|
| 633 | + edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit); |
|---|
| 634 | + edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit); |
|---|
| 635 | + edma_write_array(ecc, EDMA_EMCR, idx, ch_bit); |
|---|
| 624 | 636 | |
|---|
| 625 | 637 | /* clear possibly pending completion interrupt */ |
|---|
| 626 | | - edma_shadow0_write_array(ecc, SH_ICR, j, mask); |
|---|
| 638 | + edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit); |
|---|
| 627 | 639 | |
|---|
| 628 | | - dev_dbg(ecc->dev, "EER%d %08x\n", j, |
|---|
| 629 | | - edma_shadow0_read_array(ecc, SH_EER, j)); |
|---|
| 640 | + dev_dbg(ecc->dev, "EER%d %08x\n", idx, |
|---|
| 641 | + edma_shadow0_read_array(ecc, SH_EER, idx)); |
|---|
| 630 | 642 | |
|---|
| 631 | 643 | /* REVISIT: consider guarding against inappropriate event |
|---|
| 632 | 644 | * chaining by overwriting with dummy_paramset. |
|---|
| .. | .. |
|---|
| 640 | 652 | static void edma_pause(struct edma_chan *echan) |
|---|
| 641 | 653 | { |
|---|
| 642 | 654 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
|---|
| 643 | | - unsigned int mask = BIT(channel & 0x1f); |
|---|
| 644 | 655 | |
|---|
| 645 | | - edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask); |
|---|
| 656 | + edma_shadow0_write_array(echan->ecc, SH_EECR, |
|---|
| 657 | + EDMA_REG_ARRAY_INDEX(channel), |
|---|
| 658 | + EDMA_CHANNEL_BIT(channel)); |
|---|
| 646 | 659 | } |
|---|
| 647 | 660 | |
|---|
| 648 | 661 | /* Re-enable EDMA hardware events on the specified channel. */ |
|---|
| 649 | 662 | static void edma_resume(struct edma_chan *echan) |
|---|
| 650 | 663 | { |
|---|
| 651 | 664 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
|---|
| 652 | | - unsigned int mask = BIT(channel & 0x1f); |
|---|
| 653 | 665 | |
|---|
| 654 | | - edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask); |
|---|
| 666 | + edma_shadow0_write_array(echan->ecc, SH_EESR, |
|---|
| 667 | + EDMA_REG_ARRAY_INDEX(channel), |
|---|
| 668 | + EDMA_CHANNEL_BIT(channel)); |
|---|
| 655 | 669 | } |
|---|
| 656 | 670 | |
|---|
| 657 | 671 | static void edma_trigger_channel(struct edma_chan *echan) |
|---|
| 658 | 672 | { |
|---|
| 659 | 673 | struct edma_cc *ecc = echan->ecc; |
|---|
| 660 | 674 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
|---|
| 661 | | - unsigned int mask = BIT(channel & 0x1f); |
|---|
| 675 | + int idx = EDMA_REG_ARRAY_INDEX(channel); |
|---|
| 676 | + int ch_bit = EDMA_CHANNEL_BIT(channel); |
|---|
| 662 | 677 | |
|---|
| 663 | | - edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask); |
|---|
| 678 | + edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit); |
|---|
| 664 | 679 | |
|---|
| 665 | | - dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5), |
|---|
| 666 | | - edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5))); |
|---|
| 680 | + dev_dbg(ecc->dev, "ESR%d %08x\n", idx, |
|---|
| 681 | + edma_shadow0_read_array(ecc, SH_ESR, idx)); |
|---|
| 667 | 682 | } |
|---|
| 668 | 683 | |
|---|
| 669 | 684 | static void edma_clean_channel(struct edma_chan *echan) |
|---|
| 670 | 685 | { |
|---|
| 671 | 686 | struct edma_cc *ecc = echan->ecc; |
|---|
| 672 | 687 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
|---|
| 673 | | - int j = (channel >> 5); |
|---|
| 674 | | - unsigned int mask = BIT(channel & 0x1f); |
|---|
| 688 | + int idx = EDMA_REG_ARRAY_INDEX(channel); |
|---|
| 689 | + int ch_bit = EDMA_CHANNEL_BIT(channel); |
|---|
| 675 | 690 | |
|---|
| 676 | | - dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j)); |
|---|
| 677 | | - edma_shadow0_write_array(ecc, SH_ECR, j, mask); |
|---|
| 691 | + dev_dbg(ecc->dev, "EMR%d %08x\n", idx, |
|---|
| 692 | + edma_read_array(ecc, EDMA_EMR, idx)); |
|---|
| 693 | + edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit); |
|---|
| 678 | 694 | /* Clear the corresponding EMR bits */ |
|---|
| 679 | | - edma_write_array(ecc, EDMA_EMCR, j, mask); |
|---|
| 695 | + edma_write_array(ecc, EDMA_EMCR, idx, ch_bit); |
|---|
| 680 | 696 | /* Clear any SER */ |
|---|
| 681 | | - edma_shadow0_write_array(ecc, SH_SECR, j, mask); |
|---|
| 697 | + edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit); |
|---|
| 682 | 698 | edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0)); |
|---|
| 683 | 699 | } |
|---|
| 684 | 700 | |
|---|
| .. | .. |
|---|
| 707 | 723 | struct edma_cc *ecc = echan->ecc; |
|---|
| 708 | 724 | int channel = EDMA_CHAN_SLOT(echan->ch_num); |
|---|
| 709 | 725 | |
|---|
| 726 | + if (!test_bit(echan->ch_num, ecc->channels_mask)) { |
|---|
| 727 | + dev_err(ecc->dev, "Channel%d is reserved, can not be used!\n", |
|---|
| 728 | + echan->ch_num); |
|---|
| 729 | + return -EINVAL; |
|---|
| 730 | + } |
|---|
| 731 | + |
|---|
| 710 | 732 | /* ensure access through shadow region 0 */ |
|---|
| 711 | | - edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f)); |
|---|
| 733 | + edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel), |
|---|
| 734 | + EDMA_CHANNEL_BIT(channel)); |
|---|
| 712 | 735 | |
|---|
| 713 | 736 | /* ensure no events are pending */ |
|---|
| 714 | 737 | edma_stop(echan); |
|---|
| .. | .. |
|---|
| 1011 | 1034 | src_cidx = cidx; |
|---|
| 1012 | 1035 | dst_bidx = acnt; |
|---|
| 1013 | 1036 | dst_cidx = cidx; |
|---|
| 1037 | + epset->addr = src_addr; |
|---|
| 1014 | 1038 | } else { |
|---|
| 1015 | 1039 | dev_err(dev, "%s: direction not implemented yet\n", __func__); |
|---|
| 1016 | 1040 | return -EINVAL; |
|---|
| .. | .. |
|---|
| 1211 | 1235 | |
|---|
| 1212 | 1236 | edesc->pset[0].param.opt |= ITCCHEN; |
|---|
| 1213 | 1237 | if (nslots == 1) { |
|---|
| 1214 | | - /* Enable transfer complete interrupt */ |
|---|
| 1215 | | - edesc->pset[0].param.opt |= TCINTEN; |
|---|
| 1238 | + /* Enable transfer complete interrupt if requested */ |
|---|
| 1239 | + if (tx_flags & DMA_PREP_INTERRUPT) |
|---|
| 1240 | + edesc->pset[0].param.opt |= TCINTEN; |
|---|
| 1216 | 1241 | } else { |
|---|
| 1217 | 1242 | /* Enable transfer complete chaining for the first slot */ |
|---|
| 1218 | 1243 | edesc->pset[0].param.opt |= TCCHEN; |
|---|
| .. | .. |
|---|
| 1239 | 1264 | } |
|---|
| 1240 | 1265 | |
|---|
| 1241 | 1266 | edesc->pset[1].param.opt |= ITCCHEN; |
|---|
| 1242 | | - edesc->pset[1].param.opt |= TCINTEN; |
|---|
| 1267 | + /* Enable transfer complete interrupt if requested */ |
|---|
| 1268 | + if (tx_flags & DMA_PREP_INTERRUPT) |
|---|
| 1269 | + edesc->pset[1].param.opt |= TCINTEN; |
|---|
| 1243 | 1270 | } |
|---|
| 1271 | + |
|---|
| 1272 | + if (!(tx_flags & DMA_PREP_INTERRUPT)) |
|---|
| 1273 | + edesc->polled = true; |
|---|
| 1274 | + |
|---|
| 1275 | + return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); |
|---|
| 1276 | +} |
|---|
| 1277 | + |
|---|
| 1278 | +static struct dma_async_tx_descriptor * |
|---|
| 1279 | +edma_prep_dma_interleaved(struct dma_chan *chan, |
|---|
| 1280 | + struct dma_interleaved_template *xt, |
|---|
| 1281 | + unsigned long tx_flags) |
|---|
| 1282 | +{ |
|---|
| 1283 | + struct device *dev = chan->device->dev; |
|---|
| 1284 | + struct edma_chan *echan = to_edma_chan(chan); |
|---|
| 1285 | + struct edmacc_param *param; |
|---|
| 1286 | + struct edma_desc *edesc; |
|---|
| 1287 | + size_t src_icg, dst_icg; |
|---|
| 1288 | + int src_bidx, dst_bidx; |
|---|
| 1289 | + |
|---|
| 1290 | + /* Slave mode is not supported */ |
|---|
| 1291 | + if (is_slave_direction(xt->dir)) |
|---|
| 1292 | + return NULL; |
|---|
| 1293 | + |
|---|
| 1294 | + if (xt->frame_size != 1 || xt->numf == 0) |
|---|
| 1295 | + return NULL; |
|---|
| 1296 | + |
|---|
| 1297 | + if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K) |
|---|
| 1298 | + return NULL; |
|---|
| 1299 | + |
|---|
| 1300 | + src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]); |
|---|
| 1301 | + if (src_icg) { |
|---|
| 1302 | + src_bidx = src_icg + xt->sgl[0].size; |
|---|
| 1303 | + } else if (xt->src_inc) { |
|---|
| 1304 | + src_bidx = xt->sgl[0].size; |
|---|
| 1305 | + } else { |
|---|
| 1306 | + dev_err(dev, "%s: SRC constant addressing is not supported\n", |
|---|
| 1307 | + __func__); |
|---|
| 1308 | + return NULL; |
|---|
| 1309 | + } |
|---|
| 1310 | + |
|---|
| 1311 | + dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]); |
|---|
| 1312 | + if (dst_icg) { |
|---|
| 1313 | + dst_bidx = dst_icg + xt->sgl[0].size; |
|---|
| 1314 | + } else if (xt->dst_inc) { |
|---|
| 1315 | + dst_bidx = xt->sgl[0].size; |
|---|
| 1316 | + } else { |
|---|
| 1317 | + dev_err(dev, "%s: DST constant addressing is not supported\n", |
|---|
| 1318 | + __func__); |
|---|
| 1319 | + return NULL; |
|---|
| 1320 | + } |
|---|
| 1321 | + |
|---|
| 1322 | + if (src_bidx > SZ_64K || dst_bidx > SZ_64K) |
|---|
| 1323 | + return NULL; |
|---|
| 1324 | + |
|---|
| 1325 | + edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC); |
|---|
| 1326 | + if (!edesc) |
|---|
| 1327 | + return NULL; |
|---|
| 1328 | + |
|---|
| 1329 | + edesc->direction = DMA_MEM_TO_MEM; |
|---|
| 1330 | + edesc->echan = echan; |
|---|
| 1331 | + edesc->pset_nr = 1; |
|---|
| 1332 | + |
|---|
| 1333 | + param = &edesc->pset[0].param; |
|---|
| 1334 | + |
|---|
| 1335 | + param->src = xt->src_start; |
|---|
| 1336 | + param->dst = xt->dst_start; |
|---|
| 1337 | + param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size; |
|---|
| 1338 | + param->ccnt = 1; |
|---|
| 1339 | + param->src_dst_bidx = (dst_bidx << 16) | src_bidx; |
|---|
| 1340 | + param->src_dst_cidx = 0; |
|---|
| 1341 | + |
|---|
| 1342 | + param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); |
|---|
| 1343 | + param->opt |= ITCCHEN; |
|---|
| 1344 | + /* Enable transfer complete interrupt if requested */ |
|---|
| 1345 | + if (tx_flags & DMA_PREP_INTERRUPT) |
|---|
| 1346 | + param->opt |= TCINTEN; |
|---|
| 1347 | + else |
|---|
| 1348 | + edesc->polled = true; |
|---|
| 1244 | 1349 | |
|---|
| 1245 | 1350 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); |
|---|
| 1246 | 1351 | } |
|---|
| .. | .. |
|---|
| 1721 | 1826 | int loop_count = EDMA_MAX_TR_WAIT_LOOPS; |
|---|
| 1722 | 1827 | struct edma_chan *echan = edesc->echan; |
|---|
| 1723 | 1828 | struct edma_pset *pset = edesc->pset; |
|---|
| 1724 | | - dma_addr_t done, pos; |
|---|
| 1829 | + dma_addr_t done, pos, pos_old; |
|---|
| 1830 | + int channel = EDMA_CHAN_SLOT(echan->ch_num); |
|---|
| 1831 | + int idx = EDMA_REG_ARRAY_INDEX(channel); |
|---|
| 1832 | + int ch_bit = EDMA_CHANNEL_BIT(channel); |
|---|
| 1833 | + int event_reg; |
|---|
| 1725 | 1834 | int i; |
|---|
| 1726 | 1835 | |
|---|
| 1727 | 1836 | /* |
|---|
| .. | .. |
|---|
| 1734 | 1843 | * "pos" may represent a transfer request that is still being |
|---|
| 1735 | 1844 | * processed by the EDMACC or EDMATC. We will busy wait until |
|---|
| 1736 | 1845 | * any one of the situations occurs: |
|---|
| 1737 | | - * 1. the DMA hardware is idle |
|---|
| 1738 | | - * 2. a new transfer request is setup |
|---|
| 1846 | + * 1. while an event is pending for the channel |
|---|
| 1847 | + * 2. the position has been updated |
|---|
| 1739 | 1848 | * 3. we hit the loop limit |
|---|
| 1740 | 1849 | */ |
|---|
| 1741 | | - while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) { |
|---|
| 1742 | | - /* check if a new transfer request is setup */ |
|---|
| 1743 | | - if (edma_get_position(echan->ecc, |
|---|
| 1744 | | - echan->slot[0], dst) != pos) { |
|---|
| 1850 | + if (is_slave_direction(edesc->direction)) |
|---|
| 1851 | + event_reg = SH_ER; |
|---|
| 1852 | + else |
|---|
| 1853 | + event_reg = SH_ESR; |
|---|
| 1854 | + |
|---|
| 1855 | + pos_old = pos; |
|---|
| 1856 | + while (edma_shadow0_read_array(echan->ecc, event_reg, idx) & ch_bit) { |
|---|
| 1857 | + pos = edma_get_position(echan->ecc, echan->slot[0], dst); |
|---|
| 1858 | + if (pos != pos_old) |
|---|
| 1745 | 1859 | break; |
|---|
| 1746 | | - } |
|---|
| 1747 | 1860 | |
|---|
| 1748 | 1861 | if (!--loop_count) { |
|---|
| 1749 | 1862 | dev_dbg_ratelimited(echan->vchan.chan.device->dev, |
|---|
| .. | .. |
|---|
| 1768 | 1881 | return edesc->residue_stat; |
|---|
| 1769 | 1882 | } |
|---|
| 1770 | 1883 | |
|---|
| 1884 | + /* |
|---|
| 1885 | + * If the position is 0, then EDMA loaded the closing dummy slot, the |
|---|
| 1886 | + * transfer is completed |
|---|
| 1887 | + */ |
|---|
| 1888 | + if (!pos) |
|---|
| 1889 | + return 0; |
|---|
| 1771 | 1890 | /* |
|---|
| 1772 | 1891 | * For SG operation we catch up with the last processed |
|---|
| 1773 | 1892 | * status. |
|---|
| .. | .. |
|---|
| 1796 | 1915 | struct dma_tx_state *txstate) |
|---|
| 1797 | 1916 | { |
|---|
| 1798 | 1917 | struct edma_chan *echan = to_edma_chan(chan); |
|---|
| 1799 | | - struct virt_dma_desc *vdesc; |
|---|
| 1918 | + struct dma_tx_state txstate_tmp; |
|---|
| 1800 | 1919 | enum dma_status ret; |
|---|
| 1801 | 1920 | unsigned long flags; |
|---|
| 1802 | 1921 | |
|---|
| 1803 | 1922 | ret = dma_cookie_status(chan, cookie, txstate); |
|---|
| 1804 | | - if (ret == DMA_COMPLETE || !txstate) |
|---|
| 1923 | + |
|---|
| 1924 | + if (ret == DMA_COMPLETE) |
|---|
| 1805 | 1925 | return ret; |
|---|
| 1806 | 1926 | |
|---|
| 1927 | + /* Provide a dummy dma_tx_state for completion checking */ |
|---|
| 1928 | + if (!txstate) |
|---|
| 1929 | + txstate = &txstate_tmp; |
|---|
| 1930 | + |
|---|
| 1807 | 1931 | spin_lock_irqsave(&echan->vchan.lock, flags); |
|---|
| 1808 | | - if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) |
|---|
| 1932 | + if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) { |
|---|
| 1809 | 1933 | txstate->residue = edma_residue(echan->edesc); |
|---|
| 1810 | | - else if ((vdesc = vchan_find_desc(&echan->vchan, cookie))) |
|---|
| 1811 | | - txstate->residue = to_edma_desc(&vdesc->tx)->residue; |
|---|
| 1934 | + } else { |
|---|
| 1935 | + struct virt_dma_desc *vdesc = vchan_find_desc(&echan->vchan, |
|---|
| 1936 | + cookie); |
|---|
| 1937 | + |
|---|
| 1938 | + if (vdesc) |
|---|
| 1939 | + txstate->residue = to_edma_desc(&vdesc->tx)->residue; |
|---|
| 1940 | + else |
|---|
| 1941 | + txstate->residue = 0; |
|---|
| 1942 | + } |
|---|
| 1943 | + |
|---|
| 1944 | + /* |
|---|
| 1945 | + * Mark the cookie completed if the residue is 0 for non cyclic |
|---|
| 1946 | + * transfers |
|---|
| 1947 | + */ |
|---|
| 1948 | + if (ret != DMA_COMPLETE && !txstate->residue && |
|---|
| 1949 | + echan->edesc && echan->edesc->polled && |
|---|
| 1950 | + echan->edesc->vdesc.tx.cookie == cookie) { |
|---|
| 1951 | + edma_stop(echan); |
|---|
| 1952 | + vchan_cookie_complete(&echan->edesc->vdesc); |
|---|
| 1953 | + echan->edesc = NULL; |
|---|
| 1954 | + edma_execute(echan); |
|---|
| 1955 | + ret = DMA_COMPLETE; |
|---|
| 1956 | + } |
|---|
| 1957 | + |
|---|
| 1812 | 1958 | spin_unlock_irqrestore(&echan->vchan.lock, flags); |
|---|
| 1813 | 1959 | |
|---|
| 1814 | 1960 | return ret; |
|---|
| .. | .. |
|---|
| 1846 | 1992 | "Legacy memcpy is enabled, things might not work\n"); |
|---|
| 1847 | 1993 | |
|---|
| 1848 | 1994 | dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask); |
|---|
| 1995 | + dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask); |
|---|
| 1849 | 1996 | s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; |
|---|
| 1997 | + s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved; |
|---|
| 1850 | 1998 | s_ddev->directions = BIT(DMA_MEM_TO_MEM); |
|---|
| 1851 | 1999 | } |
|---|
| 1852 | 2000 | |
|---|
| .. | .. |
|---|
| 1882 | 2030 | |
|---|
| 1883 | 2031 | dma_cap_zero(m_ddev->cap_mask); |
|---|
| 1884 | 2032 | dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask); |
|---|
| 2033 | + dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask); |
|---|
| 1885 | 2034 | |
|---|
| 1886 | 2035 | m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; |
|---|
| 2036 | + m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved; |
|---|
| 1887 | 2037 | m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; |
|---|
| 1888 | 2038 | m_ddev->device_free_chan_resources = edma_free_chan_resources; |
|---|
| 1889 | 2039 | m_ddev->device_issue_pending = edma_issue_pending; |
|---|
| .. | .. |
|---|
| 2185 | 2335 | } |
|---|
| 2186 | 2336 | #endif |
|---|
| 2187 | 2337 | |
|---|
| 2338 | +static bool edma_filter_fn(struct dma_chan *chan, void *param); |
|---|
| 2339 | + |
|---|
| 2188 | 2340 | static int edma_probe(struct platform_device *pdev) |
|---|
| 2189 | 2341 | { |
|---|
| 2190 | 2342 | struct edma_soc_info *info = pdev->dev.platform_data; |
|---|
| 2191 | 2343 | s8 (*queue_priority_mapping)[2]; |
|---|
| 2192 | | - int i, off, ln; |
|---|
| 2193 | | - const s16 (*rsv_slots)[2]; |
|---|
| 2194 | | - const s16 (*xbar_chans)[2]; |
|---|
| 2195 | | - int irq; |
|---|
| 2344 | + const s16 (*reserved)[2]; |
|---|
| 2345 | + int i, irq; |
|---|
| 2196 | 2346 | char *irq_name; |
|---|
| 2197 | 2347 | struct resource *mem; |
|---|
| 2198 | 2348 | struct device_node *node = pdev->dev.of_node; |
|---|
| .. | .. |
|---|
| 2217 | 2367 | |
|---|
| 2218 | 2368 | if (!info) |
|---|
| 2219 | 2369 | return -ENODEV; |
|---|
| 2220 | | - |
|---|
| 2221 | | - pm_runtime_enable(dev); |
|---|
| 2222 | | - ret = pm_runtime_get_sync(dev); |
|---|
| 2223 | | - if (ret < 0) { |
|---|
| 2224 | | - dev_err(dev, "pm_runtime_get_sync() failed\n"); |
|---|
| 2225 | | - return ret; |
|---|
| 2226 | | - } |
|---|
| 2227 | 2370 | |
|---|
| 2228 | 2371 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); |
|---|
| 2229 | 2372 | if (ret) |
|---|
| .. | .. |
|---|
| 2255 | 2398 | |
|---|
| 2256 | 2399 | platform_set_drvdata(pdev, ecc); |
|---|
| 2257 | 2400 | |
|---|
| 2401 | + pm_runtime_enable(dev); |
|---|
| 2402 | + ret = pm_runtime_get_sync(dev); |
|---|
| 2403 | + if (ret < 0) { |
|---|
| 2404 | + dev_err(dev, "pm_runtime_get_sync() failed\n"); |
|---|
| 2405 | + pm_runtime_disable(dev); |
|---|
| 2406 | + return ret; |
|---|
| 2407 | + } |
|---|
| 2408 | + |
|---|
| 2258 | 2409 | /* Get eDMA3 configuration from IP */ |
|---|
| 2259 | 2410 | ret = edma_setup_from_hw(dev, info, ecc); |
|---|
| 2260 | 2411 | if (ret) |
|---|
| 2261 | | - return ret; |
|---|
| 2412 | + goto err_disable_pm; |
|---|
| 2262 | 2413 | |
|---|
| 2263 | 2414 | /* Allocate memory based on the information we got from the IP */ |
|---|
| 2264 | 2415 | ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels, |
|---|
| 2265 | 2416 | sizeof(*ecc->slave_chans), GFP_KERNEL); |
|---|
| 2266 | | - if (!ecc->slave_chans) |
|---|
| 2267 | | - return -ENOMEM; |
|---|
| 2268 | 2417 | |
|---|
| 2269 | 2418 | ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots), |
|---|
| 2270 | 2419 | sizeof(unsigned long), GFP_KERNEL); |
|---|
| 2271 | | - if (!ecc->slot_inuse) |
|---|
| 2272 | | - return -ENOMEM; |
|---|
| 2420 | + |
|---|
| 2421 | + ecc->channels_mask = devm_kcalloc(dev, |
|---|
| 2422 | + BITS_TO_LONGS(ecc->num_channels), |
|---|
| 2423 | + sizeof(unsigned long), GFP_KERNEL); |
|---|
| 2424 | + if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) { |
|---|
| 2425 | + ret = -ENOMEM; |
|---|
| 2426 | + goto err_disable_pm; |
|---|
| 2427 | + } |
|---|
| 2428 | + |
|---|
| 2429 | + /* Mark all channels available initially */ |
|---|
| 2430 | + bitmap_fill(ecc->channels_mask, ecc->num_channels); |
|---|
| 2273 | 2431 | |
|---|
| 2274 | 2432 | ecc->default_queue = info->default_queue; |
|---|
| 2275 | 2433 | |
|---|
| 2276 | 2434 | if (info->rsv) { |
|---|
| 2277 | 2435 | /* Set the reserved slots in inuse list */ |
|---|
| 2278 | | - rsv_slots = info->rsv->rsv_slots; |
|---|
| 2279 | | - if (rsv_slots) { |
|---|
| 2280 | | - for (i = 0; rsv_slots[i][0] != -1; i++) { |
|---|
| 2281 | | - off = rsv_slots[i][0]; |
|---|
| 2282 | | - ln = rsv_slots[i][1]; |
|---|
| 2283 | | - edma_set_bits(off, ln, ecc->slot_inuse); |
|---|
| 2284 | | - } |
|---|
| 2436 | + reserved = info->rsv->rsv_slots; |
|---|
| 2437 | + if (reserved) { |
|---|
| 2438 | + for (i = 0; reserved[i][0] != -1; i++) |
|---|
| 2439 | + bitmap_set(ecc->slot_inuse, reserved[i][0], |
|---|
| 2440 | + reserved[i][1]); |
|---|
| 2441 | + } |
|---|
| 2442 | + |
|---|
| 2443 | + /* Clear channels not usable for Linux */ |
|---|
| 2444 | + reserved = info->rsv->rsv_chans; |
|---|
| 2445 | + if (reserved) { |
|---|
| 2446 | + for (i = 0; reserved[i][0] != -1; i++) |
|---|
| 2447 | + bitmap_clear(ecc->channels_mask, reserved[i][0], |
|---|
| 2448 | + reserved[i][1]); |
|---|
| 2285 | 2449 | } |
|---|
| 2286 | 2450 | } |
|---|
| 2287 | 2451 | |
|---|
| .. | .. |
|---|
| 2289 | 2453 | /* Reset only unused - not reserved - paRAM slots */ |
|---|
| 2290 | 2454 | if (!test_bit(i, ecc->slot_inuse)) |
|---|
| 2291 | 2455 | edma_write_slot(ecc, i, &dummy_paramset); |
|---|
| 2292 | | - } |
|---|
| 2293 | | - |
|---|
| 2294 | | - /* Clear the xbar mapped channels in unused list */ |
|---|
| 2295 | | - xbar_chans = info->xbar_chans; |
|---|
| 2296 | | - if (xbar_chans) { |
|---|
| 2297 | | - for (i = 0; xbar_chans[i][1] != -1; i++) { |
|---|
| 2298 | | - off = xbar_chans[i][1]; |
|---|
| 2299 | | - } |
|---|
| 2300 | 2456 | } |
|---|
| 2301 | 2457 | |
|---|
| 2302 | 2458 | irq = platform_get_irq_byname(pdev, "edma3_ccint"); |
|---|
| .. | .. |
|---|
| 2310 | 2466 | ecc); |
|---|
| 2311 | 2467 | if (ret) { |
|---|
| 2312 | 2468 | dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); |
|---|
| 2313 | | - return ret; |
|---|
| 2469 | + goto err_disable_pm; |
|---|
| 2314 | 2470 | } |
|---|
| 2315 | 2471 | ecc->ccint = irq; |
|---|
| 2316 | 2472 | } |
|---|
| .. | .. |
|---|
| 2326 | 2482 | ecc); |
|---|
| 2327 | 2483 | if (ret) { |
|---|
| 2328 | 2484 | dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); |
|---|
| 2329 | | - return ret; |
|---|
| 2485 | + goto err_disable_pm; |
|---|
| 2330 | 2486 | } |
|---|
| 2331 | 2487 | ecc->ccerrint = irq; |
|---|
| 2332 | 2488 | } |
|---|
| .. | .. |
|---|
| 2334 | 2490 | ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); |
|---|
| 2335 | 2491 | if (ecc->dummy_slot < 0) { |
|---|
| 2336 | 2492 | dev_err(dev, "Can't allocate PaRAM dummy slot\n"); |
|---|
| 2337 | | - return ecc->dummy_slot; |
|---|
| 2493 | + ret = ecc->dummy_slot; |
|---|
| 2494 | + goto err_disable_pm; |
|---|
| 2338 | 2495 | } |
|---|
| 2339 | 2496 | |
|---|
| 2340 | 2497 | queue_priority_mapping = info->queue_priority_mapping; |
|---|
| 2341 | 2498 | |
|---|
| 2342 | 2499 | if (!ecc->legacy_mode) { |
|---|
| 2343 | 2500 | int lowest_priority = 0; |
|---|
| 2501 | + unsigned int array_max; |
|---|
| 2344 | 2502 | struct of_phandle_args tc_args; |
|---|
| 2345 | 2503 | |
|---|
| 2346 | 2504 | ecc->tc_list = devm_kcalloc(dev, ecc->num_tc, |
|---|
| .. | .. |
|---|
| 2364 | 2522 | info->default_queue = i; |
|---|
| 2365 | 2523 | } |
|---|
| 2366 | 2524 | } |
|---|
| 2525 | + |
|---|
| 2526 | + /* See if we have optional dma-channel-mask array */ |
|---|
| 2527 | + array_max = DIV_ROUND_UP(ecc->num_channels, BITS_PER_TYPE(u32)); |
|---|
| 2528 | + ret = of_property_read_variable_u32_array(node, |
|---|
| 2529 | + "dma-channel-mask", |
|---|
| 2530 | + (u32 *)ecc->channels_mask, |
|---|
| 2531 | + 1, array_max); |
|---|
| 2532 | + if (ret > 0 && ret != array_max) |
|---|
| 2533 | + dev_warn(dev, "dma-channel-mask is not complete.\n"); |
|---|
| 2534 | + else if (ret == -EOVERFLOW || ret == -ENODATA) |
|---|
| 2535 | + dev_warn(dev, |
|---|
| 2536 | + "dma-channel-mask is out of range or empty\n"); |
|---|
| 2367 | 2537 | } |
|---|
| 2368 | 2538 | |
|---|
| 2369 | 2539 | /* Event queue priority mapping */ |
|---|
| .. | .. |
|---|
| 2371 | 2541 | edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], |
|---|
| 2372 | 2542 | queue_priority_mapping[i][1]); |
|---|
| 2373 | 2543 | |
|---|
| 2374 | | - for (i = 0; i < ecc->num_region; i++) { |
|---|
| 2375 | | - edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0); |
|---|
| 2376 | | - edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0); |
|---|
| 2377 | | - edma_write_array(ecc, EDMA_QRAE, i, 0x0); |
|---|
| 2378 | | - } |
|---|
| 2544 | + edma_write_array2(ecc, EDMA_DRAE, 0, 0, 0x0); |
|---|
| 2545 | + edma_write_array2(ecc, EDMA_DRAE, 0, 1, 0x0); |
|---|
| 2546 | + edma_write_array(ecc, EDMA_QRAE, 0, 0x0); |
|---|
| 2547 | + |
|---|
| 2379 | 2548 | ecc->info = info; |
|---|
| 2380 | 2549 | |
|---|
| 2381 | 2550 | /* Init the dma device and channels */ |
|---|
| 2382 | 2551 | edma_dma_init(ecc, legacy_mode); |
|---|
| 2383 | 2552 | |
|---|
| 2384 | 2553 | for (i = 0; i < ecc->num_channels; i++) { |
|---|
| 2554 | + /* Do not touch reserved channels */ |
|---|
| 2555 | + if (!test_bit(i, ecc->channels_mask)) |
|---|
| 2556 | + continue; |
|---|
| 2557 | + |
|---|
| 2385 | 2558 | /* Assign all channels to the default queue */ |
|---|
| 2386 | 2559 | edma_assign_channel_eventq(&ecc->slave_chans[i], |
|---|
| 2387 | 2560 | info->default_queue); |
|---|
| .. | .. |
|---|
| 2418 | 2591 | |
|---|
| 2419 | 2592 | err_reg1: |
|---|
| 2420 | 2593 | edma_free_slot(ecc, ecc->dummy_slot); |
|---|
| 2594 | +err_disable_pm: |
|---|
| 2595 | + pm_runtime_put_sync(dev); |
|---|
| 2596 | + pm_runtime_disable(dev); |
|---|
| 2421 | 2597 | return ret; |
|---|
| 2422 | 2598 | } |
|---|
| 2423 | 2599 | |
|---|
| .. | .. |
|---|
| 2448 | 2624 | if (ecc->dma_memcpy) |
|---|
| 2449 | 2625 | dma_async_device_unregister(ecc->dma_memcpy); |
|---|
| 2450 | 2626 | edma_free_slot(ecc, ecc->dummy_slot); |
|---|
| 2627 | + pm_runtime_put_sync(dev); |
|---|
| 2628 | + pm_runtime_disable(dev); |
|---|
| 2451 | 2629 | |
|---|
| 2452 | 2630 | return 0; |
|---|
| 2453 | 2631 | } |
|---|
| .. | .. |
|---|
| 2487 | 2665 | for (i = 0; i < ecc->num_channels; i++) { |
|---|
| 2488 | 2666 | if (echan[i].alloced) { |
|---|
| 2489 | 2667 | /* ensure access through shadow region 0 */ |
|---|
| 2490 | | - edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5, |
|---|
| 2491 | | - BIT(i & 0x1f)); |
|---|
| 2668 | + edma_or_array2(ecc, EDMA_DRAE, 0, |
|---|
| 2669 | + EDMA_REG_ARRAY_INDEX(i), |
|---|
| 2670 | + EDMA_CHANNEL_BIT(i)); |
|---|
| 2492 | 2671 | |
|---|
| 2493 | 2672 | edma_setup_interrupt(&echan[i], true); |
|---|
| 2494 | 2673 | |
|---|
| .. | .. |
|---|
| 2529 | 2708 | }, |
|---|
| 2530 | 2709 | }; |
|---|
| 2531 | 2710 | |
|---|
| 2532 | | -bool edma_filter_fn(struct dma_chan *chan, void *param) |
|---|
| 2711 | +static bool edma_filter_fn(struct dma_chan *chan, void *param) |
|---|
| 2533 | 2712 | { |
|---|
| 2534 | 2713 | bool match = false; |
|---|
| 2535 | 2714 | |
|---|
| .. | .. |
|---|
| 2544 | 2723 | } |
|---|
| 2545 | 2724 | return match; |
|---|
| 2546 | 2725 | } |
|---|
| 2547 | | -EXPORT_SYMBOL(edma_filter_fn); |
|---|
| 2548 | 2726 | |
|---|
| 2549 | 2727 | static int edma_init(void) |
|---|
| 2550 | 2728 | { |
|---|