```diff
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Driver For Marvell Two-channel DMA Engine
  *
  * Copyright: Marvell International Ltd.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
  */
 
 #include <linux/err.h>
@@ -116,6 +112,7 @@
 	u32 burst_sz;
 	enum dma_slave_buswidth buswidth;
 	enum dma_status status;
+	struct dma_slave_config slave_config;
 
 	int idx;
 	enum mmp_tdma_type type;
@@ -138,6 +135,10 @@
 };
 
 #define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
+
+static int mmp_tdma_config_write(struct dma_chan *chan,
+				 enum dma_transfer_direction dir,
+				 struct dma_slave_config *dmaengine_cfg);
 
 static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
 {
```
```diff
@@ -234,7 +235,7 @@
 			tdcr |= TDCR_BURSTSZ_128B;
 			break;
 		default:
-			dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+			dev_err(tdmac->dev, "unknown burst size.\n");
 			return -EINVAL;
 		}
 
@@ -249,7 +250,7 @@
 			tdcr |= TDCR_SSZ_32_BITS;
 			break;
 		default:
-			dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n");
+			dev_err(tdmac->dev, "unknown bus size.\n");
 			return -EINVAL;
 		}
 	} else if (tdmac->type == PXA910_SQU) {
@@ -275,7 +276,7 @@
 			tdcr |= TDCR_BURSTSZ_SQU_32B;
 			break;
 		default:
-			dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+			dev_err(tdmac->dev, "unknown burst size.\n");
 			return -EINVAL;
 		}
 	}
@@ -345,9 +346,9 @@
 		return IRQ_NONE;
 }
 
-static void dma_do_tasklet(unsigned long data)
+static void dma_do_tasklet(struct tasklet_struct *t)
 {
-	struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;
+	struct mmp_tdma_chan *tdmac = from_tasklet(tdmac, t, tasklet);
 
 	dmaengine_desc_get_callback_invoke(&tdmac->desc, NULL);
 }
```
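For context, the tasklet change above follows the generic tasklet_setup()/from_tasklet() conversion, where the callback receives a struct tasklet_struct * instead of an unsigned long cookie. A minimal sketch of that pattern, using a hypothetical my_chan structure rather than this driver's types:

```c
#include <linux/interrupt.h>

/* Hypothetical example structure; any struct embedding a tasklet works. */
struct my_chan {
	int id;
	struct tasklet_struct tasklet;
};

/* New-style callback: receives the tasklet pointer, not a cast cookie. */
static void my_chan_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() keyed on the embedded member. */
	struct my_chan *chan = from_tasklet(chan, t, tasklet);

	pr_debug("tasklet ran for channel %d\n", chan->id);
}

static void my_chan_init(struct my_chan *chan)
{
	/* tasklet_setup() replaces tasklet_init(..., (unsigned long)chan). */
	tasklet_setup(&chan->tasklet, my_chan_tasklet);
}
```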
```diff
@@ -428,8 +429,15 @@
 	int num_periods = buf_len / period_len;
 	int i = 0, buf = 0;
 
-	if (tdmac->status != DMA_COMPLETE)
+	if (!is_slave_direction(direction)) {
+		dev_err(tdmac->dev, "unsupported transfer direction\n");
 		return NULL;
+	}
+
+	if (tdmac->status != DMA_COMPLETE) {
+		dev_err(tdmac->dev, "controller busy");
+		return NULL;
+	}
 
 	if (period_len > TDMA_MAX_XFER_BYTES) {
 		dev_err(tdmac->dev,
@@ -442,6 +450,9 @@
 	tdmac->desc_num = num_periods;
 	desc = mmp_tdma_alloc_descriptor(tdmac);
 	if (!desc)
+		goto err_out;
+
+	if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
 		goto err_out;
 
 	while (buf < buf_len) {
```
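The new direction check uses the is_slave_direction() helper from <linux/dmaengine.h>; in contemporary kernels it reduces to roughly the following (shown as a reminder, not as part of this patch):

```c
/* A slave transfer moves data between memory and a device. */
static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
	return (direction == DMA_MEM_TO_DEV) ||
	       (direction == DMA_DEV_TO_MEM);
}
```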
```diff
@@ -497,7 +508,18 @@
 {
 	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
 
-	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+	memcpy(&tdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
+
+	return 0;
+}
+
+static int mmp_tdma_config_write(struct dma_chan *chan,
+				 enum dma_transfer_direction dir,
+				 struct dma_slave_config *dmaengine_cfg)
+{
+	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
+	if (dir == DMA_DEV_TO_MEM) {
 		tdmac->dev_addr = dmaengine_cfg->src_addr;
 		tdmac->burst_sz = dmaengine_cfg->src_maxburst;
 		tdmac->buswidth = dmaengine_cfg->src_addr_width;
@@ -506,7 +528,7 @@
 		tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
 		tdmac->buswidth = dmaengine_cfg->dst_addr_width;
 	}
-	tdmac->dir = dir;
+	tdmac->dir = dir;
 
 	return mmp_tdma_config_chan(chan);
 }
```
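With the split above, device_config only caches the dma_slave_config, and the controller is actually programmed when the cyclic descriptor is prepared, at which point the transfer direction is known. A typical dmaengine client follows roughly this sequence (a hedged sketch; the "tx" channel name, FIFO address, and sizes are illustrative placeholders, not values from this driver):

```c
#include <linux/dmaengine.h>

static int start_cyclic_tx(struct device *dev, dma_addr_t buf,
			   size_t buf_len, size_t period_len,
			   dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 32,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Only cached by mmp_tdma at this point ... */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto release;

	/* ... and written to the hardware here, via mmp_tdma_config_write(),
	 * once the direction is final. */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -ENOMEM;
		goto release;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;

release:
	dma_release_channel(chan);
	return ret;
}
```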
```diff
@@ -532,9 +554,9 @@
 
 static int mmp_tdma_remove(struct platform_device *pdev)
 {
-	struct mmp_tdma_device *tdev = platform_get_drvdata(pdev);
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
 
-	dma_async_device_unregister(&tdev->device);
 	return 0;
 }
 
@@ -564,7 +586,7 @@
 	tdmac->pool = pool;
 	tdmac->status = DMA_COMPLETE;
 	tdev->tdmac[tdmac->idx] = tdmac;
-	tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
+	tasklet_setup(&tdmac->tasklet, dma_do_tasklet);
 
 	/* add the channel to tdma_chan list */
 	list_add_tail(&tdmac->chan.device_node,
@@ -573,18 +595,12 @@
 }
 
 struct mmp_tdma_filter_param {
-	struct device_node *of_node;
 	unsigned int chan_id;
 };
 
 static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
 {
 	struct mmp_tdma_filter_param *param = fn_param;
-	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
-	struct dma_device *pdma_device = tdmac->chan.device;
-
-	if (pdma_device->dev->of_node != param->of_node)
-		return false;
 
 	if (chan->chan_id != param->chan_id)
 		return false;
```
```diff
@@ -602,13 +618,13 @@
 	if (dma_spec->args_count != 1)
 		return NULL;
 
-	param.of_node = ofdma->of_node;
 	param.chan_id = dma_spec->args[0];
 
 	if (param.chan_id >= TDMA_CHANNEL_NUM)
 		return NULL;
 
-	return dma_request_channel(mask, mmp_tdma_filter_fn, &param);
+	return __dma_request_channel(&mask, mmp_tdma_filter_fn, &param,
+				     ofdma->of_node);
 }
 
 static const struct of_device_id mmp_tdma_dt_ids[] = {
@@ -666,7 +682,7 @@
 	if (irq_num != chan_num) {
 		irq = platform_get_irq(pdev, 0);
 		ret = devm_request_irq(&pdev->dev, irq,
-			mmp_tdma_int_handler, 0, "tdma", tdev);
+			mmp_tdma_int_handler, IRQF_SHARED, "tdma", tdev);
 		if (ret)
 			return ret;
 	}
```
```diff
@@ -695,10 +711,21 @@
 	tdev->device.device_terminate_all = mmp_tdma_terminate_all;
 	tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
 
+	tdev->device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	if (type == MMP_AUD_TDMA) {
+		tdev->device.max_burst = SZ_128;
+		tdev->device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+		tdev->device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	} else if (type == PXA910_SQU) {
+		tdev->device.max_burst = SZ_32;
+	}
+	tdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+	tdev->device.descriptor_reuse = true;
+
 	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	platform_set_drvdata(pdev, tdev);
 
-	ret = dma_async_device_register(&tdev->device);
+	ret = dmaenginem_async_device_register(&tdev->device);
 	if (ret) {
 		dev_err(tdev->device.dev, "unable to register\n");
 		return ret;
```
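The capability fields set above (supported directions, max_burst, the 4-byte address widths for MMP_AUD_TDMA, and burst-granularity residue reporting) become visible to clients through dma_get_slave_caps(). A short sketch of how a consumer might query them (illustrative checks, not taken from this driver):

```c
#include <linux/dmaengine.h>

/* Query the advertised capabilities of a channel before configuring it. */
static int check_chan_caps(struct dma_chan *chan)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;

	/* Reject channels that cannot do device-to-memory transfers. */
	if (!(caps.directions & BIT(DMA_DEV_TO_MEM)))
		return -EINVAL;

	/* max_burst and residue_granularity now reflect the values the
	 * probe routine above fills in for this controller type. */
	pr_info("max burst %u, residue granularity %d\n",
		caps.max_burst, caps.residue_granularity);

	return 0;
}
```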
```diff
@@ -710,7 +737,7 @@
 	if (ret) {
 		dev_err(tdev->device.dev,
 			"failed to register controller\n");
-		dma_async_device_unregister(&tdev->device);
+		return ret;
 		}
 	}
 
```
|---|