| .. | .. |
|---|
| 1 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. |
|---|
| 3 | | - * |
|---|
| 4 | | - * This program is free software; you can redistribute it and/or modify it |
|---|
| 5 | | - * under the terms of the GNU General Public License as published by the Free |
|---|
| 6 | | - * Software Foundation; either version 2 of the License, or (at your option) |
|---|
| 7 | | - * any later version. |
|---|
| 8 | | - * |
|---|
| 9 | | - * This program is distributed in the hope that it will be useful, but WITHOUT |
|---|
| 10 | | - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|---|
| 11 | | - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
|---|
| 12 | | - * more details. |
|---|
| 13 | | - * |
|---|
| 14 | | - * The full GNU General Public License is included in this distribution in the |
|---|
| 15 | | - * file called COPYING. |
|---|
| 16 | 4 | */ |
|---|
| 17 | 5 | #ifndef LINUX_DMAENGINE_H |
|---|
| 18 | 6 | #define LINUX_DMAENGINE_H |
|---|
| .. | .. |
|---|
| 24 | 12 | #include <linux/scatterlist.h> |
|---|
| 25 | 13 | #include <linux/bitmap.h> |
|---|
| 26 | 14 | #include <linux/types.h> |
|---|
| 15 | +#include <linux/android_kabi.h> |
|---|
| 27 | 16 | #include <asm/page.h> |
|---|
| 28 | 17 | |
|---|
| 29 | 18 | /** |
|---|
| .. | .. |
|---|
| 51 | 40 | DMA_IN_PROGRESS, |
|---|
| 52 | 41 | DMA_PAUSED, |
|---|
| 53 | 42 | DMA_ERROR, |
|---|
| 43 | + DMA_OUT_OF_ORDER, |
|---|
| 54 | 44 | }; |
|---|
| 55 | 45 | |
|---|
| 56 | 46 | /** |
|---|
| .. | .. |
|---|
| 73 | 63 | DMA_SLAVE, |
|---|
| 74 | 64 | DMA_CYCLIC, |
|---|
| 75 | 65 | DMA_INTERLEAVE, |
|---|
| 66 | + DMA_COMPLETION_NO_ORDER, |
|---|
| 67 | + DMA_REPEAT, |
|---|
| 68 | + DMA_LOAD_EOT, |
|---|
| 76 | 69 | /* last transaction type for creation of the capabilities mask */ |
|---|
| 77 | 70 | DMA_TX_TYPE_END, |
|---|
| 78 | 71 | }; |
|---|
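The new DMA_OUT_OF_ORDER status pairs with the DMA_COMPLETION_NO_ORDER capability: on such controllers, cookie-based polling cannot tell which descriptor finished. A minimal client-side sketch, assuming an already-requested `chan` and a client-owned `use_callbacks` flag:

```c
/* Hedged sketch: when the controller completes transactions out of
 * order, dmaengine_tx_status() may report DMA_OUT_OF_ORDER, so rely
 * on per-descriptor completion callbacks instead of cookie polling.
 */
if (dma_has_cap(DMA_COMPLETION_NO_ORDER, chan->device->cap_mask))
	use_callbacks = true;
```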
| .. | .. |
|---|
| 95 | 88 | /** |
|---|
| 96 | 89 | * Interleaved Transfer Request |
|---|
| 97 | 90 | * ---------------------------- |
|---|
| 98 | | - * A chunk is collection of contiguous bytes to be transfered. |
|---|
| 91 | + * A chunk is a collection of contiguous bytes to be transferred. |
|---|
| 99 | 92 | * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG). |
|---|
| 100 | | - * ICGs may or maynot change between chunks. |
|---|
| 93 | + * ICGs may or may not change between chunks. |
|---|
| 101 | 94 | * A FRAME is the smallest series of contiguous {chunk,icg} pairs |
|---|
| 102 | 95 | * that, when repeated an integral number of times, specifies the transfer. |
|---|
| 103 | 96 | * A transfer template is a specification of a Frame, the number of times |
|---|
| .. | .. |
|---|
| 165 | 158 | bool dst_sgl; |
|---|
| 166 | 159 | size_t numf; |
|---|
| 167 | 160 | size_t frame_size; |
|---|
| 168 | | - struct data_chunk sgl[0]; |
|---|
| 161 | + struct data_chunk sgl[]; |
|---|
| 169 | 162 | }; |
|---|
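To make the template fields concrete, here is a hedged sketch (not from the header) that builds a one-chunk-per-frame template: 64 frames of 512 bytes with a 1536-byte inter-chunk gap honoured on the source side only; `src_phys` and `dst_phys` are assumed, pre-mapped DMA addresses.

```c
struct dma_interleaved_template *xt;

/* one trailing data_chunk, hence struct_size(xt, sgl, 1) */
xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
if (!xt)
	return -ENOMEM;

xt->src_start = src_phys;	/* assumed dma_addr_t of the source */
xt->dst_start = dst_phys;	/* assumed dma_addr_t of the destination */
xt->dir = DMA_MEM_TO_MEM;
xt->src_inc = true;
xt->dst_inc = true;
xt->src_sgl = true;		/* apply sgl[0].icg after each read chunk */
xt->dst_sgl = false;		/* destination is written back to back */
xt->numf = 64;			/* the frame repeats 64 times */
xt->frame_size = 1;		/* one {chunk, icg} pair per frame */
xt->sgl[0].size = 512;
xt->sgl[0].icg = 1536;
```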
| 170 | 163 | |
|---|
| 171 | 164 | /** |
|---|
| .. | .. |
|---|
| 174 | 167 | * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of |
|---|
| 175 | 168 | * this transaction |
|---|
| 176 | 169 | * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client |
|---|
| 177 | | - * acknowledges receipt, i.e. has has a chance to establish any dependency |
|---|
| 170 | + * acknowledges receipt, i.e. has a chance to establish any dependency |
|---|
| 178 | 171 | * chains |
|---|
| 179 | 172 | * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q |
|---|
| 180 | 173 | * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P |
|---|
| .. | .. |
|---|
| 188 | 181 | * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command |
|---|
| 189 | 182 | * data and the descriptor should be in different format from normal |
|---|
| 190 | 183 | * data descriptors. |
|---|
| 184 | + * @DMA_PREP_REPEAT: tell the driver that the transaction shall be automatically |
|---|
| 185 | + * repeated when it ends until a transaction is issued on the same channel |
|---|
| 186 | + * with the DMA_PREP_LOAD_EOT flag set. This flag is only applicable to |
|---|
| 187 | + * interleaved transactions and is ignored for all other transaction types. |
|---|
| 188 | + * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace any |
|---|
| 189 | + * active repeated (as indicated by DMA_PREP_REPEAT) transaction when the |
|---|
| 190 | + * repeated transaction ends. Not setting this flag when the previously queued |
|---|
| 191 | + * transaction is marked with DMA_PREP_REPEAT will cause the new transaction |
|---|
| 192 | + * to never be processed and stay in the issued queue forever. The flag is |
|---|
| 193 | + * ignored if the previous transaction is not a repeated transaction. |
|---|
| 191 | 194 | */ |
|---|
| 192 | 195 | enum dma_ctrl_flags { |
|---|
| 193 | 196 | DMA_PREP_INTERRUPT = (1 << 0), |
|---|
| .. | .. |
|---|
| 198 | 201 | DMA_PREP_FENCE = (1 << 5), |
|---|
| 199 | 202 | DMA_CTRL_REUSE = (1 << 6), |
|---|
| 200 | 203 | DMA_PREP_CMD = (1 << 7), |
|---|
| 204 | + DMA_PREP_REPEAT = (1 << 8), |
|---|
| 205 | + DMA_PREP_LOAD_EOT = (1 << 9), |
|---|
| 201 | 206 | }; |
|---|
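A hedged sketch of the repeat/EOT flow described above, e.g. for framebuffer cycling; `chan`, `xt_a` and `xt_b` (two prepared interleaved templates) are assumptions:

```c
struct dma_async_tx_descriptor *desc;

if (!dma_has_cap(DMA_REPEAT, chan->device->cap_mask))
	return -EOPNOTSUPP;

/* frame A repeats on its own until something replaces it */
desc = dmaengine_prep_interleaved_dma(chan, xt_a,
				      DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT);
if (!desc)
	return -EINVAL;
dmaengine_submit(desc);
dma_async_issue_pending(chan);

/* later: frame B takes over when A's current repetition ends */
desc = dmaengine_prep_interleaved_dma(chan, xt_b,
				      DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT);
if (!desc)
	return -EINVAL;
dmaengine_submit(desc);
dma_async_issue_pending(chan);
```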
| 202 | 207 | |
|---|
| 203 | 208 | /** |
|---|
| .. | .. |
|---|
| 231 | 236 | * @bytes_transferred: byte counter |
|---|
| 232 | 237 | */ |
|---|
| 233 | 238 | |
|---|
| 239 | +/** |
|---|
| 240 | + * enum dma_desc_metadata_mode - per descriptor metadata mode types supported |
|---|
| 241 | + * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the |
|---|
| 242 | + * client driver and it is attached (via the dmaengine_desc_attach_metadata() |
|---|
| 243 | + * helper) to the descriptor. |
|---|
| 244 | + * |
|---|
| 245 | + * Client drivers interested in using this mode can follow: |
|---|
| 246 | + * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM: |
|---|
| 247 | + * 1. prepare the descriptor (dmaengine_prep_*) |
|---|
| 248 | + * construct the metadata in the client's buffer |
|---|
| 249 | + * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the |
|---|
| 250 | + * descriptor |
|---|
| 251 | + * 3. submit the transfer |
|---|
| 252 | + * - DMA_DEV_TO_MEM: |
|---|
| 253 | + * 1. prepare the descriptor (dmaengine_prep_*) |
|---|
| 254 | + * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the |
|---|
| 255 | + * descriptor |
|---|
| 256 | + * 3. submit the transfer |
|---|
| 257 | + * 4. when the transfer is completed, the metadata should be available in the |
|---|
| 258 | + * attached buffer |
|---|
| 259 | + * |
|---|
| 260 | + * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA |
|---|
| 261 | + * driver. The client driver can ask for the pointer, maximum size and the |
|---|
| 262 | + * currently used size of the metadata and can directly update or read it. |
|---|
| 263 | + * dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() are |
|---|
| 264 | + * provided as helper functions. |
|---|
| 265 | + * |
|---|
| 266 | + * Note: the metadata area for the descriptor is no longer valid after the |
|---|
| 267 | + * transfer has been completed (if a completion callback is used, the area |
|---|
| 268 | + * remains valid only until the callback returns). |
|---|
| 269 | + * |
|---|
| 270 | + * Client drivers interested in using this mode can follow: |
|---|
| 271 | + * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM: |
|---|
| 272 | + * 1. prepare the descriptor (dmaengine_prep_*) |
|---|
| 273 | + * 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's |
|---|
| 274 | + * metadata area |
|---|
| 275 | + * 3. update the metadata at the pointer |
|---|
| 276 | + * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount |
|---|
| 277 | + * of data the client has placed into the metadata buffer |
|---|
| 278 | + * 5. submit the transfer |
|---|
| 279 | + * - DMA_DEV_TO_MEM: |
|---|
| 280 | + * 1. prepare the descriptor (dmaengine_prep_*) |
|---|
| 281 | + * 2. submit the transfer |
|---|
| 282 | + * 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the |
|---|
| 283 | + * pointer to the engine's metadata area |
|---|
| 284 | + * 4. Read out the metadata from the pointer |
|---|
| 285 | + * |
|---|
| 286 | + * Note: the two modes are not compatible and clients must use one mode for a |
|---|
| 287 | + * descriptor. |
|---|
| 288 | + */ |
|---|
| 289 | +enum dma_desc_metadata_mode { |
|---|
| 290 | + DESC_METADATA_NONE = 0, |
|---|
| 291 | + DESC_METADATA_CLIENT = BIT(0), |
|---|
| 292 | + DESC_METADATA_ENGINE = BIT(1), |
|---|
| 293 | +}; |
|---|
| 294 | + |
|---|
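A hedged sketch of the DESC_METADATA_CLIENT flow for the DMA_MEM_TO_DEV case; `chan`, the mapped `buf`/`len` and the client-owned `client_md` buffer are assumptions:

```c
struct dma_async_tx_descriptor *desc;
int ret;

if (!dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_CLIENT))
	return -EOPNOTSUPP;

/* 1. prepare the descriptor */
desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
				   DMA_PREP_INTERRUPT);
if (!desc)
	return -EINVAL;

/* 2. attach the client's metadata buffer */
ret = dmaengine_desc_attach_metadata(desc, client_md, client_md_len);
if (ret)
	return ret;

/* 3. submit the transfer */
dmaengine_submit(desc);
dma_async_issue_pending(chan);
```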
| 234 | 295 | struct dma_chan_percpu { |
|---|
| 235 | 296 | /* stats */ |
|---|
| 236 | 297 | unsigned long memcpy_count; |
|---|
| .. | .. |
|---|
| 250 | 311 | /** |
|---|
| 251 | 312 | * struct dma_chan - devices supply DMA channels, clients use them |
|---|
| 252 | 313 | * @device: ptr to the dma device who supplies this channel, always !%NULL |
|---|
| 314 | + * @slave: ptr to the device using this channel |
|---|
| 253 | 315 | * @cookie: last cookie value returned to client |
|---|
| 254 | 316 | * @completed_cookie: last completed cookie for this channel |
|---|
| 255 | 317 | * @chan_id: channel ID for sysfs |
|---|
| 256 | 318 | * @dev: class device for sysfs |
|---|
| 319 | + * @name: backlink name for sysfs |
|---|
| 320 | + * @dbg_client_name: slave name for debugfs in format: |
|---|
| 321 | + * dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx" |
|---|
| 257 | 322 | * @device_node: used to add this to the device chan list |
|---|
| 258 | 323 | * @local: per-cpu pointer to a struct dma_chan_percpu |
|---|
| 259 | 324 | * @client_count: how many clients are using this channel |
|---|
| .. | .. |
|---|
| 264 | 329 | */ |
|---|
| 265 | 330 | struct dma_chan { |
|---|
| 266 | 331 | struct dma_device *device; |
|---|
| 332 | + struct device *slave; |
|---|
| 267 | 333 | dma_cookie_t cookie; |
|---|
| 268 | 334 | dma_cookie_t completed_cookie; |
|---|
| 269 | 335 | |
|---|
| 270 | 336 | /* sysfs */ |
|---|
| 271 | 337 | int chan_id; |
|---|
| 272 | 338 | struct dma_chan_dev *dev; |
|---|
| 339 | + const char *name; |
|---|
| 340 | +#ifdef CONFIG_DEBUG_FS |
|---|
| 341 | + char *dbg_client_name; |
|---|
| 342 | +#endif |
|---|
| 273 | 343 | |
|---|
| 274 | 344 | struct list_head device_node; |
|---|
| 275 | 345 | struct dma_chan_percpu __percpu *local; |
|---|
| .. | .. |
|---|
| 288 | 358 | * @chan: driver channel device |
|---|
| 289 | 359 | * @device: sysfs device |
|---|
| 290 | 360 | * @dev_id: parent dma_device dev_id |
|---|
| 291 | | - * @idr_ref: reference count to gate release of dma_device dev_id |
|---|
| 292 | 361 | */ |
|---|
| 293 | 362 | struct dma_chan_dev { |
|---|
| 294 | 363 | struct dma_chan *chan; |
|---|
| 295 | 364 | struct device device; |
|---|
| 296 | 365 | int dev_id; |
|---|
| 297 | | - atomic_t *idr_ref; |
|---|
| 298 | 366 | }; |
|---|
| 299 | 367 | |
|---|
| 300 | 368 | /** |
|---|
| .. | .. |
|---|
| 351 | 419 | * @slave_id: Slave requester id. Only valid for slave channels. The dma |
|---|
| 352 | 420 | * slave peripheral will have a unique id as dma requester which needs to |
|---|
| 353 | 421 | * be passed as slave config. |
|---|
| 422 | + * @peripheral_config: peripheral configuration for programming peripheral |
|---|
| 423 | + * for dmaengine transfer |
|---|
| 424 | + * @peripheral_size: peripheral configuration buffer size |
|---|
| 354 | 425 | * |
|---|
| 355 | 426 | * This struct is passed in as configuration data to a DMA engine |
|---|
| 356 | 427 | * in order to set up a certain channel for DMA transport at runtime. |
|---|
| .. | .. |
|---|
| 376 | 447 | u32 dst_port_window_size; |
|---|
| 377 | 448 | bool device_fc; |
|---|
| 378 | 449 | unsigned int slave_id; |
|---|
| 450 | + void *peripheral_config; |
|---|
| 451 | + size_t peripheral_size; |
|---|
| 452 | +#ifdef CONFIG_NO_GKI |
|---|
| 379 | 453 | unsigned int src_interlace_size; |
|---|
| 380 | 454 | unsigned int dst_interlace_size; |
|---|
| 455 | +#endif |
|---|
| 381 | 456 | }; |
|---|
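A hedged sketch of a client pushing such a configuration, including the new opaque peripheral block; `fifo_phys` and `my_periph_cfg` are assumptions, the latter's layout being defined by the specific DMA driver:

```c
struct dma_slave_config cfg = {
	.direction	= DMA_MEM_TO_DEV,
	.dst_addr	= fifo_phys,	/* assumed device FIFO address */
	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	.dst_maxburst	= 8,
	/* opaque, driver-defined peripheral programming block */
	.peripheral_config = &my_periph_cfg,
	.peripheral_size   = sizeof(my_periph_cfg),
};
int ret;

ret = dmaengine_slave_config(chan, &cfg);
if (ret)
	return ret;
```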
| 382 | 457 | |
|---|
| 383 | 458 | /** |
|---|
| .. | .. |
|---|
| 416 | 491 | * Since the enum dma_transfer_direction is not defined as bit flag for |
|---|
| 417 | 492 | * each type, the dma controller should set BIT(<TYPE>) and same |
|---|
| 418 | 493 | * should be checked by controller as well |
|---|
| 494 | + * @min_burst: min burst capability per-transfer |
|---|
| 419 | 495 | * @max_burst: max burst capability per-transfer |
|---|
| 496 | + * @max_sg_burst: max number of SG list entries executed in a single burst |
|---|
| 497 | + * DMA transaction with no software intervention for reinitialization. |
|---|
| 498 | + * Zero value means unlimited number of entries. |
|---|
| 420 | 499 | * @cmd_pause: true, if pause is supported (i.e. for reading residue or |
|---|
| 421 | 500 | * for resume later) |
|---|
| 422 | 501 | * @cmd_resume: true, if resume is supported |
|---|
| .. | .. |
|---|
| 429 | 508 | u32 src_addr_widths; |
|---|
| 430 | 509 | u32 dst_addr_widths; |
|---|
| 431 | 510 | u32 directions; |
|---|
| 511 | + u32 min_burst; |
|---|
| 432 | 512 | u32 max_burst; |
|---|
| 513 | + u32 max_sg_burst; |
|---|
| 433 | 514 | bool cmd_pause; |
|---|
| 434 | 515 | bool cmd_resume; |
|---|
| 435 | 516 | bool cmd_terminate; |
|---|
| .. | .. |
|---|
| 486 | 567 | struct device *dev; |
|---|
| 487 | 568 | struct kref kref; |
|---|
| 488 | 569 | size_t len; |
|---|
| 489 | | - dma_addr_t addr[0]; |
|---|
| 570 | + dma_addr_t addr[]; |
|---|
| 571 | +}; |
|---|
| 572 | + |
|---|
| 573 | +struct dma_async_tx_descriptor; |
|---|
| 574 | + |
|---|
| 575 | +struct dma_descriptor_metadata_ops { |
|---|
| 576 | + int (*attach)(struct dma_async_tx_descriptor *desc, void *data, |
|---|
| 577 | + size_t len); |
|---|
| 578 | + |
|---|
| 579 | + void *(*get_ptr)(struct dma_async_tx_descriptor *desc, |
|---|
| 580 | + size_t *payload_len, size_t *max_len); |
|---|
| 581 | + int (*set_len)(struct dma_async_tx_descriptor *desc, |
|---|
| 582 | + size_t payload_len); |
|---|
| 490 | 583 | }; |
|---|
| 491 | 584 | |
|---|
| 492 | 585 | /** |
|---|
| .. | .. |
|---|
| 495 | 588 | * @cookie: tracking cookie for this transaction, set to -EBUSY if |
|---|
| 496 | 589 | * this tx is sitting on a dependency list |
|---|
| 497 | 590 | * @flags: flags to augment operation preparation, control completion, and |
|---|
| 498 | | - * communicate status |
|---|
| 591 | + * communicate status |
|---|
| 499 | 592 | * @phys: physical address of the descriptor |
|---|
| 500 | 593 | * @chan: target channel for this operation |
|---|
| 501 | 594 | * @tx_submit: accept the descriptor, assign ordered cookie and mark the |
|---|
| 502 | 595 | * descriptor pending. To be pushed on .issue_pending() call |
|---|
| 503 | 596 | * @callback: routine to call after this operation is complete |
|---|
| 504 | 597 | * @callback_param: general parameter to pass to the callback routine |
|---|
| 598 | + * @desc_metadata_mode: core managed metadata mode to protect mixed use of |
|---|
| 599 | + * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise |
|---|
| 600 | + * DESC_METADATA_NONE |
|---|
| 601 | + * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the |
|---|
| 602 | + * DMA driver if metadata mode is supported with the descriptor |
|---|
| 505 | 603 | * ---async_tx api specific fields--- |
|---|
| 506 | 604 | * @next: at completion submit this descriptor |
|---|
| 507 | 605 | * @parent: pointer to the next level up in the dependency chain |
|---|
| .. | .. |
|---|
| 518 | 616 | dma_async_tx_callback_result callback_result; |
|---|
| 519 | 617 | void *callback_param; |
|---|
| 520 | 618 | struct dmaengine_unmap_data *unmap; |
|---|
| 619 | + enum dma_desc_metadata_mode desc_metadata_mode; |
|---|
| 620 | + struct dma_descriptor_metadata_ops *metadata_ops; |
|---|
| 521 | 621 | #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH |
|---|
| 522 | 622 | struct dma_async_tx_descriptor *next; |
|---|
| 523 | 623 | struct dma_async_tx_descriptor *parent; |
|---|
| .. | .. |
|---|
| 553 | 653 | |
|---|
| 554 | 654 | static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx) |
|---|
| 555 | 655 | { |
|---|
| 556 | | - if (tx->unmap) { |
|---|
| 557 | | - dmaengine_unmap_put(tx->unmap); |
|---|
| 558 | | - tx->unmap = NULL; |
|---|
| 559 | | - } |
|---|
| 656 | + if (!tx->unmap) |
|---|
| 657 | + return; |
|---|
| 658 | + |
|---|
| 659 | + dmaengine_unmap_put(tx->unmap); |
|---|
| 660 | + tx->unmap = NULL; |
|---|
| 560 | 661 | } |
|---|
| 561 | 662 | |
|---|
| 562 | 663 | #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH |
|---|
| .. | .. |
|---|
| 625 | 726 | * @residue: the remaining number of bytes left to transmit |
|---|
| 626 | 727 | * on the selected transfer for states DMA_IN_PROGRESS and |
|---|
| 627 | 728 | * DMA_PAUSED if this is implemented in the driver, else 0 |
|---|
| 729 | + * @in_flight_bytes: amount of data in bytes cached by the DMA. |
|---|
| 628 | 730 | */ |
|---|
| 629 | 731 | struct dma_tx_state { |
|---|
| 630 | 732 | dma_cookie_t last; |
|---|
| 631 | 733 | dma_cookie_t used; |
|---|
| 632 | 734 | u32 residue; |
|---|
| 735 | + u32 in_flight_bytes; |
|---|
| 633 | 736 | }; |
|---|
| 634 | 737 | |
|---|
| 635 | 738 | /** |
|---|
| .. | .. |
|---|
| 680 | 783 | * @global_node: list_head for global dma_device_list |
|---|
| 681 | 784 | * @filter: information for device/slave to filter function/param mapping |
|---|
| 682 | 785 | * @cap_mask: one or more dma_capability flags |
|---|
| 786 | + * @desc_metadata_modes: metadata modes supported by the DMA device |
|---|
| 683 | 787 | * @max_xor: maximum number of xor sources, 0 if no capability |
|---|
| 684 | 788 | * @max_pq: maximum number of PQ sources and PQ-continue capability |
|---|
| 685 | 789 | * @copy_align: alignment shift for memcpy operations |
|---|
| .. | .. |
|---|
| 697 | 801 | * Since the enum dma_transfer_direction is not defined as bit flag for |
|---|
| 698 | 802 | * each type, the dma controller should set BIT(<TYPE>) and same |
|---|
| 699 | 803 | * should be checked by controller as well |
|---|
| 804 | + * @min_burst: min burst capability per-transfer |
|---|
| 700 | 805 | * @max_burst: max burst capability per-transfer |
|---|
| 806 | + * @max_sg_burst: max number of SG list entries executed in a single burst |
|---|
| 807 | + * DMA transaction with no software intervention for reinitialization. |
|---|
| 808 | + * Zero value means unlimited number of entries. |
|---|
| 701 | 809 | * @residue_granularity: granularity of the transfer residue reported |
|---|
| 702 | 810 | * by tx_status |
|---|
| 703 | 811 | * @device_alloc_chan_resources: allocate resources and return the |
|---|
| .. | .. |
|---|
| 717 | 825 | * be called after period_len bytes have been transferred. |
|---|
| 718 | 826 | * @device_prep_interleaved_dma: Transfer expression in a generic way. |
|---|
| 719 | 827 | * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address |
|---|
| 828 | + * @device_caps: May be used to override the generic DMA slave capabilities |
|---|
| 829 | + * with per-channel specific ones |
|---|
| 720 | 830 | * @device_config: Pushes a new configuration to a channel, return 0 or an error |
|---|
| 721 | 831 | * code |
|---|
| 722 | 832 | * @device_pause: Pauses any transfer happening on a channel. Returns |
|---|
| .. | .. |
|---|
| 733 | 843 | * will just return a simple status code |
|---|
| 734 | 844 | * @device_issue_pending: push pending transactions to hardware |
|---|
| 735 | 845 | * @descriptor_reuse: a submitted transfer can be resubmitted after completion |
|---|
| 846 | + * @device_release: called some time after dma_async_device_unregister() is |
|---|
| 847 | + * called and there are no further references to this structure. This |
|---|
| 848 | + * must be implemented to free resources; however, many existing drivers |
|---|
| 849 | + * do not, and are therefore not safe to unbind while in use. |
|---|
| 850 | + * @dbg_summary_show: optional routine to show contents in debugfs; default code |
|---|
| 851 | + * will be used when this is omitted, but custom code can show extra, |
|---|
| 852 | + * controller-specific information. |
|---|
| 736 | 853 | */ |
|---|
| 737 | 854 | struct dma_device { |
|---|
| 738 | | - |
|---|
| 855 | + struct kref ref; |
|---|
| 739 | 856 | unsigned int chancnt; |
|---|
| 740 | 857 | unsigned int privatecnt; |
|---|
| 741 | 858 | struct list_head channels; |
|---|
| 742 | 859 | struct list_head global_node; |
|---|
| 743 | 860 | struct dma_filter filter; |
|---|
| 744 | 861 | dma_cap_mask_t cap_mask; |
|---|
| 862 | + enum dma_desc_metadata_mode desc_metadata_modes; |
|---|
| 745 | 863 | unsigned short max_xor; |
|---|
| 746 | 864 | unsigned short max_pq; |
|---|
| 747 | 865 | enum dmaengine_alignment copy_align; |
|---|
| .. | .. |
|---|
| 753 | 871 | int dev_id; |
|---|
| 754 | 872 | struct device *dev; |
|---|
| 755 | 873 | struct module *owner; |
|---|
| 874 | + struct ida chan_ida; |
|---|
| 875 | + struct mutex chan_mutex; /* to protect chan_ida */ |
|---|
| 756 | 876 | |
|---|
| 757 | 877 | u32 src_addr_widths; |
|---|
| 758 | 878 | u32 dst_addr_widths; |
|---|
| 759 | 879 | u32 directions; |
|---|
| 880 | + u32 min_burst; |
|---|
| 760 | 881 | u32 max_burst; |
|---|
| 882 | + u32 max_sg_burst; |
|---|
| 761 | 883 | bool descriptor_reuse; |
|---|
| 762 | 884 | enum dma_residue_granularity residue_granularity; |
|---|
| 763 | 885 | |
|---|
| .. | .. |
|---|
| 805 | 927 | struct dma_chan *chan, dma_addr_t dst, u64 data, |
|---|
| 806 | 928 | unsigned long flags); |
|---|
| 807 | 929 | |
|---|
| 930 | + void (*device_caps)(struct dma_chan *chan, |
|---|
| 931 | + struct dma_slave_caps *caps); |
|---|
| 808 | 932 | int (*device_config)(struct dma_chan *chan, |
|---|
| 809 | 933 | struct dma_slave_config *config); |
|---|
| 810 | 934 | int (*device_pause)(struct dma_chan *chan); |
|---|
| .. | .. |
|---|
| 816 | 940 | dma_cookie_t cookie, |
|---|
| 817 | 941 | struct dma_tx_state *txstate); |
|---|
| 818 | 942 | void (*device_issue_pending)(struct dma_chan *chan); |
|---|
| 943 | + void (*device_release)(struct dma_device *dev); |
|---|
| 944 | + /* debugfs support */ |
|---|
| 945 | +#ifdef CONFIG_DEBUG_FS |
|---|
| 946 | + void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev); |
|---|
| 947 | + struct dentry *dbg_dev_root; |
|---|
| 948 | +#endif |
|---|
| 949 | + |
|---|
| 950 | + ANDROID_KABI_RESERVE(1); |
|---|
| 951 | + ANDROID_KABI_RESERVE(2); |
|---|
| 952 | + ANDROID_KABI_RESERVE(3); |
|---|
| 953 | + ANDROID_KABI_RESERVE(4); |
|---|
| 819 | 954 | }; |
|---|
| 820 | 955 | |
|---|
| 821 | 956 | static inline int dmaengine_slave_config(struct dma_chan *chan, |
|---|
| .. | .. |
|---|
| 892 | 1027 | { |
|---|
| 893 | 1028 | if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma) |
|---|
| 894 | 1029 | return NULL; |
|---|
| 1030 | + if (flags & DMA_PREP_REPEAT && |
|---|
| 1031 | + !test_bit(DMA_REPEAT, chan->device->cap_mask.bits)) |
|---|
| 1032 | + return NULL; |
|---|
| 895 | 1033 | |
|---|
| 896 | 1034 | return chan->device->device_prep_interleaved_dma(chan, xt, flags); |
|---|
| 897 | 1035 | } |
|---|
| .. | .. |
|---|
| 917 | 1055 | return chan->device->device_prep_dma_memcpy(chan, dest, src, |
|---|
| 918 | 1056 | len, flags); |
|---|
| 919 | 1057 | } |
|---|
| 1058 | + |
|---|
| 1059 | +static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan, |
|---|
| 1060 | + enum dma_desc_metadata_mode mode) |
|---|
| 1061 | +{ |
|---|
| 1062 | + if (!chan) |
|---|
| 1063 | + return false; |
|---|
| 1064 | + |
|---|
| 1065 | + return !!(chan->device->desc_metadata_modes & mode); |
|---|
| 1066 | +} |
|---|
| 1067 | + |
|---|
| 1068 | +#ifdef CONFIG_DMA_ENGINE |
|---|
| 1069 | +int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, |
|---|
| 1070 | + void *data, size_t len); |
|---|
| 1071 | +void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, |
|---|
| 1072 | + size_t *payload_len, size_t *max_len); |
|---|
| 1073 | +int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, |
|---|
| 1074 | + size_t payload_len); |
|---|
| 1075 | +#else /* CONFIG_DMA_ENGINE */ |
|---|
| 1076 | +static inline int dmaengine_desc_attach_metadata( |
|---|
| 1077 | + struct dma_async_tx_descriptor *desc, void *data, size_t len) |
|---|
| 1078 | +{ |
|---|
| 1079 | + return -EINVAL; |
|---|
| 1080 | +} |
|---|
| 1081 | +static inline void *dmaengine_desc_get_metadata_ptr( |
|---|
| 1082 | + struct dma_async_tx_descriptor *desc, size_t *payload_len, |
|---|
| 1083 | + size_t *max_len) |
|---|
| 1084 | +{ |
|---|
| 1085 | + return NULL; |
|---|
| 1086 | +} |
|---|
| 1087 | +static inline int dmaengine_desc_set_metadata_len( |
|---|
| 1088 | + struct dma_async_tx_descriptor *desc, size_t payload_len) |
|---|
| 1089 | +{ |
|---|
| 1090 | + return -EINVAL; |
|---|
| 1091 | +} |
|---|
| 1092 | +#endif /* CONFIG_DMA_ENGINE */ |
|---|
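For the DESC_METADATA_ENGINE side, a hedged sketch of the DMA_DEV_TO_MEM read-back path, run from the completion callback while the engine's metadata area is still valid; `consume_metadata()` is an assumed client helper:

```c
static void rx_complete(void *param)
{
	struct dma_async_tx_descriptor *desc = param; /* stashed at submit */
	size_t payload_len, max_len;
	void *md;

	md = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
	if (IS_ERR_OR_NULL(md))
		return;

	/* only payload_len bytes were written by the engine */
	consume_metadata(md, payload_len);
}
```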
| 920 | 1093 | |
|---|
| 921 | 1094 | /** |
|---|
| 922 | 1095 | * dmaengine_terminate_all() - Terminate all active DMA transfers |
|---|
| .. | .. |
|---|
| 946 | 1119 | * dmaengine_synchronize() needs to be called before it is safe to free |
|---|
| 947 | 1120 | * any memory that is accessed by previously submitted descriptors or before |
|---|
| 948 | 1121 | * freeing any resources accessed from within the completion callback of any |
|---|
| 949 | | - * perviously submitted descriptors. |
|---|
| 1122 | + * previously submitted descriptors. |
|---|
| 950 | 1123 | * |
|---|
| 951 | 1124 | * This function can be called from atomic context as well as from within a |
|---|
| 952 | 1125 | * complete callback of a descriptor submitted on the same channel. |
|---|
| .. | .. |
|---|
| 968 | 1141 | * |
|---|
| 969 | 1142 | * Synchronizes to the DMA channel termination to the current context. When this |
|---|
| 970 | 1143 | * function returns it is guaranteed that all transfers for previously issued |
|---|
| 971 | | - * descriptors have stopped and and it is safe to free the memory assoicated |
|---|
| 1144 | + * descriptors have stopped and it is safe to free the memory associated |
|---|
| 972 | 1145 | * with them. Furthermore it is guaranteed that all complete callback functions |
|---|
| 973 | 1146 | * for a previously submitted descriptor have finished running and it is safe to |
|---|
| 974 | 1147 | * free resources accessed from within the complete callbacks. |
|---|
| .. | .. |
|---|
| 1045 | 1218 | static inline bool dmaengine_check_align(enum dmaengine_alignment align, |
|---|
| 1046 | 1219 | size_t off1, size_t off2, size_t len) |
|---|
| 1047 | 1220 | { |
|---|
| 1048 | | - size_t mask; |
|---|
| 1049 | | - |
|---|
| 1050 | | - if (!align) |
|---|
| 1051 | | - return true; |
|---|
| 1052 | | - mask = (1 << align) - 1; |
|---|
| 1053 | | - if (mask & (off1 | off2 | len)) |
|---|
| 1054 | | - return false; |
|---|
| 1055 | | - return true; |
|---|
| 1221 | + return !(((1 << align) - 1) & (off1 | off2 | len)); |
|---|
| 1056 | 1222 | } |
|---|
| 1057 | 1223 | |
|---|
| 1058 | 1224 | static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1, |
|---|
| .. | .. |
|---|
| 1126 | 1292 | { |
|---|
| 1127 | 1293 | if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags)) |
|---|
| 1128 | 1294 | return dma_dev_to_maxpq(dma); |
|---|
| 1129 | | - else if (dmaf_p_disabled_continue(flags)) |
|---|
| 1295 | + if (dmaf_p_disabled_continue(flags)) |
|---|
| 1130 | 1296 | return dma_dev_to_maxpq(dma) - 1; |
|---|
| 1131 | | - else if (dmaf_continue(flags)) |
|---|
| 1297 | + if (dmaf_continue(flags)) |
|---|
| 1132 | 1298 | return dma_dev_to_maxpq(dma) - 3; |
|---|
| 1133 | 1299 | BUG(); |
|---|
| 1134 | 1300 | } |
|---|
| .. | .. |
|---|
| 1139 | 1305 | if (inc) { |
|---|
| 1140 | 1306 | if (dir_icg) |
|---|
| 1141 | 1307 | return dir_icg; |
|---|
| 1142 | | - else if (sgl) |
|---|
| 1308 | + if (sgl) |
|---|
| 1143 | 1309 | return icg; |
|---|
| 1144 | 1310 | } |
|---|
| 1145 | 1311 | |
|---|
| .. | .. |
|---|
| 1305 | 1471 | static inline void |
|---|
| 1306 | 1472 | dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue) |
|---|
| 1307 | 1473 | { |
|---|
| 1308 | | - if (st) { |
|---|
| 1309 | | - st->last = last; |
|---|
| 1310 | | - st->used = used; |
|---|
| 1311 | | - st->residue = residue; |
|---|
| 1312 | | - } |
|---|
| 1474 | + if (!st) |
|---|
| 1475 | + return; |
|---|
| 1476 | + |
|---|
| 1477 | + st->last = last; |
|---|
| 1478 | + st->used = used; |
|---|
| 1479 | + st->residue = residue; |
|---|
| 1313 | 1480 | } |
|---|
| 1314 | 1481 | |
|---|
| 1315 | 1482 | #ifdef CONFIG_DMA_ENGINE |
|---|
| .. | .. |
|---|
| 1318 | 1485 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); |
|---|
| 1319 | 1486 | void dma_issue_pending_all(void); |
|---|
| 1320 | 1487 | struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, |
|---|
| 1321 | | - dma_filter_fn fn, void *fn_param); |
|---|
| 1322 | | -struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); |
|---|
| 1488 | + dma_filter_fn fn, void *fn_param, |
|---|
| 1489 | + struct device_node *np); |
|---|
| 1323 | 1490 | |
|---|
| 1324 | 1491 | struct dma_chan *dma_request_chan(struct device *dev, const char *name); |
|---|
| 1325 | 1492 | struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask); |
|---|
| .. | .. |
|---|
| 1343 | 1510 | { |
|---|
| 1344 | 1511 | } |
|---|
| 1345 | 1512 | static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, |
|---|
| 1346 | | - dma_filter_fn fn, void *fn_param) |
|---|
| 1347 | | -{ |
|---|
| 1348 | | - return NULL; |
|---|
| 1349 | | -} |
|---|
| 1350 | | -static inline struct dma_chan *dma_request_slave_channel(struct device *dev, |
|---|
| 1351 | | - const char *name) |
|---|
| 1513 | + dma_filter_fn fn, |
|---|
| 1514 | + void *fn_param, |
|---|
| 1515 | + struct device_node *np) |
|---|
| 1352 | 1516 | { |
|---|
| 1353 | 1517 | return NULL; |
|---|
| 1354 | 1518 | } |
|---|
| .. | .. |
|---|
| 1372 | 1536 | } |
|---|
| 1373 | 1537 | #endif |
|---|
| 1374 | 1538 | |
|---|
| 1375 | | -#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name) |
|---|
| 1376 | | - |
|---|
| 1377 | 1539 | static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) |
|---|
| 1378 | 1540 | { |
|---|
| 1379 | 1541 | struct dma_slave_caps caps; |
|---|
| .. | .. |
|---|
| 1383 | 1545 | if (ret) |
|---|
| 1384 | 1546 | return ret; |
|---|
| 1385 | 1547 | |
|---|
| 1386 | | - if (caps.descriptor_reuse) { |
|---|
| 1387 | | - tx->flags |= DMA_CTRL_REUSE; |
|---|
| 1388 | | - return 0; |
|---|
| 1389 | | - } else { |
|---|
| 1548 | + if (!caps.descriptor_reuse) |
|---|
| 1390 | 1549 | return -EPERM; |
|---|
| 1391 | | - } |
|---|
| 1550 | + |
|---|
| 1551 | + tx->flags |= DMA_CTRL_REUSE; |
|---|
| 1552 | + return 0; |
|---|
| 1392 | 1553 | } |
|---|
| 1393 | 1554 | |
|---|
| 1394 | 1555 | static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx) |
|---|
| .. | .. |
|---|
| 1404 | 1565 | static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc) |
|---|
| 1405 | 1566 | { |
|---|
| 1406 | 1567 | /* this is supported for reusable desc, so check that */ |
|---|
| 1407 | | - if (dmaengine_desc_test_reuse(desc)) |
|---|
| 1408 | | - return desc->desc_free(desc); |
|---|
| 1409 | | - else |
|---|
| 1568 | + if (!dmaengine_desc_test_reuse(desc)) |
|---|
| 1410 | 1569 | return -EPERM; |
|---|
| 1570 | + |
|---|
| 1571 | + return desc->desc_free(desc); |
|---|
| 1411 | 1572 | } |
|---|
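Putting the reuse helpers together, a hedged sketch of a client that recycles one descriptor; `rx_done` is an assumed callback that resubmits the descriptor:

```c
desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
				   DMA_PREP_INTERRUPT);
if (!desc || dmaengine_desc_set_reuse(desc))
	goto no_reuse;		/* channel cannot reuse descriptors */

desc->callback = rx_done;	/* may dmaengine_submit(desc) again */
desc->callback_param = desc;
dmaengine_submit(desc);
dma_async_issue_pending(chan);

/* ... once the descriptor is no longer needed: */
dmaengine_desc_free(desc);	/* returns -EPERM unless reuse was set */
```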
| 1412 | 1573 | |
|---|
| 1413 | 1574 | /* --- DMA device --- */ |
|---|
| .. | .. |
|---|
| 1415 | 1576 | int dma_async_device_register(struct dma_device *device); |
|---|
| 1416 | 1577 | int dmaenginem_async_device_register(struct dma_device *device); |
|---|
| 1417 | 1578 | void dma_async_device_unregister(struct dma_device *device); |
|---|
| 1579 | +int dma_async_device_channel_register(struct dma_device *device, |
|---|
| 1580 | + struct dma_chan *chan); |
|---|
| 1581 | +void dma_async_device_channel_unregister(struct dma_device *device, |
|---|
| 1582 | + struct dma_chan *chan); |
|---|
| 1418 | 1583 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx); |
|---|
| 1419 | | -struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); |
|---|
| 1420 | | -struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); |
|---|
| 1421 | | -#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) |
|---|
| 1422 | | -#define dma_request_slave_channel_compat(mask, x, y, dev, name) \ |
|---|
| 1423 | | - __dma_request_slave_channel_compat(&(mask), x, y, dev, name) |
|---|
| 1584 | +#define dma_request_channel(mask, x, y) \ |
|---|
| 1585 | + __dma_request_channel(&(mask), x, y, NULL) |
|---|
| 1586 | + |
|---|
| 1587 | +/* Deprecated, please use dma_request_chan() directly */ |
|---|
| 1588 | +static inline struct dma_chan * __deprecated |
|---|
| 1589 | +dma_request_slave_channel(struct device *dev, const char *name) |
|---|
| 1590 | +{ |
|---|
| 1591 | + struct dma_chan *ch = dma_request_chan(dev, name); |
|---|
| 1592 | + |
|---|
| 1593 | + return IS_ERR(ch) ? NULL : ch; |
|---|
| 1594 | +} |
|---|
| 1424 | 1595 | |
|---|
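The preferred replacement propagates real error codes instead of flattening them to NULL; a hedged sketch from a probe path:

```c
chan = dma_request_chan(&pdev->dev, "rx");
if (IS_ERR(chan))
	return PTR_ERR(chan);	/* may be -EPROBE_DEFER */
```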
| 1425 | 1596 | static inline struct dma_chan |
|---|
| 1426 | | -*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask, |
|---|
| 1597 | +*dma_request_slave_channel_compat(const dma_cap_mask_t mask, |
|---|
| 1427 | 1598 | dma_filter_fn fn, void *fn_param, |
|---|
| 1428 | 1599 | struct device *dev, const char *name) |
|---|
| 1429 | 1600 | { |
|---|
| .. | .. |
|---|
| 1436 | 1607 | if (!fn || !fn_param) |
|---|
| 1437 | 1608 | return NULL; |
|---|
| 1438 | 1609 | |
|---|
| 1439 | | - return __dma_request_channel(mask, fn, fn_param); |
|---|
| 1610 | + return __dma_request_channel(&mask, fn, fn_param, NULL); |
|---|
| 1611 | +} |
|---|
| 1612 | + |
|---|
| 1613 | +static inline char * |
|---|
| 1614 | +dmaengine_get_direction_text(enum dma_transfer_direction dir) |
|---|
| 1615 | +{ |
|---|
| 1616 | + switch (dir) { |
|---|
| 1617 | + case DMA_DEV_TO_MEM: |
|---|
| 1618 | + return "DEV_TO_MEM"; |
|---|
| 1619 | + case DMA_MEM_TO_DEV: |
|---|
| 1620 | + return "MEM_TO_DEV"; |
|---|
| 1621 | + case DMA_MEM_TO_MEM: |
|---|
| 1622 | + return "MEM_TO_MEM"; |
|---|
| 1623 | + case DMA_DEV_TO_DEV: |
|---|
| 1624 | + return "DEV_TO_DEV"; |
|---|
| 1625 | + default: |
|---|
| 1626 | + return "invalid"; |
|---|
| 1627 | + } |
|---|
| 1440 | 1628 | } |
|---|
| 1441 | 1629 | #endif /* DMAENGINE_H */ |
|---|