forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 072de836f53be56a70cecf70b43ae43b7ce17376
kernel/drivers/dma/dmaengine.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
  */
 
 /*
@@ -63,19 +51,105 @@
 #include <linux/acpi_dma.h>
 #include <linux/of_dma.h>
 #include <linux/mempool.h>
+#include <linux/numa.h>
+
+#include "dmaengine.h"
 
 static DEFINE_MUTEX(dma_list_mutex);
 static DEFINE_IDA(dma_ida);
 static LIST_HEAD(dma_device_list);
 static long dmaengine_ref_count;
 
+/* --- debugfs implementation --- */
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static struct dentry *rootdir;
+
+static void dmaengine_debug_register(struct dma_device *dma_dev)
+{
+	dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
+						   rootdir);
+	if (IS_ERR(dma_dev->dbg_dev_root))
+		dma_dev->dbg_dev_root = NULL;
+}
+
+static void dmaengine_debug_unregister(struct dma_device *dma_dev)
+{
+	debugfs_remove_recursive(dma_dev->dbg_dev_root);
+	dma_dev->dbg_dev_root = NULL;
+}
+
+static void dmaengine_dbg_summary_show(struct seq_file *s,
+				       struct dma_device *dma_dev)
+{
+	struct dma_chan *chan;
+
+	list_for_each_entry(chan, &dma_dev->channels, device_node) {
+		if (chan->client_count) {
+			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
+				   chan->dbg_client_name ?: "in-use");
+
+			if (chan->router)
+				seq_printf(s, " (via router: %s)\n",
+					   dev_name(chan->router->dev));
+			else
+				seq_puts(s, "\n");
+		}
+	}
+}
+
+static int dmaengine_summary_show(struct seq_file *s, void *data)
+{
+	struct dma_device *dma_dev = NULL;
+
+	mutex_lock(&dma_list_mutex);
+	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
+		seq_printf(s, "dma%d (%s): number of channels: %u\n",
+			   dma_dev->dev_id, dev_name(dma_dev->dev),
+			   dma_dev->chancnt);
+
+		if (dma_dev->dbg_summary_show)
+			dma_dev->dbg_summary_show(s, dma_dev);
+		else
+			dmaengine_dbg_summary_show(s, dma_dev);
+
+		if (!list_is_last(&dma_dev->global_node, &dma_device_list))
+			seq_puts(s, "\n");
+	}
+	mutex_unlock(&dma_list_mutex);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
+
+static void __init dmaengine_debugfs_init(void)
+{
+	rootdir = debugfs_create_dir("dmaengine", NULL);
+
+	/* /sys/kernel/debug/dmaengine/summary */
+	debugfs_create_file("summary", 0444, rootdir, NULL,
+			    &dmaengine_summary_fops);
+}
+#else
+static inline void dmaengine_debugfs_init(void) { }
+static inline int dmaengine_debug_register(struct dma_device *dma_dev)
+{
+	return 0;
+}
+
+static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
+#endif	/* DEBUG_FS */
+
 /* --- sysfs implementation --- */
 
+#define DMA_SLAVE_NAME	"slave"
+
 /**
- * dev_to_dma_chan - convert a device pointer to the its sysfs container object
- * @dev - device node
+ * dev_to_dma_chan - convert a device pointer to its sysfs container object
+ * @dev: device node
  *
- * Must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
  */
 static struct dma_chan *dev_to_dma_chan(struct device *dev)
 {
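
For reference, the summary file added above prints one line per registered dma_device and, for each busy channel, the client name recorded at request time. A controller driver with richer per-channel state can supply its own dbg_summary_show callback instead of the generic helper; a minimal sketch, assuming a hypothetical "foo" driver whose names are invented here:

	static void foo_dbg_summary_show(struct seq_file *s,
					 struct dma_device *dma_dev)
	{
		struct dma_chan *chan;

		/* one line per channel that currently has a client */
		list_for_each_entry(chan, &dma_dev->channels, device_node)
			if (chan->client_count)
				seq_printf(s, " %-13s| %d client(s)\n",
					   dma_chan_name(chan),
					   chan->client_count);
	}

	/* at probe time, before registering the device (illustrative): */
	/*	foo->ddev.dbg_summary_show = foo_dbg_summary_show; */
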
@@ -160,10 +234,6 @@
 	struct dma_chan_dev *chan_dev;
 
 	chan_dev = container_of(dev, typeof(*chan_dev), device);
-	if (atomic_dec_and_test(chan_dev->idr_ref)) {
-		ida_free(&dma_ida, chan_dev->dev_id);
-		kfree(chan_dev->idr_ref);
-	}
 	kfree(chan_dev);
 }
 
@@ -175,146 +245,18 @@
 
 /* --- client and device registration --- */
 
-#define dma_device_satisfies_mask(device, mask) \
-	__dma_device_satisfies_mask((device), &(mask))
-static int
-__dma_device_satisfies_mask(struct dma_device *device,
-			    const dma_cap_mask_t *want)
-{
-	dma_cap_mask_t has;
-
-	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
-		DMA_TX_TYPE_END);
-	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
-}
-
-static struct module *dma_chan_to_owner(struct dma_chan *chan)
-{
-	return chan->device->owner;
-}
-
-/**
- * balance_ref_count - catch up the channel reference count
- * @chan - channel to balance ->client_count versus dmaengine_ref_count
- *
- * balance_ref_count must be called under dma_list_mutex
- */
-static void balance_ref_count(struct dma_chan *chan)
-{
-	struct module *owner = dma_chan_to_owner(chan);
-
-	while (chan->client_count < dmaengine_ref_count) {
-		__module_get(owner);
-		chan->client_count++;
-	}
-}
-
-/**
- * dma_chan_get - try to grab a dma channel's parent driver module
- * @chan - channel to grab
- *
- * Must be called under dma_list_mutex
- */
-static int dma_chan_get(struct dma_chan *chan)
-{
-	struct module *owner = dma_chan_to_owner(chan);
-	int ret;
-
-	/* The channel is already in use, update client count */
-	if (chan->client_count) {
-		__module_get(owner);
-		goto out;
-	}
-
-	if (!try_module_get(owner))
-		return -ENODEV;
-
-	/* allocate upon first client reference */
-	if (chan->device->device_alloc_chan_resources) {
-		ret = chan->device->device_alloc_chan_resources(chan);
-		if (ret < 0)
-			goto err_out;
-	}
-
-	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
-		balance_ref_count(chan);
-
-out:
-	chan->client_count++;
-	return 0;
-
-err_out:
-	module_put(owner);
-	return ret;
-}
-
-/**
- * dma_chan_put - drop a reference to a dma channel's parent driver module
- * @chan - channel to release
- *
- * Must be called under dma_list_mutex
- */
-static void dma_chan_put(struct dma_chan *chan)
-{
-	/* This channel is not in use, bail out */
-	if (!chan->client_count)
-		return;
-
-	chan->client_count--;
-	module_put(dma_chan_to_owner(chan));
-
-	/* This channel is not in use anymore, free it */
-	if (!chan->client_count && chan->device->device_free_chan_resources) {
-		/* Make sure all operations have completed */
-		dmaengine_synchronize(chan);
-		chan->device->device_free_chan_resources(chan);
-	}
-
-	/* If the channel is used via a DMA request router, free the mapping */
-	if (chan->router && chan->router->route_free) {
-		chan->router->route_free(chan->router->dev, chan->route_data);
-		chan->router = NULL;
-		chan->route_data = NULL;
-	}
-}
-
-enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
-{
-	enum dma_status status;
-	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
-
-	dma_async_issue_pending(chan);
-	do {
-		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
-		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
-			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
-			return DMA_ERROR;
-		}
-		if (status != DMA_IN_PROGRESS)
-			break;
-		cpu_relax();
-	} while (1);
-
-	return status;
-}
-EXPORT_SYMBOL(dma_sync_wait);
-
-/**
- * dma_cap_mask_all - enable iteration over all operation types
- */
+/* enable iteration over all operation types */
 static dma_cap_mask_t dma_cap_mask_all;
 
 /**
- * dma_chan_tbl_ent - tracks channel allocations per core/operation
- * @chan - associated channel for this entry
+ * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan: associated channel for this entry
  */
 struct dma_chan_tbl_ent {
 	struct dma_chan *chan;
 };
 
-/**
- * channel_table - percpu lookup table for memory-to-memory offload providers
- */
+/* percpu lookup table for memory-to-memory offload providers */
 static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
 
 static int __init dma_channel_table_init(void)
@@ -341,7 +283,7 @@
 	}
 
 	if (err) {
-		pr_err("initialization failure\n");
+		pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
 		for_each_dma_cap_mask(cap, dma_cap_mask_all)
 			free_percpu(channel_table[cap]);
 	}
@@ -351,52 +293,28 @@
 arch_initcall(dma_channel_table_init);
 
 /**
- * dma_find_channel - find a channel to carry out the operation
- * @tx_type: transaction type
- */
-struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
-{
-	return this_cpu_read(channel_table[tx_type]->chan);
-}
-EXPORT_SYMBOL(dma_find_channel);
-
-/**
- * dma_issue_pending_all - flush all pending operations across all channels
- */
-void dma_issue_pending_all(void)
-{
-	struct dma_device *device;
-	struct dma_chan *chan;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
-		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
-			continue;
-		list_for_each_entry(chan, &device->channels, device_node)
-			if (chan->client_count)
-				device->device_issue_pending(chan);
-	}
-	rcu_read_unlock();
-}
-EXPORT_SYMBOL(dma_issue_pending_all);
-
-/**
- * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
+ * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
+ * @chan:	DMA channel to test
+ * @cpu:	CPU index which the channel should be close to
+ *
+ * Returns true if the channel is in the same NUMA-node as the CPU.
  */
 static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
 {
 	int node = dev_to_node(chan->device->dev);
-	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
+	return node == NUMA_NO_NODE ||
+		cpumask_test_cpu(cpu, cpumask_of_node(node));
 }
 
 /**
- * min_chan - returns the channel with min count and in the same numa-node as the cpu
- * @cap: capability to match
- * @cpu: cpu index which the channel should be close to
+ * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
+ * @cap:	capability to match
+ * @cpu:	CPU index which the channel should be close to
  *
- * If some channels are close to the given cpu, the one with the lowest
- * reference count is returned. Otherwise, cpu is ignored and only the
+ * If some channels are close to the given CPU, the one with the lowest
+ * reference count is returned. Otherwise, CPU is ignored and only the
  * reference count is taken into account.
+ *
  * Must be called under dma_list_mutex.
  */
 static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
@@ -434,10 +352,11 @@
 /**
  * dma_channel_rebalance - redistribute the available channels
  *
- * Optimize for cpu isolation (each cpu gets a dedicated channel for an
- * operation type) in the SMP case, and operation isolation (avoid
- * multi-tasking channels) in the non-SMP case. Must be called under
- * dma_list_mutex.
+ * Optimize for CPU isolation (each CPU gets a dedicated channel for an
+ * operation type) in the SMP case, and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case.
+ *
+ * Must be called under dma_list_mutex.
  */
 static void dma_channel_rebalance(void)
 {
@@ -470,6 +389,184 @@
 		}
 	}
 }
 
+static int dma_device_satisfies_mask(struct dma_device *device,
+				     const dma_cap_mask_t *want)
+{
+	dma_cap_mask_t has;
+
+	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
+		DMA_TX_TYPE_END);
+	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
+}
+
+static struct module *dma_chan_to_owner(struct dma_chan *chan)
+{
+	return chan->device->owner;
+}
+
+/**
+ * balance_ref_count - catch up the channel reference count
+ * @chan: channel to balance ->client_count versus dmaengine_ref_count
+ *
+ * Must be called under dma_list_mutex.
+ */
+static void balance_ref_count(struct dma_chan *chan)
+{
+	struct module *owner = dma_chan_to_owner(chan);
+
+	while (chan->client_count < dmaengine_ref_count) {
+		__module_get(owner);
+		chan->client_count++;
+	}
+}
+
+static void dma_device_release(struct kref *ref)
+{
+	struct dma_device *device = container_of(ref, struct dma_device, ref);
+
+	list_del_rcu(&device->global_node);
+	dma_channel_rebalance();
+
+	if (device->device_release)
+		device->device_release(device);
+}
+
+static void dma_device_put(struct dma_device *device)
+{
+	lockdep_assert_held(&dma_list_mutex);
+	kref_put(&device->ref, dma_device_release);
+}
+
+/**
+ * dma_chan_get - try to grab a DMA channel's parent driver module
+ * @chan: channel to grab
+ *
+ * Must be called under dma_list_mutex.
+ */
+static int dma_chan_get(struct dma_chan *chan)
+{
+	struct module *owner = dma_chan_to_owner(chan);
+	int ret;
+
+	/* The channel is already in use, update client count */
+	if (chan->client_count) {
+		__module_get(owner);
+		goto out;
+	}
+
+	if (!try_module_get(owner))
+		return -ENODEV;
+
+	ret = kref_get_unless_zero(&chan->device->ref);
+	if (!ret) {
+		ret = -ENODEV;
+		goto module_put_out;
+	}
+
+	/* allocate upon first client reference */
+	if (chan->device->device_alloc_chan_resources) {
+		ret = chan->device->device_alloc_chan_resources(chan);
+		if (ret < 0)
+			goto err_out;
+	}
+
+	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
+		balance_ref_count(chan);
+
+out:
+	chan->client_count++;
+	return 0;
+
+err_out:
+	dma_device_put(chan->device);
+module_put_out:
+	module_put(owner);
+	return ret;
+}
+
+/**
+ * dma_chan_put - drop a reference to a DMA channel's parent driver module
+ * @chan: channel to release
+ *
+ * Must be called under dma_list_mutex.
+ */
+static void dma_chan_put(struct dma_chan *chan)
+{
+	/* This channel is not in use, bail out */
+	if (!chan->client_count)
+		return;
+
+	chan->client_count--;
+
+	/* This channel is not in use anymore, free it */
+	if (!chan->client_count && chan->device->device_free_chan_resources) {
+		/* Make sure all operations have completed */
+		dmaengine_synchronize(chan);
+		chan->device->device_free_chan_resources(chan);
+	}
+
+	/* If the channel is used via a DMA request router, free the mapping */
+	if (chan->router && chan->router->route_free) {
+		chan->router->route_free(chan->router->dev, chan->route_data);
+		chan->router = NULL;
+		chan->route_data = NULL;
+	}
+
+	dma_device_put(chan->device);
+	module_put(dma_chan_to_owner(chan));
+}
+
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
+{
+	enum dma_status status;
+	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
+
+	dma_async_issue_pending(chan);
+	do {
+		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
+			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
+			return DMA_ERROR;
+		}
+		if (status != DMA_IN_PROGRESS)
+			break;
+		cpu_relax();
+	} while (1);
+
+	return status;
+}
+EXPORT_SYMBOL(dma_sync_wait);
+
+/**
+ * dma_find_channel - find a channel to carry out the operation
+ * @tx_type: transaction type
+ */
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
+{
+	return this_cpu_read(channel_table[tx_type]->chan);
+}
+EXPORT_SYMBOL(dma_find_channel);
+
+/**
+ * dma_issue_pending_all - flush all pending operations across all channels
+ */
+void dma_issue_pending_all(void)
+{
+	struct dma_device *device;
+	struct dma_chan *chan;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
+		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+			continue;
+		list_for_each_entry(chan, &device->channels, device_node)
+			if (chan->client_count)
+				device->device_issue_pending(chan);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(dma_issue_pending_all);
+
 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 {
 	struct dma_device *device;
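
The kref introduced here means the dma_device is only torn down from dma_device_release(), once the last channel reference is dropped. A driver that wants to survive unbinding while clients still hold channels provides the device_release callback invoked above; a rough sketch with invented foo_* names:

	struct foo_dma {
		struct dma_device ddev;
		/* driver-private state ... */
	};

	static void foo_dma_release(struct dma_device *ddev)
	{
		struct foo_dma *foo = container_of(ddev, struct foo_dma, ddev);

		/* reached only after the last dma_device_put() */
		kfree(foo);
	}

	/* at probe time (illustrative): foo->ddev.device_release = foo_dma_release; */
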
@@ -495,12 +592,24 @@
 	caps->src_addr_widths = device->src_addr_widths;
 	caps->dst_addr_widths = device->dst_addr_widths;
 	caps->directions = device->directions;
+	caps->min_burst = device->min_burst;
 	caps->max_burst = device->max_burst;
+	caps->max_sg_burst = device->max_sg_burst;
 	caps->residue_granularity = device->residue_granularity;
 	caps->descriptor_reuse = device->descriptor_reuse;
 	caps->cmd_pause = !!device->device_pause;
 	caps->cmd_resume = !!device->device_resume;
 	caps->cmd_terminate = !!device->device_terminate_all;
+
+	/*
+	 * DMA engine device might be configured with non-uniformly
+	 * distributed slave capabilities per device channels. In this
+	 * case the corresponding driver may provide the device_caps
+	 * callback to override the generic capabilities with
+	 * channel-specific ones.
+	 */
+	if (device->device_caps)
+		device->device_caps(chan, caps);
 
 	return 0;
 }
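
As the comment above notes, device_caps lets a driver report channel-specific capabilities on top of the device-wide defaults. A hedged sketch of such a callback; the foo_* names and the is_narrow flag are illustrative only:

	struct foo_chan {
		struct dma_chan chan;
		bool is_narrow;
	};

	static void foo_device_caps(struct dma_chan *chan,
				    struct dma_slave_caps *caps)
	{
		struct foo_chan *fchan = container_of(chan, struct foo_chan, chan);

		/* this channel only supports byte-wide destination accesses */
		if (fchan->is_narrow)
			caps->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
	}
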
@@ -512,7 +621,7 @@
 {
 	struct dma_chan *chan;
 
-	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
+	if (mask && !dma_device_satisfies_mask(dev, mask)) {
 		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
 		return NULL;
 	}
@@ -582,7 +691,7 @@
 
 /**
  * dma_get_slave_channel - try to get specific channel exclusively
- * @chan: target channel
+ * @chan:	target channel
  */
 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 {
@@ -636,14 +745,16 @@
 
 /**
  * __dma_request_channel - try to allocate an exclusive channel
- * @mask: capabilities that the channel must satisfy
- * @fn: optional callback to disposition available channels
- * @fn_param: opaque parameter to pass to dma_filter_fn
+ * @mask:	capabilities that the channel must satisfy
+ * @fn:		optional callback to disposition available channels
+ * @fn_param:	opaque parameter to pass to dma_filter_fn()
+ * @np:		device node to look for DMA channels
  *
  * Returns pointer to appropriate DMA channel on success or NULL.
  */
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
-				       dma_filter_fn fn, void *fn_param)
+				       dma_filter_fn fn, void *fn_param,
+				       struct device_node *np)
 {
 	struct dma_device *device, *_d;
 	struct dma_chan *chan = NULL;
@@ -651,6 +762,10 @@
 	/* Find a channel */
 	mutex_lock(&dma_list_mutex);
 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+		/* Finds a DMA controller with matching device node */
+		if (np && device->dev->of_node && np != device->dev->of_node)
+			continue;
+
 		chan = find_candidate(device, mask, fn, fn_param);
 		if (!IS_ERR(chan))
 			break;
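
With the extra device_node argument, a caller can now restrict the search to one DMA controller; passing NULL keeps the old behaviour, as dma_request_chan_by_mask() does further down. An illustrative fragment, assuming the caller already has the controller's of_node at hand:

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = __dma_request_channel(&mask, NULL, NULL, dev->of_node);
	if (!chan)
		/* no matching channel on that controller */;
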
@@ -708,11 +823,11 @@
 	if (has_acpi_companion(dev) && !chan)
 		chan = acpi_dma_request_slave_chan_by_name(dev, name);
 
-	if (chan) {
-		/* Valid channel found or requester need to be deferred */
-		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
-			return chan;
-	}
+	if (PTR_ERR(chan) == -EPROBE_DEFER)
+		return chan;
+
+	if (!IS_ERR_OR_NULL(chan))
+		goto found;
 
 	/* Try to find the channel via the DMA filter map(s) */
 	mutex_lock(&dma_list_mutex);
@@ -732,31 +847,35 @@
 	}
 	mutex_unlock(&dma_list_mutex);
 
-	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+	if (IS_ERR(chan))
+		return chan;
+	if (!chan)
+		return ERR_PTR(-EPROBE_DEFER);
+
+found:
+#ifdef CONFIG_DEBUG_FS
+	chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
+					  name);
+#endif
+
+	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
+	if (!chan->name)
+		return chan;
+	chan->slave = dev;
+
+	if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
+			      DMA_SLAVE_NAME))
+		dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
+	if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
+		dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);
+
+	return chan;
 }
 EXPORT_SYMBOL_GPL(dma_request_chan);
 
 /**
- * dma_request_slave_channel - try to allocate an exclusive slave channel
- * @dev: pointer to client device structure
- * @name: slave channel name
- *
- * Returns pointer to appropriate DMA channel on success or NULL.
- */
-struct dma_chan *dma_request_slave_channel(struct device *dev,
-					   const char *name)
-{
-	struct dma_chan *ch = dma_request_chan(dev, name);
-	if (IS_ERR(ch))
-		return NULL;
-
-	return ch;
-}
-EXPORT_SYMBOL_GPL(dma_request_slave_channel);
-
-/**
  * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
- * @mask: capabilities that the channel must satisfy
+ * @mask:	capabilities that the channel must satisfy
  *
  * Returns pointer to appropriate DMA channel on success or an error pointer.
  */
@@ -767,7 +886,7 @@
 	if (!mask)
 		return ERR_PTR(-ENODEV);
 
-	chan = __dma_request_channel(mask, NULL, NULL);
+	chan = __dma_request_channel(mask, NULL, NULL, NULL);
 	if (!chan) {
 		mutex_lock(&dma_list_mutex);
 		if (list_empty(&dma_device_list))
@@ -790,6 +909,19 @@
 	/* drop PRIVATE cap enabled by __dma_request_channel() */
 	if (--chan->device->privatecnt == 0)
 		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
+
+	if (chan->slave) {
+		sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
+		sysfs_remove_link(&chan->slave->kobj, chan->name);
+		kfree(chan->name);
+		chan->name = NULL;
+		chan->slave = NULL;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	kfree(chan->dbg_client_name);
+	chan->dbg_client_name = NULL;
+#endif
 	mutex_unlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL_GPL(dma_release_channel);
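
Typical client usage around the two functions above, for reference: requesting a named channel now also records chan->name/chan->slave and creates the sysfs links that dma_release_channel() removes again. Error handling is trimmed in this sketch:

	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... prepare and issue transfers ... */

	dma_release_channel(chan);
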
@@ -834,18 +966,18 @@
 EXPORT_SYMBOL(dmaengine_get);
 
 /**
- * dmaengine_put - let dma drivers be removed when ref_count == 0
+ * dmaengine_put - let DMA drivers be removed when ref_count == 0
 */
 void dmaengine_put(void)
 {
-	struct dma_device *device;
+	struct dma_device *device, *_d;
 	struct dma_chan *chan;
 
 	mutex_lock(&dma_list_mutex);
 	dmaengine_ref_count--;
 	BUG_ON(dmaengine_ref_count < 0);
 	/* drop channel references */
-	list_for_each_entry(device, &dma_device_list, global_node) {
+	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 			continue;
 		list_for_each_entry(chan, &device->channels, device_node)
@@ -904,15 +1036,111 @@
 	return 0;
 }
 
+static int __dma_async_device_channel_register(struct dma_device *device,
+					       struct dma_chan *chan)
+{
+	int rc;
+
+	chan->local = alloc_percpu(typeof(*chan->local));
+	if (!chan->local)
+		return -ENOMEM;
+	chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+	if (!chan->dev) {
+		rc = -ENOMEM;
+		goto err_free_local;
+	}
+
+	/*
+	 * When the chan_id is a negative value, we are dynamically adding
+	 * the channel. Otherwise we are static enumerating.
+	 */
+	mutex_lock(&device->chan_mutex);
+	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
+	mutex_unlock(&device->chan_mutex);
+	if (chan->chan_id < 0) {
+		pr_err("%s: unable to alloc ida for chan: %d\n",
+		       __func__, chan->chan_id);
+		rc = chan->chan_id;
+		goto err_free_dev;
+	}
+
+	chan->dev->device.class = &dma_devclass;
+	chan->dev->device.parent = device->dev;
+	chan->dev->chan = chan;
+	chan->dev->dev_id = device->dev_id;
+	dev_set_name(&chan->dev->device, "dma%dchan%d",
+		     device->dev_id, chan->chan_id);
+	rc = device_register(&chan->dev->device);
+	if (rc)
+		goto err_out_ida;
+	chan->client_count = 0;
+	device->chancnt++;
+
+	return 0;
+
+ err_out_ida:
+	mutex_lock(&device->chan_mutex);
+	ida_free(&device->chan_ida, chan->chan_id);
+	mutex_unlock(&device->chan_mutex);
+ err_free_dev:
+	kfree(chan->dev);
+ err_free_local:
+	free_percpu(chan->local);
+	chan->local = NULL;
+	return rc;
+}
+
+int dma_async_device_channel_register(struct dma_device *device,
+				      struct dma_chan *chan)
+{
+	int rc;
+
+	rc = __dma_async_device_channel_register(device, chan);
+	if (rc < 0)
+		return rc;
+
+	dma_channel_rebalance();
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
+
+static void __dma_async_device_channel_unregister(struct dma_device *device,
+						  struct dma_chan *chan)
+{
+	WARN_ONCE(!device->device_release && chan->client_count,
+		  "%s called while %d clients hold a reference\n",
+		  __func__, chan->client_count);
+	mutex_lock(&dma_list_mutex);
+	device->chancnt--;
+	chan->dev->chan = NULL;
+	mutex_unlock(&dma_list_mutex);
+	mutex_lock(&device->chan_mutex);
+	ida_free(&device->chan_ida, chan->chan_id);
+	mutex_unlock(&device->chan_mutex);
+	device_unregister(&chan->dev->device);
+	free_percpu(chan->local);
+}
+
+void dma_async_device_channel_unregister(struct dma_device *device,
+					 struct dma_chan *chan)
+{
+	__dma_async_device_channel_unregister(device, chan);
+	dma_channel_rebalance();
+}
+EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
+
 /**
  * dma_async_device_register - registers DMA devices found
- * @device: &dma_device
+ * @device: pointer to &struct dma_device
+ *
+ * After calling this routine the structure should not be freed except in the
+ * device_release() callback which will be called after
+ * dma_async_device_unregister() is called and no further references are taken.
  */
 int dma_async_device_register(struct dma_device *device)
 {
-	int chancnt = 0, rc;
+	int rc;
 	struct dma_chan* chan;
-	atomic_t *idr_ref;
 
 	if (!device)
 		return -ENODEV;
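
dma_async_device_channel_register()/unregister() above let channels be added and removed after the device itself has been registered. A rough sketch of hot-adding one channel; the foo_* names are invented, and the list_add_tail() step mirrors how in-tree users link the channel into device->channels rather than anything mandated by this file:

	struct dma_chan *chan = &fchan->chan;
	int rc;

	chan->device = &foo->ddev;
	list_add_tail(&chan->device_node, &foo->ddev.channels);
	rc = dma_async_device_channel_register(&foo->ddev, chan);
	if (rc)
		return rc;

	/* ... and on hot-remove: */
	dma_async_device_channel_unregister(&foo->ddev, chan);
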
@@ -1000,64 +1228,31 @@
 		return -EIO;
 	}
 
+	if (!device->device_release)
+		dev_dbg(device->dev,
+			 "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
+
+	kref_init(&device->ref);
+
 	/* note: this only matters in the
 	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
 	 */
 	if (device_has_all_tx_types(device))
 		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
 
-	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
-	if (!idr_ref)
-		return -ENOMEM;
 	rc = get_dma_id(device);
-	if (rc != 0) {
-		kfree(idr_ref);
+	if (rc != 0)
 		return rc;
-	}
 
-	atomic_set(idr_ref, 0);
+	mutex_init(&device->chan_mutex);
+	ida_init(&device->chan_ida);
 
 	/* represent channels in sysfs. Probably want devs too */
 	list_for_each_entry(chan, &device->channels, device_node) {
-		rc = -ENOMEM;
-		chan->local = alloc_percpu(typeof(*chan->local));
-		if (chan->local == NULL)
+		rc = __dma_async_device_channel_register(device, chan);
+		if (rc < 0)
 			goto err_out;
-		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
-		if (chan->dev == NULL) {
-			free_percpu(chan->local);
-			chan->local = NULL;
-			goto err_out;
-		}
-
-		chan->chan_id = chancnt++;
-		chan->dev->device.class = &dma_devclass;
-		chan->dev->device.parent = device->dev;
-		chan->dev->chan = chan;
-		chan->dev->idr_ref = idr_ref;
-		chan->dev->dev_id = device->dev_id;
-		atomic_inc(idr_ref);
-		dev_set_name(&chan->dev->device, "dma%dchan%d",
-			     device->dev_id, chan->chan_id);
-
-		rc = device_register(&chan->dev->device);
-		if (rc) {
-			free_percpu(chan->local);
-			chan->local = NULL;
-			kfree(chan->dev);
-			atomic_dec(idr_ref);
-			goto err_out;
-		}
-		chan->client_count = 0;
 	}
-
-	if (!chancnt) {
-		dev_err(device->dev, "%s: device has no channels!\n", __func__);
-		rc = -ENODEV;
-		goto err_out;
-	}
-
-	device->chancnt = chancnt;
 
 	mutex_lock(&dma_list_mutex);
 	/* take references on public channels */
@@ -1082,13 +1277,14 @@
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
+	dmaengine_debug_register(device);
+
 	return 0;
 
 err_out:
 	/* if we never registered a channel just release the idr */
-	if (atomic_read(idr_ref) == 0) {
+	if (!device->chancnt) {
 		ida_free(&dma_ida, device->dev_id);
-		kfree(idr_ref);
 		return rc;
 	}
 
@@ -1107,30 +1303,30 @@
 
 /**
  * dma_async_device_unregister - unregister a DMA device
- * @device: &dma_device
+ * @device: pointer to &struct dma_device
  *
  * This routine is called by dma driver exit routines, dmaengine holds module
  * references to prevent it being called while channels are in use.
  */
 void dma_async_device_unregister(struct dma_device *device)
 {
-	struct dma_chan *chan;
+	struct dma_chan *chan, *n;
+
+	dmaengine_debug_unregister(device);
+
+	list_for_each_entry_safe(chan, n, &device->channels, device_node)
+		__dma_async_device_channel_unregister(device, chan);
 
 	mutex_lock(&dma_list_mutex);
-	list_del_rcu(&device->global_node);
+	/*
+	 * setting DMA_PRIVATE ensures the device being torn down will not
+	 * be used in the channel_table
+	 */
+	dma_cap_set(DMA_PRIVATE, device->cap_mask);
 	dma_channel_rebalance();
+	ida_free(&dma_ida, device->dev_id);
+	dma_device_put(device);
 	mutex_unlock(&dma_list_mutex);
-
-	list_for_each_entry(chan, &device->channels, device_node) {
-		WARN_ONCE(chan->client_count,
-			  "%s called while %d clients hold a reference\n",
-			  __func__, chan->client_count);
-		mutex_lock(&dma_list_mutex);
-		chan->dev->chan = NULL;
-		mutex_unlock(&dma_list_mutex);
-		device_unregister(&chan->dev->device);
-		free_percpu(chan->local);
-	}
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
 
@@ -1144,7 +1340,7 @@
 
 /**
  * dmaenginem_async_device_register - registers DMA devices found
- * @device: &dma_device
+ * @device: pointer to &struct dma_device
  *
  * The operation is managed and will be undone on driver detach.
  */
@@ -1308,8 +1504,82 @@
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
-/* dma_wait_for_async_tx - spin wait for a transaction to complete
- * @tx: in-flight transaction to wait on
+static inline int desc_check_and_set_metadata_mode(
+	struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
+{
+	/* Make sure that the metadata mode is not mixed */
+	if (!desc->desc_metadata_mode) {
+		if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
+			desc->desc_metadata_mode = mode;
+		else
+			return -ENOTSUPP;
+	} else if (desc->desc_metadata_mode != mode) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+				   void *data, size_t len)
+{
+	int ret;
+
+	if (!desc)
+		return -EINVAL;
+
+	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
+	if (ret)
+		return ret;
+
+	if (!desc->metadata_ops || !desc->metadata_ops->attach)
+		return -ENOTSUPP;
+
+	return desc->metadata_ops->attach(desc, data, len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
+
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+				      size_t *payload_len, size_t *max_len)
+{
+	int ret;
+
+	if (!desc)
+		return ERR_PTR(-EINVAL);
+
+	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
+		return ERR_PTR(-ENOTSUPP);
+
+	return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
+
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+				    size_t payload_len)
+{
+	int ret;
+
+	if (!desc)
+		return -EINVAL;
+
+	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
+	if (ret)
+		return ret;
+
+	if (!desc->metadata_ops || !desc->metadata_ops->set_len)
+		return -ENOTSUPP;
+
+	return desc->metadata_ops->set_len(desc, payload_len);
+}
+EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
+
+/**
+ * dma_wait_for_async_tx - spin wait for a transaction to complete
+ * @tx: in-flight transaction to wait on
  */
 enum dma_status
 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
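
On the client side, the DESC_METADATA_CLIENT path added above is used by attaching a metadata buffer to a prepared descriptor before submitting it. A hedged sketch; the buffer names are illustrative, and the channel's driver must implement the metadata_ops for the call to succeed:

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (desc) {
		ret = dmaengine_desc_attach_metadata(desc, md_buf, md_len);
		if (ret)
			return ret;
		cookie = dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}
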
@@ -1332,9 +1602,12 @@
 }
 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
 
-/* dma_run_dependencies - helper routine for dma drivers to process
- *	(start) dependent operations on their target channel
- * @tx: transaction with dependencies
+/**
+ * dma_run_dependencies - process dependent operations on the target channel
+ * @tx: transaction with dependencies
+ *
+ * Helper routine for DMA drivers to process (start) dependent operations
+ * on their target channel.
  */
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
 {
@@ -1376,8 +1649,11 @@
 
 	if (err)
 		return err;
-	return class_register(&dma_devclass);
+
+	err = class_register(&dma_devclass);
+	if (!err)
+		dmaengine_debugfs_init();
+
+	return err;
 }
 arch_initcall(dma_bus_init);
-
-