2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/dma/dmaengine.c
....@@ -1,18 +1,6 @@
1
+// SPDX-License-Identifier: GPL-2.0-or-later
12 /*
23 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
3
- *
4
- * This program is free software; you can redistribute it and/or modify it
5
- * under the terms of the GNU General Public License as published by the Free
6
- * Software Foundation; either version 2 of the License, or (at your option)
7
- * any later version.
8
- *
9
- * This program is distributed in the hope that it will be useful, but WITHOUT
10
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12
- * more details.
13
- *
14
- * The full GNU General Public License is included in this distribution in the
15
- * file called COPYING.
164 */
175
186 /*
....@@ -63,19 +51,105 @@
6351 #include <linux/acpi_dma.h>
6452 #include <linux/of_dma.h>
6553 #include <linux/mempool.h>
54
+#include <linux/numa.h>
55
+
56
+#include "dmaengine.h"
6657
6758 static DEFINE_MUTEX(dma_list_mutex);
6859 static DEFINE_IDA(dma_ida);
6960 static LIST_HEAD(dma_device_list);
7061 static long dmaengine_ref_count;
7162
63
+/* --- debugfs implementation --- */
64
+#ifdef CONFIG_DEBUG_FS
65
+#include <linux/debugfs.h>
66
+
67
+static struct dentry *rootdir;
68
+
69
+static void dmaengine_debug_register(struct dma_device *dma_dev)
70
+{
71
+ dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
72
+ rootdir);
73
+ if (IS_ERR(dma_dev->dbg_dev_root))
74
+ dma_dev->dbg_dev_root = NULL;
75
+}
76
+
77
+static void dmaengine_debug_unregister(struct dma_device *dma_dev)
78
+{
79
+ debugfs_remove_recursive(dma_dev->dbg_dev_root);
80
+ dma_dev->dbg_dev_root = NULL;
81
+}
82
+
83
+static void dmaengine_dbg_summary_show(struct seq_file *s,
84
+ struct dma_device *dma_dev)
85
+{
86
+ struct dma_chan *chan;
87
+
88
+ list_for_each_entry(chan, &dma_dev->channels, device_node) {
89
+ if (chan->client_count) {
90
+ seq_printf(s, " %-13s| %s", dma_chan_name(chan),
91
+ chan->dbg_client_name ?: "in-use");
92
+
93
+ if (chan->router)
94
+ seq_printf(s, " (via router: %s)\n",
95
+ dev_name(chan->router->dev));
96
+ else
97
+ seq_puts(s, "\n");
98
+ }
99
+ }
100
+}
101
+
102
+static int dmaengine_summary_show(struct seq_file *s, void *data)
103
+{
104
+ struct dma_device *dma_dev = NULL;
105
+
106
+ mutex_lock(&dma_list_mutex);
107
+ list_for_each_entry(dma_dev, &dma_device_list, global_node) {
108
+ seq_printf(s, "dma%d (%s): number of channels: %u\n",
109
+ dma_dev->dev_id, dev_name(dma_dev->dev),
110
+ dma_dev->chancnt);
111
+
112
+ if (dma_dev->dbg_summary_show)
113
+ dma_dev->dbg_summary_show(s, dma_dev);
114
+ else
115
+ dmaengine_dbg_summary_show(s, dma_dev);
116
+
117
+ if (!list_is_last(&dma_dev->global_node, &dma_device_list))
118
+ seq_puts(s, "\n");
119
+ }
120
+ mutex_unlock(&dma_list_mutex);
121
+
122
+ return 0;
123
+}
124
+DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
125
+
126
+static void __init dmaengine_debugfs_init(void)
127
+{
128
+ rootdir = debugfs_create_dir("dmaengine", NULL);
129
+
130
+ /* /sys/kernel/debug/dmaengine/summary */
131
+ debugfs_create_file("summary", 0444, rootdir, NULL,
132
+ &dmaengine_summary_fops);
133
+}
134
+#else
135
+static inline void dmaengine_debugfs_init(void) { }
136
+static inline int dmaengine_debug_register(struct dma_device *dma_dev)
137
+{
138
+ return 0;
139
+}
140
+
141
+static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
142
+#endif /* DEBUG_FS */
143
+
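The block above creates /sys/kernel/debug/dmaengine/summary and falls back to a generic per-channel listing unless the controller driver supplies its own dbg_summary_show callback. A hedged, provider-side sketch of such an override; the foo_* names are illustrative and not part of this patch:

#include <linux/dmaengine.h>
#include <linux/seq_file.h>

/* Illustrative per-device override picked up by dmaengine_summary_show(). */
static void foo_dbg_summary_show(struct seq_file *s,
				 struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node)
		seq_printf(s, " %s: %d client(s)\n",
			   dma_chan_name(chan), chan->client_count);
}

static void foo_setup_debugfs(struct dma_device *dma_dev)
{
#ifdef CONFIG_DEBUG_FS
	dma_dev->dbg_summary_show = foo_dbg_summary_show;
#endif
}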
72144 /* --- sysfs implementation --- */
73145
146
+#define DMA_SLAVE_NAME "slave"
147
+
74148 /**
75
- * dev_to_dma_chan - convert a device pointer to the its sysfs container object
76
- * @dev - device node
149
+ * dev_to_dma_chan - convert a device pointer to its sysfs container object
150
+ * @dev: device node
77151 *
78
- * Must be called under dma_list_mutex
152
+ * Must be called under dma_list_mutex.
79153 */
80154 static struct dma_chan *dev_to_dma_chan(struct device *dev)
81155 {
....@@ -160,10 +234,6 @@
160234 struct dma_chan_dev *chan_dev;
161235
162236 chan_dev = container_of(dev, typeof(*chan_dev), device);
163
- if (atomic_dec_and_test(chan_dev->idr_ref)) {
164
- ida_free(&dma_ida, chan_dev->dev_id);
165
- kfree(chan_dev->idr_ref);
166
- }
167237 kfree(chan_dev);
168238 }
169239
....@@ -175,146 +245,18 @@
175245
176246 /* --- client and device registration --- */
177247
178
-#define dma_device_satisfies_mask(device, mask) \
179
- __dma_device_satisfies_mask((device), &(mask))
180
-static int
181
-__dma_device_satisfies_mask(struct dma_device *device,
182
- const dma_cap_mask_t *want)
183
-{
184
- dma_cap_mask_t has;
185
-
186
- bitmap_and(has.bits, want->bits, device->cap_mask.bits,
187
- DMA_TX_TYPE_END);
188
- return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
189
-}
190
-
191
-static struct module *dma_chan_to_owner(struct dma_chan *chan)
192
-{
193
- return chan->device->owner;
194
-}
195
-
196
-/**
197
- * balance_ref_count - catch up the channel reference count
198
- * @chan - channel to balance ->client_count versus dmaengine_ref_count
199
- *
200
- * balance_ref_count must be called under dma_list_mutex
201
- */
202
-static void balance_ref_count(struct dma_chan *chan)
203
-{
204
- struct module *owner = dma_chan_to_owner(chan);
205
-
206
- while (chan->client_count < dmaengine_ref_count) {
207
- __module_get(owner);
208
- chan->client_count++;
209
- }
210
-}
211
-
212
-/**
213
- * dma_chan_get - try to grab a dma channel's parent driver module
214
- * @chan - channel to grab
215
- *
216
- * Must be called under dma_list_mutex
217
- */
218
-static int dma_chan_get(struct dma_chan *chan)
219
-{
220
- struct module *owner = dma_chan_to_owner(chan);
221
- int ret;
222
-
223
- /* The channel is already in use, update client count */
224
- if (chan->client_count) {
225
- __module_get(owner);
226
- goto out;
227
- }
228
-
229
- if (!try_module_get(owner))
230
- return -ENODEV;
231
-
232
- /* allocate upon first client reference */
233
- if (chan->device->device_alloc_chan_resources) {
234
- ret = chan->device->device_alloc_chan_resources(chan);
235
- if (ret < 0)
236
- goto err_out;
237
- }
238
-
239
- if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
240
- balance_ref_count(chan);
241
-
242
-out:
243
- chan->client_count++;
244
- return 0;
245
-
246
-err_out:
247
- module_put(owner);
248
- return ret;
249
-}
250
-
251
-/**
252
- * dma_chan_put - drop a reference to a dma channel's parent driver module
253
- * @chan - channel to release
254
- *
255
- * Must be called under dma_list_mutex
256
- */
257
-static void dma_chan_put(struct dma_chan *chan)
258
-{
259
- /* This channel is not in use, bail out */
260
- if (!chan->client_count)
261
- return;
262
-
263
- chan->client_count--;
264
- module_put(dma_chan_to_owner(chan));
265
-
266
- /* This channel is not in use anymore, free it */
267
- if (!chan->client_count && chan->device->device_free_chan_resources) {
268
- /* Make sure all operations have completed */
269
- dmaengine_synchronize(chan);
270
- chan->device->device_free_chan_resources(chan);
271
- }
272
-
273
- /* If the channel is used via a DMA request router, free the mapping */
274
- if (chan->router && chan->router->route_free) {
275
- chan->router->route_free(chan->router->dev, chan->route_data);
276
- chan->router = NULL;
277
- chan->route_data = NULL;
278
- }
279
-}
280
-
281
-enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
282
-{
283
- enum dma_status status;
284
- unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
285
-
286
- dma_async_issue_pending(chan);
287
- do {
288
- status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
289
- if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
290
- dev_err(chan->device->dev, "%s: timeout!\n", __func__);
291
- return DMA_ERROR;
292
- }
293
- if (status != DMA_IN_PROGRESS)
294
- break;
295
- cpu_relax();
296
- } while (1);
297
-
298
- return status;
299
-}
300
-EXPORT_SYMBOL(dma_sync_wait);
301
-
302
-/**
303
- * dma_cap_mask_all - enable iteration over all operation types
304
- */
248
+/* enable iteration over all operation types */
305249 static dma_cap_mask_t dma_cap_mask_all;
306250
307251 /**
308
- * dma_chan_tbl_ent - tracks channel allocations per core/operation
309
- * @chan - associated channel for this entry
252
+ * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
253
+ * @chan: associated channel for this entry
310254 */
311255 struct dma_chan_tbl_ent {
312256 struct dma_chan *chan;
313257 };
314258
315
-/**
316
- * channel_table - percpu lookup table for memory-to-memory offload providers
317
- */
259
+/* percpu lookup table for memory-to-memory offload providers */
318260 static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
319261
320262 static int __init dma_channel_table_init(void)
....@@ -341,7 +283,7 @@
341283 }
342284
343285 if (err) {
344
- pr_err("initialization failure\n");
286
+ pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
345287 for_each_dma_cap_mask(cap, dma_cap_mask_all)
346288 free_percpu(channel_table[cap]);
347289 }
....@@ -351,52 +293,28 @@
351293 arch_initcall(dma_channel_table_init);
352294
353295 /**
354
- * dma_find_channel - find a channel to carry out the operation
355
- * @tx_type: transaction type
356
- */
357
-struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
358
-{
359
- return this_cpu_read(channel_table[tx_type]->chan);
360
-}
361
-EXPORT_SYMBOL(dma_find_channel);
362
-
363
-/**
364
- * dma_issue_pending_all - flush all pending operations across all channels
365
- */
366
-void dma_issue_pending_all(void)
367
-{
368
- struct dma_device *device;
369
- struct dma_chan *chan;
370
-
371
- rcu_read_lock();
372
- list_for_each_entry_rcu(device, &dma_device_list, global_node) {
373
- if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
374
- continue;
375
- list_for_each_entry(chan, &device->channels, device_node)
376
- if (chan->client_count)
377
- device->device_issue_pending(chan);
378
- }
379
- rcu_read_unlock();
380
-}
381
-EXPORT_SYMBOL(dma_issue_pending_all);
382
-
383
-/**
384
- * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
296
+ * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
297
+ * @chan: DMA channel to test
298
+ * @cpu: CPU index which the channel should be close to
299
+ *
300
+ * Returns true if the channel is in the same NUMA-node as the CPU.
385301 */
386302 static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
387303 {
388304 int node = dev_to_node(chan->device->dev);
389
- return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
305
+ return node == NUMA_NO_NODE ||
306
+ cpumask_test_cpu(cpu, cpumask_of_node(node));
390307 }
391308
392309 /**
393
- * min_chan - returns the channel with min count and in the same numa-node as the cpu
394
- * @cap: capability to match
395
- * @cpu: cpu index which the channel should be close to
310
+ * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
311
+ * @cap: capability to match
312
+ * @cpu: CPU index which the channel should be close to
396313 *
397
- * If some channels are close to the given cpu, the one with the lowest
398
- * reference count is returned. Otherwise, cpu is ignored and only the
314
+ * If some channels are close to the given CPU, the one with the lowest
315
+ * reference count is returned. Otherwise, CPU is ignored and only the
399316 * reference count is taken into account.
317
+ *
400318 * Must be called under dma_list_mutex.
401319 */
402320 static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
....@@ -434,10 +352,11 @@
434352 /**
435353 * dma_channel_rebalance - redistribute the available channels
436354 *
437
- * Optimize for cpu isolation (each cpu gets a dedicated channel for an
438
- * operation type) in the SMP case, and operation isolation (avoid
439
- * multi-tasking channels) in the non-SMP case. Must be called under
440
- * dma_list_mutex.
355
+ * Optimize for CPU isolation (each CPU gets a dedicated channel for an
356
+ * operation type) in the SMP case, and operation isolation (avoid
357
+ * multi-tasking channels) in the non-SMP case.
358
+ *
359
+ * Must be called under dma_list_mutex.
441360 */
442361 static void dma_channel_rebalance(void)
443362 {
....@@ -470,6 +389,185 @@
470389 }
471390 }
472391
392
+static int dma_device_satisfies_mask(struct dma_device *device,
393
+ const dma_cap_mask_t *want)
394
+{
395
+ dma_cap_mask_t has;
396
+
397
+ bitmap_and(has.bits, want->bits, device->cap_mask.bits,
398
+ DMA_TX_TYPE_END);
399
+ return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
400
+}
401
+
402
+static struct module *dma_chan_to_owner(struct dma_chan *chan)
403
+{
404
+ return chan->device->owner;
405
+}
406
+
407
+/**
408
+ * balance_ref_count - catch up the channel reference count
409
+ * @chan: channel to balance ->client_count versus dmaengine_ref_count
410
+ *
411
+ * Must be called under dma_list_mutex.
412
+ */
413
+static void balance_ref_count(struct dma_chan *chan)
414
+{
415
+ struct module *owner = dma_chan_to_owner(chan);
416
+
417
+ while (chan->client_count < dmaengine_ref_count) {
418
+ __module_get(owner);
419
+ chan->client_count++;
420
+ }
421
+}
422
+
423
+static void dma_device_release(struct kref *ref)
424
+{
425
+ struct dma_device *device = container_of(ref, struct dma_device, ref);
426
+
427
+ list_del_rcu(&device->global_node);
428
+ dma_channel_rebalance();
429
+
430
+ if (device->device_release)
431
+ device->device_release(device);
432
+}
433
+
434
+static void dma_device_put(struct dma_device *device)
435
+{
436
+ lockdep_assert_held(&dma_list_mutex);
437
+ kref_put(&device->ref, dma_device_release);
438
+}
439
+
440
+/**
441
+ * dma_chan_get - try to grab a DMA channel's parent driver module
442
+ * @chan: channel to grab
443
+ *
444
+ * Must be called under dma_list_mutex.
445
+ */
446
+static int dma_chan_get(struct dma_chan *chan)
447
+{
448
+ struct module *owner = dma_chan_to_owner(chan);
449
+ int ret;
450
+
451
+ /* The channel is already in use, update client count */
452
+ if (chan->client_count) {
453
+ __module_get(owner);
454
+ chan->client_count++;
455
+ return 0;
456
+ }
457
+
458
+ if (!try_module_get(owner))
459
+ return -ENODEV;
460
+
461
+ ret = kref_get_unless_zero(&chan->device->ref);
462
+ if (!ret) {
463
+ ret = -ENODEV;
464
+ goto module_put_out;
465
+ }
466
+
467
+ /* allocate upon first client reference */
468
+ if (chan->device->device_alloc_chan_resources) {
469
+ ret = chan->device->device_alloc_chan_resources(chan);
470
+ if (ret < 0)
471
+ goto err_out;
472
+ }
473
+
474
+ chan->client_count++;
475
+
476
+ if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
477
+ balance_ref_count(chan);
478
+
479
+ return 0;
480
+
481
+err_out:
482
+ dma_device_put(chan->device);
483
+module_put_out:
484
+ module_put(owner);
485
+ return ret;
486
+}
487
+
488
+/**
489
+ * dma_chan_put - drop a reference to a DMA channel's parent driver module
490
+ * @chan: channel to release
491
+ *
492
+ * Must be called under dma_list_mutex.
493
+ */
494
+static void dma_chan_put(struct dma_chan *chan)
495
+{
496
+ /* This channel is not in use, bail out */
497
+ if (!chan->client_count)
498
+ return;
499
+
500
+ chan->client_count--;
501
+
502
+ /* This channel is not in use anymore, free it */
503
+ if (!chan->client_count && chan->device->device_free_chan_resources) {
504
+ /* Make sure all operations have completed */
505
+ dmaengine_synchronize(chan);
506
+ chan->device->device_free_chan_resources(chan);
507
+ }
508
+
509
+ /* If the channel is used via a DMA request router, free the mapping */
510
+ if (chan->router && chan->router->route_free) {
511
+ chan->router->route_free(chan->router->dev, chan->route_data);
512
+ chan->router = NULL;
513
+ chan->route_data = NULL;
514
+ }
515
+
516
+ dma_device_put(chan->device);
517
+ module_put(dma_chan_to_owner(chan));
518
+}
519
+
520
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
521
+{
522
+ enum dma_status status;
523
+ unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
524
+
525
+ dma_async_issue_pending(chan);
526
+ do {
527
+ status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
528
+ if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
529
+ dev_err(chan->device->dev, "%s: timeout!\n", __func__);
530
+ return DMA_ERROR;
531
+ }
532
+ if (status != DMA_IN_PROGRESS)
533
+ break;
534
+ cpu_relax();
535
+ } while (1);
536
+
537
+ return status;
538
+}
539
+EXPORT_SYMBOL(dma_sync_wait);
540
+
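dma_sync_wait() above issues pending work and busy-waits up to five seconds for a cookie. A hedged client-side sketch using it for a synchronous memcpy; "chan", "dst" and "src" (DMA addresses) are assumed to have been obtained elsewhere (e.g. via dma_request_chan_by_mask() and dma_map_single()) and are not part of this patch:

#include <linux/dmaengine.h>

static int foo_copy_sync(struct dma_chan *chan, dma_addr_t dst,
			 dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/* issues pending work and spins for at most five seconds */
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}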
541
+/**
542
+ * dma_find_channel - find a channel to carry out the operation
543
+ * @tx_type: transaction type
544
+ */
545
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
546
+{
547
+ return this_cpu_read(channel_table[tx_type]->chan);
548
+}
549
+EXPORT_SYMBOL(dma_find_channel);
550
+
551
+/**
552
+ * dma_issue_pending_all - flush all pending operations across all channels
553
+ */
554
+void dma_issue_pending_all(void)
555
+{
556
+ struct dma_device *device;
557
+ struct dma_chan *chan;
558
+
559
+ rcu_read_lock();
560
+ list_for_each_entry_rcu(device, &dma_device_list, global_node) {
561
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
562
+ continue;
563
+ list_for_each_entry(chan, &device->channels, device_node)
564
+ if (chan->client_count)
565
+ device->device_issue_pending(chan);
566
+ }
567
+ rcu_read_unlock();
568
+}
569
+EXPORT_SYMBOL(dma_issue_pending_all);
570
+
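dma_find_channel() and dma_issue_pending_all() serve opportunistic, async_tx-style clients that hold the global dmaengine reference. An illustrative sketch of that usage pattern (the foo_ name is not from this patch):

#include <linux/dmaengine.h>

static void foo_offload_poke(void)
{
	struct dma_chan *chan;

	dmaengine_get();			/* take the global client reference */

	chan = dma_find_channel(DMA_MEMCPY);	/* per-CPU table lookup */
	if (chan) {
		/* ... prep and submit descriptors on chan ... */
		dma_async_issue_pending(chan);
	}

	/* or, after batching work across several channels: */
	dma_issue_pending_all();

	dmaengine_put();			/* drop the global reference */
}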
473571 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
474572 {
475573 struct dma_device *device;
....@@ -495,12 +593,24 @@
495593 caps->src_addr_widths = device->src_addr_widths;
496594 caps->dst_addr_widths = device->dst_addr_widths;
497595 caps->directions = device->directions;
596
+ caps->min_burst = device->min_burst;
498597 caps->max_burst = device->max_burst;
598
+ caps->max_sg_burst = device->max_sg_burst;
499599 caps->residue_granularity = device->residue_granularity;
500600 caps->descriptor_reuse = device->descriptor_reuse;
501601 caps->cmd_pause = !!device->device_pause;
502602 caps->cmd_resume = !!device->device_resume;
503603 caps->cmd_terminate = !!device->device_terminate_all;
604
+
605
+ /*
606
+ * DMA engine device might be configured with non-uniformly
607
+ * distributed slave capabilities per device channels. In this
608
+ * case the corresponding driver may provide the device_caps
609
+ * callback to override the generic capabilities with
610
+ * channel-specific ones.
611
+ */
612
+ if (device->device_caps)
613
+ device->device_caps(chan, caps);
504614
505615 return 0;
506616 }
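dma_get_slave_caps() now also reports min_burst/max_sg_burst and lets a driver refine the result per channel through device_caps(). A hedged sketch of a slave client validating a channel before building its dma_slave_config; foo_check_caps() is illustrative:

#include <linux/dmaengine.h>

/* Validate a channel for 32-bit device-to-memory transfers. */
static int foo_check_caps(struct dma_chan *chan)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;

	if (!(caps.directions & BIT(DMA_DEV_TO_MEM)))
		return -EINVAL;
	if (!(caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
		return -EINVAL;

	return 0;
}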
....@@ -512,7 +622,7 @@
512622 {
513623 struct dma_chan *chan;
514624
515
- if (mask && !__dma_device_satisfies_mask(dev, mask)) {
625
+ if (mask && !dma_device_satisfies_mask(dev, mask)) {
516626 dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
517627 return NULL;
518628 }
....@@ -582,7 +692,7 @@
582692
583693 /**
584694 * dma_get_slave_channel - try to get specific channel exclusively
585
- * @chan: target channel
695
+ * @chan: target channel
586696 */
587697 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
588698 {
....@@ -636,14 +746,16 @@
636746
637747 /**
638748 * __dma_request_channel - try to allocate an exclusive channel
639
- * @mask: capabilities that the channel must satisfy
640
- * @fn: optional callback to disposition available channels
641
- * @fn_param: opaque parameter to pass to dma_filter_fn
749
+ * @mask: capabilities that the channel must satisfy
750
+ * @fn: optional callback to disposition available channels
751
+ * @fn_param: opaque parameter to pass to dma_filter_fn()
752
+ * @np: device node to look for DMA channels
642753 *
643754 * Returns pointer to appropriate DMA channel on success or NULL.
644755 */
645756 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
646
- dma_filter_fn fn, void *fn_param)
757
+ dma_filter_fn fn, void *fn_param,
758
+ struct device_node *np)
647759 {
648760 struct dma_device *device, *_d;
649761 struct dma_chan *chan = NULL;
....@@ -651,6 +763,10 @@
651763 /* Find a channel */
652764 mutex_lock(&dma_list_mutex);
653765 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
766
+ /* Finds a DMA controller with matching device node */
767
+ if (np && device->dev->of_node && np != device->dev->of_node)
768
+ continue;
769
+
654770 chan = find_candidate(device, mask, fn, fn_param);
655771 if (!IS_ERR(chan))
656772 break;
....@@ -708,11 +824,11 @@
708824 if (has_acpi_companion(dev) && !chan)
709825 chan = acpi_dma_request_slave_chan_by_name(dev, name);
710826
711
- if (chan) {
712
- /* Valid channel found or requester need to be deferred */
713
- if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
714
- return chan;
715
- }
827
+ if (PTR_ERR(chan) == -EPROBE_DEFER)
828
+ return chan;
829
+
830
+ if (!IS_ERR_OR_NULL(chan))
831
+ goto found;
716832
717833 /* Try to find the channel via the DMA filter map(s) */
718834 mutex_lock(&dma_list_mutex);
....@@ -732,31 +848,35 @@
732848 }
733849 mutex_unlock(&dma_list_mutex);
734850
735
- return chan ? chan : ERR_PTR(-EPROBE_DEFER);
851
+ if (IS_ERR(chan))
852
+ return chan;
853
+ if (!chan)
854
+ return ERR_PTR(-EPROBE_DEFER);
855
+
856
+found:
857
+#ifdef CONFIG_DEBUG_FS
858
+ chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
859
+ name);
860
+#endif
861
+
862
+ chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
863
+ if (!chan->name)
864
+ return chan;
865
+ chan->slave = dev;
866
+
867
+ if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
868
+ DMA_SLAVE_NAME))
869
+ dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
870
+ if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
871
+ dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);
872
+
873
+ return chan;
736874 }
737875 EXPORT_SYMBOL_GPL(dma_request_chan);
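With the symlink handling above, a successful dma_request_chan() leaves "slave" and "dma:<name>" links in sysfs which dma_release_channel() removes again. A hedged consumer sketch; the "rx" channel name and foo_* helpers are illustrative:

#include <linux/dmaengine.h>

static int foo_probe_dma(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) != -EPROBE_DEFER)
			dev_err(dev, "failed to get rx channel: %ld\n",
				PTR_ERR(chan));
		return PTR_ERR(chan);
	}

	*out = chan;
	return 0;
}

static void foo_remove_dma(struct dma_chan *chan)
{
	/* also removes the "slave" / "dma:rx" sysfs links created above */
	dma_release_channel(chan);
}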
738876
739877 /**
740
- * dma_request_slave_channel - try to allocate an exclusive slave channel
741
- * @dev: pointer to client device structure
742
- * @name: slave channel name
743
- *
744
- * Returns pointer to appropriate DMA channel on success or NULL.
745
- */
746
-struct dma_chan *dma_request_slave_channel(struct device *dev,
747
- const char *name)
748
-{
749
- struct dma_chan *ch = dma_request_chan(dev, name);
750
- if (IS_ERR(ch))
751
- return NULL;
752
-
753
- return ch;
754
-}
755
-EXPORT_SYMBOL_GPL(dma_request_slave_channel);
756
-
757
-/**
758878 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
759
- * @mask: capabilities that the channel must satisfy
879
+ * @mask: capabilities that the channel must satisfy
760880 *
761881 * Returns pointer to appropriate DMA channel on success or an error pointer.
762882 */
....@@ -767,7 +887,7 @@
767887 if (!mask)
768888 return ERR_PTR(-ENODEV);
769889
770
- chan = __dma_request_channel(mask, NULL, NULL);
890
+ chan = __dma_request_channel(mask, NULL, NULL, NULL);
771891 if (!chan) {
772892 mutex_lock(&dma_list_mutex);
773893 if (list_empty(&dma_device_list))
....@@ -790,6 +910,19 @@
790910 /* drop PRIVATE cap enabled by __dma_request_channel() */
791911 if (--chan->device->privatecnt == 0)
792912 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
913
+
914
+ if (chan->slave) {
915
+ sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
916
+ sysfs_remove_link(&chan->slave->kobj, chan->name);
917
+ kfree(chan->name);
918
+ chan->name = NULL;
919
+ chan->slave = NULL;
920
+ }
921
+
922
+#ifdef CONFIG_DEBUG_FS
923
+ kfree(chan->dbg_client_name);
924
+ chan->dbg_client_name = NULL;
925
+#endif
793926 mutex_unlock(&dma_list_mutex);
794927 }
795928 EXPORT_SYMBOL_GPL(dma_release_channel);
....@@ -834,18 +967,18 @@
834967 EXPORT_SYMBOL(dmaengine_get);
835968
836969 /**
837
- * dmaengine_put - let dma drivers be removed when ref_count == 0
970
+ * dmaengine_put - let DMA drivers be removed when ref_count == 0
838971 */
839972 void dmaengine_put(void)
840973 {
841
- struct dma_device *device;
974
+ struct dma_device *device, *_d;
842975 struct dma_chan *chan;
843976
844977 mutex_lock(&dma_list_mutex);
845978 dmaengine_ref_count--;
846979 BUG_ON(dmaengine_ref_count < 0);
847980 /* drop channel references */
848
- list_for_each_entry(device, &dma_device_list, global_node) {
981
+ list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
849982 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
850983 continue;
851984 list_for_each_entry(chan, &device->channels, device_node)
....@@ -904,15 +1037,111 @@
9041037 return 0;
9051038 }
9061039
1040
+static int __dma_async_device_channel_register(struct dma_device *device,
1041
+ struct dma_chan *chan)
1042
+{
1043
+ int rc;
1044
+
1045
+ chan->local = alloc_percpu(typeof(*chan->local));
1046
+ if (!chan->local)
1047
+ return -ENOMEM;
1048
+ chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
1049
+ if (!chan->dev) {
1050
+ rc = -ENOMEM;
1051
+ goto err_free_local;
1052
+ }
1053
+
1054
+ /*
1055
+ * When the chan_id is a negative value, we are dynamically adding
1056
+	 * the channel. Otherwise we are enumerating statically.
1057
+ */
1058
+ mutex_lock(&device->chan_mutex);
1059
+ chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
1060
+ mutex_unlock(&device->chan_mutex);
1061
+ if (chan->chan_id < 0) {
1062
+ pr_err("%s: unable to alloc ida for chan: %d\n",
1063
+ __func__, chan->chan_id);
1064
+ rc = chan->chan_id;
1065
+ goto err_free_dev;
1066
+ }
1067
+
1068
+ chan->dev->device.class = &dma_devclass;
1069
+ chan->dev->device.parent = device->dev;
1070
+ chan->dev->chan = chan;
1071
+ chan->dev->dev_id = device->dev_id;
1072
+ dev_set_name(&chan->dev->device, "dma%dchan%d",
1073
+ device->dev_id, chan->chan_id);
1074
+ rc = device_register(&chan->dev->device);
1075
+ if (rc)
1076
+ goto err_out_ida;
1077
+ chan->client_count = 0;
1078
+ device->chancnt++;
1079
+
1080
+ return 0;
1081
+
1082
+ err_out_ida:
1083
+ mutex_lock(&device->chan_mutex);
1084
+ ida_free(&device->chan_ida, chan->chan_id);
1085
+ mutex_unlock(&device->chan_mutex);
1086
+ err_free_dev:
1087
+ kfree(chan->dev);
1088
+ err_free_local:
1089
+ free_percpu(chan->local);
1090
+ chan->local = NULL;
1091
+ return rc;
1092
+}
1093
+
1094
+int dma_async_device_channel_register(struct dma_device *device,
1095
+ struct dma_chan *chan)
1096
+{
1097
+ int rc;
1098
+
1099
+ rc = __dma_async_device_channel_register(device, chan);
1100
+ if (rc < 0)
1101
+ return rc;
1102
+
1103
+ dma_channel_rebalance();
1104
+ return 0;
1105
+}
1106
+EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
1107
+
1108
+static void __dma_async_device_channel_unregister(struct dma_device *device,
1109
+ struct dma_chan *chan)
1110
+{
1111
+ WARN_ONCE(!device->device_release && chan->client_count,
1112
+ "%s called while %d clients hold a reference\n",
1113
+ __func__, chan->client_count);
1114
+ mutex_lock(&dma_list_mutex);
1115
+ device->chancnt--;
1116
+ chan->dev->chan = NULL;
1117
+ mutex_unlock(&dma_list_mutex);
1118
+ mutex_lock(&device->chan_mutex);
1119
+ ida_free(&device->chan_ida, chan->chan_id);
1120
+ mutex_unlock(&device->chan_mutex);
1121
+ device_unregister(&chan->dev->device);
1122
+ free_percpu(chan->local);
1123
+}
1124
+
1125
+void dma_async_device_channel_unregister(struct dma_device *device,
1126
+ struct dma_chan *chan)
1127
+{
1128
+ __dma_async_device_channel_unregister(device, chan);
1129
+ dma_channel_rebalance();
1130
+}
1131
+EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
1132
+
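The channel register/unregister helpers above let a controller driver add or remove channels after dma_async_device_register() has run. A hedged provider-side sketch of hot-adding one channel; the foo_ name is illustrative:

#include <linux/dmaengine.h>

static int foo_add_channel(struct dma_device *dma_dev, struct dma_chan *chan)
{
	chan->device = dma_dev;
	list_add_tail(&chan->device_node, &dma_dev->channels);

	/*
	 * Allocates chan_id from the per-device ida, registers the
	 * dma%dchan%d sysfs device and rebalances the per-CPU table.
	 */
	return dma_async_device_channel_register(dma_dev, chan);
}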
9071133 /**
9081134 * dma_async_device_register - registers DMA devices found
909
- * @device: &dma_device
1135
+ * @device: pointer to &struct dma_device
1136
+ *
1137
+ * After calling this routine the structure should not be freed except in the
1138
+ * device_release() callback which will be called after
1139
+ * dma_async_device_unregister() is called and no further references are taken.
9101140 */
9111141 int dma_async_device_register(struct dma_device *device)
9121142 {
913
- int chancnt = 0, rc;
1143
+ int rc;
9141144 struct dma_chan* chan;
915
- atomic_t *idr_ref;
9161145
9171146 if (!device)
9181147 return -ENODEV;
....@@ -1000,64 +1229,31 @@
10001229 return -EIO;
10011230 }
10021231
1232
+ if (!device->device_release)
1233
+ dev_dbg(device->dev,
1234
+ "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
1235
+
1236
+ kref_init(&device->ref);
1237
+
10031238 /* note: this only matters in the
10041239 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
10051240 */
10061241 if (device_has_all_tx_types(device))
10071242 dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
10081243
1009
- idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
1010
- if (!idr_ref)
1011
- return -ENOMEM;
10121244 rc = get_dma_id(device);
1013
- if (rc != 0) {
1014
- kfree(idr_ref);
1245
+ if (rc != 0)
10151246 return rc;
1016
- }
10171247
1018
- atomic_set(idr_ref, 0);
1248
+ mutex_init(&device->chan_mutex);
1249
+ ida_init(&device->chan_ida);
10191250
10201251 /* represent channels in sysfs. Probably want devs too */
10211252 list_for_each_entry(chan, &device->channels, device_node) {
1022
- rc = -ENOMEM;
1023
- chan->local = alloc_percpu(typeof(*chan->local));
1024
- if (chan->local == NULL)
1253
+ rc = __dma_async_device_channel_register(device, chan);
1254
+ if (rc < 0)
10251255 goto err_out;
1026
- chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
1027
- if (chan->dev == NULL) {
1028
- free_percpu(chan->local);
1029
- chan->local = NULL;
1030
- goto err_out;
1031
- }
1032
-
1033
- chan->chan_id = chancnt++;
1034
- chan->dev->device.class = &dma_devclass;
1035
- chan->dev->device.parent = device->dev;
1036
- chan->dev->chan = chan;
1037
- chan->dev->idr_ref = idr_ref;
1038
- chan->dev->dev_id = device->dev_id;
1039
- atomic_inc(idr_ref);
1040
- dev_set_name(&chan->dev->device, "dma%dchan%d",
1041
- device->dev_id, chan->chan_id);
1042
-
1043
- rc = device_register(&chan->dev->device);
1044
- if (rc) {
1045
- free_percpu(chan->local);
1046
- chan->local = NULL;
1047
- kfree(chan->dev);
1048
- atomic_dec(idr_ref);
1049
- goto err_out;
1050
- }
1051
- chan->client_count = 0;
10521256 }
1053
-
1054
- if (!chancnt) {
1055
- dev_err(device->dev, "%s: device has no channels!\n", __func__);
1056
- rc = -ENODEV;
1057
- goto err_out;
1058
- }
1059
-
1060
- device->chancnt = chancnt;
10611257
10621258 mutex_lock(&dma_list_mutex);
10631259 /* take references on public channels */
....@@ -1082,13 +1278,14 @@
10821278 dma_channel_rebalance();
10831279 mutex_unlock(&dma_list_mutex);
10841280
1281
+ dmaengine_debug_register(device);
1282
+
10851283 return 0;
10861284
10871285 err_out:
10881286 /* if we never registered a channel just release the idr */
1089
- if (atomic_read(idr_ref) == 0) {
1287
+ if (!device->chancnt) {
10901288 ida_free(&dma_ida, device->dev_id);
1091
- kfree(idr_ref);
10921289 return rc;
10931290 }
10941291
....@@ -1107,30 +1304,30 @@
11071304
11081305 /**
11091306 * dma_async_device_unregister - unregister a DMA device
1110
- * @device: &dma_device
1307
+ * @device: pointer to &struct dma_device
11111308 *
11121309 * This routine is called by dma driver exit routines, dmaengine holds module
11131310 * references to prevent it being called while channels are in use.
11141311 */
11151312 void dma_async_device_unregister(struct dma_device *device)
11161313 {
1117
- struct dma_chan *chan;
1314
+ struct dma_chan *chan, *n;
1315
+
1316
+ dmaengine_debug_unregister(device);
1317
+
1318
+ list_for_each_entry_safe(chan, n, &device->channels, device_node)
1319
+ __dma_async_device_channel_unregister(device, chan);
11181320
11191321 mutex_lock(&dma_list_mutex);
1120
- list_del_rcu(&device->global_node);
1322
+ /*
1323
+ * setting DMA_PRIVATE ensures the device being torn down will not
1324
+ * be used in the channel_table
1325
+ */
1326
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
11211327 dma_channel_rebalance();
1328
+ ida_free(&dma_ida, device->dev_id);
1329
+ dma_device_put(device);
11221330 mutex_unlock(&dma_list_mutex);
1123
-
1124
- list_for_each_entry(chan, &device->channels, device_node) {
1125
- WARN_ONCE(chan->client_count,
1126
- "%s called while %d clients hold a reference\n",
1127
- __func__, chan->client_count);
1128
- mutex_lock(&dma_list_mutex);
1129
- chan->dev->chan = NULL;
1130
- mutex_unlock(&dma_list_mutex);
1131
- device_unregister(&chan->dev->device);
1132
- free_percpu(chan->local);
1133
- }
11341331 }
11351332 EXPORT_SYMBOL(dma_async_device_unregister);
11361333
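Unregistering now defers the final free to the driver's device_release() callback, invoked once the kref initialised in dma_async_device_register() drops to zero. A hedged sketch of the provider lifetime this implies; the foo_* names are illustrative:

#include <linux/dmaengine.h>
#include <linux/slab.h>

struct foo_dma {
	struct dma_device ddev;
	/* ... controller state ... */
};

static void foo_dma_release(struct dma_device *ddev)
{
	/* last reference gone: now it is safe to free the embedding struct */
	kfree(container_of(ddev, struct foo_dma, ddev));
}

static int foo_register(struct foo_dma *fd)
{
	fd->ddev.device_release = foo_dma_release;
	return dma_async_device_register(&fd->ddev);
}

static void foo_unregister(struct foo_dma *fd)
{
	/* kfree() happens later, from foo_dma_release(), not here */
	dma_async_device_unregister(&fd->ddev);
}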
....@@ -1144,7 +1341,7 @@
11441341
11451342 /**
11461343 * dmaenginem_async_device_register - registers DMA devices found
1147
- * @device: &dma_device
1344
+ * @device: pointer to &struct dma_device
11481345 *
11491346 * The operation is managed and will be undone on driver detach.
11501347 */
....@@ -1308,8 +1505,82 @@
13081505 }
13091506 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
13101507
1311
-/* dma_wait_for_async_tx - spin wait for a transaction to complete
1312
- * @tx: in-flight transaction to wait on
1508
+static inline int desc_check_and_set_metadata_mode(
1509
+ struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
1510
+{
1511
+ /* Make sure that the metadata mode is not mixed */
1512
+ if (!desc->desc_metadata_mode) {
1513
+ if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
1514
+ desc->desc_metadata_mode = mode;
1515
+ else
1516
+ return -ENOTSUPP;
1517
+ } else if (desc->desc_metadata_mode != mode) {
1518
+ return -EINVAL;
1519
+ }
1520
+
1521
+ return 0;
1522
+}
1523
+
1524
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
1525
+ void *data, size_t len)
1526
+{
1527
+ int ret;
1528
+
1529
+ if (!desc)
1530
+ return -EINVAL;
1531
+
1532
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
1533
+ if (ret)
1534
+ return ret;
1535
+
1536
+ if (!desc->metadata_ops || !desc->metadata_ops->attach)
1537
+ return -ENOTSUPP;
1538
+
1539
+ return desc->metadata_ops->attach(desc, data, len);
1540
+}
1541
+EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
1542
+
1543
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
1544
+ size_t *payload_len, size_t *max_len)
1545
+{
1546
+ int ret;
1547
+
1548
+ if (!desc)
1549
+ return ERR_PTR(-EINVAL);
1550
+
1551
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
1552
+ if (ret)
1553
+ return ERR_PTR(ret);
1554
+
1555
+ if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
1556
+ return ERR_PTR(-ENOTSUPP);
1557
+
1558
+ return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
1559
+}
1560
+EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
1561
+
1562
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
1563
+ size_t payload_len)
1564
+{
1565
+ int ret;
1566
+
1567
+ if (!desc)
1568
+ return -EINVAL;
1569
+
1570
+ ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
1571
+ if (ret)
1572
+ return ret;
1573
+
1574
+ if (!desc->metadata_ops || !desc->metadata_ops->set_len)
1575
+ return -ENOTSUPP;
1576
+
1577
+ return desc->metadata_ops->set_len(desc, payload_len);
1578
+}
1579
+EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
1580
+
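A hedged client-side sketch of the two metadata modes added above; the descriptor and buffers are assumed to come from elsewhere, the 4-byte payload is illustrative, and the helpers return -ENOTSUPP when the channel does not advertise the mode:

#include <linux/dmaengine.h>
#include <linux/string.h>

/* DESC_METADATA_CLIENT: attach a client-owned metadata buffer. */
static int foo_attach_md(struct dma_async_tx_descriptor *desc,
			 void *md, size_t md_len)
{
	return dmaengine_desc_attach_metadata(desc, md, md_len);
}

/* DESC_METADATA_ENGINE: write into the engine-provided metadata area. */
static int foo_fill_engine_md(struct dma_async_tx_descriptor *desc)
{
	size_t payload, max;
	void *ptr;

	ptr = dmaengine_desc_get_metadata_ptr(desc, &payload, &max);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	if (max < 4)
		return -ENOSPC;

	memset(ptr, 0, 4);	/* illustrative 4-byte payload */
	return dmaengine_desc_set_metadata_len(desc, 4);
}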
1581
+/**
1582
+ * dma_wait_for_async_tx - spin wait for a transaction to complete
1583
+ * @tx: in-flight transaction to wait on
13131584 */
13141585 enum dma_status
13151586 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
....@@ -1332,9 +1603,12 @@
13321603 }
13331604 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
13341605
1335
-/* dma_run_dependencies - helper routine for dma drivers to process
1336
- * (start) dependent operations on their target channel
1337
- * @tx: transaction with dependencies
1606
+/**
1607
+ * dma_run_dependencies - process dependent operations on the target channel
1608
+ * @tx: transaction with dependencies
1609
+ *
1610
+ * Helper routine for DMA drivers to process (start) dependent operations
1611
+ * on their target channel.
13381612 */
13391613 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
13401614 {
....@@ -1376,8 +1650,11 @@
13761650
13771651 if (err)
13781652 return err;
1379
- return class_register(&dma_devclass);
1653
+
1654
+ err = class_register(&dma_devclass);
1655
+ if (!err)
1656
+ dmaengine_debugfs_init();
1657
+
1658
+ return err;
13801659 }
13811660 arch_initcall(dma_bus_init);
1382
-
1383
-