hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/kernel/dma/Kconfig
....@@ -1,8 +1,24 @@
1
+# SPDX-License-Identifier: GPL-2.0-only
2
+
3
+config NO_DMA
4
+ bool
15
26 config HAS_DMA
37 bool
48 depends on !NO_DMA
59 default y
10
+
11
+config DMA_OPS
12
+ depends on HAS_DMA
13
+ bool
14
+
15
+#
16
+# IOMMU drivers that can bypass the IOMMU code and optionally use the direct
17
+# mapping fast path should select this option and set the dma_ops_bypass
18
+# flag in struct device where applicable
19
+#
20
+config DMA_OPS_BYPASS
21
+ bool
622
723 config NEED_SG_DMA_LENGTH
824 bool
....@@ -13,7 +29,34 @@
1329 config ARCH_DMA_ADDR_T_64BIT
1430 def_bool 64BIT || PHYS_ADDR_T_64BIT
1531
16
-config HAVE_GENERIC_DMA_COHERENT
32
+config ARCH_HAS_DMA_COHERENCE_H
33
+ bool
34
+
35
+config ARCH_HAS_DMA_SET_MASK
36
+ bool
37
+
38
+#
39
+# Select this option if the architecture needs special handling for
40
+# DMA_ATTR_WRITE_COMBINE. Normally the "uncached" mapping should be what
41
+# people think of when saying write combine, so very few platforms should
42
+# need to enable this.
43
+#
44
+config ARCH_HAS_DMA_WRITE_COMBINE
45
+ bool
46
+
47
+#
48
+# Select if the architecture provides the arch_dma_mark_clean hook
49
+#
50
+config ARCH_HAS_DMA_MARK_CLEAN
51
+ bool
52
+
53
+config DMA_DECLARE_COHERENT
54
+ bool
55
+
56
+config ARCH_HAS_SETUP_DMA_OPS
57
+ bool
58
+
59
+config ARCH_HAS_TEARDOWN_DMA_OPS
1760 bool
1861
1962 config ARCH_HAS_SYNC_DMA_FOR_DEVICE
....@@ -26,28 +69,159 @@
2669 config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
2770 bool
2871
29
-config DMA_DIRECT_OPS
72
+config ARCH_HAS_DMA_PREP_COHERENT
3073 bool
31
- depends on HAS_DMA
3274
33
-config DMA_NONCOHERENT_OPS
75
+config ARCH_HAS_FORCE_DMA_UNENCRYPTED
3476 bool
35
- depends on HAS_DMA
36
- select DMA_DIRECT_OPS
37
-
38
-config DMA_NONCOHERENT_MMAP
39
- bool
40
- depends on DMA_NONCOHERENT_OPS
41
-
42
-config DMA_NONCOHERENT_CACHE_SYNC
43
- bool
44
- depends on DMA_NONCOHERENT_OPS
4577
4678 config DMA_VIRT_OPS
4779 bool
4880 depends on HAS_DMA
81
+ select DMA_OPS
4982
5083 config SWIOTLB
5184 bool
52
- select DMA_DIRECT_OPS
5385 select NEED_DMA_MAP_STATE
86
+
87
+#
88
+# Should be selected if we can mmap non-coherent mappings to userspace.
89
+# The only thing that is really required is a way to set an uncached bit
90
+# in the pagetables
91
+#
92
+config DMA_NONCOHERENT_MMAP
93
+ default y if !MMU
94
+ bool
95
+
96
+config DMA_COHERENT_POOL
97
+ select GENERIC_ALLOCATOR
98
+ bool
99
+
100
+config DMA_REMAP
101
+ bool
102
+ depends on MMU
103
+ select DMA_NONCOHERENT_MMAP
104
+
105
+config DMA_DIRECT_REMAP
106
+ bool
107
+ select DMA_REMAP
108
+ select DMA_COHERENT_POOL
109
+
110
+config DMA_CMA
111
+ bool "DMA Contiguous Memory Allocator"
112
+ depends on HAVE_DMA_CONTIGUOUS && CMA
113
+ help
114
+ This enables the Contiguous Memory Allocator which allows drivers
115
+ to allocate big physically-contiguous blocks of memory for use with
116
+ hardware components that do not support I/O mapping or scatter-gather.
117
+
118
+ You can disable CMA by specifying "cma=0" on the kernel's command
119
+ line.
120
+
121
+ For more information see <kernel/dma/contiguous.c>.
122
+ If unsure, say "n".
123
+
124
+if DMA_CMA
125
+
126
+config DMA_PERNUMA_CMA
127
+ bool "Enable separate DMA Contiguous Memory Area for each NUMA Node"
128
+ default NUMA && ARM64
129
+ help
130
+ Enable this option to get pernuma CMA areas so that devices like
131
+ ARM64 SMMU can get local memory by DMA coherent APIs.
132
+
133
+ You can set the size of pernuma CMA by specifying "cma_pernuma=size"
134
+ on the kernel's command line.
135
+
136
+comment "Default contiguous memory area size:"
137
+
138
+config CMA_SIZE_MBYTES
139
+ int "Size in Mega Bytes"
140
+ depends on !CMA_SIZE_SEL_PERCENTAGE
141
+ default 0 if X86
142
+ default 16
143
+ help
144
+ Defines the size (in MiB) of the default memory area for Contiguous
145
+ Memory Allocator. If the size of 0 is selected, CMA is disabled by
146
+ default, but it can be enabled by passing cma=size[MG] to the kernel.
147
+
148
+
149
+config CMA_SIZE_PERCENTAGE
150
+ int "Percentage of total memory"
151
+ depends on !CMA_SIZE_SEL_MBYTES
152
+ default 0 if X86
153
+ default 10
154
+ help
155
+ Defines the size of the default memory area for Contiguous Memory
156
+ Allocator as a percentage of the total memory in the system.
157
+ If 0 percent is selected, CMA is disabled by default, but it can be
158
+ enabled by passing cma=size[MG] to the kernel.
159
+
160
+choice
161
+ prompt "Selected region size"
162
+ default CMA_SIZE_SEL_MBYTES
163
+
164
+config CMA_SIZE_SEL_MBYTES
165
+ bool "Use mega bytes value only"
166
+
167
+config CMA_SIZE_SEL_PERCENTAGE
168
+ bool "Use percentage value only"
169
+
170
+config CMA_SIZE_SEL_MIN
171
+ bool "Use lower value (minimum)"
172
+
173
+config CMA_SIZE_SEL_MAX
174
+ bool "Use higher value (maximum)"
175
+
176
+endchoice
177
+
178
+config CMA_ALIGNMENT
179
+ int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
180
+ range 2 12
181
+ default 8
182
+ help
183
+ DMA mapping framework by default aligns all buffers to the smallest
184
+ PAGE_SIZE order which is greater than or equal to the requested buffer
185
+ size. This works well for buffers up to a few hundred kilobytes, but
186
+ for larger buffers it is just a memory waste. With this parameter you can
187
+ specify the maximum PAGE_SIZE order for contiguous buffers. Larger
188
+ buffers will be aligned only to this specified order. The order is
189
+ expressed as a power of two multiplied by the PAGE_SIZE.
190
+
191
+ For example, if your system defaults to 4KiB pages, the order value
192
+ of 8 means that the buffers will be aligned up to 1MiB only.
193
+
194
+ If unsure, leave the default value "8".
195
+
196
+endif
197
+
198
+config DMA_API_DEBUG
199
+ bool "Enable debugging of DMA-API usage"
200
+ select NEED_DMA_MAP_STATE
201
+ help
202
+ Enable this option to debug the use of the DMA API by device drivers.
203
+ With this option you will be able to detect common bugs in device
204
+ drivers like double-freeing of DMA mappings or freeing mappings that
205
+ were never allocated.
206
+
207
+ This option causes a performance degradation. Use only if you want to
208
+ debug device drivers and dma interactions.
209
+
210
+ If unsure, say N.
211
+
212
+config DMA_API_DEBUG_SG
213
+ bool "Debug DMA scatter-gather usage"
214
+ default y
215
+ depends on DMA_API_DEBUG
216
+ help
217
+ Perform extra checking that callers of dma_map_sg() have respected the
218
+ appropriate segment length/boundary limits for the given device when
219
+ preparing DMA scatterlists.
220
+
221
+ This is particularly likely to have been overlooked in cases where the
222
+ dma_map_sg() API is used for general bulk mapping of pages rather than
223
+ preparing literal scatter-gather descriptors, where there is a risk of
224
+ unexpected behaviour from DMA API implementations if the scatterlist
225
+ is technically out-of-spec.
226
+
227
+ If unsure, say N.