hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
....@@ -22,63 +22,213 @@
2222 * Authors: Christian König
2323 */
2424
25
-#include <drm/drmP.h>
25
+#include <linux/dma-mapping.h>
2626 #include "amdgpu.h"
27
+#include "amdgpu_vm.h"
28
+#include "amdgpu_atomfirmware.h"
29
+#include "atom.h"
2730
28
-struct amdgpu_vram_mgr {
29
- struct drm_mm mm;
30
- spinlock_t lock;
31
- atomic64_t usage;
32
- atomic64_t vis_usage;
31
+static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_resource_manager *man)
32
+{
33
+ return container_of(man, struct amdgpu_vram_mgr, manager);
34
+}
35
+
36
+static inline struct amdgpu_device *to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
37
+{
38
+ return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
39
+}
40
+
41
+/**
42
+ * DOC: mem_info_vram_total
43
+ *
44
+ * The amdgpu driver provides a sysfs API for reporting current total VRAM
45
+ * available on the device
46
+ * The file mem_info_vram_total is used for this and returns the total
47
+ * amount of VRAM in bytes
48
+ */
49
+static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
50
+ struct device_attribute *attr, char *buf)
51
+{
52
+ struct drm_device *ddev = dev_get_drvdata(dev);
53
+ struct amdgpu_device *adev = drm_to_adev(ddev);
54
+
55
+ return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
56
+}
57
+
58
+/**
59
+ * DOC: mem_info_vis_vram_total
60
+ *
61
+ * The amdgpu driver provides a sysfs API for reporting current total
62
+ * visible VRAM available on the device
63
+ * The file mem_info_vis_vram_total is used for this and returns the total
64
+ * amount of visible VRAM in bytes
65
+ */
66
+static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
67
+ struct device_attribute *attr, char *buf)
68
+{
69
+ struct drm_device *ddev = dev_get_drvdata(dev);
70
+ struct amdgpu_device *adev = drm_to_adev(ddev);
71
+
72
+ return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
73
+}
74
+
75
+/**
76
+ * DOC: mem_info_vram_used
77
+ *
78
+ * The amdgpu driver provides a sysfs API for reporting current total VRAM
79
+ * available on the device
80
+ * The file mem_info_vram_used is used for this and returns the total
81
+ * amount of currently used VRAM in bytes
82
+ */
83
+static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
84
+ struct device_attribute *attr, char *buf)
85
+{
86
+ struct drm_device *ddev = dev_get_drvdata(dev);
87
+ struct amdgpu_device *adev = drm_to_adev(ddev);
88
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
89
+
90
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
91
+ amdgpu_vram_mgr_usage(man));
92
+}
93
+
94
+/**
95
+ * DOC: mem_info_vis_vram_used
96
+ *
97
+ * The amdgpu driver provides a sysfs API for reporting current total of
98
+ * used visible VRAM
99
+ * The file mem_info_vis_vram_used is used for this and returns the total
100
+ * amount of currently used visible VRAM in bytes
101
+ */
102
+static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
103
+ struct device_attribute *attr, char *buf)
104
+{
105
+ struct drm_device *ddev = dev_get_drvdata(dev);
106
+ struct amdgpu_device *adev = drm_to_adev(ddev);
107
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
108
+
109
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
110
+ amdgpu_vram_mgr_vis_usage(man));
111
+}
112
+
113
+static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
114
+ struct device_attribute *attr,
115
+ char *buf)
116
+{
117
+ struct drm_device *ddev = dev_get_drvdata(dev);
118
+ struct amdgpu_device *adev = drm_to_adev(ddev);
119
+
120
+ switch (adev->gmc.vram_vendor) {
121
+ case SAMSUNG:
122
+ return snprintf(buf, PAGE_SIZE, "samsung\n");
123
+ case INFINEON:
124
+ return snprintf(buf, PAGE_SIZE, "infineon\n");
125
+ case ELPIDA:
126
+ return snprintf(buf, PAGE_SIZE, "elpida\n");
127
+ case ETRON:
128
+ return snprintf(buf, PAGE_SIZE, "etron\n");
129
+ case NANYA:
130
+ return snprintf(buf, PAGE_SIZE, "nanya\n");
131
+ case HYNIX:
132
+ return snprintf(buf, PAGE_SIZE, "hynix\n");
133
+ case MOSEL:
134
+ return snprintf(buf, PAGE_SIZE, "mosel\n");
135
+ case WINBOND:
136
+ return snprintf(buf, PAGE_SIZE, "winbond\n");
137
+ case ESMT:
138
+ return snprintf(buf, PAGE_SIZE, "esmt\n");
139
+ case MICRON:
140
+ return snprintf(buf, PAGE_SIZE, "micron\n");
141
+ default:
142
+ return snprintf(buf, PAGE_SIZE, "unknown\n");
143
+ }
144
+}
145
+
146
+static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
147
+ amdgpu_mem_info_vram_total_show, NULL);
148
+static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
149
+ amdgpu_mem_info_vis_vram_total_show,NULL);
150
+static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
151
+ amdgpu_mem_info_vram_used_show, NULL);
152
+static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
153
+ amdgpu_mem_info_vis_vram_used_show, NULL);
154
+static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
155
+ amdgpu_mem_info_vram_vendor, NULL);
156
+
157
+static const struct attribute *amdgpu_vram_mgr_attributes[] = {
158
+ &dev_attr_mem_info_vram_total.attr,
159
+ &dev_attr_mem_info_vis_vram_total.attr,
160
+ &dev_attr_mem_info_vram_used.attr,
161
+ &dev_attr_mem_info_vis_vram_used.attr,
162
+ &dev_attr_mem_info_vram_vendor.attr,
163
+ NULL
33164 };
165
+
166
+static const struct ttm_resource_manager_func amdgpu_vram_mgr_func;
34167
35168 /**
36169 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
37170 *
38
- * @man: TTM memory type manager
39
- * @p_size: maximum size of VRAM
171
+ * @adev: amdgpu_device pointer
40172 *
41173 * Allocate and initialize the VRAM manager.
42174 */
43
-static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
44
- unsigned long p_size)
175
+int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
45176 {
46
- struct amdgpu_vram_mgr *mgr;
177
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
178
+ struct ttm_resource_manager *man = &mgr->manager;
179
+ int ret;
47180
48
- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
49
- if (!mgr)
50
- return -ENOMEM;
181
+ ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);
51182
52
- drm_mm_init(&mgr->mm, 0, p_size);
183
+ man->func = &amdgpu_vram_mgr_func;
184
+
185
+ drm_mm_init(&mgr->mm, 0, man->size);
53186 spin_lock_init(&mgr->lock);
54
- man->priv = mgr;
187
+
188
+	/* Add the VRAM-related sysfs files (total/used for VRAM and visible VRAM, plus vendor) */
189
+ ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
190
+ if (ret)
191
+ DRM_ERROR("Failed to register sysfs\n");
192
+
193
+ ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
194
+ ttm_resource_manager_set_used(man, true);
55195 return 0;
56196 }
57197
58198 /**
59199 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
60200 *
61
- * @man: TTM memory type manager
201
+ * @adev: amdgpu_device pointer
62202 *
63203 * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
64204 * allocated inside it.
65205 */
66
-static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
206
+void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
67207 {
68
- struct amdgpu_vram_mgr *mgr = man->priv;
208
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
209
+ struct ttm_resource_manager *man = &mgr->manager;
210
+ int ret;
211
+
212
+ ttm_resource_manager_set_used(man, false);
213
+
214
+ ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
215
+ if (ret)
216
+ return;
69217
70218 spin_lock(&mgr->lock);
71219 drm_mm_takedown(&mgr->mm);
72220 spin_unlock(&mgr->lock);
73
- kfree(mgr);
74
- man->priv = NULL;
75
- return 0;
221
+
222
+ sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
223
+
224
+ ttm_resource_manager_cleanup(man);
225
+ ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
76226 }
77227
78228 /**
79229 * amdgpu_vram_mgr_vis_size - Calculate visible node size
80230 *
81
- * @adev: amdgpu device structure
231
+ * @adev: amdgpu_device pointer
82232 * @node: MM node structure
83233 *
84234 * Calculate how many bytes of the MM node are inside visible VRAM
....@@ -107,7 +257,7 @@
107257 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
108258 {
109259 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
110
- struct ttm_mem_reg *mem = &bo->tbo.mem;
260
+ struct ttm_resource *mem = &bo->tbo.mem;
111261 struct drm_mm_node *nodes = mem->mm_node;
112262 unsigned pages = mem->num_pages;
113263 u64 usage;
....@@ -125,6 +275,28 @@
125275 }
126276
127277 /**
278
+ * amdgpu_vram_mgr_virt_start - update virtual start address
279
+ *
280
+ * @mem: ttm_resource to update
281
+ * @node: just allocated node
282
+ *
283
+ * Calculate a virtual BO start address to easily check if everything is CPU
284
+ * accessible.
285
+ */
286
+static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
287
+ struct drm_mm_node *node)
288
+{
289
+ unsigned long start;
290
+
291
+ start = node->start + node->size;
292
+ if (start > mem->num_pages)
293
+ start -= mem->num_pages;
294
+ else
295
+ start = 0;
296
+ mem->start = max(mem->start, start);
297
+}
298
+
299
+/**
128300 * amdgpu_vram_mgr_new - allocate new ranges
129301 *
130302 * @man: TTM memory type manager
....@@ -134,18 +306,18 @@
134306 *
135307 * Allocate VRAM for the given BO.
136308 */
137
-static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
309
+static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
138310 struct ttm_buffer_object *tbo,
139311 const struct ttm_place *place,
140
- struct ttm_mem_reg *mem)
312
+ struct ttm_resource *mem)
141313 {
142
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
143
- struct amdgpu_vram_mgr *mgr = man->priv;
314
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
315
+ struct amdgpu_device *adev = to_amdgpu_device(mgr);
144316 struct drm_mm *mm = &mgr->mm;
145317 struct drm_mm_node *nodes;
146318 enum drm_mm_insert_mode mode;
147319 unsigned long lpfn, num_nodes, pages_per_node, pages_left;
148
- uint64_t usage = 0, vis_usage = 0;
320
+ uint64_t vis_usage = 0, mem_bytes, max_bytes;
149321 unsigned i;
150322 int r;
151323
....@@ -153,20 +325,37 @@
153325 if (!lpfn)
154326 lpfn = man->size;
155327
156
- if (place->flags & TTM_PL_FLAG_CONTIGUOUS ||
157
- amdgpu_vram_page_split == -1) {
328
+ max_bytes = adev->gmc.mc_vram_size;
329
+ if (tbo->type != ttm_bo_type_kernel)
330
+ max_bytes -= AMDGPU_VM_RESERVED_VRAM;
331
+
332
+ /* bail out quickly if there's likely not enough VRAM for this BO */
333
+ mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
334
+ if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
335
+ atomic64_sub(mem_bytes, &mgr->usage);
336
+ return -ENOSPC;
337
+ }
338
+
339
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
158340 pages_per_node = ~0ul;
159341 num_nodes = 1;
160342 } else {
161
- pages_per_node = max((uint32_t)amdgpu_vram_page_split,
162
- mem->page_alignment);
343
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
344
+ pages_per_node = HPAGE_PMD_NR;
345
+#else
346
+ /* default to 2MB */
347
+ pages_per_node = (2UL << (20UL - PAGE_SHIFT));
348
+#endif
349
+ pages_per_node = max((uint32_t)pages_per_node, mem->page_alignment);
163350 num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
164351 }
165352
166
- nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
353
+ nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
167354 GFP_KERNEL | __GFP_ZERO);
168
- if (!nodes)
355
+ if (!nodes) {
356
+ atomic64_sub(mem_bytes, &mgr->usage);
169357 return -ENOMEM;
358
+ }
170359
171360 mode = DRM_MM_INSERT_BEST;
172361 if (place->flags & TTM_PL_FLAG_TOPDOWN)
....@@ -176,10 +365,24 @@
176365 pages_left = mem->num_pages;
177366
178367 spin_lock(&mgr->lock);
179
- for (i = 0; i < num_nodes; ++i) {
368
+ for (i = 0; pages_left >= pages_per_node; ++i) {
369
+ unsigned long pages = rounddown_pow_of_two(pages_left);
370
+
371
+ r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
372
+ pages_per_node, 0,
373
+ place->fpfn, lpfn,
374
+ mode);
375
+ if (unlikely(r))
376
+ break;
377
+
378
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
379
+ amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
380
+ pages_left -= pages;
381
+ }
382
+
383
+ for (; pages_left; ++i) {
180384 unsigned long pages = min(pages_left, pages_per_node);
181385 uint32_t alignment = mem->page_alignment;
182
- unsigned long start;
183386
184387 if (pages == pages_per_node)
185388 alignment = pages_per_node;
....@@ -191,23 +394,12 @@
191394 if (unlikely(r))
192395 goto error;
193396
194
- usage += nodes[i].size << PAGE_SHIFT;
195397 vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
196
-
197
- /* Calculate a virtual BO start address to easily check if
198
- * everything is CPU accessible.
199
- */
200
- start = nodes[i].start + nodes[i].size;
201
- if (start > mem->num_pages)
202
- start -= mem->num_pages;
203
- else
204
- start = 0;
205
- mem->start = max(mem->start, start);
398
+ amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
206399 pages_left -= pages;
207400 }
208401 spin_unlock(&mgr->lock);
209402
210
- atomic64_add(usage, &mgr->usage);
211403 atomic64_add(vis_usage, &mgr->vis_usage);
212404
213405 mem->mm_node = nodes;
....@@ -218,26 +410,25 @@
218410 while (i--)
219411 drm_mm_remove_node(&nodes[i]);
220412 spin_unlock(&mgr->lock);
413
+ atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
221414
222415 kvfree(nodes);
223
- return r == -ENOSPC ? 0 : r;
416
+ return r;
224417 }
225418
226419 /**
227420 * amdgpu_vram_mgr_del - free ranges
228421 *
229422 * @man: TTM memory type manager
230
- * @tbo: TTM BO we need this range for
231
- * @place: placement flags and restrictions
232423 * @mem: TTM memory object
233424 *
234425 * Free the allocated VRAM again.
235426 */
236
-static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
237
- struct ttm_mem_reg *mem)
427
+static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
428
+ struct ttm_resource *mem)
238429 {
239
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
240
- struct amdgpu_vram_mgr *mgr = man->priv;
430
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
431
+ struct amdgpu_device *adev = to_amdgpu_device(mgr);
241432 struct drm_mm_node *nodes = mem->mm_node;
242433 uint64_t usage = 0, vis_usage = 0;
243434 unsigned pages = mem->num_pages;
....@@ -263,15 +454,113 @@
263454 }
264455
265456 /**
457
+ * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
458
+ *
459
+ * @adev: amdgpu device pointer
460
+ * @mem: TTM memory object
461
+ * @dev: the other device
462
+ * @dir: dma direction
463
+ * @sgt: resulting sg table
464
+ *
465
+ * Allocate and fill a sg table from a VRAM allocation.
466
+ */
467
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
468
+ struct ttm_resource *mem,
469
+ struct device *dev,
470
+ enum dma_data_direction dir,
471
+ struct sg_table **sgt)
472
+{
473
+ struct drm_mm_node *node;
474
+ struct scatterlist *sg;
475
+ int num_entries = 0;
476
+ unsigned int pages;
477
+ int i, r;
478
+
479
+ *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
480
+ if (!*sgt)
481
+ return -ENOMEM;
482
+
483
+ for (pages = mem->num_pages, node = mem->mm_node;
484
+ pages; pages -= node->size, ++node)
485
+ ++num_entries;
486
+
487
+ r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
488
+ if (r)
489
+ goto error_free;
490
+
491
+ for_each_sgtable_sg((*sgt), sg, i)
492
+ sg->length = 0;
493
+
494
+ node = mem->mm_node;
495
+ for_each_sgtable_sg((*sgt), sg, i) {
496
+ phys_addr_t phys = (node->start << PAGE_SHIFT) +
497
+ adev->gmc.aper_base;
498
+ size_t size = node->size << PAGE_SHIFT;
499
+ dma_addr_t addr;
500
+
501
+ ++node;
502
+ addr = dma_map_resource(dev, phys, size, dir,
503
+ DMA_ATTR_SKIP_CPU_SYNC);
504
+ r = dma_mapping_error(dev, addr);
505
+ if (r)
506
+ goto error_unmap;
507
+
508
+ sg_set_page(sg, NULL, size, 0);
509
+ sg_dma_address(sg) = addr;
510
+ sg_dma_len(sg) = size;
511
+ }
512
+ return 0;
513
+
514
+error_unmap:
515
+ for_each_sgtable_sg((*sgt), sg, i) {
516
+ if (!sg->length)
517
+ continue;
518
+
519
+ dma_unmap_resource(dev, sg->dma_address,
520
+ sg->length, dir,
521
+ DMA_ATTR_SKIP_CPU_SYNC);
522
+ }
523
+ sg_free_table(*sgt);
524
+
525
+error_free:
526
+ kfree(*sgt);
527
+ return r;
528
+}
529
+
530
+/**
531
+ * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
532
+ *
533
+ * @adev: amdgpu device pointer
534
+ * @sgt: sg table to free
535
+ *
536
+ * Free a previously allocate sg table.
537
+ */
538
+void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
539
+ struct device *dev,
540
+ enum dma_data_direction dir,
541
+ struct sg_table *sgt)
542
+{
543
+ struct scatterlist *sg;
544
+ int i;
545
+
546
+ for_each_sgtable_sg(sgt, sg, i)
547
+ dma_unmap_resource(dev, sg->dma_address,
548
+ sg->length, dir,
549
+ DMA_ATTR_SKIP_CPU_SYNC);
550
+ sg_free_table(sgt);
551
+ kfree(sgt);
552
+}
553
+
554
+/**
266555 * amdgpu_vram_mgr_usage - how many bytes are used in this domain
267556 *
268557 * @man: TTM memory type manager
269558 *
270559 * Returns how many bytes are used in this domain.
271560 */
272
-uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man)
561
+uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man)
273562 {
274
- struct amdgpu_vram_mgr *mgr = man->priv;
563
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
275564
276565 return atomic64_read(&mgr->usage);
277566 }
....@@ -283,9 +572,9 @@
283572 *
284573 * Returns how many bytes are used in the visible part of VRAM
285574 */
286
-uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man)
575
+uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man)
287576 {
288
- struct amdgpu_vram_mgr *mgr = man->priv;
577
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
289578
290579 return atomic64_read(&mgr->vis_usage);
291580 }
....@@ -298,10 +587,10 @@
298587 *
299588 * Dump the table content using printk.
300589 */
301
-static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
590
+static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
302591 struct drm_printer *printer)
303592 {
304
- struct amdgpu_vram_mgr *mgr = man->priv;
593
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
305594
306595 spin_lock(&mgr->lock);
307596 drm_mm_print(&mgr->mm, printer);
....@@ -312,10 +601,8 @@
312601 amdgpu_vram_mgr_vis_usage(man) >> 20);
313602 }
314603
315
-const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
316
- .init = amdgpu_vram_mgr_init,
317
- .takedown = amdgpu_vram_mgr_fini,
318
- .get_node = amdgpu_vram_mgr_new,
319
- .put_node = amdgpu_vram_mgr_del,
320
- .debug = amdgpu_vram_mgr_debug
604
+static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
605
+ .alloc = amdgpu_vram_mgr_new,
606
+ .free = amdgpu_vram_mgr_del,
607
+ .debug = amdgpu_vram_mgr_debug
321608 };