...
  * Authors: Christian König
  */

-#include <drm/drmP.h>
+#include <linux/dma-mapping.h>
 #include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_atomfirmware.h"
+#include "atom.h"

-struct amdgpu_vram_mgr {
-	struct drm_mm mm;
-	spinlock_t lock;
-	atomic64_t usage;
-	atomic64_t vis_usage;
+static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_resource_manager *man)
+{
+	return container_of(man, struct amdgpu_vram_mgr, manager);
+}
+
+static inline struct amdgpu_device *to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
+{
+	return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
+}
+
+/**
+ * DOC: mem_info_vram_total
+ *
+ * The amdgpu driver provides a sysfs API for reporting current total VRAM
+ * available on the device
+ * The file mem_info_vram_total is used for this and returns the total
+ * amount of VRAM in bytes
+ */
+static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
+}
+
+/**
+ * DOC: mem_info_vis_vram_total
+ *
+ * The amdgpu driver provides a sysfs API for reporting current total
+ * visible VRAM available on the device
+ * The file mem_info_vis_vram_total is used for this and returns the total
+ * amount of visible VRAM in bytes
+ */
+static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
+}
+
+/**
+ * DOC: mem_info_vram_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting how much VRAM is
+ * currently used on the device
+ * The file mem_info_vram_used is used for this and returns the total
+ * amount of currently used VRAM in bytes
+ */
+static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+			amdgpu_vram_mgr_usage(man));
+}
+
+/**
+ * DOC: mem_info_vis_vram_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting current total of
+ * used visible VRAM
+ * The file mem_info_vis_vram_used is used for this and returns the total
+ * amount of currently used visible VRAM in bytes
+ */
+static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+			amdgpu_vram_mgr_vis_usage(man));
+}
+
+static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+
+	switch (adev->gmc.vram_vendor) {
+	case SAMSUNG:
+		return snprintf(buf, PAGE_SIZE, "samsung\n");
+	case INFINEON:
+		return snprintf(buf, PAGE_SIZE, "infineon\n");
+	case ELPIDA:
+		return snprintf(buf, PAGE_SIZE, "elpida\n");
+	case ETRON:
+		return snprintf(buf, PAGE_SIZE, "etron\n");
+	case NANYA:
+		return snprintf(buf, PAGE_SIZE, "nanya\n");
+	case HYNIX:
+		return snprintf(buf, PAGE_SIZE, "hynix\n");
+	case MOSEL:
+		return snprintf(buf, PAGE_SIZE, "mosel\n");
+	case WINBOND:
+		return snprintf(buf, PAGE_SIZE, "winbond\n");
+	case ESMT:
+		return snprintf(buf, PAGE_SIZE, "esmt\n");
+	case MICRON:
+		return snprintf(buf, PAGE_SIZE, "micron\n");
+	default:
+		return snprintf(buf, PAGE_SIZE, "unknown\n");
+	}
+}
+
+static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
+		   amdgpu_mem_info_vram_total_show, NULL);
+static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
+		   amdgpu_mem_info_vis_vram_total_show, NULL);
+static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
+		   amdgpu_mem_info_vram_used_show, NULL);
+static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
+		   amdgpu_mem_info_vis_vram_used_show, NULL);
+static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
+		   amdgpu_mem_info_vram_vendor, NULL);
+
+static const struct attribute *amdgpu_vram_mgr_attributes[] = {
+	&dev_attr_mem_info_vram_total.attr,
+	&dev_attr_mem_info_vis_vram_total.attr,
+	&dev_attr_mem_info_vram_used.attr,
+	&dev_attr_mem_info_vis_vram_used.attr,
+	&dev_attr_mem_info_vram_vendor.attr,
+	NULL
 };
+
+static const struct ttm_resource_manager_func amdgpu_vram_mgr_func;

 /**
  * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
  *
- * @man: TTM memory type manager
- * @p_size: maximum size of VRAM
+ * @adev: amdgpu_device pointer
  *
  * Allocate and initialize the VRAM manager.
  */
-static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
-				unsigned long p_size)
+int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
 {
-	struct amdgpu_vram_mgr *mgr;
+	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+	struct ttm_resource_manager *man = &mgr->manager;
+	int ret;

-	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
-	if (!mgr)
-		return -ENOMEM;
+	ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);

-	drm_mm_init(&mgr->mm, 0, p_size);
+	man->func = &amdgpu_vram_mgr_func;
+
+	drm_mm_init(&mgr->mm, 0, man->size);
 	spin_lock_init(&mgr->lock);
-	man->priv = mgr;
+
+	/* Add the VRAM-related sysfs files */
+	ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
+	if (ret)
+		DRM_ERROR("Failed to register sysfs\n");
+
+	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
+	ttm_resource_manager_set_used(man, true);
 	return 0;
 }

 /**
  * amdgpu_vram_mgr_fini - free and destroy VRAM manager
  *
- * @man: TTM memory type manager
+ * @adev: amdgpu_device pointer
  *
  * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
  * allocated inside it.
  */
-static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
+void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
 {
-	struct amdgpu_vram_mgr *mgr = man->priv;
+	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+	struct ttm_resource_manager *man = &mgr->manager;
+	int ret;
+
+	ttm_resource_manager_set_used(man, false);
+
+	ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
+	if (ret)
+		return;

 	spin_lock(&mgr->lock);
 	drm_mm_takedown(&mgr->mm);
 	spin_unlock(&mgr->lock);
-	kfree(mgr);
-	man->priv = NULL;
-	return 0;
+
+	sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
+
+	ttm_resource_manager_cleanup(man);
+	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
 }

 /**
  * amdgpu_vram_mgr_vis_size - Calculate visible node size
  *
- * @adev: amdgpu device structure
+ * @adev: amdgpu_device pointer
  * @node: MM node structure
  *
  * Calculate how many bytes of the MM node are inside visible VRAM
...
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct ttm_mem_reg *mem = &bo->tbo.mem;
+	struct ttm_resource *mem = &bo->tbo.mem;
 	struct drm_mm_node *nodes = mem->mm_node;
 	unsigned pages = mem->num_pages;
 	u64 usage;
...
 }

 /**
+ * amdgpu_vram_mgr_virt_start - update virtual start address
+ *
+ * @mem: ttm_resource to update
+ * @node: just allocated node
+ *
+ * Calculate a virtual BO start address to easily check if everything is CPU
+ * accessible.
+ */
+static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
+				       struct drm_mm_node *node)
+{
+	unsigned long start;
+
+	start = node->start + node->size;
+	if (start > mem->num_pages)
+		start -= mem->num_pages;
+	else
+		start = 0;
+	mem->start = max(mem->start, start);
+}
+
+/**
  * amdgpu_vram_mgr_new - allocate new ranges
  *
  * @man: TTM memory type manager
...
  *
  * Allocate VRAM for the given BO.
  */
-static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 			       struct ttm_buffer_object *tbo,
 			       const struct ttm_place *place,
-			       struct ttm_mem_reg *mem)
+			       struct ttm_resource *mem)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
-	struct amdgpu_vram_mgr *mgr = man->priv;
+	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+	struct amdgpu_device *adev = to_amdgpu_device(mgr);
 	struct drm_mm *mm = &mgr->mm;
 	struct drm_mm_node *nodes;
 	enum drm_mm_insert_mode mode;
 	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
-	uint64_t usage = 0, vis_usage = 0;
+	uint64_t vis_usage = 0, mem_bytes, max_bytes;
 	unsigned i;
 	int r;

...
 	if (!lpfn)
 		lpfn = man->size;

-	if (place->flags & TTM_PL_FLAG_CONTIGUOUS ||
-	    amdgpu_vram_page_split == -1) {
+	max_bytes = adev->gmc.mc_vram_size;
+	if (tbo->type != ttm_bo_type_kernel)
+		max_bytes -= AMDGPU_VM_RESERVED_VRAM;
+
+	/* bail out quickly if there's likely not enough VRAM for this BO */
+	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
+	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
+		atomic64_sub(mem_bytes, &mgr->usage);
+		return -ENOSPC;
+	}
+
+	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
 		pages_per_node = ~0ul;
 		num_nodes = 1;
 	} else {
-		pages_per_node = max((uint32_t)amdgpu_vram_page_split,
-				     mem->page_alignment);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		pages_per_node = HPAGE_PMD_NR;
+#else
+		/* default to 2MB */
+		pages_per_node = (2UL << (20UL - PAGE_SHIFT));
+#endif
+		pages_per_node = max((uint32_t)pages_per_node, mem->page_alignment);
 		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
 	}

-	nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
+	nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
 			       GFP_KERNEL | __GFP_ZERO);
-	if (!nodes)
+	if (!nodes) {
+		atomic64_sub(mem_bytes, &mgr->usage);
 		return -ENOMEM;
+	}

 	mode = DRM_MM_INSERT_BEST;
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
...
 	pages_left = mem->num_pages;

 	spin_lock(&mgr->lock);
-	for (i = 0; i < num_nodes; ++i) {
+	for (i = 0; pages_left >= pages_per_node; ++i) {
+		unsigned long pages = rounddown_pow_of_two(pages_left);
+
+		r = drm_mm_insert_node_in_range(mm, &nodes[i], pages,
+						pages_per_node, 0,
+						place->fpfn, lpfn,
+						mode);
+		if (unlikely(r))
+			break;
+
+		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
+		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
+		pages_left -= pages;
+	}
+
+	for (; pages_left; ++i) {
 		unsigned long pages = min(pages_left, pages_per_node);
 		uint32_t alignment = mem->page_alignment;
-		unsigned long start;

 		if (pages == pages_per_node)
 			alignment = pages_per_node;
...
 		if (unlikely(r))
 			goto error;

-		usage += nodes[i].size << PAGE_SHIFT;
 		vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
-
-		/* Calculate a virtual BO start address to easily check if
-		 * everything is CPU accessible.
-		 */
-		start = nodes[i].start + nodes[i].size;
-		if (start > mem->num_pages)
-			start -= mem->num_pages;
-		else
-			start = 0;
-		mem->start = max(mem->start, start);
+		amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
 		pages_left -= pages;
 	}
 	spin_unlock(&mgr->lock);

-	atomic64_add(usage, &mgr->usage);
 	atomic64_add(vis_usage, &mgr->vis_usage);

 	mem->mm_node = nodes;
...
 	while (i--)
 		drm_mm_remove_node(&nodes[i]);
 	spin_unlock(&mgr->lock);
+	atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);

 	kvfree(nodes);
-	return r == -ENOSPC ? 0 : r;
+	return r;
 }

 /**
  * amdgpu_vram_mgr_del - free ranges
  *
  * @man: TTM memory type manager
- * @tbo: TTM BO we need this range for
- * @place: placement flags and restrictions
  * @mem: TTM memory object
  *
  * Free the allocated VRAM again.
  */
-static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
-				struct ttm_mem_reg *mem)
+static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
+				struct ttm_resource *mem)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
-	struct amdgpu_vram_mgr *mgr = man->priv;
+	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+	struct amdgpu_device *adev = to_amdgpu_device(mgr);
 	struct drm_mm_node *nodes = mem->mm_node;
 	uint64_t usage = 0, vis_usage = 0;
 	unsigned pages = mem->num_pages;
...
 }

 /**
+ * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
+ *
+ * @adev: amdgpu device pointer
+ * @mem: TTM memory object
+ * @dev: the other device
+ * @dir: dma direction
+ * @sgt: resulting sg table
+ *
+ * Allocate and fill a sg table from a VRAM allocation.
+ */
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+			      struct ttm_resource *mem,
+			      struct device *dev,
+			      enum dma_data_direction dir,
+			      struct sg_table **sgt)
+{
+	struct drm_mm_node *node;
+	struct scatterlist *sg;
+	int num_entries = 0;
+	unsigned int pages;
+	int i, r;
+
+	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
+	if (!*sgt)
+		return -ENOMEM;
+
+	for (pages = mem->num_pages, node = mem->mm_node;
+	     pages; pages -= node->size, ++node)
+		++num_entries;
+
+	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
+	if (r)
+		goto error_free;
+
+	for_each_sgtable_sg((*sgt), sg, i)
+		sg->length = 0;
+
+	node = mem->mm_node;
+	for_each_sgtable_sg((*sgt), sg, i) {
+		phys_addr_t phys = (node->start << PAGE_SHIFT) +
+			adev->gmc.aper_base;
+		size_t size = node->size << PAGE_SHIFT;
+		dma_addr_t addr;
+
+		++node;
+		addr = dma_map_resource(dev, phys, size, dir,
+					DMA_ATTR_SKIP_CPU_SYNC);
+		r = dma_mapping_error(dev, addr);
+		if (r)
+			goto error_unmap;
+
+		sg_set_page(sg, NULL, size, 0);
+		sg_dma_address(sg) = addr;
+		sg_dma_len(sg) = size;
+	}
+	return 0;
+
+error_unmap:
+	for_each_sgtable_sg((*sgt), sg, i) {
+		if (!sg->length)
+			continue;
+
+		dma_unmap_resource(dev, sg->dma_address,
+				   sg->length, dir,
+				   DMA_ATTR_SKIP_CPU_SYNC);
+	}
+	sg_free_table(*sgt);
+
+error_free:
+	kfree(*sgt);
+	return r;
+}
+
+/**
+ * amdgpu_vram_mgr_free_sgt - unmap and free a sg table
+ *
+ * @adev: amdgpu device pointer
+ * @dev: the other device
+ * @dir: dma direction
+ * @sgt: sg table to free
+ *
+ * Free a sg table previously allocated by amdgpu_vram_mgr_alloc_sgt().
+ */
+void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
+			      struct device *dev,
+			      enum dma_data_direction dir,
+			      struct sg_table *sgt)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sgtable_sg(sgt, sg, i)
+		dma_unmap_resource(dev, sg->dma_address,
+				   sg->length, dir,
+				   DMA_ATTR_SKIP_CPU_SYNC);
+	sg_free_table(sgt);
+	kfree(sgt);
+}
+
+/**
  * amdgpu_vram_mgr_usage - how many bytes are used in this domain
  *
  * @man: TTM memory type manager
  *
  * Returns how many bytes are used in this domain.
  */
-uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man)
+uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man)
 {
-	struct amdgpu_vram_mgr *mgr = man->priv;
+	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

 	return atomic64_read(&mgr->usage);
 }
...
  *
  * Returns how many bytes are used in the visible part of VRAM
  */
-uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man)
+uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man)
 {
-	struct amdgpu_vram_mgr *mgr = man->priv;
+	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

 	return atomic64_read(&mgr->vis_usage);
 }
...
  *
  * Dump the table content using printk.
  */
-static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
+static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
 				  struct drm_printer *printer)
 {
-	struct amdgpu_vram_mgr *mgr = man->priv;
+	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);

 	spin_lock(&mgr->lock);
 	drm_mm_print(&mgr->mm, printer);
...
 			   amdgpu_vram_mgr_vis_usage(man) >> 20);
 }

-const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
-	.init = amdgpu_vram_mgr_init,
-	.takedown = amdgpu_vram_mgr_fini,
-	.get_node = amdgpu_vram_mgr_new,
-	.put_node = amdgpu_vram_mgr_del,
-	.debug = amdgpu_vram_mgr_debug
+static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
+	.alloc = amdgpu_vram_mgr_new,
+	.free = amdgpu_vram_mgr_del,
+	.debug = amdgpu_vram_mgr_debug
 };
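
For reference, the attributes registered above (mem_info_vram_total, mem_info_vis_vram_total, mem_info_vram_used, mem_info_vis_vram_used and mem_info_vram_vendor) can be read directly from userspace. A minimal sketch, assuming the files appear under the usual /sys/class/drm/cardN/device/ path; the card index and exact path depend on the system:

/*
 * Userspace sketch (not part of the patch): read one of the VRAM
 * accounting files added by this change. The path below is an
 * assumption; adjust the card index for the target system.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/drm/card0/device/mem_info_vram_used";
	unsigned long long used_bytes;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%llu", &used_bytes) == 1)
		printf("VRAM used: %llu bytes\n", used_bytes);
	fclose(f);
	return 0;
}

The four size attributes each return a single decimal value in bytes, matching the snprintf format used in the show callbacks above; mem_info_vram_vendor returns a vendor name string.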
---|