forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/include/drm/ttm/ttm_bo_driver.h
@@ -31,12 +31,11 @@
 #define _TTM_BO_DRIVER_H_
 
 #include <drm/drm_mm.h>
-#include <drm/drm_global.h>
 #include <drm/drm_vma_manager.h>
 #include <linux/workqueue.h>
 #include <linux/fs.h>
 #include <linux/spinlock.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include "ttm_bo_api.h"
 #include "ttm_memory.h"
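
Porting note: this include swap tracks the kernel-wide rename of struct
reservation_object (<linux/reservation.h>) to struct dma_resv
(<linux/dma-resv.h>); the reservation lock also moved from bo->resv to
bo->base.resv. A minimal before/after sketch for driver code (the
foo_lock_* wrappers are hypothetical):

/* before: reservation_object API */
static int foo_lock_old(struct ttm_buffer_object *bo)
{
	return reservation_object_lock(bo->resv, NULL);
}

/* after: dma_resv API, reservation now embedded in the GEM base object */
static int foo_lock_new(struct ttm_buffer_object *bo)
{
	return dma_resv_lock(bo->base.resv, NULL);
}
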
@@ -44,177 +43,10 @@
 #include "ttm_placement.h"
 #include "ttm_tt.h"
 
-#define TTM_MAX_BO_PRIORITY	4U
-
-#define TTM_MEMTYPE_FLAG_FIXED		(1 << 0) /* Fixed (on-card) PCI memory */
-#define TTM_MEMTYPE_FLAG_MAPPABLE	(1 << 1) /* Memory mappable */
-#define TTM_MEMTYPE_FLAG_CMA		(1 << 3) /* Can't map aperture */
-
-struct ttm_mem_type_manager;
-
-struct ttm_mem_type_manager_func {
-	/**
-	 * struct ttm_mem_type_manager member init
-	 *
-	 * @man: Pointer to a memory type manager.
-	 * @p_size: Implementation dependent, but typically the size of the
-	 * range to be managed in pages.
-	 *
-	 * Called to initialize a private range manager. The function is
-	 * expected to initialize the man::priv member.
-	 * Returns 0 on success, negative error code on failure.
-	 */
-	int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
-
-	/**
-	 * struct ttm_mem_type_manager member takedown
-	 *
-	 * @man: Pointer to a memory type manager.
-	 *
-	 * Called to undo the setup done in init. All allocated resources
-	 * should be freed.
-	 */
-	int (*takedown)(struct ttm_mem_type_manager *man);
-
-	/**
-	 * struct ttm_mem_type_manager member get_node
-	 *
-	 * @man: Pointer to a memory type manager.
-	 * @bo: Pointer to the buffer object we're allocating space for.
-	 * @placement: Placement details.
-	 * @flags: Additional placement flags.
-	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
-	 *
-	 * This function should allocate space in the memory type managed
-	 * by @man. Placement details if
-	 * applicable are given by @placement. If successful,
-	 * @mem::mm_node should be set to a non-null value, and
-	 * @mem::start should be set to a value identifying the beginning
-	 * of the range allocated, and the function should return zero.
-	 * If the memory region can't accommodate the buffer object, @mem::mm_node
-	 * should be set to NULL, and the function should return 0.
-	 * If a system error occurred, preventing the request to be fulfilled,
-	 * the function should return a negative error code.
-	 *
-	 * Note that @mem::mm_node will only be dereferenced by
-	 * struct ttm_mem_type_manager functions and optionally by the driver,
-	 * which has knowledge of the underlying type.
-	 *
-	 * This function may not be called from within atomic context, so
-	 * an implementation can and must use either a mutex or a spinlock to
-	 * protect any data structures managing the space.
-	 */
-	int (*get_node)(struct ttm_mem_type_manager *man,
-			struct ttm_buffer_object *bo,
-			const struct ttm_place *place,
-			struct ttm_mem_reg *mem);
-
-	/**
-	 * struct ttm_mem_type_manager member put_node
-	 *
-	 * @man: Pointer to a memory type manager.
-	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
-	 *
-	 * This function frees memory type resources previously allocated
-	 * and that are identified by @mem::mm_node and @mem::start. May not
-	 * be called from within atomic context.
-	 */
-	void (*put_node)(struct ttm_mem_type_manager *man,
-			 struct ttm_mem_reg *mem);
-
-	/**
-	 * struct ttm_mem_type_manager member debug
-	 *
-	 * @man: Pointer to a memory type manager.
-	 * @printer: Prefix to be used in printout to identify the caller.
-	 *
-	 * This function is called to print out the state of the memory
-	 * type manager to aid debugging of out-of-memory conditions.
-	 * It may not be called from within atomic context.
-	 */
-	void (*debug)(struct ttm_mem_type_manager *man,
-		      struct drm_printer *printer);
-};
-
-/**
- * struct ttm_mem_type_manager
- *
- * @has_type: The memory type has been initialized.
- * @use_type: The memory type is enabled.
- * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
- * managed by this memory type.
- * @gpu_offset: If used, the GPU offset of the first managed page of
- * fixed memory or the first managed location in an aperture.
- * @size: Size of the managed region.
- * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
- * as defined in ttm_placement_common.h
- * @default_caching: The default caching policy used for a buffer object
- * placed in this memory type if the user doesn't provide one.
- * @func: structure pointer implementing the range manager. See above
- * @priv: Driver private closure for @func.
- * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
- * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
- * reserved by the TTM vm system.
- * @io_reserve_lru: Optional lru list for unreserving io mem regions.
- * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
- * static information. bdev::driver::io_mem_free is never used.
- * @move_lock: lock for move fence
- * @lru: The lru list for this memory type.
- * @move: The fence of the last pipelined move operation.
- *
- * This structure is used to identify and manage memory types for a device.
- * It's set up by the ttm_bo_driver::init_mem_type method.
- */
-
-
-
-struct ttm_mem_type_manager {
-	struct ttm_bo_device *bdev;
-
-	/*
-	 * No protection. Constant from start.
-	 */
-
-	bool has_type;
-	bool use_type;
-	uint32_t flags;
-	uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
-	uint64_t size;
-	uint32_t available_caching;
-	uint32_t default_caching;
-	const struct ttm_mem_type_manager_func *func;
-	void *priv;
-	struct mutex io_reserve_mutex;
-	bool use_io_reserve_lru;
-	bool io_reserve_fastpath;
-	spinlock_t move_lock;
-
-	/*
-	 * Protected by @io_reserve_mutex:
-	 */
-
-	struct list_head io_reserve_lru;
-
-	/*
-	 * Protected by the global->lru_lock.
-	 */
-
-	struct list_head lru[TTM_MAX_BO_PRIORITY];
-
-	/*
-	 * Protected by @move_lock.
-	 */
-	struct dma_fence *move;
-};
-
 /**
  * struct ttm_bo_driver
  *
  * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
- * @invalidate_caches: Callback to invalidate read caches when a buffer object
- * has been evicted.
- * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
- * structure.
  * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
  * @move: Callback for a driver to hook in accelerated functions to
  * move a buffer.
@@ -245,8 +77,9 @@
 	 * Returns:
 	 * -ENOMEM: Out of memory.
 	 */
-	int (*ttm_tt_populate)(struct ttm_tt *ttm,
-			       struct ttm_operation_ctx *ctx);
+	int (*ttm_tt_populate)(struct ttm_bo_device *bdev,
+			       struct ttm_tt *ttm,
+			       struct ttm_operation_ctx *ctx);
 
 	/**
 	 * ttm_tt_unpopulate
@@ -255,23 +88,43 @@
 	 *
 	 * Free all backing pages
 	 */
-	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
+	void (*ttm_tt_unpopulate)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 
 	/**
-	 * struct ttm_bo_driver member invalidate_caches
+	 * ttm_tt_bind
 	 *
-	 * @bdev: the buffer object device.
-	 * @flags: new placement of the rebound buffer object.
+	 * @bdev: Pointer to a ttm device
+	 * @ttm: Pointer to a struct ttm_tt.
+	 * @bo_mem: Pointer to a struct ttm_resource describing the
+	 * memory type and location for binding.
 	 *
-	 * A previously evicted buffer has been rebound in a
-	 * potentially new location. Tell the driver that it might
-	 * consider invalidating read (texture) caches on the next command
-	 * submission as a consequence.
+	 * Bind the backend pages into the aperture in the location
+	 * indicated by @bo_mem. This function should be able to handle
+	 * differences between aperture and system page sizes.
 	 */
+	int (*ttm_tt_bind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem);
 
-	int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
-	int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
-			     struct ttm_mem_type_manager *man);
+	/**
+	 * ttm_tt_unbind
+	 *
+	 * @bdev: Pointer to a ttm device
+	 * @ttm: Pointer to a struct ttm_tt.
+	 *
+	 * Unbind previously bound backend pages. This function should be
+	 * able to handle differences between aperture and system page sizes.
+	 */
+	void (*ttm_tt_unbind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+
+	/**
+	 * ttm_tt_destroy
+	 *
+	 * @bdev: Pointer to a ttm device
+	 * @ttm: Pointer to a struct ttm_tt.
+	 *
+	 * Destroy the backend. This will be called from ttm_tt_destroy, so
+	 * don't call ttm_tt_destroy from the callback (infinite loop).
+	 */
+	void (*ttm_tt_destroy)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 
 	/**
 	 * struct ttm_bo_driver member eviction_valuable
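
Porting note: every ttm_tt hook above now takes the ttm_bo_device as its
first argument, and binding moves from the old ttm_tt backend into these
driver callbacks. A minimal sketch of the two new hooks for a hypothetical
driver (foo_map_pages()/foo_unmap_pages() are placeholders, not part of
this patch):

/* illustrative: program the aperture/GART for this ttm_tt */
static int foo_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			   struct ttm_resource *bo_mem)
{
	return foo_map_pages(bdev, ttm, bo_mem->start);
}

/* illustrative: tear the mapping down again */
static void foo_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	foo_unmap_pages(bdev, ttm);
}
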
@@ -310,7 +163,7 @@
 	 */
 	int (*move)(struct ttm_buffer_object *bo, bool evict,
 		    struct ttm_operation_ctx *ctx,
-		    struct ttm_mem_reg *new_mem);
+		    struct ttm_resource *new_mem);
 
 	/**
 	 * struct ttm_bo_driver member verify_access
@@ -336,7 +189,7 @@
 	 */
 	void (*move_notify)(struct ttm_buffer_object *bo,
 			    bool evict,
-			    struct ttm_mem_reg *new_mem);
+			    struct ttm_resource *new_mem);
 	/* notify the driver we are taking a fault on this BO
 	 * and have reserved it */
 	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
@@ -353,9 +206,9 @@
 	 * are balanced.
 	 */
 	int (*io_mem_reserve)(struct ttm_bo_device *bdev,
-			      struct ttm_mem_reg *mem);
+			      struct ttm_resource *mem);
 	void (*io_mem_free)(struct ttm_bo_device *bdev,
-			    struct ttm_mem_reg *mem);
+			    struct ttm_resource *mem);
 
 	/**
 	 * Return the pfn for a given page_offset inside the BO.
@@ -382,21 +235,30 @@
 	 */
 	int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
 			     void *buf, int len, int write);
-};
 
-/**
- * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
- */
+	/**
+	 * struct ttm_bo_driver member del_from_lru_notify
+	 *
+	 * @bo: the buffer object deleted from lru
+	 *
+	 * notify driver that a BO was deleted from LRU.
+	 */
+	void (*del_from_lru_notify)(struct ttm_buffer_object *bo);
 
-struct ttm_bo_global_ref {
-	struct drm_global_reference ref;
-	struct ttm_mem_global *mem_glob;
+	/**
+	 * Notify the driver that we're about to release a BO
+	 *
+	 * @bo: BO that is about to be released
+	 *
+	 * Gives the driver a chance to do any cleanup, including
+	 * adding fences that may force a delayed delete
+	 */
+	void (*release_notify)(struct ttm_buffer_object *bo);
 };
 
 /**
  * struct ttm_bo_global - Buffer object driver global data.
  *
- * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
  * @dummy_read_page: Pointer to a dummy page used for mapping requests
  * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
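
Porting note: release_notify gives the driver a last look at a BO before
TTM tears it down. A hedged sketch, assuming a driver that wants to attach
one final cleanup fence (struct foo_bo, to_foo_bo() and cleanup_fence are
invented for illustration):

static void foo_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct foo_bo *fbo = to_foo_bo(bo);	/* hypothetical wrapper */

	/* a fence added here forces a delayed delete */
	if (fbo->cleanup_fence)
		dma_resv_add_excl_fence(bo->base.resv, fbo->cleanup_fence);
}
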
@@ -407,20 +269,18 @@
 * @swap_lru: Lru list of buffer objects used for swapping.
 */
 
-struct ttm_bo_global {
+extern struct ttm_bo_global {
 
 	/**
 	 * Constant after init.
 	 */
 
 	struct kobject kobj;
-	struct ttm_mem_global *mem_glob;
 	struct page *dummy_read_page;
-	struct mutex device_list_mutex;
 	spinlock_t lru_lock;
 
 	/**
-	 * Protected by device_list_mutex.
+	 * Protected by ttm_global_mutex.
 	 */
 	struct list_head device_list;
 
@@ -433,7 +293,7 @@
 	 * Internal protection.
 	 */
 	atomic_t bo_count;
-};
+} ttm_bo_glob;
 
 
 #define TTM_NUM_MEM_TYPES 8
@@ -442,8 +302,8 @@
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
- * @man: An array of mem_type_managers.
- * @vma_manager: Address space manager
+ * @man: An array of resource managers.
+ * @vma_manager: Address space manager (pointer)
 * lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @dev_mapping: A pointer to the struct address_space representing the
@@ -459,14 +319,16 @@
 	 * Constant after bo device init / atomic.
 	 */
 	struct list_head device_list;
-	struct ttm_bo_global *glob;
 	struct ttm_bo_driver *driver;
-	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
-
+	/*
+	 * access via ttm_manager_type.
+	 */
+	struct ttm_resource_manager sysman;
+	struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
 	/*
 	 * Protected by internal locks.
 	 */
-	struct drm_vma_offset_manager vma_manager;
+	struct drm_vma_offset_manager *vma_manager;
 
 	/*
 	 * Protected by the global:lru lock.
@@ -490,37 +352,50 @@
 	bool no_retry;
 };
 
-/**
- * ttm_flag_masked
- *
- * @old: Pointer to the result and original value.
- * @new: New value of bits.
- * @mask: Mask of bits to change.
- *
- * Convenience function to change a number of bits identified by a mask.
- */
-
-static inline uint32_t
-ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
+static inline struct ttm_resource_manager *ttm_manager_type(struct ttm_bo_device *bdev,
+							    int mem_type)
 {
-	*old ^= (*old ^ new) & mask;
-	return *old;
+	return bdev->man_drv[mem_type];
 }
+
+static inline void ttm_set_driver_manager(struct ttm_bo_device *bdev,
+					  int type,
+					  struct ttm_resource_manager *manager)
+{
+	bdev->man_drv[type] = manager;
+}
+
+/**
+ * struct ttm_lru_bulk_move_pos
+ *
+ * @first: first BO in the bulk move range
+ * @last: last BO in the bulk move range
+ *
+ * Positions for a lru bulk move.
+ */
+struct ttm_lru_bulk_move_pos {
+	struct ttm_buffer_object *first;
+	struct ttm_buffer_object *last;
+};
+
+/**
+ * struct ttm_lru_bulk_move
+ *
+ * @tt: first/last lru entry for BOs in the TT domain
+ * @vram: first/last lru entry for BOs in the VRAM domain
+ * @swap: first/last lru entry for BOs on the swap list
+ *
+ * Helper structure for bulk moves on the LRU list.
+ */
+struct ttm_lru_bulk_move {
+	struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
+	struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
+	struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY];
+};
 
 /*
 * ttm_bo.c
 */
-
-/**
- * ttm_mem_reg_is_pci
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @mem: A valid struct ttm_mem_reg.
- *
- * Returns true if the memory described by @mem is PCI memory,
- * false otherwise.
- */
-bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
 
 /**
 * ttm_bo_mem_space
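
Porting note: ttm_manager_type()/ttm_set_driver_manager() replace direct
indexing of the removed man[] array. A hedged sketch of a driver
installing and querying its VRAM manager (foo_* names are placeholders,
and the ttm_resource_manager_init()/_set_used()/_used() helpers are
assumed to come from the companion ttm_resource.h of this series):

static int foo_vram_mgr_init(struct foo_device *fdev)
{
	struct ttm_resource_manager *man = &fdev->vram_man;

	ttm_resource_manager_init(man, fdev->vram_size >> PAGE_SHIFT);
	ttm_set_driver_manager(&fdev->bdev, TTM_PL_VRAM, man);
	ttm_resource_manager_set_used(man, true);
	return 0;
}

static bool foo_vram_enabled(struct foo_device *fdev)
{
	return ttm_resource_manager_used(ttm_manager_type(&fdev->bdev,
							  TTM_PL_VRAM));
}
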
@@ -528,7 +403,7 @@
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @proposed_placement: Proposed new placement for the buffer object.
- * @mem: A struct ttm_mem_reg.
+ * @mem: A struct ttm_resource.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
@@ -543,15 +418,8 @@
 */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		     struct ttm_placement *placement,
-		     struct ttm_mem_reg *mem,
+		     struct ttm_resource *mem,
 		     struct ttm_operation_ctx *ctx);
-
-void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
-void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
-			   struct ttm_mem_reg *mem);
-
-void ttm_bo_global_release(struct drm_global_reference *ref);
-int ttm_bo_global_init(struct drm_global_reference *ref);
 
 int ttm_bo_device_release(struct ttm_bo_device *bdev);
 
@@ -562,6 +430,7 @@
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @mapping: The address space to use for this bo.
+ * @vma_manager: A pointer to a vma manager.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
@@ -570,10 +439,11 @@
 * Returns:
 * !0: Failure.
 */
-int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
 		       struct ttm_bo_driver *driver,
 		       struct address_space *mapping,
-		       uint64_t file_page_offset, bool need_dma32);
+		       struct drm_vma_offset_manager *vma_manager,
+		       bool need_dma32);
 
 /**
 * ttm_bo_unmap_virtual
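
Porting note: ttm_bo_device_init() drops the ttm_bo_global argument and
the file_page_offset, and instead borrows the DRM core's vma offset
manager. A hedged sketch of the updated call (fdev and foo_bo_driver are
placeholders; the drm_device fields mirror what mainline drivers pass):

static int foo_ttm_init(struct foo_device *fdev)
{
	/* the vma offset manager is now shared with the DRM core */
	return ttm_bo_device_init(&fdev->bdev, &foo_bo_driver,
				  fdev->drm->anon_inode->i_mapping,
				  fdev->drm->vma_offset_manager,
				  true /* need_dma32 */);
}
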
@@ -591,59 +461,6 @@
 */
 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
 
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
-int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
-void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
-
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
-/**
- * __ttm_bo_reserve:
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @ticket: ticket used to acquire the ww_mutex.
- *
- * Will not remove reserved buffers from the lru lists.
- * Otherwise identical to ttm_bo_reserve.
- *
- * Returns:
- * -EDEADLK: The reservation may cause a deadlock.
- * Release all buffer reservations, wait for @bo to become unreserved and
- * try again. (only if use_sequence == 1).
- * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
- * a signal. Release all buffer reservations and return to user-space.
- * -EBUSY: The function needed to sleep, but @no_wait was true
- * -EALREADY: Bo already reserved using @ticket. This error code will only
- * be returned if @use_ticket is set to true.
- */
-static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
-				   bool interruptible, bool no_wait,
-				   struct ww_acquire_ctx *ticket)
-{
-	int ret = 0;
-
-	if (no_wait) {
-		bool success;
-		if (WARN_ON(ticket))
-			return -EBUSY;
-
-		success = reservation_object_trylock(bo->resv);
-		return success ? 0 : -EBUSY;
-	}
-
-	if (interruptible)
-		ret = reservation_object_lock_interruptible(bo->resv, ticket);
-	else
-		ret = reservation_object_lock(bo->resv, ticket);
-	if (ret == -EINTR)
-		return -ERESTARTSYS;
-	return ret;
-}
-
 /**
 * ttm_bo_reserve:
 *
@@ -653,35 +470,13 @@
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation. (Or prevents other processes from
- * locking it for validation) and removes it from lru lists, while taking
- * a number of measures to prevent deadlocks.
- *
- * Deadlocks may occur when two processes try to reserve multiple buffers in
- * different order, either by will or as a result of a buffer being evicted
- * to make room for a buffer already reserved. (Buffers are reserved before
- * they are evicted). The following algorithm prevents such deadlocks from
- * occurring:
- * Processes attempting to reserve multiple buffers other than for eviction,
- * (typically execbuf), should first obtain a unique 32-bit
- * validation sequence number,
- * and call this function with @use_ticket == 1 and @ticket->stamp == the unique
- * sequence number. If upon call of this function, the buffer object is already
- * reserved, the validation sequence is checked against the validation
- * sequence of the process currently reserving the buffer,
- * and if the current validation sequence is greater than that of the process
- * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
- * waiting for the buffer to become unreserved, after which it retries
- * reserving.
- * The caller should, when receiving an -EDEADLK error
- * release all its buffer reservations, wait for @bo to become unreserved, and
- * then rerun the validation with the same validation sequence. This procedure
- * will always guarantee that the process with the lowest validation sequence
- * will eventually succeed, preventing both deadlocks and starvation.
+ * locking it for validation), while taking a number of measures to prevent
+ * deadlocks.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
- * try again. (only if use_sequence == 1).
+ * try again.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true
@@ -692,14 +487,23 @@
 				 bool interruptible, bool no_wait,
 				 struct ww_acquire_ctx *ticket)
 {
-	int ret;
+	int ret = 0;
 
-	WARN_ON(!kref_read(&bo->kref));
+	if (no_wait) {
+		bool success;
+		if (WARN_ON(ticket))
+			return -EBUSY;
 
-	ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
-	if (likely(ret == 0))
-		ttm_bo_del_sub_from_lru(bo);
+		success = dma_resv_trylock(bo->base.resv);
+		return success ? 0 : -EBUSY;
+	}
 
+	if (interruptible)
+		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
+	else
+		ret = dma_resv_lock(bo->base.resv, ticket);
+	if (ret == -EINTR)
+		return -ERESTARTSYS;
 	return ret;
 }
 
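
Callers are unaffected by the switch to dma_resv; the usual single-BO
pattern still reads as below (a hedged sketch; foo_access_bo() and its
body comment are placeholders):

static int foo_access_bo(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_reserve(bo, true /* interruptible */,
			     false /* no_wait */, NULL);
	if (ret)
		return ret;

	/* ... validate, populate or map the buffer here ... */

	ttm_bo_unreserve(bo);
	return 0;
}
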
@@ -717,22 +521,45 @@
 					  bool interruptible,
 					  struct ww_acquire_ctx *ticket)
 {
-	int ret = 0;
+	if (interruptible) {
+		int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
+							   ticket);
+		if (ret == -EINTR)
+			ret = -ERESTARTSYS;
+		return ret;
+	}
+	dma_resv_lock_slow(bo->base.resv, ticket);
+	return 0;
+}
 
-	WARN_ON(!kref_read(&bo->kref));
+static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
+{
+	spin_lock(&ttm_bo_glob.lru_lock);
+	ttm_bo_move_to_lru_tail(bo, NULL);
+	spin_unlock(&ttm_bo_glob.lru_lock);
+}
 
-	if (interruptible)
-		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
-						       ticket);
-	else
-		ww_mutex_lock_slow(&bo->resv->lock, ticket);
+static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
+				     struct ttm_resource *new_mem)
+{
+	bo->mem = *new_mem;
+	new_mem->mm_node = NULL;
+}
 
-	if (likely(ret == 0))
-		ttm_bo_del_sub_from_lru(bo);
-	else if (ret == -EINTR)
-		ret = -ERESTARTSYS;
+/**
+ * ttm_bo_move_null - assign memory for a buffer object.
+ * @bo: The bo to assign the memory to
+ * @new_mem: The memory to be assigned.
+ *
+ * Assign the memory from new_mem to the memory of the buffer object bo.
+ */
+static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
+				    struct ttm_resource *new_mem)
+{
+	struct ttm_resource *old_mem = &bo->mem;
 
-	return ret;
+	WARN_ON(old_mem->mm_node != NULL);
+	ttm_bo_assign_mem(bo, new_mem);
 }
 
 /**
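
For multi-BO reservation, -EDEADLK from ttm_bo_reserve() is resolved via
the slowpath. A hedged sketch of the classic ww-mutex backoff loop (two
hypothetical BOs, retry logic abbreviated):

static int foo_reserve_pair(struct ttm_buffer_object *a,
			    struct ttm_buffer_object *b)
{
	struct ww_acquire_ctx ticket;
	int ret;

	ww_acquire_init(&ticket, &reservation_ww_class);

	ret = ttm_bo_reserve(a, true, false, &ticket);
	if (ret)
		goto out;

	ret = ttm_bo_reserve(b, true, false, &ticket);
	if (ret == -EDEADLK) {
		/* back off, then take the contended lock first */
		ttm_bo_unreserve(a);
		ret = ttm_bo_reserve_slowpath(b, true, &ticket);
		/* ... if 0, retry reserving @a and continue ... */
	}
out:
	ww_acquire_fini(&ticket);
	return ret;
}
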
@@ -744,12 +571,8 @@
 */
 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-		spin_lock(&bo->bdev->glob->lru_lock);
-		ttm_bo_add_to_lru(bo);
-		spin_unlock(&bo->bdev->glob->lru_lock);
-	}
-	reservation_object_unlock(bo->resv);
+	ttm_bo_move_to_lru_tail_unlocked(bo);
+	dma_resv_unlock(bo->base.resv);
 }
 
755578 /*
....@@ -757,16 +580,16 @@
757580 */
758581
759582 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
760
- struct ttm_mem_reg *mem);
583
+ struct ttm_resource *mem);
761584 void ttm_mem_io_free(struct ttm_bo_device *bdev,
762
- struct ttm_mem_reg *mem);
585
+ struct ttm_resource *mem);
763586 /**
764587 * ttm_bo_move_ttm
765588 *
766589 * @bo: A pointer to a struct ttm_buffer_object.
767590 * @interruptible: Sleep interruptible if waiting.
768591 * @no_wait_gpu: Return immediately if the GPU is busy.
769
- * @new_mem: struct ttm_mem_reg indicating where to move.
592
+ * @new_mem: struct ttm_resource indicating where to move.
770593 *
771594 * Optimized move function for a buffer object with both old and
772595 * new placement backed by a TTM. The function will, if successful,
....@@ -780,7 +603,7 @@
780603
781604 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
782605 struct ttm_operation_ctx *ctx,
783
- struct ttm_mem_reg *new_mem);
606
+ struct ttm_resource *new_mem);
784607
785608 /**
786609 * ttm_bo_move_memcpy
....@@ -788,7 +611,7 @@
788611 * @bo: A pointer to a struct ttm_buffer_object.
789612 * @interruptible: Sleep interruptible if waiting.
790613 * @no_wait_gpu: Return immediately if the GPU is busy.
791
- * @new_mem: struct ttm_mem_reg indicating where to move.
614
+ * @new_mem: struct ttm_resource indicating where to move.
792615 *
793616 * Fallback move function for a mappable buffer object in mappable memory.
794617 * The function will, if successful,
....@@ -802,7 +625,7 @@
802625
803626 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
804627 struct ttm_operation_ctx *ctx,
805
- struct ttm_mem_reg *new_mem);
628
+ struct ttm_resource *new_mem);
806629
807630 /**
808631 * ttm_bo_free_old_node
....@@ -819,7 +642,8 @@
819642 * @bo: A pointer to a struct ttm_buffer_object.
820643 * @fence: A fence object that signals when moving is complete.
821644 * @evict: This is an evict move. Don't return until the buffer is idle.
822
- * @new_mem: struct ttm_mem_reg indicating where to move.
645
+ * @pipeline: evictions are to be pipelined.
646
+ * @new_mem: struct ttm_resource indicating where to move.
823647 *
824648 * Accelerated move function to be called when an accelerated move
825649 * has been scheduled. The function will create a new temporary buffer object
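
A hedged sketch of a driver ->move hook that tries an accelerated blit
and falls back to the CPU copy above (foo_copy_blit() is made up):

static int foo_bo_move(struct ttm_buffer_object *bo, bool evict,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem)
{
	int ret;

	ret = foo_copy_blit(bo, new_mem);	/* hypothetical HW path */
	if (ret != -ENODEV)
		return ret;

	/* no blitter available: use the generic CPU copy */
	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
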
@@ -830,29 +654,15 @@
 */
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      struct dma_fence *fence, bool evict,
-			      struct ttm_mem_reg *new_mem);
-
-/**
- * ttm_bo_pipeline_move.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @fence: A fence object that signals when moving is complete.
- * @evict: This is an evict move. Don't return until the buffer is idle.
- * @new_mem: struct ttm_mem_reg indicating where to move.
- *
- * Function for pipelining accelerated moves. Either free the memory
- * immediately or hang it on a temporary buffer object.
- */
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
-			 struct dma_fence *fence, bool evict,
-			 struct ttm_mem_reg *new_mem);
+			      bool pipeline,
+			      struct ttm_resource *new_mem);
 
 /**
 * ttm_bo_pipeline_gutting.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
- * Pipelined gutting a BO of it's backing store.
+ * Pipelined gutting a BO of its backing store.
 */
 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
 
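
Porting note: ttm_bo_move_accel_cleanup() absorbs the removed
ttm_bo_pipeline_move() via the new @pipeline flag. A hedged sketch of the
call after scheduling a copy (foo_copy_submit() is invented; the
dma_fence_put() matches how mainline callers balance the reference):

static int foo_finish_move(struct ttm_buffer_object *bo, bool evict,
			   struct ttm_resource *new_mem)
{
	struct dma_fence *fence = foo_copy_submit(bo, new_mem);
	int ret;

	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* pipeline == true: old memory is freed once @fence signals */
	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);
	return ret;
}
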
@@ -867,6 +677,49 @@
 */
 pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
 
-extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
+/**
+ * ttm_bo_tt_bind
+ *
+ * Bind the object tt to a memory resource.
+ */
+int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
+
+/**
+ * ttm_bo_tt_unbind
+ *
+ * Unbind the object tt from a memory resource.
+ */
+void ttm_bo_tt_unbind(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_tt_destroy.
+ */
+void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_range_man_init
+ *
+ * @bdev: ttm device
+ * @type: memory manager type
+ * @use_tt: if the memory manager uses tt
+ * @p_size: size of area to be managed in pages.
+ *
+ * Initialise a generic range manager for the selected memory type.
+ * The range manager is installed for this device in the type slot.
+ */
+int ttm_range_man_init(struct ttm_bo_device *bdev,
+		       unsigned type, bool use_tt,
+		       unsigned long p_size);
+
+/**
+ * ttm_range_man_fini
+ *
+ * @bdev: ttm device
+ * @type: memory manager type
+ *
+ * Remove the generic range manager from a slot and tear it down.
+ */
+int ttm_range_man_fini(struct ttm_bo_device *bdev,
+		       unsigned type);
 
 #endif
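
Porting note: ttm_range_man_init()/ttm_range_man_fini() replace the old
init_mem_type()/ttm_bo_manager_func pair for simple linear ranges. A
hedged usage sketch (foo_* names and the GTT size are placeholders):

/* a GTT-style domain backed by the generic range manager */
static int foo_gtt_mgr_init(struct foo_device *fdev, u64 gtt_size)
{
	/* use_tt == true: BOs in this domain are backed by a ttm_tt */
	return ttm_range_man_init(&fdev->bdev, TTM_PL_TT, true,
				  gtt_size >> PAGE_SHIFT);
}

static void foo_gtt_mgr_fini(struct foo_device *fdev)
{
	ttm_range_man_fini(&fdev->bdev, TTM_PL_TT);
}
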