/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#ifndef _TTM_DEVICE_H_
#define _TTM_DEVICE_H_

#include <linux/types.h>
#include <linux/workqueue.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_pool.h>

struct ttm_device;
struct ttm_placement;
struct ttm_buffer_object;
struct ttm_operation_ctx;

/**
 * struct ttm_global - Buffer object driver global data.
 */
extern struct ttm_global {

        /**
         * @dummy_read_page: Pointer to a dummy page used for mapping requests
         * of unpopulated pages. Constant after init.
         */
        struct page *dummy_read_page;

        /**
         * @device_list: List of buffer object devices. Protected by
         * ttm_global_mutex.
         */
        struct list_head device_list;

        /**
         * @bo_count: Number of buffer objects allocated by devices.
         */
        atomic_t bo_count;
} ttm_glob;

struct ttm_device_funcs {
        /**
         * ttm_tt_create
         *
         * @bo: The buffer object to create the ttm for.
         * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
         *
         * Create a struct ttm_tt to back data with system memory pages.
         * No pages are actually allocated.
         * Returns:
         * NULL: Out of memory.
         */
        struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
                                        uint32_t page_flags);

        /**
         * ttm_tt_populate
         *
         * @ttm: The struct ttm_tt to contain the backing pages.
         *
         * Allocate all backing pages.
         * Returns:
         * -ENOMEM: Out of memory.
         */
        int (*ttm_tt_populate)(struct ttm_device *bdev,
                               struct ttm_tt *ttm,
                               struct ttm_operation_ctx *ctx);

        /**
         * ttm_tt_unpopulate
         *
         * @ttm: The struct ttm_tt containing the backing pages.
         *
         * Free all backing pages.
         */
        void (*ttm_tt_unpopulate)(struct ttm_device *bdev,
                                  struct ttm_tt *ttm);

        /**
         * ttm_tt_destroy
         *
         * @bdev: Pointer to a ttm device
         * @ttm: Pointer to a struct ttm_tt.
         *
         * Destroy the backend. This is called back from ttm_tt_destroy, so
         * don't call ttm_tt_destroy from the callback or you will create an
         * infinite loop.
         */
        void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm);

        /**
         * struct ttm_device_funcs member eviction_valuable
         *
         * @bo: the buffer object to be evicted
         * @place: placement we need room for
         *
         * Check with the driver if it is valuable to evict a BO to make room
         * for a certain placement.
         */
        bool (*eviction_valuable)(struct ttm_buffer_object *bo,
                                  const struct ttm_place *place);

        /**
         * struct ttm_device_funcs member evict_flags:
         *
         * @bo: the buffer object to be evicted
         *
         * Return the bo flags for a buffer which is not mapped to the hardware.
         * These will be placed in proposed_flags so that when the move is
         * finished, they'll end up in bo->mem.flags.
         * This should not cause multihop evictions, and the core will warn
         * if one is proposed.
         */
        void (*evict_flags)(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement);

        /**
         * struct ttm_device_funcs member move:
         *
         * @bo: the buffer to move
         * @evict: whether this motion is evicting the buffer from
         * the graphics address space
         * @ctx: context for this move with parameters
         * @new_mem: the new memory region receiving the buffer
         * @hop: placement for driver directed intermediate hop
         *
         * Move a buffer between two memory regions.
         * Returns -EMULTIHOP if the driver requests a hop.
         */
        int (*move)(struct ttm_buffer_object *bo, bool evict,
                    struct ttm_operation_ctx *ctx,
                    struct ttm_resource *new_mem,
                    struct ttm_place *hop);

        /**
         * Hook to notify the driver about a resource delete.
         */
        void (*delete_mem_notify)(struct ttm_buffer_object *bo);

        /**
         * Notify the driver that we're about to swap out this bo.
         */
        void (*swap_notify)(struct ttm_buffer_object *bo);

        /**
         * Driver callback when mapping IO memory (for bo_move_memcpy,
         * for instance). TTM will take care to call io_mem_free whenever
         * the mapping is no longer in use. io_mem_reserve & io_mem_free
         * are balanced.
         */
        int (*io_mem_reserve)(struct ttm_device *bdev,
                              struct ttm_resource *mem);
        void (*io_mem_free)(struct ttm_device *bdev,
                            struct ttm_resource *mem);

        /**
         * Return the pfn for a given page_offset inside the BO.
         *
         * @bo: the BO to look up the pfn for
         * @page_offset: the offset to look up
         */
        unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
                                    unsigned long page_offset);

        /**
         * Read/write memory buffers for ptrace access
         *
         * @bo: the BO to access
         * @offset: the offset from the start of the BO
         * @buf: pointer to source/destination buffer
         * @len: number of bytes to copy
         * @write: whether to read (0) from or write (non-0) to the BO
         *
         * If successful, this function should return the number of
         * bytes copied, -EIO otherwise. If the number of bytes
         * returned is < len, the function may be called again with
         * the remainder of the buffer to copy.
         */
        int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
                             void *buf, int len, int write);

        /**
         * Notify the driver that we're about to release a BO
         *
         * @bo: BO that is about to be released
         *
         * Gives the driver a chance to do any cleanup, including
         * adding fences that may force a delayed delete
         */
        void (*release_notify)(struct ttm_buffer_object *bo);
};
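
/*
 * Illustrative sketch only (not part of the TTM API): a driver typically
 * provides a static function table like the one below and hands it to
 * ttm_device_init(). The my_* callback names are hypothetical; only
 * ttm_bo_eviction_valuable() is a real TTM helper commonly used as the
 * default eviction_valuable implementation.
 *
 *	static struct ttm_device_funcs my_ttm_funcs = {
 *		.ttm_tt_create = my_ttm_tt_create,
 *		.ttm_tt_populate = my_ttm_tt_populate,
 *		.ttm_tt_unpopulate = my_ttm_tt_unpopulate,
 *		.ttm_tt_destroy = my_ttm_tt_destroy,
 *		.eviction_valuable = ttm_bo_eviction_valuable,
 *		.evict_flags = my_evict_flags,
 *		.move = my_move,
 *		.io_mem_reserve = my_io_mem_reserve,
 *		.io_mem_free = my_io_mem_free,
 *	};
 */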

/**
 * struct ttm_device - Buffer object driver device-specific data.
 */
struct ttm_device {
        /**
         * @device_list: Our entry in the global device list.
         * Constant after bo device init
         */
        struct list_head device_list;

        /**
         * @funcs: Function table for the device.
         * Constant after bo device init
         */
        struct ttm_device_funcs *funcs;

        /**
         * @sysman: Resource manager for the system domain.
         * Access via ttm_manager_type.
         */
        struct ttm_resource_manager sysman;

        /**
         * @man_drv: An array of resource managers, one per resource type.
         */
        struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];

        /**
         * @vma_manager: Address space manager for finding BOs to mmap.
         */
        struct drm_vma_offset_manager *vma_manager;

        /**
         * @pool: page pool for the device.
         */
        struct ttm_pool pool;

        /**
         * @lru_lock: Protection for the per manager LRU and ddestroy lists.
         */
        spinlock_t lru_lock;

        /**
         * @ddestroy: Destroyed but not yet cleaned up buffer objects.
         */
        struct list_head ddestroy;

        /**
         * @pinned: Buffer objects which are pinned and so not on any LRU list.
         */
        struct list_head pinned;

        /**
         * @dev_mapping: A pointer to the struct address_space for invalidating
         * CPU mappings on buffer move. Protected by load/unload sync.
         */
        struct address_space *dev_mapping;

        /**
         * @wq: Work queue structure for the delayed delete workqueue.
         */
        struct delayed_work wq;
};

int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
                       gfp_t gfp_flags);
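
/*
 * Illustrative sketch only: a driver that wants to push as many idle buffers
 * as possible to the swap cache (e.g. before hibernation) can loop until no
 * further progress is made; a positive return value indicates progress.
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = false,
 *		.no_wait_gpu = false
 *	};
 *
 *	while (ttm_global_swapout(&ctx, GFP_KERNEL) > 0)
 *		;
 */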

static inline struct ttm_resource_manager *
ttm_manager_type(struct ttm_device *bdev, int mem_type)
{
        BUILD_BUG_ON(__builtin_constant_p(mem_type)
                     && mem_type >= TTM_NUM_MEM_TYPES);
        return bdev->man_drv[mem_type];
}

static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
                                          struct ttm_resource_manager *manager)
{
        BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
        bdev->man_drv[type] = manager;
}
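
/*
 * Illustrative sketch only: drivers register their own resource managers
 * (for example for VRAM) with ttm_set_driver_manager() and look them up
 * again via ttm_manager_type(). The my_vram_manager object and its
 * initialization are driver specific and assumed for this example.
 *
 *	ttm_set_driver_manager(bdev, TTM_PL_VRAM, &my_vram_manager->manager);
 *	...
 *	man = ttm_manager_type(bdev, TTM_PL_VRAM);
 */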

int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
                    struct device *dev, struct address_space *mapping,
                    struct drm_vma_offset_manager *vma_manager,
                    bool use_dma_alloc, bool use_dma32);
void ttm_device_fini(struct ttm_device *bdev);
void ttm_device_clear_dma_mappings(struct ttm_device *bdev);
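
/*
 * Illustrative sketch only: typical use during driver load/unload. The
 * drm_device pointer "ddev", the driver private structure "drv" with an
 * embedded ttm_device, and the my_ttm_funcs table are assumptions made
 * for the example.
 *
 *	ret = ttm_device_init(&drv->bdev, &my_ttm_funcs, ddev->dev,
 *			      ddev->anon_inode->i_mapping,
 *			      ddev->vma_offset_manager,
 *			      true, dma_addressing_limited(ddev->dev));
 *	if (ret)
 *		return ret;
 *	...
 *	ttm_device_fini(&drv->bdev);
 */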

#endif