/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU
 * ring progression, what comes after the last bo is the oldest bo we
 * allocated and thus the first one that should no longer be in use by
 * the GPU.
 *
 * If that is not the case, we skip over the bo after last to the
 * closest done bo, if one exists. If none exists and we are not asked
 * to block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring
 * and return as soon as any of those fences completes.
 */
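
/* Illustration (added for clarity, not from the original source): with
 * three live allocations and "hole" pointing at B, the free span that
 * radeon_sa_bo_try_alloc() examines runs from B->eoffset to C->soffset:
 *
 *	olist:	head <-> A <-> B <-> C <-> (wraps to head)
 *				 ^hole
 *
 * When hole->next is the list head, the free span instead runs from the
 * last bo's eoffset to the end of the buffer (sa_manager->size).
 */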

#include "drmP.h"
#include "drm.h"
#include "radeon.h"

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
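
/* Create the backing bo and reset the allocator state. The bo is only
 * created here; pinning and CPU mapping happen later in
 * radeon_sa_bo_manager_start().
 */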
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned size, u32 domain)
{
	int i, r;

	spin_lock_init(&sa_manager->lock);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}
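
/* Tear the manager down: reclaim whatever try_free can, warn if anything is
 * still on the olist, then force-remove the leftovers and drop the reference
 * on the backing bo.
 */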
void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		radeon_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		radeon_sa_bo_remove_locked(sa_bo);
	}
	radeon_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
}
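
/* Pin the backing bo into the manager's GPU domain and kmap it for CPU
 * access, making the buffer usable for sub-allocations.
 */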
int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}
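
/* Counterpart of radeon_sa_bo_manager_start(): unmap and unpin the backing
 * bo, e.g. before the device is suspended.
 */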
int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}
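
/* Unlink a sub-allocation from both the offset and fence lists and free it.
 * If the hole pointed at this bo, move the hole back to the previous entry.
 * Caller must hold sa_manager->lock.
 */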
static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
	struct radeon_sa_manager *sa_manager = sa_bo->manager;

	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	radeon_fence_unref(&sa_bo->fence);
	kfree(sa_bo);
}
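
/* Walk forward from the hole and reclaim consecutive sub-allocations whose
 * fences have already signaled, stopping at the first one still in flight.
 * Caller must hold sa_manager->lock.
 */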
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
			return;
		}
		radeon_sa_bo_remove_locked(sa_bo);
	}
}
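
/* Start offset of the current hole: the end offset of the bo the hole points
 * at, or 0 when the hole sits at the list head (empty manager or wrap).
 */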
static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
	}
	return 0;
}
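
/* End offset of the current hole: the start offset of the next bo, or the
 * manager size when the hole runs to the end of the buffer.
 */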
static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}
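
/* Try to place a sub-allocation of the given size and alignment into the
 * current hole. On success, link sa_bo in after the hole, advance the hole
 * to it and return true. Caller must hold sa_manager->lock.
 */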
static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
				   struct radeon_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}
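
/* Advance the hole past finished allocations: either wrap the hole back to
 * the list head, or reclaim the signaled bo closest to the hole across all
 * fence lists. Fences that have not signaled yet are collected into "fences"
 * so the caller can wait on them. Returns true when the hole moved and
 * allocation is worth retrying. Caller must hold sa_manager->lock.
 */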
static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
				   struct radeon_fence **fences,
				   unsigned *tries)
{
	struct radeon_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all the fence lists and try to find the sa_bo
	 * closest to the current hole
	 */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct radeon_sa_bo, flist);

		if (!radeon_fence_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		++tries[best_bo->fence->ring];
		sa_manager->hole = best_bo->olist.prev;

		/* we knew that this one is signaled,
		 * so it's safe to remove it
		 */
		radeon_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}
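
/* Allocate a sub-buffer of "size" bytes aligned to "align". Loops over
 * try_free/try_alloc/next_hole and, when "block" is set, sleeps on the
 * fences collected by radeon_sa_bo_next_hole() before retrying.
 *
 * Usage sketch (illustrative only; the manager instance, sizes and fence
 * variable below are assumptions, not taken from this file):
 *
 *	struct radeon_sa_bo *sa_bo;
 *	int r = radeon_sa_bo_new(rdev, &example_sa_manager, &sa_bo,
 *				 4096, 256, true);
 *	if (!r) {
 *		... submit work that reads the sub-buffer, get a fence ...
 *		radeon_sa_bo_free(rdev, &sa_bo, fence);
 *	}
 */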
int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo **sa_bo,
		     unsigned size, unsigned align, bool block)
{
	struct radeon_fence *fences[RADEON_NUM_RINGS];
	unsigned tries[RADEON_NUM_RINGS];
	int i, r = -ENOMEM;

	BUG_ON(align > RADEON_GPU_PAGE_SIZE);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->lock);
	do {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			radeon_sa_bo_try_free(sa_manager);

			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));

		if (block) {
			spin_unlock(&sa_manager->lock);
			r = radeon_fence_wait_any(rdev, fences, false);
			spin_lock(&sa_manager->lock);
			if (r) {
				/* if we have nothing to wait for we
				 * are practically out of memory
				 */
				if (r == -ENOENT) {
					r = -ENOMEM;
				}
				goto out_err;
			}
		}
	} while (block);

out_err:
	spin_unlock(&sa_manager->lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}
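
/* Release a sub-allocation. If an emitted but not yet signaled fence is
 * supplied, the bo is parked on that ring's fence list and reclaimed lazily
 * once the fence signals; otherwise it is removed immediately.
 */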
void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
		       struct radeon_fence *fence)
{
	struct radeon_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->lock);
	if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
		(*sa_bo)->fence = radeon_fence_ref(fence);
		list_add_tail(&(*sa_bo)->flist,
			      &sa_manager->flist[fence->ring]);
	} else {
		radeon_sa_bo_remove_locked(*sa_bo);
	}
	spin_unlock(&sa_manager->lock);
	*sa_bo = NULL;
}
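
/* Dump every sub-allocation to a debugfs seq_file, marking the entry the
 * hole points at with '>' and showing the protecting fence, if any.
 */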
#if defined(CONFIG_DEBUG_FS)
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct radeon_sa_bo *i;

	spin_lock(&sa_manager->lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%08x 0x%08x] size %8d",
			   i->soffset, i->eoffset, i->eoffset - i->soffset);
		if (i->fence) {
			seq_printf(m, " protected by 0x%016llx on ring %d",
				   i->fence->seq, i->fence->ring);
		}
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->lock);
}
#endif