drm/nouveau/core/mm: fill in holes with "allocated" nodes
author Ben Skeggs <bskeggs@redhat.com>
Tue, 12 Aug 2014 03:54:37 +0000 (13:54 +1000)
committer Ben Skeggs <bskeggs@redhat.com>
Mon, 15 Sep 2014 12:22:13 +0000 (22:22 +1000)
The allocation algorithm doesn't expect there to be holes in the mm, which
causes its alignment/cutoff calculations to choke (and go negative) when
encountering the last chunk of a block before a hole.
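
To make "go negative" concrete: the allocator works on u32 values, so an
underflowing subtraction does not produce a negative number, it wraps to an
enormous value. The snippet below is illustrative only (made-up offsets, not
the actual nouveau calculation):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical offsets: if the end derived from the node list is
	 * lower than the end the algorithm expects (because a hole sits in
	 * between), the u32 subtraction wraps instead of going negative. */
	uint32_t expected_end = 0x3000;
	uint32_t derived_end  = 0x2800;
	uint32_t remaining    = derived_end - expected_end;

	printf("remaining = 0x%08" PRIx32 "\n", remaining); /* 0xfffff800 */
	return 0;
}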

The least expensive solution is to simply fill in any holes with nodes
that are pre-marked as being allocated.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
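
As a standalone sketch of the approach (not the nouveau code itself; the list
layout, helper names and offsets below are assumptions made for brevity): when
a new range is appended and it does not start where the previous range ended,
the gap is padded with a node that is already marked as allocated, so the node
list stays contiguous.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TYPE_NONE 0x00	/* free space, analogous to NVKM_MM_TYPE_NONE */
#define TYPE_HOLE 0xff	/* padding,    analogous to NVKM_MM_TYPE_HOLE */

struct node {
	uint8_t  type;
	uint32_t offset;
	uint32_t length;
	struct node *next;
};

static struct node *head, *tail;

static struct node *append(uint8_t type, uint32_t offset, uint32_t length)
{
	struct node *n = calloc(1, sizeof(*n));
	if (!n)
		return NULL;
	n->type = type;
	n->offset = offset;
	n->length = length;
	if (tail)
		tail->next = n;
	else
		head = n;
	tail = n;
	return n;
}

/* Add a free range; if it is not adjacent to the previous one, fill the gap
 * with a pre-marked "hole" node first.  Ranges must be added in increasing
 * order, as the kernel code enforces with BUG_ON(). */
static int add_range(uint32_t offset, uint32_t length)
{
	if (tail) {
		uint32_t end = tail->offset + tail->length;
		if (end != offset && !append(TYPE_HOLE, end, offset - end))
			return -1;	/* mirrors the -ENOMEM path */
	}
	return append(TYPE_NONE, offset, length) ? 0 : -1;
}

int main(void)
{
	struct node *n;

	add_range(0x0000, 0x1000);	/* first block                 */
	add_range(0x2000, 0x1000);	/* second block, leaving a gap */

	for (n = head; n; n = n->next)
		printf("0x%04x +0x%04x %s\n", (unsigned)n->offset,
		       (unsigned)n->length,
		       n->type == TYPE_HOLE ? "hole" : "free");
	return 0;
}

In the patch itself the hole node is added only to mm->nodes, never to the
free list, so the allocator can never hand it out, and nouveau_mm_fini() skips
NVKM_MM_TYPE_HOLE nodes when checking that the heap is clean.
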
drivers/gpu/drm/nouveau/core/core/mm.c
drivers/gpu/drm/nouveau/core/include/core/mm.h

diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index 8a77a8bf9cc093ac9a4c66580aa0371cc2b1a94e..02ce615687acd0d9b51282b6210b4aab0d224fa6 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -116,7 +116,7 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
        u32 splitoff;
        u32 s, e;
 
-       BUG_ON(type == NVKM_MM_TYPE_NONE);
+       BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);
 
        list_for_each_entry(this, &mm->free, fl_entry) {
                e = this->offset + this->length;
@@ -182,7 +182,7 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
        struct nouveau_mm_node *prev, *this, *next;
        u32 mask = align - 1;
 
-       BUG_ON(type == NVKM_MM_TYPE_NONE);
+       BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);
 
        list_for_each_entry_reverse(this, &mm->free, fl_entry) {
                u32 e = this->offset + this->length;
@@ -227,9 +227,21 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
 int
 nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
 {
-       struct nouveau_mm_node *node;
+       struct nouveau_mm_node *node, *prev;
+       u32 next;
 
        if (nouveau_mm_initialised(mm)) {
+               prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry);
+               next = prev->offset + prev->length;
+               if (next != offset) {
+                       BUG_ON(next > offset);
+                       if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
+                               return -ENOMEM;
+                       node->type   = NVKM_MM_TYPE_HOLE;
+                       node->offset = next;
+                       node->length = offset - next;
+                       list_add_tail(&node->nl_entry, &mm->nodes);
+               }
                BUG_ON(block != mm->block_size);
        } else {
                INIT_LIST_HEAD(&mm->nodes);
@@ -264,9 +276,11 @@ nouveau_mm_fini(struct nouveau_mm *mm)
                return 0;
 
        list_for_each_entry(node, &mm->nodes, nl_entry) {
-               if (++nodes > mm->heap_nodes) {
-                       nouveau_mm_dump(mm, "mm not clean!");
-                       return -EBUSY;
+               if (node->type != NVKM_MM_TYPE_HOLE) {
+                       if (++nodes > mm->heap_nodes) {
+                               nouveau_mm_dump(mm, "mm not clean!");
+                               return -EBUSY;
+                       }
                }
        }
 
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
index 7848c04024970efe9363529ffcb909b23aab430f..d4ef40460e42bf587398c81919326b5dcd076b6b 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/mm.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -7,6 +7,7 @@ struct nouveau_mm_node {
        struct list_head rl_entry;
 
 #define NVKM_MM_TYPE_NONE 0x00
+#define NVKM_MM_TYPE_HOLE 0xff
        u8  type;
        u32 offset;
        u32 length;