From: Ben Skeggs
Date: Tue, 12 Aug 2014 03:54:37 +0000 (+1000)
Subject: drm/nouveau/core/mm: fill in holes with "allocated" nodes
X-Git-Url: http://drtracing.org/?a=commitdiff_plain;h=13dfe1286d1ea1af4c9330b039c2316d0d92c484;p=deliverable%2Flinux.git

drm/nouveau/core/mm: fill in holes with "allocated" nodes

The allocation algorithm doesn't expect there to be holes in the mm,
which causes its alignment/cutoff calculations to choke (and go
negative) when encountering the last chunk of a block before a hole.

The least expensive solution is to simply fill in any holes with nodes
that are pre-marked as being allocated.

Signed-off-by: Ben Skeggs
---

diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index 8a77a8bf9cc0..02ce615687ac 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -116,7 +116,7 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
         u32 splitoff;
         u32 s, e;
 
-        BUG_ON(type == NVKM_MM_TYPE_NONE);
+        BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);
 
         list_for_each_entry(this, &mm->free, fl_entry) {
                 e = this->offset + this->length;
@@ -182,7 +182,7 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
         struct nouveau_mm_node *prev, *this, *next;
         u32 mask = align - 1;
 
-        BUG_ON(type == NVKM_MM_TYPE_NONE);
+        BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);
 
         list_for_each_entry_reverse(this, &mm->free, fl_entry) {
                 u32 e = this->offset + this->length;
@@ -227,9 +227,21 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
 int
 nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
 {
-        struct nouveau_mm_node *node;
+        struct nouveau_mm_node *node, *prev;
+        u32 next;
 
         if (nouveau_mm_initialised(mm)) {
+                prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry);
+                next = prev->offset + prev->length;
+                if (next != offset) {
+                        BUG_ON(next > offset);
+                        if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
+                                return -ENOMEM;
+                        node->type = NVKM_MM_TYPE_HOLE;
+                        node->offset = next;
+                        node->length = offset - next;
+                        list_add_tail(&node->nl_entry, &mm->nodes);
+                }
                 BUG_ON(block != mm->block_size);
         } else {
                 INIT_LIST_HEAD(&mm->nodes);
@@ -264,9 +276,11 @@ nouveau_mm_fini(struct nouveau_mm *mm)
                 return 0;
 
         list_for_each_entry(node, &mm->nodes, nl_entry) {
-                if (++nodes > mm->heap_nodes) {
-                        nouveau_mm_dump(mm, "mm not clean!");
-                        return -EBUSY;
+                if (node->type != NVKM_MM_TYPE_HOLE) {
+                        if (++nodes > mm->heap_nodes) {
+                                nouveau_mm_dump(mm, "mm not clean!");
+                                return -EBUSY;
+                        }
                 }
         }
 
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
index 7848c0402497..d4ef40460e42 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/mm.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -7,6 +7,7 @@ struct nouveau_mm_node {
         struct list_head rl_entry;
 
 #define NVKM_MM_TYPE_NONE 0x00
+#define NVKM_MM_TYPE_HOLE 0xff
         u8 type;
         u32 offset;
         u32 length;
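
For reference, below is a minimal userspace sketch of the hole-filling idea
implemented in nouveau_mm_init() above. The names mm_node, mm and
mm_add_heap are simplified stand-ins invented for illustration, using a
plain singly-linked list instead of the kernel's struct nouveau_mm_node,
list_head and kzalloc(); it only shows how a gap between the end of the
previously registered heap and a newly registered heap gets covered by a
node pre-marked as a hole.

/*
 * Simplified sketch of the hole-filling step; not the kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#define MM_TYPE_NONE 0x00   /* free space the allocator may hand out */
#define MM_TYPE_HOLE 0xff   /* mirrors NVKM_MM_TYPE_HOLE in the patch */

struct mm_node {
        unsigned char type;
        unsigned int offset;
        unsigned int length;
        struct mm_node *next;   /* singly-linked list instead of list_head */
};

struct mm {
        struct mm_node *head;
        struct mm_node *tail;
};

static void mm_append(struct mm *mm, struct mm_node *node)
{
        node->next = NULL;
        if (mm->tail)
                mm->tail->next = node;
        else
                mm->head = node;
        mm->tail = node;
}

/* Register a heap [offset, offset+length); cover any gap with a hole node. */
static int mm_add_heap(struct mm *mm, unsigned int offset, unsigned int length)
{
        struct mm_node *node;

        if (mm->tail) {
                unsigned int next = mm->tail->offset + mm->tail->length;

                assert(next <= offset);   /* heaps must be added in order */
                if (next != offset) {
                        node = calloc(1, sizeof(*node));
                        if (!node)
                                return -1;   /* kernel returns -ENOMEM */
                        node->type = MM_TYPE_HOLE;   /* pre-marked "allocated" */
                        node->offset = next;
                        node->length = offset - next;
                        mm_append(mm, node);
                }
        }

        node = calloc(1, sizeof(*node));
        if (!node)
                return -1;
        node->type = MM_TYPE_NONE;
        node->offset = offset;
        node->length = length;
        mm_append(mm, node);
        return 0;
}

int main(void)
{
        struct mm mm = { NULL, NULL };
        struct mm_node *n, *tmp;

        /* Two non-contiguous heaps: a hole node covers [0x4000, 0x8000). */
        mm_add_heap(&mm, 0x0000, 0x4000);
        mm_add_heap(&mm, 0x8000, 0x4000);

        for (n = mm.head; n; n = n->next)
                printf("%s: offset=0x%05x length=0x%05x\n",
                       n->type == MM_TYPE_HOLE ? "hole" : "free",
                       n->offset, n->length);

        for (n = mm.head; n; n = tmp) {
                tmp = n->next;
                free(n);
        }
        return 0;
}

Keeping the gap represented as a hole node (rather than leaving it out of
the list) means the node list stays contiguous, so nouveau_mm_head() and
nouveau_mm_tail() never see a discontinuity, and nouveau_mm_fini() simply
skips hole nodes when checking that the mm is clean.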