Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/nouveau/core/mm: replace region list with next pointer

We never have any need for a doubly-linked list here, and as there's
generally a large number of these objects, replace it with a singly-
linked list in order to save some memory.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

+34 -37
+7 -1
drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
··· 5 5 struct nvkm_mm_node { 6 6 struct list_head nl_entry; 7 7 struct list_head fl_entry; 8 - struct list_head rl_entry; 8 + struct nvkm_mm_node *next; 9 9 10 10 #define NVKM_MM_HEAP_ANY 0x00 11 11 u8 heap; ··· 38 38 u32 size_min, u32 align, struct nvkm_mm_node **); 39 39 void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **); 40 40 void nvkm_mm_dump(struct nvkm_mm *, const char *); 41 + 42 + static inline bool 43 + nvkm_mm_contiguous(struct nvkm_mm_node *node) 44 + { 45 + return !node->next; 46 + } 41 47 #endif
+1 -1
drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
··· 29 29 u8 page_shift; 30 30 31 31 struct nvkm_mm_node *tag; 32 - struct list_head regions; 32 + struct nvkm_mm_node *mem; 33 33 dma_addr_t *pages; 34 34 u32 memtype; 35 35 u64 offset;
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 321 321 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) { 322 322 if (bo->mem.mem_type == TTM_PL_VRAM) { 323 323 struct nvkm_mem *mem = bo->mem.mm_node; 324 - if (!list_is_singular(&mem->regions)) 324 + if (!nvkm_mm_contiguous(mem->mem)) 325 325 evict = true; 326 326 } 327 327 nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
+2
drivers/gpu/drm/nouveau/nvkm/core/mm.c
··· 147 147 if (!this) 148 148 return -ENOMEM; 149 149 150 + this->next = NULL; 150 151 this->type = type; 151 152 list_del(&this->fl_entry); 152 153 *pnode = this; ··· 226 225 if (!this) 227 226 return -ENOMEM; 228 227 228 + this->next = NULL; 229 229 this->type = type; 230 230 list_del(&this->fl_entry); 231 231 *pnode = this;
+5 -5
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
··· 445 445 { 446 446 struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc; 447 447 struct nvkm_mm *mm = &ram->vram; 448 - struct nvkm_mm_node *r; 448 + struct nvkm_mm_node **node, *r; 449 449 struct nvkm_mem *mem; 450 450 int type = (memtype & 0x0ff); 451 451 int back = (memtype & 0x800); ··· 462 462 if (!mem) 463 463 return -ENOMEM; 464 464 465 - INIT_LIST_HEAD(&mem->regions); 466 465 mem->size = size; 467 466 468 467 mutex_lock(&ram->fb->subdev.mutex); ··· 477 478 } 478 479 mem->memtype = type; 479 480 481 + node = &mem->mem; 480 482 do { 481 483 if (back) 482 484 ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r); ··· 489 489 return ret; 490 490 } 491 491 492 - list_add_tail(&r->rl_entry, &mem->regions); 492 + *node = r; 493 + node = &r->next; 493 494 size -= r->length; 494 495 } while (size); 495 496 mutex_unlock(&ram->fb->subdev.mutex); 496 497 497 - r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry); 498 - mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT; 498 + mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT; 499 499 *pmem = mem; 500 500 return 0; 501 501 }
+10 -13
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
··· 496 496 void 497 497 __nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem) 498 498 { 499 - struct nvkm_mm_node *this; 500 - 501 - while (!list_empty(&mem->regions)) { 502 - this = list_first_entry(&mem->regions, typeof(*this), rl_entry); 503 - 504 - list_del(&this->rl_entry); 505 - nvkm_mm_free(&ram->vram, &this); 499 + struct nvkm_mm_node *next = mem->mem; 500 + struct nvkm_mm_node *node; 501 + while ((node = next)) { 502 + next = node->next; 503 + nvkm_mm_free(&ram->vram, &node); 506 504 } 507 - 508 505 nvkm_mm_free(&ram->tags, &mem->tag); 509 506 } 510 507 ··· 527 530 { 528 531 struct nvkm_mm *heap = &ram->vram; 529 532 struct nvkm_mm *tags = &ram->tags; 530 - struct nvkm_mm_node *r; 533 + struct nvkm_mm_node **node, *r; 531 534 struct nvkm_mem *mem; 532 535 int comp = (memtype & 0x300) >> 8; 533 536 int type = (memtype & 0x07f); ··· 556 559 comp = 0; 557 560 } 558 561 559 - INIT_LIST_HEAD(&mem->regions); 560 562 mem->memtype = (comp << 7) | type; 561 563 mem->size = max; 562 564 563 565 type = nv50_fb_memtype[type]; 566 + node = &mem->mem; 564 567 do { 565 568 if (back) 566 569 ret = nvkm_mm_tail(heap, 0, type, max, min, align, &r); ··· 572 575 return ret; 573 576 } 574 577 575 - list_add_tail(&r->rl_entry, &mem->regions); 578 + *node = r; 579 + node = &r->next; 576 580 max -= r->length; 577 581 } while (max); 578 582 mutex_unlock(&ram->fb->subdev.mutex); 579 583 580 - r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry); 581 - mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT; 584 + mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT; 582 585 *pmem = mem; 583 586 return 0; 584 587 }
+4 -13
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
··· 305 305 struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); 306 306 struct gk20a_instmem *imem = node->base.imem; 307 307 struct device *dev = imem->base.subdev.device->dev; 308 - struct nvkm_mm_node *r; 308 + struct nvkm_mm_node *r = node->base.mem.mem; 309 309 unsigned long flags; 310 310 int i; 311 311 312 - if (unlikely(list_empty(&node->base.mem.regions))) 312 + if (unlikely(!r)) 313 313 goto out; 314 314 315 315 spin_lock_irqsave(&imem->lock, flags); ··· 319 319 gk20a_instobj_iommu_recycle_vaddr(node); 320 320 321 321 spin_unlock_irqrestore(&imem->lock, flags); 322 - 323 - r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node, 324 - rl_entry); 325 322 326 323 /* clear IOMMU bit to unmap pages */ 327 324 r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift); ··· 401 404 node->r.length = (npages << PAGE_SHIFT) >> 12; 402 405 403 406 node->base.mem.offset = node->handle; 404 - 405 - INIT_LIST_HEAD(&node->base.mem.regions); 406 - list_add_tail(&node->r.rl_entry, &node->base.mem.regions); 407 - 407 + node->base.mem.mem = &node->r; 408 408 return 0; 409 409 } 410 410 ··· 478 484 r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift); 479 485 480 486 node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift; 481 - 482 - INIT_LIST_HEAD(&node->base.mem.regions); 483 - list_add_tail(&r->rl_entry, &node->base.mem.regions); 484 - 487 + node->base.mem.mem = r; 485 488 return 0; 486 489 487 490 release_area:
+4 -3
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
··· 31 31 { 32 32 struct nvkm_vm *vm = vma->vm; 33 33 struct nvkm_mmu *mmu = vm->mmu; 34 - struct nvkm_mm_node *r; 34 + struct nvkm_mm_node *r = node->mem; 35 35 int big = vma->node->type != mmu->func->spg_shift; 36 36 u32 offset = vma->node->offset + (delta >> 12); 37 37 u32 bits = vma->node->type - 12; ··· 41 41 u32 end, len; 42 42 43 43 delta = 0; 44 - list_for_each_entry(r, &node->regions, rl_entry) { 44 + while (r) { 45 45 u64 phys = (u64)r->offset << 12; 46 46 u32 num = r->length >> bits; ··· 65 65 66 66 delta += (u64)len << vma->node->type; 67 67 } 68 - } 68 + r = r->next; 69 + } 69 70 70 71 mmu->func->flush(vm); 71 72 }