Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm: fix for non-coherent DMA PowerPC
  drm: radeon: fix sparse integer as NULL pointer warnings in radeon_mem.c
  drm/i915: fix oops on agp=off
  drm/r300: fix bug in r300 userspace hardware wait emission

 drivers/char/drm/ati_pcigart.c |    6 ++
 drivers/char/drm/drm_scatter.c |   11 +++-
 drivers/char/drm/drm_vm.c      |   20 +++---
 drivers/char/drm/i915_dma.c    |    3 +
 drivers/char/drm/r300_cmdbuf.c |   54 ++++++----
 drivers/char/drm/radeon_drm.h  |   12 +++
 drivers/char/drm/radeon_mem.c  |    8 +-
 7 files changed, 92 insertions(+), 22 deletions(-)

diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/char/drm/ati_pcigart.c
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/char/drm/ati_pcigart.c
@@ -168,6 +168,12 @@
 		}
 	}
 
+	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+		dma_sync_single_for_device(&dev->pdev->dev,
+					   bus_address,
+					   max_pages * sizeof(u32),
+					   PCI_DMA_TODEVICE);
+
 	ret = 1;
 
 #if defined(__i386__) || defined(__x86_64__)
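
Context for this hunk: on a non-cache-coherent platform the CPU fills the GART table through a cacheable kernel mapping, so entries can still be sitting in dirty cache lines when the chip begins fetching them across the bus. dma_sync_single_for_device() writes the range back to memory before ownership passes to the device, and degenerates to a no-op on coherent hardware. A minimal sketch of the fill-then-sync pattern (fill_gart_table and page_bus are hypothetical names, not the driver's):

#include <linux/dma-mapping.h>

/* Sketch only: the CPU writes a device-visible table through its
 * cacheable mapping, then flushes its caches for that range so the
 * device reads consistent data. */
static void fill_gart_table(struct device *dev, u32 *table,
			    dma_addr_t table_bus, int npages,
			    const dma_addr_t *page_bus)
{
	int i;

	for (i = 0; i < npages; i++)
		table[i] = cpu_to_le32(page_bus[i]);	/* cacheable CPU writes */

	/* Hand the table to the device: write back the CPU cache. */
	dma_sync_single_for_device(dev, table_bus,
				   npages * sizeof(u32), DMA_TO_DEVICE);
}
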
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/char/drm/drm_scatter.c
--- a/drivers/char/drm/drm_scatter.c
+++ b/drivers/char/drm/drm_scatter.c
@@ -36,6 +36,15 @@
 
 #define DEBUG_SCATTER 0
 
+static inline void *drm_vmalloc_dma(unsigned long size)
+{
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+#else
+	return vmalloc_32(size);
+#endif
+}
+
 void drm_sg_cleanup(struct drm_sg_mem * entry)
 {
 	struct page *page;
@@ -113,7 +122,7 @@
 	}
 	memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
 
-	entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
+	entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
 	if (!entry->virtual) {
 		drm_free(entry->busaddr,
 			 entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
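
The helper keeps the common case, vmalloc_32(), which guarantees pages addressable with 32-bit bus addresses, and only switches to a non-cached __vmalloc() mapping when CONFIG_NOT_COHERENT_CACHE says the CPU will not snoop device traffic. One design point: both branches return vmalloc-space memory, so the existing vfree() in drm_sg_cleanup() works unchanged for either. A minimal hypothetical caller:

#include <linux/vmalloc.h>

/* Hypothetical caller: allocate and release a scatter-gather backing
 * buffer via the new helper.  vfree() handles memory from either the
 * __vmalloc() or the vmalloc_32() branch, so teardown is unchanged. */
static void *sg_backing_alloc(unsigned long pages)
{
	return drm_vmalloc_dma(pages << PAGE_SHIFT);
}

static void sg_backing_free(void *virtual)
{
	vfree(virtual);		/* NULL-safe */
}
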
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -54,13 +54,24 @@
 	pgprot_val(tmp) |= _PAGE_NO_CACHE;
 	if (map_type == _DRM_REGISTERS)
 		pgprot_val(tmp) |= _PAGE_GUARDED;
-#endif
-#if defined(__ia64__)
+#elif defined(__ia64__)
 	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
 			    vma->vm_start))
 		tmp = pgprot_writecombine(tmp);
 	else
 		tmp = pgprot_noncached(tmp);
+#elif defined(__sparc__)
+	tmp = pgprot_noncached(tmp);
+#endif
+	return tmp;
+}
+
+static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+	tmp |= _PAGE_NO_CACHE;
 #endif
 	return tmp;
 }
@@ -614,9 +625,6 @@
 		offset = dev->driver->get_reg_ofs(dev);
 		vma->vm_flags |= VM_IO;	/* not in core dump */
 		vma->vm_page_prot = drm_io_prot(map->type, vma);
-#ifdef __sparc__
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
 		if (io_remap_pfn_range(vma, vma->vm_start,
 				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
@@ -632,6 +640,7 @@
 				    page_to_pfn(virt_to_page(map->handle)),
 				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
 			return -EAGAIN;
+		vma->vm_page_prot = drm_dma_prot(map->type, vma);
 		/* fall through to _DRM_SHM */
 	case _DRM_SHM:
 		vma->vm_ops = &drm_vm_shm_ops;
@@ -640,6 +649,7 @@
 		/* Don't let this area swap.  Change when
 		   DRM_KERNEL advisory is supported. */
 		vma->vm_flags |= VM_RESERVED;
+		vma->vm_page_prot = drm_dma_prot(map->type, vma);
 		break;
 	case _DRM_SCATTER_GATHER:
 		vma->vm_ops = &drm_vm_sg_ops;
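
Two changes land here: the per-architecture #if/#endif pairs in drm_io_prot() collapse into a single #if/#elif chain, which also absorbs the __sparc__ pgprot_noncached() special case previously open-coded at the call site, and the new drm_dma_prot() gives the consistent-memory and _DRM_SHM mappings a non-cached user view on non-coherent PowerPC, so userspace agrees with the kernel mapping created by drm_vmalloc_dma(). A rough sketch of consuming such a helper at mmap time (simplified; not the driver's exact control flow):

/* Sketch, assuming a simplified mmap handler: pick the page protection
 * from drm_dma_prot() before remapping, so on non-coherent PowerPC the
 * user mapping is non-cached like the kernel-side buffer. */
static int example_dma_mmap(struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_RESERVED;	/* keep the area out of swap */
	vma->vm_page_prot = drm_dma_prot(_DRM_SHM, vma);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
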
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -804,6 +804,9 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
+	if (!dev_priv)
+		return;
+
 	if (dev_priv->agp_heap)
 		i915_mem_takedown(&(dev_priv->agp_heap));
 
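
Why dev_priv can be NULL here: with agp=off the i915 load path fails before dev->dev_private is allocated, yet this cleanup hook still runs on the half-initialized device and used to dereference the NULL pointer. The early return is the usual defensive shape for teardown that may follow a failed init:

/* Illustrative shape (example_cleanup is not a real driver entry
 * point): teardown must tolerate dev_private never having been set. */
static void example_cleanup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv)		/* init never completed (e.g. agp=off) */
		return;

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));
}
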
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -729,6 +729,47 @@
 	buf->used = 0;
 }
 
+static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
+			  drm_r300_cmd_header_t header)
+{
+	u32 wait_until;
+	RING_LOCALS;
+
+	if (!header.wait.flags)
+		return;
+
+	wait_until = 0;
+
+	switch(header.wait.flags) {
+	case R300_WAIT_2D:
+		wait_until = RADEON_WAIT_2D_IDLE;
+		break;
+	case R300_WAIT_3D:
+		wait_until = RADEON_WAIT_3D_IDLE;
+		break;
+	case R300_NEW_WAIT_2D_3D:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
+		break;
+	case R300_NEW_WAIT_2D_2D_CLEAN:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
+		break;
+	case R300_NEW_WAIT_3D_3D_CLEAN:
+		wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
+		break;
+	case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
+		wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
+		break;
+	default:
+		return;
+	}
+
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(wait_until);
+	ADVANCE_RING();
+}
+
 static int r300_scratch(drm_radeon_private_t *dev_priv,
 			drm_radeon_kcmd_buffer_t *cmdbuf,
 			drm_r300_cmd_header_t header)
@@ -950,19 +991,8 @@
 			break;
 
 		case R300_CMD_WAIT:
-			/* simple enough, we can do it here */
 			DRM_DEBUG("R300_CMD_WAIT\n");
-			if (header.wait.flags == 0)
-				break;	/* nothing to do */
-
-			{
-				RING_LOCALS;
-
-				BEGIN_RING(2);
-				OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
-				OUT_RING((header.wait.flags & 0xf) << 14);
-				ADVANCE_RING();
-			}
+			r300_cmd_wait(dev_priv, header);
 			break;
 
 		case R300_CMD_SCRATCH:
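
What the new r300_cmd_wait() fixes: the old inline emission treated header.wait.flags as a 4-bit mask and shifted it wholesale into the wait register, (flags & 0xf) << 14. That happens to be right for R300_WAIT_2D (0x1) and R300_WAIT_3D (0x2), but R300_WAIT_2D_CLEAN (0x3) and R300_WAIT_3D_CLEAN (0x4) are plain enumerators, not bit fields, so their shifted values land on the wrong register bits and a "clean" wait was never actually emitted. Illustrative decode of the old path (a sketch; it assumes the 2D/3D idle bits sit at positions 14 and 15, which is what the shift targeted):

/* Old decode, shown for comparison only: */
static u32 old_wait_decode(u32 flags)
{
	return (flags & 0xf) << 14;
}
/*   R300_WAIT_2D       (0x1) -> bit 14      : 2D idle, as intended
 *   R300_WAIT_3D       (0x2) -> bit 15      : 3D idle, as intended
 *   R300_WAIT_2D_CLEAN (0x3) -> bits 14|15  : reads as "2D idle | 3D idle"
 *   R300_WAIT_3D_CLEAN (0x4) -> bit 16      : an unrelated wait bit
 * The switch in r300_cmd_wait() instead maps each enumerator to the
 * RADEON_WAIT_* bits the caller actually meant. */
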
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
--- a/drivers/char/drm/radeon_drm.h
+++ b/drivers/char/drm/radeon_drm.h
@@ -225,8 +225,20 @@
 #define R300_CMD_WAIT			7
 #	define R300_WAIT_2D		0x1
 #	define R300_WAIT_3D		0x2
+/* these two defines are DOING IT WRONG - however
+ * we have userspace which relies on using these.
+ * The wait interface is backwards compat new
+ * code should use the NEW_WAIT defines below
+ * THESE ARE NOT BIT FIELDS
+ */
 #	define R300_WAIT_2D_CLEAN	0x3
 #	define R300_WAIT_3D_CLEAN	0x4
+
+#	define R300_NEW_WAIT_2D_3D	0x3
+#	define R300_NEW_WAIT_2D_2D_CLEAN	0x4
+#	define R300_NEW_WAIT_3D_3D_CLEAN	0x6
+#	define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN	0x8
+
 #define R300_CMD_SCRATCH		8
 
 typedef union {
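
Note how the new values stay compatible: 0x3 and 0x4 are reused as R300_NEW_WAIT_2D_3D and R300_NEW_WAIT_2D_2D_CLEAN, so the numeric values old binaries send keep a defined meaning close to what the old decode actually emitted, while the genuinely new combinations take fresh values (0x6, 0x8). A hedged userspace sketch of requesting a full idle-and-clean wait (the layout of the wait member of drm_r300_cmd_header_t is assumed from the driver code above):

/* Hypothetical userspace snippet: build a wait command header. */
drm_r300_cmd_header_t header;

header.u = 0;				/* clear all fields */
header.wait.cmd_type = R300_CMD_WAIT;
/* full flush: both engines idle and their caches clean */
header.wait.flags = R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN;
/* the header is then copied into the command stream handed to the
 * radeon CP command-buffer ioctl */
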
diff --git a/drivers/char/drm/radeon_mem.c b/drivers/char/drm/radeon_mem.c
--- a/drivers/char/drm/radeon_mem.c
+++ b/drivers/char/drm/radeon_mem.c
@@ -88,7 +88,7 @@
 
 	list_for_each(p, heap) {
 		int start = (p->start + mask) & ~mask;
-		if (p->file_priv == 0 && start + size <= p->start + p->size)
+		if (p->file_priv == NULL && start + size <= p->start + p->size)
 			return split_block(p, start, size, file_priv);
 	}
 
@@ -113,7 +113,7 @@
 	/* Assumes a single contiguous range.  Needs a special file_priv in
 	 * 'heap' to stop it being subsumed.
 	 */
-	if (p->next->file_priv == 0) {
+	if (p->next->file_priv == NULL) {
 		struct mem_block *q = p->next;
 		p->size += q->size;
 		p->next = q->next;
@@ -121,7 +121,7 @@
 		drm_free(q, sizeof(*q), DRM_MEM_BUFS);
 	}
 
-	if (p->prev->file_priv == 0) {
+	if (p->prev->file_priv == NULL) {
 		struct mem_block *q = p->prev;
 		q->size += p->size;
 		q->next = p->next;
@@ -174,7 +174,7 @@
 	 * 'heap' to stop it being subsumed.
 	 */
 	list_for_each(p, heap) {
-		while (p->file_priv == 0 && p->next->file_priv == 0) {
+		while (p->file_priv == NULL && p->next->file_priv == NULL) {
 			struct mem_block *q = p->next;
 			p->size += q->size;
 			p->next = q->next;
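
These are pure sparse fixes: file_priv is a pointer, and comparing a pointer against the integer literal 0 triggers sparse's "Using plain integer as NULL pointer" warning. The generated code is identical; NULL (or the equally idiomatic !p->file_priv) just states the intent in the type system. A minimal illustration (block_is_free is a hypothetical helper, with struct mem_block as declared in the radeon driver):

#include <linux/stddef.h>	/* NULL */

static int block_is_free(const struct mem_block *p)
{
	/* before: p->file_priv == 0    (sparse: plain integer as NULL pointer)
	 * after:  pointer-typed comparison, same machine code */
	return p->file_priv == NULL;	/* or simply: !p->file_priv */
}
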