Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/radeon/kms: balance asic_reset functions
  drm/radeon/kms: remove duplicate card_posted() functions
  drm/radeon/kms: add module option for pcie gen2
  drm/radeon/kms: fix typo in evergreen safe reg
  drm/nouveau: fix gpu page faults triggered by plymouthd
  drm/nouveau: greatly simplify mm, killing some bugs in the process
  drm/nvc0: enable protection of system-use-only structures in vm
  drm/nv40: initialise 0x17xx on all chipsets that have it
  drm/nv40: make detection of 0x4097-ful chipsets available everywhere

+126 -264
+15
drivers/gpu/drm/nouveau/nouveau_drv.h
··· 160 160 #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) 161 161 #define NVOBJ_FLAG_ZERO_FREE (1 << 2) 162 162 #define NVOBJ_FLAG_VM (1 << 3) 163 + #define NVOBJ_FLAG_VM_USER (1 << 4) 163 164 164 165 #define NVOBJ_CINST_GLOBAL 0xdeadbeef 165 166 ··· 1575 1574 return dev->pdev->device == device && 1576 1575 dev->pdev->subsystem_vendor == sub_vendor && 1577 1576 dev->pdev->subsystem_device == sub_device; 1577 + } 1578 + 1579 + /* returns 1 if device is one of the nv4x using the 0x4497 object class, 1580 + * helpful to determine a number of other hardware features 1581 + */ 1582 + static inline int 1583 + nv44_graph_class(struct drm_device *dev) 1584 + { 1585 + struct drm_nouveau_private *dev_priv = dev->dev_private; 1586 + 1587 + if ((dev_priv->chipset & 0xf0) == 0x60) 1588 + return 1; 1589 + 1590 + return !(0x0baf & (1 << (dev_priv->chipset & 0x0f))); 1578 1591 } 1579 1592 1580 1593 /* memory type/access flags, do not match hardware values */
+2 -2
drivers/gpu/drm/nouveau/nouveau_fbcon.c
··· 352 352 FBINFO_HWACCEL_IMAGEBLIT; 353 353 info->flags |= FBINFO_CAN_FORCE_OUTPUT; 354 354 info->fbops = &nouveau_fbcon_sw_ops; 355 - info->fix.smem_start = dev->mode_config.fb_base + 356 - (nvbo->bo.mem.start << PAGE_SHIFT); 355 + info->fix.smem_start = nvbo->bo.mem.bus.base + 356 + nvbo->bo.mem.bus.offset; 357 357 info->fix.smem_len = size; 358 358 359 359 info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
+10 -16
drivers/gpu/drm/nouveau/nouveau_mem.c
··· 742 742 { 743 743 struct nouveau_mm *mm = man->priv; 744 744 struct nouveau_mm_node *r; 745 - u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {}; 746 - int i; 745 + u32 total = 0, free = 0; 747 746 748 747 mutex_lock(&mm->mutex); 749 748 list_for_each_entry(r, &mm->nodes, nl_entry) { 750 - printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n", 751 - prefix, r->free ? "free" : "used", r->type, 752 - ((u64)r->offset << 12), 749 + printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n", 750 + prefix, r->type, ((u64)r->offset << 12), 753 751 (((u64)r->offset + r->length) << 12)); 752 + 754 753 total += r->length; 755 - ttotal[r->type] += r->length; 756 - if (r->free) 757 - tfree[r->type] += r->length; 758 - else 759 - tused[r->type] += r->length; 754 + if (!r->type) 755 + free += r->length; 760 756 } 761 757 mutex_unlock(&mm->mutex); 762 758 763 - printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12); 764 - for (i = 0; i < 3; i++) { 765 - printk(KERN_DEBUG "%s type %d: 0x%010llx, " 766 - "used 0x%010llx, free 0x%010llx\n", prefix, 767 - i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12); 768 - } 759 + printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n", 760 + prefix, (u64)total << 12, (u64)free << 12); 761 + printk(KERN_DEBUG "%s block: 0x%08x\n", 762 + prefix, mm->block_size << 12); 769 763 } 770 764 771 765 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
+43 -143
drivers/gpu/drm/nouveau/nouveau_mm.c
··· 48 48 49 49 b->offset = a->offset; 50 50 b->length = size; 51 - b->free = a->free; 52 51 b->type = a->type; 53 52 a->offset += size; 54 53 a->length -= size; 55 54 list_add_tail(&b->nl_entry, &a->nl_entry); 56 - if (b->free) 55 + if (b->type == 0) 57 56 list_add_tail(&b->fl_entry, &a->fl_entry); 58 57 return b; 59 58 } 60 59 61 - static struct nouveau_mm_node * 62 - nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this) 63 - { 64 - struct nouveau_mm_node *prev, *next; 65 - 66 - /* try to merge with free adjacent entries of same type */ 67 - prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry); 68 - if (this->nl_entry.prev != &rmm->nodes) { 69 - if (prev->free && prev->type == this->type) { 70 - prev->length += this->length; 71 - region_put(rmm, this); 72 - this = prev; 73 - } 74 - } 75 - 76 - next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry); 77 - if (this->nl_entry.next != &rmm->nodes) { 78 - if (next->free && next->type == this->type) { 79 - next->offset = this->offset; 80 - next->length += this->length; 81 - region_put(rmm, this); 82 - this = next; 83 - } 84 - } 85 - 86 - return this; 87 - } 60 + #define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \ 61 + list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry) 88 62 89 63 void 90 64 nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this) 91 65 { 92 - u32 block_s, block_l; 66 + struct nouveau_mm_node *prev = node(this, prev); 67 + struct nouveau_mm_node *next = node(this, next); 93 68 94 - this->free = true; 95 69 list_add(&this->fl_entry, &rmm->free); 96 - this = nouveau_mm_merge(rmm, this); 97 - 98 - /* any entirely free blocks now? 
we'll want to remove typing 99 - * on them now so they can be use for any memory allocation 100 - */ 101 - block_s = roundup(this->offset, rmm->block_size); 102 - if (block_s + rmm->block_size > this->offset + this->length) 103 - return; 104 - 105 - /* split off any still-typed region at the start */ 106 - if (block_s != this->offset) { 107 - if (!region_split(rmm, this, block_s - this->offset)) 108 - return; 109 - } 110 - 111 - /* split off the soon-to-be-untyped block(s) */ 112 - block_l = rounddown(this->length, rmm->block_size); 113 - if (block_l != this->length) { 114 - this = region_split(rmm, this, block_l); 115 - if (!this) 116 - return; 117 - } 118 - 119 - /* mark as having no type, and retry merge with any adjacent 120 - * untyped blocks 121 - */ 122 70 this->type = 0; 123 - nouveau_mm_merge(rmm, this); 71 + 72 + if (prev && prev->type == 0) { 73 + prev->length += this->length; 74 + region_put(rmm, this); 75 + this = prev; 76 + } 77 + 78 + if (next && next->type == 0) { 79 + next->offset = this->offset; 80 + next->length += this->length; 81 + region_put(rmm, this); 82 + } 124 83 } 125 84 126 85 int 127 86 nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, 128 87 u32 align, struct nouveau_mm_node **pnode) 129 88 { 130 - struct nouveau_mm_node *this, *tmp, *next; 131 - u32 splitoff, avail, alloc; 89 + struct nouveau_mm_node *prev, *this, *next; 90 + u32 min = size_nc ? 
size_nc : size; 91 + u32 align_mask = align - 1; 92 + u32 splitoff; 93 + u32 s, e; 132 94 133 - list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) { 134 - next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry); 135 - if (this->nl_entry.next == &rmm->nodes) 136 - next = NULL; 95 + list_for_each_entry(this, &rmm->free, fl_entry) { 96 + e = this->offset + this->length; 97 + s = this->offset; 137 98 138 - /* skip wrongly typed blocks */ 139 - if (this->type && this->type != type) 99 + prev = node(this, prev); 100 + if (prev && prev->type != type) 101 + s = roundup(s, rmm->block_size); 102 + 103 + next = node(this, next); 104 + if (next && next->type != type) 105 + e = rounddown(e, rmm->block_size); 106 + 107 + s = (s + align_mask) & ~align_mask; 108 + e &= ~align_mask; 109 + if (s > e || e - s < min) 140 110 continue; 141 111 142 - /* account for alignment */ 143 - splitoff = this->offset & (align - 1); 144 - if (splitoff) 145 - splitoff = align - splitoff; 146 - 147 - if (this->length <= splitoff) 148 - continue; 149 - 150 - /* determine total memory available from this, and 151 - * the next block (if appropriate) 152 - */ 153 - avail = this->length; 154 - if (next && next->free && (!next->type || next->type == type)) 155 - avail += next->length; 156 - 157 - avail -= splitoff; 158 - 159 - /* determine allocation size */ 160 - if (size_nc) { 161 - alloc = min(avail, size); 162 - alloc = rounddown(alloc, size_nc); 163 - if (alloc == 0) 164 - continue; 165 - } else { 166 - alloc = size; 167 - if (avail < alloc) 168 - continue; 169 - } 170 - 171 - /* untyped block, split off a chunk that's a multiple 172 - * of block_size and type it 173 - */ 174 - if (!this->type) { 175 - u32 block = roundup(alloc + splitoff, rmm->block_size); 176 - if (this->length < block) 177 - continue; 178 - 179 - this = region_split(rmm, this, block); 180 - if (!this) 181 - return -ENOMEM; 182 - 183 - this->type = type; 184 - } 185 - 186 - /* stealing memory from adjacent 
block */ 187 - if (alloc > this->length) { 188 - u32 amount = alloc - (this->length - splitoff); 189 - 190 - if (!next->type) { 191 - amount = roundup(amount, rmm->block_size); 192 - 193 - next = region_split(rmm, next, amount); 194 - if (!next) 195 - return -ENOMEM; 196 - 197 - next->type = type; 198 - } 199 - 200 - this->length += amount; 201 - next->offset += amount; 202 - next->length -= amount; 203 - if (!next->length) { 204 - list_del(&next->nl_entry); 205 - list_del(&next->fl_entry); 206 - kfree(next); 207 - } 208 - } 209 - 210 - if (splitoff) { 211 - if (!region_split(rmm, this, splitoff)) 212 - return -ENOMEM; 213 - } 214 - 215 - this = region_split(rmm, this, alloc); 216 - if (this == NULL) 112 + splitoff = s - this->offset; 113 + if (splitoff && !region_split(rmm, this, splitoff)) 217 114 return -ENOMEM; 218 115 219 - this->free = false; 116 + this = region_split(rmm, this, min(size, e - s)); 117 + if (!this) 118 + return -ENOMEM; 119 + 120 + this->type = type; 220 121 list_del(&this->fl_entry); 221 122 *pnode = this; 222 123 return 0; ··· 135 234 heap = kzalloc(sizeof(*heap), GFP_KERNEL); 136 235 if (!heap) 137 236 return -ENOMEM; 138 - heap->free = true; 139 237 heap->offset = roundup(offset, block); 140 238 heap->length = rounddown(offset + length, block) - heap->offset; 141 239
+1 -3
drivers/gpu/drm/nouveau/nouveau_mm.h
··· 30 30 struct list_head fl_entry; 31 31 struct list_head rl_entry; 32 32 33 - bool free; 34 - int type; 35 - 33 + u8 type; 36 34 u32 offset; 37 35 u32 length; 38 36 };
+1 -2
drivers/gpu/drm/nouveau/nv40_graph.c
··· 451 451 NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */ 452 452 453 453 /* curie */ 454 - if (dev_priv->chipset >= 0x60 || 455 - 0x00005450 & (1 << (dev_priv->chipset & 0x0f))) 454 + if (nv44_graph_class(dev)) 456 455 NVOBJ_CLASS(dev, 0x4497, GR); 457 456 else 458 457 NVOBJ_CLASS(dev, 0x4097, GR);
+5 -16
drivers/gpu/drm/nouveau/nv40_grctx.c
··· 118 118 */ 119 119 120 120 static int 121 - nv40_graph_4097(struct drm_device *dev) 122 - { 123 - struct drm_nouveau_private *dev_priv = dev->dev_private; 124 - 125 - if ((dev_priv->chipset & 0xf0) == 0x60) 126 - return 0; 127 - 128 - return !!(0x0baf & (1 << dev_priv->chipset)); 129 - } 130 - 131 - static int 132 121 nv40_graph_vs_count(struct drm_device *dev) 133 122 { 134 123 struct drm_nouveau_private *dev_priv = dev->dev_private; ··· 208 219 gr_def(ctx, 0x4009dc, 0x80000000); 209 220 } else { 210 221 cp_ctx(ctx, 0x400840, 20); 211 - if (!nv40_graph_4097(ctx->dev)) { 222 + if (nv44_graph_class(ctx->dev)) { 212 223 for (i = 0; i < 8; i++) 213 224 gr_def(ctx, 0x400860 + (i * 4), 0x00000001); 214 225 } ··· 217 228 gr_def(ctx, 0x400888, 0x00000040); 218 229 cp_ctx(ctx, 0x400894, 11); 219 230 gr_def(ctx, 0x400894, 0x00000040); 220 - if (nv40_graph_4097(ctx->dev)) { 231 + if (!nv44_graph_class(ctx->dev)) { 221 232 for (i = 0; i < 8; i++) 222 233 gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000); 223 234 } ··· 535 546 static void 536 547 nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx) 537 548 { 538 - int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084; 549 + int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684; 539 550 540 551 cp_out (ctx, 0x300000); 541 552 cp_lsr (ctx, len - 4); ··· 571 582 } else { 572 583 b0_offset = 0x1d40/4; /* 2200 */ 573 584 b1_offset = 0x3f40/4; /* 0b00 : 0a40 */ 574 - vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4; 585 + vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4; 575 586 } 576 587 577 588 cp_lsr(ctx, vs_len * vs_nr + 0x300/4); 578 - cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029); 589 + cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041); 579 590 580 591 offset = ctx->ctxvals_pos; 581 592 ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
+2 -12
drivers/gpu/drm/nouveau/nv40_mc.c
··· 6 6 int 7 7 nv40_mc_init(struct drm_device *dev) 8 8 { 9 - struct drm_nouveau_private *dev_priv = dev->dev_private; 10 - uint32_t tmp; 11 - 12 9 /* Power up everything, resetting each individual unit will 13 10 * be done later if needed. 14 11 */ 15 12 nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF); 16 13 17 - switch (dev_priv->chipset) { 18 - case 0x44: 19 - case 0x46: /* G72 */ 20 - case 0x4e: 21 - case 0x4c: /* C51_G7X */ 22 - tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA); 14 + if (nv44_graph_class(dev)) { 15 + u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA); 23 16 nv_wr32(dev, NV40_PMC_1700, tmp); 24 17 nv_wr32(dev, NV40_PMC_1704, 0); 25 18 nv_wr32(dev, NV40_PMC_1708, 0); 26 19 nv_wr32(dev, NV40_PMC_170C, tmp); 27 - break; 28 - default: 29 - break; 30 20 } 31 21 32 22 return 0;
+5 -2
drivers/gpu/drm/nouveau/nv50_instmem.c
··· 332 332 gpuobj->vinst = node->vram->offset; 333 333 334 334 if (gpuobj->flags & NVOBJ_FLAG_VM) { 335 - ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, 336 - NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS, 335 + u32 flags = NV_MEM_ACCESS_RW; 336 + if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER)) 337 + flags |= NV_MEM_ACCESS_SYS; 338 + 339 + ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags, 337 340 &node->chan_vma); 338 341 if (ret) { 339 342 vram->put(dev, &node->vram);
+2 -1
drivers/gpu/drm/nouveau/nvc0_graph.c
··· 105 105 if (ret) 106 106 return ret; 107 107 108 - ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM, 108 + ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, 109 + NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER, 109 110 &grch->unk418810); 110 111 if (ret) 111 112 return ret;
+2 -2
drivers/gpu/drm/nouveau/nvc0_vm.c
··· 48 48 phys >>= 8; 49 49 50 50 phys |= 0x00000001; /* present */ 51 - // if (vma->access & NV_MEM_ACCESS_SYS) 52 - // phys |= 0x00000002; 51 + if (vma->access & NV_MEM_ACCESS_SYS) 52 + phys |= 0x00000002; 53 53 54 54 phys |= ((u64)target << 32); 55 55 phys |= ((u64)memtype << 36);
+4 -26
drivers/gpu/drm/radeon/evergreen.c
··· 3002 3002 return 0; 3003 3003 } 3004 3004 3005 - static bool evergreen_card_posted(struct radeon_device *rdev) 3006 - { 3007 - u32 reg; 3008 - 3009 - /* first check CRTCs */ 3010 - if (rdev->flags & RADEON_IS_IGP) 3011 - reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | 3012 - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); 3013 - else 3014 - reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | 3015 - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | 3016 - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | 3017 - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | 3018 - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | 3019 - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); 3020 - if (reg & EVERGREEN_CRTC_MASTER_EN) 3021 - return true; 3022 - 3023 - /* then check MEM_SIZE, in case the crtcs are off */ 3024 - if (RREG32(CONFIG_MEMSIZE)) 3025 - return true; 3026 - 3027 - return false; 3028 - } 3029 - 3030 3005 /* Plan is to move initialization in that function and use 3031 3006 * helper function so that radeon_device_init pretty much 3032 3007 * do nothing more than calling asic specific function. This ··· 3038 3063 if (radeon_asic_reset(rdev)) 3039 3064 dev_warn(rdev->dev, "GPU reset failed !\n"); 3040 3065 /* Post card if necessary */ 3041 - if (!evergreen_card_posted(rdev)) { 3066 + if (!radeon_card_posted(rdev)) { 3042 3067 if (!rdev->bios) { 3043 3068 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 3044 3069 return -EINVAL; ··· 3132 3157 static void evergreen_pcie_gen2_enable(struct radeon_device *rdev) 3133 3158 { 3134 3159 u32 link_width_cntl, speed_cntl; 3160 + 3161 + if (radeon_pcie_gen2 == 0) 3162 + return; 3135 3163 3136 3164 if (rdev->flags & RADEON_IS_IGP) 3137 3165 return;
+6 -5
drivers/gpu/drm/radeon/r100.c
··· 2086 2086 { 2087 2087 struct r100_mc_save save; 2088 2088 u32 status, tmp; 2089 + int ret = 0; 2089 2090 2090 - r100_mc_stop(rdev, &save); 2091 2091 status = RREG32(R_000E40_RBBM_STATUS); 2092 2092 if (!G_000E40_GUI_ACTIVE(status)) { 2093 2093 return 0; 2094 2094 } 2095 + r100_mc_stop(rdev, &save); 2095 2096 status = RREG32(R_000E40_RBBM_STATUS); 2096 2097 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 2097 2098 /* stop CP */ ··· 2132 2131 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) { 2133 2132 dev_err(rdev->dev, "failed to reset GPU\n"); 2134 2133 rdev->gpu_lockup = true; 2135 - return -1; 2136 - } 2134 + ret = -1; 2135 + } else 2136 + dev_info(rdev->dev, "GPU reset succeed\n"); 2137 2137 r100_mc_resume(rdev, &save); 2138 - dev_info(rdev->dev, "GPU reset succeed\n"); 2139 - return 0; 2138 + return ret; 2140 2139 } 2141 2140 2142 2141 void r100_set_common_regs(struct radeon_device *rdev)
+6 -5
drivers/gpu/drm/radeon/r300.c
··· 405 405 { 406 406 struct r100_mc_save save; 407 407 u32 status, tmp; 408 + int ret = 0; 408 409 409 - r100_mc_stop(rdev, &save); 410 410 status = RREG32(R_000E40_RBBM_STATUS); 411 411 if (!G_000E40_GUI_ACTIVE(status)) { 412 412 return 0; 413 413 } 414 + r100_mc_stop(rdev, &save); 414 415 status = RREG32(R_000E40_RBBM_STATUS); 415 416 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 416 417 /* stop CP */ ··· 452 451 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { 453 452 dev_err(rdev->dev, "failed to reset GPU\n"); 454 453 rdev->gpu_lockup = true; 455 - return -1; 456 - } 454 + ret = -1; 455 + } else 456 + dev_info(rdev->dev, "GPU reset succeed\n"); 457 457 r100_mc_resume(rdev, &save); 458 - dev_info(rdev->dev, "GPU reset succeed\n"); 459 - return 0; 458 + return ret; 460 459 } 461 460 462 461 /*
+4 -19
drivers/gpu/drm/radeon/r600.c
··· 2358 2358 /* FIXME: implement */ 2359 2359 } 2360 2360 2361 - 2362 - bool r600_card_posted(struct radeon_device *rdev) 2363 - { 2364 - uint32_t reg; 2365 - 2366 - /* first check CRTCs */ 2367 - reg = RREG32(D1CRTC_CONTROL) | 2368 - RREG32(D2CRTC_CONTROL); 2369 - if (reg & CRTC_EN) 2370 - return true; 2371 - 2372 - /* then check MEM_SIZE, in case the crtcs are off */ 2373 - if (RREG32(CONFIG_MEMSIZE)) 2374 - return true; 2375 - 2376 - return false; 2377 - } 2378 - 2379 2361 int r600_startup(struct radeon_device *rdev) 2380 2362 { 2381 2363 int r; ··· 2518 2536 if (r) 2519 2537 return r; 2520 2538 /* Post card if necessary */ 2521 - if (!r600_card_posted(rdev)) { 2539 + if (!radeon_card_posted(rdev)) { 2522 2540 if (!rdev->bios) { 2523 2541 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 2524 2542 return -EINVAL; ··· 3639 3657 { 3640 3658 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; 3641 3659 u16 link_cntl2; 3660 + 3661 + if (radeon_pcie_gen2 == 0) 3662 + return; 3642 3663 3643 3664 if (rdev->flags & RADEON_IS_IGP) 3644 3665 return;
+1
drivers/gpu/drm/radeon/radeon.h
··· 92 92 extern int radeon_audio; 93 93 extern int radeon_disp_priority; 94 94 extern int radeon_hw_i2c; 95 + extern int radeon_pcie_gen2; 95 96 96 97 /* 97 98 * Copy from radeon_drv.h so we don't have to include both and have conflicting
+4
drivers/gpu/drm/radeon/radeon_drv.c
··· 104 104 int radeon_audio = 1; 105 105 int radeon_disp_priority = 0; 106 106 int radeon_hw_i2c = 0; 107 + int radeon_pcie_gen2 = 0; 107 108 108 109 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 109 110 module_param_named(no_wb, radeon_no_wb, int, 0444); ··· 147 146 148 147 MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)"); 149 148 module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); 149 + 150 + MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)"); 151 + module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444); 150 152 151 153 static int radeon_suspend(struct drm_device *dev, pm_message_t state) 152 154 {
+1 -1
drivers/gpu/drm/radeon/reg_srcs/evergreen
··· 439 439 0x000286EC SPI_COMPUTE_NUM_THREAD_X 440 440 0x000286F0 SPI_COMPUTE_NUM_THREAD_Y 441 441 0x000286F4 SPI_COMPUTE_NUM_THREAD_Z 442 - 0x000286F8 GDS_ADDR_SIZE 442 + 0x00028724 GDS_ADDR_SIZE 443 443 0x00028780 CB_BLEND0_CONTROL 444 444 0x00028784 CB_BLEND1_CONTROL 445 445 0x00028788 CB_BLEND2_CONTROL
+8 -8
drivers/gpu/drm/radeon/rs600.c
··· 339 339 340 340 int rs600_asic_reset(struct radeon_device *rdev) 341 341 { 342 - u32 status, tmp; 343 - 344 342 struct rv515_mc_save save; 343 + u32 status, tmp; 344 + int ret = 0; 345 345 346 - /* Stops all mc clients */ 347 - rv515_mc_stop(rdev, &save); 348 346 status = RREG32(R_000E40_RBBM_STATUS); 349 347 if (!G_000E40_GUI_ACTIVE(status)) { 350 348 return 0; 351 349 } 350 + /* Stops all mc clients */ 351 + rv515_mc_stop(rdev, &save); 352 352 status = RREG32(R_000E40_RBBM_STATUS); 353 353 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status); 354 354 /* stop CP */ ··· 392 392 if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) { 393 393 dev_err(rdev->dev, "failed to reset GPU\n"); 394 394 rdev->gpu_lockup = true; 395 - return -1; 396 - } 395 + ret = -1; 396 + } else 397 + dev_info(rdev->dev, "GPU reset succeed\n"); 397 398 rv515_mc_resume(rdev, &save); 398 - dev_info(rdev->dev, "GPU reset succeed\n"); 399 - return 0; 399 + return ret; 400 400 } 401 401 402 402 /*
+4 -1
drivers/gpu/drm/radeon/rv770.c
··· 1268 1268 if (r) 1269 1269 return r; 1270 1270 /* Post card if necessary */ 1271 - if (!r600_card_posted(rdev)) { 1271 + if (!radeon_card_posted(rdev)) { 1272 1272 if (!rdev->bios) { 1273 1273 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 1274 1274 return -EINVAL; ··· 1371 1371 { 1372 1372 u32 link_width_cntl, lanes, speed_cntl, tmp; 1373 1373 u16 link_cntl2; 1374 + 1375 + if (radeon_pcie_gen2 == 0) 1376 + return; 1374 1377 1375 1378 if (rdev->flags & RADEON_IS_IGP) 1376 1379 return;