Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
"Nothing too astounding

- nouveau: bunch of regression fixes and oops fixes
- radeon: UMS fixes, rn50 fix, dma fix
- udl: fix EDID retrieval for large EDIDs."

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
udldrmfb: udl_get_edid: drop unneeded i--
udldrmfb: udl_get_edid: usb_control_msg buffer must not be on the stack
udldrmfb: Fix EDID not working with monitors with EDID extension blocks
drm/nvc0/fb: fix crash when different mutex is used to protect same list
drm/nouveau/clock: fix support for more than 2 monitors on nve0
drm/nv50/disp: fix selection of bios script for analog outputs
drm/nv17-50: restore fence buffer on resume
drm/nouveau: fix blank LVDS screen regression on pre-nv50 cards
drm/nouveau: fix nouveau_client allocation failure path
drm/nouveau: don't return freed object from nouveau_handle_create
drm/nouveau/vm: fix memory corruption when pgt allocation fails
drm/nouveau: add locking around instobj list operations
drm/nouveau: do not forcibly power on lvds panels
drm/nouveau/devinit: ensure legacy vga control is enabled during post
radeon/kms: fix dma relocation checking
radeon/kms: force rn50 chip to always report connected on analog output
drm/radeon: fix error path in kpage allocation
drm/radeon: fix a bogus kfree
drm/radeon: fix NULL pointer dereference in UMS mode

+149 -63
+1 -3
drivers/gpu/drm/nouveau/core/core/client.c
··· 66 66 67 67 ret = nouveau_handle_create(nv_object(client), ~0, ~0, 68 68 nv_object(client), &client->root); 69 - if (ret) { 70 - nouveau_namedb_destroy(&client->base); 69 + if (ret) 71 70 return ret; 72 - } 73 71 74 72 /* prevent init/fini being called, os in in charge of this */ 75 73 atomic_set(&nv_object(client)->usecount, 2);
+4 -1
drivers/gpu/drm/nouveau/core/core/handle.c
··· 109 109 while (!nv_iclass(namedb, NV_NAMEDB_CLASS)) 110 110 namedb = namedb->parent; 111 111 112 - handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL); 112 + handle = kzalloc(sizeof(*handle), GFP_KERNEL); 113 113 if (!handle) 114 114 return -ENOMEM; 115 115 ··· 146 146 } 147 147 148 148 hprintk(handle, TRACE, "created\n"); 149 + 150 + *phandle = handle; 151 + 149 152 return 0; 150 153 } 151 154
+26 -20
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
··· 851 851 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) 852 852 ctrl = nv_rd32(priv, 0x610b5c + (i * 8)); 853 853 854 - if (nv_device(priv)->chipset < 0x90 || 855 - nv_device(priv)->chipset == 0x92 || 856 - nv_device(priv)->chipset == 0xa0) { 857 - for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) 858 - ctrl = nv_rd32(priv, 0x610b74 + (i * 8)); 859 - i += 3; 860 - } else { 861 - for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) 862 - ctrl = nv_rd32(priv, 0x610798 + (i * 8)); 863 - i += 3; 854 + if (!(ctrl & (1 << head))) { 855 + if (nv_device(priv)->chipset < 0x90 || 856 + nv_device(priv)->chipset == 0x92 || 857 + nv_device(priv)->chipset == 0xa0) { 858 + for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) 859 + ctrl = nv_rd32(priv, 0x610b74 + (i * 8)); 860 + i += 4; 861 + } else { 862 + for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) 863 + ctrl = nv_rd32(priv, 0x610798 + (i * 8)); 864 + i += 4; 865 + } 864 866 } 865 867 866 868 if (!(ctrl & (1 << head))) 867 869 return false; 870 + i--; 868 871 869 872 data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info); 870 873 if (data) { ··· 901 898 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++) 902 899 ctrl = nv_rd32(priv, 0x610b58 + (i * 8)); 903 900 904 - if (nv_device(priv)->chipset < 0x90 || 905 - nv_device(priv)->chipset == 0x92 || 906 - nv_device(priv)->chipset == 0xa0) { 907 - for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) 908 - ctrl = nv_rd32(priv, 0x610b70 + (i * 8)); 909 - i += 3; 910 - } else { 911 - for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) 912 - ctrl = nv_rd32(priv, 0x610794 + (i * 8)); 913 - i += 3; 901 + if (!(ctrl & (1 << head))) { 902 + if (nv_device(priv)->chipset < 0x90 || 903 + nv_device(priv)->chipset == 0x92 || 904 + nv_device(priv)->chipset == 0xa0) { 905 + for (i = 0; !(ctrl & (1 << head)) && i < 2; i++) 906 + ctrl = nv_rd32(priv, 0x610b70 + (i * 8)); 907 + i += 4; 908 + } else { 909 + for (i = 0; !(ctrl & (1 << head)) && i < 4; i++) 910 + ctrl = nv_rd32(priv, 0x610794 + (i * 8)); 911 + i += 4; 912 + } 914 913 } 915 914 916 915 if (!(ctrl & (1 << head))) 917 916 return 0x0000; 917 + i--; 918 918 919 919 data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1); 920 920 if (!data)
+3
drivers/gpu/drm/nouveau/core/include/core/client.h
··· 36 36 37 37 int nouveau_client_create_(const char *name, u64 device, const char *cfg, 38 38 const char *dbg, int, void **); 39 + #define nouveau_client_destroy(p) \ 40 + nouveau_namedb_destroy(&(p)->base) 41 + 39 42 int nouveau_client_init(struct nouveau_client *); 40 43 int nouveau_client_fini(struct nouveau_client *, bool suspend); 41 44
+2
drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
··· 38 38 PLL_UNK42 = 0x42, 39 39 PLL_VPLL0 = 0x80, 40 40 PLL_VPLL1 = 0x81, 41 + PLL_VPLL2 = 0x82, 42 + PLL_VPLL3 = 0x83, 41 43 PLL_MAX = 0xff 42 44 }; 43 45
-1
drivers/gpu/drm/nouveau/core/subdev/bios/init.c
··· 1534 1534 mdelay(10); 1535 1535 init_wr32(init, 0x614100, 0x10000018); 1536 1536 init_wr32(init, 0x614900, 0x10000018); 1537 - return; 1538 1537 } 1539 1538 1540 1539 value = init_rdport(init, port) & mask;
+2
drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
··· 52 52 switch (info.type) { 53 53 case PLL_VPLL0: 54 54 case PLL_VPLL1: 55 + case PLL_VPLL2: 56 + case PLL_VPLL3: 55 57 nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100); 56 58 nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M); 57 59 nv_wr32(priv, info.reg + 0x10, fN << 16);
+3 -3
drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
··· 145 145 mem->memtype = type; 146 146 mem->size = size; 147 147 148 - mutex_lock(&mm->mutex); 148 + mutex_lock(&pfb->base.mutex); 149 149 do { 150 150 if (back) 151 151 ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r); 152 152 else 153 153 ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r); 154 154 if (ret) { 155 - mutex_unlock(&mm->mutex); 155 + mutex_unlock(&pfb->base.mutex); 156 156 pfb->ram.put(pfb, &mem); 157 157 return ret; 158 158 } ··· 160 160 list_add_tail(&r->rl_entry, &mem->regions); 161 161 size -= r->length; 162 162 } while (size); 163 - mutex_unlock(&mm->mutex); 163 + mutex_unlock(&pfb->base.mutex); 164 164 165 165 r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry); 166 166 mem->offset = (u64)r->offset << 12;
+27 -8
drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
··· 40 40 if (ret) 41 41 return ret; 42 42 43 + mutex_lock(&imem->base.mutex); 43 44 list_add(&iobj->head, &imem->list); 45 + mutex_unlock(&imem->base.mutex); 44 46 return 0; 45 47 } 46 48 47 49 void 48 50 nouveau_instobj_destroy(struct nouveau_instobj *iobj) 49 51 { 50 - if (iobj->head.prev) 51 - list_del(&iobj->head); 52 + struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine); 53 + 54 + mutex_lock(&subdev->mutex); 55 + list_del(&iobj->head); 56 + mutex_unlock(&subdev->mutex); 57 + 52 58 return nouveau_object_destroy(&iobj->base); 53 59 } 54 60 ··· 94 88 if (ret) 95 89 return ret; 96 90 91 + mutex_lock(&imem->base.mutex); 92 + 97 93 list_for_each_entry(iobj, &imem->list, head) { 98 94 if (iobj->suspend) { 99 95 for (i = 0; i < iobj->size; i += 4) ··· 105 97 } 106 98 } 107 99 100 + mutex_unlock(&imem->base.mutex); 101 + 108 102 return 0; 109 103 } 110 104 ··· 114 104 nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend) 115 105 { 116 106 struct nouveau_instobj *iobj; 117 - int i; 107 + int i, ret = 0; 118 108 119 109 if (suspend) { 110 + mutex_lock(&imem->base.mutex); 111 + 120 112 list_for_each_entry(iobj, &imem->list, head) { 121 113 iobj->suspend = vmalloc(iobj->size); 122 - if (iobj->suspend) { 123 - for (i = 0; i < iobj->size; i += 4) 124 - iobj->suspend[i / 4] = nv_ro32(iobj, i); 125 - } else 126 - return -ENOMEM; 114 + if (!iobj->suspend) { 115 + ret = -ENOMEM; 116 + break; 117 + } 118 + 119 + for (i = 0; i < iobj->size; i += 4) 120 + iobj->suspend[i / 4] = nv_ro32(iobj, i); 127 121 } 122 + 123 + mutex_unlock(&imem->base.mutex); 124 + 125 + if (ret) 126 + return ret; 128 127 } 129 128 130 129 return nouveau_subdev_fini(&imem->base, suspend);
+3 -1
drivers/gpu/drm/nouveau/core/subdev/vm/base.c
··· 352 352 u64 mm_length = (offset + length) - mm_offset; 353 353 int ret; 354 354 355 - vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL); 355 + vm = kzalloc(sizeof(*vm), GFP_KERNEL); 356 356 if (!vm) 357 357 return -ENOMEM; 358 358 ··· 375 375 kfree(vm); 376 376 return ret; 377 377 } 378 + 379 + *pvm = vm; 378 380 379 381 return 0; 380 382 }
+26 -4
drivers/gpu/drm/nouveau/nouveau_connector.c
··· 127 127 struct nouveau_encoder **pnv_encoder) 128 128 { 129 129 struct drm_device *dev = connector->dev; 130 + struct nouveau_connector *nv_connector = nouveau_connector(connector); 130 131 struct nouveau_drm *drm = nouveau_drm(dev); 132 + struct nouveau_gpio *gpio = nouveau_gpio(drm->device); 131 133 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 132 - int i; 134 + struct nouveau_i2c_port *port = NULL; 135 + int i, panel = -ENODEV; 136 + 137 + /* eDP panels need powering on by us (if the VBIOS doesn't default it 138 + * to on) before doing any AUX channel transactions. LVDS panel power 139 + * is handled by the SOR itself, and not required for LVDS DDC. 140 + */ 141 + if (nv_connector->type == DCB_CONNECTOR_eDP) { 142 + panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff); 143 + if (panel == 0) { 144 + gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1); 145 + msleep(300); 146 + } 147 + } 133 148 134 149 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 135 - struct nouveau_i2c_port *port = NULL; 136 150 struct nouveau_encoder *nv_encoder; 137 151 struct drm_mode_object *obj; 138 152 int id; ··· 164 150 port = i2c->find(i2c, nv_encoder->dcb->i2c_index); 165 151 if (port && nv_probe_i2c(port, 0x50)) { 166 152 *pnv_encoder = nv_encoder; 167 - return port; 153 + break; 168 154 } 155 + 156 + port = NULL; 169 157 } 170 158 171 - return NULL; 159 + /* eDP panel not detected, restore panel power GPIO to previous 160 + * state to avoid confusing the SOR for other output types. 161 + */ 162 + if (!port && panel == 0) 163 + gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel); 164 + 165 + return port; 172 166 } 173 167 174 168 static struct nouveau_encoder *
-9
drivers/gpu/drm/nouveau/nouveau_display.c
··· 225 225 if (ret) 226 226 return ret; 227 227 228 - /* power on internal panel if it's not already. the init tables of 229 - * some vbios default this to off for some reason, causing the 230 - * panel to not work after resume 231 - */ 232 - if (gpio && gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff) == 0) { 233 - gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1); 234 - msleep(300); 235 - } 236 - 237 228 /* enable polling for external displays */ 238 229 drm_kms_helper_poll_enable(dev); 239 230
+6 -1
drivers/gpu/drm/nouveau/nouveau_drm.c
··· 84 84 struct nouveau_cli *cli; 85 85 int ret; 86 86 87 + *pcli = NULL; 87 88 ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config, 88 89 nouveau_debug, size, pcli); 89 90 cli = *pcli; 90 - if (ret) 91 + if (ret) { 92 + if (cli) 93 + nouveau_client_destroy(&cli->base); 94 + *pcli = NULL; 91 95 return ret; 96 + } 92 97 93 98 mutex_init(&cli->mutex); 94 99 return 0;
+1
drivers/gpu/drm/nouveau/nouveau_fence.h
··· 60 60 void nv10_fence_context_del(struct nouveau_channel *); 61 61 void nv10_fence_destroy(struct nouveau_drm *); 62 62 int nv10_fence_create(struct nouveau_drm *); 63 + void nv17_fence_resume(struct nouveau_drm *drm); 63 64 64 65 int nv50_fence_create(struct nouveau_drm *); 65 66 int nv84_fence_create(struct nouveau_drm *);
+1 -1
drivers/gpu/drm/nouveau/nv04_dfp.c
··· 505 505 506 506 static inline bool is_powersaving_dpms(int mode) 507 507 { 508 - return (mode != DRM_MODE_DPMS_ON); 508 + return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED; 509 509 } 510 510 511 511 static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
+8
drivers/gpu/drm/nouveau/nv10_fence.c
··· 162 162 kfree(priv); 163 163 } 164 164 165 + void nv17_fence_resume(struct nouveau_drm *drm) 166 + { 167 + struct nv10_fence_priv *priv = drm->fence; 168 + 169 + nouveau_bo_wr32(priv->bo, 0, priv->sequence); 170 + } 171 + 165 172 int 166 173 nv10_fence_create(struct nouveau_drm *drm) 167 174 { ··· 204 197 if (ret == 0) { 205 198 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000); 206 199 priv->base.sync = nv17_fence_sync; 200 + priv->base.resume = nv17_fence_resume; 207 201 } 208 202 } 209 203
+1
drivers/gpu/drm/nouveau/nv50_fence.c
··· 122 122 if (ret == 0) { 123 123 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000); 124 124 priv->base.sync = nv17_fence_sync; 125 + priv->base.resume = nv17_fence_resume; 125 126 } 126 127 127 128 if (ret)
+7 -5
drivers/gpu/drm/radeon/r600_cs.c
··· 2476 2476 kfree(parser->relocs); 2477 2477 for (i = 0; i < parser->nchunks; i++) { 2478 2478 kfree(parser->chunks[i].kdata); 2479 - kfree(parser->chunks[i].kpage[0]); 2480 - kfree(parser->chunks[i].kpage[1]); 2479 + if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) { 2480 + kfree(parser->chunks[i].kpage[0]); 2481 + kfree(parser->chunks[i].kpage[1]); 2482 + } 2481 2483 } 2482 2484 kfree(parser->chunks); 2483 2485 kfree(parser->chunks_array); ··· 2563 2561 struct radeon_cs_chunk *relocs_chunk; 2564 2562 unsigned idx; 2565 2563 2564 + *cs_reloc = NULL; 2566 2565 if (p->chunk_relocs_idx == -1) { 2567 2566 DRM_ERROR("No relocation chunk !\n"); 2568 2567 return -EINVAL; 2569 2568 } 2570 - *cs_reloc = NULL; 2571 2569 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 2572 2570 idx = p->dma_reloc_idx; 2573 - if (idx >= relocs_chunk->length_dw) { 2571 + if (idx >= p->nrelocs) { 2574 2572 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 2575 - idx, relocs_chunk->length_dw); 2573 + idx, p->nrelocs); 2576 2574 return -EINVAL; 2577 2575 } 2578 2576 *cs_reloc = p->relocs_ptr[idx];
+5 -4
drivers/gpu/drm/radeon/radeon_cs.c
··· 279 279 p->chunks[p->chunk_ib_idx].length_dw); 280 280 return -EINVAL; 281 281 } 282 - if ((p->rdev->flags & RADEON_IS_AGP)) { 282 + if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) { 283 283 p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); 284 284 p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); 285 285 if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL || 286 286 p->chunks[p->chunk_ib_idx].kpage[1] == NULL) { 287 - kfree(p->chunks[i].kpage[0]); 288 - kfree(p->chunks[i].kpage[1]); 287 + kfree(p->chunks[p->chunk_ib_idx].kpage[0]); 288 + kfree(p->chunks[p->chunk_ib_idx].kpage[1]); 289 289 return -ENOMEM; 290 290 } 291 291 } ··· 583 583 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; 584 584 int i; 585 585 int size = PAGE_SIZE; 586 - bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true; 586 + bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ? 587 + false : true; 587 588 588 589 for (i = ibc->last_copied_page + 1; i < pg_idx; i++) { 589 590 if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
+8
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
··· 640 640 enum drm_connector_status found = connector_status_disconnected; 641 641 bool color = true; 642 642 643 + /* just don't bother on RN50 those chip are often connected to remoting 644 + * console hw and often we get failure to load detect those. So to make 645 + * everyone happy report the encoder as always connected. 646 + */ 647 + if (ASIC_IS_RN50(rdev)) { 648 + return connector_status_connected; 649 + } 650 + 643 651 /* save the regs we need */ 644 652 vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL); 645 653 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+15 -2
drivers/gpu/drm/udl/udl_connector.c
··· 22 22 static u8 *udl_get_edid(struct udl_device *udl) 23 23 { 24 24 u8 *block; 25 - char rbuf[3]; 25 + char *rbuf; 26 26 int ret, i; 27 27 28 28 block = kmalloc(EDID_LENGTH, GFP_KERNEL); 29 29 if (block == NULL) 30 30 return NULL; 31 + 32 + rbuf = kmalloc(2, GFP_KERNEL); 33 + if (rbuf == NULL) 34 + goto error; 31 35 32 36 for (i = 0; i < EDID_LENGTH; i++) { 33 37 ret = usb_control_msg(udl->ddev->usbdev, ··· 40 36 HZ); 41 37 if (ret < 1) { 42 38 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); 43 - i--; 44 39 goto error; 45 40 } 46 41 block[i] = rbuf[1]; 47 42 } 48 43 44 + kfree(rbuf); 49 45 return block; 50 46 51 47 error: 52 48 kfree(block); 49 + kfree(rbuf); 53 50 return NULL; 54 51 } 55 52 ··· 61 56 int ret; 62 57 63 58 edid = (struct edid *)udl_get_edid(udl); 59 + 60 + /* 61 + * We only read the main block, but if the monitor reports extension 62 + * blocks then the drm edid code expects them to be present, so patch 63 + * the extension count to 0. 64 + */ 65 + edid->checksum += edid->extensions; 66 + edid->extensions = 0; 64 67 65 68 drm_mode_connector_update_edid_property(connector, edid); 66 69 ret = drm_add_edid_modes(connector, edid);