Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (30 commits)
vgaarb: fix incorrect dereference of userspace pointer.
drm/radeon/kms: retry auxch on 0x20 timeout value.
drm/radeon: Skip dma copy test in benchmark if card doesn't have dma engine.
drm/vmwgfx: Fix a circular locking dependency bug.
drm/vmwgfx: Drop scanout flag compat and add execbuf ioctl parameter members. Bumps major.
drm/vmwgfx: Report proper framebuffer_{max|min}_{width|height}
drm/vmwgfx: Update the user-space interface.
drm/radeon/kms: fix screen clearing before fbcon.
nouveau: fix state detection with switchable graphics
drm/nouveau: move dereferences after null checks
drm/nv50: make the pgraph irq handler loop like the pre-nv50 version
drm/nv50: delete ramfc object after disabling fifo, not before
drm/nv50: avoid unloading pgraph context when ctxprog is running
drm/nv50: align size of buffer object to the right boundaries.
drm/nv50: disregard dac outputs in nv50_sor_dpms()
drm/nv50: prevent multiple init tables being parsed at the same time
drm/nouveau: make dp auxch xfer len check for reads only
drm/nv40: make INIT_COMPUTE_MEM a NOP, just like nv50
drm/nouveau: Add proper vgaarb support.
drm/nouveau: Fix fbcon on mixed pre-NV50 + NV50 multicard.
...

+398 -226
+6 -6
drivers/gpu/drm/nouveau/nouveau_acpi.c
···
 {
     int result;

-    if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
+    if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
             &result))
         return -ENODEV;

     NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);

-    if (result & 0x1) { /* Stamina mode - disable the external GPU */
+    if (result) { /* Ensure that the external GPU is enabled */
+        nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
+        nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
+                NULL);
+    } else { /* Stamina mode - disable the external GPU */
         nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
                 NULL);
         nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
-                NULL);
-    } else { /* Ensure that the external GPU is enabled */
-        nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
-        nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
                 NULL);
     }
+10 -9
drivers/gpu/drm/nouveau/nouveau_bios.c
···
     struct drm_nouveau_private *dev_priv = bios->dev->dev_private;

-    if (dev_priv->card_type >= NV_50)
+    if (dev_priv->card_type >= NV_40)
         return 1;

···
      */

     struct drm_nouveau_private *dev_priv = dev->dev_private;
-    struct init_exec iexec = {true, false};
     struct nvbios *bios = &dev_priv->VBIOS;
     uint8_t *table = &bios->data[bios->display.script_table_ptr];
     uint8_t *otable = NULL;
···
         }
     }

-    bios->display.output = dcbent;
-
     if (pxclk == 0) {
         script = ROM16(otable[6]);
         if (!script) {
···
         }

         NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
-        parse_init_table(bios, script, &iexec);
+        nouveau_bios_run_init_table(dev, script, dcbent);
     } else
     if (pxclk == -1) {
         script = ROM16(otable[8]);
···
         }

         NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
-        parse_init_table(bios, script, &iexec);
+        nouveau_bios_run_init_table(dev, script, dcbent);
     } else
     if (pxclk == -2) {
         if (table[4] >= 12)
···
         }

         NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
-        parse_init_table(bios, script, &iexec);
+        nouveau_bios_run_init_table(dev, script, dcbent);
     } else
     if (pxclk > 0) {
         script = ROM16(otable[table[4] + i*6 + 2]);
···
         }

         NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
-        parse_init_table(bios, script, &iexec);
+        nouveau_bios_run_init_table(dev, script, dcbent);
     } else
     if (pxclk < 0) {
         script = ROM16(otable[table[4] + i*6 + 4]);
···
         }

         NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
-        parse_init_table(bios, script, &iexec);
+        nouveau_bios_run_init_table(dev, script, dcbent);
     }

     return 0;
···
     struct drm_nouveau_private *dev_priv = dev->dev_private;
     struct nvbios *bios = &dev_priv->VBIOS;
     struct init_exec iexec = { true, false };
+    unsigned long flags;

+    spin_lock_irqsave(&bios->lock, flags);
     bios->display.output = dcbent;
     parse_init_table(bios, table, &iexec);
     bios->display.output = NULL;
+    spin_unlock_irqrestore(&bios->lock, flags);
 }

 static bool NVInitVBIOS(struct drm_device *dev)
···
     struct nvbios *bios = &dev_priv->VBIOS;

     memset(bios, 0, sizeof(struct nvbios));
+    spin_lock_init(&bios->lock);
     bios->dev = dev;

     if (!NVShadowVBIOS(dev, bios->data))
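The nouveau_bios.c change above serializes all init-table parsing behind a new bios->lock, since the parser keeps its state (bios->display.output, iexec) in the shared nvbios struct and can now be entered from more than one path. A minimal standalone sketch of the locking pattern it adopts; the struct and function names here are illustrative, not the driver's, and the parse work itself is elided:

    #include <linux/spinlock.h>

    struct nvbios_sketch {
        spinlock_t lock;    /* set up once with spin_lock_init() */
        void *output;       /* shared parser state, like bios->display.output */
    };

    static void run_init_table_sketch(struct nvbios_sketch *bios, void *dcbent)
    {
        unsigned long flags;

        /* _irqsave so a table run from interrupt context cannot deadlock
         * against one already holding the lock in process context */
        spin_lock_irqsave(&bios->lock, flags);
        bios->output = dcbent;
        /* ... parse_init_table() equivalent runs here ... */
        bios->output = NULL;
        spin_unlock_irqrestore(&bios->lock, flags);
    }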
+2
drivers/gpu/drm/nouveau/nouveau_bios.h
···
     struct drm_device *dev;
     struct nouveau_bios_info pub;

+    spinlock_t lock;
+
     uint8_t data[NV_PROM_SIZE];
     unsigned int length;
     bool execute;
+5 -5
drivers/gpu/drm/nouveau/nouveau_bo.c
···
     /*
      * Some of the tile_flags have a periodic structure of N*4096 bytes,
-     * align to to that as well as the page size. Overallocate memory to
-     * avoid corruption of other buffer objects.
+     * align to to that as well as the page size. Align the size to the
+     * appropriate boundaries. This does imply that sizes are rounded up
+     * 3-7 pages, so be aware of this and do not waste memory by allocating
+     * many small buffers.
      */
     if (dev_priv->card_type == NV_50) {
         uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
···
         case 0x2800:
         case 0x4800:
         case 0x7a00:
-            *size = roundup(*size, block_size);
             if (is_power_of_2(block_size)) {
-                *size += 3 * block_size;
                 for (i = 1; i < 10; i++) {
                     *align = 12 * i * block_size;
                     if (!(*align % 65536))
                         break;
                 }
             } else {
-                *size += 6 * block_size;
                 for (i = 1; i < 10; i++) {
                     *align = 8 * i * block_size;
                     if (!(*align % 65536))
                         break;
                 }
             }
+            *size = roundup(*size, *align);
             break;
         default:
             break;
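To make the size/alignment interplay concrete, here is a small hedged demo of the power-of-two branch above, runnable in user space. For a hypothetical 256 MiB card, block_size = (256 << 20) >> 15 = 8192, the loop settles on *align = 196608 (192 KiB) at i = 2, and *size is then rounded up to that boundary, which is where the "3-7 pages" of padding mentioned in the new comment come from:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the is_power_of_2(block_size) branch of the patched code. */
    static uint32_t nv50_tile_align_demo(uint32_t block_size)
    {
        uint32_t align = 0;
        int i;

        for (i = 1; i < 10; i++) {
            align = 12 * i * block_size;
            if (!(align % 65536))
                break;
        }
        return align;
    }

    int main(void)
    {
        uint32_t align = nv50_tile_align_demo(8192);

        printf("align = %u\n", align);                 /* 196608 */
        printf("size 300000 -> %u\n",                  /* 393216 */
               (300000 + align - 1) / align * align);  /* roundup() equivalent */
        return 0;
    }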
+3 -4
drivers/gpu/drm/nouveau/nouveau_channel.c
···
     /* Ensure the channel is no longer active on the GPU */
     pfifo->reassign(dev, false);

-    if (pgraph->channel(dev) == chan) {
-        pgraph->fifo_access(dev, false);
+    pgraph->fifo_access(dev, false);
+    if (pgraph->channel(dev) == chan)
         pgraph->unload_context(dev);
-        pgraph->fifo_access(dev, true);
-    }
     pgraph->destroy_context(chan);
+    pgraph->fifo_access(dev, true);

     if (pfifo->channel_id(dev) == chan->id) {
         pfifo->disable(dev);
+4 -3
drivers/gpu/drm/nouveau/nouveau_connector.c
···
 {
     struct nouveau_connector *nv_connector =
         nouveau_connector(drm_connector);
-    struct drm_device *dev = nv_connector->base.dev;
-
-    NV_DEBUG_KMS(dev, "\n");
+    struct drm_device *dev;

     if (!nv_connector)
         return;
+
+    dev = nv_connector->base.dev;
+    NV_DEBUG_KMS(dev, "\n");

     kfree(nv_connector->edid);
     drm_sysfs_connector_remove(drm_connector);
+5 -5
drivers/gpu/drm/nouveau/nouveau_dp.c
···
         break;
     }

-    if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
-        ret = -EREMOTEIO;
-        goto out;
-    }
-
     if (cmd & 1) {
+        if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
+            ret = -EREMOTEIO;
+            goto out;
+        }
+
         for (i = 0; i < 4; i++) {
             data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
             NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
+9 -1
drivers/gpu/drm/nouveau/nouveau_drv.c
···
 module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);

 MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
-int nouveau_vram_notify;
+int nouveau_vram_notify = 1;
 module_param_named(vram_notify, nouveau_vram_notify, int, 0400);

 MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
···
 MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
 int nouveau_ignorelid = 0;
 module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
+
+MODULE_PARM_DESC(noagp, "Disable all acceleration");
+int nouveau_noaccel = 0;
+module_param_named(noaccel, nouveau_noaccel, int, 0400);
+
+MODULE_PARM_DESC(noagp, "Disable fbcon acceleration");
+int nouveau_nofbaccel = 0;
+module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);

 MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
          "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
+2
drivers/gpu/drm/nouveau/nouveau_drv.h
···
 extern char *nouveau_vbios;
 extern int nouveau_ctxfw;
 extern int nouveau_ignorelid;
+extern int nouveau_nofbaccel;
+extern int nouveau_noaccel;

 /* nouveau_state.c */
 extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
+37 -3
drivers/gpu/drm/nouveau/nouveau_fbcon.c
···
     .fb_setcmap = drm_fb_helper_setcmap,
 };

+static struct fb_ops nv04_fbcon_ops = {
+    .owner = THIS_MODULE,
+    .fb_check_var = drm_fb_helper_check_var,
+    .fb_set_par = drm_fb_helper_set_par,
+    .fb_setcolreg = drm_fb_helper_setcolreg,
+    .fb_fillrect = nv04_fbcon_fillrect,
+    .fb_copyarea = nv04_fbcon_copyarea,
+    .fb_imageblit = nv04_fbcon_imageblit,
+    .fb_sync = nouveau_fbcon_sync,
+    .fb_pan_display = drm_fb_helper_pan_display,
+    .fb_blank = drm_fb_helper_blank,
+    .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static struct fb_ops nv50_fbcon_ops = {
+    .owner = THIS_MODULE,
+    .fb_check_var = drm_fb_helper_check_var,
+    .fb_set_par = drm_fb_helper_set_par,
+    .fb_setcolreg = drm_fb_helper_setcolreg,
+    .fb_fillrect = nv50_fbcon_fillrect,
+    .fb_copyarea = nv50_fbcon_copyarea,
+    .fb_imageblit = nv50_fbcon_imageblit,
+    .fb_sync = nouveau_fbcon_sync,
+    .fb_pan_display = drm_fb_helper_pan_display,
+    .fb_blank = drm_fb_helper_blank,
+    .fb_setcmap = drm_fb_helper_setcmap,
+};
+
 static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                     u16 blue, int regno)
 {
···
     dev_priv->fbdev_info = info;

     strcpy(info->fix.id, "nouveaufb");
-    info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
-              FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
+    if (nouveau_nofbaccel)
+        info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
+    else
+        info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
+                  FBINFO_HWACCEL_FILLRECT |
+                  FBINFO_HWACCEL_IMAGEBLIT;
     info->fbops = &nouveau_fbcon_ops;
     info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
                    dev_priv->vm_vram_base;
···
     par->nouveau_fb = nouveau_fb;
     par->dev = dev;

-    if (dev_priv->channel) {
+    if (dev_priv->channel && !nouveau_nofbaccel) {
         switch (dev_priv->card_type) {
         case NV_50:
             nv50_fbcon_accel_init(info);
+            info->fbops = &nv50_fbcon_ops;
             break;
         default:
             nv04_fbcon_accel_init(info);
+            info->fbops = &nv04_fbcon_ops;
             break;
         };
     }
+6
drivers/gpu/drm/nouveau/nouveau_fbcon.h
···
 void nouveau_fbcon_restore(void);
 void nouveau_fbcon_zfill(struct drm_device *dev);

+void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv04_fbcon_accel_init(struct fb_info *info);
+void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv50_fbcon_accel_init(struct fb_info *info);

 void nouveau_fbcon_gpu_lockup(struct fb_info *info);
+2
drivers/gpu/drm/nouveau/nouveau_gem.c
···
     }

     if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
+        spin_lock(&nvbo->bo.lock);
         ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
+        spin_unlock(&nvbo->bo.lock);
     } else {
         ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
         if (ret == 0)
+2 -2
drivers/gpu/drm/nouveau/nouveau_grctx.c
···
     }

     pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
-    if (!pgraph->ctxprog) {
-        NV_ERROR(dev, "OOM copying ctxprog\n");
+    if (!pgraph->ctxvals) {
+        NV_ERROR(dev, "OOM copying ctxvals\n");
         release_firmware(fw);
         nouveau_grctx_fini(dev);
         return -ENOMEM;
+87 -60
drivers/gpu/drm/nouveau/nouveau_irq.c
···
             get + 4);
     }

+    if (status & NV_PFIFO_INTR_SEMAPHORE) {
+        uint32_t sem;
+
+        status &= ~NV_PFIFO_INTR_SEMAPHORE;
+        nv_wr32(dev, NV03_PFIFO_INTR_0,
+            NV_PFIFO_INTR_SEMAPHORE);
+
+        sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+        nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+        nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+        nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+    }
+
     if (status) {
         NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
             status, chid);
···
 static void
 nv50_pgraph_irq_handler(struct drm_device *dev)
 {
-    uint32_t status, nsource;
-
-    status = nv_rd32(dev, NV03_PGRAPH_INTR);
-    nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-
-    if (status & 0x00000001) {
-        nouveau_pgraph_intr_notify(dev, nsource);
-        status &= ~0x00000001;
-        nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
-    }
-
-    if (status & 0x00000010) {
-        nouveau_pgraph_intr_error(dev, nsource |
-                      NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
-
-        status &= ~0x00000010;
-        nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
-    }
-
-    if (status & 0x00001000) {
-        nv_wr32(dev, 0x400500, 0x00000000);
-        nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
-        nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
-            NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
-        nv_wr32(dev, 0x400500, 0x00010001);
-
-        nv50_graph_context_switch(dev);
-
-        status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-    }
-
-    if (status & 0x00100000) {
-        nouveau_pgraph_intr_error(dev, nsource |
-                      NV03_PGRAPH_NSOURCE_DATA_ERROR);
-
-        status &= ~0x00100000;
-        nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
-    }
-
-    if (status & 0x00200000) {
-        int r;
-
-        nouveau_pgraph_intr_error(dev, nsource |
-                      NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
-
-        NV_ERROR(dev, "magic set 1:\n");
-        for (r = 0x408900; r <= 0x408910; r += 4)
-            NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
-        nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000);
-        for (r = 0x408e08; r <= 0x408e24; r += 4)
-            NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
-        nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000);
-
-        NV_ERROR(dev, "magic set 2:\n");
-        for (r = 0x409900; r <= 0x409910; r += 4)
-            NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
-        nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000);
-        for (r = 0x409e08; r <= 0x409e24; r += 4)
-            NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
-        nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000);
-
-        status &= ~0x00200000;
-        nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
-        nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
-    }
-
-    if (status) {
-        NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
-        nv_wr32(dev, NV03_PGRAPH_INTR, status);
-    }
-
-    {
-        const int isb = (1 << 16) | (1 << 0);
-
-        if ((nv_rd32(dev, 0x400500) & isb) != isb)
-            nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
-        nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
-    }
+    uint32_t status;
+
+    while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+        uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+
+        if (status & 0x00000001) {
+            nouveau_pgraph_intr_notify(dev, nsource);
+            status &= ~0x00000001;
+            nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
+        }
+
+        if (status & 0x00000010) {
+            nouveau_pgraph_intr_error(dev, nsource |
+                          NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
+
+            status &= ~0x00000010;
+            nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
+        }
+
+        if (status & 0x00001000) {
+            nv_wr32(dev, 0x400500, 0x00000000);
+            nv_wr32(dev, NV03_PGRAPH_INTR,
+                NV_PGRAPH_INTR_CONTEXT_SWITCH);
+            nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
+                NV40_PGRAPH_INTR_EN) &
+                ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
+            nv_wr32(dev, 0x400500, 0x00010001);
+
+            nv50_graph_context_switch(dev);
+
+            status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+        }
+
+        if (status & 0x00100000) {
+            nouveau_pgraph_intr_error(dev, nsource |
+                          NV03_PGRAPH_NSOURCE_DATA_ERROR);
+
+            status &= ~0x00100000;
+            nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
+        }
+
+        if (status & 0x00200000) {
+            int r;
+
+            nouveau_pgraph_intr_error(dev, nsource |
+                    NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
+
+            NV_ERROR(dev, "magic set 1:\n");
+            for (r = 0x408900; r <= 0x408910; r += 4)
+                NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+                    nv_rd32(dev, r));
+            nv_wr32(dev, 0x408900,
+                nv_rd32(dev, 0x408904) | 0xc0000000);
+            for (r = 0x408e08; r <= 0x408e24; r += 4)
+                NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+                    nv_rd32(dev, r));
+            nv_wr32(dev, 0x408e08,
+                nv_rd32(dev, 0x408e08) | 0xc0000000);
+
+            NV_ERROR(dev, "magic set 2:\n");
+            for (r = 0x409900; r <= 0x409910; r += 4)
+                NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+                    nv_rd32(dev, r));
+            nv_wr32(dev, 0x409900,
+                nv_rd32(dev, 0x409904) | 0xc0000000);
+            for (r = 0x409e08; r <= 0x409e24; r += 4)
+                NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+                    nv_rd32(dev, r));
+            nv_wr32(dev, 0x409e08,
+                nv_rd32(dev, 0x409e08) | 0xc0000000);
+
+            status &= ~0x00200000;
+            nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
+            nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
+        }
+
+        if (status) {
+            NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
+                status);
+            nv_wr32(dev, NV03_PGRAPH_INTR, status);
+        }
+
+        {
+            const int isb = (1 << 16) | (1 << 0);
+
+            if ((nv_rd32(dev, 0x400500) & isb) != isb)
+                nv_wr32(dev, 0x400500,
+                    nv_rd32(dev, 0x400500) | isb);
+        }
+    }

     nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
+    nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
 }

 static void
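The structural change above is easy to miss under the re-indentation: the whole handler body now sits in a while loop that re-reads NV03_PGRAPH_INTR, so interrupts raised while earlier ones were being serviced are drained in the same invocation instead of waiting for another top-level IRQ. A compilable toy sketch of that pattern; the register and helper names are stand-ins, not the driver's:

    #include <stdint.h>

    /* Hypothetical MMIO stand-ins for the nv_rd32()/nv_wr32() calls. */
    struct dev_sketch { volatile uint32_t intr; };

    static uint32_t read_status(struct dev_sketch *d) { return d->intr; }
    static void ack(struct dev_sketch *d, uint32_t bits) { d->intr &= ~bits; }
    static void handle_notify(struct dev_sketch *d) { (void)d; /* ... */ }

    #define BIT_NOTIFY 0x00000001

    static void pgraph_irq_sketch(struct dev_sketch *d)
    {
        uint32_t status;

        /* Keep draining until the status register reads back zero. */
        while ((status = read_status(d))) {
            if (status & BIT_NOTIFY) {
                handle_notify(d);
                status &= ~BIT_NOTIFY;
                ack(d, BIT_NOTIFY);
            }
            if (status)        /* unknown bits: ack so we cannot spin */
                ack(d, status);
        }
    }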
+10 -3
drivers/gpu/drm/nouveau/nouveau_notifier.c
···
 {
     struct drm_device *dev = chan->dev;
     struct nouveau_bo *ntfy = NULL;
+    uint32_t flags;
     int ret;

-    ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ?
-                  TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT,
+    if (nouveau_vram_notify)
+        flags = TTM_PL_FLAG_VRAM;
+    else
+        flags = TTM_PL_FLAG_TT;
+
+    ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
                   0, 0x0000, false, true, &ntfy);
     if (ret)
         return ret;

-    ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM);
+    ret = nouveau_bo_pin(ntfy, flags);
     if (ret)
         goto out_err;
···
             target = NV_DMA_TARGET_PCI;
         } else {
             target = NV_DMA_TARGET_AGP;
+            if (dev_priv->card_type >= NV_50)
+                offset += dev_priv->vm_gart_base;
         }
     } else {
         NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
+2 -1
drivers/gpu/drm/nouveau/nouveau_object.c
···
 nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
               struct nouveau_gpuobj **gpuobj_ret)
 {
-    struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+    struct drm_nouveau_private *dev_priv;
     struct nouveau_gpuobj *gpuobj;

     if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
         return -EINVAL;
+    dev_priv = chan->dev->dev_private;

     gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
     if (!gpuobj)
+1
drivers/gpu/drm/nouveau/nouveau_reg.h
···
  * the card will hang early on in the X init process.
  */
 #    define NV_PMC_ENABLE_UNK13        (1<<13)
+#define NV40_PMC_GRAPH_UNITS        0x00001540
 #define NV40_PMC_BACKLIGHT        0x000015f0
 #    define NV40_PMC_BACKLIGHT_MASK    0x001f0000
 #define NV40_PMC_1700            0x00001700
+4 -3
drivers/gpu/drm/nouveau/nouveau_sgdma.c
···
 nouveau_sgdma_clear(struct ttm_backend *be)
 {
     struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-    struct drm_device *dev = nvbe->dev;
-
-    NV_DEBUG(nvbe->dev, "\n");
+    struct drm_device *dev;

     if (nvbe && nvbe->pages) {
+        dev = nvbe->dev;
+        NV_DEBUG(dev, "\n");
+
         if (nvbe->bound)
             be->func->unbind(be);

+37 -12
drivers/gpu/drm/nouveau/nouveau_state.c
···
 static unsigned int
 nouveau_vga_set_decode(void *priv, bool state)
 {
+    struct drm_device *dev = priv;
+    struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+    if (dev_priv->chipset >= 0x40)
+        nv_wr32(dev, 0x88054, state);
+    else
+        nv_wr32(dev, 0x1854, state);
+
     if (state)
         return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
···
     if (ret)
         goto out_timer;

-    /* PGRAPH */
-    ret = engine->graph.init(dev);
-    if (ret)
-        goto out_fb;
+    if (nouveau_noaccel)
+        engine->graph.accel_blocked = true;
+    else {
+        /* PGRAPH */
+        ret = engine->graph.init(dev);
+        if (ret)
+            goto out_fb;

-    /* PFIFO */
-    ret = engine->fifo.init(dev);
-    if (ret)
-        goto out_graph;
+        /* PFIFO */
+        ret = engine->fifo.init(dev);
+        if (ret)
+            goto out_graph;
+    }

     /* this call irq_preinstall, register irq handler and
      * call irq_postinstall
···
 out_irq:
     drm_irq_uninstall(dev);
 out_fifo:
-    engine->fifo.takedown(dev);
+    if (!nouveau_noaccel)
+        engine->fifo.takedown(dev);
 out_graph:
-    engine->graph.takedown(dev);
+    if (!nouveau_noaccel)
+        engine->graph.takedown(dev);
 out_fb:
     engine->fb.takedown(dev);
 out_timer:
···
         dev_priv->channel = NULL;
     }

-    engine->fifo.takedown(dev);
-    engine->graph.takedown(dev);
+    if (!nouveau_noaccel) {
+        engine->fifo.takedown(dev);
+        engine->graph.takedown(dev);
+    }
     engine->fb.takedown(dev);
     engine->timer.takedown(dev);
     engine->mc.takedown(dev);
···
     case NOUVEAU_GETPARAM_VM_VRAM_BASE:
         getparam->value = dev_priv->vm_vram_base;
         break;
+    case NOUVEAU_GETPARAM_GRAPH_UNITS:
+        /* NV40 and NV50 versions are quite different, but register
+         * address is the same. User is supposed to know the card
+         * family anyway... */
+        if (dev_priv->chipset >= 0x40) {
+            getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
+            break;
+        }
+        /* FALLTHRU */
     default:
         NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
         return -EINVAL;
+3 -6
drivers/gpu/drm/nouveau/nv04_fbcon.c
···
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"

-static void
+void
 nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
     struct nouveau_fbcon_par *par = info->par;
···
     FIRE_RING(chan);
 }

-static void
+void
 nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
     struct nouveau_fbcon_par *par = info->par;
···
     FIRE_RING(chan);
 }

-static void
+void
 nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
     struct nouveau_fbcon_par *par = info->par;
···

     FIRE_RING(chan);

-    info->fbops->fb_fillrect = nv04_fbcon_fillrect;
-    info->fbops->fb_copyarea = nv04_fbcon_copyarea;
-    info->fbops->fb_imageblit = nv04_fbcon_imageblit;
     return 0;
 }
+7 -4
drivers/gpu/drm/nouveau/nv50_crtc.c
···
 static void
 nv50_crtc_destroy(struct drm_crtc *crtc)
 {
-    struct drm_device *dev = crtc->dev;
-    struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-    NV_DEBUG_KMS(dev, "\n");
+    struct drm_device *dev;
+    struct nouveau_crtc *nv_crtc;

     if (!crtc)
         return;
+
+    dev = crtc->dev;
+    nv_crtc = nouveau_crtc(crtc);
+
+    NV_DEBUG_KMS(dev, "\n");

     drm_crtc_cleanup(&nv_crtc->base);
+3 -6
drivers/gpu/drm/nouveau/nv50_fbcon.c
···
 #include "nouveau_dma.h"
 #include "nouveau_fbcon.h"

-static void
+void
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
     struct nouveau_fbcon_par *par = info->par;
···
     FIRE_RING(chan);
 }

-static void
+void
 nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
     struct nouveau_fbcon_par *par = info->par;
···
     FIRE_RING(chan);
 }

-static void
+void
 nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
     struct nouveau_fbcon_par *par = info->par;
···
     OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
          dev_priv->vm_vram_base);

-    info->fbops->fb_fillrect = nv50_fbcon_fillrect;
-    info->fbops->fb_copyarea = nv50_fbcon_copyarea;
-    info->fbops->fb_imageblit = nv50_fbcon_imageblit;
     return 0;
 }
+6 -3
drivers/gpu/drm/nouveau/nv50_fifo.c
···
 nv50_fifo_destroy_context(struct nouveau_channel *chan)
 {
     struct drm_device *dev = chan->dev;
+    struct nouveau_gpuobj_ref *ramfc = chan->ramfc;

     NV_DEBUG(dev, "ch%d\n", chan->id);

-    nouveau_gpuobj_ref_del(dev, &chan->ramfc);
-    nouveau_gpuobj_ref_del(dev, &chan->cache);
-
+    /* This will ensure the channel is seen as disabled. */
+    chan->ramfc = NULL;
     nv50_fifo_channel_disable(dev, chan->id, false);

     /* Dummy channel, also used on ch 127 */
     if (chan->id == 0)
         nv50_fifo_channel_disable(dev, 127, false);
+
+    nouveau_gpuobj_ref_del(dev, &ramfc);
+    nouveau_gpuobj_ref_del(dev, &chan->cache);
 }

 int
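The ordering is the point of this fix: the ramfc reference is detached first so the channel reads as disabled, the fifo is quiesced, and only then is the object released. A hedged standalone sketch of that teardown ordering, with plain free() standing in for the driver's gpuobj refcounting:

    #include <stdlib.h>

    struct chan_sketch { void *ramfc; };

    /* stand-in for nv50_fifo_channel_disable() */
    static void hw_channel_disable(void) { }

    static void destroy_ctx_sketch(struct chan_sketch *chan)
    {
        void *ramfc = chan->ramfc;  /* keep a local reference */

        chan->ramfc = NULL;         /* channel now reads as disabled */
        hw_channel_disable();       /* quiesce the hardware first */
        free(ramfc);                /* only then release the object */
    }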
+7 -3
drivers/gpu/drm/nouveau/nv50_graph.c
···
     uint32_t inst;
     int i;

+    /* Be sure we're not in the middle of a context switch or bad things
+     * will happen, such as unloading the wrong pgraph context.
+     */
+    if (!nv_wait(0x400300, 0x00000001, 0x00000000))
+        NV_ERROR(dev, "Ctxprog is still running\n");
+
     inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
     if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
         return NULL;
···
 int
 nv50_graph_unload_context(struct drm_device *dev)
 {
-    uint32_t inst, fifo = nv_rd32(dev, 0x400500);
+    uint32_t inst;

     inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
     if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
···
     inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;

     nouveau_wait_for_idle(dev);
-    nv_wr32(dev, 0x400500, fifo & ~1);
     nv_wr32(dev, 0x400784, inst);
     nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
     nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
     nouveau_wait_for_idle(dev);
-    nv_wr32(dev, 0x400500, fifo);

     nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
     return 0;
+1
drivers/gpu/drm/nouveau/nv50_sor.c
···
         struct nouveau_encoder *nvenc = nouveau_encoder(enc);

         if (nvenc == nv_encoder ||
+            nvenc->disconnect != nv50_sor_disconnect ||
             nvenc->dcb->or != nv_encoder->dcb->or)
             continue;
+8 -4
drivers/gpu/drm/radeon/Kconfig
···
 config DRM_RADEON_KMS
-    bool "Enable modesetting on radeon by default"
+    bool "Enable modesetting on radeon by default - NEW DRIVER"
     depends on DRM_RADEON
     help
-      Choose this option if you want kernel modesetting enabled by default,
-      and you have a new enough userspace to support this. Running old
-      userspaces with this enabled will cause pain.
+      Choose this option if you want kernel modesetting enabled by default.
+
+      This is a completely new driver. It's only part of the existing drm
+      for compatibility reasons. It requires an entirely different graphics
+      stack above it and works very differently from the old drm stack.
+      i.e. don't enable this unless you know what you are doing it may
+      cause issues or bugs compared to the previous userspace driver stack.

       When kernel modesetting is enabled the IOCTL of radeon/drm
       driver are considered as invalid and an error message is printed
+7 -3
drivers/gpu/drm/radeon/atombios_dp.c
···
     PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
     int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
     unsigned char *base;
+    int retry_count = 0;

     memset(&args, 0, sizeof(args));

     base = (unsigned char *)rdev->mode_info.atom_context->scratch;

+retry:
     memcpy(base, req_bytes, num_bytes);

     args.lpAuxRequest = 0;
···

     atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

-    if (args.ucReplyStatus) {
-        DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
+    if (args.ucReplyStatus && !args.ucDataOutLen) {
+        if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
+            goto retry;
+        DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
               req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
-              chan->rec.i2c_id, args.ucReplyStatus);
+              chan->rec.i2c_id, args.ucReplyStatus, retry_count);
         return false;
     }
+8
drivers/gpu/drm/radeon/r600.c
···
         DRM_ERROR("radeon: failled testing IB (%d).\n", r);
         return r;
     }
+
+    r = r600_audio_init(rdev);
+    if (r) {
+        DRM_ERROR("radeon: audio resume failed\n");
+        return r;
+    }
+
     return r;
 }
···
 {
     int r;

+    r600_audio_fini(rdev);
     /* FIXME: we should wait for ring to be empty */
     r600_cp_stop(rdev);
     rdev->cp.ready = false;
+1 -2
drivers/gpu/drm/radeon/r600_audio.c
···
     if (!r600_audio_chipset_supported(rdev))
         return;

-    WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
-
     del_timer(&rdev->audio_timer);
+    WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
 }
+9
drivers/gpu/drm/radeon/radeon_atombios.c
···
         *connector_type = DRM_MODE_CONNECTOR_DVID;
     }

+    /* XFX Pine Group device rv730 reports no VGA DDC lines
+     * even though they are wired up to record 0x93
+     */
+    if ((dev->pdev->device == 0x9498) &&
+        (dev->pdev->subsystem_vendor == 0x1682) &&
+        (dev->pdev->subsystem_device == 0x2452)) {
+        struct radeon_device *rdev = dev->dev_private;
+        *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
+    }
     return true;
 }
+33 -22
drivers/gpu/drm/radeon/radeon_benchmark.c
···
     if (r) {
         goto out_cleanup;
     }
-    start_jiffies = jiffies;
-    for (i = 0; i < n; i++) {
-        r = radeon_fence_create(rdev, &fence);
-        if (r) {
-            goto out_cleanup;
-        }
-        r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
-        if (r) {
-            goto out_cleanup;
-        }
-        r = radeon_fence_wait(fence, false);
-        if (r) {
-            goto out_cleanup;
-        }
-        radeon_fence_unref(&fence);
-    }
-    end_jiffies = jiffies;
-    time = end_jiffies - start_jiffies;
-    time = jiffies_to_msecs(time);
-    if (time > 0) {
-        i = ((n * size) >> 10) / time;
-        printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
-               " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
-               sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
-    }
+
+    /* r100 doesn't have dma engine so skip the test */
+    if (rdev->asic->copy_dma) {
+
+        start_jiffies = jiffies;
+        for (i = 0; i < n; i++) {
+            r = radeon_fence_create(rdev, &fence);
+            if (r) {
+                goto out_cleanup;
+            }
+
+            r = radeon_copy_dma(rdev, saddr, daddr,
+                        size / RADEON_GPU_PAGE_SIZE, fence);
+
+            if (r) {
+                goto out_cleanup;
+            }
+            r = radeon_fence_wait(fence, false);
+            if (r) {
+                goto out_cleanup;
+            }
+            radeon_fence_unref(&fence);
+        }
+        end_jiffies = jiffies;
+        time = end_jiffies - start_jiffies;
+        time = jiffies_to_msecs(time);
+        if (time > 0) {
+            i = ((n * size) >> 10) / time;
+            printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
+                   " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
+                   n, size >> 10,
+                   sdomain, ddomain, time,
+                   i, i * 1000, (i * 1000) / 1024);
+        }
+    }
+
     start_jiffies = jiffies;
     for (i = 0; i < n; i++) {
         r = radeon_fence_create(rdev, &fence);
+12 -8
drivers/gpu/drm/radeon/radeon_connectors.c
···
     struct radeon_connector *radeon_connector = to_radeon_connector(connector);
     struct drm_encoder *encoder;
     struct drm_encoder_helper_funcs *encoder_funcs;
-    bool dret;
+    bool dret = false;
     enum drm_connector_status ret = connector_status_disconnected;

     encoder = radeon_best_single_encoder(connector);
     if (!encoder)
         ret = connector_status_disconnected;

-    radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
-    dret = radeon_ddc_probe(radeon_connector);
-    radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+    if (radeon_connector->ddc_bus) {
+        radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+        dret = radeon_ddc_probe(radeon_connector);
+        radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+    }
     if (dret) {
         if (radeon_connector->edid) {
             kfree(radeon_connector->edid);
···
     struct drm_mode_object *obj;
     int i;
     enum drm_connector_status ret = connector_status_disconnected;
-    bool dret;
+    bool dret = false;

-    radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
-    dret = radeon_ddc_probe(radeon_connector);
-    radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+    if (radeon_connector->ddc_bus) {
+        radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+        dret = radeon_ddc_probe(radeon_connector);
+        radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+    }
     if (dret) {
         if (radeon_connector->edid) {
             kfree(radeon_connector->edid);
+10 -1
drivers/gpu/drm/radeon/radeon_display.c
···
         DRM_INFO("  %s\n", connector_names[connector->connector_type]);
         if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
             DRM_INFO("  %s\n", hpd_names[radeon_connector->hpd.hpd]);
-        if (radeon_connector->ddc_bus)
+        if (radeon_connector->ddc_bus) {
             DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
                  radeon_connector->ddc_bus->rec.mask_clk_reg,
                  radeon_connector->ddc_bus->rec.mask_data_reg,
···
                  radeon_connector->ddc_bus->rec.en_data_reg,
                  radeon_connector->ddc_bus->rec.y_clk_reg,
                  radeon_connector->ddc_bus->rec.y_data_reg);
+        } else {
+            if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
+                connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+                connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+                connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
+                connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+                connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+                DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
+        }
         DRM_INFO("  Encoders:\n");
         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
             radeon_encoder = to_radeon_encoder(encoder);
+1 -1
drivers/gpu/drm/radeon/radeon_fb.c
···
     if (ret)
         goto out_unref;

-    memset_io(fbptr, 0xff, aligned_size);
+    memset_io(fbptr, 0x0, aligned_size);

     strcpy(info->fix.id, "radeondrmfb");
+6 -5
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
···
 #include "ttm/ttm_execbuf_util.h"
 #include "ttm/ttm_module.h"

-#define VMWGFX_DRIVER_DATE "20090724"
-#define VMWGFX_DRIVER_MAJOR 0
-#define VMWGFX_DRIVER_MINOR 1
-#define VMWGFX_DRIVER_PATCHLEVEL 2
+#define VMWGFX_DRIVER_DATE "20100209"
+#define VMWGFX_DRIVER_MAJOR 1
+#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
···
     unsigned long static_buffer_size;
     bool using_bounce_buffer;
     uint32_t capabilities;
+    struct mutex fifo_mutex;
     struct rw_semaphore rwsem;
 };
···
      * Fencing and IRQs.
      */

-    uint32_t fence_seq;
+    atomic_t fence_seq;
     wait_queue_head_t fence_queue;
     wait_queue_head_t fifo_queue;
     atomic_t fence_queue_waiters;
+9 -8
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
···
     fifo->reserved_size = 0;
     fifo->using_bounce_buffer = false;

+    mutex_init(&fifo->fifo_mutex);
     init_rwsem(&fifo->rwsem);

     /*
···
          (unsigned int) min,
          (unsigned int) fifo->capabilities);

-    dev_priv->fence_seq = dev_priv->last_read_sequence;
+    atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
     iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);

     return vmw_fifo_send_fence(dev_priv, &dummy);
···
     uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
     int ret;

-    down_write(&fifo_state->rwsem);
+    mutex_lock(&fifo_state->fifo_mutex);
     max = ioread32(fifo_mem + SVGA_FIFO_MAX);
     min = ioread32(fifo_mem + SVGA_FIFO_MIN);
     next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
···
     }
 out_err:
     fifo_state->reserved_size = 0;
-    up_write(&fifo_state->rwsem);
+    mutex_unlock(&fifo_state->fifo_mutex);
     return NULL;
 }
···
     }

+    down_write(&fifo_state->rwsem);
     if (fifo_state->using_bounce_buffer || reserveable) {
         next_cmd += bytes;
         if (next_cmd >= max)
···
     if (reserveable)
         iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
     mb();
-    vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
     up_write(&fifo_state->rwsem);
+    vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+    mutex_unlock(&fifo_state->fifo_mutex);
 }

 int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
···
     fm = vmw_fifo_reserve(dev_priv, bytes);
     if (unlikely(fm == NULL)) {
-        down_write(&fifo_state->rwsem);
-        *sequence = dev_priv->fence_seq;
-        up_write(&fifo_state->rwsem);
+        *sequence = atomic_read(&dev_priv->fence_seq);
         ret = -ENOMEM;
         (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
                     false, 3*HZ);
···
     }

     do {
-        *sequence = dev_priv->fence_seq++;
+        *sequence = atomic_add_return(1, &dev_priv->fence_seq);
     } while (*sequence == 0);

     if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
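With fence_seq now an atomic_t, readers no longer need to take the rwsem, and the do/while keeps 0 out of circulation because the driver treats a zero sequence as "no fence". A minimal sketch of the allocation idiom the diff adopts, with hypothetical names:

    #include <linux/atomic.h>
    #include <linux/types.h>

    static atomic_t fence_seq_sketch = ATOMIC_INIT(0);

    static u32 next_fence_sequence(void)
    {
        u32 seq;

        do {
            seq = atomic_add_return(1, &fence_seq_sketch);
        } while (seq == 0);    /* skip 0 when the counter wraps */

        return seq;
    }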
+6
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
···
     case DRM_VMW_PARAM_FIFO_OFFSET:
         param->value = dev_priv->mmio_start;
         break;
+    case DRM_VMW_PARAM_HW_CAPS:
+        param->value = dev_priv->capabilities;
+        break;
+    case DRM_VMW_PARAM_FIFO_CAPS:
+        param->value = dev_priv->fifo.capabilities;
+        break;
     default:
         DRM_ERROR("Illegal vmwgfx get param request: %d\n",
               param->param);
+3 -10
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
···
         return true;

     /**
-     * Below is to signal stale fences that have wrapped.
-     * First, block fence submission.
-     */
-
-    down_read(&fifo_state->rwsem);
-
-    /**
      * Then check if the sequence is higher than what we've actually
      * emitted. Then the fence is stale and signaled.
      */

-    ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
-    up_read(&fifo_state->rwsem);
+    ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+           > VMW_FENCE_WRAP);

     return ret;
 }
···
     if (fifo_idle)
         down_read(&fifo_state->rwsem);
-    signal_seq = dev_priv->fence_seq;
+    signal_seq = atomic_read(&dev_priv->fence_seq);
     ret = 0;

     for (;;) {
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
···
     drm_mode_config_init(dev);
     dev->mode_config.funcs = &vmw_kms_funcs;
-    dev->mode_config.min_width = 640;
-    dev->mode_config.min_height = 480;
-    dev->mode_config.max_width = 2048;
-    dev->mode_config.max_height = 2048;
+    dev->mode_config.min_width = 1;
+    dev->mode_config.min_height = 1;
+    dev->mode_config.max_width = dev_priv->fb_max_width;
+    dev->mode_config.max_height = dev_priv->fb_max_height;

     ret = vmw_kms_init_legacy_display_system(dev_priv);
+1 -15
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
···
 #define VMW_RES_SURFACE ttm_driver_type1
 #define VMW_RES_STREAM ttm_driver_type2

-/* XXX: This isn't a real hardware flag, but just a hack for kernel to
- * know about primary surfaces. Find a better way to accomplish this.
- */
-#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)
-
 struct vmw_user_context {
     struct ttm_base_object base;
     struct vmw_resource res;
···
     srf->flags = req->flags;
     srf->format = req->format;
+    srf->scanout = req->scanout;
     memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
     srf->num_sizes = 0;
     for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
···
             srf->num_sizes * sizeof(*srf->sizes));
     if (unlikely(ret != 0))
         goto out_err1;
-
-    if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) {
-        /* we should not send this flag down to hardware since
-         * its not a official one
-         */
-        srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT;
-        srf->scanout = true;
-    } else {
-        srf->scanout = false;
-    }

     if (srf->scanout &&
         srf->num_sizes == 1 &&
+1 -1
drivers/gpu/vga/vgaarb.c
···
         remaining -= 7;
         pr_devel("client 0x%p called 'target'\n", priv);
         /* if target is default */
-        if (!strncmp(buf, "default", 7))
+        if (!strncmp(kbuf, "default", 7))
             pdev = pci_dev_get(vga_default_device());
         else {
             if (!vga_pci_str_to_vars(curr_pos, remaining,
+1
include/drm/nouveau_drm.h
···
 #define NOUVEAU_GETPARAM_PCI_PHYSICAL    10
 #define NOUVEAU_GETPARAM_CHIPSET_ID     11
 #define NOUVEAU_GETPARAM_VM_VRAM_BASE   12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS    13
 struct drm_nouveau_getparam {
     uint64_t param;
     uint64_t value;
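For completeness, a hedged sketch of how user space might query the new parameter. The struct is the one above; DRM_IOCTL_NOUVEAU_GETPARAM is assumed to be the usual ioctl wrapper from the drm headers (check your libdrm copy), and the returned value is the raw NV40_PMC_GRAPH_UNITS register contents, which the caller must decode per card family as the kernel comment says:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include "nouveau_drm.h"

    static int get_graph_units(int fd, uint64_t *units)
    {
        struct drm_nouveau_getparam gp;

        memset(&gp, 0, sizeof(gp));
        gp.param = NOUVEAU_GETPARAM_GRAPH_UNITS;

        if (ioctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM, &gp))
            return -1;

        *units = gp.value;    /* raw register value, family-specific layout */
        return 0;
    }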
+17 -3
include/drm/vmwgfx_drm.h
···
 #define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
 #define DRM_VMW_PARAM_3D               2
 #define DRM_VMW_PARAM_FIFO_OFFSET      3
-
+#define DRM_VMW_PARAM_HW_CAPS          4
+#define DRM_VMW_PARAM_FIFO_CAPS        5

 /**
  * struct drm_vmw_getparam_arg
···
  * The size of the array should equal the total number of mipmap levels.
  * @shareable: Boolean whether other clients (as identified by file descriptors)
  * may reference this surface.
+ * @scanout: Boolean whether the surface is intended to be used as a
+ * scanout.
  *
  * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
  * Output data from the DRM_VMW_REF_SURFACE Ioctl.
···
     uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
     uint64_t size_addr;
     int32_t shareable;
-    uint32_t pad64;
+    int32_t scanout;
 };

 /**
···
  *
  * @commands: User-space address of a command buffer cast to an uint64_t.
  * @command-size: Size in bytes of the command buffer.
+ * @throttle-us: Sleep until software is less than @throttle_us
+ * microseconds ahead of hardware. The driver may round this value
+ * to the nearest kernel tick.
  * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
  * uint64_t.
+ * @version: Allows expanding the execbuf ioctl parameters without breaking
+ * backwards compatibility, since user-space will always tell the kernel
+ * which version it uses.
+ * @flags: Execbuf flags. None currently.
  *
  * Argument to the DRM_VMW_EXECBUF Ioctl.
  */

+#define DRM_VMW_EXECBUF_VERSION 0
+
 struct drm_vmw_execbuf_arg {
     uint64_t commands;
     uint32_t command_size;
-    uint32_t pad64;
+    uint32_t throttle_us;
     uint64_t fence_rep;
+    uint32_t version;
+    uint32_t flags;
 };

 /**
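Finally, a hedged sketch of how a user-space driver might fill the widened execbuf argument. The ioctl submission itself and the fence_rep handling are omitted (both are driver-specific); the point is that version must always be set so the kernel can keep honouring older layouts:

    #include <stdint.h>
    #include <string.h>
    #include "vmwgfx_drm.h"

    static void fill_execbuf_arg(struct drm_vmw_execbuf_arg *arg,
                                 void *commands, uint32_t size,
                                 uint32_t throttle_us)
    {
        memset(arg, 0, sizeof(*arg));
        arg->commands = (uintptr_t)commands;
        arg->command_size = size;
        arg->throttle_us = throttle_us;  /* 0 = no throttling */
        arg->version = DRM_VMW_EXECBUF_VERSION;
        arg->flags = 0;                  /* none defined yet */
        /* fence_rep left zero here; a real caller would point it at a
         * struct drm_vmw_fence_rep to receive fence information. */
    }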