Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: gma500: Intel GMA500 staging driver

This is an initial staging driver for the GMA500. It's been stripped out
of the PVR drivers and crunched together from various bits of code and
different kernels.

Currently it's unaccelerated but still pretty snappy even compositing with
the frame buffer X server.

Lots of work is needed to rework the ttm and bo interfaces from being
ripped out and then 2D acceleration wants putting back for framebuffer and
somehow eventually via DRM.

There is no support for the parts without open source userspace (video
accelerators, 3D) as per kernel policy.

Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

authored by

Alan Cox and committed by
Greg Kroah-Hartman
0867b421 008536e8

+19346 -4
+2
drivers/staging/Kconfig
··· 179 179 180 180 source "drivers/staging/ste_rmi4/Kconfig" 181 181 182 + source "drivers/staging/gma500/Kconfig" 183 + 182 184 endif # !STAGING_EXCLUDE_BUILD 183 185 endif # STAGING
+5 -4
drivers/staging/Makefile
··· 61 61 obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge/ 62 62 obj-$(CONFIG_ACPI_QUICKSTART) += quickstart/ 63 63 obj-$(CONFIG_WESTBRIDGE_ASTORIA) += westbridge/astoria/ 64 - obj-$(CONFIG_SBE_2T3E3) += sbe-2t3e3/ 64 + obj-$(CONFIG_SBE_2T3E3) += sbe-2t3e3/ 65 65 obj-$(CONFIG_ATH6K_LEGACY) += ath6kl/ 66 66 obj-$(CONFIG_USB_ENESTORAGE) += keucr/ 67 - obj-$(CONFIG_BCM_WIMAX) += bcm/ 67 + obj-$(CONFIG_BCM_WIMAX) += bcm/ 68 68 obj-$(CONFIG_FT1000) += ft1000/ 69 - obj-$(CONFIG_SND_INTEL_SST) += intel_sst/ 70 - obj-$(CONFIG_SPEAKUP) += speakup/ 69 + obj-$(CONFIG_SND_INTEL_SST) += intel_sst/ 70 + obj-$(CONFIG_SPEAKUP) += speakup/ 71 71 obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/ 72 72 obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/ 73 + obj-$(CONFIG_DRM_PSB) += gma500/
+12
drivers/staging/gma500/Kconfig
··· 1 + config DRM_PSB 2 + tristate "Intel GMA500 KMS Framebuffer" 3 + depends on DRM && PCI 4 + select FB_CFB_COPYAREA 5 + select FB_CFB_FILLRECT 6 + select FB_CFB_IMAGEBLIT 7 + select DRM_KMS_HELPER 8 + select DRM_TTM 9 + help 10 + Say yes for an experimental KMS framebuffer driver for the 11 + Intel GMA500 ('Poulsbo') graphics support. 12 +
+30
drivers/staging/gma500/Makefile
··· 1 + # 2 + # KMS driver for the GMA500 3 + # 4 + ccflags-y += -Iinclude/drm 5 + 6 + psb_gfx-y += psb_bl.o \ 7 + psb_drv.o \ 8 + psb_fb.o \ 9 + psb_gtt.o \ 10 + psb_intel_bios.o \ 11 + psb_intel_opregion.o \ 12 + psb_intel_display.o \ 13 + psb_intel_i2c.o \ 14 + psb_intel_lvds.o \ 15 + psb_intel_modes.o \ 16 + psb_intel_sdvo.o \ 17 + psb_reset.o \ 18 + psb_sgx.o \ 19 + psb_pvr_glue.o \ 20 + psb_buffer.o \ 21 + psb_fence.o \ 22 + psb_mmu.o \ 23 + psb_ttm_glue.o \ 24 + psb_ttm_fence.o \ 25 + psb_ttm_fence_user.o \ 26 + psb_ttm_placement_user.o \ 27 + psb_powermgmt.o \ 28 + psb_irq.o 29 + 30 + obj-$(CONFIG_DRM_PSB) += psb_gfx.o
+26
drivers/staging/gma500/TODO
··· 1 + - Test on more platforms 2 + - Clean up the various chunks of unused code 3 + - Sort out the power management side. Not important for Poulsbo but 4 + matters for Moorestown 5 + - Add Moorestown support (single pipe, no BIOS, no stolen memory, 6 + some other differences) 7 + - Sort out the bo and ttm code to support user frame buffers and DRM 8 + interfaces rather than just faking it enough for a framebuffer 9 + - Add 2D acceleration via console and DRM 10 + 11 + As per kernel policy and in the interest of the safety of various 12 + kittens there is no support or plans to add hooks for the closed user space 13 + stuff. 14 + 15 + 16 + Why bother? 17 + - Proper display configuration 18 + - Can be made to work on Moorestown where VESA won't 19 + - Works on systems where the VESA BIOS is bust or the tables are broken 20 + without hacks 21 + - 2D acceleration 22 + 23 + Currently tested on 24 + + Dell Mini 10 1024x600 25 + 26 +
+167
drivers/staging/gma500/psb_bl.c
··· 1 + /* 2 + * psb backlight using HAL 3 + * 4 + * Copyright (c) 2009, Intel Corporation. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along with 16 + * this program; if not, write to the Free Software Foundation, Inc., 17 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 + * 19 + * Authors: Eric Knopp 20 + * 21 + */ 22 + 23 + #include <linux/backlight.h> 24 + #include <linux/version.h> 25 + #include "psb_drv.h" 26 + #include "psb_intel_reg.h" 27 + #include "psb_intel_drv.h" 28 + #include "psb_intel_bios.h" 29 + #include "psb_powermgmt.h" 30 + 31 + #define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF 32 + #define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */ 33 + #define BLC_PWM_FREQ_CALC_CONSTANT 32 34 + #define MHz 1000000 35 + #define BRIGHTNESS_MIN_LEVEL 1 36 + #define BRIGHTNESS_MAX_LEVEL 100 37 + #define BRIGHTNESS_MASK 0xFF 38 + #define BLC_POLARITY_NORMAL 0 39 + #define BLC_POLARITY_INVERSE 1 40 + #define BLC_ADJUSTMENT_MAX 100 41 + 42 + #define PSB_BLC_PWM_PRECISION_FACTOR 10 43 + #define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE 44 + #define PSB_BLC_MIN_PWM_REG_FREQ 0x2 45 + 46 + #define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE) 47 + #define PSB_BACKLIGHT_PWM_CTL_SHIFT (16) 48 + 49 + static int psb_brightness; 50 + static struct backlight_device *psb_backlight_device; 51 + static u8 blc_brightnesscmd; 52 + static u8 blc_pol; 53 + static u8 blc_type; 54 + 55 + int psb_set_brightness(struct backlight_device *bd) 56 + { 57 + struct drm_device *dev = 
bl_get_data(psb_backlight_device); 58 + int level = bd->props.brightness; 59 + 60 + DRM_DEBUG_DRIVER("backlight level set to %d\n", level); 61 + 62 + /* Perform value bounds checking */ 63 + if (level < BRIGHTNESS_MIN_LEVEL) 64 + level = BRIGHTNESS_MIN_LEVEL; 65 + 66 + psb_intel_lvds_set_brightness(dev, level); 67 + psb_brightness = level; 68 + return 0; 69 + } 70 + 71 + int psb_get_brightness(struct backlight_device *bd) 72 + { 73 + DRM_DEBUG_DRIVER("brightness = 0x%x\n", psb_brightness); 74 + 75 + /* return locally cached var instead of HW read (due to DPST etc.) */ 76 + return psb_brightness; 77 + } 78 + 79 + static const struct backlight_ops psb_ops = { 80 + .get_brightness = psb_get_brightness, 81 + .update_status = psb_set_brightness, 82 + }; 83 + 84 + static int device_backlight_init(struct drm_device *dev) 85 + { 86 + unsigned long CoreClock; 87 + /* u32 bl_max_freq; */ 88 + /* unsigned long value; */ 89 + u16 bl_max_freq; 90 + uint32_t value; 91 + uint32_t blc_pwm_precision_factor; 92 + struct drm_psb_private *dev_priv = dev->dev_private; 93 + 94 + /* get bl_max_freq and pol from dev_priv*/ 95 + if (!dev_priv->lvds_bl) { 96 + DRM_ERROR("Has no valid LVDS backlight info\n"); 97 + return 1; 98 + } 99 + bl_max_freq = dev_priv->lvds_bl->freq; 100 + blc_pol = dev_priv->lvds_bl->pol; 101 + blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR; 102 + blc_brightnesscmd = dev_priv->lvds_bl->brightnesscmd; 103 + blc_type = dev_priv->lvds_bl->type; 104 + 105 + CoreClock = dev_priv->core_freq; 106 + 107 + value = (CoreClock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT; 108 + value *= blc_pwm_precision_factor; 109 + value /= bl_max_freq; 110 + value /= blc_pwm_precision_factor; 111 + 112 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 113 + OSPM_UHB_ONLY_IF_ON)) { 114 + /* Check: may be MFLD only */ 115 + if ( 116 + value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ || 117 + value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ) 118 + return 2; 119 + else { 120 + value 
&= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR; 121 + REG_WRITE(BLC_PWM_CTL, 122 + (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | 123 + (value)); 124 + } 125 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 126 + } 127 + return 0; 128 + } 129 + 130 + int psb_backlight_init(struct drm_device *dev) 131 + { 132 + #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 133 + int ret = 0; 134 + 135 + struct backlight_properties props; 136 + memset(&props, 0, sizeof(struct backlight_properties)); 137 + props.max_brightness = BRIGHTNESS_MAX_LEVEL; 138 + 139 + psb_backlight_device = backlight_device_register("psb-bl", NULL, 140 + (void *)dev, &psb_ops, &props); 141 + if (IS_ERR(psb_backlight_device)) 142 + return PTR_ERR(psb_backlight_device); 143 + 144 + ret = device_backlight_init(dev); 145 + if (ret < 0) 146 + return ret; 147 + 148 + psb_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL; 149 + psb_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL; 150 + backlight_update_status(psb_backlight_device); 151 + #endif 152 + return 0; 153 + } 154 + 155 + void psb_backlight_exit(void) 156 + { 157 + #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 158 + psb_backlight_device->props.brightness = 0; 159 + backlight_update_status(psb_backlight_device); 160 + backlight_device_unregister(psb_backlight_device); 161 + #endif 162 + } 163 + 164 + struct backlight_device *psb_get_backlight_device(void) 165 + { 166 + return psb_backlight_device; 167 + }
+450
drivers/staging/gma500/psb_buffer.c
··· 1 + /************************************************************************** 2 + * Copyright (c) 2007, Intel Corporation. 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms and conditions of the GNU General Public License, 6 + * version 2, as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope it will be useful, but WITHOUT 9 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 + * more details. 12 + * 13 + * You should have received a copy of the GNU General Public License along with 14 + * this program; if not, write to the Free Software Foundation, Inc., 15 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 + * 17 + **************************************************************************/ 18 + /* 19 + * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com> 20 + */ 21 + #include "ttm/ttm_placement.h" 22 + #include "ttm/ttm_execbuf_util.h" 23 + #include "psb_ttm_fence_api.h" 24 + #include <drm/drmP.h> 25 + #include "psb_drv.h" 26 + 27 + #define DRM_MEM_TTM 26 28 + 29 + struct drm_psb_ttm_backend { 30 + struct ttm_backend base; 31 + struct page **pages; 32 + unsigned int desired_tile_stride; 33 + unsigned int hw_tile_stride; 34 + int mem_type; 35 + unsigned long offset; 36 + unsigned long num_pages; 37 + }; 38 + 39 + /* 40 + * MSVDX/TOPAZ GPU virtual space looks like this 41 + * (We currently use only one MMU context). 
42 + * PSB_MEM_MMU_START: from 0x00000000~0xe000000, for generic buffers 43 + * TTM_PL_CI: from 0xe0000000+half GTT space, for camear/video buffer sharing 44 + * TTM_PL_RAR: from TTM_PL_CI+CI size, for RAR/video buffer sharing 45 + * TTM_PL_TT: from TTM_PL_RAR+RAR size, for buffers need to mapping into GTT 46 + */ 47 + static int psb_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, 48 + struct ttm_mem_type_manager *man) 49 + { 50 + 51 + struct drm_psb_private *dev_priv = 52 + container_of(bdev, struct drm_psb_private, bdev); 53 + struct psb_gtt *pg = dev_priv->pg; 54 + 55 + switch (type) { 56 + case TTM_PL_SYSTEM: 57 + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; 58 + man->available_caching = TTM_PL_FLAG_CACHED | 59 + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; 60 + man->default_caching = TTM_PL_FLAG_CACHED; 61 + break; 62 + case DRM_PSB_MEM_MMU: 63 + man->func = &ttm_bo_manager_func; 64 + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | 65 + TTM_MEMTYPE_FLAG_CMA; 66 + man->gpu_offset = PSB_MEM_MMU_START; 67 + man->available_caching = TTM_PL_FLAG_CACHED | 68 + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; 69 + man->default_caching = TTM_PL_FLAG_WC; 70 + break; 71 + case TTM_PL_CI: 72 + man->func = &ttm_bo_manager_func; 73 + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | 74 + TTM_MEMTYPE_FLAG_FIXED; 75 + man->gpu_offset = pg->mmu_gatt_start + (pg->ci_start); 76 + man->available_caching = TTM_PL_FLAG_UNCACHED; 77 + man->default_caching = TTM_PL_FLAG_UNCACHED; 78 + break; 79 + case TTM_PL_RAR: /* Unmappable RAR memory */ 80 + man->func = &ttm_bo_manager_func; 81 + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | 82 + TTM_MEMTYPE_FLAG_FIXED; 83 + man->available_caching = TTM_PL_FLAG_UNCACHED; 84 + man->default_caching = TTM_PL_FLAG_UNCACHED; 85 + man->gpu_offset = pg->mmu_gatt_start + (pg->rar_start); 86 + break; 87 + case TTM_PL_TT: /* Mappable GATT memory */ 88 + man->func = &ttm_bo_manager_func; 89 + #ifdef PSB_WORKING_HOST_MMU_ACCESS 90 + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; 91 + #else 92 + 
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | 93 + TTM_MEMTYPE_FLAG_CMA; 94 + #endif 95 + man->available_caching = TTM_PL_FLAG_CACHED | 96 + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; 97 + man->default_caching = TTM_PL_FLAG_WC; 98 + man->gpu_offset = pg->mmu_gatt_start + 99 + (pg->rar_start + dev_priv->rar_region_size); 100 + break; 101 + default: 102 + DRM_ERROR("Unsupported memory type %u\n", (unsigned) type); 103 + return -EINVAL; 104 + } 105 + return 0; 106 + } 107 + 108 + 109 + static void psb_evict_mask(struct ttm_buffer_object *bo, 110 + struct ttm_placement *placement) 111 + { 112 + static uint32_t cur_placement; 113 + 114 + cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEM; 115 + cur_placement |= TTM_PL_FLAG_SYSTEM; 116 + 117 + placement->fpfn = 0; 118 + placement->lpfn = 0; 119 + placement->num_placement = 1; 120 + placement->placement = &cur_placement; 121 + placement->num_busy_placement = 0; 122 + placement->busy_placement = NULL; 123 + 124 + /* all buffers evicted to system memory */ 125 + /* return cur_placement | TTM_PL_FLAG_SYSTEM; */ 126 + } 127 + 128 + static int psb_invalidate_caches(struct ttm_bo_device *bdev, 129 + uint32_t placement) 130 + { 131 + return 0; 132 + } 133 + 134 + static int psb_move_blit(struct ttm_buffer_object *bo, 135 + bool evict, bool no_wait, 136 + struct ttm_mem_reg *new_mem) 137 + { 138 + BUG(); 139 + return 0; 140 + } 141 + 142 + /* 143 + * Flip destination ttm into GATT, 144 + * then blit and subsequently move out again. 
145 + */ 146 + 147 + static int psb_move_flip(struct ttm_buffer_object *bo, 148 + bool evict, bool interruptible, bool no_wait, 149 + struct ttm_mem_reg *new_mem) 150 + { 151 + /*struct ttm_bo_device *bdev = bo->bdev;*/ 152 + struct ttm_mem_reg tmp_mem; 153 + int ret; 154 + struct ttm_placement placement; 155 + uint32_t flags = TTM_PL_FLAG_TT; 156 + 157 + tmp_mem = *new_mem; 158 + tmp_mem.mm_node = NULL; 159 + 160 + placement.fpfn = 0; 161 + placement.lpfn = 0; 162 + placement.num_placement = 1; 163 + placement.placement = &flags; 164 + placement.num_busy_placement = 0; /* FIXME */ 165 + placement.busy_placement = NULL; 166 + 167 + ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, 168 + false, no_wait); 169 + if (ret) 170 + return ret; 171 + ret = ttm_tt_bind(bo->ttm, &tmp_mem); 172 + if (ret) 173 + goto out_cleanup; 174 + ret = psb_move_blit(bo, true, no_wait, &tmp_mem); 175 + if (ret) 176 + goto out_cleanup; 177 + 178 + ret = ttm_bo_move_ttm(bo, evict, false, no_wait, new_mem); 179 + out_cleanup: 180 + if (tmp_mem.mm_node) { 181 + drm_mm_put_block(tmp_mem.mm_node); 182 + tmp_mem.mm_node = NULL; 183 + } 184 + return ret; 185 + } 186 + 187 + static int psb_move(struct ttm_buffer_object *bo, 188 + bool evict, bool interruptible, bool no_wait_reserve, 189 + bool no_wait, struct ttm_mem_reg *new_mem) 190 + { 191 + struct ttm_mem_reg *old_mem = &bo->mem; 192 + 193 + if ((old_mem->mem_type == TTM_PL_RAR) || 194 + (new_mem->mem_type == TTM_PL_RAR)) { 195 + if (old_mem->mm_node) { 196 + spin_lock(&bo->glob->lru_lock); 197 + drm_mm_put_block(old_mem->mm_node); 198 + spin_unlock(&bo->glob->lru_lock); 199 + } 200 + old_mem->mm_node = NULL; 201 + *old_mem = *new_mem; 202 + } else if (old_mem->mem_type == TTM_PL_SYSTEM) { 203 + return ttm_bo_move_memcpy(bo, evict, false, no_wait, new_mem); 204 + } else if (new_mem->mem_type == TTM_PL_SYSTEM) { 205 + int ret = psb_move_flip(bo, evict, interruptible, 206 + no_wait, new_mem); 207 + if (unlikely(ret != 0)) { 208 + if 
(ret == -ERESTART) 209 + return ret; 210 + else 211 + return ttm_bo_move_memcpy(bo, evict, false, 212 + no_wait, new_mem); 213 + } 214 + } else { 215 + if (psb_move_blit(bo, evict, no_wait, new_mem)) 216 + return ttm_bo_move_memcpy(bo, evict, false, no_wait, 217 + new_mem); 218 + } 219 + return 0; 220 + } 221 + 222 + static int drm_psb_tbe_populate(struct ttm_backend *backend, 223 + unsigned long num_pages, 224 + struct page **pages, 225 + struct page *dummy_read_page, 226 + dma_addr_t *dma_addrs) 227 + { 228 + struct drm_psb_ttm_backend *psb_be = 229 + container_of(backend, struct drm_psb_ttm_backend, base); 230 + 231 + psb_be->pages = pages; 232 + return 0; 233 + } 234 + 235 + static int drm_psb_tbe_unbind(struct ttm_backend *backend) 236 + { 237 + struct ttm_bo_device *bdev = backend->bdev; 238 + struct drm_psb_private *dev_priv = 239 + container_of(bdev, struct drm_psb_private, bdev); 240 + struct drm_psb_ttm_backend *psb_be = 241 + container_of(backend, struct drm_psb_ttm_backend, base); 242 + struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu); 243 + /* struct ttm_mem_type_manager *man = &bdev->man[psb_be->mem_type]; */ 244 + 245 + if (psb_be->mem_type == TTM_PL_TT) { 246 + uint32_t gatt_p_offset = 247 + (psb_be->offset - dev_priv->pg->mmu_gatt_start) 248 + >> PAGE_SHIFT; 249 + 250 + (void) psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset, 251 + psb_be->num_pages, 252 + psb_be->desired_tile_stride, 253 + psb_be->hw_tile_stride, 0); 254 + } 255 + 256 + psb_mmu_remove_pages(pd, psb_be->offset, 257 + psb_be->num_pages, 258 + psb_be->desired_tile_stride, 259 + psb_be->hw_tile_stride); 260 + 261 + return 0; 262 + } 263 + 264 + static int drm_psb_tbe_bind(struct ttm_backend *backend, 265 + struct ttm_mem_reg *bo_mem) 266 + { 267 + struct ttm_bo_device *bdev = backend->bdev; 268 + struct drm_psb_private *dev_priv = 269 + container_of(bdev, struct drm_psb_private, bdev); 270 + struct drm_psb_ttm_backend *psb_be = 271 + container_of(backend, struct 
drm_psb_ttm_backend, base); 272 + struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu); 273 + struct ttm_mem_type_manager *man = &bdev->man[bo_mem->mem_type]; 274 + struct drm_mm_node *mm_node = bo_mem->mm_node; 275 + int type; 276 + int ret = 0; 277 + 278 + psb_be->mem_type = bo_mem->mem_type; 279 + psb_be->num_pages = bo_mem->num_pages; 280 + psb_be->desired_tile_stride = 0; 281 + psb_be->hw_tile_stride = 0; 282 + psb_be->offset = (mm_node->start << PAGE_SHIFT) + 283 + man->gpu_offset; 284 + 285 + type = 286 + (bo_mem-> 287 + placement & TTM_PL_FLAG_CACHED) ? PSB_MMU_CACHED_MEMORY : 0; 288 + 289 + if (psb_be->mem_type == TTM_PL_TT) { 290 + uint32_t gatt_p_offset = 291 + (psb_be->offset - dev_priv->pg->mmu_gatt_start) 292 + >> PAGE_SHIFT; 293 + 294 + ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages, 295 + gatt_p_offset, 296 + psb_be->num_pages, 297 + psb_be->desired_tile_stride, 298 + psb_be->hw_tile_stride, type); 299 + } 300 + 301 + ret = psb_mmu_insert_pages(pd, psb_be->pages, 302 + psb_be->offset, psb_be->num_pages, 303 + psb_be->desired_tile_stride, 304 + psb_be->hw_tile_stride, type); 305 + if (ret) 306 + goto out_err; 307 + 308 + return 0; 309 + out_err: 310 + drm_psb_tbe_unbind(backend); 311 + return ret; 312 + 313 + } 314 + 315 + static void drm_psb_tbe_clear(struct ttm_backend *backend) 316 + { 317 + struct drm_psb_ttm_backend *psb_be = 318 + container_of(backend, struct drm_psb_ttm_backend, base); 319 + 320 + psb_be->pages = NULL; 321 + return; 322 + } 323 + 324 + static void drm_psb_tbe_destroy(struct ttm_backend *backend) 325 + { 326 + struct drm_psb_ttm_backend *psb_be = 327 + container_of(backend, struct drm_psb_ttm_backend, base); 328 + 329 + if (backend) 330 + kfree(psb_be); 331 + } 332 + 333 + static struct ttm_backend_func psb_ttm_backend = { 334 + .populate = drm_psb_tbe_populate, 335 + .clear = drm_psb_tbe_clear, 336 + .bind = drm_psb_tbe_bind, 337 + .unbind = drm_psb_tbe_unbind, 338 + .destroy = drm_psb_tbe_destroy, 339 + }; 
340 + 341 + static struct ttm_backend *drm_psb_tbe_init(struct ttm_bo_device *bdev) 342 + { 343 + struct drm_psb_ttm_backend *psb_be; 344 + 345 + psb_be = kzalloc(sizeof(*psb_be), GFP_KERNEL); 346 + if (!psb_be) 347 + return NULL; 348 + psb_be->pages = NULL; 349 + psb_be->base.func = &psb_ttm_backend; 350 + psb_be->base.bdev = bdev; 351 + return &psb_be->base; 352 + } 353 + 354 + static int psb_ttm_io_mem_reserve(struct ttm_bo_device *bdev, 355 + struct ttm_mem_reg *mem) 356 + { 357 + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 358 + struct drm_psb_private *dev_priv = 359 + container_of(bdev, struct drm_psb_private, bdev); 360 + struct psb_gtt *pg = dev_priv->pg; 361 + struct drm_mm_node *mm_node = mem->mm_node; 362 + 363 + mem->bus.addr = NULL; 364 + mem->bus.offset = 0; 365 + mem->bus.size = mem->num_pages << PAGE_SHIFT; 366 + mem->bus.base = 0; 367 + mem->bus.is_iomem = false; 368 + if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) 369 + return -EINVAL; 370 + switch (mem->mem_type) { 371 + case TTM_PL_SYSTEM: 372 + /* system memory */ 373 + return 0; 374 + case TTM_PL_TT: 375 + mem->bus.offset = mm_node->start << PAGE_SHIFT; 376 + mem->bus.base = pg->gatt_start; 377 + mem->bus.is_iomem = false; 378 + /* Don't know whether it is IO_MEM, this flag 379 + used in vm_fault handle */ 380 + break; 381 + case DRM_PSB_MEM_MMU: 382 + mem->bus.offset = mm_node->start << PAGE_SHIFT; 383 + mem->bus.base = 0x00000000; 384 + break; 385 + case TTM_PL_CI: 386 + mem->bus.offset = mm_node->start << PAGE_SHIFT; 387 + mem->bus.base = dev_priv->ci_region_start;; 388 + mem->bus.is_iomem = true; 389 + break; 390 + case TTM_PL_RAR: 391 + mem->bus.offset = mm_node->start << PAGE_SHIFT; 392 + mem->bus.base = dev_priv->rar_region_start;; 393 + mem->bus.is_iomem = true; 394 + break; 395 + default: 396 + return -EINVAL; 397 + } 398 + return 0; 399 + } 400 + 401 + static void psb_ttm_io_mem_free(struct ttm_bo_device *bdev, 402 + struct ttm_mem_reg *mem) 403 + { 404 + } 405 + 
406 + /* 407 + * Use this memory type priority if no eviction is needed. 408 + */ 409 + /* 410 + static uint32_t psb_mem_prios[] = { 411 + TTM_PL_CI, 412 + TTM_PL_RAR, 413 + TTM_PL_TT, 414 + DRM_PSB_MEM_MMU, 415 + TTM_PL_SYSTEM 416 + }; 417 + */ 418 + /* 419 + * Use this memory type priority if need to evict. 420 + */ 421 + /* 422 + static uint32_t psb_busy_prios[] = { 423 + TTM_PL_TT, 424 + TTM_PL_CI, 425 + TTM_PL_RAR, 426 + DRM_PSB_MEM_MMU, 427 + TTM_PL_SYSTEM 428 + }; 429 + */ 430 + struct ttm_bo_driver psb_ttm_bo_driver = { 431 + /* 432 + .mem_type_prio = psb_mem_prios, 433 + .mem_busy_prio = psb_busy_prios, 434 + .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios), 435 + .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios), 436 + */ 437 + .create_ttm_backend_entry = &drm_psb_tbe_init, 438 + .invalidate_caches = &psb_invalidate_caches, 439 + .init_mem_type = &psb_init_mem_type, 440 + .evict_flags = &psb_evict_mask, 441 + .move = &psb_move, 442 + .verify_access = &psb_verify_access, 443 + .sync_obj_signaled = &ttm_fence_sync_obj_signaled, 444 + .sync_obj_wait = &ttm_fence_sync_obj_wait, 445 + .sync_obj_flush = &ttm_fence_sync_obj_flush, 446 + .sync_obj_unref = &ttm_fence_sync_obj_unref, 447 + .sync_obj_ref = &ttm_fence_sync_obj_ref, 448 + .io_mem_reserve = &psb_ttm_io_mem_reserve, 449 + .io_mem_free = &psb_ttm_io_mem_free 450 + };
+696
drivers/staging/gma500/psb_drm.h
··· 1 + /************************************************************************** 2 + * Copyright (c) 2007, Intel Corporation. 3 + * All Rights Reserved. 4 + * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA. 5 + * All Rights Reserved. 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms and conditions of the GNU General Public License, 9 + * version 2, as published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope it will be useful, but WITHOUT 12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 + * more details. 15 + * 16 + * You should have received a copy of the GNU General Public License along with 17 + * this program; if not, write to the Free Software Foundation, Inc., 18 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 19 + * 20 + **************************************************************************/ 21 + 22 + #ifndef _PSB_DRM_H_ 23 + #define _PSB_DRM_H_ 24 + 25 + #if defined(__linux__) && !defined(__KERNEL__) 26 + #include<stdint.h> 27 + #include <linux/types.h> 28 + #include "drm_mode.h" 29 + #endif 30 + 31 + #include "psb_ttm_fence_user.h" 32 + #include "psb_ttm_placement_user.h" 33 + 34 + /* 35 + * Menlow/MRST graphics driver package version 36 + * a.b.c.xxxx 37 + * a - Product Family: 5 - Linux 38 + * b - Major Release Version: 0 - non-Gallium (Unbuntu); 39 + * 1 - Gallium (Moblin2) 40 + * c - Hotfix Release 41 + * xxxx - Graphics internal build # 42 + */ 43 + #define PSB_PACKAGE_VERSION "5.3.0.32L.0036" 44 + 45 + #define DRM_PSB_SAREA_MAJOR 0 46 + #define DRM_PSB_SAREA_MINOR 2 47 + #define PSB_FIXED_SHIFT 16 48 + 49 + #define PSB_NUM_PIPE 3 50 + 51 + /* 52 + * Public memory types. 
53 + */ 54 + 55 + #define DRM_PSB_MEM_MMU TTM_PL_PRIV1 56 + #define DRM_PSB_FLAG_MEM_MMU TTM_PL_FLAG_PRIV1 57 + 58 + #define TTM_PL_CI TTM_PL_PRIV0 59 + #define TTM_PL_FLAG_CI TTM_PL_FLAG_PRIV0 60 + 61 + #define TTM_PL_RAR TTM_PL_PRIV2 62 + #define TTM_PL_FLAG_RAR TTM_PL_FLAG_PRIV2 63 + 64 + typedef int32_t psb_fixed; 65 + typedef uint32_t psb_ufixed; 66 + 67 + static inline int32_t psb_int_to_fixed(int a) 68 + { 69 + return a * (1 << PSB_FIXED_SHIFT); 70 + } 71 + 72 + static inline uint32_t psb_unsigned_to_ufixed(unsigned int a) 73 + { 74 + return a << PSB_FIXED_SHIFT; 75 + } 76 + 77 + /*Status of the command sent to the gfx device.*/ 78 + typedef enum { 79 + DRM_CMD_SUCCESS, 80 + DRM_CMD_FAILED, 81 + DRM_CMD_HANG 82 + } drm_cmd_status_t; 83 + 84 + struct drm_psb_scanout { 85 + uint32_t buffer_id; /* DRM buffer object ID */ 86 + uint32_t rotation; /* Rotation as in RR_rotation definitions */ 87 + uint32_t stride; /* Buffer stride in bytes */ 88 + uint32_t depth; /* Buffer depth in bits (NOT) bpp */ 89 + uint32_t width; /* Buffer width in pixels */ 90 + uint32_t height; /* Buffer height in lines */ 91 + int32_t transform[3][3]; /* Buffer composite transform */ 92 + /* (scaling, rot, reflect) */ 93 + }; 94 + 95 + #define DRM_PSB_SAREA_OWNERS 16 96 + #define DRM_PSB_SAREA_OWNER_2D 0 97 + #define DRM_PSB_SAREA_OWNER_3D 1 98 + 99 + #define DRM_PSB_SAREA_SCANOUTS 3 100 + 101 + struct drm_psb_sarea { 102 + /* Track changes of this data structure */ 103 + 104 + uint32_t major; 105 + uint32_t minor; 106 + 107 + /* Last context to touch part of hw */ 108 + uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS]; 109 + 110 + /* Definition of front- and rotated buffers */ 111 + uint32_t num_scanouts; 112 + struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS]; 113 + 114 + int planeA_x; 115 + int planeA_y; 116 + int planeA_w; 117 + int planeA_h; 118 + int planeB_x; 119 + int planeB_y; 120 + int planeB_w; 121 + int planeB_h; 122 + /* Number of active scanouts */ 123 + uint32_t 
num_active_scanouts; 124 + }; 125 + 126 + #define PSB_RELOC_MAGIC 0x67676767 127 + #define PSB_RELOC_SHIFT_MASK 0x0000FFFF 128 + #define PSB_RELOC_SHIFT_SHIFT 0 129 + #define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000 130 + #define PSB_RELOC_ALSHIFT_SHIFT 16 131 + 132 + #define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated 133 + * buffer 134 + */ 135 + 136 + struct drm_psb_reloc { 137 + uint32_t reloc_op; 138 + uint32_t where; /* offset in destination buffer */ 139 + uint32_t buffer; /* Buffer reloc applies to */ 140 + uint32_t mask; /* Destination format: */ 141 + uint32_t shift; /* Destination format: */ 142 + uint32_t pre_add; /* Destination format: */ 143 + uint32_t background; /* Destination add */ 144 + uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */ 145 + uint32_t arg0; /* Reloc-op dependant */ 146 + uint32_t arg1; 147 + }; 148 + 149 + 150 + #define PSB_GPU_ACCESS_READ (1ULL << 32) 151 + #define PSB_GPU_ACCESS_WRITE (1ULL << 33) 152 + #define PSB_GPU_ACCESS_MASK (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE) 153 + 154 + #define PSB_BO_FLAG_COMMAND (1ULL << 52) 155 + 156 + #define PSB_ENGINE_2D 0 157 + #define PSB_ENGINE_VIDEO 1 158 + #define LNC_ENGINE_ENCODE 5 159 + 160 + /* 161 + * For this fence class we have a couple of 162 + * fence types. 
 */

/*
 * Fence bits: bit 0 signals command execution complete, bit 4 signals
 * feedback (vistest) data available.
 */
#define _PSB_FENCE_EXE_SHIFT           0
#define _PSB_FENCE_FEEDBACK_SHIFT      4

#define _PSB_FENCE_TYPE_EXE            (1 << _PSB_FENCE_EXE_SHIFT)
#define _PSB_FENCE_TYPE_FEEDBACK       (1 << _PSB_FENCE_FEEDBACK_SHIFT)

#define PSB_NUM_ENGINES 6


#define PSB_FEEDBACK_OP_VISTEST (1 << 0)

/* Reply half of the driver-extension query ioctl argument. */
struct drm_psb_extension_rep {
        int32_t exists;
        uint32_t driver_ioctl_offset;
        uint32_t sarea_offset;
        uint32_t major;
        uint32_t minor;
        uint32_t pl;
};

#define DRM_PSB_EXT_NAME_LEN 128

/* In: extension name string.  Out: the reply above. */
union drm_psb_extension_arg {
        char extension[DRM_PSB_EXT_NAME_LEN];
        struct drm_psb_extension_rep rep;
};

/*
 * Per-buffer validation request, chained via 'next' (user-space pointer
 * cast to uint64_t — presumably; confirm against the execbuf path).
 */
struct psb_validate_req {
        uint64_t set_flags;
        uint64_t clear_flags;
        uint64_t next;
        uint64_t presumed_gpu_offset;
        uint32_t buffer_handle;
        uint32_t presumed_flags;
        uint32_t group;
        uint32_t pad64;         /* pad to a 64-bit multiple */
};

/* Validation result returned for each buffer. */
struct psb_validate_rep {
        uint64_t gpu_offset;
        uint32_t placement;
        uint32_t fence_type_mask;
};

#define PSB_USE_PRESUMED (1 << 0)

/* Request/reply overlay for a single validate entry. */
struct psb_validate_arg {
        int handled;
        int ret;
        union {
                struct psb_validate_req req;
                struct psb_validate_rep rep;
        } d;
};


#define DRM_PSB_FENCE_NO_USER (1 << 0)

/* Fence info handed back to user space after command submission. */
struct psb_ttm_fence_rep {
        uint32_t handle;
        uint32_t fence_class;
        uint32_t fence_type;
        uint32_t signaled_types;
        uint32_t error;
};

/* Argument of the command-buffer submission ioctl. */
typedef struct drm_psb_cmdbuf_arg {
        uint64_t buffer_list;   /* List of buffers to validate */
        uint64_t clip_rects;    /* See i915 counterpart */
        uint64_t scene_arg;
        uint64_t fence_arg;

        uint32_t ta_flags;

        uint32_t ta_handle;     /* TA reg-value pairs */
        uint32_t ta_offset;
        uint32_t ta_size;

        uint32_t oom_handle;
        uint32_t oom_offset;
        uint32_t oom_size;

        uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
        uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
        uint32_t cmdbuf_size;

        uint32_t reloc_handle;  /* Reloc buffer object */
        uint32_t reloc_offset;
        uint32_t num_relocs;

        int32_t damage;         /* Damage front buffer with cliprects */
        /* Not implemented yet */
        uint32_t fence_flags;
        uint32_t engine;

        /*
         * Feedback;
         */

        uint32_t feedback_ops;
        uint32_t feedback_handle;
        uint32_t feedback_offset;
        uint32_t feedback_breakpoints;
        uint32_t feedback_size;
} drm_psb_cmdbuf_arg_t;

/* Argument of the page-flip ioctl. */
typedef struct drm_psb_pageflip_arg {
        uint32_t flip_offset;
        uint32_t stride;
} drm_psb_pageflip_arg_t;

/* Keys for the LNC video getparam ioctl. */
typedef enum {
        LNC_VIDEO_DEVICE_INFO,
        LNC_VIDEO_GETPARAM_RAR_INFO,
        LNC_VIDEO_GETPARAM_CI_INFO,
        LNC_VIDEO_GETPARAM_RAR_HANDLER_OFFSET,
        LNC_VIDEO_FRAME_SKIP,
        IMG_VIDEO_DECODE_STATUS,
        IMG_VIDEO_NEW_CONTEXT,
        IMG_VIDEO_RM_CONTEXT,
        IMG_VIDEO_MB_ERROR
} lnc_getparam_key_t;

struct drm_lnc_video_getparam_arg {
        lnc_getparam_key_t key;
        uint64_t arg;           /* argument pointer */
        uint64_t value;         /* feed back pointer */
};


/*
 * Feedback components:
 */

/*
 * Vistest component. The number of these in the feedback buffer
 * equals the number of vistest breakpoints + 1.
 * This is currently the only feedback component.
 */

struct drm_psb_vistest {
        uint32_t vt[8];
};

/* Memory-region sizes reported by the DRM_PSB_SIZES ioctl. */
struct drm_psb_sizes_arg {
        uint32_t ta_mem_size;
        uint32_t mmu_size;
        uint32_t pds_size;
        uint32_t rastgeom_size;
        uint32_t tt_size;
        uint32_t vram_size;
};

/* Histogram status block (DPST). */
struct drm_psb_hist_status_arg {
        uint32_t buf[32];
};

/* Gamma LUT for one output (DPST gamma ioctl). */
struct drm_psb_dpst_lut_arg {
        uint8_t lut[256];
        int output_id;
};

/*
 * EDID-style detailed timing descriptor, split into low bytes and
 * high-nibble bit-fields; packed so it overlays the 18-byte VBT DTD.
 */
struct mrst_timing_info {
        uint16_t pixel_clock;
        uint8_t hactive_lo;
        uint8_t hblank_lo;
        uint8_t hblank_hi:4;
        uint8_t hactive_hi:4;
        uint8_t vactive_lo;
        uint8_t vblank_lo;
        uint8_t vblank_hi:4;
        uint8_t vactive_hi:4;
        uint8_t hsync_offset_lo;
        uint8_t hsync_pulse_width_lo;
        uint8_t vsync_pulse_width_lo:4;
        uint8_t vsync_offset_lo:4;
        uint8_t vsync_pulse_width_hi:2;
        uint8_t vsync_offset_hi:2;
        uint8_t hsync_pulse_width_hi:2;
        uint8_t hsync_offset_hi:2;
        uint8_t width_mm_lo;
        uint8_t height_mm_lo;
        uint8_t height_mm_hi:4;
        uint8_t width_mm_hi:4;
        uint8_t hborder;
        uint8_t vborder;
        uint8_t unknown0:1;
        uint8_t hsync_positive:1;
        uint8_t vsync_positive:1;
        uint8_t separate_sync:2;
        uint8_t stereo:1;
        uint8_t unknown6:1;
        uint8_t interlaced:1;
} __attribute__((packed));

/*
 * GCT revision-0x10 timing record.
 * NOTE(review): the bit-fields mix uint8_t/uint16_t/uint32_t base types;
 * the resulting layout is implementation-defined — verify against the
 * firmware table it is meant to overlay.
 */
struct gct_r10_timing_info {
        uint16_t pixel_clock;
        uint32_t hactive_lo:8;
        uint32_t hactive_hi:4;
        uint32_t hblank_lo:8;
        uint32_t hblank_hi:4;
        uint32_t hsync_offset_lo:8;
        uint16_t hsync_offset_hi:2;
        uint16_t hsync_pulse_width_lo:8;
        uint16_t hsync_pulse_width_hi:2;
        uint16_t hsync_positive:1;
        uint16_t rsvd_1:3;
        uint8_t vactive_lo:8;
        uint16_t vactive_hi:4;
        uint16_t vblank_lo:8;
        uint16_t vblank_hi:4;
        uint16_t vsync_offset_lo:4;
        uint16_t vsync_offset_hi:2;
        uint16_t vsync_pulse_width_lo:4;
        uint16_t vsync_pulse_width_hi:2;
        uint16_t vsync_positive:1;
        uint16_t rsvd_2:3;
} __attribute__((packed));

/* Panel descriptor, GCT table version 1. */
struct mrst_panel_descriptor_v1{
        uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
                                                        /* 0x61190 if MIPI */
        uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
        uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
        uint32_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
                                                /* Register 0x61210 */
        struct mrst_timing_info DTD;/*18 bytes, Standard definition */
        uint16_t Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
                                /* Bit 0, Frequency, 15 bits,0 - 32767Hz */
                        /* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
        uint16_t Panel_MIPI_Display_Descriptor;
                                                /*16 bits, Defined as follows: */
                                        /* if MIPI, 0x0000 if LVDS */
                                        /* Bit 0, Type, 2 bits, */
                                        /* 0: Type-1, */
                                        /* 1: Type-2, */
                                        /* 2: Type-3, */
                                        /* 3: Type-4 */
                                        /* Bit 2, Pixel Format, 4 bits */
                                        /* Bit0: 16bpp (not supported in LNC), */
                                        /* Bit1: 18bpp loosely packed, */
                                        /* Bit2: 18bpp packed, */
                                        /* Bit3: 24bpp */
                                        /* Bit 6, Reserved, 2 bits, 00b */
                        /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
                                        /* Bit 14, Reserved, 2 bits, 00b */
} __attribute__ ((packed));

/* Panel descriptor, GCT table version 2 (byte-sized power-cycle field,
 * plus an initial-brightness byte). */
struct mrst_panel_descriptor_v2{
        uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
                                                        /* 0x61190 if MIPI */
        uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
        uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
        uint8_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
                                                /* Register 0x61210 */
        struct mrst_timing_info DTD;/*18 bytes, Standard definition */
        uint16_t Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
                                /*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
        uint8_t Panel_Initial_Brightness;/* [7:0] 0 - 100% */
                        /*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
        uint16_t Panel_MIPI_Display_Descriptor;
                                                /*16 bits, Defined as follows: */
                                        /* if MIPI, 0x0000 if LVDS */
                                        /* Bit 0, Type, 2 bits, */
                                        /* 0: Type-1, */
                                        /* 1: Type-2, */
                                        /* 2: Type-3, */
                                        /* 3: Type-4 */
                                        /* Bit 2, Pixel Format, 4 bits */
                                        /* Bit0: 16bpp (not supported in LNC), */
                                        /* Bit1: 18bpp loosely packed, */
                                        /* Bit2: 18bpp packed, */
                                        /* Bit3: 24bpp */
                                        /* Bit 6, Reserved, 2 bits, 00b */
                        /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
                                        /* Bit 14, Reserved, 2 bits, 00b */
} __attribute__ ((packed));

/* MIPI DSI receiver capabilities, as a bit-field view or raw word. */
union mrst_panel_rx{
        struct{
                uint16_t NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
                        /* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
                uint16_t MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
                /*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
                uint16_t SupportedVideoTransferMode:2; /*0: Non-burst only */
                                        /* 1: Burst and non-burst */
                                        /* 2/3: Reserved */
                uint16_t HSClkBehavior:1; /*0: Continuous, 1: Non-continuous*/
                uint16_t DuoDisplaySupport:1; /*1 bit,0: No, 1: Yes*/
                uint16_t ECC_ChecksumCapabilities:1;/*1 bit,0: No, 1: Yes*/
                uint16_t BidirectionalCommunication:1;/*1 bit,0: No, 1: Yes */
                uint16_t Rsvd:5;/*5 bits,00000b */
        } panelrx;
        uint16_t panel_receiver;
} __attribute__ ((packed));

/* Boot panel configuration returned by the VBT/GCT ioctl. */
struct gct_ioctl_arg{
        uint8_t bpi; /* boot panel index, number of panel used during boot */
        uint8_t pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
        struct mrst_timing_info DTD; /* timing info for the selected panel */
        uint32_t Panel_Port_Control;
        uint32_t PP_On_Sequencing;/*1 dword,Register 0x61208,*/
        uint32_t PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
        uint32_t PP_Cycle_Delay;
        uint16_t Panel_Backlight_Inverter_Descriptor;
        uint16_t Panel_MIPI_Display_Descriptor;
} __attribute__ ((packed));

/* VBT header; mrst_gct points at the revision-specific table. */
struct mrst_vbt{
        char Signature[4];      /*4 bytes,"$GCT" */
        uint8_t Revision;       /*1 byte */
        uint8_t Size;           /*1 byte */
        uint8_t Checksum;       /*1 byte,Calculated*/
        void *mrst_gct;
} __attribute__ ((packed));

struct mrst_gct_v1{ /* expect this table to change per customer request*/
        union{ /*8 bits,Defined as follows: */
                struct{
                        uint8_t PanelType:4; /*4 bits, Bit field for panels*/
                                        /* 0 - 3: 0 = LVDS, 1 = MIPI*/
                                        /*2 bits,Specifies which of the*/
                        uint8_t BootPanelIndex:2;
                                        /* 4 panels to use by default*/
                        uint8_t BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
                                        /* the 4 MIPI DSI receivers to use*/
                } PD;
                uint8_t PanelDescriptor;
        };
        struct mrst_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
        union mrst_panel_rx panelrx[4]; /* panel receivers*/
} __attribute__ ((packed));

struct mrst_gct_v2{ /* expect this table to change per customer request*/
        union{ /*8 bits,Defined as follows: */
                struct{
                        uint8_t PanelType:4; /*4 bits, Bit field for panels*/
                                        /* 0 - 3: 0 = LVDS, 1 = MIPI*/
                                        /*2 bits,Specifies which of the*/
                        uint8_t BootPanelIndex:2;
                                        /* 4 panels to use by default*/
                        uint8_t BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
                                        /* the 4 MIPI DSI receivers to use*/
                } PD;
                uint8_t PanelDescriptor;
        };
        struct mrst_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
        union mrst_panel_rx panelrx[4]; /* panel receivers*/
} __attribute__ ((packed));

/* Flags for the display-controller state ioctl; masks cover each pair. */
#define PSB_DC_CRTC_SAVE 0x01
#define PSB_DC_CRTC_RESTORE 0x02
#define PSB_DC_OUTPUT_SAVE 0x04
#define PSB_DC_OUTPUT_RESTORE 0x08
#define PSB_DC_CRTC_MASK 0x03
#define PSB_DC_OUTPUT_MASK 0x0C

struct drm_psb_dc_state_arg {
        uint32_t flags;
        uint32_t obj_id;
};

struct drm_psb_mode_operation_arg {
        uint32_t obj_id;
        uint16_t operation;
        struct drm_mode_modeinfo mode;
        void *data;
};

/* Location of the stolen-memory aperture. */
struct drm_psb_stolen_memory_arg {
        uint32_t base;
        uint32_t size;
};

/*Display Register Bits*/
#define REGRWBITS_PFIT_CONTROLS                 (1 << 0)
#define REGRWBITS_PFIT_AUTOSCALE_RATIOS         (1 << 1)
#define REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS  (1 << 2)
#define REGRWBITS_PIPEASRC                      (1 << 3)
#define REGRWBITS_PIPEBSRC                      (1 << 4)
#define REGRWBITS_VTOTAL_A                      (1 << 5)
#define REGRWBITS_VTOTAL_B                      (1 << 6)
#define REGRWBITS_DSPACNTR                      (1 << 8)
#define REGRWBITS_DSPBCNTR                      (1 << 9)
#define REGRWBITS_DSPCCNTR                      (1 << 10)

/*Overlay Register Bits*/
#define OV_REGRWBITS_OVADD                      (1 << 0)
#define OV_REGRWBITS_OGAM_ALL                   (1 << 1)

#define OVC_REGRWBITS_OVADD                     (1 << 2)
#define OVC_REGRWBITS_OGAM_ALL                  (1 << 3)

/* Argument of the register read/write ioctl; the *_mask words select
 * which of the mirrored register fields below are acted on. */
struct drm_psb_register_rw_arg {
        uint32_t b_force_hw_on;

        uint32_t display_read_mask;
        uint32_t display_write_mask;

        struct {
                uint32_t pfit_controls;
                uint32_t pfit_autoscale_ratios;
                uint32_t pfit_programmed_scale_ratios;
                uint32_t pipeasrc;
                uint32_t pipebsrc;
                uint32_t vtotal_a;
                uint32_t vtotal_b;
        } display;

        uint32_t overlay_read_mask;
        uint32_t overlay_write_mask;

        struct {
                uint32_t OVADD;
                uint32_t OGAMC0;
                uint32_t OGAMC1;
                uint32_t OGAMC2;
                uint32_t OGAMC3;
                uint32_t OGAMC4;
                uint32_t OGAMC5;
                uint32_t IEP_ENABLED;
                uint32_t IEP_BLE_MINMAX;
                uint32_t IEP_BSSCC_CONTROL;
                uint32_t b_wait_vblank;
        } overlay;

        uint32_t sprite_enable_mask;
        uint32_t sprite_disable_mask;

        struct {
                uint32_t dspa_control;
                uint32_t dspa_key_value;
                uint32_t dspa_key_mask;
                uint32_t dspc_control;
                uint32_t dspc_stride;
                uint32_t dspc_position;
                uint32_t dspc_linear_offset;
                uint32_t dspc_size;
                uint32_t dspc_surface;
        } sprite;

        uint32_t subpicture_enable_mask;
        uint32_t subpicture_disable_mask;
};

/* Map a kernel meminfo handle into the GTT; offset returned in pages. */
struct psb_gtt_mapping_arg {
        void *hKernelMemInfo;
        uint32_t offset_pages;
};

struct drm_psb_getpageaddrs_arg {
        uint32_t handle;
        unsigned long *page_addrs;
        unsigned long gtt_offset;
};

/* Controlling the kernel modesetting buffers */

#define DRM_PSB_KMS_OFF         0x00
#define DRM_PSB_KMS_ON          0x01
#define DRM_PSB_VT_LEAVE        0x02
#define DRM_PSB_VT_ENTER        0x03
#define DRM_PSB_EXTENSION       0x06
#define DRM_PSB_SIZES           0x07
#define DRM_PSB_FUSE_REG        0x08
#define DRM_PSB_VBT             0x09
#define DRM_PSB_DC_STATE        0x0A
#define DRM_PSB_ADB             0x0B
#define DRM_PSB_MODE_OPERATION  0x0C
#define DRM_PSB_STOLEN_MEMORY   0x0D
#define DRM_PSB_REGISTER_RW     0x0E
#define DRM_PSB_GTT_MAP         0x0F
#define DRM_PSB_GTT_UNMAP       0x10
#define DRM_PSB_GETPAGEADDRS    0x11
/**
 * NOTE: Add new commands here, but increment
 * the values below and increment their
 * corresponding defines where they're
 * defined elsewhere.
 */
#define DRM_PVR_RESERVED1       0x12
#define DRM_PVR_RESERVED2       0x13
#define DRM_PVR_RESERVED3       0x14
#define DRM_PVR_RESERVED4       0x15
#define DRM_PVR_RESERVED5       0x16

#define DRM_PSB_HIST_ENABLE     0x17
#define DRM_PSB_HIST_STATUS     0x18
#define DRM_PSB_UPDATE_GUARD    0x19
#define DRM_PSB_INIT_COMM       0x1A
#define DRM_PSB_DPST            0x1B
#define DRM_PSB_GAMMA           0x1C
#define DRM_PSB_DPST_BL         0x1D

#define DRM_PVR_RESERVED6       0x1E

#define DRM_PSB_GET_PIPE_FROM_CRTC_ID 0x1F
#define DRM_PSB_DPU_QUERY       0x20
#define DRM_PSB_DPU_DSR_ON      0x21
#define DRM_PSB_DPU_DSR_OFF     0x22

#define DRM_PSB_DSR_ENABLE      0xfffffffe
#define DRM_PSB_DSR_DISABLE     0xffffffff

/* Damage rectangle for display self-refresh (DSR). */
struct psb_drm_dpu_rect {
        int x, y;
        int width, height;
};

struct drm_psb_drv_dsr_off_arg {
        int screen;
        struct psb_drm_dpu_rect damage_rect;
};


struct drm_psb_dev_info_arg {
        uint32_t num_use_attribute_registers;
};
#define DRM_PSB_DEVINFO         0x01

#define PSB_MODE_OPERATION_MODE_VALID   0x01
#define PSB_MODE_OPERATION_SET_DC_BASE  0x02

struct drm_psb_get_pipe_from_crtc_id_arg {
        /** ID of CRTC being requested **/
        uint32_t crtc_id;

        /** pipe of requested CRTC **/
        uint32_t pipe;
};

#endif
+1677
drivers/staging/gma500/psb_drv.c
··· 1 + /************************************************************************** 2 + * Copyright (c) 2007, Intel Corporation. 3 + * All Rights Reserved. 4 + * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA. 5 + * All Rights Reserved. 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms and conditions of the GNU General Public License, 9 + * version 2, as published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope it will be useful, but WITHOUT 12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 + * more details. 15 + * 16 + * You should have received a copy of the GNU General Public License along with 17 + * this program; if not, write to the Free Software Foundation, Inc., 18 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 19 + * 20 + **************************************************************************/ 21 + 22 + #include <drm/drmP.h> 23 + #include <drm/drm.h> 24 + #include "psb_drm.h" 25 + #include "psb_drv.h" 26 + #include "psb_fb.h" 27 + #include "psb_reg.h" 28 + #include "psb_intel_reg.h" 29 + #include "psb_intel_bios.h" 30 + #include <drm/drm_pciids.h> 31 + #include "psb_powermgmt.h" 32 + #include <linux/cpu.h> 33 + #include <linux/notifier.h> 34 + #include <linux/spinlock.h> 35 + #include <linux/pm_runtime.h> 36 + 37 + int drm_psb_debug; 38 + static int drm_psb_trap_pagefaults; 39 + 40 + int drm_psb_disable_vsync = 1; 41 + int drm_psb_no_fb; 42 + int drm_psb_force_pipeb; 43 + int drm_idle_check_interval = 5; 44 + int gfxrtdelay = 2 * 1000; 45 + 46 + static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 47 + 48 + MODULE_PARM_DESC(debug, "Enable debug output"); 49 + MODULE_PARM_DESC(no_fb, "Disable FBdev"); 50 + MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults"); 51 + 
MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts"); 52 + MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb"); 53 + MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB"); 54 + MODULE_PARM_DESC(ospm, "switch for ospm support"); 55 + MODULE_PARM_DESC(rtpm, "Specifies Runtime PM delay for GFX"); 56 + MODULE_PARM_DESC(hdmi_edid, "EDID info for HDMI monitor"); 57 + module_param_named(debug, drm_psb_debug, int, 0600); 58 + module_param_named(no_fb, drm_psb_no_fb, int, 0600); 59 + module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600); 60 + module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600); 61 + module_param_named(rtpm, gfxrtdelay, int, 0600); 62 + 63 + 64 + static struct pci_device_id pciidlist[] = { 65 + { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108 }, 66 + { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109 }, 67 + { 0, 0, 0} 68 + }; 69 + MODULE_DEVICE_TABLE(pci, pciidlist); 70 + 71 + /* 72 + * Standard IOCTLs. 
73 + */ 74 + 75 + #define DRM_IOCTL_PSB_KMS_OFF \ 76 + DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE) 77 + #define DRM_IOCTL_PSB_KMS_ON \ 78 + DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE) 79 + #define DRM_IOCTL_PSB_VT_LEAVE \ 80 + DRM_IO(DRM_PSB_VT_LEAVE + DRM_COMMAND_BASE) 81 + #define DRM_IOCTL_PSB_VT_ENTER \ 82 + DRM_IO(DRM_PSB_VT_ENTER + DRM_COMMAND_BASE) 83 + #define DRM_IOCTL_PSB_SIZES \ 84 + DRM_IOR(DRM_PSB_SIZES + DRM_COMMAND_BASE, \ 85 + struct drm_psb_sizes_arg) 86 + #define DRM_IOCTL_PSB_FUSE_REG \ 87 + DRM_IOWR(DRM_PSB_FUSE_REG + DRM_COMMAND_BASE, uint32_t) 88 + #define DRM_IOCTL_PSB_DC_STATE \ 89 + DRM_IOW(DRM_PSB_DC_STATE + DRM_COMMAND_BASE, \ 90 + struct drm_psb_dc_state_arg) 91 + #define DRM_IOCTL_PSB_ADB \ 92 + DRM_IOWR(DRM_PSB_ADB + DRM_COMMAND_BASE, uint32_t) 93 + #define DRM_IOCTL_PSB_MODE_OPERATION \ 94 + DRM_IOWR(DRM_PSB_MODE_OPERATION + DRM_COMMAND_BASE, \ 95 + struct drm_psb_mode_operation_arg) 96 + #define DRM_IOCTL_PSB_STOLEN_MEMORY \ 97 + DRM_IOWR(DRM_PSB_STOLEN_MEMORY + DRM_COMMAND_BASE, \ 98 + struct drm_psb_stolen_memory_arg) 99 + #define DRM_IOCTL_PSB_REGISTER_RW \ 100 + DRM_IOWR(DRM_PSB_REGISTER_RW + DRM_COMMAND_BASE, \ 101 + struct drm_psb_register_rw_arg) 102 + #define DRM_IOCTL_PSB_GTT_MAP \ 103 + DRM_IOWR(DRM_PSB_GTT_MAP + DRM_COMMAND_BASE, \ 104 + struct psb_gtt_mapping_arg) 105 + #define DRM_IOCTL_PSB_GTT_UNMAP \ 106 + DRM_IOW(DRM_PSB_GTT_UNMAP + DRM_COMMAND_BASE, \ 107 + struct psb_gtt_mapping_arg) 108 + #define DRM_IOCTL_PSB_GETPAGEADDRS \ 109 + DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_GETPAGEADDRS,\ 110 + struct drm_psb_getpageaddrs_arg) 111 + #define DRM_IOCTL_PSB_HIST_ENABLE \ 112 + DRM_IOWR(DRM_PSB_HIST_ENABLE + DRM_COMMAND_BASE, \ 113 + uint32_t) 114 + #define DRM_IOCTL_PSB_HIST_STATUS \ 115 + DRM_IOWR(DRM_PSB_HIST_STATUS + DRM_COMMAND_BASE, \ 116 + struct drm_psb_hist_status_arg) 117 + #define DRM_IOCTL_PSB_UPDATE_GUARD \ 118 + DRM_IOWR(DRM_PSB_UPDATE_GUARD + DRM_COMMAND_BASE, \ 119 + uint32_t) 120 + #define DRM_IOCTL_PSB_DPST \ 
121 + DRM_IOWR(DRM_PSB_DPST + DRM_COMMAND_BASE, \ 122 + uint32_t) 123 + #define DRM_IOCTL_PSB_GAMMA \ 124 + DRM_IOWR(DRM_PSB_GAMMA + DRM_COMMAND_BASE, \ 125 + struct drm_psb_dpst_lut_arg) 126 + #define DRM_IOCTL_PSB_DPST_BL \ 127 + DRM_IOWR(DRM_PSB_DPST_BL + DRM_COMMAND_BASE, \ 128 + uint32_t) 129 + #define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID \ 130 + DRM_IOWR(DRM_PSB_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \ 131 + struct drm_psb_get_pipe_from_crtc_id_arg) 132 + 133 + /* 134 + * TTM execbuf extension. 135 + */ 136 + #define DRM_PSB_CMDBUF (DRM_PSB_DPU_DSR_OFF + 1) 137 + 138 + #define DRM_PSB_SCENE_UNREF (DRM_PSB_CMDBUF + 1) 139 + #define DRM_IOCTL_PSB_CMDBUF \ 140 + DRM_IOW(DRM_PSB_CMDBUF + DRM_COMMAND_BASE, \ 141 + struct drm_psb_cmdbuf_arg) 142 + #define DRM_IOCTL_PSB_SCENE_UNREF \ 143 + DRM_IOW(DRM_PSB_SCENE_UNREF + DRM_COMMAND_BASE, \ 144 + struct drm_psb_scene) 145 + #define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE) 146 + #define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE) 147 + /* 148 + * TTM placement user extension. 149 + */ 150 + 151 + #define DRM_PSB_PLACEMENT_OFFSET (DRM_PSB_SCENE_UNREF + 1) 152 + 153 + #define DRM_PSB_TTM_PL_CREATE (TTM_PL_CREATE + DRM_PSB_PLACEMENT_OFFSET) 154 + #define DRM_PSB_TTM_PL_REFERENCE (TTM_PL_REFERENCE + DRM_PSB_PLACEMENT_OFFSET) 155 + #define DRM_PSB_TTM_PL_UNREF (TTM_PL_UNREF + DRM_PSB_PLACEMENT_OFFSET) 156 + #define DRM_PSB_TTM_PL_SYNCCPU (TTM_PL_SYNCCPU + DRM_PSB_PLACEMENT_OFFSET) 157 + #define DRM_PSB_TTM_PL_WAITIDLE (TTM_PL_WAITIDLE + DRM_PSB_PLACEMENT_OFFSET) 158 + #define DRM_PSB_TTM_PL_SETSTATUS (TTM_PL_SETSTATUS + DRM_PSB_PLACEMENT_OFFSET) 159 + #define DRM_PSB_TTM_PL_CREATE_UB (TTM_PL_CREATE_UB + DRM_PSB_PLACEMENT_OFFSET) 160 + 161 + /* 162 + * TTM fence extension. 
163 + */ 164 + 165 + #define DRM_PSB_FENCE_OFFSET (DRM_PSB_TTM_PL_CREATE_UB + 1) 166 + #define DRM_PSB_TTM_FENCE_SIGNALED (TTM_FENCE_SIGNALED + DRM_PSB_FENCE_OFFSET) 167 + #define DRM_PSB_TTM_FENCE_FINISH (TTM_FENCE_FINISH + DRM_PSB_FENCE_OFFSET) 168 + #define DRM_PSB_TTM_FENCE_UNREF (TTM_FENCE_UNREF + DRM_PSB_FENCE_OFFSET) 169 + 170 + #define DRM_PSB_FLIP (DRM_PSB_TTM_FENCE_UNREF + 1) /*20*/ 171 + /* PSB video extension */ 172 + #define DRM_LNC_VIDEO_GETPARAM (DRM_PSB_FLIP + 1) 173 + 174 + #define DRM_IOCTL_PSB_TTM_PL_CREATE \ 175 + DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE,\ 176 + union ttm_pl_create_arg) 177 + #define DRM_IOCTL_PSB_TTM_PL_REFERENCE \ 178 + DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_REFERENCE,\ 179 + union ttm_pl_reference_arg) 180 + #define DRM_IOCTL_PSB_TTM_PL_UNREF \ 181 + DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_UNREF,\ 182 + struct ttm_pl_reference_req) 183 + #define DRM_IOCTL_PSB_TTM_PL_SYNCCPU \ 184 + DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SYNCCPU,\ 185 + struct ttm_pl_synccpu_arg) 186 + #define DRM_IOCTL_PSB_TTM_PL_WAITIDLE \ 187 + DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_WAITIDLE,\ 188 + struct ttm_pl_waitidle_arg) 189 + #define DRM_IOCTL_PSB_TTM_PL_SETSTATUS \ 190 + DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SETSTATUS,\ 191 + union ttm_pl_setstatus_arg) 192 + #define DRM_IOCTL_PSB_TTM_PL_CREATE_UB \ 193 + DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE_UB,\ 194 + union ttm_pl_create_ub_arg) 195 + #define DRM_IOCTL_PSB_TTM_FENCE_SIGNALED \ 196 + DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_SIGNALED, \ 197 + union ttm_fence_signaled_arg) 198 + #define DRM_IOCTL_PSB_TTM_FENCE_FINISH \ 199 + DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_FINISH, \ 200 + union ttm_fence_finish_arg) 201 + #define DRM_IOCTL_PSB_TTM_FENCE_UNREF \ 202 + DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_UNREF, \ 203 + struct ttm_fence_unref_arg) 204 + #define DRM_IOCTL_PSB_FLIP \ 205 + DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_FLIP, \ 206 + struct 
drm_psb_pageflip_arg) 207 + #define DRM_IOCTL_LNC_VIDEO_GETPARAM \ 208 + DRM_IOWR(DRM_COMMAND_BASE + DRM_LNC_VIDEO_GETPARAM, \ 209 + struct drm_lnc_video_getparam_arg) 210 + 211 + static int psb_vt_leave_ioctl(struct drm_device *dev, void *data, 212 + struct drm_file *file_priv); 213 + static int psb_vt_enter_ioctl(struct drm_device *dev, void *data, 214 + struct drm_file *file_priv); 215 + static int psb_sizes_ioctl(struct drm_device *dev, void *data, 216 + struct drm_file *file_priv); 217 + static int psb_dc_state_ioctl(struct drm_device *dev, void * data, 218 + struct drm_file *file_priv); 219 + static int psb_adb_ioctl(struct drm_device *dev, void *data, 220 + struct drm_file *file_priv); 221 + static int psb_mode_operation_ioctl(struct drm_device *dev, void *data, 222 + struct drm_file *file_priv); 223 + static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data, 224 + struct drm_file *file_priv); 225 + static int psb_register_rw_ioctl(struct drm_device *dev, void *data, 226 + struct drm_file *file_priv); 227 + static int psb_dpst_ioctl(struct drm_device *dev, void *data, 228 + struct drm_file *file_priv); 229 + static int psb_gamma_ioctl(struct drm_device *dev, void *data, 230 + struct drm_file *file_priv); 231 + static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data, 232 + struct drm_file *file_priv); 233 + 234 + #define PSB_IOCTL_DEF(ioctl, func, flags) \ 235 + [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func} 236 + 237 + static struct drm_ioctl_desc psb_ioctls[] = { 238 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_OFF, psbfb_kms_off_ioctl, 239 + DRM_ROOT_ONLY), 240 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_ON, 241 + psbfb_kms_on_ioctl, 242 + DRM_ROOT_ONLY), 243 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_LEAVE, psb_vt_leave_ioctl, 244 + DRM_ROOT_ONLY), 245 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_ENTER, 246 + psb_vt_enter_ioctl, 247 + DRM_ROOT_ONLY), 248 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_SIZES, psb_sizes_ioctl, DRM_AUTH), 249 + 
PSB_IOCTL_DEF(DRM_IOCTL_PSB_DC_STATE, psb_dc_state_ioctl, DRM_AUTH), 250 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH), 251 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl, 252 + DRM_AUTH), 253 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl, 254 + DRM_AUTH), 255 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_REGISTER_RW, psb_register_rw_ioctl, 256 + DRM_AUTH), 257 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_GTT_MAP, 258 + psb_gtt_map_meminfo_ioctl, 259 + DRM_AUTH), 260 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_GTT_UNMAP, 261 + psb_gtt_unmap_meminfo_ioctl, 262 + DRM_AUTH), 263 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_GETPAGEADDRS, 264 + psb_getpageaddrs_ioctl, 265 + DRM_AUTH), 266 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST, psb_dpst_ioctl, DRM_AUTH), 267 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH), 268 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH), 269 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID, 270 + psb_intel_get_pipe_from_crtc_id, 0), 271 + /*to be removed later*/ 272 + /*PSB_IOCTL_DEF(DRM_IOCTL_PSB_SCENE_UNREF, drm_psb_scene_unref_ioctl, 273 + DRM_AUTH),*/ 274 + 275 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE, psb_pl_create_ioctl, 276 + DRM_AUTH), 277 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_REFERENCE, psb_pl_reference_ioctl, 278 + DRM_AUTH), 279 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_UNREF, psb_pl_unref_ioctl, 280 + DRM_AUTH), 281 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SYNCCPU, psb_pl_synccpu_ioctl, 282 + DRM_AUTH), 283 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_WAITIDLE, psb_pl_waitidle_ioctl, 284 + DRM_AUTH), 285 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SETSTATUS, psb_pl_setstatus_ioctl, 286 + DRM_AUTH), 287 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE_UB, psb_pl_ub_create_ioctl, 288 + DRM_AUTH), 289 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_SIGNALED, 290 + psb_fence_signaled_ioctl, DRM_AUTH), 291 + PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_FINISH, psb_fence_finish_ioctl, 292 + DRM_AUTH), 293 + 
PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_UNREF, psb_fence_unref_ioctl, 294 + DRM_AUTH), 295 + }; 296 + 297 + static void psb_set_uopt(struct drm_psb_uopt *uopt) 298 + { 299 + return; 300 + } 301 + 302 + static void psb_lastclose(struct drm_device *dev) 303 + { 304 + struct drm_psb_private *dev_priv = 305 + (struct drm_psb_private *) dev->dev_private; 306 + 307 + return; 308 + 309 + if (!dev->dev_private) 310 + return; 311 + 312 + mutex_lock(&dev_priv->cmdbuf_mutex); 313 + if (dev_priv->context.buffers) { 314 + vfree(dev_priv->context.buffers); 315 + dev_priv->context.buffers = NULL; 316 + } 317 + mutex_unlock(&dev_priv->cmdbuf_mutex); 318 + } 319 + 320 + static void psb_do_takedown(struct drm_device *dev) 321 + { 322 + struct drm_psb_private *dev_priv = 323 + (struct drm_psb_private *) dev->dev_private; 324 + struct ttm_bo_device *bdev = &dev_priv->bdev; 325 + 326 + 327 + if (dev_priv->have_mem_mmu) { 328 + ttm_bo_clean_mm(bdev, DRM_PSB_MEM_MMU); 329 + dev_priv->have_mem_mmu = 0; 330 + } 331 + 332 + if (dev_priv->have_tt) { 333 + ttm_bo_clean_mm(bdev, TTM_PL_TT); 334 + dev_priv->have_tt = 0; 335 + } 336 + 337 + if (dev_priv->have_camera) { 338 + ttm_bo_clean_mm(bdev, TTM_PL_CI); 339 + dev_priv->have_camera = 0; 340 + } 341 + if (dev_priv->have_rar) { 342 + ttm_bo_clean_mm(bdev, TTM_PL_RAR); 343 + dev_priv->have_rar = 0; 344 + } 345 + 346 + } 347 + 348 + static void psb_get_core_freq(struct drm_device *dev) 349 + { 350 + uint32_t clock; 351 + struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0); 352 + struct drm_psb_private *dev_priv = dev->dev_private; 353 + 354 + /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/ 355 + /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/ 356 + 357 + pci_write_config_dword(pci_root, 0xD0, 0xD0050300); 358 + pci_read_config_dword(pci_root, 0xD4, &clock); 359 + pci_dev_put(pci_root); 360 + 361 + switch (clock & 0x07) { 362 + case 0: 363 + dev_priv->core_freq = 100; 364 + break; 365 + case 1: 366 + dev_priv->core_freq = 133; 367 
+ break; 368 + case 2: 369 + dev_priv->core_freq = 150; 370 + break; 371 + case 3: 372 + dev_priv->core_freq = 178; 373 + break; 374 + case 4: 375 + dev_priv->core_freq = 200; 376 + break; 377 + case 5: 378 + case 6: 379 + case 7: 380 + dev_priv->core_freq = 266; 381 + default: 382 + dev_priv->core_freq = 0; 383 + } 384 + } 385 + 386 + #define FB_REG06 0xD0810600 387 + #define FB_TOPAZ_DISABLE BIT0 388 + #define FB_MIPI_DISABLE BIT11 389 + #define FB_REG09 0xD0810900 390 + #define FB_SKU_MASK (BIT12|BIT13|BIT14) 391 + #define FB_SKU_SHIFT 12 392 + #define FB_SKU_100 0 393 + #define FB_SKU_100L 1 394 + #define FB_SKU_83 2 395 + #if 1 /* FIXME remove it after PO */ 396 + #define FB_GFX_CLK_DIVIDE_MASK (BIT20|BIT21|BIT22) 397 + #define FB_GFX_CLK_DIVIDE_SHIFT 20 398 + #define FB_VED_CLK_DIVIDE_MASK (BIT23|BIT24) 399 + #define FB_VED_CLK_DIVIDE_SHIFT 23 400 + #define FB_VEC_CLK_DIVIDE_MASK (BIT25|BIT26) 401 + #define FB_VEC_CLK_DIVIDE_SHIFT 25 402 + #endif /* FIXME remove it after PO */ 403 + 404 + 405 + bool mid_get_pci_revID(struct drm_psb_private *dev_priv) 406 + { 407 + uint32_t platform_rev_id = 0; 408 + struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0)); 409 + 410 + /*get the revison ID, B0:D2:F0;0x08 */ 411 + pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id); 412 + dev_priv->platform_rev_id = (uint8_t) platform_rev_id; 413 + pci_dev_put(pci_gfx_root); 414 + PSB_DEBUG_ENTRY("platform_rev_id is %x\n", 415 + dev_priv->platform_rev_id); 416 + 417 + return true; 418 + } 419 + 420 + static int psb_do_init(struct drm_device *dev) 421 + { 422 + struct drm_psb_private *dev_priv = 423 + (struct drm_psb_private *) dev->dev_private; 424 + struct ttm_bo_device *bdev = &dev_priv->bdev; 425 + struct psb_gtt *pg = dev_priv->pg; 426 + 427 + uint32_t stolen_gtt; 428 + uint32_t tt_start; 429 + uint32_t tt_pages; 430 + 431 + int ret = -ENOMEM; 432 + 433 + 434 + /* 435 + * Initialize sequence numbers for the different command 436 + * submission 
mechanisms. 437 + */ 438 + 439 + dev_priv->sequence[PSB_ENGINE_2D] = 0; 440 + dev_priv->sequence[PSB_ENGINE_VIDEO] = 0; 441 + dev_priv->sequence[LNC_ENGINE_ENCODE] = 0; 442 + 443 + if (pg->mmu_gatt_start & 0x0FFFFFFF) { 444 + DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n"); 445 + ret = -EINVAL; 446 + goto out_err; 447 + } 448 + 449 + stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4; 450 + stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT; 451 + stolen_gtt = 452 + (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages; 453 + 454 + dev_priv->gatt_free_offset = pg->mmu_gatt_start + 455 + (stolen_gtt << PAGE_SHIFT) * 1024; 456 + 457 + if (1 || drm_debug) { 458 + uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID); 459 + uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION); 460 + DRM_INFO("SGX core id = 0x%08x\n", core_id); 461 + DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n", 462 + (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >> 463 + _PSB_CC_REVISION_MAJOR_SHIFT, 464 + (core_rev & _PSB_CC_REVISION_MINOR_MASK) >> 465 + _PSB_CC_REVISION_MINOR_SHIFT); 466 + DRM_INFO 467 + ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n", 468 + (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >> 469 + _PSB_CC_REVISION_MAINTENANCE_SHIFT, 470 + (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >> 471 + _PSB_CC_REVISION_DESIGNER_SHIFT); 472 + } 473 + 474 + spin_lock_init(&dev_priv->irqmask_lock); 475 + 476 + tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ? 477 + pg->gatt_pages : PSB_TT_PRIV0_PLIMIT; 478 + tt_start = dev_priv->gatt_free_offset - pg->mmu_gatt_start; 479 + tt_pages -= tt_start >> PAGE_SHIFT; 480 + dev_priv->sizes.ta_mem_size = 0; 481 + 482 + 483 + /* TT region managed by TTM. 
*/ 484 + if (!ttm_bo_init_mm(bdev, TTM_PL_TT, 485 + pg->gatt_pages - 486 + (pg->ci_start >> PAGE_SHIFT) - 487 + ((dev_priv->ci_region_size + dev_priv->rar_region_size) 488 + >> PAGE_SHIFT))) { 489 + 490 + dev_priv->have_tt = 1; 491 + dev_priv->sizes.tt_size = 492 + (tt_pages << PAGE_SHIFT) / (1024 * 1024) / 2; 493 + } 494 + 495 + if (!ttm_bo_init_mm(bdev, 496 + DRM_PSB_MEM_MMU, 497 + PSB_MEM_TT_START >> PAGE_SHIFT)) { 498 + dev_priv->have_mem_mmu = 1; 499 + dev_priv->sizes.mmu_size = 500 + PSB_MEM_TT_START / (1024*1024); 501 + } 502 + 503 + 504 + PSB_DEBUG_INIT("Init MSVDX\n"); 505 + return 0; 506 + out_err: 507 + psb_do_takedown(dev); 508 + return ret; 509 + } 510 + 511 + static int psb_driver_unload(struct drm_device *dev) 512 + { 513 + struct drm_psb_private *dev_priv = 514 + (struct drm_psb_private *) dev->dev_private; 515 + 516 + /* Kill vblank etc here */ 517 + 518 + psb_backlight_exit(); /*writes minimum value to backlight HW reg */ 519 + 520 + if (drm_psb_no_fb == 0) 521 + psb_modeset_cleanup(dev); 522 + 523 + if (dev_priv) { 524 + psb_lid_timer_takedown(dev_priv); 525 + 526 + psb_do_takedown(dev); 527 + 528 + 529 + if (dev_priv->pf_pd) { 530 + psb_mmu_free_pagedir(dev_priv->pf_pd); 531 + dev_priv->pf_pd = NULL; 532 + } 533 + if (dev_priv->mmu) { 534 + struct psb_gtt *pg = dev_priv->pg; 535 + 536 + down_read(&pg->sem); 537 + psb_mmu_remove_pfn_sequence( 538 + psb_mmu_get_default_pd 539 + (dev_priv->mmu), 540 + pg->mmu_gatt_start, 541 + pg->vram_stolen_size >> PAGE_SHIFT); 542 + if (pg->ci_stolen_size != 0) 543 + psb_mmu_remove_pfn_sequence( 544 + psb_mmu_get_default_pd 545 + (dev_priv->mmu), 546 + pg->ci_start, 547 + pg->ci_stolen_size >> PAGE_SHIFT); 548 + if (pg->rar_stolen_size != 0) 549 + psb_mmu_remove_pfn_sequence( 550 + psb_mmu_get_default_pd 551 + (dev_priv->mmu), 552 + pg->rar_start, 553 + pg->rar_stolen_size >> PAGE_SHIFT); 554 + up_read(&pg->sem); 555 + psb_mmu_driver_takedown(dev_priv->mmu); 556 + dev_priv->mmu = NULL; 557 + } 558 + 
psb_gtt_takedown(dev_priv->pg, 1); 559 + if (dev_priv->scratch_page) { 560 + __free_page(dev_priv->scratch_page); 561 + dev_priv->scratch_page = NULL; 562 + } 563 + if (dev_priv->has_bo_device) { 564 + ttm_bo_device_release(&dev_priv->bdev); 565 + dev_priv->has_bo_device = 0; 566 + } 567 + if (dev_priv->has_fence_device) { 568 + ttm_fence_device_release(&dev_priv->fdev); 569 + dev_priv->has_fence_device = 0; 570 + } 571 + if (dev_priv->vdc_reg) { 572 + iounmap(dev_priv->vdc_reg); 573 + dev_priv->vdc_reg = NULL; 574 + } 575 + if (dev_priv->sgx_reg) { 576 + iounmap(dev_priv->sgx_reg); 577 + dev_priv->sgx_reg = NULL; 578 + } 579 + 580 + if (dev_priv->tdev) 581 + ttm_object_device_release(&dev_priv->tdev); 582 + 583 + if (dev_priv->has_global) 584 + psb_ttm_global_release(dev_priv); 585 + 586 + kfree(dev_priv); 587 + dev->dev_private = NULL; 588 + 589 + /*destory VBT data*/ 590 + psb_intel_destory_bios(dev); 591 + } 592 + 593 + ospm_power_uninit(); 594 + 595 + return 0; 596 + } 597 + 598 + 599 + static int psb_driver_load(struct drm_device *dev, unsigned long chipset) 600 + { 601 + struct drm_psb_private *dev_priv; 602 + struct ttm_bo_device *bdev; 603 + unsigned long resource_start; 604 + struct psb_gtt *pg; 605 + unsigned long irqflags; 606 + int ret = -ENOMEM; 607 + uint32_t tt_pages; 608 + 609 + DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION); 610 + 611 + DRM_INFO("Run drivers on Poulsbo platform!\n"); 612 + 613 + dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 614 + if (dev_priv == NULL) 615 + return -ENOMEM; 616 + INIT_LIST_HEAD(&dev_priv->video_ctx); 617 + 618 + dev_priv->num_pipe = 2; 619 + 620 + 621 + dev_priv->dev = dev; 622 + bdev = &dev_priv->bdev; 623 + 624 + ret = psb_ttm_global_init(dev_priv); 625 + if (unlikely(ret != 0)) 626 + goto out_err; 627 + dev_priv->has_global = 1; 628 + 629 + dev_priv->tdev = ttm_object_device_init 630 + (dev_priv->mem_global_ref.object, PSB_OBJECT_HASH_ORDER); 631 + if (unlikely(dev_priv->tdev == NULL)) 632 + goto out_err; 633 
+ 634 + mutex_init(&dev_priv->temp_mem); 635 + mutex_init(&dev_priv->cmdbuf_mutex); 636 + mutex_init(&dev_priv->reset_mutex); 637 + INIT_LIST_HEAD(&dev_priv->context.validate_list); 638 + INIT_LIST_HEAD(&dev_priv->context.kern_validate_list); 639 + 640 + /* mutex_init(&dev_priv->dsr_mutex); */ 641 + 642 + spin_lock_init(&dev_priv->reloc_lock); 643 + 644 + DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue); 645 + 646 + dev->dev_private = (void *) dev_priv; 647 + dev_priv->chipset = chipset; 648 + psb_set_uopt(&dev_priv->uopt); 649 + 650 + PSB_DEBUG_INIT("Mapping MMIO\n"); 651 + resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE); 652 + 653 + dev_priv->vdc_reg = 654 + ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE); 655 + if (!dev_priv->vdc_reg) 656 + goto out_err; 657 + 658 + dev_priv->sgx_reg = ioremap(resource_start + PSB_SGX_OFFSET, 659 + PSB_SGX_SIZE); 660 + 661 + if (!dev_priv->sgx_reg) 662 + goto out_err; 663 + 664 + psb_get_core_freq(dev); 665 + psb_intel_opregion_init(dev); 666 + psb_intel_init_bios(dev); 667 + 668 + PSB_DEBUG_INIT("Init TTM fence and BO driver\n"); 669 + 670 + /* Init OSPM support */ 671 + ospm_power_init(dev); 672 + 673 + ret = psb_ttm_fence_device_init(&dev_priv->fdev); 674 + if (unlikely(ret != 0)) 675 + goto out_err; 676 + 677 + dev_priv->has_fence_device = 1; 678 + ret = ttm_bo_device_init(bdev, 679 + dev_priv->bo_global_ref.ref.object, 680 + &psb_ttm_bo_driver, 681 + DRM_PSB_FILE_PAGE_OFFSET, false); 682 + if (unlikely(ret != 0)) 683 + goto out_err; 684 + dev_priv->has_bo_device = 1; 685 + ttm_lock_init(&dev_priv->ttm_lock); 686 + 687 + ret = -ENOMEM; 688 + 689 + dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO); 690 + if (!dev_priv->scratch_page) 691 + goto out_err; 692 + 693 + set_pages_uc(dev_priv->scratch_page, 1); 694 + 695 + dev_priv->pg = psb_gtt_alloc(dev); 696 + if (!dev_priv->pg) 697 + goto out_err; 698 + 699 + ret = psb_gtt_init(dev_priv->pg, 0); 700 + if (ret) 701 + goto out_err; 702 + 703 + ret 
= psb_gtt_mm_init(dev_priv->pg); 704 + if (ret) 705 + goto out_err; 706 + 707 + dev_priv->mmu = psb_mmu_driver_init((void *)0, 708 + drm_psb_trap_pagefaults, 0, 709 + dev_priv); 710 + if (!dev_priv->mmu) 711 + goto out_err; 712 + 713 + pg = dev_priv->pg; 714 + 715 + tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ? 716 + (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT; 717 + 718 + /* CI/RAR use the lower half of TT. */ 719 + pg->ci_start = (tt_pages / 2) << PAGE_SHIFT; 720 + pg->rar_start = pg->ci_start + pg->ci_stolen_size; 721 + 722 + 723 + /* 724 + * Make MSVDX/TOPAZ MMU aware of the CI stolen memory area. 725 + */ 726 + if (dev_priv->pg->ci_stolen_size != 0) { 727 + down_read(&pg->sem); 728 + ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd 729 + (dev_priv->mmu), 730 + dev_priv->ci_region_start >> PAGE_SHIFT, 731 + pg->mmu_gatt_start + pg->ci_start, 732 + pg->ci_stolen_size >> PAGE_SHIFT, 0); 733 + up_read(&pg->sem); 734 + if (ret) 735 + goto out_err; 736 + } 737 + 738 + /* 739 + * Make MSVDX/TOPAZ MMU aware of the rar stolen memory area. 740 + */ 741 + if (dev_priv->pg->rar_stolen_size != 0) { 742 + down_read(&pg->sem); 743 + ret = psb_mmu_insert_pfn_sequence( 744 + psb_mmu_get_default_pd(dev_priv->mmu), 745 + dev_priv->rar_region_start >> PAGE_SHIFT, 746 + pg->mmu_gatt_start + pg->rar_start, 747 + pg->rar_stolen_size >> PAGE_SHIFT, 0); 748 + up_read(&pg->sem); 749 + if (ret) 750 + goto out_err; 751 + } 752 + 753 + dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0); 754 + if (!dev_priv->pf_pd) 755 + goto out_err; 756 + 757 + psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0); 758 + psb_mmu_set_pd_context(dev_priv->pf_pd, 1); 759 + 760 + spin_lock_init(&dev_priv->sequence_lock); 761 + 762 + PSB_DEBUG_INIT("Begin to init MSVDX/Topaz\n"); 763 + 764 + ret = psb_do_init(dev); 765 + if (ret) 766 + return ret; 767 + 768 + /** 769 + * Init lid switch timer. 
770 + * NOTE: must do this after psb_intel_opregion_init 771 + * and psb_backlight_init 772 + */ 773 + if (dev_priv->lid_state) 774 + psb_lid_timer_init(dev_priv); 775 + 776 + ret = drm_vblank_init(dev, dev_priv->num_pipe); 777 + if (ret) 778 + goto out_err; 779 + 780 + /* 781 + * Install interrupt handlers prior to powering off SGX or else we will 782 + * crash. 783 + */ 784 + dev_priv->vdc_irq_mask = 0; 785 + dev_priv->pipestat[0] = 0; 786 + dev_priv->pipestat[1] = 0; 787 + dev_priv->pipestat[2] = 0; 788 + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); 789 + PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R); 790 + PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R); 791 + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 792 + if (drm_core_check_feature(dev, DRIVER_MODESET)) 793 + drm_irq_install(dev); 794 + 795 + dev->vblank_disable_allowed = 1; 796 + 797 + dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 798 + 799 + dev->driver->get_vblank_counter = psb_get_vblank_counter; 800 + 801 + if (drm_psb_no_fb == 0) { 802 + psb_modeset_init(dev); 803 + psb_fbdev_init(dev); 804 + drm_kms_helper_poll_init(dev); 805 + } 806 + 807 + ret = psb_backlight_init(dev); 808 + if (ret) 809 + return ret; 810 + #if 0 811 + /*enable runtime pm at last*/ 812 + pm_runtime_enable(&dev->pdev->dev); 813 + pm_runtime_set_active(&dev->pdev->dev); 814 + #endif 815 + /*Intel drm driver load is done, continue doing pvr load*/ 816 + DRM_DEBUG("Pvr driver load\n"); 817 + 818 + /* if (PVRCore_Init() < 0) 819 + goto out_err; */ 820 + /* if (MRSTLFBInit(dev) < 0) 821 + goto out_err;*/ 822 + return 0; 823 + out_err: 824 + psb_driver_unload(dev); 825 + return ret; 826 + } 827 + 828 + int psb_driver_device_is_agp(struct drm_device *dev) 829 + { 830 + return 0; 831 + } 832 + 833 + 834 + static int psb_vt_leave_ioctl(struct drm_device *dev, void *data, 835 + struct drm_file *file_priv) 836 + { 837 + struct drm_psb_private *dev_priv = psb_priv(dev); 838 + struct ttm_bo_device *bdev = 
	    &dev_priv->bdev;
	struct ttm_mem_type_manager *man;
	int ret;

	/* Take the TTM VT lock so no other client can touch buffer objects
	   while the hardware is handed over to the console. */
	ret = ttm_vt_lock(&dev_priv->ttm_lock, 1,
			  psb_fpriv(file_priv)->tfile);
	if (unlikely(ret != 0))
		return ret;

	/* Evict everything out of the aperture (TT) domain */
	ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_TT);
	if (unlikely(ret != 0))
		goto out_unlock;

	man = &bdev->man[TTM_PL_TT];	/* only used by the disabled check below */

#if 0 /* What to do with this ? */
	if (unlikely(!drm_mm_clean(&man->manager)))
		DRM_INFO("Warning: GATT was not clean after VT switch.\n");
#endif

	ttm_bo_swapout_all(&dev_priv->bdev);

	/* NOTE(review): the VT lock is intentionally left held here; it is
	   dropped again by psb_vt_enter_ioctl when the VT is re-entered. */
	return 0;
out_unlock:
	(void) ttm_vt_unlock(&dev_priv->ttm_lock);
	return ret;
}

/* VT enter: release the VT lock taken in psb_vt_leave_ioctl */
static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_psb_private *dev_priv = psb_priv(dev);
	return ttm_vt_unlock(&dev_priv->ttm_lock);
}

/* Copy the memory-region sizes gathered at init time out to user space */
static int psb_sizes_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_psb_private *dev_priv = psb_priv(dev);
	struct drm_psb_sizes_arg *arg =
		(struct drm_psb_sizes_arg *) data;

	*arg = dev_priv->sizes;
	return 0;
}

/*
 * Save or restore the state of one CRTC or one connector on behalf of
 * user space; the target object and direction come from arg.
 */
static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
			      struct drm_file *file_priv)
{
	uint32_t flags;
	uint32_t obj_id;
	struct drm_mode_object *obj;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_psb_dc_state_arg *arg =
		(struct drm_psb_dc_state_arg *)data;

	flags = arg->flags;
	obj_id = arg->obj_id;

	if (flags & PSB_DC_CRTC_MASK) {
		obj = drm_mode_object_find(dev, obj_id,
					   DRM_MODE_OBJECT_CRTC);
		if (!obj) {
			DRM_DEBUG("Invalid CRTC object.\n");
			return -EINVAL;
		}

		crtc = obj_to_crtc(obj);

		/* save/restore only makes sense for an active CRTC */
		mutex_lock(&dev->mode_config.mutex);
		if
		    (drm_helper_crtc_in_use(crtc)) {
			if (flags & PSB_DC_CRTC_SAVE)
				crtc->funcs->save(crtc);
			else
				crtc->funcs->restore(crtc);
		}
		mutex_unlock(&dev->mode_config.mutex);

		return 0;
	} else if (flags & PSB_DC_OUTPUT_MASK) {
		obj = drm_mode_object_find(dev, obj_id,
					   DRM_MODE_OBJECT_CONNECTOR);
		if (!obj) {
			DRM_DEBUG("Invalid connector id.\n");
			return -EINVAL;
		}

		connector = obj_to_connector(obj);
		if (flags & PSB_DC_OUTPUT_SAVE)
			connector->funcs->save(connector);
		else
			connector->funcs->restore(connector);

		return 0;
	}

	DRM_DEBUG("Bad flags 0x%x\n", flags);
	return -EINVAL;
}

/* Record the secondary backlight adjustment and re-apply the brightness */
static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_psb_private *dev_priv = psb_priv(dev);
	uint32_t *arg = data;
	/* NOTE(review): bd is an uninitialised on-stack backlight device;
	 * this relies on psb_get_brightness()/psb_set_brightness() not
	 * reading anything from it beyond props.brightness — confirm. */
	struct backlight_device bd;
	dev_priv->blc_adj2 = *arg;

#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
	bd.props.brightness = psb_get_brightness(&bd);
	psb_set_brightness(&bd);
#endif
	return 0;
}

/* Record the primary backlight adjustment and re-apply the brightness */
static int psb_adb_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_psb_private *dev_priv = psb_priv(dev);
	uint32_t *arg = data;
	/* NOTE(review): same uninitialised bd pattern as psb_dpst_bl_ioctl */
	struct backlight_device bd;
	dev_priv->blc_adj1 = *arg;

#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
	bd.props.brightness = psb_get_brightness(&bd);
	psb_set_brightness(&bd);
#endif
	return 0;
}

/* return the current mode to the dpst module */
static int psb_dpst_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_psb_private *dev_priv = psb_priv(dev);
	uint32_t *arg = data;
	uint32_t x;
	uint32_t y;
	uint32_t reg;

	/* Need the display island powered to read PIPEASRC; if it is off,
	   report nothing rather than forcing it on. */
	if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
				       OSPM_UHB_ONLY_IF_ON))
		return 0;

	reg = PSB_RVDC32(PIPEASRC);

	ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);

	/* horizontal is the left 16 bits */
	x = reg >> 16;
	/* vertical is the right 16 bits */
	y = reg & 0x0000ffff;

	/* the values are the image size minus one */
	x++;
	y++;

	/* pack width:height back into one word for the caller */
	*arg = (x << 16) | y;

	return 0;
}
/* Load a user-supplied 256-entry gamma LUT into the CRTC driving a
 * connector, then program it into the hardware. */
static int psb_gamma_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_psb_dpst_lut_arg *lut_arg = data;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct psb_intel_crtc *psb_intel_crtc;
	int i = 0;
	int32_t obj_id;

	obj_id = lut_arg->output_id;
	obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
	if (!obj) {
		DRM_DEBUG("Invalid Connector object.\n");
		return -EINVAL;
	}

	connector = obj_to_connector(obj);
	/* NOTE(review): connector->encoder may be NULL for a connector with
	 * no active encoder; this dereference is unchecked — confirm that
	 * callers only pass bound connectors. */
	crtc = connector->encoder->crtc;
	psb_intel_crtc = to_psb_intel_crtc(crtc);

	for (i = 0; i < 256; i++)
		psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];

	psb_intel_crtc_load_lut(crtc);

	return 0;
}

/* Multiplexed mode operations from user space: either point the display
 * controller at a new framebuffer, or validate a user-supplied mode
 * against a connector. */
static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	uint32_t obj_id;
	uint16_t op;
	struct drm_mode_modeinfo *umode;
	struct drm_display_mode *mode = NULL;
	struct drm_psb_mode_operation_arg *arg;
	struct drm_mode_object *obj;
	struct drm_connector *connector;
	struct drm_framebuffer *drm_fb;
	struct psb_framebuffer *psb_fb;
	struct drm_connector_helper_funcs *connector_funcs;
	int ret = 0;
	int resp = MODE_OK;
	struct drm_psb_private *dev_priv = psb_priv(dev);

	arg = (struct drm_psb_mode_operation_arg *)data;

obj_id = arg->obj_id; 1049 + op = arg->operation; 1050 + 1051 + switch (op) { 1052 + case PSB_MODE_OPERATION_SET_DC_BASE: 1053 + obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_FB); 1054 + if (!obj) { 1055 + DRM_ERROR("Invalid FB id %d\n", obj_id); 1056 + return -EINVAL; 1057 + } 1058 + 1059 + drm_fb = obj_to_fb(obj); 1060 + psb_fb = to_psb_fb(drm_fb); 1061 + 1062 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 1063 + OSPM_UHB_ONLY_IF_ON)) { 1064 + REG_WRITE(DSPASURF, psb_fb->offset); 1065 + REG_READ(DSPASURF); 1066 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 1067 + } else { 1068 + dev_priv->saveDSPASURF = psb_fb->offset; 1069 + } 1070 + 1071 + return 0; 1072 + case PSB_MODE_OPERATION_MODE_VALID: 1073 + umode = &arg->mode; 1074 + 1075 + mutex_lock(&dev->mode_config.mutex); 1076 + 1077 + obj = drm_mode_object_find(dev, obj_id, 1078 + DRM_MODE_OBJECT_CONNECTOR); 1079 + if (!obj) { 1080 + ret = -EINVAL; 1081 + goto mode_op_out; 1082 + } 1083 + 1084 + connector = obj_to_connector(obj); 1085 + 1086 + mode = drm_mode_create(dev); 1087 + if (!mode) { 1088 + ret = -ENOMEM; 1089 + goto mode_op_out; 1090 + } 1091 + 1092 + /* drm_crtc_convert_umode(mode, umode); */ 1093 + { 1094 + mode->clock = umode->clock; 1095 + mode->hdisplay = umode->hdisplay; 1096 + mode->hsync_start = umode->hsync_start; 1097 + mode->hsync_end = umode->hsync_end; 1098 + mode->htotal = umode->htotal; 1099 + mode->hskew = umode->hskew; 1100 + mode->vdisplay = umode->vdisplay; 1101 + mode->vsync_start = umode->vsync_start; 1102 + mode->vsync_end = umode->vsync_end; 1103 + mode->vtotal = umode->vtotal; 1104 + mode->vscan = umode->vscan; 1105 + mode->vrefresh = umode->vrefresh; 1106 + mode->flags = umode->flags; 1107 + mode->type = umode->type; 1108 + strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN); 1109 + mode->name[DRM_DISPLAY_MODE_LEN-1] = 0; 1110 + } 1111 + 1112 + connector_funcs = (struct drm_connector_helper_funcs *) 1113 + connector->helper_private; 1114 + 1115 + if 
(connector_funcs->mode_valid) { 1116 + resp = connector_funcs->mode_valid(connector, mode); 1117 + arg->data = (void *)resp; 1118 + } 1119 + 1120 + /*do some clean up work*/ 1121 + if (mode) 1122 + drm_mode_destroy(dev, mode); 1123 + mode_op_out: 1124 + mutex_unlock(&dev->mode_config.mutex); 1125 + return ret; 1126 + 1127 + default: 1128 + DRM_DEBUG("Unsupported psb mode operation"); 1129 + return -EOPNOTSUPP; 1130 + } 1131 + 1132 + return 0; 1133 + } 1134 + 1135 + static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data, 1136 + struct drm_file *file_priv) 1137 + { 1138 + struct drm_psb_private *dev_priv = psb_priv(dev); 1139 + struct drm_psb_stolen_memory_arg *arg = data; 1140 + 1141 + arg->base = dev_priv->pg->stolen_base; 1142 + arg->size = dev_priv->pg->vram_stolen_size; 1143 + 1144 + return 0; 1145 + } 1146 + 1147 + static int psb_register_rw_ioctl(struct drm_device *dev, void *data, 1148 + struct drm_file *file_priv) 1149 + { 1150 + struct drm_psb_private *dev_priv = psb_priv(dev); 1151 + struct drm_psb_register_rw_arg *arg = data; 1152 + UHBUsage usage = 1153 + arg->b_force_hw_on ? 
OSPM_UHB_FORCE_POWER_ON : OSPM_UHB_ONLY_IF_ON; 1154 + 1155 + if (arg->display_write_mask != 0) { 1156 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) { 1157 + if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS) 1158 + PSB_WVDC32(arg->display.pfit_controls, 1159 + PFIT_CONTROL); 1160 + if (arg->display_write_mask & 1161 + REGRWBITS_PFIT_AUTOSCALE_RATIOS) 1162 + PSB_WVDC32(arg->display.pfit_autoscale_ratios, 1163 + PFIT_AUTO_RATIOS); 1164 + if (arg->display_write_mask & 1165 + REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS) 1166 + PSB_WVDC32( 1167 + arg->display.pfit_programmed_scale_ratios, 1168 + PFIT_PGM_RATIOS); 1169 + if (arg->display_write_mask & REGRWBITS_PIPEASRC) 1170 + PSB_WVDC32(arg->display.pipeasrc, 1171 + PIPEASRC); 1172 + if (arg->display_write_mask & REGRWBITS_PIPEBSRC) 1173 + PSB_WVDC32(arg->display.pipebsrc, 1174 + PIPEBSRC); 1175 + if (arg->display_write_mask & REGRWBITS_VTOTAL_A) 1176 + PSB_WVDC32(arg->display.vtotal_a, 1177 + VTOTAL_A); 1178 + if (arg->display_write_mask & REGRWBITS_VTOTAL_B) 1179 + PSB_WVDC32(arg->display.vtotal_b, 1180 + VTOTAL_B); 1181 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 1182 + } else { 1183 + if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS) 1184 + dev_priv->savePFIT_CONTROL = 1185 + arg->display.pfit_controls; 1186 + if (arg->display_write_mask & 1187 + REGRWBITS_PFIT_AUTOSCALE_RATIOS) 1188 + dev_priv->savePFIT_AUTO_RATIOS = 1189 + arg->display.pfit_autoscale_ratios; 1190 + if (arg->display_write_mask & 1191 + REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS) 1192 + dev_priv->savePFIT_PGM_RATIOS = 1193 + arg->display.pfit_programmed_scale_ratios; 1194 + if (arg->display_write_mask & REGRWBITS_PIPEASRC) 1195 + dev_priv->savePIPEASRC = arg->display.pipeasrc; 1196 + if (arg->display_write_mask & REGRWBITS_PIPEBSRC) 1197 + dev_priv->savePIPEBSRC = arg->display.pipebsrc; 1198 + if (arg->display_write_mask & REGRWBITS_VTOTAL_A) 1199 + dev_priv->saveVTOTAL_A = arg->display.vtotal_a; 1200 + if (arg->display_write_mask 
& REGRWBITS_VTOTAL_B) 1201 + dev_priv->saveVTOTAL_B = arg->display.vtotal_b; 1202 + } 1203 + } 1204 + 1205 + if (arg->display_read_mask != 0) { 1206 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) { 1207 + if (arg->display_read_mask & 1208 + REGRWBITS_PFIT_CONTROLS) 1209 + arg->display.pfit_controls = 1210 + PSB_RVDC32(PFIT_CONTROL); 1211 + if (arg->display_read_mask & 1212 + REGRWBITS_PFIT_AUTOSCALE_RATIOS) 1213 + arg->display.pfit_autoscale_ratios = 1214 + PSB_RVDC32(PFIT_AUTO_RATIOS); 1215 + if (arg->display_read_mask & 1216 + REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS) 1217 + arg->display.pfit_programmed_scale_ratios = 1218 + PSB_RVDC32(PFIT_PGM_RATIOS); 1219 + if (arg->display_read_mask & REGRWBITS_PIPEASRC) 1220 + arg->display.pipeasrc = PSB_RVDC32(PIPEASRC); 1221 + if (arg->display_read_mask & REGRWBITS_PIPEBSRC) 1222 + arg->display.pipebsrc = PSB_RVDC32(PIPEBSRC); 1223 + if (arg->display_read_mask & REGRWBITS_VTOTAL_A) 1224 + arg->display.vtotal_a = PSB_RVDC32(VTOTAL_A); 1225 + if (arg->display_read_mask & REGRWBITS_VTOTAL_B) 1226 + arg->display.vtotal_b = PSB_RVDC32(VTOTAL_B); 1227 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 1228 + } else { 1229 + if (arg->display_read_mask & 1230 + REGRWBITS_PFIT_CONTROLS) 1231 + arg->display.pfit_controls = 1232 + dev_priv->savePFIT_CONTROL; 1233 + if (arg->display_read_mask & 1234 + REGRWBITS_PFIT_AUTOSCALE_RATIOS) 1235 + arg->display.pfit_autoscale_ratios = 1236 + dev_priv->savePFIT_AUTO_RATIOS; 1237 + if (arg->display_read_mask & 1238 + REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS) 1239 + arg->display.pfit_programmed_scale_ratios = 1240 + dev_priv->savePFIT_PGM_RATIOS; 1241 + if (arg->display_read_mask & REGRWBITS_PIPEASRC) 1242 + arg->display.pipeasrc = dev_priv->savePIPEASRC; 1243 + if (arg->display_read_mask & REGRWBITS_PIPEBSRC) 1244 + arg->display.pipebsrc = dev_priv->savePIPEBSRC; 1245 + if (arg->display_read_mask & REGRWBITS_VTOTAL_A) 1246 + arg->display.vtotal_a = dev_priv->saveVTOTAL_A; 1247 + if 
(arg->display_read_mask & REGRWBITS_VTOTAL_B) 1248 + arg->display.vtotal_b = dev_priv->saveVTOTAL_B; 1249 + } 1250 + } 1251 + 1252 + if (arg->overlay_write_mask != 0) { 1253 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) { 1254 + if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) { 1255 + PSB_WVDC32(arg->overlay.OGAMC5, OV_OGAMC5); 1256 + PSB_WVDC32(arg->overlay.OGAMC4, OV_OGAMC4); 1257 + PSB_WVDC32(arg->overlay.OGAMC3, OV_OGAMC3); 1258 + PSB_WVDC32(arg->overlay.OGAMC2, OV_OGAMC2); 1259 + PSB_WVDC32(arg->overlay.OGAMC1, OV_OGAMC1); 1260 + PSB_WVDC32(arg->overlay.OGAMC0, OV_OGAMC0); 1261 + } 1262 + if (arg->overlay_write_mask & OVC_REGRWBITS_OGAM_ALL) { 1263 + PSB_WVDC32(arg->overlay.OGAMC5, OVC_OGAMC5); 1264 + PSB_WVDC32(arg->overlay.OGAMC4, OVC_OGAMC4); 1265 + PSB_WVDC32(arg->overlay.OGAMC3, OVC_OGAMC3); 1266 + PSB_WVDC32(arg->overlay.OGAMC2, OVC_OGAMC2); 1267 + PSB_WVDC32(arg->overlay.OGAMC1, OVC_OGAMC1); 1268 + PSB_WVDC32(arg->overlay.OGAMC0, OVC_OGAMC0); 1269 + } 1270 + 1271 + if (arg->overlay_write_mask & OV_REGRWBITS_OVADD) { 1272 + PSB_WVDC32(arg->overlay.OVADD, OV_OVADD); 1273 + 1274 + if (arg->overlay.b_wait_vblank) { 1275 + /* Wait for 20ms.*/ 1276 + unsigned long vblank_timeout = jiffies 1277 + + HZ/50; 1278 + uint32_t temp; 1279 + while (time_before_eq(jiffies, 1280 + vblank_timeout)) { 1281 + temp = PSB_RVDC32(OV_DOVASTA); 1282 + if ((temp & (0x1 << 31)) != 0) 1283 + break; 1284 + cpu_relax(); 1285 + } 1286 + } 1287 + } 1288 + if (arg->overlay_write_mask & OVC_REGRWBITS_OVADD) { 1289 + PSB_WVDC32(arg->overlay.OVADD, OVC_OVADD); 1290 + if (arg->overlay.b_wait_vblank) { 1291 + /* Wait for 20ms.*/ 1292 + unsigned long vblank_timeout = 1293 + jiffies + HZ/50; 1294 + uint32_t temp; 1295 + while (time_before_eq(jiffies, 1296 + vblank_timeout)) { 1297 + temp = PSB_RVDC32(OVC_DOVCSTA); 1298 + if ((temp & (0x1 << 31)) != 0) 1299 + break; 1300 + cpu_relax(); 1301 + } 1302 + } 1303 + } 1304 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 1305 + } 
else { 1306 + if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) { 1307 + dev_priv->saveOV_OGAMC5 = arg->overlay.OGAMC5; 1308 + dev_priv->saveOV_OGAMC4 = arg->overlay.OGAMC4; 1309 + dev_priv->saveOV_OGAMC3 = arg->overlay.OGAMC3; 1310 + dev_priv->saveOV_OGAMC2 = arg->overlay.OGAMC2; 1311 + dev_priv->saveOV_OGAMC1 = arg->overlay.OGAMC1; 1312 + dev_priv->saveOV_OGAMC0 = arg->overlay.OGAMC0; 1313 + } 1314 + if (arg->overlay_write_mask & OVC_REGRWBITS_OGAM_ALL) { 1315 + dev_priv->saveOVC_OGAMC5 = arg->overlay.OGAMC5; 1316 + dev_priv->saveOVC_OGAMC4 = arg->overlay.OGAMC4; 1317 + dev_priv->saveOVC_OGAMC3 = arg->overlay.OGAMC3; 1318 + dev_priv->saveOVC_OGAMC2 = arg->overlay.OGAMC2; 1319 + dev_priv->saveOVC_OGAMC1 = arg->overlay.OGAMC1; 1320 + dev_priv->saveOVC_OGAMC0 = arg->overlay.OGAMC0; 1321 + } 1322 + if (arg->overlay_write_mask & OV_REGRWBITS_OVADD) 1323 + dev_priv->saveOV_OVADD = arg->overlay.OVADD; 1324 + if (arg->overlay_write_mask & OVC_REGRWBITS_OVADD) 1325 + dev_priv->saveOVC_OVADD = arg->overlay.OVADD; 1326 + } 1327 + } 1328 + 1329 + if (arg->overlay_read_mask != 0) { 1330 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) { 1331 + if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) { 1332 + arg->overlay.OGAMC5 = PSB_RVDC32(OV_OGAMC5); 1333 + arg->overlay.OGAMC4 = PSB_RVDC32(OV_OGAMC4); 1334 + arg->overlay.OGAMC3 = PSB_RVDC32(OV_OGAMC3); 1335 + arg->overlay.OGAMC2 = PSB_RVDC32(OV_OGAMC2); 1336 + arg->overlay.OGAMC1 = PSB_RVDC32(OV_OGAMC1); 1337 + arg->overlay.OGAMC0 = PSB_RVDC32(OV_OGAMC0); 1338 + } 1339 + if (arg->overlay_read_mask & OVC_REGRWBITS_OGAM_ALL) { 1340 + arg->overlay.OGAMC5 = PSB_RVDC32(OVC_OGAMC5); 1341 + arg->overlay.OGAMC4 = PSB_RVDC32(OVC_OGAMC4); 1342 + arg->overlay.OGAMC3 = PSB_RVDC32(OVC_OGAMC3); 1343 + arg->overlay.OGAMC2 = PSB_RVDC32(OVC_OGAMC2); 1344 + arg->overlay.OGAMC1 = PSB_RVDC32(OVC_OGAMC1); 1345 + arg->overlay.OGAMC0 = PSB_RVDC32(OVC_OGAMC0); 1346 + } 1347 + if (arg->overlay_read_mask & OV_REGRWBITS_OVADD) 1348 + 
arg->overlay.OVADD = PSB_RVDC32(OV_OVADD); 1349 + if (arg->overlay_read_mask & OVC_REGRWBITS_OVADD) 1350 + arg->overlay.OVADD = PSB_RVDC32(OVC_OVADD); 1351 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 1352 + } else { 1353 + if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) { 1354 + arg->overlay.OGAMC5 = dev_priv->saveOV_OGAMC5; 1355 + arg->overlay.OGAMC4 = dev_priv->saveOV_OGAMC4; 1356 + arg->overlay.OGAMC3 = dev_priv->saveOV_OGAMC3; 1357 + arg->overlay.OGAMC2 = dev_priv->saveOV_OGAMC2; 1358 + arg->overlay.OGAMC1 = dev_priv->saveOV_OGAMC1; 1359 + arg->overlay.OGAMC0 = dev_priv->saveOV_OGAMC0; 1360 + } 1361 + if (arg->overlay_read_mask & OVC_REGRWBITS_OGAM_ALL) { 1362 + arg->overlay.OGAMC5 = dev_priv->saveOVC_OGAMC5; 1363 + arg->overlay.OGAMC4 = dev_priv->saveOVC_OGAMC4; 1364 + arg->overlay.OGAMC3 = dev_priv->saveOVC_OGAMC3; 1365 + arg->overlay.OGAMC2 = dev_priv->saveOVC_OGAMC2; 1366 + arg->overlay.OGAMC1 = dev_priv->saveOVC_OGAMC1; 1367 + arg->overlay.OGAMC0 = dev_priv->saveOVC_OGAMC0; 1368 + } 1369 + if (arg->overlay_read_mask & OV_REGRWBITS_OVADD) 1370 + arg->overlay.OVADD = dev_priv->saveOV_OVADD; 1371 + if (arg->overlay_read_mask & OVC_REGRWBITS_OVADD) 1372 + arg->overlay.OVADD = dev_priv->saveOVC_OVADD; 1373 + } 1374 + } 1375 + 1376 + if (arg->sprite_enable_mask != 0) { 1377 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) { 1378 + PSB_WVDC32(0x1F3E, DSPARB); 1379 + PSB_WVDC32(arg->sprite.dspa_control 1380 + | PSB_RVDC32(DSPACNTR), DSPACNTR); 1381 + PSB_WVDC32(arg->sprite.dspa_key_value, DSPAKEYVAL); 1382 + PSB_WVDC32(arg->sprite.dspa_key_mask, DSPAKEYMASK); 1383 + PSB_WVDC32(PSB_RVDC32(DSPASURF), DSPASURF); 1384 + PSB_RVDC32(DSPASURF); 1385 + PSB_WVDC32(arg->sprite.dspc_control, DSPCCNTR); 1386 + PSB_WVDC32(arg->sprite.dspc_stride, DSPCSTRIDE); 1387 + PSB_WVDC32(arg->sprite.dspc_position, DSPCPOS); 1388 + PSB_WVDC32(arg->sprite.dspc_linear_offset, DSPCLINOFF); 1389 + PSB_WVDC32(arg->sprite.dspc_size, DSPCSIZE); 1390 + 
PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF); 1391 + PSB_RVDC32(DSPCSURF); 1392 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 1393 + } 1394 + } 1395 + 1396 + if (arg->sprite_disable_mask != 0) { 1397 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) { 1398 + PSB_WVDC32(0x3F3E, DSPARB); 1399 + PSB_WVDC32(0x0, DSPCCNTR); 1400 + PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF); 1401 + PSB_RVDC32(DSPCSURF); 1402 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 1403 + } 1404 + } 1405 + 1406 + if (arg->subpicture_enable_mask != 0) { 1407 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) { 1408 + uint32_t temp; 1409 + if (arg->subpicture_enable_mask & REGRWBITS_DSPACNTR) { 1410 + temp = PSB_RVDC32(DSPACNTR); 1411 + temp &= ~DISPPLANE_PIXFORMAT_MASK; 1412 + temp &= ~DISPPLANE_BOTTOM; 1413 + temp |= DISPPLANE_32BPP; 1414 + PSB_WVDC32(temp, DSPACNTR); 1415 + 1416 + temp = PSB_RVDC32(DSPABASE); 1417 + PSB_WVDC32(temp, DSPABASE); 1418 + PSB_RVDC32(DSPABASE); 1419 + temp = PSB_RVDC32(DSPASURF); 1420 + PSB_WVDC32(temp, DSPASURF); 1421 + PSB_RVDC32(DSPASURF); 1422 + } 1423 + if (arg->subpicture_enable_mask & REGRWBITS_DSPBCNTR) { 1424 + temp = PSB_RVDC32(DSPBCNTR); 1425 + temp &= ~DISPPLANE_PIXFORMAT_MASK; 1426 + temp &= ~DISPPLANE_BOTTOM; 1427 + temp |= DISPPLANE_32BPP; 1428 + PSB_WVDC32(temp, DSPBCNTR); 1429 + 1430 + temp = PSB_RVDC32(DSPBBASE); 1431 + PSB_WVDC32(temp, DSPBBASE); 1432 + PSB_RVDC32(DSPBBASE); 1433 + temp = PSB_RVDC32(DSPBSURF); 1434 + PSB_WVDC32(temp, DSPBSURF); 1435 + PSB_RVDC32(DSPBSURF); 1436 + } 1437 + if (arg->subpicture_enable_mask & REGRWBITS_DSPCCNTR) { 1438 + temp = PSB_RVDC32(DSPCCNTR); 1439 + temp &= ~DISPPLANE_PIXFORMAT_MASK; 1440 + temp &= ~DISPPLANE_BOTTOM; 1441 + temp |= DISPPLANE_32BPP; 1442 + PSB_WVDC32(temp, DSPCCNTR); 1443 + 1444 + temp = PSB_RVDC32(DSPCBASE); 1445 + PSB_WVDC32(temp, DSPCBASE); 1446 + PSB_RVDC32(DSPCBASE); 1447 + temp = PSB_RVDC32(DSPCSURF); 1448 + PSB_WVDC32(temp, DSPCSURF); 1449 + PSB_RVDC32(DSPCSURF); 
1450 + } 1451 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 1452 + } 1453 + } 1454 + 1455 + if (arg->subpicture_disable_mask != 0) { 1456 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) { 1457 + uint32_t temp; 1458 + if (arg->subpicture_disable_mask & REGRWBITS_DSPACNTR) { 1459 + temp = PSB_RVDC32(DSPACNTR); 1460 + temp &= ~DISPPLANE_PIXFORMAT_MASK; 1461 + temp |= DISPPLANE_32BPP_NO_ALPHA; 1462 + PSB_WVDC32(temp, DSPACNTR); 1463 + 1464 + temp = PSB_RVDC32(DSPABASE); 1465 + PSB_WVDC32(temp, DSPABASE); 1466 + PSB_RVDC32(DSPABASE); 1467 + temp = PSB_RVDC32(DSPASURF); 1468 + PSB_WVDC32(temp, DSPASURF); 1469 + PSB_RVDC32(DSPASURF); 1470 + } 1471 + if (arg->subpicture_disable_mask & REGRWBITS_DSPBCNTR) { 1472 + temp = PSB_RVDC32(DSPBCNTR); 1473 + temp &= ~DISPPLANE_PIXFORMAT_MASK; 1474 + temp |= DISPPLANE_32BPP_NO_ALPHA; 1475 + PSB_WVDC32(temp, DSPBCNTR); 1476 + 1477 + temp = PSB_RVDC32(DSPBBASE); 1478 + PSB_WVDC32(temp, DSPBBASE); 1479 + PSB_RVDC32(DSPBBASE); 1480 + temp = PSB_RVDC32(DSPBSURF); 1481 + PSB_WVDC32(temp, DSPBSURF); 1482 + PSB_RVDC32(DSPBSURF); 1483 + } 1484 + if (arg->subpicture_disable_mask & REGRWBITS_DSPCCNTR) { 1485 + temp = PSB_RVDC32(DSPCCNTR); 1486 + temp &= ~DISPPLANE_PIXFORMAT_MASK; 1487 + temp |= DISPPLANE_32BPP_NO_ALPHA; 1488 + PSB_WVDC32(temp, DSPCCNTR); 1489 + 1490 + temp = PSB_RVDC32(DSPCBASE); 1491 + PSB_WVDC32(temp, DSPCBASE); 1492 + PSB_RVDC32(DSPCBASE); 1493 + temp = PSB_RVDC32(DSPCSURF); 1494 + PSB_WVDC32(temp, DSPCSURF); 1495 + PSB_RVDC32(DSPCSURF); 1496 + } 1497 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 1498 + } 1499 + } 1500 + 1501 + return 0; 1502 + } 1503 + 1504 + /* always available as we are SIGIO'd */ 1505 + static unsigned int psb_poll(struct file *filp, 1506 + struct poll_table_struct *wait) 1507 + { 1508 + return POLLIN | POLLRDNORM; 1509 + } 1510 + 1511 + /* Not sure what we will need yet - in the PVR driver this disappears into 1512 + a tangle of abstracted handlers and per process crap */ 1513 + 1514 + 
/*
 * Per-DRM-file private data. Currently just a placeholder; the real
 * per-file state lived in the PVR services layer (see the stubbed
 * PVRSRVOpen call below).
 *
 * NOTE(review): psb_fpriv() in psb_drv.h reinterprets drm_file->driver_priv
 * as a struct psb_file_data and dereferences ->priv; that only works while
 * the two layouts stay compatible — see the ACFIXME on psb_file_data.
 */
struct psb_priv {
	int dummy;
};

/*
 * Allocate the per-file private data and hang it off the DRM file.
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
{
	struct psb_priv *psb = kzalloc(sizeof(struct psb_priv), GFP_KERNEL);
	if (psb == NULL)
		return -ENOMEM;
	priv->driver_priv = psb;
	DRM_DEBUG("\n");
	/*return PVRSRVOpen(dev, priv);*/
	return 0;
}

/* Release the per-file private data allocated in psb_driver_open(). */
static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
{
	kfree(priv->driver_priv);
	priv->driver_priv = NULL;
}

/*
 * Top-level ioctl dispatch. Driver-private and TTM ioctls are thread-safe
 * and go straight to drm_ioctl(); everything else also funnels through
 * drm_ioctl(), which provides the locking for the legacy paths.
 */
static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	static unsigned int runtime_allowed;
	unsigned int nr = DRM_IOCTL_NR(cmd);

	DRM_DEBUG("cmd = %x, nr = %x\n", cmd, nr);

	/*
	 * NOTE(review): runtime_allowed is a function-local static that
	 * starts at 0 and nothing visible here ever sets it to 1, so this
	 * pm_runtime_allow() branch appears unreachable as written —
	 * presumably lost in the strip-down from the PVR driver; confirm
	 * against the original source before relying on runtime PM here.
	 */
	if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
		runtime_allowed++;
		pm_runtime_allow(&dev->pdev->dev);
		dev_priv->rpm_enabled = 1;
	}
	/*
	 * The driver private ioctls and TTM ioctls should be
	 * thread-safe.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		struct drm_ioctl_desc *ioctl =
					&psb_ioctls[nr - DRM_COMMAND_BASE];

		/* Table entry must agree with the decoded command word */
		if (unlikely(ioctl->cmd != cmd)) {
			DRM_ERROR(
				"Invalid drm cmnd %d ioctl->cmd %x, cmd %x\n",
				nr - DRM_COMMAND_BASE, ioctl->cmd, cmd);
			return -EINVAL;
		}

		return drm_ioctl(filp, cmd, arg);
	}
	/*
	 * Not all old drm ioctls are thread-safe.
	 */

	return drm_ioctl(filp, cmd, arg);
}


/* When a client dies:
 *    - Check for and clean up flipped page state
 * (currently a no-op; flip state tracking not yet wired up)
 */
void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
{
}

/* PCI removal: tear down the DRM device registered in psb_probe(). */
static void psb_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	drm_put_dev(dev);
}


/* Runtime-PM callbacks, hooked into the PCI driver below under CONFIG_PM */
static const struct dev_pm_ops psb_pm_ops = {
	.runtime_suspend = psb_runtime_suspend,
	.runtime_resume = psb_runtime_resume,
	.runtime_idle = psb_runtime_idle,
};

/* DRM driver description: modesetting driver with a shared IRQ line. */
static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
			   DRIVER_IRQ_VBL | DRIVER_MODESET,
	.load = psb_driver_load,
	.unload = psb_driver_unload,

	.ioctls = psb_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
	.device_is_agp = psb_driver_device_is_agp,
	.irq_preinstall = psb_irq_preinstall,
	.irq_postinstall = psb_irq_postinstall,
	.irq_uninstall = psb_irq_uninstall,
	.irq_handler = psb_irq_handler,
	.enable_vblank = psb_enable_vblank,
	.disable_vblank = psb_disable_vblank,
	.get_vblank_counter = psb_get_vblank_counter,
	.firstopen = NULL,
	.lastclose = psb_lastclose,
	.open = psb_driver_open,
	.postclose = psb_driver_close,
#if 0	/* ACFIXME */
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.proc_init = psb_proc_init,
	.proc_cleanup = psb_proc_cleanup,
#endif
	.preclose = psb_driver_preclose,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = psb_open,
		 .release = psb_release,
		 .unlocked_ioctl = psb_unlocked_ioctl,
		 .mmap = psb_mmap,
		 .poll = psb_poll,
		 .fasync = drm_fasync,
		 .read = drm_read,
	 },
	.pci_driver = {
		 .name = DRIVER_NAME,
		 .id_table = pciidlist,
		 .resume = ospm_power_resume,
		 .suspend = ospm_power_suspend,
		 .probe = psb_probe,
		 .remove = psb_remove,
#ifdef CONFIG_PM
		 .driver.pm = &psb_pm_ops,
#endif
	 },
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = PSB_DRM_DRIVER_DATE,
	.major = PSB_DRM_DRIVER_MAJOR,
	.minor = PSB_DRM_DRIVER_MINOR,
	.patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
};

/*
 * PCI probe: try to enable MSI (failure is deliberately non-fatal — only
 * logged), then hand the device to the DRM core.
 */
static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* MLD Added this from Inaky's patch */
	if (pci_enable_msi(pdev))
		DRM_ERROR("Enable MSI failed!\n");
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init psb_init(void)
{
	return drm_init(&driver);
}

static void __exit psb_exit(void)
{
	drm_exit(&driver);
}

/*
 * Registered with late_initcall rather than module_init — presumably so
 * the driver only loads after the subsystems it depends on; confirm
 * before changing.
 */
late_initcall(psb_init);
module_exit(psb_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+1139
drivers/staging/gma500/psb_drv.h
··· 1 + /************************************************************************** 2 + * Copyright (c) 2007-2008, Intel Corporation. 3 + * All Rights Reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms and conditions of the GNU General Public License, 7 + * version 2, as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope it will be useful, but WITHOUT 10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 + * more details. 13 + * 14 + * You should have received a copy of the GNU General Public License along with 15 + * this program; if not, write to the Free Software Foundation, Inc., 16 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 + * 18 + **************************************************************************/ 19 + 20 + #ifndef _PSB_DRV_H_ 21 + #define _PSB_DRV_H_ 22 + 23 + #include <linux/version.h> 24 + 25 + #include <drm/drmP.h> 26 + #include "drm_global.h" 27 + #include "psb_drm.h" 28 + #include "psb_reg.h" 29 + #include "psb_intel_drv.h" 30 + #include "psb_gtt.h" 31 + #include "psb_powermgmt.h" 32 + #include "ttm/ttm_object.h" 33 + #include "psb_ttm_fence_driver.h" 34 + #include "psb_ttm_userobj_api.h" 35 + #include "ttm/ttm_bo_driver.h" 36 + #include "ttm/ttm_lock.h" 37 + 38 + /*Append new drm mode definition here, align with libdrm definition*/ 39 + #define DRM_MODE_SCALE_NO_SCALE 2 40 + 41 + extern struct ttm_bo_driver psb_ttm_bo_driver; 42 + 43 + enum { 44 + CHIP_PSB_8108 = 0, 45 + CHIP_PSB_8109 = 1, 46 + }; 47 + 48 + /* 49 + *Hardware bugfixes 50 + */ 51 + 52 + #define DRIVER_NAME "pvrsrvkm" 53 + #define DRIVER_DESC "drm driver for the Intel GMA500" 54 + #define DRIVER_AUTHOR "Intel Corporation" 55 + #define OSPM_PROC_ENTRY "ospm" 56 + #define RTPM_PROC_ENTRY "rtpm" 57 + #define BLC_PROC_ENTRY "mrst_blc" 58 + #define 
DISPLAY_PROC_ENTRY "display_status" 59 + 60 + #define PSB_DRM_DRIVER_DATE "2009-03-10" 61 + #define PSB_DRM_DRIVER_MAJOR 8 62 + #define PSB_DRM_DRIVER_MINOR 1 63 + #define PSB_DRM_DRIVER_PATCHLEVEL 0 64 + 65 + /* 66 + *TTM driver private offsets. 67 + */ 68 + 69 + #define DRM_PSB_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) 70 + 71 + #define PSB_OBJECT_HASH_ORDER 13 72 + #define PSB_FILE_OBJECT_HASH_ORDER 12 73 + #define PSB_BO_HASH_ORDER 12 74 + 75 + #define PSB_VDC_OFFSET 0x00000000 76 + #define PSB_VDC_SIZE 0x000080000 77 + #define MRST_MMIO_SIZE 0x0000C0000 78 + #define MDFLD_MMIO_SIZE 0x000100000 79 + #define PSB_SGX_SIZE 0x8000 80 + #define PSB_SGX_OFFSET 0x00040000 81 + #define MRST_SGX_OFFSET 0x00080000 82 + #define PSB_MMIO_RESOURCE 0 83 + #define PSB_GATT_RESOURCE 2 84 + #define PSB_GTT_RESOURCE 3 85 + #define PSB_GMCH_CTRL 0x52 86 + #define PSB_BSM 0x5C 87 + #define _PSB_GMCH_ENABLED 0x4 88 + #define PSB_PGETBL_CTL 0x2020 89 + #define _PSB_PGETBL_ENABLED 0x00000001 90 + #define PSB_SGX_2D_SLAVE_PORT 0x4000 91 + #define PSB_TT_PRIV0_LIMIT (256*1024*1024) 92 + #define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT) 93 + #define PSB_NUM_VALIDATE_BUFFERS 2048 94 + 95 + #define PSB_MEM_MMU_START 0x00000000 96 + #define PSB_MEM_TT_START 0xE0000000 97 + 98 + #define PSB_GL3_CACHE_CTL 0x2100 99 + #define PSB_GL3_CACHE_STAT 0x2108 100 + 101 + /* 102 + *Flags for external memory type field. 
103 + */ 104 + 105 + #define MRST_MSVDX_OFFSET 0x90000 /*MSVDX Base offset */ 106 + #define PSB_MSVDX_OFFSET 0x50000 /*MSVDX Base offset */ 107 + /* MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */ 108 + #define PSB_MSVDX_SIZE 0x10000 109 + 110 + #define LNC_TOPAZ_OFFSET 0xA0000 111 + #define PNW_TOPAZ_OFFSET 0xC0000 112 + #define PNW_GL3_OFFSET 0xB0000 113 + #define LNC_TOPAZ_SIZE 0x10000 114 + #define PNW_TOPAZ_SIZE 0x30000 /* PNW VXE285 has two cores */ 115 + #define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */ 116 + #define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */ 117 + #define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */ 118 + 119 + /* 120 + *PTE's and PDE's 121 + */ 122 + 123 + #define PSB_PDE_MASK 0x003FFFFF 124 + #define PSB_PDE_SHIFT 22 125 + #define PSB_PTE_SHIFT 12 126 + 127 + #define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */ 128 + #define PSB_PTE_WO 0x0002 /* Write only */ 129 + #define PSB_PTE_RO 0x0004 /* Read only */ 130 + #define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */ 131 + 132 + /* 133 + *VDC registers and bits 134 + */ 135 + #define PSB_MSVDX_CLOCKGATING 0x2064 136 + #define PSB_TOPAZ_CLOCKGATING 0x2068 137 + #define PSB_HWSTAM 0x2098 138 + #define PSB_INSTPM 0x20C0 139 + #define PSB_INT_IDENTITY_R 0x20A4 140 + #define _MDFLD_PIPEC_EVENT_FLAG (1<<2) 141 + #define _MDFLD_PIPEC_VBLANK_FLAG (1<<3) 142 + #define _PSB_DPST_PIPEB_FLAG (1<<4) 143 + #define _MDFLD_PIPEB_EVENT_FLAG (1<<4) 144 + #define _PSB_VSYNC_PIPEB_FLAG (1<<5) 145 + #define _PSB_DPST_PIPEA_FLAG (1<<6) 146 + #define _PSB_PIPEA_EVENT_FLAG (1<<6) 147 + #define _PSB_VSYNC_PIPEA_FLAG (1<<7) 148 + #define _MDFLD_MIPIA_FLAG (1<<16) 149 + #define _MDFLD_MIPIC_FLAG (1<<17) 150 + #define _PSB_IRQ_SGX_FLAG (1<<18) 151 + #define _PSB_IRQ_MSVDX_FLAG (1<<19) 152 + #define _LNC_IRQ_TOPAZ_FLAG (1<<20) 153 + 154 + /* This flag includes all the display IRQ bits excepts the vblank irqs. 
*/ 155 + #define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | _MDFLD_PIPEB_EVENT_FLAG | \ 156 + _PSB_PIPEA_EVENT_FLAG | _PSB_VSYNC_PIPEA_FLAG | _MDFLD_MIPIA_FLAG | _MDFLD_MIPIC_FLAG) 157 + #define PSB_INT_IDENTITY_R 0x20A4 158 + #define PSB_INT_MASK_R 0x20A8 159 + #define PSB_INT_ENABLE_R 0x20A0 160 + 161 + #define _PSB_MMU_ER_MASK 0x0001FF00 162 + #define _PSB_MMU_ER_HOST (1 << 16) 163 + #define GPIOA 0x5010 164 + #define GPIOB 0x5014 165 + #define GPIOC 0x5018 166 + #define GPIOD 0x501c 167 + #define GPIOE 0x5020 168 + #define GPIOF 0x5024 169 + #define GPIOG 0x5028 170 + #define GPIOH 0x502c 171 + #define GPIO_CLOCK_DIR_MASK (1 << 0) 172 + #define GPIO_CLOCK_DIR_IN (0 << 1) 173 + #define GPIO_CLOCK_DIR_OUT (1 << 1) 174 + #define GPIO_CLOCK_VAL_MASK (1 << 2) 175 + #define GPIO_CLOCK_VAL_OUT (1 << 3) 176 + #define GPIO_CLOCK_VAL_IN (1 << 4) 177 + #define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) 178 + #define GPIO_DATA_DIR_MASK (1 << 8) 179 + #define GPIO_DATA_DIR_IN (0 << 9) 180 + #define GPIO_DATA_DIR_OUT (1 << 9) 181 + #define GPIO_DATA_VAL_MASK (1 << 10) 182 + #define GPIO_DATA_VAL_OUT (1 << 11) 183 + #define GPIO_DATA_VAL_IN (1 << 12) 184 + #define GPIO_DATA_PULLUP_DISABLE (1 << 13) 185 + 186 + #define VCLK_DIVISOR_VGA0 0x6000 187 + #define VCLK_DIVISOR_VGA1 0x6004 188 + #define VCLK_POST_DIV 0x6010 189 + 190 + #define PSB_COMM_2D (PSB_ENGINE_2D << 4) 191 + #define PSB_COMM_3D (PSB_ENGINE_3D << 4) 192 + #define PSB_COMM_TA (PSB_ENGINE_TA << 4) 193 + #define PSB_COMM_HP (PSB_ENGINE_HP << 4) 194 + #define PSB_COMM_USER_IRQ (1024 >> 2) 195 + #define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1) 196 + #define PSB_COMM_FW (2048 >> 2) 197 + 198 + #define PSB_UIRQ_VISTEST 1 199 + #define PSB_UIRQ_OOM_REPLY 2 200 + #define PSB_UIRQ_FIRE_TA_REPLY 3 201 + #define PSB_UIRQ_FIRE_RASTER_REPLY 4 202 + 203 + #define PSB_2D_SIZE (256*1024*1024) 204 + #define PSB_MAX_RELOC_PAGES 1024 205 + 206 + #define PSB_LOW_REG_OFFS 0x0204 207 + #define PSB_HIGH_REG_OFFS 0x0600 208 + 209 
+ #define PSB_NUM_VBLANKS 2 210 + 211 + 212 + #define PSB_2D_SIZE (256*1024*1024) 213 + #define PSB_MAX_RELOC_PAGES 1024 214 + 215 + #define PSB_LOW_REG_OFFS 0x0204 216 + #define PSB_HIGH_REG_OFFS 0x0600 217 + 218 + #define PSB_NUM_VBLANKS 2 219 + #define PSB_WATCHDOG_DELAY (DRM_HZ * 2) 220 + #define PSB_LID_DELAY (DRM_HZ / 10) 221 + 222 + #define MDFLD_PNW_A0 0x00 223 + #define MDFLD_PNW_B0 0x04 224 + #define MDFLD_PNW_C0 0x08 225 + 226 + #define MDFLD_DSR_2D_3D_0 BIT0 227 + #define MDFLD_DSR_2D_3D_2 BIT1 228 + #define MDFLD_DSR_CURSOR_0 BIT2 229 + #define MDFLD_DSR_CURSOR_2 BIT3 230 + #define MDFLD_DSR_OVERLAY_0 BIT4 231 + #define MDFLD_DSR_OVERLAY_2 BIT5 232 + #define MDFLD_DSR_MIPI_CONTROL BIT6 233 + #define MDFLD_DSR_2D_3D (MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2) 234 + 235 + #define MDFLD_DSR_RR 45 236 + #define MDFLD_DPU_ENABLE BIT31 237 + #define MDFLD_DSR_FULLSCREEN BIT30 238 + #define MDFLD_DSR_DELAY (DRM_HZ / MDFLD_DSR_RR) 239 + 240 + #define PSB_PWR_STATE_ON 1 241 + #define PSB_PWR_STATE_OFF 2 242 + 243 + #define PSB_PMPOLICY_NOPM 0 244 + #define PSB_PMPOLICY_CLOCKGATING 1 245 + #define PSB_PMPOLICY_POWERDOWN 2 246 + 247 + #define PSB_PMSTATE_POWERUP 0 248 + #define PSB_PMSTATE_CLOCKGATED 1 249 + #define PSB_PMSTATE_POWERDOWN 2 250 + #define PSB_PCIx_MSI_ADDR_LOC 0x94 251 + #define PSB_PCIx_MSI_DATA_LOC 0x98 252 + 253 + #define MDFLD_PLANE_MAX_WIDTH 2048 254 + #define MDFLD_PLANE_MAX_HEIGHT 2048 255 + 256 + struct opregion_header; 257 + struct opregion_acpi; 258 + struct opregion_swsci; 259 + struct opregion_asle; 260 + 261 + struct psb_intel_opregion { 262 + struct opregion_header *header; 263 + struct opregion_acpi *acpi; 264 + struct opregion_swsci *swsci; 265 + struct opregion_asle *asle; 266 + int enabled; 267 + }; 268 + 269 + /* 270 + *User options. 
271 + */ 272 + 273 + struct drm_psb_uopt { 274 + int pad; /*keep it here in case we use it in future*/ 275 + }; 276 + 277 + /** 278 + *struct psb_context 279 + * 280 + *@buffers: array of pre-allocated validate buffers. 281 + *@used_buffers: number of buffers in @buffers array currently in use. 282 + *@validate_buffer: buffers validated from user-space. 283 + *@kern_validate_buffers : buffers validated from kernel-space. 284 + *@fence_flags : Fence flags to be used for fence creation. 285 + * 286 + *This structure is used during execbuf validation. 287 + */ 288 + 289 + struct psb_context { 290 + struct psb_validate_buffer *buffers; 291 + uint32_t used_buffers; 292 + struct list_head validate_list; 293 + struct list_head kern_validate_list; 294 + uint32_t fence_types; 295 + uint32_t val_seq; 296 + }; 297 + 298 + struct psb_validate_buffer; 299 + 300 + /* Currently defined profiles */ 301 + enum VAProfile { 302 + VAProfileMPEG2Simple = 0, 303 + VAProfileMPEG2Main = 1, 304 + VAProfileMPEG4Simple = 2, 305 + VAProfileMPEG4AdvancedSimple = 3, 306 + VAProfileMPEG4Main = 4, 307 + VAProfileH264Baseline = 5, 308 + VAProfileH264Main = 6, 309 + VAProfileH264High = 7, 310 + VAProfileVC1Simple = 8, 311 + VAProfileVC1Main = 9, 312 + VAProfileVC1Advanced = 10, 313 + VAProfileH263Baseline = 11, 314 + VAProfileJPEGBaseline = 12, 315 + VAProfileH264ConstrainedBaseline = 13 316 + }; 317 + 318 + /* Currently defined entrypoints */ 319 + enum VAEntrypoint { 320 + VAEntrypointVLD = 1, 321 + VAEntrypointIZZ = 2, 322 + VAEntrypointIDCT = 3, 323 + VAEntrypointMoComp = 4, 324 + VAEntrypointDeblocking = 5, 325 + VAEntrypointEncSlice = 6, /* slice level encode */ 326 + VAEntrypointEncPicture = 7 /* pictuer encode, JPEG, etc */ 327 + }; 328 + 329 + 330 + struct psb_video_ctx { 331 + struct list_head head; 332 + struct file *filp; /* DRM device file pointer */ 333 + int ctx_type; /* profile<<8|entrypoint */ 334 + /* todo: more context specific data for multi-context support */ 335 + }; 336 + 337 
/* Mode-setting state flags (note 0x3 == CRTC|ENCODER combined) */
#define MODE_SETTING_IN_CRTC 	0x1
#define MODE_SETTING_IN_ENCODER 0x2
#define MODE_SETTING_ON_GOING 	0x3
#define MODE_SETTING_IN_DSR 	0x4
#define MODE_SETTING_ENCODER_DONE 0x8

#define GCT_R10_HEADER_SIZE		16
#define GCT_R10_DISPLAY_DESC_SIZE	28

/*
 * Per-device private state for the psb DRM driver. Installed in
 * drm_device::dev_private (see psb_priv() below and the cast in
 * psb_unlocked_ioctl() in psb_drv.c).
 */
struct drm_psb_private {
	/*
	 * DSI info.
	 */
	void *dbi_dsr_info;
	void *dsi_configs[2];

	/*
	 *TTM Glue.
	 */

	struct drm_global_reference mem_global_ref;
	struct ttm_bo_global_ref bo_global_ref;
	int has_global;

	struct drm_device *dev;
	struct ttm_object_device *tdev;
	struct ttm_fence_device fdev;
	struct ttm_bo_device bdev;
	struct ttm_lock ttm_lock;
	struct vm_operations_struct *ttm_vm_ops;
	int has_fence_device;
	int has_bo_device;

	unsigned long chipset;

	struct drm_psb_dev_info_arg dev_info;
	struct drm_psb_uopt uopt;

	struct psb_gtt *pg;

	/*GTT Memory manager*/
	struct psb_gtt_mm *gtt_mm;

	struct page *scratch_page;
	uint32_t sequence[PSB_NUM_ENGINES];
	uint32_t last_sequence[PSB_NUM_ENGINES];
	uint32_t last_submitted_seq[PSB_NUM_ENGINES];

	struct psb_mmu_driver *mmu;
	struct psb_mmu_pd *pf_pd;

	/* Mapped register apertures (SGX 3D core and display/VDC) */
	uint8_t *sgx_reg;
	uint8_t *vdc_reg;
	uint32_t gatt_free_offset;

	/* IMG video context */
	struct list_head video_ctx;

	/*
	 *Fencing / irq.
	 */

	uint32_t vdc_irq_mask;
	uint32_t pipestat[PSB_NUM_PIPE];
	bool vblanksEnabledForFlips;

	spinlock_t irqmask_lock;
	spinlock_t sequence_lock;

	/*
	 *Modesetting
	 */
	struct psb_intel_mode_device mode_dev;

	struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
	struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
	uint32_t num_pipe;

	/*
	 * CI share buffer
	 */
	unsigned int ci_region_start;
	unsigned int ci_region_size;

	/*
	 * RAR share buffer;
	 */
	unsigned int rar_region_start;
	unsigned int rar_region_size;

	/*
	 *Memory managers
	 */

	int have_camera;
	int have_rar;
	int have_tt;
	int have_mem_mmu;
	struct mutex temp_mem;

	/*
	 *Relocation buffer mapping.
	 */

	spinlock_t reloc_lock;
	unsigned int rel_mapped_pages;
	wait_queue_head_t rel_mapped_queue;

	/*
	 *SAREA
	 */
	struct drm_psb_sarea *sarea_priv;

	/*
	 *OSPM info
	 */
	uint32_t ospm_base;

	/*
	 * Sizes info
	 */

	struct drm_psb_sizes_arg sizes;

	uint32_t fuse_reg_value;

	/* pci revision id for B0:D2:F0 */
	uint8_t platform_rev_id;

	/*
	 *LVDS info
	 */
	int backlight_duty_cycle;	/* restore backlight to this value */
	bool panel_wants_dither;
	struct drm_display_mode *panel_fixed_mode;
	struct drm_display_mode *lfp_lvds_vbt_mode;
	struct drm_display_mode *sdvo_lvds_vbt_mode;

	struct bdb_lvds_backlight *lvds_bl;	/*LVDS backlight info from VBT*/
	struct psb_intel_i2c_chan *lvds_i2c_bus;

	/* Feature bits from the VBIOS*/
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	int lvds_ssc_freq;
	bool is_lvds_on;

	unsigned int core_freq;
	uint32_t iLVDS_enable;

	/*runtime PM state*/
	int rpm_enabled;

	/*
	 *Register state — display registers saved/restored across
	 *suspend/resume and power gating.
	 */
	uint32_t saveDSPACNTR;
	uint32_t saveDSPBCNTR;
	uint32_t savePIPEACONF;
	uint32_t savePIPEBCONF;
	uint32_t savePIPEASRC;
	uint32_t savePIPEBSRC;
	uint32_t saveFPA0;
	uint32_t saveFPA1;
	uint32_t saveDPLL_A;
	uint32_t saveDPLL_A_MD;
	uint32_t saveHTOTAL_A;
	uint32_t saveHBLANK_A;
	uint32_t saveHSYNC_A;
	uint32_t saveVTOTAL_A;
	uint32_t saveVBLANK_A;
	uint32_t saveVSYNC_A;
	uint32_t saveDSPASTRIDE;
	uint32_t saveDSPASIZE;
	uint32_t saveDSPAPOS;
	uint32_t saveDSPABASE;
	uint32_t saveDSPASURF;
	uint32_t saveFPB0;
	uint32_t saveFPB1;
	uint32_t saveDPLL_B;
	uint32_t saveDPLL_B_MD;
	uint32_t saveHTOTAL_B;
	uint32_t saveHBLANK_B;
	uint32_t saveHSYNC_B;
	uint32_t saveVTOTAL_B;
	uint32_t saveVBLANK_B;
	uint32_t saveVSYNC_B;
	uint32_t saveDSPBSTRIDE;
	uint32_t saveDSPBSIZE;
	uint32_t saveDSPBPOS;
	uint32_t saveDSPBBASE;
	uint32_t saveDSPBSURF;
	uint32_t saveVCLK_DIVISOR_VGA0;
	uint32_t saveVCLK_DIVISOR_VGA1;
	uint32_t saveVCLK_POST_DIV;
	uint32_t saveVGACNTRL;
	uint32_t saveADPA;
	uint32_t saveLVDS;
	uint32_t saveDVOA;
	uint32_t saveDVOB;
	uint32_t saveDVOC;
	uint32_t savePP_ON;
	uint32_t savePP_OFF;
	uint32_t savePP_CONTROL;
	uint32_t savePP_CYCLE;
	uint32_t savePFIT_CONTROL;
	uint32_t savePaletteA[256];
	uint32_t savePaletteB[256];
	uint32_t saveBLC_PWM_CTL2;
	uint32_t saveBLC_PWM_CTL;
	uint32_t saveCLOCKGATING;
	uint32_t saveDSPARB;
	uint32_t saveDSPATILEOFF;
	uint32_t saveDSPBTILEOFF;
	uint32_t saveDSPAADDR;
	uint32_t saveDSPBADDR;
	uint32_t savePFIT_AUTO_RATIOS;
	uint32_t savePFIT_PGM_RATIOS;
	uint32_t savePP_ON_DELAYS;
	uint32_t savePP_OFF_DELAYS;
	uint32_t savePP_DIVISOR;
	uint32_t saveBSM;
	uint32_t saveVBT;
	uint32_t saveBCLRPAT_A;
	uint32_t saveBCLRPAT_B;
	uint32_t saveDSPALINOFF;
	uint32_t saveDSPBLINOFF;
	uint32_t savePERF_MODE;
	uint32_t saveDSPFW1;
	uint32_t saveDSPFW2;
	uint32_t saveDSPFW3;
	uint32_t saveDSPFW4;
	uint32_t saveDSPFW5;
	uint32_t saveDSPFW6;
	uint32_t saveCHICKENBIT;
	uint32_t saveDSPACURSOR_CTRL;
	uint32_t saveDSPBCURSOR_CTRL;
	uint32_t saveDSPACURSOR_BASE;
	uint32_t saveDSPBCURSOR_BASE;
	uint32_t saveDSPACURSOR_POS;
	uint32_t saveDSPBCURSOR_POS;
	uint32_t save_palette_a[256];
	uint32_t save_palette_b[256];
	uint32_t saveOV_OVADD;
	uint32_t saveOV_OGAMC0;
	uint32_t saveOV_OGAMC1;
	uint32_t saveOV_OGAMC2;
	uint32_t saveOV_OGAMC3;
	uint32_t saveOV_OGAMC4;
	uint32_t saveOV_OGAMC5;
	uint32_t saveOVC_OVADD;
	uint32_t saveOVC_OGAMC0;
	uint32_t saveOVC_OGAMC1;
	uint32_t saveOVC_OGAMC2;
	uint32_t saveOVC_OGAMC3;
	uint32_t saveOVC_OGAMC4;
	uint32_t saveOVC_OGAMC5;

	/*
	 * extra MDFLD Register state (third pipe / HDMI)
	 */
	uint32_t saveHDMIPHYMISCCTL;
	uint32_t saveHDMIB_CONTROL;
	uint32_t saveDSPCCNTR;
	uint32_t savePIPECCONF;
	uint32_t savePIPECSRC;
	uint32_t saveHTOTAL_C;
	uint32_t saveHBLANK_C;
	uint32_t saveHSYNC_C;
	uint32_t saveVTOTAL_C;
	uint32_t saveVBLANK_C;
	uint32_t saveVSYNC_C;
	uint32_t saveDSPCSTRIDE;
	uint32_t saveDSPCSIZE;
	uint32_t saveDSPCPOS;
	uint32_t saveDSPCSURF;
	uint32_t saveDSPCLINOFF;
	uint32_t saveDSPCTILEOFF;
	uint32_t saveDSPCCURSOR_CTRL;
	uint32_t saveDSPCCURSOR_BASE;
	uint32_t saveDSPCCURSOR_POS;
	uint32_t save_palette_c[256];
	uint32_t saveOV_OVADD_C;
	uint32_t saveOV_OGAMC0_C;
	uint32_t saveOV_OGAMC1_C;
	uint32_t saveOV_OGAMC2_C;
	uint32_t saveOV_OGAMC3_C;
	uint32_t saveOV_OGAMC4_C;
	uint32_t saveOV_OGAMC5_C;

	/* DSI reg save */
	uint32_t saveDEVICE_READY_REG;
	uint32_t saveINTR_EN_REG;
	uint32_t saveDSI_FUNC_PRG_REG;
	uint32_t saveHS_TX_TIMEOUT_REG;
	uint32_t saveLP_RX_TIMEOUT_REG;
	uint32_t saveTURN_AROUND_TIMEOUT_REG;
	uint32_t saveDEVICE_RESET_REG;
	uint32_t saveDPI_RESOLUTION_REG;
	uint32_t saveHORIZ_SYNC_PAD_COUNT_REG;
	uint32_t saveHORIZ_BACK_PORCH_COUNT_REG;
	uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG;
	uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG;
	uint32_t saveVERT_SYNC_PAD_COUNT_REG;
	uint32_t saveVERT_BACK_PORCH_COUNT_REG;
	uint32_t saveVERT_FRONT_PORCH_COUNT_REG;
	uint32_t saveHIGH_LOW_SWITCH_COUNT_REG;
	uint32_t saveINIT_COUNT_REG;
	uint32_t saveMAX_RET_PAK_REG;
	uint32_t saveVIDEO_FMT_REG;
	uint32_t saveEOT_DISABLE_REG;
	uint32_t saveLP_BYTECLK_REG;
	uint32_t saveHS_LS_DBI_ENABLE_REG;
	uint32_t saveTXCLKESC_REG;
	uint32_t saveDPHY_PARAM_REG;
	uint32_t saveMIPI_CONTROL_REG;
	uint32_t saveMIPI;
	uint32_t saveMIPI_C;
	/* Panel-specific DSI hooks, filled in by the panel driver */
	void (*init_drvIC)(struct drm_device *dev);
	void (*dsi_prePowerState)(struct drm_device *dev);
	void (*dsi_postPowerState)(struct drm_device *dev);

	/* DPST Register Save */
	uint32_t saveHISTOGRAM_INT_CONTROL_REG;
	uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
	uint32_t savePWM_CONTROL_LOGIC;

	/* MSI reg save */

	uint32_t msi_addr;
	uint32_t msi_data;

	/*
	 *Scheduling.
	 */

	struct mutex reset_mutex;
	struct mutex cmdbuf_mutex;
	/*uint32_t ta_mem_pages;
	struct psb_ta_mem *ta_mem;
	int force_ta_mem_load;*/
	atomic_t val_seq;

	/*
	 *TODO: change this to be per drm-context.
	 */

	struct psb_context context;

	/*
	 * LID-Switch
	 */
	spinlock_t lid_lock;
	struct timer_list lid_timer;
	struct psb_intel_opregion opregion;
	u32 *lid_state;
	u32 lid_last_state;

	/*
	 *Watchdog
	 */

	int timer_available;

	uint32_t apm_reg;
	uint16_t apm_base;

	/*
	 * Used for modifying backlight from
	 * xrandr -- consider removing and using HAL instead
	 */
	struct drm_property *backlight_property;
	uint32_t blc_adj1;
	uint32_t blc_adj2;

	void *fbdev;
};


struct psb_file_data {	/* TODO: Audit this, remove the indirection and set
			   it up properly in open/postclose ACFIXME */
	void *priv;
};

struct psb_fpriv {
	struct ttm_object_file *tfile;
};

struct psb_mmu_driver;

extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
extern int drm_pick_crtcs(struct drm_device *dev);

/*
 * Fetch the per-file TTM state from a drm_file.
 *
 * NOTE(review): this reinterprets drm_file->driver_priv as a
 * struct psb_file_data, but psb_driver_open() in psb_drv.c stores a
 * struct psb_priv there — the two must stay layout-compatible until the
 * ACFIXME above is resolved.
 */
static inline struct psb_fpriv *psb_fpriv(struct drm_file *file_priv)
{
	struct psb_file_data *pvr_file_priv
			= (struct psb_file_data *)file_priv->driver_priv;
	return (struct psb_fpriv *) pvr_file_priv->priv;
}

/* Fetch the device-private state from a drm_device. */
static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
{
	return (struct drm_psb_private *) dev->dev_private;
}

/*
 *TTM glue.
psb_ttm_glue.c
 */

/* File-operation entry points (wired into driver.fops in psb_drv.c) */
extern int psb_open(struct inode *inode, struct file *filp);
extern int psb_release(struct inode *inode, struct file *filp);
extern int psb_mmap(struct file *filp, struct vm_area_struct *vma);

/* TTM fence / placement ioctls and helpers */
extern int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int psb_verify_access(struct ttm_buffer_object *bo,
			     struct file *filp);
extern ssize_t psb_ttm_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *f_pos);
extern ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *f_pos);
extern int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);
extern int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);
extern int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int psb_pl_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
extern int psb_pl_ub_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int psb_extension_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
extern int psb_ttm_global_init(struct drm_psb_private *dev_priv);
extern void psb_ttm_global_release(struct drm_psb_private *dev_priv);
extern int psb_getpageaddrs_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);

/*
 *MMU stuff.
 */

extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
					int trap_pagefaults,
					int invalid_type,
					struct drm_psb_private *dev_priv);
extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
						 *driver);
extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
			       uint32_t gtt_start, uint32_t gtt_pages);
extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
					   int trap_pagefaults,
					   int invalid_type);
extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
					unsigned long address,
					uint32_t num_pages);
extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
				       uint32_t start_pfn,
				       unsigned long address,
				       uint32_t num_pages, int type);
extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
				  unsigned long *pfn);

/*
 *Enable / disable MMU for different requestors.
816 + */ 817 + 818 + 819 + extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context); 820 + extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, 821 + unsigned long address, uint32_t num_pages, 822 + uint32_t desired_tile_stride, 823 + uint32_t hw_tile_stride, int type); 824 + extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd, 825 + unsigned long address, uint32_t num_pages, 826 + uint32_t desired_tile_stride, 827 + uint32_t hw_tile_stride); 828 + /* 829 + *psb_sgx.c 830 + */ 831 + 832 + 833 + 834 + extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data, 835 + struct drm_file *file_priv); 836 + extern int psb_reg_submit(struct drm_psb_private *dev_priv, 837 + uint32_t *regs, unsigned int cmds); 838 + 839 + 840 + extern void psb_fence_or_sync(struct drm_file *file_priv, 841 + uint32_t engine, 842 + uint32_t fence_types, 843 + uint32_t fence_flags, 844 + struct list_head *list, 845 + struct psb_ttm_fence_rep *fence_arg, 846 + struct ttm_fence_object **fence_p); 847 + extern int psb_validate_kernel_buffer(struct psb_context *context, 848 + struct ttm_buffer_object *bo, 849 + uint32_t fence_class, 850 + uint64_t set_flags, 851 + uint64_t clr_flags); 852 + 853 + /* 854 + *psb_irq.c 855 + */ 856 + 857 + extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS); 858 + extern int psb_irq_enable_dpst(struct drm_device *dev); 859 + extern int psb_irq_disable_dpst(struct drm_device *dev); 860 + extern void psb_irq_preinstall(struct drm_device *dev); 861 + extern int psb_irq_postinstall(struct drm_device *dev); 862 + extern void psb_irq_uninstall(struct drm_device *dev); 863 + extern void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands); 864 + extern int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands); 865 + extern void psb_irq_turn_on_dpst(struct drm_device *dev); 866 + extern void psb_irq_turn_off_dpst(struct drm_device *dev); 867 + 868 + extern void psb_irq_uninstall_islands(struct drm_device 
*dev, int hw_islands); 869 + extern int psb_vblank_wait2(struct drm_device *dev,unsigned int *sequence); 870 + extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence); 871 + extern int psb_enable_vblank(struct drm_device *dev, int crtc); 872 + extern void psb_disable_vblank(struct drm_device *dev, int crtc); 873 + void 874 + psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask); 875 + 876 + void 877 + psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask); 878 + 879 + extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc); 880 + 881 + /* 882 + *psb_fence.c 883 + */ 884 + 885 + extern void psb_fence_handler(struct drm_device *dev, uint32_t class); 886 + 887 + extern int psb_fence_emit_sequence(struct ttm_fence_device *fdev, 888 + uint32_t fence_class, 889 + uint32_t flags, uint32_t *sequence, 890 + unsigned long *timeout_jiffies); 891 + extern void psb_fence_error(struct drm_device *dev, 892 + uint32_t class, 893 + uint32_t sequence, uint32_t type, int error); 894 + extern int psb_ttm_fence_device_init(struct ttm_fence_device *fdev); 895 + 896 + /* MSVDX/Topaz stuff */ 897 + extern int psb_remove_videoctx(struct drm_psb_private *dev_priv, struct file *filp); 898 + 899 + extern int lnc_video_frameskip(struct drm_device *dev, 900 + uint64_t user_pointer); 901 + extern int lnc_video_getparam(struct drm_device *dev, void *data, 902 + struct drm_file *file_priv); 903 + 904 + /* 905 + * psb_opregion.c 906 + */ 907 + extern int psb_intel_opregion_init(struct drm_device *dev); 908 + 909 + /* 910 + *psb_fb.c 911 + */ 912 + extern int psbfb_probed(struct drm_device *dev); 913 + extern int psbfb_remove(struct drm_device *dev, 914 + struct drm_framebuffer *fb); 915 + extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data, 916 + struct drm_file *file_priv); 917 + extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data, 918 + struct drm_file *file_priv); 919 + extern void 
*psbfb_vdc_reg(struct drm_device* dev); 920 + 921 + /* 922 + *psb_reset.c 923 + */ 924 + 925 + extern void psb_lid_timer_init(struct drm_psb_private *dev_priv); 926 + extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv); 927 + extern void psb_print_pagefault(struct drm_psb_private *dev_priv); 928 + 929 + /* modesetting */ 930 + extern void psb_modeset_init(struct drm_device *dev); 931 + extern void psb_modeset_cleanup(struct drm_device *dev); 932 + extern int psb_fbdev_init(struct drm_device * dev); 933 + 934 + /* psb_bl.c */ 935 + int psb_backlight_init(struct drm_device *dev); 936 + void psb_backlight_exit(void); 937 + int psb_set_brightness(struct backlight_device *bd); 938 + int psb_get_brightness(struct backlight_device *bd); 939 + struct backlight_device * psb_get_backlight_device(void); 940 + 941 + /* 942 + *Debug print bits setting 943 + */ 944 + #define PSB_D_GENERAL (1 << 0) 945 + #define PSB_D_INIT (1 << 1) 946 + #define PSB_D_IRQ (1 << 2) 947 + #define PSB_D_ENTRY (1 << 3) 948 + /* debug the get H/V BP/FP count */ 949 + #define PSB_D_HV (1 << 4) 950 + #define PSB_D_DBI_BF (1 << 5) 951 + #define PSB_D_PM (1 << 6) 952 + #define PSB_D_RENDER (1 << 7) 953 + #define PSB_D_REG (1 << 8) 954 + #define PSB_D_MSVDX (1 << 9) 955 + #define PSB_D_TOPAZ (1 << 10) 956 + 957 + #ifndef DRM_DEBUG_CODE 958 + /* To enable debug printout, set drm_psb_debug in psb_drv.c 959 + * to any combination of above print flags. 960 + */ 961 + /* #define DRM_DEBUG_CODE 2 */ 962 + #endif 963 + 964 + extern int drm_psb_debug; 965 + extern int drm_psb_no_fb; 966 + extern int drm_psb_disable_vsync; 967 + extern int drm_idle_check_interval; 968 + 969 + #define PSB_DEBUG_GENERAL(_fmt, _arg...) \ 970 + PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg) 971 + #define PSB_DEBUG_INIT(_fmt, _arg...) \ 972 + PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg) 973 + #define PSB_DEBUG_IRQ(_fmt, _arg...) \ 974 + PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg) 975 + #define PSB_DEBUG_ENTRY(_fmt, _arg...) 
\ 976 + PSB_DEBUG(PSB_D_ENTRY, _fmt, ##_arg) 977 + #define PSB_DEBUG_HV(_fmt, _arg...) \ 978 + PSB_DEBUG(PSB_D_HV, _fmt, ##_arg) 979 + #define PSB_DEBUG_DBI_BF(_fmt, _arg...) \ 980 + PSB_DEBUG(PSB_D_DBI_BF, _fmt, ##_arg) 981 + #define PSB_DEBUG_PM(_fmt, _arg...) \ 982 + PSB_DEBUG(PSB_D_PM, _fmt, ##_arg) 983 + #define PSB_DEBUG_RENDER(_fmt, _arg...) \ 984 + PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg) 985 + #define PSB_DEBUG_REG(_fmt, _arg...) \ 986 + PSB_DEBUG(PSB_D_REG, _fmt, ##_arg) 987 + #define PSB_DEBUG_MSVDX(_fmt, _arg...) \ 988 + PSB_DEBUG(PSB_D_MSVDX, _fmt, ##_arg) 989 + #define PSB_DEBUG_TOPAZ(_fmt, _arg...) \ 990 + PSB_DEBUG(PSB_D_TOPAZ, _fmt, ##_arg) 991 + 992 + #if DRM_DEBUG_CODE 993 + #define PSB_DEBUG(_flag, _fmt, _arg...) \ 994 + do { \ 995 + if (unlikely((_flag) & drm_psb_debug)) \ 996 + printk(KERN_DEBUG \ 997 + "[psb:0x%02x:%s] " _fmt , _flag, \ 998 + __func__ , ##_arg); \ 999 + } while (0) 1000 + #else 1001 + #define PSB_DEBUG(_fmt, _arg...) do { } while (0) 1002 + #endif 1003 + 1004 + /* 1005 + *Utilities 1006 + */ 1007 + #define DRM_DRIVER_PRIVATE_T struct drm_psb_private 1008 + 1009 + static inline u32 MRST_MSG_READ32(uint port, uint offset) 1010 + { 1011 + int mcr = (0xD0<<24) | (port << 16) | (offset << 8); 1012 + uint32_t ret_val = 0; 1013 + struct pci_dev *pci_root = pci_get_bus_and_slot (0, 0); 1014 + pci_write_config_dword (pci_root, 0xD0, mcr); 1015 + pci_read_config_dword (pci_root, 0xD4, &ret_val); 1016 + pci_dev_put(pci_root); 1017 + return ret_val; 1018 + } 1019 + static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value) 1020 + { 1021 + int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0; 1022 + struct pci_dev *pci_root = pci_get_bus_and_slot (0, 0); 1023 + pci_write_config_dword (pci_root, 0xD4, value); 1024 + pci_write_config_dword (pci_root, 0xD0, mcr); 1025 + pci_dev_put(pci_root); 1026 + } 1027 + static inline u32 MDFLD_MSG_READ32(uint port, uint offset) 1028 + { 1029 + int mcr = (0x10<<24) | (port << 16) | (offset 
<< 8); 1030 + uint32_t ret_val = 0; 1031 + struct pci_dev *pci_root = pci_get_bus_and_slot (0, 0); 1032 + pci_write_config_dword (pci_root, 0xD0, mcr); 1033 + pci_read_config_dword (pci_root, 0xD4, &ret_val); 1034 + pci_dev_put(pci_root); 1035 + return ret_val; 1036 + } 1037 + static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value) 1038 + { 1039 + int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0; 1040 + struct pci_dev *pci_root = pci_get_bus_and_slot (0, 0); 1041 + pci_write_config_dword (pci_root, 0xD4, value); 1042 + pci_write_config_dword (pci_root, 0xD0, mcr); 1043 + pci_dev_put(pci_root); 1044 + } 1045 + 1046 + static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg) 1047 + { 1048 + struct drm_psb_private *dev_priv = dev->dev_private; 1049 + int reg_val = ioread32(dev_priv->vdc_reg + (reg)); 1050 + PSB_DEBUG_REG("reg = 0x%x. reg_val = 0x%x. \n", reg, reg_val); 1051 + return reg_val; 1052 + } 1053 + 1054 + #define REG_READ(reg) REGISTER_READ(dev, (reg)) 1055 + static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg, 1056 + uint32_t val) 1057 + { 1058 + struct drm_psb_private *dev_priv = dev->dev_private; 1059 + if ((reg < 0x70084 || reg >0x70088) && (reg < 0xa000 || reg >0xa3ff)) 1060 + PSB_DEBUG_REG("reg = 0x%x, val = 0x%x. \n", reg, val); 1061 + 1062 + iowrite32((val), dev_priv->vdc_reg + (reg)); 1063 + } 1064 + 1065 + #define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val)) 1066 + 1067 + static inline void REGISTER_WRITE16(struct drm_device *dev, 1068 + uint32_t reg, uint32_t val) 1069 + { 1070 + struct drm_psb_private *dev_priv = dev->dev_private; 1071 + 1072 + PSB_DEBUG_REG("reg = 0x%x, val = 0x%x. 
\n", reg, val); 1073 + 1074 + iowrite16((val), dev_priv->vdc_reg + (reg)); 1075 + } 1076 + 1077 + #define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val)) 1078 + 1079 + static inline void REGISTER_WRITE8(struct drm_device *dev, 1080 + uint32_t reg, uint32_t val) 1081 + { 1082 + struct drm_psb_private *dev_priv = dev->dev_private; 1083 + 1084 + PSB_DEBUG_REG("reg = 0x%x, val = 0x%x. \n", reg, val); 1085 + 1086 + iowrite8((val), dev_priv->vdc_reg + (reg)); 1087 + } 1088 + 1089 + #define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val)) 1090 + 1091 + #define PSB_ALIGN_TO(_val, _align) \ 1092 + (((_val) + ((_align) - 1)) & ~((_align) - 1)) 1093 + #define PSB_WVDC32(_val, _offs) \ 1094 + iowrite32(_val, dev_priv->vdc_reg + (_offs)) 1095 + #define PSB_RVDC32(_offs) \ 1096 + ioread32(dev_priv->vdc_reg + (_offs)) 1097 + 1098 + /* #define TRAP_SGX_PM_FAULT 1 */ 1099 + #ifdef TRAP_SGX_PM_FAULT 1100 + #define PSB_RSGX32(_offs) \ 1101 + ({ \ 1102 + if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \ 1103 + printk(KERN_ERR "access sgx when it's off!! 
(READ) %s, %d\n", \ 1104 + __FILE__, __LINE__); \ 1105 + mdelay(1000); \ 1106 + } \ 1107 + ioread32(dev_priv->sgx_reg + (_offs)); \ 1108 + }) 1109 + #else 1110 + #define PSB_RSGX32(_offs) \ 1111 + ioread32(dev_priv->sgx_reg + (_offs)) 1112 + #endif 1113 + #define PSB_WSGX32(_val, _offs) \ 1114 + iowrite32(_val, dev_priv->sgx_reg + (_offs)) 1115 + 1116 + #define MSVDX_REG_DUMP 0 1117 + #if MSVDX_REG_DUMP 1118 + 1119 + #define PSB_WMSVDX32(_val, _offs) \ 1120 + printk("MSVDX: write %08x to reg 0x%08x\n", (unsigned int)(_val), (unsigned int)(_offs));\ 1121 + iowrite32(_val, dev_priv->msvdx_reg + (_offs)) 1122 + #define PSB_RMSVDX32(_offs) \ 1123 + ioread32(dev_priv->msvdx_reg + (_offs)) 1124 + 1125 + #else 1126 + 1127 + #define PSB_WMSVDX32(_val, _offs) \ 1128 + iowrite32(_val, dev_priv->msvdx_reg + (_offs)) 1129 + #define PSB_RMSVDX32(_offs) \ 1130 + ioread32(dev_priv->msvdx_reg + (_offs)) 1131 + 1132 + #endif 1133 + 1134 + #define PSB_ALPL(_val, _base) \ 1135 + (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) 1136 + #define PSB_ALPLM(_val, _base) \ 1137 + ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK)) 1138 + 1139 + #endif
+840
drivers/staging/gma500/psb_fb.c
··· 1 + /************************************************************************** 2 + * Copyright (c) 2007, Intel Corporation. 3 + * All Rights Reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms and conditions of the GNU General Public License, 7 + * version 2, as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope it will be useful, but WITHOUT 10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 + * more details. 13 + * 14 + * You should have received a copy of the GNU General Public License along with 15 + * this program; if not, write to the Free Software Foundation, Inc., 16 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 + * 18 + **************************************************************************/ 19 + 20 + #include <linux/module.h> 21 + #include <linux/kernel.h> 22 + #include <linux/errno.h> 23 + #include <linux/string.h> 24 + #include <linux/mm.h> 25 + #include <linux/tty.h> 26 + #include <linux/slab.h> 27 + #include <linux/delay.h> 28 + #include <linux/fb.h> 29 + #include <linux/init.h> 30 + #include <linux/console.h> 31 + 32 + #include <drm/drmP.h> 33 + #include <drm/drm.h> 34 + #include <drm/drm_crtc.h> 35 + 36 + #include "psb_drv.h" 37 + #include "psb_intel_reg.h" 38 + #include "psb_intel_drv.h" 39 + #include "psb_ttm_userobj_api.h" 40 + #include "psb_fb.h" 41 + #include "psb_sgx.h" 42 + #include "psb_pvr_glue.h" 43 + 44 + static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb); 45 + static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb, 46 + struct drm_file *file_priv, 47 + unsigned int *handle); 48 + 49 + static const struct drm_framebuffer_funcs psb_fb_funcs = { 50 + .destroy = psb_user_framebuffer_destroy, 51 + .create_handle = psb_user_framebuffer_create_handle, 52 + }; 53 + 54 + 
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16) 55 + 56 + void *psbfb_vdc_reg(struct drm_device *dev) 57 + { 58 + struct drm_psb_private *dev_priv; 59 + dev_priv = (struct drm_psb_private *) dev->dev_private; 60 + return dev_priv->vdc_reg; 61 + } 62 + /*EXPORT_SYMBOL(psbfb_vdc_reg); */ 63 + 64 + static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green, 65 + unsigned blue, unsigned transp, 66 + struct fb_info *info) 67 + { 68 + struct psb_fbdev *fbdev = info->par; 69 + struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb; 70 + uint32_t v; 71 + 72 + if (!fb) 73 + return -ENOMEM; 74 + 75 + if (regno > 255) 76 + return 1; 77 + 78 + red = CMAP_TOHW(red, info->var.red.length); 79 + blue = CMAP_TOHW(blue, info->var.blue.length); 80 + green = CMAP_TOHW(green, info->var.green.length); 81 + transp = CMAP_TOHW(transp, info->var.transp.length); 82 + 83 + v = (red << info->var.red.offset) | 84 + (green << info->var.green.offset) | 85 + (blue << info->var.blue.offset) | 86 + (transp << info->var.transp.offset); 87 + 88 + if (regno < 16) { 89 + switch (fb->bits_per_pixel) { 90 + case 16: 91 + ((uint32_t *) info->pseudo_palette)[regno] = v; 92 + break; 93 + case 24: 94 + case 32: 95 + ((uint32_t *) info->pseudo_palette)[regno] = v; 96 + break; 97 + } 98 + } 99 + 100 + return 0; 101 + } 102 + 103 + static int psbfb_kms_off(struct drm_device *dev, int suspend) 104 + { 105 + struct drm_framebuffer *fb = 0; 106 + struct psb_framebuffer *psbfb = to_psb_fb(fb); 107 + DRM_DEBUG("psbfb_kms_off_ioctl\n"); 108 + 109 + mutex_lock(&dev->mode_config.mutex); 110 + list_for_each_entry(fb, &dev->mode_config.fb_list, head) { 111 + struct fb_info *info = psbfb->fbdev; 112 + 113 + if (suspend) { 114 + fb_set_suspend(info, 1); 115 + drm_fb_helper_blank(FB_BLANK_POWERDOWN, info); 116 + } 117 + } 118 + mutex_unlock(&dev->mode_config.mutex); 119 + return 0; 120 + } 121 + 122 + int psbfb_kms_off_ioctl(struct drm_device *dev, void *data, 123 + struct 
drm_file *file_priv) 124 + { 125 + int ret; 126 + 127 + if (drm_psb_no_fb) 128 + return 0; 129 + console_lock(); 130 + ret = psbfb_kms_off(dev, 0); 131 + console_unlock(); 132 + 133 + return ret; 134 + } 135 + 136 + static int psbfb_kms_on(struct drm_device *dev, int resume) 137 + { 138 + struct drm_framebuffer *fb = 0; 139 + struct psb_framebuffer *psbfb = to_psb_fb(fb); 140 + 141 + DRM_DEBUG("psbfb_kms_on_ioctl\n"); 142 + 143 + mutex_lock(&dev->mode_config.mutex); 144 + list_for_each_entry(fb, &dev->mode_config.fb_list, head) { 145 + struct fb_info *info = psbfb->fbdev; 146 + 147 + if (resume) { 148 + fb_set_suspend(info, 0); 149 + drm_fb_helper_blank(FB_BLANK_UNBLANK, info); 150 + } 151 + } 152 + mutex_unlock(&dev->mode_config.mutex); 153 + 154 + return 0; 155 + } 156 + 157 + int psbfb_kms_on_ioctl(struct drm_device *dev, void *data, 158 + struct drm_file *file_priv) 159 + { 160 + int ret; 161 + 162 + if (drm_psb_no_fb) 163 + return 0; 164 + console_lock(); 165 + ret = psbfb_kms_on(dev, 0); 166 + console_unlock(); 167 + drm_helper_disable_unused_functions(dev); 168 + return ret; 169 + } 170 + 171 + void psbfb_suspend(struct drm_device *dev) 172 + { 173 + console_lock(); 174 + psbfb_kms_off(dev, 1); 175 + console_unlock(); 176 + } 177 + 178 + void psbfb_resume(struct drm_device *dev) 179 + { 180 + console_lock(); 181 + psbfb_kms_on(dev, 1); 182 + console_unlock(); 183 + drm_helper_disable_unused_functions(dev); 184 + } 185 + 186 + static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 187 + { 188 + int page_num = 0; 189 + int i; 190 + unsigned long address = 0; 191 + int ret; 192 + unsigned long pfn; 193 + struct psb_framebuffer *psbfb = vma->vm_private_data; 194 + struct drm_device *dev = psbfb->base.dev; 195 + struct drm_psb_private *dev_priv = dev->dev_private; 196 + struct psb_gtt *pg = dev_priv->pg; 197 + unsigned long phys_addr = (unsigned long)pg->stolen_base;; 198 + 199 + page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 200 + 
201 + address = (unsigned long)vmf->virtual_address; 202 + 203 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 204 + 205 + for (i = 0; i < page_num; i++) { 206 + pfn = (phys_addr >> PAGE_SHIFT); /* phys_to_pfn(phys_addr); */ 207 + 208 + ret = vm_insert_mixed(vma, address, pfn); 209 + if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0))) 210 + break; 211 + else if (unlikely(ret != 0)) { 212 + ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; 213 + return ret; 214 + } 215 + 216 + address += PAGE_SIZE; 217 + phys_addr += PAGE_SIZE; 218 + } 219 + 220 + return VM_FAULT_NOPAGE; 221 + } 222 + 223 + static void psbfb_vm_open(struct vm_area_struct *vma) 224 + { 225 + DRM_DEBUG("vm_open\n"); 226 + } 227 + 228 + static void psbfb_vm_close(struct vm_area_struct *vma) 229 + { 230 + DRM_DEBUG("vm_close\n"); 231 + } 232 + 233 + static struct vm_operations_struct psbfb_vm_ops = { 234 + .fault = psbfb_vm_fault, 235 + .open = psbfb_vm_open, 236 + .close = psbfb_vm_close 237 + }; 238 + 239 + static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma) 240 + { 241 + struct psb_fbdev *fbdev = info->par; 242 + struct psb_framebuffer *psbfb = fbdev->pfb; 243 + char *fb_screen_base = NULL; 244 + struct drm_device *dev = psbfb->base.dev; 245 + struct drm_psb_private *dev_priv = dev->dev_private; 246 + struct psb_gtt *pg = dev_priv->pg; 247 + 248 + if (vma->vm_pgoff != 0) 249 + return -EINVAL; 250 + if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) 251 + return -EINVAL; 252 + 253 + if (!psbfb->addr_space) 254 + psbfb->addr_space = vma->vm_file->f_mapping; 255 + 256 + fb_screen_base = (char *)info->screen_base; 257 + 258 + DRM_DEBUG("vm_pgoff 0x%lx, screen base %p vram_addr %p\n", 259 + vma->vm_pgoff, fb_screen_base, pg->vram_addr); 260 + 261 + /*if using stolen memory, */ 262 + if (fb_screen_base == pg->vram_addr) { 263 + vma->vm_ops = &psbfb_vm_ops; 264 + vma->vm_private_data = (void *)psbfb; 265 + vma->vm_flags |= VM_RESERVED | VM_IO | 266 + VM_MIXEDMAP | 
VM_DONTEXPAND; 267 + } else { 268 + /*using IMG meminfo, can I use pvrmmap to map it?*/ 269 + 270 + } 271 + 272 + return 0; 273 + } 274 + 275 + 276 + static struct fb_ops psbfb_ops = { 277 + .owner = THIS_MODULE, 278 + .fb_check_var = drm_fb_helper_check_var, 279 + .fb_set_par = drm_fb_helper_set_par, 280 + .fb_blank = drm_fb_helper_blank, 281 + .fb_setcolreg = psbfb_setcolreg, 282 + .fb_fillrect = cfb_fillrect, 283 + .fb_copyarea = cfb_copyarea, 284 + .fb_imageblit = cfb_imageblit, 285 + .fb_mmap = psbfb_mmap, 286 + }; 287 + 288 + static struct drm_framebuffer *psb_framebuffer_create 289 + (struct drm_device *dev, struct drm_mode_fb_cmd *r, 290 + void *mm_private) 291 + { 292 + struct psb_framebuffer *fb; 293 + int ret; 294 + 295 + fb = kzalloc(sizeof(*fb), GFP_KERNEL); 296 + if (!fb) 297 + return NULL; 298 + 299 + ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs); 300 + 301 + if (ret) 302 + goto err; 303 + 304 + drm_helper_mode_fill_fb_struct(&fb->base, r); 305 + 306 + fb->bo = mm_private; 307 + 308 + return &fb->base; 309 + 310 + err: 311 + kfree(fb); 312 + return NULL; 313 + } 314 + 315 + static struct drm_framebuffer *psb_user_framebuffer_create 316 + (struct drm_device *dev, struct drm_file *filp, 317 + struct drm_mode_fb_cmd *r) 318 + { 319 + struct ttm_buffer_object *bo = NULL; 320 + uint64_t size; 321 + 322 + bo = ttm_buffer_object_lookup(psb_fpriv(filp)->tfile, r->handle); 323 + if (!bo) 324 + return NULL; 325 + 326 + /* JB: TODO not drop, make smarter */ 327 + size = ((uint64_t) bo->num_pages) << PAGE_SHIFT; 328 + if (size < r->width * r->height * 4) 329 + return NULL; 330 + 331 + /* JB: TODO not drop, refcount buffer */ 332 + return psb_framebuffer_create(dev, r, bo); 333 + 334 + #if 0 335 + struct psb_framebuffer *psbfb; 336 + struct drm_framebuffer *fb; 337 + struct fb_info *info; 338 + void *psKernelMemInfo = NULL; 339 + void * hKernelMemInfo = (void *)r->handle; 340 + struct drm_psb_private *dev_priv 341 + = (struct drm_psb_private 
*)dev->dev_private; 342 + struct psb_fbdev *fbdev = dev_priv->fbdev; 343 + struct psb_gtt *pg = dev_priv->pg; 344 + int ret; 345 + uint32_t offset; 346 + uint64_t size; 347 + 348 + ret = psb_get_meminfo_by_handle(hKernelMemInfo, &psKernelMemInfo); 349 + if (ret) { 350 + DRM_ERROR("Cannot get meminfo for handle 0x%x\n", 351 + (u32)hKernelMemInfo); 352 + return NULL; 353 + } 354 + 355 + DRM_DEBUG("Got Kernel MemInfo for handle %lx\n", 356 + (u32)hKernelMemInfo); 357 + 358 + /* JB: TODO not drop, make smarter */ 359 + size = psKernelMemInfo->ui32AllocSize; 360 + if (size < r->height * r->pitch) 361 + return NULL; 362 + 363 + /* JB: TODO not drop, refcount buffer */ 364 + /* return psb_framebuffer_create(dev, r, bo); */ 365 + 366 + fb = psb_framebuffer_create(dev, r, (void *)psKernelMemInfo); 367 + if (!fb) { 368 + DRM_ERROR("failed to allocate fb.\n"); 369 + return NULL; 370 + } 371 + 372 + psbfb = to_psb_fb(fb); 373 + psbfb->size = size; 374 + psbfb->hKernelMemInfo = hKernelMemInfo; 375 + 376 + DRM_DEBUG("Mapping to gtt..., KernelMemInfo %p\n", psKernelMemInfo); 377 + 378 + /*if not VRAM, map it into tt aperture*/ 379 + if (psKernelMemInfo->pvLinAddrKM != pg->vram_addr) { 380 + ret = psb_gtt_map_meminfo(dev, hKernelMemInfo, &offset); 381 + if (ret) { 382 + DRM_ERROR("map meminfo for 0x%x failed\n", 383 + (u32)hKernelMemInfo); 384 + return NULL; 385 + } 386 + psbfb->offset = (offset << PAGE_SHIFT); 387 + } else { 388 + psbfb->offset = 0; 389 + } 390 + info = framebuffer_alloc(0, &dev->pdev->dev); 391 + if (!info) 392 + return NULL; 393 + 394 + strcpy(info->fix.id, "psbfb"); 395 + 396 + info->flags = FBINFO_DEFAULT; 397 + info->fbops = &psbfb_ops; 398 + 399 + info->fix.smem_start = dev->mode_config.fb_base; 400 + info->fix.smem_len = size; 401 + 402 + info->screen_base = psKernelMemInfo->pvLinAddrKM; 403 + info->screen_size = size; 404 + 405 + drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); 406 + drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper, 407 + fb->width, 
fb->height); 408 + 409 + info->fix.mmio_start = pci_resource_start(dev->pdev, 0); 410 + info->fix.mmio_len = pci_resource_len(dev->pdev, 0); 411 + 412 + info->pixmap.size = 64 * 1024; 413 + info->pixmap.buf_align = 8; 414 + info->pixmap.access_align = 32; 415 + info->pixmap.flags = FB_PIXMAP_SYSTEM; 416 + info->pixmap.scan_align = 1; 417 + 418 + psbfb->fbdev = info; 419 + fbdev->pfb = psbfb; 420 + 421 + fbdev->psb_fb_helper.fb = fb; 422 + fbdev->psb_fb_helper.fbdev = info; 423 + MRSTLFBHandleChangeFB(dev, psbfb); 424 + 425 + return fb; 426 + #endif 427 + } 428 + 429 + static int psbfb_create(struct psb_fbdev *fbdev, 430 + struct drm_fb_helper_surface_size *sizes) 431 + { 432 + struct drm_device *dev = fbdev->psb_fb_helper.dev; 433 + struct drm_psb_private *dev_priv = dev->dev_private; 434 + struct psb_gtt *pg = dev_priv->pg; 435 + struct fb_info *info; 436 + struct drm_framebuffer *fb; 437 + struct psb_framebuffer *psbfb; 438 + struct drm_mode_fb_cmd mode_cmd; 439 + struct device *device = &dev->pdev->dev; 440 + 441 + struct ttm_buffer_object *fbo = NULL; 442 + int size, aligned_size; 443 + int ret; 444 + 445 + mode_cmd.width = sizes->surface_width; 446 + mode_cmd.height = sizes->surface_height; 447 + 448 + mode_cmd.bpp = 32; 449 + /* HW requires pitch to be 64 byte aligned */ 450 + mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); 451 + mode_cmd.depth = 24; 452 + 453 + size = mode_cmd.pitch * mode_cmd.height; 454 + aligned_size = ALIGN(size, PAGE_SIZE); 455 + 456 + mutex_lock(&dev->struct_mutex); 457 + fb = psb_framebuffer_create(dev, &mode_cmd, fbo); 458 + if (!fb) { 459 + DRM_ERROR("failed to allocate fb.\n"); 460 + ret = -ENOMEM; 461 + goto out_err0; 462 + } 463 + psbfb = to_psb_fb(fb); 464 + psbfb->size = size; 465 + 466 + info = framebuffer_alloc(sizeof(struct psb_fbdev), device); 467 + if (!info) { 468 + ret = -ENOMEM; 469 + goto out_err1; 470 + } 471 + 472 + info->par = fbdev; 473 + 474 + psbfb->fbdev = info; 475 + 476 + 
fbdev->psb_fb_helper.fb = fb; 477 + fbdev->psb_fb_helper.fbdev = info; 478 + fbdev->pfb = psbfb; 479 + 480 + strcpy(info->fix.id, "psbfb"); 481 + 482 + info->flags = FBINFO_DEFAULT; 483 + info->fbops = &psbfb_ops; 484 + info->fix.smem_start = dev->mode_config.fb_base; 485 + info->fix.smem_len = size; 486 + info->screen_base = (char *)pg->vram_addr; 487 + info->screen_size = size; 488 + memset(info->screen_base, 0, size); 489 + 490 + drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); 491 + drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper, 492 + sizes->fb_width, sizes->fb_height); 493 + 494 + info->fix.mmio_start = pci_resource_start(dev->pdev, 0); 495 + info->fix.mmio_len = pci_resource_len(dev->pdev, 0); 496 + 497 + info->pixmap.size = 64 * 1024; 498 + info->pixmap.buf_align = 8; 499 + info->pixmap.access_align = 32; 500 + info->pixmap.flags = FB_PIXMAP_SYSTEM; 501 + info->pixmap.scan_align = 1; 502 + 503 + DRM_DEBUG("fb depth is %d\n", fb->depth); 504 + DRM_DEBUG(" pitch is %d\n", fb->pitch); 505 + 506 + printk(KERN_INFO"allocated %dx%d fb\n", 507 + psbfb->base.width, psbfb->base.height); 508 + 509 + mutex_unlock(&dev->struct_mutex); 510 + 511 + return 0; 512 + out_err0: 513 + fb->funcs->destroy(fb); 514 + out_err1: 515 + mutex_unlock(&dev->struct_mutex); 516 + return ret; 517 + } 518 + 519 + static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 520 + u16 blue, int regno) 521 + { 522 + DRM_DEBUG("%s\n", __func__); 523 + } 524 + 525 + static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red, 526 + u16 *green, u16 *blue, int regno) 527 + { 528 + DRM_DEBUG("%s\n", __func__); 529 + } 530 + 531 + static int psbfb_probe(struct drm_fb_helper *helper, 532 + struct drm_fb_helper_surface_size *sizes) 533 + { 534 + struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper; 535 + int new_fb = 0; 536 + int ret; 537 + 538 + DRM_DEBUG("%s\n", __func__); 539 + 540 + if (!helper->fb) { 541 + ret = psbfb_create(psb_fbdev, sizes); 542 + if (ret) 543 + return 
ret; 544 + new_fb = 1; 545 + } 546 + return new_fb; 547 + } 548 + 549 + struct drm_fb_helper_funcs psb_fb_helper_funcs = { 550 + .gamma_set = psbfb_gamma_set, 551 + .gamma_get = psbfb_gamma_get, 552 + .fb_probe = psbfb_probe, 553 + }; 554 + 555 + int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev) 556 + { 557 + struct fb_info *info; 558 + struct psb_framebuffer *psbfb = fbdev->pfb; 559 + 560 + if (fbdev->psb_fb_helper.fbdev) { 561 + info = fbdev->psb_fb_helper.fbdev; 562 + unregister_framebuffer(info); 563 + iounmap(info->screen_base); 564 + framebuffer_release(info); 565 + } 566 + 567 + drm_fb_helper_fini(&fbdev->psb_fb_helper); 568 + 569 + drm_framebuffer_cleanup(&psbfb->base); 570 + 571 + return 0; 572 + } 573 + 574 + int psb_fbdev_init(struct drm_device *dev) 575 + { 576 + struct psb_fbdev *fbdev; 577 + struct drm_psb_private *dev_priv = dev->dev_private; 578 + int num_crtc; 579 + 580 + fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL); 581 + if (!fbdev) { 582 + DRM_ERROR("no memory\n"); 583 + return -ENOMEM; 584 + } 585 + 586 + dev_priv->fbdev = fbdev; 587 + fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs; 588 + 589 + num_crtc = 2; 590 + 591 + drm_fb_helper_init(dev, &fbdev->psb_fb_helper, num_crtc, 592 + INTELFB_CONN_LIMIT); 593 + 594 + drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper); 595 + drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32); 596 + return 0; 597 + } 598 + 599 + void psb_fbdev_fini(struct drm_device *dev) 600 + { 601 + struct drm_psb_private *dev_priv = dev->dev_private; 602 + 603 + if (!dev_priv->fbdev) 604 + return; 605 + 606 + psb_fbdev_destroy(dev, dev_priv->fbdev); 607 + kfree(dev_priv->fbdev); 608 + dev_priv->fbdev = NULL; 609 + } 610 + 611 + 612 + static void psbfb_output_poll_changed(struct drm_device *dev) 613 + { 614 + struct drm_psb_private *dev_priv = dev->dev_private; 615 + struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev; 616 + 
drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper); 617 + } 618 + 619 + int psbfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) 620 + { 621 + struct fb_info *info; 622 + struct psb_framebuffer *psbfb = to_psb_fb(fb); 623 + 624 + if (drm_psb_no_fb) 625 + return 0; 626 + 627 + info = psbfb->fbdev; 628 + psbfb->pvrBO = NULL; 629 + 630 + if (info) 631 + framebuffer_release(info); 632 + return 0; 633 + } 634 + /*EXPORT_SYMBOL(psbfb_remove); */ 635 + 636 + static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb, 637 + struct drm_file *file_priv, 638 + unsigned int *handle) 639 + { 640 + /* JB: TODO currently we can't go from a bo to a handle with ttm */ 641 + (void) file_priv; 642 + *handle = 0; 643 + return 0; 644 + } 645 + 646 + static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb) 647 + { 648 + struct drm_device *dev = fb->dev; 649 + struct psb_framebuffer *psbfb = to_psb_fb(fb); 650 + 651 + /*ummap gtt pages*/ 652 + psb_gtt_unmap_meminfo(dev, psbfb->hKernelMemInfo); 653 + if (psbfb->fbdev) 654 + psbfb_remove(dev, fb); 655 + 656 + /* JB: TODO not drop, refcount buffer */ 657 + drm_framebuffer_cleanup(fb); 658 + kfree(fb); 659 + } 660 + 661 + static const struct drm_mode_config_funcs psb_mode_funcs = { 662 + .fb_create = psb_user_framebuffer_create, 663 + .output_poll_changed = psbfb_output_poll_changed, 664 + }; 665 + 666 + static int psb_create_backlight_property(struct drm_device *dev) 667 + { 668 + struct drm_psb_private *dev_priv 669 + = (struct drm_psb_private *) dev->dev_private; 670 + struct drm_property *backlight; 671 + 672 + if (dev_priv->backlight_property) 673 + return 0; 674 + 675 + backlight = drm_property_create(dev, 676 + DRM_MODE_PROP_RANGE, 677 + "backlight", 678 + 2); 679 + backlight->values[0] = 0; 680 + backlight->values[1] = 100; 681 + 682 + dev_priv->backlight_property = backlight; 683 + 684 + return 0; 685 + } 686 + 687 + static void psb_setup_outputs(struct drm_device *dev) 688 + { 689 + struct 
drm_psb_private *dev_priv = 690 + (struct drm_psb_private *) dev->dev_private; 691 + struct drm_connector *connector; 692 + 693 + PSB_DEBUG_ENTRY("\n"); 694 + 695 + drm_mode_create_scaling_mode_property(dev); 696 + 697 + psb_create_backlight_property(dev); 698 + 699 + psb_intel_lvds_init(dev, &dev_priv->mode_dev); 700 + /* psb_intel_sdvo_init(dev, SDVOB); */ 701 + 702 + list_for_each_entry(connector, &dev->mode_config.connector_list, 703 + head) { 704 + struct psb_intel_output *psb_intel_output = 705 + to_psb_intel_output(connector); 706 + struct drm_encoder *encoder = &psb_intel_output->enc; 707 + int crtc_mask = 0, clone_mask = 0; 708 + 709 + /* valid crtcs */ 710 + switch (psb_intel_output->type) { 711 + case INTEL_OUTPUT_SDVO: 712 + crtc_mask = ((1 << 0) | (1 << 1)); 713 + clone_mask = (1 << INTEL_OUTPUT_SDVO); 714 + break; 715 + case INTEL_OUTPUT_LVDS: 716 + PSB_DEBUG_ENTRY("LVDS.\n"); 717 + crtc_mask = (1 << 1); 718 + clone_mask = (1 << INTEL_OUTPUT_LVDS); 719 + break; 720 + case INTEL_OUTPUT_MIPI: 721 + PSB_DEBUG_ENTRY("MIPI.\n"); 722 + crtc_mask = (1 << 0); 723 + clone_mask = (1 << INTEL_OUTPUT_MIPI); 724 + break; 725 + case INTEL_OUTPUT_MIPI2: 726 + PSB_DEBUG_ENTRY("MIPI2.\n"); 727 + crtc_mask = (1 << 2); 728 + clone_mask = (1 << INTEL_OUTPUT_MIPI2); 729 + break; 730 + case INTEL_OUTPUT_HDMI: 731 + PSB_DEBUG_ENTRY("HDMI.\n"); 732 + crtc_mask = (1 << 1); 733 + clone_mask = (1 << INTEL_OUTPUT_HDMI); 734 + break; 735 + } 736 + 737 + encoder->possible_crtcs = crtc_mask; 738 + encoder->possible_clones = 739 + psb_intel_connector_clones(dev, clone_mask); 740 + 741 + } 742 + } 743 + 744 + static void *psb_bo_from_handle(struct drm_device *dev, 745 + struct drm_file *file_priv, 746 + unsigned int handle) 747 + { 748 + void *psKernelMemInfo = NULL; 749 + void * hKernelMemInfo = (void *)handle; 750 + int ret; 751 + 752 + ret = psb_get_meminfo_by_handle(hKernelMemInfo, &psKernelMemInfo); 753 + if (ret) { 754 + DRM_ERROR("Cannot get meminfo for handle 0x%x\n", 755 + 
(u32)hKernelMemInfo); 756 + return NULL; 757 + } 758 + 759 + return (void *)psKernelMemInfo; 760 + } 761 + 762 + static size_t psb_bo_size(struct drm_device *dev, void *bof) 763 + { 764 + #if 0 765 + void *psKernelMemInfo = (void *)bof; 766 + return (size_t)psKernelMemInfo->ui32AllocSize; 767 + #else 768 + return 0; 769 + #endif 770 + } 771 + 772 + static size_t psb_bo_offset(struct drm_device *dev, void *bof) 773 + { 774 + struct psb_framebuffer *psbfb 775 + = (struct psb_framebuffer *)bof; 776 + 777 + return (size_t)psbfb->offset; 778 + } 779 + 780 + static int psb_bo_pin_for_scanout(struct drm_device *dev, void *bo) 781 + { 782 + return 0; 783 + } 784 + 785 + static int psb_bo_unpin_for_scanout(struct drm_device *dev, void *bo) 786 + { 787 + return 0; 788 + } 789 + 790 + void psb_modeset_init(struct drm_device *dev) 791 + { 792 + struct drm_psb_private *dev_priv = 793 + (struct drm_psb_private *) dev->dev_private; 794 + struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; 795 + int i; 796 + 797 + PSB_DEBUG_ENTRY("\n"); 798 + /* Init mm functions */ 799 + mode_dev->bo_from_handle = psb_bo_from_handle; 800 + mode_dev->bo_size = psb_bo_size; 801 + mode_dev->bo_offset = psb_bo_offset; 802 + mode_dev->bo_pin_for_scanout = psb_bo_pin_for_scanout; 803 + mode_dev->bo_unpin_for_scanout = psb_bo_unpin_for_scanout; 804 + 805 + drm_mode_config_init(dev); 806 + 807 + dev->mode_config.min_width = 0; 808 + dev->mode_config.min_height = 0; 809 + 810 + dev->mode_config.funcs = (void *) &psb_mode_funcs; 811 + 812 + /* set memory base */ 813 + /* MRST and PSB should use BAR 2*/ 814 + pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *) 815 + &(dev->mode_config.fb_base)); 816 + 817 + /* num pipes is 2 for PSB but 1 for Mrst */ 818 + for (i = 0; i < dev_priv->num_pipe; i++) 819 + psb_intel_crtc_init(dev, i, mode_dev); 820 + 821 + dev->mode_config.max_width = 2048; 822 + dev->mode_config.max_height = 2048; 823 + 824 + psb_setup_outputs(dev); 825 + 826 + /* setup fbs */ 827 + /* 
drm_initial_config(dev); */ 828 + } 829 + 830 + void psb_modeset_cleanup(struct drm_device *dev) 831 + { 832 + mutex_lock(&dev->struct_mutex); 833 + 834 + drm_kms_helper_poll_fini(dev); 835 + psb_fbdev_fini(dev); 836 + 837 + drm_mode_config_cleanup(dev); 838 + 839 + mutex_unlock(&dev->struct_mutex); 840 + }
/*
 * Copyright (c) 2008, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#ifndef _PSB_FB_H_
#define _PSB_FB_H_

#include <linux/version.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>

#include "psb_drv.h"

/*IMG Headers*/
/*#include "servicesint.h"*/

/*
 * A scanout framebuffer: wraps the DRM framebuffer with its TTM
 * backing object and, when fbdev emulation is active, the fb_info
 * registered with the fbdev layer.
 */
struct psb_framebuffer {
	struct drm_framebuffer base;		/* must be first: to_psb_fb() */
	struct address_space *addr_space;	/* mapping used for mmap faults */
	struct ttm_buffer_object *bo;		/* TTM backing object */
	struct fb_info * fbdev;			/* fbdev emulation info, or NULL */
	/* struct ttm_bo_kmap_obj kmap; */
	void *pvrBO; /* FIXME: sort this out */
	void * hKernelMemInfo;			/* IMG services handle for this fb */
	uint32_t size;				/* buffer size in bytes */
	uint32_t offset;			/* offset into the GTT aperture */
};

/* Per-device fbdev emulation state; pfb is the emulated console fb. */
struct psb_fbdev {
	struct drm_fb_helper psb_fb_helper;	/* must be first for helper casts */
	struct psb_framebuffer * pfb;
};


#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)


extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);


#endif
+122
drivers/staging/gma500/psb_fence.c
··· 1 + /* 2 + * Copyright (c) 2007, Intel Corporation. 3 + * All Rights Reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms and conditions of the GNU General Public License, 7 + * version 2, as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope it will be useful, but WITHOUT 10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 + * more details. 13 + * 14 + * You should have received a copy of the GNU General Public License along with 15 + * this program; if not, write to the Free Software Foundation, Inc., 16 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 + * 18 + * 19 + * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> 20 + */ 21 + 22 + #include <drm/drmP.h> 23 + #include "psb_drv.h" 24 + 25 + 26 + static void psb_fence_poll(struct ttm_fence_device *fdev, 27 + uint32_t fence_class, uint32_t waiting_types) 28 + { 29 + struct drm_psb_private *dev_priv = 30 + container_of(fdev, struct drm_psb_private, fdev); 31 + 32 + 33 + if (unlikely(!dev_priv)) 34 + return; 35 + 36 + if (waiting_types == 0) 37 + return; 38 + 39 + /* DRM_ERROR("Polling fence sequence, got 0x%08x\n", sequence); */ 40 + ttm_fence_handler(fdev, fence_class, 0 /* Sequence */, 41 + _PSB_FENCE_TYPE_EXE, 0); 42 + } 43 + 44 + void psb_fence_error(struct drm_device *dev, 45 + uint32_t fence_class, 46 + uint32_t sequence, uint32_t type, int error) 47 + { 48 + struct drm_psb_private *dev_priv = psb_priv(dev); 49 + struct ttm_fence_device *fdev = &dev_priv->fdev; 50 + unsigned long irq_flags; 51 + struct ttm_fence_class_manager *fc = 52 + &fdev->fence_class[fence_class]; 53 + 54 + BUG_ON(fence_class >= PSB_NUM_ENGINES); 55 + write_lock_irqsave(&fc->lock, irq_flags); 56 + ttm_fence_handler(fdev, fence_class, sequence, type, error); 57 + 
write_unlock_irqrestore(&fc->lock, irq_flags); 58 + } 59 + 60 + int psb_fence_emit_sequence(struct ttm_fence_device *fdev, 61 + uint32_t fence_class, 62 + uint32_t flags, uint32_t *sequence, 63 + unsigned long *timeout_jiffies) 64 + { 65 + struct drm_psb_private *dev_priv = 66 + container_of(fdev, struct drm_psb_private, fdev); 67 + 68 + if (!dev_priv) 69 + return -EINVAL; 70 + 71 + if (fence_class >= PSB_NUM_ENGINES) 72 + return -EINVAL; 73 + 74 + DRM_ERROR("Unexpected fence class\n"); 75 + return -EINVAL; 76 + } 77 + 78 + static void psb_fence_lockup(struct ttm_fence_object *fence, 79 + uint32_t fence_types) 80 + { 81 + DRM_ERROR("Unsupported fence class\n"); 82 + } 83 + 84 + void psb_fence_handler(struct drm_device *dev, uint32_t fence_class) 85 + { 86 + struct drm_psb_private *dev_priv = psb_priv(dev); 87 + struct ttm_fence_device *fdev = &dev_priv->fdev; 88 + struct ttm_fence_class_manager *fc = 89 + &fdev->fence_class[fence_class]; 90 + unsigned long irq_flags; 91 + 92 + write_lock_irqsave(&fc->lock, irq_flags); 93 + psb_fence_poll(fdev, fence_class, fc->waiting_types); 94 + write_unlock_irqrestore(&fc->lock, irq_flags); 95 + } 96 + 97 + 98 + static struct ttm_fence_driver psb_ttm_fence_driver = { 99 + .has_irq = NULL, 100 + .emit = psb_fence_emit_sequence, 101 + .flush = NULL, 102 + .poll = psb_fence_poll, 103 + .needed_flush = NULL, 104 + .wait = NULL, 105 + .signaled = NULL, 106 + .lockup = psb_fence_lockup, 107 + }; 108 + 109 + int psb_ttm_fence_device_init(struct ttm_fence_device *fdev) 110 + { 111 + struct drm_psb_private *dev_priv = 112 + container_of(fdev, struct drm_psb_private, fdev); 113 + struct ttm_fence_class_init fci = {.wrap_diff = (1 << 30), 114 + .flush_diff = (1 << 29), 115 + .sequence_mask = 0xFFFFFFFF 116 + }; 117 + 118 + return ttm_fence_device_init(PSB_NUM_ENGINES, 119 + dev_priv->mem_global_ref.object, 120 + fdev, &fci, 1, 121 + &psb_ttm_fence_driver); 122 + }
/*
 * NOTE(review): this file is modpost-generated build output (the module
 * metadata stub produced by "make modules"). Generated artifacts like
 * this should not normally be committed to the source tree — confirm
 * whether it can be dropped and regenerated at build time.
 */
#include <linux/module.h>
#include <linux/vermagic.h>
#include <linux/compiler.h>

MODULE_INFO(vermagic, VERMAGIC_STRING);

struct module __this_module
__attribute__((section(".gnu.linkonce.this_module"))) = {
	.name = KBUILD_MODNAME,
	.init = init_module,
#ifdef CONFIG_MODULE_UNLOAD
	.exit = cleanup_module,
#endif
	.arch = MODULE_ARCH_INIT,
};

MODULE_INFO(staging, "Y");

/* Runtime module dependencies recorded by modpost. */
static const char __module_depends[]
__used
__attribute__((section(".modinfo"))) =
"depends=ttm,drm,drm_kms_helper,i2c-core,cfbfillrect,cfbimgblt,cfbcopyarea,i2c-algo-bit";

/* PCI IDs for the Poulsbo GMA500 variants. */
MODULE_ALIAS("pci:v00008086d00008108sv*sd*bc*sc*i*");
MODULE_ALIAS("pci:v00008086d00008109sv*sd*bc*sc*i*");

MODULE_INFO(srcversion, "933CCC78041722973001B78");
+1034
drivers/staging/gma500/psb_gtt.c
··· 1 + /* 2 + * Copyright (c) 2007, Intel Corporation. 3 + * All Rights Reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms and conditions of the GNU General Public License, 7 + * version 2, as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope it will be useful, but WITHOUT 10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 + * more details. 13 + * 14 + * You should have received a copy of the GNU General Public License along with 15 + * this program; if not, write to the Free Software Foundation, Inc., 16 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 + * 18 + * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com> 19 + */ 20 + 21 + #include <drm/drmP.h> 22 + #include "psb_drv.h" 23 + #include "psb_pvr_glue.h" 24 + 25 + static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type) 26 + { 27 + uint32_t mask = PSB_PTE_VALID; 28 + 29 + if (type & PSB_MMU_CACHED_MEMORY) 30 + mask |= PSB_PTE_CACHED; 31 + if (type & PSB_MMU_RO_MEMORY) 32 + mask |= PSB_PTE_RO; 33 + if (type & PSB_MMU_WO_MEMORY) 34 + mask |= PSB_PTE_WO; 35 + 36 + return (pfn << PAGE_SHIFT) | mask; 37 + } 38 + 39 + struct psb_gtt *psb_gtt_alloc(struct drm_device *dev) 40 + { 41 + struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 42 + 43 + if (!tmp) 44 + return NULL; 45 + 46 + init_rwsem(&tmp->sem); 47 + tmp->dev = dev; 48 + 49 + return tmp; 50 + } 51 + 52 + void psb_gtt_takedown(struct psb_gtt *pg, int free) 53 + { 54 + struct drm_psb_private *dev_priv = pg->dev->dev_private; 55 + 56 + if (!pg) 57 + return; 58 + 59 + if (pg->gtt_map) { 60 + iounmap(pg->gtt_map); 61 + pg->gtt_map = NULL; 62 + } 63 + if (pg->initialized) { 64 + pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL, 65 + pg->gmch_ctrl); 66 + PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL); 67 + (void) 
PSB_RVDC32(PSB_PGETBL_CTL); 68 + } 69 + if (free) 70 + kfree(pg); 71 + } 72 + 73 + int psb_gtt_init(struct psb_gtt *pg, int resume) 74 + { 75 + struct drm_device *dev = pg->dev; 76 + struct drm_psb_private *dev_priv = dev->dev_private; 77 + unsigned gtt_pages; 78 + unsigned long stolen_size, vram_stolen_size, ci_stolen_size; 79 + unsigned long rar_stolen_size; 80 + unsigned i, num_pages; 81 + unsigned pfn_base; 82 + uint32_t ci_pages, vram_pages; 83 + uint32_t tt_pages; 84 + uint32_t *ttm_gtt_map; 85 + uint32_t dvmt_mode = 0; 86 + 87 + int ret = 0; 88 + uint32_t pte; 89 + 90 + pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl); 91 + pci_write_config_word(dev->pdev, PSB_GMCH_CTRL, 92 + pg->gmch_ctrl | _PSB_GMCH_ENABLED); 93 + 94 + pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL); 95 + PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL); 96 + (void) PSB_RVDC32(PSB_PGETBL_CTL); 97 + 98 + pg->initialized = 1; 99 + 100 + pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK; 101 + 102 + pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE); 103 + /* fix me: video mmu has hw bug to access 0x0D0000000, 104 + * then make gatt start at 0x0e000,0000 */ 105 + pg->mmu_gatt_start = PSB_MEM_TT_START; 106 + pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE); 107 + gtt_pages = 108 + pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT; 109 + pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE) 110 + >> PAGE_SHIFT; 111 + 112 + pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base); 113 + vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE; 114 + 115 + /* CI is not included in the stolen size since the TOPAZ MMU bug */ 116 + ci_stolen_size = dev_priv->ci_region_size; 117 + /* Don't add CI & RAR share buffer space 118 + * managed by TTM to stolen_size */ 119 + stolen_size = vram_stolen_size; 120 + 121 + rar_stolen_size = dev_priv->rar_region_size; 122 + 123 + printk(KERN_INFO"GMMADR(region 0) start: 0x%08x 
(%dM).\n", 124 + pg->gatt_start, pg->gatt_pages/256); 125 + printk(KERN_INFO"GTTADR(region 3) start: 0x%08x (can map %dM RAM), and actual RAM base 0x%08x.\n", 126 + pg->gtt_start, gtt_pages * 4, pg->gtt_phys_start); 127 + printk(KERN_INFO "Stole memory information\n"); 128 + printk(KERN_INFO " base in RAM: 0x%x\n", pg->stolen_base); 129 + printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n", 130 + vram_stolen_size/1024); 131 + dvmt_mode = (pg->gmch_ctrl >> 4) & 0x7; 132 + printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n", 133 + (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode); 134 + 135 + if (ci_stolen_size > 0) 136 + printk(KERN_INFO"CI Stole memory: RAM base = 0x%08x, size = %lu M\n", 137 + dev_priv->ci_region_start, 138 + ci_stolen_size / 1024 / 1024); 139 + if (rar_stolen_size > 0) 140 + printk(KERN_INFO "RAR Stole memory: RAM base = 0x%08x, size = %lu M\n", 141 + dev_priv->rar_region_start, 142 + rar_stolen_size / 1024 / 1024); 143 + 144 + if (resume && (gtt_pages != pg->gtt_pages) && 145 + (stolen_size != pg->stolen_size)) { 146 + DRM_ERROR("GTT resume error.\n"); 147 + ret = -EINVAL; 148 + goto out_err; 149 + } 150 + 151 + pg->gtt_pages = gtt_pages; 152 + pg->stolen_size = stolen_size; 153 + pg->vram_stolen_size = vram_stolen_size; 154 + pg->ci_stolen_size = ci_stolen_size; 155 + pg->rar_stolen_size = rar_stolen_size; 156 + pg->gtt_map = 157 + ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT); 158 + if (!pg->gtt_map) { 159 + DRM_ERROR("Failure to map gtt.\n"); 160 + ret = -ENOMEM; 161 + goto out_err; 162 + } 163 + 164 + pg->vram_addr = ioremap_wc(pg->stolen_base, stolen_size); 165 + if (!pg->vram_addr) { 166 + DRM_ERROR("Failure to map stolen base.\n"); 167 + ret = -ENOMEM; 168 + goto out_err; 169 + } 170 + 171 + DRM_DEBUG("%s: vram kernel virtual address %p\n", pg->vram_addr); 172 + 173 + tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ? 
174 + (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT; 175 + 176 + ttm_gtt_map = pg->gtt_map + tt_pages / 2; 177 + 178 + /* 179 + * insert vram stolen pages. 180 + */ 181 + 182 + pfn_base = pg->stolen_base >> PAGE_SHIFT; 183 + vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT; 184 + printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n", 185 + num_pages, pfn_base, 0); 186 + for (i = 0; i < num_pages; ++i) { 187 + pte = psb_gtt_mask_pte(pfn_base + i, 0); 188 + iowrite32(pte, pg->gtt_map + i); 189 + } 190 + 191 + /* 192 + * Init rest of gtt managed by IMG. 193 + */ 194 + pfn_base = page_to_pfn(dev_priv->scratch_page); 195 + pte = psb_gtt_mask_pte(pfn_base, 0); 196 + for (; i < tt_pages / 2 - 1; ++i) 197 + iowrite32(pte, pg->gtt_map + i); 198 + 199 + /* 200 + * insert CI stolen pages 201 + */ 202 + 203 + pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT; 204 + ci_pages = num_pages = ci_stolen_size >> PAGE_SHIFT; 205 + printk(KERN_INFO"Set up %d CI stolen pages starting at 0x%08x, GTT offset %dK\n", 206 + num_pages, pfn_base, (ttm_gtt_map - pg->gtt_map) * 4); 207 + for (i = 0; i < num_pages; ++i) { 208 + pte = psb_gtt_mask_pte(pfn_base + i, 0); 209 + iowrite32(pte, ttm_gtt_map + i); 210 + } 211 + 212 + /* 213 + * insert RAR stolen pages 214 + */ 215 + if (rar_stolen_size != 0) { 216 + pfn_base = dev_priv->rar_region_start >> PAGE_SHIFT; 217 + num_pages = rar_stolen_size >> PAGE_SHIFT; 218 + printk(KERN_INFO"Set up %d RAR stolen pages starting at 0x%08x, GTT offset %dK\n", 219 + num_pages, pfn_base, 220 + (ttm_gtt_map - pg->gtt_map + i) * 4); 221 + for (; i < num_pages + ci_pages; ++i) { 222 + pte = psb_gtt_mask_pte(pfn_base + i - ci_pages, 0); 223 + iowrite32(pte, ttm_gtt_map + i); 224 + } 225 + } 226 + /* 227 + * Init rest of gtt managed by TTM. 
228 + */ 229 + 230 + pfn_base = page_to_pfn(dev_priv->scratch_page); 231 + pte = psb_gtt_mask_pte(pfn_base, 0); 232 + PSB_DEBUG_INIT("Initializing the rest of a total " 233 + "of %d gtt pages.\n", pg->gatt_pages); 234 + 235 + for (; i < pg->gatt_pages - tt_pages / 2; ++i) 236 + iowrite32(pte, ttm_gtt_map + i); 237 + (void) ioread32(pg->gtt_map + i - 1); 238 + 239 + return 0; 240 + 241 + out_err: 242 + psb_gtt_takedown(pg, 0); 243 + return ret; 244 + } 245 + 246 + int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages, 247 + unsigned offset_pages, unsigned num_pages, 248 + unsigned desired_tile_stride, 249 + unsigned hw_tile_stride, int type) 250 + { 251 + unsigned rows = 1; 252 + unsigned add; 253 + unsigned row_add; 254 + unsigned i; 255 + unsigned j; 256 + uint32_t *cur_page = NULL; 257 + uint32_t pte; 258 + 259 + if (hw_tile_stride) 260 + rows = num_pages / desired_tile_stride; 261 + else 262 + desired_tile_stride = num_pages; 263 + 264 + add = desired_tile_stride; 265 + row_add = hw_tile_stride; 266 + 267 + down_read(&pg->sem); 268 + for (i = 0; i < rows; ++i) { 269 + cur_page = pg->gtt_map + offset_pages; 270 + for (j = 0; j < desired_tile_stride; ++j) { 271 + pte = 272 + psb_gtt_mask_pte(page_to_pfn(*pages++), type); 273 + iowrite32(pte, cur_page++); 274 + } 275 + offset_pages += add; 276 + } 277 + (void) ioread32(cur_page - 1); 278 + up_read(&pg->sem); 279 + 280 + return 0; 281 + } 282 + 283 + int psb_gtt_insert_phys_addresses(struct psb_gtt *pg, dma_addr_t *pPhysFrames, 284 + unsigned offset_pages, unsigned num_pages, int type) 285 + { 286 + unsigned j; 287 + uint32_t *cur_page = NULL; 288 + uint32_t pte; 289 + u32 ba; 290 + 291 + down_read(&pg->sem); 292 + cur_page = pg->gtt_map + offset_pages; 293 + for (j = 0; j < num_pages; ++j) { 294 + ba = *pPhysFrames++; 295 + pte = psb_gtt_mask_pte(ba >> PAGE_SHIFT, type); 296 + iowrite32(pte, cur_page++); 297 + } 298 + (void) ioread32(cur_page - 1); 299 + up_read(&pg->sem); 300 + return 0; 301 + } 302 + 
303 + int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages, 304 + unsigned num_pages, unsigned desired_tile_stride, 305 + unsigned hw_tile_stride, int rc_prot) 306 + { 307 + struct drm_psb_private *dev_priv = pg->dev->dev_private; 308 + unsigned rows = 1; 309 + unsigned add; 310 + unsigned row_add; 311 + unsigned i; 312 + unsigned j; 313 + uint32_t *cur_page = NULL; 314 + unsigned pfn_base = page_to_pfn(dev_priv->scratch_page); 315 + uint32_t pte = psb_gtt_mask_pte(pfn_base, 0); 316 + 317 + if (hw_tile_stride) 318 + rows = num_pages / desired_tile_stride; 319 + else 320 + desired_tile_stride = num_pages; 321 + 322 + add = desired_tile_stride; 323 + row_add = hw_tile_stride; 324 + 325 + if (rc_prot) 326 + down_read(&pg->sem); 327 + for (i = 0; i < rows; ++i) { 328 + cur_page = pg->gtt_map + offset_pages; 329 + for (j = 0; j < desired_tile_stride; ++j) 330 + iowrite32(pte, cur_page++); 331 + 332 + offset_pages += add; 333 + } 334 + (void) ioread32(cur_page - 1); 335 + if (rc_prot) 336 + up_read(&pg->sem); 337 + 338 + return 0; 339 + } 340 + 341 + int psb_gtt_mm_init(struct psb_gtt *pg) 342 + { 343 + struct psb_gtt_mm *gtt_mm; 344 + struct drm_psb_private *dev_priv = pg->dev->dev_private; 345 + struct drm_open_hash *ht; 346 + struct drm_mm *mm; 347 + int ret; 348 + uint32_t tt_start; 349 + uint32_t tt_size; 350 + 351 + if (!pg || !pg->initialized) { 352 + DRM_DEBUG("Invalid gtt struct\n"); 353 + return -EINVAL; 354 + } 355 + 356 + gtt_mm = kzalloc(sizeof(struct psb_gtt_mm), GFP_KERNEL); 357 + if (!gtt_mm) 358 + return -ENOMEM; 359 + 360 + spin_lock_init(&gtt_mm->lock); 361 + 362 + ht = &gtt_mm->hash; 363 + ret = drm_ht_create(ht, 20); 364 + if (ret) { 365 + DRM_DEBUG("Create hash table failed(%d)\n", ret); 366 + goto err_free; 367 + } 368 + 369 + tt_start = (pg->stolen_size + PAGE_SIZE - 1) >> PAGE_SHIFT; 370 + tt_start = (tt_start < pg->gatt_pages) ? tt_start : pg->gatt_pages; 371 + tt_size = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ? 
372 + (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT; 373 + 374 + mm = &gtt_mm->base; 375 + 376 + /*will use tt_start ~ 128M for IMG TT buffers*/ 377 + ret = drm_mm_init(mm, tt_start, ((tt_size / 2) - tt_start)); 378 + if (ret) { 379 + DRM_DEBUG("drm_mm_int error(%d)\n", ret); 380 + goto err_mm_init; 381 + } 382 + 383 + gtt_mm->count = 0; 384 + 385 + dev_priv->gtt_mm = gtt_mm; 386 + 387 + DRM_INFO("PSB GTT mem manager ready, tt_start %ld, tt_size %ld pages\n", 388 + (unsigned long)tt_start, 389 + (unsigned long)((tt_size / 2) - tt_start)); 390 + return 0; 391 + err_mm_init: 392 + drm_ht_remove(ht); 393 + 394 + err_free: 395 + kfree(gtt_mm); 396 + return ret; 397 + } 398 + 399 + /** 400 + * Delete all hash entries; 401 + */ 402 + void psb_gtt_mm_takedown(void) 403 + { 404 + return; 405 + } 406 + 407 + static int psb_gtt_mm_get_ht_by_pid_locked(struct psb_gtt_mm *mm, 408 + u32 tgid, 409 + struct psb_gtt_hash_entry **hentry) 410 + { 411 + struct drm_hash_item *entry; 412 + struct psb_gtt_hash_entry *psb_entry; 413 + int ret; 414 + 415 + ret = drm_ht_find_item(&mm->hash, tgid, &entry); 416 + if (ret) { 417 + DRM_DEBUG("Cannot find entry pid=%ld\n", tgid); 418 + return ret; 419 + } 420 + 421 + psb_entry = container_of(entry, struct psb_gtt_hash_entry, item); 422 + if (!psb_entry) { 423 + DRM_DEBUG("Invalid entry"); 424 + return -EINVAL; 425 + } 426 + 427 + *hentry = psb_entry; 428 + return 0; 429 + } 430 + 431 + 432 + static int psb_gtt_mm_insert_ht_locked(struct psb_gtt_mm *mm, 433 + u32 tgid, 434 + struct psb_gtt_hash_entry *hentry) 435 + { 436 + struct drm_hash_item *item; 437 + int ret; 438 + 439 + if (!hentry) { 440 + DRM_DEBUG("Invalid parameters\n"); 441 + return -EINVAL; 442 + } 443 + 444 + item = &hentry->item; 445 + item->key = tgid; 446 + 447 + /** 448 + * NOTE: drm_ht_insert_item will perform such a check 449 + ret = psb_gtt_mm_get_ht_by_pid(mm, tgid, &tmp); 450 + if (!ret) { 451 + DRM_DEBUG("Entry already exists for pid %ld\n", tgid); 452 + return -EAGAIN; 453 + } 
454 + */ 455 + 456 + /*Insert the given entry*/ 457 + ret = drm_ht_insert_item(&mm->hash, item); 458 + if (ret) { 459 + DRM_DEBUG("Insert failure\n"); 460 + return ret; 461 + } 462 + 463 + mm->count++; 464 + 465 + return 0; 466 + } 467 + 468 + static int psb_gtt_mm_alloc_insert_ht(struct psb_gtt_mm *mm, 469 + u32 tgid, 470 + struct psb_gtt_hash_entry **entry) 471 + { 472 + struct psb_gtt_hash_entry *hentry; 473 + int ret; 474 + 475 + /*if the hentry for this tgid exists, just get it and return*/ 476 + spin_lock(&mm->lock); 477 + ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry); 478 + if (!ret) { 479 + DRM_DEBUG("Entry for tgid %ld exist, hentry %p\n", 480 + tgid, hentry); 481 + *entry = hentry; 482 + spin_unlock(&mm->lock); 483 + return 0; 484 + } 485 + spin_unlock(&mm->lock); 486 + 487 + DRM_DEBUG("Entry for tgid %ld doesn't exist, will create it\n", tgid); 488 + 489 + hentry = kzalloc(sizeof(struct psb_gtt_hash_entry), GFP_KERNEL); 490 + if (!hentry) { 491 + DRM_DEBUG("Kmalloc failled\n"); 492 + return -ENOMEM; 493 + } 494 + 495 + ret = drm_ht_create(&hentry->ht, 20); 496 + if (ret) { 497 + DRM_DEBUG("Create hash table failed\n"); 498 + return ret; 499 + } 500 + 501 + spin_lock(&mm->lock); 502 + ret = psb_gtt_mm_insert_ht_locked(mm, tgid, hentry); 503 + spin_unlock(&mm->lock); 504 + 505 + if (!ret) 506 + *entry = hentry; 507 + 508 + return ret; 509 + } 510 + 511 + static struct psb_gtt_hash_entry * 512 + psb_gtt_mm_remove_ht_locked(struct psb_gtt_mm *mm, u32 tgid) 513 + { 514 + struct psb_gtt_hash_entry *tmp; 515 + int ret; 516 + 517 + ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &tmp); 518 + if (ret) { 519 + DRM_DEBUG("Cannot find entry pid %ld\n", tgid); 520 + return NULL; 521 + } 522 + 523 + /*remove it from ht*/ 524 + drm_ht_remove_item(&mm->hash, &tmp->item); 525 + 526 + mm->count--; 527 + 528 + return tmp; 529 + } 530 + 531 + static int psb_gtt_mm_remove_free_ht_locked(struct psb_gtt_mm *mm, u32 tgid) 532 + { 533 + struct psb_gtt_hash_entry 
*entry; 534 + 535 + entry = psb_gtt_mm_remove_ht_locked(mm, tgid); 536 + 537 + if (!entry) { 538 + DRM_DEBUG("Invalid entry"); 539 + return -EINVAL; 540 + } 541 + 542 + /*delete ht*/ 543 + drm_ht_remove(&entry->ht); 544 + 545 + /*free this entry*/ 546 + kfree(entry); 547 + return 0; 548 + } 549 + 550 + static int 551 + psb_gtt_mm_get_mem_mapping_locked(struct drm_open_hash *ht, 552 + u32 key, 553 + struct psb_gtt_mem_mapping **hentry) 554 + { 555 + struct drm_hash_item *entry; 556 + struct psb_gtt_mem_mapping *mapping; 557 + int ret; 558 + 559 + ret = drm_ht_find_item(ht, key, &entry); 560 + if (ret) { 561 + DRM_DEBUG("Cannot find key %ld\n", key); 562 + return ret; 563 + } 564 + 565 + mapping = container_of(entry, struct psb_gtt_mem_mapping, item); 566 + if (!mapping) { 567 + DRM_DEBUG("Invalid entry\n"); 568 + return -EINVAL; 569 + } 570 + 571 + *hentry = mapping; 572 + return 0; 573 + } 574 + 575 + static int 576 + psb_gtt_mm_insert_mem_mapping_locked(struct drm_open_hash *ht, 577 + u32 key, 578 + struct psb_gtt_mem_mapping *hentry) 579 + { 580 + struct drm_hash_item *item; 581 + struct psb_gtt_hash_entry *entry; 582 + int ret; 583 + 584 + if (!hentry) { 585 + DRM_DEBUG("hentry is NULL\n"); 586 + return -EINVAL; 587 + } 588 + 589 + item = &hentry->item; 590 + item->key = key; 591 + 592 + ret = drm_ht_insert_item(ht, item); 593 + if (ret) { 594 + DRM_DEBUG("insert_item failed\n"); 595 + return ret; 596 + } 597 + 598 + entry = container_of(ht, struct psb_gtt_hash_entry, ht); 599 + if (entry) 600 + entry->count++; 601 + 602 + return 0; 603 + } 604 + 605 + static int 606 + psb_gtt_mm_alloc_insert_mem_mapping(struct psb_gtt_mm *mm, 607 + struct drm_open_hash *ht, 608 + u32 key, 609 + struct drm_mm_node *node, 610 + struct psb_gtt_mem_mapping **entry) 611 + { 612 + struct psb_gtt_mem_mapping *mapping; 613 + int ret; 614 + 615 + if (!node || !ht) { 616 + DRM_DEBUG("parameter error\n"); 617 + return -EINVAL; 618 + } 619 + 620 + /*try to get this mem_map */ 621 + 
spin_lock(&mm->lock); 622 + ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &mapping); 623 + if (!ret) { 624 + DRM_DEBUG("mapping entry for key %ld exists, entry %p\n", 625 + key, mapping); 626 + *entry = mapping; 627 + spin_unlock(&mm->lock); 628 + return 0; 629 + } 630 + spin_unlock(&mm->lock); 631 + 632 + DRM_DEBUG("Mapping entry for key %ld doesn't exist, will create it\n", 633 + key); 634 + 635 + mapping = kzalloc(sizeof(struct psb_gtt_mem_mapping), GFP_KERNEL); 636 + if (!mapping) { 637 + DRM_DEBUG("kmalloc failed\n"); 638 + return -ENOMEM; 639 + } 640 + 641 + mapping->node = node; 642 + 643 + spin_lock(&mm->lock); 644 + ret = psb_gtt_mm_insert_mem_mapping_locked(ht, key, mapping); 645 + spin_unlock(&mm->lock); 646 + 647 + if (!ret) 648 + *entry = mapping; 649 + 650 + return ret; 651 + } 652 + 653 + static struct psb_gtt_mem_mapping * 654 + psb_gtt_mm_remove_mem_mapping_locked(struct drm_open_hash *ht, u32 key) 655 + { 656 + struct psb_gtt_mem_mapping *tmp; 657 + struct psb_gtt_hash_entry *entry; 658 + int ret; 659 + 660 + ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &tmp); 661 + if (ret) { 662 + DRM_DEBUG("Cannot find key %ld\n", key); 663 + return NULL; 664 + } 665 + 666 + drm_ht_remove_item(ht, &tmp->item); 667 + 668 + entry = container_of(ht, struct psb_gtt_hash_entry, ht); 669 + if (entry) 670 + entry->count--; 671 + 672 + return tmp; 673 + } 674 + 675 + static int psb_gtt_mm_remove_free_mem_mapping_locked(struct drm_open_hash *ht, 676 + u32 key, 677 + struct drm_mm_node **node) 678 + { 679 + struct psb_gtt_mem_mapping *entry; 680 + 681 + entry = psb_gtt_mm_remove_mem_mapping_locked(ht, key); 682 + if (!entry) { 683 + DRM_DEBUG("entry is NULL\n"); 684 + return -EINVAL; 685 + } 686 + 687 + *node = entry->node; 688 + 689 + kfree(entry); 690 + return 0; 691 + } 692 + 693 + static int psb_gtt_add_node(struct psb_gtt_mm *mm, 694 + u32 tgid, 695 + u32 key, 696 + struct drm_mm_node *node, 697 + struct psb_gtt_mem_mapping **entry) 698 + { 699 + struct 
psb_gtt_hash_entry *hentry; 700 + struct psb_gtt_mem_mapping *mapping; 701 + int ret; 702 + 703 + ret = psb_gtt_mm_alloc_insert_ht(mm, tgid, &hentry); 704 + if (ret) { 705 + DRM_DEBUG("alloc_insert failed\n"); 706 + return ret; 707 + } 708 + 709 + ret = psb_gtt_mm_alloc_insert_mem_mapping(mm, 710 + &hentry->ht, 711 + key, 712 + node, 713 + &mapping); 714 + if (ret) { 715 + DRM_DEBUG("mapping alloc_insert failed\n"); 716 + return ret; 717 + } 718 + 719 + *entry = mapping; 720 + 721 + return 0; 722 + } 723 + 724 + static int psb_gtt_remove_node(struct psb_gtt_mm *mm, 725 + u32 tgid, 726 + u32 key, 727 + struct drm_mm_node **node) 728 + { 729 + struct psb_gtt_hash_entry *hentry; 730 + struct drm_mm_node *tmp; 731 + int ret; 732 + 733 + spin_lock(&mm->lock); 734 + ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry); 735 + if (ret) { 736 + DRM_DEBUG("Cannot find entry for pid %ld\n", tgid); 737 + spin_unlock(&mm->lock); 738 + return ret; 739 + } 740 + spin_unlock(&mm->lock); 741 + 742 + /*remove mapping entry*/ 743 + spin_lock(&mm->lock); 744 + ret = psb_gtt_mm_remove_free_mem_mapping_locked(&hentry->ht, 745 + key, 746 + &tmp); 747 + if (ret) { 748 + DRM_DEBUG("remove_free failed\n"); 749 + spin_unlock(&mm->lock); 750 + return ret; 751 + } 752 + 753 + *node = tmp; 754 + 755 + /*check the count of mapping entry*/ 756 + if (!hentry->count) { 757 + DRM_DEBUG("count of mapping entry is zero, tgid=%ld\n", tgid); 758 + psb_gtt_mm_remove_free_ht_locked(mm, tgid); 759 + } 760 + 761 + spin_unlock(&mm->lock); 762 + 763 + return 0; 764 + } 765 + 766 + static int psb_gtt_mm_alloc_mem(struct psb_gtt_mm *mm, 767 + uint32_t pages, 768 + uint32_t align, 769 + struct drm_mm_node **node) 770 + { 771 + struct drm_mm_node *tmp_node; 772 + int ret; 773 + 774 + do { 775 + ret = drm_mm_pre_get(&mm->base); 776 + if (unlikely(ret)) { 777 + DRM_DEBUG("drm_mm_pre_get error\n"); 778 + return ret; 779 + } 780 + 781 + spin_lock(&mm->lock); 782 + tmp_node = drm_mm_search_free(&mm->base, pages, 
align, 1); 783 + if (unlikely(!tmp_node)) { 784 + DRM_DEBUG("No free node found\n"); 785 + spin_unlock(&mm->lock); 786 + break; 787 + } 788 + 789 + tmp_node = drm_mm_get_block_atomic(tmp_node, pages, align); 790 + spin_unlock(&mm->lock); 791 + } while (!tmp_node); 792 + 793 + if (!tmp_node) { 794 + DRM_DEBUG("Node allocation failed\n"); 795 + return -ENOMEM; 796 + } 797 + 798 + *node = tmp_node; 799 + return 0; 800 + } 801 + 802 + static void psb_gtt_mm_free_mem(struct psb_gtt_mm *mm, struct drm_mm_node *node) 803 + { 804 + spin_lock(&mm->lock); 805 + drm_mm_put_block(node); 806 + spin_unlock(&mm->lock); 807 + } 808 + 809 + int psb_gtt_map_meminfo(struct drm_device *dev, 810 + void *hKernelMemInfo, 811 + uint32_t *offset) 812 + { 813 + return -EINVAL; 814 + /* FIXMEAC */ 815 + #if 0 816 + struct drm_psb_private *dev_priv 817 + = (struct drm_psb_private *)dev->dev_private; 818 + void *psKernelMemInfo; 819 + struct psb_gtt_mm *mm = dev_priv->gtt_mm; 820 + struct psb_gtt *pg = dev_priv->pg; 821 + uint32_t size, pages, offset_pages; 822 + void *kmem; 823 + struct drm_mm_node *node; 824 + struct page **page_list; 825 + struct psb_gtt_mem_mapping *mapping = NULL; 826 + int ret; 827 + 828 + ret = psb_get_meminfo_by_handle(hKernelMemInfo, &psKernelMemInfo); 829 + if (ret) { 830 + DRM_DEBUG("Cannot find kernelMemInfo handle %ld\n", 831 + hKernelMemInfo); 832 + return -EINVAL; 833 + } 834 + 835 + DRM_DEBUG("Got psKernelMemInfo %p for handle %lx\n", 836 + psKernelMemInfo, (u32)hKernelMemInfo); 837 + size = psKernelMemInfo->ui32AllocSize; 838 + kmem = psKernelMemInfo->pvLinAddrKM; 839 + pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 840 + 841 + DRM_DEBUG("KerMemInfo size %ld, cpuVadr %lx, pages %ld, osMemHdl %lx\n", 842 + size, kmem, pages, psKernelMemInfo->sMemBlk.hOSMemHandle); 843 + 844 + if (!kmem) 845 + DRM_DEBUG("kmem is NULL"); 846 + 847 + /*get pages*/ 848 + ret = psb_get_pages_by_mem_handle(psKernelMemInfo->sMemBlk.hOSMemHandle, 849 + &page_list); 850 + if (ret) { 851 
+ DRM_DEBUG("get pages error\n"); 852 + return ret; 853 + } 854 + 855 + DRM_DEBUG("get %ld pages\n", pages); 856 + 857 + /*alloc memory in TT apeture*/ 858 + ret = psb_gtt_mm_alloc_mem(mm, pages, 0, &node); 859 + if (ret) { 860 + DRM_DEBUG("alloc TT memory error\n"); 861 + goto failed_pages_alloc; 862 + } 863 + 864 + /*update psb_gtt_mm*/ 865 + ret = psb_gtt_add_node(mm, 866 + task_tgid_nr(current), 867 + (u32)hKernelMemInfo, 868 + node, 869 + &mapping); 870 + if (ret) { 871 + DRM_DEBUG("add_node failed"); 872 + goto failed_add_node; 873 + } 874 + 875 + node = mapping->node; 876 + offset_pages = node->start; 877 + 878 + DRM_DEBUG("get free node for %ld pages, offset %ld pages", 879 + pages, offset_pages); 880 + 881 + /*update gtt*/ 882 + psb_gtt_insert_pages(pg, page_list, 883 + (unsigned)offset_pages, 884 + (unsigned)pages, 885 + 0, 886 + 0, 887 + 0); 888 + 889 + *offset = offset_pages; 890 + return 0; 891 + 892 + failed_add_node: 893 + psb_gtt_mm_free_mem(mm, node); 894 + failed_pages_alloc: 895 + kfree(page_list); 896 + return ret; 897 + #endif 898 + } 899 + 900 + int psb_gtt_unmap_meminfo(struct drm_device *dev, void * hKernelMemInfo) 901 + { 902 + struct drm_psb_private *dev_priv 903 + = (struct drm_psb_private *)dev->dev_private; 904 + struct psb_gtt_mm *mm = dev_priv->gtt_mm; 905 + struct psb_gtt *pg = dev_priv->pg; 906 + uint32_t pages, offset_pages; 907 + struct drm_mm_node *node; 908 + int ret; 909 + 910 + ret = psb_gtt_remove_node(mm, 911 + task_tgid_nr(current), 912 + (u32)hKernelMemInfo, 913 + &node); 914 + if (ret) { 915 + DRM_DEBUG("remove node failed\n"); 916 + return ret; 917 + } 918 + 919 + /*remove gtt entries*/ 920 + offset_pages = node->start; 921 + pages = node->size; 922 + 923 + psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0, 1); 924 + 925 + 926 + /*free tt node*/ 927 + 928 + psb_gtt_mm_free_mem(mm, node); 929 + return 0; 930 + } 931 + 932 + int psb_gtt_map_meminfo_ioctl(struct drm_device *dev, void *data, 933 + struct drm_file 
*file_priv) 934 + { 935 + struct psb_gtt_mapping_arg *arg 936 + = (struct psb_gtt_mapping_arg *)data; 937 + uint32_t *offset_pages = &arg->offset_pages; 938 + 939 + DRM_DEBUG("\n"); 940 + 941 + return psb_gtt_map_meminfo(dev, arg->hKernelMemInfo, offset_pages); 942 + } 943 + 944 + int psb_gtt_unmap_meminfo_ioctl(struct drm_device *dev, void *data, 945 + struct drm_file *file_priv) 946 + { 947 + 948 + struct psb_gtt_mapping_arg *arg 949 + = (struct psb_gtt_mapping_arg *)data; 950 + 951 + DRM_DEBUG("\n"); 952 + 953 + return psb_gtt_unmap_meminfo(dev, arg->hKernelMemInfo); 954 + } 955 + 956 + int psb_gtt_map_pvr_memory(struct drm_device *dev, unsigned int hHandle, 957 + unsigned int ui32TaskId, dma_addr_t *pPages, 958 + unsigned int ui32PagesNum, unsigned int *ui32Offset) 959 + { 960 + struct drm_psb_private *dev_priv = dev->dev_private; 961 + struct psb_gtt_mm *mm = dev_priv->gtt_mm; 962 + struct psb_gtt *pg = dev_priv->pg; 963 + uint32_t size, pages, offset_pages; 964 + struct drm_mm_node *node = NULL; 965 + struct psb_gtt_mem_mapping *mapping = NULL; 966 + int ret; 967 + 968 + size = ui32PagesNum * PAGE_SIZE; 969 + pages = 0; 970 + 971 + /*alloc memory in TT apeture*/ 972 + ret = psb_gtt_mm_alloc_mem(mm, ui32PagesNum, 0, &node); 973 + if (ret) { 974 + DRM_DEBUG("alloc TT memory error\n"); 975 + goto failed_pages_alloc; 976 + } 977 + 978 + /*update psb_gtt_mm*/ 979 + ret = psb_gtt_add_node(mm, 980 + (u32)ui32TaskId, 981 + (u32)hHandle, 982 + node, 983 + &mapping); 984 + if (ret) { 985 + DRM_DEBUG("add_node failed"); 986 + goto failed_add_node; 987 + } 988 + 989 + node = mapping->node; 990 + offset_pages = node->start; 991 + 992 + DRM_DEBUG("get free node for %ld pages, offset %ld pages", 993 + pages, offset_pages); 994 + 995 + /*update gtt*/ 996 + psb_gtt_insert_phys_addresses(pg, pPages, (unsigned)offset_pages, 997 + (unsigned)ui32PagesNum, 0); 998 + 999 + *ui32Offset = offset_pages; 1000 + return 0; 1001 + 1002 + failed_add_node: 1003 + psb_gtt_mm_free_mem(mm, 
node); 1004 + failed_pages_alloc: 1005 + return ret; 1006 + } 1007 + 1008 + 1009 + int psb_gtt_unmap_pvr_memory(struct drm_device *dev, unsigned int hHandle, 1010 + unsigned int ui32TaskId) 1011 + { 1012 + struct drm_psb_private *dev_priv = dev->dev_private; 1013 + struct psb_gtt_mm *mm = dev_priv->gtt_mm; 1014 + struct psb_gtt *pg = dev_priv->pg; 1015 + uint32_t pages, offset_pages; 1016 + struct drm_mm_node *node; 1017 + int ret; 1018 + 1019 + ret = psb_gtt_remove_node(mm, (u32)ui32TaskId, (u32)hHandle, &node); 1020 + if (ret) { 1021 + printk(KERN_ERR "remove node failed\n"); 1022 + return ret; 1023 + } 1024 + 1025 + /*remove gtt entries*/ 1026 + offset_pages = node->start; 1027 + pages = node->size; 1028 + 1029 + psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0, 1); 1030 + 1031 + /*free tt node*/ 1032 + psb_gtt_mm_free_mem(mm, node); 1033 + return 0; 1034 + }
+105
drivers/staging/gma500/psb_gtt.h
/**************************************************************************
 * Copyright (c) 2007-2008, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 **************************************************************************/

#ifndef _PSB_GTT_H_
#define _PSB_GTT_H_

#include <drm/drmP.h>

/*#include "img_types.h"*/

/*
 * Hardware/aperture description of the GMA500 GTT (graphics translation
 * table) and its stolen-memory carve-outs.  All *_start fields are
 * addresses; *_pages are sizes in pages; *_size in bytes.
 */
struct psb_gtt {
	struct drm_device *dev;
	int initialized;
	uint32_t gatt_start;
	uint32_t mmu_gatt_start;
	uint32_t ci_start;
	uint32_t rar_start;
	uint32_t gtt_start;
	uint32_t gtt_phys_start;
	unsigned gtt_pages;
	unsigned gatt_pages;
	uint32_t stolen_base;
	void *vram_addr;
	uint32_t pge_ctl;
	u16 gmch_ctrl;
	unsigned long stolen_size;
	unsigned long vram_stolen_size;
	unsigned long ci_stolen_size;
	unsigned long rar_stolen_size;
	uint32_t *gtt_map;
	struct rw_semaphore sem;
};

/*
 * Allocator state for the TT aperture: a drm_mm range manager plus a
 * hash of per-process mapping tables.
 */
struct psb_gtt_mm {
	struct drm_mm base;
	struct drm_open_hash hash;	/* tgid -> psb_gtt_hash_entry */
	uint32_t count;			/* number of per-process entries */
	spinlock_t lock;		/* protects base, hash and counts */
};

/* Per-process bucket: maps handle keys to GTT allocations */
struct psb_gtt_hash_entry {
	struct drm_open_hash ht;	/* key -> psb_gtt_mem_mapping */
	uint32_t count;			/* mappings currently in ht */
	struct drm_hash_item item;	/* link into psb_gtt_mm.hash */
};

/* One mapped allocation inside the GTT aperture */
struct psb_gtt_mem_mapping {
	struct drm_mm_node *node;	/* aperture range, in page units */
	struct drm_hash_item item;	/* link into psb_gtt_hash_entry.ht */
};

/* Exported functions */
extern int psb_gtt_init(struct psb_gtt *pg, int resume);
extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
				unsigned offset_pages, unsigned num_pages,
				unsigned desired_tile_stride,
				unsigned hw_tile_stride, int type);
extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
				unsigned num_pages,
				unsigned desired_tile_stride,
				unsigned hw_tile_stride,
				int rc_prot);

extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
extern int psb_gtt_map_meminfo(struct drm_device *dev,
			       void *hKernelMemInfo,
			       uint32_t *offset);
extern int psb_gtt_unmap_meminfo(struct drm_device *dev,
				 void *hKernelMemInfo);
extern int psb_gtt_map_meminfo_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int psb_gtt_unmap_meminfo_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int psb_gtt_mm_init(struct psb_gtt *pg);
extern void psb_gtt_mm_takedown(void);

extern int psb_gtt_map_pvr_memory(struct drm_device *dev,
				  unsigned int hHandle,
				  unsigned int ui32TaskId,
				  dma_addr_t *pPages,
				  unsigned int ui32PagesNum,
				  unsigned int *ui32Offset);

extern int psb_gtt_unmap_pvr_memory(struct drm_device *dev,
				    unsigned int hHandle,
				    unsigned int ui32TaskId);

#endif
+301
drivers/staging/gma500/psb_intel_bios.c
··· 1 + /* 2 + * Copyright (c) 2006 Intel Corporation 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms and conditions of the GNU General Public License, 6 + * version 2, as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope it will be useful, but WITHOUT 9 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 + * more details. 12 + * 13 + * You should have received a copy of the GNU General Public License along with 14 + * this program; if not, write to the Free Software Foundation, Inc., 15 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 + * 17 + * Authors: 18 + * Eric Anholt <eric@anholt.net> 19 + * 20 + */ 21 + #include <drm/drmP.h> 22 + #include <drm/drm.h> 23 + #include "psb_drm.h" 24 + #include "psb_drv.h" 25 + #include "psb_intel_drv.h" 26 + #include "psb_intel_reg.h" 27 + #include "psb_intel_bios.h" 28 + 29 + 30 + static void *find_section(struct bdb_header *bdb, int section_id) 31 + { 32 + u8 *base = (u8 *)bdb; 33 + int index = 0; 34 + u16 total, current_size; 35 + u8 current_id; 36 + 37 + /* skip to first section */ 38 + index += bdb->header_size; 39 + total = bdb->bdb_size; 40 + 41 + /* walk the sections looking for section_id */ 42 + while (index < total) { 43 + current_id = *(base + index); 44 + index++; 45 + current_size = *((u16 *)(base + index)); 46 + index += 2; 47 + if (current_id == section_id) 48 + return base + index; 49 + index += current_size; 50 + } 51 + 52 + return NULL; 53 + } 54 + 55 + static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, 56 + struct lvds_dvo_timing *dvo_timing) 57 + { 58 + panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | 59 + dvo_timing->hactive_lo; 60 + panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay + 61 + ((dvo_timing->hsync_off_hi << 8) | 
dvo_timing->hsync_off_lo); 62 + panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start + 63 + dvo_timing->hsync_pulse_width; 64 + panel_fixed_mode->htotal = panel_fixed_mode->hdisplay + 65 + ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo); 66 + 67 + panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | 68 + dvo_timing->vactive_lo; 69 + panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay + 70 + dvo_timing->vsync_off; 71 + panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start + 72 + dvo_timing->vsync_pulse_width; 73 + panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay + 74 + ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo); 75 + panel_fixed_mode->clock = dvo_timing->clock * 10; 76 + panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; 77 + 78 + /* Some VBTs have bogus h/vtotal values */ 79 + if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) 80 + panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; 81 + if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal) 82 + panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1; 83 + 84 + drm_mode_set_name(panel_fixed_mode); 85 + } 86 + 87 + static void parse_backlight_data(struct drm_psb_private *dev_priv, 88 + struct bdb_header *bdb) 89 + { 90 + struct bdb_lvds_backlight *vbt_lvds_bl = NULL; 91 + struct bdb_lvds_backlight *lvds_bl; 92 + u8 p_type = 0; 93 + void *bl_start = NULL; 94 + struct bdb_lvds_options *lvds_opts 95 + = find_section(bdb, BDB_LVDS_OPTIONS); 96 + 97 + dev_priv->lvds_bl = NULL; 98 + 99 + if (lvds_opts) { 100 + DRM_DEBUG("lvds_options found at %p\n", lvds_opts); 101 + p_type = lvds_opts->panel_type; 102 + } else { 103 + DRM_DEBUG("no lvds_options\n"); 104 + return; 105 + } 106 + 107 + bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT); 108 + vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type; 109 + 110 + lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL); 111 + if (!lvds_bl) { 112 + DRM_DEBUG("No memory\n"); 113 + return; 114 + } 
115 + 116 + memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl)); 117 + 118 + dev_priv->lvds_bl = lvds_bl; 119 + } 120 + 121 + /* Try to find integrated panel data */ 122 + static void parse_lfp_panel_data(struct drm_psb_private *dev_priv, 123 + struct bdb_header *bdb) 124 + { 125 + struct bdb_lvds_options *lvds_options; 126 + struct bdb_lvds_lfp_data *lvds_lfp_data; 127 + struct bdb_lvds_lfp_data_entry *entry; 128 + struct lvds_dvo_timing *dvo_timing; 129 + struct drm_display_mode *panel_fixed_mode; 130 + 131 + /* Defaults if we can't find VBT info */ 132 + dev_priv->lvds_dither = 0; 133 + dev_priv->lvds_vbt = 0; 134 + 135 + lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); 136 + if (!lvds_options) 137 + return; 138 + 139 + dev_priv->lvds_dither = lvds_options->pixel_dither; 140 + if (lvds_options->panel_type == 0xff) 141 + return; 142 + 143 + lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); 144 + if (!lvds_lfp_data) 145 + return; 146 + 147 + dev_priv->lvds_vbt = 1; 148 + 149 + entry = &lvds_lfp_data->data[lvds_options->panel_type]; 150 + dvo_timing = &entry->dvo_timing; 151 + 152 + panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), 153 + GFP_KERNEL); 154 + 155 + fill_detail_timing_data(panel_fixed_mode, dvo_timing); 156 + 157 + dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; 158 + 159 + DRM_DEBUG("Found panel mode in BIOS VBT tables:\n"); 160 + drm_mode_debug_printmodeline(panel_fixed_mode); 161 + 162 + return; 163 + } 164 + 165 + /* Try to find sdvo panel data */ 166 + static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv, 167 + struct bdb_header *bdb) 168 + { 169 + struct bdb_sdvo_lvds_options *sdvo_lvds_options; 170 + struct lvds_dvo_timing *dvo_timing; 171 + struct drm_display_mode *panel_fixed_mode; 172 + 173 + dev_priv->sdvo_lvds_vbt_mode = NULL; 174 + 175 + sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); 176 + if (!sdvo_lvds_options) 177 + return; 178 + 179 + dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS); 180 + if 
(!dvo_timing) 181 + return; 182 + 183 + panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); 184 + 185 + if (!panel_fixed_mode) 186 + return; 187 + 188 + fill_detail_timing_data(panel_fixed_mode, 189 + dvo_timing + sdvo_lvds_options->panel_type); 190 + 191 + dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode; 192 + 193 + return; 194 + } 195 + 196 + static void parse_general_features(struct drm_psb_private *dev_priv, 197 + struct bdb_header *bdb) 198 + { 199 + struct bdb_general_features *general; 200 + 201 + /* Set sensible defaults in case we can't find the general block */ 202 + dev_priv->int_tv_support = 1; 203 + dev_priv->int_crt_support = 1; 204 + 205 + general = find_section(bdb, BDB_GENERAL_FEATURES); 206 + if (general) { 207 + dev_priv->int_tv_support = general->int_tv_support; 208 + dev_priv->int_crt_support = general->int_crt_support; 209 + dev_priv->lvds_use_ssc = general->enable_ssc; 210 + 211 + if (dev_priv->lvds_use_ssc) { 212 + dev_priv->lvds_ssc_freq 213 + = general->ssc_freq ? 100 : 96; 214 + } 215 + } 216 + } 217 + 218 + /** 219 + * psb_intel_init_bios - initialize VBIOS settings & find VBT 220 + * @dev: DRM device 221 + * 222 + * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers 223 + * to appropriate values. 224 + * 225 + * VBT existence is a sanity check that is relied on by other i830_bios.c code. 226 + * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may 227 + * feed an updated VBT back through that, compared to what we'll fetch using 228 + * this method of groping around in the BIOS data. 229 + * 230 + * Returns 0 on success, nonzero on failure. 
231 + */ 232 + bool psb_intel_init_bios(struct drm_device *dev) 233 + { 234 + struct drm_psb_private *dev_priv = dev->dev_private; 235 + struct pci_dev *pdev = dev->pdev; 236 + struct vbt_header *vbt = NULL; 237 + struct bdb_header *bdb; 238 + u8 __iomem *bios; 239 + size_t size; 240 + int i; 241 + 242 + bios = pci_map_rom(pdev, &size); 243 + if (!bios) 244 + return -1; 245 + 246 + /* Scour memory looking for the VBT signature */ 247 + for (i = 0; i + 4 < size; i++) { 248 + if (!memcmp(bios + i, "$VBT", 4)) { 249 + vbt = (struct vbt_header *)(bios + i); 250 + break; 251 + } 252 + } 253 + 254 + if (!vbt) { 255 + DRM_ERROR("VBT signature missing\n"); 256 + pci_unmap_rom(pdev, bios); 257 + return -1; 258 + } 259 + 260 + bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); 261 + 262 + /* Grab useful general definitions */ 263 + parse_general_features(dev_priv, bdb); 264 + parse_lfp_panel_data(dev_priv, bdb); 265 + parse_sdvo_panel_data(dev_priv, bdb); 266 + parse_backlight_data(dev_priv, bdb); 267 + 268 + pci_unmap_rom(pdev, bios); 269 + 270 + return 0; 271 + } 272 + 273 + /** 274 + * Destory and free VBT data 275 + */ 276 + void psb_intel_destory_bios(struct drm_device *dev) 277 + { 278 + struct drm_psb_private *dev_priv = dev->dev_private; 279 + struct drm_display_mode *sdvo_lvds_vbt_mode = 280 + dev_priv->sdvo_lvds_vbt_mode; 281 + struct drm_display_mode *lfp_lvds_vbt_mode = 282 + dev_priv->lfp_lvds_vbt_mode; 283 + struct bdb_lvds_backlight *lvds_bl = 284 + dev_priv->lvds_bl; 285 + 286 + /*free sdvo panel mode*/ 287 + if (sdvo_lvds_vbt_mode) { 288 + dev_priv->sdvo_lvds_vbt_mode = NULL; 289 + kfree(sdvo_lvds_vbt_mode); 290 + } 291 + 292 + if (lfp_lvds_vbt_mode) { 293 + dev_priv->lfp_lvds_vbt_mode = NULL; 294 + kfree(lfp_lvds_vbt_mode); 295 + } 296 + 297 + if (lvds_bl) { 298 + dev_priv->lvds_bl = NULL; 299 + kfree(lvds_bl); 300 + } 301 + }
+430
drivers/staging/gma500/psb_intel_bios.h
/*
 * Copyright (c) 2006 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#ifndef _I830_BIOS_H_
#define _I830_BIOS_H_

#include <drm/drmP.h>

struct vbt_header {
	u8 signature[20];		/**< Always starts with '$VBT' */
	u16 version;			/**< decimal */
	u16 header_size;		/**< in bytes */
	u16 vbt_size;			/**< in bytes */
	u8 vbt_checksum;
	u8 reserved0;
	u32 bdb_offset;			/**< from beginning of VBT */
	u32 aim_offset[4];		/**< from beginning of VBT */
} __attribute__((packed));


struct bdb_header {
	u8 signature[16];		/**< Always 'BIOS_DATA_BLOCK' */
	u16 version;			/**< decimal */
	u16 header_size;		/**< in bytes */
	u16 bdb_size;			/**< in bytes */
};

/* strictly speaking, this is a "skip" block, but it has interesting info */
struct vbios_data {
	u8 type; /* 0 == desktop, 1 == mobile */
	u8 relstage;
	u8 chipset;
	u8 lvds_present:1;
	u8 tv_present:1;
	u8 rsvd2:6; /* finish byte */
	u8 rsvd3[4];
	u8 signon[155];
	u8 copyright[61];
	u16 code_segment;
	u8 dos_boot_mode;
	u8 bandwidth_percent;
	u8 rsvd4; /* popup memory size */
	u8 resize_pci_bios;
	u8 rsvd5; /* is crt already on ddc2 */
} __attribute__((packed));

/*
 * There are several types of BIOS data blocks (BDBs), each block has
 * an ID and size in the first 3 bytes (ID in first, size in next 2).
 * Known types are listed below.
 */
#define BDB_GENERAL_FEATURES	  1
#define BDB_GENERAL_DEFINITIONS	  2
#define BDB_OLD_TOGGLE_LIST	  3
#define BDB_MODE_SUPPORT_LIST	  4
#define BDB_GENERIC_MODE_TABLE	  5
#define BDB_EXT_MMIO_REGS	  6
#define BDB_SWF_IO		  7
#define BDB_SWF_MMIO		  8
#define BDB_DOT_CLOCK_TABLE	  9
#define BDB_MODE_REMOVAL_TABLE	 10
#define BDB_CHILD_DEVICE_TABLE	 11
#define BDB_DRIVER_FEATURES	 12
#define BDB_DRIVER_PERSISTENCE	 13
#define BDB_EXT_TABLE_PTRS	 14
#define BDB_DOT_CLOCK_OVERRIDE	 15
#define BDB_DISPLAY_SELECT	 16
/* 17 rsvd */
#define BDB_DRIVER_ROTATION	 18
#define BDB_DISPLAY_REMOVE	 19
#define BDB_OEM_CUSTOM		 20
#define BDB_EFP_LIST		 21 /* workarounds for VGA hsync/vsync */
#define BDB_SDVO_LVDS_OPTIONS	 22
#define BDB_SDVO_PANEL_DTDS	 23
#define BDB_SDVO_LVDS_PNP_IDS	 24
#define BDB_SDVO_LVDS_POWER_SEQ	 25
#define BDB_TV_OPTIONS		 26
#define BDB_LVDS_OPTIONS	 40
#define BDB_LVDS_LFP_DATA_PTRS	 41
#define BDB_LVDS_LFP_DATA	 42
#define BDB_LVDS_BACKLIGHT	 43
#define BDB_LVDS_POWER		 44
#define BDB_SKIP		254 /* VBIOS private block, ignore */

struct bdb_general_features {
	/* bits 1 */
	u8 panel_fitting:2;
	u8 flexaim:1;
	u8 msg_enable:1;
	u8 clear_screen:3;
	u8 color_flip:1;

	/* bits 2 */
	u8 download_ext_vbt:1;
	u8 enable_ssc:1;
	u8 ssc_freq:1;
	u8 enable_lfp_on_override:1;
	u8 disable_ssc_ddt:1;
	u8 rsvd8:3; /* finish byte */

	/* bits 3 */
	u8 disable_smooth_vision:1;
	u8 single_dvi:1;
	u8 rsvd9:6; /* finish byte */

	/* bits 4 */
	u8 legacy_monitor_detect;

	/* bits 5 */
	u8 int_crt_support:1;
	u8 int_tv_support:1;
	u8 rsvd11:6; /* finish byte */
} __attribute__((packed));

struct bdb_general_definitions {
	/* DDC GPIO */
	u8 crt_ddc_gmbus_pin;

	/* DPMS bits */
	u8 dpms_acpi:1;
	u8 skip_boot_crt_detect:1;
	u8 dpms_aim:1;
	u8 rsvd1:5; /* finish byte */

	/* boot device bits */
	u8 boot_display[2];
	u8 child_dev_size;

	/* device info */
	u8 tv_or_lvds_info[33];
	u8 dev1[33];
	u8 dev2[33];
	u8 dev3[33];
	u8 dev4[33];
	/* may be another device block here on some platforms */
};

struct bdb_lvds_options {
	u8 panel_type;
	u8 rsvd1;
	/* LVDS capabilities, stored in a dword */
	u8 pfit_mode:2;
	u8 pfit_text_mode_enhanced:1;
	u8 pfit_gfx_mode_enhanced:1;
	u8 pfit_ratio_auto:1;
	u8 pixel_dither:1;
	u8 lvds_edid:1;
	u8 rsvd2:1;
	u8 rsvd4;
} __attribute__((packed));

struct bdb_lvds_backlight {
	u8 type:2;
	u8 pol:1;
	u8 gpio:3;
	u8 gmbus:2;
	u16 freq;
	u8 minbrightness;
	u8 i2caddr;
	u8 brightnesscmd;
	/*FIXME: more...*/
} __attribute__((packed));

/* LFP pointer table contains entries to the struct below */
struct bdb_lvds_lfp_data_ptr {
	u16 fp_timing_offset; /* offsets are from start of bdb */
	u8 fp_table_size;
	u16 dvo_timing_offset;
	u8 dvo_table_size;
	u16 panel_pnp_id_offset;
	u8 pnp_table_size;
} __attribute__((packed));

struct bdb_lvds_lfp_data_ptrs {
	u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
	struct bdb_lvds_lfp_data_ptr ptr[16];
} __attribute__((packed));

/* LFP data has 3 blocks per entry */
struct lvds_fp_timing {
	u16 x_res;
	u16 y_res;
	u32 lvds_reg;
	u32 lvds_reg_val;
	u32 pp_on_reg;
	u32 pp_on_reg_val;
	u32 pp_off_reg;
	u32 pp_off_reg_val;
	u32 pp_cycle_reg;
	u32 pp_cycle_reg_val;
	u32 pfit_reg;
	u32 pfit_reg_val;
	u16 terminator;
} __attribute__((packed));

struct lvds_dvo_timing {
	u16 clock;		/**< In 10khz */
	u8 hactive_lo;
	u8 hblank_lo;
	u8 hblank_hi:4;
	u8 hactive_hi:4;
	u8 vactive_lo;
	u8 vblank_lo;
	u8 vblank_hi:4;
	u8 vactive_hi:4;
	u8 hsync_off_lo;
	u8 hsync_pulse_width;
	u8 vsync_pulse_width:4;
	u8 vsync_off:4;
	u8 rsvd0:6;
	u8 hsync_off_hi:2;
	u8 h_image;
	u8 v_image;
	u8 max_hv;
	u8 h_border;
	u8 v_border;
	u8 rsvd1:3;
	u8 digital:2;
	u8 vsync_positive:1;
	u8 hsync_positive:1;
	u8 rsvd2:1;
} __attribute__((packed));

struct lvds_pnp_id {
	u16 mfg_name;
	u16 product_code;
	u32 serial;
	u8 mfg_week;
	u8 mfg_year;
} __attribute__((packed));

struct bdb_lvds_lfp_data_entry {
	struct lvds_fp_timing fp_timing;
	struct lvds_dvo_timing dvo_timing;
	struct lvds_pnp_id pnp_id;
} __attribute__((packed));

struct bdb_lvds_lfp_data {
	struct bdb_lvds_lfp_data_entry data[16];
} __attribute__((packed));

struct aimdb_header {
	char signature[16];
	char oem_device[20];
	u16 aimdb_version;
	u16 aimdb_header_size;
	u16 aimdb_size;
} __attribute__((packed));

struct aimdb_block {
	u8 aimdb_id;
	u16 aimdb_size;
} __attribute__((packed));

struct vch_panel_data {
	u16 fp_timing_offset;
	u8 fp_timing_size;
	u16 dvo_timing_offset;
	u8 dvo_timing_size;
	u16 text_fitting_offset;
	u8 text_fitting_size;
	u16 graphics_fitting_offset;
	u8 graphics_fitting_size;
} __attribute__((packed));

struct vch_bdb_22 {
	struct aimdb_block aimdb_block;
	struct vch_panel_data panels[16];
} __attribute__((packed));

struct bdb_sdvo_lvds_options {
	u8 panel_backlight;
	u8 h40_set_panel_type;
	u8 panel_type;
	u8 ssc_clk_freq;
	u16 als_low_trip;
	u16 als_high_trip;
	u8 sclalarcoeff_tab_row_num;
	u8 sclalarcoeff_tab_row_size;
	u8 coefficient[8];
	u8 panel_misc_bits_1;
	u8 panel_misc_bits_2;
	u8 panel_misc_bits_3;
	u8 panel_misc_bits_4;
} __attribute__((packed));


extern bool psb_intel_init_bios(struct drm_device *dev);
extern void psb_intel_destory_bios(struct drm_device *dev);

/*
 * Driver<->VBIOS interaction occurs through scratch bits in
 * GR18 & SWF*.
 */

/* GR18 bits are set on display switch and hotkey events */
#define GR18_DRIVER_SWITCH_EN	(1<<7) /* 0: VBIOS control, 1: driver control */
#define GR18_HOTKEY_MASK	0x78 /* See also SWF4 15:0 */
#define GR18_HK_NONE		(0x0<<3)
#define GR18_HK_LFP_STRETCH	(0x1<<3)
#define GR18_HK_TOGGLE_DISP	(0x2<<3)
#define GR18_HK_DISP_SWITCH	(0x4<<3) /* see SWF14 15:0 for what to enable */
#define GR18_HK_POPUP_DISABLED	(0x6<<3)
#define GR18_HK_POPUP_ENABLED	(0x7<<3)
#define GR18_HK_PFIT		(0x8<<3)
#define GR18_HK_APM_CHANGE	(0xa<<3)
#define GR18_HK_MULTIPLE	(0xc<<3)
#define GR18_USER_INT_EN	(1<<2)
#define GR18_A0000_FLUSH_EN	(1<<1)
#define GR18_SMM_EN		(1<<0)

/* Set by driver, cleared by VBIOS */
#define SWF00_YRES_SHIFT	16
#define SWF00_XRES_SHIFT	0
#define SWF00_RES_MASK		0xffff

/* Set by VBIOS at boot time and driver at runtime */
#define SWF01_TV2_FORMAT_SHIFT	8
#define SWF01_TV1_FORMAT_SHIFT	0
#define SWF01_TV_FORMAT_MASK	0xffff

#define SWF10_VBIOS_BLC_I2C_EN	(1<<29)
#define SWF10_GTT_OVERRIDE_EN	(1<<28)
#define SWF10_LFP_DPMS_OVR	(1<<27) /* override DPMS on display switch */
#define SWF10_ACTIVE_TOGGLE_LIST_MASK	(7<<24)
#define   SWF10_OLD_TOGGLE	0x0
#define   SWF10_TOGGLE_LIST_1	0x1
#define   SWF10_TOGGLE_LIST_2	0x2
#define   SWF10_TOGGLE_LIST_3	0x3
#define   SWF10_TOGGLE_LIST_4	0x4
#define SWF10_PANNING_EN	(1<<23)
#define SWF10_DRIVER_LOADED	(1<<22)
#define SWF10_EXTENDED_DESKTOP	(1<<21)
#define SWF10_EXCLUSIVE_MODE	(1<<20)
#define SWF10_OVERLAY_EN	(1<<19)
#define SWF10_PLANEB_HOLDOFF	(1<<18)
#define SWF10_PLANEA_HOLDOFF	(1<<17)
#define SWF10_VGA_HOLDOFF	(1<<16)
#define SWF10_ACTIVE_DISP_MASK	0xffff
#define SWF10_PIPEB_LFP2	(1<<15)
#define SWF10_PIPEB_EFP2	(1<<14)
#define SWF10_PIPEB_TV2		(1<<13)
#define SWF10_PIPEB_CRT2	(1<<12)
#define SWF10_PIPEB_LFP		(1<<11)
#define SWF10_PIPEB_EFP		(1<<10)
#define SWF10_PIPEB_TV		(1<<9)
#define SWF10_PIPEB_CRT		(1<<8)
#define SWF10_PIPEA_LFP2	(1<<7)
#define SWF10_PIPEA_EFP2	(1<<6)
#define SWF10_PIPEA_TV2		(1<<5)
#define SWF10_PIPEA_CRT2	(1<<4)
#define SWF10_PIPEA_LFP		(1<<3)
#define SWF10_PIPEA_EFP		(1<<2)
#define SWF10_PIPEA_TV		(1<<1)
#define SWF10_PIPEA_CRT		(1<<0)

#define SWF11_MEMORY_SIZE_SHIFT	16
#define SWF11_SV_TEST_EN	(1<<15)
#define SWF11_IS_AGP		(1<<14)
#define SWF11_DISPLAY_HOLDOFF	(1<<13)
#define SWF11_DPMS_REDUCED	(1<<12)
#define SWF11_IS_VBE_MODE	(1<<11)
#define SWF11_PIPEB_ACCESS	(1<<10) /* 0 here means pipe a */
#define SWF11_DPMS_MASK		0x07
#define   SWF11_DPMS_OFF	(1<<2)
#define   SWF11_DPMS_SUSPEND	(1<<1)
#define   SWF11_DPMS_STANDBY	(1<<0)
#define   SWF11_DPMS_ON		0

#define SWF14_GFX_PFIT_EN	(1<<31)
#define SWF14_TEXT_PFIT_EN	(1<<30)
#define SWF14_LID_STATUS_CLOSED	(1<<29) /* 0 here means open */
#define SWF14_POPUP_EN		(1<<28)
#define SWF14_DISPLAY_HOLDOFF	(1<<27)
#define SWF14_DISP_DETECT_EN	(1<<26)
#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
#define SWF14_DRIVER_STATUS	(1<<24)
#define SWF14_OS_TYPE_WIN9X	(1<<23)
#define SWF14_OS_TYPE_WINNT	(1<<22)
/* 21:19 rsvd */
#define SWF14_PM_TYPE_MASK	0x00070000
#define   SWF14_PM_ACPI_VIDEO	(0x4 << 16)
#define   SWF14_PM_ACPI		(0x3 << 16)
#define   SWF14_PM_APM_12	(0x2 << 16)
#define   SWF14_PM_APM_11	(0x1 << 16)
#define SWF14_HK_REQUEST_MASK	0x0000ffff /* see GR18 6:3 for event type */
/* if GR18 indicates a display switch */
#define   SWF14_DS_PIPEB_LFP2_EN (1<<15)
#define   SWF14_DS_PIPEB_EFP2_EN (1<<14)
#define   SWF14_DS_PIPEB_TV2_EN  (1<<13)
#define   SWF14_DS_PIPEB_CRT2_EN (1<<12)
#define   SWF14_DS_PIPEB_LFP_EN  (1<<11)
#define   SWF14_DS_PIPEB_EFP_EN  (1<<10)
#define   SWF14_DS_PIPEB_TV_EN   (1<<9)
#define   SWF14_DS_PIPEB_CRT_EN  (1<<8)
#define   SWF14_DS_PIPEA_LFP2_EN (1<<7)
#define   SWF14_DS_PIPEA_EFP2_EN (1<<6)
#define   SWF14_DS_PIPEA_TV2_EN  (1<<5)
#define   SWF14_DS_PIPEA_CRT2_EN (1<<4)
#define   SWF14_DS_PIPEA_LFP_EN  (1<<3)
#define   SWF14_DS_PIPEA_EFP_EN  (1<<2)
#define   SWF14_DS_PIPEA_TV_EN   (1<<1)
#define   SWF14_DS_PIPEA_CRT_EN  (1<<0)
/* if GR18 indicates a panel fitting request */
#define   SWF14_PFIT_EN		(1<<0) /* 0 means disable */
/* if GR18 indicates an APM change request */
#define   SWF14_APM_HIBERNATE	0x4
#define   SWF14_APM_SUSPEND	0x3
#define   SWF14_APM_STANDBY	0x1
#define   SWF14_APM_RESTORE	0x0

#endif /* _I830_BIOS_H_ */
+1489
drivers/staging/gma500/psb_intel_display.c
/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/pm_runtime.h>

#include <drm/drmP.h>
#include "psb_fb.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_display.h"
#include "psb_powermgmt.h"


/* PLL divider configuration and the clock values derived from it */
struct psb_intel_clock_t {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int dot;
	int vco;
	int m;
	int p;
};

/* Inclusive [min, max] bound on a single PLL parameter */
struct psb_intel_range_t {
	int min, max;
};

/* p2 divider choice: slow below dot_limit, fast at or above it */
struct psb_intel_p2_t {
	int dot_limit;
	int p2_slow, p2_fast;
};

#define INTEL_P2_NUM		      2

/* Full set of PLL parameter limits for one output/chip combination */
struct psb_intel_limit_t {
	struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
	struct psb_intel_p2_t p2;
};

/* i8xx-class PLL limits (dot/vco in kHz) */
#define I8XX_DOT_MIN		  25000
#define I8XX_DOT_MAX		 350000
#define I8XX_VCO_MIN		 930000
#define I8XX_VCO_MAX		1400000
#define I8XX_N_MIN		      3
#define I8XX_N_MAX		     16
#define I8XX_M_MIN		     96
#define I8XX_M_MAX		    140
#define I8XX_M1_MIN		     18
#define I8XX_M1_MAX		     26
#define I8XX_M2_MIN		      6
#define I8XX_M2_MAX		     16
#define I8XX_P_MIN		      4
#define I8XX_P_MAX		    128
#define I8XX_P1_MIN		      2
#define I8XX_P1_MAX		     33
#define I8XX_P1_LVDS_MIN	      1
#define I8XX_P1_LVDS_MAX	      6
#define I8XX_P2_SLOW		      4
#define I8XX_P2_FAST		      2
#define I8XX_P2_LVDS_SLOW	     14
#define I8XX_P2_LVDS_FAST	     14	/* No fast option */
#define I8XX_P2_SLOW_LIMIT	 165000

/* 9xx-class PLL limits (clock values in kHz) */
#define I9XX_DOT_MIN		  20000
#define I9XX_DOT_MAX		 400000
#define I9XX_VCO_MIN		1400000
#define I9XX_VCO_MAX		2800000
#define I9XX_N_MIN		      3
#define I9XX_N_MAX		      8
#define I9XX_M_MIN		     70
#define I9XX_M_MAX		    120
#define I9XX_M1_MIN		     10
#define I9XX_M1_MAX		     20
#define I9XX_M2_MIN		      5
#define I9XX_M2_MAX		      9
#define I9XX_P_SDVO_DAC_MIN	      5
#define I9XX_P_SDVO_DAC_MAX	     80
#define I9XX_P_LVDS_MIN		      7
#define I9XX_P_LVDS_MAX		     98
#define I9XX_P1_MIN		      1
#define I9XX_P1_MAX		      8
#define I9XX_P2_SDVO_DAC_SLOW	     10
#define I9XX_P2_SDVO_DAC_FAST	      5
#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
#define I9XX_P2_LVDS_SLOW	     14
#define I9XX_P2_LVDS_FAST	      7
#define I9XX_P2_LVDS_SLOW_LIMIT	 112000

/* Indices into psb_intel_limits[] below */
#define INTEL_LIMIT_I8XX_DVO_DAC    0
#define INTEL_LIMIT_I8XX_LVDS	    1
#define INTEL_LIMIT_I9XX_SDVO_DAC   2
#define INTEL_LIMIT_I9XX_LVDS	    3

static const struct psb_intel_limit_t psb_intel_limits[] = {
	{			/* INTEL_LIMIT_I8XX_DVO_DAC */
	 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
	 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
	 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
	 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
	 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
	 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
	 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
	 .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
	 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
		.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
	 },
	{			/* INTEL_LIMIT_I8XX_LVDS */
	 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
	 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
	 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
	 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
	 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
	 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
	 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
	 .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
	 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
		.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
	 },
	{			/* INTEL_LIMIT_I9XX_SDVO_DAC */
	 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
	 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
	 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
	 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
	 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
	 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
	 .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
	 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
	 .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
		.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
		I9XX_P2_SDVO_DAC_FAST},
	 },
	{			/* INTEL_LIMIT_I9XX_LVDS */
	 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
	 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
	 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
	 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
	 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
	 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
	 .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
	 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
	 /* The single-channel range is 25-112Mhz, and dual-channel
	  * is 80-224Mhz. Prefer single channel as much as possible.
	  */
	 .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
		.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
	 },
};

/* Select the PLL limit table for the output class driving this CRTC.
 * NOTE(review): only the I9XX tables are ever chosen here, so the I8XX
 * tables above appear to be unused in this file — confirm before removal. */
static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
{
	const struct psb_intel_limit_t *limit;

	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
		limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
	else
		limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
	return limit;
}

/** Derive the pixel clock for the given refclk and divisors for 8xx chips.
 * NOTE(review): currently identical to i9xx_clock() and not referenced in
 * this file. */

static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
{
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}

/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */

static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
{
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}

/* Fill in the derived clock fields; this hardware always uses the 9xx
 * formula regardless of dev. */
static void psb_intel_clock(struct drm_device *dev, int refclk,
			    struct psb_intel_clock_t *clock)
{
	return i9xx_clock(refclk, clock);
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *l_entry;

	/* Walk every connector and test the ones routed to this CRTC */
	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
			struct psb_intel_output *psb_intel_output =
			    to_psb_intel_output(l_entry);
			if (psb_intel_output->type == type)
				return true;
		}
	}
	return false;
}

/* NOTE: this macro expands to a "return false" inside the calling function;
 * the message argument is currently discarded (ErrorF call commented out). */
#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
				   struct psb_intel_clock_t *clock)
{
	const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);

	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	if (clock->m1 <= clock->m2)
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock"
	 * depending on the multiplier, connector, etc.,
	 * rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

/**
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
				    int refclk,
				    struct psb_intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct psb_intel_clock_t clock;
	const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
	int err = target;	/* best error so far; start at worst case */

	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel. We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		/* Pick the p2 post-divider from the target dot clock */
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	/* Exhaustive search of the divisor space, keeping the valid
	 * candidate whose dot clock lands closest to the target. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
		     clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max;
				     clock.p1++) {
					int this_err;

					psb_intel_clock(dev, refclk, &clock);

					if (!psb_intel_PLL_is_valid
					    (crtc, &clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff some valid divisor set improved on the initial error */
	return err != target;
}

void psb_intel_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20ms, i.e. one cycle at 50hz.
*/ 334 + udelay(20000); 335 + } 336 + 337 + int psb_intel_pipe_set_base(struct drm_crtc *crtc, 338 + int x, int y, struct drm_framebuffer *old_fb) 339 + { 340 + struct drm_device *dev = crtc->dev; 341 + /* struct drm_i915_master_private *master_priv; */ 342 + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 343 + struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); 344 + struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev; 345 + int pipe = psb_intel_crtc->pipe; 346 + unsigned long Start, Offset; 347 + int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE); 348 + int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); 349 + int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; 350 + int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; 351 + u32 dspcntr; 352 + int ret = 0; 353 + 354 + PSB_DEBUG_ENTRY("\n"); 355 + 356 + /* no fb bound */ 357 + if (!crtc->fb) { 358 + DRM_DEBUG("No FB bound\n"); 359 + return 0; 360 + } 361 + 362 + if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 363 + OSPM_UHB_FORCE_POWER_ON)) 364 + return 0; 365 + 366 + Start = mode_dev->bo_offset(dev, psbfb); 367 + Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); 368 + 369 + REG_WRITE(dspstride, crtc->fb->pitch); 370 + 371 + dspcntr = REG_READ(dspcntr_reg); 372 + dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 373 + 374 + switch (crtc->fb->bits_per_pixel) { 375 + case 8: 376 + dspcntr |= DISPPLANE_8BPP; 377 + break; 378 + case 16: 379 + if (crtc->fb->depth == 15) 380 + dspcntr |= DISPPLANE_15_16BPP; 381 + else 382 + dspcntr |= DISPPLANE_16BPP; 383 + break; 384 + case 24: 385 + case 32: 386 + dspcntr |= DISPPLANE_32BPP_NO_ALPHA; 387 + break; 388 + default: 389 + DRM_ERROR("Unknown color depth\n"); 390 + ret = -EINVAL; 391 + goto psb_intel_pipe_set_base_exit; 392 + } 393 + REG_WRITE(dspcntr_reg, dspcntr); 394 + 395 + DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); 396 + if (0 /* FIXMEAC - check what PSB needs */) { 397 + REG_WRITE(dspbase, Offset); 398 + 
REG_READ(dspbase); 399 + REG_WRITE(dspsurf, Start); 400 + REG_READ(dspsurf); 401 + } else { 402 + REG_WRITE(dspbase, Start + Offset); 403 + REG_READ(dspbase); 404 + } 405 + 406 + psb_intel_pipe_set_base_exit: 407 + 408 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 409 + 410 + return ret; 411 + } 412 + 413 + /** 414 + * Sets the power management mode of the pipe and plane. 415 + * 416 + * This code should probably grow support for turning the cursor off and back 417 + * on appropriately at the same time as we're turning the pipe off/on. 418 + */ 419 + static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode) 420 + { 421 + struct drm_device *dev = crtc->dev; 422 + /* struct drm_i915_master_private *master_priv; */ 423 + /* struct drm_i915_private *dev_priv = dev->dev_private; */ 424 + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 425 + int pipe = psb_intel_crtc->pipe; 426 + int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 427 + int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; 428 + int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE; 429 + int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; 430 + u32 temp; 431 + bool enabled; 432 + 433 + /* XXX: When our outputs are all unaware of DPMS modes other than off 434 + * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 435 + */ 436 + switch (mode) { 437 + case DRM_MODE_DPMS_ON: 438 + case DRM_MODE_DPMS_STANDBY: 439 + case DRM_MODE_DPMS_SUSPEND: 440 + /* Enable the DPLL */ 441 + temp = REG_READ(dpll_reg); 442 + if ((temp & DPLL_VCO_ENABLE) == 0) { 443 + REG_WRITE(dpll_reg, temp); 444 + REG_READ(dpll_reg); 445 + /* Wait for the clocks to stabilize. */ 446 + udelay(150); 447 + REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); 448 + REG_READ(dpll_reg); 449 + /* Wait for the clocks to stabilize. */ 450 + udelay(150); 451 + REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); 452 + REG_READ(dpll_reg); 453 + /* Wait for the clocks to stabilize. 
			 */
			udelay(150);
		}

		/* Enable the pipe */
		temp = REG_READ(pipeconf_reg);
		if ((temp & PIPEACONF_ENABLE) == 0)
			REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);

		/* Enable the plane */
		temp = REG_READ(dspcntr_reg);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(dspcntr_reg,
				  temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
		}

		psb_intel_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
		break;
	case DRM_MODE_DPMS_OFF:
		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Disable display plane */
		temp = REG_READ(dspcntr_reg);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(dspcntr_reg,
				  temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
			REG_READ(dspbase_reg);
		}

		/* Next, disable display pipes */
		temp = REG_READ(pipeconf_reg);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
			REG_READ(pipeconf_reg);
		}

		/* Wait for vblank for the disable to take effect. */
		psb_intel_wait_for_vblank(dev);

		temp = REG_READ(dpll_reg);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
			REG_READ(dpll_reg);
		}

		/* Wait for the clocks to turn off.
		 */
		udelay(150);
		break;
	}

	/* NOTE(review): 'enabled' is computed but never read below —
	 * confirm whether it can be dropped. */
	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;

	/*Set FIFO Watermarks*/
	REG_WRITE(DSPARB, 0x3F3E);
}

/* Helper-framework prepare hook: blank the CRTC before a mode set */
static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}

/* Helper-framework commit hook: light the CRTC back up after a mode set */
static void psb_intel_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}

/* Generic encoder prepare hook: power the encoder down via its dpms op */
void psb_intel_encoder_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs =
	    encoder->helper_private;
	/* lvds has its own version of prepare see psb_intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

/* Generic encoder commit hook: power the encoder up via its dpms op */
void psb_intel_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs =
	    encoder->helper_private;
	/* lvds has its own version of commit see psb_intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}

/* No fixup needed: the requested mode is usable as-is */
static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
				      struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}


/**
 * Return the pipe currently connected to the panel fitter,
 * or -1 if the panel fitter is not present or not in use
 */
static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
{
	u32 pfit_control;

	pfit_control = REG_READ(PFIT_CONTROL);

	/* See if the panel fitter is in use */
	if ((pfit_control & PFIT_ENABLE) == 0)
		return -1;
	/* Must be on PIPE 1 for PSB */
	return 1;
}

static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
				   struct drm_display_mode *mode,
				   struct
				   drm_display_mode *adjusted_mode,
				   int x, int y,
				   struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	int pipe = psb_intel_crtc->pipe;
	int fp_reg = (pipe == 0) ? FPA0 : FPB0;
	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
	int refclk;
	struct psb_intel_clock_t clock;
	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
	bool ok, is_sdvo = false, is_dvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	/* Classify the outputs currently routed to this CRTC.
	 * NOTE(review): is_crt and is_dvo are set but never used below. */
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct psb_intel_output *psb_intel_output =
		    to_psb_intel_output(connector);

		if (!connector->encoder
		    || connector->encoder->crtc != crtc)
			continue;

		switch (psb_intel_output->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
			is_sdvo = true;
			break;
		case INTEL_OUTPUT_DVO:
			is_dvo = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		}
	}

	/* Reference clock in kHz */
	refclk = 96000;

	ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
				     &clock);
	if (!ok) {
		/* NOTE(review): failure still returns 0, so callers never
		 * see an error — confirm this is intended. */
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return 0;
	}

	/* Pack the feedback/reference dividers for the FP register */
	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;

	dpll = DPLL_VGA_MODE_DIS;
	if (is_lvds) {
		dpll |= DPLLB_MODE_LVDS;
		dpll |= DPLL_DVO_HIGH_SPEED;
	} else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		/* assumes mode->clock is non-zero — TODO confirm callers */
		int sdvo_pixel_multiply =
		    adjusted_mode->clock / mode->clock;
		dpll |= DPLL_DVO_HIGH_SPEED;
		dpll |=
		    (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock.p1 - 1)) << 16;
	switch (clock.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (is_tv) {
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	}
	dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = REG_READ(pipeconf_reg);

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	if (pipe == 0)
		dspcntr |= DISPPLANE_SEL_PIPE_A;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	dspcntr |= DISPLAY_PLANE_ENABLE;
	pipeconf |= PIPEACONF_ENABLE;
	dpll |= DPLL_VCO_ENABLE;


	/* Disable the panel fitter if it was on our pipe */
	if (psb_intel_panel_fitter_pipe(dev) == pipe)
		REG_WRITE(PFIT_CONTROL, 0);

	DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	/* Program the dividers with the VCO still off */
	if (dpll & DPLL_VCO_ENABLE) {
		REG_WRITE(fp_reg, fp);
		REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
		REG_READ(dpll_reg);
		udelay(150);
	}

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		u32 lvds = REG_READ(LVDS);

		lvds |=
		    LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
		    LVDS_PIPEB_SELECT;
		/* Set the B0-B3 data pairs corresponding to
		 * whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more
		 * thoroughly into how panels behave in the two modes.
		 */

		REG_WRITE(LVDS, lvds);
		REG_READ(LVDS);
	}

	REG_WRITE(fp_reg, fp);
	REG_WRITE(dpll_reg, dpll);
	REG_READ(dpll_reg);
	/* Wait for the clocks to stabilize. */
	udelay(150);

	/* write it again -- the BIOS does, after all */
	REG_WRITE(dpll_reg, dpll);

	REG_READ(dpll_reg);
	/* Wait for the clocks to stabilize. */
	udelay(150);

	/* Program the CRTC timing registers from the adjusted mode;
	 * hardware fields are (value - 1) encoded. */
	REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
		  ((adjusted_mode->crtc_htotal - 1) << 16));
	REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
		  ((adjusted_mode->crtc_hblank_end - 1) << 16));
	REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
		  ((adjusted_mode->crtc_hsync_end - 1) << 16));
	REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
		  ((adjusted_mode->crtc_vtotal - 1) << 16));
	REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
		  ((adjusted_mode->crtc_vblank_end - 1) << 16));
	REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
		  ((adjusted_mode->crtc_vsync_end - 1) << 16));
	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	REG_WRITE(dspsize_reg,
		  ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
	REG_WRITE(dsppos_reg, 0);
	REG_WRITE(pipesrc_reg,
		  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
	REG_WRITE(pipeconf_reg, pipeconf);
	REG_READ(pipeconf_reg);

	psb_intel_wait_for_vblank(dev);

	REG_WRITE(dspcntr_reg, dspcntr);

	/* Flush the plane changes */
	{
		struct drm_crtc_helper_funcs *crtc_funcs =
		    crtc->helper_private;
		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
	}

	psb_intel_wait_for_vblank(dev);

	return 0;
}

/** Loads the palette/gamma unit for the CRTC with the prepared values */
void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv =
	    (struct drm_psb_private *)dev->dev_private;
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	int palreg = PALETTE_A;
	int i;

	/* The clocks have to be on to load the palette.
	 */
	if (!crtc->enabled)
		return;

	/* Select the palette bank that belongs to this CRTC's pipe */
	switch (psb_intel_crtc->pipe) {
	case 0:
		break;
	case 1:
		palreg = PALETTE_B;
		break;
	case 2:
		palreg = PALETTE_C;
		break;
	default:
		DRM_ERROR("Illegal Pipe Number.\n");
		return;
	}

	if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
				      OSPM_UHB_ONLY_IF_ON)) {
		for (i = 0; i < 256; i++) {
			REG_WRITE(palreg + 4 * i,
				  ((psb_intel_crtc->lut_r[i] +
				    psb_intel_crtc->lut_adj[i]) << 16) |
				  ((psb_intel_crtc->lut_g[i] +
				    psb_intel_crtc->lut_adj[i]) << 8) |
				  (psb_intel_crtc->lut_b[i] +
				   psb_intel_crtc->lut_adj[i]));
		}
		ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
	} else {
		/* Display island is powered down: stash the palette so it
		 * can be applied later.
		 * NOTE(review): always stores into save_palette_a even for
		 * pipe 1/2 — confirm that is intended. */
		for (i = 0; i < 256; i++) {
			dev_priv->save_palette_a[i] =
			    ((psb_intel_crtc->lut_r[i] +
			      psb_intel_crtc->lut_adj[i]) << 16) |
			    ((psb_intel_crtc->lut_g[i] +
			      psb_intel_crtc->lut_adj[i]) << 8) |
			    (psb_intel_crtc->lut_b[i] +
			     psb_intel_crtc->lut_adj[i]);
		}

	}
}

/**
 * Save the hardware state of the given crtc into its crtc_state.
 */
static void psb_intel_crtc_save(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	/* struct drm_psb_private *dev_priv =
	   (struct drm_psb_private *)dev->dev_private; */
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
	int pipeA = (psb_intel_crtc->pipe == 0);
	uint32_t paletteReg;
	int i;

	DRM_DEBUG("\n");

	if (!crtc_state) {
		DRM_DEBUG("No CRTC state found\n");
		return;
	}

	crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
	crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
	crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
	crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
	crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
	crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
	crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
	crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
	crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
	crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
	crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
	crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
	crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);

	/*NOTE: DSPSIZE DSPPOS only for psb*/
	crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
	crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);

	crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);

	DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
		  crtc_state->saveDSPCNTR,
		  crtc_state->savePIPECONF,
		  crtc_state->savePIPESRC,
		  crtc_state->saveFP0,
		  crtc_state->saveFP1,
		  crtc_state->saveDPLL,
		  crtc_state->saveHTOTAL,
		  crtc_state->saveHBLANK,
		  crtc_state->saveHSYNC,
		  crtc_state->saveVTOTAL,
		  crtc_state->saveVBLANK,
		  crtc_state->saveVSYNC,
		  crtc_state->saveDSPSTRIDE,
		  crtc_state->saveDSPSIZE,
		  crtc_state->saveDSPPOS,
		  crtc_state->saveDSPBASE
	);

	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
	for (i = 0; i < 256; ++i)
		crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
}

/**
 * Restore the hardware state of the given crtc from its crtc_state.
 */
static void psb_intel_crtc_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	/* struct drm_psb_private * dev_priv =
	   (struct drm_psb_private *)dev->dev_private; */
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
	/* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
	int pipeA = (psb_intel_crtc->pipe == 0);
	uint32_t paletteReg;
	int i;

	DRM_DEBUG("\n");

	if (!crtc_state) {
		DRM_DEBUG("No crtc state\n");
		return;
	}

	DRM_DEBUG(
		"current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
		REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
		REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
		REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
		REG_READ(pipeA ? FPA0 : FPB0),
		REG_READ(pipeA ? FPA1 : FPB1),
		REG_READ(pipeA ? DPLL_A : DPLL_B),
		REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
		REG_READ(pipeA ? HBLANK_A : HBLANK_B),
		REG_READ(pipeA ? HSYNC_A : HSYNC_B),
		REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
		REG_READ(pipeA ? VBLANK_A : VBLANK_B),
		REG_READ(pipeA ? VSYNC_A : VSYNC_B),
		REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
		REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
		REG_READ(pipeA ? DSPAPOS : DSPBPOS),
		REG_READ(pipeA ?
			 DSPABASE : DSPBBASE)
	);

	DRM_DEBUG(
		"saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
		crtc_state->saveDSPCNTR,
		crtc_state->savePIPECONF,
		crtc_state->savePIPESRC,
		crtc_state->saveFP0,
		crtc_state->saveFP1,
		crtc_state->saveDPLL,
		crtc_state->saveHTOTAL,
		crtc_state->saveHBLANK,
		crtc_state->saveHSYNC,
		crtc_state->saveVTOTAL,
		crtc_state->saveVBLANK,
		crtc_state->saveVSYNC,
		crtc_state->saveDSPSTRIDE,
		crtc_state->saveDSPSIZE,
		crtc_state->saveDSPPOS,
		crtc_state->saveDSPBASE
	);


	/* Turn the VCO off before reprogramming the PLL dividers */
	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
		REG_WRITE(pipeA ? DPLL_A : DPLL_B,
			  crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
		REG_READ(pipeA ? DPLL_A : DPLL_B);
		DRM_DEBUG("write dpll: %x\n",
			  REG_READ(pipeA ? DPLL_A : DPLL_B));
		udelay(150);
	}

	REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
	REG_READ(pipeA ? FPA0 : FPB0);

	REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
	REG_READ(pipeA ? FPA1 : FPB1);

	REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
	REG_READ(pipeA ? DPLL_A : DPLL_B);
	/* Let the restored PLL settle before touching the timing regs */
	udelay(150);

	REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
	REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
	REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
	REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
	REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
	REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
	REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);

	REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
	REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);

	REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
	REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);

	psb_intel_wait_for_vblank(dev);

	REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);

	psb_intel_wait_for_vblank(dev);

	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
	for (i = 0; i < 256; ++i)
		REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
}

/* Bind (or, when handle == 0, disable) the hardware cursor for this CRTC */
static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
				     struct drm_file *file_priv,
				     uint32_t handle,
				     uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv =
	    (struct drm_psb_private *)dev->dev_private;
	struct psb_gtt *pg = dev_priv->pg;
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
	int pipe = psb_intel_crtc->pipe;
	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
	uint32_t base = (pipe == 0) ?
CURABASE : CURBBASE; 1033 + uint32_t temp; 1034 + size_t addr = 0; 1035 + uint32_t page_offset; 1036 + size_t size; 1037 + void *bo; 1038 + int ret; 1039 + 1040 + DRM_DEBUG("\n"); 1041 + 1042 + /* if we want to turn of the cursor ignore width and height */ 1043 + if (!handle) { 1044 + DRM_DEBUG("cursor off\n"); 1045 + /* turn off the cursor */ 1046 + temp = 0; 1047 + temp |= CURSOR_MODE_DISABLE; 1048 + 1049 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 1050 + OSPM_UHB_ONLY_IF_ON)) { 1051 + REG_WRITE(control, temp); 1052 + REG_WRITE(base, 0); 1053 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 1054 + } 1055 + 1056 + /* unpin the old bo */ 1057 + if (psb_intel_crtc->cursor_bo) { 1058 + mode_dev->bo_unpin_for_scanout(dev, 1059 + psb_intel_crtc-> 1060 + cursor_bo); 1061 + psb_intel_crtc->cursor_bo = NULL; 1062 + } 1063 + 1064 + return 0; 1065 + } 1066 + 1067 + /* Currently we only support 64x64 cursors */ 1068 + if (width != 64 || height != 64) { 1069 + DRM_ERROR("we currently only support 64x64 cursors\n"); 1070 + return -EINVAL; 1071 + } 1072 + 1073 + bo = mode_dev->bo_from_handle(dev, file_priv, handle); 1074 + if (!bo) 1075 + return -ENOENT; 1076 + 1077 + ret = mode_dev->bo_pin_for_scanout(dev, bo); 1078 + if (ret) 1079 + return ret; 1080 + size = mode_dev->bo_size(dev, bo); 1081 + if (size < width * height * 4) { 1082 + DRM_ERROR("buffer is to small\n"); 1083 + return -ENOMEM; 1084 + } 1085 + 1086 + /*insert this bo into gtt*/ 1087 + DRM_DEBUG("%s: map meminfo for hw cursor. handle %x\n", 1088 + __func__, handle); 1089 + 1090 + ret = psb_gtt_map_meminfo(dev, (void *)handle, &page_offset); 1091 + if (ret) { 1092 + DRM_ERROR("Can not map meminfo to GTT. 
handle 0x%x\n", handle); 1093 + return ret; 1094 + } 1095 + 1096 + addr = page_offset << PAGE_SHIFT; 1097 + 1098 + addr += pg->stolen_base; 1099 + 1100 + psb_intel_crtc->cursor_addr = addr; 1101 + 1102 + temp = 0; 1103 + /* set the pipe for the cursor */ 1104 + temp |= (pipe << 28); 1105 + temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; 1106 + 1107 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 1108 + OSPM_UHB_ONLY_IF_ON)) { 1109 + REG_WRITE(control, temp); 1110 + REG_WRITE(base, addr); 1111 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 1112 + } 1113 + 1114 + /* unpin the old bo */ 1115 + if (psb_intel_crtc->cursor_bo && psb_intel_crtc->cursor_bo != bo) { 1116 + mode_dev->bo_unpin_for_scanout(dev, psb_intel_crtc->cursor_bo); 1117 + psb_intel_crtc->cursor_bo = bo; 1118 + } 1119 + 1120 + return 0; 1121 + } 1122 + 1123 + static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 1124 + { 1125 + struct drm_device *dev = crtc->dev; 1126 + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 1127 + int pipe = psb_intel_crtc->pipe; 1128 + uint32_t temp = 0; 1129 + uint32_t adder; 1130 + 1131 + 1132 + if (x < 0) { 1133 + temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT); 1134 + x = -x; 1135 + } 1136 + if (y < 0) { 1137 + temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT); 1138 + y = -y; 1139 + } 1140 + 1141 + temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT); 1142 + temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT); 1143 + 1144 + adder = psb_intel_crtc->cursor_addr; 1145 + 1146 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 1147 + OSPM_UHB_ONLY_IF_ON)) { 1148 + REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); 1149 + REG_WRITE((pipe == 0) ? 
CURABASE : CURBBASE, adder); 1150 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 1151 + } 1152 + return 0; 1153 + } 1154 + 1155 + static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, 1156 + u16 *green, u16 *blue, uint32_t type, uint32_t size) 1157 + { 1158 + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 1159 + int i; 1160 + 1161 + if (size != 256) 1162 + return; 1163 + 1164 + for (i = 0; i < 256; i++) { 1165 + psb_intel_crtc->lut_r[i] = red[i] >> 8; 1166 + psb_intel_crtc->lut_g[i] = green[i] >> 8; 1167 + psb_intel_crtc->lut_b[i] = blue[i] >> 8; 1168 + } 1169 + 1170 + psb_intel_crtc_load_lut(crtc); 1171 + } 1172 + 1173 + static int psb_crtc_set_config(struct drm_mode_set *set) 1174 + { 1175 + int ret; 1176 + struct drm_device *dev = set->crtc->dev; 1177 + struct drm_psb_private *dev_priv = dev->dev_private; 1178 + 1179 + if (!dev_priv->rpm_enabled) 1180 + return drm_crtc_helper_set_config(set); 1181 + 1182 + pm_runtime_forbid(&dev->pdev->dev); 1183 + ret = drm_crtc_helper_set_config(set); 1184 + pm_runtime_allow(&dev->pdev->dev); 1185 + return ret; 1186 + } 1187 + 1188 + /* Returns the clock of the currently programmed mode of the given pipe. */ 1189 + static int psb_intel_crtc_clock_get(struct drm_device *dev, 1190 + struct drm_crtc *crtc) 1191 + { 1192 + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 1193 + int pipe = psb_intel_crtc->pipe; 1194 + u32 dpll; 1195 + u32 fp; 1196 + struct psb_intel_clock_t clock; 1197 + bool is_lvds; 1198 + struct drm_psb_private *dev_priv = dev->dev_private; 1199 + 1200 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 1201 + OSPM_UHB_ONLY_IF_ON)) { 1202 + dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B); 1203 + if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 1204 + fp = REG_READ((pipe == 0) ? FPA0 : FPB0); 1205 + else 1206 + fp = REG_READ((pipe == 0) ? 
FPA1 : FPB1); 1207 + is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN); 1208 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 1209 + } else { 1210 + dpll = (pipe == 0) ? 1211 + dev_priv->saveDPLL_A : dev_priv->saveDPLL_B; 1212 + 1213 + if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 1214 + fp = (pipe == 0) ? 1215 + dev_priv->saveFPA0 : 1216 + dev_priv->saveFPB0; 1217 + else 1218 + fp = (pipe == 0) ? 1219 + dev_priv->saveFPA1 : 1220 + dev_priv->saveFPB1; 1221 + 1222 + is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN); 1223 + } 1224 + 1225 + clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 1226 + clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 1227 + clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 1228 + 1229 + if (is_lvds) { 1230 + clock.p1 = 1231 + ffs((dpll & 1232 + DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 1233 + DPLL_FPA01_P1_POST_DIV_SHIFT); 1234 + clock.p2 = 14; 1235 + 1236 + if ((dpll & PLL_REF_INPUT_MASK) == 1237 + PLLB_REF_INPUT_SPREADSPECTRUMIN) { 1238 + /* XXX: might not be 66MHz */ 1239 + i8xx_clock(66000, &clock); 1240 + } else 1241 + i8xx_clock(48000, &clock); 1242 + } else { 1243 + if (dpll & PLL_P1_DIVIDE_BY_TWO) 1244 + clock.p1 = 2; 1245 + else { 1246 + clock.p1 = 1247 + ((dpll & 1248 + DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 1249 + DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 1250 + } 1251 + if (dpll & PLL_P2_DIVIDE_BY_4) 1252 + clock.p2 = 4; 1253 + else 1254 + clock.p2 = 2; 1255 + 1256 + i8xx_clock(48000, &clock); 1257 + } 1258 + 1259 + /* XXX: It would be nice to validate the clocks, but we can't reuse 1260 + * i830PllIsValid() because it relies on the xf86_config connector 1261 + * configuration being accurate, which it isn't necessarily. 1262 + */ 1263 + 1264 + return clock.dot; 1265 + } 1266 + 1267 + /** Returns the currently programmed mode of the given pipe. 
*/ 1268 + struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, 1269 + struct drm_crtc *crtc) 1270 + { 1271 + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 1272 + int pipe = psb_intel_crtc->pipe; 1273 + struct drm_display_mode *mode; 1274 + int htot; 1275 + int hsync; 1276 + int vtot; 1277 + int vsync; 1278 + struct drm_psb_private *dev_priv = dev->dev_private; 1279 + 1280 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 1281 + OSPM_UHB_ONLY_IF_ON)) { 1282 + htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B); 1283 + hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B); 1284 + vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B); 1285 + vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B); 1286 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 1287 + } else { 1288 + htot = (pipe == 0) ? 1289 + dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B; 1290 + hsync = (pipe == 0) ? 1291 + dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B; 1292 + vtot = (pipe == 0) ? 1293 + dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B; 1294 + vsync = (pipe == 0) ? 
1295 + dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B; 1296 + } 1297 + 1298 + mode = kzalloc(sizeof(*mode), GFP_KERNEL); 1299 + if (!mode) 1300 + return NULL; 1301 + 1302 + mode->clock = psb_intel_crtc_clock_get(dev, crtc); 1303 + mode->hdisplay = (htot & 0xffff) + 1; 1304 + mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 1305 + mode->hsync_start = (hsync & 0xffff) + 1; 1306 + mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; 1307 + mode->vdisplay = (vtot & 0xffff) + 1; 1308 + mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; 1309 + mode->vsync_start = (vsync & 0xffff) + 1; 1310 + mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; 1311 + 1312 + drm_mode_set_name(mode); 1313 + drm_mode_set_crtcinfo(mode, 0); 1314 + 1315 + return mode; 1316 + } 1317 + 1318 + static void psb_intel_crtc_destroy(struct drm_crtc *crtc) 1319 + { 1320 + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 1321 + 1322 + kfree(psb_intel_crtc->crtc_state); 1323 + drm_crtc_cleanup(crtc); 1324 + kfree(psb_intel_crtc); 1325 + } 1326 + 1327 + static const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { 1328 + .dpms = psb_intel_crtc_dpms, 1329 + .mode_fixup = psb_intel_crtc_mode_fixup, 1330 + .mode_set = psb_intel_crtc_mode_set, 1331 + .mode_set_base = psb_intel_pipe_set_base, 1332 + .prepare = psb_intel_crtc_prepare, 1333 + .commit = psb_intel_crtc_commit, 1334 + }; 1335 + 1336 + static const struct drm_crtc_helper_funcs mrst_helper_funcs; 1337 + static const struct drm_crtc_helper_funcs mdfld_helper_funcs; 1338 + const struct drm_crtc_funcs mdfld_intel_crtc_funcs; 1339 + 1340 + const struct drm_crtc_funcs psb_intel_crtc_funcs = { 1341 + .save = psb_intel_crtc_save, 1342 + .restore = psb_intel_crtc_restore, 1343 + .cursor_set = psb_intel_crtc_cursor_set, 1344 + .cursor_move = psb_intel_crtc_cursor_move, 1345 + .gamma_set = psb_intel_crtc_gamma_set, 1346 + .set_config = psb_crtc_set_config, 1347 + .destroy = psb_intel_crtc_destroy, 1348 + }; 1349 + 1350 + void 
psb_intel_crtc_init(struct drm_device *dev, int pipe, 1351 + struct psb_intel_mode_device *mode_dev) 1352 + { 1353 + struct drm_psb_private *dev_priv = dev->dev_private; 1354 + struct psb_intel_crtc *psb_intel_crtc; 1355 + int i; 1356 + uint16_t *r_base, *g_base, *b_base; 1357 + 1358 + PSB_DEBUG_ENTRY("\n"); 1359 + 1360 + /* We allocate a extra array of drm_connector pointers 1361 + * for fbdev after the crtc */ 1362 + psb_intel_crtc = 1363 + kzalloc(sizeof(struct psb_intel_crtc) + 1364 + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), 1365 + GFP_KERNEL); 1366 + if (psb_intel_crtc == NULL) 1367 + return; 1368 + 1369 + psb_intel_crtc->crtc_state = 1370 + kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL); 1371 + if (!psb_intel_crtc->crtc_state) { 1372 + DRM_INFO("Crtc state error: No memory\n"); 1373 + kfree(psb_intel_crtc); 1374 + return; 1375 + } 1376 + 1377 + drm_crtc_init(dev, &psb_intel_crtc->base, &psb_intel_crtc_funcs); 1378 + 1379 + drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256); 1380 + psb_intel_crtc->pipe = pipe; 1381 + psb_intel_crtc->plane = pipe; 1382 + 1383 + r_base = psb_intel_crtc->base.gamma_store; 1384 + g_base = r_base + 256; 1385 + b_base = g_base + 256; 1386 + for (i = 0; i < 256; i++) { 1387 + psb_intel_crtc->lut_r[i] = i; 1388 + psb_intel_crtc->lut_g[i] = i; 1389 + psb_intel_crtc->lut_b[i] = i; 1390 + r_base[i] = i << 8; 1391 + g_base[i] = i << 8; 1392 + b_base[i] = i << 8; 1393 + 1394 + psb_intel_crtc->lut_adj[i] = 0; 1395 + } 1396 + 1397 + psb_intel_crtc->mode_dev = mode_dev; 1398 + psb_intel_crtc->cursor_addr = 0; 1399 + 1400 + drm_crtc_helper_add(&psb_intel_crtc->base, 1401 + &psb_intel_helper_funcs); 1402 + 1403 + /* Setup the array of drm_connector pointer array */ 1404 + psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base; 1405 + BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 1406 + dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL); 1407 + 
dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] = 1408 + &psb_intel_crtc->base; 1409 + dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] = 1410 + &psb_intel_crtc->base; 1411 + psb_intel_crtc->mode_set.connectors = 1412 + (struct drm_connector **) (psb_intel_crtc + 1); 1413 + psb_intel_crtc->mode_set.num_connectors = 0; 1414 + } 1415 + 1416 + int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 1417 + struct drm_file *file_priv) 1418 + { 1419 + struct drm_psb_private *dev_priv = dev->dev_private; 1420 + struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data; 1421 + struct drm_mode_object *drmmode_obj; 1422 + struct psb_intel_crtc *crtc; 1423 + 1424 + if (!dev_priv) { 1425 + DRM_ERROR("called with no initialization\n"); 1426 + return -EINVAL; 1427 + } 1428 + 1429 + drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, 1430 + DRM_MODE_OBJECT_CRTC); 1431 + 1432 + if (!drmmode_obj) { 1433 + DRM_ERROR("no such CRTC id\n"); 1434 + return -EINVAL; 1435 + } 1436 + 1437 + crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj)); 1438 + pipe_from_crtc_id->pipe = crtc->pipe; 1439 + 1440 + return 0; 1441 + } 1442 + 1443 + struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) 1444 + { 1445 + struct drm_crtc *crtc = NULL; 1446 + 1447 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1448 + struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); 1449 + if (psb_intel_crtc->pipe == pipe) 1450 + break; 1451 + } 1452 + return crtc; 1453 + } 1454 + 1455 + int psb_intel_connector_clones(struct drm_device *dev, int type_mask) 1456 + { 1457 + int index_mask = 0; 1458 + struct drm_connector *connector; 1459 + int entry = 0; 1460 + 1461 + list_for_each_entry(connector, &dev->mode_config.connector_list, 1462 + head) { 1463 + struct psb_intel_output *psb_intel_output = 1464 + to_psb_intel_output(connector); 1465 + if (type_mask & (1 << psb_intel_output->type)) 1466 + index_mask |= (1 << entry); 
1467 + entry++; 1468 + } 1469 + return index_mask; 1470 + } 1471 + 1472 + 1473 + void psb_intel_modeset_cleanup(struct drm_device *dev) 1474 + { 1475 + drm_mode_config_cleanup(dev); 1476 + } 1477 + 1478 + 1479 + /* current intel driver doesn't take advantage of encoders 1480 + always give back the encoder for the connector 1481 + */ 1482 + struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector) 1483 + { 1484 + struct psb_intel_output *psb_intel_output = 1485 + to_psb_intel_output(connector); 1486 + 1487 + return &psb_intel_output->enc; 1488 + } 1489 +
+25
drivers/staging/gma500/psb_intel_display.h
··· 1 + /* copyright (c) 2008, Intel Corporation 2 + * 3 + * This program is free software; you can redistribute it and/or modify it 4 + * under the terms and conditions of the GNU General Public License, 5 + * version 2, as published by the Free Software Foundation. 6 + * 7 + * This program is distributed in the hope it will be useful, but WITHOUT 8 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 9 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 10 + * more details. 11 + * 12 + * You should have received a copy of the GNU General Public License along with 13 + * this program; if not, write to the Free Software Foundation, Inc., 14 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 15 + * 16 + * Authors: 17 + * Eric Anholt <eric@anholt.net> 18 + */ 19 + 20 + #ifndef _INTEL_DISPLAY_H_ 21 + #define _INTEL_DISPLAY_H_ 22 + 23 + bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type); 24 + 25 + #endif
+247
drivers/staging/gma500/psb_intel_drv.h
··· 1 + /* 2 + * Copyright (c) 2009, Intel Corporation. 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms and conditions of the GNU General Public License, 6 + * version 2, as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope it will be useful, but WITHOUT 9 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 + * more details. 12 + * 13 + * You should have received a copy of the GNU General Public License along with 14 + * this program; if not, write to the Free Software Foundation, Inc., 15 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 + * 17 + */ 18 + 19 + #ifndef __INTEL_DRV_H__ 20 + #define __INTEL_DRV_H__ 21 + 22 + #include <linux/i2c.h> 23 + #include <linux/i2c-algo-bit.h> 24 + #include <drm/drm_crtc.h> 25 + #include <drm/drm_crtc_helper.h> 26 + #include <linux/gpio.h> 27 + 28 + /* 29 + * MOORESTOWN defines 30 + */ 31 + #define DELAY_TIME1 2000 /* 1000 = 1ms */ 32 + 33 + /* 34 + * Display related stuff 35 + */ 36 + 37 + /* store information about an Ixxx DVO */ 38 + /* The i830->i865 use multiple DVOs with multiple i2cs */ 39 + /* the i915, i945 have a single sDVO i2c bus - which is different */ 40 + #define MAX_OUTPUTS 6 41 + /* maximum connectors per crtcs in the mode set */ 42 + #define INTELFB_CONN_LIMIT 4 43 + 44 + #define INTEL_I2C_BUS_DVO 1 45 + #define INTEL_I2C_BUS_SDVO 2 46 + 47 + /* these are outputs from the chip - integrated only 48 + * external chips are via DVO or SDVO output */ 49 + #define INTEL_OUTPUT_UNUSED 0 50 + #define INTEL_OUTPUT_ANALOG 1 51 + #define INTEL_OUTPUT_DVO 2 52 + #define INTEL_OUTPUT_SDVO 3 53 + #define INTEL_OUTPUT_LVDS 4 54 + #define INTEL_OUTPUT_TVOUT 5 55 + #define INTEL_OUTPUT_HDMI 6 56 + #define INTEL_OUTPUT_MIPI 7 57 + #define INTEL_OUTPUT_MIPI2 8 58 + 59 + #define INTEL_DVO_CHIP_NONE 0 60 + 
#define INTEL_DVO_CHIP_LVDS 1 61 + #define INTEL_DVO_CHIP_TMDS 2 62 + #define INTEL_DVO_CHIP_TVOUT 4 63 + 64 + enum mipi_panel_type { 65 + NSC_800X480 = 1, 66 + LGE_480X1024 = 2, 67 + TPO_864X480 = 3 68 + }; 69 + 70 + /** 71 + * Hold information useally put on the device driver privates here, 72 + * since it needs to be shared across multiple of devices drivers privates. 73 + */ 74 + struct psb_intel_mode_device { 75 + 76 + /* 77 + * Abstracted memory manager operations 78 + */ 79 + void *(*bo_from_handle) (struct drm_device *dev, 80 + struct drm_file *file_priv, 81 + unsigned int handle); 82 + size_t(*bo_size) (struct drm_device *dev, void *bo); 83 + size_t(*bo_offset) (struct drm_device *dev, void *bo); 84 + int (*bo_pin_for_scanout) (struct drm_device *dev, void *bo); 85 + int (*bo_unpin_for_scanout) (struct drm_device *dev, void *bo); 86 + 87 + /* 88 + * Cursor 89 + */ 90 + int cursor_needs_physical; 91 + 92 + /* 93 + * LVDS info 94 + */ 95 + int backlight_duty_cycle; /* restore backlight to this value */ 96 + bool panel_wants_dither; 97 + struct drm_display_mode *panel_fixed_mode; 98 + struct drm_display_mode *panel_fixed_mode2; 99 + struct drm_display_mode *vbt_mode; /* if any */ 100 + 101 + uint32_t saveBLC_PWM_CTL; 102 + }; 103 + 104 + struct psb_intel_i2c_chan { 105 + /* for getting at dev. private (mmio etc.) 
*/ 106 + struct drm_device *drm_dev; 107 + u32 reg; /* GPIO reg */ 108 + struct i2c_adapter adapter; 109 + struct i2c_algo_bit_data algo; 110 + u8 slave_addr; 111 + }; 112 + 113 + struct psb_intel_output { 114 + struct drm_connector base; 115 + 116 + struct drm_encoder enc; 117 + int type; 118 + 119 + struct psb_intel_i2c_chan *i2c_bus; /* for control functions */ 120 + struct psb_intel_i2c_chan *ddc_bus; /* for DDC only stuff */ 121 + bool load_detect_temp; 122 + void *dev_priv; 123 + 124 + struct psb_intel_mode_device *mode_dev; 125 + 126 + }; 127 + 128 + struct psb_intel_crtc_state { 129 + uint32_t saveDSPCNTR; 130 + uint32_t savePIPECONF; 131 + uint32_t savePIPESRC; 132 + uint32_t saveDPLL; 133 + uint32_t saveFP0; 134 + uint32_t saveFP1; 135 + uint32_t saveHTOTAL; 136 + uint32_t saveHBLANK; 137 + uint32_t saveHSYNC; 138 + uint32_t saveVTOTAL; 139 + uint32_t saveVBLANK; 140 + uint32_t saveVSYNC; 141 + uint32_t saveDSPSTRIDE; 142 + uint32_t saveDSPSIZE; 143 + uint32_t saveDSPPOS; 144 + uint32_t saveDSPBASE; 145 + uint32_t savePalette[256]; 146 + }; 147 + 148 + struct psb_intel_crtc { 149 + struct drm_crtc base; 150 + int pipe; 151 + int plane; 152 + uint32_t cursor_addr; 153 + u8 lut_r[256], lut_g[256], lut_b[256]; 154 + u8 lut_adj[256]; 155 + struct psb_intel_framebuffer *fbdev_fb; 156 + /* a mode_set for fbdev users on this crtc */ 157 + struct drm_mode_set mode_set; 158 + 159 + /* current bo we scanout from */ 160 + void *scanout_bo; 161 + 162 + /* current bo we cursor from */ 163 + void *cursor_bo; 164 + 165 + struct drm_display_mode saved_mode; 166 + struct drm_display_mode saved_adjusted_mode; 167 + 168 + struct psb_intel_mode_device *mode_dev; 169 + 170 + /*crtc mode setting flags*/ 171 + u32 mode_flags; 172 + 173 + /* Saved Crtc HW states */ 174 + struct psb_intel_crtc_state *crtc_state; 175 + }; 176 + 177 + #define to_psb_intel_crtc(x) \ 178 + container_of(x, struct psb_intel_crtc, base) 179 + #define to_psb_intel_output(x) \ 180 + container_of(x, struct 
psb_intel_output, base) 181 + #define enc_to_psb_intel_output(x) \ 182 + container_of(x, struct psb_intel_output, enc) 183 + #define to_psb_intel_framebuffer(x) \ 184 + container_of(x, struct psb_intel_framebuffer, base) 185 + 186 + struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev, 187 + const u32 reg, const char *name); 188 + void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan); 189 + int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output); 190 + extern bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output); 191 + 192 + extern void psb_intel_crtc_init(struct drm_device *dev, int pipe, 193 + struct psb_intel_mode_device *mode_dev); 194 + extern void psb_intel_crt_init(struct drm_device *dev); 195 + extern void psb_intel_sdvo_init(struct drm_device *dev, int output_device); 196 + extern void psb_intel_dvo_init(struct drm_device *dev); 197 + extern void psb_intel_tv_init(struct drm_device *dev); 198 + extern void psb_intel_lvds_init(struct drm_device *dev, 199 + struct psb_intel_mode_device *mode_dev); 200 + extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level); 201 + extern void mrst_lvds_init(struct drm_device *dev, 202 + struct psb_intel_mode_device *mode_dev); 203 + extern void mrst_wait_for_INTR_PKT_SENT(struct drm_device *dev); 204 + extern void mrst_dsi_init(struct drm_device *dev, 205 + struct psb_intel_mode_device *mode_dev); 206 + extern void mid_dsi_init(struct drm_device *dev, 207 + struct psb_intel_mode_device *mode_dev, int dsi_num); 208 + 209 + extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc); 210 + extern void psb_intel_encoder_prepare(struct drm_encoder *encoder); 211 + extern void psb_intel_encoder_commit(struct drm_encoder *encoder); 212 + 213 + extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector 214 + *connector); 215 + 216 + extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, 217 + struct drm_crtc *crtc); 218 + 
extern void psb_intel_wait_for_vblank(struct drm_device *dev); 219 + extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 220 + struct drm_file *file_priv); 221 + extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, 222 + int pipe); 223 + extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, 224 + int sdvoB); 225 + extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector); 226 + extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, 227 + int enable); 228 + extern int intelfb_probe(struct drm_device *dev); 229 + extern int intelfb_remove(struct drm_device *dev, 230 + struct drm_framebuffer *fb); 231 + extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device 232 + *dev, struct 233 + drm_mode_fb_cmd 234 + *mode_cmd, 235 + void *mm_private); 236 + extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, 237 + struct drm_display_mode *mode, 238 + struct drm_display_mode *adjusted_mode); 239 + extern int psb_intel_lvds_mode_valid(struct drm_connector *connector, 240 + struct drm_display_mode *mode); 241 + extern int psb_intel_lvds_set_property(struct drm_connector *connector, 242 + struct drm_property *property, 243 + uint64_t value); 244 + extern void psb_intel_lvds_destroy(struct drm_connector *connector); 245 + extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs; 246 + 247 + #endif /* __INTEL_DRV_H__ */
+169
drivers/staging/gma500/psb_intel_i2c.c
··· 1 + /* 2 + * Copyright © 2006-2007 Intel Corporation 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms and conditions of the GNU General Public License, 6 + * version 2, as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope it will be useful, but WITHOUT 9 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 + * more details. 12 + * 13 + * You should have received a copy of the GNU General Public License along with 14 + * this program; if not, write to the Free Software Foundation, Inc., 15 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 + * 17 + * Authors: 18 + * Eric Anholt <eric@anholt.net> 19 + */ 20 + 21 + #include <linux/i2c.h> 22 + #include <linux/i2c-algo-bit.h> 23 + 24 + #include "psb_drv.h" 25 + #include "psb_intel_reg.h" 26 + 27 + /* 28 + * Intel GPIO access functions 29 + */ 30 + 31 + #define I2C_RISEFALL_TIME 20 32 + 33 + static int get_clock(void *data) 34 + { 35 + struct psb_intel_i2c_chan *chan = data; 36 + struct drm_device *dev = chan->drm_dev; 37 + u32 val; 38 + 39 + val = REG_READ(chan->reg); 40 + return (val & GPIO_CLOCK_VAL_IN) != 0; 41 + } 42 + 43 + static int get_data(void *data) 44 + { 45 + struct psb_intel_i2c_chan *chan = data; 46 + struct drm_device *dev = chan->drm_dev; 47 + u32 val; 48 + 49 + val = REG_READ(chan->reg); 50 + return (val & GPIO_DATA_VAL_IN) != 0; 51 + } 52 + 53 + static void set_clock(void *data, int state_high) 54 + { 55 + struct psb_intel_i2c_chan *chan = data; 56 + struct drm_device *dev = chan->drm_dev; 57 + u32 reserved = 0, clock_bits; 58 + 59 + /* On most chips, these bits must be preserved in software. 
*/ 60 + reserved = 61 + REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE | 62 + GPIO_CLOCK_PULLUP_DISABLE); 63 + 64 + if (state_high) 65 + clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK; 66 + else 67 + clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | 68 + GPIO_CLOCK_VAL_MASK; 69 + REG_WRITE(chan->reg, reserved | clock_bits); 70 + udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ 71 + } 72 + 73 + static void set_data(void *data, int state_high) 74 + { 75 + struct psb_intel_i2c_chan *chan = data; 76 + struct drm_device *dev = chan->drm_dev; 77 + u32 reserved = 0, data_bits; 78 + 79 + /* On most chips, these bits must be preserved in software. */ 80 + reserved = 81 + REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE | 82 + GPIO_CLOCK_PULLUP_DISABLE); 83 + 84 + if (state_high) 85 + data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; 86 + else 87 + data_bits = 88 + GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | 89 + GPIO_DATA_VAL_MASK; 90 + 91 + REG_WRITE(chan->reg, reserved | data_bits); 92 + udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ 93 + } 94 + 95 + /** 96 + * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg 97 + * @dev: DRM device 98 + * @output: driver specific output device 99 + * @reg: GPIO reg to use 100 + * @name: name for this bus 101 + * 102 + * Creates and registers a new i2c bus with the Linux i2c layer, for use 103 + * in output probing and control (e.g. DDC or SDVO control functions). 104 + * 105 + * Possible values for @reg include: 106 + * %GPIOA 107 + * %GPIOB 108 + * %GPIOC 109 + * %GPIOD 110 + * %GPIOE 111 + * %GPIOF 112 + * %GPIOG 113 + * %GPIOH 114 + * see PRM for details on how these different busses are used. 
115 + */ 116 + struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev, 117 + const u32 reg, const char *name) 118 + { 119 + struct psb_intel_i2c_chan *chan; 120 + 121 + chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL); 122 + if (!chan) 123 + goto out_free; 124 + 125 + chan->drm_dev = dev; 126 + chan->reg = reg; 127 + snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name); 128 + chan->adapter.owner = THIS_MODULE; 129 + chan->adapter.algo_data = &chan->algo; 130 + chan->adapter.dev.parent = &dev->pdev->dev; 131 + chan->algo.setsda = set_data; 132 + chan->algo.setscl = set_clock; 133 + chan->algo.getsda = get_data; 134 + chan->algo.getscl = get_clock; 135 + chan->algo.udelay = 20; 136 + chan->algo.timeout = usecs_to_jiffies(2200); 137 + chan->algo.data = chan; 138 + 139 + i2c_set_adapdata(&chan->adapter, chan); 140 + 141 + if (i2c_bit_add_bus(&chan->adapter)) 142 + goto out_free; 143 + 144 + /* JJJ: raise SCL and SDA? */ 145 + set_data(chan, 1); 146 + set_clock(chan, 1); 147 + udelay(20); 148 + 149 + return chan; 150 + 151 + out_free: 152 + kfree(chan); 153 + return NULL; 154 + } 155 + 156 + /** 157 + * psb_intel_i2c_destroy - unregister and free i2c bus resources 158 + * @output: channel to free 159 + * 160 + * Unregister the adapter from the i2c layer, then free the structure. 161 + */ 162 + void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan) 163 + { 164 + if (!chan) 165 + return; 166 + 167 + i2c_del_adapter(&chan->adapter); 168 + kfree(chan); 169 + }
+889
drivers/staging/gma500/psb_intel_lvds.c
··· 1 + /* 2 + * Copyright © 2006-2007 Intel Corporation 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms and conditions of the GNU General Public License, 6 + * version 2, as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope it will be useful, but WITHOUT 9 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 + * more details. 12 + * 13 + * You should have received a copy of the GNU General Public License along with 14 + * this program; if not, write to the Free Software Foundation, Inc., 15 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 + * 17 + * Authors: 18 + * Eric Anholt <eric@anholt.net> 19 + * Dave Airlie <airlied@linux.ie> 20 + * Jesse Barnes <jesse.barnes@intel.com> 21 + */ 22 + 23 + #include <linux/i2c.h> 24 + /* #include <drm/drm_crtc.h> */ 25 + /* #include <drm/drm_edid.h> */ 26 + #include <drm/drmP.h> 27 + 28 + #include "psb_intel_bios.h" 29 + #include "psb_drv.h" 30 + #include "psb_intel_drv.h" 31 + #include "psb_intel_reg.h" 32 + #include "psb_powermgmt.h" 33 + #include <linux/pm_runtime.h> 34 + 35 + /* MRST defines start */ 36 + uint8_t blc_freq; 37 + uint8_t blc_minbrightness; 38 + uint8_t blc_i2caddr; 39 + uint8_t blc_brightnesscmd; 40 + int lvds_backlight; /* restore backlight to this value */ 41 + 42 + u32 CoreClock; 43 + u32 PWMControlRegFreq; 44 + 45 + /** 46 + * LVDS I2C backlight control macros 47 + */ 48 + #define BRIGHTNESS_MAX_LEVEL 100 49 + #define BRIGHTNESS_MASK 0xFF 50 + #define BLC_I2C_TYPE 0x01 51 + #define BLC_PWM_TYPT 0x02 52 + 53 + #define BLC_POLARITY_NORMAL 0 54 + #define BLC_POLARITY_INVERSE 1 55 + 56 + #define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE) 57 + #define PSB_BLC_MIN_PWM_REG_FREQ (0x2) 58 + #define PSB_BLC_PWM_PRECISION_FACTOR (10) 59 + #define PSB_BACKLIGHT_PWM_CTL_SHIFT (16) 60 + #define 
PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE) 61 + 62 + struct psb_intel_lvds_priv { 63 + /** 64 + * Saved LVDO output states 65 + */ 66 + uint32_t savePP_ON; 67 + uint32_t savePP_OFF; 68 + uint32_t saveLVDS; 69 + uint32_t savePP_CONTROL; 70 + uint32_t savePP_CYCLE; 71 + uint32_t savePFIT_CONTROL; 72 + uint32_t savePFIT_PGM_RATIOS; 73 + uint32_t saveBLC_PWM_CTL; 74 + }; 75 + 76 + /* MRST defines end */ 77 + 78 + /** 79 + * Returns the maximum level of the backlight duty cycle field. 80 + */ 81 + static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev) 82 + { 83 + struct drm_psb_private *dev_priv = dev->dev_private; 84 + u32 retVal; 85 + 86 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 87 + OSPM_UHB_ONLY_IF_ON)) { 88 + retVal = ((REG_READ(BLC_PWM_CTL) & 89 + BACKLIGHT_MODULATION_FREQ_MASK) >> 90 + BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; 91 + 92 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 93 + } else 94 + retVal = ((dev_priv->saveBLC_PWM_CTL & 95 + BACKLIGHT_MODULATION_FREQ_MASK) >> 96 + BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; 97 + 98 + return retVal; 99 + } 100 + 101 + /** 102 + * Set LVDS backlight level by I2C command 103 + */ 104 + static int psb_lvds_i2c_set_brightness(struct drm_device *dev, 105 + unsigned int level) 106 + { 107 + struct drm_psb_private *dev_priv = 108 + (struct drm_psb_private *)dev->dev_private; 109 + 110 + struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus; 111 + u8 out_buf[2]; 112 + unsigned int blc_i2c_brightness; 113 + 114 + struct i2c_msg msgs[] = { 115 + { 116 + .addr = lvds_i2c_bus->slave_addr, 117 + .flags = 0, 118 + .len = 2, 119 + .buf = out_buf, 120 + } 121 + }; 122 + 123 + blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level * 124 + BRIGHTNESS_MASK / 125 + BRIGHTNESS_MAX_LEVEL); 126 + 127 + if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE) 128 + blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness; 129 + 130 + out_buf[0] = dev_priv->lvds_bl->brightnesscmd; 131 + out_buf[1] = 
(u8)blc_i2c_brightness; 132 + 133 + if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) { 134 + DRM_DEBUG("I2C set brightness.(command, value) (%d, %d)\n", 135 + blc_brightnesscmd, 136 + blc_i2c_brightness); 137 + return 0; 138 + } 139 + 140 + DRM_ERROR("I2C transfer error\n"); 141 + return -1; 142 + } 143 + 144 + 145 + static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level) 146 + { 147 + struct drm_psb_private *dev_priv = 148 + (struct drm_psb_private *)dev->dev_private; 149 + 150 + u32 max_pwm_blc; 151 + u32 blc_pwm_duty_cycle; 152 + 153 + max_pwm_blc = psb_intel_lvds_get_max_backlight(dev); 154 + 155 + /*BLC_PWM_CTL Should be initiated while backlight device init*/ 156 + BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0); 157 + 158 + blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL; 159 + 160 + if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE) 161 + blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle; 162 + 163 + blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR; 164 + REG_WRITE(BLC_PWM_CTL, 165 + (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) | 166 + (blc_pwm_duty_cycle)); 167 + 168 + return 0; 169 + } 170 + 171 + /** 172 + * Set LVDS backlight level either by I2C or PWM 173 + */ 174 + void psb_intel_lvds_set_brightness(struct drm_device *dev, int level) 175 + { 176 + /*u32 blc_pwm_ctl;*/ 177 + struct drm_psb_private *dev_priv = 178 + (struct drm_psb_private *)dev->dev_private; 179 + 180 + DRM_DEBUG("backlight level is %d\n", level); 181 + 182 + if (!dev_priv->lvds_bl) { 183 + DRM_ERROR("NO LVDS Backlight Info\n"); 184 + return; 185 + } 186 + 187 + if (dev_priv->lvds_bl->type == BLC_I2C_TYPE) 188 + psb_lvds_i2c_set_brightness(dev, level); 189 + else 190 + psb_lvds_pwm_set_brightness(dev, level); 191 + } 192 + 193 + /** 194 + * Sets the backlight level. 195 + * 196 + * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight(). 
197 + */ 198 + static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level) 199 + { 200 + struct drm_psb_private *dev_priv = dev->dev_private; 201 + u32 blc_pwm_ctl; 202 + 203 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 204 + OSPM_UHB_ONLY_IF_ON)) { 205 + blc_pwm_ctl = 206 + REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; 207 + REG_WRITE(BLC_PWM_CTL, 208 + (blc_pwm_ctl | 209 + (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); 210 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 211 + } else { 212 + blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL & 213 + ~BACKLIGHT_DUTY_CYCLE_MASK; 214 + dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl | 215 + (level << BACKLIGHT_DUTY_CYCLE_SHIFT)); 216 + } 217 + } 218 + 219 + /** 220 + * Sets the power state for the panel. 221 + */ 222 + static void psb_intel_lvds_set_power(struct drm_device *dev, 223 + struct psb_intel_output *output, bool on) 224 + { 225 + u32 pp_status; 226 + 227 + if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 228 + OSPM_UHB_FORCE_POWER_ON)) 229 + return; 230 + 231 + if (on) { 232 + REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | 233 + POWER_TARGET_ON); 234 + do { 235 + pp_status = REG_READ(PP_STATUS); 236 + } while ((pp_status & PP_ON) == 0); 237 + 238 + psb_intel_lvds_set_backlight(dev, 239 + output-> 240 + mode_dev->backlight_duty_cycle); 241 + } else { 242 + psb_intel_lvds_set_backlight(dev, 0); 243 + 244 + REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & 245 + ~POWER_TARGET_ON); 246 + do { 247 + pp_status = REG_READ(PP_STATUS); 248 + } while (pp_status & PP_ON); 249 + } 250 + 251 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 252 + } 253 + 254 + static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode) 255 + { 256 + struct drm_device *dev = encoder->dev; 257 + struct psb_intel_output *output = enc_to_psb_intel_output(encoder); 258 + 259 + if (mode == DRM_MODE_DPMS_ON) 260 + psb_intel_lvds_set_power(dev, output, true); 261 + else 262 + psb_intel_lvds_set_power(dev, output, false); 263 
+ 264 + /* XXX: We never power down the LVDS pairs. */ 265 + } 266 + 267 + static void psb_intel_lvds_save(struct drm_connector *connector) 268 + { 269 + struct drm_device *dev = connector->dev; 270 + struct drm_psb_private *dev_priv = 271 + (struct drm_psb_private *)dev->dev_private; 272 + struct psb_intel_output *psb_intel_output = 273 + to_psb_intel_output(connector); 274 + struct psb_intel_lvds_priv *lvds_priv = 275 + (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv; 276 + 277 + lvds_priv->savePP_ON = REG_READ(LVDSPP_ON); 278 + lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF); 279 + lvds_priv->saveLVDS = REG_READ(LVDS); 280 + lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL); 281 + lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE); 282 + /*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/ 283 + lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); 284 + lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL); 285 + lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS); 286 + 287 + /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/ 288 + dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & 289 + BACKLIGHT_DUTY_CYCLE_MASK); 290 + 291 + /* 292 + * If the light is off at server startup, 293 + * just make it full brightness 294 + */ 295 + if (dev_priv->backlight_duty_cycle == 0) 296 + dev_priv->backlight_duty_cycle = 297 + psb_intel_lvds_get_max_backlight(dev); 298 + 299 + DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", 300 + lvds_priv->savePP_ON, 301 + lvds_priv->savePP_OFF, 302 + lvds_priv->saveLVDS, 303 + lvds_priv->savePP_CONTROL, 304 + lvds_priv->savePP_CYCLE, 305 + lvds_priv->saveBLC_PWM_CTL); 306 + } 307 + 308 + static void psb_intel_lvds_restore(struct drm_connector *connector) 309 + { 310 + struct drm_device *dev = connector->dev; 311 + u32 pp_status; 312 + 313 + /*struct drm_psb_private *dev_priv = 314 + (struct drm_psb_private *)dev->dev_private;*/ 315 + struct psb_intel_output *psb_intel_output = 316 + to_psb_intel_output(connector); 
317 + struct psb_intel_lvds_priv *lvds_priv = 318 + (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv; 319 + 320 + DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", 321 + lvds_priv->savePP_ON, 322 + lvds_priv->savePP_OFF, 323 + lvds_priv->saveLVDS, 324 + lvds_priv->savePP_CONTROL, 325 + lvds_priv->savePP_CYCLE, 326 + lvds_priv->saveBLC_PWM_CTL); 327 + 328 + REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL); 329 + REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL); 330 + REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS); 331 + REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON); 332 + REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF); 333 + /*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/ 334 + REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE); 335 + REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL); 336 + REG_WRITE(LVDS, lvds_priv->saveLVDS); 337 + 338 + if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) { 339 + REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | 340 + POWER_TARGET_ON); 341 + do { 342 + pp_status = REG_READ(PP_STATUS); 343 + } while ((pp_status & PP_ON) == 0); 344 + } else { 345 + REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & 346 + ~POWER_TARGET_ON); 347 + do { 348 + pp_status = REG_READ(PP_STATUS); 349 + } while (pp_status & PP_ON); 350 + } 351 + } 352 + 353 + int psb_intel_lvds_mode_valid(struct drm_connector *connector, 354 + struct drm_display_mode *mode) 355 + { 356 + struct psb_intel_output *psb_intel_output = 357 + to_psb_intel_output(connector); 358 + struct drm_display_mode *fixed_mode = 359 + psb_intel_output->mode_dev->panel_fixed_mode; 360 + 361 + PSB_DEBUG_ENTRY("\n"); 362 + 363 + if (psb_intel_output->type == INTEL_OUTPUT_MIPI2) 364 + fixed_mode = psb_intel_output->mode_dev->panel_fixed_mode2; 365 + 366 + /* just in case */ 367 + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 368 + return MODE_NO_DBLESCAN; 369 + 370 + /* just in case */ 371 + if (mode->flags & DRM_MODE_FLAG_INTERLACE) 372 + return MODE_NO_INTERLACE; 373 + 374 + if (fixed_mode) 
{ 375 + if (mode->hdisplay > fixed_mode->hdisplay) 376 + return MODE_PANEL; 377 + if (mode->vdisplay > fixed_mode->vdisplay) 378 + return MODE_PANEL; 379 + } 380 + return MODE_OK; 381 + } 382 + 383 + bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, 384 + struct drm_display_mode *mode, 385 + struct drm_display_mode *adjusted_mode) 386 + { 387 + struct psb_intel_mode_device *mode_dev = 388 + enc_to_psb_intel_output(encoder)->mode_dev; 389 + struct drm_device *dev = encoder->dev; 390 + struct psb_intel_crtc *psb_intel_crtc = 391 + to_psb_intel_crtc(encoder->crtc); 392 + struct drm_encoder *tmp_encoder; 393 + struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode; 394 + struct psb_intel_output *psb_intel_output = 395 + enc_to_psb_intel_output(encoder); 396 + 397 + PSB_DEBUG_ENTRY("type = 0x%x, pipe = %d.\n", 398 + psb_intel_output->type, psb_intel_crtc->pipe); 399 + 400 + if (psb_intel_output->type == INTEL_OUTPUT_MIPI2) 401 + panel_fixed_mode = mode_dev->panel_fixed_mode2; 402 + 403 + /* PSB doesn't appear to be GEN4 */ 404 + if (psb_intel_crtc->pipe == 0) { 405 + printk(KERN_ERR "Can't support LVDS on pipe A\n"); 406 + return false; 407 + } 408 + /* Should never happen!! */ 409 + list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, 410 + head) { 411 + if (tmp_encoder != encoder 412 + && tmp_encoder->crtc == encoder->crtc) { 413 + printk(KERN_ERR "Can't enable LVDS and another " 414 + "encoder on the same pipe\n"); 415 + return false; 416 + } 417 + } 418 + 419 + /* 420 + * If we have timings from the BIOS for the panel, put them in 421 + * to the adjusted mode. The CRTC will be set up for this mode, 422 + * with the panel scaling set up to source from the H/VDisplay 423 + * of the original mode. 
424 + */ 425 + if (panel_fixed_mode != NULL) { 426 + adjusted_mode->hdisplay = panel_fixed_mode->hdisplay; 427 + adjusted_mode->hsync_start = panel_fixed_mode->hsync_start; 428 + adjusted_mode->hsync_end = panel_fixed_mode->hsync_end; 429 + adjusted_mode->htotal = panel_fixed_mode->htotal; 430 + adjusted_mode->vdisplay = panel_fixed_mode->vdisplay; 431 + adjusted_mode->vsync_start = panel_fixed_mode->vsync_start; 432 + adjusted_mode->vsync_end = panel_fixed_mode->vsync_end; 433 + adjusted_mode->vtotal = panel_fixed_mode->vtotal; 434 + adjusted_mode->clock = panel_fixed_mode->clock; 435 + drm_mode_set_crtcinfo(adjusted_mode, 436 + CRTC_INTERLACE_HALVE_V); 437 + } 438 + 439 + /* 440 + * XXX: It would be nice to support lower refresh rates on the 441 + * panels to reduce power consumption, and perhaps match the 442 + * user's requested refresh rate. 443 + */ 444 + 445 + return true; 446 + } 447 + 448 + static void psb_intel_lvds_prepare(struct drm_encoder *encoder) 449 + { 450 + struct drm_device *dev = encoder->dev; 451 + struct psb_intel_output *output = enc_to_psb_intel_output(encoder); 452 + struct psb_intel_mode_device *mode_dev = output->mode_dev; 453 + 454 + PSB_DEBUG_ENTRY("\n"); 455 + 456 + if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 457 + OSPM_UHB_FORCE_POWER_ON)) 458 + return; 459 + 460 + mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); 461 + mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL & 462 + BACKLIGHT_DUTY_CYCLE_MASK); 463 + 464 + psb_intel_lvds_set_power(dev, output, false); 465 + 466 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 467 + } 468 + 469 + static void psb_intel_lvds_commit(struct drm_encoder *encoder) 470 + { 471 + struct drm_device *dev = encoder->dev; 472 + struct psb_intel_output *output = enc_to_psb_intel_output(encoder); 473 + struct psb_intel_mode_device *mode_dev = output->mode_dev; 474 + 475 + PSB_DEBUG_ENTRY("\n"); 476 + 477 + if (mode_dev->backlight_duty_cycle == 0) 478 + mode_dev->backlight_duty_cycle = 
479 + psb_intel_lvds_get_max_backlight(dev); 480 + 481 + psb_intel_lvds_set_power(dev, output, true); 482 + } 483 + 484 + static void psb_intel_lvds_mode_set(struct drm_encoder *encoder, 485 + struct drm_display_mode *mode, 486 + struct drm_display_mode *adjusted_mode) 487 + { 488 + struct psb_intel_mode_device *mode_dev = 489 + enc_to_psb_intel_output(encoder)->mode_dev; 490 + struct drm_device *dev = encoder->dev; 491 + u32 pfit_control; 492 + 493 + /* 494 + * The LVDS pin pair will already have been turned on in the 495 + * psb_intel_crtc_mode_set since it has a large impact on the DPLL 496 + * settings. 497 + */ 498 + 499 + /* 500 + * Enable automatic panel scaling so that non-native modes fill the 501 + * screen. Should be enabled before the pipe is enabled, according to 502 + * register description and PRM. 503 + */ 504 + if (mode->hdisplay != adjusted_mode->hdisplay || 505 + mode->vdisplay != adjusted_mode->vdisplay) 506 + pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE | 507 + HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR | 508 + HORIZ_INTERP_BILINEAR); 509 + else 510 + pfit_control = 0; 511 + 512 + if (mode_dev->panel_wants_dither) 513 + pfit_control |= PANEL_8TO6_DITHER_ENABLE; 514 + 515 + REG_WRITE(PFIT_CONTROL, pfit_control); 516 + } 517 + 518 + /** 519 + * Detect the LVDS connection. 520 + * 521 + * This always returns CONNECTOR_STATUS_CONNECTED. 522 + * This connector should only have 523 + * been set up if the LVDS was actually connected anyway. 524 + */ 525 + static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector 526 + *connector, bool force) 527 + { 528 + return connector_status_connected; 529 + } 530 + 531 + /** 532 + * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. 
533 + */ 534 + static int psb_intel_lvds_get_modes(struct drm_connector *connector) 535 + { 536 + struct drm_device *dev = connector->dev; 537 + struct psb_intel_output *psb_intel_output = 538 + to_psb_intel_output(connector); 539 + struct psb_intel_mode_device *mode_dev = 540 + psb_intel_output->mode_dev; 541 + int ret = 0; 542 + 543 + ret = psb_intel_ddc_get_modes(psb_intel_output); 544 + 545 + if (ret) 546 + return ret; 547 + 548 + /* Didn't get an EDID, so 549 + * Set wide sync ranges so we get all modes 550 + * handed to valid_mode for checking 551 + */ 552 + connector->display_info.min_vfreq = 0; 553 + connector->display_info.max_vfreq = 200; 554 + connector->display_info.min_hfreq = 0; 555 + connector->display_info.max_hfreq = 200; 556 + 557 + if (mode_dev->panel_fixed_mode != NULL) { 558 + struct drm_display_mode *mode = 559 + drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); 560 + drm_mode_probed_add(connector, mode); 561 + return 1; 562 + } 563 + 564 + return 0; 565 + } 566 + 567 + /** 568 + * psb_intel_lvds_destroy - unregister and free LVDS structures 569 + * @connector: connector to free 570 + * 571 + * Unregister the DDC bus for this connector then free the driver private 572 + * structure. 
573 + */ 574 + void psb_intel_lvds_destroy(struct drm_connector *connector) 575 + { 576 + struct psb_intel_output *psb_intel_output = 577 + to_psb_intel_output(connector); 578 + 579 + if (psb_intel_output->ddc_bus) 580 + psb_intel_i2c_destroy(psb_intel_output->ddc_bus); 581 + drm_sysfs_connector_remove(connector); 582 + drm_connector_cleanup(connector); 583 + kfree(connector); 584 + } 585 + 586 + int psb_intel_lvds_set_property(struct drm_connector *connector, 587 + struct drm_property *property, 588 + uint64_t value) 589 + { 590 + struct drm_encoder *pEncoder = connector->encoder; 591 + 592 + PSB_DEBUG_ENTRY("\n"); 593 + 594 + if (!strcmp(property->name, "scaling mode") && pEncoder) { 595 + struct psb_intel_crtc *pPsbCrtc = 596 + to_psb_intel_crtc(pEncoder->crtc); 597 + uint64_t curValue; 598 + 599 + PSB_DEBUG_ENTRY("scaling mode\n"); 600 + 601 + if (!pPsbCrtc) 602 + goto set_prop_error; 603 + 604 + switch (value) { 605 + case DRM_MODE_SCALE_FULLSCREEN: 606 + break; 607 + case DRM_MODE_SCALE_NO_SCALE: 608 + break; 609 + case DRM_MODE_SCALE_ASPECT: 610 + break; 611 + default: 612 + goto set_prop_error; 613 + } 614 + 615 + if (drm_connector_property_get_value(connector, 616 + property, 617 + &curValue)) 618 + goto set_prop_error; 619 + 620 + if (curValue == value) 621 + goto set_prop_done; 622 + 623 + if (drm_connector_property_set_value(connector, 624 + property, 625 + value)) 626 + goto set_prop_error; 627 + 628 + if (pPsbCrtc->saved_mode.hdisplay != 0 && 629 + pPsbCrtc->saved_mode.vdisplay != 0) { 630 + if (!drm_crtc_helper_set_mode(pEncoder->crtc, 631 + &pPsbCrtc->saved_mode, 632 + pEncoder->crtc->x, 633 + pEncoder->crtc->y, 634 + pEncoder->crtc->fb)) 635 + goto set_prop_error; 636 + } 637 + } else if (!strcmp(property->name, "backlight") && pEncoder) { 638 + PSB_DEBUG_ENTRY("backlight\n"); 639 + 640 + if (drm_connector_property_set_value(connector, 641 + property, 642 + value)) 643 + goto set_prop_error; 644 + else { 645 + #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 
646 + struct backlight_device bd; 647 + bd.props.brightness = value; 648 + psb_set_brightness(&bd); 649 + #endif 650 + } 651 + } else if (!strcmp(property->name, "DPMS") && pEncoder) { 652 + struct drm_encoder_helper_funcs *pEncHFuncs 653 + = pEncoder->helper_private; 654 + PSB_DEBUG_ENTRY("DPMS\n"); 655 + pEncHFuncs->dpms(pEncoder, value); 656 + } 657 + 658 + set_prop_done: 659 + return 0; 660 + set_prop_error: 661 + return -1; 662 + } 663 + 664 + static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = { 665 + .dpms = psb_intel_lvds_encoder_dpms, 666 + .mode_fixup = psb_intel_lvds_mode_fixup, 667 + .prepare = psb_intel_lvds_prepare, 668 + .mode_set = psb_intel_lvds_mode_set, 669 + .commit = psb_intel_lvds_commit, 670 + }; 671 + 672 + static const struct drm_connector_helper_funcs 673 + psb_intel_lvds_connector_helper_funcs = { 674 + .get_modes = psb_intel_lvds_get_modes, 675 + .mode_valid = psb_intel_lvds_mode_valid, 676 + .best_encoder = psb_intel_best_encoder, 677 + }; 678 + 679 + static const struct drm_connector_funcs psb_intel_lvds_connector_funcs = { 680 + .dpms = drm_helper_connector_dpms, 681 + .save = psb_intel_lvds_save, 682 + .restore = psb_intel_lvds_restore, 683 + .detect = psb_intel_lvds_detect, 684 + .fill_modes = drm_helper_probe_single_connector_modes, 685 + .set_property = psb_intel_lvds_set_property, 686 + .destroy = psb_intel_lvds_destroy, 687 + }; 688 + 689 + 690 + static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder) 691 + { 692 + drm_encoder_cleanup(encoder); 693 + } 694 + 695 + const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = { 696 + .destroy = psb_intel_lvds_enc_destroy, 697 + }; 698 + 699 + 700 + 701 + /** 702 + * psb_intel_lvds_init - setup LVDS connectors on this device 703 + * @dev: drm device 704 + * 705 + * Create the connector, register the LVDS DDC bus, and try to figure out what 706 + * modes we can display on the LVDS panel (if present). 
707 + */ 708 + void psb_intel_lvds_init(struct drm_device *dev, 709 + struct psb_intel_mode_device *mode_dev) 710 + { 711 + struct psb_intel_output *psb_intel_output; 712 + struct psb_intel_lvds_priv *lvds_priv; 713 + struct drm_connector *connector; 714 + struct drm_encoder *encoder; 715 + struct drm_display_mode *scan; /* *modes, *bios_mode; */ 716 + struct drm_crtc *crtc; 717 + struct drm_psb_private *dev_priv = 718 + (struct drm_psb_private *)dev->dev_private; 719 + u32 lvds; 720 + int pipe; 721 + 722 + psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL); 723 + if (!psb_intel_output) 724 + return; 725 + 726 + lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL); 727 + if (!lvds_priv) { 728 + kfree(psb_intel_output); 729 + DRM_DEBUG("LVDS private allocation error\n"); 730 + return; 731 + } 732 + 733 + psb_intel_output->dev_priv = lvds_priv; 734 + 735 + psb_intel_output->mode_dev = mode_dev; 736 + connector = &psb_intel_output->base; 737 + encoder = &psb_intel_output->enc; 738 + drm_connector_init(dev, &psb_intel_output->base, 739 + &psb_intel_lvds_connector_funcs, 740 + DRM_MODE_CONNECTOR_LVDS); 741 + 742 + drm_encoder_init(dev, &psb_intel_output->enc, 743 + &psb_intel_lvds_enc_funcs, 744 + DRM_MODE_ENCODER_LVDS); 745 + 746 + drm_mode_connector_attach_encoder(&psb_intel_output->base, 747 + &psb_intel_output->enc); 748 + psb_intel_output->type = INTEL_OUTPUT_LVDS; 749 + 750 + drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs); 751 + drm_connector_helper_add(connector, 752 + &psb_intel_lvds_connector_helper_funcs); 753 + connector->display_info.subpixel_order = SubPixelHorizontalRGB; 754 + connector->interlace_allowed = false; 755 + connector->doublescan_allowed = false; 756 + 757 + /*Attach connector properties*/ 758 + drm_connector_attach_property(connector, 759 + dev->mode_config.scaling_mode_property, 760 + DRM_MODE_SCALE_FULLSCREEN); 761 + drm_connector_attach_property(connector, 762 + 
dev_priv->backlight_property, 763 + BRIGHTNESS_MAX_LEVEL); 764 + 765 + /** 766 + * Set up I2C bus 767 + * FIXME: distroy i2c_bus when exit 768 + */ 769 + psb_intel_output->i2c_bus = psb_intel_i2c_create(dev, 770 + GPIOB, 771 + "LVDSBLC_B"); 772 + if (!psb_intel_output->i2c_bus) { 773 + dev_printk(KERN_ERR, 774 + &dev->pdev->dev, "I2C bus registration failed.\n"); 775 + goto failed_blc_i2c; 776 + } 777 + psb_intel_output->i2c_bus->slave_addr = 0x2C; 778 + dev_priv->lvds_i2c_bus = psb_intel_output->i2c_bus; 779 + 780 + /* 781 + * LVDS discovery: 782 + * 1) check for EDID on DDC 783 + * 2) check for VBT data 784 + * 3) check to see if LVDS is already on 785 + * if none of the above, no panel 786 + * 4) make sure lid is open 787 + * if closed, act like it's not there for now 788 + */ 789 + 790 + /* Set up the DDC bus. */ 791 + psb_intel_output->ddc_bus = psb_intel_i2c_create(dev, 792 + GPIOC, 793 + "LVDSDDC_C"); 794 + if (!psb_intel_output->ddc_bus) { 795 + dev_printk(KERN_ERR, &dev->pdev->dev, 796 + "DDC bus registration " "failed.\n"); 797 + goto failed_ddc; 798 + } 799 + 800 + /* 801 + * Attempt to get the fixed panel mode from DDC. Assume that the 802 + * preferred mode is the right one. 803 + */ 804 + psb_intel_ddc_get_modes(psb_intel_output); 805 + list_for_each_entry(scan, &connector->probed_modes, head) { 806 + if (scan->type & DRM_MODE_TYPE_PREFERRED) { 807 + mode_dev->panel_fixed_mode = 808 + drm_mode_duplicate(dev, scan); 809 + goto out; /* FIXME: check for quirks */ 810 + } 811 + } 812 + 813 + /* Failed to get EDID, what about VBT? do we need this?*/ 814 + if (mode_dev->vbt_mode) 815 + mode_dev->panel_fixed_mode = 816 + drm_mode_duplicate(dev, mode_dev->vbt_mode); 817 + 818 + if (!mode_dev->panel_fixed_mode) 819 + if (dev_priv->lfp_lvds_vbt_mode) 820 + mode_dev->panel_fixed_mode = 821 + drm_mode_duplicate(dev, 822 + dev_priv->lfp_lvds_vbt_mode); 823 + 824 + /* 825 + * If we didn't get EDID, try checking if the panel is already turned 826 + * on. 
If so, assume that whatever is currently programmed is the 827 + * correct mode. 828 + */ 829 + lvds = REG_READ(LVDS); 830 + pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; 831 + crtc = psb_intel_get_crtc_from_pipe(dev, pipe); 832 + 833 + if (crtc && (lvds & LVDS_PORT_EN)) { 834 + mode_dev->panel_fixed_mode = 835 + psb_intel_crtc_mode_get(dev, crtc); 836 + if (mode_dev->panel_fixed_mode) { 837 + mode_dev->panel_fixed_mode->type |= 838 + DRM_MODE_TYPE_PREFERRED; 839 + goto out; /* FIXME: check for quirks */ 840 + } 841 + } 842 + 843 + /* If we still don't have a mode after all that, give up. */ 844 + if (!mode_dev->panel_fixed_mode) { 845 + DRM_DEBUG 846 + ("Found no modes on the lvds, ignoring the LVDS\n"); 847 + goto failed_find; 848 + } 849 + 850 + /* 851 + * Blacklist machines with BIOSes that list an LVDS panel without 852 + * actually having one. 853 + */ 854 + out: 855 + drm_sysfs_connector_add(connector); 856 + 857 + PSB_DEBUG_ENTRY("hdisplay = %d\n", 858 + mode_dev->panel_fixed_mode->hdisplay); 859 + PSB_DEBUG_ENTRY(" vdisplay = %d\n", 860 + mode_dev->panel_fixed_mode->vdisplay); 861 + PSB_DEBUG_ENTRY(" hsync_start = %d\n", 862 + mode_dev->panel_fixed_mode->hsync_start); 863 + PSB_DEBUG_ENTRY(" hsync_end = %d\n", 864 + mode_dev->panel_fixed_mode->hsync_end); 865 + PSB_DEBUG_ENTRY(" htotal = %d\n", 866 + mode_dev->panel_fixed_mode->htotal); 867 + PSB_DEBUG_ENTRY(" vsync_start = %d\n", 868 + mode_dev->panel_fixed_mode->vsync_start); 869 + PSB_DEBUG_ENTRY(" vsync_end = %d\n", 870 + mode_dev->panel_fixed_mode->vsync_end); 871 + PSB_DEBUG_ENTRY(" vtotal = %d\n", 872 + mode_dev->panel_fixed_mode->vtotal); 873 + PSB_DEBUG_ENTRY(" clock = %d\n", 874 + mode_dev->panel_fixed_mode->clock); 875 + 876 + return; 877 + 878 + failed_find: 879 + if (psb_intel_output->ddc_bus) 880 + psb_intel_i2c_destroy(psb_intel_output->ddc_bus); 881 + failed_ddc: 882 + if (psb_intel_output->i2c_bus) 883 + psb_intel_i2c_destroy(psb_intel_output->i2c_bus); 884 + failed_blc_i2c: 885 + 
drm_encoder_cleanup(encoder); 886 + drm_connector_cleanup(connector); 887 + kfree(connector); 888 + } 889 +
+77
drivers/staging/gma500/psb_intel_modes.c
··· 1 + /* 2 + * Copyright (c) 2007 Intel Corporation 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms and conditions of the GNU General Public License, 6 + * version 2, as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope it will be useful, but WITHOUT 9 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 + * more details. 12 + * 13 + * You should have received a copy of the GNU General Public License along with 14 + * this program; if not, write to the Free Software Foundation, Inc., 15 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 + * 17 + * Authers: Jesse Barnes <jesse.barnes@intel.com> 18 + */ 19 + 20 + #include <linux/i2c.h> 21 + #include <linux/fb.h> 22 + #include <drm/drmP.h> 23 + #include "psb_intel_drv.h" 24 + 25 + /** 26 + * psb_intel_ddc_probe 27 + * 28 + */ 29 + bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output) 30 + { 31 + u8 out_buf[] = { 0x0, 0x0 }; 32 + u8 buf[2]; 33 + int ret; 34 + struct i2c_msg msgs[] = { 35 + { 36 + .addr = 0x50, 37 + .flags = 0, 38 + .len = 1, 39 + .buf = out_buf, 40 + }, 41 + { 42 + .addr = 0x50, 43 + .flags = I2C_M_RD, 44 + .len = 1, 45 + .buf = buf, 46 + } 47 + }; 48 + 49 + ret = i2c_transfer(&psb_intel_output->ddc_bus->adapter, msgs, 2); 50 + if (ret == 2) 51 + return true; 52 + 53 + return false; 54 + } 55 + 56 + /** 57 + * psb_intel_ddc_get_modes - get modelist from monitor 58 + * @connector: DRM connector device to use 59 + * 60 + * Fetch the EDID information from @connector using the DDC bus. 
61 + */ 62 + int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output) 63 + { 64 + struct edid *edid; 65 + int ret = 0; 66 + 67 + edid = 68 + drm_get_edid(&psb_intel_output->base, 69 + &psb_intel_output->ddc_bus->adapter); 70 + if (edid) { 71 + drm_mode_connector_update_edid_property(&psb_intel_output-> 72 + base, edid); 73 + ret = drm_add_edid_modes(&psb_intel_output->base, edid); 74 + kfree(edid); 75 + } 76 + return ret; 77 + }
+78
drivers/staging/gma500/psb_intel_opregion.c
··· 1 + /* 2 + * Copyright 2010 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 + * DEALINGS IN THE SOFTWARE. 
22 + * 23 + */ 24 + 25 + #include "psb_drv.h" 26 + 27 + struct opregion_header { 28 + u8 signature[16]; 29 + u32 size; 30 + u32 opregion_ver; 31 + u8 bios_ver[32]; 32 + u8 vbios_ver[16]; 33 + u8 driver_ver[16]; 34 + u32 mboxes; 35 + u8 reserved[164]; 36 + } __attribute__((packed)); 37 + 38 + struct opregion_apci { 39 + /*FIXME: add it later*/ 40 + } __attribute__((packed)); 41 + 42 + struct opregion_swsci { 43 + /*FIXME: add it later*/ 44 + } __attribute__((packed)); 45 + 46 + struct opregion_acpi { 47 + /*FIXME: add it later*/ 48 + } __attribute__((packed)); 49 + 50 + int psb_intel_opregion_init(struct drm_device *dev) 51 + { 52 + struct drm_psb_private *dev_priv = dev->dev_private; 53 + /*struct psb_intel_opregion * opregion = &dev_priv->opregion;*/ 54 + u32 opregion_phy; 55 + void *base; 56 + u32 *lid_state; 57 + 58 + dev_priv->lid_state = NULL; 59 + 60 + pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy); 61 + if (opregion_phy == 0) { 62 + DRM_DEBUG("Opregion not supported, won't support lid-switch\n"); 63 + return -ENOTSUPP; 64 + } 65 + DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy); 66 + 67 + base = ioremap(opregion_phy, 8*1024); 68 + if (!base) 69 + return -ENOMEM; 70 + 71 + lid_state = base + 0x01ac; 72 + 73 + DRM_DEBUG("Lid switch state 0x%08x\n", *lid_state); 74 + 75 + dev_priv->lid_state = lid_state; 76 + dev_priv->lid_last_state = *lid_state; 77 + return 0; 78 + }
+1200
drivers/staging/gma500/psb_intel_reg.h
··· 1 + /* 2 + * Copyright (c) 2009, Intel Corporation. 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms and conditions of the GNU General Public License, 6 + * version 2, as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope it will be useful, but WITHOUT 9 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 + * more details. 12 + * 13 + * You should have received a copy of the GNU General Public License along with 14 + * this program; if not, write to the Free Software Foundation, Inc., 15 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 + */ 17 + #ifndef __PSB_INTEL_REG_H__ 18 + #define __PSB_INTEL_REG_H__ 19 + 20 + #define BLC_PWM_CTL 0x61254 21 + #define BLC_PWM_CTL2 0x61250 22 + #define BLC_PWM_CTL_C 0x62254 23 + #define BLC_PWM_CTL2_C 0x62250 24 + #define BACKLIGHT_MODULATION_FREQ_SHIFT (17) 25 + /** 26 + * This is the most significant 15 bits of the number of backlight cycles in a 27 + * complete cycle of the modulated backlight control. 28 + * 29 + * The actual value is this field multiplied by two. 30 + */ 31 + #define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) 32 + #define BLM_LEGACY_MODE (1 << 16) 33 + /** 34 + * This is the number of cycles out of the backlight modulation cycle for which 35 + * the backlight is on. 36 + * 37 + * This field must be no greater than the number of cycles in the complete 38 + * backlight modulation cycle. 
39 + */ 40 + #define BACKLIGHT_DUTY_CYCLE_SHIFT (0) 41 + #define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) 42 + 43 + #define I915_GCFGC 0xf0 44 + #define I915_LOW_FREQUENCY_ENABLE (1 << 7) 45 + #define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4) 46 + #define I915_DISPLAY_CLOCK_333_MHZ (4 << 4) 47 + #define I915_DISPLAY_CLOCK_MASK (7 << 4) 48 + 49 + #define I855_HPLLCC 0xc0 50 + #define I855_CLOCK_CONTROL_MASK (3 << 0) 51 + #define I855_CLOCK_133_200 (0 << 0) 52 + #define I855_CLOCK_100_200 (1 << 0) 53 + #define I855_CLOCK_100_133 (2 << 0) 54 + #define I855_CLOCK_166_250 (3 << 0) 55 + 56 + /* I830 CRTC registers */ 57 + #define HTOTAL_A 0x60000 58 + #define HBLANK_A 0x60004 59 + #define HSYNC_A 0x60008 60 + #define VTOTAL_A 0x6000c 61 + #define VBLANK_A 0x60010 62 + #define VSYNC_A 0x60014 63 + #define PIPEASRC 0x6001c 64 + #define BCLRPAT_A 0x60020 65 + #define VSYNCSHIFT_A 0x60028 66 + 67 + #define HTOTAL_B 0x61000 68 + #define HBLANK_B 0x61004 69 + #define HSYNC_B 0x61008 70 + #define VTOTAL_B 0x6100c 71 + #define VBLANK_B 0x61010 72 + #define VSYNC_B 0x61014 73 + #define PIPEBSRC 0x6101c 74 + #define BCLRPAT_B 0x61020 75 + #define VSYNCSHIFT_B 0x61028 76 + 77 + #define HTOTAL_C 0x62000 78 + #define HBLANK_C 0x62004 79 + #define HSYNC_C 0x62008 80 + #define VTOTAL_C 0x6200c 81 + #define VBLANK_C 0x62010 82 + #define VSYNC_C 0x62014 83 + #define PIPECSRC 0x6201c 84 + #define BCLRPAT_C 0x62020 85 + #define VSYNCSHIFT_C 0x62028 86 + 87 + #define PP_STATUS 0x61200 88 + # define PP_ON (1 << 31) 89 + /** 90 + * Indicates that all dependencies of the panel are on: 91 + * 92 + * - PLL enabled 93 + * - pipe enabled 94 + * - LVDS/DVOB/DVOC on 95 + */ 96 + # define PP_READY (1 << 30) 97 + # define PP_SEQUENCE_NONE (0 << 28) 98 + # define PP_SEQUENCE_ON (1 << 28) 99 + # define PP_SEQUENCE_OFF (2 << 28) 100 + # define PP_SEQUENCE_MASK 0x30000000 101 + #define PP_CONTROL 0x61204 102 + # define POWER_TARGET_ON (1 << 0) 103 + 104 + #define LVDSPP_ON 0x61208 105 + #define LVDSPP_OFF 0x6120c 
#define PP_CYCLE	0x61210

/* Panel fitter (programmable display scaler) control */
#define PFIT_CONTROL	0x61230
# define PFIT_ENABLE			(1 << 31)
# define PFIT_PIPE_MASK			(3 << 29)
# define PFIT_PIPE_SHIFT		29
# define PFIT_SCALING_MODE_PILLARBOX	(1 << 27)
# define PFIT_SCALING_MODE_LETTERBOX	(3 << 26)
# define VERT_INTERP_DISABLE		(0 << 10)
# define VERT_INTERP_BILINEAR		(1 << 10)
# define VERT_INTERP_MASK		(3 << 10)
# define VERT_AUTO_SCALE		(1 << 9)
# define HORIZ_INTERP_DISABLE		(0 << 6)
# define HORIZ_INTERP_BILINEAR		(1 << 6)
# define HORIZ_INTERP_MASK		(3 << 6)
# define HORIZ_AUTO_SCALE		(1 << 5)
# define PANEL_8TO6_DITHER_ENABLE	(1 << 3)

#define PFIT_PGM_RATIOS	0x61234
# define PFIT_VERT_SCALE_MASK	0xfff00000
# define PFIT_HORIZ_SCALE_MASK	0x0000fff0

#define PFIT_AUTO_RATIOS	0x61238


/* Display PLL control */
#define DPLL_A	0x06014
#define DPLL_B	0x06018
# define DPLL_VCO_ENABLE	(1 << 31)
# define DPLL_DVO_HIGH_SPEED	(1 << 30)
# define DPLL_SYNCLOCK_ENABLE	(1 << 29)
# define DPLL_VGA_MODE_DIS	(1 << 28)
# define DPLLB_MODE_DAC_SERIAL	(1 << 26)	/* i915 */
# define DPLLB_MODE_LVDS	(2 << 26)	/* i915 */
# define DPLL_MODE_MASK		(3 << 26)
# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24)	/* i915 */
# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5	(1 << 24)	/* i915 */
# define DPLLB_LVDS_P2_CLOCK_DIV_14	(0 << 24)	/* i915 */
# define DPLLB_LVDS_P2_CLOCK_DIV_7	(1 << 24)	/* i915 */
# define DPLL_P2_CLOCK_DIV_MASK		0x03000000	/* i915 */
# define DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000	/* i915 */
/**
 * The i830 generation, in DAC/serial mode, defines p1 as two plus this
 * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
 */
# define DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
/**
 * The i830 generation, in LVDS mode, defines P1 as the bit number set within
 * this field (only one bit may be set).
 */
# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
# define DPLL_FPA01_P1_POST_DIV_SHIFT	16
# define PLL_P2_DIVIDE_BY_4	(1 << 23)	/* i830, required
						 * in DVO non-gang */
# define PLL_P1_DIVIDE_BY_TWO	(1 << 21)	/* i830 */
# define PLL_REF_INPUT_DREFCLK	(0 << 13)
# define PLL_REF_INPUT_TVCLKINA	(1 << 13)	/* i830 */
# define PLL_REF_INPUT_TVCLKINBC	(2 << 13)	/* SDVO
						 * TVCLKIN */
# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
# define PLL_REF_INPUT_MASK	(3 << 13)
# define PLL_LOAD_PULSE_PHASE_SHIFT	9
/*
 * Parallel to Serial Load Pulse phase selection.
 * Selects the phase for the 10X DPLL clock for the PCIe
 * digital display port. The range is 4 to 13; 10 or more
 * is just a flip delay. The default is 6
 */
# define PLL_LOAD_PULSE_PHASE_MASK	(0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
# define DISPLAY_RATE_SELECT_FPA1	(1 << 8)

/**
 * SDVO multiplier for 945G/GM. Not used on 965.
 *
 * \sa DPLL_MD_UDI_MULTIPLIER_MASK
 */
# define SDVO_MULTIPLIER_MASK		0x000000ff
# define SDVO_MULTIPLIER_SHIFT_HIRES	4
# define SDVO_MULTIPLIER_SHIFT_VGA	0

/** @defgroup DPLL_MD
 * @{
 */
/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
#define DPLL_A_MD	0x0601c
/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
#define DPLL_B_MD	0x06020
/**
 * UDI pixel divider, controlling how many pixels are stuffed into a packet.
 *
 * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
 */
# define DPLL_MD_UDI_DIVIDER_MASK	0x3f000000
# define DPLL_MD_UDI_DIVIDER_SHIFT	24
/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
# define DPLL_MD_VGA_UDI_DIVIDER_MASK	0x003f0000
# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT	16
/**
 * SDVO/UDI pixel multiplier.
 *
 * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
 * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
 * modes, the bus rate would be below the limits, so SDVO allows for stuffing
 * dummy bytes in the datastream at an increased clock rate, with both sides
 * of the link knowing how many bytes are fill.
 *
 * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
 * rate to 130Mhz to get a bus rate of 1.30Ghz.  The DPLL clock rate would be
 * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
 * through an SDVO command.
 *
 * This register field has values of multiplication factor minus 1, with
 * a maximum multiplier of 5 for SDVO.
 */
# define DPLL_MD_UDI_MULTIPLIER_MASK	0x00003f00
# define DPLL_MD_UDI_MULTIPLIER_SHIFT	8
/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
 * This best be set to the default value (3) or the CRT won't work. No,
 * I don't entirely understand what this does...
 */
# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
/** @} */

#define DPLL_TEST	0x606c
# define DPLLB_TEST_SDVO_DIV_1		(0 << 22)
# define DPLLB_TEST_SDVO_DIV_2		(1 << 22)
# define DPLLB_TEST_SDVO_DIV_4		(2 << 22)
# define DPLLB_TEST_SDVO_DIV_MASK	(3 << 22)
# define DPLLB_TEST_N_BYPASS		(1 << 19)
# define DPLLB_TEST_M_BYPASS		(1 << 18)
# define DPLLB_INPUT_BUFFER_ENABLE	(1 << 16)
# define DPLLA_TEST_N_BYPASS		(1 << 3)
# define DPLLA_TEST_M_BYPASS		(1 << 2)
# define DPLLA_INPUT_BUFFER_ENABLE	(1 << 0)

/* Analog (VGA) DAC port control */
#define ADPA			0x61100
#define ADPA_DAC_ENABLE		(1<<31)
#define ADPA_DAC_DISABLE	0
#define ADPA_PIPE_SELECT_MASK	(1<<30)
#define ADPA_PIPE_A_SELECT	0
#define ADPA_PIPE_B_SELECT	(1<<30)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY	0
#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
#define ADPA_VSYNC_CNTL_ENABLE	0
#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
#define ADPA_HSYNC_CNTL_ENABLE	0
#define ADPA_VSYNC_ACTIVE_HIGH	(1<<4)
#define ADPA_VSYNC_ACTIVE_LOW	0
#define ADPA_HSYNC_ACTIVE_HIGH	(1<<3)
#define ADPA_HSYNC_ACTIVE_LOW	0

/* PLL divisor (N/M1/M2) registers, one pair per DPLL */
#define FPA0	0x06040
#define FPA1	0x06044
#define FPB0	0x06048
#define FPB1	0x0604c
# define FP_N_DIV_MASK		0x003f0000
# define FP_N_DIV_SHIFT		16
# define FP_M1_DIV_MASK		0x00003f00
# define FP_M1_DIV_SHIFT	8
# define FP_M2_DIV_MASK		0x0000003f
# define FP_M2_DIV_SHIFT	0


#define PORT_HOTPLUG_EN		0x61110
# define SDVOB_HOTPLUG_INT_EN		(1 << 26)
# define SDVOC_HOTPLUG_INT_EN		(1 << 25)
# define TV_HOTPLUG_INT_EN		(1 << 18)
# define CRT_HOTPLUG_INT_EN		(1 << 9)
# define CRT_HOTPLUG_FORCE_DETECT	(1 << 3)

#define PORT_HOTPLUG_STAT	0x61114
# define CRT_HOTPLUG_INT_STATUS		(1 << 11)
# define TV_HOTPLUG_INT_STATUS		(1 << 10)
# define CRT_HOTPLUG_MONITOR_MASK	(3 << 8)
# define CRT_HOTPLUG_MONITOR_COLOR	(3 << 8)
# define CRT_HOTPLUG_MONITOR_MONO	(2 << 8)
# define CRT_HOTPLUG_MONITOR_NONE	(0 << 8)
# define SDVOC_HOTPLUG_INT_STATUS	(1 << 7)
# define SDVOB_HOTPLUG_INT_STATUS	(1 << 6)

/* SDVO (serial digital video out) port control */
#define SDVOB	0x61140
#define SDVOC	0x61160
#define SDVO_ENABLE		(1 << 31)
#define SDVO_PIPE_B_SELECT	(1 << 30)
#define SDVO_STALL_SELECT	(1 << 29)
#define SDVO_INTERRUPT_ENABLE	(1 << 26)
/**
 * 915G/GM SDVO pixel multiplier.
 *
 * Programmed value is multiplier - 1, up to 5x.
 *
 * \sa DPLL_MD_UDI_MULTIPLIER_MASK
 */
#define SDVO_PORT_MULTIPLY_MASK	(7 << 23)
#define SDVO_PORT_MULTIPLY_SHIFT	23
#define SDVO_PHASE_SELECT_MASK	(15 << 19)
#define SDVO_PHASE_SELECT_DEFAULT	(6 << 19)
#define SDVO_CLOCK_OUTPUT_INVERT	(1 << 18)
#define SDVOC_GANG_MODE		(1 << 16)
#define SDVO_BORDER_ENABLE	(1 << 7)
#define SDVOB_PCIE_CONCURRENCY	(1 << 3)
#define SDVO_DETECTED		(1 << 2)
/* Bits to be preserved when writing */
#define SDVOB_PRESERVE_MASK	((1 << 17) | (1 << 16) | (1 << 14))
#define SDVOC_PRESERVE_MASK	(1 << 17)

/** @defgroup LVDS
 * @{
 */
/**
 * This register controls the LVDS output enable, pipe selection, and data
 * format selection.
 *
 * All of the clock/data pairs are force powered down by power sequencing.
 */
#define LVDS	0x61180
/**
 * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
 * the DPLL semantics change when the LVDS is assigned to that pipe.
 */
# define LVDS_PORT_EN		(1 << 31)
/** Selects pipe B for LVDS data.  Must be set on pre-965. */
# define LVDS_PIPEB_SELECT	(1 << 30)

/** Turns on border drawing to allow centered display. */
# define LVDS_BORDER_EN		(1 << 15)

/**
 * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
 * pixel.
 */
# define LVDS_A0A2_CLKA_POWER_MASK	(3 << 8)
# define LVDS_A0A2_CLKA_POWER_DOWN	(0 << 8)
# define LVDS_A0A2_CLKA_POWER_UP	(3 << 8)
/**
 * Controls the A3 data pair, which contains the additional LSBs for 24 bit
 * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
 * on.
 */
# define LVDS_A3_POWER_MASK	(3 << 6)
# define LVDS_A3_POWER_DOWN	(0 << 6)
# define LVDS_A3_POWER_UP	(3 << 6)
/**
 * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
 * is set.
 */
# define LVDS_CLKB_POWER_MASK	(3 << 4)
# define LVDS_CLKB_POWER_DOWN	(0 << 4)
# define LVDS_CLKB_POWER_UP	(3 << 4)

/**
 * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
 * setting for whether we are in dual-channel mode.  The B3 pair will
 * additionally only be powered up when LVDS_A3_POWER_UP is set.
 */
# define LVDS_B0B3_POWER_MASK	(3 << 2)
# define LVDS_B0B3_POWER_DOWN	(0 << 2)
# define LVDS_B0B3_POWER_UP	(3 << 2)

/* Pipe A configuration */
#define PIPEACONF		0x70008
#define PIPEACONF_ENABLE	(1<<31)
#define PIPEACONF_DISABLE	0
#define PIPEACONF_DOUBLE_WIDE	(1<<30)
#define PIPECONF_ACTIVE		(1<<30)
#define I965_PIPECONF_ACTIVE	(1<<30)
#define PIPECONF_DSIPLL_LOCK	(1<<29)
#define PIPEACONF_SINGLE_WIDE	0
#define PIPEACONF_PIPE_UNLOCKED 0
#define PIPEACONF_DSR		(1<<26)
#define PIPEACONF_PIPE_LOCKED	(1<<25)
#define PIPEACONF_PALETTE	0
#define PIPECONF_FORCE_BORDER	(1<<25)
#define PIPEACONF_GAMMA		(1<<24)
#define PIPECONF_PROGRESSIVE	(0 << 21)
#define PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY		(7 << 21)
#define PIPECONF_PLANE_OFF	(1<<19)
#define PIPECONF_CURSOR_OFF	(1<<18)


/* Pipe B configuration (duplicate PIPEBCONF_DISABLE definition removed) */
#define PIPEBCONF		0x71008
#define PIPEBCONF_ENABLE	(1<<31)
#define PIPEBCONF_DISABLE	0
#define PIPEBCONF_DOUBLE_WIDE	(1<<30)
#define PIPEBCONF_GAMMA		(1<<24)
#define PIPEBCONF_PALETTE	0

#define PIPECCONF	0x72008

#define PIPEBGCMAXRED	0x71010
#define PIPEBGCMAXGREEN 0x71014
#define PIPEBGCMAXBLUE	0x71018

/* Pipe status / interrupt registers */
#define PIPEASTAT	0x70024
#define PIPEBSTAT	0x71024
#define PIPECSTAT	0x72024
#define PIPE_VBLANK_INTERRUPT_STATUS		(1UL<<1)
#define PIPE_START_VBLANK_INTERRUPT_STATUS	(1UL<<2)
#define PIPE_VBLANK_CLEAR			(1 << 1)
#define PIPE_VBLANK_STATUS			(1 << 1)
#define PIPE_TE_STATUS				(1UL<<6)
#define PIPE_DPST_EVENT_STATUS			(1UL<<7)
#define PIPE_VSYNC_CLEAR			(1UL<<9)
#define PIPE_VSYNC_STATUS			(1UL<<9)
#define PIPE_HDMI_AUDIO_UNDERRUN_STATUS		(1UL<<10)
#define PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS	(1UL<<11)
#define PIPE_VBLANK_INTERRUPT_ENABLE		(1UL<<17)
#define PIPE_START_VBLANK_INTERRUPT_ENABLE	(1UL<<18)
#define PIPE_TE_ENABLE				(1UL<<22)
#define PIPE_DPST_EVENT_ENABLE			(1UL<<23)
/* (sic) historical misspelling of "ENABLE" — name kept, callers use it */
#define PIPE_VSYNC_ENABL			(1UL<<25)
#define PIPE_HDMI_AUDIO_UNDERRUN		(1UL<<26)
#define PIPE_HDMI_AUDIO_BUFFER_DONE		(1UL<<27)
#define PIPE_HDMI_AUDIO_INT_MASK		(PIPE_HDMI_AUDIO_UNDERRUN | PIPE_HDMI_AUDIO_BUFFER_DONE)
/* BITxx helpers are defined later in this header; expansion happens at use */
#define PIPE_EVENT_MASK		(BIT29|BIT28|BIT27|BIT26|BIT24|BIT23|BIT22|BIT21|BIT20|BIT16)
#define PIPE_VBLANK_MASK	(BIT25|BIT24|BIT18|BIT17)
/* DPST (display power saving technology) histogram / PWM registers */
#define HISTOGRAM_INT_CONTROL		0x61268
/* was "0X61264" — fixed to the lower-case 0x prefix used everywhere else */
#define HISTOGRAM_BIN_DATA		0x61264
#define HISTOGRAM_LOGIC_CONTROL		0x61260
#define PWM_CONTROL_LOGIC		0x61250
#define PIPE_HOTPLUG_INTERRUPT_STATUS	(1UL<<10)
#define HISTOGRAM_INTERRUPT_ENABLE	(1UL<<31)
#define HISTOGRAM_LOGIC_ENABLE		(1UL<<31)
#define PWM_LOGIC_ENABLE		(1UL<<31)
#define PWM_PHASEIN_ENABLE		(1UL<<25)
#define PWM_PHASEIN_INT_ENABLE		(1UL<<24)
#define PWM_PHASEIN_VB_COUNT		0x00001f00
#define PWM_PHASEIN_INC			0x0000001f
#define HISTOGRAM_INT_CTRL_CLEAR	(1UL<<30)
#define DPST_YUV_LUMA_MODE		0

/*
 * Overlay of the DPST image-enhancement histogram control register.
 * Access the raw register value through .data; the anonymous bitfield
 * struct names the individual fields.  NOTE(review): field order assumes
 * the compiler packs bitfields low-bit-first — matches the layouts used
 * elsewhere in this driver, but is implementation-defined.
 */
struct dpst_ie_histogram_control {
	union {
		uint32_t data;
		struct {
			uint32_t bin_reg_index:7;
			uint32_t reserved:4;
			uint32_t bin_reg_func_select:1;
			uint32_t sync_to_phase_in:1;
			uint32_t alt_enhancement_mode:2;
			uint32_t reserved1:1;
			uint32_t sync_to_phase_in_count:8;
			uint32_t histogram_mode_select:1;
			uint32_t reserved2:4;
			uint32_t ie_pipe_assignment:1;
			uint32_t ie_mode_table_enabled:1;
			uint32_t ie_histogram_enable:1;
		};
	};
};

/* Overlay of the DPST guardband (interrupt threshold) register. */
struct dpst_guardband {
	union {
		uint32_t data;
		struct {
			uint32_t guardband:22;
			uint32_t guardband_interrupt_delay:8;
			uint32_t interrupt_status:1;
			uint32_t interrupt_enable:1;
		};
	};
};

/* Pipe frame/pixel counters */
#define PIPEAFRAMEHIGH		0x70040
#define PIPEAFRAMEPIXEL		0x70044
#define PIPEBFRAMEHIGH		0x71040
#define PIPEBFRAMEPIXEL		0x71044
#define PIPECFRAMEHIGH		0x72040
#define PIPECFRAMEPIXEL		0x72044
#define PIPE_FRAME_HIGH_MASK	0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT	0
#define PIPE_FRAME_LOW_MASK	0xff000000
#define PIPE_FRAME_LOW_SHIFT	24
#define PIPE_PIXEL_MASK		0x00ffffff
#define PIPE_PIXEL_SHIFT	0

/* Display arbitration / FIFO watermark registers */
#define DSPARB			0x70030
#define DSPFW1			0x70034
#define DSPFW2			0x70038
#define DSPFW3			0x7003c
#define DSPFW4			0x70050
#define DSPFW5			0x70054
#define DSPFW6			0x70058
#define DSPCHICKENBIT		0x70400
/* Display plane control */
#define DSPACNTR		0x70180
#define DSPBCNTR		0x71180
#define DSPCCNTR		0x72180
#define DISPLAY_PLANE_ENABLE		(1<<31)
#define DISPLAY_PLANE_DISABLE		0
#define DISPPLANE_GAMMA_ENABLE		(1<<30)
#define DISPPLANE_GAMMA_DISABLE		0
#define DISPPLANE_PIXFORMAT_MASK	(0xf<<26)
#define DISPPLANE_8BPP			(0x2<<26)
#define DISPPLANE_15_16BPP		(0x4<<26)
#define DISPPLANE_16BPP			(0x5<<26)
#define DISPPLANE_32BPP_NO_ALPHA	(0x6<<26)
#define DISPPLANE_32BPP			(0x7<<26)
#define DISPPLANE_STEREO_ENABLE		(1<<25)
#define DISPPLANE_STEREO_DISABLE	0
#define DISPPLANE_SEL_PIPE_MASK		(1<<24)
#define DISPPLANE_SEL_PIPE_POS		24
#define DISPPLANE_SEL_PIPE_A		0
#define DISPPLANE_SEL_PIPE_B		(1<<24)
#define DISPPLANE_SRC_KEY_ENABLE	(1<<22)
#define DISPPLANE_SRC_KEY_DISABLE	0
#define DISPPLANE_LINE_DOUBLE		(1<<20)
#define DISPPLANE_NO_LINE_DOUBLE	0
#define DISPPLANE_STEREO_POLARITY_FIRST	0
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
/* plane B only */
#define DISPPLANE_ALPHA_TRANS_ENABLE	(1<<15)
#define DISPPLANE_ALPHA_TRANS_DISABLE	0
#define DISPPLANE_SPRITE_ABOVE_DISPLAYA	0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY	(1)
#define DISPPLANE_BOTTOM		(4)

#define DSPABASE		0x70184
#define DSPALINOFF		0x70184
#define DSPASTRIDE		0x70188

#define DSPBBASE		0x71184
/* was "0X71184" — fixed to the lower-case 0x prefix used everywhere else */
#define DSPBLINOFF		0x71184
#define DSPBADDR		DSPBBASE
#define DSPBSTRIDE		0x71188

#define DSPCBASE		0x72184
#define DSPCLINOFF		0x72184
#define DSPCSTRIDE		0x72188

#define DSPAKEYVAL		0x70194
#define DSPAKEYMASK		0x70198

#define DSPAPOS			0x7018C /* reserved */
#define DSPASIZE		0x70190
#define DSPBPOS			0x7118C
#define DSPBSIZE		0x71190
#define DSPCPOS			0x7218C
#define DSPCSIZE		0x72190

#define DSPASURF		0x7019C
#define DSPATILEOFF		0x701A4

#define DSPBSURF		0x7119C
#define DSPBTILEOFF		0x711A4

#define DSPCSURF		0x7219C
#define DSPCTILEOFF		0x721A4
#define DSPCKEYMAXVAL		0x721A0
#define DSPCKEYMINVAL		0x72194
#define DSPCKEYMSK		0x72198

/* Legacy VGA plane control */
#define VGACNTRL		0x71400
# define VGA_DISP_DISABLE	(1 << 31)
# define VGA_2X_MODE		(1 << 30)
# define VGA_PIPE_B_SELECT	(1 << 29)

/*
 * Overlay registers
 */
#define OV_C_OFFSET		0x08000
#define OV_OVADD		0x30000
#define OV_DOVASTA		0x30008
# define OV_PIPE_SELECT		(BIT6|BIT7)
# define OV_PIPE_SELECT_POS	6
# define OV_PIPE_A		0
# define OV_PIPE_C		1
#define OV_OGAMC5		0x30010
#define OV_OGAMC4		0x30014
#define OV_OGAMC3		0x30018
#define OV_OGAMC2		0x3001C
#define OV_OGAMC1		0x30020
#define OV_OGAMC0		0x30024
#define OVC_OVADD		0x38000
#define OVC_DOVCSTA		0x38008
#define OVC_OGAMC5		0x38010
#define OVC_OGAMC4		0x38014
#define OVC_OGAMC3		0x38018
#define OVC_OGAMC2		0x3801C
#define OVC_OGAMC1		0x38020
#define OVC_OGAMC0		0x38024

/*
 * Some BIOS scratch area registers.  The 845 (and 830?) store the amount
 * of video memory available to the BIOS in SWF1.
 */
#define SWF0			0x71410
#define SWF1			0x71414
#define SWF2			0x71418
#define SWF3			0x7141c
#define SWF4			0x71420
#define SWF5			0x71424
#define SWF6			0x71428

/*
 * 855 scratch registers.
 */
#define SWF00			0x70410
#define SWF01			0x70414
#define SWF02			0x70418
#define SWF03			0x7041c
#define SWF04			0x70420
#define SWF05			0x70424
#define SWF06			0x70428

#define SWF10			SWF0
#define SWF11			SWF1
#define SWF12			SWF2
#define SWF13			SWF3
#define SWF14			SWF4
#define SWF15			SWF5
#define SWF16			SWF6

#define SWF30			0x72414
#define SWF31			0x72418
#define SWF32			0x7241c


/*
 * Palette registers
 */
#define PALETTE_A		0x0a000
#define PALETTE_B		0x0a800
#define PALETTE_C		0x0ac00

/* Chip identification by PCI device ID */
#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)


/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G) */
#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)
#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG)

#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
		       (dev)->pci_device == 0x2982 || \
		       (dev)->pci_device == 0x2992 || \
		       (dev)->pci_device == 0x29A2 || \
		       (dev)->pci_device == 0x2A02 || \
		       (dev)->pci_device == 0x2A12)

#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)

#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
		     (dev)->pci_device == 0x29B2 || \
		     (dev)->pci_device == 0x29D2)

#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
		      IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev) || \
		      IS_MRST(dev))

#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
			IS_I945GM(dev) || IS_I965GM(dev) || \
			IS_POULSBO(dev) || IS_MRST(dev))

/* Cursor A & B regs */
#define CURACNTR		0x70080
#define CURSOR_MODE_DISABLE	0x00
#define CURSOR_MODE_64_32B_AX	0x07
#define CURSOR_MODE_64_ARGB_AX	((1 << 5) | CURSOR_MODE_64_32B_AX)
#define MCURSOR_GAMMA_ENABLE	(1 << 26)
#define CURABASE		0x70084
#define CURAPOS			0x70088
#define CURSOR_POS_MASK		0x007FF
#define CURSOR_POS_SIGN		0x8000
#define CURSOR_X_SHIFT		0
#define CURSOR_Y_SHIFT		16
#define CURBCNTR		0x700c0
#define CURBBASE		0x700c4
#define CURBPOS			0x700c8
#define CURCCNTR		0x700e0
#define CURCBASE		0x700e4
#define CURCPOS			0x700e8

/*
 * Interrupt Registers
 */
#define IER			0x020a0
#define IIR			0x020a4
#define IMR			0x020a8
#define ISR			0x020ac

/*
 * MOORESTOWN delta registers
 */
#define MRST_DPLL_A		0x0f014
#define MDFLD_DPLL_B		0x0f018
#define MDFLD_INPUT_REF_SEL	(1 << 14)
#define MDFLD_VCO_SEL		(1 << 16)
#define DPLLA_MODE_LVDS		(2 << 26)	/* mrst */
#define MDFLD_PLL_LATCHEN	(1 << 28)
#define MDFLD_PWR_GATE_EN	(1 << 30)
#define MDFLD_P1_MASK		(0x1FF << 17)
#define MRST_FPA0		0x0f040
#define MRST_FPA1		0x0f044
#define MDFLD_DPLL_DIV0		0x0f048
#define MDFLD_DPLL_DIV1		0x0f04c
#define MRST_PERF_MODE		0x020f4

/* MEDFIELD HDMI registers */
#define HDMIPHYMISCCTL		0x61134
# define HDMI_PHY_POWER_DOWN	0x7f
/* Medfield HDMI port B control */
#define HDMIB_CONTROL		0x61140
# define HDMIB_PORT_EN		(1 << 31)
# define HDMIB_PIPE_B_SELECT	(1 << 30)
# define HDMIB_NULL_PACKET	(1 << 9)
#define HDMIB_HDCP_PORT	(1 << 5)

/* Moorestown additions to the LVDS control register */
/* #define LVDS 0x61180 */
# define MRST_PANEL_8TO6_DITHER_ENABLE	(1 << 25)
# define MRST_PANEL_24_DOT_1_FORMAT	(1 << 24)
# define LVDS_A3_POWER_UP_0_OUTPUT	(1 << 6)

/* MIPI/DSI display port control */
#define MIPI		0x61190
#define MIPI_C		0x62190
# define MIPI_PORT_EN			(1 << 31)
/** Turns on border drawing to allow centered display. */
# define SEL_FLOPPED_HSTX		(1 << 23)
# define PASS_FROM_SPHY_TO_AFE		(1 << 16)
# define MIPI_BORDER_EN			(1 << 15)
# define MIPIA_3LANE_MIPIC_1LANE	0x1
# define MIPIA_2LANE_MIPIC_2LANE	0x2
# define TE_TRIGGER_DSI_PROTOCOL	(1 << 2)
# define TE_TRIGGER_GPIO_PIN		(1 << 3)
#define MIPI_TE_COUNT	0x61194

/* Moorestown additions to panel power control */
/* #define PP_CONTROL 0x61204 */
# define POWER_DOWN_ON_RESET	(1 << 1)

/* Moorestown additions to panel fitter control */
/* #define PFIT_CONTROL 0x61230 */
# define PFIT_PIPE_SELECT	(3 << 29)
# define PFIT_PIPE_SELECT_SHIFT	(29)

/* Moorestown backlight PWM control fields */
/* #define BLC_PWM_CTL 0x61254 */
#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT	(16)
#define MRST_BACKLIGHT_MODULATION_FREQ_MASK	(0xffff << 16)

/* #define PIPEACONF 0x70008 */
#define PIPEACONF_PIPE_STATE	(1<<30)
/* #define DSPACNTR 0x70180 */

/* Moorestown/Medfield plane surface base addresses */
#define MRST_DSPABASE		0x7019c
#define MRST_DSPBBASE		0x7119c
#define MDFLD_DSPCBASE		0x7219c

/*
 * Moorestown registers.
 */
/*===========================================================================
; General Constants
;--------------------------------------------------------------------------*/
#define BIT0  0x00000001
#define BIT1  0x00000002
#define BIT2  0x00000004
#define BIT3  0x00000008
#define BIT4  0x00000010
#define BIT5  0x00000020
#define BIT6  0x00000040
#define BIT7  0x00000080
#define BIT8  0x00000100
#define BIT9  0x00000200
#define BIT10 0x00000400
#define BIT11 0x00000800
#define BIT12 0x00001000
#define BIT13 0x00002000
#define BIT14 0x00004000
#define BIT15 0x00008000
#define BIT16 0x00010000
#define BIT17 0x00020000
#define BIT18 0x00040000
#define BIT19 0x00080000
#define BIT20 0x00100000
#define BIT21 0x00200000
#define BIT22 0x00400000
#define BIT23 0x00800000
#define BIT24 0x01000000
#define BIT25 0x02000000
#define BIT26 0x04000000
#define BIT27 0x08000000
#define BIT28 0x10000000
#define BIT29 0x20000000
#define BIT30 0x40000000
#define BIT31 0x80000000
/*===========================================================================
; MIPI IP registers
;--------------------------------------------------------------------------*/
#define MIPIC_REG_OFFSET		0x800
#define DEVICE_READY_REG		0xb000
#define LP_OUTPUT_HOLD			BIT16
#define EXIT_ULPS_DEV_READY		0x3
#define LP_OUTPUT_HOLD_RELEASE		0x810000
# define ENTERING_ULPS			(2 << 1)
# define EXITING_ULPS			(1 << 1)
# define ULPS_MASK			(3 << 1)
# define BUS_POSSESSION			(1 << 3)
/* DSI interrupt status bits */
#define INTR_STAT_REG			0xb004
#define RX_SOT_ERROR			BIT0
#define RX_SOT_SYNC_ERROR		BIT1
#define RX_ESCAPE_MODE_ENTRY_ERROR	BIT3
#define RX_LP_TX_SYNC_ERROR		BIT4
#define RX_HS_RECEIVE_TIMEOUT_ERROR	BIT5
#define RX_FALSE_CONTROL_ERROR		BIT6
813 + #define RX_ECC_SINGLE_BIT_ERROR BIT7 814 + #define RX_ECC_MULTI_BIT_ERROR BIT8 815 + #define RX_CHECKSUM_ERROR BIT9 816 + #define RX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT10 817 + #define RX_DSI_VC_ID_INVALID BIT11 818 + #define TX_FALSE_CONTROL_ERROR BIT12 819 + #define TX_ECC_SINGLE_BIT_ERROR BIT13 820 + #define TX_ECC_MULTI_BIT_ERROR BIT14 821 + #define TX_CHECKSUM_ERROR BIT15 822 + #define TX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT16 823 + #define TX_DSI_VC_ID_INVALID BIT17 824 + #define HIGH_CONTENTION BIT18 825 + #define LOW_CONTENTION BIT19 826 + #define DPI_FIFO_UNDER_RUN BIT20 827 + #define HS_TX_TIMEOUT BIT21 828 + #define LP_RX_TIMEOUT BIT22 829 + #define TURN_AROUND_ACK_TIMEOUT BIT23 830 + #define ACK_WITH_NO_ERROR BIT24 831 + #define HS_GENERIC_WR_FIFO_FULL BIT27 832 + #define LP_GENERIC_WR_FIFO_FULL BIT28 833 + #define SPL_PKT_SENT BIT30 834 + #define INTR_EN_REG 0xb008 835 + #define DSI_FUNC_PRG_REG 0xb00c 836 + #define DPI_CHANNEL_NUMBER_POS 0x03 837 + #define DBI_CHANNEL_NUMBER_POS 0x05 838 + #define FMT_DPI_POS 0x07 839 + #define FMT_DBI_POS 0x0A 840 + #define DBI_DATA_WIDTH_POS 0x0D 841 + /* DPI PIXEL FORMATS */ 842 + #define RGB_565_FMT 0x01 /* RGB 565 FORMAT */ 843 + #define RGB_666_FMT 0x02 /* RGB 666 FORMAT */ 844 + #define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED 845 + * 666 FORMAT 846 + */ 847 + #define RGB_888_FMT 0x04 /* RGB 888 FORMAT */ 848 + #define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */ 849 + #define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */ 850 + #define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */ 851 + #define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */ 852 + #define DBI_NOT_SUPPORTED 0x00 /* command mode 853 + * is not supported 854 + */ 855 + #define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */ 856 + #define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */ 857 + #define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */ 858 + #define DBI_DATA_WIDTH_OPT1 0x04 /* option 1 */ 859 + #define DBI_DATA_WIDTH_OPT2 
0x05 /* option 2 */ 860 + #define HS_TX_TIMEOUT_REG 0xb010 861 + #define LP_RX_TIMEOUT_REG 0xb014 862 + #define TURN_AROUND_TIMEOUT_REG 0xb018 863 + #define DEVICE_RESET_REG 0xb01C 864 + #define DPI_RESOLUTION_REG 0xb020 865 + #define RES_V_POS 0x10 866 + #define DBI_RESOLUTION_REG 0xb024 /* Reserved for MDFLD */ 867 + #define HORIZ_SYNC_PAD_COUNT_REG 0xb028 868 + #define HORIZ_BACK_PORCH_COUNT_REG 0xb02C 869 + #define HORIZ_FRONT_PORCH_COUNT_REG 0xb030 870 + #define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034 871 + #define VERT_SYNC_PAD_COUNT_REG 0xb038 872 + #define VERT_BACK_PORCH_COUNT_REG 0xb03c 873 + #define VERT_FRONT_PORCH_COUNT_REG 0xb040 874 + #define HIGH_LOW_SWITCH_COUNT_REG 0xb044 875 + #define DPI_CONTROL_REG 0xb048 876 + #define DPI_SHUT_DOWN BIT0 877 + #define DPI_TURN_ON BIT1 878 + #define DPI_COLOR_MODE_ON BIT2 879 + #define DPI_COLOR_MODE_OFF BIT3 880 + #define DPI_BACK_LIGHT_ON BIT4 881 + #define DPI_BACK_LIGHT_OFF BIT5 882 + #define DPI_LP BIT6 883 + #define DPI_DATA_REG 0xb04c 884 + #define DPI_BACK_LIGHT_ON_DATA 0x07 885 + #define DPI_BACK_LIGHT_OFF_DATA 0x17 886 + #define INIT_COUNT_REG 0xb050 887 + #define MAX_RET_PAK_REG 0xb054 888 + #define VIDEO_FMT_REG 0xb058 889 + #define COMPLETE_LAST_PCKT BIT2 890 + #define EOT_DISABLE_REG 0xb05c 891 + #define ENABLE_CLOCK_STOPPING BIT1 892 + #define LP_BYTECLK_REG 0xb060 893 + #define LP_GEN_DATA_REG 0xb064 894 + #define HS_GEN_DATA_REG 0xb068 895 + #define LP_GEN_CTRL_REG 0xb06C 896 + #define HS_GEN_CTRL_REG 0xb070 897 + #define DCS_CHANNEL_NUMBER_POS 0x06 898 + #define MCS_COMMANDS_POS 0x8 899 + #define WORD_COUNTS_POS 0x8 900 + #define MCS_PARAMETER_POS 0x10 901 + #define GEN_FIFO_STAT_REG 0xb074 902 + #define HS_DATA_FIFO_FULL BIT0 903 + #define HS_DATA_FIFO_HALF_EMPTY BIT1 904 + #define HS_DATA_FIFO_EMPTY BIT2 905 + #define LP_DATA_FIFO_FULL BIT8 906 + #define LP_DATA_FIFO_HALF_EMPTY BIT9 907 + #define LP_DATA_FIFO_EMPTY BIT10 908 + #define HS_CTRL_FIFO_FULL BIT16 909 + #define HS_CTRL_FIFO_HALF_EMPTY 
BIT17
#define HS_CTRL_FIFO_EMPTY BIT18
#define LP_CTRL_FIFO_FULL BIT24
#define LP_CTRL_FIFO_HALF_EMPTY BIT25
#define LP_CTRL_FIFO_EMPTY BIT26
#define DBI_FIFO_EMPTY BIT27
#define DPI_FIFO_EMPTY BIT28
#define HS_LS_DBI_ENABLE_REG 0xb078
#define TXCLKESC_REG 0xb07c
#define DPHY_PARAM_REG 0xb080
#define DBI_BW_CTRL_REG 0xb084
#define CLK_LANE_SWT_REG 0xb088
/*===========================================================================
; MIPI Adapter registers
;--------------------------------------------------------------------------*/
#define MIPI_CONTROL_REG 0xb104
#define MIPI_2X_CLOCK_BITS (BIT0 | BIT1)
#define MIPI_DATA_ADDRESS_REG 0xb108
#define MIPI_DATA_LENGTH_REG 0xb10C
#define MIPI_COMMAND_ADDRESS_REG 0xb110
#define MIPI_COMMAND_LENGTH_REG 0xb114
#define MIPI_READ_DATA_RETURN_REG0 0xb118
#define MIPI_READ_DATA_RETURN_REG1 0xb11C
#define MIPI_READ_DATA_RETURN_REG2 0xb120
#define MIPI_READ_DATA_RETURN_REG3 0xb124
#define MIPI_READ_DATA_RETURN_REG4 0xb128
#define MIPI_READ_DATA_RETURN_REG5 0xb12C
#define MIPI_READ_DATA_RETURN_REG6 0xb130
#define MIPI_READ_DATA_RETURN_REG7 0xb134
#define MIPI_READ_DATA_VALID_REG 0xb138

/* DBI COMMANDS (MIPI DCS opcodes sent over the command interface) */
#define soft_reset 0x01			/* software reset; regs -> defaults */
#define get_power_mode 0x0a		/* read current power mode */
#define get_address_mode 0x0b		/* read current address mode / status */
#define get_pixel_format 0x0c		/* read interface RGB pixel format */
#define get_display_mode 0x0d		/* read display image mode status */
#define get_signal_mode 0x0e		/* read display signal mode */
#define get_diagnostic_result 0x0f	/* self-diagnostic result after Sleep Out */
#define enter_sleep_mode 0x10		/* lowest power mode; only i/f comms stay up */
#define exit_sleep_mode 0x11		/* leave sleep; all blocks re-enabled */
#define enter_partial_mode 0x12		/* window given by set_partial_area */
#define enter_normal_mode 0x13		/* partial and scroll modes both off */
#define exit_invert_mode 0x20		/* stop inverting on panel; frame mem kept */
#define enter_invert_mode 0x21		/* invert on panel only; frame mem kept */
#define set_gamma_curve 0x26		/* select one of four fixed gamma curves */
#define set_display_off 0x28		/* blank panel; frame memory unchanged */
#define set_display_on 0x29		/* unblank panel; frame memory unchanged */
#define set_column_address 0x2a		/* column extent for memory read/write */
#define set_page_addr 0x2b		/* page extent for memory read/write */
#define write_mem_start 0x2c		/* begin pixel transfer at set col/page addr */
#define set_partial_area 0x30		/* partial-mode start row (SR) / end row (ER) */
#define set_scroll_area 0x33		/* vertical scrolling area */
#define set_tear_off 0x34		/* disable Tearing Effect (TE) output */
#define set_tear_on 0x35		/* enable Tearing Effect (TE) output */
#define set_address_mode 0x36		/* data order: B[7:5],B3 host->mem; B[2:0],B4 mem->panel */
#define set_scroll_start 0x37		/* Vertical Scroll Pointer (first line of area) */
#define exit_idle_mode 0x38
#define enter_idle_mode 0x39		/* reduced colour: MSB of each of R,G,B only */
#define set_pixel_format 0x3a		/* D[6:4]=DPI format, D[2:0]=DBI format */
/*
 * NOTE(review): the "bbp" spellings below duplicate the DCS_PIXEL_FORMAT_xBPP
 * values defined further down; "bbp" looks like a typo for "bpp".  Both sets
 * are kept so existing users keep compiling -- consolidate in a cleanup pass.
 */
#define DCS_PIXEL_FORMAT_3bbp 0x1
#define DCS_PIXEL_FORMAT_8bbp 0x2
#define DCS_PIXEL_FORMAT_12bbp 0x3
#define DCS_PIXEL_FORMAT_16bbp 0x5
#define DCS_PIXEL_FORMAT_18bbp 0x6
#define DCS_PIXEL_FORMAT_24bbp 0x7
#define write_mem_cont 0x3c		/* continue pixel transfer after write_mem_start */
#define set_tear_scanline 0x44		/* raise TE when scanline N is reached */
#define get_scanline 0x45		/* current scanline (undefined while asleep) */

/* MCS or Generic COMMANDS */
/* MCS/generic data type */
#define GEN_SHORT_WRITE_0 0x03	/* generic short write, no parameters */
#define GEN_SHORT_WRITE_1 0x13	/* generic short write, 1 parameters */
#define GEN_SHORT_WRITE_2 0x23	/* generic short write, 2 parameters */
#define GEN_READ_0 0x04		/* generic read, no parameters */
#define GEN_READ_1 0x14		/* generic read, 1 parameters */
#define GEN_READ_2 0x24		/* generic read, 2 parameters */
#define GEN_LONG_WRITE 0x29	/* generic long write */
#define MCS_SHORT_WRITE_0 0x05	/* MCS short write, no parameters */
#define MCS_SHORT_WRITE_1 0x15	/* MCS short write, 1 parameters */
#define MCS_READ 0x06		/* MCS read, no parameters */
#define MCS_LONG_WRITE 0x39	/* MCS long write */
/* MCS/generic commands */
/*****TPO MCS**********/
#define write_display_profile 0x50
#define write_display_brightness 0x51
#define write_ctrl_display 0x53
#define write_ctrl_cabc 0x55
#define UI_IMAGE 0x01
#define STILL_IMAGE 0x02
#define MOVING_IMAGE 0x03
#define write_hysteresis 0x57
#define write_gamma_setting 0x58
#define write_cabc_min_bright 0x5e
#define write_kbbc_profile 0x60
/*****TMD MCS**************/
#define tmd_write_display_brightness 0x8c

/*
 * Bits controlling ambient light sensing, panel backlight brightness and
 * gamma settings (used with the write_ctrl_* MCS commands above).
 */
#define BRIGHT_CNTL_BLOCK_ON BIT5
#define AMBIENT_LIGHT_SENSE_ON BIT4
#define DISPLAY_DIMMING_ON BIT3
#define BACKLIGHT_ON BIT2
#define DISPLAY_BRIGHTNESS_AUTO BIT1
#define GAMMA_AUTO BIT0

/* DCS Interface Pixel Formats */
#define DCS_PIXEL_FORMAT_3BPP 0x1
#define DCS_PIXEL_FORMAT_8BPP 0x2
#define DCS_PIXEL_FORMAT_12BPP 0x3
#define DCS_PIXEL_FORMAT_16BPP 0x5
#define DCS_PIXEL_FORMAT_18BPP 0x6
#define DCS_PIXEL_FORMAT_24BPP 0x7
/* ONE PARAMETER READ DATA */
#define addr_mode_data 0xfc
#define diag_res_data 0x00
#define disp_mode_data 0x23
#define pxl_fmt_data 0x77
#define pwr_mode_data 0x74
#define sig_mode_data 0x00
/* TWO PARAMETERS READ DATA */
#define scanline_data1 0xff
#define scanline_data2 0xff
#define NON_BURST_MODE_SYNC_PULSE 0x01	/* Non Burst Mode with Sync Pulse */
#define NON_BURST_MODE_SYNC_EVENTS 0x02	/* Non Burst Mode with Sync events */
#define BURST_MODE 0x03			/* Burst Mode */
#define DBI_COMMAND_BUFFER_SIZE 0x240	/* at least 0x100 bytes, 32-byte aligned */
#define DBI_DATA_BUFFER_SIZE 0x120	/* at least 0x100 bytes, 32-byte aligned */
#define DBI_CB_TIME_OUT 0xFFFF
#define GEN_FB_TIME_OUT 2000
#define ALIGNMENT_32BYTE_MASK (~(BIT0|BIT1|BIT2|BIT3|BIT4))
#define SKU_83 0x01
#define SKU_100 0x02
#define SKU_100L 0x04
#define SKU_BYPASS 0x08

#endif
+1298
drivers/staging/gma500/psb_intel_sdvo.c
/*
 * Copyright (c) 2006-2007 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/delay.h>
/* #include <drm/drm_crtc.h> */
#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_sdvo_regs.h"

/* Per-connector private state for one SDVO encoder (SDVOB or SDVOC). */
struct psb_intel_sdvo_priv {
	struct psb_intel_i2c_chan *i2c_bus;	/* control bus to the SDVO chip */
	int slaveaddr;				/* i2c slave address of the chip */
	int output_device;			/* SDVOB or SDVOC register id */

	u16 active_outputs;			/* bitmask of outputs currently driven */

	struct psb_intel_sdvo_caps caps;
	int pixel_clock_min, pixel_clock_max;	/* input clock range, in kHz */

	/* state saved for restore across suspend / mode changes */
	int save_sdvo_mult;
	u16 save_active_outputs;
	struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
	struct psb_intel_sdvo_dtd save_output_dtd[16];
	u32 save_SDVOX;
	u8 in_out_map[4];	/* cached args of the last SET_IN_OUT_MAP cmd */

	u8 by_input_wiring;	/* SDVO[BC]_IN[01] wiring mask */
	u32 active_device;	/* SDVO_DEVICE_* currently in use */
};

/**
 * Writes the SDVOB or SDVOC with the given value, but always writes both
 * SDVOB and SDVOC to work around apparent hardware issues (according to
 * comments in the BIOS).
 */
void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output,
				u32 val)
{
	struct drm_device *dev = psb_intel_output->base.dev;
	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
	u32 bval = val, cval = val;
	int i;

	/* Keep the current value of whichever register we are NOT targeting */
	if (sdvo_priv->output_device == SDVOB)
		cval = REG_READ(SDVOC);
	else
		bval = REG_READ(SDVOB);
	/*
	 * Write the registers twice for luck. Sometimes,
	 * writing them only once doesn't appear to 'stick'.
	 * The BIOS does this too. Yay, magic
	 */
	for (i = 0; i < 2; i++) {
		REG_WRITE(SDVOB, bval);
		REG_READ(SDVOB);	/* posting read to flush the write */
		REG_WRITE(SDVOC, cval);
		REG_READ(SDVOC);
	}
}

/* Read one byte from SDVO register @addr over i2c; returns true on success. */
static bool psb_intel_sdvo_read_byte(
		struct psb_intel_output *psb_intel_output,
		u8 addr, u8 *ch)
{
	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
	u8 out_buf[2];
	u8 buf[2];
	int ret;

	/* write the register index, then read one byte back */
	struct i2c_msg msgs[] = {
		{
			.addr = sdvo_priv->i2c_bus->slave_addr,
			.flags = 0,
			.len = 1,
			.buf = out_buf,
		},
		{
			.addr = sdvo_priv->i2c_bus->slave_addr,
			.flags = I2C_M_RD,
			.len = 1,
			.buf = buf,
		}
	};

	out_buf[0] = addr;
	out_buf[1] = 0;

	ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2);
	if (ret == 2) {
		/* DRM_DEBUG("got back from addr %02X = %02x\n",
		 * out_buf[0], buf[0]);
		 */
		*ch = buf[0];
		return true;
	}

	DRM_DEBUG("i2c transfer returned %d\n", ret);
	return false;
}

/* Write byte @ch to SDVO register @addr over i2c; returns true on success. */
static bool psb_intel_sdvo_write_byte(
		struct psb_intel_output *psb_intel_output,
		int addr, u8 ch)
{
	u8 out_buf[2];
	struct i2c_msg msgs[] = {
		{
			.addr = psb_intel_output->i2c_bus->slave_addr,
			.flags = 0,
			.len = 2,
			.buf = out_buf,
		}
	};

	out_buf[0] = addr;
	out_buf[1] = ch;

	if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1)
		return true;
	return false;
}

#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
/** Mapping of command numbers to names, for debug output */
static const struct _sdvo_cmd_name {
	u8 cmd;
	char *name;
} sdvo_cmd_names[] = {
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
	SDVO_CMD_NAME_ENTRY
	    (SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
	/* NOTE(review): duplicate of the PART1 entry above -- harmless for
	 * name lookup, but probably meant to be a different command. */
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
	SDVO_CMD_NAME_ENTRY
	    (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
	SDVO_CMD_NAME_ENTRY
	    (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
	SDVO_CMD_NAME_ENTRY
	    (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
	SDVO_CMD_NAME_ENTRY
	    (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
	SDVO_CMD_NAME_ENTRY
	    (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
	SDVO_CMD_NAME_ENTRY
	    (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
	SDVO_CMD_NAME_ENTRY
	    (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),};

#define SDVO_NAME(dev_priv) \
	 ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
#define SDVO_PRIV(output)   ((struct psb_intel_sdvo_priv *) (output)->dev_priv)

/*
 * Issue an SDVO command: the argument bytes are written to SDVO_I2C_ARG_0
 * downwards, then writing the opcode byte triggers execution on the chip.
 * A human-readable trace of every command is printed first.
 */
static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output,
				     u8 cmd,
				     void *args,
				     int args_len)
{
	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
	int i;

	if (1) {		/* always-on debug dump of the command */
		DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
		for (i = 0; i < args_len; i++)
			printk(KERN_INFO"%02X ", ((u8 *) args)[i]);
		for (; i < 8; i++)
			printk("    ");
		/* look the opcode up in the debug name table */
		for (i = 0;
		     i <
		     sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]);
		     i++) {
			if (cmd == sdvo_cmd_names[i].cmd) {
				printk("(%s)", sdvo_cmd_names[i].name);
				break;
			}
		}
		if (i ==
		    sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]))
			printk("(%02X)", cmd);
		printk("\n");
	}

	for (i = 0; i < args_len; i++) {
		psb_intel_sdvo_write_byte(psb_intel_output,
					  SDVO_I2C_ARG_0 - i,
					  ((u8 *) args)[i]);
	}

	/* writing the opcode last kicks off command execution */
	psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd);
}

/* Indexed by the SDVO_CMD_STATUS_* value returned by the chip. */
static const char *const cmd_status_names[] = {
	"Power on",
	"Success",
	"Not supported",
	"Invalid arg",
	"Pending",
	"Target not specified",
	"Scaling not supported"
};

/*
 * Read back an SDVO command's response bytes and status, retrying while the
 * chip reports "Pending" (up to 50 tries, 50ms apart -- worst case ~2.5s of
 * busy-waiting via mdelay).
 */
static u8 psb_intel_sdvo_read_response(
				struct psb_intel_output *psb_intel_output,
				void *response, int response_len)
{
	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
	int i;
	u8 status;
	u8 retry = 50;

	while (retry--) {
		/* Read the command response */
		for (i = 0; i < response_len; i++) {
			psb_intel_sdvo_read_byte(psb_intel_output,
						 SDVO_I2C_RETURN_0 + i,
						 &((u8 *) response)[i]);
		}

		/* read the return status */
		psb_intel_sdvo_read_byte(psb_intel_output,
					 SDVO_I2C_CMD_STATUS,
					 &status);

		if (1) {	/* always-on debug dump of the response */
			DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
			for (i = 0; i < response_len; i++)
				printk(KERN_INFO"%02X ", ((u8 *) response)[i]);
			for (; i < 8; i++)
				printk("    ");
			if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
				printk(KERN_INFO"(%s)",
					 cmd_status_names[status]);
			else
				printk(KERN_INFO"(??? %d)", status);
			printk("\n");
		}

		if (status != SDVO_CMD_STATUS_PENDING)
			return status;

		mdelay(50);
	}

	return status;
}

/* SDVO clock multiplier: 1x above 100MHz, 2x above 50MHz, else 4x. */
int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
{
	if (mode->clock >= 100000)
		return 1;
	else if (mode->clock >= 50000)
		return 2;
	else
		return 4;
}

/**
 * Don't check status code from this as it switches the bus back to the
 * SDVO chips which defeats the purpose of doing a bus switch in the first
 * place.
306 + */ 307 + void psb_intel_sdvo_set_control_bus_switch( 308 + struct psb_intel_output *psb_intel_output, 309 + u8 target) 310 + { 311 + psb_intel_sdvo_write_cmd(psb_intel_output, 312 + SDVO_CMD_SET_CONTROL_BUS_SWITCH, 313 + &target, 314 + 1); 315 + } 316 + 317 + static bool psb_intel_sdvo_set_target_input( 318 + struct psb_intel_output *psb_intel_output, 319 + bool target_0, bool target_1) 320 + { 321 + struct psb_intel_sdvo_set_target_input_args targets = { 0 }; 322 + u8 status; 323 + 324 + if (target_0 && target_1) 325 + return SDVO_CMD_STATUS_NOTSUPP; 326 + 327 + if (target_1) 328 + targets.target_1 = 1; 329 + 330 + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT, 331 + &targets, sizeof(targets)); 332 + 333 + status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); 334 + 335 + return status == SDVO_CMD_STATUS_SUCCESS; 336 + } 337 + 338 + /** 339 + * Return whether each input is trained. 340 + * 341 + * This function is making an assumption about the layout of the response, 342 + * which should be checked against the docs. 
343 + */ 344 + static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output 345 + *psb_intel_output, bool *input_1, 346 + bool *input_2) 347 + { 348 + struct psb_intel_sdvo_get_trained_inputs_response response; 349 + u8 status; 350 + 351 + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS, 352 + NULL, 0); 353 + status = 354 + psb_intel_sdvo_read_response(psb_intel_output, &response, 355 + sizeof(response)); 356 + if (status != SDVO_CMD_STATUS_SUCCESS) 357 + return false; 358 + 359 + *input_1 = response.input0_trained; 360 + *input_2 = response.input1_trained; 361 + return true; 362 + } 363 + 364 + static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output 365 + *psb_intel_output, u16 *outputs) 366 + { 367 + u8 status; 368 + 369 + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, 370 + NULL, 0); 371 + status = 372 + psb_intel_sdvo_read_response(psb_intel_output, outputs, 373 + sizeof(*outputs)); 374 + 375 + return status == SDVO_CMD_STATUS_SUCCESS; 376 + } 377 + 378 + static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output 379 + *psb_intel_output, u16 outputs) 380 + { 381 + u8 status; 382 + 383 + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, 384 + &outputs, sizeof(outputs)); 385 + status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); 386 + return status == SDVO_CMD_STATUS_SUCCESS; 387 + } 388 + 389 + static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output 390 + *psb_intel_output, int mode) 391 + { 392 + u8 status, state = SDVO_ENCODER_STATE_ON; 393 + 394 + switch (mode) { 395 + case DRM_MODE_DPMS_ON: 396 + state = SDVO_ENCODER_STATE_ON; 397 + break; 398 + case DRM_MODE_DPMS_STANDBY: 399 + state = SDVO_ENCODER_STATE_STANDBY; 400 + break; 401 + case DRM_MODE_DPMS_SUSPEND: 402 + state = SDVO_ENCODER_STATE_SUSPEND; 403 + break; 404 + case DRM_MODE_DPMS_OFF: 405 + state = SDVO_ENCODER_STATE_OFF; 406 + break; 407 + } 408 + 409 + 
psb_intel_sdvo_write_cmd(psb_intel_output, 410 + SDVO_CMD_SET_ENCODER_POWER_STATE, &state, 411 + sizeof(state)); 412 + status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); 413 + 414 + return status == SDVO_CMD_STATUS_SUCCESS; 415 + } 416 + 417 + static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output 418 + *psb_intel_output, 419 + int *clock_min, 420 + int *clock_max) 421 + { 422 + struct psb_intel_sdvo_pixel_clock_range clocks; 423 + u8 status; 424 + 425 + psb_intel_sdvo_write_cmd(psb_intel_output, 426 + SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL, 427 + 0); 428 + 429 + status = 430 + psb_intel_sdvo_read_response(psb_intel_output, &clocks, 431 + sizeof(clocks)); 432 + 433 + if (status != SDVO_CMD_STATUS_SUCCESS) 434 + return false; 435 + 436 + /* Convert the values from units of 10 kHz to kHz. */ 437 + *clock_min = clocks.min * 10; 438 + *clock_max = clocks.max * 10; 439 + 440 + return true; 441 + } 442 + 443 + static bool psb_intel_sdvo_set_target_output( 444 + struct psb_intel_output *psb_intel_output, 445 + u16 outputs) 446 + { 447 + u8 status; 448 + 449 + psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT, 450 + &outputs, sizeof(outputs)); 451 + 452 + status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); 453 + return status == SDVO_CMD_STATUS_SUCCESS; 454 + } 455 + 456 + static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output, 457 + u8 cmd, struct psb_intel_sdvo_dtd *dtd) 458 + { 459 + u8 status; 460 + 461 + psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0); 462 + status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1, 463 + sizeof(dtd->part1)); 464 + if (status != SDVO_CMD_STATUS_SUCCESS) 465 + return false; 466 + 467 + psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0); 468 + status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2, 469 + sizeof(dtd->part2)); 470 + if (status != SDVO_CMD_STATUS_SUCCESS) 471 + return false; 472 + 
473 + return true; 474 + } 475 + 476 + static bool psb_intel_sdvo_get_input_timing( 477 + struct psb_intel_output *psb_intel_output, 478 + struct psb_intel_sdvo_dtd *dtd) 479 + { 480 + return psb_intel_sdvo_get_timing(psb_intel_output, 481 + SDVO_CMD_GET_INPUT_TIMINGS_PART1, 482 + dtd); 483 + } 484 + 485 + static bool psb_intel_sdvo_set_timing( 486 + struct psb_intel_output *psb_intel_output, 487 + u8 cmd, 488 + struct psb_intel_sdvo_dtd *dtd) 489 + { 490 + u8 status; 491 + 492 + psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1, 493 + sizeof(dtd->part1)); 494 + status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); 495 + if (status != SDVO_CMD_STATUS_SUCCESS) 496 + return false; 497 + 498 + psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2, 499 + sizeof(dtd->part2)); 500 + status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); 501 + if (status != SDVO_CMD_STATUS_SUCCESS) 502 + return false; 503 + 504 + return true; 505 + } 506 + 507 + static bool psb_intel_sdvo_set_input_timing( 508 + struct psb_intel_output *psb_intel_output, 509 + struct psb_intel_sdvo_dtd *dtd) 510 + { 511 + return psb_intel_sdvo_set_timing(psb_intel_output, 512 + SDVO_CMD_SET_INPUT_TIMINGS_PART1, 513 + dtd); 514 + } 515 + 516 + static bool psb_intel_sdvo_set_output_timing( 517 + struct psb_intel_output *psb_intel_output, 518 + struct psb_intel_sdvo_dtd *dtd) 519 + { 520 + return psb_intel_sdvo_set_timing(psb_intel_output, 521 + SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, 522 + dtd); 523 + } 524 + 525 + static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output 526 + *psb_intel_output) 527 + { 528 + u8 response, status; 529 + 530 + psb_intel_sdvo_write_cmd(psb_intel_output, 531 + SDVO_CMD_GET_CLOCK_RATE_MULT, 532 + NULL, 533 + 0); 534 + 535 + status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1); 536 + 537 + if (status != SDVO_CMD_STATUS_SUCCESS) { 538 + DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n"); 539 + return 
SDVO_CLOCK_RATE_MULT_1X; 540 + } else { 541 + DRM_DEBUG("Current clock rate multiplier: %d\n", response); 542 + } 543 + 544 + return response; 545 + } 546 + 547 + static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output 548 + *psb_intel_output, u8 val) 549 + { 550 + u8 status; 551 + 552 + psb_intel_sdvo_write_cmd(psb_intel_output, 553 + SDVO_CMD_SET_CLOCK_RATE_MULT, 554 + &val, 555 + 1); 556 + 557 + status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); 558 + if (status != SDVO_CMD_STATUS_SUCCESS) 559 + return false; 560 + 561 + return true; 562 + } 563 + 564 + static bool psb_sdvo_set_current_inoutmap(struct psb_intel_output *output, 565 + u32 in0outputmask, 566 + u32 in1outputmask) 567 + { 568 + u8 byArgs[4]; 569 + u8 status; 570 + int i; 571 + struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv; 572 + 573 + /* Make all fields of the args/ret to zero */ 574 + memset(byArgs, 0, sizeof(byArgs)); 575 + 576 + /* Fill up the arguement values; */ 577 + byArgs[0] = (u8) (in0outputmask & 0xFF); 578 + byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF); 579 + byArgs[2] = (u8) (in1outputmask & 0xFF); 580 + byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF); 581 + 582 + 583 + /*save inoutmap arg here*/ 584 + for (i = 0; i < 4; i++) 585 + sdvo_priv->in_out_map[i] = byArgs[0]; 586 + 587 + psb_intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4); 588 + status = psb_intel_sdvo_read_response(output, NULL, 0); 589 + 590 + if (status != SDVO_CMD_STATUS_SUCCESS) 591 + return false; 592 + return true; 593 + } 594 + 595 + 596 + static void psb_intel_sdvo_set_iomap(struct psb_intel_output *output) 597 + { 598 + u32 dwCurrentSDVOIn0 = 0; 599 + u32 dwCurrentSDVOIn1 = 0; 600 + u32 dwDevMask = 0; 601 + 602 + 603 + struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv; 604 + 605 + /* Please DO NOT change the following code. 
 */
	/* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */
	/* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */
	if (sdvo_priv->by_input_wiring & (SDVOB_IN0 | SDVOC_IN0)) {
		switch (sdvo_priv->active_device) {
		case SDVO_DEVICE_LVDS:
			dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
			break;
		case SDVO_DEVICE_TMDS:
			dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
			break;
		case SDVO_DEVICE_TV:
			dwDevMask =
			    SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
			    SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
			    SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
			    SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
			break;
		case SDVO_DEVICE_CRT:
			dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
			break;
		}
		dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask);
	} else if (sdvo_priv->by_input_wiring & (SDVOB_IN1 | SDVOC_IN1)) {
		switch (sdvo_priv->active_device) {
		case SDVO_DEVICE_LVDS:
			dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
			break;
		case SDVO_DEVICE_TMDS:
			dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
			break;
		case SDVO_DEVICE_TV:
			dwDevMask =
			    SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
			    SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
			    SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
			    SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
			break;
		case SDVO_DEVICE_CRT:
			dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
			break;
		}
		dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask);
	}

	psb_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0,
				      dwCurrentSDVOIn1);
}

static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
				      struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	/* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
	 * device will be told of the multiplier during mode_set.
	 */
	adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode);
	return true;
}

/* Program input/output timings (as a DTD) and the clock multiplier for the
 * requested mode, then set up the SDVO control registers. */
static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
	struct psb_intel_output *psb_intel_output =
	    enc_to_psb_intel_output(encoder);
	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
	u16 width, height;
	u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
	u16 h_sync_offset, v_sync_offset;
	u32 sdvox;
	struct psb_intel_sdvo_dtd output_dtd;
	int sdvo_pixel_multiply;

	if (!mode)
		return;

	psb_intel_sdvo_set_target_output(psb_intel_output, 0);

	width = mode->crtc_hdisplay;
	height = mode->crtc_vdisplay;

	/* do some mode translations */
	h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
	h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;

	v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
	v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;

	h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
	v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;

	/* pack the mode into an SDVO DTD (clock in units of 10kHz, the
	 * 8 low bits of each field separate from the packed high nibbles) */
	output_dtd.part1.clock = mode->clock / 10;
	output_dtd.part1.h_active = width & 0xff;
	output_dtd.part1.h_blank = h_blank_len & 0xff;
	output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
	    ((h_blank_len >> 8) & 0xf);
	output_dtd.part1.v_active = height & 0xff;
	output_dtd.part1.v_blank = v_blank_len & 0xff;
	output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
	    ((v_blank_len >> 8) & 0xf);

	output_dtd.part2.h_sync_off = h_sync_offset;
output_dtd.part2.h_sync_width = h_sync_len & 0xff; 713 + output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 | 714 + (v_sync_len & 0xf); 715 + output_dtd.part2.sync_off_width_high = 716 + ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) | 717 + ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4); 718 + 719 + output_dtd.part2.dtd_flags = 0x18; 720 + if (mode->flags & DRM_MODE_FLAG_PHSYNC) 721 + output_dtd.part2.dtd_flags |= 0x2; 722 + if (mode->flags & DRM_MODE_FLAG_PVSYNC) 723 + output_dtd.part2.dtd_flags |= 0x4; 724 + 725 + output_dtd.part2.sdvo_flags = 0; 726 + output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0; 727 + output_dtd.part2.reserved = 0; 728 + 729 + /* Set the output timing to the screen */ 730 + psb_intel_sdvo_set_target_output(psb_intel_output, 731 + sdvo_priv->active_outputs); 732 + 733 + /* Set the input timing to the screen. Assume always input 0. */ 734 + psb_intel_sdvo_set_target_input(psb_intel_output, true, false); 735 + 736 + psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd); 737 + 738 + /* We would like to use i830_sdvo_create_preferred_input_timing() to 739 + * provide the device with a timing it can support, if it supports that 740 + * feature. However, presumably we would need to adjust the CRTC to 741 + * output the preferred timing, and we don't support that currently. 742 + */ 743 + psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd); 744 + 745 + switch (psb_intel_sdvo_get_pixel_multiplier(mode)) { 746 + case 1: 747 + psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, 748 + SDVO_CLOCK_RATE_MULT_1X); 749 + break; 750 + case 2: 751 + psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, 752 + SDVO_CLOCK_RATE_MULT_2X); 753 + break; 754 + case 4: 755 + psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, 756 + SDVO_CLOCK_RATE_MULT_4X); 757 + break; 758 + } 759 + 760 + /* Set the SDVO control regs. 
*/ 761 + sdvox = REG_READ(sdvo_priv->output_device); 762 + switch (sdvo_priv->output_device) { 763 + case SDVOB: 764 + sdvox &= SDVOB_PRESERVE_MASK; 765 + break; 766 + case SDVOC: 767 + sdvox &= SDVOC_PRESERVE_MASK; 768 + break; 769 + } 770 + sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; 771 + if (psb_intel_crtc->pipe == 1) 772 + sdvox |= SDVO_PIPE_B_SELECT; 773 + 774 + sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode); 775 + 776 + psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox); 777 + 778 + psb_intel_sdvo_set_iomap(psb_intel_output); 779 + } 780 + 781 + static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode) 782 + { 783 + struct drm_device *dev = encoder->dev; 784 + struct psb_intel_output *psb_intel_output = 785 + enc_to_psb_intel_output(encoder); 786 + struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; 787 + u32 temp; 788 + 789 + if (mode != DRM_MODE_DPMS_ON) { 790 + psb_intel_sdvo_set_active_outputs(psb_intel_output, 0); 791 + if (0) 792 + psb_intel_sdvo_set_encoder_power_state( 793 + psb_intel_output, 794 + mode); 795 + 796 + if (mode == DRM_MODE_DPMS_OFF) { 797 + temp = REG_READ(sdvo_priv->output_device); 798 + if ((temp & SDVO_ENABLE) != 0) { 799 + psb_intel_sdvo_write_sdvox(psb_intel_output, 800 + temp & 801 + ~SDVO_ENABLE); 802 + } 803 + } 804 + } else { 805 + bool input1, input2; 806 + int i; 807 + u8 status; 808 + 809 + temp = REG_READ(sdvo_priv->output_device); 810 + if ((temp & SDVO_ENABLE) == 0) 811 + psb_intel_sdvo_write_sdvox(psb_intel_output, 812 + temp | SDVO_ENABLE); 813 + for (i = 0; i < 2; i++) 814 + psb_intel_wait_for_vblank(dev); 815 + 816 + status = 817 + psb_intel_sdvo_get_trained_inputs(psb_intel_output, 818 + &input1, 819 + &input2); 820 + 821 + 822 + /* Warn if the device reported failure to sync. 823 + * A lot of SDVO devices fail to notify of sync, but it's 824 + * a given it the status is a success, we succeeded. 
825 + */ 826 + if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { 827 + DRM_DEBUG 828 + ("First %s output reported failure to sync\n", 829 + SDVO_NAME(sdvo_priv)); 830 + } 831 + 832 + if (0) 833 + psb_intel_sdvo_set_encoder_power_state( 834 + psb_intel_output, 835 + mode); 836 + psb_intel_sdvo_set_active_outputs(psb_intel_output, 837 + sdvo_priv->active_outputs); 838 + } 839 + return; 840 + } 841 + 842 + static void psb_intel_sdvo_save(struct drm_connector *connector) 843 + { 844 + struct drm_device *dev = connector->dev; 845 + struct psb_intel_output *psb_intel_output = 846 + to_psb_intel_output(connector); 847 + struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; 848 + /*int o;*/ 849 + 850 + sdvo_priv->save_sdvo_mult = 851 + psb_intel_sdvo_get_clock_rate_mult(psb_intel_output); 852 + psb_intel_sdvo_get_active_outputs(psb_intel_output, 853 + &sdvo_priv->save_active_outputs); 854 + 855 + if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { 856 + psb_intel_sdvo_set_target_input(psb_intel_output, 857 + true, 858 + false); 859 + psb_intel_sdvo_get_input_timing(psb_intel_output, 860 + &sdvo_priv->save_input_dtd_1); 861 + } 862 + 863 + if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { 864 + psb_intel_sdvo_set_target_input(psb_intel_output, 865 + false, 866 + true); 867 + psb_intel_sdvo_get_input_timing(psb_intel_output, 868 + &sdvo_priv->save_input_dtd_2); 869 + } 870 + sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device); 871 + 872 + /*TODO: save the in_out_map state*/ 873 + } 874 + 875 + static void psb_intel_sdvo_restore(struct drm_connector *connector) 876 + { 877 + struct drm_device *dev = connector->dev; 878 + struct psb_intel_output *psb_intel_output = 879 + to_psb_intel_output(connector); 880 + struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; 881 + /*int o;*/ 882 + int i; 883 + bool input1, input2; 884 + u8 status; 885 + 886 + psb_intel_sdvo_set_active_outputs(psb_intel_output, 0); 887 + 888 + if (sdvo_priv->caps.sdvo_inputs_mask & 
0x1) { 889 + psb_intel_sdvo_set_target_input(psb_intel_output, true, false); 890 + psb_intel_sdvo_set_input_timing(psb_intel_output, 891 + &sdvo_priv->save_input_dtd_1); 892 + } 893 + 894 + if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { 895 + psb_intel_sdvo_set_target_input(psb_intel_output, false, true); 896 + psb_intel_sdvo_set_input_timing(psb_intel_output, 897 + &sdvo_priv->save_input_dtd_2); 898 + } 899 + 900 + psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, 901 + sdvo_priv->save_sdvo_mult); 902 + 903 + REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX); 904 + 905 + if (sdvo_priv->save_SDVOX & SDVO_ENABLE) { 906 + for (i = 0; i < 2; i++) 907 + psb_intel_wait_for_vblank(dev); 908 + status = 909 + psb_intel_sdvo_get_trained_inputs(psb_intel_output, 910 + &input1, 911 + &input2); 912 + if (status == SDVO_CMD_STATUS_SUCCESS && !input1) 913 + DRM_DEBUG 914 + ("First %s output reported failure to sync\n", 915 + SDVO_NAME(sdvo_priv)); 916 + } 917 + 918 + psb_intel_sdvo_set_active_outputs(psb_intel_output, 919 + sdvo_priv->save_active_outputs); 920 + 921 + /*TODO: restore in_out_map*/ 922 + psb_intel_sdvo_write_cmd(psb_intel_output, 923 + SDVO_CMD_SET_IN_OUT_MAP, 924 + sdvo_priv->in_out_map, 925 + 4); 926 + 927 + psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); 928 + } 929 + 930 + static int psb_intel_sdvo_mode_valid(struct drm_connector *connector, 931 + struct drm_display_mode *mode) 932 + { 933 + struct psb_intel_output *psb_intel_output = 934 + to_psb_intel_output(connector); 935 + struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; 936 + 937 + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 938 + return MODE_NO_DBLESCAN; 939 + 940 + if (sdvo_priv->pixel_clock_min > mode->clock) 941 + return MODE_CLOCK_LOW; 942 + 943 + if (sdvo_priv->pixel_clock_max < mode->clock) 944 + return MODE_CLOCK_HIGH; 945 + 946 + return MODE_OK; 947 + } 948 + 949 + static bool psb_intel_sdvo_get_capabilities( 950 + struct psb_intel_output *psb_intel_output, 
951 + struct psb_intel_sdvo_caps *caps) 952 + { 953 + u8 status; 954 + 955 + psb_intel_sdvo_write_cmd(psb_intel_output, 956 + SDVO_CMD_GET_DEVICE_CAPS, 957 + NULL, 958 + 0); 959 + status = psb_intel_sdvo_read_response(psb_intel_output, 960 + caps, 961 + sizeof(*caps)); 962 + if (status != SDVO_CMD_STATUS_SUCCESS) 963 + return false; 964 + 965 + return true; 966 + } 967 + 968 + struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB) 969 + { 970 + struct drm_connector *connector = NULL; 971 + struct psb_intel_output *iout = NULL; 972 + struct psb_intel_sdvo_priv *sdvo; 973 + 974 + /* find the sdvo connector */ 975 + list_for_each_entry(connector, &dev->mode_config.connector_list, 976 + head) { 977 + iout = to_psb_intel_output(connector); 978 + 979 + if (iout->type != INTEL_OUTPUT_SDVO) 980 + continue; 981 + 982 + sdvo = iout->dev_priv; 983 + 984 + if (sdvo->output_device == SDVOB && sdvoB) 985 + return connector; 986 + 987 + if (sdvo->output_device == SDVOC && !sdvoB) 988 + return connector; 989 + 990 + } 991 + 992 + return NULL; 993 + } 994 + 995 + int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector) 996 + { 997 + u8 response[2]; 998 + u8 status; 999 + struct psb_intel_output *psb_intel_output; 1000 + DRM_DEBUG("\n"); 1001 + 1002 + if (!connector) 1003 + return 0; 1004 + 1005 + psb_intel_output = to_psb_intel_output(connector); 1006 + 1007 + psb_intel_sdvo_write_cmd(psb_intel_output, 1008 + SDVO_CMD_GET_HOT_PLUG_SUPPORT, 1009 + NULL, 1010 + 0); 1011 + status = psb_intel_sdvo_read_response(psb_intel_output, 1012 + &response, 1013 + 2); 1014 + 1015 + if (response[0] != 0) 1016 + return 1; 1017 + 1018 + return 0; 1019 + } 1020 + 1021 + void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on) 1022 + { 1023 + u8 response[2]; 1024 + u8 status; 1025 + struct psb_intel_output *psb_intel_output = 1026 + to_psb_intel_output(connector); 1027 + 1028 + psb_intel_sdvo_write_cmd(psb_intel_output, 1029 + 
SDVO_CMD_GET_ACTIVE_HOT_PLUG, 1030 + NULL, 1031 + 0); 1032 + psb_intel_sdvo_read_response(psb_intel_output, &response, 2); 1033 + 1034 + if (on) { 1035 + psb_intel_sdvo_write_cmd(psb_intel_output, 1036 + SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 1037 + 0); 1038 + status = psb_intel_sdvo_read_response(psb_intel_output, 1039 + &response, 1040 + 2); 1041 + 1042 + psb_intel_sdvo_write_cmd(psb_intel_output, 1043 + SDVO_CMD_SET_ACTIVE_HOT_PLUG, 1044 + &response, 2); 1045 + } else { 1046 + response[0] = 0; 1047 + response[1] = 0; 1048 + psb_intel_sdvo_write_cmd(psb_intel_output, 1049 + SDVO_CMD_SET_ACTIVE_HOT_PLUG, 1050 + &response, 2); 1051 + } 1052 + 1053 + psb_intel_sdvo_write_cmd(psb_intel_output, 1054 + SDVO_CMD_GET_ACTIVE_HOT_PLUG, 1055 + NULL, 1056 + 0); 1057 + psb_intel_sdvo_read_response(psb_intel_output, &response, 2); 1058 + } 1059 + 1060 + static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector 1061 + *connector, bool force) 1062 + { 1063 + u8 response[2]; 1064 + u8 status; 1065 + struct psb_intel_output *psb_intel_output = 1066 + to_psb_intel_output(connector); 1067 + 1068 + psb_intel_sdvo_write_cmd(psb_intel_output, 1069 + SDVO_CMD_GET_ATTACHED_DISPLAYS, 1070 + NULL, 1071 + 0); 1072 + status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2); 1073 + 1074 + DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); 1075 + if ((response[0] != 0) || (response[1] != 0)) 1076 + return connector_status_connected; 1077 + else 1078 + return connector_status_disconnected; 1079 + } 1080 + 1081 + static int psb_intel_sdvo_get_modes(struct drm_connector *connector) 1082 + { 1083 + struct psb_intel_output *psb_intel_output = 1084 + to_psb_intel_output(connector); 1085 + 1086 + /* set the bus switch and get the modes */ 1087 + psb_intel_sdvo_set_control_bus_switch(psb_intel_output, 1088 + SDVO_CONTROL_BUS_DDC2); 1089 + psb_intel_ddc_get_modes(psb_intel_output); 1090 + 1091 + if (list_empty(&connector->probed_modes)) 1092 + return 0; 1093 + 
return 1; 1094 + } 1095 + 1096 + static void psb_intel_sdvo_destroy(struct drm_connector *connector) 1097 + { 1098 + struct psb_intel_output *psb_intel_output = 1099 + to_psb_intel_output(connector); 1100 + 1101 + if (psb_intel_output->i2c_bus) 1102 + psb_intel_i2c_destroy(psb_intel_output->i2c_bus); 1103 + drm_sysfs_connector_remove(connector); 1104 + drm_connector_cleanup(connector); 1105 + kfree(psb_intel_output); 1106 + } 1107 + 1108 + static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = { 1109 + .dpms = psb_intel_sdvo_dpms, 1110 + .mode_fixup = psb_intel_sdvo_mode_fixup, 1111 + .prepare = psb_intel_encoder_prepare, 1112 + .mode_set = psb_intel_sdvo_mode_set, 1113 + .commit = psb_intel_encoder_commit, 1114 + }; 1115 + 1116 + static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = { 1117 + .dpms = drm_helper_connector_dpms, 1118 + .save = psb_intel_sdvo_save, 1119 + .restore = psb_intel_sdvo_restore, 1120 + .detect = psb_intel_sdvo_detect, 1121 + .fill_modes = drm_helper_probe_single_connector_modes, 1122 + .destroy = psb_intel_sdvo_destroy, 1123 + }; 1124 + 1125 + static const struct drm_connector_helper_funcs 1126 + psb_intel_sdvo_connector_helper_funcs = { 1127 + .get_modes = psb_intel_sdvo_get_modes, 1128 + .mode_valid = psb_intel_sdvo_mode_valid, 1129 + .best_encoder = psb_intel_best_encoder, 1130 + }; 1131 + 1132 + void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder) 1133 + { 1134 + drm_encoder_cleanup(encoder); 1135 + } 1136 + 1137 + static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = { 1138 + .destroy = psb_intel_sdvo_enc_destroy, 1139 + }; 1140 + 1141 + 1142 + void psb_intel_sdvo_init(struct drm_device *dev, int output_device) 1143 + { 1144 + struct drm_connector *connector; 1145 + struct psb_intel_output *psb_intel_output; 1146 + struct psb_intel_sdvo_priv *sdvo_priv; 1147 + struct psb_intel_i2c_chan *i2cbus = NULL; 1148 + int connector_type; 1149 + u8 ch[0x40]; 1150 + int i; 1151 + int 
encoder_type, output_id; 1152 + 1153 + psb_intel_output = 1154 + kcalloc(sizeof(struct psb_intel_output) + 1155 + sizeof(struct psb_intel_sdvo_priv), 1, GFP_KERNEL); 1156 + if (!psb_intel_output) 1157 + return; 1158 + 1159 + connector = &psb_intel_output->base; 1160 + 1161 + drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs, 1162 + DRM_MODE_CONNECTOR_Unknown); 1163 + drm_connector_helper_add(connector, 1164 + &psb_intel_sdvo_connector_helper_funcs); 1165 + sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1); 1166 + psb_intel_output->type = INTEL_OUTPUT_SDVO; 1167 + 1168 + connector->interlace_allowed = 0; 1169 + connector->doublescan_allowed = 0; 1170 + 1171 + /* setup the DDC bus. */ 1172 + if (output_device == SDVOB) 1173 + i2cbus = 1174 + psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); 1175 + else 1176 + i2cbus = 1177 + psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); 1178 + 1179 + if (!i2cbus) 1180 + goto err_connector; 1181 + 1182 + sdvo_priv->i2c_bus = i2cbus; 1183 + 1184 + if (output_device == SDVOB) { 1185 + output_id = 1; 1186 + sdvo_priv->by_input_wiring = SDVOB_IN0; 1187 + sdvo_priv->i2c_bus->slave_addr = 0x38; 1188 + } else { 1189 + output_id = 2; 1190 + sdvo_priv->i2c_bus->slave_addr = 0x39; 1191 + } 1192 + 1193 + sdvo_priv->output_device = output_device; 1194 + psb_intel_output->i2c_bus = i2cbus; 1195 + psb_intel_output->dev_priv = sdvo_priv; 1196 + 1197 + 1198 + /* Read the regs to test if we can talk to the device */ 1199 + for (i = 0; i < 0x40; i++) { 1200 + if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) { 1201 + DRM_DEBUG("No SDVO device found on SDVO%c\n", 1202 + output_device == SDVOB ? 'B' : 'C'); 1203 + goto err_i2c; 1204 + } 1205 + } 1206 + 1207 + psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps); 1208 + 1209 + memset(&sdvo_priv->active_outputs, 0, 1210 + sizeof(sdvo_priv->active_outputs)); 1211 + 1212 + /* TODO, CVBS, SVID, YPRPB & SCART outputs. 
*/ 1213 + if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) { 1214 + sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0; 1215 + sdvo_priv->active_device = SDVO_DEVICE_CRT; 1216 + connector->display_info.subpixel_order = 1217 + SubPixelHorizontalRGB; 1218 + encoder_type = DRM_MODE_ENCODER_DAC; 1219 + connector_type = DRM_MODE_CONNECTOR_VGA; 1220 + } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) { 1221 + sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1; 1222 + sdvo_priv->active_outputs = SDVO_DEVICE_CRT; 1223 + connector->display_info.subpixel_order = 1224 + SubPixelHorizontalRGB; 1225 + encoder_type = DRM_MODE_ENCODER_DAC; 1226 + connector_type = DRM_MODE_CONNECTOR_VGA; 1227 + } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) { 1228 + sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0; 1229 + sdvo_priv->active_device = SDVO_DEVICE_TMDS; 1230 + connector->display_info.subpixel_order = 1231 + SubPixelHorizontalRGB; 1232 + encoder_type = DRM_MODE_ENCODER_TMDS; 1233 + connector_type = DRM_MODE_CONNECTOR_DVID; 1234 + } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) { 1235 + sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1; 1236 + sdvo_priv->active_device = SDVO_DEVICE_TMDS; 1237 + connector->display_info.subpixel_order = 1238 + SubPixelHorizontalRGB; 1239 + encoder_type = DRM_MODE_ENCODER_TMDS; 1240 + connector_type = DRM_MODE_CONNECTOR_DVID; 1241 + } else { 1242 + unsigned char bytes[2]; 1243 + 1244 + memcpy(bytes, &sdvo_priv->caps.output_flags, 2); 1245 + DRM_DEBUG 1246 + ("%s: No active RGB or TMDS outputs (0x%02x%02x)\n", 1247 + SDVO_NAME(sdvo_priv), bytes[0], bytes[1]); 1248 + goto err_i2c; 1249 + } 1250 + 1251 + drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs, 1252 + encoder_type); 1253 + drm_encoder_helper_add(&psb_intel_output->enc, 1254 + &psb_intel_sdvo_helper_funcs); 1255 + connector->connector_type = connector_type; 1256 + 1257 + drm_mode_connector_attach_encoder(&psb_intel_output->base, 1258 + 
&psb_intel_output->enc); 1259 + drm_sysfs_connector_add(connector); 1260 + 1261 + /* Set the input timing to the screen. Assume always input 0. */ 1262 + psb_intel_sdvo_set_target_input(psb_intel_output, true, false); 1263 + 1264 + psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output, 1265 + &sdvo_priv->pixel_clock_min, 1266 + &sdvo_priv-> 1267 + pixel_clock_max); 1268 + 1269 + 1270 + DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, " 1271 + "clock range %dMHz - %dMHz, " 1272 + "input 1: %c, input 2: %c, " 1273 + "output 1: %c, output 2: %c\n", 1274 + SDVO_NAME(sdvo_priv), 1275 + sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id, 1276 + sdvo_priv->caps.device_rev_id, 1277 + sdvo_priv->pixel_clock_min / 1000, 1278 + sdvo_priv->pixel_clock_max / 1000, 1279 + (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', 1280 + (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', 1281 + /* check currently supported outputs */ 1282 + sdvo_priv->caps.output_flags & 1283 + (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', 1284 + sdvo_priv->caps.output_flags & 1285 + (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); 1286 + 1287 + psb_intel_output->ddc_bus = i2cbus; 1288 + 1289 + return; 1290 + 1291 + err_i2c: 1292 + psb_intel_i2c_destroy(psb_intel_output->i2c_bus); 1293 + err_connector: 1294 + drm_connector_cleanup(connector); 1295 + kfree(psb_intel_output); 1296 + 1297 + return; 1298 + }
+338
drivers/staging/gma500/psb_intel_sdvo_regs.h
··· 1 + /* 2 + * SDVO command definitions and structures. 3 + * 4 + * Copyright (c) 2008, Intel Corporation 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along with 16 + * this program; if not, write to the Free Software Foundation, Inc., 17 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 + * 19 + * Authors: 20 + * Eric Anholt <eric@anholt.net> 21 + */ 22 + 23 + #define SDVO_OUTPUT_FIRST (0) 24 + #define SDVO_OUTPUT_TMDS0 (1 << 0) 25 + #define SDVO_OUTPUT_RGB0 (1 << 1) 26 + #define SDVO_OUTPUT_CVBS0 (1 << 2) 27 + #define SDVO_OUTPUT_SVID0 (1 << 3) 28 + #define SDVO_OUTPUT_YPRPB0 (1 << 4) 29 + #define SDVO_OUTPUT_SCART0 (1 << 5) 30 + #define SDVO_OUTPUT_LVDS0 (1 << 6) 31 + #define SDVO_OUTPUT_TMDS1 (1 << 8) 32 + #define SDVO_OUTPUT_RGB1 (1 << 9) 33 + #define SDVO_OUTPUT_CVBS1 (1 << 10) 34 + #define SDVO_OUTPUT_SVID1 (1 << 11) 35 + #define SDVO_OUTPUT_YPRPB1 (1 << 12) 36 + #define SDVO_OUTPUT_SCART1 (1 << 13) 37 + #define SDVO_OUTPUT_LVDS1 (1 << 14) 38 + #define SDVO_OUTPUT_LAST (14) 39 + 40 + struct psb_intel_sdvo_caps { 41 + u8 vendor_id; 42 + u8 device_id; 43 + u8 device_rev_id; 44 + u8 sdvo_version_major; 45 + u8 sdvo_version_minor; 46 + unsigned int sdvo_inputs_mask:2; 47 + unsigned int smooth_scaling:1; 48 + unsigned int sharp_scaling:1; 49 + unsigned int up_scaling:1; 50 + unsigned int down_scaling:1; 51 + unsigned int stall_support:1; 52 + unsigned int pad:1; 53 + u16 output_flags; 54 + } __attribute__ ((packed)); 55 + 56 + /** This 
matches the EDID DTD structure, more or less */ 57 + struct psb_intel_sdvo_dtd { 58 + struct { 59 + u16 clock; /**< pixel clock, in 10kHz units */ 60 + u8 h_active; /**< lower 8 bits (pixels) */ 61 + u8 h_blank; /**< lower 8 bits (pixels) */ 62 + u8 h_high; /**< upper 4 bits each h_active, h_blank */ 63 + u8 v_active; /**< lower 8 bits (lines) */ 64 + u8 v_blank; /**< lower 8 bits (lines) */ 65 + u8 v_high; /**< upper 4 bits each v_active, v_blank */ 66 + } part1; 67 + 68 + struct { 69 + u8 h_sync_off; 70 + /**< lower 8 bits, from hblank start */ 71 + u8 h_sync_width;/**< lower 8 bits (pixels) */ 72 + /** lower 4 bits each vsync offset, vsync width */ 73 + u8 v_sync_off_width; 74 + /** 75 + * 2 high bits of hsync offset, 2 high bits of hsync width, 76 + * bits 4-5 of vsync offset, and 2 high bits of vsync width. 77 + */ 78 + u8 sync_off_width_high; 79 + u8 dtd_flags; 80 + u8 sdvo_flags; 81 + /** bits 6-7 of vsync offset at bits 6-7 */ 82 + u8 v_sync_off_high; 83 + u8 reserved; 84 + } part2; 85 + } __attribute__ ((packed)); 86 + 87 + struct psb_intel_sdvo_pixel_clock_range { 88 + u16 min; /**< pixel clock, in 10kHz units */ 89 + u16 max; /**< pixel clock, in 10kHz units */ 90 + } __attribute__ ((packed)); 91 + 92 + struct psb_intel_sdvo_preferred_input_timing_args { 93 + u16 clock; 94 + u16 width; 95 + u16 height; 96 + } __attribute__ ((packed)); 97 + 98 + /* I2C registers for SDVO */ 99 + #define SDVO_I2C_ARG_0 0x07 100 + #define SDVO_I2C_ARG_1 0x06 101 + #define SDVO_I2C_ARG_2 0x05 102 + #define SDVO_I2C_ARG_3 0x04 103 + #define SDVO_I2C_ARG_4 0x03 104 + #define SDVO_I2C_ARG_5 0x02 105 + #define SDVO_I2C_ARG_6 0x01 106 + #define SDVO_I2C_ARG_7 0x00 107 + #define SDVO_I2C_OPCODE 0x08 108 + #define SDVO_I2C_CMD_STATUS 0x09 109 + #define SDVO_I2C_RETURN_0 0x0a 110 + #define SDVO_I2C_RETURN_1 0x0b 111 + #define SDVO_I2C_RETURN_2 0x0c 112 + #define SDVO_I2C_RETURN_3 0x0d 113 + #define SDVO_I2C_RETURN_4 0x0e 114 + #define SDVO_I2C_RETURN_5 0x0f 115 + #define 
SDVO_I2C_RETURN_6 0x10 116 + #define SDVO_I2C_RETURN_7 0x11 117 + #define SDVO_I2C_VENDOR_BEGIN 0x20 118 + 119 + /* Status results */ 120 + #define SDVO_CMD_STATUS_POWER_ON 0x0 121 + #define SDVO_CMD_STATUS_SUCCESS 0x1 122 + #define SDVO_CMD_STATUS_NOTSUPP 0x2 123 + #define SDVO_CMD_STATUS_INVALID_ARG 0x3 124 + #define SDVO_CMD_STATUS_PENDING 0x4 125 + #define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5 126 + #define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6 127 + 128 + /* SDVO commands, argument/result registers */ 129 + 130 + #define SDVO_CMD_RESET 0x01 131 + 132 + /** Returns a struct psb_intel_sdvo_caps */ 133 + #define SDVO_CMD_GET_DEVICE_CAPS 0x02 134 + 135 + #define SDVO_CMD_GET_FIRMWARE_REV 0x86 136 + # define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0 137 + # define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1 138 + # define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2 139 + 140 + /** 141 + * Reports which inputs are trained (managed to sync). 142 + * 143 + * Devices must have trained within 2 vsyncs of a mode change. 144 + */ 145 + #define SDVO_CMD_GET_TRAINED_INPUTS 0x03 146 + struct psb_intel_sdvo_get_trained_inputs_response { 147 + unsigned int input0_trained:1; 148 + unsigned int input1_trained:1; 149 + unsigned int pad:6; 150 + } __attribute__ ((packed)); 151 + 152 + /** Returns a struct psb_intel_sdvo_output_flags of active outputs. */ 153 + #define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04 154 + 155 + /** 156 + * Sets the current set of active outputs. 157 + * 158 + * Takes a struct psb_intel_sdvo_output_flags. 159 + * Must be preceded by a SET_IN_OUT_MAP 160 + * on multi-output devices. 161 + */ 162 + #define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05 163 + 164 + /** 165 + * Returns the current mapping of SDVO inputs to outputs on the device. 166 + * 167 + * Returns two struct psb_intel_sdvo_output_flags structures. 168 + */ 169 + #define SDVO_CMD_GET_IN_OUT_MAP 0x06 170 + 171 + /** 172 + * Sets the current mapping of SDVO inputs to outputs on the device. 
173 + * 174 + * Takes two struct i380_sdvo_output_flags structures. 175 + */ 176 + #define SDVO_CMD_SET_IN_OUT_MAP 0x07 177 + 178 + /** 179 + * Returns a struct psb_intel_sdvo_output_flags of attached displays. 180 + */ 181 + #define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b 182 + 183 + /** 184 + * Returns a struct psb_intel_sdvo_ouptut_flags of displays supporting hot plugging. 185 + */ 186 + #define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c 187 + 188 + /** 189 + * Takes a struct psb_intel_sdvo_output_flags. 190 + */ 191 + #define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d 192 + 193 + /** 194 + * Returns a struct psb_intel_sdvo_output_flags of displays with hot plug 195 + * interrupts enabled. 196 + */ 197 + #define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e 198 + 199 + #define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f 200 + struct psb_intel_sdvo_get_interrupt_event_source_response { 201 + u16 interrupt_status; 202 + unsigned int ambient_light_interrupt:1; 203 + unsigned int pad:7; 204 + } __attribute__ ((packed)); 205 + 206 + /** 207 + * Selects which input is affected by future input commands. 208 + * 209 + * Commands affected include SET_INPUT_TIMINGS_PART[12], 210 + * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12], 211 + * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS. 212 + */ 213 + #define SDVO_CMD_SET_TARGET_INPUT 0x10 214 + struct psb_intel_sdvo_set_target_input_args { 215 + unsigned int target_1:1; 216 + unsigned int pad:7; 217 + } __attribute__ ((packed)); 218 + 219 + /** 220 + * Takes a struct psb_intel_sdvo_output_flags of which outputs are targetted by 221 + * future output commands. 222 + * 223 + * Affected commands inclue SET_OUTPUT_TIMINGS_PART[12], 224 + * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE. 
225 + */ 226 + #define SDVO_CMD_SET_TARGET_OUTPUT 0x11 227 + 228 + #define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12 229 + #define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13 230 + #define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14 231 + #define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15 232 + #define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16 233 + #define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17 234 + #define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18 235 + #define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19 236 + /* Part 1 */ 237 + # define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0 238 + # define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1 239 + # define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2 240 + # define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3 241 + # define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4 242 + # define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5 243 + # define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6 244 + # define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7 245 + /* Part 2 */ 246 + # define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0 247 + # define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1 248 + # define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2 249 + # define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3 250 + # define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4 251 + # define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7) 252 + # define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5) 253 + # define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3) 254 + # define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1) 255 + # define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5 256 + # define SDVO_DTD_SDVO_FLAG_STALL (1 << 7) 257 + # define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6) 258 + # define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6) 259 + # define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4) 260 + # define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4) 261 + # define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4) 262 + # define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4) 263 + # define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6 264 + 265 + /** 266 + * Generates a DTD based on the given width, height, and flags. 
267 + * 268 + * This will be supported by any device supporting scaling or interlaced 269 + * modes. 270 + */ 271 + #define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a 272 + # define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0 273 + # define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1 274 + # define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2 275 + # define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3 276 + # define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4 277 + # define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5 278 + # define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6 279 + # define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0) 280 + # define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1) 281 + 282 + #define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b 283 + #define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c 284 + 285 + /** Returns a struct psb_intel_sdvo_pixel_clock_range */ 286 + #define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d 287 + /** Returns a struct psb_intel_sdvo_pixel_clock_range */ 288 + #define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e 289 + 290 + /** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */ 291 + #define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f 292 + 293 + /** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ 294 + #define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20 295 + /** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ 296 + #define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21 297 + # define SDVO_CLOCK_RATE_MULT_1X (1 << 0) 298 + # define SDVO_CLOCK_RATE_MULT_2X (1 << 1) 299 + # define SDVO_CLOCK_RATE_MULT_4X (1 << 3) 300 + 301 + #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 302 + 303 + #define SDVO_CMD_GET_TV_FORMAT 0x28 304 + 305 + #define SDVO_CMD_SET_TV_FORMAT 0x29 306 + 307 + #define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a 308 + #define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b 309 + #define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c 310 
+ # define SDVO_ENCODER_STATE_ON (1 << 0) 311 + # define SDVO_ENCODER_STATE_STANDBY (1 << 1) 312 + # define SDVO_ENCODER_STATE_SUSPEND (1 << 2) 313 + # define SDVO_ENCODER_STATE_OFF (1 << 3) 314 + 315 + #define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93 316 + 317 + #define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a 318 + # define SDVO_CONTROL_BUS_PROM 0x0 319 + # define SDVO_CONTROL_BUS_DDC1 0x1 320 + # define SDVO_CONTROL_BUS_DDC2 0x2 321 + # define SDVO_CONTROL_BUS_DDC3 0x3 322 + 323 + /* SDVO Bus & SDVO Inputs wiring details*/ 324 + /* Bit 0: Is SDVOB connected to In0 (1 = yes, 0 = no*/ 325 + /* Bit 1: Is SDVOB connected to In1 (1 = yes, 0 = no*/ 326 + /* Bit 2: Is SDVOC connected to In0 (1 = yes, 0 = no*/ 327 + /* Bit 3: Is SDVOC connected to In1 (1 = yes, 0 = no*/ 328 + #define SDVOB_IN0 0x01 329 + #define SDVOB_IN1 0x02 330 + #define SDVOC_IN0 0x04 331 + #define SDVOC_IN1 0x08 332 + 333 + #define SDVO_DEVICE_NONE 0x00 334 + #define SDVO_DEVICE_CRT 0x01 335 + #define SDVO_DEVICE_TV 0x02 336 + #define SDVO_DEVICE_LVDS 0x04 337 + #define SDVO_DEVICE_TMDS 0x08 338 +
+637
drivers/staging/gma500/psb_irq.c
··· 1 + /************************************************************************** 2 + * Copyright (c) 2007, Intel Corporation. 3 + * All Rights Reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms and conditions of the GNU General Public License, 7 + * version 2, as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope it will be useful, but WITHOUT 10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 + * more details. 13 + * 14 + * You should have received a copy of the GNU General Public License along with 15 + * this program; if not, write to the Free Software Foundation, Inc., 16 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 + * 18 + * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to 19 + * develop this driver. 20 + * 21 + **************************************************************************/ 22 + /* 23 + */ 24 + 25 + #include <drm/drmP.h> 26 + #include "psb_drv.h" 27 + #include "psb_reg.h" 28 + #include "psb_intel_reg.h" 29 + #include "psb_powermgmt.h" 30 + 31 + 32 + /* 33 + * inline functions 34 + */ 35 + 36 + static inline u32 37 + psb_pipestat(int pipe) 38 + { 39 + if (pipe == 0) 40 + return PIPEASTAT; 41 + if (pipe == 1) 42 + return PIPEBSTAT; 43 + if (pipe == 2) 44 + return PIPECSTAT; 45 + BUG(); 46 + } 47 + 48 + static inline u32 49 + mid_pipe_event(int pipe) 50 + { 51 + if (pipe == 0) 52 + return _PSB_PIPEA_EVENT_FLAG; 53 + if (pipe == 1) 54 + return _MDFLD_PIPEB_EVENT_FLAG; 55 + if (pipe == 2) 56 + return _MDFLD_PIPEC_EVENT_FLAG; 57 + BUG(); 58 + } 59 + 60 + static inline u32 61 + mid_pipe_vsync(int pipe) 62 + { 63 + if (pipe == 0) 64 + return _PSB_VSYNC_PIPEA_FLAG; 65 + if (pipe == 1) 66 + return _PSB_VSYNC_PIPEB_FLAG; 67 + if (pipe == 2) 68 + return _MDFLD_PIPEC_VBLANK_FLAG; 69 + BUG(); 70 + } 71 + 
72 + static inline u32 73 + mid_pipeconf(int pipe) 74 + { 75 + if (pipe == 0) 76 + return PIPEACONF; 77 + if (pipe == 1) 78 + return PIPEBCONF; 79 + if (pipe == 2) 80 + return PIPECCONF; 81 + BUG(); 82 + } 83 + 84 + void 85 + psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask) 86 + { 87 + if ((dev_priv->pipestat[pipe] & mask) != mask) { 88 + u32 reg = psb_pipestat(pipe); 89 + dev_priv->pipestat[pipe] |= mask; 90 + /* Enable the interrupt, clear any pending status */ 91 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 92 + OSPM_UHB_ONLY_IF_ON)) { 93 + u32 writeVal = PSB_RVDC32(reg); 94 + writeVal |= (mask | (mask >> 16)); 95 + PSB_WVDC32(writeVal, reg); 96 + (void) PSB_RVDC32(reg); 97 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 98 + } 99 + } 100 + } 101 + 102 + void 103 + psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask) 104 + { 105 + if ((dev_priv->pipestat[pipe] & mask) != 0) { 106 + u32 reg = psb_pipestat(pipe); 107 + dev_priv->pipestat[pipe] &= ~mask; 108 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 109 + OSPM_UHB_ONLY_IF_ON)) { 110 + u32 writeVal = PSB_RVDC32(reg); 111 + writeVal &= ~mask; 112 + PSB_WVDC32(writeVal, reg); 113 + (void) PSB_RVDC32(reg); 114 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 115 + } 116 + } 117 + } 118 + 119 + void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe) 120 + { 121 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 122 + OSPM_UHB_ONLY_IF_ON)) { 123 + u32 pipe_event = mid_pipe_event(pipe); 124 + dev_priv->vdc_irq_mask |= pipe_event; 125 + PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R); 126 + PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); 127 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 128 + } 129 + } 130 + 131 + void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe) 132 + { 133 + if (dev_priv->pipestat[pipe] == 0) { 134 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 135 + OSPM_UHB_ONLY_IF_ON)) { 136 + u32 
pipe_event = mid_pipe_event(pipe); 137 + dev_priv->vdc_irq_mask &= ~pipe_event; 138 + PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R); 139 + PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); 140 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 141 + } 142 + } 143 + } 144 + 145 + /** 146 + * Display controller interrupt handler for vsync/vblank. 147 + * 148 + */ 149 + static void mid_vblank_handler(struct drm_device *dev, uint32_t pipe) 150 + { 151 + drm_handle_vblank(dev, pipe); 152 + } 153 + 154 + 155 + /** 156 + * Display controller interrupt handler for pipe event. 157 + * 158 + */ 159 + #define WAIT_STATUS_CLEAR_LOOP_COUNT 0xffff 160 + static void mid_pipe_event_handler(struct drm_device *dev, uint32_t pipe) 161 + { 162 + struct drm_psb_private *dev_priv = 163 + (struct drm_psb_private *) dev->dev_private; 164 + 165 + uint32_t pipe_stat_val = 0; 166 + uint32_t pipe_stat_reg = psb_pipestat(pipe); 167 + uint32_t pipe_enable = dev_priv->pipestat[pipe]; 168 + uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16; 169 + uint32_t i = 0; 170 + 171 + spin_lock(&dev_priv->irqmask_lock); 172 + 173 + pipe_stat_val = PSB_RVDC32(pipe_stat_reg); 174 + pipe_stat_val &= pipe_enable | pipe_status; 175 + pipe_stat_val &= pipe_stat_val >> 16; 176 + 177 + spin_unlock(&dev_priv->irqmask_lock); 178 + 179 + /* clear the 2nd level interrupt status bits */ 180 + /** 181 + * FIXME: shouldn't use while loop here. However, the interrupt 182 + * status 'sticky' bits cannot be cleared by setting '1' to that 183 + * bit once... 
184 + */ 185 + for (i = 0; i < WAIT_STATUS_CLEAR_LOOP_COUNT; i++) { 186 + PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg); 187 + (void) PSB_RVDC32(pipe_stat_reg); 188 + 189 + if ((PSB_RVDC32(pipe_stat_reg) & pipe_status) == 0) 190 + break; 191 + } 192 + 193 + if (i == WAIT_STATUS_CLEAR_LOOP_COUNT) 194 + DRM_ERROR("%s, can't clear the status bits in pipe_stat_reg, its value = 0x%x.\n", 195 + __func__, PSB_RVDC32(pipe_stat_reg)); 196 + 197 + if (pipe_stat_val & PIPE_VBLANK_STATUS) 198 + mid_vblank_handler(dev, pipe); 199 + 200 + if (pipe_stat_val & PIPE_TE_STATUS) 201 + drm_handle_vblank(dev, pipe); 202 + } 203 + 204 + /* 205 + * Display controller interrupt handler. 206 + */ 207 + static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat) 208 + { 209 + if (vdc_stat & _PSB_PIPEA_EVENT_FLAG) 210 + mid_pipe_event_handler(dev, 0); 211 + } 212 + 213 + irqreturn_t psb_irq_handler(DRM_IRQ_ARGS) 214 + { 215 + struct drm_device *dev = (struct drm_device *) arg; 216 + struct drm_psb_private *dev_priv = 217 + (struct drm_psb_private *) dev->dev_private; 218 + 219 + uint32_t vdc_stat, dsp_int = 0, sgx_int = 0; 220 + int handled = 0; 221 + 222 + spin_lock(&dev_priv->irqmask_lock); 223 + 224 + vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R); 225 + 226 + if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG) { 227 + PSB_DEBUG_IRQ("Got DISP interrupt\n"); 228 + dsp_int = 1; 229 + } 230 + 231 + if (vdc_stat & _PSB_IRQ_SGX_FLAG) { 232 + PSB_DEBUG_IRQ("Got SGX interrupt\n"); 233 + sgx_int = 1; 234 + } 235 + if (vdc_stat & _PSB_IRQ_MSVDX_FLAG) 236 + PSB_DEBUG_IRQ("Got MSVDX interrupt\n"); 237 + 238 + if (vdc_stat & _LNC_IRQ_TOPAZ_FLAG) 239 + PSB_DEBUG_IRQ("Got TOPAZ interrupt\n"); 240 + 241 + 242 + vdc_stat &= dev_priv->vdc_irq_mask; 243 + spin_unlock(&dev_priv->irqmask_lock); 244 + 245 + if (dsp_int && ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) { 246 + psb_vdc_interrupt(dev, vdc_stat); 247 + handled = 1; 248 + } 249 + 250 + if (sgx_int) { 251 + /* Not expected - we have it masked, shut 
it up */ 252 + u32 s, s2; 253 + s = PSB_RSGX32(PSB_CR_EVENT_STATUS); 254 + s2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2); 255 + PSB_WSGX32(s, PSB_CR_EVENT_HOST_CLEAR); 256 + PSB_WSGX32(s2, PSB_CR_EVENT_HOST_CLEAR2); 257 + /* if s & _PSB_CE_TWOD_COMPLETE we have 2D done but 258 + we may as well poll even if we add that ! */ 259 + } 260 + 261 + PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R); 262 + (void) PSB_RVDC32(PSB_INT_IDENTITY_R); 263 + DRM_READMEMORYBARRIER(); 264 + 265 + if (!handled) 266 + return IRQ_NONE; 267 + 268 + return IRQ_HANDLED; 269 + } 270 + 271 + void psb_irq_preinstall(struct drm_device *dev) 272 + { 273 + psb_irq_preinstall_islands(dev, OSPM_ALL_ISLANDS); 274 + } 275 + 276 + /** 277 + * FIXME: should I remove display irq enable here?? 278 + */ 279 + void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands) 280 + { 281 + struct drm_psb_private *dev_priv = 282 + (struct drm_psb_private *) dev->dev_private; 283 + unsigned long irqflags; 284 + 285 + PSB_DEBUG_ENTRY("\n"); 286 + 287 + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); 288 + 289 + if (hw_islands & OSPM_DISPLAY_ISLAND) { 290 + if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) { 291 + PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 292 + if (dev->vblank_enabled[0]) 293 + dev_priv->vdc_irq_mask |= 294 + _PSB_PIPEA_EVENT_FLAG; 295 + if (dev->vblank_enabled[1]) 296 + dev_priv->vdc_irq_mask |= 297 + _MDFLD_PIPEB_EVENT_FLAG; 298 + if (dev->vblank_enabled[2]) 299 + dev_priv->vdc_irq_mask |= 300 + _MDFLD_PIPEC_EVENT_FLAG; 301 + } 302 + } 303 + if (hw_islands & OSPM_GRAPHICS_ISLAND) 304 + dev_priv->vdc_irq_mask |= _PSB_IRQ_SGX_FLAG; 305 + 306 + /*This register is safe even if display island is off*/ 307 + PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R); 308 + 309 + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 310 + } 311 + 312 + int psb_irq_postinstall(struct drm_device *dev) 313 + { 314 + return psb_irq_postinstall_islands(dev, OSPM_ALL_ISLANDS); 315 + } 316 + 317 + int 
psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands) 318 + { 319 + 320 + struct drm_psb_private *dev_priv = 321 + (struct drm_psb_private *) dev->dev_private; 322 + unsigned long irqflags; 323 + 324 + PSB_DEBUG_ENTRY("\n"); 325 + 326 + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); 327 + 328 + /*This register is safe even if display island is off*/ 329 + PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); 330 + 331 + if (hw_islands & OSPM_DISPLAY_ISLAND) { 332 + if (true/*powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)*/) { 333 + PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 334 + 335 + if (dev->vblank_enabled[0]) 336 + psb_enable_pipestat(dev_priv, 0, 337 + PIPE_VBLANK_INTERRUPT_ENABLE); 338 + else 339 + psb_disable_pipestat(dev_priv, 0, 340 + PIPE_VBLANK_INTERRUPT_ENABLE); 341 + 342 + if (dev->vblank_enabled[1]) 343 + psb_enable_pipestat(dev_priv, 1, 344 + PIPE_VBLANK_INTERRUPT_ENABLE); 345 + else 346 + psb_disable_pipestat(dev_priv, 1, 347 + PIPE_VBLANK_INTERRUPT_ENABLE); 348 + 349 + if (dev->vblank_enabled[2]) 350 + psb_enable_pipestat(dev_priv, 2, 351 + PIPE_VBLANK_INTERRUPT_ENABLE); 352 + else 353 + psb_disable_pipestat(dev_priv, 2, 354 + PIPE_VBLANK_INTERRUPT_ENABLE); 355 + } 356 + } 357 + 358 + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 359 + 360 + return 0; 361 + } 362 + 363 + void psb_irq_uninstall(struct drm_device *dev) 364 + { 365 + psb_irq_uninstall_islands(dev, OSPM_ALL_ISLANDS); 366 + } 367 + 368 + void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands) 369 + { 370 + struct drm_psb_private *dev_priv = 371 + (struct drm_psb_private *) dev->dev_private; 372 + unsigned long irqflags; 373 + 374 + PSB_DEBUG_ENTRY("\n"); 375 + 376 + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); 377 + 378 + if (hw_islands & OSPM_DISPLAY_ISLAND) { 379 + if (true/*powermgmt_is_hw_on(dev->pdev, PSB_DISPLAY_ISLAND)*/) { 380 + PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 381 + 382 + if (dev->vblank_enabled[0]) 383 + 
psb_disable_pipestat(dev_priv, 0, 384 + PIPE_VBLANK_INTERRUPT_ENABLE); 385 + 386 + if (dev->vblank_enabled[1]) 387 + psb_disable_pipestat(dev_priv, 1, 388 + PIPE_VBLANK_INTERRUPT_ENABLE); 389 + 390 + if (dev->vblank_enabled[2]) 391 + psb_disable_pipestat(dev_priv, 2, 392 + PIPE_VBLANK_INTERRUPT_ENABLE); 393 + } 394 + dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG | 395 + _PSB_IRQ_MSVDX_FLAG | 396 + _LNC_IRQ_TOPAZ_FLAG; 397 + } 398 + /*TODO: remove following code*/ 399 + if (hw_islands & OSPM_GRAPHICS_ISLAND) 400 + dev_priv->vdc_irq_mask &= ~_PSB_IRQ_SGX_FLAG; 401 + 402 + /*These two registers are safe even if display island is off*/ 403 + PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R); 404 + PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); 405 + 406 + wmb(); 407 + 408 + /*This register is safe even if display island is off*/ 409 + PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R); 410 + 411 + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 412 + } 413 + 414 + void psb_irq_turn_on_dpst(struct drm_device *dev) 415 + { 416 + struct drm_psb_private *dev_priv = 417 + (struct drm_psb_private *) dev->dev_private; 418 + u32 hist_reg; 419 + u32 pwm_reg; 420 + 421 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 422 + OSPM_UHB_ONLY_IF_ON)) { 423 + PSB_WVDC32(BIT31, HISTOGRAM_LOGIC_CONTROL); 424 + hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL); 425 + PSB_WVDC32(BIT31, HISTOGRAM_INT_CONTROL); 426 + hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL); 427 + 428 + PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC); 429 + pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC); 430 + PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE 431 + | PWM_PHASEIN_INT_ENABLE, 432 + PWM_CONTROL_LOGIC); 433 + pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC); 434 + 435 + psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE); 436 + 437 + hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL); 438 + PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR, 439 + HISTOGRAM_INT_CONTROL); 440 + pwm_reg = 
PSB_RVDC32(PWM_CONTROL_LOGIC); 441 + PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE, 442 + PWM_CONTROL_LOGIC); 443 + 444 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 445 + } 446 + } 447 + 448 + int psb_irq_enable_dpst(struct drm_device *dev) 449 + { 450 + struct drm_psb_private *dev_priv = 451 + (struct drm_psb_private *) dev->dev_private; 452 + unsigned long irqflags; 453 + 454 + PSB_DEBUG_ENTRY("\n"); 455 + 456 + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); 457 + 458 + /* enable DPST */ 459 + mid_enable_pipe_event(dev_priv, 0); 460 + psb_irq_turn_on_dpst(dev); 461 + 462 + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 463 + return 0; 464 + } 465 + 466 + void psb_irq_turn_off_dpst(struct drm_device *dev) 467 + { 468 + struct drm_psb_private *dev_priv = 469 + (struct drm_psb_private *) dev->dev_private; 470 + u32 hist_reg; 471 + u32 pwm_reg; 472 + 473 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 474 + OSPM_UHB_ONLY_IF_ON)) { 475 + PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL); 476 + hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL); 477 + 478 + psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE); 479 + 480 + pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC); 481 + PSB_WVDC32(pwm_reg & !(PWM_PHASEIN_INT_ENABLE), 482 + PWM_CONTROL_LOGIC); 483 + pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC); 484 + 485 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 486 + } 487 + } 488 + 489 + int psb_irq_disable_dpst(struct drm_device *dev) 490 + { 491 + struct drm_psb_private *dev_priv = 492 + (struct drm_psb_private *) dev->dev_private; 493 + unsigned long irqflags; 494 + 495 + PSB_DEBUG_ENTRY("\n"); 496 + 497 + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); 498 + 499 + mid_disable_pipe_event(dev_priv, 0); 500 + psb_irq_turn_off_dpst(dev); 501 + 502 + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 503 + 504 + return 0; 505 + } 506 + 507 + #ifdef PSB_FIXME 508 + static int psb_vblank_do_wait(struct drm_device *dev, 509 + unsigned int 
*sequence, atomic_t *counter) 510 + { 511 + unsigned int cur_vblank; 512 + int ret = 0; 513 + DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 514 + (((cur_vblank = atomic_read(counter)) 515 + - *sequence) <= (1 << 23))); 516 + *sequence = cur_vblank; 517 + 518 + return ret; 519 + } 520 + #endif 521 + 522 + /* 523 + * It is used to enable VBLANK interrupt 524 + */ 525 + int psb_enable_vblank(struct drm_device *dev, int pipe) 526 + { 527 + struct drm_psb_private *dev_priv = 528 + (struct drm_psb_private *) dev->dev_private; 529 + unsigned long irqflags; 530 + uint32_t reg_val = 0; 531 + uint32_t pipeconf_reg = mid_pipeconf(pipe); 532 + 533 + PSB_DEBUG_ENTRY("\n"); 534 + 535 + if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, 536 + OSPM_UHB_ONLY_IF_ON)) { 537 + reg_val = REG_READ(pipeconf_reg); 538 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 539 + } 540 + 541 + if (!(reg_val & PIPEACONF_ENABLE)) 542 + return -EINVAL; 543 + 544 + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); 545 + 546 + drm_psb_disable_vsync = 0; 547 + mid_enable_pipe_event(dev_priv, pipe); 548 + psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE); 549 + 550 + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 551 + 552 + return 0; 553 + } 554 + 555 + /* 556 + * It is used to disable VBLANK interrupt 557 + */ 558 + void psb_disable_vblank(struct drm_device *dev, int pipe) 559 + { 560 + struct drm_psb_private *dev_priv = 561 + (struct drm_psb_private *) dev->dev_private; 562 + unsigned long irqflags; 563 + 564 + PSB_DEBUG_ENTRY("\n"); 565 + 566 + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); 567 + 568 + drm_psb_disable_vsync = 1; 569 + mid_disable_pipe_event(dev_priv, pipe); 570 + psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE); 571 + 572 + spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); 573 + } 574 + 575 + /* Called from drm generic code, passed a 'crtc', which 576 + * we use as a pipe index 577 + */ 578 + u32 
psb_get_vblank_counter(struct drm_device *dev, int pipe) 579 + { 580 + uint32_t high_frame = PIPEAFRAMEHIGH; 581 + uint32_t low_frame = PIPEAFRAMEPIXEL; 582 + uint32_t pipeconf_reg = PIPEACONF; 583 + uint32_t reg_val = 0; 584 + uint32_t high1 = 0, high2 = 0, low = 0, count = 0; 585 + 586 + switch (pipe) { 587 + case 0: 588 + break; 589 + case 1: 590 + high_frame = PIPEBFRAMEHIGH; 591 + low_frame = PIPEBFRAMEPIXEL; 592 + pipeconf_reg = PIPEBCONF; 593 + break; 594 + case 2: 595 + high_frame = PIPECFRAMEHIGH; 596 + low_frame = PIPECFRAMEPIXEL; 597 + pipeconf_reg = PIPECCONF; 598 + break; 599 + default: 600 + DRM_ERROR("%s, invalded pipe.\n", __func__); 601 + return 0; 602 + } 603 + 604 + if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, false)) 605 + return 0; 606 + 607 + reg_val = REG_READ(pipeconf_reg); 608 + 609 + if (!(reg_val & PIPEACONF_ENABLE)) { 610 + DRM_ERROR("trying to get vblank count for disabled pipe %d\n", 611 + pipe); 612 + goto psb_get_vblank_counter_exit; 613 + } 614 + 615 + /* 616 + * High & low register fields aren't synchronized, so make sure 617 + * we get a low value that's stable across two reads of the high 618 + * register. 619 + */ 620 + do { 621 + high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> 622 + PIPE_FRAME_HIGH_SHIFT); 623 + low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >> 624 + PIPE_FRAME_LOW_SHIFT); 625 + high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> 626 + PIPE_FRAME_HIGH_SHIFT); 627 + } while (high1 != high2); 628 + 629 + count = (high1 << 8) | low; 630 + 631 + psb_get_vblank_counter_exit: 632 + 633 + ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); 634 + 635 + return count; 636 + } 637 +
+49
drivers/staging/gma500/psb_irq.h
··· 1 + /************************************************************************** 2 + * Copyright (c) 2009, Intel Corporation. 3 + * All Rights Reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms and conditions of the GNU General Public License, 7 + * version 2, as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope it will be useful, but WITHOUT 10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 + * more details. 13 + * 14 + * You should have received a copy of the GNU General Public License along with 15 + * this program; if not, write to the Free Software Foundation, Inc., 16 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 + * 18 + * Authors: 19 + * Benjamin Defnet <benjamin.r.defnet@intel.com> 20 + * Rajesh Poornachandran <rajesh.poornachandran@intel.com> 21 + * 22 + **************************************************************************/ 23 + 24 + #ifndef _SYSIRQ_H_ 25 + #define _SYSIRQ_H_ 26 + 27 + #include <drm/drmP.h> 28 + 29 + bool sysirq_init(struct drm_device *dev); 30 + void sysirq_uninit(struct drm_device *dev); 31 + 32 + void psb_irq_preinstall(struct drm_device *dev); 33 + int psb_irq_postinstall(struct drm_device *dev); 34 + void psb_irq_uninstall(struct drm_device *dev); 35 + irqreturn_t psb_irq_handler(DRM_IRQ_ARGS); 36 + 37 + void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands); 38 + int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands); 39 + void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands); 40 + 41 + int psb_irq_enable_dpst(struct drm_device *dev); 42 + int psb_irq_disable_dpst(struct drm_device *dev); 43 + void psb_irq_turn_on_dpst(struct drm_device *dev); 44 + void psb_irq_turn_off_dpst(struct drm_device *dev); 45 + int psb_enable_vblank(struct 
drm_device *dev, int pipe); 46 + void psb_disable_vblank(struct drm_device *dev, int pipe); 47 + u32 psb_get_vblank_counter(struct drm_device *dev, int pipe); 48 + 49 + #endif //_SYSIRQ_H_
+919
drivers/staging/gma500/psb_mmu.c
··· 1 + /************************************************************************** 2 + * Copyright (c) 2007, Intel Corporation. 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms and conditions of the GNU General Public License, 6 + * version 2, as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope it will be useful, but WITHOUT 9 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 + * more details. 12 + * 13 + * You should have received a copy of the GNU General Public License along with 14 + * this program; if not, write to the Free Software Foundation, Inc., 15 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 + * 17 + **************************************************************************/ 18 + #include <drm/drmP.h> 19 + #include "psb_drv.h" 20 + #include "psb_reg.h" 21 + 22 + /* 23 + * Code for the SGX MMU: 24 + */ 25 + 26 + /* 27 + * clflush on one processor only: 28 + * clflush should apparently flush the cache line on all processors in an 29 + * SMP system. 30 + */ 31 + 32 + /* 33 + * kmap atomic: 34 + * The usage of the slots must be completely encapsulated within a spinlock, and 35 + * no other functions that may be using the locks for other purposed may be 36 + * called from within the locked region. 37 + * Since the slots are per processor, this will guarantee that we are the only 38 + * user. 39 + */ 40 + 41 + /* 42 + * TODO: Inserting ptes from an interrupt handler: 43 + * This may be desirable for some SGX functionality where the GPU can fault in 44 + * needed pages. For that, we need to make an atomic insert_pages function, that 45 + * may fail. 46 + * If it fails, the caller need to insert the page using a workqueue function, 47 + * but on average it should be fast. 
48 + */ 49 + 50 + struct psb_mmu_driver { 51 + /* protects driver- and pd structures. Always take in read mode 52 + * before taking the page table spinlock. 53 + */ 54 + struct rw_semaphore sem; 55 + 56 + /* protects page tables, directory tables and pt tables. 57 + * and pt structures. 58 + */ 59 + spinlock_t lock; 60 + 61 + atomic_t needs_tlbflush; 62 + 63 + uint8_t __iomem *register_map; 64 + struct psb_mmu_pd *default_pd; 65 + /*uint32_t bif_ctrl;*/ 66 + int has_clflush; 67 + int clflush_add; 68 + unsigned long clflush_mask; 69 + 70 + struct drm_psb_private *dev_priv; 71 + }; 72 + 73 + struct psb_mmu_pd; 74 + 75 + struct psb_mmu_pt { 76 + struct psb_mmu_pd *pd; 77 + uint32_t index; 78 + uint32_t count; 79 + struct page *p; 80 + uint32_t *v; 81 + }; 82 + 83 + struct psb_mmu_pd { 84 + struct psb_mmu_driver *driver; 85 + int hw_context; 86 + struct psb_mmu_pt **tables; 87 + struct page *p; 88 + struct page *dummy_pt; 89 + struct page *dummy_page; 90 + uint32_t pd_mask; 91 + uint32_t invalid_pde; 92 + uint32_t invalid_pte; 93 + }; 94 + 95 + static inline uint32_t psb_mmu_pt_index(uint32_t offset) 96 + { 97 + return (offset >> PSB_PTE_SHIFT) & 0x3FF; 98 + } 99 + 100 + static inline uint32_t psb_mmu_pd_index(uint32_t offset) 101 + { 102 + return offset >> PSB_PDE_SHIFT; 103 + } 104 + 105 + static inline void psb_clflush(void *addr) 106 + { 107 + __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory"); 108 + } 109 + 110 + static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, 111 + void *addr) 112 + { 113 + if (!driver->has_clflush) 114 + return; 115 + 116 + mb(); 117 + psb_clflush(addr); 118 + mb(); 119 + } 120 + 121 + static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page) 122 + { 123 + uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT; 124 + uint32_t clflush_count = PAGE_SIZE / clflush_add; 125 + int i; 126 + uint8_t *clf; 127 + 128 + clf = kmap_atomic(page, KM_USER0); 129 + mb(); 130 + for (i = 0; i < clflush_count; 
++i) { 131 + psb_clflush(clf); 132 + clf += clflush_add; 133 + } 134 + mb(); 135 + kunmap_atomic(clf, KM_USER0); 136 + } 137 + 138 + static void psb_pages_clflush(struct psb_mmu_driver *driver, 139 + struct page *page[], unsigned long num_pages) 140 + { 141 + int i; 142 + 143 + if (!driver->has_clflush) 144 + return ; 145 + 146 + for (i = 0; i < num_pages; i++) 147 + psb_page_clflush(driver, *page++); 148 + } 149 + 150 + static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, 151 + int force) 152 + { 153 + atomic_set(&driver->needs_tlbflush, 0); 154 + } 155 + 156 + static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force) 157 + { 158 + down_write(&driver->sem); 159 + psb_mmu_flush_pd_locked(driver, force); 160 + up_write(&driver->sem); 161 + } 162 + 163 + void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot) 164 + { 165 + if (rc_prot) 166 + down_write(&driver->sem); 167 + if (rc_prot) 168 + up_write(&driver->sem); 169 + } 170 + 171 + void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context) 172 + { 173 + /*ttm_tt_cache_flush(&pd->p, 1);*/ 174 + psb_pages_clflush(pd->driver, &pd->p, 1); 175 + down_write(&pd->driver->sem); 176 + wmb(); 177 + psb_mmu_flush_pd_locked(pd->driver, 1); 178 + pd->hw_context = hw_context; 179 + up_write(&pd->driver->sem); 180 + 181 + } 182 + 183 + static inline unsigned long psb_pd_addr_end(unsigned long addr, 184 + unsigned long end) 185 + { 186 + 187 + addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK; 188 + return (addr < end) ? 
addr : end; 189 + } 190 + 191 + static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type) 192 + { 193 + uint32_t mask = PSB_PTE_VALID; 194 + 195 + if (type & PSB_MMU_CACHED_MEMORY) 196 + mask |= PSB_PTE_CACHED; 197 + if (type & PSB_MMU_RO_MEMORY) 198 + mask |= PSB_PTE_RO; 199 + if (type & PSB_MMU_WO_MEMORY) 200 + mask |= PSB_PTE_WO; 201 + 202 + return (pfn << PAGE_SHIFT) | mask; 203 + } 204 + 205 + struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver, 206 + int trap_pagefaults, int invalid_type) 207 + { 208 + struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL); 209 + uint32_t *v; 210 + int i; 211 + 212 + if (!pd) 213 + return NULL; 214 + 215 + pd->p = alloc_page(GFP_DMA32); 216 + if (!pd->p) 217 + goto out_err1; 218 + pd->dummy_pt = alloc_page(GFP_DMA32); 219 + if (!pd->dummy_pt) 220 + goto out_err2; 221 + pd->dummy_page = alloc_page(GFP_DMA32); 222 + if (!pd->dummy_page) 223 + goto out_err3; 224 + 225 + if (!trap_pagefaults) { 226 + pd->invalid_pde = 227 + psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt), 228 + invalid_type); 229 + pd->invalid_pte = 230 + psb_mmu_mask_pte(page_to_pfn(pd->dummy_page), 231 + invalid_type); 232 + } else { 233 + pd->invalid_pde = 0; 234 + pd->invalid_pte = 0; 235 + } 236 + 237 + v = kmap(pd->dummy_pt); 238 + for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) 239 + v[i] = pd->invalid_pte; 240 + 241 + kunmap(pd->dummy_pt); 242 + 243 + v = kmap(pd->p); 244 + for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) 245 + v[i] = pd->invalid_pde; 246 + 247 + kunmap(pd->p); 248 + 249 + clear_page(kmap(pd->dummy_page)); 250 + kunmap(pd->dummy_page); 251 + 252 + pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024); 253 + if (!pd->tables) 254 + goto out_err4; 255 + 256 + pd->hw_context = -1; 257 + pd->pd_mask = PSB_PTE_VALID; 258 + pd->driver = driver; 259 + 260 + return pd; 261 + 262 + out_err4: 263 + __free_page(pd->dummy_page); 264 + out_err3: 265 + __free_page(pd->dummy_pt); 266 + out_err2: 267 + __free_page(pd->p); 
268 + out_err1: 269 + kfree(pd); 270 + return NULL; 271 + } 272 + 273 + void psb_mmu_free_pt(struct psb_mmu_pt *pt) 274 + { 275 + __free_page(pt->p); 276 + kfree(pt); 277 + } 278 + 279 + void psb_mmu_free_pagedir(struct psb_mmu_pd *pd) 280 + { 281 + struct psb_mmu_driver *driver = pd->driver; 282 + struct psb_mmu_pt *pt; 283 + int i; 284 + 285 + down_write(&driver->sem); 286 + if (pd->hw_context != -1) 287 + psb_mmu_flush_pd_locked(driver, 1); 288 + 289 + /* Should take the spinlock here, but we don't need to do that 290 + since we have the semaphore in write mode. */ 291 + 292 + for (i = 0; i < 1024; ++i) { 293 + pt = pd->tables[i]; 294 + if (pt) 295 + psb_mmu_free_pt(pt); 296 + } 297 + 298 + vfree(pd->tables); 299 + __free_page(pd->dummy_page); 300 + __free_page(pd->dummy_pt); 301 + __free_page(pd->p); 302 + kfree(pd); 303 + up_write(&driver->sem); 304 + } 305 + 306 + static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd) 307 + { 308 + struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL); 309 + void *v; 310 + uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT; 311 + uint32_t clflush_count = PAGE_SIZE / clflush_add; 312 + spinlock_t *lock = &pd->driver->lock; 313 + uint8_t *clf; 314 + uint32_t *ptes; 315 + int i; 316 + 317 + if (!pt) 318 + return NULL; 319 + 320 + pt->p = alloc_page(GFP_DMA32); 321 + if (!pt->p) { 322 + kfree(pt); 323 + return NULL; 324 + } 325 + 326 + spin_lock(lock); 327 + 328 + v = kmap_atomic(pt->p, KM_USER0); 329 + clf = (uint8_t *) v; 330 + ptes = (uint32_t *) v; 331 + for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) 332 + *ptes++ = pd->invalid_pte; 333 + 334 + 335 + if (pd->driver->has_clflush && pd->hw_context != -1) { 336 + mb(); 337 + for (i = 0; i < clflush_count; ++i) { 338 + psb_clflush(clf); 339 + clf += clflush_add; 340 + } 341 + mb(); 342 + } 343 + 344 + kunmap_atomic(v, KM_USER0); 345 + spin_unlock(lock); 346 + 347 + pt->count = 0; 348 + pt->pd = pd; 349 + pt->index = 0; 350 + 351 + return pt; 352 + } 
353 + 354 + struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd, 355 + unsigned long addr) 356 + { 357 + uint32_t index = psb_mmu_pd_index(addr); 358 + struct psb_mmu_pt *pt; 359 + uint32_t *v; 360 + spinlock_t *lock = &pd->driver->lock; 361 + 362 + spin_lock(lock); 363 + pt = pd->tables[index]; 364 + while (!pt) { 365 + spin_unlock(lock); 366 + pt = psb_mmu_alloc_pt(pd); 367 + if (!pt) 368 + return NULL; 369 + spin_lock(lock); 370 + 371 + if (pd->tables[index]) { 372 + spin_unlock(lock); 373 + psb_mmu_free_pt(pt); 374 + spin_lock(lock); 375 + pt = pd->tables[index]; 376 + continue; 377 + } 378 + 379 + v = kmap_atomic(pd->p, KM_USER0); 380 + pd->tables[index] = pt; 381 + v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask; 382 + pt->index = index; 383 + kunmap_atomic((void *) v, KM_USER0); 384 + 385 + if (pd->hw_context != -1) { 386 + psb_mmu_clflush(pd->driver, (void *) &v[index]); 387 + atomic_set(&pd->driver->needs_tlbflush, 1); 388 + } 389 + } 390 + pt->v = kmap_atomic(pt->p, KM_USER0); 391 + return pt; 392 + } 393 + 394 + static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd, 395 + unsigned long addr) 396 + { 397 + uint32_t index = psb_mmu_pd_index(addr); 398 + struct psb_mmu_pt *pt; 399 + spinlock_t *lock = &pd->driver->lock; 400 + 401 + spin_lock(lock); 402 + pt = pd->tables[index]; 403 + if (!pt) { 404 + spin_unlock(lock); 405 + return NULL; 406 + } 407 + pt->v = kmap_atomic(pt->p, KM_USER0); 408 + return pt; 409 + } 410 + 411 + static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt) 412 + { 413 + struct psb_mmu_pd *pd = pt->pd; 414 + uint32_t *v; 415 + 416 + kunmap_atomic(pt->v, KM_USER0); 417 + if (pt->count == 0) { 418 + v = kmap_atomic(pd->p, KM_USER0); 419 + v[pt->index] = pd->invalid_pde; 420 + pd->tables[pt->index] = NULL; 421 + 422 + if (pd->hw_context != -1) { 423 + psb_mmu_clflush(pd->driver, 424 + (void *) &v[pt->index]); 425 + atomic_set(&pd->driver->needs_tlbflush, 1); 426 + } 427 + kunmap_atomic(pt->v, 
KM_USER0); 428 + spin_unlock(&pd->driver->lock); 429 + psb_mmu_free_pt(pt); 430 + return; 431 + } 432 + spin_unlock(&pd->driver->lock); 433 + } 434 + 435 + static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, 436 + unsigned long addr, uint32_t pte) 437 + { 438 + pt->v[psb_mmu_pt_index(addr)] = pte; 439 + } 440 + 441 + static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt, 442 + unsigned long addr) 443 + { 444 + pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte; 445 + } 446 + 447 + #if 0 448 + static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd, 449 + uint32_t mmu_offset) 450 + { 451 + uint32_t *v; 452 + uint32_t pfn; 453 + 454 + v = kmap_atomic(pd->p, KM_USER0); 455 + if (!v) { 456 + printk(KERN_INFO "Could not kmap pde page.\n"); 457 + return 0; 458 + } 459 + pfn = v[psb_mmu_pd_index(mmu_offset)]; 460 + /* printk(KERN_INFO "pde is 0x%08x\n",pfn); */ 461 + kunmap_atomic(v, KM_USER0); 462 + if (((pfn & 0x0F) != PSB_PTE_VALID)) { 463 + printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n", 464 + mmu_offset, pfn); 465 + } 466 + v = ioremap(pfn & 0xFFFFF000, 4096); 467 + if (!v) { 468 + printk(KERN_INFO "Could not kmap pte page.\n"); 469 + return 0; 470 + } 471 + pfn = v[psb_mmu_pt_index(mmu_offset)]; 472 + /* printk(KERN_INFO "pte is 0x%08x\n",pfn); */ 473 + iounmap(v); 474 + if (((pfn & 0x0F) != PSB_PTE_VALID)) { 475 + printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n", 476 + mmu_offset, pfn); 477 + } 478 + return pfn >> PAGE_SHIFT; 479 + } 480 + 481 + static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd, 482 + uint32_t mmu_offset, 483 + uint32_t gtt_pages) 484 + { 485 + uint32_t start; 486 + uint32_t next; 487 + 488 + printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n", 489 + mmu_offset, gtt_pages); 490 + down_read(&pd->driver->sem); 491 + start = psb_mmu_check_pte_locked(pd, mmu_offset); 492 + mmu_offset += PAGE_SIZE; 493 + gtt_pages -= 1; 494 + while (gtt_pages--) { 495 + next = psb_mmu_check_pte_locked(pd, mmu_offset); 496 + if 
(next != start + 1) { 497 + printk(KERN_INFO 498 + "Ptes out of order: 0x%08x, 0x%08x.\n", 499 + start, next); 500 + } 501 + start = next; 502 + mmu_offset += PAGE_SIZE; 503 + } 504 + up_read(&pd->driver->sem); 505 + } 506 + 507 + #endif 508 + 509 + void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, 510 + uint32_t mmu_offset, uint32_t gtt_start, 511 + uint32_t gtt_pages) 512 + { 513 + uint32_t *v; 514 + uint32_t start = psb_mmu_pd_index(mmu_offset); 515 + struct psb_mmu_driver *driver = pd->driver; 516 + int num_pages = gtt_pages; 517 + 518 + down_read(&driver->sem); 519 + spin_lock(&driver->lock); 520 + 521 + v = kmap_atomic(pd->p, KM_USER0); 522 + v += start; 523 + 524 + while (gtt_pages--) { 525 + *v++ = gtt_start | pd->pd_mask; 526 + gtt_start += PAGE_SIZE; 527 + } 528 + 529 + /*ttm_tt_cache_flush(&pd->p, num_pages);*/ 530 + psb_pages_clflush(pd->driver, &pd->p, num_pages); 531 + kunmap_atomic(v, KM_USER0); 532 + spin_unlock(&driver->lock); 533 + 534 + if (pd->hw_context != -1) 535 + atomic_set(&pd->driver->needs_tlbflush, 1); 536 + 537 + up_read(&pd->driver->sem); 538 + psb_mmu_flush_pd(pd->driver, 0); 539 + } 540 + 541 + struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver) 542 + { 543 + struct psb_mmu_pd *pd; 544 + 545 + /* down_read(&driver->sem); */ 546 + pd = driver->default_pd; 547 + /* up_read(&driver->sem); */ 548 + 549 + return pd; 550 + } 551 + 552 + /* Returns the physical address of the PD shared by sgx/msvdx */ 553 + uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver) 554 + { 555 + struct psb_mmu_pd *pd; 556 + 557 + pd = psb_mmu_get_default_pd(driver); 558 + return page_to_pfn(pd->p) << PAGE_SHIFT; 559 + } 560 + 561 + void psb_mmu_driver_takedown(struct psb_mmu_driver *driver) 562 + { 563 + psb_mmu_free_pagedir(driver->default_pd); 564 + kfree(driver); 565 + } 566 + 567 + struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers, 568 + int trap_pagefaults, 569 + int invalid_type, 570 + struct drm_psb_private 
*dev_priv) 571 + { 572 + struct psb_mmu_driver *driver; 573 + 574 + driver = kmalloc(sizeof(*driver), GFP_KERNEL); 575 + 576 + if (!driver) 577 + return NULL; 578 + driver->dev_priv = dev_priv; 579 + 580 + driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults, 581 + invalid_type); 582 + if (!driver->default_pd) 583 + goto out_err1; 584 + 585 + spin_lock_init(&driver->lock); 586 + init_rwsem(&driver->sem); 587 + down_write(&driver->sem); 588 + driver->register_map = registers; 589 + atomic_set(&driver->needs_tlbflush, 1); 590 + 591 + driver->has_clflush = 0; 592 + 593 + if (boot_cpu_has(X86_FEATURE_CLFLSH)) { 594 + uint32_t tfms, misc, cap0, cap4, clflush_size; 595 + 596 + /* 597 + * clflush size is determined at kernel setup for x86_64 598 + * but not for i386. We have to do it here. 599 + */ 600 + 601 + cpuid(0x00000001, &tfms, &misc, &cap0, &cap4); 602 + clflush_size = ((misc >> 8) & 0xff) * 8; 603 + driver->has_clflush = 1; 604 + driver->clflush_add = 605 + PAGE_SIZE * clflush_size / sizeof(uint32_t); 606 + driver->clflush_mask = driver->clflush_add - 1; 607 + driver->clflush_mask = ~driver->clflush_mask; 608 + } 609 + 610 + up_write(&driver->sem); 611 + return driver; 612 + 613 + out_err1: 614 + kfree(driver); 615 + return NULL; 616 + } 617 + 618 + static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, 619 + unsigned long address, uint32_t num_pages, 620 + uint32_t desired_tile_stride, 621 + uint32_t hw_tile_stride) 622 + { 623 + struct psb_mmu_pt *pt; 624 + uint32_t rows = 1; 625 + uint32_t i; 626 + unsigned long addr; 627 + unsigned long end; 628 + unsigned long next; 629 + unsigned long add; 630 + unsigned long row_add; 631 + unsigned long clflush_add = pd->driver->clflush_add; 632 + unsigned long clflush_mask = pd->driver->clflush_mask; 633 + 634 + if (!pd->driver->has_clflush) { 635 + /*ttm_tt_cache_flush(&pd->p, num_pages);*/ 636 + psb_pages_clflush(pd->driver, &pd->p, num_pages); 637 + return; 638 + } 639 + 640 + if (hw_tile_stride) 641 + rows = 
num_pages / desired_tile_stride; 642 + else 643 + desired_tile_stride = num_pages; 644 + 645 + add = desired_tile_stride << PAGE_SHIFT; 646 + row_add = hw_tile_stride << PAGE_SHIFT; 647 + mb(); 648 + for (i = 0; i < rows; ++i) { 649 + 650 + addr = address; 651 + end = addr + add; 652 + 653 + do { 654 + next = psb_pd_addr_end(addr, end); 655 + pt = psb_mmu_pt_map_lock(pd, addr); 656 + if (!pt) 657 + continue; 658 + do { 659 + psb_clflush(&pt->v 660 + [psb_mmu_pt_index(addr)]); 661 + } while (addr += 662 + clflush_add, 663 + (addr & clflush_mask) < next); 664 + 665 + psb_mmu_pt_unmap_unlock(pt); 666 + } while (addr = next, next != end); 667 + address += row_add; 668 + } 669 + mb(); 670 + } 671 + 672 + void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, 673 + unsigned long address, uint32_t num_pages) 674 + { 675 + struct psb_mmu_pt *pt; 676 + unsigned long addr; 677 + unsigned long end; 678 + unsigned long next; 679 + unsigned long f_address = address; 680 + 681 + down_read(&pd->driver->sem); 682 + 683 + addr = address; 684 + end = addr + (num_pages << PAGE_SHIFT); 685 + 686 + do { 687 + next = psb_pd_addr_end(addr, end); 688 + pt = psb_mmu_pt_alloc_map_lock(pd, addr); 689 + if (!pt) 690 + goto out; 691 + do { 692 + psb_mmu_invalidate_pte(pt, addr); 693 + --pt->count; 694 + } while (addr += PAGE_SIZE, addr < next); 695 + psb_mmu_pt_unmap_unlock(pt); 696 + 697 + } while (addr = next, next != end); 698 + 699 + out: 700 + if (pd->hw_context != -1) 701 + psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); 702 + 703 + up_read(&pd->driver->sem); 704 + 705 + if (pd->hw_context != -1) 706 + psb_mmu_flush(pd->driver, 0); 707 + 708 + return; 709 + } 710 + 711 + void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address, 712 + uint32_t num_pages, uint32_t desired_tile_stride, 713 + uint32_t hw_tile_stride) 714 + { 715 + struct psb_mmu_pt *pt; 716 + uint32_t rows = 1; 717 + uint32_t i; 718 + unsigned long addr; 719 + unsigned long end; 720 + unsigned long next; 
721 + unsigned long add; 722 + unsigned long row_add; 723 + unsigned long f_address = address; 724 + 725 + if (hw_tile_stride) 726 + rows = num_pages / desired_tile_stride; 727 + else 728 + desired_tile_stride = num_pages; 729 + 730 + add = desired_tile_stride << PAGE_SHIFT; 731 + row_add = hw_tile_stride << PAGE_SHIFT; 732 + 733 + /* down_read(&pd->driver->sem); */ 734 + 735 + /* Make sure we only need to flush this processor's cache */ 736 + 737 + for (i = 0; i < rows; ++i) { 738 + 739 + addr = address; 740 + end = addr + add; 741 + 742 + do { 743 + next = psb_pd_addr_end(addr, end); 744 + pt = psb_mmu_pt_map_lock(pd, addr); 745 + if (!pt) 746 + continue; 747 + do { 748 + psb_mmu_invalidate_pte(pt, addr); 749 + --pt->count; 750 + 751 + } while (addr += PAGE_SIZE, addr < next); 752 + psb_mmu_pt_unmap_unlock(pt); 753 + 754 + } while (addr = next, next != end); 755 + address += row_add; 756 + } 757 + if (pd->hw_context != -1) 758 + psb_mmu_flush_ptes(pd, f_address, num_pages, 759 + desired_tile_stride, hw_tile_stride); 760 + 761 + /* up_read(&pd->driver->sem); */ 762 + 763 + if (pd->hw_context != -1) 764 + psb_mmu_flush(pd->driver, 0); 765 + } 766 + 767 + int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, 768 + unsigned long address, uint32_t num_pages, 769 + int type) 770 + { 771 + struct psb_mmu_pt *pt; 772 + uint32_t pte; 773 + unsigned long addr; 774 + unsigned long end; 775 + unsigned long next; 776 + unsigned long f_address = address; 777 + int ret = 0; 778 + 779 + down_read(&pd->driver->sem); 780 + 781 + addr = address; 782 + end = addr + (num_pages << PAGE_SHIFT); 783 + 784 + do { 785 + next = psb_pd_addr_end(addr, end); 786 + pt = psb_mmu_pt_alloc_map_lock(pd, addr); 787 + if (!pt) { 788 + ret = -ENOMEM; 789 + goto out; 790 + } 791 + do { 792 + pte = psb_mmu_mask_pte(start_pfn++, type); 793 + psb_mmu_set_pte(pt, addr, pte); 794 + pt->count++; 795 + } while (addr += PAGE_SIZE, addr < next); 796 + psb_mmu_pt_unmap_unlock(pt); 797 + 798 
+ } while (addr = next, next != end); 799 + 800 + out: 801 + if (pd->hw_context != -1) 802 + psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); 803 + 804 + up_read(&pd->driver->sem); 805 + 806 + if (pd->hw_context != -1) 807 + psb_mmu_flush(pd->driver, 1); 808 + 809 + return ret; 810 + } 811 + 812 + int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, 813 + unsigned long address, uint32_t num_pages, 814 + uint32_t desired_tile_stride, 815 + uint32_t hw_tile_stride, int type) 816 + { 817 + struct psb_mmu_pt *pt; 818 + uint32_t rows = 1; 819 + uint32_t i; 820 + uint32_t pte; 821 + unsigned long addr; 822 + unsigned long end; 823 + unsigned long next; 824 + unsigned long add; 825 + unsigned long row_add; 826 + unsigned long f_address = address; 827 + int ret = 0; 828 + 829 + if (hw_tile_stride) { 830 + if (num_pages % desired_tile_stride != 0) 831 + return -EINVAL; 832 + rows = num_pages / desired_tile_stride; 833 + } else { 834 + desired_tile_stride = num_pages; 835 + } 836 + 837 + add = desired_tile_stride << PAGE_SHIFT; 838 + row_add = hw_tile_stride << PAGE_SHIFT; 839 + 840 + down_read(&pd->driver->sem); 841 + 842 + for (i = 0; i < rows; ++i) { 843 + 844 + addr = address; 845 + end = addr + add; 846 + 847 + do { 848 + next = psb_pd_addr_end(addr, end); 849 + pt = psb_mmu_pt_alloc_map_lock(pd, addr); 850 + if (!pt) { 851 + ret = -ENOMEM; 852 + goto out; 853 + } 854 + do { 855 + pte = 856 + psb_mmu_mask_pte(page_to_pfn(*pages++), 857 + type); 858 + psb_mmu_set_pte(pt, addr, pte); 859 + pt->count++; 860 + } while (addr += PAGE_SIZE, addr < next); 861 + psb_mmu_pt_unmap_unlock(pt); 862 + 863 + } while (addr = next, next != end); 864 + 865 + address += row_add; 866 + } 867 + out: 868 + if (pd->hw_context != -1) 869 + psb_mmu_flush_ptes(pd, f_address, num_pages, 870 + desired_tile_stride, hw_tile_stride); 871 + 872 + up_read(&pd->driver->sem); 873 + 874 + if (pd->hw_context != -1) 875 + psb_mmu_flush(pd->driver, 1); 876 + 877 + return ret; 878 + } 879 + 
880 + int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, 881 + unsigned long *pfn) 882 + { 883 + int ret; 884 + struct psb_mmu_pt *pt; 885 + uint32_t tmp; 886 + spinlock_t *lock = &pd->driver->lock; 887 + 888 + down_read(&pd->driver->sem); 889 + pt = psb_mmu_pt_map_lock(pd, virtual); 890 + if (!pt) { 891 + uint32_t *v; 892 + 893 + spin_lock(lock); 894 + v = kmap_atomic(pd->p, KM_USER0); 895 + tmp = v[psb_mmu_pd_index(virtual)]; 896 + kunmap_atomic(v, KM_USER0); 897 + spin_unlock(lock); 898 + 899 + if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) || 900 + !(pd->invalid_pte & PSB_PTE_VALID)) { 901 + ret = -EINVAL; 902 + goto out; 903 + } 904 + ret = 0; 905 + *pfn = pd->invalid_pte >> PAGE_SHIFT; 906 + goto out; 907 + } 908 + tmp = pt->v[psb_mmu_pt_index(virtual)]; 909 + if (!(tmp & PSB_PTE_VALID)) { 910 + ret = -EINVAL; 911 + } else { 912 + ret = 0; 913 + *pfn = tmp >> PAGE_SHIFT; 914 + } 915 + psb_mmu_pt_unmap_unlock(pt); 916 + out: 917 + up_read(&pd->driver->sem); 918 + return ret; 919 + }
+797
drivers/staging/gma500/psb_powermgmt.c
··· 1 + /************************************************************************** 2 + * Copyright (c) 2009, Intel Corporation. 3 + * All Rights Reserved. 4 + 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice (including the next 13 + * paragraph) shall be included in all copies or substantial portions of the 14 + * Software. 15 + * 16 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 + * SOFTWARE. 
23 + * 24 + * Authors: 25 + * Benjamin Defnet <benjamin.r.defnet@intel.com> 26 + * Rajesh Poornachandran <rajesh.poornachandran@intel.com> 27 + * 28 + */ 29 + #include "psb_powermgmt.h" 30 + #include "psb_drv.h" 31 + #include "psb_intel_reg.h" 32 + #include <linux/mutex.h> 33 + #include <linux/pm_runtime.h> 34 + 35 + #undef OSPM_GFX_DPK 36 + 37 + extern u32 gui32SGXDeviceID; 38 + extern u32 gui32MRSTDisplayDeviceID; 39 + extern u32 gui32MRSTMSVDXDeviceID; 40 + extern u32 gui32MRSTTOPAZDeviceID; 41 + 42 + struct drm_device *gpDrmDevice = NULL; 43 + static struct mutex power_mutex; 44 + static bool gbSuspendInProgress = false; 45 + static bool gbResumeInProgress = false; 46 + static int g_hw_power_status_mask; 47 + static atomic_t g_display_access_count; 48 + static atomic_t g_graphics_access_count; 49 + static atomic_t g_videoenc_access_count; 50 + static atomic_t g_videodec_access_count; 51 + int allow_runtime_pm = 0; 52 + 53 + void ospm_power_island_up(int hw_islands); 54 + void ospm_power_island_down(int hw_islands); 55 + static bool gbSuspended = false; 56 + bool gbgfxsuspended = false; 57 + 58 + /* 59 + * ospm_power_init 60 + * 61 + * Description: Initialize this ospm power management module 62 + */ 63 + void ospm_power_init(struct drm_device *dev) 64 + { 65 + struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private; 66 + 67 + gpDrmDevice = dev; 68 + 69 + dev_priv->apm_base = dev_priv->apm_reg & 0xffff; 70 + dev_priv->ospm_base &= 0xffff; 71 + 72 + mutex_init(&power_mutex); 73 + g_hw_power_status_mask = OSPM_ALL_ISLANDS; 74 + atomic_set(&g_display_access_count, 0); 75 + atomic_set(&g_graphics_access_count, 0); 76 + atomic_set(&g_videoenc_access_count, 0); 77 + atomic_set(&g_videodec_access_count, 0); 78 + } 79 + 80 + /* 81 + * ospm_power_uninit 82 + * 83 + * Description: Uninitialize this ospm power management module 84 + */ 85 + void ospm_power_uninit(void) 86 + { 87 + mutex_destroy(&power_mutex); 88 + 
pm_runtime_disable(&gpDrmDevice->pdev->dev); 89 + pm_runtime_set_suspended(&gpDrmDevice->pdev->dev); 90 + } 91 + 92 + 93 + /* 94 + * save_display_registers 95 + * 96 + * Description: We are going to suspend so save current display 97 + * register state. 98 + */ 99 + static int save_display_registers(struct drm_device *dev) 100 + { 101 + struct drm_psb_private *dev_priv = dev->dev_private; 102 + struct drm_crtc * crtc; 103 + struct drm_connector * connector; 104 + 105 + /* Display arbitration control + watermarks */ 106 + dev_priv->saveDSPARB = PSB_RVDC32(DSPARB); 107 + dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1); 108 + dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2); 109 + dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3); 110 + dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4); 111 + dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5); 112 + dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6); 113 + dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT); 114 + 115 + /*save crtc and output state*/ 116 + mutex_lock(&dev->mode_config.mutex); 117 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 118 + if(drm_helper_crtc_in_use(crtc)) { 119 + crtc->funcs->save(crtc); 120 + } 121 + } 122 + 123 + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 124 + connector->funcs->save(connector); 125 + } 126 + mutex_unlock(&dev->mode_config.mutex); 127 + 128 + /* Interrupt state */ 129 + /* 130 + * Handled in psb_irq.c 131 + */ 132 + 133 + return 0; 134 + } 135 + 136 + /* 137 + * restore_display_registers 138 + * 139 + * Description: We are going to resume so restore display register state. 
140 + */ 141 + static int restore_display_registers(struct drm_device *dev) 142 + { 143 + struct drm_psb_private *dev_priv = dev->dev_private; 144 + struct drm_crtc * crtc; 145 + struct drm_connector * connector; 146 + 147 + /* Display arbitration + watermarks */ 148 + PSB_WVDC32(dev_priv->saveDSPARB, DSPARB); 149 + PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1); 150 + PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2); 151 + PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3); 152 + PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4); 153 + PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5); 154 + PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6); 155 + PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT); 156 + 157 + /*make sure VGA plane is off. it initializes to on after reset!*/ 158 + PSB_WVDC32(0x80000000, VGACNTRL); 159 + 160 + mutex_lock(&dev->mode_config.mutex); 161 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 162 + if(drm_helper_crtc_in_use(crtc)) 163 + crtc->funcs->restore(crtc); 164 + } 165 + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 166 + connector->funcs->restore(connector); 167 + } 168 + mutex_unlock(&dev->mode_config.mutex); 169 + 170 + /*Interrupt state*/ 171 + /* 172 + * Handled in psb_irq.c 173 + */ 174 + 175 + return 0; 176 + } 177 + /* 178 + * powermgmt_suspend_display 179 + * 180 + * Description: Suspend the display hardware saving state and disabling 181 + * as necessary. 
182 + */ 183 + void ospm_suspend_display(struct drm_device *dev) 184 + { 185 + struct drm_psb_private *dev_priv = dev->dev_private; 186 + int pp_stat, ret=0; 187 + 188 + printk(KERN_ALERT "%s \n", __func__); 189 + 190 + #ifdef OSPM_GFX_DPK 191 + printk(KERN_ALERT "%s \n", __func__); 192 + #endif 193 + if (!(g_hw_power_status_mask & OSPM_DISPLAY_ISLAND)) 194 + return; 195 + 196 + save_display_registers(dev); 197 + 198 + if (dev_priv->iLVDS_enable) { 199 + /*shutdown the panel*/ 200 + PSB_WVDC32(0, PP_CONTROL); 201 + 202 + do { 203 + pp_stat = PSB_RVDC32(PP_STATUS); 204 + } while (pp_stat & 0x80000000); 205 + 206 + /*turn off the plane*/ 207 + PSB_WVDC32(0x58000000, DSPACNTR); 208 + PSB_WVDC32(0, DSPASURF);/*trigger the plane disable*/ 209 + /*wait ~4 ticks*/ 210 + msleep(4); 211 + 212 + /*turn off pipe*/ 213 + PSB_WVDC32(0x0, PIPEACONF); 214 + /*wait ~8 ticks*/ 215 + msleep(8); 216 + 217 + /*turn off PLLs*/ 218 + PSB_WVDC32(0, MRST_DPLL_A); 219 + } else { 220 + PSB_WVDC32(DPI_SHUT_DOWN, DPI_CONTROL_REG); 221 + PSB_WVDC32(0x0, PIPEACONF); 222 + PSB_WVDC32(0x2faf0000, BLC_PWM_CTL); 223 + while (REG_READ(0x70008) & 0x40000000); 224 + while ((PSB_RVDC32(GEN_FIFO_STAT_REG) & DPI_FIFO_EMPTY) 225 + != DPI_FIFO_EMPTY); 226 + PSB_WVDC32(0, DEVICE_READY_REG); 227 + /* turn off panel power */ 228 + ret = 0; 229 + } 230 + ospm_power_island_down(OSPM_DISPLAY_ISLAND); 231 + } 232 + 233 + /* 234 + * ospm_resume_display 235 + * 236 + * Description: Resume the display hardware restoring state and enabling 237 + * as necessary. 
238 + */ 239 + void ospm_resume_display(struct pci_dev *pdev) 240 + { 241 + struct drm_device *dev = pci_get_drvdata(pdev); 242 + struct drm_psb_private *dev_priv = dev->dev_private; 243 + struct psb_gtt *pg = dev_priv->pg; 244 + 245 + printk(KERN_ALERT "%s \n", __func__); 246 + 247 + #ifdef OSPM_GFX_DPK 248 + printk(KERN_ALERT "%s \n", __func__); 249 + #endif 250 + if (g_hw_power_status_mask & OSPM_DISPLAY_ISLAND) 251 + return; 252 + 253 + /* turn on the display power island */ 254 + ospm_power_island_up(OSPM_DISPLAY_ISLAND); 255 + 256 + PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL); 257 + pci_write_config_word(pdev, PSB_GMCH_CTRL, 258 + pg->gmch_ctrl | _PSB_GMCH_ENABLED); 259 + 260 + /* Don't reinitialize the GTT as it is unnecessary. The gtt is 261 + * stored in memory so it will automatically be restored. All 262 + * we need to do is restore the PGETBL_CTL which we already do 263 + * above. 264 + */ 265 + /*psb_gtt_init(dev_priv->pg, 1);*/ 266 + 267 + restore_display_registers(dev); 268 + } 269 + 270 + #if 1 271 + /* 272 + * ospm_suspend_pci 273 + * 274 + * Description: Suspend the pci device saving state and disabling 275 + * as necessary. 276 + */ 277 + static void ospm_suspend_pci(struct pci_dev *pdev) 278 + { 279 + struct drm_device *dev = pci_get_drvdata(pdev); 280 + struct drm_psb_private *dev_priv = dev->dev_private; 281 + int bsm, vbt; 282 + 283 + if (gbSuspended) 284 + return; 285 + 286 + #ifdef OSPM_GFX_DPK 287 + printk(KERN_ALERT "ospm_suspend_pci\n"); 288 + #endif 289 + 290 + #ifdef CONFIG_MDFD_GL3 291 + // Power off GL3 after all GFX sub-systems are powered off. 
292 + ospm_power_island_down(OSPM_GL3_CACHE_ISLAND); 293 + #endif 294 + 295 + pci_save_state(pdev); 296 + pci_read_config_dword(pdev, 0x5C, &bsm); 297 + dev_priv->saveBSM = bsm; 298 + pci_read_config_dword(pdev, 0xFC, &vbt); 299 + dev_priv->saveVBT = vbt; 300 + pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr); 301 + pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data); 302 + 303 + pci_disable_device(pdev); 304 + pci_set_power_state(pdev, PCI_D3hot); 305 + 306 + gbSuspended = true; 307 + gbgfxsuspended = true; 308 + } 309 + 310 + /* 311 + * ospm_resume_pci 312 + * 313 + * Description: Resume the pci device restoring state and enabling 314 + * as necessary. 315 + */ 316 + static bool ospm_resume_pci(struct pci_dev *pdev) 317 + { 318 + struct drm_device *dev = pci_get_drvdata(pdev); 319 + struct drm_psb_private *dev_priv = dev->dev_private; 320 + int ret = 0; 321 + 322 + if (!gbSuspended) 323 + return true; 324 + 325 + #ifdef OSPM_GFX_DPK 326 + printk(KERN_ALERT "ospm_resume_pci\n"); 327 + #endif 328 + 329 + pci_set_power_state(pdev, PCI_D0); 330 + pci_restore_state(pdev); 331 + pci_write_config_dword(pdev, 0x5c, dev_priv->saveBSM); 332 + pci_write_config_dword(pdev, 0xFC, dev_priv->saveVBT); 333 + /* retoring MSI address and data in PCIx space */ 334 + pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr); 335 + pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data); 336 + ret = pci_enable_device(pdev); 337 + 338 + if (ret != 0) 339 + printk(KERN_ALERT "ospm_resume_pci: pci_enable_device failed: %d\n", ret); 340 + else 341 + gbSuspended = false; 342 + 343 + return !gbSuspended; 344 + } 345 + #endif 346 + /* 347 + * ospm_power_suspend 348 + * 349 + * Description: OSPM is telling our driver to suspend so save state 350 + * and power down all hardware. 
351 + */ 352 + int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state) 353 + { 354 + int ret = 0; 355 + int graphics_access_count; 356 + int videoenc_access_count; 357 + int videodec_access_count; 358 + int display_access_count; 359 + bool suspend_pci = true; 360 + 361 + if(gbSuspendInProgress || gbResumeInProgress) 362 + { 363 + #ifdef OSPM_GFX_DPK 364 + printk(KERN_ALERT "OSPM_GFX_DPK: %s system BUSY \n", __func__); 365 + #endif 366 + return -EBUSY; 367 + } 368 + 369 + mutex_lock(&power_mutex); 370 + 371 + if (!gbSuspended) { 372 + graphics_access_count = atomic_read(&g_graphics_access_count); 373 + videoenc_access_count = atomic_read(&g_videoenc_access_count); 374 + videodec_access_count = atomic_read(&g_videodec_access_count); 375 + display_access_count = atomic_read(&g_display_access_count); 376 + 377 + if (graphics_access_count || 378 + videoenc_access_count || 379 + videodec_access_count || 380 + display_access_count) 381 + ret = -EBUSY; 382 + 383 + if (!ret) { 384 + gbSuspendInProgress = true; 385 + 386 + psb_irq_uninstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND); 387 + ospm_suspend_display(gpDrmDevice); 388 + if (suspend_pci == true) { 389 + ospm_suspend_pci(pdev); 390 + } 391 + gbSuspendInProgress = false; 392 + } else { 393 + printk(KERN_ALERT "ospm_power_suspend: device busy: graphics %d videoenc %d videodec %d display %d\n", graphics_access_count, videoenc_access_count, videodec_access_count, display_access_count); 394 + } 395 + } 396 + 397 + 398 + mutex_unlock(&power_mutex); 399 + return ret; 400 + } 401 + 402 + /* 403 + * ospm_power_island_up 404 + * 405 + * Description: Restore power to the specified island(s) (powergating) 406 + */ 407 + void ospm_power_island_up(int hw_islands) 408 + { 409 + u32 pwr_cnt = 0; 410 + u32 pwr_sts = 0; 411 + u32 pwr_mask = 0; 412 + 413 + struct drm_psb_private *dev_priv = 414 + (struct drm_psb_private *) gpDrmDevice->dev_private; 415 + 416 + 417 + if (hw_islands & OSPM_DISPLAY_ISLAND) { 418 + pwr_mask = 
PSB_PWRGT_DISPLAY_MASK; 419 + 420 + pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC); 421 + pwr_cnt &= ~pwr_mask; 422 + outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC)); 423 + 424 + while (true) { 425 + pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS); 426 + if ((pwr_sts & pwr_mask) == 0) 427 + break; 428 + else 429 + udelay(10); 430 + } 431 + } 432 + 433 + g_hw_power_status_mask |= hw_islands; 434 + } 435 + 436 + /* 437 + * ospm_power_resume 438 + */ 439 + int ospm_power_resume(struct pci_dev *pdev) 440 + { 441 + if(gbSuspendInProgress || gbResumeInProgress) 442 + { 443 + #ifdef OSPM_GFX_DPK 444 + printk(KERN_ALERT "OSPM_GFX_DPK: %s hw_island: Suspend || gbResumeInProgress!!!! \n", __func__); 445 + #endif 446 + return 0; 447 + } 448 + 449 + mutex_lock(&power_mutex); 450 + 451 + #ifdef OSPM_GFX_DPK 452 + printk(KERN_ALERT "OSPM_GFX_DPK: ospm_power_resume \n"); 453 + #endif 454 + 455 + gbResumeInProgress = true; 456 + 457 + ospm_resume_pci(pdev); 458 + 459 + ospm_resume_display(gpDrmDevice->pdev); 460 + psb_irq_preinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND); 461 + psb_irq_postinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND); 462 + 463 + gbResumeInProgress = false; 464 + 465 + mutex_unlock(&power_mutex); 466 + 467 + return 0; 468 + } 469 + 470 + 471 + /* 472 + * ospm_power_island_down 473 + * 474 + * Description: Cut power to the specified island(s) (powergating) 475 + */ 476 + void ospm_power_island_down(int islands) 477 + { 478 + #if 0 479 + u32 pwr_cnt = 0; 480 + u32 pwr_mask = 0; 481 + u32 pwr_sts = 0; 482 + 483 + struct drm_psb_private *dev_priv = 484 + (struct drm_psb_private *) gpDrmDevice->dev_private; 485 + 486 + g_hw_power_status_mask &= ~islands; 487 + 488 + if (islands & OSPM_GRAPHICS_ISLAND) { 489 + pwr_cnt |= PSB_PWRGT_GFX_MASK; 490 + pwr_mask |= PSB_PWRGT_GFX_MASK; 491 + if (dev_priv->graphics_state == PSB_PWR_STATE_ON) { 492 + dev_priv->gfx_on_time += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ; 493 + dev_priv->gfx_last_mode_change = 
jiffies; 494 + dev_priv->graphics_state = PSB_PWR_STATE_OFF; 495 + dev_priv->gfx_off_cnt++; 496 + } 497 + } 498 + if (islands & OSPM_VIDEO_ENC_ISLAND) { 499 + pwr_cnt |= PSB_PWRGT_VID_ENC_MASK; 500 + pwr_mask |= PSB_PWRGT_VID_ENC_MASK; 501 + } 502 + if (islands & OSPM_VIDEO_DEC_ISLAND) { 503 + pwr_cnt |= PSB_PWRGT_VID_DEC_MASK; 504 + pwr_mask |= PSB_PWRGT_VID_DEC_MASK; 505 + } 506 + if (pwr_cnt) { 507 + pwr_cnt |= inl(dev_priv->apm_base); 508 + outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD); 509 + while (true) { 510 + pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS); 511 + 512 + if ((pwr_sts & pwr_mask) == pwr_mask) 513 + break; 514 + else 515 + udelay(10); 516 + } 517 + } 518 + 519 + if (islands & OSPM_DISPLAY_ISLAND) { 520 + pwr_mask = PSB_PWRGT_DISPLAY_MASK; 521 + 522 + outl(pwr_mask, (dev_priv->ospm_base + PSB_PM_SSC)); 523 + 524 + while (true) { 525 + pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS); 526 + if ((pwr_sts & pwr_mask) == pwr_mask) 527 + break; 528 + else 529 + udelay(10); 530 + } 531 + } 532 + #endif 533 + } 534 + 535 + 536 + /* 537 + * ospm_power_is_hw_on 538 + * 539 + * Description: do an instantaneous check for if the specified islands 540 + * are on. Only use this in cases where you know the g_state_change_mutex 541 + * is already held such as in irq install/uninstall. Otherwise, use 542 + * ospm_power_using_hw_begin(). 543 + */ 544 + bool ospm_power_is_hw_on(int hw_islands) 545 + { 546 + return ((g_hw_power_status_mask & hw_islands) == hw_islands) ? true:false; 547 + } 548 + 549 + /* 550 + * ospm_power_using_hw_begin 551 + * 552 + * Description: Notify PowerMgmt module that you will be accessing the 553 + * specified island's hw so don't power it off. If force_on is true, 554 + * this will power on the specified island if it is off. 555 + * Otherwise, this will return false and the caller is expected to not 556 + * access the hw. 
557 + * 558 + * NOTE *** If this is called from and interrupt handler or other atomic 559 + * context, then it will return false if we are in the middle of a 560 + * power state transition and the caller will be expected to handle that 561 + * even if force_on is set to true. 562 + */ 563 + bool ospm_power_using_hw_begin(int hw_island, UHBUsage usage) 564 + { 565 + return 1; /*FIXMEAC */ 566 + #if 0 567 + bool ret = true; 568 + bool island_is_off = false; 569 + bool b_atomic = (in_interrupt() || in_atomic()); 570 + bool locked = true; 571 + struct pci_dev *pdev = gpDrmDevice->pdev; 572 + u32 deviceID = 0; 573 + bool force_on = usage ? true: false; 574 + /*quick path, not 100% race safe, but should be enough comapre to current other code in this file */ 575 + if (!force_on) { 576 + if (hw_island & (OSPM_ALL_ISLANDS & ~g_hw_power_status_mask)) 577 + return false; 578 + else { 579 + locked = false; 580 + #ifdef CONFIG_PM_RUNTIME 581 + /* increment pm_runtime_refcount */ 582 + pm_runtime_get(&pdev->dev); 583 + #endif 584 + goto increase_count; 585 + } 586 + } 587 + 588 + 589 + if (!b_atomic) 590 + mutex_lock(&power_mutex); 591 + 592 + island_is_off = hw_island & (OSPM_ALL_ISLANDS & ~g_hw_power_status_mask); 593 + 594 + if (b_atomic && (gbSuspendInProgress || gbResumeInProgress || gbSuspended) && force_on && island_is_off) 595 + ret = false; 596 + 597 + if (ret && island_is_off && !force_on) 598 + ret = false; 599 + 600 + if (ret && island_is_off && force_on) { 601 + gbResumeInProgress = true; 602 + 603 + ret = ospm_resume_pci(pdev); 604 + 605 + if (ret) { 606 + switch(hw_island) 607 + { 608 + case OSPM_DISPLAY_ISLAND: 609 + deviceID = gui32MRSTDisplayDeviceID; 610 + ospm_resume_display(pdev); 611 + psb_irq_preinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND); 612 + psb_irq_postinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND); 613 + break; 614 + case OSPM_GRAPHICS_ISLAND: 615 + deviceID = gui32SGXDeviceID; 616 + ospm_power_island_up(OSPM_GRAPHICS_ISLAND); 617 + 
psb_irq_preinstall_islands(gpDrmDevice, OSPM_GRAPHICS_ISLAND); 618 + psb_irq_postinstall_islands(gpDrmDevice, OSPM_GRAPHICS_ISLAND); 619 + break; 620 + #if 1 621 + case OSPM_VIDEO_DEC_ISLAND: 622 + if(!ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) { 623 + //printk(KERN_ALERT "%s power on display for video decode use\n", __func__); 624 + deviceID = gui32MRSTDisplayDeviceID; 625 + ospm_resume_display(pdev); 626 + psb_irq_preinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND); 627 + psb_irq_postinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND); 628 + } 629 + else{ 630 + //printk(KERN_ALERT "%s display is already on for video decode use\n", __func__); 631 + } 632 + 633 + if(!ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND)) { 634 + //printk(KERN_ALERT "%s power on video decode\n", __func__); 635 + deviceID = gui32MRSTMSVDXDeviceID; 636 + ospm_power_island_up(OSPM_VIDEO_DEC_ISLAND); 637 + psb_irq_preinstall_islands(gpDrmDevice, OSPM_VIDEO_DEC_ISLAND); 638 + psb_irq_postinstall_islands(gpDrmDevice, OSPM_VIDEO_DEC_ISLAND); 639 + } 640 + else{ 641 + //printk(KERN_ALERT "%s video decode is already on\n", __func__); 642 + } 643 + 644 + break; 645 + case OSPM_VIDEO_ENC_ISLAND: 646 + if(!ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) { 647 + //printk(KERN_ALERT "%s power on display for video encode\n", __func__); 648 + deviceID = gui32MRSTDisplayDeviceID; 649 + ospm_resume_display(pdev); 650 + psb_irq_preinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND); 651 + psb_irq_postinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND); 652 + } 653 + else{ 654 + //printk(KERN_ALERT "%s display is already on for video encode use\n", __func__); 655 + } 656 + 657 + if(!ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) { 658 + //printk(KERN_ALERT "%s power on video encode\n", __func__); 659 + deviceID = gui32MRSTTOPAZDeviceID; 660 + ospm_power_island_up(OSPM_VIDEO_ENC_ISLAND); 661 + psb_irq_preinstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND); 662 + psb_irq_postinstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND); 
663 + } 664 + else{ 665 + //printk(KERN_ALERT "%s video decode is already on\n", __func__); 666 + } 667 + #endif 668 + break; 669 + 670 + default: 671 + printk(KERN_ALERT "%s unknown island !!!! \n", __func__); 672 + break; 673 + } 674 + 675 + } 676 + 677 + if (!ret) 678 + printk(KERN_ALERT "ospm_power_using_hw_begin: forcing on %d failed\n", hw_island); 679 + 680 + gbResumeInProgress = false; 681 + } 682 + increase_count: 683 + if (ret) { 684 + switch(hw_island) 685 + { 686 + case OSPM_GRAPHICS_ISLAND: 687 + atomic_inc(&g_graphics_access_count); 688 + break; 689 + case OSPM_VIDEO_ENC_ISLAND: 690 + atomic_inc(&g_videoenc_access_count); 691 + break; 692 + case OSPM_VIDEO_DEC_ISLAND: 693 + atomic_inc(&g_videodec_access_count); 694 + break; 695 + case OSPM_DISPLAY_ISLAND: 696 + atomic_inc(&g_display_access_count); 697 + break; 698 + } 699 + } 700 + 701 + if (!b_atomic && locked) 702 + mutex_unlock(&power_mutex); 703 + 704 + return ret; 705 + #endif 706 + } 707 + 708 + 709 + /* 710 + * ospm_power_using_hw_end 711 + * 712 + * Description: Notify PowerMgmt module that you are done accessing the 713 + * specified island's hw so feel free to power it off. Note that this 714 + * function doesn't actually power off the islands. 
715 + */ 716 + void ospm_power_using_hw_end(int hw_island) 717 + { 718 + #if 0 /* FIXMEAC */ 719 + switch(hw_island) 720 + { 721 + case OSPM_GRAPHICS_ISLAND: 722 + atomic_dec(&g_graphics_access_count); 723 + break; 724 + case OSPM_VIDEO_ENC_ISLAND: 725 + atomic_dec(&g_videoenc_access_count); 726 + break; 727 + case OSPM_VIDEO_DEC_ISLAND: 728 + atomic_dec(&g_videodec_access_count); 729 + break; 730 + case OSPM_DISPLAY_ISLAND: 731 + atomic_dec(&g_display_access_count); 732 + break; 733 + } 734 + 735 + //decrement runtime pm ref count 736 + pm_runtime_put(&gpDrmDevice->pdev->dev); 737 + 738 + WARN_ON(atomic_read(&g_graphics_access_count) < 0); 739 + WARN_ON(atomic_read(&g_videoenc_access_count) < 0); 740 + WARN_ON(atomic_read(&g_videodec_access_count) < 0); 741 + WARN_ON(atomic_read(&g_display_access_count) < 0); 742 + #endif 743 + } 744 + 745 + int ospm_runtime_pm_allow(struct drm_device * dev) 746 + { 747 + return 0; 748 + } 749 + 750 + void ospm_runtime_pm_forbid(struct drm_device * dev) 751 + { 752 + struct drm_psb_private * dev_priv = dev->dev_private; 753 + 754 + DRM_INFO("%s\n", __FUNCTION__); 755 + 756 + pm_runtime_forbid(&dev->pdev->dev); 757 + dev_priv->rpm_enabled = 0; 758 + } 759 + 760 + int psb_runtime_suspend(struct device *dev) 761 + { 762 + pm_message_t state; 763 + int ret = 0; 764 + state.event = 0; 765 + 766 + #ifdef OSPM_GFX_DPK 767 + printk(KERN_ALERT "OSPM_GFX_DPK: %s \n", __func__); 768 + #endif 769 + if (atomic_read(&g_graphics_access_count) || atomic_read(&g_videoenc_access_count) 770 + || atomic_read(&g_videodec_access_count) || atomic_read(&g_display_access_count)){ 771 + #ifdef OSPM_GFX_DPK 772 + printk(KERN_ALERT "OSPM_GFX_DPK: GFX: %d VEC: %d VED: %d DC: %d DSR: %d \n", atomic_read(&g_graphics_access_count), 773 + atomic_read(&g_videoenc_access_count), atomic_read(&g_videodec_access_count), atomic_read(&g_display_access_count)); 774 + #endif 775 + return -EBUSY; 776 + } 777 + else 778 + ret = ospm_power_suspend(gpDrmDevice->pdev, state); 
779 + 780 + return ret; 781 + } 782 + 783 + int psb_runtime_resume(struct device *dev) 784 + { 785 + return 0; 786 + } 787 + 788 + int psb_runtime_idle(struct device *dev) 789 + { 790 + /*printk (KERN_ALERT "lvds:%d,mipi:%d\n", dev_priv->is_lvds_on, dev_priv->is_mipi_on);*/ 791 + if (atomic_read(&g_graphics_access_count) || atomic_read(&g_videoenc_access_count) 792 + || atomic_read(&g_videodec_access_count) || atomic_read(&g_display_access_count)) 793 + return 1; 794 + else 795 + return 0; 796 + } 797 +
+96
drivers/staging/gma500/psb_powermgmt.h
··· 1 + /************************************************************************** 2 + * Copyright (c) 2009, Intel Corporation. 3 + * All Rights Reserved. 4 + 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice (including the next 13 + * paragraph) shall be included in all copies or substantial portions of the 14 + * Software. 15 + * 16 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 + * SOFTWARE. 
23 + * 24 + * Authors: 25 + * Benjamin Defnet <benjamin.r.defnet@intel.com> 26 + * Rajesh Poornachandran <rajesh.poornachandran@intel.com> 27 + * 28 + */ 29 + #ifndef _PSB_POWERMGMT_H_ 30 + #define _PSB_POWERMGMT_H_ 31 + 32 + #include <linux/pci.h> 33 + #include <drm/drmP.h> 34 + 35 + #define OSPM_GRAPHICS_ISLAND 0x1 36 + #define OSPM_VIDEO_ENC_ISLAND 0x2 37 + #define OSPM_VIDEO_DEC_ISLAND 0x4 38 + #define OSPM_DISPLAY_ISLAND 0x8 39 + #define OSPM_GL3_CACHE_ISLAND 0x10 40 + #define OSPM_ALL_ISLANDS 0x1f 41 + 42 + /* IPC message and command defines used to enable/disable mipi panel voltages */ 43 + #define IPC_MSG_PANEL_ON_OFF 0xE9 44 + #define IPC_CMD_PANEL_ON 1 45 + #define IPC_CMD_PANEL_OFF 0 46 + 47 + typedef enum _UHBUsage 48 + { 49 + OSPM_UHB_ONLY_IF_ON = 0, 50 + OSPM_UHB_FORCE_POWER_ON, 51 + } UHBUsage; 52 + 53 + /* Use these functions to power down video HW for D0i3 purpose */ 54 + 55 + void ospm_power_init(struct drm_device *dev); 56 + void ospm_power_uninit(void); 57 + 58 + 59 + /* 60 + * OSPM will call these functions 61 + */ 62 + int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state); 63 + int ospm_power_resume(struct pci_dev *pdev); 64 + 65 + /* 66 + * These are the functions the driver should use to wrap all hw access 67 + * (i.e. register reads and writes) 68 + */ 69 + bool ospm_power_using_hw_begin(int hw_island, UHBUsage usage); 70 + void ospm_power_using_hw_end(int hw_island); 71 + 72 + /* 73 + * Use this function to do an instantaneous check for if the hw is on. 74 + * Only use this in cases where you know the g_state_change_mutex 75 + * is already held such as in irq install/uninstall and you need to 76 + * prevent a deadlock situation. Otherwise use ospm_power_using_hw_begin(). 
77 + */ 78 + bool ospm_power_is_hw_on(int hw_islands); 79 + 80 + /* 81 + * Power up/down different hw component rails/islands 82 + */ 83 + void ospm_power_island_down(int hw_islands); 84 + void ospm_power_island_up(int hw_islands); 85 + void ospm_suspend_graphics(void); 86 + /* 87 + * GFX-Runtime PM callbacks 88 + */ 89 + int psb_runtime_suspend(struct device *dev); 90 + int psb_runtime_resume(struct device *dev); 91 + int psb_runtime_idle(struct device *dev); 92 + int ospm_runtime_pm_allow(struct drm_device * dev); 93 + void ospm_runtime_pm_forbid(struct drm_device * dev); 94 + 95 + 96 + #endif /*_PSB_POWERMGMT_H_*/
+73
drivers/staging/gma500/psb_pvr_glue.c
··· 1 + /* 2 + * Copyright (c) 2009, Intel Corporation. 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms and conditions of the GNU General Public License, 6 + * version 2, as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope it will be useful, but WITHOUT 9 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 + * more details. 12 + * 13 + * You should have received a copy of the GNU General Public License along with 14 + * this program; if not, write to the Free Software Foundation, Inc., 15 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 + * 17 + */ 18 + 19 + #include "psb_pvr_glue.h" 20 + 21 + /** 22 + * FIXME: should NOT use these file under env/linux directly 23 + */ 24 + 25 + int psb_get_meminfo_by_handle(void *hKernelMemInfo, 26 + void **ppsKernelMemInfo) 27 + { 28 + return -EINVAL; 29 + #if 0 30 + void *psKernelMemInfo = IMG_NULL; 31 + PVRSRV_PER_PROCESS_DATA *psPerProc = IMG_NULL; 32 + PVRSRV_ERROR eError; 33 + 34 + psPerProc = PVRSRVPerProcessData(task_tgid_nr(current)); 35 + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, 36 + (IMG_VOID *)&psKernelMemInfo, 37 + hKernelMemInfo, 38 + PVRSRV_HANDLE_TYPE_MEM_INFO); 39 + if (eError != PVRSRV_OK) { 40 + DRM_ERROR("Cannot find kernel meminfo for handle 0x%x\n", 41 + (u32)hKernelMemInfo); 42 + return -EINVAL; 43 + } 44 + 45 + *ppsKernelMemInfo = psKernelMemInfo; 46 + 47 + DRM_DEBUG("Got Kernel MemInfo for handle %lx\n", 48 + (u32)hKernelMemInfo); 49 + return 0; 50 + #endif 51 + } 52 + 53 + int psb_get_pages_by_mem_handle(void *hOSMemHandle, struct page ***pages) 54 + { 55 + return -EINVAL; 56 + #if 0 57 + LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; 58 + struct page **page_list; 59 + if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_ALLOC_PAGES) { 60 + DRM_ERROR("MemArea type 
is not LINUX_MEM_AREA_ALLOC_PAGES\n"); 61 + return -EINVAL; 62 + } 63 + 64 + page_list = psLinuxMemArea->uData.sPageList.pvPageList; 65 + if (!page_list) { 66 + DRM_DEBUG("Page List is NULL\n"); 67 + return -ENOMEM; 68 + } 69 + 70 + *pages = page_list; 71 + return 0; 72 + #endif 73 + }
+25
drivers/staging/gma500/psb_pvr_glue.h
··· 1 + /* 2 + * Copyright (c) 2009, Intel Corporation. 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms and conditions of the GNU General Public License, 6 + * version 2, as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope it will be useful, but WITHOUT 9 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 + * more details. 12 + * 13 + * You should have received a copy of the GNU General Public License along with 14 + * this program; if not, write to the Free Software Foundation, Inc., 15 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 + * 17 + */ 18 + 19 + #include "psb_drv.h" 20 + 21 + extern int psb_get_meminfo_by_handle(void * hKernelMemInfo, 22 + void **ppsKernelMemInfo); 23 + extern u32 psb_get_tgid(void); 24 + extern int psb_get_pages_by_mem_handle(void * hOSMemHandle, 25 + struct page ***pages);
+588
drivers/staging/gma500/psb_reg.h
··· 1 + /************************************************************************** 2 + * 3 + * Copyright (c) (2005-2007) Imagination Technologies Limited. 4 + * Copyright (c) 2007, Intel Corporation. 5 + * All Rights Reserved. 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms and conditions of the GNU General Public License, 9 + * version 2, as published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope it will be useful, but WITHOUT 12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 + * more details. 15 + * 16 + * You should have received a copy of the GNU General Public License along with 17 + * this program; if not, write to the Free Software Foundation, Inc., 18 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.. 19 + * 20 + **************************************************************************/ 21 + 22 + #ifndef _PSB_REG_H_ 23 + #define _PSB_REG_H_ 24 + 25 + #define PSB_CR_CLKGATECTL 0x0000 26 + #define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24) 27 + #define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20) 28 + #define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20) 29 + #define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16) 30 + #define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16) 31 + #define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12) 32 + #define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12) 33 + #define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8) 34 + #define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8) 35 + #define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4) 36 + #define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4) 37 + #define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0) 38 + #define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0) 39 + #define _PSB_C_CLKGATECTL_CLKG_ENABLED (0) 40 + #define _PSB_C_CLKGATECTL_CLKG_DISABLED (1) 41 + #define _PSB_C_CLKGATECTL_CLKG_AUTO (2) 42 + 43 + #define PSB_CR_CORE_ID 0x0010 
44 + #define _PSB_CC_ID_ID_SHIFT (16) 45 + #define _PSB_CC_ID_ID_MASK (0xFFFF << 16) 46 + #define _PSB_CC_ID_CONFIG_SHIFT (0) 47 + #define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0) 48 + 49 + #define PSB_CR_CORE_REVISION 0x0014 50 + #define _PSB_CC_REVISION_DESIGNER_SHIFT (24) 51 + #define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24) 52 + #define _PSB_CC_REVISION_MAJOR_SHIFT (16) 53 + #define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16) 54 + #define _PSB_CC_REVISION_MINOR_SHIFT (8) 55 + #define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8) 56 + #define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0) 57 + #define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0) 58 + 59 + #define PSB_CR_DESIGNER_REV_FIELD1 0x0018 60 + 61 + #define PSB_CR_SOFT_RESET 0x0080 62 + #define _PSB_CS_RESET_TSP_RESET (1 << 6) 63 + #define _PSB_CS_RESET_ISP_RESET (1 << 5) 64 + #define _PSB_CS_RESET_USE_RESET (1 << 4) 65 + #define _PSB_CS_RESET_TA_RESET (1 << 3) 66 + #define _PSB_CS_RESET_DPM_RESET (1 << 2) 67 + #define _PSB_CS_RESET_TWOD_RESET (1 << 1) 68 + #define _PSB_CS_RESET_BIF_RESET (1 << 0) 69 + 70 + #define PSB_CR_DESIGNER_REV_FIELD2 0x001C 71 + 72 + #define PSB_CR_EVENT_HOST_ENABLE2 0x0110 73 + 74 + #define PSB_CR_EVENT_STATUS2 0x0118 75 + 76 + #define PSB_CR_EVENT_HOST_CLEAR2 0x0114 77 + #define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4) 78 + 79 + #define PSB_CR_EVENT_STATUS 0x012C 80 + 81 + #define PSB_CR_EVENT_HOST_ENABLE 0x0130 82 + 83 + #define PSB_CR_EVENT_HOST_CLEAR 0x0134 84 + #define _PSB_CE_MASTER_INTERRUPT (1 << 31) 85 + #define _PSB_CE_TA_DPM_FAULT (1 << 28) 86 + #define _PSB_CE_TWOD_COMPLETE (1 << 27) 87 + #define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25) 88 + #define _PSB_CE_DPM_TA_MEM_FREE (1 << 24) 89 + #define _PSB_CE_PIXELBE_END_RENDER (1 << 18) 90 + #define _PSB_CE_SW_EVENT (1 << 14) 91 + #define _PSB_CE_TA_FINISHED (1 << 13) 92 + #define _PSB_CE_TA_TERMINATE (1 << 12) 93 + #define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3) 94 + #define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2) 95 + #define 
_PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1) 96 + #define _PSB_CE_DPM_3D_MEM_FREE (1 << 0) 97 + 98 + 99 + #define PSB_USE_OFFSET_MASK 0x0007FFFF 100 + #define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1) 101 + #define PSB_CR_USE_CODE_BASE0 0x0A0C 102 + #define PSB_CR_USE_CODE_BASE1 0x0A10 103 + #define PSB_CR_USE_CODE_BASE2 0x0A14 104 + #define PSB_CR_USE_CODE_BASE3 0x0A18 105 + #define PSB_CR_USE_CODE_BASE4 0x0A1C 106 + #define PSB_CR_USE_CODE_BASE5 0x0A20 107 + #define PSB_CR_USE_CODE_BASE6 0x0A24 108 + #define PSB_CR_USE_CODE_BASE7 0x0A28 109 + #define PSB_CR_USE_CODE_BASE8 0x0A2C 110 + #define PSB_CR_USE_CODE_BASE9 0x0A30 111 + #define PSB_CR_USE_CODE_BASE10 0x0A34 112 + #define PSB_CR_USE_CODE_BASE11 0x0A38 113 + #define PSB_CR_USE_CODE_BASE12 0x0A3C 114 + #define PSB_CR_USE_CODE_BASE13 0x0A40 115 + #define PSB_CR_USE_CODE_BASE14 0x0A44 116 + #define PSB_CR_USE_CODE_BASE15 0x0A48 117 + #define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2)) 118 + #define _PSB_CUC_BASE_DM_SHIFT (25) 119 + #define _PSB_CUC_BASE_DM_MASK (0x3 << 25) 120 + #define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? 
*/ 121 + #define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7) 122 + #define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0) 123 + #define _PSB_CUC_DM_VERTEX (0) 124 + #define _PSB_CUC_DM_PIXEL (1) 125 + #define _PSB_CUC_DM_RESERVED (2) 126 + #define _PSB_CUC_DM_EDM (3) 127 + 128 + #define PSB_CR_PDS_EXEC_BASE 0x0AB8 129 + #define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */ 130 + #define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20) 131 + 132 + #define PSB_CR_EVENT_KICKER 0x0AC4 133 + #define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */ 134 + 135 + #define PSB_CR_EVENT_KICK 0x0AC8 136 + #define _PSB_CE_KICK_NOW (1 << 0) 137 + 138 + 139 + #define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38 140 + 141 + #define PSB_CR_BIF_CTRL 0x0C00 142 + #define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4) 143 + #define _PSB_CB_CTRL_INVALDC (1 << 3) 144 + #define _PSB_CB_CTRL_FLUSH (1 << 2) 145 + 146 + #define PSB_CR_BIF_INT_STAT 0x0C04 147 + 148 + #define PSB_CR_BIF_FAULT 0x0C08 149 + #define _PSB_CBI_STAT_PF_N_RW (1 << 14) 150 + #define _PSB_CBI_STAT_FAULT_SHIFT (0) 151 + #define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0) 152 + #define _PSB_CBI_STAT_FAULT_CACHE (1 << 1) 153 + #define _PSB_CBI_STAT_FAULT_TA (1 << 2) 154 + #define _PSB_CBI_STAT_FAULT_VDM (1 << 3) 155 + #define _PSB_CBI_STAT_FAULT_2D (1 << 4) 156 + #define _PSB_CBI_STAT_FAULT_PBE (1 << 5) 157 + #define _PSB_CBI_STAT_FAULT_TSP (1 << 6) 158 + #define _PSB_CBI_STAT_FAULT_ISP (1 << 7) 159 + #define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8) 160 + #define _PSB_CBI_STAT_FAULT_HOST (1 << 9) 161 + 162 + #define PSB_CR_BIF_BANK0 0x0C78 163 + 164 + #define PSB_CR_BIF_BANK1 0x0C7C 165 + 166 + #define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84 167 + 168 + #define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88 169 + #define PSB_CR_BIF_3D_REQ_BASE 0x0CAC 170 + 171 + #define PSB_CR_2D_SOCIF 0x0E18 172 + #define _PSB_C2_SOCIF_FREESPACE_SHIFT (0) 173 + #define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0) 174 + #define _PSB_C2_SOCIF_EMPTY (0x80 << 0) 175 + 176 + #define 
PSB_CR_2D_BLIT_STATUS 0x0E04 177 + #define _PSB_C2B_STATUS_BUSY (1 << 24) 178 + #define _PSB_C2B_STATUS_COMPLETE_SHIFT (0) 179 + #define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0) 180 + 181 + /* 182 + * 2D defs. 183 + */ 184 + 185 + /* 186 + * 2D Slave Port Data : Block Header's Object Type 187 + */ 188 + 189 + #define PSB_2D_CLIP_BH (0x00000000) 190 + #define PSB_2D_PAT_BH (0x10000000) 191 + #define PSB_2D_CTRL_BH (0x20000000) 192 + #define PSB_2D_SRC_OFF_BH (0x30000000) 193 + #define PSB_2D_MASK_OFF_BH (0x40000000) 194 + #define PSB_2D_RESERVED1_BH (0x50000000) 195 + #define PSB_2D_RESERVED2_BH (0x60000000) 196 + #define PSB_2D_FENCE_BH (0x70000000) 197 + #define PSB_2D_BLIT_BH (0x80000000) 198 + #define PSB_2D_SRC_SURF_BH (0x90000000) 199 + #define PSB_2D_DST_SURF_BH (0xA0000000) 200 + #define PSB_2D_PAT_SURF_BH (0xB0000000) 201 + #define PSB_2D_SRC_PAL_BH (0xC0000000) 202 + #define PSB_2D_PAT_PAL_BH (0xD0000000) 203 + #define PSB_2D_MASK_SURF_BH (0xE0000000) 204 + #define PSB_2D_FLUSH_BH (0xF0000000) 205 + 206 + /* 207 + * Clip Definition block (PSB_2D_CLIP_BH) 208 + */ 209 + #define PSB_2D_CLIPCOUNT_MAX (1) 210 + #define PSB_2D_CLIPCOUNT_MASK (0x00000000) 211 + #define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF) 212 + #define PSB_2D_CLIPCOUNT_SHIFT (0) 213 + /* clip rectangle min & max */ 214 + #define PSB_2D_CLIP_XMAX_MASK (0x00FFF000) 215 + #define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF) 216 + #define PSB_2D_CLIP_XMAX_SHIFT (12) 217 + #define PSB_2D_CLIP_XMIN_MASK (0x00000FFF) 218 + #define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000) 219 + #define PSB_2D_CLIP_XMIN_SHIFT (0) 220 + /* clip rectangle offset */ 221 + #define PSB_2D_CLIP_YMAX_MASK (0x00FFF000) 222 + #define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF) 223 + #define PSB_2D_CLIP_YMAX_SHIFT (12) 224 + #define PSB_2D_CLIP_YMIN_MASK (0x00000FFF) 225 + #define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000) 226 + #define PSB_2D_CLIP_YMIN_SHIFT (0) 227 + 228 + /* 229 + * Pattern Control (PSB_2D_PAT_BH) 230 + */ 231 + #define 
PSB_2D_PAT_HEIGHT_MASK (0x0000001F) 232 + #define PSB_2D_PAT_HEIGHT_SHIFT (0) 233 + #define PSB_2D_PAT_WIDTH_MASK (0x000003E0) 234 + #define PSB_2D_PAT_WIDTH_SHIFT (5) 235 + #define PSB_2D_PAT_YSTART_MASK (0x00007C00) 236 + #define PSB_2D_PAT_YSTART_SHIFT (10) 237 + #define PSB_2D_PAT_XSTART_MASK (0x000F8000) 238 + #define PSB_2D_PAT_XSTART_SHIFT (15) 239 + 240 + /* 241 + * 2D Control block (PSB_2D_CTRL_BH) 242 + */ 243 + /* Present Flags */ 244 + #define PSB_2D_SRCCK_CTRL (0x00000001) 245 + #define PSB_2D_DSTCK_CTRL (0x00000002) 246 + #define PSB_2D_ALPHA_CTRL (0x00000004) 247 + /* Colour Key Colour (SRC/DST)*/ 248 + #define PSB_2D_CK_COL_MASK (0xFFFFFFFF) 249 + #define PSB_2D_CK_COL_CLRMASK (0x00000000) 250 + #define PSB_2D_CK_COL_SHIFT (0) 251 + /* Colour Key Mask (SRC/DST)*/ 252 + #define PSB_2D_CK_MASK_MASK (0xFFFFFFFF) 253 + #define PSB_2D_CK_MASK_CLRMASK (0x00000000) 254 + #define PSB_2D_CK_MASK_SHIFT (0) 255 + /* Alpha Control (Alpha/RGB)*/ 256 + #define PSB_2D_GBLALPHA_MASK (0x000FF000) 257 + #define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF) 258 + #define PSB_2D_GBLALPHA_SHIFT (12) 259 + #define PSB_2D_SRCALPHA_OP_MASK (0x00700000) 260 + #define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF) 261 + #define PSB_2D_SRCALPHA_OP_SHIFT (20) 262 + #define PSB_2D_SRCALPHA_OP_ONE (0x00000000) 263 + #define PSB_2D_SRCALPHA_OP_SRC (0x00100000) 264 + #define PSB_2D_SRCALPHA_OP_DST (0x00200000) 265 + #define PSB_2D_SRCALPHA_OP_SG (0x00300000) 266 + #define PSB_2D_SRCALPHA_OP_DG (0x00400000) 267 + #define PSB_2D_SRCALPHA_OP_GBL (0x00500000) 268 + #define PSB_2D_SRCALPHA_OP_ZERO (0x00600000) 269 + #define PSB_2D_SRCALPHA_INVERT (0x00800000) 270 + #define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF) 271 + #define PSB_2D_DSTALPHA_OP_MASK (0x07000000) 272 + #define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF) 273 + #define PSB_2D_DSTALPHA_OP_SHIFT (24) 274 + #define PSB_2D_DSTALPHA_OP_ONE (0x00000000) 275 + #define PSB_2D_DSTALPHA_OP_SRC (0x01000000) 276 + #define PSB_2D_DSTALPHA_OP_DST 
(0x02000000) 277 + #define PSB_2D_DSTALPHA_OP_SG (0x03000000) 278 + #define PSB_2D_DSTALPHA_OP_DG (0x04000000) 279 + #define PSB_2D_DSTALPHA_OP_GBL (0x05000000) 280 + #define PSB_2D_DSTALPHA_OP_ZERO (0x06000000) 281 + #define PSB_2D_DSTALPHA_INVERT (0x08000000) 282 + #define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF) 283 + 284 + #define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000) 285 + #define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF) 286 + #define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000) 287 + #define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF) 288 + 289 + /* 290 + *Source Offset (PSB_2D_SRC_OFF_BH) 291 + */ 292 + #define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12) 293 + #define PSB_2D_SRCOFF_XSTART_SHIFT (12) 294 + #define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF) 295 + #define PSB_2D_SRCOFF_YSTART_SHIFT (0) 296 + 297 + /* 298 + * Mask Offset (PSB_2D_MASK_OFF_BH) 299 + */ 300 + #define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12) 301 + #define PSB_2D_MASKOFF_XSTART_SHIFT (12) 302 + #define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF) 303 + #define PSB_2D_MASKOFF_YSTART_SHIFT (0) 304 + 305 + /* 306 + * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored 307 + */ 308 + 309 + /* 310 + *Blit Rectangle (PSB_2D_BLIT_BH) 311 + */ 312 + 313 + #define PSB_2D_ROT_MASK (3<<25) 314 + #define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK) 315 + #define PSB_2D_ROT_NONE (0<<25) 316 + #define PSB_2D_ROT_90DEGS (1<<25) 317 + #define PSB_2D_ROT_180DEGS (2<<25) 318 + #define PSB_2D_ROT_270DEGS (3<<25) 319 + 320 + #define PSB_2D_COPYORDER_MASK (3<<23) 321 + #define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK) 322 + #define PSB_2D_COPYORDER_TL2BR (0<<23) 323 + #define PSB_2D_COPYORDER_BR2TL (1<<23) 324 + #define PSB_2D_COPYORDER_TR2BL (2<<23) 325 + #define PSB_2D_COPYORDER_BL2TR (3<<23) 326 + 327 + #define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF) 328 + #define PSB_2D_DSTCK_DISABLE (0x00000000) 329 + #define PSB_2D_DSTCK_PASS (0x00200000) 330 + #define PSB_2D_DSTCK_REJECT (0x00400000) 
331 + 332 + #define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF) 333 + #define PSB_2D_SRCCK_DISABLE (0x00000000) 334 + #define PSB_2D_SRCCK_PASS (0x00080000) 335 + #define PSB_2D_SRCCK_REJECT (0x00100000) 336 + 337 + #define PSB_2D_CLIP_ENABLE (0x00040000) 338 + 339 + #define PSB_2D_ALPHA_ENABLE (0x00020000) 340 + 341 + #define PSB_2D_PAT_CLRMASK (0xFFFEFFFF) 342 + #define PSB_2D_PAT_MASK (0x00010000) 343 + #define PSB_2D_USE_PAT (0x00010000) 344 + #define PSB_2D_USE_FILL (0x00000000) 345 + /* 346 + * Tungsten Graphics note on rop codes: If rop A and rop B are 347 + * identical, the mask surface will not be read and need not be 348 + * set up. 349 + */ 350 + 351 + #define PSB_2D_ROP3B_MASK (0x0000FF00) 352 + #define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF) 353 + #define PSB_2D_ROP3B_SHIFT (8) 354 + /* rop code A */ 355 + #define PSB_2D_ROP3A_MASK (0x000000FF) 356 + #define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00) 357 + #define PSB_2D_ROP3A_SHIFT (0) 358 + 359 + #define PSB_2D_ROP4_MASK (0x0000FFFF) 360 + /* 361 + * DWORD0: (Only pass if Pattern control == Use Fill Colour) 362 + * Fill Colour RGBA8888 363 + */ 364 + #define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF) 365 + #define PSB_2D_FILLCOLOUR_SHIFT (0) 366 + /* 367 + * DWORD1: (Always Present) 368 + * X Start (Dest) 369 + * Y Start (Dest) 370 + */ 371 + #define PSB_2D_DST_XSTART_MASK (0x00FFF000) 372 + #define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF) 373 + #define PSB_2D_DST_XSTART_SHIFT (12) 374 + #define PSB_2D_DST_YSTART_MASK (0x00000FFF) 375 + #define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000) 376 + #define PSB_2D_DST_YSTART_SHIFT (0) 377 + /* 378 + * DWORD2: (Always Present) 379 + * X Size (Dest) 380 + * Y Size (Dest) 381 + */ 382 + #define PSB_2D_DST_XSIZE_MASK (0x00FFF000) 383 + #define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF) 384 + #define PSB_2D_DST_XSIZE_SHIFT (12) 385 + #define PSB_2D_DST_YSIZE_MASK (0x00000FFF) 386 + #define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000) 387 + #define PSB_2D_DST_YSIZE_SHIFT (0) 388 + 389 + /* 390 + * Source Surface 
(PSB_2D_SRC_SURF_BH) 391 + */ 392 + /* 393 + * WORD 0 394 + */ 395 + 396 + #define PSB_2D_SRC_FORMAT_MASK (0x00078000) 397 + #define PSB_2D_SRC_1_PAL (0x00000000) 398 + #define PSB_2D_SRC_2_PAL (0x00008000) 399 + #define PSB_2D_SRC_4_PAL (0x00010000) 400 + #define PSB_2D_SRC_8_PAL (0x00018000) 401 + #define PSB_2D_SRC_8_ALPHA (0x00020000) 402 + #define PSB_2D_SRC_4_ALPHA (0x00028000) 403 + #define PSB_2D_SRC_332RGB (0x00030000) 404 + #define PSB_2D_SRC_4444ARGB (0x00038000) 405 + #define PSB_2D_SRC_555RGB (0x00040000) 406 + #define PSB_2D_SRC_1555ARGB (0x00048000) 407 + #define PSB_2D_SRC_565RGB (0x00050000) 408 + #define PSB_2D_SRC_0888ARGB (0x00058000) 409 + #define PSB_2D_SRC_8888ARGB (0x00060000) 410 + #define PSB_2D_SRC_8888UYVY (0x00068000) 411 + #define PSB_2D_SRC_RESERVED (0x00070000) 412 + #define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000) 413 + 414 + 415 + #define PSB_2D_SRC_STRIDE_MASK (0x00007FFF) 416 + #define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000) 417 + #define PSB_2D_SRC_STRIDE_SHIFT (0) 418 + /* 419 + * WORD 1 - Base Address 420 + */ 421 + #define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC) 422 + #define PSB_2D_SRC_ADDR_CLRMASK (0x00000003) 423 + #define PSB_2D_SRC_ADDR_SHIFT (2) 424 + #define PSB_2D_SRC_ADDR_ALIGNSHIFT (2) 425 + 426 + /* 427 + * Pattern Surface (PSB_2D_PAT_SURF_BH) 428 + */ 429 + /* 430 + * WORD 0 431 + */ 432 + 433 + #define PSB_2D_PAT_FORMAT_MASK (0x00078000) 434 + #define PSB_2D_PAT_1_PAL (0x00000000) 435 + #define PSB_2D_PAT_2_PAL (0x00008000) 436 + #define PSB_2D_PAT_4_PAL (0x00010000) 437 + #define PSB_2D_PAT_8_PAL (0x00018000) 438 + #define PSB_2D_PAT_8_ALPHA (0x00020000) 439 + #define PSB_2D_PAT_4_ALPHA (0x00028000) 440 + #define PSB_2D_PAT_332RGB (0x00030000) 441 + #define PSB_2D_PAT_4444ARGB (0x00038000) 442 + #define PSB_2D_PAT_555RGB (0x00040000) 443 + #define PSB_2D_PAT_1555ARGB (0x00048000) 444 + #define PSB_2D_PAT_565RGB (0x00050000) 445 + #define PSB_2D_PAT_0888ARGB (0x00058000) 446 + #define PSB_2D_PAT_8888ARGB (0x00060000) 447 
+ 448 + #define PSB_2D_PAT_STRIDE_MASK (0x00007FFF) 449 + #define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000) 450 + #define PSB_2D_PAT_STRIDE_SHIFT (0) 451 + /* 452 + * WORD 1 - Base Address 453 + */ 454 + #define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC) 455 + #define PSB_2D_PAT_ADDR_CLRMASK (0x00000003) 456 + #define PSB_2D_PAT_ADDR_SHIFT (2) 457 + #define PSB_2D_PAT_ADDR_ALIGNSHIFT (2) 458 + 459 + /* 460 + * Destination Surface (PSB_2D_DST_SURF_BH) 461 + */ 462 + /* 463 + * WORD 0 464 + */ 465 + 466 + #define PSB_2D_DST_FORMAT_MASK (0x00078000) 467 + #define PSB_2D_DST_332RGB (0x00030000) 468 + #define PSB_2D_DST_4444ARGB (0x00038000) 469 + #define PSB_2D_DST_555RGB (0x00040000) 470 + #define PSB_2D_DST_1555ARGB (0x00048000) 471 + #define PSB_2D_DST_565RGB (0x00050000) 472 + #define PSB_2D_DST_0888ARGB (0x00058000) 473 + #define PSB_2D_DST_8888ARGB (0x00060000) 474 + #define PSB_2D_DST_8888AYUV (0x00070000) 475 + 476 + #define PSB_2D_DST_STRIDE_MASK (0x00007FFF) 477 + #define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000) 478 + #define PSB_2D_DST_STRIDE_SHIFT (0) 479 + /* 480 + * WORD 1 - Base Address 481 + */ 482 + #define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC) 483 + #define PSB_2D_DST_ADDR_CLRMASK (0x00000003) 484 + #define PSB_2D_DST_ADDR_SHIFT (2) 485 + #define PSB_2D_DST_ADDR_ALIGNSHIFT (2) 486 + 487 + /* 488 + * Mask Surface (PSB_2D_MASK_SURF_BH) 489 + */ 490 + /* 491 + * WORD 0 492 + */ 493 + #define PSB_2D_MASK_STRIDE_MASK (0x00007FFF) 494 + #define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000) 495 + #define PSB_2D_MASK_STRIDE_SHIFT (0) 496 + /* 497 + * WORD 1 - Base Address 498 + */ 499 + #define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC) 500 + #define PSB_2D_MASK_ADDR_CLRMASK (0x00000003) 501 + #define PSB_2D_MASK_ADDR_SHIFT (2) 502 + #define PSB_2D_MASK_ADDR_ALIGNSHIFT (2) 503 + 504 + /* 505 + * Source Palette (PSB_2D_SRC_PAL_BH) 506 + */ 507 + 508 + #define PSB_2D_SRCPAL_ADDR_SHIFT (0) 509 + #define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007) 510 + #define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8) 
511 + #define PSB_2D_SRCPAL_BYTEALIGN (1024) 512 + 513 + /* 514 + * Pattern Palette (PSB_2D_PAT_PAL_BH) 515 + */ 516 + 517 + #define PSB_2D_PATPAL_ADDR_SHIFT (0) 518 + #define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007) 519 + #define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8) 520 + #define PSB_2D_PATPAL_BYTEALIGN (1024) 521 + 522 + /* 523 + * Rop3 Codes (2 LS bytes) 524 + */ 525 + 526 + #define PSB_2D_ROP3_SRCCOPY (0xCCCC) 527 + #define PSB_2D_ROP3_PATCOPY (0xF0F0) 528 + #define PSB_2D_ROP3_WHITENESS (0xFFFF) 529 + #define PSB_2D_ROP3_BLACKNESS (0x0000) 530 + #define PSB_2D_ROP3_SRC (0xCC) 531 + #define PSB_2D_ROP3_PAT (0xF0) 532 + #define PSB_2D_ROP3_DST (0xAA) 533 + 534 + 535 + /* 536 + * Sizes. 537 + */ 538 + 539 + #define PSB_SCENE_HW_COOKIE_SIZE 16 540 + #define PSB_TA_MEM_HW_COOKIE_SIZE 16 541 + 542 + /* 543 + * Scene stuff. 544 + */ 545 + 546 + #define PSB_NUM_HW_SCENES 2 547 + 548 + /* 549 + * Scheduler completion actions. 550 + */ 551 + 552 + #define PSB_RASTER_BLOCK 0 553 + #define PSB_RASTER 1 554 + #define PSB_RETURN 2 555 + #define PSB_TA 3 556 + 557 + 558 + /*Power management*/ 559 + #define PSB_PUNIT_PORT 0x04 560 + #define PSB_OSPMBA 0x78 561 + #define PSB_APMBA 0x7a 562 + #define PSB_APM_CMD 0x0 563 + #define PSB_APM_STS 0x04 564 + #define PSB_PWRGT_VID_ENC_MASK 0x30 565 + #define PSB_PWRGT_VID_DEC_MASK 0xc 566 + #define PSB_PWRGT_GL3_MASK 0xc0 567 + 568 + #define PSB_PM_SSC 0x20 569 + #define PSB_PM_SSS 0x30 570 + #define PSB_PWRGT_DISPLAY_MASK 0xc /*on a different BA than video/gfx*/ 571 + #define MDFLD_PWRGT_DISPLAY_A_CNTR 0x0000000c 572 + #define MDFLD_PWRGT_DISPLAY_B_CNTR 0x0000c000 573 + #define MDFLD_PWRGT_DISPLAY_C_CNTR 0x00030000 574 + #define MDFLD_PWRGT_DISP_MIPI_CNTR 0x000c0000 575 + #define MDFLD_PWRGT_DISPLAY_CNTR (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR)// 0x000fc00c 576 + // Display SSS register bits are different in A0 vs. 
B0 577 + #define PSB_PWRGT_GFX_MASK 0x3 578 + #define MDFLD_PWRGT_DISPLAY_A_STS 0x000000c0 579 + #define MDFLD_PWRGT_DISPLAY_B_STS 0x00000300 580 + #define MDFLD_PWRGT_DISPLAY_C_STS 0x00000c00 581 + #define PSB_PWRGT_GFX_MASK_B0 0xc3 582 + #define MDFLD_PWRGT_DISPLAY_A_STS_B0 0x0000000c 583 + #define MDFLD_PWRGT_DISPLAY_B_STS_B0 0x0000c000 584 + #define MDFLD_PWRGT_DISPLAY_C_STS_B0 0x00030000 585 + #define MDFLD_PWRGT_DISP_MIPI_STS 0x000c0000 586 + #define MDFLD_PWRGT_DISPLAY_STS_A0 (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS)// 0x000fc00c 587 + #define MDFLD_PWRGT_DISPLAY_STS_B0 (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS)// 0x000fc00c 588 + #endif
+90
drivers/staging/gma500/psb_reset.c
/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 **************************************************************************/

#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_reg.h"
#include "psb_intel_reg.h"
#include <linux/spinlock.h>

/*
 * Timer callback that polls the cached lid state and, when it changes,
 * powers the LVDS panel and backlight up (lid opened) or down (lid
 * closed).  @data is the drm_psb_private pointer stashed in timer->data
 * by psb_lid_timer_init().  The timer re-arms itself at the end so the
 * lid is polled every PSB_LID_DELAY jiffies.
 */
static void psb_lid_timer_func(unsigned long data)
{
	struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
	struct drm_device *dev = (struct drm_device *)dev_priv->dev;
	struct timer_list *lid_timer = &dev_priv->lid_timer;
	unsigned long irq_flags;
	u32 *lid_state = dev_priv->lid_state;
	u32 pp_status;

	/* Nothing changed since the last poll: just re-arm the timer. */
	if (*lid_state == dev_priv->lid_last_state)
		goto lid_timer_schedule;

	if ((*lid_state) & 0x01) {
		/*lid state is open*/
		/* Request panel power on and busy-wait until PP_ON reports it. */
		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
		do {
			pp_status = REG_READ(PP_STATUS);
		} while ((pp_status & PP_ON) == 0);

		/*FIXME: should be backlight level before*/
		psb_intel_lvds_set_brightness(dev, 100);
	} else {
		psb_intel_lvds_set_brightness(dev, 0);

		/* Request panel power off. */
		REG_WRITE(PP_CONTROL,
			  REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
		do {
			pp_status = REG_READ(PP_STATUS);
		} while ((pp_status & PP_ON) == 0);
		/*
		 * NOTE(review): in this power-*down* path the loop exits
		 * while PP_ON is still set, i.e. immediately.  Waiting for
		 * PP_ON to clear ((pp_status & PP_ON) != 0) looks like the
		 * intent -- confirm against the PP_STATUS register spec
		 * before changing.
		 */
	}
	/* printk(KERN_INFO"%s: lid: closed\n", __FUNCTION__); */

	dev_priv->lid_last_state = *lid_state;

lid_timer_schedule:
	/* Re-arm unless someone else already queued the timer. */
	spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
	if (!timer_pending(lid_timer)) {
		lid_timer->expires = jiffies + PSB_LID_DELAY;
		add_timer(lid_timer);
	}
	spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
}

/*
 * Set up and start the lid-state polling timer.  The dev_priv pointer is
 * passed to the callback through timer->data (pre-timer_setup() API).
 */
void psb_lid_timer_init(struct drm_psb_private *dev_priv)
{
	struct timer_list *lid_timer = &dev_priv->lid_timer;
	unsigned long irq_flags;

	spin_lock_init(&dev_priv->lid_lock);
	spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);

	init_timer(lid_timer);

	lid_timer->data = (unsigned long)dev_priv;
	lid_timer->function = psb_lid_timer_func;
	lid_timer->expires = jiffies + PSB_LID_DELAY;

	add_timer(lid_timer);
	spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
}

/*
 * Stop the lid timer, waiting for a concurrently running callback to
 * finish (del_timer_sync), e.g. on driver unload.
 */
void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
{
	del_timer_sync(&dev_priv->lid_timer);
}
+238
drivers/staging/gma500/psb_sgx.c
/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX. USA.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_drm.h"
#include "psb_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_execbuf_util.h"
#include "psb_ttm_userobj_api.h"
#include "ttm/ttm_placement.h"
#include "psb_sgx.h"
#include "psb_intel_reg.h"
#include "psb_powermgmt.h"


/* True when the two byte offsets fall within the same page. */
static inline int psb_same_page(unsigned long offset,
				unsigned long offset2)
{
	return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
}

/* First byte past the page containing @offset, clamped to @end. */
static inline unsigned long psb_offset_end(unsigned long offset,
					   unsigned long end)
{
	offset = (offset + PAGE_SIZE) & PAGE_MASK;
	return (end < offset) ? end : offset;
}

/* Cached kmap of the current destination buffer page during relocation. */
struct psb_dstbuf_cache {
	unsigned int dst;
	struct ttm_buffer_object *dst_buf;
	unsigned long dst_offset;
	uint32_t *dst_page;
	unsigned int dst_page_offset;
	struct ttm_bo_kmap_obj dst_kmap;
	bool dst_is_iomem;
};

/* Per-buffer bookkeeping for a validate pass (wraps the TTM entry). */
struct psb_validate_buffer {
	struct ttm_validate_buffer base;
	struct psb_validate_req req;
	int ret;
	struct psb_validate_arg __user *user_val_arg;
	uint32_t flags;
	uint32_t offset;
	int po_correct;
};

/*
 * Check GPU-access flags and placement for @bo and compute the fence type
 * to attach on the next fence.  If the buffer already carries a fence of a
 * different class, or one whose types do not cover the new type, idle the
 * buffer first (ttm_bo_wait).  Returns 0 or a negative error code.
 */
static int
psb_placement_fence_type(struct ttm_buffer_object *bo,
			 uint64_t set_val_flags,
			 uint64_t clr_val_flags,
			 uint32_t new_fence_class,
			 uint32_t *new_fence_type)
{
	int ret;
	uint32_t n_fence_type;
	/*
	uint32_t set_flags = set_val_flags & 0xFFFFFFFF;
	uint32_t clr_flags = clr_val_flags & 0xFFFFFFFF;
	*/
	struct ttm_fence_object *old_fence;
	uint32_t old_fence_type;
	struct ttm_placement placement;

	if (unlikely
	    (!(set_val_flags &
	       (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)))) {
		DRM_ERROR
		    ("GPU access type (read / write) is not indicated.\n");
		return -EINVAL;
	}

	/* User space driver doesn't set any TTM placement flags in
	   set_val_flags or clr_val_flags */
	placement.num_placement = 0;/* FIXME */
	placement.num_busy_placement = 0;
	placement.fpfn = 0;
	placement.lpfn = 0;
	ret = psb_ttm_bo_check_placement(bo, &placement);
	if (unlikely(ret != 0))
		return ret;

	/* Only one fence class exists today, so every class maps to EXE. */
	switch (new_fence_class) {
	default:
		n_fence_type = _PSB_FENCE_TYPE_EXE;
	}

	*new_fence_type = n_fence_type;
	old_fence = (struct ttm_fence_object *) bo->sync_obj;
	old_fence_type = (uint32_t) (unsigned long) bo->sync_obj_arg;

	if (old_fence && ((new_fence_class != old_fence->fence_class) ||
			  ((n_fence_type ^ old_fence_type) &
			   old_fence_type))) {
		ret = ttm_bo_wait(bo, 0, 1, 0);
		if (unlikely(ret != 0))
			return ret;
	}
	/*
	bo->proposed_flags = (bo->proposed_flags | set_flags)
		& ~clr_flags & TTM_PL_MASK_MEMTYPE;
	*/
	return 0;
}

/*
 * Reserve a kernel-owned buffer object and queue it on the context's
 * kernel validate list so it gets fenced with the rest of the submission.
 * On success the buffer is reserved and referenced; callers rely on the
 * context teardown to release it.  Returns 0 or a negative error code.
 */
int psb_validate_kernel_buffer(struct psb_context *context,
			       struct ttm_buffer_object *bo,
			       uint32_t fence_class,
			       uint64_t set_flags, uint64_t clr_flags)
{
	struct psb_validate_buffer *item;
	uint32_t cur_fence_type;
	int ret;

	if (unlikely(context->used_buffers >= PSB_NUM_VALIDATE_BUFFERS)) {
		DRM_ERROR("Out of free validation buffer entries for "
			  "kernel buffer validation.\n");
		return -ENOMEM;
	}

	item = &context->buffers[context->used_buffers];
	item->user_val_arg = NULL;
	item->base.reserved = 0;

	/* Interruptible reserve, ordered by the context's validate seq. */
	ret = ttm_bo_reserve(bo, 1, 0, 1, context->val_seq);
	if (unlikely(ret != 0))
		return ret;

	ret = psb_placement_fence_type(bo, set_flags, clr_flags, fence_class,
				       &cur_fence_type);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	item->base.bo = ttm_bo_reference(bo);
	item->base.new_sync_obj_arg = (void *) (unsigned long) cur_fence_type;
	item->base.reserved = 1;

	/* Internal locking ??? FIXMEAC */
	list_add_tail(&item->base.head, &context->kern_validate_list);
	context->used_buffers++;
	/*
	ret = ttm_bo_validate(bo, 1, 0, 0);
	if (unlikely(ret != 0))
		goto out_unlock;
	*/
	item->offset = bo->offset;
	item->flags = bo->mem.placement;
	context->fence_types |= cur_fence_type;

	return ret;
}

/*
 * Create and emit a fence for the buffers on @list, reporting the fence
 * handle back to user space through @fence_arg unless DRM_PSB_FENCE_NO_USER
 * is set.  If fence creation fails, fall back to a synchronous path:
 * back off the reservations and signal user space with handle ~0.  When
 * @fence_p is non-NULL the caller receives a reference to the fence,
 * otherwise the local reference is dropped before returning.
 */
void psb_fence_or_sync(struct drm_file *file_priv,
		       uint32_t engine,
		       uint32_t fence_types,
		       uint32_t fence_flags,
		       struct list_head *list,
		       struct psb_ttm_fence_rep *fence_arg,
		       struct ttm_fence_object **fence_p)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_psb_private *dev_priv = psb_priv(dev);
	struct ttm_fence_device *fdev = &dev_priv->fdev;
	int ret;
	struct ttm_fence_object *fence;
	struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
	uint32_t handle;

	ret = ttm_fence_user_create(fdev, tfile,
				    engine, fence_types,
				    TTM_FENCE_FLAG_EMIT, &fence, &handle);
	if (ret) {

		/*
		 * Fence creation failed.
		 * Fall back to synchronous operation and idle the engine.
		 */

		if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {

			/*
			 * Communicate to user-space that
			 * fence creation has failed and that
			 * the engine is idle.
			 */

			fence_arg->handle = ~0;
			fence_arg->error = ret;
		}

		ttm_eu_backoff_reservation(list);
		if (fence_p)
			*fence_p = NULL;
		return;
	}

	ttm_eu_fence_buffer_objects(list, fence);
	if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
		struct ttm_fence_info info = ttm_fence_get_info(fence);
		fence_arg->handle = handle;
		fence_arg->fence_class = ttm_fence_class(fence);
		fence_arg->fence_type = ttm_fence_types(fence);
		fence_arg->signaled_types = info.signaled_types;
		fence_arg->error = 0;
	} else {
		/* User space never sees the handle: drop its ref now. */
		ret =
		    ttm_ref_object_base_unref(tfile, handle,
					      ttm_fence_type);
		BUG_ON(ret);
	}

	if (fence_p)
		*fence_p = fence;
	else if (fence)
		ttm_fence_object_unref(&fence);
}
+32
drivers/staging/gma500/psb_sgx.h
/*
 * Copyright (c) 2008, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 **/
#ifndef _PSB_SGX_H_
#define _PSB_SGX_H_

/*
 * Submit @cmd_size bytes of a video command buffer starting at
 * @cmd_offset within @cmd_buffer, associated with @fence.
 * Defined elsewhere in the driver; returns 0 or a negative error code.
 */
extern int psb_submit_video_cmdbuf(struct drm_device *dev,
				   struct ttm_buffer_object *cmd_buffer,
				   unsigned long cmd_offset,
				   unsigned long cmd_size,
				   struct ttm_fence_object *fence);

/* Module-wide idle-poll interval; definition lives in another unit. */
extern int drm_idle_check_interval;

#endif
+605
drivers/staging/gma500/psb_ttm_fence.c
··· 1 + /************************************************************************** 2 + * 3 + * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA 4 + * All Rights Reserved. 5 + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA 6 + * All Rights Reserved. 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms and conditions of the GNU General Public License, 10 + * version 2, as published by the Free Software Foundation. 11 + * 12 + * This program is distributed in the hope it will be useful, but WITHOUT 13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 + * more details. 16 + * 17 + * You should have received a copy of the GNU General Public License along with 18 + * this program; if not, write to the Free Software Foundation, Inc., 19 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 + * 21 + **************************************************************************/ 22 + /* 23 + * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> 24 + */ 25 + 26 + #include "psb_ttm_fence_api.h" 27 + #include "psb_ttm_fence_driver.h" 28 + #include <linux/wait.h> 29 + #include <linux/sched.h> 30 + 31 + #include <drm/drmP.h> 32 + 33 + /* 34 + * Simple implementation for now. 35 + */ 36 + 37 + static void ttm_fence_lockup(struct ttm_fence_object *fence, uint32_t mask) 38 + { 39 + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); 40 + 41 + printk(KERN_ERR "GPU lockup dectected on engine %u " 42 + "fence type 0x%08x\n", 43 + (unsigned int)fence->fence_class, (unsigned int)mask); 44 + /* 45 + * Give engines some time to idle? 
46 + */ 47 + 48 + write_lock(&fc->lock); 49 + ttm_fence_handler(fence->fdev, fence->fence_class, 50 + fence->sequence, mask, -EBUSY); 51 + write_unlock(&fc->lock); 52 + } 53 + 54 + /* 55 + * Convenience function to be called by fence::wait methods that 56 + * need polling. 57 + */ 58 + 59 + int ttm_fence_wait_polling(struct ttm_fence_object *fence, bool lazy, 60 + bool interruptible, uint32_t mask) 61 + { 62 + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); 63 + const struct ttm_fence_driver *driver = ttm_fence_driver(fence); 64 + uint32_t count = 0; 65 + int ret; 66 + unsigned long end_jiffies = fence->timeout_jiffies; 67 + 68 + DECLARE_WAITQUEUE(entry, current); 69 + add_wait_queue(&fc->fence_queue, &entry); 70 + 71 + ret = 0; 72 + 73 + for (;;) { 74 + __set_current_state((interruptible) ? 75 + TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); 76 + if (ttm_fence_object_signaled(fence, mask)) 77 + break; 78 + if (time_after_eq(jiffies, end_jiffies)) { 79 + if (driver->lockup) 80 + driver->lockup(fence, mask); 81 + else 82 + ttm_fence_lockup(fence, mask); 83 + continue; 84 + } 85 + if (lazy) 86 + schedule_timeout(1); 87 + else if ((++count & 0x0F) == 0) { 88 + __set_current_state(TASK_RUNNING); 89 + schedule(); 90 + __set_current_state((interruptible) ? 91 + TASK_INTERRUPTIBLE : 92 + TASK_UNINTERRUPTIBLE); 93 + } 94 + if (interruptible && signal_pending(current)) { 95 + ret = -ERESTART; 96 + break; 97 + } 98 + } 99 + __set_current_state(TASK_RUNNING); 100 + remove_wait_queue(&fc->fence_queue, &entry); 101 + return ret; 102 + } 103 + 104 + /* 105 + * Typically called by the IRQ handler. 
106 + */ 107 + 108 + void ttm_fence_handler(struct ttm_fence_device *fdev, uint32_t fence_class, 109 + uint32_t sequence, uint32_t type, uint32_t error) 110 + { 111 + int wake = 0; 112 + uint32_t diff; 113 + uint32_t relevant_type; 114 + uint32_t new_type; 115 + struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class]; 116 + const struct ttm_fence_driver *driver = ttm_fence_driver_from_dev(fdev); 117 + struct list_head *head; 118 + struct ttm_fence_object *fence, *next; 119 + bool found = false; 120 + 121 + if (list_empty(&fc->ring)) 122 + return; 123 + 124 + list_for_each_entry(fence, &fc->ring, ring) { 125 + diff = (sequence - fence->sequence) & fc->sequence_mask; 126 + if (diff > fc->wrap_diff) { 127 + found = true; 128 + break; 129 + } 130 + } 131 + 132 + fc->waiting_types &= ~type; 133 + head = (found) ? &fence->ring : &fc->ring; 134 + 135 + list_for_each_entry_safe_reverse(fence, next, head, ring) { 136 + if (&fence->ring == &fc->ring) 137 + break; 138 + 139 + DRM_DEBUG("Fence 0x%08lx, sequence 0x%08x, type 0x%08x\n", 140 + (unsigned long)fence, fence->sequence, 141 + fence->fence_type); 142 + 143 + if (error) { 144 + fence->info.error = error; 145 + fence->info.signaled_types = fence->fence_type; 146 + list_del_init(&fence->ring); 147 + wake = 1; 148 + break; 149 + } 150 + 151 + relevant_type = type & fence->fence_type; 152 + new_type = (fence->info.signaled_types | relevant_type) ^ 153 + fence->info.signaled_types; 154 + 155 + if (new_type) { 156 + fence->info.signaled_types |= new_type; 157 + DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n", 158 + (unsigned long)fence, 159 + fence->info.signaled_types); 160 + 161 + if (unlikely(driver->signaled)) 162 + driver->signaled(fence); 163 + 164 + if (driver->needed_flush) 165 + fc->pending_flush |= 166 + driver->needed_flush(fence); 167 + 168 + if (new_type & fence->waiting_types) 169 + wake = 1; 170 + } 171 + 172 + fc->waiting_types |= 173 + fence->waiting_types & ~fence->info.signaled_types; 174 + 175 + 
if (!(fence->fence_type & ~fence->info.signaled_types)) { 176 + DRM_DEBUG("Fence completely signaled 0x%08lx\n", 177 + (unsigned long)fence); 178 + list_del_init(&fence->ring); 179 + } 180 + } 181 + 182 + /* 183 + * Reinstate lost waiting types. 184 + */ 185 + 186 + if ((fc->waiting_types & type) != type) { 187 + head = head->prev; 188 + list_for_each_entry(fence, head, ring) { 189 + if (&fence->ring == &fc->ring) 190 + break; 191 + diff = 192 + (fc->highest_waiting_sequence - 193 + fence->sequence) & fc->sequence_mask; 194 + if (diff > fc->wrap_diff) 195 + break; 196 + 197 + fc->waiting_types |= 198 + fence->waiting_types & ~fence->info.signaled_types; 199 + } 200 + } 201 + 202 + if (wake) 203 + wake_up_all(&fc->fence_queue); 204 + } 205 + 206 + static void ttm_fence_unring(struct ttm_fence_object *fence) 207 + { 208 + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); 209 + unsigned long irq_flags; 210 + 211 + write_lock_irqsave(&fc->lock, irq_flags); 212 + list_del_init(&fence->ring); 213 + write_unlock_irqrestore(&fc->lock, irq_flags); 214 + } 215 + 216 + bool ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask) 217 + { 218 + unsigned long flags; 219 + bool signaled; 220 + const struct ttm_fence_driver *driver = ttm_fence_driver(fence); 221 + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); 222 + 223 + mask &= fence->fence_type; 224 + read_lock_irqsave(&fc->lock, flags); 225 + signaled = (mask & fence->info.signaled_types) == mask; 226 + read_unlock_irqrestore(&fc->lock, flags); 227 + if (!signaled && driver->poll) { 228 + write_lock_irqsave(&fc->lock, flags); 229 + driver->poll(fence->fdev, fence->fence_class, mask); 230 + signaled = (mask & fence->info.signaled_types) == mask; 231 + write_unlock_irqrestore(&fc->lock, flags); 232 + } 233 + return signaled; 234 + } 235 + 236 + int ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t type) 237 + { 238 + const struct ttm_fence_driver *driver = 
ttm_fence_driver(fence); 239 + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); 240 + unsigned long irq_flags; 241 + uint32_t saved_pending_flush; 242 + uint32_t diff; 243 + bool call_flush; 244 + 245 + if (type & ~fence->fence_type) { 246 + DRM_ERROR("Flush trying to extend fence type, " 247 + "0x%x, 0x%x\n", type, fence->fence_type); 248 + return -EINVAL; 249 + } 250 + 251 + write_lock_irqsave(&fc->lock, irq_flags); 252 + fence->waiting_types |= type; 253 + fc->waiting_types |= fence->waiting_types; 254 + diff = (fence->sequence - fc->highest_waiting_sequence) & 255 + fc->sequence_mask; 256 + 257 + if (diff < fc->wrap_diff) 258 + fc->highest_waiting_sequence = fence->sequence; 259 + 260 + /* 261 + * fence->waiting_types has changed. Determine whether 262 + * we need to initiate some kind of flush as a result of this. 263 + */ 264 + 265 + saved_pending_flush = fc->pending_flush; 266 + if (driver->needed_flush) 267 + fc->pending_flush |= driver->needed_flush(fence); 268 + 269 + if (driver->poll) 270 + driver->poll(fence->fdev, fence->fence_class, 271 + fence->waiting_types); 272 + 273 + call_flush = (fc->pending_flush != 0); 274 + write_unlock_irqrestore(&fc->lock, irq_flags); 275 + 276 + if (call_flush && driver->flush) 277 + driver->flush(fence->fdev, fence->fence_class); 278 + 279 + return 0; 280 + } 281 + 282 + /* 283 + * Make sure old fence objects are signaled before their fence sequences are 284 + * wrapped around and reused. 
285 + */ 286 + 287 + void ttm_fence_flush_old(struct ttm_fence_device *fdev, 288 + uint32_t fence_class, uint32_t sequence) 289 + { 290 + struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class]; 291 + struct ttm_fence_object *fence; 292 + unsigned long irq_flags; 293 + const struct ttm_fence_driver *driver = fdev->driver; 294 + bool call_flush; 295 + 296 + uint32_t diff; 297 + 298 + write_lock_irqsave(&fc->lock, irq_flags); 299 + 300 + list_for_each_entry_reverse(fence, &fc->ring, ring) { 301 + diff = (sequence - fence->sequence) & fc->sequence_mask; 302 + if (diff <= fc->flush_diff) 303 + break; 304 + 305 + fence->waiting_types = fence->fence_type; 306 + fc->waiting_types |= fence->fence_type; 307 + 308 + if (driver->needed_flush) 309 + fc->pending_flush |= driver->needed_flush(fence); 310 + } 311 + 312 + if (driver->poll) 313 + driver->poll(fdev, fence_class, fc->waiting_types); 314 + 315 + call_flush = (fc->pending_flush != 0); 316 + write_unlock_irqrestore(&fc->lock, irq_flags); 317 + 318 + if (call_flush && driver->flush) 319 + driver->flush(fdev, fence->fence_class); 320 + 321 + /* 322 + * FIXME: Shold we implement a wait here for really old fences? 
323 + */ 324 + 325 + } 326 + 327 + int ttm_fence_object_wait(struct ttm_fence_object *fence, 328 + bool lazy, bool interruptible, uint32_t mask) 329 + { 330 + const struct ttm_fence_driver *driver = ttm_fence_driver(fence); 331 + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); 332 + int ret = 0; 333 + unsigned long timeout; 334 + unsigned long cur_jiffies; 335 + unsigned long to_jiffies; 336 + 337 + if (mask & ~fence->fence_type) { 338 + DRM_ERROR("Wait trying to extend fence type" 339 + " 0x%08x 0x%08x\n", mask, fence->fence_type); 340 + BUG(); 341 + return -EINVAL; 342 + } 343 + 344 + if (driver->wait) 345 + return driver->wait(fence, lazy, interruptible, mask); 346 + 347 + ttm_fence_object_flush(fence, mask); 348 + retry: 349 + if (!driver->has_irq || 350 + driver->has_irq(fence->fdev, fence->fence_class, mask)) { 351 + 352 + cur_jiffies = jiffies; 353 + to_jiffies = fence->timeout_jiffies; 354 + 355 + timeout = (time_after(to_jiffies, cur_jiffies)) ? 356 + to_jiffies - cur_jiffies : 1; 357 + 358 + if (interruptible) 359 + ret = wait_event_interruptible_timeout 360 + (fc->fence_queue, 361 + ttm_fence_object_signaled(fence, mask), timeout); 362 + else 363 + ret = wait_event_timeout 364 + (fc->fence_queue, 365 + ttm_fence_object_signaled(fence, mask), timeout); 366 + 367 + if (unlikely(ret == -ERESTARTSYS)) 368 + return -ERESTART; 369 + 370 + if (unlikely(ret == 0)) { 371 + if (driver->lockup) 372 + driver->lockup(fence, mask); 373 + else 374 + ttm_fence_lockup(fence, mask); 375 + goto retry; 376 + } 377 + 378 + return 0; 379 + } 380 + 381 + return ttm_fence_wait_polling(fence, lazy, interruptible, mask); 382 + } 383 + 384 + int ttm_fence_object_emit(struct ttm_fence_object *fence, uint32_t fence_flags, 385 + uint32_t fence_class, uint32_t type) 386 + { 387 + const struct ttm_fence_driver *driver = ttm_fence_driver(fence); 388 + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); 389 + unsigned long flags; 390 + uint32_t sequence; 391 + unsigned 
long timeout; 392 + int ret; 393 + 394 + ttm_fence_unring(fence); 395 + ret = driver->emit(fence->fdev, 396 + fence_class, fence_flags, &sequence, &timeout); 397 + if (ret) 398 + return ret; 399 + 400 + write_lock_irqsave(&fc->lock, flags); 401 + fence->fence_class = fence_class; 402 + fence->fence_type = type; 403 + fence->waiting_types = 0; 404 + fence->info.signaled_types = 0; 405 + fence->info.error = 0; 406 + fence->sequence = sequence; 407 + fence->timeout_jiffies = timeout; 408 + if (list_empty(&fc->ring)) 409 + fc->highest_waiting_sequence = sequence - 1; 410 + list_add_tail(&fence->ring, &fc->ring); 411 + fc->latest_queued_sequence = sequence; 412 + write_unlock_irqrestore(&fc->lock, flags); 413 + return 0; 414 + } 415 + 416 + int ttm_fence_object_init(struct ttm_fence_device *fdev, 417 + uint32_t fence_class, 418 + uint32_t type, 419 + uint32_t create_flags, 420 + void (*destroy) (struct ttm_fence_object *), 421 + struct ttm_fence_object *fence) 422 + { 423 + int ret = 0; 424 + 425 + kref_init(&fence->kref); 426 + fence->fence_class = fence_class; 427 + fence->fence_type = type; 428 + fence->info.signaled_types = 0; 429 + fence->waiting_types = 0; 430 + fence->sequence = 0; 431 + fence->info.error = 0; 432 + fence->fdev = fdev; 433 + fence->destroy = destroy; 434 + INIT_LIST_HEAD(&fence->ring); 435 + atomic_inc(&fdev->count); 436 + 437 + if (create_flags & TTM_FENCE_FLAG_EMIT) { 438 + ret = ttm_fence_object_emit(fence, create_flags, 439 + fence->fence_class, type); 440 + } 441 + 442 + return ret; 443 + } 444 + 445 + int ttm_fence_object_create(struct ttm_fence_device *fdev, 446 + uint32_t fence_class, 447 + uint32_t type, 448 + uint32_t create_flags, 449 + struct ttm_fence_object **c_fence) 450 + { 451 + struct ttm_fence_object *fence; 452 + int ret; 453 + 454 + ret = ttm_mem_global_alloc(fdev->mem_glob, 455 + sizeof(*fence), 456 + false, 457 + false); 458 + if (unlikely(ret != 0)) { 459 + printk(KERN_ERR "Out of memory creating fence object\n"); 460 + 
return ret; 461 + } 462 + 463 + fence = kmalloc(sizeof(*fence), GFP_KERNEL); 464 + if (!fence) { 465 + printk(KERN_ERR "Out of memory creating fence object\n"); 466 + ttm_mem_global_free(fdev->mem_glob, sizeof(*fence)); 467 + return -ENOMEM; 468 + } 469 + 470 + ret = ttm_fence_object_init(fdev, fence_class, type, 471 + create_flags, NULL, fence); 472 + if (ret) { 473 + ttm_fence_object_unref(&fence); 474 + return ret; 475 + } 476 + *c_fence = fence; 477 + 478 + return 0; 479 + } 480 + 481 + static void ttm_fence_object_destroy(struct kref *kref) 482 + { 483 + struct ttm_fence_object *fence = 484 + container_of(kref, struct ttm_fence_object, kref); 485 + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); 486 + unsigned long irq_flags; 487 + 488 + write_lock_irqsave(&fc->lock, irq_flags); 489 + list_del_init(&fence->ring); 490 + write_unlock_irqrestore(&fc->lock, irq_flags); 491 + 492 + atomic_dec(&fence->fdev->count); 493 + if (fence->destroy) 494 + fence->destroy(fence); 495 + else { 496 + ttm_mem_global_free(fence->fdev->mem_glob, 497 + sizeof(*fence)); 498 + kfree(fence); 499 + } 500 + } 501 + 502 + void ttm_fence_device_release(struct ttm_fence_device *fdev) 503 + { 504 + kfree(fdev->fence_class); 505 + } 506 + 507 + int 508 + ttm_fence_device_init(int num_classes, 509 + struct ttm_mem_global *mem_glob, 510 + struct ttm_fence_device *fdev, 511 + const struct ttm_fence_class_init *init, 512 + bool replicate_init, 513 + const struct ttm_fence_driver *driver) 514 + { 515 + struct ttm_fence_class_manager *fc; 516 + const struct ttm_fence_class_init *fci; 517 + int i; 518 + 519 + fdev->mem_glob = mem_glob; 520 + fdev->fence_class = kzalloc(num_classes * 521 + sizeof(*fdev->fence_class), GFP_KERNEL); 522 + 523 + if (unlikely(!fdev->fence_class)) 524 + return -ENOMEM; 525 + 526 + fdev->num_classes = num_classes; 527 + atomic_set(&fdev->count, 0); 528 + fdev->driver = driver; 529 + 530 + for (i = 0; i < fdev->num_classes; ++i) { 531 + fc = &fdev->fence_class[i]; 
532 + fci = &init[(replicate_init) ? 0 : i]; 533 + 534 + fc->wrap_diff = fci->wrap_diff; 535 + fc->flush_diff = fci->flush_diff; 536 + fc->sequence_mask = fci->sequence_mask; 537 + 538 + rwlock_init(&fc->lock); 539 + INIT_LIST_HEAD(&fc->ring); 540 + init_waitqueue_head(&fc->fence_queue); 541 + } 542 + 543 + return 0; 544 + } 545 + 546 + struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence) 547 + { 548 + struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); 549 + struct ttm_fence_info tmp; 550 + unsigned long irq_flags; 551 + 552 + read_lock_irqsave(&fc->lock, irq_flags); 553 + tmp = fence->info; 554 + read_unlock_irqrestore(&fc->lock, irq_flags); 555 + 556 + return tmp; 557 + } 558 + 559 + void ttm_fence_object_unref(struct ttm_fence_object **p_fence) 560 + { 561 + struct ttm_fence_object *fence = *p_fence; 562 + 563 + *p_fence = NULL; 564 + (void)kref_put(&fence->kref, &ttm_fence_object_destroy); 565 + } 566 + 567 + /* 568 + * Placement / BO sync object glue. 569 + */ 570 + 571 + bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg) 572 + { 573 + struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj; 574 + uint32_t fence_types = (uint32_t) (unsigned long)sync_arg; 575 + 576 + return ttm_fence_object_signaled(fence, fence_types); 577 + } 578 + 579 + int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg, 580 + bool lazy, bool interruptible) 581 + { 582 + struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj; 583 + uint32_t fence_types = (uint32_t) (unsigned long)sync_arg; 584 + 585 + return ttm_fence_object_wait(fence, lazy, interruptible, fence_types); 586 + } 587 + 588 + int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg) 589 + { 590 + struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj; 591 + uint32_t fence_types = (uint32_t) (unsigned long)sync_arg; 592 + 593 + return ttm_fence_object_flush(fence, fence_types); 594 + } 595 + 596 + void ttm_fence_sync_obj_unref(void 
**sync_obj) 597 + { 598 + ttm_fence_object_unref((struct ttm_fence_object **)sync_obj); 599 + } 600 + 601 + void *ttm_fence_sync_obj_ref(void *sync_obj) 602 + { 603 + return (void *) 604 + ttm_fence_object_ref((struct ttm_fence_object *)sync_obj); 605 + }
+272
drivers/staging/gma500/psb_ttm_fence_api.h
··· 1 + /************************************************************************** 2 + * 3 + * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA 4 + * All Rights Reserved. 5 + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA 6 + * All Rights Reserved. 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms and conditions of the GNU General Public License, 10 + * version 2, as published by the Free Software Foundation. 11 + * 12 + * This program is distributed in the hope it will be useful, but WITHOUT 13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 + * more details. 16 + * 17 + * You should have received a copy of the GNU General Public License along with 18 + * this program; if not, write to the Free Software Foundation, Inc., 19 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 + * 21 + **************************************************************************/ 22 + /* 23 + * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> 24 + */ 25 + #ifndef _TTM_FENCE_API_H_ 26 + #define _TTM_FENCE_API_H_ 27 + 28 + #include <linux/list.h> 29 + #include <linux/kref.h> 30 + 31 + #define TTM_FENCE_FLAG_EMIT (1 << 0) 32 + #define TTM_FENCE_TYPE_EXE (1 << 0) 33 + 34 + struct ttm_fence_device; 35 + 36 + /** 37 + * struct ttm_fence_info 38 + * 39 + * @fence_class: The fence class. 40 + * @fence_type: Bitfield indicating types for this fence. 41 + * @signaled_types: Bitfield indicating which types are signaled. 42 + * @error: Last error reported from the device. 43 + * 44 + * Used as output from the ttm_fence_get_info 45 + */ 46 + 47 + struct ttm_fence_info { 48 + uint32_t signaled_types; 49 + uint32_t error; 50 + }; 51 + 52 + /** 53 + * struct ttm_fence_object 54 + * 55 + * @fdev: Pointer to the fence device struct. 
56 + * @kref: Holds the reference count of this fence object. 57 + * @ring: List head used for the circular list of not-completely 58 + * signaled fences. 59 + * @info: Data for fast retrieval using the ttm_fence_get_info() 60 + * function. 61 + * @timeout_jiffies: Absolute jiffies value indicating when this fence 62 + * object times out and, if waited on, calls ttm_fence_lockup 63 + * to check for and resolve a GPU lockup. 64 + * @sequence: Fence sequence number. 65 + * @waiting_types: Types currently waited on. 66 + * @destroy: Called to free the fence object, when its refcount has 67 + * reached zero. If NULL, kfree is used. 68 + * 69 + * This struct is provided in the driver interface so that drivers can 70 + * derive from it and create their own fence implementation. All members 71 + * are private to the fence implementation and the fence driver callbacks. 72 + * Otherwise a driver may access the derived object using container_of(). 73 + */ 74 + 75 + struct ttm_fence_object { 76 + struct ttm_fence_device *fdev; 77 + struct kref kref; 78 + uint32_t fence_class; 79 + uint32_t fence_type; 80 + 81 + /* 82 + * The below fields are protected by the fence class 83 + * manager spinlock. 84 + */ 85 + 86 + struct list_head ring; 87 + struct ttm_fence_info info; 88 + unsigned long timeout_jiffies; 89 + uint32_t sequence; 90 + uint32_t waiting_types; 91 + void (*destroy) (struct ttm_fence_object *); 92 + }; 93 + 94 + /** 95 + * ttm_fence_object_init 96 + * 97 + * @fdev: Pointer to a struct ttm_fence_device. 98 + * @fence_class: Fence class for this fence. 99 + * @type: Fence type for this fence. 100 + * @create_flags: Flags indicating varios actions at init time. At this point 101 + * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to 102 + * the command stream. 103 + * @destroy: Destroy function. If NULL, kfree() is used. 104 + * @fence: The struct ttm_fence_object to initialize. 105 + * 106 + * Initialize a pre-allocated fence object. 
This function, together with the 107 + * destroy function makes it possible to derive driver-specific fence objects. 108 + */ 109 + 110 + extern int 111 + ttm_fence_object_init(struct ttm_fence_device *fdev, 112 + uint32_t fence_class, 113 + uint32_t type, 114 + uint32_t create_flags, 115 + void (*destroy) (struct ttm_fence_object *fence), 116 + struct ttm_fence_object *fence); 117 + 118 + /** 119 + * ttm_fence_object_create 120 + * 121 + * @fdev: Pointer to a struct ttm_fence_device. 122 + * @fence_class: Fence class for this fence. 123 + * @type: Fence type for this fence. 124 + * @create_flags: Flags indicating various actions at init time. At this point 125 + * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to 126 + * the command stream. 127 + * @c_fence: On successful termination, *(@c_fence) will point to the created 128 + * fence object. 129 + * 130 + * Create and initialize a struct ttm_fence_object. The destroy function will 131 + * be set to kfree(). 132 + */ 133 + 134 + extern int 135 + ttm_fence_object_create(struct ttm_fence_device *fdev, 136 + uint32_t fence_class, 137 + uint32_t type, 138 + uint32_t create_flags, 139 + struct ttm_fence_object **c_fence); 140 + 141 + /** 142 + * ttm_fence_object_wait 143 + * 144 + * @fence: The fence object to wait on. 145 + * @lazy: Allow sleeps to reduce the cpu-usage if polling. 146 + * @interruptible: Sleep interruptible when waiting. 147 + * @type_mask: Wait for the given type_mask to signal. 148 + * 149 + * Wait for a fence to signal the given type_mask. The function will 150 + * perform a fence_flush using type_mask. (See ttm_fence_object_flush). 151 + * 152 + * Returns 153 + * -ERESTART if interrupted by a signal. 154 + * May return driver-specific error codes if timed-out. 
155 + */ 156 + 157 + extern int 158 + ttm_fence_object_wait(struct ttm_fence_object *fence, 159 + bool lazy, bool interruptible, uint32_t type_mask); 160 + 161 + /** 162 + * ttm_fence_object_flush 163 + * 164 + * @fence: The fence object to flush. 165 + * @flush_mask: Fence types to flush. 166 + * 167 + * Make sure that the given fence eventually signals the 168 + * types indicated by @flush_mask. Note that this may or may not 169 + * map to a CPU or GPU flush. 170 + */ 171 + 172 + extern int 173 + ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t flush_mask); 174 + 175 + /** 176 + * ttm_fence_get_info 177 + * 178 + * @fence: The fence object. 179 + * 180 + * Copy the info block from the fence while holding relevant locks. 181 + */ 182 + 183 + struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence); 184 + 185 + /** 186 + * ttm_fence_object_ref 187 + * 188 + * @fence: The fence object. 189 + * 190 + * Return a ref-counted pointer to the fence object indicated by @fence. 191 + */ 192 + 193 + static inline struct ttm_fence_object *ttm_fence_object_ref(struct 194 + ttm_fence_object 195 + *fence) 196 + { 197 + kref_get(&fence->kref); 198 + return fence; 199 + } 200 + 201 + /** 202 + * ttm_fence_object_unref 203 + * 204 + * @p_fence: Pointer to a ref-counted pointer to a struct ttm_fence_object. 205 + * 206 + * Unreference the fence object pointed to by *(@p_fence), clearing 207 + * *(p_fence). 208 + */ 209 + 210 + extern void ttm_fence_object_unref(struct ttm_fence_object **p_fence); 211 + 212 + /** 213 + * ttm_fence_object_signaled 214 + * 215 + * @fence: Pointer to the struct ttm_fence_object. 216 + * @mask: Type mask to check whether signaled. 217 + * 218 + * This function checks (without waiting) whether the fence object 219 + * pointed to by @fence has signaled the types indicated by @mask, 220 + * and returns 1 if true, 0 if false. This function does NOT perform 221 + * an implicit fence flush. 
222 + */ 223 + 224 + extern bool 225 + ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask); 226 + 227 + /** 228 + * ttm_fence_class 229 + * 230 + * @fence: Pointer to the struct ttm_fence_object. 231 + * 232 + * Convenience function that returns the fence class of a 233 + * struct ttm_fence_object. 234 + */ 235 + 236 + static inline uint32_t ttm_fence_class(const struct ttm_fence_object *fence) 237 + { 238 + return fence->fence_class; 239 + } 240 + 241 + /** 242 + * ttm_fence_types 243 + * 244 + * @fence: Pointer to the struct ttm_fence_object. 245 + * 246 + * Convenience function that returns the fence types of a 247 + * struct ttm_fence_object. 248 + */ 249 + 250 + static inline uint32_t ttm_fence_types(const struct ttm_fence_object *fence) 251 + { 252 + return fence->fence_type; 253 + } 254 + 255 + /* 256 + * The functions below are wrappers to the above functions, with 257 + * similar names but with sync_obj omitted. These wrappers are intended 258 + * to be plugged directly into the buffer object driver's sync object 259 + * API, if the driver chooses to use ttm_fence_objects as buffer object 260 + * sync objects. In the prototypes below, a sync_obj is cast to a 261 + * struct ttm_fence_object, whereas a sync_arg is cast to an 262 + * uint32_t representing a fence_type argument. 263 + */ 264 + 265 + extern bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg); 266 + extern int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg, 267 + bool lazy, bool interruptible); 268 + extern int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg); 269 + extern void ttm_fence_sync_obj_unref(void **sync_obj); 270 + extern void *ttm_fence_sync_obj_ref(void *sync_obj); 271 + 272 + #endif
+302
drivers/staging/gma500/psb_ttm_fence_driver.h
··· 1 + /************************************************************************** 2 + * 3 + * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA 4 + * All Rights Reserved. 5 + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA 6 + * All Rights Reserved. 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms and conditions of the GNU General Public License, 10 + * version 2, as published by the Free Software Foundation. 11 + * 12 + * This program is distributed in the hope it will be useful, but WITHOUT 13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 + * more details. 16 + * 17 + * You should have received a copy of the GNU General Public License along with 18 + * this program; if not, write to the Free Software Foundation, Inc., 19 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 + * 21 + **************************************************************************/ 22 + /* 23 + * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> 24 + */ 25 + #ifndef _TTM_FENCE_DRIVER_H_ 26 + #define _TTM_FENCE_DRIVER_H_ 27 + 28 + #include <linux/kref.h> 29 + #include <linux/spinlock.h> 30 + #include <linux/wait.h> 31 + #include "psb_ttm_fence_api.h" 32 + #include "ttm/ttm_memory.h" 33 + 34 + /** @file ttm_fence_driver.h 35 + * 36 + * Definitions needed for a driver implementing the 37 + * ttm_fence subsystem. 38 + */ 39 + 40 + /** 41 + * struct ttm_fence_class_manager: 42 + * 43 + * @wrap_diff: Sequence difference to catch 32-bit wrapping. 44 + * if (seqa - seqb) > @wrap_diff, then seqa < seqb. 45 + * @flush_diff: Sequence difference to trigger fence flush. 46 + * if (cur_seq - seqa) > @flush_diff, then consider fence object with 47 + * seqa as old an needing a flush. 48 + * @sequence_mask: Mask of valid bits in a fence sequence. 
49 + * @lock: Lock protecting this struct as well as fence objects 50 + * associated with this struct. 51 + * @ring: Circular sequence-ordered list of fence objects. 52 + * @pending_flush: Fence types currently needing a flush. 53 + * @waiting_types: Fence types that are currently waited for. 54 + * @fence_queue: Queue of waiters on fences belonging to this fence class. 55 + * @highest_waiting_sequence: Sequence number of the fence with highest 56 + * sequence number and that is waited for. 57 + * @latest_queued_sequence: Sequence number of the fence latest queued 58 + * on the ring. 59 + */ 60 + 61 + struct ttm_fence_class_manager { 62 + 63 + /* 64 + * Unprotected constant members. 65 + */ 66 + 67 + uint32_t wrap_diff; 68 + uint32_t flush_diff; 69 + uint32_t sequence_mask; 70 + 71 + /* 72 + * The rwlock protects this structure as well as 73 + * the data in all fence objects belonging to this 74 + * class. This should be OK as most fence objects are 75 + * only read from once they're created. 76 + */ 77 + 78 + rwlock_t lock; 79 + struct list_head ring; 80 + uint32_t pending_flush; 81 + uint32_t waiting_types; 82 + wait_queue_head_t fence_queue; 83 + uint32_t highest_waiting_sequence; 84 + uint32_t latest_queued_sequence; 85 + }; 86 + 87 + /** 88 + * struct ttm_fence_device 89 + * 90 + * @fence_class: Array of fence class managers. 91 + * @num_classes: Array dimension of @fence_class. 92 + * @count: Current number of fence objects for statistics. 93 + * @driver: Driver struct. 94 + * 95 + * Provided in the driver interface so that the driver can derive 96 + * from this struct for its driver_private, and accordingly 97 + * access the driver_private from the fence driver callbacks. 98 + * 99 + * All members except "count" are initialized at creation and 100 + * never touched after that. No protection needed. 
101 + * 102 + * This struct is private to the fence implementation and to the fence 103 + * driver callbacks, and may otherwise be used by drivers only to 104 + * obtain the derived device_private object using container_of(). 105 + */ 106 + 107 + struct ttm_fence_device { 108 + struct ttm_mem_global *mem_glob; 109 + struct ttm_fence_class_manager *fence_class; 110 + uint32_t num_classes; 111 + atomic_t count; 112 + const struct ttm_fence_driver *driver; 113 + }; 114 + 115 + /** 116 + * struct ttm_fence_class_init 117 + * 118 + * @wrap_diff: Fence sequence number wrap indicator. If 119 + * (sequence1 - sequence2) > @wrap_diff, then sequence1 is 120 + * considered to be older than sequence2. 121 + * @flush_diff: Fence sequence number flush indicator. 122 + * If a non-completely-signaled fence has a fence sequence number 123 + * sequence1 and (sequence1 - current_emit_sequence) > @flush_diff, 124 + * the fence is considered too old and it will be flushed upon the 125 + * next call of ttm_fence_flush_old(), to make sure no fences with 126 + * stale sequence numbers remains unsignaled. @flush_diff should 127 + * be sufficiently less than @wrap_diff. 128 + * @sequence_mask: Mask with valid bits of the fence sequence 129 + * number set to 1. 130 + * 131 + * This struct is used as input to ttm_fence_device_init. 132 + */ 133 + 134 + struct ttm_fence_class_init { 135 + uint32_t wrap_diff; 136 + uint32_t flush_diff; 137 + uint32_t sequence_mask; 138 + }; 139 + 140 + /** 141 + * struct ttm_fence_driver 142 + * 143 + * @has_irq: Called by a potential waiter. Should return 1 if a 144 + * fence object with indicated parameters is expected to signal 145 + * automatically, and 0 if the fence implementation needs to 146 + * repeatedly call @poll to make it signal. 147 + * @emit: Make sure a fence with the given parameters is 148 + * present in the indicated command stream. Return its sequence number 149 + * in "breadcrumb". 
150 + * @poll: Check and report sequences of the given "fence_class" 151 + * that have signaled "types" 152 + * @flush: Make sure that the types indicated by the bitfield 153 + * ttm_fence_class_manager::pending_flush will eventually 154 + * signal. These bits have been put together using the 155 + * result from the needed_flush function described below. 156 + * @needed_flush: Given the fence_class and fence_types indicated by 157 + * "fence", and the last received fence sequence of this 158 + * fence class, indicate what types need a fence flush to 159 + * signal. Return as a bitfield. 160 + * @wait: Set to non-NULL if the driver wants to override the fence 161 + * wait implementation. Return 0 on success, -EBUSY on failure, 162 + * and -ERESTART if interruptible and a signal is pending. 163 + * @signaled: Driver callback that is called whenever a 164 + * ttm_fence_object::signaled_types has changed status. 165 + * This function is called from atomic context, 166 + * with the ttm_fence_class_manager::lock held in write mode. 167 + * @lockup: Driver callback that is called whenever a wait has exceeded 168 + * the lifetime of a fence object. 169 + * If there is a GPU lockup, 170 + * this function should, if possible, reset the GPU, 171 + * call the ttm_fence_handler with an error status, and 172 + * return. If no lockup was detected, simply extend the 173 + * fence timeout_jiffies and return. The driver might 174 + * want to protect the lockup check with a mutex and cache a 175 + * non-locked-up status for a while to avoid an excessive 176 + * amount of lockup checks from every waiting thread. 
177 + */ 178 + 179 + struct ttm_fence_driver { 180 + bool (*has_irq) (struct ttm_fence_device *fdev, 181 + uint32_t fence_class, uint32_t flags); 182 + int (*emit) (struct ttm_fence_device *fdev, 183 + uint32_t fence_class, 184 + uint32_t flags, 185 + uint32_t *breadcrumb, unsigned long *timeout_jiffies); 186 + void (*flush) (struct ttm_fence_device *fdev, uint32_t fence_class); 187 + void (*poll) (struct ttm_fence_device *fdev, 188 + uint32_t fence_class, uint32_t types); 189 + uint32_t(*needed_flush) 190 + (struct ttm_fence_object *fence); 191 + int (*wait) (struct ttm_fence_object *fence, bool lazy, 192 + bool interruptible, uint32_t mask); 193 + void (*signaled) (struct ttm_fence_object *fence); 194 + void (*lockup) (struct ttm_fence_object *fence, uint32_t fence_types); 195 + }; 196 + 197 + /** 198 + * function ttm_fence_device_init 199 + * 200 + * @num_classes: Number of fence classes for this fence implementation. 201 + * @mem_global: Pointer to the global memory accounting info. 202 + * @fdev: Pointer to an uninitialised struct ttm_fence_device. 203 + * @init: Array of initialization info for each fence class. 204 + * @replicate_init: Use the first @init initialization info for all classes. 205 + * @driver: Driver callbacks. 206 + * 207 + * Initialize a struct ttm_fence_driver structure. Returns -ENOMEM if 208 + * out-of-memory. Otherwise returns 0. 209 + */ 210 + extern int 211 + ttm_fence_device_init(int num_classes, 212 + struct ttm_mem_global *mem_glob, 213 + struct ttm_fence_device *fdev, 214 + const struct ttm_fence_class_init *init, 215 + bool replicate_init, 216 + const struct ttm_fence_driver *driver); 217 + 218 + /** 219 + * function ttm_fence_device_release 220 + * 221 + * @fdev: Pointer to the fence device. 222 + * 223 + * Release all resources held by a fence device. Note that before 224 + * this function is called, the caller must have made sure all fence 225 + * objects belonging to this fence device are completely signaled. 
226 + */ 227 + 228 + extern void ttm_fence_device_release(struct ttm_fence_device *fdev); 229 + 230 + /** 231 + * ttm_fence_handler - the fence handler. 232 + * 233 + * @fdev: Pointer to the fence device. 234 + * @fence_class: Fence class that signals. 235 + * @sequence: Signaled sequence. 236 + * @type: Types that signal. 237 + * @error: Error from the engine. 238 + * 239 + * This function signals all fences with a sequence previous to the 240 + * @sequence argument, and belonging to @fence_class. The signaled fence 241 + * types are provided in @type. If error is non-zero, the error member 242 + * of the fence with sequence = @sequence is set to @error. This value 243 + * may be reported back to user-space, indicating, for example an illegal 244 + * 3D command or illegal mpeg data. 245 + * 246 + * This function is typically called from the driver::poll method when the 247 + * command sequence preceding the fence marker has executed. It should be 248 + * called with the ttm_fence_class_manager::lock held in write mode and 249 + * may be called from interrupt context. 250 + */ 251 + 252 + extern void 253 + ttm_fence_handler(struct ttm_fence_device *fdev, 254 + uint32_t fence_class, 255 + uint32_t sequence, uint32_t type, uint32_t error); 256 + 257 + /** 258 + * ttm_fence_driver_from_dev 259 + * 260 + * @fdev: The ttm fence device. 261 + * 262 + * Returns a pointer to the fence driver struct. 263 + */ 264 + 265 + static inline const struct ttm_fence_driver *ttm_fence_driver_from_dev( 266 + struct ttm_fence_device *fdev) 267 + { 268 + return fdev->driver; 269 + } 270 + 271 + /** 272 + * ttm_fence_driver 273 + * 274 + * @fence: Pointer to a ttm fence object. 275 + * 276 + * Returns a pointer to the fence driver struct. 
277 + */ 278 + 279 + static inline const struct ttm_fence_driver *ttm_fence_driver(struct 280 + ttm_fence_object 281 + *fence) 282 + { 283 + return ttm_fence_driver_from_dev(fence->fdev); 284 + } 285 + 286 + /** 287 + * ttm_fence_fc 288 + * 289 + * @fence: Pointer to a ttm fence object. 290 + * 291 + * Returns a pointer to the struct ttm_fence_class_manager for the 292 + * fence class of @fence. 293 + */ 294 + 295 + static inline struct ttm_fence_class_manager *ttm_fence_fc(struct 296 + ttm_fence_object 297 + *fence) 298 + { 299 + return &fence->fdev->fence_class[fence->fence_class]; 300 + } 301 + 302 + #endif
+237
drivers/staging/gma500/psb_ttm_fence_user.c
··· 1 + /************************************************************************** 2 + * 3 + * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA 4 + * All Rights Reserved. 5 + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA 6 + * All Rights Reserved. 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms and conditions of the GNU General Public License, 10 + * version 2, as published by the Free Software Foundation. 11 + * 12 + * This program is distributed in the hope it will be useful, but WITHOUT 13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 + * more details. 16 + * 17 + * You should have received a copy of the GNU General Public License along with 18 + * this program; if not, write to the Free Software Foundation, Inc., 19 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 + * 21 + **************************************************************************/ 22 + /* 23 + * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> 24 + */ 25 + 26 + #include <drm/drmP.h> 27 + #include "psb_ttm_fence_user.h" 28 + #include "ttm/ttm_object.h" 29 + #include "psb_ttm_fence_driver.h" 30 + #include "psb_ttm_userobj_api.h" 31 + 32 + /** 33 + * struct ttm_fence_user_object 34 + * 35 + * @base: The base object used for user-space visibility and refcounting. 36 + * 37 + * @fence: The fence object itself. 
38 + * 39 + */ 40 + 41 + struct ttm_fence_user_object { 42 + struct ttm_base_object base; 43 + struct ttm_fence_object fence; 44 + }; 45 + 46 + static struct ttm_fence_user_object *ttm_fence_user_object_lookup( 47 + struct ttm_object_file *tfile, 48 + uint32_t handle) 49 + { 50 + struct ttm_base_object *base; 51 + 52 + base = ttm_base_object_lookup(tfile, handle); 53 + if (unlikely(base == NULL)) { 54 + printk(KERN_ERR "Invalid fence handle 0x%08lx\n", 55 + (unsigned long)handle); 56 + return NULL; 57 + } 58 + 59 + if (unlikely(base->object_type != ttm_fence_type)) { 60 + ttm_base_object_unref(&base); 61 + printk(KERN_ERR "Invalid fence handle 0x%08lx\n", 62 + (unsigned long)handle); 63 + return NULL; 64 + } 65 + 66 + return container_of(base, struct ttm_fence_user_object, base); 67 + } 68 + 69 + /* 70 + * The fence object destructor. 71 + */ 72 + 73 + static void ttm_fence_user_destroy(struct ttm_fence_object *fence) 74 + { 75 + struct ttm_fence_user_object *ufence = 76 + container_of(fence, struct ttm_fence_user_object, fence); 77 + 78 + ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*ufence)); 79 + kfree(ufence); 80 + } 81 + 82 + /* 83 + * The base object destructor. We basically only unreference the 84 + * attached fence object. 
85 + */ 86 + 87 + static void ttm_fence_user_release(struct ttm_base_object **p_base) 88 + { 89 + struct ttm_fence_user_object *ufence; 90 + struct ttm_base_object *base = *p_base; 91 + struct ttm_fence_object *fence; 92 + 93 + *p_base = NULL; 94 + 95 + if (unlikely(base == NULL)) 96 + return; 97 + 98 + ufence = container_of(base, struct ttm_fence_user_object, base); 99 + fence = &ufence->fence; 100 + ttm_fence_object_unref(&fence); 101 + } 102 + 103 + int 104 + ttm_fence_user_create(struct ttm_fence_device *fdev, 105 + struct ttm_object_file *tfile, 106 + uint32_t fence_class, 107 + uint32_t fence_types, 108 + uint32_t create_flags, 109 + struct ttm_fence_object **fence, 110 + uint32_t *user_handle) 111 + { 112 + int ret; 113 + struct ttm_fence_object *tmp; 114 + struct ttm_fence_user_object *ufence; 115 + 116 + ret = ttm_mem_global_alloc(fdev->mem_glob, 117 + sizeof(*ufence), 118 + false, 119 + false); 120 + if (unlikely(ret != 0)) 121 + return -ENOMEM; 122 + 123 + ufence = kmalloc(sizeof(*ufence), GFP_KERNEL); 124 + if (unlikely(ufence == NULL)) { 125 + ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence)); 126 + return -ENOMEM; 127 + } 128 + 129 + ret = ttm_fence_object_init(fdev, 130 + fence_class, 131 + fence_types, create_flags, 132 + &ttm_fence_user_destroy, &ufence->fence); 133 + 134 + if (unlikely(ret != 0)) 135 + goto out_err0; 136 + 137 + /* 138 + * One fence ref is held by the fence ptr we return. 139 + * The other one by the base object. Need to up the 140 + * fence refcount before we publish this object to 141 + * user-space. 
142 + */ 143 + 144 + tmp = ttm_fence_object_ref(&ufence->fence); 145 + ret = ttm_base_object_init(tfile, &ufence->base, 146 + false, ttm_fence_type, 147 + &ttm_fence_user_release, NULL); 148 + 149 + if (unlikely(ret != 0)) 150 + goto out_err1; 151 + 152 + *fence = &ufence->fence; 153 + *user_handle = ufence->base.hash.key; 154 + 155 + return 0; 156 + out_err1: 157 + ttm_fence_object_unref(&tmp); 158 + tmp = &ufence->fence; 159 + ttm_fence_object_unref(&tmp); 160 + return ret; 161 + out_err0: 162 + ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence)); 163 + kfree(ufence); 164 + return ret; 165 + } 166 + 167 + int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data) 168 + { 169 + int ret; 170 + union ttm_fence_signaled_arg *arg = data; 171 + struct ttm_fence_object *fence; 172 + struct ttm_fence_info info; 173 + struct ttm_fence_user_object *ufence; 174 + struct ttm_base_object *base; 175 + ret = 0; 176 + 177 + ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle); 178 + if (unlikely(ufence == NULL)) 179 + return -EINVAL; 180 + 181 + fence = &ufence->fence; 182 + 183 + if (arg->req.flush) { 184 + ret = ttm_fence_object_flush(fence, arg->req.fence_type); 185 + if (unlikely(ret != 0)) 186 + goto out; 187 + } 188 + 189 + info = ttm_fence_get_info(fence); 190 + arg->rep.signaled_types = info.signaled_types; 191 + arg->rep.fence_error = info.error; 192 + 193 + out: 194 + base = &ufence->base; 195 + ttm_base_object_unref(&base); 196 + return ret; 197 + } 198 + 199 + int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data) 200 + { 201 + int ret; 202 + union ttm_fence_finish_arg *arg = data; 203 + struct ttm_fence_user_object *ufence; 204 + struct ttm_base_object *base; 205 + struct ttm_fence_object *fence; 206 + ret = 0; 207 + 208 + ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle); 209 + if (unlikely(ufence == NULL)) 210 + return -EINVAL; 211 + 212 + fence = &ufence->fence; 213 + 214 + ret = ttm_fence_object_wait(fence, 215 + 
arg->req.mode & TTM_FENCE_FINISH_MODE_LAZY, 216 + true, arg->req.fence_type); 217 + if (likely(ret == 0)) { 218 + struct ttm_fence_info info = ttm_fence_get_info(fence); 219 + 220 + arg->rep.signaled_types = info.signaled_types; 221 + arg->rep.fence_error = info.error; 222 + } 223 + 224 + base = &ufence->base; 225 + ttm_base_object_unref(&base); 226 + 227 + return ret; 228 + } 229 + 230 + int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data) 231 + { 232 + struct ttm_fence_unref_arg *arg = data; 233 + int ret = 0; 234 + 235 + ret = ttm_ref_object_base_unref(tfile, arg->handle, ttm_fence_type); 236 + return ret; 237 + }
+140
drivers/staging/gma500/psb_ttm_fence_user.h
/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 **************************************************************************/
/*
 * Authors
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
/*
 * User-space ABI for the TTM fence ioctls: the request/reply structures
 * and the ioctl offsets.  This header is shared with user space, hence
 * the fixed-width types and explicit pad members — do not reorder or
 * resize the fields.
 */

#ifndef TTM_FENCE_USER_H
#define TTM_FENCE_USER_H

/* Pull in uint32_t and friends for user-space builds. */
#if !defined(__KERNEL__) && !defined(_KERNEL)
#include <stdint.h>
#endif

/* Version of this fence ioctl interface. */
#define TTM_FENCE_MAJOR 0
#define TTM_FENCE_MINOR 1
#define TTM_FENCE_PL 0
#define TTM_FENCE_DATE "080819"

/**
 * struct ttm_fence_signaled_req
 *
 * @handle: Handle to the fence object. Input.
 *
 * @fence_type: Fence types we want to flush. Input.
 *
 * @flush: Boolean. Flush the indicated fence_types. Input.
 *
 * @pad64: Explicit padding so the struct has the same layout on
 * 32-bit and 64-bit user space.
 *
 * Argument to the TTM_FENCE_SIGNALED ioctl.
 */

struct ttm_fence_signaled_req {
	uint32_t handle;
	uint32_t fence_type;
	int32_t flush;
	uint32_t pad64;
};

/**
 * struct ttm_fence_rep
 *
 * @signaled_types: Fence type that has signaled.
 *
 * @fence_error: Command execution error.
 * Hardware errors that are consequences of the execution
 * of the command stream preceding the fence are reported
 * here.
 *
 * Output argument to the TTM_FENCE_SIGNALED and
 * TTM_FENCE_FINISH ioctls.
 */

struct ttm_fence_rep {
	uint32_t signaled_types;
	uint32_t fence_error;
};

/* In/out argument of TTM_FENCE_SIGNALED: req on entry, rep on return. */
union ttm_fence_signaled_arg {
	struct ttm_fence_signaled_req req;
	struct ttm_fence_rep rep;
};

/*
 * Waiting mode flags for the TTM_FENCE_FINISH ioctl.
 *
 * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling
 * wait.
 *
 * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU,
 * but return -EBUSY if the buffer is busy.
 */

#define TTM_FENCE_FINISH_MODE_LAZY (1 << 0)
#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1)

/**
 * struct ttm_fence_finish_req
 *
 * @handle: Handle to the fence object. Input.
 *
 * @fence_type: Fence types we want to finish.
 *
 * @mode: Wait mode (TTM_FENCE_FINISH_MODE_* flags above).
 *
 * @pad64: Explicit padding for a stable 32/64-bit layout.
 *
 * Input to the TTM_FENCE_FINISH ioctl.
 */

struct ttm_fence_finish_req {
	uint32_t handle;
	uint32_t fence_type;
	uint32_t mode;
	uint32_t pad64;
};

/* In/out argument of TTM_FENCE_FINISH: req on entry, rep on return. */
union ttm_fence_finish_arg {
	struct ttm_fence_finish_req req;
	struct ttm_fence_rep rep;
};

/**
 * struct ttm_fence_unref_arg
 *
 * @handle: Handle to the fence object.
 *
 * @pad64: Explicit padding for a stable 32/64-bit layout.
 *
 * Argument to the TTM_FENCE_UNREF ioctl.
 */

struct ttm_fence_unref_arg {
	uint32_t handle;
	uint32_t pad64;
};

/*
 * Ioctl offsets from extension start.
 */

#define TTM_FENCE_SIGNALED 0x01
#define TTM_FENCE_FINISH 0x02
#define TTM_FENCE_UNREF 0x03

#endif
+349
drivers/staging/gma500/psb_ttm_glue.c
··· 1 + /************************************************************************** 2 + * Copyright (c) 2008, Intel Corporation. 3 + * All Rights Reserved. 4 + * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA. 5 + * All Rights Reserved. 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms and conditions of the GNU General Public License, 9 + * version 2, as published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope it will be useful, but WITHOUT 12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 + * more details. 15 + * 16 + * You should have received a copy of the GNU General Public License along with 17 + * this program; if not, write to the Free Software Foundation, Inc., 18 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 19 + * 20 + **************************************************************************/ 21 + 22 + 23 + #include <drm/drmP.h> 24 + #include "psb_drv.h" 25 + #include "psb_ttm_userobj_api.h" 26 + #include <linux/io.h> 27 + 28 + 29 + static struct vm_operations_struct psb_ttm_vm_ops; 30 + 31 + /** 32 + * NOTE: driver_private of drm_file is now a struct psb_file_data struct 33 + * pPriv in struct psb_file_data contains the original psb_fpriv; 34 + */ 35 + int psb_open(struct inode *inode, struct file *filp) 36 + { 37 + struct drm_file *file_priv; 38 + struct drm_psb_private *dev_priv; 39 + struct psb_fpriv *psb_fp; 40 + struct psb_file_data *pvr_file_priv; 41 + int ret; 42 + 43 + DRM_DEBUG("\n"); 44 + 45 + ret = drm_open(inode, filp); 46 + if (unlikely(ret)) 47 + return ret; 48 + 49 + psb_fp = kzalloc(sizeof(*psb_fp), GFP_KERNEL); 50 + 51 + if (unlikely(psb_fp == NULL)) 52 + goto out_err0; 53 + 54 + file_priv = (struct drm_file *) filp->private_data; 55 + dev_priv = psb_priv(file_priv->minor->dev); 56 + 57 + 
DRM_DEBUG("is_master %d\n", file_priv->is_master ? 1 : 0); 58 + 59 + psb_fp->tfile = ttm_object_file_init(dev_priv->tdev, 60 + PSB_FILE_OBJECT_HASH_ORDER); 61 + if (unlikely(psb_fp->tfile == NULL)) 62 + goto out_err1; 63 + 64 + pvr_file_priv = (struct psb_file_data *)file_priv->driver_priv; 65 + if (!pvr_file_priv) { 66 + DRM_ERROR("drm file private is NULL\n"); 67 + goto out_err1; 68 + } 69 + 70 + pvr_file_priv->priv = psb_fp; 71 + if (unlikely(dev_priv->bdev.dev_mapping == NULL)) 72 + dev_priv->bdev.dev_mapping = dev_priv->dev->dev_mapping; 73 + 74 + return 0; 75 + 76 + out_err1: 77 + kfree(psb_fp); 78 + out_err0: 79 + (void) drm_release(inode, filp); 80 + return ret; 81 + } 82 + 83 + int psb_release(struct inode *inode, struct file *filp) 84 + { 85 + struct drm_file *file_priv; 86 + struct psb_fpriv *psb_fp; 87 + struct drm_psb_private *dev_priv; 88 + int ret; 89 + file_priv = (struct drm_file *) filp->private_data; 90 + psb_fp = psb_fpriv(file_priv); 91 + dev_priv = psb_priv(file_priv->minor->dev); 92 + 93 + ttm_object_file_release(&psb_fp->tfile); 94 + kfree(psb_fp); 95 + 96 + ret = drm_release(inode, filp); 97 + 98 + return ret; 99 + } 100 + 101 + int psb_fence_signaled_ioctl(struct drm_device *dev, void *data, 102 + struct drm_file *file_priv) 103 + { 104 + 105 + return ttm_fence_signaled_ioctl(psb_fpriv(file_priv)->tfile, data); 106 + } 107 + 108 + int psb_fence_finish_ioctl(struct drm_device *dev, void *data, 109 + struct drm_file *file_priv) 110 + { 111 + return ttm_fence_finish_ioctl(psb_fpriv(file_priv)->tfile, data); 112 + } 113 + 114 + int psb_fence_unref_ioctl(struct drm_device *dev, void *data, 115 + struct drm_file *file_priv) 116 + { 117 + return ttm_fence_unref_ioctl(psb_fpriv(file_priv)->tfile, data); 118 + } 119 + 120 + int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data, 121 + struct drm_file *file_priv) 122 + { 123 + return ttm_pl_waitidle_ioctl(psb_fpriv(file_priv)->tfile, data); 124 + } 125 + 126 + int psb_pl_setstatus_ioctl(struct 
drm_device *dev, void *data, 127 + struct drm_file *file_priv) 128 + { 129 + return ttm_pl_setstatus_ioctl(psb_fpriv(file_priv)->tfile, 130 + &psb_priv(dev)->ttm_lock, data); 131 + 132 + } 133 + 134 + int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data, 135 + struct drm_file *file_priv) 136 + { 137 + return ttm_pl_synccpu_ioctl(psb_fpriv(file_priv)->tfile, data); 138 + } 139 + 140 + int psb_pl_unref_ioctl(struct drm_device *dev, void *data, 141 + struct drm_file *file_priv) 142 + { 143 + return ttm_pl_unref_ioctl(psb_fpriv(file_priv)->tfile, data); 144 + 145 + } 146 + 147 + int psb_pl_reference_ioctl(struct drm_device *dev, void *data, 148 + struct drm_file *file_priv) 149 + { 150 + return ttm_pl_reference_ioctl(psb_fpriv(file_priv)->tfile, data); 151 + 152 + } 153 + 154 + int psb_pl_create_ioctl(struct drm_device *dev, void *data, 155 + struct drm_file *file_priv) 156 + { 157 + struct drm_psb_private *dev_priv = psb_priv(dev); 158 + 159 + return ttm_pl_create_ioctl(psb_fpriv(file_priv)->tfile, 160 + &dev_priv->bdev, &dev_priv->ttm_lock, data); 161 + 162 + } 163 + 164 + int psb_pl_ub_create_ioctl(struct drm_device *dev, void *data, 165 + struct drm_file *file_priv) 166 + { 167 + struct drm_psb_private *dev_priv = psb_priv(dev); 168 + 169 + return ttm_pl_ub_create_ioctl(psb_fpriv(file_priv)->tfile, 170 + &dev_priv->bdev, &dev_priv->ttm_lock, data); 171 + 172 + } 173 + /** 174 + * psb_ttm_fault - Wrapper around the ttm fault method. 175 + * 176 + * @vma: The struct vm_area_struct as in the vm fault() method. 177 + * @vmf: The struct vm_fault as in the vm fault() method. 178 + * 179 + * Since ttm_fault() will reserve buffers while faulting, 180 + * we need to take the ttm read lock around it, as this driver 181 + * relies on the ttm_lock in write mode to exclude all threads from 182 + * reserving and thus validating buffers in aperture- and memory shortage 183 + * situations. 
184 + */ 185 + 186 + static int psb_ttm_fault(struct vm_area_struct *vma, 187 + struct vm_fault *vmf) 188 + { 189 + struct ttm_buffer_object *bo = (struct ttm_buffer_object *) 190 + vma->vm_private_data; 191 + struct drm_psb_private *dev_priv = 192 + container_of(bo->bdev, struct drm_psb_private, bdev); 193 + int ret; 194 + 195 + ret = ttm_read_lock(&dev_priv->ttm_lock, true); 196 + if (unlikely(ret != 0)) 197 + return VM_FAULT_NOPAGE; 198 + 199 + ret = dev_priv->ttm_vm_ops->fault(vma, vmf); 200 + 201 + ttm_read_unlock(&dev_priv->ttm_lock); 202 + return ret; 203 + } 204 + 205 + /** 206 + * if vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET call directly to 207 + * PVRMMap 208 + */ 209 + int psb_mmap(struct file *filp, struct vm_area_struct *vma) 210 + { 211 + struct drm_file *file_priv; 212 + struct drm_psb_private *dev_priv; 213 + int ret; 214 + 215 + if (vma->vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET || 216 + vma->vm_pgoff > 2 * DRM_PSB_FILE_PAGE_OFFSET) 217 + #if 0 /* FIXMEAC */ 218 + return PVRMMap(filp, vma); 219 + #else 220 + return -EINVAL; 221 + #endif 222 + 223 + file_priv = (struct drm_file *) filp->private_data; 224 + dev_priv = psb_priv(file_priv->minor->dev); 225 + 226 + ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev); 227 + if (unlikely(ret != 0)) 228 + return ret; 229 + 230 + if (unlikely(dev_priv->ttm_vm_ops == NULL)) { 231 + dev_priv->ttm_vm_ops = (struct vm_operations_struct *) 232 + vma->vm_ops; 233 + psb_ttm_vm_ops = *vma->vm_ops; 234 + psb_ttm_vm_ops.fault = &psb_ttm_fault; 235 + } 236 + 237 + vma->vm_ops = &psb_ttm_vm_ops; 238 + 239 + return 0; 240 + } 241 + /* 242 + ssize_t psb_ttm_write(struct file *filp, const char __user *buf, 243 + size_t count, loff_t *f_pos) 244 + { 245 + struct drm_file *file_priv = (struct drm_file *)filp->private_data; 246 + struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev); 247 + 248 + return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, 1); 249 + } 250 + 251 + ssize_t psb_ttm_read(struct file *filp, char 
__user *buf, 252 + size_t count, loff_t *f_pos) 253 + { 254 + struct drm_file *file_priv = (struct drm_file *)filp->private_data; 255 + struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev); 256 + 257 + return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, 1); 258 + } 259 + */ 260 + int psb_verify_access(struct ttm_buffer_object *bo, 261 + struct file *filp) 262 + { 263 + struct drm_file *file_priv = (struct drm_file *)filp->private_data; 264 + 265 + if (capable(CAP_SYS_ADMIN)) 266 + return 0; 267 + 268 + if (unlikely(!file_priv->authenticated)) 269 + return -EPERM; 270 + 271 + return ttm_pl_verify_access(bo, psb_fpriv(file_priv)->tfile); 272 + } 273 + 274 + static int psb_ttm_mem_global_init(struct drm_global_reference *ref) 275 + { 276 + return ttm_mem_global_init(ref->object); 277 + } 278 + 279 + static void psb_ttm_mem_global_release(struct drm_global_reference *ref) 280 + { 281 + ttm_mem_global_release(ref->object); 282 + } 283 + 284 + int psb_ttm_global_init(struct drm_psb_private *dev_priv) 285 + { 286 + struct drm_global_reference *global_ref; 287 + int ret; 288 + 289 + global_ref = &dev_priv->mem_global_ref; 290 + global_ref->global_type = DRM_GLOBAL_TTM_MEM; 291 + global_ref->size = sizeof(struct ttm_mem_global); 292 + global_ref->init = &psb_ttm_mem_global_init; 293 + global_ref->release = &psb_ttm_mem_global_release; 294 + 295 + ret = drm_global_item_ref(global_ref); 296 + if (unlikely(ret != 0)) { 297 + DRM_ERROR("Failed referencing a global TTM memory object.\n"); 298 + return ret; 299 + } 300 + 301 + dev_priv->bo_global_ref.mem_glob = dev_priv->mem_global_ref.object; 302 + global_ref = &dev_priv->bo_global_ref.ref; 303 + global_ref->global_type = DRM_GLOBAL_TTM_BO; 304 + global_ref->size = sizeof(struct ttm_bo_global); 305 + global_ref->init = &ttm_bo_global_init; 306 + global_ref->release = &ttm_bo_global_release; 307 + ret = drm_global_item_ref(global_ref); 308 + if (ret != 0) { 309 + DRM_ERROR("Failed setting up TTM BO 
subsystem.\n"); 310 + drm_global_item_unref(global_ref); 311 + return ret; 312 + } 313 + return 0; 314 + } 315 + 316 + void psb_ttm_global_release(struct drm_psb_private *dev_priv) 317 + { 318 + drm_global_item_unref(&dev_priv->mem_global_ref); 319 + } 320 + 321 + int psb_getpageaddrs_ioctl(struct drm_device *dev, void *data, 322 + struct drm_file *file_priv) 323 + { 324 + struct drm_psb_getpageaddrs_arg *arg = data; 325 + struct ttm_buffer_object *bo; 326 + struct ttm_tt *ttm; 327 + struct page **tt_pages; 328 + unsigned long i, num_pages; 329 + unsigned long *p = arg->page_addrs; 330 + int ret = 0; 331 + 332 + bo = ttm_buffer_object_lookup(psb_fpriv(file_priv)->tfile, 333 + arg->handle); 334 + if (unlikely(bo == NULL)) { 335 + printk(KERN_ERR 336 + "Could not find buffer object for getpageaddrs.\n"); 337 + return -EINVAL; 338 + } 339 + 340 + arg->gtt_offset = bo->offset; 341 + ttm = bo->ttm; 342 + num_pages = ttm->num_pages; 343 + tt_pages = ttm->pages; 344 + 345 + for (i = 0; i < num_pages; i++) 346 + p[i] = (unsigned long)page_to_phys(tt_pages[i]); 347 + 348 + return ret; 349 + }
+628
drivers/staging/gma500/psb_ttm_placement_user.c
/**************************************************************************
 *
 * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
/*
 * User-visible buffer-object placement ioctls: wraps TTM buffer objects
 * in ttm_base_objects so user space can refer to them by handle.
 */

#include "psb_ttm_placement_user.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "psb_ttm_userobj_api.h"
#include "ttm/ttm_lock.h"
#include <linux/slab.h>
#include <linux/sched.h>

/* A TTM buffer object together with the base object that gives it a
 * user-space handle; freed as one unit in ttm_bo_user_destroy(). */
struct ttm_bo_user_object {
	struct ttm_base_object base;
	struct ttm_buffer_object bo;
};

/* Cached size of a user BO; computed once in ttm_pl_size(). */
static size_t pl_bo_size;

/* Fallback placement priority order used when an object must be moved. */
static uint32_t psb_busy_prios[] = {
	TTM_PL_TT,
	TTM_PL_PRIV0,		/* CI */
	TTM_PL_PRIV2,		/* RAR */
	TTM_PL_PRIV1,		/* DRM_PSB_MEM_MMU */
	TTM_PL_SYSTEM
};

/* Template placement; callers copy it and fill in num_placement/placement. */
static const struct ttm_placement default_placement = {
	0, 0, 0, NULL, 5, psb_busy_prios
};

/*
 * Accounting size of a user buffer object with num_pages pages.
 *
 * NOTE(review): pl_bo_size is computed here but does not contribute to
 * the returned value (which uses glob->ttm_bo_size) — looks vestigial;
 * confirm before removing.
 */
static size_t ttm_pl_size(struct ttm_bo_device *bdev, unsigned long num_pages)
{
	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(pl_bo_size == 0)) {
		pl_bo_size = bdev->glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct ttm_bo_user_object));
	}

	return bdev->glob->ttm_bo_size + 2 * page_array_size;
}

/*
 * Resolve a user handle to its ttm_bo_user_object, verifying the base
 * object type.  Returns NULL on a bad handle; on success the caller
 * holds a base-object reference it must drop.
 */
static struct ttm_bo_user_object *ttm_bo_user_lookup(struct ttm_object_file
						     *tfile, uint32_t handle)
{
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return NULL;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return NULL;
	}

	return container_of(base, struct ttm_bo_user_object, base);
}

/*
 * Resolve a user handle to a referenced ttm_buffer_object.  The base
 * object reference from the lookup is exchanged for a BO reference,
 * which the caller must drop with ttm_bo_unref().
 */
struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
						   *tfile, uint32_t handle)
{
	struct ttm_bo_user_object *user_bo;
	struct ttm_base_object *base;

	user_bo = ttm_bo_user_lookup(tfile, handle);
	if (unlikely(user_bo == NULL))
		return NULL;

	(void)ttm_bo_reference(&user_bo->bo);
	base = &user_bo->base;
	ttm_base_object_unref(&base);
	return &user_bo->bo;
}

/* BO destroy callback: return the accounted size and free the combined
 * user object allocation. */
static void ttm_bo_user_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_bo_user_object *user_bo =
	    container_of(bo, struct ttm_bo_user_object, bo);

	ttm_mem_global_free(bo->glob->mem_glob, bo->acc_size);
	kfree(user_bo);
}

/* Base-object release callback: drop the BO reference held by the
 * handle; actual freeing happens in ttm_bo_user_destroy(). */
static void ttm_bo_user_release(struct ttm_base_object **p_base)
{
	struct ttm_bo_user_object *user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	user_bo = container_of(base, struct ttm_bo_user_object, base);
	bo = &user_bo->bo;
	ttm_bo_unref(&bo);
}
126 + 127 + static void ttm_bo_user_ref_release(struct ttm_base_object *base, 128 + enum ttm_ref_type ref_type) 129 + { 130 + struct ttm_bo_user_object *user_bo = 131 + container_of(base, struct ttm_bo_user_object, base); 132 + struct ttm_buffer_object *bo = &user_bo->bo; 133 + 134 + switch (ref_type) { 135 + case TTM_REF_SYNCCPU_WRITE: 136 + ttm_bo_synccpu_write_release(bo); 137 + break; 138 + default: 139 + BUG(); 140 + } 141 + } 142 + 143 + static void ttm_pl_fill_rep(struct ttm_buffer_object *bo, 144 + struct ttm_pl_rep *rep) 145 + { 146 + struct ttm_bo_user_object *user_bo = 147 + container_of(bo, struct ttm_bo_user_object, bo); 148 + 149 + rep->gpu_offset = bo->offset; 150 + rep->bo_size = bo->num_pages << PAGE_SHIFT; 151 + rep->map_handle = bo->addr_space_offset; 152 + rep->placement = bo->mem.placement; 153 + rep->handle = user_bo->base.hash.key; 154 + rep->sync_object_arg = (uint32_t) (unsigned long)bo->sync_obj_arg; 155 + } 156 + 157 + /* FIXME Copy from upstream TTM */ 158 + static inline size_t ttm_bo_size(struct ttm_bo_global *glob, 159 + unsigned long num_pages) 160 + { 161 + size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) & 162 + PAGE_MASK; 163 + 164 + return glob->ttm_bo_size + 2 * page_array_size; 165 + } 166 + 167 + /* FIXME Copy from upstream TTM "ttm_bo_create", upstream TTM does not 168 + export this, so copy it here */ 169 + static int ttm_bo_create_private(struct ttm_bo_device *bdev, 170 + unsigned long size, 171 + enum ttm_bo_type type, 172 + struct ttm_placement *placement, 173 + uint32_t page_alignment, 174 + unsigned long buffer_start, 175 + bool interruptible, 176 + struct file *persistant_swap_storage, 177 + struct ttm_buffer_object **p_bo) 178 + { 179 + struct ttm_buffer_object *bo; 180 + struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; 181 + int ret; 182 + 183 + size_t acc_size = 184 + ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); 185 + ret = ttm_mem_global_alloc(mem_glob, acc_size, false, 
false); 186 + if (unlikely(ret != 0)) 187 + return ret; 188 + 189 + bo = kzalloc(sizeof(*bo), GFP_KERNEL); 190 + 191 + if (unlikely(bo == NULL)) { 192 + ttm_mem_global_free(mem_glob, acc_size); 193 + return -ENOMEM; 194 + } 195 + 196 + ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, 197 + buffer_start, interruptible, 198 + persistant_swap_storage, acc_size, NULL); 199 + if (likely(ret == 0)) 200 + *p_bo = bo; 201 + 202 + return ret; 203 + } 204 + 205 + int psb_ttm_bo_check_placement(struct ttm_buffer_object *bo, 206 + struct ttm_placement *placement) 207 + { 208 + int i; 209 + 210 + for (i = 0; i < placement->num_placement; i++) { 211 + if (!capable(CAP_SYS_ADMIN)) { 212 + if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) { 213 + printk(KERN_ERR TTM_PFX "Need to be root to " 214 + "modify NO_EVICT status.\n"); 215 + return -EINVAL; 216 + } 217 + } 218 + } 219 + for (i = 0; i < placement->num_busy_placement; i++) { 220 + if (!capable(CAP_SYS_ADMIN)) { 221 + if (placement->busy_placement[i] 222 + & TTM_PL_FLAG_NO_EVICT) { 223 + printk(KERN_ERR TTM_PFX "Need to be root to modify NO_EVICT status.\n"); 224 + return -EINVAL; 225 + } 226 + } 227 + } 228 + return 0; 229 + } 230 + 231 + int ttm_buffer_object_create(struct ttm_bo_device *bdev, 232 + unsigned long size, 233 + enum ttm_bo_type type, 234 + uint32_t flags, 235 + uint32_t page_alignment, 236 + unsigned long buffer_start, 237 + bool interruptible, 238 + struct file *persistant_swap_storage, 239 + struct ttm_buffer_object **p_bo) 240 + { 241 + struct ttm_placement placement = default_placement; 242 + int ret; 243 + 244 + if ((flags & TTM_PL_MASK_CACHING) == 0) 245 + flags |= TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; 246 + 247 + placement.num_placement = 1; 248 + placement.placement = &flags; 249 + 250 + ret = ttm_bo_create_private(bdev, 251 + size, 252 + type, 253 + &placement, 254 + page_alignment, 255 + buffer_start, 256 + interruptible, 257 + persistant_swap_storage, 258 + p_bo); 259 + 260 + 
return ret; 261 + } 262 + 263 + 264 + int ttm_pl_create_ioctl(struct ttm_object_file *tfile, 265 + struct ttm_bo_device *bdev, 266 + struct ttm_lock *lock, void *data) 267 + { 268 + union ttm_pl_create_arg *arg = data; 269 + struct ttm_pl_create_req *req = &arg->req; 270 + struct ttm_pl_rep *rep = &arg->rep; 271 + struct ttm_buffer_object *bo; 272 + struct ttm_buffer_object *tmp; 273 + struct ttm_bo_user_object *user_bo; 274 + uint32_t flags; 275 + int ret = 0; 276 + struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; 277 + struct ttm_placement placement = default_placement; 278 + size_t acc_size = 279 + ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT); 280 + ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); 281 + if (unlikely(ret != 0)) 282 + return ret; 283 + 284 + flags = req->placement; 285 + user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); 286 + if (unlikely(user_bo == NULL)) { 287 + ttm_mem_global_free(mem_glob, acc_size); 288 + return -ENOMEM; 289 + } 290 + 291 + bo = &user_bo->bo; 292 + ret = ttm_read_lock(lock, true); 293 + if (unlikely(ret != 0)) { 294 + ttm_mem_global_free(mem_glob, acc_size); 295 + kfree(user_bo); 296 + return ret; 297 + } 298 + 299 + placement.num_placement = 1; 300 + placement.placement = &flags; 301 + 302 + if ((flags & TTM_PL_MASK_CACHING) == 0) 303 + flags |= TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; 304 + 305 + ret = ttm_bo_init(bdev, bo, req->size, 306 + ttm_bo_type_device, &placement, 307 + req->page_alignment, 0, true, 308 + NULL, acc_size, &ttm_bo_user_destroy); 309 + ttm_read_unlock(lock); 310 + 311 + /* 312 + * Note that the ttm_buffer_object_init function 313 + * would've called the destroy function on failure!! 
314 + */ 315 + 316 + if (unlikely(ret != 0)) 317 + goto out; 318 + 319 + tmp = ttm_bo_reference(bo); 320 + ret = ttm_base_object_init(tfile, &user_bo->base, 321 + flags & TTM_PL_FLAG_SHARED, 322 + ttm_buffer_type, 323 + &ttm_bo_user_release, 324 + &ttm_bo_user_ref_release); 325 + if (unlikely(ret != 0)) 326 + goto out_err; 327 + 328 + ttm_pl_fill_rep(bo, rep); 329 + ttm_bo_unref(&bo); 330 + out: 331 + return 0; 332 + out_err: 333 + ttm_bo_unref(&tmp); 334 + ttm_bo_unref(&bo); 335 + return ret; 336 + } 337 + 338 + int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile, 339 + struct ttm_bo_device *bdev, 340 + struct ttm_lock *lock, void *data) 341 + { 342 + union ttm_pl_create_ub_arg *arg = data; 343 + struct ttm_pl_create_ub_req *req = &arg->req; 344 + struct ttm_pl_rep *rep = &arg->rep; 345 + struct ttm_buffer_object *bo; 346 + struct ttm_buffer_object *tmp; 347 + struct ttm_bo_user_object *user_bo; 348 + uint32_t flags; 349 + int ret = 0; 350 + struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; 351 + struct ttm_placement placement = default_placement; 352 + size_t acc_size = 353 + ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT); 354 + ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); 355 + if (unlikely(ret != 0)) 356 + return ret; 357 + 358 + flags = req->placement; 359 + user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); 360 + if (unlikely(user_bo == NULL)) { 361 + ttm_mem_global_free(mem_glob, acc_size); 362 + return -ENOMEM; 363 + } 364 + ret = ttm_read_lock(lock, true); 365 + if (unlikely(ret != 0)) { 366 + ttm_mem_global_free(mem_glob, acc_size); 367 + kfree(user_bo); 368 + return ret; 369 + } 370 + bo = &user_bo->bo; 371 + 372 + placement.num_placement = 1; 373 + placement.placement = &flags; 374 + 375 + ret = ttm_bo_init(bdev, 376 + bo, 377 + req->size, 378 + ttm_bo_type_user, 379 + &placement, 380 + req->page_alignment, 381 + req->user_address, 382 + true, 383 + NULL, 384 + acc_size, 385 + &ttm_bo_user_destroy); 386 + 387 + /* 
388 + * Note that the ttm_buffer_object_init function 389 + * would've called the destroy function on failure!! 390 + */ 391 + ttm_read_unlock(lock); 392 + if (unlikely(ret != 0)) 393 + goto out; 394 + 395 + tmp = ttm_bo_reference(bo); 396 + ret = ttm_base_object_init(tfile, &user_bo->base, 397 + flags & TTM_PL_FLAG_SHARED, 398 + ttm_buffer_type, 399 + &ttm_bo_user_release, 400 + &ttm_bo_user_ref_release); 401 + if (unlikely(ret != 0)) 402 + goto out_err; 403 + 404 + ttm_pl_fill_rep(bo, rep); 405 + ttm_bo_unref(&bo); 406 + out: 407 + return 0; 408 + out_err: 409 + ttm_bo_unref(&tmp); 410 + ttm_bo_unref(&bo); 411 + return ret; 412 + } 413 + 414 + int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data) 415 + { 416 + union ttm_pl_reference_arg *arg = data; 417 + struct ttm_pl_rep *rep = &arg->rep; 418 + struct ttm_bo_user_object *user_bo; 419 + struct ttm_buffer_object *bo; 420 + struct ttm_base_object *base; 421 + int ret; 422 + 423 + user_bo = ttm_bo_user_lookup(tfile, arg->req.handle); 424 + if (unlikely(user_bo == NULL)) { 425 + printk(KERN_ERR "Could not reference buffer object.\n"); 426 + return -EINVAL; 427 + } 428 + 429 + bo = &user_bo->bo; 430 + ret = ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL); 431 + if (unlikely(ret != 0)) { 432 + printk(KERN_ERR 433 + "Could not add a reference to buffer object.\n"); 434 + goto out; 435 + } 436 + 437 + ttm_pl_fill_rep(bo, rep); 438 + 439 + out: 440 + base = &user_bo->base; 441 + ttm_base_object_unref(&base); 442 + return ret; 443 + } 444 + 445 + int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data) 446 + { 447 + struct ttm_pl_reference_req *arg = data; 448 + 449 + return ttm_ref_object_base_unref(tfile, arg->handle, TTM_REF_USAGE); 450 + } 451 + 452 + int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data) 453 + { 454 + struct ttm_pl_synccpu_arg *arg = data; 455 + struct ttm_bo_user_object *user_bo; 456 + struct ttm_buffer_object *bo; 457 + struct ttm_base_object 
*base; 458 + bool existed; 459 + int ret; 460 + 461 + switch (arg->op) { 462 + case TTM_PL_SYNCCPU_OP_GRAB: 463 + user_bo = ttm_bo_user_lookup(tfile, arg->handle); 464 + if (unlikely(user_bo == NULL)) { 465 + printk(KERN_ERR 466 + "Could not find buffer object for synccpu.\n"); 467 + return -EINVAL; 468 + } 469 + bo = &user_bo->bo; 470 + base = &user_bo->base; 471 + ret = ttm_bo_synccpu_write_grab(bo, 472 + arg->access_mode & 473 + TTM_PL_SYNCCPU_MODE_NO_BLOCK); 474 + if (unlikely(ret != 0)) { 475 + ttm_base_object_unref(&base); 476 + goto out; 477 + } 478 + ret = ttm_ref_object_add(tfile, &user_bo->base, 479 + TTM_REF_SYNCCPU_WRITE, &existed); 480 + if (existed || ret != 0) 481 + ttm_bo_synccpu_write_release(bo); 482 + ttm_base_object_unref(&base); 483 + break; 484 + case TTM_PL_SYNCCPU_OP_RELEASE: 485 + ret = ttm_ref_object_base_unref(tfile, arg->handle, 486 + TTM_REF_SYNCCPU_WRITE); 487 + break; 488 + default: 489 + ret = -EINVAL; 490 + break; 491 + } 492 + out: 493 + return ret; 494 + } 495 + 496 + int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile, 497 + struct ttm_lock *lock, void *data) 498 + { 499 + union ttm_pl_setstatus_arg *arg = data; 500 + struct ttm_pl_setstatus_req *req = &arg->req; 501 + struct ttm_pl_rep *rep = &arg->rep; 502 + struct ttm_buffer_object *bo; 503 + struct ttm_bo_device *bdev; 504 + struct ttm_placement placement = default_placement; 505 + uint32_t flags[2]; 506 + int ret; 507 + 508 + bo = ttm_buffer_object_lookup(tfile, req->handle); 509 + if (unlikely(bo == NULL)) { 510 + printk(KERN_ERR 511 + "Could not find buffer object for setstatus.\n"); 512 + return -EINVAL; 513 + } 514 + 515 + bdev = bo->bdev; 516 + 517 + ret = ttm_read_lock(lock, true); 518 + if (unlikely(ret != 0)) 519 + goto out_err0; 520 + 521 + ret = ttm_bo_reserve(bo, true, false, false, 0); 522 + if (unlikely(ret != 0)) 523 + goto out_err1; 524 + 525 + ret = ttm_bo_wait_cpu(bo, false); 526 + if (unlikely(ret != 0)) 527 + goto out_err2; 528 + 529 + flags[0] = 
req->set_placement; 530 + flags[1] = req->clr_placement; 531 + 532 + placement.num_placement = 2; 533 + placement.placement = flags; 534 + 535 + /* Review internal locking ? FIXMEAC */ 536 + ret = psb_ttm_bo_check_placement(bo, &placement); 537 + if (unlikely(ret != 0)) 538 + goto out_err2; 539 + 540 + placement.num_placement = 1; 541 + flags[0] = (req->set_placement | bo->mem.placement) 542 + & ~req->clr_placement; 543 + 544 + ret = ttm_bo_validate(bo, &placement, true, false, false); 545 + if (unlikely(ret != 0)) 546 + goto out_err2; 547 + 548 + ttm_pl_fill_rep(bo, rep); 549 + out_err2: 550 + ttm_bo_unreserve(bo); 551 + out_err1: 552 + ttm_read_unlock(lock); 553 + out_err0: 554 + ttm_bo_unref(&bo); 555 + return ret; 556 + } 557 + 558 + static int psb_ttm_bo_block_reservation(struct ttm_buffer_object *bo, 559 + bool interruptible, bool no_wait) 560 + { 561 + int ret; 562 + 563 + while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { 564 + if (no_wait) 565 + return -EBUSY; 566 + else if (interruptible) { 567 + ret = wait_event_interruptible(bo->event_queue, 568 + atomic_read(&bo->reserved) == 0); 569 + if (unlikely(ret != 0)) 570 + return -ERESTART; 571 + } else { 572 + wait_event(bo->event_queue, 573 + atomic_read(&bo->reserved) == 0); 574 + } 575 + } 576 + return 0; 577 + } 578 + 579 + static void psb_ttm_bo_unblock_reservation(struct ttm_buffer_object *bo) 580 + { 581 + atomic_set(&bo->reserved, 0); 582 + wake_up_all(&bo->event_queue); 583 + } 584 + 585 + int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data) 586 + { 587 + struct ttm_pl_waitidle_arg *arg = data; 588 + struct ttm_buffer_object *bo; 589 + int ret; 590 + 591 + bo = ttm_buffer_object_lookup(tfile, arg->handle); 592 + if (unlikely(bo == NULL)) { 593 + printk(KERN_ERR "Could not find buffer object for waitidle.\n"); 594 + return -EINVAL; 595 + } 596 + 597 + ret = 598 + psb_ttm_bo_block_reservation(bo, true, 599 + arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK); 600 + if (unlikely(ret != 
0)) 601 + goto out; 602 + ret = ttm_bo_wait(bo, 603 + arg->mode & TTM_PL_WAITIDLE_MODE_LAZY, 604 + true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK); 605 + psb_ttm_bo_unblock_reservation(bo); 606 + out: 607 + ttm_bo_unref(&bo); 608 + return ret; 609 + } 610 + 611 + int ttm_pl_verify_access(struct ttm_buffer_object *bo, 612 + struct ttm_object_file *tfile) 613 + { 614 + struct ttm_bo_user_object *ubo; 615 + 616 + /* 617 + * Check bo subclass. 618 + */ 619 + 620 + if (unlikely(bo->destroy != &ttm_bo_user_destroy)) 621 + return -EPERM; 622 + 623 + ubo = container_of(bo, struct ttm_bo_user_object, bo); 624 + if (likely(ubo->base.shareable || ubo->base.tfile == tfile)) 625 + return 0; 626 + 627 + return -EPERM; 628 + }
+252
drivers/staging/gma500/psb_ttm_placement_user.h
/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 **************************************************************************/
/*
 * Authors
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
/*
 * User-space ABI for the TTM placement ioctls: request/reply structures
 * and ioctl offsets.  Shared with user space — field layout, order and
 * explicit pad members must not change.
 */

#ifndef _TTM_PLACEMENT_USER_H_
#define _TTM_PLACEMENT_USER_H_

/* uint32_t etc. for user-space builds; the kernel supplies its own. */
#if !defined(__KERNEL__) && !defined(_KERNEL)
#include <stdint.h>
#else
#include <linux/kernel.h>
#endif

#include "ttm/ttm_placement.h"

/* Version of this placement ioctl interface. */
#define TTM_PLACEMENT_MAJOR 0
#define TTM_PLACEMENT_MINOR 1
#define TTM_PLACEMENT_PL 0
#define TTM_PLACEMENT_DATE "080819"

/**
 * struct ttm_pl_create_req
 *
 * @size: The buffer object size.
 * @placement: Flags that indicate initial acceptable
 * placement.
 * @page_alignment: Required alignment in pages.
 *
 * Input to the TTM_BO_CREATE ioctl.
 */

struct ttm_pl_create_req {
	uint64_t size;
	uint32_t placement;
	uint32_t page_alignment;
};

/**
 * struct ttm_pl_create_ub_req
 *
 * @size: The buffer object size.
 * @user_address: User-space address of the memory area that
 * should be used to back the buffer object cast to 64-bit.
 * @placement: Flags that indicate initial acceptable
 * placement.
 * @page_alignment: Required alignment in pages.
 *
 * Input to the TTM_BO_CREATE_UB ioctl.
 */

struct ttm_pl_create_ub_req {
	uint64_t size;
	uint64_t user_address;
	uint32_t placement;
	uint32_t page_alignment;
};

/**
 * struct ttm_pl_rep
 *
 * @gpu_offset: The current offset into the memory region used.
 * This can be used directly by the GPU if there are no
 * additional GPU mapping procedures used by the driver.
 *
 * @bo_size: Actual buffer object size.
 *
 * @map_handle: Offset into the device address space.
 * Used for map, seek, read, write. This will never change
 * during the lifetime of an object.
 *
 * @placement: Flag indicating the placement status of
 * the buffer object using the TTM_PL flags above.
 *
 * @handle: The user-space handle that names this object.
 *
 * @sync_object_arg: Used for user-space synchronization and
 * depends on the synchronization model used. If fences are
 * used, this is the buffer_object::fence_type_mask
 *
 * @pad64: Explicit padding for a stable 32/64-bit layout.
 *
 * Output from the TTM_PL_CREATE and TTM_PL_REFERENCE, and
 * TTM_PL_SETSTATUS ioctls.
 */

struct ttm_pl_rep {
	uint64_t gpu_offset;
	uint64_t bo_size;
	uint64_t map_handle;
	uint32_t placement;
	uint32_t handle;
	uint32_t sync_object_arg;
	uint32_t pad64;
};

/**
 * struct ttm_pl_setstatus_req
 *
 * @set_placement: Placement flags to set.
 *
 * @clr_placement: Placement flags to clear.
 *
 * @handle: The object handle
 *
 * @pad64: Explicit padding for a stable 32/64-bit layout.
 *
 * Input to the TTM_PL_SETSTATUS ioctl.
 */

struct ttm_pl_setstatus_req {
	uint32_t set_placement;
	uint32_t clr_placement;
	uint32_t handle;
	uint32_t pad64;
};

/**
 * struct ttm_pl_reference_req
 *
 * @handle: The object to put a reference on.
 *
 * @pad64: Explicit padding for a stable 32/64-bit layout.
 *
 * Input to the TTM_PL_REFERENCE and the TTM_PL_UNREFERENCE ioctls.
 */

struct ttm_pl_reference_req {
	uint32_t handle;
	uint32_t pad64;
};

/*
 * ACCESS mode flags for SYNCCPU.
 *
 * TTM_SYNCCPU_MODE_READ will guarantee that the GPU is not
 * writing to the buffer.
 *
 * TTM_SYNCCPU_MODE_WRITE will guarantee that the GPU is not
 * accessing the buffer.
 *
 * TTM_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait
 * for GPU accesses to finish but return -EBUSY.
 *
 * TTM_SYNCCPU_MODE_TRYCACHED Try to place the buffer in cacheable
 * memory while synchronized for CPU.
 */

#define TTM_PL_SYNCCPU_MODE_READ TTM_ACCESS_READ
#define TTM_PL_SYNCCPU_MODE_WRITE TTM_ACCESS_WRITE
#define TTM_PL_SYNCCPU_MODE_NO_BLOCK (1 << 2)
#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3)

/**
 * struct ttm_pl_synccpu_arg
 *
 * @handle: The object to synchronize.
 *
 * @access_mode: access mode indicated by the
 * TTM_SYNCCPU_MODE flags.
 *
 * @op: indicates whether to grab or release the
 * buffer for cpu usage.
 *
 * @pad64: Explicit padding for a stable 32/64-bit layout.
 *
 * Input to the TTM_PL_SYNCCPU ioctl.
 */

struct ttm_pl_synccpu_arg {
	uint32_t handle;
	uint32_t access_mode;
	enum {
		TTM_PL_SYNCCPU_OP_GRAB,
		TTM_PL_SYNCCPU_OP_RELEASE
	} op;
	uint32_t pad64;
};

/*
 * Waiting mode flags for the TTM_BO_WAITIDLE ioctl.
 *
 * TTM_WAITIDLE_MODE_LAZY: Allow for sleeps during polling
 * wait.
 *
 * TTM_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for GPU,
 * but return -EBUSY if the buffer is busy.
 */

#define TTM_PL_WAITIDLE_MODE_LAZY (1 << 0)
#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1)

/**
 * struct ttm_waitidle_arg
 *
 * @handle: The object to synchronize.
 *
 * @mode: wait mode indicated by the
 * TTM_SYNCCPU_MODE flags.
 *
 * Argument to the TTM_BO_WAITIDLE ioctl.
 */

struct ttm_pl_waitidle_arg {
	uint32_t handle;
	uint32_t mode;
};

/* In/out argument of TTM_PL_CREATE: req on entry, rep on return. */
union ttm_pl_create_arg {
	struct ttm_pl_create_req req;
	struct ttm_pl_rep rep;
};

/* In/out argument of TTM_PL_REFERENCE: req on entry, rep on return. */
union ttm_pl_reference_arg {
	struct ttm_pl_reference_req req;
	struct ttm_pl_rep rep;
};

/* In/out argument of TTM_PL_SETSTATUS: req on entry, rep on return. */
union ttm_pl_setstatus_arg {
	struct ttm_pl_setstatus_req req;
	struct ttm_pl_rep rep;
};

/* In/out argument of TTM_PL_CREATE_UB: req on entry, rep on return. */
union ttm_pl_create_ub_arg {
	struct ttm_pl_create_ub_req req;
	struct ttm_pl_rep rep;
};

/*
 * Ioctl offsets.
 */

#define TTM_PL_CREATE 0x00
#define TTM_PL_REFERENCE 0x01
#define TTM_PL_UNREF 0x02
#define TTM_PL_SYNCCPU 0x03
#define TTM_PL_WAITIDLE 0x04
#define TTM_PL_SETSTATUS 0x05
#define TTM_PL_CREATE_UB 0x06

#endif
+85
drivers/staging/gma500/psb_ttm_userobj_api.h
··· 1 + /************************************************************************** 2 + * 3 + * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA 4 + * All Rights Reserved. 5 + * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA 6 + * All Rights Reserved. 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms and conditions of the GNU General Public License, 10 + * version 2, as published by the Free Software Foundation. 11 + * 12 + * This program is distributed in the hope it will be useful, but WITHOUT 13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 + * more details. 16 + * 17 + * You should have received a copy of the GNU General Public License along with 18 + * this program; if not, write to the Free Software Foundation, Inc., 19 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 + * 21 + **************************************************************************/ 22 + /* 23 + * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> 24 + */ 25 + 26 + #ifndef _TTM_USEROBJ_API_H_ 27 + #define _TTM_USEROBJ_API_H_ 28 + 29 + #include "psb_ttm_placement_user.h" 30 + #include "psb_ttm_fence_user.h" 31 + #include "ttm/ttm_object.h" 32 + #include "psb_ttm_fence_api.h" 33 + #include "ttm/ttm_bo_api.h" 34 + 35 + struct ttm_lock; 36 + 37 + /* 38 + * User ioctls. 
39 + */ 40 + 41 + extern int ttm_pl_create_ioctl(struct ttm_object_file *tfile, 42 + struct ttm_bo_device *bdev, 43 + struct ttm_lock *lock, void *data); 44 + extern int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile, 45 + struct ttm_bo_device *bdev, 46 + struct ttm_lock *lock, void *data); 47 + extern int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data); 48 + extern int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data); 49 + extern int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data); 50 + extern int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile, 51 + struct ttm_lock *lock, void *data); 52 + extern int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data); 53 + extern int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data); 54 + extern int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data); 55 + extern int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data); 56 + 57 + extern int 58 + ttm_fence_user_create(struct ttm_fence_device *fdev, 59 + struct ttm_object_file *tfile, 60 + uint32_t fence_class, 61 + uint32_t fence_types, 62 + uint32_t create_flags, 63 + struct ttm_fence_object **fence, uint32_t * user_handle); 64 + 65 + extern struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file 66 + *tfile, 67 + uint32_t handle); 68 + 69 + extern int 70 + ttm_pl_verify_access(struct ttm_buffer_object *bo, 71 + struct ttm_object_file *tfile); 72 + 73 + extern int ttm_buffer_object_create(struct ttm_bo_device *bdev, 74 + unsigned long size, 75 + enum ttm_bo_type type, 76 + uint32_t flags, 77 + uint32_t page_alignment, 78 + unsigned long buffer_start, 79 + bool interruptible, 80 + struct file *persistant_swap_storage, 81 + struct ttm_buffer_object **p_bo); 82 + 83 + extern int psb_ttm_bo_check_placement(struct ttm_buffer_object *bo, 84 + struct ttm_placement *placement); 85 + #endif