Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull drm updates part 2 from Dave Airlie:
"This is the follow-up pull, 3 pieces

a) exynos next stuff, was delayed but looks okay to me, one patch in
v4l bits but it was acked by v4l person.
b) UAPI disintegration bits
c) intel fixes - DP fixes, hang fixes, other misc fixes."

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (52 commits)
drm: exynos: hdmi: remove drm common hdmi platform data struct
drm: exynos: hdmi: add support for exynos5 hdmi
drm: exynos: hdmi: replace is_v13 with version check in hdmi
drm: exynos: hdmi: add support for exynos5 mixer
drm: exynos: hdmi: add support to disable video processor in mixer
drm: exynos: hdmi: add support for platform variants for mixer
drm: exynos: hdmi: add support for exynos5 hdmiphy
drm: exynos: hdmi: add support for exynos5 ddc
drm: exynos: remove drm hdmi platform data struct
drm: exynos: hdmi: turn off HPD interrupt in HDMI chip
drm: exynos: hdmi: use s5p-hdmi platform data
drm: exynos: hdmi: fix interrupt handling
drm: exynos: hdmi: support for platform variants
media: s5p-hdmi: add HPD GPIO to platform data
UAPI: (Scripted) Disintegrate include/drm
drm/i915: Fix GT_MODE default value
drm/i915: don't frob the vblank ts in finish_page_flip
drm/i915: call drm_handle_vblank before finish_page_flip
drm/i915: print warning if vmi915_gem_fault error is not handled
drm/i915: EBUSY status handling added to i915_gem_fault().
...

+2269 -1428
+2 -1
drivers/gpu/drm/drm_edid.c
··· 395 395 * \param adapter : i2c device adaptor 396 396 * \return 1 on success 397 397 */ 398 - static bool 398 + bool 399 399 drm_probe_ddc(struct i2c_adapter *adapter) 400 400 { 401 401 unsigned char out; 402 402 403 403 return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0); 404 404 } 405 + EXPORT_SYMBOL(drm_probe_ddc); 405 406 406 407 /** 407 408 * drm_get_edid - get EDID data, if available
+17 -5
drivers/gpu/drm/exynos/exynos_ddc.c
··· 26 26 { 27 27 hdmi_attach_ddc_client(client); 28 28 29 - dev_info(&client->adapter->dev, "attached s5p_ddc " 30 - "into i2c adapter successfully\n"); 29 + dev_info(&client->adapter->dev, 30 + "attached %s into i2c adapter successfully\n", 31 + client->name); 31 32 32 33 return 0; 33 34 } 34 35 35 36 static int s5p_ddc_remove(struct i2c_client *client) 36 37 { 37 - dev_info(&client->adapter->dev, "detached s5p_ddc " 38 - "from i2c adapter successfully\n"); 38 + dev_info(&client->adapter->dev, 39 + "detached %s from i2c adapter successfully\n", 40 + client->name); 39 41 40 42 return 0; 41 43 } 42 44 43 45 static struct i2c_device_id ddc_idtable[] = { 44 46 {"s5p_ddc", 0}, 47 + {"exynos5-hdmiddc", 0}, 45 48 { }, 49 + }; 50 + 51 + static struct of_device_id hdmiddc_match_types[] = { 52 + { 53 + .compatible = "samsung,exynos5-hdmiddc", 54 + }, { 55 + /* end node */ 56 + } 46 57 }; 47 58 48 59 struct i2c_driver ddc_driver = { 49 60 .driver = { 50 - .name = "s5p_ddc", 61 + .name = "exynos-hdmiddc", 51 62 .owner = THIS_MODULE, 63 + .of_match_table = hdmiddc_match_types, 52 64 }, 53 65 .id_table = ddc_idtable, 54 66 .probe = s5p_ddc_probe,
+46 -4
drivers/gpu/drm/exynos/exynos_drm_connector.c
··· 40 40 struct drm_connector drm_connector; 41 41 uint32_t encoder_id; 42 42 struct exynos_drm_manager *manager; 43 + uint32_t dpms; 43 44 }; 44 45 45 46 /* convert exynos_video_timings to drm_display_mode */ ··· 150 149 count = drm_add_edid_modes(connector, edid); 151 150 kfree(edid); 152 151 } else { 153 - struct drm_display_mode *mode = drm_mode_create(connector->dev); 154 152 struct exynos_drm_panel_info *panel; 153 + struct drm_display_mode *mode = drm_mode_create(connector->dev); 154 + if (!mode) { 155 + DRM_ERROR("failed to create a new display mode.\n"); 156 + return 0; 157 + } 155 158 156 159 if (display_ops->get_panel) 157 160 panel = display_ops->get_panel(manager->dev); ··· 199 194 return ret; 200 195 } 201 196 202 - static struct drm_encoder *exynos_drm_best_encoder( 203 - struct drm_connector *connector) 197 + struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector) 204 198 { 205 199 struct drm_device *dev = connector->dev; 206 200 struct exynos_drm_connector *exynos_connector = ··· 227 223 .mode_valid = exynos_drm_connector_mode_valid, 228 224 .best_encoder = exynos_drm_best_encoder, 229 225 }; 226 + 227 + void exynos_drm_display_power(struct drm_connector *connector, int mode) 228 + { 229 + struct drm_encoder *encoder = exynos_drm_best_encoder(connector); 230 + struct exynos_drm_connector *exynos_connector; 231 + struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); 232 + struct exynos_drm_display_ops *display_ops = manager->display_ops; 233 + 234 + exynos_connector = to_exynos_connector(connector); 235 + 236 + if (exynos_connector->dpms == mode) { 237 + DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n"); 238 + return; 239 + } 240 + 241 + if (display_ops && display_ops->power_on) 242 + display_ops->power_on(manager->dev, mode); 243 + 244 + exynos_connector->dpms = mode; 245 + } 246 + 247 + static void exynos_drm_connector_dpms(struct drm_connector *connector, 248 + int mode) 249 + { 250 + 
DRM_DEBUG_KMS("%s\n", __FILE__); 251 + 252 + /* 253 + * in case that drm_crtc_helper_set_mode() is called, 254 + * encoder/crtc->funcs->dpms() will be just returned 255 + * because they already were DRM_MODE_DPMS_ON so only 256 + * exynos_drm_display_power() will be called. 257 + */ 258 + drm_helper_connector_dpms(connector, mode); 259 + 260 + exynos_drm_display_power(connector, mode); 261 + 262 + } 230 263 231 264 static int exynos_drm_connector_fill_modes(struct drm_connector *connector, 232 265 unsigned int max_width, unsigned int max_height) ··· 324 283 } 325 284 326 285 static struct drm_connector_funcs exynos_connector_funcs = { 327 - .dpms = drm_helper_connector_dpms, 286 + .dpms = exynos_drm_connector_dpms, 328 287 .fill_modes = exynos_drm_connector_fill_modes, 329 288 .detect = exynos_drm_connector_detect, 330 289 .destroy = exynos_drm_connector_destroy, ··· 373 332 374 333 exynos_connector->encoder_id = encoder->base.id; 375 334 exynos_connector->manager = manager; 335 + exynos_connector->dpms = DRM_MODE_DPMS_OFF; 376 336 connector->encoder = encoder; 377 337 378 338 err = drm_mode_connector_attach_encoder(connector, encoder);
+4
drivers/gpu/drm/exynos/exynos_drm_connector.h
··· 31 31 struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, 32 32 struct drm_encoder *encoder); 33 33 34 + struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector); 35 + 36 + void exynos_drm_display_power(struct drm_connector *connector, int mode); 37 + 34 38 #endif
+69 -31
drivers/gpu/drm/exynos/exynos_drm_core.c
··· 34 34 35 35 static LIST_HEAD(exynos_drm_subdrv_list); 36 36 37 - static int exynos_drm_subdrv_probe(struct drm_device *dev, 37 + static int exynos_drm_create_enc_conn(struct drm_device *dev, 38 38 struct exynos_drm_subdrv *subdrv) 39 39 { 40 40 struct drm_encoder *encoder; 41 41 struct drm_connector *connector; 42 + int ret; 42 43 43 44 DRM_DEBUG_DRIVER("%s\n", __FILE__); 44 - 45 - if (subdrv->probe) { 46 - int ret; 47 - 48 - /* 49 - * this probe callback would be called by sub driver 50 - * after setting of all resources to this sub driver, 51 - * such as clock, irq and register map are done or by load() 52 - * of exynos drm driver. 53 - * 54 - * P.S. note that this driver is considered for modularization. 55 - */ 56 - ret = subdrv->probe(dev, subdrv->dev); 57 - if (ret) 58 - return ret; 59 - } 60 - 61 - if (!subdrv->manager) 62 - return 0; 63 45 64 46 subdrv->manager->dev = subdrv->dev; 65 47 ··· 60 78 connector = exynos_drm_connector_create(dev, encoder); 61 79 if (!connector) { 62 80 DRM_ERROR("failed to create connector\n"); 63 - encoder->funcs->destroy(encoder); 64 - return -EFAULT; 81 + ret = -EFAULT; 82 + goto err_destroy_encoder; 65 83 } 66 84 67 85 subdrv->encoder = encoder; 68 86 subdrv->connector = connector; 69 87 70 88 return 0; 89 + 90 + err_destroy_encoder: 91 + encoder->funcs->destroy(encoder); 92 + return ret; 71 93 } 72 94 73 - static void exynos_drm_subdrv_remove(struct drm_device *dev, 74 - struct exynos_drm_subdrv *subdrv) 95 + static void exynos_drm_destroy_enc_conn(struct exynos_drm_subdrv *subdrv) 75 96 { 76 - DRM_DEBUG_DRIVER("%s\n", __FILE__); 77 - 78 - if (subdrv->remove) 79 - subdrv->remove(dev); 80 - 81 97 if (subdrv->encoder) { 82 98 struct drm_encoder *encoder = subdrv->encoder; 83 99 encoder->funcs->destroy(encoder); ··· 89 109 } 90 110 } 91 111 112 + static int exynos_drm_subdrv_probe(struct drm_device *dev, 113 + struct exynos_drm_subdrv *subdrv) 114 + { 115 + if (subdrv->probe) { 116 + int ret; 117 + 118 + subdrv->drm_dev = 
dev; 119 + 120 + /* 121 + * this probe callback would be called by sub driver 122 + * after setting of all resources to this sub driver, 123 + * such as clock, irq and register map are done or by load() 124 + * of exynos drm driver. 125 + * 126 + * P.S. note that this driver is considered for modularization. 127 + */ 128 + ret = subdrv->probe(dev, subdrv->dev); 129 + if (ret) 130 + return ret; 131 + } 132 + 133 + return 0; 134 + } 135 + 136 + static void exynos_drm_subdrv_remove(struct drm_device *dev, 137 + struct exynos_drm_subdrv *subdrv) 138 + { 139 + DRM_DEBUG_DRIVER("%s\n", __FILE__); 140 + 141 + if (subdrv->remove) 142 + subdrv->remove(dev, subdrv->dev); 143 + } 144 + 92 145 int exynos_drm_device_register(struct drm_device *dev) 93 146 { 94 147 struct exynos_drm_subdrv *subdrv, *n; 148 + unsigned int fine_cnt = 0; 95 149 int err; 96 150 97 151 DRM_DEBUG_DRIVER("%s\n", __FILE__); ··· 134 120 return -EINVAL; 135 121 136 122 list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) { 137 - subdrv->drm_dev = dev; 138 123 err = exynos_drm_subdrv_probe(dev, subdrv); 139 124 if (err) { 140 125 DRM_DEBUG("exynos drm subdrv probe failed.\n"); 141 126 list_del(&subdrv->list); 127 + continue; 142 128 } 129 + 130 + /* 131 + * if manager is null then it means that this sub driver 132 + * doesn't need encoder and connector. 
133 + */ 134 + if (!subdrv->manager) { 135 + fine_cnt++; 136 + continue; 137 + } 138 + 139 + err = exynos_drm_create_enc_conn(dev, subdrv); 140 + if (err) { 141 + DRM_DEBUG("failed to create encoder and connector.\n"); 142 + exynos_drm_subdrv_remove(dev, subdrv); 143 + list_del(&subdrv->list); 144 + continue; 145 + } 146 + 147 + fine_cnt++; 143 148 } 149 + 150 + if (!fine_cnt) 151 + return -EINVAL; 144 152 145 153 return 0; 146 154 } ··· 179 143 return -EINVAL; 180 144 } 181 145 182 - list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) 146 + list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) { 183 147 exynos_drm_subdrv_remove(dev, subdrv); 148 + exynos_drm_destroy_enc_conn(subdrv); 149 + } 184 150 185 151 return 0; 186 152 }
+13 -7
drivers/gpu/drm/exynos/exynos_drm_crtc.c
··· 66 66 67 67 static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) 68 68 { 69 - struct drm_device *dev = crtc->dev; 70 69 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 71 70 72 71 DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode); ··· 75 76 return; 76 77 } 77 78 78 - mutex_lock(&dev->struct_mutex); 79 - 80 79 exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms); 81 80 exynos_crtc->dpms = mode; 82 - 83 - mutex_unlock(&dev->struct_mutex); 84 81 } 85 82 86 83 static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) ··· 92 97 93 98 DRM_DEBUG_KMS("%s\n", __FILE__); 94 99 100 + exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 95 101 exynos_plane_commit(exynos_crtc->plane); 96 102 exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON); 97 103 } ··· 121 125 int ret; 122 126 123 127 DRM_DEBUG_KMS("%s\n", __FILE__); 124 - 125 - exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 126 128 127 129 /* 128 130 * copy the mode data adjusted by mode_fixup() into crtc->mode ··· 154 160 int ret; 155 161 156 162 DRM_DEBUG_KMS("%s\n", __FILE__); 163 + 164 + /* when framebuffer changing is requested, crtc's dpms should be on */ 165 + if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) { 166 + DRM_ERROR("failed framebuffer changing request.\n"); 167 + return -EPERM; 168 + } 157 169 158 170 crtc_w = crtc->fb->width - x; 159 171 crtc_h = crtc->fb->height - y; ··· 212 212 int ret = -EINVAL; 213 213 214 214 DRM_DEBUG_KMS("%s\n", __FILE__); 215 + 216 + /* when the page flip is requested, crtc's dpms should be on */ 217 + if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) { 218 + DRM_ERROR("failed page flip request.\n"); 219 + return -EINVAL; 220 + } 215 221 216 222 mutex_lock(&dev->struct_mutex); 217 223
+18 -1
drivers/gpu/drm/exynos/exynos_drm_drv.h
··· 36 36 #define MAX_FB_BUFFER 4 37 37 #define DEFAULT_ZPOS -1 38 38 39 + #define _wait_for(COND, MS) ({ \ 40 + unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ 41 + int ret__ = 0; \ 42 + while (!(COND)) { \ 43 + if (time_after(jiffies, timeout__)) { \ 44 + ret__ = -ETIMEDOUT; \ 45 + break; \ 46 + } \ 47 + } \ 48 + ret__; \ 49 + }) 50 + 51 + #define wait_for(COND, MS) _wait_for(COND, MS) 52 + 39 53 struct drm_device; 40 54 struct exynos_drm_overlay; 41 55 struct drm_connector; ··· 74 60 * @commit: apply hardware specific overlay data to registers. 75 61 * @enable: enable hardware specific overlay. 76 62 * @disable: disable hardware specific overlay. 63 + * @wait_for_vblank: wait for vblank interrupt to make sure that 64 + * hardware overlay is disabled. 77 65 */ 78 66 struct exynos_drm_overlay_ops { 79 67 void (*mode_set)(struct device *subdrv_dev, ··· 83 67 void (*commit)(struct device *subdrv_dev, int zpos); 84 68 void (*enable)(struct device *subdrv_dev, int zpos); 85 69 void (*disable)(struct device *subdrv_dev, int zpos); 70 + void (*wait_for_vblank)(struct device *subdrv_dev); 86 71 }; 87 72 88 73 /* ··· 282 265 struct exynos_drm_manager *manager; 283 266 284 267 int (*probe)(struct drm_device *drm_dev, struct device *dev); 285 - void (*remove)(struct drm_device *dev); 268 + void (*remove)(struct drm_device *drm_dev, struct device *dev); 286 269 int (*open)(struct drm_device *drm_dev, struct device *dev, 287 270 struct drm_file *file); 288 271 void (*close)(struct drm_device *drm_dev, struct device *dev,
+101 -15
drivers/gpu/drm/exynos/exynos_drm_encoder.c
··· 31 31 32 32 #include "exynos_drm_drv.h" 33 33 #include "exynos_drm_encoder.h" 34 + #include "exynos_drm_connector.h" 34 35 35 36 #define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\ 36 37 drm_encoder) ··· 45 44 * @dpms: store the encoder dpms value. 46 45 */ 47 46 struct exynos_drm_encoder { 47 + struct drm_crtc *old_crtc; 48 48 struct drm_encoder drm_encoder; 49 49 struct exynos_drm_manager *manager; 50 50 int dpms; 51 51 }; 52 52 53 - static void exynos_drm_display_power(struct drm_encoder *encoder, int mode) 53 + static void exynos_drm_connector_power(struct drm_encoder *encoder, int mode) 54 54 { 55 55 struct drm_device *dev = encoder->dev; 56 56 struct drm_connector *connector; 57 - struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); 58 57 59 58 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 60 - if (connector->encoder == encoder) { 61 - struct exynos_drm_display_ops *display_ops = 62 - manager->display_ops; 63 - 59 + if (exynos_drm_best_encoder(connector) == encoder) { 64 60 DRM_DEBUG_KMS("connector[%d] dpms[%d]\n", 65 61 connector->base.id, mode); 66 - if (display_ops && display_ops->power_on) 67 - display_ops->power_on(manager->dev, mode); 62 + 63 + exynos_drm_display_power(connector, mode); 68 64 } 69 65 } 70 66 } ··· 86 88 case DRM_MODE_DPMS_ON: 87 89 if (manager_ops && manager_ops->apply) 88 90 manager_ops->apply(manager->dev); 89 - exynos_drm_display_power(encoder, mode); 91 + exynos_drm_connector_power(encoder, mode); 90 92 exynos_encoder->dpms = mode; 91 93 break; 92 94 case DRM_MODE_DPMS_STANDBY: 93 95 case DRM_MODE_DPMS_SUSPEND: 94 96 case DRM_MODE_DPMS_OFF: 95 - exynos_drm_display_power(encoder, mode); 97 + exynos_drm_connector_power(encoder, mode); 96 98 exynos_encoder->dpms = mode; 97 99 break; 98 100 default: ··· 125 127 return true; 126 128 } 127 129 130 + static void disable_plane_to_crtc(struct drm_device *dev, 131 + struct drm_crtc *old_crtc, 132 + struct drm_crtc *new_crtc) 
133 + { 134 + struct drm_plane *plane; 135 + 136 + /* 137 + * if old_crtc isn't same as encoder->crtc then it means that 138 + * user changed crtc id to another one so the plane to old_crtc 139 + * should be disabled and plane->crtc should be set to new_crtc 140 + * (encoder->crtc) 141 + */ 142 + list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 143 + if (plane->crtc == old_crtc) { 144 + /* 145 + * do not change below call order. 146 + * 147 + * plane->funcs->disable_plane call checks 148 + * if encoder->crtc is same as plane->crtc and if same 149 + * then overlay_ops->disable callback will be called 150 + * to diasble current hw overlay so plane->crtc should 151 + * have new_crtc because new_crtc was set to 152 + * encoder->crtc in advance. 153 + */ 154 + plane->crtc = new_crtc; 155 + plane->funcs->disable_plane(plane); 156 + } 157 + } 158 + } 159 + 128 160 static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder, 129 161 struct drm_display_mode *mode, 130 162 struct drm_display_mode *adjusted_mode) 131 163 { 132 164 struct drm_device *dev = encoder->dev; 133 165 struct drm_connector *connector; 134 - struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); 135 - struct exynos_drm_manager_ops *manager_ops = manager->ops; 166 + struct exynos_drm_manager *manager; 167 + struct exynos_drm_manager_ops *manager_ops; 136 168 137 169 DRM_DEBUG_KMS("%s\n", __FILE__); 138 170 139 - exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_ON); 140 - 141 171 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 142 - if (connector->encoder == encoder) 172 + if (connector->encoder == encoder) { 173 + struct exynos_drm_encoder *exynos_encoder; 174 + 175 + exynos_encoder = to_exynos_encoder(encoder); 176 + 177 + if (exynos_encoder->old_crtc != encoder->crtc && 178 + exynos_encoder->old_crtc) { 179 + 180 + /* 181 + * disable a plane to old crtc and change 182 + * crtc of the plane to new one. 
183 + */ 184 + disable_plane_to_crtc(dev, 185 + exynos_encoder->old_crtc, 186 + encoder->crtc); 187 + } 188 + 189 + manager = exynos_drm_get_manager(encoder); 190 + manager_ops = manager->ops; 191 + 143 192 if (manager_ops && manager_ops->mode_set) 144 193 manager_ops->mode_set(manager->dev, 145 194 adjusted_mode); 195 + 196 + exynos_encoder->old_crtc = encoder->crtc; 197 + } 146 198 } 147 199 } 148 200 ··· 214 166 manager_ops->commit(manager->dev); 215 167 } 216 168 169 + static void exynos_drm_encoder_disable(struct drm_encoder *encoder) 170 + { 171 + struct drm_plane *plane; 172 + struct drm_device *dev = encoder->dev; 173 + 174 + exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 175 + 176 + /* all planes connected to this encoder should be also disabled. */ 177 + list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 178 + if (plane->crtc == encoder->crtc) 179 + plane->funcs->disable_plane(plane); 180 + } 181 + } 182 + 217 183 static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { 218 184 .dpms = exynos_drm_encoder_dpms, 219 185 .mode_fixup = exynos_drm_encoder_mode_fixup, 220 186 .mode_set = exynos_drm_encoder_mode_set, 221 187 .prepare = exynos_drm_encoder_prepare, 222 188 .commit = exynos_drm_encoder_commit, 189 + .disable = exynos_drm_encoder_disable, 223 190 }; 224 191 225 192 static void exynos_drm_encoder_destroy(struct drm_encoder *encoder) ··· 401 338 manager_ops->dpms(manager->dev, mode); 402 339 403 340 /* 341 + * set current mode to new one so that data aren't updated into 342 + * registers by drm_helper_connector_dpms two times. 343 + * 344 + * in case that drm_crtc_helper_set_mode() is called, 345 + * overlay_ops->commit() and manager_ops->commit() callbacks 346 + * can be called two times, first at drm_crtc_helper_set_mode() 347 + * and second at drm_helper_connector_dpms(). 348 + * so with this setting, when drm_helper_connector_dpms() is called 349 + * encoder->funcs->dpms() will be ignored. 
350 + */ 351 + exynos_encoder->dpms = mode; 352 + 353 + /* 404 354 * if this condition is ok then it means that the crtc is already 405 355 * detached from encoder and last function for detaching is properly 406 356 * done, so clear pipe from manager to prevent repeated call. ··· 498 422 499 423 if (overlay_ops && overlay_ops->disable) 500 424 overlay_ops->disable(manager->dev, zpos); 425 + 426 + /* 427 + * wait for vblank interrupt 428 + * - this makes sure that hardware overlay is disabled to avoid 429 + * for the dma accesses to memory after gem buffer was released 430 + * because the setting for disabling the overlay will be updated 431 + * at vsync. 432 + */ 433 + if (overlay_ops->wait_for_vblank) 434 + overlay_ops->wait_for_vblank(manager->dev); 501 435 }
+62 -3
drivers/gpu/drm/exynos/exynos_drm_fb.c
··· 41 41 * exynos specific framebuffer structure. 42 42 * 43 43 * @fb: drm framebuffer obejct. 44 + * @buf_cnt: a buffer count to drm framebuffer. 44 45 * @exynos_gem_obj: array of exynos specific gem object containing a gem object. 45 46 */ 46 47 struct exynos_drm_fb { 47 48 struct drm_framebuffer fb; 49 + unsigned int buf_cnt; 48 50 struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER]; 49 51 }; 50 52 ··· 103 101 .dirty = exynos_drm_fb_dirty, 104 102 }; 105 103 104 + void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb, 105 + unsigned int cnt) 106 + { 107 + struct exynos_drm_fb *exynos_fb; 108 + 109 + exynos_fb = to_exynos_fb(fb); 110 + 111 + exynos_fb->buf_cnt = cnt; 112 + } 113 + 114 + unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb) 115 + { 116 + struct exynos_drm_fb *exynos_fb; 117 + 118 + exynos_fb = to_exynos_fb(fb); 119 + 120 + return exynos_fb->buf_cnt; 121 + } 122 + 106 123 struct drm_framebuffer * 107 124 exynos_drm_framebuffer_init(struct drm_device *dev, 108 125 struct drm_mode_fb_cmd2 *mode_cmd, ··· 148 127 return &exynos_fb->fb; 149 128 } 150 129 130 + static u32 exynos_drm_format_num_buffers(struct drm_mode_fb_cmd2 *mode_cmd) 131 + { 132 + unsigned int cnt = 0; 133 + 134 + if (mode_cmd->pixel_format != DRM_FORMAT_NV12) 135 + return drm_format_num_planes(mode_cmd->pixel_format); 136 + 137 + while (cnt != MAX_FB_BUFFER) { 138 + if (!mode_cmd->handles[cnt]) 139 + break; 140 + cnt++; 141 + } 142 + 143 + /* 144 + * check if NV12 or NV12M. 145 + * 146 + * NV12 147 + * handles[0] = base1, offsets[0] = 0 148 + * handles[1] = base1, offsets[1] = Y_size 149 + * 150 + * NV12M 151 + * handles[0] = base1, offsets[0] = 0 152 + * handles[1] = base2, offsets[1] = 0 153 + */ 154 + if (cnt == 2) { 155 + /* 156 + * in case of NV12 format, offsets[1] is not 0 and 157 + * handles[0] is same as handles[1]. 
158 + */ 159 + if (mode_cmd->offsets[1] && 160 + mode_cmd->handles[0] == mode_cmd->handles[1]) 161 + cnt = 1; 162 + } 163 + 164 + return cnt; 165 + } 166 + 151 167 static struct drm_framebuffer * 152 168 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, 153 169 struct drm_mode_fb_cmd2 *mode_cmd) ··· 192 134 struct drm_gem_object *obj; 193 135 struct drm_framebuffer *fb; 194 136 struct exynos_drm_fb *exynos_fb; 195 - int nr; 196 137 int i; 197 138 198 139 DRM_DEBUG_KMS("%s\n", __FILE__); ··· 209 152 } 210 153 211 154 exynos_fb = to_exynos_fb(fb); 212 - nr = exynos_drm_format_num_buffers(fb->pixel_format); 155 + exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd); 213 156 214 - for (i = 1; i < nr; i++) { 157 + DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt); 158 + 159 + for (i = 1; i < exynos_fb->buf_cnt; i++) { 215 160 obj = drm_gem_object_lookup(dev, file_priv, 216 161 mode_cmd->handles[i]); 217 162 if (!obj) {
+7 -13
drivers/gpu/drm/exynos/exynos_drm_fb.h
··· 28 28 #ifndef _EXYNOS_DRM_FB_H_ 29 29 #define _EXYNOS_DRM_FB_H 30 30 31 - static inline int exynos_drm_format_num_buffers(uint32_t format) 32 - { 33 - switch (format) { 34 - case DRM_FORMAT_NV12: 35 - case DRM_FORMAT_NV12MT: 36 - return 2; 37 - case DRM_FORMAT_YUV420: 38 - return 3; 39 - default: 40 - return 1; 41 - } 42 - } 43 - 44 31 struct drm_framebuffer * 45 32 exynos_drm_framebuffer_init(struct drm_device *dev, 46 33 struct drm_mode_fb_cmd2 *mode_cmd, ··· 38 51 int index); 39 52 40 53 void exynos_drm_mode_config_init(struct drm_device *dev); 54 + 55 + /* set a buffer count to drm framebuffer. */ 56 + void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb, 57 + unsigned int cnt); 58 + 59 + /* get a buffer count to drm framebuffer. */ 60 + unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb); 41 61 42 62 #endif
+3
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
··· 79 79 return -EFAULT; 80 80 } 81 81 82 + /* buffer count to framebuffer always is 1 at booting time. */ 83 + exynos_drm_fb_set_buf_cnt(fb, 1); 84 + 82 85 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); 83 86 offset += fbi->var.yoffset * fb->pitches[0]; 84 87
+92 -25
drivers/gpu/drm/exynos/exynos_drm_fimd.c
··· 57 57 58 58 #define get_fimd_context(dev) platform_get_drvdata(to_platform_device(dev)) 59 59 60 + struct fimd_driver_data { 61 + unsigned int timing_base; 62 + }; 63 + 64 + struct fimd_driver_data exynos4_fimd_driver_data = { 65 + .timing_base = 0x0, 66 + }; 67 + 68 + struct fimd_driver_data exynos5_fimd_driver_data = { 69 + .timing_base = 0x20000, 70 + }; 71 + 60 72 struct fimd_win_data { 61 73 unsigned int offset_x; 62 74 unsigned int offset_y; ··· 102 90 103 91 struct exynos_drm_panel_info *panel; 104 92 }; 93 + 94 + static inline struct fimd_driver_data *drm_fimd_get_driver_data( 95 + struct platform_device *pdev) 96 + { 97 + return (struct fimd_driver_data *) 98 + platform_get_device_id(pdev)->driver_data; 99 + } 105 100 106 101 static bool fimd_display_is_connected(struct device *dev) 107 102 { ··· 213 194 struct fimd_context *ctx = get_fimd_context(dev); 214 195 struct exynos_drm_panel_info *panel = ctx->panel; 215 196 struct fb_videomode *timing = &panel->timing; 197 + struct fimd_driver_data *driver_data; 198 + struct platform_device *pdev = to_platform_device(dev); 216 199 u32 val; 217 200 201 + driver_data = drm_fimd_get_driver_data(pdev); 218 202 if (ctx->suspended) 219 203 return; 220 204 221 205 DRM_DEBUG_KMS("%s\n", __FILE__); 222 206 223 207 /* setup polarity values from machine code. */ 224 - writel(ctx->vidcon1, ctx->regs + VIDCON1); 208 + writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1); 225 209 226 210 /* setup vertical timing values. */ 227 211 val = VIDTCON0_VBPD(timing->upper_margin - 1) | 228 212 VIDTCON0_VFPD(timing->lower_margin - 1) | 229 213 VIDTCON0_VSPW(timing->vsync_len - 1); 230 - writel(val, ctx->regs + VIDTCON0); 214 + writel(val, ctx->regs + driver_data->timing_base + VIDTCON0); 231 215 232 216 /* setup horizontal timing values. 
*/ 233 217 val = VIDTCON1_HBPD(timing->left_margin - 1) | 234 218 VIDTCON1_HFPD(timing->right_margin - 1) | 235 219 VIDTCON1_HSPW(timing->hsync_len - 1); 236 - writel(val, ctx->regs + VIDTCON1); 220 + writel(val, ctx->regs + driver_data->timing_base + VIDTCON1); 237 221 238 222 /* setup horizontal and vertical display size. */ 239 223 val = VIDTCON2_LINEVAL(timing->yres - 1) | 240 224 VIDTCON2_HOZVAL(timing->xres - 1); 241 - writel(val, ctx->regs + VIDTCON2); 225 + writel(val, ctx->regs + driver_data->timing_base + VIDTCON2); 242 226 243 227 /* setup clock source, clock divider, enable dma. */ 244 228 val = ctx->vidcon0; ··· 592 570 win_data->enabled = false; 593 571 } 594 572 573 + static void fimd_wait_for_vblank(struct device *dev) 574 + { 575 + struct fimd_context *ctx = get_fimd_context(dev); 576 + int ret; 577 + 578 + ret = wait_for((__raw_readl(ctx->regs + VIDCON1) & 579 + VIDCON1_VSTATUS_VSYNC), 50); 580 + if (ret < 0) 581 + DRM_DEBUG_KMS("vblank wait timed out.\n"); 582 + } 583 + 595 584 static struct exynos_drm_overlay_ops fimd_overlay_ops = { 596 585 .mode_set = fimd_win_mode_set, 597 586 .commit = fimd_win_commit, 598 587 .disable = fimd_win_disable, 588 + .wait_for_vblank = fimd_wait_for_vblank, 599 589 }; 600 590 601 591 static struct exynos_drm_manager fimd_manager = { ··· 712 678 return 0; 713 679 } 714 680 715 - static void fimd_subdrv_remove(struct drm_device *drm_dev) 681 + static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev) 716 682 { 717 683 DRM_DEBUG_KMS("%s\n", __FILE__); 718 684 ··· 781 747 writel(val, ctx->regs + SHADOWCON); 782 748 } 783 749 784 - static int fimd_power_on(struct fimd_context *ctx, bool enable) 750 + static int fimd_clock(struct fimd_context *ctx, bool enable) 785 751 { 786 - struct exynos_drm_subdrv *subdrv = &ctx->subdrv; 787 - struct device *dev = subdrv->dev; 788 - 789 752 DRM_DEBUG_KMS("%s\n", __FILE__); 790 - 791 - if (enable != false && enable != true) 792 - return -EINVAL; 793 753 794 754 
if (enable) { 795 755 int ret; ··· 797 769 clk_disable(ctx->bus_clk); 798 770 return ret; 799 771 } 772 + } else { 773 + clk_disable(ctx->lcd_clk); 774 + clk_disable(ctx->bus_clk); 775 + } 776 + 777 + return 0; 778 + } 779 + 780 + static int fimd_activate(struct fimd_context *ctx, bool enable) 781 + { 782 + if (enable) { 783 + int ret; 784 + struct device *dev = ctx->subdrv.dev; 785 + 786 + ret = fimd_clock(ctx, true); 787 + if (ret < 0) 788 + return ret; 800 789 801 790 ctx->suspended = false; 802 791 803 792 /* if vblank was enabled status, enable it again. */ 804 793 if (test_and_clear_bit(0, &ctx->irq_flags)) 805 794 fimd_enable_vblank(dev); 806 - 807 - fimd_apply(dev); 808 795 } else { 809 - clk_disable(ctx->lcd_clk); 810 - clk_disable(ctx->bus_clk); 811 - 796 + fimd_clock(ctx, false); 812 797 ctx->suspended = true; 813 798 } 814 799 ··· 971 930 { 972 931 struct fimd_context *ctx = get_fimd_context(dev); 973 932 974 - if (pm_runtime_suspended(dev)) 975 - return 0; 976 - 977 933 /* 978 934 * do not use pm_runtime_suspend(). if pm_runtime_suspend() is 979 935 * called here, an error would be returned by that interface 980 936 * because the usage_count of pm runtime is more than 1. 981 937 */ 982 - return fimd_power_on(ctx, false); 938 + if (!pm_runtime_suspended(dev)) 939 + return fimd_activate(ctx, false); 940 + 941 + return 0; 983 942 } 984 943 985 944 static int fimd_resume(struct device *dev) ··· 991 950 * of pm runtime would still be 1 so in this case, fimd driver 992 951 * should be on directly not drawing on pm runtime interface. 993 952 */ 994 - if (!pm_runtime_suspended(dev)) 995 - return fimd_power_on(ctx, true); 953 + if (pm_runtime_suspended(dev)) { 954 + int ret; 955 + 956 + ret = fimd_activate(ctx, true); 957 + if (ret < 0) 958 + return ret; 959 + 960 + /* 961 + * in case of dpms on(standby), fimd_apply function will 962 + * be called by encoder's dpms callback to update fimd's 963 + * registers but in case of sleep wakeup, it's not. 
964 + * so fimd_apply function should be called at here. 965 + */ 966 + fimd_apply(dev); 967 + } 996 968 997 969 return 0; 998 970 } ··· 1018 964 1019 965 DRM_DEBUG_KMS("%s\n", __FILE__); 1020 966 1021 - return fimd_power_on(ctx, false); 967 + return fimd_activate(ctx, false); 1022 968 } 1023 969 1024 970 static int fimd_runtime_resume(struct device *dev) ··· 1027 973 1028 974 DRM_DEBUG_KMS("%s\n", __FILE__); 1029 975 1030 - return fimd_power_on(ctx, true); 976 + return fimd_activate(ctx, true); 1031 977 } 1032 978 #endif 979 + 980 + static struct platform_device_id fimd_driver_ids[] = { 981 + { 982 + .name = "exynos4-fb", 983 + .driver_data = (unsigned long)&exynos4_fimd_driver_data, 984 + }, { 985 + .name = "exynos5-fb", 986 + .driver_data = (unsigned long)&exynos5_fimd_driver_data, 987 + }, 988 + {}, 989 + }; 990 + MODULE_DEVICE_TABLE(platform, fimd_driver_ids); 1033 991 1034 992 static const struct dev_pm_ops fimd_pm_ops = { 1035 993 SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume) ··· 1051 985 struct platform_driver fimd_driver = { 1052 986 .probe = fimd_probe, 1053 987 .remove = __devexit_p(fimd_remove), 988 + .id_table = fimd_driver_ids, 1054 989 .driver = { 1055 990 .name = "exynos4-fb", 1056 991 .owner = THIS_MODULE,
+3 -2
drivers/gpu/drm/exynos/exynos_drm_g2d.c
··· 122 122 struct list_head list; 123 123 struct list_head run_cmdlist; 124 124 struct list_head event_list; 125 + pid_t pid; 125 126 struct completion complete; 126 127 int async; 127 128 }; ··· 165 164 return -ENOMEM; 166 165 } 167 166 168 - node = kcalloc(G2D_CMDLIST_NUM, G2D_CMDLIST_NUM * sizeof(*node), 169 - GFP_KERNEL); 167 + node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL); 170 168 if (!node) { 171 169 dev_err(dev, "failed to allocate memory\n"); 172 170 ret = -ENOMEM; ··· 679 679 } 680 680 681 681 mutex_lock(&g2d->runqueue_mutex); 682 + runqueue_node->pid = current->pid; 682 683 list_add_tail(&runqueue_node->list, &g2d->runqueue); 683 684 if (!g2d->runqueue_node) 684 685 g2d_exec_runqueue(g2d);
+37 -25
drivers/gpu/drm/exynos/exynos_drm_hdmi.c
··· 29 29 #define get_ctx_from_subdrv(subdrv) container_of(subdrv,\ 30 30 struct drm_hdmi_context, subdrv); 31 31 32 + /* Common hdmi subdrv needs to access the hdmi and mixer though context. 33 + * These should be initialied by the repective drivers */ 34 + static struct exynos_drm_hdmi_context *hdmi_ctx; 35 + static struct exynos_drm_hdmi_context *mixer_ctx; 36 + 32 37 /* these callback points shoud be set by specific drivers. */ 33 38 static struct exynos_hdmi_ops *hdmi_ops; 34 39 static struct exynos_mixer_ops *mixer_ops; ··· 45 40 46 41 bool enabled[MIXER_WIN_NR]; 47 42 }; 43 + 44 + void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx) 45 + { 46 + if (ctx) 47 + hdmi_ctx = ctx; 48 + } 49 + 50 + void exynos_mixer_drv_attach(struct exynos_drm_hdmi_context *ctx) 51 + { 52 + if (ctx) 53 + mixer_ctx = ctx; 54 + } 48 55 49 56 void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops) 50 57 { ··· 291 274 ctx->enabled[win] = false; 292 275 } 293 276 277 + static void drm_mixer_wait_for_vblank(struct device *subdrv_dev) 278 + { 279 + struct drm_hdmi_context *ctx = to_context(subdrv_dev); 280 + 281 + DRM_DEBUG_KMS("%s\n", __FILE__); 282 + 283 + if (mixer_ops && mixer_ops->wait_for_vblank) 284 + mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx); 285 + } 286 + 294 287 static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = { 295 288 .mode_set = drm_mixer_mode_set, 296 289 .commit = drm_mixer_commit, 297 290 .disable = drm_mixer_disable, 291 + .wait_for_vblank = drm_mixer_wait_for_vblank, 298 292 }; 299 293 300 294 static struct exynos_drm_manager hdmi_manager = { ··· 320 292 { 321 293 struct exynos_drm_subdrv *subdrv = to_subdrv(dev); 322 294 struct drm_hdmi_context *ctx; 323 - struct platform_device *pdev = to_platform_device(dev); 324 - struct exynos_drm_common_hdmi_pd *pd; 325 295 326 296 DRM_DEBUG_KMS("%s\n", __FILE__); 327 297 328 - pd = pdev->dev.platform_data; 329 - 330 - if (!pd) { 331 - DRM_DEBUG_KMS("platform data is null.\n"); 298 + if (!hdmi_ctx) { 
299 + DRM_ERROR("hdmi context not initialized.\n"); 332 300 return -EFAULT; 333 301 } 334 302 335 - if (!pd->hdmi_dev) { 336 - DRM_DEBUG_KMS("hdmi device is null.\n"); 337 - return -EFAULT; 338 - } 339 - 340 - if (!pd->mixer_dev) { 341 - DRM_DEBUG_KMS("mixer device is null.\n"); 303 + if (!mixer_ctx) { 304 + DRM_ERROR("mixer context not initialized.\n"); 342 305 return -EFAULT; 343 306 } 344 307 345 308 ctx = get_ctx_from_subdrv(subdrv); 346 309 347 - ctx->hdmi_ctx = (struct exynos_drm_hdmi_context *) 348 - to_context(pd->hdmi_dev); 349 - if (!ctx->hdmi_ctx) { 350 - DRM_DEBUG_KMS("hdmi context is null.\n"); 310 + if (!ctx) { 311 + DRM_ERROR("no drm hdmi context.\n"); 351 312 return -EFAULT; 352 313 } 314 + 315 + ctx->hdmi_ctx = hdmi_ctx; 316 + ctx->mixer_ctx = mixer_ctx; 353 317 354 318 ctx->hdmi_ctx->drm_dev = drm_dev; 355 - 356 - ctx->mixer_ctx = (struct exynos_drm_hdmi_context *) 357 - to_context(pd->mixer_dev); 358 - if (!ctx->mixer_ctx) { 359 - DRM_DEBUG_KMS("mixer context is null.\n"); 360 - return -EFAULT; 361 - } 362 - 363 319 ctx->mixer_ctx->drm_dev = drm_dev; 364 320 365 321 return 0;
+3
drivers/gpu/drm/exynos/exynos_drm_hdmi.h
··· 67 67 void (*dpms)(void *ctx, int mode); 68 68 69 69 /* overlay */ 70 + void (*wait_for_vblank)(void *ctx); 70 71 void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay); 71 72 void (*win_commit)(void *ctx, int zpos); 72 73 void (*win_disable)(void *ctx, int zpos); 73 74 }; 74 75 76 + void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx); 77 + void exynos_mixer_drv_attach(struct exynos_drm_hdmi_context *ctx); 75 78 void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops); 76 79 void exynos_mixer_ops_register(struct exynos_mixer_ops *ops); 77 80 #endif
+55 -3
drivers/gpu/drm/exynos/exynos_drm_plane.c
··· 32 32 DRM_FORMAT_NV12MT, 33 33 }; 34 34 35 + /* 36 + * This function is to get X or Y size shown via screen. This needs length and 37 + * start position of CRTC. 38 + * 39 + * <--- length ---> 40 + * CRTC ---------------- 41 + * ^ start ^ end 42 + * 43 + * There are six cases from a to b. 44 + * 45 + * <----- SCREEN -----> 46 + * 0 last 47 + * ----------|------------------|---------- 48 + * CRTCs 49 + * a ------- 50 + * b ------- 51 + * c -------------------------- 52 + * d -------- 53 + * e ------- 54 + * f ------- 55 + */ 56 + static int exynos_plane_get_size(int start, unsigned length, unsigned last) 57 + { 58 + int end = start + length; 59 + int size = 0; 60 + 61 + if (start <= 0) { 62 + if (end > 0) 63 + size = min_t(unsigned, end, last); 64 + } else if (start <= last) { 65 + size = min_t(unsigned, last - start, length); 66 + } 67 + 68 + return size; 69 + } 70 + 35 71 int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, 36 72 struct drm_framebuffer *fb, int crtc_x, int crtc_y, 37 73 unsigned int crtc_w, unsigned int crtc_h, ··· 83 47 84 48 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 85 49 86 - nr = exynos_drm_format_num_buffers(fb->pixel_format); 50 + nr = exynos_drm_fb_get_buf_cnt(fb); 87 51 for (i = 0; i < nr; i++) { 88 52 struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i); 89 53 ··· 100 64 (unsigned long)overlay->dma_addr[i]); 101 65 } 102 66 103 - actual_w = min((unsigned)(crtc->mode.hdisplay - crtc_x), crtc_w); 104 - actual_h = min((unsigned)(crtc->mode.vdisplay - crtc_y), crtc_h); 67 + actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay); 68 + actual_h = exynos_plane_get_size(crtc_y, crtc_h, crtc->mode.vdisplay); 69 + 70 + if (crtc_x < 0) { 71 + if (actual_w) 72 + src_x -= crtc_x; 73 + else 74 + src_x += crtc_w; 75 + crtc_x = 0; 76 + } 77 + 78 + if (crtc_y < 0) { 79 + if (actual_h) 80 + src_y -= crtc_y; 81 + else 82 + src_y += crtc_h; 83 + crtc_y = 0; 84 + } 105 85 106 86 /* set drm framebuffer 
data. */ 107 87 overlay->fb_x = src_x;
+22 -2
drivers/gpu/drm/exynos/exynos_drm_vidi.c
··· 56 56 unsigned int connected; 57 57 bool vblank_on; 58 58 bool suspended; 59 + bool direct_vblank; 59 60 struct work_struct work; 60 61 struct mutex lock; 61 62 }; ··· 224 223 225 224 if (!test_and_set_bit(0, &ctx->irq_flags)) 226 225 ctx->vblank_on = true; 226 + 227 + ctx->direct_vblank = true; 228 + 229 + /* 230 + * in case of page flip request, vidi_finish_pageflip function 231 + * will not be called because direct_vblank is true and then 232 + * that function will be called by overlay_ops->commit callback 233 + */ 234 + schedule_work(&ctx->work); 227 235 228 236 return 0; 229 237 } ··· 435 425 /* refresh rate is about 50Hz. */ 436 426 usleep_range(16000, 20000); 437 427 438 - drm_handle_vblank(subdrv->drm_dev, manager->pipe); 428 + mutex_lock(&ctx->lock); 429 + 430 + if (ctx->direct_vblank) { 431 + drm_handle_vblank(subdrv->drm_dev, manager->pipe); 432 + ctx->direct_vblank = false; 433 + mutex_unlock(&ctx->lock); 434 + return; 435 + } 436 + 437 + mutex_unlock(&ctx->lock); 438 + 439 439 vidi_finish_pageflip(subdrv->drm_dev, manager->pipe); 440 440 } 441 441 ··· 473 453 return 0; 474 454 } 475 455 476 - static void vidi_subdrv_remove(struct drm_device *drm_dev) 456 + static void vidi_subdrv_remove(struct drm_device *drm_dev, struct device *dev) 477 457 { 478 458 DRM_DEBUG_KMS("%s\n", __FILE__); 479 459
+144 -52
drivers/gpu/drm/exynos/exynos_hdmi.c
··· 32 32 #include <linux/pm_runtime.h> 33 33 #include <linux/clk.h> 34 34 #include <linux/regulator/consumer.h> 35 + #include <linux/io.h> 36 + #include <linux/of_gpio.h> 37 + #include <plat/gpio-cfg.h> 35 38 36 39 #include <drm/exynos_drm.h> 37 40 ··· 43 40 44 41 #include "exynos_hdmi.h" 45 42 43 + #include <linux/gpio.h> 44 + #include <media/s5p_hdmi.h> 45 + 46 46 #define MAX_WIDTH 1920 47 47 #define MAX_HEIGHT 1080 48 48 #define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev)) 49 + 50 + enum hdmi_type { 51 + HDMI_TYPE13, 52 + HDMI_TYPE14, 53 + }; 49 54 50 55 struct hdmi_resources { 51 56 struct clk *hdmi; ··· 70 59 struct drm_device *drm_dev; 71 60 bool hpd; 72 61 bool powered; 73 - bool is_v13; 74 62 bool dvi_mode; 75 63 struct mutex hdmi_mutex; 76 64 77 65 void __iomem *regs; 78 - unsigned int external_irq; 79 - unsigned int internal_irq; 66 + int external_irq; 67 + int internal_irq; 80 68 81 69 struct i2c_client *ddc_port; 82 70 struct i2c_client *hdmiphy_port; ··· 86 76 struct hdmi_resources res; 87 77 void *parent_ctx; 88 78 89 - void (*cfg_hpd)(bool external); 90 - int (*get_hpd)(void); 79 + int hpd_gpio; 80 + 81 + enum hdmi_type type; 91 82 }; 92 83 93 84 /* HDMI Version 1.3 */ ··· 1220 1209 1221 1210 static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix) 1222 1211 { 1223 - if (hdata->is_v13) 1212 + if (hdata->type == HDMI_TYPE13) 1224 1213 hdmi_v13_regs_dump(hdata, prefix); 1225 1214 else 1226 1215 hdmi_v14_regs_dump(hdata, prefix); ··· 1261 1250 static int hdmi_conf_index(struct hdmi_context *hdata, 1262 1251 struct drm_display_mode *mode) 1263 1252 { 1264 - if (hdata->is_v13) 1253 + if (hdata->type == HDMI_TYPE13) 1265 1254 return hdmi_v13_conf_index(mode); 1266 1255 1267 1256 return hdmi_v14_conf_index(mode); ··· 1357 1346 check_timing->yres, check_timing->refresh, 1358 1347 check_timing->vmode); 1359 1348 1360 - if (hdata->is_v13) 1349 + if (hdata->type == HDMI_TYPE13) 1361 1350 return 
hdmi_v13_check_timing(check_timing); 1362 1351 else 1363 1352 return hdmi_v14_check_timing(check_timing); ··· 1423 1412 hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]); 1424 1413 hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]); 1425 1414 1426 - if (hdata->is_v13) 1415 + if (hdata->type == HDMI_TYPE13) 1427 1416 hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4); 1428 1417 else 1429 1418 hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4); ··· 1527 1516 { 1528 1517 u32 reg; 1529 1518 1530 - if (hdata->is_v13) 1519 + if (hdata->type == HDMI_TYPE13) 1531 1520 reg = HDMI_V13_CORE_RSTOUT; 1532 1521 else 1533 1522 reg = HDMI_CORE_RSTOUT; ··· 1541 1530 1542 1531 static void hdmi_conf_init(struct hdmi_context *hdata) 1543 1532 { 1544 - /* enable HPD interrupts */ 1533 + /* disable HPD interrupts */ 1545 1534 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | 1546 - HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); 1547 - mdelay(10); 1548 - hdmi_reg_writemask(hdata, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL | 1549 1535 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); 1550 1536 1551 1537 /* choose HDMI mode */ ··· 1559 1551 HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS); 1560 1552 } 1561 1553 1562 - if (hdata->is_v13) { 1554 + if (hdata->type == HDMI_TYPE13) { 1563 1555 /* choose bluescreen (fecal) color */ 1564 1556 hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12); 1565 1557 hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34); ··· 1841 1833 1842 1834 static void hdmi_timing_apply(struct hdmi_context *hdata) 1843 1835 { 1844 - if (hdata->is_v13) 1836 + if (hdata->type == HDMI_TYPE13) 1845 1837 hdmi_v13_timing_apply(hdata); 1846 1838 else 1847 1839 hdmi_v14_timing_apply(hdata); ··· 1863 1855 if (hdata->hdmiphy_port) 1864 1856 i2c_master_send(hdata->hdmiphy_port, buffer, 2); 1865 1857 1866 - if (hdata->is_v13) 1858 + if (hdata->type == HDMI_TYPE13) 1867 1859 reg = HDMI_V13_PHY_RSTOUT; 1868 1860 else 1869 1861 reg = HDMI_PHY_RSTOUT; ··· 1890 1882 } 1891 1883 1892 1884 /* pixel clock 
*/ 1893 - if (hdata->is_v13) 1885 + if (hdata->type == HDMI_TYPE13) 1894 1886 hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data; 1895 1887 else 1896 1888 hdmiphy_data = hdmi_confs[hdata->cur_conf].hdmiphy_data; ··· 1958 1950 1959 1951 drm_mode_set_crtcinfo(adjusted_mode, 0); 1960 1952 1961 - if (hdata->is_v13) 1953 + if (hdata->type == HDMI_TYPE13) 1962 1954 index = hdmi_v13_conf_index(adjusted_mode); 1963 1955 else 1964 1956 index = hdmi_v14_conf_index(adjusted_mode); ··· 1972 1964 * to adjusted_mode. 1973 1965 */ 1974 1966 list_for_each_entry(m, &connector->modes, head) { 1975 - if (hdata->is_v13) 1967 + if (hdata->type == HDMI_TYPE13) 1976 1968 index = hdmi_v13_conf_index(m); 1977 1969 else 1978 1970 index = hdmi_v14_conf_index(m); ··· 2032 2024 2033 2025 hdata->powered = true; 2034 2026 2035 - if (hdata->cfg_hpd) 2036 - hdata->cfg_hpd(true); 2037 2027 mutex_unlock(&hdata->hdmi_mutex); 2038 2028 2039 2029 pm_runtime_get_sync(hdata->dev); ··· 2067 2061 pm_runtime_put_sync(hdata->dev); 2068 2062 2069 2063 mutex_lock(&hdata->hdmi_mutex); 2070 - if (hdata->cfg_hpd) 2071 - hdata->cfg_hpd(false); 2072 2064 2073 2065 hdata->powered = false; 2074 2066 ··· 2114 2110 struct exynos_drm_hdmi_context *ctx = arg; 2115 2111 struct hdmi_context *hdata = ctx->ctx; 2116 2112 2117 - if (!hdata->get_hpd) 2118 - goto out; 2119 - 2120 2113 mutex_lock(&hdata->hdmi_mutex); 2121 - hdata->hpd = hdata->get_hpd(); 2114 + hdata->hpd = gpio_get_value(hdata->hpd_gpio); 2122 2115 mutex_unlock(&hdata->hdmi_mutex); 2123 2116 2124 2117 if (ctx->drm_dev) 2125 2118 drm_helper_hpd_irq_event(ctx->drm_dev); 2126 2119 2127 - out: 2128 2120 return IRQ_HANDLED; 2129 2121 } 2130 2122 ··· 2143 2143 HDMI_INTC_FLAG_HPD_PLUG); 2144 2144 } 2145 2145 2146 - mutex_lock(&hdata->hdmi_mutex); 2147 - hdata->hpd = hdmi_reg_read(hdata, HDMI_HPD_STATUS); 2148 - if (hdata->powered && hdata->hpd) { 2149 - mutex_unlock(&hdata->hdmi_mutex); 2150 - goto out; 2151 - } 2152 - mutex_unlock(&hdata->hdmi_mutex); 2153 - 
2154 2146 if (ctx->drm_dev) 2155 2147 drm_helper_hpd_irq_event(ctx->drm_dev); 2156 2148 2157 - out: 2158 2149 return IRQ_HANDLED; 2159 2150 } 2160 2151 ··· 2253 2262 hdmi_hdmiphy = hdmiphy; 2254 2263 } 2255 2264 2265 + #ifdef CONFIG_OF 2266 + static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata 2267 + (struct device *dev) 2268 + { 2269 + struct device_node *np = dev->of_node; 2270 + struct s5p_hdmi_platform_data *pd; 2271 + enum of_gpio_flags flags; 2272 + u32 value; 2273 + 2274 + pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); 2275 + if (!pd) { 2276 + DRM_ERROR("memory allocation for pdata failed\n"); 2277 + goto err_data; 2278 + } 2279 + 2280 + if (!of_find_property(np, "hpd-gpio", &value)) { 2281 + DRM_ERROR("no hpd gpio property found\n"); 2282 + goto err_data; 2283 + } 2284 + 2285 + pd->hpd_gpio = of_get_named_gpio_flags(np, "hpd-gpio", 0, &flags); 2286 + 2287 + return pd; 2288 + 2289 + err_data: 2290 + return NULL; 2291 + } 2292 + #else 2293 + static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata 2294 + (struct device *dev) 2295 + { 2296 + return NULL; 2297 + } 2298 + #endif 2299 + 2300 + static struct platform_device_id hdmi_driver_types[] = { 2301 + { 2302 + .name = "s5pv210-hdmi", 2303 + .driver_data = HDMI_TYPE13, 2304 + }, { 2305 + .name = "exynos4-hdmi", 2306 + .driver_data = HDMI_TYPE13, 2307 + }, { 2308 + .name = "exynos4-hdmi14", 2309 + .driver_data = HDMI_TYPE14, 2310 + }, { 2311 + .name = "exynos5-hdmi", 2312 + .driver_data = HDMI_TYPE14, 2313 + }, { 2314 + /* end node */ 2315 + } 2316 + }; 2317 + 2318 + static struct of_device_id hdmi_match_types[] = { 2319 + { 2320 + .compatible = "samsung,exynos5-hdmi", 2321 + .data = (void *)HDMI_TYPE14, 2322 + }, { 2323 + /* end node */ 2324 + } 2325 + }; 2326 + 2256 2327 static int __devinit hdmi_probe(struct platform_device *pdev) 2257 2328 { 2258 2329 struct device *dev = &pdev->dev; 2259 2330 struct exynos_drm_hdmi_context *drm_hdmi_ctx; 2260 2331 struct hdmi_context *hdata; 2261 - struct 
exynos_drm_hdmi_pdata *pdata; 2332 + struct s5p_hdmi_platform_data *pdata; 2262 2333 struct resource *res; 2263 2334 int ret; 2264 2335 2265 2336 DRM_DEBUG_KMS("[%d]\n", __LINE__); 2266 2337 2267 - pdata = pdev->dev.platform_data; 2338 + if (pdev->dev.of_node) { 2339 + pdata = drm_hdmi_dt_parse_pdata(dev); 2340 + if (IS_ERR(pdata)) { 2341 + DRM_ERROR("failed to parse dt\n"); 2342 + return PTR_ERR(pdata); 2343 + } 2344 + } else { 2345 + pdata = pdev->dev.platform_data; 2346 + } 2347 + 2268 2348 if (!pdata) { 2269 2349 DRM_ERROR("no platform data specified\n"); 2270 2350 return -EINVAL; ··· 2362 2300 2363 2301 platform_set_drvdata(pdev, drm_hdmi_ctx); 2364 2302 2365 - hdata->is_v13 = pdata->is_v13; 2366 - hdata->cfg_hpd = pdata->cfg_hpd; 2367 - hdata->get_hpd = pdata->get_hpd; 2303 + if (dev->of_node) { 2304 + const struct of_device_id *match; 2305 + match = of_match_node(of_match_ptr(hdmi_match_types), 2306 + pdev->dev.of_node); 2307 + hdata->type = (enum hdmi_type)match->data; 2308 + } else { 2309 + hdata->type = (enum hdmi_type)platform_get_device_id 2310 + (pdev)->driver_data; 2311 + } 2312 + 2313 + hdata->hpd_gpio = pdata->hpd_gpio; 2368 2314 hdata->dev = dev; 2369 2315 2370 2316 ret = hdmi_resources_init(hdata); 2317 + 2371 2318 if (ret) { 2372 2319 ret = -EINVAL; 2320 + DRM_ERROR("hdmi_resources_init failed\n"); 2373 2321 goto err_data; 2374 2322 } 2375 2323 2376 2324 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2325 + if (!res) { 2326 + DRM_ERROR("failed to find registers\n"); 2327 + ret = -ENOENT; 2328 + goto err_resource; 2329 + } 2377 2330 2378 2331 hdata->regs = devm_request_and_ioremap(&pdev->dev, res); 2379 2332 if (!hdata->regs) { ··· 2397 2320 goto err_resource; 2398 2321 } 2399 2322 2323 + ret = gpio_request(hdata->hpd_gpio, "HPD"); 2324 + if (ret) { 2325 + DRM_ERROR("failed to request HPD gpio\n"); 2326 + goto err_resource; 2327 + } 2328 + 2400 2329 /* DDC i2c driver */ 2401 2330 if (i2c_add_driver(&ddc_driver)) { 2402 2331 
DRM_ERROR("failed to register ddc i2c driver\n"); 2403 2332 ret = -ENOENT; 2404 - goto err_resource; 2333 + goto err_gpio; 2405 2334 } 2406 2335 2407 2336 hdata->ddc_port = hdmi_ddc; ··· 2421 2338 2422 2339 hdata->hdmiphy_port = hdmi_hdmiphy; 2423 2340 2424 - hdata->external_irq = platform_get_irq_byname(pdev, "external_irq"); 2341 + hdata->external_irq = gpio_to_irq(hdata->hpd_gpio); 2425 2342 if (hdata->external_irq < 0) { 2426 - DRM_ERROR("failed to get platform irq\n"); 2343 + DRM_ERROR("failed to get GPIO external irq\n"); 2427 2344 ret = hdata->external_irq; 2428 2345 goto err_hdmiphy; 2429 2346 } 2430 2347 2431 - hdata->internal_irq = platform_get_irq_byname(pdev, "internal_irq"); 2348 + hdata->internal_irq = platform_get_irq(pdev, 0); 2432 2349 if (hdata->internal_irq < 0) { 2433 2350 DRM_ERROR("failed to get platform internal irq\n"); 2434 2351 ret = hdata->internal_irq; 2435 2352 goto err_hdmiphy; 2436 2353 } 2437 2354 2355 + hdata->hpd = gpio_get_value(hdata->hpd_gpio); 2356 + 2438 2357 ret = request_threaded_irq(hdata->external_irq, NULL, 2439 2358 hdmi_external_irq_thread, IRQF_TRIGGER_RISING | 2440 2359 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 2441 2360 "hdmi_external", drm_hdmi_ctx); 2442 2361 if (ret) { 2443 - DRM_ERROR("failed to register hdmi internal interrupt\n"); 2362 + DRM_ERROR("failed to register hdmi external interrupt\n"); 2444 2363 goto err_hdmiphy; 2445 2364 } 2446 - 2447 - if (hdata->cfg_hpd) 2448 - hdata->cfg_hpd(false); 2449 2365 2450 2366 ret = request_threaded_irq(hdata->internal_irq, NULL, 2451 2367 hdmi_internal_irq_thread, IRQF_ONESHOT, ··· 2453 2371 DRM_ERROR("failed to register hdmi internal interrupt\n"); 2454 2372 goto err_free_irq; 2455 2373 } 2374 + 2375 + /* Attach HDMI Driver to common hdmi. */ 2376 + exynos_hdmi_drv_attach(drm_hdmi_ctx); 2456 2377 2457 2378 /* register specific callbacks to common hdmi. 
*/ 2458 2379 exynos_hdmi_ops_register(&hdmi_ops); ··· 2470 2385 i2c_del_driver(&hdmiphy_driver); 2471 2386 err_ddc: 2472 2387 i2c_del_driver(&ddc_driver); 2388 + err_gpio: 2389 + gpio_free(hdata->hpd_gpio); 2473 2390 err_resource: 2474 2391 hdmi_resources_cleanup(hdata); 2475 2392 err_data: ··· 2489 2402 pm_runtime_disable(dev); 2490 2403 2491 2404 free_irq(hdata->internal_irq, hdata); 2405 + free_irq(hdata->external_irq, hdata); 2406 + 2407 + gpio_free(hdata->hpd_gpio); 2492 2408 2493 2409 hdmi_resources_cleanup(hdata); 2494 2410 ··· 2537 2447 struct platform_driver hdmi_driver = { 2538 2448 .probe = hdmi_probe, 2539 2449 .remove = __devexit_p(hdmi_remove), 2450 + .id_table = hdmi_driver_types, 2540 2451 .driver = { 2541 - .name = "exynos4-hdmi", 2452 + .name = "exynos-hdmi", 2542 2453 .owner = THIS_MODULE, 2543 2454 .pm = &hdmi_pm_ops, 2455 + .of_match_table = hdmi_match_types, 2544 2456 }, 2545 2457 };
+11 -1
drivers/gpu/drm/exynos/exynos_hdmiphy.c
··· 42 42 43 43 static const struct i2c_device_id hdmiphy_id[] = { 44 44 { "s5p_hdmiphy", 0 }, 45 + { "exynos5-hdmiphy", 0 }, 45 46 { }, 47 + }; 48 + 49 + static struct of_device_id hdmiphy_match_types[] = { 50 + { 51 + .compatible = "samsung,exynos5-hdmiphy", 52 + }, { 53 + /* end node */ 54 + } 46 55 }; 47 56 48 57 struct i2c_driver hdmiphy_driver = { 49 58 .driver = { 50 - .name = "s5p-hdmiphy", 59 + .name = "exynos-hdmiphy", 51 60 .owner = THIS_MODULE, 61 + .of_match_table = hdmiphy_match_types, 52 62 }, 53 63 .id_table = hdmiphy_id, 54 64 .probe = hdmiphy_probe,
+190 -58
drivers/gpu/drm/exynos/exynos_mixer.c
··· 73 73 struct clk *sclk_dac; 74 74 }; 75 75 76 + enum mixer_version_id { 77 + MXR_VER_0_0_0_16, 78 + MXR_VER_16_0_33_0, 79 + }; 80 + 76 81 struct mixer_context { 77 82 struct device *dev; 78 83 int pipe; 79 84 bool interlace; 80 85 bool powered; 86 + bool vp_enabled; 81 87 u32 int_en; 82 88 83 89 struct mutex mixer_mutex; 84 90 struct mixer_resources mixer_res; 85 91 struct hdmi_win_data win_data[MIXER_WIN_NR]; 92 + enum mixer_version_id mxr_ver; 93 + }; 94 + 95 + struct mixer_drv_data { 96 + enum mixer_version_id version; 97 + bool is_vp_enabled; 86 98 }; 87 99 88 100 static const u8 filter_y_horiz_tap8[] = { ··· 263 251 mixer_reg_writemask(res, MXR_STATUS, enable ? 264 252 MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); 265 253 266 - vp_reg_write(res, VP_SHADOW_UPDATE, enable ? 254 + if (ctx->vp_enabled) 255 + vp_reg_write(res, VP_SHADOW_UPDATE, enable ? 267 256 VP_SHADOW_UPDATE_ENABLE : 0); 268 257 } 269 258 ··· 346 333 mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE); 347 334 break; 348 335 case 2: 349 - vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON); 350 - mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_VP_ENABLE); 336 + if (ctx->vp_enabled) { 337 + vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON); 338 + mixer_reg_writemask(res, MXR_CFG, val, 339 + MXR_CFG_VP_ENABLE); 340 + } 351 341 break; 352 342 } 353 343 } ··· 481 465 vp_regs_dump(ctx); 482 466 } 483 467 468 + static void mixer_layer_update(struct mixer_context *ctx) 469 + { 470 + struct mixer_resources *res = &ctx->mixer_res; 471 + u32 val; 472 + 473 + val = mixer_reg_read(res, MXR_CFG); 474 + 475 + /* allow one update per vsync only */ 476 + if (!(val & MXR_CFG_LAYER_UPDATE_COUNT_MASK)) 477 + mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); 478 + } 479 + 484 480 static void mixer_graph_buffer(struct mixer_context *ctx, int win) 485 481 { 486 482 struct mixer_resources *res = &ctx->mixer_res; ··· 573 545 mixer_cfg_scan(ctx, win_data->mode_height); 574 546 
mixer_cfg_rgb_fmt(ctx, win_data->mode_height); 575 547 mixer_cfg_layer(ctx, win, true); 548 + 549 + /* layer update mandatory for mixer 16.0.33.0 */ 550 + if (ctx->mxr_ver == MXR_VER_16_0_33_0) 551 + mixer_layer_update(ctx); 552 + 576 553 mixer_run(ctx); 577 554 578 555 mixer_vsync_set_update(ctx, true); ··· 625 592 */ 626 593 val = MXR_LAYER_CFG_GRP1_VAL(3); 627 594 val |= MXR_LAYER_CFG_GRP0_VAL(2); 628 - val |= MXR_LAYER_CFG_VP_VAL(1); 595 + if (ctx->vp_enabled) 596 + val |= MXR_LAYER_CFG_VP_VAL(1); 629 597 mixer_reg_write(res, MXR_LAYER_CFG, val); 630 598 631 599 /* setting background color */ ··· 649 615 val = MXR_GRP_CFG_ALPHA_VAL(0); 650 616 mixer_reg_write(res, MXR_VIDEO_CFG, val); 651 617 652 - /* configuration of Video Processor Registers */ 653 - vp_win_reset(ctx); 654 - vp_default_filter(res); 618 + if (ctx->vp_enabled) { 619 + /* configuration of Video Processor Registers */ 620 + vp_win_reset(ctx); 621 + vp_default_filter(res); 622 + } 655 623 656 624 /* disable all layers */ 657 625 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE); 658 626 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE); 659 - mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE); 627 + if (ctx->vp_enabled) 628 + mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE); 660 629 661 630 mixer_vsync_set_update(ctx, true); 662 631 spin_unlock_irqrestore(&res->reg_slock, flags); ··· 682 645 pm_runtime_get_sync(ctx->dev); 683 646 684 647 clk_enable(res->mixer); 685 - clk_enable(res->vp); 686 - clk_enable(res->sclk_mixer); 648 + if (ctx->vp_enabled) { 649 + clk_enable(res->vp); 650 + clk_enable(res->sclk_mixer); 651 + } 687 652 688 653 mixer_reg_write(res, MXR_INT_EN, ctx->int_en); 689 654 mixer_win_reset(ctx); ··· 705 666 ctx->int_en = mixer_reg_read(res, MXR_INT_EN); 706 667 707 668 clk_disable(res->mixer); 708 - clk_disable(res->vp); 709 - clk_disable(res->sclk_mixer); 669 + if (ctx->vp_enabled) { 670 + clk_disable(res->vp); 671 + clk_disable(res->sclk_mixer); 672 + } 
710 673 711 674 pm_runtime_put_sync(ctx->dev); 712 675 ··· 765 724 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode); 766 725 break; 767 726 } 727 + } 728 + 729 + static void mixer_wait_for_vblank(void *ctx) 730 + { 731 + struct mixer_context *mixer_ctx = ctx; 732 + struct mixer_resources *res = &mixer_ctx->mixer_res; 733 + int ret; 734 + 735 + ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) & 736 + MXR_INT_STATUS_VSYNC), 50); 737 + if (ret < 0) 738 + DRM_DEBUG_KMS("vblank wait timed out.\n"); 768 739 } 769 740 770 741 static void mixer_win_mode_set(void *ctx, ··· 841 788 842 789 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); 843 790 844 - if (win > 1) 791 + if (win > 1 && mixer_ctx->vp_enabled) 845 792 vp_video_buffer(mixer_ctx, win); 846 793 else 847 794 mixer_graph_buffer(mixer_ctx, win); ··· 871 818 .dpms = mixer_dpms, 872 819 873 820 /* overlay */ 821 + .wait_for_vblank = mixer_wait_for_vblank, 874 822 .win_mode_set = mixer_win_mode_set, 875 823 .win_commit = mixer_win_commit, 876 824 .win_disable = mixer_win_disable, ··· 977 923 ret = -ENODEV; 978 924 goto fail; 979 925 } 980 - mixer_res->vp = clk_get(dev, "vp"); 981 - if (IS_ERR_OR_NULL(mixer_res->vp)) { 982 - dev_err(dev, "failed to get clock 'vp'\n"); 983 - ret = -ENODEV; 984 - goto fail; 985 - } 986 - mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer"); 987 - if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) { 988 - dev_err(dev, "failed to get clock 'sclk_mixer'\n"); 989 - ret = -ENODEV; 990 - goto fail; 991 - } 926 + 992 927 mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); 993 928 if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) { 994 929 dev_err(dev, "failed to get clock 'sclk_hdmi'\n"); 995 930 ret = -ENODEV; 996 931 goto fail; 997 932 } 998 - mixer_res->sclk_dac = clk_get(dev, "sclk_dac"); 999 - if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) { 1000 - dev_err(dev, "failed to get clock 'sclk_dac'\n"); 1001 - ret = -ENODEV; 1002 - goto fail; 1003 - } 1004 - res = platform_get_resource_byname(pdev, 
IORESOURCE_MEM, "mxr"); 933 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1005 934 if (res == NULL) { 1006 935 dev_err(dev, "get memory resource failed.\n"); 1007 936 ret = -ENXIO; 1008 937 goto fail; 1009 938 } 1010 - 1011 - clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi); 1012 939 1013 940 mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start, 1014 941 resource_size(res)); ··· 999 964 goto fail; 1000 965 } 1001 966 1002 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp"); 1003 - if (res == NULL) { 1004 - dev_err(dev, "get memory resource failed.\n"); 1005 - ret = -ENXIO; 1006 - goto fail; 1007 - } 1008 - 1009 - mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start, 1010 - resource_size(res)); 1011 - if (mixer_res->vp_regs == NULL) { 1012 - dev_err(dev, "register mapping failed.\n"); 1013 - ret = -ENXIO; 1014 - goto fail; 1015 - } 1016 - 1017 - res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq"); 967 + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1018 968 if (res == NULL) { 1019 969 dev_err(dev, "get interrupt resource failed.\n"); 1020 970 ret = -ENXIO; ··· 1017 997 return 0; 1018 998 1019 999 fail: 1020 - if (!IS_ERR_OR_NULL(mixer_res->sclk_dac)) 1021 - clk_put(mixer_res->sclk_dac); 1022 1000 if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) 1023 1001 clk_put(mixer_res->sclk_hdmi); 1024 - if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer)) 1025 - clk_put(mixer_res->sclk_mixer); 1026 - if (!IS_ERR_OR_NULL(mixer_res->vp)) 1027 - clk_put(mixer_res->vp); 1028 1002 if (!IS_ERR_OR_NULL(mixer_res->mixer)) 1029 1003 clk_put(mixer_res->mixer); 1030 1004 return ret; 1031 1005 } 1006 + 1007 + static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx, 1008 + struct platform_device *pdev) 1009 + { 1010 + struct mixer_context *mixer_ctx = ctx->ctx; 1011 + struct device *dev = &pdev->dev; 1012 + struct mixer_resources *mixer_res = &mixer_ctx->mixer_res; 1013 + struct resource *res; 1014 + int ret; 1015 + 1016 + 
mixer_res->vp = clk_get(dev, "vp"); 1017 + if (IS_ERR_OR_NULL(mixer_res->vp)) { 1018 + dev_err(dev, "failed to get clock 'vp'\n"); 1019 + ret = -ENODEV; 1020 + goto fail; 1021 + } 1022 + mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer"); 1023 + if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) { 1024 + dev_err(dev, "failed to get clock 'sclk_mixer'\n"); 1025 + ret = -ENODEV; 1026 + goto fail; 1027 + } 1028 + mixer_res->sclk_dac = clk_get(dev, "sclk_dac"); 1029 + if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) { 1030 + dev_err(dev, "failed to get clock 'sclk_dac'\n"); 1031 + ret = -ENODEV; 1032 + goto fail; 1033 + } 1034 + 1035 + if (mixer_res->sclk_hdmi) 1036 + clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi); 1037 + 1038 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1039 + if (res == NULL) { 1040 + dev_err(dev, "get memory resource failed.\n"); 1041 + ret = -ENXIO; 1042 + goto fail; 1043 + } 1044 + 1045 + mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start, 1046 + resource_size(res)); 1047 + if (mixer_res->vp_regs == NULL) { 1048 + dev_err(dev, "register mapping failed.\n"); 1049 + ret = -ENXIO; 1050 + goto fail; 1051 + } 1052 + 1053 + return 0; 1054 + 1055 + fail: 1056 + if (!IS_ERR_OR_NULL(mixer_res->sclk_dac)) 1057 + clk_put(mixer_res->sclk_dac); 1058 + if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer)) 1059 + clk_put(mixer_res->sclk_mixer); 1060 + if (!IS_ERR_OR_NULL(mixer_res->vp)) 1061 + clk_put(mixer_res->vp); 1062 + return ret; 1063 + } 1064 + 1065 + static struct mixer_drv_data exynos5_mxr_drv_data = { 1066 + .version = MXR_VER_16_0_33_0, 1067 + .is_vp_enabled = 0, 1068 + }; 1069 + 1070 + static struct mixer_drv_data exynos4_mxr_drv_data = { 1071 + .version = MXR_VER_0_0_0_16, 1072 + .is_vp_enabled = 1, 1073 + }; 1074 + 1075 + static struct platform_device_id mixer_driver_types[] = { 1076 + { 1077 + .name = "s5p-mixer", 1078 + .driver_data = (unsigned long)&exynos4_mxr_drv_data, 1079 + }, { 1080 + .name = "exynos5-mixer", 1081 + .driver_data = 
(unsigned long)&exynos5_mxr_drv_data, 1082 + }, { 1083 + /* end node */ 1084 + } 1085 + }; 1086 + 1087 + static struct of_device_id mixer_match_types[] = { 1088 + { 1089 + .compatible = "samsung,exynos5-mixer", 1090 + .data = &exynos5_mxr_drv_data, 1091 + }, { 1092 + /* end node */ 1093 + } 1094 + }; 1032 1095 1033 1096 static int __devinit mixer_probe(struct platform_device *pdev) 1034 1097 { 1035 1098 struct device *dev = &pdev->dev; 1036 1099 struct exynos_drm_hdmi_context *drm_hdmi_ctx; 1037 1100 struct mixer_context *ctx; 1101 + struct mixer_drv_data *drv; 1038 1102 int ret; 1039 1103 1040 1104 dev_info(dev, "probe start\n"); ··· 1138 1034 1139 1035 mutex_init(&ctx->mixer_mutex); 1140 1036 1037 + if (dev->of_node) { 1038 + const struct of_device_id *match; 1039 + match = of_match_node(of_match_ptr(mixer_match_types), 1040 + pdev->dev.of_node); 1041 + drv = match->data; 1042 + } else { 1043 + drv = (struct mixer_drv_data *) 1044 + platform_get_device_id(pdev)->driver_data; 1045 + } 1046 + 1141 1047 ctx->dev = &pdev->dev; 1142 1048 drm_hdmi_ctx->ctx = (void *)ctx; 1049 + ctx->vp_enabled = drv->is_vp_enabled; 1050 + ctx->mxr_ver = drv->version; 1143 1051 1144 1052 platform_set_drvdata(pdev, drm_hdmi_ctx); 1145 1053 1146 1054 /* acquire resources: regs, irqs, clocks */ 1147 1055 ret = mixer_resources_init(drm_hdmi_ctx, pdev); 1148 - if (ret) 1056 + if (ret) { 1057 + DRM_ERROR("mixer_resources_init failed\n"); 1149 1058 goto fail; 1059 + } 1060 + 1061 + if (ctx->vp_enabled) { 1062 + /* acquire vp resources: regs, irqs, clocks */ 1063 + ret = vp_resources_init(drm_hdmi_ctx, pdev); 1064 + if (ret) { 1065 + DRM_ERROR("vp_resources_init failed\n"); 1066 + goto fail; 1067 + } 1068 + } 1069 + 1070 + /* attach mixer driver to common hdmi. */ 1071 + exynos_mixer_drv_attach(drm_hdmi_ctx); 1150 1072 1151 1073 /* register specific callback point to common hdmi. 
*/ 1152 1074 exynos_mixer_ops_register(&mixer_ops); ··· 1212 1082 1213 1083 struct platform_driver mixer_driver = { 1214 1084 .driver = { 1215 - .name = "s5p-mixer", 1085 + .name = "exynos-mixer", 1216 1086 .owner = THIS_MODULE, 1217 1087 .pm = &mixer_pm_ops, 1088 + .of_match_table = mixer_match_types, 1218 1089 }, 1219 1090 .probe = mixer_probe, 1220 1091 .remove = __devexit_p(mixer_remove), 1092 + .id_table = mixer_driver_types, 1221 1093 };
+3
drivers/gpu/drm/exynos/regs-mixer.h
··· 69 69 (((val) << (low_bit)) & MXR_MASK(high_bit, low_bit)) 70 70 71 71 /* bits for MXR_STATUS */ 72 + #define MXR_STATUS_SOFT_RESET (1 << 8) 72 73 #define MXR_STATUS_16_BURST (1 << 7) 73 74 #define MXR_STATUS_BURST_MASK (1 << 7) 74 75 #define MXR_STATUS_BIG_ENDIAN (1 << 3) ··· 78 77 #define MXR_STATUS_REG_RUN (1 << 0) 79 78 80 79 /* bits for MXR_CFG */ 80 + #define MXR_CFG_LAYER_UPDATE (1 << 31) 81 + #define MXR_CFG_LAYER_UPDATE_COUNT_MASK (3 << 29) 81 82 #define MXR_CFG_RGB601_0_255 (0 << 9) 82 83 #define MXR_CFG_RGB601_16_235 (1 << 9) 83 84 #define MXR_CFG_RGB709_0_255 (2 << 9)
+10 -4
drivers/gpu/drm/i915/i915_gem.c
··· 1399 1399 case 0: 1400 1400 case -ERESTARTSYS: 1401 1401 case -EINTR: 1402 + case -EBUSY: 1403 + /* 1404 + * EBUSY is ok: this just means that another thread 1405 + * already did the job. 1406 + */ 1402 1407 return VM_FAULT_NOPAGE; 1403 1408 case -ENOMEM: 1404 1409 return VM_FAULT_OOM; 1405 1410 default: 1411 + WARN_ON_ONCE(ret); 1406 1412 return VM_FAULT_SIGBUS; 1407 1413 } 1408 1414 } ··· 3223 3217 enum i915_cache_level level; 3224 3218 int ret; 3225 3219 3226 - ret = i915_mutex_lock_interruptible(dev); 3227 - if (ret) 3228 - return ret; 3229 - 3230 3220 switch (args->caching) { 3231 3221 case I915_CACHING_NONE: 3232 3222 level = I915_CACHE_NONE; ··· 3233 3231 default: 3234 3232 return -EINVAL; 3235 3233 } 3234 + 3235 + ret = i915_mutex_lock_interruptible(dev); 3236 + if (ret) 3237 + return ret; 3236 3238 3237 3239 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 3238 3240 if (&obj->base == NULL) {
+1 -1
drivers/gpu/drm/i915/i915_gem_context.c
··· 328 328 * itlb_before_ctx_switch. 329 329 */ 330 330 if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) { 331 - ret = ring->flush(ring, 0, 0); 331 + ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0); 332 332 if (ret) 333 333 return ret; 334 334 }
+4 -1
drivers/gpu/drm/i915/i915_gem_tiling.c
··· 91 91 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 92 92 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 93 93 94 - if (INTEL_INFO(dev)->gen >= 6) { 94 + if (IS_VALLEYVIEW(dev)) { 95 + swizzle_x = I915_BIT_6_SWIZZLE_NONE; 96 + swizzle_y = I915_BIT_6_SWIZZLE_NONE; 97 + } else if (INTEL_INFO(dev)->gen >= 6) { 95 98 uint32_t dimm_c0, dimm_c1; 96 99 dimm_c0 = I915_READ(MAD_DIMM_C0); 97 100 dimm_c1 = I915_READ(MAD_DIMM_C1);
+8 -8
drivers/gpu/drm/i915/i915_irq.c
··· 697 697 intel_opregion_gse_intr(dev); 698 698 699 699 for (i = 0; i < 3; i++) { 700 + if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 701 + drm_handle_vblank(dev, i); 700 702 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 701 703 intel_prepare_page_flip(dev, i); 702 704 intel_finish_page_flip_plane(dev, i); 703 705 } 704 - if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 705 - drm_handle_vblank(dev, i); 706 706 } 707 707 708 708 /* check event from PCH */ ··· 784 784 if (de_iir & DE_GSE) 785 785 intel_opregion_gse_intr(dev); 786 786 787 + if (de_iir & DE_PIPEA_VBLANK) 788 + drm_handle_vblank(dev, 0); 789 + 790 + if (de_iir & DE_PIPEB_VBLANK) 791 + drm_handle_vblank(dev, 1); 792 + 787 793 if (de_iir & DE_PLANEA_FLIP_DONE) { 788 794 intel_prepare_page_flip(dev, 0); 789 795 intel_finish_page_flip_plane(dev, 0); ··· 799 793 intel_prepare_page_flip(dev, 1); 800 794 intel_finish_page_flip_plane(dev, 1); 801 795 } 802 - 803 - if (de_iir & DE_PIPEA_VBLANK) 804 - drm_handle_vblank(dev, 0); 805 - 806 - if (de_iir & DE_PIPEB_VBLANK) 807 - drm_handle_vblank(dev, 1); 808 796 809 797 /* check event from PCH */ 810 798 if (de_iir & DE_PCH_EVENT) {
+3
drivers/gpu/drm/i915/i915_reg.h
··· 527 527 # define VS_TIMER_DISPATCH (1 << 6) 528 528 # define MI_FLUSH_ENABLE (1 << 12) 529 529 530 + #define GEN6_GT_MODE 0x20d0 531 + #define GEN6_GT_MODE_HI (1 << 9) 532 + 530 533 #define GFX_MODE 0x02520 531 534 #define GFX_MODE_GEN7 0x0229c 532 535 #define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
+26 -26
drivers/gpu/drm/i915/intel_display.c
··· 2806 2806 udelay(100); 2807 2807 } 2808 2808 2809 + static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) 2810 + { 2811 + struct drm_device *dev = crtc->dev; 2812 + struct drm_i915_private *dev_priv = dev->dev_private; 2813 + unsigned long flags; 2814 + bool pending; 2815 + 2816 + if (atomic_read(&dev_priv->mm.wedged)) 2817 + return false; 2818 + 2819 + spin_lock_irqsave(&dev->event_lock, flags); 2820 + pending = to_intel_crtc(crtc)->unpin_work != NULL; 2821 + spin_unlock_irqrestore(&dev->event_lock, flags); 2822 + 2823 + return pending; 2824 + } 2825 + 2809 2826 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 2810 2827 { 2811 2828 struct drm_device *dev = crtc->dev; 2829 + struct drm_i915_private *dev_priv = dev->dev_private; 2812 2830 2813 2831 if (crtc->fb == NULL) 2814 2832 return; 2833 + 2834 + wait_event(dev_priv->pending_flip_queue, 2835 + !intel_crtc_has_pending_flip(crtc)); 2815 2836 2816 2837 mutex_lock(&dev->struct_mutex); 2817 2838 intel_finish_fb(crtc->fb); ··· 4391 4370 /* default to 8bpc */ 4392 4371 pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN); 4393 4372 if (is_dp) { 4394 - if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { 4373 + if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { 4395 4374 pipeconf |= PIPECONF_BPP_6 | 4396 4375 PIPECONF_DITHER_EN | 4397 4376 PIPECONF_DITHER_TYPE_SP; ··· 4823 4802 target_clock = adjusted_mode->clock; 4824 4803 4825 4804 /* determine panel color depth */ 4826 - dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, mode); 4805 + dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, 4806 + adjusted_mode); 4827 4807 if (is_lvds && dev_priv->lvds_dither) 4828 4808 dither = true; 4829 4809 ··· 6181 6159 struct intel_unpin_work *work; 6182 6160 struct drm_i915_gem_object *obj; 6183 6161 struct drm_pending_vblank_event *e; 6184 - struct timeval tnow, tvbl; 6162 + struct timeval tvbl; 6185 6163 unsigned long flags; 6186 6164 6187 6165 /* Ignore early vblank 
irqs */ 6188 6166 if (intel_crtc == NULL) 6189 6167 return; 6190 - 6191 - do_gettimeofday(&tnow); 6192 6168 6193 6169 spin_lock_irqsave(&dev->event_lock, flags); 6194 6170 work = intel_crtc->unpin_work; ··· 6200 6180 if (work->event) { 6201 6181 e = work->event; 6202 6182 e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); 6203 - 6204 - /* Called before vblank count and timestamps have 6205 - * been updated for the vblank interval of flip 6206 - * completion? Need to increment vblank count and 6207 - * add one videorefresh duration to returned timestamp 6208 - * to account for this. We assume this happened if we 6209 - * get called over 0.9 frame durations after the last 6210 - * timestamped vblank. 6211 - * 6212 - * This calculation can not be used with vrefresh rates 6213 - * below 5Hz (10Hz to be on the safe side) without 6214 - * promoting to 64 integers. 6215 - */ 6216 - if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) > 6217 - 9 * crtc->framedur_ns) { 6218 - e->event.sequence++; 6219 - tvbl = ns_to_timeval(timeval_to_ns(&tvbl) + 6220 - crtc->framedur_ns); 6221 - } 6222 6183 6223 6184 e->event.tv_sec = tvbl.tv_sec; 6224 6185 e->event.tv_usec = tvbl.tv_usec; ··· 6217 6216 6218 6217 atomic_clear_mask(1 << intel_crtc->plane, 6219 6218 &obj->pending_flip.counter); 6220 - if (atomic_read(&obj->pending_flip) == 0) 6221 - wake_up(&dev_priv->pending_flip_queue); 6222 6219 6220 + wake_up(&dev_priv->pending_flip_queue); 6223 6221 schedule_work(&work->work); 6224 6222 6225 6223 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
+57 -16
drivers/gpu/drm/i915/intel_dp.c
··· 36 36 #include <drm/i915_drm.h> 37 37 #include "i915_drv.h" 38 38 39 + #define DP_RECEIVER_CAP_SIZE 0xf 39 40 #define DP_LINK_STATUS_SIZE 6 40 41 #define DP_LINK_CHECK_TIMEOUT (10 * 1000) 41 42 ··· 1797 1796 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1798 1797 break; 1799 1798 if (i == intel_dp->lane_count && voltage_tries == 5) { 1800 - ++loop_tries; 1801 - if (loop_tries == 5) { 1799 + if (++loop_tries == 5) { 1802 1800 DRM_DEBUG_KMS("too many full retries, give up\n"); 1803 1801 break; 1804 1802 } ··· 1807 1807 } 1808 1808 1809 1809 /* Check to see if we've tried the same voltage 5 times */ 1810 - if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1811 - ++voltage_tries; 1812 - if (voltage_tries == 5) { 1813 - DRM_DEBUG_KMS("too many voltage retries, give up\n"); 1814 - break; 1815 - } 1816 - } else 1810 + if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) { 1811 + voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1817 1812 voltage_tries = 0; 1818 - voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1813 + } else 1814 + ++voltage_tries; 1819 1815 1820 1816 /* Compute new intel_dp->train_set as requested by target */ 1821 1817 intel_get_adjust_train(intel_dp, link_status); ··· 1959 1963 intel_dp_get_dpcd(struct intel_dp *intel_dp) 1960 1964 { 1961 1965 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, 1962 - sizeof(intel_dp->dpcd)) && 1963 - (intel_dp->dpcd[DP_DPCD_REV] != 0)) { 1964 - return true; 1965 - } 1966 + sizeof(intel_dp->dpcd)) == 0) 1967 + return false; /* aux transfer failed */ 1966 1968 1967 - return false; 1969 + if (intel_dp->dpcd[DP_DPCD_REV] == 0) 1970 + return false; /* DPCD not present */ 1971 + 1972 + if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 1973 + DP_DWN_STRM_PORT_PRESENT)) 1974 + return true; /* native DP sink */ 1975 + 1976 + if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) 1977 + return true; /* no per-port downstream info */ 
1978 + 1979 + if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0, 1980 + intel_dp->downstream_ports, 1981 + DP_MAX_DOWNSTREAM_PORTS) == 0) 1982 + return false; /* downstream port status fetch failed */ 1983 + 1984 + return true; 1968 1985 } 1969 1986 1970 1987 static void ··· 2077 2068 } 2078 2069 } 2079 2070 2071 + /* XXX this is probably wrong for multiple downstream ports */ 2080 2072 static enum drm_connector_status 2081 2073 intel_dp_detect_dpcd(struct intel_dp *intel_dp) 2082 2074 { 2083 - if (intel_dp_get_dpcd(intel_dp)) 2075 + uint8_t *dpcd = intel_dp->dpcd; 2076 + bool hpd; 2077 + uint8_t type; 2078 + 2079 + if (!intel_dp_get_dpcd(intel_dp)) 2080 + return connector_status_disconnected; 2081 + 2082 + /* if there's no downstream port, we're done */ 2083 + if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) 2084 2084 return connector_status_connected; 2085 + 2086 + /* If we're HPD-aware, SINK_COUNT changes dynamically */ 2087 + hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD); 2088 + if (hpd) { 2089 + uint8_t reg; 2090 + if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, 2091 + &reg, 1)) 2092 + return connector_status_unknown; 2093 + return DP_GET_SINK_COUNT(reg) ? connector_status_connected 2094 + : connector_status_disconnected; 2095 + } 2096 + 2097 + /* If no HPD, poke DDC gently */ 2098 + if (drm_probe_ddc(&intel_dp->adapter)) 2099 + return connector_status_connected; 2100 + 2101 + /* Well we tried, say unknown for unreliable port types */ 2102 + type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 2103 + if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID) 2104 + return connector_status_unknown; 2105 + 2106 + /* Anything else is out of spec, warn and ignore */ 2107 + DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); 2085 2108 return connector_status_disconnected; 2086 2109 } 2087 2110
+2
drivers/gpu/drm/i915/intel_drv.h
··· 332 332 }; 333 333 334 334 #define DP_RECEIVER_CAP_SIZE 0xf 335 + #define DP_MAX_DOWNSTREAM_PORTS 0x10 335 336 #define DP_LINK_CONFIGURATION_SIZE 9 336 337 337 338 struct intel_dp { ··· 347 346 uint8_t link_bw; 348 347 uint8_t lane_count; 349 348 uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; 349 + uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; 350 350 struct i2c_adapter adapter; 351 351 struct i2c_algo_dp_aux_data algo; 352 352 bool is_pch_edp;
+5
drivers/gpu/drm/i915/intel_pm.c
··· 3474 3474 DISPPLANE_TRICKLE_FEED_DISABLE); 3475 3475 intel_flush_display_plane(dev_priv, pipe); 3476 3476 } 3477 + 3478 + /* The default value should be 0x200 according to docs, but the two 3479 + * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ 3480 + I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff)); 3481 + I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); 3477 3482 } 3478 3483 3479 3484 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
-15
include/drm/Kbuild
··· 1 - header-y += drm.h 2 - header-y += drm_fourcc.h 3 - header-y += drm_mode.h 4 - header-y += drm_sarea.h 5 - header-y += exynos_drm.h 6 - header-y += i810_drm.h 7 - header-y += i915_drm.h 8 - header-y += mga_drm.h 9 - header-y += nouveau_drm.h 10 - header-y += r128_drm.h 11 - header-y += radeon_drm.h 12 - header-y += savage_drm.h 13 - header-y += sis_drm.h 14 - header-y += via_drm.h 15 - header-y += vmwgfx_drm.h
include/drm/drm.h include/uapi/drm/drm.h
+1
include/drm/drm_crtc.h
··· 878 878 extern char *drm_get_tv_select_name(int val); 879 879 extern void drm_fb_release(struct drm_file *file_priv); 880 880 extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); 881 + extern bool drm_probe_ddc(struct i2c_adapter *adapter); 881 882 extern struct edid *drm_get_edid(struct drm_connector *connector, 882 883 struct i2c_adapter *adapter); 883 884 extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+81 -16
include/drm/drm_dp_helper.h
··· 26 26 #include <linux/types.h> 27 27 #include <linux/i2c.h> 28 28 29 - /* From the VESA DisplayPort spec */ 29 + /* 30 + * Unless otherwise noted, all values are from the DP 1.1a spec. Note that 31 + * DP and DPCD versions are independent. Differences from 1.0 are not noted, 32 + * 1.0 devices basically don't exist in the wild. 33 + * 34 + * Abbreviations, in chronological order: 35 + * 36 + * eDP: Embedded DisplayPort version 1 37 + * DPI: DisplayPort Interoperability Guideline v1.1a 38 + * 1.2: DisplayPort 1.2 39 + * 40 + * 1.2 formally includes both eDP and DPI definitions. 41 + */ 30 42 31 43 #define AUX_NATIVE_WRITE 0x8 32 44 #define AUX_NATIVE_READ 0x9 ··· 65 53 66 54 #define DP_MAX_LANE_COUNT 0x002 67 55 # define DP_MAX_LANE_COUNT_MASK 0x1f 68 - # define DP_TPS3_SUPPORTED (1 << 6) 56 + # define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */ 69 57 # define DP_ENHANCED_FRAME_CAP (1 << 7) 70 58 71 59 #define DP_MAX_DOWNSPREAD 0x003 ··· 81 69 /* 10b = TMDS or HDMI */ 82 70 /* 11b = Other */ 83 71 # define DP_FORMAT_CONVERSION (1 << 3) 72 + # define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */ 84 73 85 74 #define DP_MAIN_LINK_CHANNEL_CODING 0x006 86 75 87 76 #define DP_DOWN_STREAM_PORT_COUNT 0x007 88 - #define DP_PORT_COUNT_MASK 0x0f 89 - #define DP_OUI_SUPPORT (1 << 7) 77 + # define DP_PORT_COUNT_MASK 0x0f 78 + # define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */ 79 + # define DP_OUI_SUPPORT (1 << 7) 90 80 91 - #define DP_EDP_CONFIGURATION_CAP 0x00d 92 - #define DP_TRAINING_AUX_RD_INTERVAL 0x00e 81 + #define DP_I2C_SPEED_CAP 0x00c /* DPI */ 82 + # define DP_I2C_SPEED_1K 0x01 83 + # define DP_I2C_SPEED_5K 0x02 84 + # define DP_I2C_SPEED_10K 0x04 85 + # define DP_I2C_SPEED_100K 0x08 86 + # define DP_I2C_SPEED_400K 0x10 87 + # define DP_I2C_SPEED_1M 0x20 93 88 94 - #define DP_PSR_SUPPORT 0x070 89 + #define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */ 90 + #define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? 
*/ 91 + 92 + /* Multiple stream transport */ 93 + #define DP_MSTM_CAP 0x021 /* 1.2 */ 94 + # define DP_MST_CAP (1 << 0) 95 + 96 + #define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */ 95 97 # define DP_PSR_IS_SUPPORTED 1 96 - #define DP_PSR_CAPS 0x071 98 + #define DP_PSR_CAPS 0x071 /* XXX 1.2? */ 97 99 # define DP_PSR_NO_TRAIN_ON_EXIT 1 98 100 # define DP_PSR_SETUP_TIME_330 (0 << 1) 99 101 # define DP_PSR_SETUP_TIME_275 (1 << 1) ··· 119 93 # define DP_PSR_SETUP_TIME_MASK (7 << 1) 120 94 # define DP_PSR_SETUP_TIME_SHIFT 1 121 95 96 + /* 97 + * 0x80-0x8f describe downstream port capabilities, but there are two layouts 98 + * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not, 99 + * each port's descriptor is one byte wide. If it was set, each port's is 100 + * four bytes wide, starting with the one byte from the base info. As of 101 + * DP interop v1.1a only VGA defines additional detail. 102 + */ 103 + 104 + /* offset 0 */ 105 + #define DP_DOWNSTREAM_PORT_0 0x80 106 + # define DP_DS_PORT_TYPE_MASK (7 << 0) 107 + # define DP_DS_PORT_TYPE_DP 0 108 + # define DP_DS_PORT_TYPE_VGA 1 109 + # define DP_DS_PORT_TYPE_DVI 2 110 + # define DP_DS_PORT_TYPE_HDMI 3 111 + # define DP_DS_PORT_TYPE_NON_EDID 4 112 + # define DP_DS_PORT_HPD (1 << 3) 113 + /* offset 1 for VGA is maximum megapixels per second / 8 */ 114 + /* offset 2 */ 115 + # define DP_DS_VGA_MAX_BPC_MASK (3 << 0) 116 + # define DP_DS_VGA_8BPC 0 117 + # define DP_DS_VGA_10BPC 1 118 + # define DP_DS_VGA_12BPC 2 119 + # define DP_DS_VGA_16BPC 3 120 + 122 121 /* link configuration */ 123 122 #define DP_LINK_BW_SET 0x100 124 123 # define DP_LINK_BW_1_62 0x06 125 124 # define DP_LINK_BW_2_7 0x0a 126 - # define DP_LINK_BW_5_4 0x14 125 + # define DP_LINK_BW_5_4 0x14 /* 1.2 */ 127 126 128 127 #define DP_LANE_COUNT_SET 0x101 129 128 # define DP_LANE_COUNT_MASK 0x0f ··· 158 107 # define DP_TRAINING_PATTERN_DISABLE 0 159 108 # define DP_TRAINING_PATTERN_1 1 160 109 # define DP_TRAINING_PATTERN_2 2 161 - # define 
DP_TRAINING_PATTERN_3 3 110 + # define DP_TRAINING_PATTERN_3 3 /* 1.2 */ 162 111 # define DP_TRAINING_PATTERN_MASK 0x3 163 112 164 113 # define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) ··· 199 148 200 149 #define DP_DOWNSPREAD_CTRL 0x107 201 150 # define DP_SPREAD_AMP_0_5 (1 << 4) 151 + # define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */ 202 152 203 153 #define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 204 154 # define DP_SET_ANSI_8B10B (1 << 0) 205 155 206 - #define DP_PSR_EN_CFG 0x170 156 + #define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */ 157 + /* bitmask as for DP_I2C_SPEED_CAP */ 158 + 159 + #define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */ 160 + 161 + #define DP_MSTM_CTRL 0x111 /* 1.2 */ 162 + # define DP_MST_EN (1 << 0) 163 + # define DP_UP_REQ_EN (1 << 1) 164 + # define DP_UPSTREAM_IS_SRC (1 << 2) 165 + 166 + #define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */ 207 167 # define DP_PSR_ENABLE (1 << 0) 208 168 # define DP_PSR_MAIN_LINK_ACTIVE (1 << 1) 209 169 # define DP_PSR_CRC_VERIFICATION (1 << 2) 210 170 # define DP_PSR_FRAME_CAPTURE (1 << 3) 171 + 172 + #define DP_SINK_COUNT 0x200 173 + /* prior to 1.2 bit 7 was reserved mbz */ 174 + # define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f)) 175 + # define DP_SINK_CP_READY (1 << 6) 211 176 212 177 #define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201 213 178 # define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0) 214 179 # define DP_AUTOMATED_TEST_REQUEST (1 << 1) 215 180 # define DP_CP_IRQ (1 << 2) 216 181 # define DP_SINK_SPECIFIC_IRQ (1 << 6) 217 - 218 - #define DP_EDP_CONFIGURATION_SET 0x10a 219 182 220 183 #define DP_LANE0_1_STATUS 0x202 221 184 #define DP_LANE2_3_STATUS 0x203 ··· 290 225 # define DP_SET_POWER_D0 0x1 291 226 # define DP_SET_POWER_D3 0x2 292 227 293 - #define DP_PSR_ERROR_STATUS 0x2006 228 + #define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? 
*/ 294 229 # define DP_PSR_LINK_CRC_ERROR (1 << 0) 295 230 # define DP_PSR_RFB_STORAGE_ERROR (1 << 1) 296 231 297 - #define DP_PSR_ESI 0x2007 232 + #define DP_PSR_ESI 0x2007 /* XXX 1.2? */ 298 233 # define DP_PSR_CAPS_CHANGE (1 << 0) 299 234 300 - #define DP_PSR_STATUS 0x2008 235 + #define DP_PSR_STATUS 0x2008 /* XXX 1.2? */ 301 236 # define DP_PSR_SINK_INACTIVE 0 302 237 # define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1 303 238 # define DP_PSR_SINK_ACTIVE_RFB 2
include/drm/drm_fourcc.h include/uapi/drm/drm_fourcc.h
include/drm/drm_mode.h include/uapi/drm/drm_mode.h
include/drm/drm_sarea.h include/uapi/drm/drm_sarea.h
+1 -174
include/drm/exynos_drm.h
··· 25 25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 26 26 * OTHER DEALINGS IN THE SOFTWARE. 27 27 */ 28 - 29 28 #ifndef _EXYNOS_DRM_H_ 30 29 #define _EXYNOS_DRM_H_ 31 30 32 - #include <drm/drm.h> 33 - 34 - /** 35 - * User-desired buffer creation information structure. 36 - * 37 - * @size: user-desired memory allocation size. 38 - * - this size value would be page-aligned internally. 39 - * @flags: user request for setting memory type or cache attributes. 40 - * @handle: returned a handle to created gem object. 41 - * - this handle will be set by gem module of kernel side. 42 - */ 43 - struct drm_exynos_gem_create { 44 - uint64_t size; 45 - unsigned int flags; 46 - unsigned int handle; 47 - }; 48 - 49 - /** 50 - * A structure for getting buffer offset. 51 - * 52 - * @handle: a pointer to gem object created. 53 - * @pad: just padding to be 64-bit aligned. 54 - * @offset: relatived offset value of the memory region allocated. 55 - * - this value should be set by user. 56 - */ 57 - struct drm_exynos_gem_map_off { 58 - unsigned int handle; 59 - unsigned int pad; 60 - uint64_t offset; 61 - }; 62 - 63 - /** 64 - * A structure for mapping buffer. 65 - * 66 - * @handle: a handle to gem object created. 67 - * @pad: just padding to be 64-bit aligned. 68 - * @size: memory size to be mapped. 69 - * @mapped: having user virtual address mmaped. 70 - * - this variable would be filled by exynos gem module 71 - * of kernel side with user virtual address which is allocated 72 - * by do_mmap(). 73 - */ 74 - struct drm_exynos_gem_mmap { 75 - unsigned int handle; 76 - unsigned int pad; 77 - uint64_t size; 78 - uint64_t mapped; 79 - }; 80 - 81 - /** 82 - * A structure to gem information. 83 - * 84 - * @handle: a handle to gem object created. 85 - * @flags: flag value including memory type and cache attribute and 86 - * this value would be set by driver. 87 - * @size: size to memory region allocated by gem and this size would 88 - * be set by driver. 
89 - */ 90 - struct drm_exynos_gem_info { 91 - unsigned int handle; 92 - unsigned int flags; 93 - uint64_t size; 94 - }; 95 - 96 - /** 97 - * A structure for user connection request of virtual display. 98 - * 99 - * @connection: indicate whether doing connetion or not by user. 100 - * @extensions: if this value is 1 then the vidi driver would need additional 101 - * 128bytes edid data. 102 - * @edid: the edid data pointer from user side. 103 - */ 104 - struct drm_exynos_vidi_connection { 105 - unsigned int connection; 106 - unsigned int extensions; 107 - uint64_t edid; 108 - }; 109 - 110 - /* memory type definitions. */ 111 - enum e_drm_exynos_gem_mem_type { 112 - /* Physically Continuous memory and used as default. */ 113 - EXYNOS_BO_CONTIG = 0 << 0, 114 - /* Physically Non-Continuous memory. */ 115 - EXYNOS_BO_NONCONTIG = 1 << 0, 116 - /* non-cachable mapping and used as default. */ 117 - EXYNOS_BO_NONCACHABLE = 0 << 1, 118 - /* cachable mapping. */ 119 - EXYNOS_BO_CACHABLE = 1 << 1, 120 - /* write-combine mapping. 
*/ 121 - EXYNOS_BO_WC = 1 << 2, 122 - EXYNOS_BO_MASK = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE | 123 - EXYNOS_BO_WC 124 - }; 125 - 126 - struct drm_exynos_g2d_get_ver { 127 - __u32 major; 128 - __u32 minor; 129 - }; 130 - 131 - struct drm_exynos_g2d_cmd { 132 - __u32 offset; 133 - __u32 data; 134 - }; 135 - 136 - enum drm_exynos_g2d_event_type { 137 - G2D_EVENT_NOT, 138 - G2D_EVENT_NONSTOP, 139 - G2D_EVENT_STOP, /* not yet */ 140 - }; 141 - 142 - struct drm_exynos_g2d_set_cmdlist { 143 - __u64 cmd; 144 - __u64 cmd_gem; 145 - __u32 cmd_nr; 146 - __u32 cmd_gem_nr; 147 - 148 - /* for g2d event */ 149 - __u64 event_type; 150 - __u64 user_data; 151 - }; 152 - 153 - struct drm_exynos_g2d_exec { 154 - __u64 async; 155 - }; 156 - 157 - #define DRM_EXYNOS_GEM_CREATE 0x00 158 - #define DRM_EXYNOS_GEM_MAP_OFFSET 0x01 159 - #define DRM_EXYNOS_GEM_MMAP 0x02 160 - /* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */ 161 - #define DRM_EXYNOS_GEM_GET 0x04 162 - #define DRM_EXYNOS_VIDI_CONNECTION 0x07 163 - 164 - /* G2D */ 165 - #define DRM_EXYNOS_G2D_GET_VER 0x20 166 - #define DRM_EXYNOS_G2D_SET_CMDLIST 0x21 167 - #define DRM_EXYNOS_G2D_EXEC 0x22 168 - 169 - #define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \ 170 - DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create) 171 - 172 - #define DRM_IOCTL_EXYNOS_GEM_MAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + \ 173 - DRM_EXYNOS_GEM_MAP_OFFSET, struct drm_exynos_gem_map_off) 174 - 175 - #define DRM_IOCTL_EXYNOS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \ 176 - DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap) 177 - 178 - #define DRM_IOCTL_EXYNOS_GEM_GET DRM_IOWR(DRM_COMMAND_BASE + \ 179 - DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info) 180 - 181 - #define DRM_IOCTL_EXYNOS_VIDI_CONNECTION DRM_IOWR(DRM_COMMAND_BASE + \ 182 - DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection) 183 - 184 - #define DRM_IOCTL_EXYNOS_G2D_GET_VER DRM_IOWR(DRM_COMMAND_BASE + \ 185 - DRM_EXYNOS_G2D_GET_VER, struct drm_exynos_g2d_get_ver) 186 - 
#define DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST DRM_IOWR(DRM_COMMAND_BASE + \ 187 - DRM_EXYNOS_G2D_SET_CMDLIST, struct drm_exynos_g2d_set_cmdlist) 188 - #define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \ 189 - DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec) 190 - 191 - /* EXYNOS specific events */ 192 - #define DRM_EXYNOS_G2D_EVENT 0x80000000 193 - 194 - struct drm_exynos_g2d_event { 195 - struct drm_event base; 196 - __u64 user_data; 197 - __u32 tv_sec; 198 - __u32 tv_usec; 199 - __u32 cmdlist_no; 200 - __u32 reserved; 201 - }; 202 - 203 - #ifdef __KERNEL__ 31 + #include <uapi/drm/exynos_drm.h> 204 32 205 33 /** 206 34 * A structure for lcd panel information. ··· 85 257 int (*get_hpd)(void); 86 258 }; 87 259 88 - #endif /* __KERNEL__ */ 89 260 #endif /* _EXYNOS_DRM_H_ */
include/drm/i810_drm.h include/uapi/drm/i810_drm.h
+1 -919
include/drm/i915_drm.h
··· 23 23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 24 24 * 25 25 */ 26 - 27 26 #ifndef _I915_DRM_H_ 28 27 #define _I915_DRM_H_ 29 28 30 - #include <drm/drm.h> 29 + #include <uapi/drm/i915_drm.h> 31 30 32 - /* Please note that modifications to all structs defined here are 33 - * subject to backwards-compatibility constraints. 34 - */ 35 - 36 - #ifdef __KERNEL__ 37 31 /* For use by IPS driver */ 38 32 extern unsigned long i915_read_mch_val(void); 39 33 extern bool i915_gpu_raise(void); 40 34 extern bool i915_gpu_lower(void); 41 35 extern bool i915_gpu_busy(void); 42 36 extern bool i915_gpu_turbo_disable(void); 43 - #endif 44 - 45 - /* Each region is a minimum of 16k, and there are at most 255 of them. 46 - */ 47 - #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use 48 - * of chars for next/prev indices */ 49 - #define I915_LOG_MIN_TEX_REGION_SIZE 14 50 - 51 - typedef struct _drm_i915_init { 52 - enum { 53 - I915_INIT_DMA = 0x01, 54 - I915_CLEANUP_DMA = 0x02, 55 - I915_RESUME_DMA = 0x03 56 - } func; 57 - unsigned int mmio_offset; 58 - int sarea_priv_offset; 59 - unsigned int ring_start; 60 - unsigned int ring_end; 61 - unsigned int ring_size; 62 - unsigned int front_offset; 63 - unsigned int back_offset; 64 - unsigned int depth_offset; 65 - unsigned int w; 66 - unsigned int h; 67 - unsigned int pitch; 68 - unsigned int pitch_bits; 69 - unsigned int back_pitch; 70 - unsigned int depth_pitch; 71 - unsigned int cpp; 72 - unsigned int chipset; 73 - } drm_i915_init_t; 74 - 75 - typedef struct _drm_i915_sarea { 76 - struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; 77 - int last_upload; /* last time texture was uploaded */ 78 - int last_enqueue; /* last time a buffer was enqueued */ 79 - int last_dispatch; /* age of the most recently dispatched buffer */ 80 - int ctxOwner; /* last context to upload state */ 81 - int texAge; 82 - int pf_enabled; /* is pageflipping allowed? 
*/ 83 - int pf_active; 84 - int pf_current_page; /* which buffer is being displayed? */ 85 - int perf_boxes; /* performance boxes to be displayed */ 86 - int width, height; /* screen size in pixels */ 87 - 88 - drm_handle_t front_handle; 89 - int front_offset; 90 - int front_size; 91 - 92 - drm_handle_t back_handle; 93 - int back_offset; 94 - int back_size; 95 - 96 - drm_handle_t depth_handle; 97 - int depth_offset; 98 - int depth_size; 99 - 100 - drm_handle_t tex_handle; 101 - int tex_offset; 102 - int tex_size; 103 - int log_tex_granularity; 104 - int pitch; 105 - int rotation; /* 0, 90, 180 or 270 */ 106 - int rotated_offset; 107 - int rotated_size; 108 - int rotated_pitch; 109 - int virtualX, virtualY; 110 - 111 - unsigned int front_tiled; 112 - unsigned int back_tiled; 113 - unsigned int depth_tiled; 114 - unsigned int rotated_tiled; 115 - unsigned int rotated2_tiled; 116 - 117 - int pipeA_x; 118 - int pipeA_y; 119 - int pipeA_w; 120 - int pipeA_h; 121 - int pipeB_x; 122 - int pipeB_y; 123 - int pipeB_w; 124 - int pipeB_h; 125 - 126 - /* fill out some space for old userspace triple buffer */ 127 - drm_handle_t unused_handle; 128 - __u32 unused1, unused2, unused3; 129 - 130 - /* buffer object handles for static buffers. May change 131 - * over the lifetime of the client. 
132 - */ 133 - __u32 front_bo_handle; 134 - __u32 back_bo_handle; 135 - __u32 unused_bo_handle; 136 - __u32 depth_bo_handle; 137 - 138 - } drm_i915_sarea_t; 139 - 140 - /* due to userspace building against these headers we need some compat here */ 141 - #define planeA_x pipeA_x 142 - #define planeA_y pipeA_y 143 - #define planeA_w pipeA_w 144 - #define planeA_h pipeA_h 145 - #define planeB_x pipeB_x 146 - #define planeB_y pipeB_y 147 - #define planeB_w pipeB_w 148 - #define planeB_h pipeB_h 149 - 150 - /* Flags for perf_boxes 151 - */ 152 - #define I915_BOX_RING_EMPTY 0x1 153 - #define I915_BOX_FLIP 0x2 154 - #define I915_BOX_WAIT 0x4 155 - #define I915_BOX_TEXTURE_LOAD 0x8 156 - #define I915_BOX_LOST_CONTEXT 0x10 157 - 158 - /* I915 specific ioctls 159 - * The device specific ioctl range is 0x40 to 0x79. 160 - */ 161 - #define DRM_I915_INIT 0x00 162 - #define DRM_I915_FLUSH 0x01 163 - #define DRM_I915_FLIP 0x02 164 - #define DRM_I915_BATCHBUFFER 0x03 165 - #define DRM_I915_IRQ_EMIT 0x04 166 - #define DRM_I915_IRQ_WAIT 0x05 167 - #define DRM_I915_GETPARAM 0x06 168 - #define DRM_I915_SETPARAM 0x07 169 - #define DRM_I915_ALLOC 0x08 170 - #define DRM_I915_FREE 0x09 171 - #define DRM_I915_INIT_HEAP 0x0a 172 - #define DRM_I915_CMDBUFFER 0x0b 173 - #define DRM_I915_DESTROY_HEAP 0x0c 174 - #define DRM_I915_SET_VBLANK_PIPE 0x0d 175 - #define DRM_I915_GET_VBLANK_PIPE 0x0e 176 - #define DRM_I915_VBLANK_SWAP 0x0f 177 - #define DRM_I915_HWS_ADDR 0x11 178 - #define DRM_I915_GEM_INIT 0x13 179 - #define DRM_I915_GEM_EXECBUFFER 0x14 180 - #define DRM_I915_GEM_PIN 0x15 181 - #define DRM_I915_GEM_UNPIN 0x16 182 - #define DRM_I915_GEM_BUSY 0x17 183 - #define DRM_I915_GEM_THROTTLE 0x18 184 - #define DRM_I915_GEM_ENTERVT 0x19 185 - #define DRM_I915_GEM_LEAVEVT 0x1a 186 - #define DRM_I915_GEM_CREATE 0x1b 187 - #define DRM_I915_GEM_PREAD 0x1c 188 - #define DRM_I915_GEM_PWRITE 0x1d 189 - #define DRM_I915_GEM_MMAP 0x1e 190 - #define DRM_I915_GEM_SET_DOMAIN 0x1f 191 - #define 
DRM_I915_GEM_SW_FINISH 0x20 192 - #define DRM_I915_GEM_SET_TILING 0x21 193 - #define DRM_I915_GEM_GET_TILING 0x22 194 - #define DRM_I915_GEM_GET_APERTURE 0x23 195 - #define DRM_I915_GEM_MMAP_GTT 0x24 196 - #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 197 - #define DRM_I915_GEM_MADVISE 0x26 198 - #define DRM_I915_OVERLAY_PUT_IMAGE 0x27 199 - #define DRM_I915_OVERLAY_ATTRS 0x28 200 - #define DRM_I915_GEM_EXECBUFFER2 0x29 201 - #define DRM_I915_GET_SPRITE_COLORKEY 0x2a 202 - #define DRM_I915_SET_SPRITE_COLORKEY 0x2b 203 - #define DRM_I915_GEM_WAIT 0x2c 204 - #define DRM_I915_GEM_CONTEXT_CREATE 0x2d 205 - #define DRM_I915_GEM_CONTEXT_DESTROY 0x2e 206 - #define DRM_I915_GEM_SET_CACHING 0x2f 207 - #define DRM_I915_GEM_GET_CACHING 0x30 208 - #define DRM_I915_REG_READ 0x31 209 - 210 - #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 211 - #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 212 - #define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) 213 - #define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) 214 - #define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) 215 - #define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) 216 - #define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t) 217 - #define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t) 218 - #define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t) 219 - #define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) 220 - #define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) 221 - #define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) 222 - 
#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) 223 - #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 224 - #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 225 - #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 226 - #define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init) 227 - #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) 228 - #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) 229 - #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) 230 - #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 231 - #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 232 - #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) 233 - #define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching) 234 - #define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching) 235 - #define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) 236 - #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) 237 - #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) 238 - #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) 239 - #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + 
DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) 240 - #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) 241 - #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) 242 - #define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt) 243 - #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) 244 - #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) 245 - #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) 246 - #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) 247 - #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) 248 - #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) 249 - #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) 250 - #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) 251 - #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) 252 - #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 253 - #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 254 - #define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) 255 - #define 
DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) 256 - #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) 257 - #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) 258 - 259 - /* Allow drivers to submit batchbuffers directly to hardware, relying 260 - * on the security mechanisms provided by hardware. 261 - */ 262 - typedef struct drm_i915_batchbuffer { 263 - int start; /* agp offset */ 264 - int used; /* nr bytes in use */ 265 - int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 266 - int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 267 - int num_cliprects; /* mulitpass with multiple cliprects? */ 268 - struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ 269 - } drm_i915_batchbuffer_t; 270 - 271 - /* As above, but pass a pointer to userspace buffer which can be 272 - * validated by the kernel prior to sending to hardware. 273 - */ 274 - typedef struct _drm_i915_cmdbuffer { 275 - char __user *buf; /* pointer to userspace command buffer */ 276 - int sz; /* nr bytes in buf */ 277 - int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 278 - int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 279 - int num_cliprects; /* mulitpass with multiple cliprects? 
*/ 280 - struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ 281 - } drm_i915_cmdbuffer_t; 282 - 283 - /* Userspace can request & wait on irq's: 284 - */ 285 - typedef struct drm_i915_irq_emit { 286 - int __user *irq_seq; 287 - } drm_i915_irq_emit_t; 288 - 289 - typedef struct drm_i915_irq_wait { 290 - int irq_seq; 291 - } drm_i915_irq_wait_t; 292 - 293 - /* Ioctl to query kernel params: 294 - */ 295 - #define I915_PARAM_IRQ_ACTIVE 1 296 - #define I915_PARAM_ALLOW_BATCHBUFFER 2 297 - #define I915_PARAM_LAST_DISPATCH 3 298 - #define I915_PARAM_CHIPSET_ID 4 299 - #define I915_PARAM_HAS_GEM 5 300 - #define I915_PARAM_NUM_FENCES_AVAIL 6 301 - #define I915_PARAM_HAS_OVERLAY 7 302 - #define I915_PARAM_HAS_PAGEFLIPPING 8 303 - #define I915_PARAM_HAS_EXECBUF2 9 304 - #define I915_PARAM_HAS_BSD 10 305 - #define I915_PARAM_HAS_BLT 11 306 - #define I915_PARAM_HAS_RELAXED_FENCING 12 307 - #define I915_PARAM_HAS_COHERENT_RINGS 13 308 - #define I915_PARAM_HAS_EXEC_CONSTANTS 14 309 - #define I915_PARAM_HAS_RELAXED_DELTA 15 310 - #define I915_PARAM_HAS_GEN7_SOL_RESET 16 311 - #define I915_PARAM_HAS_LLC 17 312 - #define I915_PARAM_HAS_ALIASING_PPGTT 18 313 - #define I915_PARAM_HAS_WAIT_TIMEOUT 19 314 - #define I915_PARAM_HAS_SEMAPHORES 20 315 - #define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 316 - #define I915_PARAM_RSVD_FOR_FUTURE_USE 22 317 - 318 - typedef struct drm_i915_getparam { 319 - int param; 320 - int __user *value; 321 - } drm_i915_getparam_t; 322 - 323 - /* Ioctl to set kernel params: 324 - */ 325 - #define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 326 - #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 327 - #define I915_SETPARAM_ALLOW_BATCHBUFFER 3 328 - #define I915_SETPARAM_NUM_USED_FENCES 4 329 - 330 - typedef struct drm_i915_setparam { 331 - int param; 332 - int value; 333 - } drm_i915_setparam_t; 334 - 335 - /* A memory manager for regions of shared memory: 336 - */ 337 - #define I915_MEM_REGION_AGP 1 338 - 339 - typedef struct drm_i915_mem_alloc 
{ 340 - int region; 341 - int alignment; 342 - int size; 343 - int __user *region_offset; /* offset from start of fb or agp */ 344 - } drm_i915_mem_alloc_t; 345 - 346 - typedef struct drm_i915_mem_free { 347 - int region; 348 - int region_offset; 349 - } drm_i915_mem_free_t; 350 - 351 - typedef struct drm_i915_mem_init_heap { 352 - int region; 353 - int size; 354 - int start; 355 - } drm_i915_mem_init_heap_t; 356 - 357 - /* Allow memory manager to be torn down and re-initialized (eg on 358 - * rotate): 359 - */ 360 - typedef struct drm_i915_mem_destroy_heap { 361 - int region; 362 - } drm_i915_mem_destroy_heap_t; 363 - 364 - /* Allow X server to configure which pipes to monitor for vblank signals 365 - */ 366 - #define DRM_I915_VBLANK_PIPE_A 1 367 - #define DRM_I915_VBLANK_PIPE_B 2 368 - 369 - typedef struct drm_i915_vblank_pipe { 370 - int pipe; 371 - } drm_i915_vblank_pipe_t; 372 - 373 - /* Schedule buffer swap at given vertical blank: 374 - */ 375 - typedef struct drm_i915_vblank_swap { 376 - drm_drawable_t drawable; 377 - enum drm_vblank_seq_type seqtype; 378 - unsigned int sequence; 379 - } drm_i915_vblank_swap_t; 380 - 381 - typedef struct drm_i915_hws_addr { 382 - __u64 addr; 383 - } drm_i915_hws_addr_t; 384 - 385 - struct drm_i915_gem_init { 386 - /** 387 - * Beginning offset in the GTT to be managed by the DRM memory 388 - * manager. 389 - */ 390 - __u64 gtt_start; 391 - /** 392 - * Ending offset in the GTT to be managed by the DRM memory 393 - * manager. 394 - */ 395 - __u64 gtt_end; 396 - }; 397 - 398 - struct drm_i915_gem_create { 399 - /** 400 - * Requested size for the object. 401 - * 402 - * The (page-aligned) allocated size for the object will be returned. 403 - */ 404 - __u64 size; 405 - /** 406 - * Returned handle for the object. 407 - * 408 - * Object handles are nonzero. 409 - */ 410 - __u32 handle; 411 - __u32 pad; 412 - }; 413 - 414 - struct drm_i915_gem_pread { 415 - /** Handle for the object being read. 
*/ 416 - __u32 handle; 417 - __u32 pad; 418 - /** Offset into the object to read from */ 419 - __u64 offset; 420 - /** Length of data to read */ 421 - __u64 size; 422 - /** 423 - * Pointer to write the data into. 424 - * 425 - * This is a fixed-size type for 32/64 compatibility. 426 - */ 427 - __u64 data_ptr; 428 - }; 429 - 430 - struct drm_i915_gem_pwrite { 431 - /** Handle for the object being written to. */ 432 - __u32 handle; 433 - __u32 pad; 434 - /** Offset into the object to write to */ 435 - __u64 offset; 436 - /** Length of data to write */ 437 - __u64 size; 438 - /** 439 - * Pointer to read the data from. 440 - * 441 - * This is a fixed-size type for 32/64 compatibility. 442 - */ 443 - __u64 data_ptr; 444 - }; 445 - 446 - struct drm_i915_gem_mmap { 447 - /** Handle for the object being mapped. */ 448 - __u32 handle; 449 - __u32 pad; 450 - /** Offset in the object to map. */ 451 - __u64 offset; 452 - /** 453 - * Length of data to map. 454 - * 455 - * The value will be page-aligned. 456 - */ 457 - __u64 size; 458 - /** 459 - * Returned pointer the data was mapped at. 460 - * 461 - * This is a fixed-size type for 32/64 compatibility. 462 - */ 463 - __u64 addr_ptr; 464 - }; 465 - 466 - struct drm_i915_gem_mmap_gtt { 467 - /** Handle for the object being mapped. */ 468 - __u32 handle; 469 - __u32 pad; 470 - /** 471 - * Fake offset to use for subsequent mmap call 472 - * 473 - * This is a fixed-size type for 32/64 compatibility. 474 - */ 475 - __u64 offset; 476 - }; 477 - 478 - struct drm_i915_gem_set_domain { 479 - /** Handle for the object */ 480 - __u32 handle; 481 - 482 - /** New read domains */ 483 - __u32 read_domains; 484 - 485 - /** New write domain */ 486 - __u32 write_domain; 487 - }; 488 - 489 - struct drm_i915_gem_sw_finish { 490 - /** Handle for the object */ 491 - __u32 handle; 492 - }; 493 - 494 - struct drm_i915_gem_relocation_entry { 495 - /** 496 - * Handle of the buffer being pointed to by this relocation entry. 
497 - * 498 - * It's appealing to make this be an index into the mm_validate_entry 499 - * list to refer to the buffer, but this allows the driver to create 500 - * a relocation list for state buffers and not re-write it per 501 - * exec using the buffer. 502 - */ 503 - __u32 target_handle; 504 - 505 - /** 506 - * Value to be added to the offset of the target buffer to make up 507 - * the relocation entry. 508 - */ 509 - __u32 delta; 510 - 511 - /** Offset in the buffer the relocation entry will be written into */ 512 - __u64 offset; 513 - 514 - /** 515 - * Offset value of the target buffer that the relocation entry was last 516 - * written as. 517 - * 518 - * If the buffer has the same offset as last time, we can skip syncing 519 - * and writing the relocation. This value is written back out by 520 - * the execbuffer ioctl when the relocation is written. 521 - */ 522 - __u64 presumed_offset; 523 - 524 - /** 525 - * Target memory domains read by this operation. 526 - */ 527 - __u32 read_domains; 528 - 529 - /** 530 - * Target memory domains written by this operation. 531 - * 532 - * Note that only one domain may be written by the whole 533 - * execbuffer operation, so that where there are conflicts, 534 - * the application will get -EINVAL back. 535 - */ 536 - __u32 write_domain; 537 - }; 538 - 539 - /** @{ 540 - * Intel memory domains 541 - * 542 - * Most of these just align with the various caches in 543 - * the system and are used to flush and invalidate as 544 - * objects end up cached in different domains. 
545 - */ 546 - /** CPU cache */ 547 - #define I915_GEM_DOMAIN_CPU 0x00000001 548 - /** Render cache, used by 2D and 3D drawing */ 549 - #define I915_GEM_DOMAIN_RENDER 0x00000002 550 - /** Sampler cache, used by texture engine */ 551 - #define I915_GEM_DOMAIN_SAMPLER 0x00000004 552 - /** Command queue, used to load batch buffers */ 553 - #define I915_GEM_DOMAIN_COMMAND 0x00000008 554 - /** Instruction cache, used by shader programs */ 555 - #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 556 - /** Vertex address cache */ 557 - #define I915_GEM_DOMAIN_VERTEX 0x00000020 558 - /** GTT domain - aperture and scanout */ 559 - #define I915_GEM_DOMAIN_GTT 0x00000040 560 - /** @} */ 561 - 562 - struct drm_i915_gem_exec_object { 563 - /** 564 - * User's handle for a buffer to be bound into the GTT for this 565 - * operation. 566 - */ 567 - __u32 handle; 568 - 569 - /** Number of relocations to be performed on this buffer */ 570 - __u32 relocation_count; 571 - /** 572 - * Pointer to array of struct drm_i915_gem_relocation_entry containing 573 - * the relocations to be performed in this buffer. 574 - */ 575 - __u64 relocs_ptr; 576 - 577 - /** Required alignment in graphics aperture */ 578 - __u64 alignment; 579 - 580 - /** 581 - * Returned value of the updated offset of the object, for future 582 - * presumed_offset writes. 583 - */ 584 - __u64 offset; 585 - }; 586 - 587 - struct drm_i915_gem_execbuffer { 588 - /** 589 - * List of buffers to be validated with their relocations to be 590 - * performend on them. 591 - * 592 - * This is a pointer to an array of struct drm_i915_gem_validate_entry. 593 - * 594 - * These buffers must be listed in an order such that all relocations 595 - * a buffer is performing refer to buffers that have already appeared 596 - * in the validate list. 597 - */ 598 - __u64 buffers_ptr; 599 - __u32 buffer_count; 600 - 601 - /** Offset in the batchbuffer to start execution from. 
*/ 602 - __u32 batch_start_offset; 603 - /** Bytes used in batchbuffer from batch_start_offset */ 604 - __u32 batch_len; 605 - __u32 DR1; 606 - __u32 DR4; 607 - __u32 num_cliprects; 608 - /** This is a struct drm_clip_rect *cliprects */ 609 - __u64 cliprects_ptr; 610 - }; 611 - 612 - struct drm_i915_gem_exec_object2 { 613 - /** 614 - * User's handle for a buffer to be bound into the GTT for this 615 - * operation. 616 - */ 617 - __u32 handle; 618 - 619 - /** Number of relocations to be performed on this buffer */ 620 - __u32 relocation_count; 621 - /** 622 - * Pointer to array of struct drm_i915_gem_relocation_entry containing 623 - * the relocations to be performed in this buffer. 624 - */ 625 - __u64 relocs_ptr; 626 - 627 - /** Required alignment in graphics aperture */ 628 - __u64 alignment; 629 - 630 - /** 631 - * Returned value of the updated offset of the object, for future 632 - * presumed_offset writes. 633 - */ 634 - __u64 offset; 635 - 636 - #define EXEC_OBJECT_NEEDS_FENCE (1<<0) 637 - __u64 flags; 638 - __u64 rsvd1; 639 - __u64 rsvd2; 640 - }; 641 - 642 - struct drm_i915_gem_execbuffer2 { 643 - /** 644 - * List of gem_exec_object2 structs 645 - */ 646 - __u64 buffers_ptr; 647 - __u32 buffer_count; 648 - 649 - /** Offset in the batchbuffer to start execution from. */ 650 - __u32 batch_start_offset; 651 - /** Bytes used in batchbuffer from batch_start_offset */ 652 - __u32 batch_len; 653 - __u32 DR1; 654 - __u32 DR4; 655 - __u32 num_cliprects; 656 - /** This is a struct drm_clip_rect *cliprects */ 657 - __u64 cliprects_ptr; 658 - #define I915_EXEC_RING_MASK (7<<0) 659 - #define I915_EXEC_DEFAULT (0<<0) 660 - #define I915_EXEC_RENDER (1<<0) 661 - #define I915_EXEC_BSD (2<<0) 662 - #define I915_EXEC_BLT (3<<0) 663 - 664 - /* Used for switching the constants addressing mode on gen4+ RENDER ring. 665 - * Gen6+ only supports relative addressing to dynamic state (default) and 666 - * absolute addressing. 
667 - * 668 - * These flags are ignored for the BSD and BLT rings. 669 - */ 670 - #define I915_EXEC_CONSTANTS_MASK (3<<6) 671 - #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ 672 - #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) 673 - #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ 674 - __u64 flags; 675 - __u64 rsvd1; /* now used for context info */ 676 - __u64 rsvd2; 677 - }; 678 - 679 - /** Resets the SO write offset registers for transform feedback on gen7. */ 680 - #define I915_EXEC_GEN7_SOL_RESET (1<<8) 681 - 682 - #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 683 - #define i915_execbuffer2_set_context_id(eb2, context) \ 684 - (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK 685 - #define i915_execbuffer2_get_context_id(eb2) \ 686 - ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK) 687 - 688 - struct drm_i915_gem_pin { 689 - /** Handle of the buffer to be pinned. */ 690 - __u32 handle; 691 - __u32 pad; 692 - 693 - /** alignment required within the aperture */ 694 - __u64 alignment; 695 - 696 - /** Returned GTT offset of the buffer. */ 697 - __u64 offset; 698 - }; 699 - 700 - struct drm_i915_gem_unpin { 701 - /** Handle of the buffer to be unpinned. */ 702 - __u32 handle; 703 - __u32 pad; 704 - }; 705 - 706 - struct drm_i915_gem_busy { 707 - /** Handle of the buffer to check for busy */ 708 - __u32 handle; 709 - 710 - /** Return busy status (1 if busy, 0 if idle). 711 - * The high word is used to indicate on which rings the object 712 - * currently resides: 713 - * 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc) 714 - */ 715 - __u32 busy; 716 - }; 717 - 718 - #define I915_CACHING_NONE 0 719 - #define I915_CACHING_CACHED 1 720 - 721 - struct drm_i915_gem_caching { 722 - /** 723 - * Handle of the buffer to set/get the caching level of. */ 724 - __u32 handle; 725 - 726 - /** 727 - * Cacheing level to apply or return value 728 - * 729 - * bits0-15 are for generic caching control (i.e. the above defined 730 - * values). 
bits16-31 are reserved for platform-specific variations 731 - * (e.g. l3$ caching on gen7). */ 732 - __u32 caching; 733 - }; 734 - 735 - #define I915_TILING_NONE 0 736 - #define I915_TILING_X 1 737 - #define I915_TILING_Y 2 738 - 739 - #define I915_BIT_6_SWIZZLE_NONE 0 740 - #define I915_BIT_6_SWIZZLE_9 1 741 - #define I915_BIT_6_SWIZZLE_9_10 2 742 - #define I915_BIT_6_SWIZZLE_9_11 3 743 - #define I915_BIT_6_SWIZZLE_9_10_11 4 744 - /* Not seen by userland */ 745 - #define I915_BIT_6_SWIZZLE_UNKNOWN 5 746 - /* Seen by userland. */ 747 - #define I915_BIT_6_SWIZZLE_9_17 6 748 - #define I915_BIT_6_SWIZZLE_9_10_17 7 749 - 750 - struct drm_i915_gem_set_tiling { 751 - /** Handle of the buffer to have its tiling state updated */ 752 - __u32 handle; 753 - 754 - /** 755 - * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 756 - * I915_TILING_Y). 757 - * 758 - * This value is to be set on request, and will be updated by the 759 - * kernel on successful return with the actual chosen tiling layout. 760 - * 761 - * The tiling mode may be demoted to I915_TILING_NONE when the system 762 - * has bit 6 swizzling that can't be managed correctly by GEM. 763 - * 764 - * Buffer contents become undefined when changing tiling_mode. 765 - */ 766 - __u32 tiling_mode; 767 - 768 - /** 769 - * Stride in bytes for the object when in I915_TILING_X or 770 - * I915_TILING_Y. 771 - */ 772 - __u32 stride; 773 - 774 - /** 775 - * Returned address bit 6 swizzling required for CPU access through 776 - * mmap mapping. 777 - */ 778 - __u32 swizzle_mode; 779 - }; 780 - 781 - struct drm_i915_gem_get_tiling { 782 - /** Handle of the buffer to get tiling state for. */ 783 - __u32 handle; 784 - 785 - /** 786 - * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 787 - * I915_TILING_Y). 788 - */ 789 - __u32 tiling_mode; 790 - 791 - /** 792 - * Returned address bit 6 swizzling required for CPU access through 793 - * mmap mapping. 
794 - */ 795 - __u32 swizzle_mode; 796 - }; 797 - 798 - struct drm_i915_gem_get_aperture { 799 - /** Total size of the aperture used by i915_gem_execbuffer, in bytes */ 800 - __u64 aper_size; 801 - 802 - /** 803 - * Available space in the aperture used by i915_gem_execbuffer, in 804 - * bytes 805 - */ 806 - __u64 aper_available_size; 807 - }; 808 - 809 - struct drm_i915_get_pipe_from_crtc_id { 810 - /** ID of CRTC being requested **/ 811 - __u32 crtc_id; 812 - 813 - /** pipe of requested CRTC **/ 814 - __u32 pipe; 815 - }; 816 - 817 - #define I915_MADV_WILLNEED 0 818 - #define I915_MADV_DONTNEED 1 819 - #define __I915_MADV_PURGED 2 /* internal state */ 820 - 821 - struct drm_i915_gem_madvise { 822 - /** Handle of the buffer to change the backing store advice */ 823 - __u32 handle; 824 - 825 - /* Advice: either the buffer will be needed again in the near future, 826 - * or wont be and could be discarded under memory pressure. 827 - */ 828 - __u32 madv; 829 - 830 - /** Whether the backing store still exists. 
*/ 831 - __u32 retained; 832 - }; 833 - 834 - /* flags */ 835 - #define I915_OVERLAY_TYPE_MASK 0xff 836 - #define I915_OVERLAY_YUV_PLANAR 0x01 837 - #define I915_OVERLAY_YUV_PACKED 0x02 838 - #define I915_OVERLAY_RGB 0x03 839 - 840 - #define I915_OVERLAY_DEPTH_MASK 0xff00 841 - #define I915_OVERLAY_RGB24 0x1000 842 - #define I915_OVERLAY_RGB16 0x2000 843 - #define I915_OVERLAY_RGB15 0x3000 844 - #define I915_OVERLAY_YUV422 0x0100 845 - #define I915_OVERLAY_YUV411 0x0200 846 - #define I915_OVERLAY_YUV420 0x0300 847 - #define I915_OVERLAY_YUV410 0x0400 848 - 849 - #define I915_OVERLAY_SWAP_MASK 0xff0000 850 - #define I915_OVERLAY_NO_SWAP 0x000000 851 - #define I915_OVERLAY_UV_SWAP 0x010000 852 - #define I915_OVERLAY_Y_SWAP 0x020000 853 - #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000 854 - 855 - #define I915_OVERLAY_FLAGS_MASK 0xff000000 856 - #define I915_OVERLAY_ENABLE 0x01000000 857 - 858 - struct drm_intel_overlay_put_image { 859 - /* various flags and src format description */ 860 - __u32 flags; 861 - /* source picture description */ 862 - __u32 bo_handle; 863 - /* stride values and offsets are in bytes, buffer relative */ 864 - __u16 stride_Y; /* stride for packed formats */ 865 - __u16 stride_UV; 866 - __u32 offset_Y; /* offset for packet formats */ 867 - __u32 offset_U; 868 - __u32 offset_V; 869 - /* in pixels */ 870 - __u16 src_width; 871 - __u16 src_height; 872 - /* to compensate the scaling factors for partially covered surfaces */ 873 - __u16 src_scan_width; 874 - __u16 src_scan_height; 875 - /* output crtc description */ 876 - __u32 crtc_id; 877 - __u16 dst_x; 878 - __u16 dst_y; 879 - __u16 dst_width; 880 - __u16 dst_height; 881 - }; 882 - 883 - /* flags */ 884 - #define I915_OVERLAY_UPDATE_ATTRS (1<<0) 885 - #define I915_OVERLAY_UPDATE_GAMMA (1<<1) 886 - struct drm_intel_overlay_attrs { 887 - __u32 flags; 888 - __u32 color_key; 889 - __s32 brightness; 890 - __u32 contrast; 891 - __u32 saturation; 892 - __u32 gamma0; 893 - __u32 gamma1; 894 - __u32 gamma2; 
895 - __u32 gamma3; 896 - __u32 gamma4; 897 - __u32 gamma5; 898 - }; 899 - 900 - /* 901 - * Intel sprite handling 902 - * 903 - * Color keying works with a min/mask/max tuple. Both source and destination 904 - * color keying is allowed. 905 - * 906 - * Source keying: 907 - * Sprite pixels within the min & max values, masked against the color channels 908 - * specified in the mask field, will be transparent. All other pixels will 909 - * be displayed on top of the primary plane. For RGB surfaces, only the min 910 - * and mask fields will be used; ranged compares are not allowed. 911 - * 912 - * Destination keying: 913 - * Primary plane pixels that match the min value, masked against the color 914 - * channels specified in the mask field, will be replaced by corresponding 915 - * pixels from the sprite plane. 916 - * 917 - * Note that source & destination keying are exclusive; only one can be 918 - * active on a given plane. 919 - */ 920 - 921 - #define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ 922 - #define I915_SET_COLORKEY_DESTINATION (1<<1) 923 - #define I915_SET_COLORKEY_SOURCE (1<<2) 924 - struct drm_intel_sprite_colorkey { 925 - __u32 plane_id; 926 - __u32 min_value; 927 - __u32 channel_mask; 928 - __u32 max_value; 929 - __u32 flags; 930 - }; 931 - 932 - struct drm_i915_gem_wait { 933 - /** Handle of BO we shall wait on */ 934 - __u32 bo_handle; 935 - __u32 flags; 936 - /** Number of nanoseconds to wait, Returns time remaining. */ 937 - __s64 timeout_ns; 938 - }; 939 - 940 - struct drm_i915_gem_context_create { 941 - /* output: id of new context*/ 942 - __u32 ctx_id; 943 - __u32 pad; 944 - }; 945 - 946 - struct drm_i915_gem_context_destroy { 947 - __u32 ctx_id; 948 - __u32 pad; 949 - }; 950 - 951 - struct drm_i915_reg_read { 952 - __u64 offset; 953 - __u64 val; /* Return value */ 954 - }; 955 37 #endif /* _I915_DRM_H_ */
include/drm/mga_drm.h include/uapi/drm/mga_drm.h
include/drm/nouveau_drm.h include/uapi/drm/nouveau_drm.h
include/drm/r128_drm.h include/uapi/drm/r128_drm.h
include/drm/radeon_drm.h include/uapi/drm/radeon_drm.h
include/drm/savage_drm.h include/uapi/drm/savage_drm.h
include/drm/sis_drm.h include/uapi/drm/sis_drm.h
include/drm/via_drm.h include/uapi/drm/via_drm.h
include/drm/vmwgfx_drm.h include/uapi/drm/vmwgfx_drm.h
+2
include/media/s5p_hdmi.h
··· 20 20 * @hdmiphy_info: template for HDMIPHY I2C device 21 21 * @mhl_bus: controller id for MHL control bus 22 22 * @mhl_info: template for MHL I2C device 23 + * @hpd_gpio: GPIO for Hot-Plug-Detect pin 23 24 * 24 25 * NULL pointer for *_info fields indicates that 25 26 * the corresponding chip is not present ··· 30 29 struct i2c_board_info *hdmiphy_info; 31 30 int mhl_bus; 32 31 struct i2c_board_info *mhl_info; 32 + int hpd_gpio; 33 33 }; 34 34 35 35 #endif /* S5P_HDMI_H */
+15
include/uapi/drm/Kbuild
··· 1 1 # UAPI Header export list 2 + header-y += drm.h 3 + header-y += drm_fourcc.h 4 + header-y += drm_mode.h 5 + header-y += drm_sarea.h 6 + header-y += exynos_drm.h 7 + header-y += i810_drm.h 8 + header-y += i915_drm.h 9 + header-y += mga_drm.h 10 + header-y += nouveau_drm.h 11 + header-y += r128_drm.h 12 + header-y += radeon_drm.h 13 + header-y += savage_drm.h 14 + header-y += sis_drm.h 15 + header-y += via_drm.h 16 + header-y += vmwgfx_drm.h
+203
include/uapi/drm/exynos_drm.h
··· 1 + /* exynos_drm.h 2 + * 3 + * Copyright (c) 2011 Samsung Electronics Co., Ltd. 4 + * Authors: 5 + * Inki Dae <inki.dae@samsung.com> 6 + * Joonyoung Shim <jy0922.shim@samsung.com> 7 + * Seung-Woo Kim <sw0312.kim@samsung.com> 8 + * 9 + * Permission is hereby granted, free of charge, to any person obtaining a 10 + * copy of this software and associated documentation files (the "Software"), 11 + * to deal in the Software without restriction, including without limitation 12 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 13 + * and/or sell copies of the Software, and to permit persons to whom the 14 + * Software is furnished to do so, subject to the following conditions: 15 + * 16 + * The above copyright notice and this permission notice (including the next 17 + * paragraph) shall be included in all copies or substantial portions of the 18 + * Software. 19 + * 20 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 23 + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 24 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 25 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 26 + * OTHER DEALINGS IN THE SOFTWARE. 27 + */ 28 + 29 + #ifndef _UAPI_EXYNOS_DRM_H_ 30 + #define _UAPI_EXYNOS_DRM_H_ 31 + 32 + #include <drm/drm.h> 33 + 34 + /** 35 + * User-desired buffer creation information structure. 36 + * 37 + * @size: user-desired memory allocation size. 38 + * - this size value would be page-aligned internally. 39 + * @flags: user request for setting memory type or cache attributes. 40 + * @handle: returned a handle to created gem object. 41 + * - this handle will be set by gem module of kernel side. 
42 + */ 43 + struct drm_exynos_gem_create { 44 + uint64_t size; 45 + unsigned int flags; 46 + unsigned int handle; 47 + }; 48 + 49 + /** 50 + * A structure for getting buffer offset. 51 + * 52 + * @handle: a pointer to gem object created. 53 + * @pad: just padding to be 64-bit aligned. 54 + * @offset: relatived offset value of the memory region allocated. 55 + * - this value should be set by user. 56 + */ 57 + struct drm_exynos_gem_map_off { 58 + unsigned int handle; 59 + unsigned int pad; 60 + uint64_t offset; 61 + }; 62 + 63 + /** 64 + * A structure for mapping buffer. 65 + * 66 + * @handle: a handle to gem object created. 67 + * @pad: just padding to be 64-bit aligned. 68 + * @size: memory size to be mapped. 69 + * @mapped: having user virtual address mmaped. 70 + * - this variable would be filled by exynos gem module 71 + * of kernel side with user virtual address which is allocated 72 + * by do_mmap(). 73 + */ 74 + struct drm_exynos_gem_mmap { 75 + unsigned int handle; 76 + unsigned int pad; 77 + uint64_t size; 78 + uint64_t mapped; 79 + }; 80 + 81 + /** 82 + * A structure to gem information. 83 + * 84 + * @handle: a handle to gem object created. 85 + * @flags: flag value including memory type and cache attribute and 86 + * this value would be set by driver. 87 + * @size: size to memory region allocated by gem and this size would 88 + * be set by driver. 89 + */ 90 + struct drm_exynos_gem_info { 91 + unsigned int handle; 92 + unsigned int flags; 93 + uint64_t size; 94 + }; 95 + 96 + /** 97 + * A structure for user connection request of virtual display. 98 + * 99 + * @connection: indicate whether doing connetion or not by user. 100 + * @extensions: if this value is 1 then the vidi driver would need additional 101 + * 128bytes edid data. 102 + * @edid: the edid data pointer from user side. 
103 + */ 104 + struct drm_exynos_vidi_connection { 105 + unsigned int connection; 106 + unsigned int extensions; 107 + uint64_t edid; 108 + }; 109 + 110 + /* memory type definitions. */ 111 + enum e_drm_exynos_gem_mem_type { 112 + /* Physically Continuous memory and used as default. */ 113 + EXYNOS_BO_CONTIG = 0 << 0, 114 + /* Physically Non-Continuous memory. */ 115 + EXYNOS_BO_NONCONTIG = 1 << 0, 116 + /* non-cachable mapping and used as default. */ 117 + EXYNOS_BO_NONCACHABLE = 0 << 1, 118 + /* cachable mapping. */ 119 + EXYNOS_BO_CACHABLE = 1 << 1, 120 + /* write-combine mapping. */ 121 + EXYNOS_BO_WC = 1 << 2, 122 + EXYNOS_BO_MASK = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE | 123 + EXYNOS_BO_WC 124 + }; 125 + 126 + struct drm_exynos_g2d_get_ver { 127 + __u32 major; 128 + __u32 minor; 129 + }; 130 + 131 + struct drm_exynos_g2d_cmd { 132 + __u32 offset; 133 + __u32 data; 134 + }; 135 + 136 + enum drm_exynos_g2d_event_type { 137 + G2D_EVENT_NOT, 138 + G2D_EVENT_NONSTOP, 139 + G2D_EVENT_STOP, /* not yet */ 140 + }; 141 + 142 + struct drm_exynos_g2d_set_cmdlist { 143 + __u64 cmd; 144 + __u64 cmd_gem; 145 + __u32 cmd_nr; 146 + __u32 cmd_gem_nr; 147 + 148 + /* for g2d event */ 149 + __u64 event_type; 150 + __u64 user_data; 151 + }; 152 + 153 + struct drm_exynos_g2d_exec { 154 + __u64 async; 155 + }; 156 + 157 + #define DRM_EXYNOS_GEM_CREATE 0x00 158 + #define DRM_EXYNOS_GEM_MAP_OFFSET 0x01 159 + #define DRM_EXYNOS_GEM_MMAP 0x02 160 + /* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */ 161 + #define DRM_EXYNOS_GEM_GET 0x04 162 + #define DRM_EXYNOS_VIDI_CONNECTION 0x07 163 + 164 + /* G2D */ 165 + #define DRM_EXYNOS_G2D_GET_VER 0x20 166 + #define DRM_EXYNOS_G2D_SET_CMDLIST 0x21 167 + #define DRM_EXYNOS_G2D_EXEC 0x22 168 + 169 + #define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \ 170 + DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create) 171 + 172 + #define DRM_IOCTL_EXYNOS_GEM_MAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + \ 173 + DRM_EXYNOS_GEM_MAP_OFFSET, 
struct drm_exynos_gem_map_off) 174 + 175 + #define DRM_IOCTL_EXYNOS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \ 176 + DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap) 177 + 178 + #define DRM_IOCTL_EXYNOS_GEM_GET DRM_IOWR(DRM_COMMAND_BASE + \ 179 + DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info) 180 + 181 + #define DRM_IOCTL_EXYNOS_VIDI_CONNECTION DRM_IOWR(DRM_COMMAND_BASE + \ 182 + DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection) 183 + 184 + #define DRM_IOCTL_EXYNOS_G2D_GET_VER DRM_IOWR(DRM_COMMAND_BASE + \ 185 + DRM_EXYNOS_G2D_GET_VER, struct drm_exynos_g2d_get_ver) 186 + #define DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST DRM_IOWR(DRM_COMMAND_BASE + \ 187 + DRM_EXYNOS_G2D_SET_CMDLIST, struct drm_exynos_g2d_set_cmdlist) 188 + #define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \ 189 + DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec) 190 + 191 + /* EXYNOS specific events */ 192 + #define DRM_EXYNOS_G2D_EVENT 0x80000000 193 + 194 + struct drm_exynos_g2d_event { 195 + struct drm_event base; 196 + __u64 user_data; 197 + __u32 tv_sec; 198 + __u32 tv_usec; 199 + __u32 cmdlist_no; 200 + __u32 reserved; 201 + }; 202 + 203 + #endif /* _UAPI_EXYNOS_DRM_H_ */
+947
include/uapi/drm/i915_drm.h
··· 1 + /* 2 + * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 3 + * All Rights Reserved. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the 7 + * "Software"), to deal in the Software without restriction, including 8 + * without limitation the rights to use, copy, modify, merge, publish, 9 + * distribute, sub license, and/or sell copies of the Software, and to 10 + * permit persons to whom the Software is furnished to do so, subject to 11 + * the following conditions: 12 + * 13 + * The above copyright notice and this permission notice (including the 14 + * next paragraph) shall be included in all copies or substantial portions 15 + * of the Software. 16 + * 17 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 18 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 19 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 20 + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 21 + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 22 + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 23 + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 24 + * 25 + */ 26 + 27 + #ifndef _UAPI_I915_DRM_H_ 28 + #define _UAPI_I915_DRM_H_ 29 + 30 + #include <drm/drm.h> 31 + 32 + /* Please note that modifications to all structs defined here are 33 + * subject to backwards-compatibility constraints. 34 + */ 35 + 36 + 37 + /* Each region is a minimum of 16k, and there are at most 255 of them. 
38 + */ 39 + #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use 40 + * of chars for next/prev indices */ 41 + #define I915_LOG_MIN_TEX_REGION_SIZE 14 42 + 43 + typedef struct _drm_i915_init { 44 + enum { 45 + I915_INIT_DMA = 0x01, 46 + I915_CLEANUP_DMA = 0x02, 47 + I915_RESUME_DMA = 0x03 48 + } func; 49 + unsigned int mmio_offset; 50 + int sarea_priv_offset; 51 + unsigned int ring_start; 52 + unsigned int ring_end; 53 + unsigned int ring_size; 54 + unsigned int front_offset; 55 + unsigned int back_offset; 56 + unsigned int depth_offset; 57 + unsigned int w; 58 + unsigned int h; 59 + unsigned int pitch; 60 + unsigned int pitch_bits; 61 + unsigned int back_pitch; 62 + unsigned int depth_pitch; 63 + unsigned int cpp; 64 + unsigned int chipset; 65 + } drm_i915_init_t; 66 + 67 + typedef struct _drm_i915_sarea { 68 + struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; 69 + int last_upload; /* last time texture was uploaded */ 70 + int last_enqueue; /* last time a buffer was enqueued */ 71 + int last_dispatch; /* age of the most recently dispatched buffer */ 72 + int ctxOwner; /* last context to upload state */ 73 + int texAge; 74 + int pf_enabled; /* is pageflipping allowed? */ 75 + int pf_active; 76 + int pf_current_page; /* which buffer is being displayed? 
*/ 77 + int perf_boxes; /* performance boxes to be displayed */ 78 + int width, height; /* screen size in pixels */ 79 + 80 + drm_handle_t front_handle; 81 + int front_offset; 82 + int front_size; 83 + 84 + drm_handle_t back_handle; 85 + int back_offset; 86 + int back_size; 87 + 88 + drm_handle_t depth_handle; 89 + int depth_offset; 90 + int depth_size; 91 + 92 + drm_handle_t tex_handle; 93 + int tex_offset; 94 + int tex_size; 95 + int log_tex_granularity; 96 + int pitch; 97 + int rotation; /* 0, 90, 180 or 270 */ 98 + int rotated_offset; 99 + int rotated_size; 100 + int rotated_pitch; 101 + int virtualX, virtualY; 102 + 103 + unsigned int front_tiled; 104 + unsigned int back_tiled; 105 + unsigned int depth_tiled; 106 + unsigned int rotated_tiled; 107 + unsigned int rotated2_tiled; 108 + 109 + int pipeA_x; 110 + int pipeA_y; 111 + int pipeA_w; 112 + int pipeA_h; 113 + int pipeB_x; 114 + int pipeB_y; 115 + int pipeB_w; 116 + int pipeB_h; 117 + 118 + /* fill out some space for old userspace triple buffer */ 119 + drm_handle_t unused_handle; 120 + __u32 unused1, unused2, unused3; 121 + 122 + /* buffer object handles for static buffers. May change 123 + * over the lifetime of the client. 
124 + */ 125 + __u32 front_bo_handle; 126 + __u32 back_bo_handle; 127 + __u32 unused_bo_handle; 128 + __u32 depth_bo_handle; 129 + 130 + } drm_i915_sarea_t; 131 + 132 + /* due to userspace building against these headers we need some compat here */ 133 + #define planeA_x pipeA_x 134 + #define planeA_y pipeA_y 135 + #define planeA_w pipeA_w 136 + #define planeA_h pipeA_h 137 + #define planeB_x pipeB_x 138 + #define planeB_y pipeB_y 139 + #define planeB_w pipeB_w 140 + #define planeB_h pipeB_h 141 + 142 + /* Flags for perf_boxes 143 + */ 144 + #define I915_BOX_RING_EMPTY 0x1 145 + #define I915_BOX_FLIP 0x2 146 + #define I915_BOX_WAIT 0x4 147 + #define I915_BOX_TEXTURE_LOAD 0x8 148 + #define I915_BOX_LOST_CONTEXT 0x10 149 + 150 + /* I915 specific ioctls 151 + * The device specific ioctl range is 0x40 to 0x79. 152 + */ 153 + #define DRM_I915_INIT 0x00 154 + #define DRM_I915_FLUSH 0x01 155 + #define DRM_I915_FLIP 0x02 156 + #define DRM_I915_BATCHBUFFER 0x03 157 + #define DRM_I915_IRQ_EMIT 0x04 158 + #define DRM_I915_IRQ_WAIT 0x05 159 + #define DRM_I915_GETPARAM 0x06 160 + #define DRM_I915_SETPARAM 0x07 161 + #define DRM_I915_ALLOC 0x08 162 + #define DRM_I915_FREE 0x09 163 + #define DRM_I915_INIT_HEAP 0x0a 164 + #define DRM_I915_CMDBUFFER 0x0b 165 + #define DRM_I915_DESTROY_HEAP 0x0c 166 + #define DRM_I915_SET_VBLANK_PIPE 0x0d 167 + #define DRM_I915_GET_VBLANK_PIPE 0x0e 168 + #define DRM_I915_VBLANK_SWAP 0x0f 169 + #define DRM_I915_HWS_ADDR 0x11 170 + #define DRM_I915_GEM_INIT 0x13 171 + #define DRM_I915_GEM_EXECBUFFER 0x14 172 + #define DRM_I915_GEM_PIN 0x15 173 + #define DRM_I915_GEM_UNPIN 0x16 174 + #define DRM_I915_GEM_BUSY 0x17 175 + #define DRM_I915_GEM_THROTTLE 0x18 176 + #define DRM_I915_GEM_ENTERVT 0x19 177 + #define DRM_I915_GEM_LEAVEVT 0x1a 178 + #define DRM_I915_GEM_CREATE 0x1b 179 + #define DRM_I915_GEM_PREAD 0x1c 180 + #define DRM_I915_GEM_PWRITE 0x1d 181 + #define DRM_I915_GEM_MMAP 0x1e 182 + #define DRM_I915_GEM_SET_DOMAIN 0x1f 183 + #define 
DRM_I915_GEM_SW_FINISH 0x20 184 + #define DRM_I915_GEM_SET_TILING 0x21 185 + #define DRM_I915_GEM_GET_TILING 0x22 186 + #define DRM_I915_GEM_GET_APERTURE 0x23 187 + #define DRM_I915_GEM_MMAP_GTT 0x24 188 + #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 189 + #define DRM_I915_GEM_MADVISE 0x26 190 + #define DRM_I915_OVERLAY_PUT_IMAGE 0x27 191 + #define DRM_I915_OVERLAY_ATTRS 0x28 192 + #define DRM_I915_GEM_EXECBUFFER2 0x29 193 + #define DRM_I915_GET_SPRITE_COLORKEY 0x2a 194 + #define DRM_I915_SET_SPRITE_COLORKEY 0x2b 195 + #define DRM_I915_GEM_WAIT 0x2c 196 + #define DRM_I915_GEM_CONTEXT_CREATE 0x2d 197 + #define DRM_I915_GEM_CONTEXT_DESTROY 0x2e 198 + #define DRM_I915_GEM_SET_CACHING 0x2f 199 + #define DRM_I915_GEM_GET_CACHING 0x30 200 + #define DRM_I915_REG_READ 0x31 201 + 202 + #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 203 + #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 204 + #define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) 205 + #define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) 206 + #define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) 207 + #define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) 208 + #define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t) 209 + #define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t) 210 + #define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t) 211 + #define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) 212 + #define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) 213 + #define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) 214 + 
#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) 215 + #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 216 + #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 217 + #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 218 + #define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init) 219 + #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) 220 + #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) 221 + #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) 222 + #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 223 + #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 224 + #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) 225 + #define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching) 226 + #define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching) 227 + #define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) 228 + #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) 229 + #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) 230 + #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) 231 + #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + 
DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) 232 + #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) 233 + #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) 234 + #define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt) 235 + #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) 236 + #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) 237 + #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) 238 + #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) 239 + #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) 240 + #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) 241 + #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) 242 + #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) 243 + #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) 244 + #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 245 + #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 246 + #define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) 247 + #define 
DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) 248 + #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) 249 + #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) 250 + 251 + /* Allow drivers to submit batchbuffers directly to hardware, relying 252 + * on the security mechanisms provided by hardware. 253 + */ 254 + typedef struct drm_i915_batchbuffer { 255 + int start; /* agp offset */ 256 + int used; /* nr bytes in use */ 257 + int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 258 + int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 259 + int num_cliprects; /* mulitpass with multiple cliprects? */ 260 + struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ 261 + } drm_i915_batchbuffer_t; 262 + 263 + /* As above, but pass a pointer to userspace buffer which can be 264 + * validated by the kernel prior to sending to hardware. 265 + */ 266 + typedef struct _drm_i915_cmdbuffer { 267 + char __user *buf; /* pointer to userspace command buffer */ 268 + int sz; /* nr bytes in buf */ 269 + int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 270 + int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 271 + int num_cliprects; /* mulitpass with multiple cliprects? 
*/ 272 + struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ 273 + } drm_i915_cmdbuffer_t; 274 + 275 + /* Userspace can request & wait on irq's: 276 + */ 277 + typedef struct drm_i915_irq_emit { 278 + int __user *irq_seq; 279 + } drm_i915_irq_emit_t; 280 + 281 + typedef struct drm_i915_irq_wait { 282 + int irq_seq; 283 + } drm_i915_irq_wait_t; 284 + 285 + /* Ioctl to query kernel params: 286 + */ 287 + #define I915_PARAM_IRQ_ACTIVE 1 288 + #define I915_PARAM_ALLOW_BATCHBUFFER 2 289 + #define I915_PARAM_LAST_DISPATCH 3 290 + #define I915_PARAM_CHIPSET_ID 4 291 + #define I915_PARAM_HAS_GEM 5 292 + #define I915_PARAM_NUM_FENCES_AVAIL 6 293 + #define I915_PARAM_HAS_OVERLAY 7 294 + #define I915_PARAM_HAS_PAGEFLIPPING 8 295 + #define I915_PARAM_HAS_EXECBUF2 9 296 + #define I915_PARAM_HAS_BSD 10 297 + #define I915_PARAM_HAS_BLT 11 298 + #define I915_PARAM_HAS_RELAXED_FENCING 12 299 + #define I915_PARAM_HAS_COHERENT_RINGS 13 300 + #define I915_PARAM_HAS_EXEC_CONSTANTS 14 301 + #define I915_PARAM_HAS_RELAXED_DELTA 15 302 + #define I915_PARAM_HAS_GEN7_SOL_RESET 16 303 + #define I915_PARAM_HAS_LLC 17 304 + #define I915_PARAM_HAS_ALIASING_PPGTT 18 305 + #define I915_PARAM_HAS_WAIT_TIMEOUT 19 306 + #define I915_PARAM_HAS_SEMAPHORES 20 307 + #define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 308 + #define I915_PARAM_RSVD_FOR_FUTURE_USE 22 309 + 310 + typedef struct drm_i915_getparam { 311 + int param; 312 + int __user *value; 313 + } drm_i915_getparam_t; 314 + 315 + /* Ioctl to set kernel params: 316 + */ 317 + #define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 318 + #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 319 + #define I915_SETPARAM_ALLOW_BATCHBUFFER 3 320 + #define I915_SETPARAM_NUM_USED_FENCES 4 321 + 322 + typedef struct drm_i915_setparam { 323 + int param; 324 + int value; 325 + } drm_i915_setparam_t; 326 + 327 + /* A memory manager for regions of shared memory: 328 + */ 329 + #define I915_MEM_REGION_AGP 1 330 + 331 + typedef struct drm_i915_mem_alloc 
{ 332 + int region; 333 + int alignment; 334 + int size; 335 + int __user *region_offset; /* offset from start of fb or agp */ 336 + } drm_i915_mem_alloc_t; 337 + 338 + typedef struct drm_i915_mem_free { 339 + int region; 340 + int region_offset; 341 + } drm_i915_mem_free_t; 342 + 343 + typedef struct drm_i915_mem_init_heap { 344 + int region; 345 + int size; 346 + int start; 347 + } drm_i915_mem_init_heap_t; 348 + 349 + /* Allow memory manager to be torn down and re-initialized (eg on 350 + * rotate): 351 + */ 352 + typedef struct drm_i915_mem_destroy_heap { 353 + int region; 354 + } drm_i915_mem_destroy_heap_t; 355 + 356 + /* Allow X server to configure which pipes to monitor for vblank signals 357 + */ 358 + #define DRM_I915_VBLANK_PIPE_A 1 359 + #define DRM_I915_VBLANK_PIPE_B 2 360 + 361 + typedef struct drm_i915_vblank_pipe { 362 + int pipe; 363 + } drm_i915_vblank_pipe_t; 364 + 365 + /* Schedule buffer swap at given vertical blank: 366 + */ 367 + typedef struct drm_i915_vblank_swap { 368 + drm_drawable_t drawable; 369 + enum drm_vblank_seq_type seqtype; 370 + unsigned int sequence; 371 + } drm_i915_vblank_swap_t; 372 + 373 + typedef struct drm_i915_hws_addr { 374 + __u64 addr; 375 + } drm_i915_hws_addr_t; 376 + 377 + struct drm_i915_gem_init { 378 + /** 379 + * Beginning offset in the GTT to be managed by the DRM memory 380 + * manager. 381 + */ 382 + __u64 gtt_start; 383 + /** 384 + * Ending offset in the GTT to be managed by the DRM memory 385 + * manager. 386 + */ 387 + __u64 gtt_end; 388 + }; 389 + 390 + struct drm_i915_gem_create { 391 + /** 392 + * Requested size for the object. 393 + * 394 + * The (page-aligned) allocated size for the object will be returned. 395 + */ 396 + __u64 size; 397 + /** 398 + * Returned handle for the object. 399 + * 400 + * Object handles are nonzero. 401 + */ 402 + __u32 handle; 403 + __u32 pad; 404 + }; 405 + 406 + struct drm_i915_gem_pread { 407 + /** Handle for the object being read. 
*/ 408 + __u32 handle; 409 + __u32 pad; 410 + /** Offset into the object to read from */ 411 + __u64 offset; 412 + /** Length of data to read */ 413 + __u64 size; 414 + /** 415 + * Pointer to write the data into. 416 + * 417 + * This is a fixed-size type for 32/64 compatibility. 418 + */ 419 + __u64 data_ptr; 420 + }; 421 + 422 + struct drm_i915_gem_pwrite { 423 + /** Handle for the object being written to. */ 424 + __u32 handle; 425 + __u32 pad; 426 + /** Offset into the object to write to */ 427 + __u64 offset; 428 + /** Length of data to write */ 429 + __u64 size; 430 + /** 431 + * Pointer to read the data from. 432 + * 433 + * This is a fixed-size type for 32/64 compatibility. 434 + */ 435 + __u64 data_ptr; 436 + }; 437 + 438 + struct drm_i915_gem_mmap { 439 + /** Handle for the object being mapped. */ 440 + __u32 handle; 441 + __u32 pad; 442 + /** Offset in the object to map. */ 443 + __u64 offset; 444 + /** 445 + * Length of data to map. 446 + * 447 + * The value will be page-aligned. 448 + */ 449 + __u64 size; 450 + /** 451 + * Returned pointer the data was mapped at. 452 + * 453 + * This is a fixed-size type for 32/64 compatibility. 454 + */ 455 + __u64 addr_ptr; 456 + }; 457 + 458 + struct drm_i915_gem_mmap_gtt { 459 + /** Handle for the object being mapped. */ 460 + __u32 handle; 461 + __u32 pad; 462 + /** 463 + * Fake offset to use for subsequent mmap call 464 + * 465 + * This is a fixed-size type for 32/64 compatibility. 466 + */ 467 + __u64 offset; 468 + }; 469 + 470 + struct drm_i915_gem_set_domain { 471 + /** Handle for the object */ 472 + __u32 handle; 473 + 474 + /** New read domains */ 475 + __u32 read_domains; 476 + 477 + /** New write domain */ 478 + __u32 write_domain; 479 + }; 480 + 481 + struct drm_i915_gem_sw_finish { 482 + /** Handle for the object */ 483 + __u32 handle; 484 + }; 485 + 486 + struct drm_i915_gem_relocation_entry { 487 + /** 488 + * Handle of the buffer being pointed to by this relocation entry. 
489 + * 490 + * It's appealing to make this be an index into the mm_validate_entry 491 + * list to refer to the buffer, but this allows the driver to create 492 + * a relocation list for state buffers and not re-write it per 493 + * exec using the buffer. 494 + */ 495 + __u32 target_handle; 496 + 497 + /** 498 + * Value to be added to the offset of the target buffer to make up 499 + * the relocation entry. 500 + */ 501 + __u32 delta; 502 + 503 + /** Offset in the buffer the relocation entry will be written into */ 504 + __u64 offset; 505 + 506 + /** 507 + * Offset value of the target buffer that the relocation entry was last 508 + * written as. 509 + * 510 + * If the buffer has the same offset as last time, we can skip syncing 511 + * and writing the relocation. This value is written back out by 512 + * the execbuffer ioctl when the relocation is written. 513 + */ 514 + __u64 presumed_offset; 515 + 516 + /** 517 + * Target memory domains read by this operation. 518 + */ 519 + __u32 read_domains; 520 + 521 + /** 522 + * Target memory domains written by this operation. 523 + * 524 + * Note that only one domain may be written by the whole 525 + * execbuffer operation, so that where there are conflicts, 526 + * the application will get -EINVAL back. 527 + */ 528 + __u32 write_domain; 529 + }; 530 + 531 + /** @{ 532 + * Intel memory domains 533 + * 534 + * Most of these just align with the various caches in 535 + * the system and are used to flush and invalidate as 536 + * objects end up cached in different domains. 
537 + */ 538 + /** CPU cache */ 539 + #define I915_GEM_DOMAIN_CPU 0x00000001 540 + /** Render cache, used by 2D and 3D drawing */ 541 + #define I915_GEM_DOMAIN_RENDER 0x00000002 542 + /** Sampler cache, used by texture engine */ 543 + #define I915_GEM_DOMAIN_SAMPLER 0x00000004 544 + /** Command queue, used to load batch buffers */ 545 + #define I915_GEM_DOMAIN_COMMAND 0x00000008 546 + /** Instruction cache, used by shader programs */ 547 + #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 548 + /** Vertex address cache */ 549 + #define I915_GEM_DOMAIN_VERTEX 0x00000020 550 + /** GTT domain - aperture and scanout */ 551 + #define I915_GEM_DOMAIN_GTT 0x00000040 552 + /** @} */ 553 + 554 + struct drm_i915_gem_exec_object { 555 + /** 556 + * User's handle for a buffer to be bound into the GTT for this 557 + * operation. 558 + */ 559 + __u32 handle; 560 + 561 + /** Number of relocations to be performed on this buffer */ 562 + __u32 relocation_count; 563 + /** 564 + * Pointer to array of struct drm_i915_gem_relocation_entry containing 565 + * the relocations to be performed in this buffer. 566 + */ 567 + __u64 relocs_ptr; 568 + 569 + /** Required alignment in graphics aperture */ 570 + __u64 alignment; 571 + 572 + /** 573 + * Returned value of the updated offset of the object, for future 574 + * presumed_offset writes. 575 + */ 576 + __u64 offset; 577 + }; 578 + 579 + struct drm_i915_gem_execbuffer { 580 + /** 581 + * List of buffers to be validated with their relocations to be 582 + * performend on them. 583 + * 584 + * This is a pointer to an array of struct drm_i915_gem_validate_entry. 585 + * 586 + * These buffers must be listed in an order such that all relocations 587 + * a buffer is performing refer to buffers that have already appeared 588 + * in the validate list. 589 + */ 590 + __u64 buffers_ptr; 591 + __u32 buffer_count; 592 + 593 + /** Offset in the batchbuffer to start execution from. 
*/ 594 + __u32 batch_start_offset; 595 + /** Bytes used in batchbuffer from batch_start_offset */ 596 + __u32 batch_len; 597 + __u32 DR1; 598 + __u32 DR4; 599 + __u32 num_cliprects; 600 + /** This is a struct drm_clip_rect *cliprects */ 601 + __u64 cliprects_ptr; 602 + }; 603 + 604 + struct drm_i915_gem_exec_object2 { 605 + /** 606 + * User's handle for a buffer to be bound into the GTT for this 607 + * operation. 608 + */ 609 + __u32 handle; 610 + 611 + /** Number of relocations to be performed on this buffer */ 612 + __u32 relocation_count; 613 + /** 614 + * Pointer to array of struct drm_i915_gem_relocation_entry containing 615 + * the relocations to be performed in this buffer. 616 + */ 617 + __u64 relocs_ptr; 618 + 619 + /** Required alignment in graphics aperture */ 620 + __u64 alignment; 621 + 622 + /** 623 + * Returned value of the updated offset of the object, for future 624 + * presumed_offset writes. 625 + */ 626 + __u64 offset; 627 + 628 + #define EXEC_OBJECT_NEEDS_FENCE (1<<0) 629 + __u64 flags; 630 + __u64 rsvd1; 631 + __u64 rsvd2; 632 + }; 633 + 634 + struct drm_i915_gem_execbuffer2 { 635 + /** 636 + * List of gem_exec_object2 structs 637 + */ 638 + __u64 buffers_ptr; 639 + __u32 buffer_count; 640 + 641 + /** Offset in the batchbuffer to start execution from. */ 642 + __u32 batch_start_offset; 643 + /** Bytes used in batchbuffer from batch_start_offset */ 644 + __u32 batch_len; 645 + __u32 DR1; 646 + __u32 DR4; 647 + __u32 num_cliprects; 648 + /** This is a struct drm_clip_rect *cliprects */ 649 + __u64 cliprects_ptr; 650 + #define I915_EXEC_RING_MASK (7<<0) 651 + #define I915_EXEC_DEFAULT (0<<0) 652 + #define I915_EXEC_RENDER (1<<0) 653 + #define I915_EXEC_BSD (2<<0) 654 + #define I915_EXEC_BLT (3<<0) 655 + 656 + /* Used for switching the constants addressing mode on gen4+ RENDER ring. 657 + * Gen6+ only supports relative addressing to dynamic state (default) and 658 + * absolute addressing. 
659 + * 660 + * These flags are ignored for the BSD and BLT rings. 661 + */ 662 + #define I915_EXEC_CONSTANTS_MASK (3<<6) 663 + #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ 664 + #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) 665 + #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ 666 + __u64 flags; 667 + __u64 rsvd1; /* now used for context info */ 668 + __u64 rsvd2; 669 + }; 670 + 671 + /** Resets the SO write offset registers for transform feedback on gen7. */ 672 + #define I915_EXEC_GEN7_SOL_RESET (1<<8) 673 + 674 + #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 675 + #define i915_execbuffer2_set_context_id(eb2, context) \ 676 + (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK 677 + #define i915_execbuffer2_get_context_id(eb2) \ 678 + ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK) 679 + 680 + struct drm_i915_gem_pin { 681 + /** Handle of the buffer to be pinned. */ 682 + __u32 handle; 683 + __u32 pad; 684 + 685 + /** alignment required within the aperture */ 686 + __u64 alignment; 687 + 688 + /** Returned GTT offset of the buffer. */ 689 + __u64 offset; 690 + }; 691 + 692 + struct drm_i915_gem_unpin { 693 + /** Handle of the buffer to be unpinned. */ 694 + __u32 handle; 695 + __u32 pad; 696 + }; 697 + 698 + struct drm_i915_gem_busy { 699 + /** Handle of the buffer to check for busy */ 700 + __u32 handle; 701 + 702 + /** Return busy status (1 if busy, 0 if idle). 703 + * The high word is used to indicate on which rings the object 704 + * currently resides: 705 + * 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc) 706 + */ 707 + __u32 busy; 708 + }; 709 + 710 + #define I915_CACHING_NONE 0 711 + #define I915_CACHING_CACHED 1 712 + 713 + struct drm_i915_gem_caching { 714 + /** 715 + * Handle of the buffer to set/get the caching level of. */ 716 + __u32 handle; 717 + 718 + /** 719 + * Cacheing level to apply or return value 720 + * 721 + * bits0-15 are for generic caching control (i.e. the above defined 722 + * values). 
bits16-31 are reserved for platform-specific variations 723 + * (e.g. l3$ caching on gen7). */ 724 + __u32 caching; 725 + }; 726 + 727 + #define I915_TILING_NONE 0 728 + #define I915_TILING_X 1 729 + #define I915_TILING_Y 2 730 + 731 + #define I915_BIT_6_SWIZZLE_NONE 0 732 + #define I915_BIT_6_SWIZZLE_9 1 733 + #define I915_BIT_6_SWIZZLE_9_10 2 734 + #define I915_BIT_6_SWIZZLE_9_11 3 735 + #define I915_BIT_6_SWIZZLE_9_10_11 4 736 + /* Not seen by userland */ 737 + #define I915_BIT_6_SWIZZLE_UNKNOWN 5 738 + /* Seen by userland. */ 739 + #define I915_BIT_6_SWIZZLE_9_17 6 740 + #define I915_BIT_6_SWIZZLE_9_10_17 7 741 + 742 + struct drm_i915_gem_set_tiling { 743 + /** Handle of the buffer to have its tiling state updated */ 744 + __u32 handle; 745 + 746 + /** 747 + * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 748 + * I915_TILING_Y). 749 + * 750 + * This value is to be set on request, and will be updated by the 751 + * kernel on successful return with the actual chosen tiling layout. 752 + * 753 + * The tiling mode may be demoted to I915_TILING_NONE when the system 754 + * has bit 6 swizzling that can't be managed correctly by GEM. 755 + * 756 + * Buffer contents become undefined when changing tiling_mode. 757 + */ 758 + __u32 tiling_mode; 759 + 760 + /** 761 + * Stride in bytes for the object when in I915_TILING_X or 762 + * I915_TILING_Y. 763 + */ 764 + __u32 stride; 765 + 766 + /** 767 + * Returned address bit 6 swizzling required for CPU access through 768 + * mmap mapping. 769 + */ 770 + __u32 swizzle_mode; 771 + }; 772 + 773 + struct drm_i915_gem_get_tiling { 774 + /** Handle of the buffer to get tiling state for. */ 775 + __u32 handle; 776 + 777 + /** 778 + * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 779 + * I915_TILING_Y). 780 + */ 781 + __u32 tiling_mode; 782 + 783 + /** 784 + * Returned address bit 6 swizzling required for CPU access through 785 + * mmap mapping. 
786 + */ 787 + __u32 swizzle_mode; 788 + }; 789 + 790 + struct drm_i915_gem_get_aperture { 791 + /** Total size of the aperture used by i915_gem_execbuffer, in bytes */ 792 + __u64 aper_size; 793 + 794 + /** 795 + * Available space in the aperture used by i915_gem_execbuffer, in 796 + * bytes 797 + */ 798 + __u64 aper_available_size; 799 + }; 800 + 801 + struct drm_i915_get_pipe_from_crtc_id { 802 + /** ID of CRTC being requested **/ 803 + __u32 crtc_id; 804 + 805 + /** pipe of requested CRTC **/ 806 + __u32 pipe; 807 + }; 808 + 809 + #define I915_MADV_WILLNEED 0 810 + #define I915_MADV_DONTNEED 1 811 + #define __I915_MADV_PURGED 2 /* internal state */ 812 + 813 + struct drm_i915_gem_madvise { 814 + /** Handle of the buffer to change the backing store advice */ 815 + __u32 handle; 816 + 817 + /* Advice: either the buffer will be needed again in the near future, 818 + * or wont be and could be discarded under memory pressure. 819 + */ 820 + __u32 madv; 821 + 822 + /** Whether the backing store still exists. 
*/ 823 + __u32 retained; 824 + }; 825 + 826 + /* flags */ 827 + #define I915_OVERLAY_TYPE_MASK 0xff 828 + #define I915_OVERLAY_YUV_PLANAR 0x01 829 + #define I915_OVERLAY_YUV_PACKED 0x02 830 + #define I915_OVERLAY_RGB 0x03 831 + 832 + #define I915_OVERLAY_DEPTH_MASK 0xff00 833 + #define I915_OVERLAY_RGB24 0x1000 834 + #define I915_OVERLAY_RGB16 0x2000 835 + #define I915_OVERLAY_RGB15 0x3000 836 + #define I915_OVERLAY_YUV422 0x0100 837 + #define I915_OVERLAY_YUV411 0x0200 838 + #define I915_OVERLAY_YUV420 0x0300 839 + #define I915_OVERLAY_YUV410 0x0400 840 + 841 + #define I915_OVERLAY_SWAP_MASK 0xff0000 842 + #define I915_OVERLAY_NO_SWAP 0x000000 843 + #define I915_OVERLAY_UV_SWAP 0x010000 844 + #define I915_OVERLAY_Y_SWAP 0x020000 845 + #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000 846 + 847 + #define I915_OVERLAY_FLAGS_MASK 0xff000000 848 + #define I915_OVERLAY_ENABLE 0x01000000 849 + 850 + struct drm_intel_overlay_put_image { 851 + /* various flags and src format description */ 852 + __u32 flags; 853 + /* source picture description */ 854 + __u32 bo_handle; 855 + /* stride values and offsets are in bytes, buffer relative */ 856 + __u16 stride_Y; /* stride for packed formats */ 857 + __u16 stride_UV; 858 + __u32 offset_Y; /* offset for packet formats */ 859 + __u32 offset_U; 860 + __u32 offset_V; 861 + /* in pixels */ 862 + __u16 src_width; 863 + __u16 src_height; 864 + /* to compensate the scaling factors for partially covered surfaces */ 865 + __u16 src_scan_width; 866 + __u16 src_scan_height; 867 + /* output crtc description */ 868 + __u32 crtc_id; 869 + __u16 dst_x; 870 + __u16 dst_y; 871 + __u16 dst_width; 872 + __u16 dst_height; 873 + }; 874 + 875 + /* flags */ 876 + #define I915_OVERLAY_UPDATE_ATTRS (1<<0) 877 + #define I915_OVERLAY_UPDATE_GAMMA (1<<1) 878 + struct drm_intel_overlay_attrs { 879 + __u32 flags; 880 + __u32 color_key; 881 + __s32 brightness; 882 + __u32 contrast; 883 + __u32 saturation; 884 + __u32 gamma0; 885 + __u32 gamma1; 886 + __u32 gamma2; 
887 + __u32 gamma3; 888 + __u32 gamma4; 889 + __u32 gamma5; 890 + }; 891 + 892 + /* 893 + * Intel sprite handling 894 + * 895 + * Color keying works with a min/mask/max tuple. Both source and destination 896 + * color keying is allowed. 897 + * 898 + * Source keying: 899 + * Sprite pixels within the min & max values, masked against the color channels 900 + * specified in the mask field, will be transparent. All other pixels will 901 + * be displayed on top of the primary plane. For RGB surfaces, only the min 902 + * and mask fields will be used; ranged compares are not allowed. 903 + * 904 + * Destination keying: 905 + * Primary plane pixels that match the min value, masked against the color 906 + * channels specified in the mask field, will be replaced by corresponding 907 + * pixels from the sprite plane. 908 + * 909 + * Note that source & destination keying are exclusive; only one can be 910 + * active on a given plane. 911 + */ 912 + 913 + #define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ 914 + #define I915_SET_COLORKEY_DESTINATION (1<<1) 915 + #define I915_SET_COLORKEY_SOURCE (1<<2) 916 + struct drm_intel_sprite_colorkey { 917 + __u32 plane_id; 918 + __u32 min_value; 919 + __u32 channel_mask; 920 + __u32 max_value; 921 + __u32 flags; 922 + }; 923 + 924 + struct drm_i915_gem_wait { 925 + /** Handle of BO we shall wait on */ 926 + __u32 bo_handle; 927 + __u32 flags; 928 + /** Number of nanoseconds to wait, Returns time remaining. */ 929 + __s64 timeout_ns; 930 + }; 931 + 932 + struct drm_i915_gem_context_create { 933 + /* output: id of new context*/ 934 + __u32 ctx_id; 935 + __u32 pad; 936 + }; 937 + 938 + struct drm_i915_gem_context_destroy { 939 + __u32 ctx_id; 940 + __u32 pad; 941 + }; 942 + 943 + struct drm_i915_reg_read { 944 + __u64 offset; 945 + __u64 val; /* Return value */ 946 + }; 947 + #endif /* _UAPI_I915_DRM_H_ */