Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
"Just a bunch of fixes to squeeze in before -rc1:

- three nouveau regression fixes

- one qxl regression fix

- a bunch of i915 fixes

... and some core displayport/atomic fixes"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
drm/nouveau/device: enable c800 quirk for tecra w50
drm/nouveau/clk/gt215: Unbreak engine pausing for GT21x/MCP7x
drm/nouveau/gr/nv04: fix big endian setting on gr context
drm/qxl: validate monitors config modes
drm/i915: Allow DSI dual link to be configured on any pipe
drm/i915: Don't try to use DDR DVFS on CHV when disabled in the BIOS
drm/i915: Fix CSR MMIO address check
drm/i915: Limit the number of loops for reading a split 64bit register
drm/i915: Fix broken mst get_hw_state.
drm/i915: Pass hpd_status_i915[] to intel_get_hpd_pins() in pre-g4x
uapi/drm/i915_drm.h: fix userspace compilation.
drm/i915: Always mark the object as dirty when used by the GPU
drm/dp: Add dp_aux_i2c_speed_khz module param to set the assumed i2c bus speed
drm/dp: Adjust i2c-over-aux retry count based on message size and i2c bus speed
drm/dp: Define AUX_RETRY_INTERVAL as 500 us
drm/atomic: Fix bookkeeping with TEST_ONLY, v3.

+218 -83
+23 -16
drivers/gpu/drm/drm_atomic.c
··· 1515 1515 copied_props++; 1516 1516 } 1517 1517 1518 - if (obj->type == DRM_MODE_OBJECT_PLANE && count_props) { 1518 + if (obj->type == DRM_MODE_OBJECT_PLANE && count_props && 1519 + !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) { 1519 1520 plane = obj_to_plane(obj); 1520 1521 plane_mask |= (1 << drm_plane_index(plane)); 1521 1522 plane->old_fb = plane->fb; ··· 1538 1537 } 1539 1538 1540 1539 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) { 1540 + /* 1541 + * Unlike commit, check_only does not clean up state. 1542 + * Below we call drm_atomic_state_free for it. 1543 + */ 1541 1544 ret = drm_atomic_check_only(state); 1542 - /* _check_only() does not free state, unlike _commit() */ 1543 - if (!ret) 1544 - drm_atomic_state_free(state); 1545 1545 } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { 1546 1546 ret = drm_atomic_async_commit(state); 1547 1547 } else { ··· 1569 1567 plane->old_fb = NULL; 1570 1568 } 1571 1569 1570 + if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { 1571 + /* 1572 + * TEST_ONLY and PAGE_FLIP_EVENT are mutually exclusive, 1573 + * if they weren't, this code should be called on success 1574 + * for TEST_ONLY too. 
1575 + */ 1576 + 1577 + for_each_crtc_in_state(state, crtc, crtc_state, i) { 1578 + if (!crtc_state->event) 1579 + continue; 1580 + 1581 + destroy_vblank_event(dev, file_priv, 1582 + crtc_state->event); 1583 + } 1584 + } 1585 + 1572 1586 if (ret == -EDEADLK) { 1573 1587 drm_atomic_state_clear(state); 1574 1588 drm_modeset_backoff(&ctx); 1575 1589 goto retry; 1576 1590 } 1577 1591 1578 - if (ret) { 1579 - if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { 1580 - for_each_crtc_in_state(state, crtc, crtc_state, i) { 1581 - if (!crtc_state->event) 1582 - continue; 1583 - 1584 - destroy_vblank_event(dev, file_priv, 1585 - crtc_state->event); 1586 - } 1587 - } 1588 - 1592 + if (ret || arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) 1589 1593 drm_atomic_state_free(state); 1590 - } 1591 1594 1592 1595 drm_modeset_drop_locks(&ctx); 1593 1596 drm_modeset_acquire_fini(&ctx);
+94 -5
drivers/gpu/drm/drm_dp_helper.c
··· 159 159 } 160 160 EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate); 161 161 162 + #define AUX_RETRY_INTERVAL 500 /* us */ 163 + 162 164 /** 163 165 * DOC: dp helpers 164 166 * ··· 215 213 return -EIO; 216 214 217 215 case DP_AUX_NATIVE_REPLY_DEFER: 218 - usleep_range(400, 500); 216 + usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100); 219 217 break; 220 218 } 221 219 } ··· 424 422 I2C_FUNC_10BIT_ADDR; 425 423 } 426 424 425 + #define AUX_PRECHARGE_LEN 10 /* 10 to 16 */ 426 + #define AUX_SYNC_LEN (16 + 4) /* preamble + AUX_SYNC_END */ 427 + #define AUX_STOP_LEN 4 428 + #define AUX_CMD_LEN 4 429 + #define AUX_ADDRESS_LEN 20 430 + #define AUX_REPLY_PAD_LEN 4 431 + #define AUX_LENGTH_LEN 8 432 + 433 + /* 434 + * Calculate the duration of the AUX request/reply in usec. Gives the 435 + * "best" case estimate, ie. successful while as short as possible. 436 + */ 437 + static int drm_dp_aux_req_duration(const struct drm_dp_aux_msg *msg) 438 + { 439 + int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN + 440 + AUX_CMD_LEN + AUX_ADDRESS_LEN + AUX_LENGTH_LEN; 441 + 442 + if ((msg->request & DP_AUX_I2C_READ) == 0) 443 + len += msg->size * 8; 444 + 445 + return len; 446 + } 447 + 448 + static int drm_dp_aux_reply_duration(const struct drm_dp_aux_msg *msg) 449 + { 450 + int len = AUX_PRECHARGE_LEN + AUX_SYNC_LEN + AUX_STOP_LEN + 451 + AUX_CMD_LEN + AUX_REPLY_PAD_LEN; 452 + 453 + /* 454 + * For read we expect what was asked. For writes there will 455 + * be 0 or 1 data bytes. Assume 0 for the "best" case. 456 + */ 457 + if (msg->request & DP_AUX_I2C_READ) 458 + len += msg->size * 8; 459 + 460 + return len; 461 + } 462 + 463 + #define I2C_START_LEN 1 464 + #define I2C_STOP_LEN 1 465 + #define I2C_ADDR_LEN 9 /* ADDRESS + R/W + ACK/NACK */ 466 + #define I2C_DATA_LEN 9 /* DATA + ACK/NACK */ 467 + 468 + /* 469 + * Calculate the length of the i2c transfer in usec, assuming 470 + * the i2c bus speed is as specified. Gives the the "worst" 471 + * case estimate, ie. 
successful while as long as possible. 472 + * Doesn't account the the "MOT" bit, and instead assumes each 473 + * message includes a START, ADDRESS and STOP. Neither does it 474 + * account for additional random variables such as clock stretching. 475 + */ 476 + static int drm_dp_i2c_msg_duration(const struct drm_dp_aux_msg *msg, 477 + int i2c_speed_khz) 478 + { 479 + /* AUX bitrate is 1MHz, i2c bitrate as specified */ 480 + return DIV_ROUND_UP((I2C_START_LEN + I2C_ADDR_LEN + 481 + msg->size * I2C_DATA_LEN + 482 + I2C_STOP_LEN) * 1000, i2c_speed_khz); 483 + } 484 + 485 + /* 486 + * Deterine how many retries should be attempted to successfully transfer 487 + * the specified message, based on the estimated durations of the 488 + * i2c and AUX transfers. 489 + */ 490 + static int drm_dp_i2c_retry_count(const struct drm_dp_aux_msg *msg, 491 + int i2c_speed_khz) 492 + { 493 + int aux_time_us = drm_dp_aux_req_duration(msg) + 494 + drm_dp_aux_reply_duration(msg); 495 + int i2c_time_us = drm_dp_i2c_msg_duration(msg, i2c_speed_khz); 496 + 497 + return DIV_ROUND_UP(i2c_time_us, aux_time_us + AUX_RETRY_INTERVAL); 498 + } 499 + 500 + /* 501 + * FIXME currently assumes 10 kHz as some real world devices seem 502 + * to require it. We should query/set the speed via DPCD if supported. 503 + */ 504 + static int dp_aux_i2c_speed_khz __read_mostly = 10; 505 + module_param_unsafe(dp_aux_i2c_speed_khz, int, 0644); 506 + MODULE_PARM_DESC(dp_aux_i2c_speed_khz, 507 + "Assumed speed of the i2c bus in kHz, (1-400, default 10)"); 508 + 427 509 /* 428 510 * Transfer a single I2C-over-AUX message and handle various error conditions, 429 511 * retrying the transaction as appropriate. It is assumed that the ··· 520 434 { 521 435 unsigned int retry, defer_i2c; 522 436 int ret; 523 - 524 437 /* 525 438 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device 526 439 * is required to retry at least seven times upon receiving AUX_DEFER 527 440 * before giving up the AUX transaction. 
441 + * 442 + * We also try to account for the i2c bus speed. 528 443 */ 529 - for (retry = 0, defer_i2c = 0; retry < (7 + defer_i2c); retry++) { 444 + int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz)); 445 + 446 + for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) { 530 447 mutex_lock(&aux->hw_mutex); 531 448 ret = aux->transfer(aux, msg); 532 449 mutex_unlock(&aux->hw_mutex); ··· 565 476 * For now just defer for long enough to hopefully be 566 477 * safe for all use-cases. 567 478 */ 568 - usleep_range(500, 600); 479 + usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100); 569 480 continue; 570 481 571 482 default: ··· 595 506 aux->i2c_defer_count++; 596 507 if (defer_i2c < 7) 597 508 defer_i2c++; 598 - usleep_range(400, 500); 509 + usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100); 599 510 continue; 600 511 601 512 default:
+7 -5
drivers/gpu/drm/i915/i915_drv.h
··· 1929 1929 struct skl_wm_values skl_hw; 1930 1930 struct vlv_wm_values vlv; 1931 1931 }; 1932 + 1933 + uint8_t max_level; 1932 1934 } wm; 1933 1935 1934 1936 struct i915_runtime_pm pm; ··· 3386 3384 #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) 3387 3385 3388 3386 #define I915_READ64_2x32(lower_reg, upper_reg) ({ \ 3389 - u32 upper, lower, tmp; \ 3390 - tmp = I915_READ(upper_reg); \ 3387 + u32 upper, lower, old_upper, loop = 0; \ 3388 + upper = I915_READ(upper_reg); \ 3391 3389 do { \ 3392 - upper = tmp; \ 3390 + old_upper = upper; \ 3393 3391 lower = I915_READ(lower_reg); \ 3394 - tmp = I915_READ(upper_reg); \ 3395 - } while (upper != tmp); \ 3392 + upper = I915_READ(upper_reg); \ 3393 + } while (upper != old_upper && loop++ < 2); \ 3396 3394 (u64)upper << 32 | lower; }) 3397 3395 3398 3396 #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
+1 -1
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 1032 1032 u32 old_read = obj->base.read_domains; 1033 1033 u32 old_write = obj->base.write_domain; 1034 1034 1035 + obj->dirty = 1; /* be paranoid */ 1035 1036 obj->base.write_domain = obj->base.pending_write_domain; 1036 1037 if (obj->base.write_domain == 0) 1037 1038 obj->base.pending_read_domains |= obj->base.read_domains; ··· 1040 1039 1041 1040 i915_vma_move_to_active(vma, req); 1042 1041 if (obj->base.write_domain) { 1043 - obj->dirty = 1; 1044 1042 i915_gem_request_assign(&obj->last_write_req, req); 1045 1043 1046 1044 intel_fb_obj_invalidate(obj, ORIGIN_CS);
+1 -1
drivers/gpu/drm/i915/i915_irq.c
··· 1558 1558 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1559 1559 1560 1560 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1561 - hotplug_trigger, hpd_status_g4x, 1561 + hotplug_trigger, hpd_status_i915, 1562 1562 i9xx_port_hotplug_long_detect); 1563 1563 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1564 1564 }
+1 -1
drivers/gpu/drm/i915/intel_csr.c
··· 350 350 } 351 351 csr->mmio_count = dmc_header->mmio_count; 352 352 for (i = 0; i < dmc_header->mmio_count; i++) { 353 - if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE && 353 + if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE || 354 354 dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) { 355 355 DRM_ERROR(" Firmware has wrong mmio address 0x%x\n", 356 356 dmc_header->mmioaddr[i]);
+4 -4
drivers/gpu/drm/i915/intel_display.c
··· 6305 6305 connector->base.name); 6306 6306 6307 6307 if (connector->get_hw_state(connector)) { 6308 - struct drm_encoder *encoder = &connector->encoder->base; 6308 + struct intel_encoder *encoder = connector->encoder; 6309 6309 struct drm_connector_state *conn_state = connector->base.state; 6310 6310 6311 6311 I915_STATE_WARN(!crtc, ··· 6317 6317 I915_STATE_WARN(!crtc->state->active, 6318 6318 "connector is active, but attached crtc isn't\n"); 6319 6319 6320 - if (!encoder) 6320 + if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) 6321 6321 return; 6322 6322 6323 - I915_STATE_WARN(conn_state->best_encoder != encoder, 6323 + I915_STATE_WARN(conn_state->best_encoder != &encoder->base, 6324 6324 "atomic encoder doesn't match attached encoder\n"); 6325 6325 6326 - I915_STATE_WARN(conn_state->crtc != encoder->crtc, 6326 + I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, 6327 6327 "attached encoder crtc differs from connector crtc\n"); 6328 6328 } else { 6329 6329 I915_STATE_WARN(crtc && crtc->state->active,
+6 -1
drivers/gpu/drm/i915/intel_dp_mst.c
··· 173 173 return; 174 174 } 175 175 176 + /* MST encoders are bound to a crtc, not to a connector, 177 + * force the mapping here for get_hw_state. 178 + */ 179 + found->encoder = encoder; 180 + 176 181 DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); 177 182 intel_mst->port = found->port; 178 183 ··· 405 400 406 401 static bool intel_dp_mst_get_hw_state(struct intel_connector *connector) 407 402 { 408 - if (connector->encoder) { 403 + if (connector->encoder && connector->base.state->crtc) { 409 404 enum pipe pipe; 410 405 if (!connector->encoder->get_hw_state(connector->encoder, &pipe)) 411 406 return false;
+4 -5
drivers/gpu/drm/i915/intel_dsi.c
··· 1048 1048 intel_connector->unregister = intel_connector_unregister; 1049 1049 1050 1050 /* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */ 1051 - if (dev_priv->vbt.dsi.config->dual_link) { 1052 - /* XXX: does dual link work on either pipe? */ 1053 - intel_encoder->crtc_mask = (1 << PIPE_A); 1054 - intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C)); 1055 - } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) { 1051 + if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) { 1056 1052 intel_encoder->crtc_mask = (1 << PIPE_A); 1057 1053 intel_dsi->ports = (1 << PORT_A); 1058 1054 } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) { 1059 1055 intel_encoder->crtc_mask = (1 << PIPE_B); 1060 1056 intel_dsi->ports = (1 << PORT_C); 1061 1057 } 1058 + 1059 + if (dev_priv->vbt.dsi.config->dual_link) 1060 + intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C)); 1062 1061 1063 1062 /* Create a DSI host (and a device) for each port. */ 1064 1063 for_each_dsi_port(port, intel_dsi->ports) {
+29 -13
drivers/gpu/drm/i915/intel_pm.c
··· 955 955 VLV_WM_LEVEL_PM2, 956 956 VLV_WM_LEVEL_PM5, 957 957 VLV_WM_LEVEL_DDR_DVFS, 958 - CHV_WM_NUM_LEVELS, 959 - VLV_WM_NUM_LEVELS = 1, 960 958 }; 961 959 962 960 /* latency must be in 0.1us units. */ ··· 980 982 /* all latencies in usec */ 981 983 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; 982 984 985 + dev_priv->wm.max_level = VLV_WM_LEVEL_PM2; 986 + 983 987 if (IS_CHERRYVIEW(dev_priv)) { 984 988 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; 985 989 dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; 990 + 991 + dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS; 986 992 } 987 993 } 988 994 ··· 1139 1137 memset(wm_state, 0, sizeof(*wm_state)); 1140 1138 1141 1139 wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed; 1142 - if (IS_CHERRYVIEW(dev)) 1143 - wm_state->num_levels = CHV_WM_NUM_LEVELS; 1144 - else 1145 - wm_state->num_levels = VLV_WM_NUM_LEVELS; 1140 + wm_state->num_levels = to_i915(dev)->wm.max_level + 1; 1146 1141 1147 1142 wm_state->num_active_planes = 0; 1148 1143 ··· 1219 1220 } 1220 1221 1221 1222 /* clear any (partially) filled invalid levels */ 1222 - for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) { 1223 + for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) { 1223 1224 memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level])); 1224 1225 memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level])); 1225 1226 } ··· 1323 1324 struct intel_crtc *crtc; 1324 1325 int num_active_crtcs = 0; 1325 1326 1326 - if (IS_CHERRYVIEW(dev)) 1327 - wm->level = VLV_WM_LEVEL_DDR_DVFS; 1328 - else 1329 - wm->level = VLV_WM_LEVEL_PM2; 1327 + wm->level = to_i915(dev)->wm.max_level; 1330 1328 wm->cxsr = true; 1331 1329 1332 1330 for_each_intel_crtc(dev, crtc) { ··· 4079 4083 if (val & DSP_MAXFIFO_PM5_ENABLE) 4080 4084 wm->level = VLV_WM_LEVEL_PM5; 4081 4085 4086 + /* 4087 + * If DDR DVFS is disabled in the BIOS, Punit 4088 + * will never ack the request. 
So if that happens 4089 + * assume we don't have to enable/disable DDR DVFS 4090 + * dynamically. To test that just set the REQ_ACK 4091 + * bit to poke the Punit, but don't change the 4092 + * HIGH/LOW bits so that we don't actually change 4093 + * the current state. 4094 + */ 4082 4095 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 4083 - if ((val & FORCE_DDR_HIGH_FREQ) == 0) 4084 - wm->level = VLV_WM_LEVEL_DDR_DVFS; 4096 + val |= FORCE_DDR_FREQ_REQ_ACK; 4097 + vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); 4098 + 4099 + if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & 4100 + FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { 4101 + DRM_DEBUG_KMS("Punit not acking DDR DVFS request, " 4102 + "assuming DDR DVFS is disabled\n"); 4103 + dev_priv->wm.max_level = VLV_WM_LEVEL_PM5; 4104 + } else { 4105 + val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 4106 + if ((val & FORCE_DDR_HIGH_FREQ) == 0) 4107 + wm->level = VLV_WM_LEVEL_DDR_DVFS; 4108 + } 4085 4109 4086 4110 mutex_unlock(&dev_priv->rps.hw_lock); 4087 4111 }
+1
drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
··· 689 689 690 690 static const struct nvkm_device_pci_vendor 691 691 nvkm_device_pci_10de_11fc[] = { 692 + { 0x1179, 0x0001, NULL, { .War00C800_0 = true } }, /* Toshiba Tecra W50 */ 692 693 { 0x17aa, 0x2211, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */ 693 694 { 0x17aa, 0x221e, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */ 694 695 {}
+3 -3
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
··· 1048 1048 if (ret == 0) { 1049 1049 nvkm_kmap(*pgpuobj); 1050 1050 nvkm_wo32(*pgpuobj, 0x00, object->oclass); 1051 + #ifdef __BIG_ENDIAN 1052 + nvkm_mo32(*pgpuobj, 0x00, 0x00080000, 0x00080000); 1053 + #endif 1051 1054 nvkm_wo32(*pgpuobj, 0x04, 0x00000000); 1052 1055 nvkm_wo32(*pgpuobj, 0x08, 0x00000000); 1053 - #ifdef __BIG_ENDIAN 1054 - nvkm_mo32(*pgpuobj, 0x08, 0x00080000, 0x00080000); 1055 - #endif 1056 1056 nvkm_wo32(*pgpuobj, 0x0c, 0x00000000); 1057 1057 nvkm_done(*pgpuobj); 1058 1058 }
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
··· 326 326 return -EIO; 327 327 328 328 if (nvkm_msec(device, 2000, 329 - u32 tmp = nvkm_rd32(device, 0x002504) & 0x0000003f; 329 + u32 tmp = nvkm_rd32(device, 0x00251c) & 0x0000003f; 330 330 if (tmp == 0x0000003f) 331 331 break; 332 332 ) < 0)
+40 -26
drivers/gpu/drm/qxl/qxl_display.c
··· 160 160 *pwidth = head->width; 161 161 *pheight = head->height; 162 162 drm_mode_probed_add(connector, mode); 163 + /* remember the last custom size for mode validation */ 164 + qdev->monitors_config_width = mode->hdisplay; 165 + qdev->monitors_config_height = mode->vdisplay; 163 166 return 1; 164 167 } 168 + 169 + static struct mode_size { 170 + int w; 171 + int h; 172 + } common_modes[] = { 173 + { 640, 480}, 174 + { 720, 480}, 175 + { 800, 600}, 176 + { 848, 480}, 177 + {1024, 768}, 178 + {1152, 768}, 179 + {1280, 720}, 180 + {1280, 800}, 181 + {1280, 854}, 182 + {1280, 960}, 183 + {1280, 1024}, 184 + {1440, 900}, 185 + {1400, 1050}, 186 + {1680, 1050}, 187 + {1600, 1200}, 188 + {1920, 1080}, 189 + {1920, 1200} 190 + }; 165 191 166 192 static int qxl_add_common_modes(struct drm_connector *connector, 167 193 unsigned pwidth, ··· 196 170 struct drm_device *dev = connector->dev; 197 171 struct drm_display_mode *mode = NULL; 198 172 int i; 199 - struct mode_size { 200 - int w; 201 - int h; 202 - } common_modes[] = { 203 - { 640, 480}, 204 - { 720, 480}, 205 - { 800, 600}, 206 - { 848, 480}, 207 - {1024, 768}, 208 - {1152, 768}, 209 - {1280, 720}, 210 - {1280, 800}, 211 - {1280, 854}, 212 - {1280, 960}, 213 - {1280, 1024}, 214 - {1440, 900}, 215 - {1400, 1050}, 216 - {1680, 1050}, 217 - {1600, 1200}, 218 - {1920, 1080}, 219 - {1920, 1200} 220 - }; 221 - 222 173 for (i = 0; i < ARRAY_SIZE(common_modes); i++) { 223 174 mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 224 175 60, false, false, false); ··· 826 823 static int qxl_conn_mode_valid(struct drm_connector *connector, 827 824 struct drm_display_mode *mode) 828 825 { 826 + struct drm_device *ddev = connector->dev; 827 + struct qxl_device *qdev = ddev->dev_private; 828 + int i; 829 + 829 830 /* TODO: is this called for user defined modes? 
(xrandr --add-mode) 830 831 * TODO: check that the mode fits in the framebuffer */ 831 - DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay, 832 - mode->vdisplay, mode->status); 833 - return MODE_OK; 832 + 833 + if(qdev->monitors_config_width == mode->hdisplay && 834 + qdev->monitors_config_height == mode->vdisplay) 835 + return MODE_OK; 836 + 837 + for (i = 0; i < ARRAY_SIZE(common_modes); i++) { 838 + if (common_modes[i].w == mode->hdisplay && common_modes[i].h == mode->vdisplay) 839 + return MODE_OK; 840 + } 841 + return MODE_BAD; 834 842 } 835 843 836 844 static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
+2
drivers/gpu/drm/qxl/qxl_drv.h
··· 325 325 struct work_struct fb_work; 326 326 327 327 struct drm_property *hotplug_mode_update_property; 328 + int monitors_config_width; 329 + int monitors_config_height; 328 330 }; 329 331 330 332 /* forward declaration for QXL_INFO_IO */
+1 -1
include/uapi/drm/i915_drm.h
··· 358 358 #define I915_PARAM_HAS_RESOURCE_STREAMER 36 359 359 360 360 typedef struct drm_i915_getparam { 361 - s32 param; 361 + __s32 param; 362 362 /* 363 363 * WARNING: Using pointers instead of fixed-size u64 means we need to write 364 364 * compat32 code. Don't repeat this mistake.