Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2018-11-21' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for v4.21, part 2:

UAPI Changes:
- Remove syncobj timeline support from drm.

Cross-subsystem Changes:
- Document canvas provider node in the DT bindings.
- Improve documentation for TPO TPG110 DT bindings.

Core Changes:
- Use explicit state in drm atomic functions.
- Add panel quirk for new GPD Win2 firmware.
- Add DRM_FORMAT_XYUV8888.
- Set the default import/export function in prime to drm_gem_prime_import/export.
- Add a separate drm_gem_object_funcs, to stop relying on dev->driver->*gem* functions.
- Make sure that tinydrm sets the virtual address also on imported buffers.

Driver Changes:
- Support active-low data enable signal in sun4i.
- Fix scaling in vc4.
- Use canvas provider node in meson.
- Remove unused variables in sti, qxl and cirrus.
- Add overlay plane support and primary plane scaling to meson.
- I2C fixes in drm/bridge/sii902x.
- Fix mailbox read size in rockchip.
- Spelling fix in panel/s6d16d0.
- Remove unnecessary null check from qxl_bo_unref.
- Remove unused arguments from qxl_bo_pin.
- Fix qxl cursor pinning.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/9c0409e3-a85f-d2af-b4eb-baf1eb8bbae4@linux.intel.com

+2324 -849
+2
Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
··· 67 67 Optional properties: 68 68 - power-domains: Optional phandle to associated power domain as described in 69 69 the file ../power/power_domain.txt 70 + - amlogic,canvas: phandle to canvas provider node as described in the file 71 + ../soc/amlogic/amlogic,canvas.txt 70 72 71 73 Required nodes: 72 74
+52 -29
Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
··· 1 1 TPO TPG110 Panel 2 2 ================ 3 3 4 - This binding builds on the DPI bindings, adding a few properties 5 - as a superset of a DPI. See panel-dpi.txt for the required DPI 6 - bindings. 4 + This panel driver is a component that acts as an intermediary 5 + between an RGB output and a variety of panels. The panel 6 + driver is strapped up in electronics to the desired resolution 7 + and other properties, and has a control interface over 3WIRE 8 + SPI. By talking to the TPG110 over SPI, the strapped properties 9 + can be discovered and the hardware is therefore mostly 10 + self-describing. 11 + 12 + +--------+ 13 + SPI -> | TPO | -> physical display 14 + RGB -> | TPG110 | 15 + +--------+ 16 + 17 + If some electrical strap or alternate resolution is desired, 18 + this can be set up by taking software control of the display 19 + over the SPI interface. The interface can also adjust 20 + for properties of the display such as gamma correction and 21 + certain electrical driving levels. 22 + 23 + The TPG110 does not know the physical dimensions of the panel 24 + connected, so this needs to be specified in the device tree. 25 + 26 + It requires a GPIO line for control of its reset line. 27 + 28 + The serial protocol has line names that resemble I2C but the 29 + protocol is not I2C but 3WIRE SPI. 7 30 8 31 Required properties: 9 - - compatible : "tpo,tpg110" 32 + - compatible : one of: 33 + "ste,nomadik-nhk15-display", "tpo,tpg110" 34 + "tpo,tpg110" 10 35 - grestb-gpios : panel reset GPIO 11 - - scen-gpios : serial control enable GPIO 12 - - scl-gpios : serial control clock line GPIO 13 - - sda-gpios : serial control data line GPIO 36 + - width-mm : see display/panel/panel-common.txt 37 + - height-mm : see display/panel/panel-common.txt 14 38 15 - Required nodes: 16 - - Video port for DPI input, see panel-dpi.txt 17 - - Panel timing for DPI setup, see panel-dpi.txt 39 + The device needs to be a child of an SPI bus, see 40 + spi/spi-bus.txt. 
The SPI child must set the following 41 + properties: 42 + - spi-3wire 43 + - spi-max-frequency = <3000000>; 44 + as these are characteristics of this device. 45 + 46 + The device node can contain one 'port' child node with one child 47 + 'endpoint' node, according to the bindings defined in 48 + media/video-interfaces.txt. This node should describe panel's video bus. 18 49 19 50 Example 20 51 ------- 21 52 22 - panel { 23 - compatible = "tpo,tpg110", "panel-dpi"; 24 - grestb-gpios = <&stmpe_gpio44 5 GPIO_ACTIVE_LOW>; 25 - scen-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; 26 - scl-gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>; 27 - sda-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>; 53 + panel: display@0 { 54 + compatible = "tpo,tpg110"; 55 + reg = <0>; 56 + spi-3wire; 57 + /* 320 ns min period ~= 3 MHz */ 58 + spi-max-frequency = <3000000>; 59 + /* Width and height from data sheet */ 60 + width-mm = <116>; 61 + height-mm = <87>; 62 + grestb-gpios = <&foo_gpio 5 GPIO_ACTIVE_LOW>; 28 63 backlight = <&bl>; 29 64 30 65 port { 31 66 nomadik_clcd_panel: endpoint { 32 - remote-endpoint = <&nomadik_clcd_pads>; 67 + remote-endpoint = <&foo>; 33 68 }; 34 - }; 35 - 36 - panel-timing { 37 - clock-frequency = <33200000>; 38 - hactive = <800>; 39 - hback-porch = <216>; 40 - hfront-porch = <40>; 41 - hsync-len = <1>; 42 - vactive = <480>; 43 - vback-porch = <35>; 44 - vfront-porch = <10>; 45 - vsync-len = <1>; 46 69 }; 47 70 };
+13
Documentation/gpu/todo.rst
··· 234 234 235 235 Contact: Daniel Vetter 236 236 237 + Defaults for .gem_prime_import and export 238 + ----------------------------------------- 239 + 240 + Most drivers don't need to set drm_driver->gem_prime_import and 241 + ->gem_prime_export now that drm_gem_prime_import() and drm_gem_prime_export() 242 + are the default. 243 + 244 + struct drm_gem_object_funcs 245 + --------------------------- 246 + 247 + GEM objects can now have a function table instead of having the callbacks on the 248 + DRM driver struct. This is now the preferred way and drivers can be moved over. 249 + 237 250 Core refactorings 238 251 ================= 239 252
+1
drivers/gpu/drm/bridge/Kconfig
··· 95 95 depends on OF 96 96 select DRM_KMS_HELPER 97 97 select REGMAP_I2C 98 + select I2C_MUX 98 99 ---help--- 99 100 Silicon Image sii902x bridge chip driver. 100 101
+178 -69
drivers/gpu/drm/bridge/sii902x.c
··· 1 1 /* 2 + * Copyright (C) 2018 Renesas Electronics 3 + * 2 4 * Copyright (C) 2016 Atmel 3 5 * Bo Shen <voice.shen@atmel.com> 4 6 * ··· 23 21 */ 24 22 25 23 #include <linux/gpio/consumer.h> 24 + #include <linux/i2c-mux.h> 26 25 #include <linux/i2c.h> 27 26 #include <linux/module.h> 28 27 #include <linux/regmap.h> ··· 89 86 struct drm_bridge bridge; 90 87 struct drm_connector connector; 91 88 struct gpio_desc *reset_gpio; 89 + struct i2c_mux_core *i2cmux; 92 90 }; 91 + 92 + static int sii902x_read_unlocked(struct i2c_client *i2c, u8 reg, u8 *val) 93 + { 94 + union i2c_smbus_data data; 95 + int ret; 96 + 97 + ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags, 98 + I2C_SMBUS_READ, reg, I2C_SMBUS_BYTE_DATA, &data); 99 + 100 + if (ret < 0) 101 + return ret; 102 + 103 + *val = data.byte; 104 + return 0; 105 + } 106 + 107 + static int sii902x_write_unlocked(struct i2c_client *i2c, u8 reg, u8 val) 108 + { 109 + union i2c_smbus_data data; 110 + 111 + data.byte = val; 112 + 113 + return __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags, 114 + I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE_DATA, 115 + &data); 116 + } 117 + 118 + static int sii902x_update_bits_unlocked(struct i2c_client *i2c, u8 reg, u8 mask, 119 + u8 val) 120 + { 121 + int ret; 122 + u8 status; 123 + 124 + ret = sii902x_read_unlocked(i2c, reg, &status); 125 + if (ret) 126 + return ret; 127 + status &= ~mask; 128 + status |= val & mask; 129 + return sii902x_write_unlocked(i2c, reg, status); 130 + } 93 131 94 132 static inline struct sii902x *bridge_to_sii902x(struct drm_bridge *bridge) 95 133 { ··· 179 135 static int sii902x_get_modes(struct drm_connector *connector) 180 136 { 181 137 struct sii902x *sii902x = connector_to_sii902x(connector); 182 - struct regmap *regmap = sii902x->regmap; 183 138 u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; 184 - struct device *dev = &sii902x->i2c->dev; 185 - unsigned long timeout; 186 - unsigned int retries; 187 - unsigned int status; 188 139 struct edid *edid; 189 - int 
num = 0; 190 - int ret; 140 + int num = 0, ret; 191 141 192 - ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA, 193 - SII902X_SYS_CTRL_DDC_BUS_REQ, 194 - SII902X_SYS_CTRL_DDC_BUS_REQ); 195 - if (ret) 196 - return ret; 197 - 198 - timeout = jiffies + 199 - msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS); 200 - do { 201 - ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status); 202 - if (ret) 203 - return ret; 204 - } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) && 205 - time_before(jiffies, timeout)); 206 - 207 - if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) { 208 - dev_err(dev, "failed to acquire the i2c bus\n"); 209 - return -ETIMEDOUT; 210 - } 211 - 212 - ret = regmap_write(regmap, SII902X_SYS_CTRL_DATA, status); 213 - if (ret) 214 - return ret; 215 - 216 - edid = drm_get_edid(connector, sii902x->i2c->adapter); 142 + edid = drm_get_edid(connector, sii902x->i2cmux->adapter[0]); 217 143 drm_connector_update_edid_property(connector, edid); 218 144 if (edid) { 219 145 num = drm_add_edid_modes(connector, edid); ··· 194 180 &bus_format, 1); 195 181 if (ret) 196 182 return ret; 197 - 198 - /* 199 - * Sometimes the I2C bus can stall after failure to use the 200 - * EDID channel. Retry a few times to see if things clear 201 - * up, else continue anyway. 
202 - */ 203 - retries = 5; 204 - do { 205 - ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, 206 - &status); 207 - retries--; 208 - } while (ret && retries); 209 - if (ret) 210 - dev_err(dev, "failed to read status (%d)\n", ret); 211 - 212 - ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA, 213 - SII902X_SYS_CTRL_DDC_BUS_REQ | 214 - SII902X_SYS_CTRL_DDC_BUS_GRTD, 0); 215 - if (ret) 216 - return ret; 217 - 218 - timeout = jiffies + 219 - msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS); 220 - do { 221 - ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status); 222 - if (ret) 223 - return ret; 224 - } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ | 225 - SII902X_SYS_CTRL_DDC_BUS_GRTD) && 226 - time_before(jiffies, timeout)); 227 - 228 - if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ | 229 - SII902X_SYS_CTRL_DDC_BUS_GRTD)) { 230 - dev_err(dev, "failed to release the i2c bus\n"); 231 - return -ETIMEDOUT; 232 - } 233 183 234 184 return num; 235 185 } ··· 344 366 return IRQ_HANDLED; 345 367 } 346 368 369 + /* 370 + * The purpose of sii902x_i2c_bypass_select is to enable the pass through 371 + * mode of the HDMI transmitter. Do not use regmap from within this function, 372 + * only use sii902x_*_unlocked functions to read/modify/write registers. 373 + * We are holding the parent adapter lock here, keep this in mind before 374 + * adding more i2c transactions. 375 + * 376 + * Also, since SII902X_SYS_CTRL_DATA is used with regmap_update_bits elsewhere 377 + * in this driver, we need to make sure that we only touch 0x1A[2:1] from 378 + * within sii902x_i2c_bypass_select and sii902x_i2c_bypass_deselect, and that 379 + * we leave the remaining bits as we have found them. 
380 + */ 381 + static int sii902x_i2c_bypass_select(struct i2c_mux_core *mux, u32 chan_id) 382 + { 383 + struct sii902x *sii902x = i2c_mux_priv(mux); 384 + struct device *dev = &sii902x->i2c->dev; 385 + unsigned long timeout; 386 + u8 status; 387 + int ret; 388 + 389 + ret = sii902x_update_bits_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA, 390 + SII902X_SYS_CTRL_DDC_BUS_REQ, 391 + SII902X_SYS_CTRL_DDC_BUS_REQ); 392 + if (ret) 393 + return ret; 394 + 395 + timeout = jiffies + 396 + msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS); 397 + do { 398 + ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA, 399 + &status); 400 + if (ret) 401 + return ret; 402 + } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) && 403 + time_before(jiffies, timeout)); 404 + 405 + if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) { 406 + dev_err(dev, "Failed to acquire the i2c bus\n"); 407 + return -ETIMEDOUT; 408 + } 409 + 410 + return sii902x_write_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA, 411 + status); 412 + } 413 + 414 + /* 415 + * The purpose of sii902x_i2c_bypass_deselect is to disable the pass through 416 + * mode of the HDMI transmitter. Do not use regmap from within this function, 417 + * only use sii902x_*_unlocked functions to read/modify/write registers. 418 + * We are holding the parent adapter lock here, keep this in mind before 419 + * adding more i2c transactions. 420 + * 421 + * Also, since SII902X_SYS_CTRL_DATA is used with regmap_update_bits elsewhere 422 + * in this driver, we need to make sure that we only touch 0x1A[2:1] from 423 + * within sii902x_i2c_bypass_select and sii902x_i2c_bypass_deselect, and that 424 + * we leave the remaining bits as we have found them. 
425 + */ 426 + static int sii902x_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id) 427 + { 428 + struct sii902x *sii902x = i2c_mux_priv(mux); 429 + struct device *dev = &sii902x->i2c->dev; 430 + unsigned long timeout; 431 + unsigned int retries; 432 + u8 status; 433 + int ret; 434 + 435 + /* 436 + * When the HDMI transmitter is in pass through mode, we need an 437 + * (undocumented) additional delay between STOP and START conditions 438 + * to guarantee the bus won't get stuck. 439 + */ 440 + udelay(30); 441 + 442 + /* 443 + * Sometimes the I2C bus can stall after failure to use the 444 + * EDID channel. Retry a few times to see if things clear 445 + * up, else continue anyway. 446 + */ 447 + retries = 5; 448 + do { 449 + ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA, 450 + &status); 451 + retries--; 452 + } while (ret && retries); 453 + if (ret) { 454 + dev_err(dev, "failed to read status (%d)\n", ret); 455 + return ret; 456 + } 457 + 458 + ret = sii902x_update_bits_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA, 459 + SII902X_SYS_CTRL_DDC_BUS_REQ | 460 + SII902X_SYS_CTRL_DDC_BUS_GRTD, 0); 461 + if (ret) 462 + return ret; 463 + 464 + timeout = jiffies + 465 + msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS); 466 + do { 467 + ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA, 468 + &status); 469 + if (ret) 470 + return ret; 471 + } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ | 472 + SII902X_SYS_CTRL_DDC_BUS_GRTD) && 473 + time_before(jiffies, timeout)); 474 + 475 + if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ | 476 + SII902X_SYS_CTRL_DDC_BUS_GRTD)) { 477 + dev_err(dev, "failed to release the i2c bus\n"); 478 + return -ETIMEDOUT; 479 + } 480 + 481 + return 0; 482 + } 483 + 347 484 static int sii902x_probe(struct i2c_client *client, 348 485 const struct i2c_device_id *id) 349 486 { ··· 467 374 struct sii902x *sii902x; 468 375 u8 chipid[4]; 469 376 int ret; 377 + 378 + ret = i2c_check_functionality(client->adapter, 379 + 
I2C_FUNC_SMBUS_BYTE_DATA); 380 + if (!ret) { 381 + dev_err(dev, "I2C adapter not suitable\n"); 382 + return -EIO; 383 + } 470 384 471 385 sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL); 472 386 if (!sii902x) ··· 533 433 534 434 i2c_set_clientdata(client, sii902x); 535 435 536 - return 0; 436 + sii902x->i2cmux = i2c_mux_alloc(client->adapter, dev, 437 + 1, 0, I2C_MUX_GATE, 438 + sii902x_i2c_bypass_select, 439 + sii902x_i2c_bypass_deselect); 440 + if (!sii902x->i2cmux) 441 + return -ENOMEM; 442 + 443 + sii902x->i2cmux->priv = sii902x; 444 + return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0); 537 445 } 538 446 539 447 static int sii902x_remove(struct i2c_client *client) ··· 549 441 { 550 442 struct sii902x *sii902x = i2c_get_clientdata(client); 551 443 444 + i2c_mux_del_adapters(sii902x->i2cmux); 552 445 drm_bridge_remove(&sii902x->bridge); 553 446 554 447 return 0;
-3
drivers/gpu/drm/cirrus/cirrus_fbdev.c
··· 169 169 struct drm_mode_fb_cmd2 mode_cmd; 170 170 void *sysram; 171 171 struct drm_gem_object *gobj = NULL; 172 - struct cirrus_bo *bo = NULL; 173 172 int size, ret; 174 173 175 174 mode_cmd.width = sizes->surface_width; ··· 183 184 DRM_ERROR("failed to create fbcon backing object %d\n", ret); 184 185 return ret; 185 186 } 186 - 187 - bo = gem_to_cirrus_bo(gobj); 188 187 189 188 sysram = vmalloc(size); 190 189 if (!sysram)
+64 -51
drivers/gpu/drm/drm_atomic.c
··· 315 315 } 316 316 EXPORT_SYMBOL(drm_atomic_get_crtc_state); 317 317 318 - static int drm_atomic_crtc_check(struct drm_crtc *crtc, 319 - struct drm_crtc_state *state) 318 + static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state, 319 + const struct drm_crtc_state *new_crtc_state) 320 320 { 321 + struct drm_crtc *crtc = new_crtc_state->crtc; 322 + 321 323 /* NOTE: we explicitly don't enforce constraints such as primary 322 324 * layer covering entire screen, since that is something we want 323 325 * to allow (on hw that supports it). For hw that does not, it ··· 328 326 * TODO: Add generic modeset state checks once we support those. 329 327 */ 330 328 331 - if (state->active && !state->enable) { 329 + if (new_crtc_state->active && !new_crtc_state->enable) { 332 330 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n", 333 331 crtc->base.id, crtc->name); 334 332 return -EINVAL; ··· 338 336 * as this is a kernel-internal detail that userspace should never 339 337 * be able to trigger. */ 340 338 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 341 - WARN_ON(state->enable && !state->mode_blob)) { 339 + WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) { 342 340 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n", 343 341 crtc->base.id, crtc->name); 344 342 return -EINVAL; 345 343 } 346 344 347 345 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 348 - WARN_ON(!state->enable && state->mode_blob)) { 346 + WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) { 349 347 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n", 350 348 crtc->base.id, crtc->name); 351 349 return -EINVAL; ··· 361 359 * and legacy page_flip IOCTL which also reject service on a disabled 362 360 * pipe. 
363 361 */ 364 - if (state->event && !state->active && !crtc->state->active) { 362 + if (new_crtc_state->event && 363 + !new_crtc_state->active && !old_crtc_state->active) { 365 364 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n", 366 365 crtc->base.id, crtc->name); 367 366 return -EINVAL; ··· 492 489 EXPORT_SYMBOL(drm_atomic_get_plane_state); 493 490 494 491 static bool 495 - plane_switching_crtc(struct drm_atomic_state *state, 496 - struct drm_plane *plane, 497 - struct drm_plane_state *plane_state) 492 + plane_switching_crtc(const struct drm_plane_state *old_plane_state, 493 + const struct drm_plane_state *new_plane_state) 498 494 { 499 - if (!plane->state->crtc || !plane_state->crtc) 495 + if (!old_plane_state->crtc || !new_plane_state->crtc) 500 496 return false; 501 497 502 - if (plane->state->crtc == plane_state->crtc) 498 + if (old_plane_state->crtc == new_plane_state->crtc) 503 499 return false; 504 500 505 501 /* This could be refined, but currently there's no helper or driver code ··· 511 509 512 510 /** 513 511 * drm_atomic_plane_check - check plane state 514 - * @plane: plane to check 515 - * @state: plane state to check 512 + * @old_plane_state: old plane state to check 513 + * @new_plane_state: new plane state to check 516 514 * 517 515 * Provides core sanity checks for plane state. 
518 516 * 519 517 * RETURNS: 520 518 * Zero on success, error code on failure 521 519 */ 522 - static int drm_atomic_plane_check(struct drm_plane *plane, 523 - struct drm_plane_state *state) 520 + static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, 521 + const struct drm_plane_state *new_plane_state) 524 522 { 523 + struct drm_plane *plane = new_plane_state->plane; 524 + struct drm_crtc *crtc = new_plane_state->crtc; 525 + const struct drm_framebuffer *fb = new_plane_state->fb; 525 526 unsigned int fb_width, fb_height; 526 527 int ret; 527 528 528 529 /* either *both* CRTC and FB must be set, or neither */ 529 - if (state->crtc && !state->fb) { 530 + if (crtc && !fb) { 530 531 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n", 531 532 plane->base.id, plane->name); 532 533 return -EINVAL; 533 - } else if (state->fb && !state->crtc) { 534 + } else if (fb && !crtc) { 534 535 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n", 535 536 plane->base.id, plane->name); 536 537 return -EINVAL; 537 538 } 538 539 539 540 /* if disabled, we don't care about the rest of the state: */ 540 - if (!state->crtc) 541 + if (!crtc) 541 542 return 0; 542 543 543 544 /* Check whether this plane is usable on this CRTC */ 544 - if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) { 545 + if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) { 545 546 DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n", 546 - state->crtc->base.id, state->crtc->name, 547 + crtc->base.id, crtc->name, 547 548 plane->base.id, plane->name); 548 549 return -EINVAL; 549 550 } 550 551 551 552 /* Check whether this plane supports the fb pixel format. 
*/ 552 - ret = drm_plane_check_pixel_format(plane, state->fb->format->format, 553 - state->fb->modifier); 553 + ret = drm_plane_check_pixel_format(plane, fb->format->format, 554 + fb->modifier); 554 555 if (ret) { 555 556 struct drm_format_name_buf format_name; 556 557 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n", 557 558 plane->base.id, plane->name, 558 - drm_get_format_name(state->fb->format->format, 559 + drm_get_format_name(fb->format->format, 559 560 &format_name), 560 - state->fb->modifier); 561 + fb->modifier); 561 562 return ret; 562 563 } 563 564 564 565 /* Give drivers some help against integer overflows */ 565 - if (state->crtc_w > INT_MAX || 566 - state->crtc_x > INT_MAX - (int32_t) state->crtc_w || 567 - state->crtc_h > INT_MAX || 568 - state->crtc_y > INT_MAX - (int32_t) state->crtc_h) { 566 + if (new_plane_state->crtc_w > INT_MAX || 567 + new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w || 568 + new_plane_state->crtc_h > INT_MAX || 569 + new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) { 569 570 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n", 570 571 plane->base.id, plane->name, 571 - state->crtc_w, state->crtc_h, 572 - state->crtc_x, state->crtc_y); 572 + new_plane_state->crtc_w, new_plane_state->crtc_h, 573 + new_plane_state->crtc_x, new_plane_state->crtc_y); 573 574 return -ERANGE; 574 575 } 575 576 576 - fb_width = state->fb->width << 16; 577 - fb_height = state->fb->height << 16; 577 + fb_width = fb->width << 16; 578 + fb_height = fb->height << 16; 578 579 579 580 /* Make sure source coordinates are inside the fb. 
*/ 580 - if (state->src_w > fb_width || 581 - state->src_x > fb_width - state->src_w || 582 - state->src_h > fb_height || 583 - state->src_y > fb_height - state->src_h) { 581 + if (new_plane_state->src_w > fb_width || 582 + new_plane_state->src_x > fb_width - new_plane_state->src_w || 583 + new_plane_state->src_h > fb_height || 584 + new_plane_state->src_y > fb_height - new_plane_state->src_h) { 584 585 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates " 585 586 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", 586 587 plane->base.id, plane->name, 587 - state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10, 588 - state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10, 589 - state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10, 590 - state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10, 591 - state->fb->width, state->fb->height); 588 + new_plane_state->src_w >> 16, 589 + ((new_plane_state->src_w & 0xffff) * 15625) >> 10, 590 + new_plane_state->src_h >> 16, 591 + ((new_plane_state->src_h & 0xffff) * 15625) >> 10, 592 + new_plane_state->src_x >> 16, 593 + ((new_plane_state->src_x & 0xffff) * 15625) >> 10, 594 + new_plane_state->src_y >> 16, 595 + ((new_plane_state->src_y & 0xffff) * 15625) >> 10, 596 + fb->width, fb->height); 592 597 return -ENOSPC; 593 598 } 594 599 595 - if (plane_switching_crtc(state->state, plane, state)) { 600 + if (plane_switching_crtc(old_plane_state, new_plane_state)) { 596 601 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n", 597 602 plane->base.id, plane->name); 598 603 return -EINVAL; ··· 936 927 drm_atomic_add_affected_planes(struct drm_atomic_state *state, 937 928 struct drm_crtc *crtc) 938 929 { 930 + const struct drm_crtc_state *old_crtc_state = 931 + drm_atomic_get_old_crtc_state(state, crtc); 939 932 struct drm_plane *plane; 940 933 941 934 WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc)); ··· 945 934 DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n", 946 935 
crtc->base.id, crtc->name, state); 947 936 948 - drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { 937 + drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) { 949 938 struct drm_plane_state *plane_state = 950 939 drm_atomic_get_plane_state(state, plane); 951 940 ··· 972 961 struct drm_device *dev = state->dev; 973 962 struct drm_mode_config *config = &dev->mode_config; 974 963 struct drm_plane *plane; 975 - struct drm_plane_state *plane_state; 964 + struct drm_plane_state *old_plane_state; 965 + struct drm_plane_state *new_plane_state; 976 966 struct drm_crtc *crtc; 977 - struct drm_crtc_state *crtc_state; 967 + struct drm_crtc_state *old_crtc_state; 968 + struct drm_crtc_state *new_crtc_state; 978 969 struct drm_connector *conn; 979 970 struct drm_connector_state *conn_state; 980 971 int i, ret = 0; 981 972 982 973 DRM_DEBUG_ATOMIC("checking %p\n", state); 983 974 984 - for_each_new_plane_in_state(state, plane, plane_state, i) { 985 - ret = drm_atomic_plane_check(plane, plane_state); 975 + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 976 + ret = drm_atomic_plane_check(old_plane_state, new_plane_state); 986 977 if (ret) { 987 978 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n", 988 979 plane->base.id, plane->name); ··· 992 979 } 993 980 } 994 981 995 - for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 996 - ret = drm_atomic_crtc_check(crtc, crtc_state); 982 + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 983 + ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state); 997 984 if (ret) { 998 985 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n", 999 986 crtc->base.id, crtc->name); ··· 1021 1008 } 1022 1009 1023 1010 if (!state->allow_modeset) { 1024 - for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 1025 - if (drm_atomic_crtc_needs_modeset(crtc_state)) { 1011 + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) 
{ 1012 + if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { 1026 1013 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n", 1027 1014 crtc->base.id, crtc->name); 1028 1015 return -EINVAL;
+5 -7
drivers/gpu/drm/drm_client.c
··· 81 81 { 82 82 int ret; 83 83 84 - if (!drm_core_check_feature(dev, DRIVER_MODESET) || 85 - !dev->driver->dumb_create || !dev->driver->gem_prime_vmap) 84 + if (!drm_core_check_feature(dev, DRIVER_MODESET) || !dev->driver->dumb_create) 86 85 return -EOPNOTSUPP; 87 86 88 87 if (funcs && !try_module_get(funcs->owner)) ··· 228 229 { 229 230 struct drm_device *dev = buffer->client->dev; 230 231 231 - if (buffer->vaddr && dev->driver->gem_prime_vunmap) 232 - dev->driver->gem_prime_vunmap(buffer->gem, buffer->vaddr); 232 + drm_gem_vunmap(buffer->gem, buffer->vaddr); 233 233 234 234 if (buffer->gem) 235 235 drm_gem_object_put_unlocked(buffer->gem); ··· 281 283 * fd_install step out of the driver backend hooks, to make that 282 284 * final step optional for internal users. 283 285 */ 284 - vaddr = dev->driver->gem_prime_vmap(obj); 285 - if (!vaddr) { 286 - ret = -ENOMEM; 286 + vaddr = drm_gem_vmap(obj); 287 + if (IS_ERR(vaddr)) { 288 + ret = PTR_ERR(vaddr); 287 289 goto err_delete; 288 290 } 289 291
+1
drivers/gpu/drm/drm_fourcc.c
··· 224 224 { .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, 225 225 { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, 226 226 { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, 227 + { .format = DRM_FORMAT_XYUV8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, 227 228 { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, 228 229 { .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1, 229 230 .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
+100 -9
drivers/gpu/drm/drm_gem.c
··· 257 257 struct drm_gem_object *obj = ptr; 258 258 struct drm_device *dev = obj->dev; 259 259 260 - if (dev->driver->gem_close_object) 260 + if (obj->funcs && obj->funcs->close) 261 + obj->funcs->close(obj, file_priv); 262 + else if (dev->driver->gem_close_object) 261 263 dev->driver->gem_close_object(obj, file_priv); 262 264 263 265 if (drm_core_check_feature(dev, DRIVER_PRIME)) ··· 412 410 if (ret) 413 411 goto err_remove; 414 412 415 - if (dev->driver->gem_open_object) { 413 + if (obj->funcs && obj->funcs->open) { 414 + ret = obj->funcs->open(obj, file_priv); 415 + if (ret) 416 + goto err_revoke; 417 + } else if (dev->driver->gem_open_object) { 416 418 ret = dev->driver->gem_open_object(obj, file_priv); 417 419 if (ret) 418 420 goto err_revoke; ··· 841 835 container_of(kref, struct drm_gem_object, refcount); 842 836 struct drm_device *dev = obj->dev; 843 837 844 - if (dev->driver->gem_free_object_unlocked) { 838 + if (obj->funcs) { 839 + obj->funcs->free(obj); 840 + } else if (dev->driver->gem_free_object_unlocked) { 845 841 dev->driver->gem_free_object_unlocked(obj); 846 842 } else if (dev->driver->gem_free_object) { 847 843 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); ··· 872 864 873 865 dev = obj->dev; 874 866 875 - if (dev->driver->gem_free_object_unlocked) { 876 - kref_put(&obj->refcount, drm_gem_object_free); 877 - } else { 867 + if (dev->driver->gem_free_object) { 878 868 might_lock(&dev->struct_mutex); 879 869 if (kref_put_mutex(&obj->refcount, drm_gem_object_free, 880 870 &dev->struct_mutex)) 881 871 mutex_unlock(&dev->struct_mutex); 872 + } else { 873 + kref_put(&obj->refcount, drm_gem_object_free); 882 874 } 883 875 } 884 876 EXPORT_SYMBOL(drm_gem_object_put_unlocked); ··· 968 960 if (obj_size < vma->vm_end - vma->vm_start) 969 961 return -EINVAL; 970 962 971 - if (!dev->driver->gem_vm_ops) 963 + if (obj->funcs && obj->funcs->vm_ops) 964 + vma->vm_ops = obj->funcs->vm_ops; 965 + else if (dev->driver->gem_vm_ops) 966 + vma->vm_ops = 
dev->driver->gem_vm_ops; 967 + else 972 968 return -EINVAL; 973 969 974 970 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; 975 - vma->vm_ops = dev->driver->gem_vm_ops; 976 971 vma->vm_private_data = obj; 977 972 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 978 973 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); ··· 1077 1066 drm_printf_indent(p, indent, "imported=%s\n", 1078 1067 obj->import_attach ? "yes" : "no"); 1079 1068 1080 - if (obj->dev->driver->gem_print_info) 1069 + if (obj->funcs && obj->funcs->print_info) 1070 + obj->funcs->print_info(p, indent, obj); 1071 + else if (obj->dev->driver->gem_print_info) 1081 1072 obj->dev->driver->gem_print_info(p, indent, obj); 1082 1073 } 1074 + 1075 + /** 1076 + * drm_gem_pin - Pin backing buffer in memory 1077 + * @obj: GEM object 1078 + * 1079 + * Make sure the backing buffer is pinned in memory. 1080 + * 1081 + * Returns: 1082 + * 0 on success or a negative error code on failure. 1083 + */ 1084 + int drm_gem_pin(struct drm_gem_object *obj) 1085 + { 1086 + if (obj->funcs && obj->funcs->pin) 1087 + return obj->funcs->pin(obj); 1088 + else if (obj->dev->driver->gem_prime_pin) 1089 + return obj->dev->driver->gem_prime_pin(obj); 1090 + else 1091 + return 0; 1092 + } 1093 + EXPORT_SYMBOL(drm_gem_pin); 1094 + 1095 + /** 1096 + * drm_gem_unpin - Unpin backing buffer from memory 1097 + * @obj: GEM object 1098 + * 1099 + * Relax the requirement that the backing buffer is pinned in memory. 
1100 + */ 1101 + void drm_gem_unpin(struct drm_gem_object *obj) 1102 + { 1103 + if (obj->funcs && obj->funcs->unpin) 1104 + obj->funcs->unpin(obj); 1105 + else if (obj->dev->driver->gem_prime_unpin) 1106 + obj->dev->driver->gem_prime_unpin(obj); 1107 + } 1108 + EXPORT_SYMBOL(drm_gem_unpin); 1109 + 1110 + /** 1111 + * drm_gem_vmap - Map buffer into kernel virtual address space 1112 + * @obj: GEM object 1113 + * 1114 + * Returns: 1115 + * A virtual pointer to a newly created GEM object or an ERR_PTR-encoded negative 1116 + * error code on failure. 1117 + */ 1118 + void *drm_gem_vmap(struct drm_gem_object *obj) 1119 + { 1120 + void *vaddr; 1121 + 1122 + if (obj->funcs && obj->funcs->vmap) 1123 + vaddr = obj->funcs->vmap(obj); 1124 + else if (obj->dev->driver->gem_prime_vmap) 1125 + vaddr = obj->dev->driver->gem_prime_vmap(obj); 1126 + else 1127 + vaddr = ERR_PTR(-EOPNOTSUPP); 1128 + 1129 + if (!vaddr) 1130 + vaddr = ERR_PTR(-ENOMEM); 1131 + 1132 + return vaddr; 1133 + } 1134 + EXPORT_SYMBOL(drm_gem_vmap); 1135 + 1136 + /** 1137 + * drm_gem_vunmap - Remove buffer mapping from kernel virtual address space 1138 + * @obj: GEM object 1139 + * @vaddr: Virtual address (can be NULL) 1140 + */ 1141 + void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr) 1142 + { 1143 + if (!vaddr) 1144 + return; 1145 + 1146 + if (obj->funcs && obj->funcs->vunmap) 1147 + obj->funcs->vunmap(obj, vaddr); 1148 + else if (obj->dev->driver->gem_prime_vunmap) 1149 + obj->dev->driver->gem_prime_vunmap(obj, vaddr); 1150 + } 1151 + EXPORT_SYMBOL(drm_gem_vunmap);
+86
drivers/gpu/drm/drm_gem_cma_helper.c
··· 176 176 * 177 177 * This function frees the backing memory of the CMA GEM object, cleans up the 178 178 * GEM object state and frees the memory used to store the object itself. 179 + * If the buffer is imported and the virtual address is set, it is released. 179 180 * Drivers using the CMA helpers should set this as their 180 181 * &drm_driver.gem_free_object_unlocked callback. 181 182 */ ··· 190 189 dma_free_wc(gem_obj->dev->dev, cma_obj->base.size, 191 190 cma_obj->vaddr, cma_obj->paddr); 192 191 } else if (gem_obj->import_attach) { 192 + if (cma_obj->vaddr) 193 + dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr); 193 194 drm_prime_gem_destroy(gem_obj, cma_obj->sgt); 194 195 } 195 196 ··· 578 575 /* Nothing to do */ 579 576 } 580 577 EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap); 578 + 579 + static const struct drm_gem_object_funcs drm_cma_gem_default_funcs = { 580 + .free = drm_gem_cma_free_object, 581 + .print_info = drm_gem_cma_print_info, 582 + .get_sg_table = drm_gem_cma_prime_get_sg_table, 583 + .vmap = drm_gem_cma_prime_vmap, 584 + .vm_ops = &drm_gem_cma_vm_ops, 585 + }; 586 + 587 + /** 588 + * drm_cma_gem_create_object_default_funcs - Create a CMA GEM object with a 589 + * default function table 590 + * @dev: DRM device 591 + * @size: Size of the object to allocate 592 + * 593 + * This sets the GEM object functions to the default CMA helper functions. 594 + * This function can be used as the &drm_driver.gem_create_object callback. 595 + * 596 + * Returns: 597 + * A pointer to a allocated GEM object or an error pointer on failure. 
598 + */ 599 + struct drm_gem_object * 600 + drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size) 601 + { 602 + struct drm_gem_cma_object *cma_obj; 603 + 604 + cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); 605 + if (!cma_obj) 606 + return NULL; 607 + 608 + cma_obj->base.funcs = &drm_cma_gem_default_funcs; 609 + 610 + return &cma_obj->base; 611 + } 612 + EXPORT_SYMBOL(drm_cma_gem_create_object_default_funcs); 613 + 614 + /** 615 + * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's 616 + * scatter/gather table and get the virtual address of the buffer 617 + * @dev: DRM device 618 + * @attach: DMA-BUF attachment 619 + * @sgt: Scatter/gather table of pinned pages 620 + * 621 + * This function imports a scatter/gather table using 622 + * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel 623 + * virtual address. This ensures that a CMA GEM object always has its virtual 624 + * address set. This address is released when the object is freed. 625 + * 626 + * This function can be used as the &drm_driver.gem_prime_import_sg_table 627 + * callback. The DRM_GEM_CMA_VMAP_DRIVER_OPS() macro provides a shortcut to set 628 + * the necessary DRM driver operations. 629 + * 630 + * Returns: 631 + * A pointer to a newly created GEM object or an ERR_PTR-encoded negative 632 + * error code on failure. 
633 + */ 634 + struct drm_gem_object * 635 + drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev, 636 + struct dma_buf_attachment *attach, 637 + struct sg_table *sgt) 638 + { 639 + struct drm_gem_cma_object *cma_obj; 640 + struct drm_gem_object *obj; 641 + void *vaddr; 642 + 643 + vaddr = dma_buf_vmap(attach->dmabuf); 644 + if (!vaddr) { 645 + DRM_ERROR("Failed to vmap PRIME buffer\n"); 646 + return ERR_PTR(-ENOMEM); 647 + } 648 + 649 + obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt); 650 + if (IS_ERR(obj)) { 651 + dma_buf_vunmap(attach->dmabuf, vaddr); 652 + return obj; 653 + } 654 + 655 + cma_obj = to_drm_gem_cma_obj(obj); 656 + cma_obj->vaddr = vaddr; 657 + 658 + return obj; 659 + } 660 + EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
+1 -1
drivers/gpu/drm/drm_panel_orientation_quirks.c
··· 63 63 .width = 720, 64 64 .height = 1280, 65 65 .bios_dates = (const char * const []){ 66 - "12/07/2017", "05/24/2018", NULL }, 66 + "12/07/2017", "05/24/2018", "06/29/2018", NULL }, 67 67 .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, 68 68 }; 69 69
+60 -19
drivers/gpu/drm/drm_prime.c
··· 199 199 { 200 200 struct drm_prime_attachment *prime_attach; 201 201 struct drm_gem_object *obj = dma_buf->priv; 202 - struct drm_device *dev = obj->dev; 203 202 204 203 prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL); 205 204 if (!prime_attach) ··· 207 208 prime_attach->dir = DMA_NONE; 208 209 attach->priv = prime_attach; 209 210 210 - if (!dev->driver->gem_prime_pin) 211 - return 0; 212 - 213 - return dev->driver->gem_prime_pin(obj); 211 + return drm_gem_pin(obj); 214 212 } 215 213 EXPORT_SYMBOL(drm_gem_map_attach); 216 214 ··· 224 228 { 225 229 struct drm_prime_attachment *prime_attach = attach->priv; 226 230 struct drm_gem_object *obj = dma_buf->priv; 227 - struct drm_device *dev = obj->dev; 228 231 229 232 if (prime_attach) { 230 233 struct sg_table *sgt = prime_attach->sgt; ··· 242 247 attach->priv = NULL; 243 248 } 244 249 245 - if (dev->driver->gem_prime_unpin) 246 - dev->driver->gem_prime_unpin(obj); 250 + drm_gem_unpin(obj); 247 251 } 248 252 EXPORT_SYMBOL(drm_gem_map_detach); 249 253 ··· 304 310 if (WARN_ON(prime_attach->dir != DMA_NONE)) 305 311 return ERR_PTR(-EBUSY); 306 312 307 - sgt = obj->dev->driver->gem_prime_get_sg_table(obj); 313 + if (obj->funcs) 314 + sgt = obj->funcs->get_sg_table(obj); 315 + else 316 + sgt = obj->dev->driver->gem_prime_get_sg_table(obj); 308 317 309 318 if (!IS_ERR(sgt)) { 310 319 if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, ··· 403 406 void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) 404 407 { 405 408 struct drm_gem_object *obj = dma_buf->priv; 406 - struct drm_device *dev = obj->dev; 409 + void *vaddr; 407 410 408 - if (dev->driver->gem_prime_vmap) 409 - return dev->driver->gem_prime_vmap(obj); 410 - else 411 - return NULL; 411 + vaddr = drm_gem_vmap(obj); 412 + if (IS_ERR(vaddr)) 413 + vaddr = NULL; 414 + 415 + return vaddr; 412 416 } 413 417 EXPORT_SYMBOL(drm_gem_dmabuf_vmap); 414 418 ··· 424 426 void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) 425 427 { 426 428 struct 
drm_gem_object *obj = dma_buf->priv; 427 - struct drm_device *dev = obj->dev; 428 429 429 - if (dev->driver->gem_prime_vunmap) 430 - dev->driver->gem_prime_vunmap(obj, vaddr); 430 + drm_gem_vunmap(obj, vaddr); 431 431 } 432 432 EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); 433 433 ··· 525 529 return dmabuf; 526 530 } 527 531 528 - dmabuf = dev->driver->gem_prime_export(dev, obj, flags); 532 + if (obj->funcs && obj->funcs->export) 533 + dmabuf = obj->funcs->export(obj, flags); 534 + else if (dev->driver->gem_prime_export) 535 + dmabuf = dev->driver->gem_prime_export(dev, obj, flags); 536 + else 537 + dmabuf = drm_gem_prime_export(dev, obj, flags); 529 538 if (IS_ERR(dmabuf)) { 530 539 /* normally the created dma-buf takes ownership of the ref, 531 540 * but if that fails then drop the ref ··· 650 649 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); 651 650 652 651 /** 652 + * drm_gem_prime_mmap - PRIME mmap function for GEM drivers 653 + * @obj: GEM object 654 + * @vma: Virtual address range 655 + * 656 + * This function sets up a userspace mapping for PRIME exported buffers using 657 + * the same codepath that is used for regular GEM buffer mapping on the DRM fd. 658 + * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is 659 + * called to set up the mapping. 660 + * 661 + * Drivers can use this as their &drm_driver.gem_prime_mmap callback. 
662 + */ 663 + int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) 664 + { 665 + /* Used by drm_gem_mmap() to lookup the GEM object */ 666 + struct drm_file priv = { 667 + .minor = obj->dev->primary, 668 + }; 669 + struct file fil = { 670 + .private_data = &priv, 671 + }; 672 + int ret; 673 + 674 + ret = drm_vma_node_allow(&obj->vma_node, &priv); 675 + if (ret) 676 + return ret; 677 + 678 + vma->vm_pgoff += drm_vma_node_start(&obj->vma_node); 679 + 680 + ret = obj->dev->driver->fops->mmap(&fil, vma); 681 + 682 + drm_vma_node_revoke(&obj->vma_node, &priv); 683 + 684 + return ret; 685 + } 686 + EXPORT_SYMBOL(drm_gem_prime_mmap); 687 + 688 + /** 653 689 * drm_gem_prime_import_dev - core implementation of the import callback 654 690 * @dev: drm_device to import into 655 691 * @dma_buf: dma-buf object to import ··· 800 762 801 763 /* never seen this one, need to import */ 802 764 mutex_lock(&dev->object_name_lock); 803 - obj = dev->driver->gem_prime_import(dev, dma_buf); 765 + if (dev->driver->gem_prime_import) 766 + obj = dev->driver->gem_prime_import(dev, dma_buf); 767 + else 768 + obj = drm_gem_prime_import(dev, dma_buf); 804 769 if (IS_ERR(obj)) { 805 770 ret = PTR_ERR(obj); 806 771 goto out_unlock;
+72 -289
drivers/gpu/drm/drm_syncobj.c
··· 56 56 #include "drm_internal.h" 57 57 #include <drm/drm_syncobj.h> 58 58 59 - /* merge normal syncobj to timeline syncobj, the point interval is 1 */ 60 - #define DRM_SYNCOBJ_BINARY_POINT 1 61 - 62 59 struct drm_syncobj_stub_fence { 63 60 struct dma_fence base; 64 61 spinlock_t lock; ··· 71 74 .get_timeline_name = drm_syncobj_stub_fence_get_name, 72 75 }; 73 76 74 - struct drm_syncobj_signal_pt { 75 - struct dma_fence_array *fence_array; 76 - u64 value; 77 - struct list_head list; 78 - }; 79 77 80 - static DEFINE_SPINLOCK(signaled_fence_lock); 81 - static struct dma_fence signaled_fence; 82 - 83 - static struct dma_fence *drm_syncobj_get_stub_fence(void) 84 - { 85 - spin_lock(&signaled_fence_lock); 86 - if (!signaled_fence.ops) { 87 - dma_fence_init(&signaled_fence, 88 - &drm_syncobj_stub_fence_ops, 89 - &signaled_fence_lock, 90 - 0, 0); 91 - dma_fence_signal_locked(&signaled_fence); 92 - } 93 - spin_unlock(&signaled_fence_lock); 94 - 95 - return dma_fence_get(&signaled_fence); 96 - } 97 78 /** 98 79 * drm_syncobj_find - lookup and reference a sync object. 
99 80 * @file_private: drm file private pointer ··· 98 123 } 99 124 EXPORT_SYMBOL(drm_syncobj_find); 100 125 101 - static struct dma_fence * 102 - drm_syncobj_find_signal_pt_for_point(struct drm_syncobj *syncobj, 103 - uint64_t point) 104 - { 105 - struct drm_syncobj_signal_pt *signal_pt; 106 - 107 - if ((syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) && 108 - (point <= syncobj->timeline)) 109 - return drm_syncobj_get_stub_fence(); 110 - 111 - list_for_each_entry(signal_pt, &syncobj->signal_pt_list, list) { 112 - if (point > signal_pt->value) 113 - continue; 114 - if ((syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) && 115 - (point != signal_pt->value)) 116 - continue; 117 - return dma_fence_get(&signal_pt->fence_array->base); 118 - } 119 - return NULL; 120 - } 121 - 122 126 static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj, 123 127 struct drm_syncobj_cb *cb, 124 128 drm_syncobj_func_t func) ··· 106 152 list_add_tail(&cb->node, &syncobj->cb_list); 107 153 } 108 154 109 - static void drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj, 110 - struct dma_fence **fence, 111 - struct drm_syncobj_cb *cb, 112 - drm_syncobj_func_t func) 155 + static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj, 156 + struct dma_fence **fence, 157 + struct drm_syncobj_cb *cb, 158 + drm_syncobj_func_t func) 113 159 { 114 - u64 pt_value = 0; 160 + int ret; 115 161 116 - WARN_ON(*fence); 162 + *fence = drm_syncobj_fence_get(syncobj); 163 + if (*fence) 164 + return 1; 117 165 118 - if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) { 119 - /*BINARY syncobj always wait on last pt */ 120 - pt_value = syncobj->signal_point; 121 - 122 - if (pt_value == 0) 123 - pt_value += DRM_SYNCOBJ_BINARY_POINT; 124 - } 125 - 126 - mutex_lock(&syncobj->cb_mutex); 127 - spin_lock(&syncobj->pt_lock); 128 - *fence = drm_syncobj_find_signal_pt_for_point(syncobj, pt_value); 129 - spin_unlock(&syncobj->pt_lock); 130 - if (!*fence) 166 + spin_lock(&syncobj->lock); 167 + /* 
We've already tried once to get a fence and failed. Now that we 168 + * have the lock, try one more time just to be sure we don't add a 169 + * callback when a fence has already been set. 170 + */ 171 + if (syncobj->fence) { 172 + *fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 173 + lockdep_is_held(&syncobj->lock))); 174 + ret = 1; 175 + } else { 176 + *fence = NULL; 131 177 drm_syncobj_add_callback_locked(syncobj, cb, func); 132 - mutex_unlock(&syncobj->cb_mutex); 133 - } 134 - 135 - static void drm_syncobj_remove_callback(struct drm_syncobj *syncobj, 136 - struct drm_syncobj_cb *cb) 137 - { 138 - mutex_lock(&syncobj->cb_mutex); 139 - list_del_init(&cb->node); 140 - mutex_unlock(&syncobj->cb_mutex); 141 - } 142 - 143 - static void drm_syncobj_init(struct drm_syncobj *syncobj) 144 - { 145 - spin_lock(&syncobj->pt_lock); 146 - syncobj->timeline_context = dma_fence_context_alloc(1); 147 - syncobj->timeline = 0; 148 - syncobj->signal_point = 0; 149 - init_waitqueue_head(&syncobj->wq); 150 - 151 - INIT_LIST_HEAD(&syncobj->signal_pt_list); 152 - spin_unlock(&syncobj->pt_lock); 153 - } 154 - 155 - static void drm_syncobj_fini(struct drm_syncobj *syncobj) 156 - { 157 - struct drm_syncobj_signal_pt *signal_pt = NULL, *tmp; 158 - 159 - spin_lock(&syncobj->pt_lock); 160 - list_for_each_entry_safe(signal_pt, tmp, 161 - &syncobj->signal_pt_list, list) { 162 - list_del(&signal_pt->list); 163 - dma_fence_put(&signal_pt->fence_array->base); 164 - kfree(signal_pt); 178 + ret = 0; 165 179 } 166 - spin_unlock(&syncobj->pt_lock); 167 - } 180 + spin_unlock(&syncobj->lock); 168 181 169 - static int drm_syncobj_create_signal_pt(struct drm_syncobj *syncobj, 170 - struct dma_fence *fence, 171 - u64 point) 172 - { 173 - struct drm_syncobj_signal_pt *signal_pt = 174 - kzalloc(sizeof(struct drm_syncobj_signal_pt), GFP_KERNEL); 175 - struct drm_syncobj_signal_pt *tail_pt; 176 - struct dma_fence **fences; 177 - int num_fences = 0; 178 - int ret = 0, i; 179 - 180 - if 
(!signal_pt) 181 - return -ENOMEM; 182 - if (!fence) 183 - goto out; 184 - 185 - fences = kmalloc_array(sizeof(void *), 2, GFP_KERNEL); 186 - if (!fences) { 187 - ret = -ENOMEM; 188 - goto out; 189 - } 190 - fences[num_fences++] = dma_fence_get(fence); 191 - /* timeline syncobj must take this dependency */ 192 - if (syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) { 193 - spin_lock(&syncobj->pt_lock); 194 - if (!list_empty(&syncobj->signal_pt_list)) { 195 - tail_pt = list_last_entry(&syncobj->signal_pt_list, 196 - struct drm_syncobj_signal_pt, list); 197 - fences[num_fences++] = 198 - dma_fence_get(&tail_pt->fence_array->base); 199 - } 200 - spin_unlock(&syncobj->pt_lock); 201 - } 202 - signal_pt->fence_array = dma_fence_array_create(num_fences, fences, 203 - syncobj->timeline_context, 204 - point, false); 205 - if (!signal_pt->fence_array) { 206 - ret = -ENOMEM; 207 - goto fail; 208 - } 209 - 210 - spin_lock(&syncobj->pt_lock); 211 - if (syncobj->signal_point >= point) { 212 - DRM_WARN("A later signal is ready!"); 213 - spin_unlock(&syncobj->pt_lock); 214 - goto exist; 215 - } 216 - signal_pt->value = point; 217 - list_add_tail(&signal_pt->list, &syncobj->signal_pt_list); 218 - syncobj->signal_point = point; 219 - spin_unlock(&syncobj->pt_lock); 220 - wake_up_all(&syncobj->wq); 221 - 222 - return 0; 223 - exist: 224 - dma_fence_put(&signal_pt->fence_array->base); 225 - fail: 226 - for (i = 0; i < num_fences; i++) 227 - dma_fence_put(fences[i]); 228 - kfree(fences); 229 - out: 230 - kfree(signal_pt); 231 182 return ret; 232 183 } 233 184 234 - static void drm_syncobj_garbage_collection(struct drm_syncobj *syncobj) 185 + void drm_syncobj_add_callback(struct drm_syncobj *syncobj, 186 + struct drm_syncobj_cb *cb, 187 + drm_syncobj_func_t func) 235 188 { 236 - struct drm_syncobj_signal_pt *signal_pt, *tmp, *tail_pt; 237 - 238 - spin_lock(&syncobj->pt_lock); 239 - tail_pt = list_last_entry(&syncobj->signal_pt_list, 240 - struct drm_syncobj_signal_pt, 241 - list); 242 - 
list_for_each_entry_safe(signal_pt, tmp, 243 - &syncobj->signal_pt_list, list) { 244 - if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY && 245 - signal_pt == tail_pt) 246 - continue; 247 - if (dma_fence_is_signaled(&signal_pt->fence_array->base)) { 248 - syncobj->timeline = signal_pt->value; 249 - list_del(&signal_pt->list); 250 - dma_fence_put(&signal_pt->fence_array->base); 251 - kfree(signal_pt); 252 - } else { 253 - /*signal_pt is in order in list, from small to big, so 254 - * the later must not be signal either */ 255 - break; 256 - } 257 - } 258 - 259 - spin_unlock(&syncobj->pt_lock); 189 + spin_lock(&syncobj->lock); 190 + drm_syncobj_add_callback_locked(syncobj, cb, func); 191 + spin_unlock(&syncobj->lock); 260 192 } 193 + 194 + void drm_syncobj_remove_callback(struct drm_syncobj *syncobj, 195 + struct drm_syncobj_cb *cb) 196 + { 197 + spin_lock(&syncobj->lock); 198 + list_del_init(&cb->node); 199 + spin_unlock(&syncobj->lock); 200 + } 201 + 261 202 /** 262 203 * drm_syncobj_replace_fence - replace fence in a sync object. 
263 204 * @syncobj: Sync object to replace fence in ··· 165 316 u64 point, 166 317 struct dma_fence *fence) 167 318 { 168 - u64 pt_value = point; 319 + struct dma_fence *old_fence; 320 + struct drm_syncobj_cb *cur, *tmp; 169 321 170 - drm_syncobj_garbage_collection(syncobj); 171 - if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) { 172 - if (!fence) { 173 - drm_syncobj_fini(syncobj); 174 - drm_syncobj_init(syncobj); 175 - return; 176 - } 177 - pt_value = syncobj->signal_point + 178 - DRM_SYNCOBJ_BINARY_POINT; 179 - } 180 - drm_syncobj_create_signal_pt(syncobj, fence, pt_value); 181 - if (fence) { 182 - struct drm_syncobj_cb *cur, *tmp; 183 - LIST_HEAD(cb_list); 322 + if (fence) 323 + dma_fence_get(fence); 184 324 185 - mutex_lock(&syncobj->cb_mutex); 325 + spin_lock(&syncobj->lock); 326 + 327 + old_fence = rcu_dereference_protected(syncobj->fence, 328 + lockdep_is_held(&syncobj->lock)); 329 + rcu_assign_pointer(syncobj->fence, fence); 330 + 331 + if (fence != old_fence) { 186 332 list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) { 187 333 list_del_init(&cur->node); 188 334 cur->func(syncobj, cur); 189 335 } 190 - mutex_unlock(&syncobj->cb_mutex); 191 336 } 337 + 338 + spin_unlock(&syncobj->lock); 339 + 340 + dma_fence_put(old_fence); 192 341 } 193 342 EXPORT_SYMBOL(drm_syncobj_replace_fence); 194 343 ··· 209 362 return 0; 210 363 } 211 364 212 - static int 213 - drm_syncobj_point_get(struct drm_syncobj *syncobj, u64 point, u64 flags, 214 - struct dma_fence **fence) 215 - { 216 - int ret = 0; 217 - 218 - if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { 219 - ret = wait_event_interruptible(syncobj->wq, 220 - point <= syncobj->signal_point); 221 - if (ret < 0) 222 - return ret; 223 - } 224 - spin_lock(&syncobj->pt_lock); 225 - *fence = drm_syncobj_find_signal_pt_for_point(syncobj, point); 226 - if (!*fence) 227 - ret = -EINVAL; 228 - spin_unlock(&syncobj->pt_lock); 229 - return ret; 230 - } 231 - 232 - /** 233 - * drm_syncobj_search_fence - lookup and 
reference the fence in a sync object or 234 - * in a timeline point 235 - * @syncobj: sync object pointer 236 - * @point: timeline point 237 - * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not 238 - * @fence: out parameter for the fence 239 - * 240 - * if flags is DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, the function will block 241 - * here until specific timeline points is reached. 242 - * if not, you need a submit thread and block in userspace until all future 243 - * timeline points have materialized, only then you can submit to the kernel, 244 - * otherwise, function will fail to return fence. 245 - * 246 - * Returns 0 on success or a negative error value on failure. On success @fence 247 - * contains a reference to the fence, which must be released by calling 248 - * dma_fence_put(). 249 - */ 250 - int drm_syncobj_search_fence(struct drm_syncobj *syncobj, u64 point, 251 - u64 flags, struct dma_fence **fence) 252 - { 253 - u64 pt_value = point; 254 - 255 - if (!syncobj) 256 - return -ENOENT; 257 - 258 - drm_syncobj_garbage_collection(syncobj); 259 - if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) { 260 - /*BINARY syncobj always wait on last pt */ 261 - pt_value = syncobj->signal_point; 262 - 263 - if (pt_value == 0) 264 - pt_value += DRM_SYNCOBJ_BINARY_POINT; 265 - } 266 - return drm_syncobj_point_get(syncobj, pt_value, flags, fence); 267 - } 268 - EXPORT_SYMBOL(drm_syncobj_search_fence); 269 - 270 365 /** 271 366 * drm_syncobj_find_fence - lookup and reference the fence in a sync object 272 367 * @file_private: drm file private pointer ··· 218 429 * @fence: out parameter for the fence 219 430 * 220 431 * This is just a convenience function that combines drm_syncobj_find() and 221 - * drm_syncobj_lookup_fence(). 432 + * drm_syncobj_fence_get(). 222 433 * 223 434 * Returns 0 on success or a negative error value on failure. 
On success @fence 224 435 * contains a reference to the fence, which must be released by calling ··· 229 440 struct dma_fence **fence) 230 441 { 231 442 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); 232 - int ret; 443 + int ret = 0; 233 444 234 - ret = drm_syncobj_search_fence(syncobj, point, flags, fence); 235 - if (syncobj) 236 - drm_syncobj_put(syncobj); 445 + if (!syncobj) 446 + return -ENOENT; 447 + 448 + *fence = drm_syncobj_fence_get(syncobj); 449 + if (!*fence) { 450 + ret = -EINVAL; 451 + } 452 + drm_syncobj_put(syncobj); 237 453 return ret; 238 454 } 239 455 EXPORT_SYMBOL(drm_syncobj_find_fence); ··· 254 460 struct drm_syncobj *syncobj = container_of(kref, 255 461 struct drm_syncobj, 256 462 refcount); 257 - drm_syncobj_fini(syncobj); 463 + drm_syncobj_replace_fence(syncobj, 0, NULL); 258 464 kfree(syncobj); 259 465 } 260 466 EXPORT_SYMBOL(drm_syncobj_free); ··· 283 489 284 490 kref_init(&syncobj->refcount); 285 491 INIT_LIST_HEAD(&syncobj->cb_list); 286 - spin_lock_init(&syncobj->pt_lock); 287 - mutex_init(&syncobj->cb_mutex); 288 - if (flags & DRM_SYNCOBJ_CREATE_TYPE_TIMELINE) 289 - syncobj->type = DRM_SYNCOBJ_TYPE_TIMELINE; 290 - else 291 - syncobj->type = DRM_SYNCOBJ_TYPE_BINARY; 292 - drm_syncobj_init(syncobj); 492 + spin_lock_init(&syncobj->lock); 293 493 294 494 if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) { 295 495 ret = drm_syncobj_assign_null_handle(syncobj); ··· 566 778 return -EOPNOTSUPP; 567 779 568 780 /* no valid flags yet */ 569 - if (args->flags & ~(DRM_SYNCOBJ_CREATE_SIGNALED | 570 - DRM_SYNCOBJ_CREATE_TYPE_TIMELINE)) 781 + if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED) 571 782 return -EINVAL; 572 783 573 784 return drm_syncobj_create_as_handle(file_private, ··· 659 872 struct syncobj_wait_entry *wait = 660 873 container_of(cb, struct syncobj_wait_entry, syncobj_cb); 661 874 662 - drm_syncobj_search_fence(syncobj, 0, 0, &wait->fence); 663 - 875 + /* This happens inside the syncobj lock */ 876 + wait->fence = 
dma_fence_get(rcu_dereference_protected(syncobj->fence, 877 + lockdep_is_held(&syncobj->lock))); 664 878 wake_up_process(wait->task); 665 879 } 666 880 ··· 687 899 signaled_count = 0; 688 900 for (i = 0; i < count; ++i) { 689 901 entries[i].task = current; 690 - drm_syncobj_search_fence(syncobjs[i], 0, 0, 691 - &entries[i].fence); 902 + entries[i].fence = drm_syncobj_fence_get(syncobjs[i]); 692 903 if (!entries[i].fence) { 693 904 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { 694 905 continue; ··· 718 931 719 932 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { 720 933 for (i = 0; i < count; ++i) { 721 - if (entries[i].fence) 722 - continue; 723 - 724 934 drm_syncobj_fence_get_or_add_callback(syncobjs[i], 725 935 &entries[i].fence, 726 936 &entries[i].syncobj_cb, ··· 949 1165 if (ret < 0) 950 1166 return ret; 951 1167 952 - for (i = 0; i < args->count_handles; i++) { 953 - drm_syncobj_fini(syncobjs[i]); 954 - drm_syncobj_init(syncobjs[i]); 955 - } 1168 + for (i = 0; i < args->count_handles; i++) 1169 + drm_syncobj_replace_fence(syncobjs[i], 0, NULL); 1170 + 956 1171 drm_syncobj_array_free(syncobjs, args->count_handles); 957 1172 958 - return ret; 1173 + return 0; 959 1174 } 960 1175 961 1176 int
+1 -1
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 2157 2157 if (!(flags & I915_EXEC_FENCE_WAIT)) 2158 2158 continue; 2159 2159 2160 - drm_syncobj_search_fence(syncobj, 0, 0, &fence); 2160 + fence = drm_syncobj_fence_get(syncobj); 2161 2161 if (!fence) 2162 2162 return -EINVAL; 2163 2163
+1
drivers/gpu/drm/meson/Kconfig
··· 7 7 select DRM_GEM_CMA_HELPER 8 8 select VIDEOMODE_HELPERS 9 9 select REGMAP_MMIO 10 + select MESON_CANVAS 10 11 11 12 config DRM_MESON_DW_HDMI 12 13 tristate "HDMI Synopsys Controller support for Amlogic Meson Display"
+1 -1
drivers/gpu/drm/meson/Makefile
··· 1 1 meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o 2 - meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o 2 + meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o meson_overlay.o 3 3 4 4 obj-$(CONFIG_DRM_MESON) += meson-drm.o 5 5 obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
+5 -2
drivers/gpu/drm/meson/meson_canvas.c
··· 39 39 #define CANVAS_WIDTH_HBIT 0 40 40 #define CANVAS_HEIGHT_BIT 9 41 41 #define CANVAS_BLKMODE_BIT 24 42 + #define CANVAS_ENDIAN_BIT 26 42 43 #define DMC_CAV_LUT_ADDR 0x50 /* 0x14 offset in data sheet */ 43 44 #define CANVAS_LUT_WR_EN (0x2 << 8) 44 45 #define CANVAS_LUT_RD_EN (0x1 << 8) ··· 48 47 uint32_t canvas_index, uint32_t addr, 49 48 uint32_t stride, uint32_t height, 50 49 unsigned int wrap, 51 - unsigned int blkmode) 50 + unsigned int blkmode, 51 + unsigned int endian) 52 52 { 53 53 unsigned int val; 54 54 ··· 62 60 CANVAS_WIDTH_HBIT) | 63 61 (height << CANVAS_HEIGHT_BIT) | 64 62 (wrap << 22) | 65 - (blkmode << CANVAS_BLKMODE_BIT)); 63 + (blkmode << CANVAS_BLKMODE_BIT) | 64 + (endian << CANVAS_ENDIAN_BIT)); 66 65 67 66 regmap_write(priv->dmc, DMC_CAV_LUT_ADDR, 68 67 CANVAS_LUT_WR_EN | canvas_index);
+10 -1
drivers/gpu/drm/meson/meson_canvas.h
··· 23 23 #define __MESON_CANVAS_H 24 24 25 25 #define MESON_CANVAS_ID_OSD1 0x4e 26 + #define MESON_CANVAS_ID_VD1_0 0x60 27 + #define MESON_CANVAS_ID_VD1_1 0x61 28 + #define MESON_CANVAS_ID_VD1_2 0x62 26 29 27 30 /* Canvas configuration. */ 28 31 #define MESON_CANVAS_WRAP_NONE 0x00 ··· 36 33 #define MESON_CANVAS_BLKMODE_32x32 0x01 37 34 #define MESON_CANVAS_BLKMODE_64x64 0x02 38 35 36 + #define MESON_CANVAS_ENDIAN_SWAP16 0x1 37 + #define MESON_CANVAS_ENDIAN_SWAP32 0x3 38 + #define MESON_CANVAS_ENDIAN_SWAP64 0x7 39 + #define MESON_CANVAS_ENDIAN_SWAP128 0xf 40 + 39 41 void meson_canvas_setup(struct meson_drm *priv, 40 42 uint32_t canvas_index, uint32_t addr, 41 43 uint32_t stride, uint32_t height, 42 44 unsigned int wrap, 43 - unsigned int blkmode); 45 + unsigned int blkmode, 46 + unsigned int endian); 44 47 45 48 #endif /* __MESON_CANVAS_H */
+243 -20
drivers/gpu/drm/meson/meson_crtc.c
··· 25 25 #include <linux/module.h> 26 26 #include <linux/mutex.h> 27 27 #include <linux/platform_device.h> 28 + #include <linux/bitfield.h> 28 29 #include <drm/drmP.h> 29 30 #include <drm/drm_atomic.h> 30 31 #include <drm/drm_atomic_helper.h> ··· 99 98 writel(crtc_state->mode.hdisplay, 100 99 priv->io_base + _REG(VPP_POSTBLEND_H_SIZE)); 101 100 101 + /* VD1 Preblend vertical start/end */ 102 + writel(FIELD_PREP(GENMASK(11, 0), 2303), 103 + priv->io_base + _REG(VPP_PREBLEND_VD1_V_START_END)); 104 + 102 105 writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE, 103 106 priv->io_base + _REG(VPP_MISC)); 104 107 ··· 115 110 struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 116 111 struct meson_drm *priv = meson_crtc->priv; 117 112 113 + DRM_DEBUG_DRIVER("\n"); 114 + 118 115 priv->viu.osd1_enabled = false; 119 116 priv->viu.osd1_commit = false; 120 117 118 + priv->viu.vd1_enabled = false; 119 + priv->viu.vd1_commit = false; 120 + 121 121 /* Disable VPP Postblend */ 122 - writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0, 122 + writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_VD1_POSTBLEND | 123 + VPP_VD1_PREBLEND | VPP_POSTBLEND_ENABLE, 0, 123 124 priv->io_base + _REG(VPP_MISC)); 124 125 125 126 if (crtc->state->event && !crtc->state->active) { ··· 160 149 struct meson_drm *priv = meson_crtc->priv; 161 150 162 151 priv->viu.osd1_commit = true; 152 + priv->viu.vd1_commit = true; 163 153 } 164 154 165 155 static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = { ··· 189 177 priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W3)); 190 178 writel_relaxed(priv->viu.osd1_blk0_cfg[4], 191 179 priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W4)); 180 + writel_relaxed(priv->viu.osd_sc_ctrl0, 181 + priv->io_base + _REG(VPP_OSD_SC_CTRL0)); 182 + writel_relaxed(priv->viu.osd_sc_i_wh_m1, 183 + priv->io_base + _REG(VPP_OSD_SCI_WH_M1)); 184 + writel_relaxed(priv->viu.osd_sc_o_h_start_end, 185 + priv->io_base + _REG(VPP_OSD_SCO_H_START_END)); 186 + 
writel_relaxed(priv->viu.osd_sc_o_v_start_end, 187 + priv->io_base + _REG(VPP_OSD_SCO_V_START_END)); 188 + writel_relaxed(priv->viu.osd_sc_v_ini_phase, 189 + priv->io_base + _REG(VPP_OSD_VSC_INI_PHASE)); 190 + writel_relaxed(priv->viu.osd_sc_v_phase_step, 191 + priv->io_base + _REG(VPP_OSD_VSC_PHASE_STEP)); 192 + writel_relaxed(priv->viu.osd_sc_h_ini_phase, 193 + priv->io_base + _REG(VPP_OSD_HSC_INI_PHASE)); 194 + writel_relaxed(priv->viu.osd_sc_h_phase_step, 195 + priv->io_base + _REG(VPP_OSD_HSC_PHASE_STEP)); 196 + writel_relaxed(priv->viu.osd_sc_h_ctrl0, 197 + priv->io_base + _REG(VPP_OSD_HSC_CTRL0)); 198 + writel_relaxed(priv->viu.osd_sc_v_ctrl0, 199 + priv->io_base + _REG(VPP_OSD_VSC_CTRL0)); 192 200 193 - /* If output is interlace, make use of the Scaler */ 194 - if (priv->viu.osd1_interlace) { 195 - struct drm_plane *plane = priv->primary_plane; 196 - struct drm_plane_state *state = plane->state; 197 - struct drm_rect dest = { 198 - .x1 = state->crtc_x, 199 - .y1 = state->crtc_y, 200 - .x2 = state->crtc_x + state->crtc_w, 201 - .y2 = state->crtc_y + state->crtc_h, 202 - }; 203 - 204 - meson_vpp_setup_interlace_vscaler_osd1(priv, &dest); 205 - } else 206 - meson_vpp_disable_interlace_vscaler_osd1(priv); 207 - 208 - meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, 209 - priv->viu.osd1_addr, priv->viu.osd1_stride, 210 - priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE, 211 - MESON_CANVAS_BLKMODE_LINEAR); 201 + if (priv->canvas) 202 + meson_canvas_config(priv->canvas, priv->canvas_id_osd1, 203 + priv->viu.osd1_addr, priv->viu.osd1_stride, 204 + priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE, 205 + MESON_CANVAS_BLKMODE_LINEAR, 0); 206 + else 207 + meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, 208 + priv->viu.osd1_addr, priv->viu.osd1_stride, 209 + priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE, 210 + MESON_CANVAS_BLKMODE_LINEAR, 0); 212 211 213 212 /* Enable OSD1 */ 214 213 writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND, 215 214 priv->io_base + 
_REG(VPP_MISC)); 216 215 217 216 priv->viu.osd1_commit = false; 217 + } 218 + 219 + /* Update the VD1 registers */ 220 + if (priv->viu.vd1_enabled && priv->viu.vd1_commit) { 221 + 222 + switch (priv->viu.vd1_planes) { 223 + case 3: 224 + if (priv->canvas) 225 + meson_canvas_config(priv->canvas, 226 + priv->canvas_id_vd1_2, 227 + priv->viu.vd1_addr2, 228 + priv->viu.vd1_stride2, 229 + priv->viu.vd1_height2, 230 + MESON_CANVAS_WRAP_NONE, 231 + MESON_CANVAS_BLKMODE_LINEAR, 232 + MESON_CANVAS_ENDIAN_SWAP64); 233 + else 234 + meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_2, 235 + priv->viu.vd1_addr2, 236 + priv->viu.vd1_stride2, 237 + priv->viu.vd1_height2, 238 + MESON_CANVAS_WRAP_NONE, 239 + MESON_CANVAS_BLKMODE_LINEAR, 240 + MESON_CANVAS_ENDIAN_SWAP64); 241 + /* fallthrough */ 242 + case 2: 243 + if (priv->canvas) 244 + meson_canvas_config(priv->canvas, 245 + priv->canvas_id_vd1_1, 246 + priv->viu.vd1_addr1, 247 + priv->viu.vd1_stride1, 248 + priv->viu.vd1_height1, 249 + MESON_CANVAS_WRAP_NONE, 250 + MESON_CANVAS_BLKMODE_LINEAR, 251 + MESON_CANVAS_ENDIAN_SWAP64); 252 + else 253 + meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_1, 254 + priv->viu.vd1_addr2, 255 + priv->viu.vd1_stride2, 256 + priv->viu.vd1_height2, 257 + MESON_CANVAS_WRAP_NONE, 258 + MESON_CANVAS_BLKMODE_LINEAR, 259 + MESON_CANVAS_ENDIAN_SWAP64); 260 + /* fallthrough */ 261 + case 1: 262 + if (priv->canvas) 263 + meson_canvas_config(priv->canvas, 264 + priv->canvas_id_vd1_0, 265 + priv->viu.vd1_addr0, 266 + priv->viu.vd1_stride0, 267 + priv->viu.vd1_height0, 268 + MESON_CANVAS_WRAP_NONE, 269 + MESON_CANVAS_BLKMODE_LINEAR, 270 + MESON_CANVAS_ENDIAN_SWAP64); 271 + else 272 + meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_0, 273 + priv->viu.vd1_addr2, 274 + priv->viu.vd1_stride2, 275 + priv->viu.vd1_height2, 276 + MESON_CANVAS_WRAP_NONE, 277 + MESON_CANVAS_BLKMODE_LINEAR, 278 + MESON_CANVAS_ENDIAN_SWAP64); 279 + }; 280 + 281 + writel_relaxed(priv->viu.vd1_if0_gen_reg, 282 + priv->io_base + 
_REG(VD1_IF0_GEN_REG)); 283 + writel_relaxed(priv->viu.vd1_if0_gen_reg, 284 + priv->io_base + _REG(VD2_IF0_GEN_REG)); 285 + writel_relaxed(priv->viu.vd1_if0_gen_reg2, 286 + priv->io_base + _REG(VD1_IF0_GEN_REG2)); 287 + writel_relaxed(priv->viu.viu_vd1_fmt_ctrl, 288 + priv->io_base + _REG(VIU_VD1_FMT_CTRL)); 289 + writel_relaxed(priv->viu.viu_vd1_fmt_ctrl, 290 + priv->io_base + _REG(VIU_VD2_FMT_CTRL)); 291 + writel_relaxed(priv->viu.viu_vd1_fmt_w, 292 + priv->io_base + _REG(VIU_VD1_FMT_W)); 293 + writel_relaxed(priv->viu.viu_vd1_fmt_w, 294 + priv->io_base + _REG(VIU_VD2_FMT_W)); 295 + writel_relaxed(priv->viu.vd1_if0_canvas0, 296 + priv->io_base + _REG(VD1_IF0_CANVAS0)); 297 + writel_relaxed(priv->viu.vd1_if0_canvas0, 298 + priv->io_base + _REG(VD1_IF0_CANVAS1)); 299 + writel_relaxed(priv->viu.vd1_if0_canvas0, 300 + priv->io_base + _REG(VD2_IF0_CANVAS0)); 301 + writel_relaxed(priv->viu.vd1_if0_canvas0, 302 + priv->io_base + _REG(VD2_IF0_CANVAS1)); 303 + writel_relaxed(priv->viu.vd1_if0_luma_x0, 304 + priv->io_base + _REG(VD1_IF0_LUMA_X0)); 305 + writel_relaxed(priv->viu.vd1_if0_luma_x0, 306 + priv->io_base + _REG(VD1_IF0_LUMA_X1)); 307 + writel_relaxed(priv->viu.vd1_if0_luma_x0, 308 + priv->io_base + _REG(VD2_IF0_LUMA_X0)); 309 + writel_relaxed(priv->viu.vd1_if0_luma_x0, 310 + priv->io_base + _REG(VD2_IF0_LUMA_X1)); 311 + writel_relaxed(priv->viu.vd1_if0_luma_y0, 312 + priv->io_base + _REG(VD1_IF0_LUMA_Y0)); 313 + writel_relaxed(priv->viu.vd1_if0_luma_y0, 314 + priv->io_base + _REG(VD1_IF0_LUMA_Y1)); 315 + writel_relaxed(priv->viu.vd1_if0_luma_y0, 316 + priv->io_base + _REG(VD2_IF0_LUMA_Y0)); 317 + writel_relaxed(priv->viu.vd1_if0_luma_y0, 318 + priv->io_base + _REG(VD2_IF0_LUMA_Y1)); 319 + writel_relaxed(priv->viu.vd1_if0_chroma_x0, 320 + priv->io_base + _REG(VD1_IF0_CHROMA_X0)); 321 + writel_relaxed(priv->viu.vd1_if0_chroma_x0, 322 + priv->io_base + _REG(VD1_IF0_CHROMA_X1)); 323 + writel_relaxed(priv->viu.vd1_if0_chroma_x0, 324 + priv->io_base + 
_REG(VD2_IF0_CHROMA_X0)); 325 + writel_relaxed(priv->viu.vd1_if0_chroma_x0, 326 + priv->io_base + _REG(VD2_IF0_CHROMA_X1)); 327 + writel_relaxed(priv->viu.vd1_if0_chroma_y0, 328 + priv->io_base + _REG(VD1_IF0_CHROMA_Y0)); 329 + writel_relaxed(priv->viu.vd1_if0_chroma_y0, 330 + priv->io_base + _REG(VD1_IF0_CHROMA_Y1)); 331 + writel_relaxed(priv->viu.vd1_if0_chroma_y0, 332 + priv->io_base + _REG(VD2_IF0_CHROMA_Y0)); 333 + writel_relaxed(priv->viu.vd1_if0_chroma_y0, 334 + priv->io_base + _REG(VD2_IF0_CHROMA_Y1)); 335 + writel_relaxed(priv->viu.vd1_if0_repeat_loop, 336 + priv->io_base + _REG(VD1_IF0_RPT_LOOP)); 337 + writel_relaxed(priv->viu.vd1_if0_repeat_loop, 338 + priv->io_base + _REG(VD2_IF0_RPT_LOOP)); 339 + writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat, 340 + priv->io_base + _REG(VD1_IF0_LUMA0_RPT_PAT)); 341 + writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat, 342 + priv->io_base + _REG(VD2_IF0_LUMA0_RPT_PAT)); 343 + writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat, 344 + priv->io_base + _REG(VD1_IF0_LUMA1_RPT_PAT)); 345 + writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat, 346 + priv->io_base + _REG(VD2_IF0_LUMA1_RPT_PAT)); 347 + writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat, 348 + priv->io_base + _REG(VD1_IF0_CHROMA0_RPT_PAT)); 349 + writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat, 350 + priv->io_base + _REG(VD2_IF0_CHROMA0_RPT_PAT)); 351 + writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat, 352 + priv->io_base + _REG(VD1_IF0_CHROMA1_RPT_PAT)); 353 + writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat, 354 + priv->io_base + _REG(VD2_IF0_CHROMA1_RPT_PAT)); 355 + writel_relaxed(0, priv->io_base + _REG(VD1_IF0_LUMA_PSEL)); 356 + writel_relaxed(0, priv->io_base + _REG(VD1_IF0_CHROMA_PSEL)); 357 + writel_relaxed(0, priv->io_base + _REG(VD2_IF0_LUMA_PSEL)); 358 + writel_relaxed(0, priv->io_base + _REG(VD2_IF0_CHROMA_PSEL)); 359 + writel_relaxed(priv->viu.vd1_range_map_y, 360 + priv->io_base + _REG(VD1_IF0_RANGE_MAP_Y)); 361 + writel_relaxed(priv->viu.vd1_range_map_cb, 362 + 
priv->io_base + _REG(VD1_IF0_RANGE_MAP_CB)); 363 + writel_relaxed(priv->viu.vd1_range_map_cr, 364 + priv->io_base + _REG(VD1_IF0_RANGE_MAP_CR)); 365 + writel_relaxed(0x78404, 366 + priv->io_base + _REG(VPP_SC_MISC)); 367 + writel_relaxed(priv->viu.vpp_pic_in_height, 368 + priv->io_base + _REG(VPP_PIC_IN_HEIGHT)); 369 + writel_relaxed(priv->viu.vpp_postblend_vd1_h_start_end, 370 + priv->io_base + _REG(VPP_POSTBLEND_VD1_H_START_END)); 371 + writel_relaxed(priv->viu.vpp_blend_vd2_h_start_end, 372 + priv->io_base + _REG(VPP_BLEND_VD2_H_START_END)); 373 + writel_relaxed(priv->viu.vpp_postblend_vd1_v_start_end, 374 + priv->io_base + _REG(VPP_POSTBLEND_VD1_V_START_END)); 375 + writel_relaxed(priv->viu.vpp_blend_vd2_v_start_end, 376 + priv->io_base + _REG(VPP_BLEND_VD2_V_START_END)); 377 + writel_relaxed(priv->viu.vpp_hsc_region12_startp, 378 + priv->io_base + _REG(VPP_HSC_REGION12_STARTP)); 379 + writel_relaxed(priv->viu.vpp_hsc_region34_startp, 380 + priv->io_base + _REG(VPP_HSC_REGION34_STARTP)); 381 + writel_relaxed(priv->viu.vpp_hsc_region4_endp, 382 + priv->io_base + _REG(VPP_HSC_REGION4_ENDP)); 383 + writel_relaxed(priv->viu.vpp_hsc_start_phase_step, 384 + priv->io_base + _REG(VPP_HSC_START_PHASE_STEP)); 385 + writel_relaxed(priv->viu.vpp_hsc_region1_phase_slope, 386 + priv->io_base + _REG(VPP_HSC_REGION1_PHASE_SLOPE)); 387 + writel_relaxed(priv->viu.vpp_hsc_region3_phase_slope, 388 + priv->io_base + _REG(VPP_HSC_REGION3_PHASE_SLOPE)); 389 + writel_relaxed(priv->viu.vpp_line_in_length, 390 + priv->io_base + _REG(VPP_LINE_IN_LENGTH)); 391 + writel_relaxed(priv->viu.vpp_preblend_h_size, 392 + priv->io_base + _REG(VPP_PREBLEND_H_SIZE)); 393 + writel_relaxed(priv->viu.vpp_vsc_region12_startp, 394 + priv->io_base + _REG(VPP_VSC_REGION12_STARTP)); 395 + writel_relaxed(priv->viu.vpp_vsc_region34_startp, 396 + priv->io_base + _REG(VPP_VSC_REGION34_STARTP)); 397 + writel_relaxed(priv->viu.vpp_vsc_region4_endp, 398 + priv->io_base + _REG(VPP_VSC_REGION4_ENDP)); 399 + 
writel_relaxed(priv->viu.vpp_vsc_start_phase_step, 400 + priv->io_base + _REG(VPP_VSC_START_PHASE_STEP)); 401 + writel_relaxed(priv->viu.vpp_vsc_ini_phase, 402 + priv->io_base + _REG(VPP_VSC_INI_PHASE)); 403 + writel_relaxed(priv->viu.vpp_vsc_phase_ctrl, 404 + priv->io_base + _REG(VPP_VSC_PHASE_CTRL)); 405 + writel_relaxed(priv->viu.vpp_hsc_phase_ctrl, 406 + priv->io_base + _REG(VPP_HSC_PHASE_CTRL)); 407 + writel_relaxed(0x42, priv->io_base + _REG(VPP_SCALE_COEF_IDX)); 408 + 409 + /* Enable VD1 */ 410 + writel_bits_relaxed(VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND | 411 + VPP_COLOR_MNG_ENABLE, 412 + VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND | 413 + VPP_COLOR_MNG_ENABLE, 414 + priv->io_base + _REG(VPP_MISC)); 415 + 416 + priv->viu.vd1_commit = false; 218 417 } 219 418 220 419 drm_crtc_handle_vblank(priv->crtc);
+57 -17
drivers/gpu/drm/meson/meson_drv.c
··· 41 41 42 42 #include "meson_drv.h" 43 43 #include "meson_plane.h" 44 + #include "meson_overlay.h" 44 45 #include "meson_crtc.h" 45 46 #include "meson_venc_cvbs.h" 46 47 ··· 209 208 goto free_drm; 210 209 } 211 210 212 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc"); 213 - if (!res) { 214 - ret = -EINVAL; 215 - goto free_drm; 216 - } 217 - /* Simply ioremap since it may be a shared register zone */ 218 - regs = devm_ioremap(dev, res->start, resource_size(res)); 219 - if (!regs) { 220 - ret = -EADDRNOTAVAIL; 221 - goto free_drm; 222 - } 211 + priv->canvas = meson_canvas_get(dev); 212 + if (!IS_ERR(priv->canvas)) { 213 + ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1); 214 + if (ret) 215 + goto free_drm; 216 + ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0); 217 + if (ret) { 218 + meson_canvas_free(priv->canvas, priv->canvas_id_osd1); 219 + goto free_drm; 220 + } 221 + ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1); 222 + if (ret) { 223 + meson_canvas_free(priv->canvas, priv->canvas_id_osd1); 224 + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); 225 + goto free_drm; 226 + } 227 + ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2); 228 + if (ret) { 229 + meson_canvas_free(priv->canvas, priv->canvas_id_osd1); 230 + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); 231 + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1); 232 + goto free_drm; 233 + } 234 + } else { 235 + priv->canvas = NULL; 223 236 224 - priv->dmc = devm_regmap_init_mmio(dev, regs, 225 - &meson_regmap_config); 226 - if (IS_ERR(priv->dmc)) { 227 - dev_err(&pdev->dev, "Couldn't create the DMC regmap\n"); 228 - ret = PTR_ERR(priv->dmc); 229 - goto free_drm; 237 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc"); 238 + if (!res) { 239 + ret = -EINVAL; 240 + goto free_drm; 241 + } 242 + /* Simply ioremap since it may be a shared register zone */ 243 + regs = devm_ioremap(dev, res->start, 
resource_size(res)); 244 + if (!regs) { 245 + ret = -EADDRNOTAVAIL; 246 + goto free_drm; 247 + } 248 + 249 + priv->dmc = devm_regmap_init_mmio(dev, regs, 250 + &meson_regmap_config); 251 + if (IS_ERR(priv->dmc)) { 252 + dev_err(&pdev->dev, "Couldn't create the DMC regmap\n"); 253 + ret = PTR_ERR(priv->dmc); 254 + goto free_drm; 255 + } 230 256 } 231 257 232 258 priv->vsync_irq = platform_get_irq(pdev, 0); ··· 289 261 } 290 262 291 263 ret = meson_plane_create(priv); 264 + if (ret) 265 + goto free_drm; 266 + 267 + ret = meson_overlay_create(priv); 292 268 if (ret) 293 269 goto free_drm; 294 270 ··· 332 300 static void meson_drv_unbind(struct device *dev) 333 301 { 334 302 struct drm_device *drm = dev_get_drvdata(dev); 303 + struct meson_drm *priv = drm->dev_private; 304 + 305 + if (priv->canvas) { 306 + meson_canvas_free(priv->canvas, priv->canvas_id_osd1); 307 + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); 308 + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1); 309 + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2); 310 + } 335 311 336 312 drm_dev_unregister(drm); 337 313 drm_kms_helper_poll_fini(drm);
+66
drivers/gpu/drm/meson/meson_drv.h
··· 22 22 #include <linux/platform_device.h> 23 23 #include <linux/regmap.h> 24 24 #include <linux/of.h> 25 + #include <linux/soc/amlogic/meson-canvas.h> 25 26 #include <drm/drmP.h> 26 27 27 28 struct meson_drm { ··· 32 31 struct regmap *dmc; 33 32 int vsync_irq; 34 33 34 + struct meson_canvas *canvas; 35 + u8 canvas_id_osd1; 36 + u8 canvas_id_vd1_0; 37 + u8 canvas_id_vd1_1; 38 + u8 canvas_id_vd1_2; 39 + 35 40 struct drm_device *drm; 36 41 struct drm_crtc *crtc; 37 42 struct drm_plane *primary_plane; 43 + struct drm_plane *overlay_plane; 38 44 39 45 /* Components Data */ 40 46 struct { ··· 53 45 uint32_t osd1_addr; 54 46 uint32_t osd1_stride; 55 47 uint32_t osd1_height; 48 + uint32_t osd_sc_ctrl0; 49 + uint32_t osd_sc_i_wh_m1; 50 + uint32_t osd_sc_o_h_start_end; 51 + uint32_t osd_sc_o_v_start_end; 52 + uint32_t osd_sc_v_ini_phase; 53 + uint32_t osd_sc_v_phase_step; 54 + uint32_t osd_sc_h_ini_phase; 55 + uint32_t osd_sc_h_phase_step; 56 + uint32_t osd_sc_h_ctrl0; 57 + uint32_t osd_sc_v_ctrl0; 58 + 59 + bool vd1_enabled; 60 + bool vd1_commit; 61 + unsigned int vd1_planes; 62 + uint32_t vd1_if0_gen_reg; 63 + uint32_t vd1_if0_luma_x0; 64 + uint32_t vd1_if0_luma_y0; 65 + uint32_t vd1_if0_chroma_x0; 66 + uint32_t vd1_if0_chroma_y0; 67 + uint32_t vd1_if0_repeat_loop; 68 + uint32_t vd1_if0_luma0_rpt_pat; 69 + uint32_t vd1_if0_chroma0_rpt_pat; 70 + uint32_t vd1_range_map_y; 71 + uint32_t vd1_range_map_cb; 72 + uint32_t vd1_range_map_cr; 73 + uint32_t viu_vd1_fmt_w; 74 + uint32_t vd1_if0_canvas0; 75 + uint32_t vd1_if0_gen_reg2; 76 + uint32_t viu_vd1_fmt_ctrl; 77 + uint32_t vd1_addr0; 78 + uint32_t vd1_addr1; 79 + uint32_t vd1_addr2; 80 + uint32_t vd1_stride0; 81 + uint32_t vd1_stride1; 82 + uint32_t vd1_stride2; 83 + uint32_t vd1_height0; 84 + uint32_t vd1_height1; 85 + uint32_t vd1_height2; 86 + uint32_t vpp_pic_in_height; 87 + uint32_t vpp_postblend_vd1_h_start_end; 88 + uint32_t vpp_postblend_vd1_v_start_end; 89 + uint32_t vpp_hsc_region12_startp; 90 + uint32_t 
vpp_hsc_region34_startp; 91 + uint32_t vpp_hsc_region4_endp; 92 + uint32_t vpp_hsc_start_phase_step; 93 + uint32_t vpp_hsc_region1_phase_slope; 94 + uint32_t vpp_hsc_region3_phase_slope; 95 + uint32_t vpp_line_in_length; 96 + uint32_t vpp_preblend_h_size; 97 + uint32_t vpp_vsc_region12_startp; 98 + uint32_t vpp_vsc_region34_startp; 99 + uint32_t vpp_vsc_region4_endp; 100 + uint32_t vpp_vsc_start_phase_step; 101 + uint32_t vpp_vsc_ini_phase; 102 + uint32_t vpp_vsc_phase_ctrl; 103 + uint32_t vpp_hsc_phase_ctrl; 104 + uint32_t vpp_blend_vd2_h_start_end; 105 + uint32_t vpp_blend_vd2_v_start_end; 56 106 } viu; 57 107 58 108 struct {
+586
drivers/gpu/drm/meson/meson_overlay.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * Copyright (C) 2018 BayLibre, SAS 4 + * Author: Neil Armstrong <narmstrong@baylibre.com> 5 + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. 6 + */ 7 + 8 + #include <linux/kernel.h> 9 + #include <linux/module.h> 10 + #include <linux/mutex.h> 11 + #include <linux/bitfield.h> 12 + #include <linux/platform_device.h> 13 + #include <drm/drmP.h> 14 + #include <drm/drm_atomic.h> 15 + #include <drm/drm_atomic_helper.h> 16 + #include <drm/drm_plane_helper.h> 17 + #include <drm/drm_gem_cma_helper.h> 18 + #include <drm/drm_fb_cma_helper.h> 19 + #include <drm/drm_rect.h> 20 + 21 + #include "meson_overlay.h" 22 + #include "meson_vpp.h" 23 + #include "meson_viu.h" 24 + #include "meson_canvas.h" 25 + #include "meson_registers.h" 26 + 27 + /* VD1_IF0_GEN_REG */ 28 + #define VD_URGENT_CHROMA BIT(28) 29 + #define VD_URGENT_LUMA BIT(27) 30 + #define VD_HOLD_LINES(lines) FIELD_PREP(GENMASK(24, 19), lines) 31 + #define VD_DEMUX_MODE_RGB BIT(16) 32 + #define VD_BYTES_PER_PIXEL(val) FIELD_PREP(GENMASK(15, 14), val) 33 + #define VD_CHRO_RPT_LASTL_CTRL BIT(6) 34 + #define VD_LITTLE_ENDIAN BIT(4) 35 + #define VD_SEPARATE_EN BIT(1) 36 + #define VD_ENABLE BIT(0) 37 + 38 + /* VD1_IF0_CANVAS0 */ 39 + #define CANVAS_ADDR2(addr) FIELD_PREP(GENMASK(23, 16), addr) 40 + #define CANVAS_ADDR1(addr) FIELD_PREP(GENMASK(15, 8), addr) 41 + #define CANVAS_ADDR0(addr) FIELD_PREP(GENMASK(7, 0), addr) 42 + 43 + /* VD1_IF0_LUMA_X0 VD1_IF0_CHROMA_X0 */ 44 + #define VD_X_START(value) FIELD_PREP(GENMASK(14, 0), value) 45 + #define VD_X_END(value) FIELD_PREP(GENMASK(30, 16), value) 46 + 47 + /* VD1_IF0_LUMA_Y0 VD1_IF0_CHROMA_Y0 */ 48 + #define VD_Y_START(value) FIELD_PREP(GENMASK(12, 0), value) 49 + #define VD_Y_END(value) FIELD_PREP(GENMASK(28, 16), value) 50 + 51 + /* VD1_IF0_GEN_REG2 */ 52 + #define VD_COLOR_MAP(value) FIELD_PREP(GENMASK(1, 0), value) 53 + 54 + /* VIU_VD1_FMT_CTRL */ 55 + #define VD_HORZ_Y_C_RATIO(value) FIELD_PREP(GENMASK(22, 
21), value) 56 + #define VD_HORZ_FMT_EN BIT(20) 57 + #define VD_VERT_RPT_LINE0 BIT(16) 58 + #define VD_VERT_INITIAL_PHASE(value) FIELD_PREP(GENMASK(11, 8), value) 59 + #define VD_VERT_PHASE_STEP(value) FIELD_PREP(GENMASK(7, 1), value) 60 + #define VD_VERT_FMT_EN BIT(0) 61 + 62 + /* VPP_POSTBLEND_VD1_H_START_END */ 63 + #define VD_H_END(value) FIELD_PREP(GENMASK(11, 0), value) 64 + #define VD_H_START(value) FIELD_PREP(GENMASK(27, 16), value) 65 + 66 + /* VPP_POSTBLEND_VD1_V_START_END */ 67 + #define VD_V_END(value) FIELD_PREP(GENMASK(11, 0), value) 68 + #define VD_V_START(value) FIELD_PREP(GENMASK(27, 16), value) 69 + 70 + /* VPP_BLEND_VD2_V_START_END */ 71 + #define VD2_V_END(value) FIELD_PREP(GENMASK(11, 0), value) 72 + #define VD2_V_START(value) FIELD_PREP(GENMASK(27, 16), value) 73 + 74 + /* VIU_VD1_FMT_W */ 75 + #define VD_V_WIDTH(value) FIELD_PREP(GENMASK(11, 0), value) 76 + #define VD_H_WIDTH(value) FIELD_PREP(GENMASK(27, 16), value) 77 + 78 + /* VPP_HSC_REGION12_STARTP VPP_HSC_REGION34_STARTP */ 79 + #define VD_REGION24_START(value) FIELD_PREP(GENMASK(11, 0), value) 80 + #define VD_REGION13_END(value) FIELD_PREP(GENMASK(27, 16), value) 81 + 82 + struct meson_overlay { 83 + struct drm_plane base; 84 + struct meson_drm *priv; 85 + }; 86 + #define to_meson_overlay(x) container_of(x, struct meson_overlay, base) 87 + 88 + #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) 89 + 90 + static int meson_overlay_atomic_check(struct drm_plane *plane, 91 + struct drm_plane_state *state) 92 + { 93 + struct drm_crtc_state *crtc_state; 94 + 95 + if (!state->crtc) 96 + return 0; 97 + 98 + crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); 99 + if (IS_ERR(crtc_state)) 100 + return PTR_ERR(crtc_state); 101 + 102 + return drm_atomic_helper_check_plane_state(state, crtc_state, 103 + FRAC_16_16(1, 5), 104 + FRAC_16_16(5, 1), 105 + true, true); 106 + } 107 + 108 + /* Takes a fixed 16.16 number and converts it to integer. 
*/ 109 + static inline int64_t fixed16_to_int(int64_t value) 110 + { 111 + return value >> 16; 112 + } 113 + 114 + static const uint8_t skip_tab[6] = { 115 + 0x24, 0x04, 0x68, 0x48, 0x28, 0x08, 116 + }; 117 + 118 + static void meson_overlay_get_vertical_phase(unsigned int ratio_y, int *phase, 119 + int *repeat, bool interlace) 120 + { 121 + int offset_in = 0; 122 + int offset_out = 0; 123 + int repeat_skip = 0; 124 + 125 + if (!interlace && ratio_y > (1 << 18)) 126 + offset_out = (1 * ratio_y) >> 10; 127 + 128 + while ((offset_in + (4 << 8)) <= offset_out) { 129 + repeat_skip++; 130 + offset_in += 4 << 8; 131 + } 132 + 133 + *phase = (offset_out - offset_in) >> 2; 134 + 135 + if (*phase > 0x100) 136 + repeat_skip++; 137 + 138 + *phase = *phase & 0xff; 139 + 140 + if (repeat_skip > 5) 141 + repeat_skip = 5; 142 + 143 + *repeat = skip_tab[repeat_skip]; 144 + } 145 + 146 + static void meson_overlay_setup_scaler_params(struct meson_drm *priv, 147 + struct drm_plane *plane, 148 + bool interlace_mode) 149 + { 150 + struct drm_crtc_state *crtc_state = priv->crtc->state; 151 + int video_top, video_left, video_width, video_height; 152 + struct drm_plane_state *state = plane->state; 153 + unsigned int vd_start_lines, vd_end_lines; 154 + unsigned int hd_start_lines, hd_end_lines; 155 + unsigned int crtc_height, crtc_width; 156 + unsigned int vsc_startp, vsc_endp; 157 + unsigned int hsc_startp, hsc_endp; 158 + unsigned int crop_top, crop_left; 159 + int vphase, vphase_repeat_skip; 160 + unsigned int ratio_x, ratio_y; 161 + int temp_height, temp_width; 162 + unsigned int w_in, h_in; 163 + int temp, start, end; 164 + 165 + if (!crtc_state) { 166 + DRM_ERROR("Invalid crtc_state\n"); 167 + return; 168 + } 169 + 170 + crtc_height = crtc_state->mode.vdisplay; 171 + crtc_width = crtc_state->mode.hdisplay; 172 + 173 + w_in = fixed16_to_int(state->src_w); 174 + h_in = fixed16_to_int(state->src_h); 175 + crop_top = fixed16_to_int(state->src_x); 176 + crop_left = 
fixed16_to_int(state->src_x); 177 + 178 + video_top = state->crtc_y; 179 + video_left = state->crtc_x; 180 + video_width = state->crtc_w; 181 + video_height = state->crtc_h; 182 + 183 + DRM_DEBUG("crtc_width %d crtc_height %d interlace %d\n", 184 + crtc_width, crtc_height, interlace_mode); 185 + DRM_DEBUG("w_in %d h_in %d crop_top %d crop_left %d\n", 186 + w_in, h_in, crop_top, crop_left); 187 + DRM_DEBUG("video top %d left %d width %d height %d\n", 188 + video_top, video_left, video_width, video_height); 189 + 190 + ratio_x = (w_in << 18) / video_width; 191 + ratio_y = (h_in << 18) / video_height; 192 + 193 + if (ratio_x * video_width < (w_in << 18)) 194 + ratio_x++; 195 + 196 + DRM_DEBUG("ratio x 0x%x y 0x%x\n", ratio_x, ratio_y); 197 + 198 + meson_overlay_get_vertical_phase(ratio_y, &vphase, &vphase_repeat_skip, 199 + interlace_mode); 200 + 201 + DRM_DEBUG("vphase 0x%x skip %d\n", vphase, vphase_repeat_skip); 202 + 203 + /* Vertical */ 204 + 205 + start = video_top + video_height / 2 - ((h_in << 17) / ratio_y); 206 + end = (h_in << 18) / ratio_y + start - 1; 207 + 208 + if (video_top < 0 && start < 0) 209 + vd_start_lines = (-(start) * ratio_y) >> 18; 210 + else if (start < video_top) 211 + vd_start_lines = ((video_top - start) * ratio_y) >> 18; 212 + else 213 + vd_start_lines = 0; 214 + 215 + if (video_top < 0) 216 + temp_height = min_t(unsigned int, 217 + video_top + video_height - 1, 218 + crtc_height - 1); 219 + else 220 + temp_height = min_t(unsigned int, 221 + video_top + video_height - 1, 222 + crtc_height - 1) - video_top + 1; 223 + 224 + temp = vd_start_lines + (temp_height * ratio_y >> 18); 225 + vd_end_lines = (temp <= (h_in - 1)) ? temp : (h_in - 1); 226 + 227 + vd_start_lines += crop_left; 228 + vd_end_lines += crop_left; 229 + 230 + /* 231 + * TOFIX: Input frames are handled and scaled like progressive frames, 232 + * proper handling of interlaced field input frames need to be figured 233 + * out using the proper framebuffer flags set by userspace. 
234 + */ 235 + if (interlace_mode) { 236 + start >>= 1; 237 + end >>= 1; 238 + } 239 + 240 + vsc_startp = max_t(int, start, 241 + max_t(int, 0, video_top)); 242 + vsc_endp = min_t(int, end, 243 + min_t(int, crtc_height - 1, 244 + video_top + video_height - 1)); 245 + 246 + DRM_DEBUG("vsc startp %d endp %d start_lines %d end_lines %d\n", 247 + vsc_startp, vsc_endp, vd_start_lines, vd_end_lines); 248 + 249 + /* Horizontal */ 250 + 251 + start = video_left + video_width / 2 - ((w_in << 17) / ratio_x); 252 + end = (w_in << 18) / ratio_x + start - 1; 253 + 254 + if (video_left < 0 && start < 0) 255 + hd_start_lines = (-(start) * ratio_x) >> 18; 256 + else if (start < video_left) 257 + hd_start_lines = ((video_left - start) * ratio_x) >> 18; 258 + else 259 + hd_start_lines = 0; 260 + 261 + if (video_left < 0) 262 + temp_width = min_t(unsigned int, 263 + video_left + video_width - 1, 264 + crtc_width - 1); 265 + else 266 + temp_width = min_t(unsigned int, 267 + video_left + video_width - 1, 268 + crtc_width - 1) - video_left + 1; 269 + 270 + temp = hd_start_lines + (temp_width * ratio_x >> 18); 271 + hd_end_lines = (temp <= (w_in - 1)) ? 
temp : (w_in - 1); 272 + 273 + priv->viu.vpp_line_in_length = hd_end_lines - hd_start_lines + 1; 274 + hsc_startp = max_t(int, start, max_t(int, 0, video_left)); 275 + hsc_endp = min_t(int, end, min_t(int, crtc_width - 1, 276 + video_left + video_width - 1)); 277 + 278 + hd_start_lines += crop_top; 279 + hd_end_lines += crop_top; 280 + 281 + DRM_DEBUG("hsc startp %d endp %d start_lines %d end_lines %d\n", 282 + hsc_startp, hsc_endp, hd_start_lines, hd_end_lines); 283 + 284 + priv->viu.vpp_vsc_start_phase_step = ratio_y << 6; 285 + 286 + priv->viu.vpp_vsc_ini_phase = vphase << 8; 287 + priv->viu.vpp_vsc_phase_ctrl = (1 << 13) | (4 << 8) | 288 + vphase_repeat_skip; 289 + 290 + priv->viu.vd1_if0_luma_x0 = VD_X_START(hd_start_lines) | 291 + VD_X_END(hd_end_lines); 292 + priv->viu.vd1_if0_chroma_x0 = VD_X_START(hd_start_lines >> 1) | 293 + VD_X_END(hd_end_lines >> 1); 294 + 295 + priv->viu.viu_vd1_fmt_w = 296 + VD_H_WIDTH(hd_end_lines - hd_start_lines + 1) | 297 + VD_V_WIDTH(hd_end_lines/2 - hd_start_lines/2 + 1); 298 + 299 + priv->viu.vd1_if0_luma_y0 = VD_Y_START(vd_start_lines) | 300 + VD_Y_END(vd_end_lines); 301 + 302 + priv->viu.vd1_if0_chroma_y0 = VD_Y_START(vd_start_lines >> 1) | 303 + VD_Y_END(vd_end_lines >> 1); 304 + 305 + priv->viu.vpp_pic_in_height = h_in; 306 + 307 + priv->viu.vpp_postblend_vd1_h_start_end = VD_H_START(hsc_startp) | 308 + VD_H_END(hsc_endp); 309 + priv->viu.vpp_blend_vd2_h_start_end = VD_H_START(hd_start_lines) | 310 + VD_H_END(hd_end_lines); 311 + priv->viu.vpp_hsc_region12_startp = VD_REGION13_END(0) | 312 + VD_REGION24_START(hsc_startp); 313 + priv->viu.vpp_hsc_region34_startp = 314 + VD_REGION13_END(hsc_startp) | 315 + VD_REGION24_START(hsc_endp - hsc_startp); 316 + priv->viu.vpp_hsc_region4_endp = hsc_endp - hsc_startp; 317 + priv->viu.vpp_hsc_start_phase_step = ratio_x << 6; 318 + priv->viu.vpp_hsc_region1_phase_slope = 0; 319 + priv->viu.vpp_hsc_region3_phase_slope = 0; 320 + priv->viu.vpp_hsc_phase_ctrl = (1 << 21) | (4 << 16); 321 + 
322 + priv->viu.vpp_line_in_length = hd_end_lines - hd_start_lines + 1; 323 + priv->viu.vpp_preblend_h_size = hd_end_lines - hd_start_lines + 1; 324 + 325 + priv->viu.vpp_postblend_vd1_v_start_end = VD_V_START(vsc_startp) | 326 + VD_V_END(vsc_endp); 327 + priv->viu.vpp_blend_vd2_v_start_end = 328 + VD2_V_START((vd_end_lines + 1) >> 1) | 329 + VD2_V_END(vd_end_lines); 330 + 331 + priv->viu.vpp_vsc_region12_startp = 0; 332 + priv->viu.vpp_vsc_region34_startp = 333 + VD_REGION13_END(vsc_endp - vsc_startp) | 334 + VD_REGION24_START(vsc_endp - vsc_startp); 335 + priv->viu.vpp_vsc_region4_endp = vsc_endp - vsc_startp; 336 + priv->viu.vpp_vsc_start_phase_step = ratio_y << 6; 337 + } 338 + 339 + static void meson_overlay_atomic_update(struct drm_plane *plane, 340 + struct drm_plane_state *old_state) 341 + { 342 + struct meson_overlay *meson_overlay = to_meson_overlay(plane); 343 + struct drm_plane_state *state = plane->state; 344 + struct drm_framebuffer *fb = state->fb; 345 + struct meson_drm *priv = meson_overlay->priv; 346 + struct drm_gem_cma_object *gem; 347 + unsigned long flags; 348 + bool interlace_mode; 349 + 350 + DRM_DEBUG_DRIVER("\n"); 351 + 352 + /* Fallback is canvas provider is not available */ 353 + if (!priv->canvas) { 354 + priv->canvas_id_vd1_0 = MESON_CANVAS_ID_VD1_0; 355 + priv->canvas_id_vd1_1 = MESON_CANVAS_ID_VD1_1; 356 + priv->canvas_id_vd1_2 = MESON_CANVAS_ID_VD1_2; 357 + } 358 + 359 + interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE; 360 + 361 + spin_lock_irqsave(&priv->drm->event_lock, flags); 362 + 363 + priv->viu.vd1_if0_gen_reg = VD_URGENT_CHROMA | 364 + VD_URGENT_LUMA | 365 + VD_HOLD_LINES(9) | 366 + VD_CHRO_RPT_LASTL_CTRL | 367 + VD_ENABLE; 368 + 369 + /* Setup scaler params */ 370 + meson_overlay_setup_scaler_params(priv, plane, interlace_mode); 371 + 372 + priv->viu.vd1_if0_repeat_loop = 0; 373 + priv->viu.vd1_if0_luma0_rpt_pat = interlace_mode ? 8 : 0; 374 + priv->viu.vd1_if0_chroma0_rpt_pat = interlace_mode ? 
8 : 0; 375 + priv->viu.vd1_range_map_y = 0; 376 + priv->viu.vd1_range_map_cb = 0; 377 + priv->viu.vd1_range_map_cr = 0; 378 + 379 + /* Default values for RGB888/YUV444 */ 380 + priv->viu.vd1_if0_gen_reg2 = 0; 381 + priv->viu.viu_vd1_fmt_ctrl = 0; 382 + 383 + switch (fb->format->format) { 384 + /* TOFIX DRM_FORMAT_RGB888 should be supported */ 385 + case DRM_FORMAT_YUYV: 386 + priv->viu.vd1_if0_gen_reg |= VD_BYTES_PER_PIXEL(1); 387 + priv->viu.vd1_if0_canvas0 = 388 + CANVAS_ADDR2(priv->canvas_id_vd1_0) | 389 + CANVAS_ADDR1(priv->canvas_id_vd1_0) | 390 + CANVAS_ADDR0(priv->canvas_id_vd1_0); 391 + priv->viu.viu_vd1_fmt_ctrl = VD_HORZ_Y_C_RATIO(1) | /* /2 */ 392 + VD_HORZ_FMT_EN | 393 + VD_VERT_RPT_LINE0 | 394 + VD_VERT_INITIAL_PHASE(12) | 395 + VD_VERT_PHASE_STEP(16) | /* /2 */ 396 + VD_VERT_FMT_EN; 397 + break; 398 + case DRM_FORMAT_NV12: 399 + case DRM_FORMAT_NV21: 400 + priv->viu.vd1_if0_gen_reg |= VD_SEPARATE_EN; 401 + priv->viu.vd1_if0_canvas0 = 402 + CANVAS_ADDR2(priv->canvas_id_vd1_1) | 403 + CANVAS_ADDR1(priv->canvas_id_vd1_1) | 404 + CANVAS_ADDR0(priv->canvas_id_vd1_0); 405 + if (fb->format->format == DRM_FORMAT_NV12) 406 + priv->viu.vd1_if0_gen_reg2 = VD_COLOR_MAP(1); 407 + else 408 + priv->viu.vd1_if0_gen_reg2 = VD_COLOR_MAP(2); 409 + priv->viu.viu_vd1_fmt_ctrl = VD_HORZ_Y_C_RATIO(1) | /* /2 */ 410 + VD_HORZ_FMT_EN | 411 + VD_VERT_RPT_LINE0 | 412 + VD_VERT_INITIAL_PHASE(12) | 413 + VD_VERT_PHASE_STEP(8) | /* /4 */ 414 + VD_VERT_FMT_EN; 415 + break; 416 + case DRM_FORMAT_YUV444: 417 + case DRM_FORMAT_YUV422: 418 + case DRM_FORMAT_YUV420: 419 + case DRM_FORMAT_YUV411: 420 + case DRM_FORMAT_YUV410: 421 + priv->viu.vd1_if0_gen_reg |= VD_SEPARATE_EN; 422 + priv->viu.vd1_if0_canvas0 = 423 + CANVAS_ADDR2(priv->canvas_id_vd1_2) | 424 + CANVAS_ADDR1(priv->canvas_id_vd1_1) | 425 + CANVAS_ADDR0(priv->canvas_id_vd1_0); 426 + switch (fb->format->format) { 427 + case DRM_FORMAT_YUV422: 428 + priv->viu.viu_vd1_fmt_ctrl = 429 + VD_HORZ_Y_C_RATIO(1) | /* /2 */ 430 + 
VD_HORZ_FMT_EN | 431 + VD_VERT_RPT_LINE0 | 432 + VD_VERT_INITIAL_PHASE(12) | 433 + VD_VERT_PHASE_STEP(16) | /* /2 */ 434 + VD_VERT_FMT_EN; 435 + break; 436 + case DRM_FORMAT_YUV420: 437 + priv->viu.viu_vd1_fmt_ctrl = 438 + VD_HORZ_Y_C_RATIO(1) | /* /2 */ 439 + VD_HORZ_FMT_EN | 440 + VD_VERT_RPT_LINE0 | 441 + VD_VERT_INITIAL_PHASE(12) | 442 + VD_VERT_PHASE_STEP(8) | /* /4 */ 443 + VD_VERT_FMT_EN; 444 + break; 445 + case DRM_FORMAT_YUV411: 446 + priv->viu.viu_vd1_fmt_ctrl = 447 + VD_HORZ_Y_C_RATIO(2) | /* /4 */ 448 + VD_HORZ_FMT_EN | 449 + VD_VERT_RPT_LINE0 | 450 + VD_VERT_INITIAL_PHASE(12) | 451 + VD_VERT_PHASE_STEP(16) | /* /2 */ 452 + VD_VERT_FMT_EN; 453 + break; 454 + case DRM_FORMAT_YUV410: 455 + priv->viu.viu_vd1_fmt_ctrl = 456 + VD_HORZ_Y_C_RATIO(2) | /* /4 */ 457 + VD_HORZ_FMT_EN | 458 + VD_VERT_RPT_LINE0 | 459 + VD_VERT_INITIAL_PHASE(12) | 460 + VD_VERT_PHASE_STEP(8) | /* /4 */ 461 + VD_VERT_FMT_EN; 462 + break; 463 + } 464 + break; 465 + } 466 + 467 + /* Update Canvas with buffer address */ 468 + priv->viu.vd1_planes = drm_format_num_planes(fb->format->format); 469 + 470 + switch (priv->viu.vd1_planes) { 471 + case 3: 472 + gem = drm_fb_cma_get_gem_obj(fb, 2); 473 + priv->viu.vd1_addr2 = gem->paddr + fb->offsets[2]; 474 + priv->viu.vd1_stride2 = fb->pitches[2]; 475 + priv->viu.vd1_height2 = 476 + drm_format_plane_height(fb->height, 477 + fb->format->format, 2); 478 + DRM_DEBUG("plane 2 addr 0x%x stride %d height %d\n", 479 + priv->viu.vd1_addr2, 480 + priv->viu.vd1_stride2, 481 + priv->viu.vd1_height2); 482 + /* fallthrough */ 483 + case 2: 484 + gem = drm_fb_cma_get_gem_obj(fb, 1); 485 + priv->viu.vd1_addr1 = gem->paddr + fb->offsets[1]; 486 + priv->viu.vd1_stride1 = fb->pitches[1]; 487 + priv->viu.vd1_height1 = 488 + drm_format_plane_height(fb->height, 489 + fb->format->format, 1); 490 + DRM_DEBUG("plane 1 addr 0x%x stride %d height %d\n", 491 + priv->viu.vd1_addr1, 492 + priv->viu.vd1_stride1, 493 + priv->viu.vd1_height1); 494 + /* fallthrough */ 495 + 
case 1: 496 + gem = drm_fb_cma_get_gem_obj(fb, 0); 497 + priv->viu.vd1_addr0 = gem->paddr + fb->offsets[0]; 498 + priv->viu.vd1_stride0 = fb->pitches[0]; 499 + priv->viu.vd1_height0 = 500 + drm_format_plane_height(fb->height, 501 + fb->format->format, 0); 502 + DRM_DEBUG("plane 0 addr 0x%x stride %d height %d\n", 503 + priv->viu.vd1_addr0, 504 + priv->viu.vd1_stride0, 505 + priv->viu.vd1_height0); 506 + } 507 + 508 + priv->viu.vd1_enabled = true; 509 + 510 + spin_unlock_irqrestore(&priv->drm->event_lock, flags); 511 + 512 + DRM_DEBUG_DRIVER("\n"); 513 + } 514 + 515 + static void meson_overlay_atomic_disable(struct drm_plane *plane, 516 + struct drm_plane_state *old_state) 517 + { 518 + struct meson_overlay *meson_overlay = to_meson_overlay(plane); 519 + struct meson_drm *priv = meson_overlay->priv; 520 + 521 + DRM_DEBUG_DRIVER("\n"); 522 + 523 + priv->viu.vd1_enabled = false; 524 + 525 + /* Disable VD1 */ 526 + writel_bits_relaxed(VPP_VD1_POSTBLEND | VPP_VD1_PREBLEND, 0, 527 + priv->io_base + _REG(VPP_MISC)); 528 + 529 + } 530 + 531 + static const struct drm_plane_helper_funcs meson_overlay_helper_funcs = { 532 + .atomic_check = meson_overlay_atomic_check, 533 + .atomic_disable = meson_overlay_atomic_disable, 534 + .atomic_update = meson_overlay_atomic_update, 535 + }; 536 + 537 + static const struct drm_plane_funcs meson_overlay_funcs = { 538 + .update_plane = drm_atomic_helper_update_plane, 539 + .disable_plane = drm_atomic_helper_disable_plane, 540 + .destroy = drm_plane_cleanup, 541 + .reset = drm_atomic_helper_plane_reset, 542 + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 543 + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 544 + }; 545 + 546 + static const uint32_t supported_drm_formats[] = { 547 + DRM_FORMAT_YUYV, 548 + DRM_FORMAT_NV12, 549 + DRM_FORMAT_NV21, 550 + DRM_FORMAT_YUV444, 551 + DRM_FORMAT_YUV422, 552 + DRM_FORMAT_YUV420, 553 + DRM_FORMAT_YUV411, 554 + DRM_FORMAT_YUV410, 555 + }; 556 + 557 + int 
meson_overlay_create(struct meson_drm *priv) 558 + { 559 + struct meson_overlay *meson_overlay; 560 + struct drm_plane *plane; 561 + 562 + DRM_DEBUG_DRIVER("\n"); 563 + 564 + meson_overlay = devm_kzalloc(priv->drm->dev, sizeof(*meson_overlay), 565 + GFP_KERNEL); 566 + if (!meson_overlay) 567 + return -ENOMEM; 568 + 569 + meson_overlay->priv = priv; 570 + plane = &meson_overlay->base; 571 + 572 + drm_universal_plane_init(priv->drm, plane, 0xFF, 573 + &meson_overlay_funcs, 574 + supported_drm_formats, 575 + ARRAY_SIZE(supported_drm_formats), 576 + NULL, 577 + DRM_PLANE_TYPE_OVERLAY, "meson_overlay_plane"); 578 + 579 + drm_plane_helper_add(plane, &meson_overlay_helper_funcs); 580 + 581 + priv->overlay_plane = plane; 582 + 583 + DRM_DEBUG_DRIVER("\n"); 584 + 585 + return 0; 586 + }
+14
drivers/gpu/drm/meson/meson_overlay.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + /* 3 + * Copyright (C) 2018 BayLibre, SAS 4 + * Author: Neil Armstrong <narmstrong@baylibre.com> 5 + */ 6 + 7 + #ifndef __MESON_OVERLAY_H 8 + #define __MESON_OVERLAY_H 9 + 10 + #include "meson_drv.h" 11 + 12 + int meson_overlay_create(struct meson_drm *priv); 13 + 14 + #endif /* __MESON_OVERLAY_H */
+163 -24
drivers/gpu/drm/meson/meson_plane.c
··· 24 24 #include <linux/kernel.h> 25 25 #include <linux/module.h> 26 26 #include <linux/mutex.h> 27 + #include <linux/bitfield.h> 27 28 #include <linux/platform_device.h> 28 29 #include <drm/drmP.h> 29 30 #include <drm/drm_atomic.h> ··· 40 39 #include "meson_canvas.h" 41 40 #include "meson_registers.h" 42 41 42 + /* OSD_SCI_WH_M1 */ 43 + #define SCI_WH_M1_W(w) FIELD_PREP(GENMASK(28, 16), w) 44 + #define SCI_WH_M1_H(h) FIELD_PREP(GENMASK(12, 0), h) 45 + 46 + /* OSD_SCO_H_START_END */ 47 + /* OSD_SCO_V_START_END */ 48 + #define SCO_HV_START(start) FIELD_PREP(GENMASK(27, 16), start) 49 + #define SCO_HV_END(end) FIELD_PREP(GENMASK(11, 0), end) 50 + 51 + /* OSD_SC_CTRL0 */ 52 + #define SC_CTRL0_PATH_EN BIT(3) 53 + #define SC_CTRL0_SEL_OSD1 BIT(2) 54 + 55 + /* OSD_VSC_CTRL0 */ 56 + #define VSC_BANK_LEN(value) FIELD_PREP(GENMASK(2, 0), value) 57 + #define VSC_TOP_INI_RCV_NUM(value) FIELD_PREP(GENMASK(6, 3), value) 58 + #define VSC_TOP_RPT_L0_NUM(value) FIELD_PREP(GENMASK(9, 8), value) 59 + #define VSC_BOT_INI_RCV_NUM(value) FIELD_PREP(GENMASK(14, 11), value) 60 + #define VSC_BOT_RPT_L0_NUM(value) FIELD_PREP(GENMASK(17, 16), value) 61 + #define VSC_PROG_INTERLACE BIT(23) 62 + #define VSC_VERTICAL_SCALER_EN BIT(24) 63 + 64 + /* OSD_VSC_INI_PHASE */ 65 + #define VSC_INI_PHASE_BOT(bottom) FIELD_PREP(GENMASK(31, 16), bottom) 66 + #define VSC_INI_PHASE_TOP(top) FIELD_PREP(GENMASK(15, 0), top) 67 + 68 + /* OSD_HSC_CTRL0 */ 69 + #define HSC_BANK_LENGTH(value) FIELD_PREP(GENMASK(2, 0), value) 70 + #define HSC_INI_RCV_NUM0(value) FIELD_PREP(GENMASK(6, 3), value) 71 + #define HSC_RPT_P0_NUM0(value) FIELD_PREP(GENMASK(9, 8), value) 72 + #define HSC_HORIZ_SCALER_EN BIT(22) 73 + 74 + /* VPP_OSD_VSC_PHASE_STEP */ 75 + /* VPP_OSD_HSC_PHASE_STEP */ 76 + #define SC_PHASE_STEP(value) FIELD_PREP(GENMASK(27, 0), value) 77 + 43 78 struct meson_plane { 44 79 struct drm_plane base; 45 80 struct meson_drm *priv; 46 81 }; 47 82 #define to_meson_plane(x) container_of(x, struct meson_plane, base) 
83 + 84 + #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) 48 85 49 86 static int meson_plane_atomic_check(struct drm_plane *plane, 50 87 struct drm_plane_state *state) ··· 96 57 if (IS_ERR(crtc_state)) 97 58 return PTR_ERR(crtc_state); 98 59 60 + /* 61 + * Only allow : 62 + * - Upscaling up to 5x, vertical and horizontal 63 + * - Final coordinates must match crtc size 64 + */ 99 65 return drm_atomic_helper_check_plane_state(state, crtc_state, 66 + FRAC_16_16(1, 5), 100 67 DRM_PLANE_HELPER_NO_SCALING, 101 - DRM_PLANE_HELPER_NO_SCALING, 102 - true, true); 68 + false, true); 103 69 } 104 70 105 71 /* Takes a fixed 16.16 number and converts it to integer. */ ··· 118 74 { 119 75 struct meson_plane *meson_plane = to_meson_plane(plane); 120 76 struct drm_plane_state *state = plane->state; 121 - struct drm_framebuffer *fb = state->fb; 77 + struct drm_rect dest = drm_plane_state_dest(state); 122 78 struct meson_drm *priv = meson_plane->priv; 79 + struct drm_framebuffer *fb = state->fb; 123 80 struct drm_gem_cma_object *gem; 124 - struct drm_rect src = { 125 - .x1 = (state->src_x), 126 - .y1 = (state->src_y), 127 - .x2 = (state->src_x + state->src_w), 128 - .y2 = (state->src_y + state->src_h), 129 - }; 130 - struct drm_rect dest = { 131 - .x1 = state->crtc_x, 132 - .y1 = state->crtc_y, 133 - .x2 = state->crtc_x + state->crtc_w, 134 - .y2 = state->crtc_y + state->crtc_h, 135 - }; 136 81 unsigned long flags; 82 + int vsc_ini_rcv_num, vsc_ini_rpt_p0_num; 83 + int vsc_bot_rcv_num, vsc_bot_rpt_p0_num; 84 + int hsc_ini_rcv_num, hsc_ini_rpt_p0_num; 85 + int hf_phase_step, vf_phase_step; 86 + int src_w, src_h, dst_w, dst_h; 87 + int bot_ini_phase; 88 + int hf_bank_len; 89 + int vf_bank_len; 90 + u8 canvas_id_osd1; 137 91 138 92 /* 139 93 * Update Coordinates ··· 146 104 (0xFF << OSD_GLOBAL_ALPHA_SHIFT) | 147 105 OSD_BLK0_ENABLE; 148 106 107 + if (priv->canvas) 108 + canvas_id_osd1 = priv->canvas_id_osd1; 109 + else 110 + canvas_id_osd1 = MESON_CANVAS_ID_OSD1; 111 + 149 112 /* 
Set up BLK0 to point to the right canvas */ 150 - priv->viu.osd1_blk0_cfg[0] = ((MESON_CANVAS_ID_OSD1 << OSD_CANVAS_SEL) | 113 + priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) | 151 114 OSD_ENDIANNESS_LE); 152 115 153 116 /* On GXBB, Use the old non-HDR RGB2YUV converter */ ··· 184 137 break; 185 138 }; 186 139 187 - if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { 188 - priv->viu.osd1_interlace = true; 140 + /* Default scaler parameters */ 141 + vsc_bot_rcv_num = 0; 142 + vsc_bot_rpt_p0_num = 0; 143 + hf_bank_len = 4; 144 + vf_bank_len = 4; 189 145 146 + if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { 147 + vsc_bot_rcv_num = 6; 148 + vsc_bot_rpt_p0_num = 2; 149 + } 150 + 151 + hsc_ini_rcv_num = hf_bank_len; 152 + vsc_ini_rcv_num = vf_bank_len; 153 + hsc_ini_rpt_p0_num = (hf_bank_len / 2) - 1; 154 + vsc_ini_rpt_p0_num = (vf_bank_len / 2) - 1; 155 + 156 + src_w = fixed16_to_int(state->src_w); 157 + src_h = fixed16_to_int(state->src_h); 158 + dst_w = state->crtc_w; 159 + dst_h = state->crtc_h; 160 + 161 + /* 162 + * When the output is interlaced, the OSD must switch between 163 + * each field using the INTERLACE_SEL_ODD (0) of VIU_OSD1_BLK0_CFG_W0 164 + * at each vsync. 165 + * But the vertical scaler can provide such funtionnality if 166 + * is configured for 2:1 scaling with interlace options enabled. 
167 + */ 168 + if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { 190 169 dest.y1 /= 2; 191 170 dest.y2 /= 2; 192 - } else 193 - priv->viu.osd1_interlace = false; 171 + dst_h /= 2; 172 + } 173 + 174 + hf_phase_step = ((src_w << 18) / dst_w) << 6; 175 + vf_phase_step = (src_h << 20) / dst_h; 176 + 177 + if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) 178 + bot_ini_phase = ((vf_phase_step / 2) >> 4); 179 + else 180 + bot_ini_phase = 0; 181 + 182 + vf_phase_step = (vf_phase_step << 4); 183 + 184 + /* In interlaced mode, scaler is always active */ 185 + if (src_h != dst_h || src_w != dst_w) { 186 + priv->viu.osd_sc_i_wh_m1 = SCI_WH_M1_W(src_w - 1) | 187 + SCI_WH_M1_H(src_h - 1); 188 + priv->viu.osd_sc_o_h_start_end = SCO_HV_START(dest.x1) | 189 + SCO_HV_END(dest.x2 - 1); 190 + priv->viu.osd_sc_o_v_start_end = SCO_HV_START(dest.y1) | 191 + SCO_HV_END(dest.y2 - 1); 192 + /* Enable OSD Scaler */ 193 + priv->viu.osd_sc_ctrl0 = SC_CTRL0_PATH_EN | SC_CTRL0_SEL_OSD1; 194 + } else { 195 + priv->viu.osd_sc_i_wh_m1 = 0; 196 + priv->viu.osd_sc_o_h_start_end = 0; 197 + priv->viu.osd_sc_o_v_start_end = 0; 198 + priv->viu.osd_sc_ctrl0 = 0; 199 + } 200 + 201 + /* In interlaced mode, vertical scaler is always active */ 202 + if (src_h != dst_h) { 203 + priv->viu.osd_sc_v_ctrl0 = 204 + VSC_BANK_LEN(vf_bank_len) | 205 + VSC_TOP_INI_RCV_NUM(vsc_ini_rcv_num) | 206 + VSC_TOP_RPT_L0_NUM(vsc_ini_rpt_p0_num) | 207 + VSC_VERTICAL_SCALER_EN; 208 + 209 + if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) 210 + priv->viu.osd_sc_v_ctrl0 |= 211 + VSC_BOT_INI_RCV_NUM(vsc_bot_rcv_num) | 212 + VSC_BOT_RPT_L0_NUM(vsc_bot_rpt_p0_num) | 213 + VSC_PROG_INTERLACE; 214 + 215 + priv->viu.osd_sc_v_phase_step = SC_PHASE_STEP(vf_phase_step); 216 + priv->viu.osd_sc_v_ini_phase = VSC_INI_PHASE_BOT(bot_ini_phase); 217 + } else { 218 + priv->viu.osd_sc_v_ctrl0 = 0; 219 + priv->viu.osd_sc_v_phase_step = 0; 220 + priv->viu.osd_sc_v_ini_phase = 0; 221 + } 222 + 223 + /* Horizontal scaler is only used 
if width does not match */ 224 + if (src_w != dst_w) { 225 + priv->viu.osd_sc_h_ctrl0 = 226 + HSC_BANK_LENGTH(hf_bank_len) | 227 + HSC_INI_RCV_NUM0(hsc_ini_rcv_num) | 228 + HSC_RPT_P0_NUM0(hsc_ini_rpt_p0_num) | 229 + HSC_HORIZ_SCALER_EN; 230 + priv->viu.osd_sc_h_phase_step = SC_PHASE_STEP(hf_phase_step); 231 + priv->viu.osd_sc_h_ini_phase = 0; 232 + } else { 233 + priv->viu.osd_sc_h_ctrl0 = 0; 234 + priv->viu.osd_sc_h_phase_step = 0; 235 + priv->viu.osd_sc_h_ini_phase = 0; 236 + } 194 237 195 238 /* 196 239 * The format of these registers is (x2 << 16 | x1), 197 240 * where x2 is exclusive. 198 241 * e.g. +30x1920 would be (1919 << 16) | 30 199 242 */ 200 - priv->viu.osd1_blk0_cfg[1] = ((fixed16_to_int(src.x2) - 1) << 16) | 201 - fixed16_to_int(src.x1); 202 - priv->viu.osd1_blk0_cfg[2] = ((fixed16_to_int(src.y2) - 1) << 16) | 203 - fixed16_to_int(src.y1); 243 + priv->viu.osd1_blk0_cfg[1] = 244 + ((fixed16_to_int(state->src.x2) - 1) << 16) | 245 + fixed16_to_int(state->src.x1); 246 + priv->viu.osd1_blk0_cfg[2] = 247 + ((fixed16_to_int(state->src.y2) - 1) << 16) | 248 + fixed16_to_int(state->src.y1); 204 249 priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1; 205 250 priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1; 206 251
+3
drivers/gpu/drm/meson/meson_registers.h
··· 286 286 #define VIU_OSD1_MATRIX_COEF22_30 0x1a9d 287 287 #define VIU_OSD1_MATRIX_COEF31_32 0x1a9e 288 288 #define VIU_OSD1_MATRIX_COEF40_41 0x1a9f 289 + #define VD1_IF0_GEN_REG3 0x1aa7 289 290 #define VIU_OSD1_EOTF_CTL 0x1ad4 290 291 #define VIU_OSD1_EOTF_COEF00_01 0x1ad5 291 292 #define VIU_OSD1_EOTF_COEF02_10 0x1ad6 ··· 298 297 #define VIU_OSD1_OETF_CTL 0x1adc 299 298 #define VIU_OSD1_OETF_LUT_ADDR_PORT 0x1add 300 299 #define VIU_OSD1_OETF_LUT_DATA_PORT 0x1ade 300 + #define AFBC_ENABLE 0x1ae0 301 301 302 302 /* vpp */ 303 303 #define VPP_DUMMY_DATA 0x1d00 ··· 351 349 #define VPP_VD2_PREBLEND BIT(15) 352 350 #define VPP_OSD1_PREBLEND BIT(16) 353 351 #define VPP_OSD2_PREBLEND BIT(17) 352 + #define VPP_COLOR_MNG_ENABLE BIT(28) 354 353 #define VPP_OFIFO_SIZE 0x1d27 355 354 #define VPP_FIFO_STATUS 0x1d28 356 355 #define VPP_SMOKE_CTRL 0x1d29
+15
drivers/gpu/drm/meson/meson_viu.c
··· 329 329 0xff << OSD_REPLACE_SHIFT, 330 330 priv->io_base + _REG(VIU_OSD2_CTRL_STAT2)); 331 331 332 + /* Disable VD1 AFBC */ 333 + /* di_mif0_en=0 mif0_to_vpp_en=0 di_mad_en=0 */ 334 + writel_bits_relaxed(0x7 << 16, 0, 335 + priv->io_base + _REG(VIU_MISC_CTRL0)); 336 + /* afbc vd1 set=0 */ 337 + writel_bits_relaxed(BIT(20), 0, 338 + priv->io_base + _REG(VIU_MISC_CTRL0)); 339 + writel_relaxed(0, priv->io_base + _REG(AFBC_ENABLE)); 340 + 341 + writel_relaxed(0x00FF00C0, 342 + priv->io_base + _REG(VD1_IF0_LUMA_FIFO_SIZE)); 343 + writel_relaxed(0x00FF00C0, 344 + priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE)); 345 + 346 + 332 347 priv->viu.osd1_enabled = false; 333 348 priv->viu.osd1_commit = false; 334 349 priv->viu.osd1_interlace = false;
+43 -47
drivers/gpu/drm/meson/meson_vpp.c
··· 51 51 writel(mux, priv->io_base + _REG(VPU_VIU_VENC_MUX_CTRL)); 52 52 } 53 53 54 - /* 55 - * When the output is interlaced, the OSD must switch between 56 - * each field using the INTERLACE_SEL_ODD (0) of VIU_OSD1_BLK0_CFG_W0 57 - * at each vsync. 58 - * But the vertical scaler can provide such funtionnality if 59 - * is configured for 2:1 scaling with interlace options enabled. 60 - */ 61 - void meson_vpp_setup_interlace_vscaler_osd1(struct meson_drm *priv, 62 - struct drm_rect *input) 63 - { 64 - writel_relaxed(BIT(3) /* Enable scaler */ | 65 - BIT(2), /* Select OSD1 */ 66 - priv->io_base + _REG(VPP_OSD_SC_CTRL0)); 67 - 68 - writel_relaxed(((drm_rect_width(input) - 1) << 16) | 69 - (drm_rect_height(input) - 1), 70 - priv->io_base + _REG(VPP_OSD_SCI_WH_M1)); 71 - /* 2:1 scaling */ 72 - writel_relaxed(((input->x1) << 16) | (input->x2), 73 - priv->io_base + _REG(VPP_OSD_SCO_H_START_END)); 74 - writel_relaxed(((input->y1 >> 1) << 16) | (input->y2 >> 1), 75 - priv->io_base + _REG(VPP_OSD_SCO_V_START_END)); 76 - 77 - /* 2:1 scaling values */ 78 - writel_relaxed(BIT(16), priv->io_base + _REG(VPP_OSD_VSC_INI_PHASE)); 79 - writel_relaxed(BIT(25), priv->io_base + _REG(VPP_OSD_VSC_PHASE_STEP)); 80 - 81 - writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0)); 82 - 83 - writel_relaxed((4 << 0) /* osd_vsc_bank_length */ | 84 - (4 << 3) /* osd_vsc_top_ini_rcv_num0 */ | 85 - (1 << 8) /* osd_vsc_top_rpt_p0_num0 */ | 86 - (6 << 11) /* osd_vsc_bot_ini_rcv_num0 */ | 87 - (2 << 16) /* osd_vsc_bot_rpt_p0_num0 */ | 88 - BIT(23) /* osd_prog_interlace */ | 89 - BIT(24), /* Enable vertical scaler */ 90 - priv->io_base + _REG(VPP_OSD_VSC_CTRL0)); 91 - } 92 - 93 - void meson_vpp_disable_interlace_vscaler_osd1(struct meson_drm *priv) 94 - { 95 - writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0)); 96 - writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0)); 97 - writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0)); 98 - } 99 - 100 54 static unsigned int 
vpp_filter_coefs_4point_bspline[] = { 101 55 0x15561500, 0x14561600, 0x13561700, 0x12561800, 102 56 0x11551a00, 0x11541b00, 0x10541c00, 0x0f541d00, ··· 74 120 for (i = 0; i < 33; i++) 75 121 writel_relaxed(coefs[i], 76 122 priv->io_base + _REG(VPP_OSD_SCALE_COEF)); 123 + } 124 + 125 + static const uint32_t vpp_filter_coefs_bicubic[] = { 126 + 0x00800000, 0x007f0100, 0xff7f0200, 0xfe7f0300, 127 + 0xfd7e0500, 0xfc7e0600, 0xfb7d0800, 0xfb7c0900, 128 + 0xfa7b0b00, 0xfa7a0dff, 0xf9790fff, 0xf97711ff, 129 + 0xf87613ff, 0xf87416fe, 0xf87218fe, 0xf8701afe, 130 + 0xf76f1dfd, 0xf76d1ffd, 0xf76b21fd, 0xf76824fd, 131 + 0xf76627fc, 0xf76429fc, 0xf7612cfc, 0xf75f2ffb, 132 + 0xf75d31fb, 0xf75a34fb, 0xf75837fa, 0xf7553afa, 133 + 0xf8523cfa, 0xf8503ff9, 0xf84d42f9, 0xf84a45f9, 134 + 0xf84848f8 135 + }; 136 + 137 + static void meson_vpp_write_vd_scaling_filter_coefs(struct meson_drm *priv, 138 + const unsigned int *coefs, 139 + bool is_horizontal) 140 + { 141 + int i; 142 + 143 + writel_relaxed(is_horizontal ? 
BIT(8) : 0, 144 + priv->io_base + _REG(VPP_SCALE_COEF_IDX)); 145 + for (i = 0; i < 33; i++) 146 + writel_relaxed(coefs[i], 147 + priv->io_base + _REG(VPP_SCALE_COEF)); 77 148 } 78 149 79 150 void meson_vpp_init(struct meson_drm *priv) ··· 129 150 130 151 /* Force all planes off */ 131 152 writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND | 132 - VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND, 0, 153 + VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND | 154 + VPP_VD1_PREBLEND | VPP_VD2_PREBLEND, 0, 133 155 priv->io_base + _REG(VPP_MISC)); 156 + 157 + /* Setup default VD settings */ 158 + writel_relaxed(4096, 159 + priv->io_base + _REG(VPP_PREBLEND_VD1_H_START_END)); 160 + writel_relaxed(4096, 161 + priv->io_base + _REG(VPP_BLEND_VD2_H_START_END)); 134 162 135 163 /* Disable Scalers */ 136 164 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0)); 137 165 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0)); 138 166 writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0)); 167 + writel_relaxed(4 | (4 << 8) | BIT(15), 168 + priv->io_base + _REG(VPP_SC_MISC)); 169 + 170 + writel_relaxed(1, priv->io_base + _REG(VPP_VADJ_CTRL)); 139 171 140 172 /* Write in the proper filter coefficients. */ 141 173 meson_vpp_write_scaling_filter_coefs(priv, 142 174 vpp_filter_coefs_4point_bspline, false); 143 175 meson_vpp_write_scaling_filter_coefs(priv, 144 176 vpp_filter_coefs_4point_bspline, true); 177 + 178 + /* Write the VD proper filter coefficients. */ 179 + meson_vpp_write_vd_scaling_filter_coefs(priv, vpp_filter_coefs_bicubic, 180 + false); 181 + meson_vpp_write_vd_scaling_filter_coefs(priv, vpp_filter_coefs_bicubic, 182 + true); 145 183 }
+1 -1
drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
··· 96 96 ret = mipi_dsi_dcs_set_tear_on(dsi, 97 97 MIPI_DSI_DCS_TEAR_MODE_VBLANK); 98 98 if (ret) { 99 - DRM_DEV_ERROR(s6->dev, "failed to enble vblank TE (%d)\n", 99 + DRM_DEV_ERROR(s6->dev, "failed to enable vblank TE (%d)\n", 100 100 ret); 101 101 return ret; 102 102 }
+12 -6
drivers/gpu/drm/qxl/qxl_display.c
··· 622 622 if (ret) 623 623 goto out_kunmap; 624 624 625 - ret = qxl_release_reserve_list(release, true); 625 + ret = qxl_bo_pin(cursor_bo); 626 626 if (ret) 627 627 goto out_free_bo; 628 + 629 + ret = qxl_release_reserve_list(release, true); 630 + if (ret) 631 + goto out_unpin; 628 632 629 633 ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); 630 634 if (ret) ··· 674 670 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 675 671 qxl_release_fence_buffer_objects(release); 676 672 677 - if (old_cursor_bo) 678 - qxl_bo_unref(&old_cursor_bo); 679 - 673 + if (old_cursor_bo != NULL) 674 + qxl_bo_unpin(old_cursor_bo); 675 + qxl_bo_unref(&old_cursor_bo); 680 676 qxl_bo_unref(&cursor_bo); 681 677 682 678 return; 683 679 684 680 out_backoff: 685 681 qxl_release_backoff_reserve_list(release); 682 + out_unpin: 683 + qxl_bo_unpin(cursor_bo); 686 684 out_free_bo: 687 685 qxl_bo_unref(&cursor_bo); 688 686 out_kunmap: ··· 763 757 } 764 758 } 765 759 766 - ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); 760 + ret = qxl_bo_pin(user_bo); 767 761 if (ret) 768 762 return ret; 769 763 ··· 1110 1104 } 1111 1105 qdev->monitors_config_bo = gem_to_qxl_bo(gobj); 1112 1106 1113 - ret = qxl_bo_pin(qdev->monitors_config_bo, QXL_GEM_DOMAIN_VRAM, NULL); 1107 + ret = qxl_bo_pin(qdev->monitors_config_bo); 1114 1108 if (ret) 1115 1109 return ret; 1116 1110
+1 -2
drivers/gpu/drm/qxl/qxl_draw.c
··· 247 247 qxl_release_fence_buffer_objects(release); 248 248 249 249 out_free_palette: 250 - if (palette_bo) 251 - qxl_bo_unref(&palette_bo); 250 + qxl_bo_unref(&palette_bo); 252 251 out_free_image: 253 252 qxl_image_free_objects(qdev, dimage); 254 253 out_free_drawable:
+1 -1
drivers/gpu/drm/qxl/qxl_fb.c
··· 111 111 qbo->surf.stride = mode_cmd->pitches[0]; 112 112 qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB; 113 113 114 - ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL); 114 + ret = qxl_bo_pin(qbo); 115 115 if (ret) { 116 116 goto out_unref; 117 117 }
+2 -4
drivers/gpu/drm/qxl/qxl_kms.c
··· 313 313 314 314 void qxl_device_fini(struct qxl_device *qdev) 315 315 { 316 - if (qdev->current_release_bo[0]) 317 - qxl_bo_unref(&qdev->current_release_bo[0]); 318 - if (qdev->current_release_bo[1]) 319 - qxl_bo_unref(&qdev->current_release_bo[1]); 316 + qxl_bo_unref(&qdev->current_release_bo[0]); 317 + qxl_bo_unref(&qdev->current_release_bo[1]); 320 318 flush_work(&qdev->gc_work); 321 319 qxl_ring_free(qdev->command_ring); 322 320 qxl_ring_free(qdev->cursor_ring);
+7 -15
drivers/gpu/drm/qxl/qxl_object.c
··· 186 186 struct qxl_bo *bo, void *pmap) 187 187 { 188 188 struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; 189 - struct io_mapping *map; 190 189 191 - if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 192 - map = qdev->vram_mapping; 193 - else if (bo->tbo.mem.mem_type == TTM_PL_PRIV) 194 - map = qdev->surface_mapping; 195 - else 190 + if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) && 191 + (bo->tbo.mem.mem_type != TTM_PL_PRIV)) 196 192 goto fallback; 197 193 198 194 io_mapping_unmap_atomic(pmap); ··· 196 200 (void) ttm_mem_io_lock(man, false); 197 201 ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem); 198 202 ttm_mem_io_unlock(man); 199 - return ; 203 + return; 200 204 fallback: 201 205 qxl_bo_kunmap(bo); 202 206 } ··· 216 220 return bo; 217 221 } 218 222 219 - static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) 223 + static int __qxl_bo_pin(struct qxl_bo *bo) 220 224 { 221 225 struct ttm_operation_ctx ctx = { false, false }; 222 226 struct drm_device *ddev = bo->gem_base.dev; ··· 224 228 225 229 if (bo->pin_count) { 226 230 bo->pin_count++; 227 - if (gpu_addr) 228 - *gpu_addr = qxl_bo_gpu_offset(bo); 229 231 return 0; 230 232 } 231 - qxl_ttm_placement_from_domain(bo, domain, true); 233 + qxl_ttm_placement_from_domain(bo, bo->type, true); 232 234 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 233 235 if (likely(r == 0)) { 234 236 bo->pin_count = 1; 235 - if (gpu_addr != NULL) 236 - *gpu_addr = qxl_bo_gpu_offset(bo); 237 237 } 238 238 if (unlikely(r != 0)) 239 239 dev_err(ddev->dev, "%p pin failed\n", bo); ··· 262 270 * beforehand, use the internal version directly __qxl_bo_pin. 263 271 * 264 272 */ 265 - int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) 273 + int qxl_bo_pin(struct qxl_bo *bo) 266 274 { 267 275 int r; 268 276 ··· 270 278 if (r) 271 279 return r; 272 280 273 - r = __qxl_bo_pin(bo, bo->type, NULL); 281 + r = __qxl_bo_pin(bo); 274 282 qxl_bo_unreserve(bo); 275 283 return r; 276 284 }
+1 -1
drivers/gpu/drm/qxl/qxl_object.h
··· 97 97 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map); 98 98 extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo); 99 99 extern void qxl_bo_unref(struct qxl_bo **bo); 100 - extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); 100 + extern int qxl_bo_pin(struct qxl_bo *bo); 101 101 extern int qxl_bo_unpin(struct qxl_bo *bo); 102 102 extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned); 103 103 extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
-4
drivers/gpu/drm/qxl/qxl_release.c
··· 427 427 struct ttm_buffer_object *bo; 428 428 struct ttm_bo_global *glob; 429 429 struct ttm_bo_device *bdev; 430 - struct ttm_bo_driver *driver; 431 - struct qxl_bo *qbo; 432 430 struct ttm_validate_buffer *entry; 433 431 struct qxl_device *qdev; 434 432 ··· 447 449 release->id | 0xf0000000, release->base.seqno); 448 450 trace_dma_fence_emit(&release->base); 449 451 450 - driver = bdev->driver; 451 452 glob = bdev->glob; 452 453 453 454 spin_lock(&glob->lru_lock); 454 455 455 456 list_for_each_entry(entry, &release->bos, head) { 456 457 bo = entry->bo; 457 - qbo = to_qxl_bo(bo); 458 458 459 459 reservation_object_add_shared_fence(bo->resv, &release->base); 460 460 ttm_bo_add_to_lru(bo);
+1 -1
drivers/gpu/drm/rockchip/cdn-dp-reg.c
··· 147 147 } 148 148 149 149 static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp, 150 - u8 *buff, u8 buff_size) 150 + u8 *buff, u16 buff_size) 151 151 { 152 152 u32 i; 153 153 int ret;
-2
drivers/gpu/drm/sti/sti_crtc.c
··· 252 252 struct sti_compositor *compo; 253 253 struct drm_crtc *crtc = data; 254 254 struct sti_mixer *mixer; 255 - struct sti_private *priv; 256 255 unsigned int pipe; 257 256 258 - priv = crtc->dev->dev_private; 259 257 pipe = drm_crtc_index(crtc); 260 258 compo = container_of(nb, struct sti_compositor, vtg_vblank_nb[pipe]); 261 259 mixer = compo->mixer[pipe];
+15 -14
drivers/gpu/drm/sun4i/sun4i_tcon.c
··· 478 478 } 479 479 480 480 static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, 481 + const struct drm_encoder *encoder, 481 482 const struct drm_display_mode *mode) 482 483 { 484 + struct drm_connector *connector = sun4i_tcon_get_connector(encoder); 485 + struct drm_display_info display_info = connector->display_info; 483 486 unsigned int bp, hsync, vsync; 484 487 u8 clk_delay; 485 488 u32 val = 0; ··· 494 491 sun4i_tcon0_mode_set_common(tcon, mode); 495 492 496 493 /* Set dithering if needed */ 497 - if (tcon->panel) 498 - sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector); 494 + sun4i_tcon0_mode_set_dithering(tcon, connector); 499 495 500 496 /* Adjust clock delay */ 501 497 clk_delay = sun4i_tcon_get_clk_delay(mode, 0); ··· 543 541 if (mode->flags & DRM_MODE_FLAG_PVSYNC) 544 542 val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE; 545 543 544 + if (display_info.bus_flags & DRM_BUS_FLAG_DE_LOW) 545 + val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE; 546 + 546 547 /* 547 548 * On A20 and similar SoCs, the only way to achieve Positive Edge 548 549 * (Rising Edge), is setting dclk clock phase to 2/3(240°). ··· 561 556 * Following code is a way to avoid quirks all around TCON 562 557 * and DOTCLOCK drivers. 
563 558 */ 564 - if (tcon->panel) { 565 - struct drm_panel *panel = tcon->panel; 566 - struct drm_connector *connector = panel->connector; 567 - struct drm_display_info display_info = connector->display_info; 559 + if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) 560 + clk_set_phase(tcon->dclk, 240); 568 561 569 - if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) 570 - clk_set_phase(tcon->dclk, 240); 571 - 572 - if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) 573 - clk_set_phase(tcon->dclk, 0); 574 - } 562 + if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) 563 + clk_set_phase(tcon->dclk, 0); 575 564 576 565 regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG, 577 - SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE, 566 + SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | 567 + SUN4I_TCON0_IO_POL_VSYNC_POSITIVE | 568 + SUN4I_TCON0_IO_POL_DE_NEGATIVE, 578 569 val); 579 570 580 571 /* Map output pins to channel 0 */ ··· 685 684 sun4i_tcon0_mode_set_lvds(tcon, encoder, mode); 686 685 break; 687 686 case DRM_MODE_ENCODER_NONE: 688 - sun4i_tcon0_mode_set_rgb(tcon, mode); 687 + sun4i_tcon0_mode_set_rgb(tcon, encoder, mode); 689 688 sun4i_tcon_set_mux(tcon, 0, encoder); 690 689 break; 691 690 case DRM_MODE_ENCODER_TVDAC:
+1
drivers/gpu/drm/sun4i/sun4i_tcon.h
··· 116 116 117 117 #define SUN4I_TCON0_IO_POL_REG 0x88 118 118 #define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28) 119 + #define SUN4I_TCON0_IO_POL_DE_NEGATIVE BIT(27) 119 120 #define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25) 120 121 #define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24) 121 122
-71
drivers/gpu/drm/tinydrm/core/tinydrm-core.c
··· 36 36 * and registers the DRM device using devm_tinydrm_register(). 37 37 */ 38 38 39 - /** 40 - * tinydrm_gem_cma_prime_import_sg_table - Produce a CMA GEM object from 41 - * another driver's scatter/gather table of pinned pages 42 - * @drm: DRM device to import into 43 - * @attach: DMA-BUF attachment 44 - * @sgt: Scatter/gather table of pinned pages 45 - * 46 - * This function imports a scatter/gather table exported via DMA-BUF by 47 - * another driver using drm_gem_cma_prime_import_sg_table(). It sets the 48 - * kernel virtual address on the CMA object. Drivers should use this as their 49 - * &drm_driver->gem_prime_import_sg_table callback if they need the virtual 50 - * address. tinydrm_gem_cma_free_object() should be used in combination with 51 - * this function. 52 - * 53 - * Returns: 54 - * A pointer to a newly created GEM object or an ERR_PTR-encoded negative 55 - * error code on failure. 56 - */ 57 - struct drm_gem_object * 58 - tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm, 59 - struct dma_buf_attachment *attach, 60 - struct sg_table *sgt) 61 - { 62 - struct drm_gem_cma_object *cma_obj; 63 - struct drm_gem_object *obj; 64 - void *vaddr; 65 - 66 - vaddr = dma_buf_vmap(attach->dmabuf); 67 - if (!vaddr) { 68 - DRM_ERROR("Failed to vmap PRIME buffer\n"); 69 - return ERR_PTR(-ENOMEM); 70 - } 71 - 72 - obj = drm_gem_cma_prime_import_sg_table(drm, attach, sgt); 73 - if (IS_ERR(obj)) { 74 - dma_buf_vunmap(attach->dmabuf, vaddr); 75 - return obj; 76 - } 77 - 78 - cma_obj = to_drm_gem_cma_obj(obj); 79 - cma_obj->vaddr = vaddr; 80 - 81 - return obj; 82 - } 83 - EXPORT_SYMBOL(tinydrm_gem_cma_prime_import_sg_table); 84 - 85 - /** 86 - * tinydrm_gem_cma_free_object - Free resources associated with a CMA GEM 87 - * object 88 - * @gem_obj: GEM object to free 89 - * 90 - * This function frees the backing memory of the CMA GEM object, cleans up the 91 - * GEM object state and frees the memory used to store the object itself using 92 - * 
drm_gem_cma_free_object(). It also handles PRIME buffers which has the kernel 93 - * virtual address set by tinydrm_gem_cma_prime_import_sg_table(). Drivers 94 - * can use this as their &drm_driver->gem_free_object_unlocked callback. 95 - */ 96 - void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj) 97 - { 98 - if (gem_obj->import_attach) { 99 - struct drm_gem_cma_object *cma_obj; 100 - 101 - cma_obj = to_drm_gem_cma_obj(gem_obj); 102 - dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr); 103 - cma_obj->vaddr = NULL; 104 - } 105 - 106 - drm_gem_cma_free_object(gem_obj); 107 - } 108 - EXPORT_SYMBOL_GPL(tinydrm_gem_cma_free_object); 109 - 110 39 static struct drm_framebuffer * 111 40 tinydrm_fb_create(struct drm_device *drm, struct drm_file *file_priv, 112 41 const struct drm_mode_fb_cmd2 *mode_cmd)
+6
drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
··· 9 9 10 10 #include <linux/backlight.h> 11 11 #include <linux/dma-buf.h> 12 + #include <linux/module.h> 12 13 #include <linux/pm.h> 13 14 #include <linux/spi/spi.h> 14 15 #include <linux/swab.h> 15 16 17 + #include <drm/drm_device.h> 18 + #include <drm/drm_drv.h> 19 + #include <drm/drm_fourcc.h> 20 + #include <drm/drm_print.h> 16 21 #include <drm/tinydrm/tinydrm.h> 17 22 #include <drm/tinydrm/tinydrm-helpers.h> 23 + #include <uapi/drm/drm.h> 18 24 19 25 static unsigned int spi_max; 20 26 module_param(spi_max, uint, 0400);
+2 -2
drivers/gpu/drm/tinydrm/hx8357d.c
··· 16 16 #include <linux/property.h> 17 17 #include <linux/spi/spi.h> 18 18 19 - #include <drm/drm_fb_helper.h> 19 + #include <drm/drm_gem_cma_helper.h> 20 20 #include <drm/drm_gem_framebuffer_helper.h> 21 21 #include <drm/drm_modeset_helper.h> 22 22 #include <drm/tinydrm/mipi-dbi.h> ··· 188 188 static struct drm_driver hx8357d_driver = { 189 189 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, 190 190 .fops = &hx8357d_fops, 191 - TINYDRM_GEM_DRIVER_OPS, 191 + DRM_GEM_CMA_VMAP_DRIVER_OPS, 192 192 .debugfs_init = mipi_dbi_debugfs_init, 193 193 .name = "hx8357d", 194 194 .desc = "HX8357D",
+3 -2
drivers/gpu/drm/tinydrm/ili9225.c
··· 20 20 #include <linux/spi/spi.h> 21 21 #include <video/mipi_display.h> 22 22 23 - #include <drm/drm_fb_helper.h> 23 + #include <drm/drm_fb_cma_helper.h> 24 + #include <drm/drm_gem_cma_helper.h> 24 25 #include <drm/drm_gem_framebuffer_helper.h> 25 26 #include <drm/tinydrm/mipi-dbi.h> 26 27 #include <drm/tinydrm/tinydrm-helpers.h> ··· 368 367 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | 369 368 DRIVER_ATOMIC, 370 369 .fops = &ili9225_fops, 371 - TINYDRM_GEM_DRIVER_OPS, 370 + DRM_GEM_CMA_VMAP_DRIVER_OPS, 372 371 .name = "ili9225", 373 372 .desc = "Ilitek ILI9225", 374 373 .date = "20171106",
+2 -2
drivers/gpu/drm/tinydrm/ili9341.c
··· 15 15 #include <linux/property.h> 16 16 #include <linux/spi/spi.h> 17 17 18 - #include <drm/drm_fb_helper.h> 18 + #include <drm/drm_gem_cma_helper.h> 19 19 #include <drm/drm_gem_framebuffer_helper.h> 20 20 #include <drm/drm_modeset_helper.h> 21 21 #include <drm/tinydrm/mipi-dbi.h> ··· 144 144 static struct drm_driver ili9341_driver = { 145 145 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, 146 146 .fops = &ili9341_fops, 147 - TINYDRM_GEM_DRIVER_OPS, 147 + DRM_GEM_CMA_VMAP_DRIVER_OPS, 148 148 .debugfs_init = mipi_dbi_debugfs_init, 149 149 .name = "ili9341", 150 150 .desc = "Ilitek ILI9341",
+3 -3
drivers/gpu/drm/tinydrm/mi0283qt.c
··· 17 17 #include <linux/regulator/consumer.h> 18 18 #include <linux/spi/spi.h> 19 19 20 - #include <drm/drm_fb_helper.h> 21 - #include <drm/drm_modeset_helper.h> 20 + #include <drm/drm_gem_cma_helper.h> 22 21 #include <drm/drm_gem_framebuffer_helper.h> 22 + #include <drm/drm_modeset_helper.h> 23 23 #include <drm/tinydrm/mipi-dbi.h> 24 24 #include <drm/tinydrm/tinydrm-helpers.h> 25 25 #include <video/mipi_display.h> ··· 153 153 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | 154 154 DRIVER_ATOMIC, 155 155 .fops = &mi0283qt_fops, 156 - TINYDRM_GEM_DRIVER_OPS, 156 + DRM_GEM_CMA_VMAP_DRIVER_OPS, 157 157 .debugfs_init = mipi_dbi_debugfs_init, 158 158 .name = "mi0283qt", 159 159 .desc = "Multi-Inno MI0283QT",
+7 -3
drivers/gpu/drm/tinydrm/mipi-dbi.c
··· 9 9 * (at your option) any later version. 10 10 */ 11 11 12 - #include <drm/drm_gem_framebuffer_helper.h> 13 - #include <drm/tinydrm/mipi-dbi.h> 14 - #include <drm/tinydrm/tinydrm-helpers.h> 15 12 #include <linux/debugfs.h> 16 13 #include <linux/dma-buf.h> 17 14 #include <linux/gpio/consumer.h> 18 15 #include <linux/module.h> 19 16 #include <linux/regulator/consumer.h> 20 17 #include <linux/spi/spi.h> 18 + 19 + #include <drm/drm_fb_cma_helper.h> 20 + #include <drm/drm_gem_cma_helper.h> 21 + #include <drm/drm_gem_framebuffer_helper.h> 22 + #include <drm/tinydrm/mipi-dbi.h> 23 + #include <drm/tinydrm/tinydrm-helpers.h> 24 + #include <uapi/drm/drm.h> 21 25 #include <video/mipi_display.h> 22 26 23 27 #define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
+3 -1
drivers/gpu/drm/tinydrm/repaper.c
··· 26 26 #include <linux/spi/spi.h> 27 27 #include <linux/thermal.h> 28 28 29 + #include <drm/drm_fb_cma_helper.h> 30 + #include <drm/drm_gem_cma_helper.h> 29 31 #include <drm/drm_gem_framebuffer_helper.h> 30 32 #include <drm/tinydrm/tinydrm.h> 31 33 #include <drm/tinydrm/tinydrm-helpers.h> ··· 884 882 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | 885 883 DRIVER_ATOMIC, 886 884 .fops = &repaper_fops, 887 - TINYDRM_GEM_DRIVER_OPS, 885 + DRM_GEM_CMA_VMAP_DRIVER_OPS, 888 886 .name = "repaper", 889 887 .desc = "Pervasive Displays RePaper e-ink panels", 890 888 .date = "20170405",
+3 -2
drivers/gpu/drm/tinydrm/st7586.c
··· 17 17 #include <linux/spi/spi.h> 18 18 #include <video/mipi_display.h> 19 19 20 - #include <drm/drm_fb_helper.h> 20 + #include <drm/drm_fb_cma_helper.h> 21 + #include <drm/drm_gem_cma_helper.h> 21 22 #include <drm/drm_gem_framebuffer_helper.h> 22 23 #include <drm/tinydrm/mipi-dbi.h> 23 24 #include <drm/tinydrm/tinydrm-helpers.h> ··· 304 303 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | 305 304 DRIVER_ATOMIC, 306 305 .fops = &st7586_fops, 307 - TINYDRM_GEM_DRIVER_OPS, 306 + DRM_GEM_CMA_VMAP_DRIVER_OPS, 308 307 .debugfs_init = mipi_dbi_debugfs_init, 309 308 .name = "st7586", 310 309 .desc = "Sitronix ST7586",
+2 -2
drivers/gpu/drm/tinydrm/st7735r.c
··· 14 14 #include <linux/spi/spi.h> 15 15 #include <video/mipi_display.h> 16 16 17 - #include <drm/drm_fb_helper.h> 17 + #include <drm/drm_gem_cma_helper.h> 18 18 #include <drm/drm_gem_framebuffer_helper.h> 19 19 #include <drm/tinydrm/mipi-dbi.h> 20 20 #include <drm/tinydrm/tinydrm-helpers.h> ··· 119 119 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | 120 120 DRIVER_ATOMIC, 121 121 .fops = &st7735r_fops, 122 - TINYDRM_GEM_DRIVER_OPS, 122 + DRM_GEM_CMA_VMAP_DRIVER_OPS, 123 123 .debugfs_init = mipi_dbi_debugfs_init, 124 124 .name = "st7735r", 125 125 .desc = "Sitronix ST7735R",
+12 -10
drivers/gpu/drm/vc4/vc4_plane.c
··· 129 129 130 130 static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst) 131 131 { 132 - if (dst > src) 133 - return VC4_SCALING_PPF; 134 - else if (dst < src) 135 - return VC4_SCALING_TPZ; 136 - else 132 + if (dst == src) 137 133 return VC4_SCALING_NONE; 134 + if (3 * dst >= 2 * src) 135 + return VC4_SCALING_PPF; 136 + else 137 + return VC4_SCALING_TPZ; 138 138 } 139 139 140 140 static bool plane_enabled(struct drm_plane_state *state) ··· 341 341 vc4_get_scaling_mode(vc4_state->src_h[1], 342 342 vc4_state->crtc_h); 343 343 344 - /* YUV conversion requires that horizontal scaling be enabled, 345 - * even on a plane that's otherwise 1:1. Looks like only PPF 346 - * works in that case, so let's pick that one. 344 + /* YUV conversion requires that horizontal scaling be enabled 345 + * on the UV plane even if vc4_get_scaling_mode() returned 346 + * VC4_SCALING_NONE (which can happen when the down-scaling 347 + * ratio is 0.5). Let's force it to VC4_SCALING_PPF in this 348 + * case. 347 349 */ 348 - if (vc4_state->is_unity) 349 - vc4_state->x_scaling[0] = VC4_SCALING_PPF; 350 + if (vc4_state->x_scaling[1] == VC4_SCALING_NONE) 351 + vc4_state->x_scaling[1] = VC4_SCALING_PPF; 350 352 } else { 351 353 vc4_state->is_yuv = false; 352 354 vc4_state->x_scaling[1] = VC4_SCALING_NONE;
+6 -2
drivers/gpu/drm/virtio/virtgpu_drv.h
··· 47 47 #define DRIVER_DATE "0" 48 48 49 49 #define DRIVER_MAJOR 0 50 - #define DRIVER_MINOR 0 51 - #define DRIVER_PATCHLEVEL 1 50 + #define DRIVER_MINOR 1 51 + #define DRIVER_PATCHLEVEL 0 52 52 53 53 /* virtgpu_drm_bus.c */ 54 54 int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev); ··· 131 131 int x1, y1, x2, y2; /* dirty rect */ 132 132 spinlock_t dirty_lock; 133 133 uint32_t hw_res_handle; 134 + struct virtio_gpu_fence *fence; 134 135 }; 135 136 #define to_virtio_gpu_framebuffer(x) \ 136 137 container_of(x, struct virtio_gpu_framebuffer, base) ··· 347 346 int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma); 348 347 349 348 /* virtio_gpu_fence.c */ 349 + struct virtio_gpu_fence *virtio_gpu_fence_alloc( 350 + struct virtio_gpu_device *vgdev); 351 + void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence); 350 352 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, 351 353 struct virtio_gpu_ctrl_hdr *cmd_hdr, 352 354 struct virtio_gpu_fence **fence);
+22 -7
drivers/gpu/drm/virtio/virtgpu_fence.c
··· 67 67 .timeline_value_str = virtio_timeline_value_str, 68 68 }; 69 69 70 + struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev) 71 + { 72 + struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; 73 + struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence), 74 + GFP_ATOMIC); 75 + if (!fence) 76 + return fence; 77 + 78 + fence->drv = drv; 79 + dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0); 80 + 81 + return fence; 82 + } 83 + 84 + void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence) 85 + { 86 + if (!fence) 87 + return; 88 + 89 + dma_fence_put(&fence->f); 90 + } 91 + 70 92 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, 71 93 struct virtio_gpu_ctrl_hdr *cmd_hdr, 72 94 struct virtio_gpu_fence **fence) ··· 96 74 struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; 97 75 unsigned long irq_flags; 98 76 99 - *fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC); 100 - if ((*fence) == NULL) 101 - return -ENOMEM; 102 - 103 77 spin_lock_irqsave(&drv->lock, irq_flags); 104 - (*fence)->drv = drv; 105 78 (*fence)->seq = ++drv->sync_seq; 106 - dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock, 107 - drv->context, (*fence)->seq); 108 79 dma_fence_get(&(*fence)->f); 109 80 list_add_tail(&(*fence)->node, &drv->fences); 110 81 spin_unlock_irqrestore(&drv->lock, irq_flags);
+94 -16
drivers/gpu/drm/virtio/virtgpu_ioctl.c
··· 28 28 #include <drm/drmP.h> 29 29 #include <drm/virtgpu_drm.h> 30 30 #include <drm/ttm/ttm_execbuf_util.h> 31 + #include <linux/sync_file.h> 31 32 32 33 #include "virtgpu_drv.h" 33 34 ··· 106 105 struct virtio_gpu_device *vgdev = dev->dev_private; 107 106 struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv; 108 107 struct drm_gem_object *gobj; 109 - struct virtio_gpu_fence *fence; 108 + struct virtio_gpu_fence *out_fence; 110 109 struct virtio_gpu_object *qobj; 111 110 int ret; 112 111 uint32_t *bo_handles = NULL; ··· 115 114 struct ttm_validate_buffer *buflist = NULL; 116 115 int i; 117 116 struct ww_acquire_ctx ticket; 117 + struct sync_file *sync_file; 118 + int in_fence_fd = exbuf->fence_fd; 119 + int out_fence_fd = -1; 118 120 void *buf; 119 121 120 122 if (vgdev->has_virgl_3d == false) 121 123 return -ENOSYS; 124 + 125 + if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)) 126 + return -EINVAL; 127 + 128 + exbuf->fence_fd = -1; 129 + 130 + if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) { 131 + struct dma_fence *in_fence; 132 + 133 + in_fence = sync_file_get_fence(in_fence_fd); 134 + 135 + if (!in_fence) 136 + return -EINVAL; 137 + 138 + /* 139 + * Wait if the fence is from a foreign context, or if the fence 140 + * array contains any fence from a foreign context. 
141 + */ 142 + ret = 0; 143 + if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context)) 144 + ret = dma_fence_wait(in_fence, true); 145 + 146 + dma_fence_put(in_fence); 147 + if (ret) 148 + return ret; 149 + } 150 + 151 + if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) { 152 + out_fence_fd = get_unused_fd_flags(O_CLOEXEC); 153 + if (out_fence_fd < 0) 154 + return out_fence_fd; 155 + } 122 156 123 157 INIT_LIST_HEAD(&validate_list); 124 158 if (exbuf->num_bo_handles) { ··· 164 128 sizeof(struct ttm_validate_buffer), 165 129 GFP_KERNEL | __GFP_ZERO); 166 130 if (!bo_handles || !buflist) { 167 - kvfree(bo_handles); 168 - kvfree(buflist); 169 - return -ENOMEM; 131 + ret = -ENOMEM; 132 + goto out_unused_fd; 170 133 } 171 134 172 135 user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles; 173 136 if (copy_from_user(bo_handles, user_bo_handles, 174 137 exbuf->num_bo_handles * sizeof(uint32_t))) { 175 138 ret = -EFAULT; 176 - kvfree(bo_handles); 177 - kvfree(buflist); 178 - return ret; 139 + goto out_unused_fd; 179 140 } 180 141 181 142 for (i = 0; i < exbuf->num_bo_handles; i++) { 182 143 gobj = drm_gem_object_lookup(drm_file, bo_handles[i]); 183 144 if (!gobj) { 184 - kvfree(bo_handles); 185 - kvfree(buflist); 186 - return -ENOENT; 145 + ret = -ENOENT; 146 + goto out_unused_fd; 187 147 } 188 148 189 149 qobj = gem_to_virtio_gpu_obj(gobj); ··· 188 156 list_add(&buflist[i].head, &validate_list); 189 157 } 190 158 kvfree(bo_handles); 159 + bo_handles = NULL; 191 160 } 192 161 193 162 ret = virtio_gpu_object_list_validate(&ticket, &validate_list); ··· 201 168 ret = PTR_ERR(buf); 202 169 goto out_unresv; 203 170 } 204 - virtio_gpu_cmd_submit(vgdev, buf, exbuf->size, 205 - vfpriv->ctx_id, &fence); 206 171 207 - ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f); 172 + out_fence = virtio_gpu_fence_alloc(vgdev); 173 + if(!out_fence) { 174 + ret = -ENOMEM; 175 + goto out_memdup; 176 + } 177 + 178 + if (out_fence_fd >= 0) { 179 + sync_file = 
sync_file_create(&out_fence->f); 180 + if (!sync_file) { 181 + dma_fence_put(&out_fence->f); 182 + ret = -ENOMEM; 183 + goto out_memdup; 184 + } 185 + 186 + exbuf->fence_fd = out_fence_fd; 187 + fd_install(out_fence_fd, sync_file->file); 188 + } 189 + 190 + virtio_gpu_cmd_submit(vgdev, buf, exbuf->size, 191 + vfpriv->ctx_id, &out_fence); 192 + 193 + ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f); 208 194 209 195 /* fence the command bo */ 210 196 virtio_gpu_unref_list(&validate_list); 211 197 kvfree(buflist); 212 - dma_fence_put(&fence->f); 213 198 return 0; 214 199 200 + out_memdup: 201 + kfree(buf); 215 202 out_unresv: 216 203 ttm_eu_backoff_reservation(&ticket, &validate_list); 217 204 out_free: 218 205 virtio_gpu_unref_list(&validate_list); 206 + out_unused_fd: 207 + kvfree(bo_handles); 219 208 kvfree(buflist); 209 + 210 + if (out_fence_fd >= 0) 211 + put_unused_fd(out_fence_fd); 212 + 220 213 return ret; 221 214 } 222 215 ··· 342 283 rc_3d.nr_samples = cpu_to_le32(rc->nr_samples); 343 284 rc_3d.flags = cpu_to_le32(rc->flags); 344 285 286 + fence = virtio_gpu_fence_alloc(vgdev); 287 + if (!fence) { 288 + ret = -ENOMEM; 289 + goto fail_backoff; 290 + } 291 + 345 292 virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d, NULL); 346 293 ret = virtio_gpu_object_attach(vgdev, qobj, &fence); 347 294 if (ret) { 348 - ttm_eu_backoff_reservation(&ticket, &validate_list); 349 - goto fail_unref; 295 + virtio_gpu_fence_cleanup(fence); 296 + goto fail_backoff; 350 297 } 351 298 ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f); 352 299 } ··· 377 312 dma_fence_put(&fence->f); 378 313 } 379 314 return 0; 315 + fail_backoff: 316 + ttm_eu_backoff_reservation(&ticket, &validate_list); 380 317 fail_unref: 381 318 if (vgdev->has_virgl_3d) { 382 319 virtio_gpu_unref_list(&validate_list); ··· 441 374 goto out_unres; 442 375 443 376 convert_to_hw_box(&box, &args->box); 377 + 378 + fence = virtio_gpu_fence_alloc(vgdev); 379 + if (!fence) { 380 + ret 
= -ENOMEM; 381 + goto out_unres; 382 + } 444 383 virtio_gpu_cmd_transfer_from_host_3d 445 384 (vgdev, qobj->hw_res_handle, 446 385 vfpriv->ctx_id, offset, args->level, ··· 496 423 (vgdev, qobj, offset, 497 424 box.w, box.h, box.x, box.y, NULL); 498 425 } else { 426 + fence = virtio_gpu_fence_alloc(vgdev); 427 + if (!fence) { 428 + ret = -ENOMEM; 429 + goto out_unres; 430 + } 499 431 virtio_gpu_cmd_transfer_to_host_3d 500 432 (vgdev, qobj, 501 433 vfpriv ? vfpriv->ctx_id : 0, offset,
+6 -3
drivers/gpu/drm/virtio/virtgpu_kms.c
··· 55 55 static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev, 56 56 uint32_t nlen, const char *name) 57 57 { 58 - int handle = ida_alloc_min(&vgdev->ctx_id_ida, 1, GFP_KERNEL); 58 + int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL); 59 59 60 60 if (handle < 0) 61 61 return handle; 62 + handle += 1; 62 63 virtio_gpu_cmd_context_create(vgdev, handle, nlen, name); 63 64 return handle; 64 65 } ··· 68 67 uint32_t ctx_id) 69 68 { 70 69 virtio_gpu_cmd_context_destroy(vgdev, ctx_id); 71 - ida_free(&vgdev->ctx_id_ida, ctx_id); 70 + ida_free(&vgdev->ctx_id_ida, ctx_id - 1); 72 71 } 73 72 74 73 static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq, ··· 267 266 268 267 get_task_comm(dbgname, current); 269 268 id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname); 270 - if (id < 0) 269 + if (id < 0) { 270 + kfree(vfpriv); 271 271 return id; 272 + } 272 273 273 274 vfpriv->ctx_id = id; 274 275 file->driver_priv = vfpriv;
+14 -5
drivers/gpu/drm/virtio/virtgpu_object.c
··· 25 25 26 26 #include "virtgpu_drv.h" 27 27 28 - static void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, 28 + static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, 29 29 uint32_t *resid) 30 30 { 31 - int handle = ida_alloc_min(&vgdev->resource_ida, 1, GFP_KERNEL); 32 - *resid = handle; 31 + int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); 32 + 33 + if (handle < 0) 34 + return handle; 35 + 36 + *resid = handle + 1; 37 + return 0; 33 38 } 34 39 35 40 static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id) 36 41 { 37 - ida_free(&vgdev->resource_ida, id); 42 + ida_free(&vgdev->resource_ida, id - 1); 38 43 } 39 44 40 45 static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) ··· 99 94 bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL); 100 95 if (bo == NULL) 101 96 return -ENOMEM; 102 - virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle); 97 + ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle); 98 + if (ret < 0) { 99 + kfree(bo); 100 + return ret; 101 + } 103 102 size = roundup(size, PAGE_SIZE); 104 103 ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size); 105 104 if (ret != 0) {
+41 -5
drivers/gpu/drm/virtio/virtgpu_plane.c
··· 137 137 plane->state->src_h >> 16); 138 138 } 139 139 140 + static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane, 141 + struct drm_plane_state *new_state) 142 + { 143 + struct drm_device *dev = plane->dev; 144 + struct virtio_gpu_device *vgdev = dev->dev_private; 145 + struct virtio_gpu_framebuffer *vgfb; 146 + struct virtio_gpu_object *bo; 147 + 148 + if (!new_state->fb) 149 + return 0; 150 + 151 + vgfb = to_virtio_gpu_framebuffer(new_state->fb); 152 + bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); 153 + if (bo && bo->dumb && (plane->state->fb != new_state->fb)) { 154 + vgfb->fence = virtio_gpu_fence_alloc(vgdev); 155 + if (!vgfb->fence) 156 + return -ENOMEM; 157 + } 158 + 159 + return 0; 160 + } 161 + 162 + static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane, 163 + struct drm_plane_state *old_state) 164 + { 165 + struct virtio_gpu_framebuffer *vgfb; 166 + 167 + if (!plane->state->fb) 168 + return; 169 + 170 + vgfb = to_virtio_gpu_framebuffer(plane->state->fb); 171 + if (vgfb->fence) 172 + virtio_gpu_fence_cleanup(vgfb->fence); 173 + } 174 + 140 175 static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, 141 176 struct drm_plane_state *old_state) 142 177 { ··· 179 144 struct virtio_gpu_device *vgdev = dev->dev_private; 180 145 struct virtio_gpu_output *output = NULL; 181 146 struct virtio_gpu_framebuffer *vgfb; 182 - struct virtio_gpu_fence *fence = NULL; 183 147 struct virtio_gpu_object *bo = NULL; 184 148 uint32_t handle; 185 149 int ret = 0; ··· 204 170 (vgdev, bo, 0, 205 171 cpu_to_le32(plane->state->crtc_w), 206 172 cpu_to_le32(plane->state->crtc_h), 207 - 0, 0, &fence); 173 + 0, 0, &vgfb->fence); 208 174 ret = virtio_gpu_object_reserve(bo, false); 209 175 if (!ret) { 210 176 reservation_object_add_excl_fence(bo->tbo.resv, 211 - &fence->f); 212 - dma_fence_put(&fence->f); 213 - fence = NULL; 177 + &vgfb->fence->f); 178 + dma_fence_put(&vgfb->fence->f); 179 + vgfb->fence = NULL; 214 180 virtio_gpu_object_unreserve(bo); 
215 181 virtio_gpu_object_wait(bo, false); 216 182 } ··· 252 218 }; 253 219 254 220 static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = { 221 + .prepare_fb = virtio_gpu_cursor_prepare_fb, 222 + .cleanup_fb = virtio_gpu_cursor_cleanup_fb, 255 223 .atomic_check = virtio_gpu_plane_atomic_check, 256 224 .atomic_update = virtio_gpu_cursor_plane_update, 257 225 };
+1 -1
drivers/gpu/drm/virtio/virtgpu_vq.c
··· 896 896 struct virtio_gpu_object *obj) 897 897 { 898 898 bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); 899 - struct virtio_gpu_fence *fence; 900 899 901 900 if (use_dma_api && obj->mapped) { 901 + struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev); 902 902 /* detach backing and wait for the host process it ... */ 903 903 virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence); 904 904 dma_fence_wait(&fence->f, true);
+4
include/drm/drm_drv.h
··· 471 471 * @gem_prime_export: 472 472 * 473 473 * export GEM -> dmabuf 474 + * 475 + * This defaults to drm_gem_prime_export() if not set. 474 476 */ 475 477 struct dma_buf * (*gem_prime_export)(struct drm_device *dev, 476 478 struct drm_gem_object *obj, int flags); ··· 480 478 * @gem_prime_import: 481 479 * 482 480 * import dmabuf -> GEM 481 + * 482 + * This defaults to drm_gem_prime_import() if not set. 483 483 */ 484 484 struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, 485 485 struct dma_buf *dma_buf);
+131
include/drm/drm_gem.h
··· 38 38 39 39 #include <drm/drm_vma_manager.h> 40 40 41 + struct drm_gem_object; 42 + 43 + /** 44 + * struct drm_gem_object_funcs - GEM object functions 45 + */ 46 + struct drm_gem_object_funcs { 47 + /** 48 + * @free: 49 + * 50 + * Deconstructor for drm_gem_objects. 51 + * 52 + * This callback is mandatory. 53 + */ 54 + void (*free)(struct drm_gem_object *obj); 55 + 56 + /** 57 + * @open: 58 + * 59 + * Called upon GEM handle creation. 60 + * 61 + * This callback is optional. 62 + */ 63 + int (*open)(struct drm_gem_object *obj, struct drm_file *file); 64 + 65 + /** 66 + * @close: 67 + * 68 + * Called upon GEM handle release. 69 + * 70 + * This callback is optional. 71 + */ 72 + void (*close)(struct drm_gem_object *obj, struct drm_file *file); 73 + 74 + /** 75 + * @print_info: 76 + * 77 + * If driver subclasses struct &drm_gem_object, it can implement this 78 + * optional hook for printing additional driver specific info. 79 + * 80 + * drm_printf_indent() should be used in the callback passing it the 81 + * indent argument. 82 + * 83 + * This callback is called from drm_gem_print_info(). 84 + * 85 + * This callback is optional. 86 + */ 87 + void (*print_info)(struct drm_printer *p, unsigned int indent, 88 + const struct drm_gem_object *obj); 89 + 90 + /** 91 + * @export: 92 + * 93 + * Export backing buffer as a &dma_buf. 94 + * If this is not set drm_gem_prime_export() is used. 95 + * 96 + * This callback is optional. 97 + */ 98 + struct dma_buf *(*export)(struct drm_gem_object *obj, int flags); 99 + 100 + /** 101 + * @pin: 102 + * 103 + * Pin backing buffer in memory. 104 + * 105 + * This callback is optional. 106 + */ 107 + int (*pin)(struct drm_gem_object *obj); 108 + 109 + /** 110 + * @unpin: 111 + * 112 + * Unpin backing buffer. 113 + * 114 + * This callback is optional. 115 + */ 116 + void (*unpin)(struct drm_gem_object *obj); 117 + 118 + /** 119 + * @get_sg_table: 120 + * 121 + * Returns a Scatter-Gather table representation of the buffer. 
122 + * Used when exporting a buffer. 123 + * 124 + * This callback is mandatory if buffer export is supported. 125 + */ 126 + struct sg_table *(*get_sg_table)(struct drm_gem_object *obj); 127 + 128 + /** 129 + * @vmap: 130 + * 131 + * Returns a virtual address for the buffer. 132 + * 133 + * This callback is optional. 134 + */ 135 + void *(*vmap)(struct drm_gem_object *obj); 136 + 137 + /** 138 + * @vunmap: 139 + * 140 + * Releases the the address previously returned by @vmap. 141 + * 142 + * This callback is optional. 143 + */ 144 + void (*vunmap)(struct drm_gem_object *obj, void *vaddr); 145 + 146 + /** 147 + * @vm_ops: 148 + * 149 + * Virtual memory operations used with mmap. 150 + * 151 + * This is optional but necessary for mmap support. 152 + */ 153 + const struct vm_operations_struct *vm_ops; 154 + }; 155 + 41 156 /** 42 157 * struct drm_gem_object - GEM buffer object 43 158 * ··· 261 146 * simply leave it as NULL. 262 147 */ 263 148 struct dma_buf_attachment *import_attach; 149 + 150 + /** 151 + * @funcs: 152 + * 153 + * Optional GEM object functions. If this is set, it will be used instead of the 154 + * corresponding &drm_driver GEM callbacks. 155 + * 156 + * New drivers should use this. 157 + * 158 + */ 159 + const struct drm_gem_object_funcs *funcs; 264 160 }; 265 161 266 162 /** ··· 418 292 int drm_gem_dumb_destroy(struct drm_file *file, 419 293 struct drm_device *dev, 420 294 uint32_t handle); 295 + 296 + int drm_gem_pin(struct drm_gem_object *obj); 297 + void drm_gem_unpin(struct drm_gem_object *obj); 298 + void *drm_gem_vmap(struct drm_gem_object *obj); 299 + void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr); 421 300 422 301 #endif /* __DRM_GEM_H__ */
+24
include/drm/drm_gem_cma_helper.h
··· 103 103 void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj); 104 104 void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 105 105 106 + struct drm_gem_object * 107 + drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size); 108 + 109 + /** 110 + * DRM_GEM_CMA_VMAP_DRIVER_OPS - CMA GEM driver operations ensuring a virtual 111 + * address on the buffer 112 + * 113 + * This macro provides a shortcut for setting the default GEM operations in the 114 + * &drm_driver structure for drivers that need the virtual address also on 115 + * imported buffers. 116 + */ 117 + #define DRM_GEM_CMA_VMAP_DRIVER_OPS \ 118 + .gem_create_object = drm_cma_gem_create_object_default_funcs, \ 119 + .dumb_create = drm_gem_cma_dumb_create, \ 120 + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ 121 + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ 122 + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \ 123 + .gem_prime_mmap = drm_gem_prime_mmap 124 + 125 + struct drm_gem_object * 126 + drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm, 127 + struct dma_buf_attachment *attach, 128 + struct sg_table *sgt); 129 + 106 130 #endif /* __DRM_GEM_CMA_HELPER_H__ */
+1
include/drm/drm_prime.h
··· 70 70 int drm_gem_prime_handle_to_fd(struct drm_device *dev, 71 71 struct drm_file *file_priv, uint32_t handle, uint32_t flags, 72 72 int *prime_fd); 73 + int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); 73 74 struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, 74 75 struct dma_buf *dma_buf); 75 76
+34 -39
include/drm/drm_syncobj.h
··· 30 30 31 31 struct drm_syncobj_cb; 32 32 33 - enum drm_syncobj_type { 34 - DRM_SYNCOBJ_TYPE_BINARY, 35 - DRM_SYNCOBJ_TYPE_TIMELINE 36 - }; 37 - 38 33 /** 39 34 * struct drm_syncobj - sync object. 40 35 * 41 - * This structure defines a generic sync object which is timeline based. 36 + * This structure defines a generic sync object which wraps a &dma_fence. 42 37 */ 43 38 struct drm_syncobj { 44 39 /** ··· 41 46 */ 42 47 struct kref refcount; 43 48 /** 44 - * @type: indicate syncobj type 49 + * @fence: 50 + * NULL or a pointer to the fence bound to this object. 51 + * 52 + * This field should not be used directly. Use drm_syncobj_fence_get() 53 + * and drm_syncobj_replace_fence() instead. 45 54 */ 46 - enum drm_syncobj_type type; 55 + struct dma_fence __rcu *fence; 47 56 /** 48 - * @wq: wait signal operation work queue 57 + * @cb_list: List of callbacks to call when the &fence gets replaced. 49 58 */ 50 - wait_queue_head_t wq; 51 - /** 52 - * @timeline_context: fence context used by timeline 53 - */ 54 - u64 timeline_context; 55 - /** 56 - * @timeline: syncobj timeline value, which indicates point is signaled. 57 - */ 58 - u64 timeline; 59 - /** 60 - * @signal_point: which indicates the latest signaler point. 61 - */ 62 - u64 signal_point; 63 - /** 64 - * @signal_pt_list: signaler point list. 65 - */ 66 - struct list_head signal_pt_list; 67 - 68 - /** 69 - * @cb_list: List of callbacks to call when the &fence gets replaced. 70 - */ 71 59 struct list_head cb_list; 72 60 /** 73 - * @pt_lock: Protects pt list. 61 + * @lock: Protects &cb_list and write-locks &fence. 74 62 */ 75 - spinlock_t pt_lock; 76 - /** 77 - * @cb_mutex: Protects syncobj cb list. 78 - */ 79 - struct mutex cb_mutex; 63 + spinlock_t lock; 80 64 /** 81 65 * @file: A file backing for this syncobj. 
82 66 */ ··· 68 94 /** 69 95 * struct drm_syncobj_cb - callback for drm_syncobj_add_callback 70 96 * @node: used by drm_syncob_add_callback to append this struct to 71 - * &drm_syncobj.cb_list 97 + * &drm_syncobj.cb_list 72 98 * @func: drm_syncobj_func_t to call 73 99 * 74 100 * This struct will be initialized by drm_syncobj_add_callback, additional ··· 106 132 kref_put(&obj->refcount, drm_syncobj_free); 107 133 } 108 134 135 + /** 136 + * drm_syncobj_fence_get - get a reference to a fence in a sync object 137 + * @syncobj: sync object. 138 + * 139 + * This acquires additional reference to &drm_syncobj.fence contained in @obj, 140 + * if not NULL. It is illegal to call this without already holding a reference. 141 + * No locks required. 142 + * 143 + * Returns: 144 + * Either the fence of @obj or NULL if there's none. 145 + */ 146 + static inline struct dma_fence * 147 + drm_syncobj_fence_get(struct drm_syncobj *syncobj) 148 + { 149 + struct dma_fence *fence; 150 + 151 + rcu_read_lock(); 152 + fence = dma_fence_get_rcu_safe(&syncobj->fence); 153 + rcu_read_unlock(); 154 + 155 + return fence; 156 + } 157 + 109 158 struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, 110 159 u32 handle); 111 160 void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, u64 point, ··· 142 145 int drm_syncobj_get_handle(struct drm_file *file_private, 143 146 struct drm_syncobj *syncobj, u32 *handle); 144 147 int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd); 145 - int drm_syncobj_search_fence(struct drm_syncobj *syncobj, u64 point, u64 flags, 146 - struct dma_fence **fence); 147 148 148 149 #endif
+7 -28
include/drm/tinydrm/tinydrm.h
··· 10 10 #ifndef __LINUX_TINYDRM_H 11 11 #define __LINUX_TINYDRM_H 12 12 13 - #include <drm/drm_gem_cma_helper.h> 14 - #include <drm/drm_fb_cma_helper.h> 13 + #include <linux/mutex.h> 15 14 #include <drm/drm_simple_kms_helper.h> 15 + 16 + struct drm_clip_rect; 17 + struct drm_driver; 18 + struct drm_file; 19 + struct drm_framebuffer; 20 + struct drm_framebuffer_funcs; 16 21 17 22 /** 18 23 * struct tinydrm_device - tinydrm device ··· 59 54 } 60 55 61 56 /** 62 - * TINYDRM_GEM_DRIVER_OPS - default tinydrm gem operations 63 - * 64 - * This macro provides a shortcut for setting the tinydrm GEM operations in 65 - * the &drm_driver structure. 66 - */ 67 - #define TINYDRM_GEM_DRIVER_OPS \ 68 - .gem_free_object_unlocked = tinydrm_gem_cma_free_object, \ 69 - .gem_print_info = drm_gem_cma_print_info, \ 70 - .gem_vm_ops = &drm_gem_cma_vm_ops, \ 71 - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ 72 - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ 73 - .gem_prime_import = drm_gem_prime_import, \ 74 - .gem_prime_export = drm_gem_prime_export, \ 75 - .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, \ 76 - .gem_prime_import_sg_table = tinydrm_gem_cma_prime_import_sg_table, \ 77 - .gem_prime_vmap = drm_gem_cma_prime_vmap, \ 78 - .gem_prime_vunmap = drm_gem_cma_prime_vunmap, \ 79 - .gem_prime_mmap = drm_gem_cma_prime_mmap, \ 80 - .dumb_create = drm_gem_cma_dumb_create 81 - 82 - /** 83 57 * TINYDRM_MODE - tinydrm display mode 84 58 * @hd: Horizontal resolution, width 85 59 * @vd: Vertical resolution, height ··· 81 97 .type = DRM_MODE_TYPE_DRIVER, \ 82 98 .clock = 1 /* pass validation */ 83 99 84 - void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj); 85 - struct drm_gem_object * 86 - tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm, 87 - struct dma_buf_attachment *attach, 88 - struct sg_table *sgt); 89 100 int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev, 90 101 const struct drm_framebuffer_funcs *fb_funcs, 91 102 
struct drm_driver *driver);
-1
include/uapi/drm/drm.h
··· 717 717 struct drm_syncobj_create { 718 718 __u32 handle; 719 719 #define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0) 720 - #define DRM_SYNCOBJ_CREATE_TYPE_TIMELINE (1 << 1) 721 720 __u32 flags; 722 721 }; 723 722
+1
include/uapi/drm/drm_fourcc.h
··· 151 151 #define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */ 152 152 153 153 #define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */ 154 + #define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */ 154 155 155 156 /* 156 157 * packed YCbCr420 2x2 tiled formats
+10 -3
include/uapi/drm/virtgpu_drm.h
··· 47 47 #define DRM_VIRTGPU_WAIT 0x08 48 48 #define DRM_VIRTGPU_GET_CAPS 0x09 49 49 50 + #define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01 51 + #define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02 52 + #define VIRTGPU_EXECBUF_FLAGS (\ 53 + VIRTGPU_EXECBUF_FENCE_FD_IN |\ 54 + VIRTGPU_EXECBUF_FENCE_FD_OUT |\ 55 + 0) 56 + 50 57 struct drm_virtgpu_map { 51 58 __u64 offset; /* use for mmap system call */ 52 59 __u32 handle; ··· 61 54 }; 62 55 63 56 struct drm_virtgpu_execbuffer { 64 - __u32 flags; /* for future use */ 57 + __u32 flags; 65 58 __u32 size; 66 59 __u64 command; /* void* */ 67 60 __u64 bo_handles; 68 61 __u32 num_bo_handles; 69 - __u32 pad; 62 + __s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */ 70 63 }; 71 64 72 65 #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ ··· 144 137 DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map) 145 138 146 139 #define DRM_IOCTL_VIRTGPU_EXECBUFFER \ 147 - DRM_IOW(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\ 140 + DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\ 148 141 struct drm_virtgpu_execbuffer) 149 142 150 143 #define DRM_IOCTL_VIRTGPU_GETPARAM \