Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.15-rc1, 837 lines, 20 kB
/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/pci.h>

#include <drm/drm_fourcc.h>
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"

#define VMW_DIRTY_DELAY (HZ / 30)

struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct mutex bo_mutex;
	struct vmw_buffer_object *vmw_bo;
	unsigned bo_size;
	struct drm_framebuffer *set_fb;
	struct drm_display_mode *set_mode;
	u32 fb_x;
	u32 fb_y;
	bool bo_iowrite;

	u32 pseudo_palette[17];

	unsigned max_width;
	unsigned max_height;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;

	struct drm_crtc *crtc;
	struct drm_connector *con;
	struct delayed_work local_work;
};

static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->set_fb->format->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n",
			  par->set_fb->format->depth,
			  par->set_fb->format->cpp[0] * 8);
		return 1;
	}

	return 0;
}

static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					var->xres * var->bits_per_pixel/8,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/**
 * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
 *
 * @work: The struct work_struct associated with this task.
 *
 * This function flushes the dirty regions of the vmalloc framebuffer to the
 * kms framebuffer, and if the kms framebuffer is visible, also updated the
 * corresponding displays. Note that this function runs even if the kms
 * framebuffer is not bound to a crtc and thus not visible, but it's turned
 * off during hibernation using the par->dirty.active bool.
 */
static void vmw_fb_dirty_flush(struct work_struct *work)
{
	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
					      local_work.work);
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	unsigned long irq_flags;
	s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
	u32 cpp, max_x, max_y;
	struct drm_clip_rect clip;
	struct drm_framebuffer *cur_fb;
	u8 *src_ptr, *dst_ptr;
	struct vmw_buffer_object *vbo = par->vmw_bo;
	void *virtual;

	if (!READ_ONCE(par->dirty.active))
		return;

	mutex_lock(&par->bo_mutex);
	cur_fb = par->set_fb;
	if (!cur_fb)
		goto out_unlock;

	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
	virtual = vmw_bo_map_and_cache(vbo);
	if (!virtual)
		goto out_unreserve;

	spin_lock_irqsave(&par->dirty.lock, irq_flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
		goto out_unreserve;
	}

	/*
	 * Handle panning when copying from vmalloc to framebuffer.
	 * Clip dirty area to framebuffer.
	 */
	cpp = cur_fb->format->cpp[0];
	max_x = par->fb_x + cur_fb->width;
	max_y = par->fb_y + cur_fb->height;

	dst_x1 = par->dirty.x1 - par->fb_x;
	dst_y1 = par->dirty.y1 - par->fb_y;
	dst_x1 = max_t(s32, dst_x1, 0);
	dst_y1 = max_t(s32, dst_y1, 0);

	dst_x2 = par->dirty.x2 - par->fb_x;
	dst_y2 = par->dirty.y2 - par->fb_y;
	dst_x2 = min_t(s32, dst_x2, max_x);
	dst_y2 = min_t(s32, dst_y2, max_y);
	w = dst_x2 - dst_x1;
	h = dst_y2 - dst_y1;
	w = max_t(s32, 0, w);
	h = max_t(s32, 0, h);

	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

	if (w && h) {
		dst_ptr = (u8 *)virtual +
			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
		src_ptr = (u8 *)par->vmalloc +
			((dst_y1 + par->fb_y) * info->fix.line_length +
			 (dst_x1 + par->fb_x) * cpp);

		while (h-- > 0) {
			memcpy(dst_ptr, src_ptr, w*cpp);
			dst_ptr += par->set_fb->pitches[0];
			src_ptr += info->fix.line_length;
		}

		clip.x1 = dst_x1;
		clip.x2 = dst_x2;
		clip.y1 = dst_y1;
		clip.y2 = dst_y2;
	}

out_unreserve:
	ttm_bo_unreserve(&vbo->base);
	if (w && h) {
		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
						       &clip, 1));
		vmw_cmd_flush(vmw_priv, false);
	}
out_unlock:
	mutex_unlock(&par->bo_mutex);
}

static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* if we are active start the dirty work
		 * we share the work with the defio system */
		if (par->dirty.active)
			schedule_delayed_work(&par->local_work,
					      VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;

	if ((var->xoffset + var->xres) > var->xres_virtual ||
	    (var->yoffset + var->yres) > var->yres_virtual) {
		DRM_ERROR("Requested panning can not fit in framebuffer\n");
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;
	if (par->set_fb)
		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
				  par->set_fb->height);
	mutex_unlock(&par->bo_mutex);

	return 0;
}

static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);

		/*
		 * Since we've already waited on this work once, try to
		 * execute asap.
		 */
		cancel_delayed_work(&par->local_work);
		schedule_delayed_work(&par->local_work, 0);
	}
};

static struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};

/*
 * Draw code
 */

static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

/*
 * Bring up code
 */

static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_buffer_object **out)
{
	struct vmw_buffer_object *vmw_bo;
	int ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_bo_init(vmw_priv, vmw_bo, size,
			  &vmw_sys_placement,
			  false, false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

err_unlock:
	return ret;
}

static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
				int *depth)
{
	switch (var->bits_per_pixel) {
	case 32:
		*depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	return 0;
}

static int vmwgfx_set_config_internal(struct drm_mode_set *set)
{
	struct drm_crtc *crtc = set->crtc;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

restart:
	ret = crtc->funcs->set_config(set, &ctx);

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto restart;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static int vmw_fb_kms_detach(struct vmw_fb_par *par,
			     bool detach_bo,
			     bool unref_bo)
{
	struct drm_framebuffer *cur_fb = par->set_fb;
	int ret;

	/* Detach the KMS framebuffer from crtcs */
	if (par->set_mode) {
		struct drm_mode_set set;

		set.crtc = par->crtc;
		set.x = 0;
		set.y = 0;
		set.mode = NULL;
		set.fb = NULL;
		set.num_connectors = 0;
		set.connectors = &par->con;
		ret = vmwgfx_set_config_internal(&set);
		if (ret) {
			DRM_ERROR("Could not unset a mode.\n");
			return ret;
		}
		drm_mode_destroy(&par->vmw_priv->drm, par->set_mode);
		par->set_mode = NULL;
	}

	if (cur_fb) {
		drm_framebuffer_put(cur_fb);
		par->set_fb = NULL;
	}

	if (par->vmw_bo && detach_bo && unref_bo)
		vmw_bo_unreference(&par->vmw_bo);

	return 0;
}

static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
	struct drm_mode_fb_cmd2 mode_cmd;
	struct vmw_fb_par *par = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_framebuffer *cur_fb;
	struct vmw_framebuffer *vfb;
	int ret = 0, depth;
	size_t new_bo_size;

	ret = vmw_fb_compute_depth(var, &depth);
	if (ret)
		return ret;

	mode_cmd.width = var->xres;
	mode_cmd.height = var->yres;
	mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
	mode_cmd.pixel_format =
		drm_mode_legacy_fb_format(var->bits_per_pixel, depth);

	cur_fb = par->set_fb;
	if (cur_fb && cur_fb->width == mode_cmd.width &&
	    cur_fb->height == mode_cmd.height &&
	    cur_fb->format->format == mode_cmd.pixel_format &&
	    cur_fb->pitches[0] == mode_cmd.pitches[0])
		return 0;

	/* Need new buffer object ? */
	new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
	ret = vmw_fb_kms_detach(par,
				par->bo_size < new_bo_size ||
				par->bo_size > 2*new_bo_size,
				true);
	if (ret)
		return ret;

	if (!par->vmw_bo) {
		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
				       &par->vmw_bo);
		if (ret) {
			DRM_ERROR("Failed creating a buffer object for "
				  "fbdev.\n");
			return ret;
		}
		par->bo_size = new_bo_size;
	}

	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
				      true, &mode_cmd);
	if (IS_ERR(vfb))
		return PTR_ERR(vfb);

	par->set_fb = &vfb->base;

	return 0;
}

static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct drm_mode_set set;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
		DRM_MODE_TYPE_DRIVER,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	struct drm_display_mode *mode;
	int ret;

	mode = drm_mode_duplicate(&vmw_priv->drm, &new_mode);
	if (!mode) {
		DRM_ERROR("Could not create new fb mode.\n");
		return -ENOMEM;
	}

	mode->hdisplay = var->xres;
	mode->vdisplay = var->yres;
	vmw_guess_mode_timing(mode);

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					mode->hdisplay *
					DIV_ROUND_UP(var->bits_per_pixel, 8),
					mode->vdisplay)) {
		drm_mode_destroy(&vmw_priv->drm, mode);
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	ret = vmw_fb_kms_framebuffer(info);
	if (ret)
		goto out_unlock;

	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;

	set.crtc = par->crtc;
	set.x = 0;
	set.y = 0;
	set.mode = mode;
	set.fb = par->set_fb;
	set.num_connectors = 1;
	set.connectors = &par->con;

	ret = vmwgfx_set_config_internal(&set);
	if (ret)
		goto out_unlock;

	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
			  par->set_fb->width, par->set_fb->height);

	/* If there already was stuff dirty we wont
	 * schedule a new work, so lets do it now */

	schedule_delayed_work(&par->local_work, 0);

out_unlock:
	if (par->set_mode)
		drm_mode_destroy(&vmw_priv->drm, par->set_mode);
	par->set_mode = mode;

	mutex_unlock(&par->bo_mutex);

	return ret;
}


static const struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = vmw_priv->drm.dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned fb_width, fb_height;
	unsigned int fb_bpp, fb_pitch, fb_size;
	struct drm_display_mode *init_mode;
	int ret;

	fb_bpp = 32;

	/* XXX As shouldn't these be as well. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	memset(par, 0, sizeof(*par));
	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
	par->vmw_priv = vmw_priv;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
				      par->max_height, &par->con,
				      &par->crtc, &init_mode);
	if (ret)
		goto err_kms;

	info->var.xres = init_mode->hdisplay;
	info->var.yres = init_mode->vdisplay;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vzalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = (char __iomem *)par->vmalloc;
	info->screen_size = fb_size;

	info->fbops = &vmw_fb_ops;

	/* 24 depth per default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = fb_bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	mutex_init(&par->bo_mutex);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	vmw_fb_set_par(info);

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
err_free:
	vfree(par->vmalloc);
err_kms:
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;

	/* ??? order */
	fb_deferred_io_cleanup(info);
	cancel_delayed_work_sync(&par->local_work);
	unregister_framebuffer(info);

	mutex_lock(&par->bo_mutex);
	(void) vmw_fb_kms_detach(par, true, true);
	mutex_unlock(&par->bo_mutex);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work(&info->deferred_work);
	flush_delayed_work(&par->local_work);

	return 0;
}

int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	/*
	 * Need to reschedule a dirty update, because otherwise that's
	 * only done in dirty_mark() if the previous coalesced
	 * dirty region was empty.
	 */
	schedule_delayed_work(&par->local_work, 0);

	return 0;
}