Merge branch 'qxl-fixes' of git://people.freedesktop.org/~airlied/linux

Pull qxl drm fixes from Dave Airlie:
"Okay as I warned, the qxl driver was running a bit free and loose with
its ttm object reservations and the new lockdep enabled reservation
tracking shone a bright light into it, it also with the new
reservations mutexes hits a possible deadlock during boot.

The first patch is a real fix to render the console correctly: the
driver used to just drop rendering from irq context as too hard.
This also fixes a sleeping-while-atomic warning.

The other two patches are the big ugly ones that redo how the driver
allocates and reserves its objects so that everything works properly.
I've tested this in a VM and compared it against the current code,
which hits a lockdep warning and the sleeping-while-atomic warning
before failing.

Sorry this is coming in late; I should have tested qxl before
merging the mutex code, but I'd rather fix qxl with this than revert
the reservations code at this point"

* 'qxl-fixes' of git://people.freedesktop.org/~airlied/linux:
qxl: convert qxl driver to proper use for reservations
qxl: allow creation of pre-pinned objects and use for releases.
drm/qxl: add delayed fb operations

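Every converted command-submission path in the diffs below follows the
same reservation flow: allocate the release and its bos, do one
reservation pass over the whole list, map and fill the command, push it,
then fence and unreserve the buffer objects. A condensed sketch of that
flow, modelled on the qxl_crtc_cursor_move() conversion in
qxl_display.c (the function name here is made up for illustration and
some error handling is trimmed):

    #include "qxl_drv.h"

    /* hypothetical example, condensed from the qxl_crtc_cursor_move() conversion */
    static int qxl_cursor_move_sketch(struct qxl_device *qdev, int x, int y)
    {
            struct qxl_release *release;
            struct qxl_cursor_cmd *cmd;
            int ret;

            /* allocate the release; nothing is reserved at this point */
            ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
                                             &release, NULL);
            if (ret)
                    return ret;

            /* one reservation pass over every bo tracked by the release */
            ret = qxl_release_reserve_list(release, true);
            if (ret) {
                    qxl_release_free(qdev, release);
                    return ret;
            }

            /* fill in the command while the backing bo is reserved */
            cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
            cmd->type = QXL_CURSOR_MOVE;
            cmd->u.position.x = x;
            cmd->u.position.y = y;
            qxl_release_unmap(qdev, release, &cmd->release_info);

            /* push the command, then fence and unreserve all bos on the list */
            qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
            qxl_release_fence_buffer_objects(release);
            return 0;
    }
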
+18 -24
drivers/gpu/drm/qxl/qxl_cmd.c
··· 179 179 uint32_t type, bool interruptible) 180 180 { 181 181 struct qxl_command cmd; 182 + struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); 182 183 183 184 cmd.type = type; 184 - cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); 185 + cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); 185 186 186 187 return qxl_ring_push(qdev->command_ring, &cmd, interruptible); 187 188 } ··· 192 191 uint32_t type, bool interruptible) 193 192 { 194 193 struct qxl_command cmd; 194 + struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); 195 195 196 196 cmd.type = type; 197 - cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); 197 + cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); 198 198 199 199 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); 200 200 } ··· 216 214 struct qxl_release *release; 217 215 uint64_t id, next_id; 218 216 int i = 0; 219 - int ret; 220 217 union qxl_release_info *info; 221 218 222 219 while (qxl_ring_pop(qdev->release_ring, &id)) { ··· 225 224 if (release == NULL) 226 225 break; 227 226 228 - ret = qxl_release_reserve(qdev, release, false); 229 - if (ret) { 230 - qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id); 231 - DRM_ERROR("failed to reserve release %lld\n", id); 232 - } 233 - 234 227 info = qxl_release_map(qdev, release); 235 228 next_id = info->next; 236 229 qxl_release_unmap(qdev, release, info); 237 230 238 - qxl_release_unreserve(qdev, release); 239 231 QXL_INFO(qdev, "popped %lld, next %lld\n", id, 240 232 next_id); 241 233 ··· 253 259 return i; 254 260 } 255 261 256 - int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, 262 + int qxl_alloc_bo_reserved(struct qxl_device *qdev, 263 + struct qxl_release *release, 264 + unsigned long size, 257 265 struct qxl_bo **_bo) 258 266 { 259 267 struct qxl_bo *bo; 260 268 int ret; 261 269 262 270 ret = qxl_bo_create(qdev, size, false /* not kernel - device */, 263 - QXL_GEM_DOMAIN_VRAM, NULL, &bo); 271 + false, QXL_GEM_DOMAIN_VRAM, NULL, &bo); 264 272 if (ret) { 265 273 DRM_ERROR("failed to allocate VRAM BO\n"); 266 274 return ret; 267 275 } 268 - ret = qxl_bo_reserve(bo, false); 269 - if (unlikely(ret != 0)) 276 + ret = qxl_release_list_add(release, bo); 277 + if (ret) 270 278 goto out_unref; 271 279 272 280 *_bo = bo; 273 281 return 0; 274 282 out_unref: 275 283 qxl_bo_unref(&bo); 276 - return 0; 284 + return ret; 277 285 } 278 286 279 287 static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) ··· 499 503 if (ret) 500 504 return ret; 501 505 506 + ret = qxl_release_reserve_list(release, true); 507 + if (ret) 508 + return ret; 509 + 502 510 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); 503 511 cmd->type = QXL_SURFACE_CMD_CREATE; 504 512 cmd->u.surface_create.format = surf->surf.format; ··· 524 524 525 525 surf->surf_create = release; 526 526 527 - /* no need to add a release to the fence for this bo, 527 + /* no need to add a release to the fence for this surface bo, 528 528 since it is only released when we ask to destroy the surface 529 529 and it would never signal otherwise */ 530 - qxl_fence_releaseable(qdev, release); 531 - 532 530 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); 533 - 534 - qxl_release_unreserve(qdev, release); 531 + 
qxl_release_fence_buffer_objects(release); 535 532 536 533 surf->hw_surf_alloc = true; 537 534 spin_lock(&qdev->surf_id_idr_lock); ··· 570 573 cmd->surface_id = id; 571 574 qxl_release_unmap(qdev, release, &cmd->release_info); 572 575 573 - qxl_fence_releaseable(qdev, release); 574 - 575 576 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); 576 577 577 - qxl_release_unreserve(qdev, release); 578 - 578 + qxl_release_fence_buffer_objects(release); 579 579 580 580 return 0; 581 581 }
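
Note the idiom in qxl_cmd.c above for reaching a release's backing bo
now that releases carry a list of ttm_validate_buffer entries rather
than a fixed bo array; a minimal sketch, assuming the release already
has at least one bo on its list:

            struct qxl_bo_list *entry;
            struct qxl_bo *bo;

            /* the release's own bo is always the first entry on its bo list */
            entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
            bo = to_qxl_bo(entry->tv.bo);
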
+46 -24
drivers/gpu/drm/qxl/qxl_display.c
··· 179 179 kfree(qxl_crtc); 180 180 } 181 181 182 - static void 182 + static int 183 183 qxl_hide_cursor(struct qxl_device *qdev) 184 184 { 185 185 struct qxl_release *release; ··· 188 188 189 189 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, 190 190 &release, NULL); 191 + if (ret) 192 + return ret; 193 + 194 + ret = qxl_release_reserve_list(release, true); 195 + if (ret) { 196 + qxl_release_free(qdev, release); 197 + return ret; 198 + } 191 199 192 200 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); 193 201 cmd->type = QXL_CURSOR_HIDE; 194 202 qxl_release_unmap(qdev, release, &cmd->release_info); 195 203 196 - qxl_fence_releaseable(qdev, release); 197 204 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 198 - qxl_release_unreserve(qdev, release); 205 + qxl_release_fence_buffer_objects(release); 206 + return 0; 199 207 } 200 208 201 209 static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, ··· 224 216 225 217 int size = 64*64*4; 226 218 int ret = 0; 227 - if (!handle) { 228 - qxl_hide_cursor(qdev); 229 - return 0; 230 - } 219 + if (!handle) 220 + return qxl_hide_cursor(qdev); 231 221 232 222 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); 233 223 if (!obj) { ··· 240 234 goto out_unref; 241 235 242 236 ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); 237 + qxl_bo_unreserve(user_bo); 243 238 if (ret) 244 - goto out_unreserve; 239 + goto out_unref; 245 240 246 241 ret = qxl_bo_kmap(user_bo, &user_ptr); 247 242 if (ret) ··· 253 246 &release, NULL); 254 247 if (ret) 255 248 goto out_kunmap; 256 - ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size, 257 - &cursor_bo); 249 + 250 + ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size, 251 + &cursor_bo); 258 252 if (ret) 259 253 goto out_free_release; 260 - ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); 254 + 255 + ret = qxl_release_reserve_list(release, false); 261 256 if (ret) 262 257 goto out_free_bo; 258 + 259 + ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); 260 + if (ret) 261 + goto out_backoff; 263 262 264 263 cursor->header.unique = 0; 265 264 cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; ··· 282 269 283 270 qxl_bo_kunmap(cursor_bo); 284 271 285 - /* finish with the userspace bo */ 286 272 qxl_bo_kunmap(user_bo); 287 - qxl_bo_unpin(user_bo); 288 - qxl_bo_unreserve(user_bo); 289 - drm_gem_object_unreference_unlocked(obj); 290 273 291 274 cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); 292 275 cmd->type = QXL_CURSOR_SET; ··· 290 281 cmd->u.set.position.y = qcrtc->cur_y; 291 282 292 283 cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); 293 - qxl_release_add_res(qdev, release, cursor_bo); 294 284 295 285 cmd->u.set.visible = 1; 296 286 qxl_release_unmap(qdev, release, &cmd->release_info); 297 287 298 - qxl_fence_releaseable(qdev, release); 299 288 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 300 - qxl_release_unreserve(qdev, release); 289 + qxl_release_fence_buffer_objects(release); 301 290 302 - qxl_bo_unreserve(cursor_bo); 291 + /* finish with the userspace bo */ 292 + ret = qxl_bo_reserve(user_bo, false); 293 + if (!ret) { 294 + qxl_bo_unpin(user_bo); 295 + qxl_bo_unreserve(user_bo); 296 + } 297 + drm_gem_object_unreference_unlocked(obj); 298 + 303 299 qxl_bo_unref(&cursor_bo); 304 300 305 301 return ret; 302 + 303 + out_backoff: 304 + qxl_release_backoff_reserve_list(release); 306 305 out_free_bo: 307 306 qxl_bo_unref(&cursor_bo); 308 307 out_free_release: 309 - 
qxl_release_unreserve(qdev, release); 310 308 qxl_release_free(qdev, release); 311 309 out_kunmap: 312 310 qxl_bo_kunmap(user_bo); 313 311 out_unpin: 314 312 qxl_bo_unpin(user_bo); 315 - out_unreserve: 316 - qxl_bo_unreserve(user_bo); 317 313 out_unref: 318 314 drm_gem_object_unreference_unlocked(obj); 319 315 return ret; ··· 336 322 337 323 ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, 338 324 &release, NULL); 325 + if (ret) 326 + return ret; 327 + 328 + ret = qxl_release_reserve_list(release, true); 329 + if (ret) { 330 + qxl_release_free(qdev, release); 331 + return ret; 332 + } 339 333 340 334 qcrtc->cur_x = x; 341 335 qcrtc->cur_y = y; ··· 354 332 cmd->u.position.y = qcrtc->cur_y; 355 333 qxl_release_unmap(qdev, release, &cmd->release_info); 356 334 357 - qxl_fence_releaseable(qdev, release); 358 335 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 359 - qxl_release_unreserve(qdev, release); 336 + qxl_release_fence_buffer_objects(release); 337 + 360 338 return 0; 361 339 } 362 340
+183 -86
drivers/gpu/drm/qxl/qxl_draw.c
··· 23 23 #include "qxl_drv.h" 24 24 #include "qxl_object.h" 25 25 26 + static int alloc_clips(struct qxl_device *qdev, 27 + struct qxl_release *release, 28 + unsigned num_clips, 29 + struct qxl_bo **clips_bo) 30 + { 31 + int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips; 32 + 33 + return qxl_alloc_bo_reserved(qdev, release, size, clips_bo); 34 + } 35 + 26 36 /* returns a pointer to the already allocated qxl_rect array inside 27 37 * the qxl_clip_rects. This is *not* the same as the memory allocated 28 38 * on the device, it is offset to qxl_clip_rects.chunk.data */ 29 39 static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, 30 40 struct qxl_drawable *drawable, 31 41 unsigned num_clips, 32 - struct qxl_bo **clips_bo, 33 - struct qxl_release *release) 42 + struct qxl_bo *clips_bo) 34 43 { 35 44 struct qxl_clip_rects *dev_clips; 36 45 int ret; 37 - int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips; 38 - ret = qxl_alloc_bo_reserved(qdev, size, clips_bo); 39 - if (ret) 40 - return NULL; 41 46 42 - ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips); 47 + ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips); 43 48 if (ret) { 44 - qxl_bo_unref(clips_bo); 45 49 return NULL; 46 50 } 47 51 dev_clips->num_rects = num_clips; ··· 56 52 } 57 53 58 54 static int 59 - make_drawable(struct qxl_device *qdev, int surface, uint8_t type, 60 - const struct qxl_rect *rect, 61 - struct qxl_release **release) 55 + alloc_drawable(struct qxl_device *qdev, struct qxl_release **release) 62 56 { 63 - struct qxl_drawable *drawable; 64 - int i, ret; 65 - 66 - ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable), 57 + int ret; 58 + ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable), 67 59 QXL_RELEASE_DRAWABLE, release, 68 60 NULL); 69 - if (ret) 70 - return ret; 61 + return ret; 62 + } 71 63 72 - drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release); 64 + static void 65 + free_drawable(struct qxl_device *qdev, struct qxl_release *release) 66 + { 67 + qxl_release_free(qdev, release); 68 + } 69 + 70 + /* release needs to be reserved at this point */ 71 + static int 72 + make_drawable(struct qxl_device *qdev, int surface, uint8_t type, 73 + const struct qxl_rect *rect, 74 + struct qxl_release *release) 75 + { 76 + struct qxl_drawable *drawable; 77 + int i; 78 + 79 + drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 80 + if (!drawable) 81 + return -ENOMEM; 82 + 73 83 drawable->type = type; 74 84 75 85 drawable->surface_id = surface; /* Only primary for now */ ··· 109 91 drawable->bbox = *rect; 110 92 111 93 drawable->mm_time = qdev->rom->mm_clock; 112 - qxl_release_unmap(qdev, *release, &drawable->release_info); 94 + qxl_release_unmap(qdev, release, &drawable->release_info); 113 95 return 0; 114 96 } 115 97 116 - static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, 98 + static int alloc_palette_object(struct qxl_device *qdev, 99 + struct qxl_release *release, 100 + struct qxl_bo **palette_bo) 101 + { 102 + return qxl_alloc_bo_reserved(qdev, release, 103 + sizeof(struct qxl_palette) + sizeof(uint32_t) * 2, 104 + palette_bo); 105 + } 106 + 107 + static int qxl_palette_create_1bit(struct qxl_bo *palette_bo, 108 + struct qxl_release *release, 117 109 const struct qxl_fb_image *qxl_fb_image) 118 110 { 119 - struct qxl_device *qdev = qxl_fb_image->qdev; 120 111 const struct fb_image *fb_image = &qxl_fb_image->fb_image; 121 112 uint32_t visual = qxl_fb_image->visual; 122 113 const uint32_t *pseudo_palette = 
qxl_fb_image->pseudo_palette; ··· 135 108 static uint64_t unique; /* we make no attempt to actually set this 136 109 * correctly globaly, since that would require 137 110 * tracking all of our palettes. */ 138 - 139 - ret = qxl_alloc_bo_reserved(qdev, 140 - sizeof(struct qxl_palette) + sizeof(uint32_t) * 2, 141 - palette_bo); 142 - 143 - ret = qxl_bo_kmap(*palette_bo, (void **)&pal); 111 + ret = qxl_bo_kmap(palette_bo, (void **)&pal); 144 112 pal->num_ents = 2; 145 113 pal->unique = unique++; 146 114 if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { ··· 148 126 } 149 127 pal->ents[0] = bgcolor; 150 128 pal->ents[1] = fgcolor; 151 - qxl_bo_kunmap(*palette_bo); 129 + qxl_bo_kunmap(palette_bo); 152 130 return 0; 153 131 } 154 132 ··· 166 144 const char *src = fb_image->data; 167 145 int depth = fb_image->depth; 168 146 struct qxl_release *release; 169 - struct qxl_bo *image_bo; 170 147 struct qxl_image *image; 171 148 int ret; 172 - 149 + struct qxl_drm_image *dimage; 150 + struct qxl_bo *palette_bo = NULL; 173 151 if (stride == 0) 174 152 stride = depth * width / 8; 153 + 154 + ret = alloc_drawable(qdev, &release); 155 + if (ret) 156 + return; 157 + 158 + ret = qxl_image_alloc_objects(qdev, release, 159 + &dimage, 160 + height, stride); 161 + if (ret) 162 + goto out_free_drawable; 163 + 164 + if (depth == 1) { 165 + ret = alloc_palette_object(qdev, release, &palette_bo); 166 + if (ret) 167 + goto out_free_image; 168 + } 169 + 170 + /* do a reservation run over all the objects we just allocated */ 171 + ret = qxl_release_reserve_list(release, true); 172 + if (ret) 173 + goto out_free_palette; 175 174 176 175 rect.left = x; 177 176 rect.right = x + width; 178 177 rect.top = y; 179 178 rect.bottom = y + height; 180 179 181 - ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release); 182 - if (ret) 183 - return; 184 - 185 - ret = qxl_image_create(qdev, release, &image_bo, 186 - (const uint8_t *)src, 0, 0, 187 - width, height, depth, stride); 180 + ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release); 188 181 if (ret) { 189 - qxl_release_unreserve(qdev, release); 182 + qxl_release_backoff_reserve_list(release); 183 + goto out_free_palette; 184 + } 185 + 186 + ret = qxl_image_init(qdev, release, dimage, 187 + (const uint8_t *)src, 0, 0, 188 + width, height, depth, stride); 189 + if (ret) { 190 + qxl_release_backoff_reserve_list(release); 190 191 qxl_release_free(qdev, release); 191 192 return; 192 193 } 193 194 194 195 if (depth == 1) { 195 - struct qxl_bo *palette_bo; 196 196 void *ptr; 197 - ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image); 198 - qxl_release_add_res(qdev, release, palette_bo); 197 + ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image); 199 198 200 - ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); 199 + ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0); 201 200 image = ptr; 202 201 image->u.bitmap.palette = 203 202 qxl_bo_physical_address(qdev, palette_bo, 0); 204 - qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); 205 - qxl_bo_unreserve(palette_bo); 206 - qxl_bo_unref(&palette_bo); 203 + qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr); 207 204 } 208 205 209 206 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); ··· 240 199 drawable->u.copy.mask.bitmap = 0; 241 200 242 201 drawable->u.copy.src_bitmap = 243 - qxl_bo_physical_address(qdev, image_bo, 0); 202 + qxl_bo_physical_address(qdev, dimage->bo, 0); 244 203 qxl_release_unmap(qdev, release, &drawable->release_info); 245 204 246 - qxl_release_add_res(qdev, 
release, image_bo); 247 - qxl_bo_unreserve(image_bo); 248 - qxl_bo_unref(&image_bo); 249 - 250 - qxl_fence_releaseable(qdev, release); 251 205 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 252 - qxl_release_unreserve(qdev, release); 206 + qxl_release_fence_buffer_objects(release); 207 + 208 + out_free_palette: 209 + if (palette_bo) 210 + qxl_bo_unref(&palette_bo); 211 + out_free_image: 212 + qxl_image_free_objects(qdev, dimage); 213 + out_free_drawable: 214 + if (ret) 215 + free_drawable(qdev, release); 253 216 } 254 217 255 218 /* push a draw command using the given clipping rectangles as ··· 288 243 int depth = qxl_fb->base.bits_per_pixel; 289 244 uint8_t *surface_base; 290 245 struct qxl_release *release; 291 - struct qxl_bo *image_bo; 292 246 struct qxl_bo *clips_bo; 247 + struct qxl_drm_image *dimage; 293 248 int ret; 249 + 250 + ret = alloc_drawable(qdev, &release); 251 + if (ret) 252 + return; 294 253 295 254 left = clips->x1; 296 255 right = clips->x2; ··· 312 263 313 264 width = right - left; 314 265 height = bottom - top; 266 + 267 + ret = alloc_clips(qdev, release, num_clips, &clips_bo); 268 + if (ret) 269 + goto out_free_drawable; 270 + 271 + ret = qxl_image_alloc_objects(qdev, release, 272 + &dimage, 273 + height, stride); 274 + if (ret) 275 + goto out_free_clips; 276 + 277 + /* do a reservation run over all the objects we just allocated */ 278 + ret = qxl_release_reserve_list(release, true); 279 + if (ret) 280 + goto out_free_image; 281 + 315 282 drawable_rect.left = left; 316 283 drawable_rect.right = right; 317 284 drawable_rect.top = top; 318 285 drawable_rect.bottom = bottom; 286 + 319 287 ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect, 320 - &release); 288 + release); 321 289 if (ret) 322 - return; 290 + goto out_release_backoff; 323 291 324 292 ret = qxl_bo_kmap(bo, (void **)&surface_base); 325 293 if (ret) 326 - goto out_unref; 294 + goto out_release_backoff; 327 295 328 - ret = qxl_image_create(qdev, release, &image_bo, surface_base, 329 - left, top, width, height, depth, stride); 296 + 297 + ret = qxl_image_init(qdev, release, dimage, surface_base, 298 + left, top, width, height, depth, stride); 330 299 qxl_bo_kunmap(bo); 331 300 if (ret) 332 - goto out_unref; 301 + goto out_release_backoff; 333 302 334 - rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release); 335 - if (!rects) { 336 - qxl_bo_unref(&image_bo); 337 - goto out_unref; 338 - } 303 + rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo); 304 + if (!rects) 305 + goto out_release_backoff; 306 + 339 307 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 340 308 341 309 drawable->clip.type = SPICE_CLIP_TYPE_RECTS; 342 310 drawable->clip.data = qxl_bo_physical_address(qdev, 343 311 clips_bo, 0); 344 - qxl_release_add_res(qdev, release, clips_bo); 345 312 346 313 drawable->u.copy.src_area.top = 0; 347 314 drawable->u.copy.src_area.bottom = height; ··· 371 306 drawable->u.copy.mask.pos.y = 0; 372 307 drawable->u.copy.mask.bitmap = 0; 373 308 374 - drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0); 309 + drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0); 375 310 qxl_release_unmap(qdev, release, &drawable->release_info); 376 - qxl_release_add_res(qdev, release, image_bo); 377 - qxl_bo_unreserve(image_bo); 378 - qxl_bo_unref(&image_bo); 311 + 379 312 clips_ptr = clips; 380 313 for (i = 0; i < num_clips; i++, clips_ptr += inc) { 381 314 rects[i].left = clips_ptr->x1; ··· 382 319 
rects[i].bottom = clips_ptr->y2; 383 320 } 384 321 qxl_bo_kunmap(clips_bo); 385 - qxl_bo_unreserve(clips_bo); 386 - qxl_bo_unref(&clips_bo); 387 322 388 - qxl_fence_releaseable(qdev, release); 389 323 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 390 - qxl_release_unreserve(qdev, release); 391 - return; 324 + qxl_release_fence_buffer_objects(release); 392 325 393 - out_unref: 394 - qxl_release_unreserve(qdev, release); 395 - qxl_release_free(qdev, release); 326 + out_release_backoff: 327 + if (ret) 328 + qxl_release_backoff_reserve_list(release); 329 + out_free_image: 330 + qxl_image_free_objects(qdev, dimage); 331 + out_free_clips: 332 + qxl_bo_unref(&clips_bo); 333 + out_free_drawable: 334 + /* only free drawable on error */ 335 + if (ret) 336 + free_drawable(qdev, release); 337 + 396 338 } 397 339 398 340 void qxl_draw_copyarea(struct qxl_device *qdev, ··· 410 342 struct qxl_release *release; 411 343 int ret; 412 344 345 + ret = alloc_drawable(qdev, &release); 346 + if (ret) 347 + return; 348 + 349 + /* do a reservation run over all the objects we just allocated */ 350 + ret = qxl_release_reserve_list(release, true); 351 + if (ret) 352 + goto out_free_release; 353 + 413 354 rect.left = dx; 414 355 rect.top = dy; 415 356 rect.right = dx + width; 416 357 rect.bottom = dy + height; 417 - ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release); 418 - if (ret) 419 - return; 358 + ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, release); 359 + if (ret) { 360 + qxl_release_backoff_reserve_list(release); 361 + goto out_free_release; 362 + } 420 363 421 364 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 422 365 drawable->u.copy_bits.src_pos.x = sx; 423 366 drawable->u.copy_bits.src_pos.y = sy; 424 - 425 367 qxl_release_unmap(qdev, release, &drawable->release_info); 426 - qxl_fence_releaseable(qdev, release); 368 + 427 369 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 428 - qxl_release_unreserve(qdev, release); 370 + qxl_release_fence_buffer_objects(release); 371 + 372 + out_free_release: 373 + if (ret) 374 + free_drawable(qdev, release); 429 375 } 430 376 431 377 void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) ··· 452 370 struct qxl_release *release; 453 371 int ret; 454 372 455 - ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release); 373 + ret = alloc_drawable(qdev, &release); 456 374 if (ret) 457 375 return; 376 + 377 + /* do a reservation run over all the objects we just allocated */ 378 + ret = qxl_release_reserve_list(release, true); 379 + if (ret) 380 + goto out_free_release; 381 + 382 + ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release); 383 + if (ret) { 384 + qxl_release_backoff_reserve_list(release); 385 + goto out_free_release; 386 + } 458 387 459 388 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 460 389 drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID; ··· 477 384 drawable->u.fill.mask.bitmap = 0; 478 385 479 386 qxl_release_unmap(qdev, release, &drawable->release_info); 480 - qxl_fence_releaseable(qdev, release); 387 + 481 388 qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 482 - qxl_release_unreserve(qdev, release); 389 + qxl_release_fence_buffer_objects(release); 390 + 391 + out_free_release: 392 + if (ret) 393 + free_drawable(qdev, release); 483 394 }
+41 -35
drivers/gpu/drm/qxl/qxl_drv.h
··· 42 42 #include <ttm/ttm_placement.h> 43 43 #include <ttm/ttm_module.h> 44 44 45 + /* just for ttm_validate_buffer */ 46 + #include <ttm/ttm_execbuf_util.h> 47 + 45 48 #include <drm/qxl_drm.h> 46 49 #include "qxl_dev.h" 47 50 ··· 121 118 uint32_t surface_id; 122 119 struct qxl_fence fence; /* per bo fence - list of releases */ 123 120 struct qxl_release *surf_create; 124 - atomic_t reserve_count; 125 121 }; 126 122 #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) 123 + #define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo) 127 124 128 125 struct qxl_gem { 129 126 struct mutex mutex; ··· 131 128 }; 132 129 133 130 struct qxl_bo_list { 134 - struct list_head lhead; 135 - struct qxl_bo *bo; 136 - }; 137 - 138 - struct qxl_reloc_list { 139 - struct list_head bos; 131 + struct ttm_validate_buffer tv; 140 132 }; 141 133 142 134 struct qxl_crtc { ··· 193 195 struct qxl_release { 194 196 int id; 195 197 int type; 196 - int bo_count; 197 198 uint32_t release_offset; 198 199 uint32_t surface_release_id; 199 - struct qxl_bo *bos[QXL_MAX_RES]; 200 + struct ww_acquire_ctx ticket; 201 + struct list_head bos; 202 + }; 203 + 204 + struct qxl_drm_chunk { 205 + struct list_head head; 206 + struct qxl_bo *bo; 207 + }; 208 + 209 + struct qxl_drm_image { 210 + struct qxl_bo *bo; 211 + struct list_head chunk_list; 200 212 }; 201 213 202 214 struct qxl_fb_image { ··· 322 314 struct workqueue_struct *gc_queue; 323 315 struct work_struct gc_work; 324 316 317 + struct work_struct fb_work; 325 318 }; 326 319 327 320 /* forward declaration for QXL_INFO_IO */ ··· 442 433 443 434 /* qxl image */ 444 435 445 - int qxl_image_create(struct qxl_device *qdev, 446 - struct qxl_release *release, 447 - struct qxl_bo **image_bo, 448 - const uint8_t *data, 449 - int x, int y, int width, int height, 450 - int depth, int stride); 436 + int qxl_image_init(struct qxl_device *qdev, 437 + struct qxl_release *release, 438 + struct qxl_drm_image *dimage, 439 + const uint8_t *data, 440 + int x, int y, int width, int height, 441 + int depth, int stride); 442 + int 443 + qxl_image_alloc_objects(struct qxl_device *qdev, 444 + struct qxl_release *release, 445 + struct qxl_drm_image **image_ptr, 446 + int height, int stride); 447 + void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage); 448 + 451 449 void qxl_update_screen(struct qxl_device *qxl); 452 450 453 451 /* qxl io operations (qxl_cmd.c) */ ··· 475 459 void qxl_io_flush_release(struct qxl_device *qdev); 476 460 void qxl_io_flush_surfaces(struct qxl_device *qdev); 477 461 478 - int qxl_release_reserve(struct qxl_device *qdev, 479 - struct qxl_release *release, bool no_wait); 480 - void qxl_release_unreserve(struct qxl_device *qdev, 481 - struct qxl_release *release); 482 462 union qxl_release_info *qxl_release_map(struct qxl_device *qdev, 483 463 struct qxl_release *release); 484 464 void qxl_release_unmap(struct qxl_device *qdev, 485 465 struct qxl_release *release, 486 466 union qxl_release_info *info); 487 - /* 488 - * qxl_bo_add_resource. 
489 - * 490 - */ 491 - void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource); 467 + int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo); 468 + int qxl_release_reserve_list(struct qxl_release *release, bool no_intr); 469 + void qxl_release_backoff_reserve_list(struct qxl_release *release); 470 + void qxl_release_fence_buffer_objects(struct qxl_release *release); 492 471 493 472 int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, 494 473 enum qxl_surface_cmd_type surface_cmd_type, ··· 492 481 int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, 493 482 int type, struct qxl_release **release, 494 483 struct qxl_bo **rbo); 495 - int qxl_fence_releaseable(struct qxl_device *qdev, 496 - struct qxl_release *release); 484 + 497 485 int 498 486 qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, 499 487 uint32_t type, bool interruptible); 500 488 int 501 489 qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, 502 490 uint32_t type, bool interruptible); 503 - int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, 491 + int qxl_alloc_bo_reserved(struct qxl_device *qdev, 492 + struct qxl_release *release, 493 + unsigned long size, 504 494 struct qxl_bo **_bo); 505 495 /* qxl drawing commands */ 506 496 ··· 522 510 u32 sx, u32 sy, 523 511 u32 dx, u32 dy); 524 512 525 - uint64_t 526 - qxl_release_alloc(struct qxl_device *qdev, int type, 527 - struct qxl_release **ret); 528 - 529 513 void qxl_release_free(struct qxl_device *qdev, 530 514 struct qxl_release *release); 531 - void qxl_release_add_res(struct qxl_device *qdev, 532 - struct qxl_release *release, 533 - struct qxl_bo *bo); 515 + 534 516 /* used by qxl_debugfs_release */ 535 517 struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, 536 518 uint64_t id); ··· 567 561 int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); 568 562 569 563 /* qxl_fence.c */ 570 - int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id); 564 + void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id); 571 565 int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id); 572 566 int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence); 573 567 void qxl_fence_fini(struct qxl_fence *qfence);
+165 -23
drivers/gpu/drm/qxl/qxl_fb.c
··· 37 37 38 38 #define QXL_DIRTY_DELAY (HZ / 30) 39 39 40 + #define QXL_FB_OP_FILLRECT 1 41 + #define QXL_FB_OP_COPYAREA 2 42 + #define QXL_FB_OP_IMAGEBLIT 3 43 + 44 + struct qxl_fb_op { 45 + struct list_head head; 46 + int op_type; 47 + union { 48 + struct fb_fillrect fr; 49 + struct fb_copyarea ca; 50 + struct fb_image ib; 51 + } op; 52 + void *img_data; 53 + }; 54 + 40 55 struct qxl_fbdev { 41 56 struct drm_fb_helper helper; 42 57 struct qxl_framebuffer qfb; 43 58 struct list_head fbdev_list; 44 59 struct qxl_device *qdev; 45 60 61 + spinlock_t delayed_ops_lock; 62 + struct list_head delayed_ops; 46 63 void *shadow; 47 64 int size; 48 65 ··· 181 164 .deferred_io = qxl_deferred_io, 182 165 }; 183 166 184 - static void qxl_fb_fillrect(struct fb_info *info, 185 - const struct fb_fillrect *fb_rect) 167 + static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev, 168 + const struct fb_fillrect *fb_rect) 169 + { 170 + struct qxl_fb_op *op; 171 + unsigned long flags; 172 + 173 + op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN); 174 + if (!op) 175 + return; 176 + 177 + op->op.fr = *fb_rect; 178 + op->img_data = NULL; 179 + op->op_type = QXL_FB_OP_FILLRECT; 180 + 181 + spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); 182 + list_add_tail(&op->head, &qfbdev->delayed_ops); 183 + spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); 184 + } 185 + 186 + static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev, 187 + const struct fb_copyarea *fb_copy) 188 + { 189 + struct qxl_fb_op *op; 190 + unsigned long flags; 191 + 192 + op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN); 193 + if (!op) 194 + return; 195 + 196 + op->op.ca = *fb_copy; 197 + op->img_data = NULL; 198 + op->op_type = QXL_FB_OP_COPYAREA; 199 + 200 + spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); 201 + list_add_tail(&op->head, &qfbdev->delayed_ops); 202 + spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); 203 + } 204 + 205 + static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev, 206 + const struct fb_image *fb_image) 207 + { 208 + struct qxl_fb_op *op; 209 + unsigned long flags; 210 + uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? 
fb_image->depth / 8 : 1); 211 + 212 + op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN); 213 + if (!op) 214 + return; 215 + 216 + op->op.ib = *fb_image; 217 + op->img_data = (void *)(op + 1); 218 + op->op_type = QXL_FB_OP_IMAGEBLIT; 219 + 220 + memcpy(op->img_data, fb_image->data, size); 221 + 222 + op->op.ib.data = op->img_data; 223 + spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); 224 + list_add_tail(&op->head, &qfbdev->delayed_ops); 225 + spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); 226 + } 227 + 228 + static void qxl_fb_fillrect_internal(struct fb_info *info, 229 + const struct fb_fillrect *fb_rect) 186 230 { 187 231 struct qxl_fbdev *qfbdev = info->par; 188 232 struct qxl_device *qdev = qfbdev->qdev; ··· 281 203 qxl_draw_fill_rec.rect = rect; 282 204 qxl_draw_fill_rec.color = color; 283 205 qxl_draw_fill_rec.rop = rop; 284 - if (!drm_can_sleep()) { 285 - qxl_io_log(qdev, 286 - "%s: TODO use RCU, mysterious locks with spin_lock\n", 287 - __func__); 288 - return; 289 - } 206 + 290 207 qxl_draw_fill(&qxl_draw_fill_rec); 291 208 } 292 209 293 - static void qxl_fb_copyarea(struct fb_info *info, 294 - const struct fb_copyarea *region) 210 + static void qxl_fb_fillrect(struct fb_info *info, 211 + const struct fb_fillrect *fb_rect) 212 + { 213 + struct qxl_fbdev *qfbdev = info->par; 214 + struct qxl_device *qdev = qfbdev->qdev; 215 + 216 + if (!drm_can_sleep()) { 217 + qxl_fb_delayed_fillrect(qfbdev, fb_rect); 218 + schedule_work(&qdev->fb_work); 219 + return; 220 + } 221 + /* make sure any previous work is done */ 222 + flush_work(&qdev->fb_work); 223 + qxl_fb_fillrect_internal(info, fb_rect); 224 + } 225 + 226 + static void qxl_fb_copyarea_internal(struct fb_info *info, 227 + const struct fb_copyarea *region) 295 228 { 296 229 struct qxl_fbdev *qfbdev = info->par; 297 230 ··· 312 223 region->dx, region->dy); 313 224 } 314 225 226 + static void qxl_fb_copyarea(struct fb_info *info, 227 + const struct fb_copyarea *region) 228 + { 229 + struct qxl_fbdev *qfbdev = info->par; 230 + struct qxl_device *qdev = qfbdev->qdev; 231 + 232 + if (!drm_can_sleep()) { 233 + qxl_fb_delayed_copyarea(qfbdev, region); 234 + schedule_work(&qdev->fb_work); 235 + return; 236 + } 237 + /* make sure any previous work is done */ 238 + flush_work(&qdev->fb_work); 239 + qxl_fb_copyarea_internal(info, region); 240 + } 241 + 315 242 static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image) 316 243 { 317 244 qxl_draw_opaque_fb(qxl_fb_image, 0); 245 + } 246 + 247 + static void qxl_fb_imageblit_internal(struct fb_info *info, 248 + const struct fb_image *image) 249 + { 250 + struct qxl_fbdev *qfbdev = info->par; 251 + struct qxl_fb_image qxl_fb_image; 252 + 253 + /* ensure proper order rendering operations - TODO: must do this 254 + * for everything. 
*/ 255 + qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); 256 + qxl_fb_imageblit_safe(&qxl_fb_image); 318 257 } 319 258 320 259 static void qxl_fb_imageblit(struct fb_info *info, ··· 350 233 { 351 234 struct qxl_fbdev *qfbdev = info->par; 352 235 struct qxl_device *qdev = qfbdev->qdev; 353 - struct qxl_fb_image qxl_fb_image; 354 236 355 237 if (!drm_can_sleep()) { 356 - /* we cannot do any ttm_bo allocation since that will fail on 357 - * ioremap_wc..__get_vm_area_node, so queue the work item 358 - * instead This can happen from printk inside an interrupt 359 - * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */ 360 - qxl_io_log(qdev, 361 - "%s: TODO use RCU, mysterious locks with spin_lock\n", 362 - __func__); 238 + qxl_fb_delayed_imageblit(qfbdev, image); 239 + schedule_work(&qdev->fb_work); 363 240 return; 364 241 } 242 + /* make sure any previous work is done */ 243 + flush_work(&qdev->fb_work); 244 + qxl_fb_imageblit_internal(info, image); 245 + } 365 246 366 - /* ensure proper order of rendering operations - TODO: must do this 367 - * for everything. */ 368 - qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); 369 - qxl_fb_imageblit_safe(&qxl_fb_image); 247 + static void qxl_fb_work(struct work_struct *work) 248 + { 249 + struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work); 250 + unsigned long flags; 251 + struct qxl_fb_op *entry, *tmp; 252 + struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev; 253 + 254 + /* since the irq context just adds entries to the end of the 255 + list dropping the lock should be fine, as entry isn't modified 256 + in the operation code */ 257 + spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); 258 + list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) { 259 + spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); 260 + switch (entry->op_type) { 261 + case QXL_FB_OP_FILLRECT: 262 + qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr); 263 + break; 264 + case QXL_FB_OP_COPYAREA: 265 + qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca); 266 + break; 267 + case QXL_FB_OP_IMAGEBLIT: 268 + qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib); 269 + break; 270 + } 271 + spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); 272 + list_del(&entry->head); 273 + kfree(entry); 274 + } 275 + spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); 370 276 } 371 277 372 278 int qxl_fb_init(struct qxl_device *qdev) 373 279 { 280 + INIT_WORK(&qdev->fb_work, qxl_fb_work); 374 281 return 0; 375 282 } 376 283 ··· 677 536 qfbdev->qdev = qdev; 678 537 qdev->mode_info.qfbdev = qfbdev; 679 538 qfbdev->helper.funcs = &qxl_fb_helper_funcs; 680 - 539 + spin_lock_init(&qfbdev->delayed_ops_lock); 540 + INIT_LIST_HEAD(&qfbdev->delayed_ops); 681 541 ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, 682 542 qxl_num_crtc /* num_crtc - QXL supports just 1 */, 683 543 QXLFB_CONN_LIMIT);
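
The delayed-fb-ops change above reduces to the same shape in each fbdev
entry point; a condensed sketch of qxl_fb_fillrect() from the patch
(copyarea and imageblit follow the same pattern):

    static void qxl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *fb_rect)
    {
            struct qxl_fbdev *qfbdev = info->par;
            struct qxl_device *qdev = qfbdev->qdev;

            if (!drm_can_sleep()) {
                    /* atomic context (e.g. printk from an irq): queue the op and defer it */
                    qxl_fb_delayed_fillrect(qfbdev, fb_rect);
                    schedule_work(&qdev->fb_work);
                    return;
            }
            /* process context: drain anything queued first so ordering is preserved */
            flush_work(&qdev->fb_work);
            qxl_fb_fillrect_internal(info, fb_rect);
    }
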
+2 -8
drivers/gpu/drm/qxl/qxl_fence.c
··· 49 49 50 50 For some reason every so often qxl hw fails to release, things go wrong. 51 51 */ 52 - 53 - 54 - int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id) 52 + /* must be called with the fence lock held */ 53 + void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id) 55 54 { 56 - struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence); 57 - 58 - spin_lock(&bo->tbo.bdev->fence_lock); 59 55 radix_tree_insert(&qfence->tree, rel_id, qfence); 60 56 qfence->num_active_releases++; 61 - spin_unlock(&bo->tbo.bdev->fence_lock); 62 - return 0; 63 57 } 64 58 65 59 int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
+1 -1
drivers/gpu/drm/qxl/qxl_gem.c
··· 55 55 /* At least align on page size */ 56 56 if (alignment < PAGE_SIZE) 57 57 alignment = PAGE_SIZE; 58 - r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo); 58 + r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo); 59 59 if (r) { 60 60 if (r != -ERESTARTSYS) 61 61 DRM_ERROR(
+87 -26
drivers/gpu/drm/qxl/qxl_image.c
··· 30 30 #include "qxl_object.h" 31 31 32 32 static int 33 - qxl_image_create_helper(struct qxl_device *qdev, 34 - struct qxl_release *release, 35 - struct qxl_bo **image_bo, 36 - const uint8_t *data, 37 - int width, int height, 38 - int depth, unsigned int hash, 39 - int stride) 33 + qxl_allocate_chunk(struct qxl_device *qdev, 34 + struct qxl_release *release, 35 + struct qxl_drm_image *image, 36 + unsigned int chunk_size) 40 37 { 38 + struct qxl_drm_chunk *chunk; 39 + int ret; 40 + 41 + chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL); 42 + if (!chunk) 43 + return -ENOMEM; 44 + 45 + ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo); 46 + if (ret) { 47 + kfree(chunk); 48 + return ret; 49 + } 50 + 51 + list_add_tail(&chunk->head, &image->chunk_list); 52 + return 0; 53 + } 54 + 55 + int 56 + qxl_image_alloc_objects(struct qxl_device *qdev, 57 + struct qxl_release *release, 58 + struct qxl_drm_image **image_ptr, 59 + int height, int stride) 60 + { 61 + struct qxl_drm_image *image; 62 + int ret; 63 + 64 + image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL); 65 + if (!image) 66 + return -ENOMEM; 67 + 68 + INIT_LIST_HEAD(&image->chunk_list); 69 + 70 + ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo); 71 + if (ret) { 72 + kfree(image); 73 + return ret; 74 + } 75 + 76 + ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height); 77 + if (ret) { 78 + qxl_bo_unref(&image->bo); 79 + kfree(image); 80 + return ret; 81 + } 82 + *image_ptr = image; 83 + return 0; 84 + } 85 + 86 + void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage) 87 + { 88 + struct qxl_drm_chunk *chunk, *tmp; 89 + 90 + list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) { 91 + qxl_bo_unref(&chunk->bo); 92 + kfree(chunk); 93 + } 94 + 95 + qxl_bo_unref(&dimage->bo); 96 + kfree(dimage); 97 + } 98 + 99 + static int 100 + qxl_image_init_helper(struct qxl_device *qdev, 101 + struct qxl_release *release, 102 + struct qxl_drm_image *dimage, 103 + const uint8_t *data, 104 + int width, int height, 105 + int depth, unsigned int hash, 106 + int stride) 107 + { 108 + struct qxl_drm_chunk *drv_chunk; 41 109 struct qxl_image *image; 42 110 struct qxl_data_chunk *chunk; 43 111 int i; 44 112 int chunk_stride; 45 113 int linesize = width * depth / 8; 46 - struct qxl_bo *chunk_bo; 47 - int ret; 114 + struct qxl_bo *chunk_bo, *image_bo; 48 115 void *ptr; 49 116 /* Chunk */ 50 117 /* FIXME: Check integer overflow */ 51 118 /* TODO: variable number of chunks */ 119 + 120 + drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head); 121 + 122 + chunk_bo = drv_chunk->bo; 52 123 chunk_stride = stride; /* TODO: should use linesize, but it renders 53 124 wrong (check the bitmaps are sent correctly 54 125 first) */ 55 - ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride, 56 - &chunk_bo); 57 - 126 + 58 127 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); 59 128 chunk = ptr; 60 129 chunk->data_size = height * chunk_stride; ··· 171 102 while (remain > 0) { 172 103 page_base = out_offset & PAGE_MASK; 173 104 page_offset = offset_in_page(out_offset); 174 - 175 105 size = min((int)(PAGE_SIZE - page_offset), remain); 176 106 177 107 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base); ··· 184 116 } 185 117 } 186 118 } 187 - 188 - 189 119 qxl_bo_kunmap(chunk_bo); 190 120 191 - /* Image */ 192 - ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo); 193 - 194 - ptr = 
qxl_bo_kmap_atomic_page(qdev, *image_bo, 0); 121 + image_bo = dimage->bo; 122 + ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); 195 123 image = ptr; 196 124 197 125 image->descriptor.id = 0; ··· 218 154 image->u.bitmap.stride = chunk_stride; 219 155 image->u.bitmap.palette = 0; 220 156 image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0); 221 - qxl_release_add_res(qdev, release, chunk_bo); 222 - qxl_bo_unreserve(chunk_bo); 223 - qxl_bo_unref(&chunk_bo); 224 157 225 - qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr); 158 + qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); 226 159 227 160 return 0; 228 161 } 229 162 230 - int qxl_image_create(struct qxl_device *qdev, 163 + int qxl_image_init(struct qxl_device *qdev, 231 164 struct qxl_release *release, 232 - struct qxl_bo **image_bo, 165 + struct qxl_drm_image *dimage, 233 166 const uint8_t *data, 234 167 int x, int y, int width, int height, 235 168 int depth, int stride) 236 169 { 237 170 data += y * stride + x * (depth / 8); 238 - return qxl_image_create_helper(qdev, release, image_bo, data, 171 + return qxl_image_init_helper(qdev, release, dimage, data, 239 172 width, height, depth, 0, stride); 240 173 }
+177 -146
drivers/gpu/drm/qxl/qxl_ioctl.c
··· 68 68 &qxl_map->offset); 69 69 } 70 70 71 + struct qxl_reloc_info { 72 + int type; 73 + struct qxl_bo *dst_bo; 74 + uint32_t dst_offset; 75 + struct qxl_bo *src_bo; 76 + int src_offset; 77 + }; 78 + 71 79 /* 72 80 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's 73 81 * are on vram). 74 82 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) 75 83 */ 76 84 static void 77 - apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, 78 - struct qxl_bo *src, uint64_t src_off) 85 + apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) 79 86 { 80 87 void *reloc_page; 81 - 82 - reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); 83 - *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, 84 - src, src_off); 85 - qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); 88 + reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); 89 + *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, 90 + info->src_bo, 91 + info->src_offset); 92 + qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); 86 93 } 87 94 88 95 static void 89 - apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, 90 - struct qxl_bo *src) 96 + apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) 91 97 { 92 98 uint32_t id = 0; 93 99 void *reloc_page; 94 100 95 - if (src && !src->is_primary) 96 - id = src->surface_id; 101 + if (info->src_bo && !info->src_bo->is_primary) 102 + id = info->src_bo->surface_id; 97 103 98 - reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); 99 - *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id; 100 - qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); 104 + reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); 105 + *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id; 106 + qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); 101 107 } 102 108 103 109 /* return holding the reference to this object */ 104 110 static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, 105 111 struct drm_file *file_priv, uint64_t handle, 106 - struct qxl_reloc_list *reloc_list) 112 + struct qxl_release *release) 107 113 { 108 114 struct drm_gem_object *gobj; 109 115 struct qxl_bo *qobj; 110 116 int ret; 111 117 112 118 gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); 113 - if (!gobj) { 114 - DRM_ERROR("bad bo handle %lld\n", handle); 119 + if (!gobj) 115 120 return NULL; 116 - } 121 + 117 122 qobj = gem_to_qxl_bo(gobj); 118 123 119 - ret = qxl_bo_list_add(reloc_list, qobj); 124 + ret = qxl_release_list_add(release, qobj); 120 125 if (ret) 121 126 return NULL; 122 127 ··· 134 129 * However, the command as passed from user space must *not* contain the initial 135 130 * QXLReleaseInfo struct (first XXX bytes) 136 131 */ 132 + static int qxl_process_single_command(struct qxl_device *qdev, 133 + struct drm_qxl_command *cmd, 134 + struct drm_file *file_priv) 135 + { 136 + struct qxl_reloc_info *reloc_info; 137 + int release_type; 138 + struct qxl_release *release; 139 + struct qxl_bo *cmd_bo; 140 + void *fb_cmd; 141 + int i, j, ret, num_relocs; 142 + int unwritten; 143 + 144 + switch (cmd->type) { 145 + case QXL_CMD_DRAW: 146 + release_type = QXL_RELEASE_DRAWABLE; 147 + break; 148 + case QXL_CMD_SURFACE: 149 + case QXL_CMD_CURSOR: 150 + default: 151 + DRM_DEBUG("Only draw commands in execbuffers\n"); 152 + return 
-EINVAL; 153 + break; 154 + } 155 + 156 + if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info)) 157 + return -EINVAL; 158 + 159 + if (!access_ok(VERIFY_READ, 160 + (void *)(unsigned long)cmd->command, 161 + cmd->command_size)) 162 + return -EFAULT; 163 + 164 + reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL); 165 + if (!reloc_info) 166 + return -ENOMEM; 167 + 168 + ret = qxl_alloc_release_reserved(qdev, 169 + sizeof(union qxl_release_info) + 170 + cmd->command_size, 171 + release_type, 172 + &release, 173 + &cmd_bo); 174 + if (ret) 175 + goto out_free_reloc; 176 + 177 + /* TODO copy slow path code from i915 */ 178 + fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); 179 + unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size); 180 + 181 + { 182 + struct qxl_drawable *draw = fb_cmd; 183 + draw->mm_time = qdev->rom->mm_clock; 184 + } 185 + 186 + qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); 187 + if (unwritten) { 188 + DRM_ERROR("got unwritten %d\n", unwritten); 189 + ret = -EFAULT; 190 + goto out_free_release; 191 + } 192 + 193 + /* fill out reloc info structs */ 194 + num_relocs = 0; 195 + for (i = 0; i < cmd->relocs_num; ++i) { 196 + struct drm_qxl_reloc reloc; 197 + 198 + if (DRM_COPY_FROM_USER(&reloc, 199 + &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i], 200 + sizeof(reloc))) { 201 + ret = -EFAULT; 202 + goto out_free_bos; 203 + } 204 + 205 + /* add the bos to the list of bos to validate - 206 + need to validate first then process relocs? */ 207 + if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) { 208 + DRM_DEBUG("unknown reloc type %d\n", reloc_info[i].type); 209 + 210 + ret = -EINVAL; 211 + goto out_free_bos; 212 + } 213 + reloc_info[i].type = reloc.reloc_type; 214 + 215 + if (reloc.dst_handle) { 216 + reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv, 217 + reloc.dst_handle, release); 218 + if (!reloc_info[i].dst_bo) { 219 + ret = -EINVAL; 220 + reloc_info[i].src_bo = NULL; 221 + goto out_free_bos; 222 + } 223 + reloc_info[i].dst_offset = reloc.dst_offset; 224 + } else { 225 + reloc_info[i].dst_bo = cmd_bo; 226 + reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset; 227 + } 228 + num_relocs++; 229 + 230 + /* reserve and validate the reloc dst bo */ 231 + if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { 232 + reloc_info[i].src_bo = 233 + qxlhw_handle_to_bo(qdev, file_priv, 234 + reloc.src_handle, release); 235 + if (!reloc_info[i].src_bo) { 236 + if (reloc_info[i].dst_bo != cmd_bo) 237 + drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base); 238 + ret = -EINVAL; 239 + goto out_free_bos; 240 + } 241 + reloc_info[i].src_offset = reloc.src_offset; 242 + } else { 243 + reloc_info[i].src_bo = NULL; 244 + reloc_info[i].src_offset = 0; 245 + } 246 + } 247 + 248 + /* validate all buffers */ 249 + ret = qxl_release_reserve_list(release, false); 250 + if (ret) 251 + goto out_free_bos; 252 + 253 + for (i = 0; i < cmd->relocs_num; ++i) { 254 + if (reloc_info[i].type == QXL_RELOC_TYPE_BO) 255 + apply_reloc(qdev, &reloc_info[i]); 256 + else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF) 257 + apply_surf_reloc(qdev, &reloc_info[i]); 258 + } 259 + 260 + ret = qxl_push_command_ring_release(qdev, release, cmd->type, true); 261 + if (ret) 262 + qxl_release_backoff_reserve_list(release); 263 
+ else 264 + qxl_release_fence_buffer_objects(release); 265 + 266 + out_free_bos: 267 + for (j = 0; j < num_relocs; j++) { 268 + if (reloc_info[j].dst_bo != cmd_bo) 269 + drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base); 270 + if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo) 271 + drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base); 272 + } 273 + out_free_release: 274 + if (ret) 275 + qxl_release_free(qdev, release); 276 + out_free_reloc: 277 + kfree(reloc_info); 278 + return ret; 279 + } 280 + 137 281 static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, 138 282 struct drm_file *file_priv) 139 283 { ··· 290 136 struct drm_qxl_execbuffer *execbuffer = data; 291 137 struct drm_qxl_command user_cmd; 292 138 int cmd_num; 293 - struct qxl_bo *reloc_src_bo; 294 - struct qxl_bo *reloc_dst_bo; 295 - struct drm_qxl_reloc reloc; 296 - void *fb_cmd; 297 - int i, ret; 298 - struct qxl_reloc_list reloc_list; 299 - int unwritten; 300 - uint32_t reloc_dst_offset; 301 - INIT_LIST_HEAD(&reloc_list.bos); 139 + int ret; 302 140 303 141 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { 304 - struct qxl_release *release; 305 - struct qxl_bo *cmd_bo; 306 - int release_type; 142 + 307 143 struct drm_qxl_command *commands = 308 144 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; 309 145 310 146 if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], 311 147 sizeof(user_cmd))) 312 148 return -EFAULT; 313 - switch (user_cmd.type) { 314 - case QXL_CMD_DRAW: 315 - release_type = QXL_RELEASE_DRAWABLE; 316 - break; 317 - case QXL_CMD_SURFACE: 318 - case QXL_CMD_CURSOR: 319 - default: 320 - DRM_DEBUG("Only draw commands in execbuffers\n"); 321 - return -EINVAL; 322 - break; 323 - } 324 149 325 - if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info)) 326 - return -EINVAL; 327 - 328 - if (!access_ok(VERIFY_READ, 329 - (void *)(unsigned long)user_cmd.command, 330 - user_cmd.command_size)) 331 - return -EFAULT; 332 - 333 - ret = qxl_alloc_release_reserved(qdev, 334 - sizeof(union qxl_release_info) + 335 - user_cmd.command_size, 336 - release_type, 337 - &release, 338 - &cmd_bo); 150 + ret = qxl_process_single_command(qdev, &user_cmd, file_priv); 339 151 if (ret) 340 152 return ret; 341 - 342 - /* TODO copy slow path code from i915 */ 343 - fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); 344 - unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size); 345 - 346 - { 347 - struct qxl_drawable *draw = fb_cmd; 348 - 349 - draw->mm_time = qdev->rom->mm_clock; 350 - } 351 - qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); 352 - if (unwritten) { 353 - DRM_ERROR("got unwritten %d\n", unwritten); 354 - qxl_release_unreserve(qdev, release); 355 - qxl_release_free(qdev, release); 356 - return -EFAULT; 357 - } 358 - 359 - for (i = 0 ; i < user_cmd.relocs_num; ++i) { 360 - if (DRM_COPY_FROM_USER(&reloc, 361 - &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i], 362 - sizeof(reloc))) { 363 - qxl_bo_list_unreserve(&reloc_list, true); 364 - qxl_release_unreserve(qdev, release); 365 - qxl_release_free(qdev, release); 366 - return -EFAULT; 367 - } 368 - 369 - /* add the bos to the list of bos to validate - 370 - need to validate first then process relocs? 
*/ 371 - if (reloc.dst_handle) { 372 - reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv, 373 - reloc.dst_handle, &reloc_list); 374 - if (!reloc_dst_bo) { 375 - qxl_bo_list_unreserve(&reloc_list, true); 376 - qxl_release_unreserve(qdev, release); 377 - qxl_release_free(qdev, release); 378 - return -EINVAL; 379 - } 380 - reloc_dst_offset = 0; 381 - } else { 382 - reloc_dst_bo = cmd_bo; 383 - reloc_dst_offset = release->release_offset; 384 - } 385 - 386 - /* reserve and validate the reloc dst bo */ 387 - if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { 388 - reloc_src_bo = 389 - qxlhw_handle_to_bo(qdev, file_priv, 390 - reloc.src_handle, &reloc_list); 391 - if (!reloc_src_bo) { 392 - if (reloc_dst_bo != cmd_bo) 393 - drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); 394 - qxl_bo_list_unreserve(&reloc_list, true); 395 - qxl_release_unreserve(qdev, release); 396 - qxl_release_free(qdev, release); 397 - return -EINVAL; 398 - } 399 - } else 400 - reloc_src_bo = NULL; 401 - if (reloc.reloc_type == QXL_RELOC_TYPE_BO) { 402 - apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, 403 - reloc_src_bo, reloc.src_offset); 404 - } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) { 405 - apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo); 406 - } else { 407 - DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type); 408 - return -EINVAL; 409 - } 410 - 411 - if (reloc_src_bo && reloc_src_bo != cmd_bo) { 412 - qxl_release_add_res(qdev, release, reloc_src_bo); 413 - drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base); 414 - } 415 - 416 - if (reloc_dst_bo != cmd_bo) 417 - drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); 418 - } 419 - qxl_fence_releaseable(qdev, release); 420 - 421 - ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true); 422 - if (ret == -ERESTARTSYS) { 423 - qxl_release_unreserve(qdev, release); 424 - qxl_release_free(qdev, release); 425 - qxl_bo_list_unreserve(&reloc_list, true); 426 - return ret; 427 - } 428 - qxl_release_unreserve(qdev, release); 429 153 } 430 - qxl_bo_list_unreserve(&reloc_list, 0); 431 154 return 0; 432 155 } 433 156 ··· 336 305 goto out; 337 306 338 307 if (!qobj->pin_count) { 339 - qxl_ttm_placement_from_domain(qobj, qobj->type); 308 + qxl_ttm_placement_from_domain(qobj, qobj->type, false); 340 309 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, 341 310 true, false); 342 311 if (unlikely(ret))
+11 -59
drivers/gpu/drm/qxl/qxl_object.c
··· 51 51 return false; 52 52 } 53 53 54 - void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) 54 + void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) 55 55 { 56 56 u32 c = 0; 57 + u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0; 57 58 58 59 qbo->placement.fpfn = 0; 59 60 qbo->placement.lpfn = 0; 60 61 qbo->placement.placement = qbo->placements; 61 62 qbo->placement.busy_placement = qbo->placements; 62 63 if (domain == QXL_GEM_DOMAIN_VRAM) 63 - qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM; 64 + qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; 64 65 if (domain == QXL_GEM_DOMAIN_SURFACE) 65 - qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0; 66 + qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag; 66 67 if (domain == QXL_GEM_DOMAIN_CPU) 67 - qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 68 + qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; 68 69 if (!c) 69 70 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 70 71 qbo->placement.num_placement = c; ··· 74 73 75 74 76 75 int qxl_bo_create(struct qxl_device *qdev, 77 - unsigned long size, bool kernel, u32 domain, 76 + unsigned long size, bool kernel, bool pinned, u32 domain, 78 77 struct qxl_surface *surf, 79 78 struct qxl_bo **bo_ptr) 80 79 { ··· 100 99 } 101 100 bo->gem_base.driver_private = NULL; 102 101 bo->type = domain; 103 - bo->pin_count = 0; 102 + bo->pin_count = pinned ? 1 : 0; 104 103 bo->surface_id = 0; 105 104 qxl_fence_init(qdev, &bo->fence); 106 105 INIT_LIST_HEAD(&bo->list); 107 - atomic_set(&bo->reserve_count, 0); 106 + 108 107 if (surf) 109 108 bo->surf = *surf; 110 109 111 - qxl_ttm_placement_from_domain(bo, domain); 110 + qxl_ttm_placement_from_domain(bo, domain, pinned); 112 111 113 112 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, 114 113 &bo->placement, 0, !kernel, NULL, size, ··· 229 228 int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) 230 229 { 231 230 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; 232 - int r, i; 231 + int r; 233 232 234 233 if (bo->pin_count) { 235 234 bo->pin_count++; ··· 237 236 *gpu_addr = qxl_bo_gpu_offset(bo); 238 237 return 0; 239 238 } 240 - qxl_ttm_placement_from_domain(bo, domain); 241 - for (i = 0; i < bo->placement.num_placement; i++) 242 - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 239 + qxl_ttm_placement_from_domain(bo, domain, true); 243 240 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 244 241 if (likely(r == 0)) { 245 242 bo->pin_count = 1; ··· 313 314 if (ret) 314 315 return ret; 315 316 } 316 - return 0; 317 - } 318 - 319 - void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed) 320 - { 321 - struct qxl_bo_list *entry, *sf; 322 - 323 - list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) { 324 - qxl_bo_unreserve(entry->bo); 325 - list_del(&entry->lhead); 326 - kfree(entry); 327 - } 328 - } 329 - 330 - int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo) 331 - { 332 - struct qxl_bo_list *entry; 333 - int ret; 334 - 335 - list_for_each_entry(entry, &reloc_list->bos, lhead) { 336 - if (entry->bo == bo) 337 - return 0; 338 - } 339 - 340 - entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL); 341 - if (!entry) 342 - return -ENOMEM; 343 - 344 - entry->bo = bo; 345 - list_add(&entry->lhead, &reloc_list->bos); 346 - 347 - ret = qxl_bo_reserve(bo, false); 348 - if (ret) 349 - return ret; 350 - 351 - if 
(!bo->pin_count) { 352 - qxl_ttm_placement_from_domain(bo, bo->type); 353 - ret = ttm_bo_validate(&bo->tbo, &bo->placement, 354 - true, false); 355 - if (ret) 356 - return ret; 357 - } 358 - 359 - /* allocate a surface for reserved + validated buffers */ 360 - ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); 361 - if (ret) 362 - return ret; 363 317 return 0; 364 318 } 365 319
+2 -4
drivers/gpu/drm/qxl/qxl_object.h
··· 88 88 89 89 extern int qxl_bo_create(struct qxl_device *qdev, 90 90 unsigned long size, 91 - bool kernel, u32 domain, 91 + bool kernel, bool pinned, u32 domain, 92 92 struct qxl_surface *surf, 93 93 struct qxl_bo **bo_ptr); 94 94 extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); ··· 99 99 extern void qxl_bo_unref(struct qxl_bo **bo); 100 100 extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); 101 101 extern int qxl_bo_unpin(struct qxl_bo *bo); 102 - extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain); 102 + extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned); 103 103 extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); 104 104 105 - extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo); 106 - extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed); 107 105 #endif
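For callers, the net effect of the qxl_object.c/.h changes above is that pinning is now decided at creation time instead of through a later reserve/pin/unreserve sequence. A minimal sketch of the two cases, assuming a helper inside the driver (the function name, the size argument and the error handling are illustrative, not part of the patch):

	#include "qxl_drv.h"
	#include "qxl_object.h"

	/* Illustrative only: one pre-pinned page (as the release code below now
	 * allocates) and one ordinary evictable VRAM BO, both created through the
	 * new qxl_bo_create() signature.  Error handling is abbreviated. */
	static int example_alloc_bos(struct qxl_device *qdev, unsigned long data_size,
				     struct qxl_bo **page_bo, struct qxl_bo **data_bo)
	{
		int ret;

		/* pinned = true: TTM_PL_FLAG_NO_EVICT is folded into the placement
		 * and pin_count starts at 1, so no reservation is needed later
		 * just to pin the object */
		ret = qxl_bo_create(qdev, PAGE_SIZE, false /* kernel */, true /* pinned */,
				    QXL_GEM_DOMAIN_VRAM, NULL, page_bo);
		if (ret)
			return ret;

		/* pinned = false: stays evictable; qxl_bo_pin() now simply recomputes
		 * the placement via qxl_ttm_placement_from_domain(bo, domain, true)
		 * instead of OR-ing TTM_PL_FLAG_NO_EVICT into each entry by hand */
		ret = qxl_bo_create(qdev, data_size, false, false,
				    QXL_GEM_DOMAIN_VRAM, NULL, data_bo);
		if (ret)
			qxl_bo_unref(page_bo);
		return ret;
	}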
+133 -79
drivers/gpu/drm/qxl/qxl_release.c
··· 38 38 39 39 static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; 40 40 static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; 41 - uint64_t 41 + 42 + static uint64_t 42 43 qxl_release_alloc(struct qxl_device *qdev, int type, 43 44 struct qxl_release **ret) 44 45 { ··· 54 53 return 0; 55 54 } 56 55 release->type = type; 57 - release->bo_count = 0; 58 56 release->release_offset = 0; 59 57 release->surface_release_id = 0; 58 + INIT_LIST_HEAD(&release->bos); 60 59 61 60 idr_preload(GFP_KERNEL); 62 61 spin_lock(&qdev->release_idr_lock); ··· 78 77 qxl_release_free(struct qxl_device *qdev, 79 78 struct qxl_release *release) 80 79 { 81 - int i; 82 - 83 - QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id, 84 - release->type, release->bo_count); 80 + struct qxl_bo_list *entry, *tmp; 81 + QXL_INFO(qdev, "release %d, type %d\n", release->id, 82 + release->type); 85 83 86 84 if (release->surface_release_id) 87 85 qxl_surface_id_dealloc(qdev, release->surface_release_id); 88 86 89 - for (i = 0 ; i < release->bo_count; ++i) { 87 + list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) { 88 + struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); 90 89 QXL_INFO(qdev, "release %llx\n", 91 - release->bos[i]->tbo.addr_space_offset 90 + entry->tv.bo->addr_space_offset 92 91 - DRM_FILE_OFFSET); 93 - qxl_fence_remove_release(&release->bos[i]->fence, release->id); 94 - qxl_bo_unref(&release->bos[i]); 92 + qxl_fence_remove_release(&bo->fence, release->id); 93 + qxl_bo_unref(&bo); 95 94 } 96 95 spin_lock(&qdev->release_idr_lock); 97 96 idr_remove(&qdev->release_idr, release->id); ··· 99 98 kfree(release); 100 99 } 101 100 102 - void 103 - qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release, 104 - struct qxl_bo *bo) 105 - { 106 - int i; 107 - for (i = 0; i < release->bo_count; i++) 108 - if (release->bos[i] == bo) 109 - return; 110 - 111 - if (release->bo_count >= QXL_MAX_RES) { 112 - DRM_ERROR("exceeded max resource on a qxl_release item\n"); 113 - return; 114 - } 115 - release->bos[release->bo_count++] = qxl_bo_ref(bo); 116 - } 117 - 118 101 static int qxl_release_bo_alloc(struct qxl_device *qdev, 119 102 struct qxl_bo **bo) 120 103 { 121 104 int ret; 122 - ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL, 105 + /* pin releases bo's they are too messy to evict */ 106 + ret = qxl_bo_create(qdev, PAGE_SIZE, false, true, 107 + QXL_GEM_DOMAIN_VRAM, NULL, 123 108 bo); 124 109 return ret; 125 110 } 126 111 127 - int qxl_release_reserve(struct qxl_device *qdev, 128 - struct qxl_release *release, bool no_wait) 112 + int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo) 113 + { 114 + struct qxl_bo_list *entry; 115 + 116 + list_for_each_entry(entry, &release->bos, tv.head) { 117 + if (entry->tv.bo == &bo->tbo) 118 + return 0; 119 + } 120 + 121 + entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL); 122 + if (!entry) 123 + return -ENOMEM; 124 + 125 + qxl_bo_ref(bo); 126 + entry->tv.bo = &bo->tbo; 127 + list_add_tail(&entry->tv.head, &release->bos); 128 + return 0; 129 + } 130 + 131 + static int qxl_release_validate_bo(struct qxl_bo *bo) 129 132 { 130 133 int ret; 131 - if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) { 132 - ret = qxl_bo_reserve(release->bos[0], no_wait); 134 + 135 + if (!bo->pin_count) { 136 + qxl_ttm_placement_from_domain(bo, bo->type, false); 137 + ret = ttm_bo_validate(&bo->tbo, &bo->placement, 138 + true, false); 133 139 if (ret) 
134 140 return ret; 141 + } 142 + 143 + /* allocate a surface for reserved + validated buffers */ 144 + ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); 145 + if (ret) 146 + return ret; 147 + return 0; 148 + } 149 + 150 + int qxl_release_reserve_list(struct qxl_release *release, bool no_intr) 151 + { 152 + int ret; 153 + struct qxl_bo_list *entry; 154 + 155 + /* if only one object on the release its the release itself 156 + since these objects are pinned no need to reserve */ 157 + if (list_is_singular(&release->bos)) 158 + return 0; 159 + 160 + ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos); 161 + if (ret) 162 + return ret; 163 + 164 + list_for_each_entry(entry, &release->bos, tv.head) { 165 + struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); 166 + 167 + ret = qxl_release_validate_bo(bo); 168 + if (ret) { 169 + ttm_eu_backoff_reservation(&release->ticket, &release->bos); 170 + return ret; 171 + } 135 172 } 136 173 return 0; 137 174 } 138 175 139 - void qxl_release_unreserve(struct qxl_device *qdev, 140 - struct qxl_release *release) 176 + void qxl_release_backoff_reserve_list(struct qxl_release *release) 141 177 { 142 - if (atomic_dec_and_test(&release->bos[0]->reserve_count)) 143 - qxl_bo_unreserve(release->bos[0]); 178 + /* if only one object on the release its the release itself 179 + since these objects are pinned no need to reserve */ 180 + if (list_is_singular(&release->bos)) 181 + return; 182 + 183 + ttm_eu_backoff_reservation(&release->ticket, &release->bos); 144 184 } 185 + 145 186 146 187 int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, 147 188 enum qxl_surface_cmd_type surface_cmd_type, 148 189 struct qxl_release *create_rel, 149 190 struct qxl_release **release) 150 191 { 151 - int ret; 152 - 153 192 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { 154 193 int idr_ret; 194 + struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head); 155 195 struct qxl_bo *bo; 156 196 union qxl_release_info *info; 157 197 158 198 /* stash the release after the create command */ 159 199 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); 160 - bo = qxl_bo_ref(create_rel->bos[0]); 200 + bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo)); 161 201 162 202 (*release)->release_offset = create_rel->release_offset + 64; 163 203 164 - qxl_release_add_res(qdev, *release, bo); 204 + qxl_release_list_add(*release, bo); 165 205 166 - ret = qxl_release_reserve(qdev, *release, false); 167 - if (ret) { 168 - DRM_ERROR("release reserve failed\n"); 169 - goto out_unref; 170 - } 171 206 info = qxl_release_map(qdev, *release); 172 207 info->id = idr_ret; 173 208 qxl_release_unmap(qdev, *release, info); 174 209 175 - 176 - out_unref: 177 210 qxl_bo_unref(&bo); 178 - return ret; 211 + return 0; 179 212 } 180 213 181 214 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), ··· 222 187 { 223 188 struct qxl_bo *bo; 224 189 int idr_ret; 225 - int ret; 190 + int ret = 0; 226 191 union qxl_release_info *info; 227 192 int cur_idx; 228 193 ··· 251 216 mutex_unlock(&qdev->release_mutex); 252 217 return ret; 253 218 } 254 - 255 - /* pin releases bo's they are too messy to evict */ 256 - ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false); 257 - qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL); 258 - qxl_bo_unreserve(qdev->current_release_bo[cur_idx]); 259 219 } 260 220 261 221 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); ··· 261 231 if (rbo) 262 232 *rbo = bo; 263 233 264 - 
qxl_release_add_res(qdev, *release, bo); 265 - 266 - ret = qxl_release_reserve(qdev, *release, false); 267 234 mutex_unlock(&qdev->release_mutex); 268 - if (ret) 269 - goto out_unref; 235 + 236 + qxl_release_list_add(*release, bo); 270 237 271 238 info = qxl_release_map(qdev, *release); 272 239 info->id = idr_ret; 273 240 qxl_release_unmap(qdev, *release, info); 274 241 275 - out_unref: 276 242 qxl_bo_unref(&bo); 277 243 return ret; 278 - } 279 - 280 - int qxl_fence_releaseable(struct qxl_device *qdev, 281 - struct qxl_release *release) 282 - { 283 - int i, ret; 284 - for (i = 0; i < release->bo_count; i++) { 285 - if (!release->bos[i]->tbo.sync_obj) 286 - release->bos[i]->tbo.sync_obj = &release->bos[i]->fence; 287 - ret = qxl_fence_add_release(&release->bos[i]->fence, release->id); 288 - if (ret) 289 - return ret; 290 - } 291 - return 0; 292 244 } 293 245 294 246 struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, ··· 285 273 DRM_ERROR("failed to find id in release_idr\n"); 286 274 return NULL; 287 275 } 288 - if (release->bo_count < 1) { 289 - DRM_ERROR("read a released resource with 0 bos\n"); 290 - return NULL; 291 - } 276 + 292 277 return release; 293 278 } 294 279 ··· 294 285 { 295 286 void *ptr; 296 287 union qxl_release_info *info; 297 - struct qxl_bo *bo = release->bos[0]; 288 + struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); 289 + struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); 298 290 299 291 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); 292 + if (!ptr) 293 + return NULL; 300 294 info = ptr + (release->release_offset & ~PAGE_SIZE); 301 295 return info; 302 296 } ··· 308 296 struct qxl_release *release, 309 297 union qxl_release_info *info) 310 298 { 311 - struct qxl_bo *bo = release->bos[0]; 299 + struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); 300 + struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); 312 301 void *ptr; 313 302 314 303 ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); 315 304 qxl_bo_kunmap_atomic_page(qdev, bo, ptr); 316 305 } 306 + 307 + void qxl_release_fence_buffer_objects(struct qxl_release *release) 308 + { 309 + struct ttm_validate_buffer *entry; 310 + struct ttm_buffer_object *bo; 311 + struct ttm_bo_global *glob; 312 + struct ttm_bo_device *bdev; 313 + struct ttm_bo_driver *driver; 314 + struct qxl_bo *qbo; 315 + 316 + /* if only one object on the release its the release itself 317 + since these objects are pinned no need to reserve */ 318 + if (list_is_singular(&release->bos)) 319 + return; 320 + 321 + bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo; 322 + bdev = bo->bdev; 323 + driver = bdev->driver; 324 + glob = bo->glob; 325 + 326 + spin_lock(&glob->lru_lock); 327 + spin_lock(&bdev->fence_lock); 328 + 329 + list_for_each_entry(entry, &release->bos, head) { 330 + bo = entry->bo; 331 + qbo = to_qxl_bo(bo); 332 + 333 + if (!entry->bo->sync_obj) 334 + entry->bo->sync_obj = &qbo->fence; 335 + 336 + qxl_fence_add_release_locked(&qbo->fence, release->id); 337 + 338 + ttm_bo_add_to_lru(bo); 339 + ww_mutex_unlock(&bo->resv->lock); 340 + entry->reserved = false; 341 + } 342 + spin_unlock(&bdev->fence_lock); 343 + spin_unlock(&glob->lru_lock); 344 + ww_acquire_fini(&release->ticket); 345 + } 346 +
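The qxl_release.c rework above replaces the old per-BO reserve/unreserve calls with a single ticketed ttm_eu reservation over every object a command touches, and defers fencing to qxl_release_fence_buffer_objects() once the command has been pushed. A rough sketch of the resulting submission flow, assuming a simplified caller (the wrapper function, the drawable sizing and the command-fill step are illustrative; the qxl_release_* and qxl_push_command_ring_release() entry points are the ones added or kept by this series):

	/* Illustrative submission flow under the new reservation scheme;
	 * "fill the command" stands in for whatever builds the drawable. */
	static int example_submit(struct qxl_device *qdev, struct qxl_bo *data_bo)
	{
		struct qxl_release *release;
		union qxl_release_info *info;
		int ret;

		ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
						 QXL_RELEASE_DRAWABLE, &release, NULL);
		if (ret)
			return ret;

		/* every BO the command references goes on the release's list */
		ret = qxl_release_list_add(release, data_bo);
		if (ret)
			goto out_free;

		/* reserve + validate the whole list under one ww_acquire ticket;
		 * on failure it has already backed off its own reservations */
		ret = qxl_release_reserve_list(release, false /* interruptible */);
		if (ret)
			goto out_free;

		info = qxl_release_map(qdev, release);
		/* ... fill the command through info ... */
		qxl_release_unmap(qdev, release, info);

		qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);

		/* attach the release's fence to every listed BO, return them to
		 * the LRU and drop the reservations */
		qxl_release_fence_buffer_objects(release);
		return 0;

	out_free:
		qxl_release_free(qdev, release);
		return ret;
	}

Note that a release whose only object is its own pre-pinned backing page skips the reserve/backoff/fence steps entirely, which is what the list_is_singular() checks above are for.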
+1 -1
drivers/gpu/drm/qxl/qxl_ttm.c
··· 206 206 return; 207 207 } 208 208 qbo = container_of(bo, struct qxl_bo, tbo); 209 - qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU); 209 + qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false); 210 210 *placement = qbo->placement; 211 211 } 212 212