Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/gpu/drm/radeon/radeon_fence.c at v3.5-rc5 (625 lines, 17 kB)
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	if (rdev->wb.enabled) {
		*rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
	}
}

static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	u32 seq = 0;

	if (rdev->wb.enabled) {
		seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
	} else {
		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
	}
	return seq;
}

int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	/* we are protected by the ring emission mutex */
	if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
		return 0;
	}
	fence->seq = ++rdev->fence_drv[fence->ring].seq;
	radeon_fence_ring_emit(rdev, fence->ring, fence);
	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	return 0;
}

void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that other
	 * process needs to update last_seq between the atomic read and
	 * xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop, there needs to be
	 * a continuous stream of newly signaled fences, i.e. radeon_fence_read
	 * needs to return a different value each time for both the currently
	 * polling process and the other process that xchg's last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process sets as last seq must be higher than
	 * the seq value we just read. Which means that the current process
	 * needs to be interrupted after radeon_fence_read and before the
	 * atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail after 10 loops, accepting the fact that we might
	 * have temporarily set last_seq not to the real last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq += 0x100000000LL;
		}

		if (seq == last_seq) {
			break;
		}
		/* If we loop over, we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times; leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
		wake_up_all(&rdev->fence_queue);
	}
}

static void radeon_fence_destroy(struct kref *kref)
{
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
	kfree(fence);
}

int radeon_fence_create(struct radeon_device *rdev,
			struct radeon_fence **fence,
			int ring)
{
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ;
	(*fence)->ring = ring;
	return 0;
}

static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
		WARN(1, "Querying an unemitted fence : %p !\n", fence);
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}

static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled;
	int r;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, ring);
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			/* check if sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}

			if (lock_ring) {
				mutex_lock(&rdev->ring_lock);
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
					 target_seq, seq);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				return -EDEADLK;
			}

			if (lock_ring) {
				mutex_unlock(&rdev->ring_lock);
			}
		}
	}
	return 0;
}

int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r) {
		return r;
	}
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}

bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
			return true;
		}
	}
	return false;
}

static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled;
	int r;

	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for? */
	if (ring == RADEON_NUM_RINGS) {
		return 0;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			mutex_lock(&rdev->ring_lock);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				mutex_unlock(&rdev->ring_lock);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
					 target_seq[ring]);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				mutex_unlock(&rdev->ring_lock);
				return -EDEADLK;
			}
			mutex_unlock(&rdev->ring_lock);
		}
	}
	return 0;
}

int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}

		if (fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) {
			seq[i] = fences[i]->seq;
		}
	}

	r = radeon_fence_wait_any_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}

int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	/* We are not protected by the ring lock when reading the current seq,
	 * but it's ok as the worst case is that we return too early while we
	 * could have waited.
	 */
	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].seq) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	/* We are not protected by the ring lock when reading the current seq,
	 * but it's ok as wait empty is called from places where no more
	 * activity can be scheduled, so there won't be concurrent access
	 * to the seq value.
	 */
	return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq,
				     ring, false, false);
}

struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last sequence,
	 * but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].seq - atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wrap around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	rdev->fence_drv[ring].seq = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}

int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_empty_locked(rdev, ring);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].seq);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}
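
For orientation, here is a minimal usage sketch of the fence API defined above. The caller function is hypothetical (not part of radeon_fence.c), and it assumes a valid rdev and a ring index such as RADEON_RING_TYPE_GFX_INDEX; in the real driver, radeon_fence_emit runs as part of command submission, under the ring emission mutex, after commands have been written to the ring.

/* Hypothetical caller, for illustration only: allocate a fence,
 * emit it on a ring, block until the GPU signals it, then drop
 * the reference.
 */
static int example_fence_round_trip(struct radeon_device *rdev, int ring)
{
	struct radeon_fence *fence = NULL;
	int r;

	/* allocates the fence with seq = RADEON_FENCE_NOTEMITED_SEQ */
	r = radeon_fence_create(rdev, &fence, ring);
	if (r)
		return r;

	/* ... commands would be written to the ring here ...
	 * radeon_fence_emit assigns the next sequence number and has the
	 * ring write it back once the preceding work completes.
	 */
	r = radeon_fence_emit(rdev, fence);
	if (r)
		goto out;

	/* uninterruptible wait; returns -EDEADLK on a detected GPU lockup */
	r = radeon_fence_wait(fence, false);
out:
	radeon_fence_unref(&fence);
	return r;
}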