Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm: Remove DRM_ERR OS macro.

This was used to make all ioctl handlers return -errno on linux and errno on
*BSD. Instead, just return -errno in shared code, and flip sign on return from
shared code to *BSD code.

Signed-off-by: Dave Airlie <airlied@linux.ie>

authored by

Eric Anholt and committed by
Dave Airlie
20caafa6 23fd5045

+474 -475
+3 -3
drivers/char/drm/drm_drawable.c
··· 130 130 131 131 if (update.num && !rects) { 132 132 DRM_ERROR("Failed to allocate cliprect memory\n"); 133 - err = DRM_ERR(ENOMEM); 133 + err = -ENOMEM; 134 134 goto error; 135 135 } 136 136 ··· 140 140 update.num * 141 141 sizeof(*rects))) { 142 142 DRM_ERROR("Failed to copy cliprects from userspace\n"); 143 - err = DRM_ERR(EFAULT); 143 + err = -EFAULT; 144 144 goto error; 145 145 } 146 146 ··· 161 161 break; 162 162 default: 163 163 DRM_ERROR("Invalid update type %d\n", update.type); 164 - return DRM_ERR(EINVAL); 164 + return -EINVAL; 165 165 } 166 166 167 167 return 0;
+1 -1
drivers/char/drm/drm_ioctl.c
··· 123 123 */ 124 124 ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func); 125 125 if (ret != 3) 126 - return DRM_ERR(EINVAL); 126 + return -EINVAL; 127 127 domain = bus >> 8; 128 128 bus &= 0xff; 129 129
+1 -1
drivers/char/drm/drm_lock.c
··· 125 125 if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) { 126 126 if (dev->driver->dma_quiescent(dev)) { 127 127 DRM_DEBUG("%d waiting for DMA quiescent\n", lock.context); 128 - return DRM_ERR(EBUSY); 128 + return -EBUSY; 129 129 } 130 130 } 131 131
-1
drivers/char/drm/drm_os_linux.h
··· 10 10 #define DRMFILE struct file * 11 11 /** Ioctl arguments */ 12 12 #define DRM_IOCTL_ARGS struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data 13 - #define DRM_ERR(d) -(d) 14 13 /** Current process ID */ 15 14 #define DRM_CURRENTPID current->pid 16 15 #define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
+1 -1
drivers/char/drm/i810_dma.c
··· 380 380 i810_dma_cleanup(dev); 381 381 DRM_ERROR("can not ioremap virtual address for" 382 382 " ring buffer\n"); 383 - return DRM_ERR(ENOMEM); 383 + return -ENOMEM; 384 384 } 385 385 386 386 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+1 -1
drivers/char/drm/i830_dma.c
··· 389 389 i830_dma_cleanup(dev); 390 390 DRM_ERROR("can not ioremap virtual address for" 391 391 " ring buffer\n"); 392 - return DRM_ERR(ENOMEM); 392 + return -ENOMEM; 393 393 } 394 394 395 395 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+29 -29
drivers/char/drm/i915_dma.c
··· 70 70 last_head = ring->head; 71 71 } 72 72 73 - return DRM_ERR(EBUSY); 73 + return -EBUSY; 74 74 } 75 75 76 76 void i915_kernel_lost_context(struct drm_device * dev) ··· 137 137 DRM_ERROR("can not find sarea!\n"); 138 138 dev->dev_private = (void *)dev_priv; 139 139 i915_dma_cleanup(dev); 140 - return DRM_ERR(EINVAL); 140 + return -EINVAL; 141 141 } 142 142 143 143 dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); ··· 145 145 dev->dev_private = (void *)dev_priv; 146 146 i915_dma_cleanup(dev); 147 147 DRM_ERROR("can not find mmio map!\n"); 148 - return DRM_ERR(EINVAL); 148 + return -EINVAL; 149 149 } 150 150 151 151 dev_priv->sarea_priv = (drm_i915_sarea_t *) ··· 169 169 i915_dma_cleanup(dev); 170 170 DRM_ERROR("can not ioremap virtual address for" 171 171 " ring buffer\n"); 172 - return DRM_ERR(ENOMEM); 172 + return -ENOMEM; 173 173 } 174 174 175 175 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; ··· 200 200 dev->dev_private = (void *)dev_priv; 201 201 i915_dma_cleanup(dev); 202 202 DRM_ERROR("Can not allocate hardware status page\n"); 203 - return DRM_ERR(ENOMEM); 203 + return -ENOMEM; 204 204 } 205 205 dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; 206 206 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; ··· 221 221 222 222 if (!dev_priv->sarea) { 223 223 DRM_ERROR("can not find sarea!\n"); 224 - return DRM_ERR(EINVAL); 224 + return -EINVAL; 225 225 } 226 226 227 227 if (!dev_priv->mmio_map) { 228 228 DRM_ERROR("can not find mmio map!\n"); 229 - return DRM_ERR(EINVAL); 229 + return -EINVAL; 230 230 } 231 231 232 232 if (dev_priv->ring.map.handle == NULL) { 233 233 DRM_ERROR("can not ioremap virtual address for" 234 234 " ring buffer\n"); 235 - return DRM_ERR(ENOMEM); 235 + return -ENOMEM; 236 236 } 237 237 238 238 /* Program Hardware Status Page */ 239 239 if (!dev_priv->hw_status_page) { 240 240 DRM_ERROR("Can not find hardware status page\n"); 241 - return DRM_ERR(EINVAL); 241 + return -EINVAL; 242 242 
} 243 243 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); 244 244 ··· 266 266 dev_priv = drm_alloc(sizeof(drm_i915_private_t), 267 267 DRM_MEM_DRIVER); 268 268 if (dev_priv == NULL) 269 - return DRM_ERR(ENOMEM); 269 + return -ENOMEM; 270 270 retcode = i915_initialize(dev, dev_priv, &init); 271 271 break; 272 272 case I915_CLEANUP_DMA: ··· 276 276 retcode = i915_dma_resume(dev); 277 277 break; 278 278 default: 279 - retcode = DRM_ERR(EINVAL); 279 + retcode = -EINVAL; 280 280 break; 281 281 } 282 282 ··· 366 366 RING_LOCALS; 367 367 368 368 if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) 369 - return DRM_ERR(EINVAL); 369 + return -EINVAL; 370 370 371 371 BEGIN_LP_RING((dwords+1)&~1); 372 372 ··· 374 374 int cmd, sz; 375 375 376 376 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) 377 - return DRM_ERR(EINVAL); 377 + return -EINVAL; 378 378 379 379 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) 380 - return DRM_ERR(EINVAL); 380 + return -EINVAL; 381 381 382 382 OUT_RING(cmd); 383 383 384 384 while (++i, --sz) { 385 385 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], 386 386 sizeof(cmd))) { 387 - return DRM_ERR(EINVAL); 387 + return -EINVAL; 388 388 } 389 389 OUT_RING(cmd); 390 390 } ··· 407 407 RING_LOCALS; 408 408 409 409 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { 410 - return DRM_ERR(EFAULT); 410 + return -EFAULT; 411 411 } 412 412 413 413 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { 414 414 DRM_ERROR("Bad box %d,%d..%d,%d\n", 415 415 box.x1, box.y1, box.x2, box.y2); 416 - return DRM_ERR(EINVAL); 416 + return -EINVAL; 417 417 } 418 418 419 419 if (IS_I965G(dev)) { ··· 467 467 468 468 if (cmd->sz & 0x3) { 469 469 DRM_ERROR("alignment"); 470 - return DRM_ERR(EINVAL); 470 + return -EINVAL; 471 471 } 472 472 473 473 i915_kernel_lost_context(dev); ··· 502 502 503 503 if ((batch->start | batch->used) & 0x7) { 504 504 DRM_ERROR("alignment"); 505 - return DRM_ERR(EINVAL); 505 
+ return -EINVAL; 506 506 } 507 507 508 508 i915_kernel_lost_context(dev); ··· 619 619 620 620 if (!dev_priv->allow_batchbuffer) { 621 621 DRM_ERROR("Batchbuffer ioctl disabled\n"); 622 - return DRM_ERR(EINVAL); 622 + return -EINVAL; 623 623 } 624 624 625 625 DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data, ··· 633 633 if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects, 634 634 batch.num_cliprects * 635 635 sizeof(struct drm_clip_rect))) 636 - return DRM_ERR(EFAULT); 636 + return -EFAULT; 637 637 638 638 ret = i915_dispatch_batchbuffer(dev, &batch); 639 639 ··· 664 664 cmdbuf.num_cliprects * 665 665 sizeof(struct drm_clip_rect))) { 666 666 DRM_ERROR("Fault accessing cliprects\n"); 667 - return DRM_ERR(EFAULT); 667 + return -EFAULT; 668 668 } 669 669 670 670 ret = i915_dispatch_cmdbuffer(dev, &cmdbuf); ··· 697 697 698 698 if (!dev_priv) { 699 699 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 700 - return DRM_ERR(EINVAL); 700 + return -EINVAL; 701 701 } 702 702 703 703 DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data, ··· 715 715 break; 716 716 default: 717 717 DRM_ERROR("Unknown parameter %d\n", param.param); 718 - return DRM_ERR(EINVAL); 718 + return -EINVAL; 719 719 } 720 720 721 721 if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 722 722 DRM_ERROR("DRM_COPY_TO_USER failed\n"); 723 - return DRM_ERR(EFAULT); 723 + return -EFAULT; 724 724 } 725 725 726 726 return 0; ··· 734 734 735 735 if (!dev_priv) { 736 736 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 737 - return DRM_ERR(EINVAL); 737 + return -EINVAL; 738 738 } 739 739 740 740 DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data, ··· 753 753 break; 754 754 default: 755 755 DRM_ERROR("unknown parameter %d\n", param.param); 756 - return DRM_ERR(EINVAL); 756 + return -EINVAL; 757 757 } 758 758 759 759 return 0; ··· 767 767 768 768 if (!dev_priv) { 769 769 DRM_ERROR("%s called with no initialization\n", 
__FUNCTION__); 770 - return DRM_ERR(EINVAL); 770 + return -EINVAL; 771 771 } 772 772 DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data, 773 773 sizeof(hws)); ··· 788 788 dev_priv->status_gfx_addr = 0; 789 789 DRM_ERROR("can not ioremap virtual address for" 790 790 " G33 hw status page\n"); 791 - return DRM_ERR(ENOMEM); 791 + return -ENOMEM; 792 792 } 793 793 dev_priv->hw_status_page = dev_priv->hws_map.handle; 794 794
+16 -16
drivers/char/drm/i915_irq.c
··· 311 311 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, 312 312 READ_BREADCRUMB(dev_priv) >= irq_nr); 313 313 314 - if (ret == DRM_ERR(EBUSY)) { 314 + if (ret == -EBUSY) { 315 315 DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n", 316 316 __FUNCTION__, 317 317 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); ··· 330 330 331 331 if (!dev_priv) { 332 332 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 333 - return DRM_ERR(EINVAL); 333 + return -EINVAL; 334 334 } 335 335 336 336 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, ··· 366 366 367 367 if (!dev_priv) { 368 368 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 369 - return DRM_ERR(EINVAL); 369 + return -EINVAL; 370 370 } 371 371 372 372 DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data, ··· 376 376 377 377 if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { 378 378 DRM_ERROR("copy_to_user\n"); 379 - return DRM_ERR(EFAULT); 379 + return -EFAULT; 380 380 } 381 381 382 382 return 0; ··· 392 392 393 393 if (!dev_priv) { 394 394 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 395 - return DRM_ERR(EINVAL); 395 + return -EINVAL; 396 396 } 397 397 398 398 DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data, ··· 425 425 426 426 if (!dev_priv) { 427 427 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 428 - return DRM_ERR(EINVAL); 428 + return -EINVAL; 429 429 } 430 430 431 431 DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data, ··· 434 434 if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) { 435 435 DRM_ERROR("%s called with invalid pipe 0x%x\n", 436 436 __FUNCTION__, pipe.pipe); 437 - return DRM_ERR(EINVAL); 437 + return -EINVAL; 438 438 } 439 439 440 440 dev_priv->vblank_pipe = pipe.pipe; ··· 453 453 454 454 if (!dev_priv) { 455 455 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 456 - return DRM_ERR(EINVAL); 456 + return -EINVAL; 457 457 } 458 458 459 459 flag = 
I915_READ(I915REG_INT_ENABLE_R); ··· 482 482 483 483 if (!dev_priv) { 484 484 DRM_ERROR("%s called with no initialization\n", __func__); 485 - return DRM_ERR(EINVAL); 485 + return -EINVAL; 486 486 } 487 487 488 488 if (dev_priv->sarea_priv->rotation) { 489 489 DRM_DEBUG("Rotation not supported\n"); 490 - return DRM_ERR(EINVAL); 490 + return -EINVAL; 491 491 } 492 492 493 493 DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data, ··· 496 496 if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | 497 497 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) { 498 498 DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype); 499 - return DRM_ERR(EINVAL); 499 + return -EINVAL; 500 500 } 501 501 502 502 pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; ··· 505 505 506 506 if (!(dev_priv->vblank_pipe & (1 << pipe))) { 507 507 DRM_ERROR("Invalid pipe %d\n", pipe); 508 - return DRM_ERR(EINVAL); 508 + return -EINVAL; 509 509 } 510 510 511 511 spin_lock_irqsave(&dev->drw_lock, irqflags); ··· 513 513 if (!drm_get_drawable_info(dev, swap.drawable)) { 514 514 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 515 515 DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable); 516 - return DRM_ERR(EINVAL); 516 + return -EINVAL; 517 517 } 518 518 519 519 spin_unlock_irqrestore(&dev->drw_lock, irqflags); ··· 528 528 swap.sequence = curseq + 1; 529 529 } else { 530 530 DRM_DEBUG("Missed target sequence\n"); 531 - return DRM_ERR(EINVAL); 531 + return -EINVAL; 532 532 } 533 533 } 534 534 ··· 550 550 551 551 if (dev_priv->swaps_pending >= 100) { 552 552 DRM_DEBUG("Too many swaps queued\n"); 553 - return DRM_ERR(EBUSY); 553 + return -EBUSY; 554 554 } 555 555 556 556 vbl_swap = drm_calloc(1, sizeof(vbl_swap), DRM_MEM_DRIVER); 557 557 558 558 if (!vbl_swap) { 559 559 DRM_ERROR("Failed to allocate memory to queue swap\n"); 560 - return DRM_ERR(ENOMEM); 560 + return -ENOMEM; 561 561 } 562 562 563 563 DRM_DEBUG("\n");
+14 -14
drivers/char/drm/i915_mem.c
··· 276 276 277 277 if (!dev_priv) { 278 278 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 279 - return DRM_ERR(EINVAL); 279 + return -EINVAL; 280 280 } 281 281 282 282 DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data, ··· 284 284 285 285 heap = get_heap(dev_priv, alloc.region); 286 286 if (!heap || !*heap) 287 - return DRM_ERR(EFAULT); 287 + return -EFAULT; 288 288 289 289 /* Make things easier on ourselves: all allocations at least 290 290 * 4k aligned. ··· 295 295 block = alloc_block(*heap, alloc.size, alloc.alignment, filp); 296 296 297 297 if (!block) 298 - return DRM_ERR(ENOMEM); 298 + return -ENOMEM; 299 299 300 300 mark_block(dev, block, 1); 301 301 302 302 if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { 303 303 DRM_ERROR("copy_to_user\n"); 304 - return DRM_ERR(EFAULT); 304 + return -EFAULT; 305 305 } 306 306 307 307 return 0; ··· 316 316 317 317 if (!dev_priv) { 318 318 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 319 - return DRM_ERR(EINVAL); 319 + return -EINVAL; 320 320 } 321 321 322 322 DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data, ··· 324 324 325 325 heap = get_heap(dev_priv, memfree.region); 326 326 if (!heap || !*heap) 327 - return DRM_ERR(EFAULT); 327 + return -EFAULT; 328 328 329 329 block = find_block(*heap, memfree.region_offset); 330 330 if (!block) 331 - return DRM_ERR(EFAULT); 331 + return -EFAULT; 332 332 333 333 if (block->filp != filp) 334 - return DRM_ERR(EPERM); 334 + return -EPERM; 335 335 336 336 mark_block(dev, block, 0); 337 337 free_block(block); ··· 347 347 348 348 if (!dev_priv) { 349 349 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 350 - return DRM_ERR(EINVAL); 350 + return -EINVAL; 351 351 } 352 352 353 353 DRM_COPY_FROM_USER_IOCTL(initheap, ··· 356 356 357 357 heap = get_heap(dev_priv, initheap.region); 358 358 if (!heap) 359 - return DRM_ERR(EFAULT); 359 + return -EFAULT; 360 360 361 361 if (*heap) { 362 
362 DRM_ERROR("heap already initialized?"); 363 - return DRM_ERR(EFAULT); 363 + return -EFAULT; 364 364 } 365 365 366 366 return init_heap(heap, initheap.start, initheap.size); ··· 375 375 376 376 if ( !dev_priv ) { 377 377 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); 378 - return DRM_ERR(EINVAL); 378 + return -EINVAL; 379 379 } 380 380 381 381 DRM_COPY_FROM_USER_IOCTL( destroyheap, (drm_i915_mem_destroy_heap_t *)data, ··· 384 384 heap = get_heap( dev_priv, destroyheap.region ); 385 385 if (!heap) { 386 386 DRM_ERROR("get_heap failed"); 387 - return DRM_ERR(EFAULT); 387 + return -EFAULT; 388 388 } 389 389 390 390 if (!*heap) { 391 391 DRM_ERROR("heap not initialized?"); 392 - return DRM_ERR(EFAULT); 392 + return -EFAULT; 393 393 } 394 394 395 395 i915_mem_takedown( heap );
+21 -21
drivers/char/drm/mga_dma.c
··· 71 71 DRM_ERROR("failed!\n"); 72 72 DRM_INFO(" status=0x%08x\n", status); 73 73 #endif 74 - return DRM_ERR(EBUSY); 74 + return -EBUSY; 75 75 } 76 76 77 77 static int mga_do_dma_reset(drm_mga_private_t * dev_priv) ··· 256 256 257 257 dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); 258 258 if (dev_priv->head == NULL) 259 - return DRM_ERR(ENOMEM); 259 + return -ENOMEM; 260 260 261 261 memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t)); 262 262 SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0); ··· 267 267 268 268 entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER); 269 269 if (entry == NULL) 270 - return DRM_ERR(ENOMEM); 270 + return -ENOMEM; 271 271 272 272 memset(entry, 0, sizeof(drm_mga_freelist_t)); 273 273 ··· 399 399 400 400 dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); 401 401 if (!dev_priv) 402 - return DRM_ERR(ENOMEM); 402 + return -ENOMEM; 403 403 404 404 dev->dev_private = (void *)dev_priv; 405 405 memset(dev_priv, 0, sizeof(drm_mga_private_t)); ··· 578 578 DRM_ERROR("failed to ioremap agp regions! 
(%p, %p, %p)\n", 579 579 dev_priv->warp->handle, dev_priv->primary->handle, 580 580 dev->agp_buffer_map->handle); 581 - return DRM_ERR(ENOMEM); 581 + return -ENOMEM; 582 582 } 583 583 584 584 dev_priv->dma_access = MGA_PAGPXFER; ··· 622 622 623 623 if (dev->dma == NULL) { 624 624 DRM_ERROR("dev->dma is NULL\n"); 625 - return DRM_ERR(EFAULT); 625 + return -EFAULT; 626 626 } 627 627 628 628 /* Make drm_addbufs happy by not trying to create a mapping for less ··· 656 656 657 657 if (err != 0) { 658 658 DRM_ERROR("Unable to allocate primary DMA region: %d\n", err); 659 - return DRM_ERR(ENOMEM); 659 + return -ENOMEM; 660 660 } 661 661 662 662 if (dev_priv->primary->size != dma_bs->primary_size) { ··· 826 826 dev_priv->sarea = drm_getsarea(dev); 827 827 if (!dev_priv->sarea) { 828 828 DRM_ERROR("failed to find sarea!\n"); 829 - return DRM_ERR(EINVAL); 829 + return -EINVAL; 830 830 } 831 831 832 832 if (!dev_priv->used_new_dma_init) { ··· 837 837 dev_priv->status = drm_core_findmap(dev, init->status_offset); 838 838 if (!dev_priv->status) { 839 839 DRM_ERROR("failed to find status page!\n"); 840 - return DRM_ERR(EINVAL); 840 + return -EINVAL; 841 841 } 842 842 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 843 843 if (!dev_priv->mmio) { 844 844 DRM_ERROR("failed to find mmio region!\n"); 845 - return DRM_ERR(EINVAL); 845 + return -EINVAL; 846 846 } 847 847 dev_priv->warp = drm_core_findmap(dev, init->warp_offset); 848 848 if (!dev_priv->warp) { 849 849 DRM_ERROR("failed to find warp microcode region!\n"); 850 - return DRM_ERR(EINVAL); 850 + return -EINVAL; 851 851 } 852 852 dev_priv->primary = drm_core_findmap(dev, init->primary_offset); 853 853 if (!dev_priv->primary) { 854 854 DRM_ERROR("failed to find primary dma region!\n"); 855 - return DRM_ERR(EINVAL); 855 + return -EINVAL; 856 856 } 857 857 dev->agp_buffer_token = init->buffers_offset; 858 858 dev->agp_buffer_map = 859 859 drm_core_findmap(dev, init->buffers_offset); 860 860 if (!dev->agp_buffer_map) { 
861 861 DRM_ERROR("failed to find dma buffer region!\n"); 862 - return DRM_ERR(EINVAL); 862 + return -EINVAL; 863 863 } 864 864 865 865 drm_core_ioremap(dev_priv->warp, dev); ··· 877 877 ((dev->agp_buffer_map == NULL) || 878 878 (dev->agp_buffer_map->handle == NULL)))) { 879 879 DRM_ERROR("failed to ioremap agp regions!\n"); 880 - return DRM_ERR(ENOMEM); 880 + return -ENOMEM; 881 881 } 882 882 883 883 ret = mga_warp_install_microcode(dev_priv); ··· 927 927 928 928 if (mga_freelist_init(dev, dev_priv) < 0) { 929 929 DRM_ERROR("could not initialize freelist\n"); 930 - return DRM_ERR(ENOMEM); 930 + return -ENOMEM; 931 931 } 932 932 933 933 return 0; ··· 1029 1029 return mga_do_cleanup_dma(dev, FULL_CLEANUP); 1030 1030 } 1031 1031 1032 - return DRM_ERR(EINVAL); 1032 + return -EINVAL; 1033 1033 } 1034 1034 1035 1035 /* ================================================================ ··· 1094 1094 for (i = d->granted_count; i < d->request_count; i++) { 1095 1095 buf = mga_freelist_get(dev); 1096 1096 if (!buf) 1097 - return DRM_ERR(EAGAIN); 1097 + return -EAGAIN; 1098 1098 1099 1099 buf->filp = filp; 1100 1100 1101 1101 if (DRM_COPY_TO_USER(&d->request_indices[i], 1102 1102 &buf->idx, sizeof(buf->idx))) 1103 - return DRM_ERR(EFAULT); 1103 + return -EFAULT; 1104 1104 if (DRM_COPY_TO_USER(&d->request_sizes[i], 1105 1105 &buf->total, sizeof(buf->total))) 1106 - return DRM_ERR(EFAULT); 1106 + return -EFAULT; 1107 1107 1108 1108 d->granted_count++; 1109 1109 } ··· 1128 1128 if (d.send_count != 0) { 1129 1129 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 1130 1130 DRM_CURRENTPID, d.send_count); 1131 - return DRM_ERR(EINVAL); 1131 + return -EINVAL; 1132 1132 } 1133 1133 1134 1134 /* We'll send you buffers. 
··· 1136 1136 if (d.request_count < 0 || d.request_count > dma->buf_count) { 1137 1137 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 1138 1138 DRM_CURRENTPID, d.request_count, dma->buf_count); 1139 - return DRM_ERR(EINVAL); 1139 + return -EINVAL; 1140 1140 } 1141 1141 1142 1142 WRAP_TEST_WITH_RETURN(dev_priv);
+2 -2
drivers/char/drm/mga_drv.h
··· 245 245 dev_priv->prim.high_mark ) { \ 246 246 if ( MGA_DMA_DEBUG ) \ 247 247 DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \ 248 - return DRM_ERR(EBUSY); \ 248 + return -EBUSY; \ 249 249 } \ 250 250 } \ 251 251 } while (0) ··· 256 256 if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \ 257 257 if ( MGA_DMA_DEBUG ) \ 258 258 DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \ 259 - return DRM_ERR(EBUSY); \ 259 + return -EBUSY; \ 260 260 } \ 261 261 mga_do_dma_wrap_end( dev_priv ); \ 262 262 } \
+20 -20
drivers/char/drm/mga_state.c
··· 392 392 ctx->dstorg, dev_priv->front_offset, 393 393 dev_priv->back_offset); 394 394 ctx->dstorg = 0; 395 - return DRM_ERR(EINVAL); 395 + return -EINVAL; 396 396 } 397 397 398 398 return 0; ··· 411 411 if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) { 412 412 DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit); 413 413 tex->texorg = 0; 414 - return DRM_ERR(EINVAL); 414 + return -EINVAL; 415 415 } 416 416 417 417 return 0; ··· 453 453 dstorg + length > (dev_priv->texture_offset + 454 454 dev_priv->texture_size)) { 455 455 DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg); 456 - return DRM_ERR(EINVAL); 456 + return -EINVAL; 457 457 } 458 458 459 459 if (length & MGA_ILOAD_MASK) { 460 460 DRM_ERROR("*** bad iload length: 0x%x\n", 461 461 length & MGA_ILOAD_MASK); 462 - return DRM_ERR(EINVAL); 462 + return -EINVAL; 463 463 } 464 464 465 465 return 0; ··· 471 471 if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) || 472 472 (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) { 473 473 DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg); 474 - return DRM_ERR(EINVAL); 474 + return -EINVAL; 475 475 } 476 476 return 0; 477 477 } ··· 892 892 sizeof(vertex)); 893 893 894 894 if (vertex.idx < 0 || vertex.idx > dma->buf_count) 895 - return DRM_ERR(EINVAL); 895 + return -EINVAL; 896 896 buf = dma->buflist[vertex.idx]; 897 897 buf_priv = buf->dev_private; 898 898 ··· 906 906 buf_priv->dispatched = 0; 907 907 mga_freelist_put(dev, buf); 908 908 } 909 - return DRM_ERR(EINVAL); 909 + return -EINVAL; 910 910 } 911 911 912 912 WRAP_TEST_WITH_RETURN(dev_priv); ··· 932 932 sizeof(indices)); 933 933 934 934 if (indices.idx < 0 || indices.idx > dma->buf_count) 935 - return DRM_ERR(EINVAL); 935 + return -EINVAL; 936 936 937 937 buf = dma->buflist[indices.idx]; 938 938 buf_priv = buf->dev_private; ··· 946 946 buf_priv->dispatched = 0; 947 947 mga_freelist_put(dev, buf); 948 948 } 949 - return DRM_ERR(EINVAL); 949 + return -EINVAL; 950 950 } 951 
951 952 952 WRAP_TEST_WITH_RETURN(dev_priv); ··· 975 975 if (mga_do_wait_for_idle(dev_priv) < 0) { 976 976 if (MGA_DMA_DEBUG) 977 977 DRM_INFO("%s: -EBUSY\n", __FUNCTION__); 978 - return DRM_ERR(EBUSY); 978 + return -EBUSY; 979 979 } 980 980 #endif 981 981 if (iload.idx < 0 || iload.idx > dma->buf_count) 982 - return DRM_ERR(EINVAL); 982 + return -EINVAL; 983 983 984 984 buf = dma->buflist[iload.idx]; 985 985 buf_priv = buf->dev_private; 986 986 987 987 if (mga_verify_iload(dev_priv, iload.dstorg, iload.length)) { 988 988 mga_freelist_put(dev, buf); 989 - return DRM_ERR(EINVAL); 989 + return -EINVAL; 990 990 } 991 991 992 992 WRAP_TEST_WITH_RETURN(dev_priv); ··· 1017 1017 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; 1018 1018 1019 1019 if (mga_verify_blit(dev_priv, blit.srcorg, blit.dstorg)) 1020 - return DRM_ERR(EINVAL); 1020 + return -EINVAL; 1021 1021 1022 1022 WRAP_TEST_WITH_RETURN(dev_priv); 1023 1023 ··· 1039 1039 1040 1040 if (!dev_priv) { 1041 1041 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1042 - return DRM_ERR(EINVAL); 1042 + return -EINVAL; 1043 1043 } 1044 1044 1045 1045 DRM_COPY_FROM_USER_IOCTL(param, (drm_mga_getparam_t __user *) data, ··· 1055 1055 value = dev_priv->chipset; 1056 1056 break; 1057 1057 default: 1058 - return DRM_ERR(EINVAL); 1058 + return -EINVAL; 1059 1059 } 1060 1060 1061 1061 if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 1062 1062 DRM_ERROR("copy_to_user\n"); 1063 - return DRM_ERR(EFAULT); 1063 + return -EFAULT; 1064 1064 } 1065 1065 1066 1066 return 0; ··· 1075 1075 1076 1076 if (!dev_priv) { 1077 1077 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1078 - return DRM_ERR(EINVAL); 1078 + return -EINVAL; 1079 1079 } 1080 1080 1081 1081 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); ··· 1095 1095 1096 1096 if (DRM_COPY_TO_USER((u32 __user *) data, &temp, sizeof(u32))) { 1097 1097 DRM_ERROR("copy_to_user\n"); 1098 - return DRM_ERR(EFAULT); 1098 + return -EFAULT; 1099 1099 } 1100 1100 1101 1101 
return 0; ··· 1109 1109 1110 1110 if (!dev_priv) { 1111 1111 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1112 - return DRM_ERR(EINVAL); 1112 + return -EINVAL; 1113 1113 } 1114 1114 1115 1115 DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32)); ··· 1120 1120 1121 1121 if (DRM_COPY_TO_USER((u32 __user *) data, &fence, sizeof(u32))) { 1122 1122 DRM_ERROR("copy_to_user\n"); 1123 - return DRM_ERR(EFAULT); 1123 + return -EFAULT; 1124 1124 } 1125 1125 1126 1126 return 0;
+4 -4
drivers/char/drm/mga_warp.c
··· 141 141 if (size > dev_priv->warp->size) { 142 142 DRM_ERROR("microcode too large! (%u > %lu)\n", 143 143 size, dev_priv->warp->size); 144 - return DRM_ERR(ENOMEM); 144 + return -ENOMEM; 145 145 } 146 146 147 147 switch (dev_priv->chipset) { ··· 151 151 case MGA_CARD_TYPE_G200: 152 152 return mga_warp_install_g200_microcode(dev_priv); 153 153 default: 154 - return DRM_ERR(EINVAL); 154 + return -EINVAL; 155 155 } 156 156 } 157 157 ··· 177 177 MGA_WRITE(MGA_WVRTXSZ, 7); 178 178 break; 179 179 default: 180 - return DRM_ERR(EINVAL); 180 + return -EINVAL; 181 181 } 182 182 183 183 MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE | ··· 186 186 if (wmisc != WMISC_EXPECTED) { 187 187 DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n", 188 188 wmisc, WMISC_EXPECTED); 189 - return DRM_ERR(EINVAL); 189 + return -EINVAL; 190 190 } 191 191 192 192 return 0;
+27 -27
drivers/char/drm/r128_cce.c
··· 129 129 #if R128_FIFO_DEBUG 130 130 DRM_ERROR("failed!\n"); 131 131 #endif 132 - return DRM_ERR(EBUSY); 132 + return -EBUSY; 133 133 } 134 134 135 135 static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries) ··· 146 146 #if R128_FIFO_DEBUG 147 147 DRM_ERROR("failed!\n"); 148 148 #endif 149 - return DRM_ERR(EBUSY); 149 + return -EBUSY; 150 150 } 151 151 152 152 static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv) ··· 168 168 #if R128_FIFO_DEBUG 169 169 DRM_ERROR("failed!\n"); 170 170 #endif 171 - return DRM_ERR(EBUSY); 171 + return -EBUSY; 172 172 } 173 173 174 174 /* ================================================================ ··· 227 227 DRM_ERROR("failed!\n"); 228 228 r128_status(dev_priv); 229 229 #endif 230 - return DRM_ERR(EBUSY); 230 + return -EBUSY; 231 231 } 232 232 233 233 /* Start the Concurrent Command Engine. ··· 355 355 356 356 dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER); 357 357 if (dev_priv == NULL) 358 - return DRM_ERR(ENOMEM); 358 + return -ENOMEM; 359 359 360 360 memset(dev_priv, 0, sizeof(drm_r128_private_t)); 361 361 ··· 365 365 DRM_ERROR("PCI GART memory not allocated!\n"); 366 366 dev->dev_private = (void *)dev_priv; 367 367 r128_do_cleanup_cce(dev); 368 - return DRM_ERR(EINVAL); 368 + return -EINVAL; 369 369 } 370 370 371 371 dev_priv->usec_timeout = init->usec_timeout; ··· 374 374 DRM_DEBUG("TIMEOUT problem!\n"); 375 375 dev->dev_private = (void *)dev_priv; 376 376 r128_do_cleanup_cce(dev); 377 - return DRM_ERR(EINVAL); 377 + return -EINVAL; 378 378 } 379 379 380 380 dev_priv->cce_mode = init->cce_mode; ··· 394 394 DRM_DEBUG("Bad cce_mode!\n"); 395 395 dev->dev_private = (void *)dev_priv; 396 396 r128_do_cleanup_cce(dev); 397 - return DRM_ERR(EINVAL); 397 + return -EINVAL; 398 398 } 399 399 400 400 switch (init->cce_mode) { ··· 461 461 DRM_ERROR("could not find sarea!\n"); 462 462 dev->dev_private = (void *)dev_priv; 463 463 r128_do_cleanup_cce(dev); 464 - return DRM_ERR(EINVAL); 
464 + return -EINVAL; 465 465 } 466 466 467 467 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); ··· 469 469 DRM_ERROR("could not find mmio region!\n"); 470 470 dev->dev_private = (void *)dev_priv; 471 471 r128_do_cleanup_cce(dev); 472 - return DRM_ERR(EINVAL); 472 + return -EINVAL; 473 473 } 474 474 dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset); 475 475 if (!dev_priv->cce_ring) { 476 476 DRM_ERROR("could not find cce ring region!\n"); 477 477 dev->dev_private = (void *)dev_priv; 478 478 r128_do_cleanup_cce(dev); 479 - return DRM_ERR(EINVAL); 479 + return -EINVAL; 480 480 } 481 481 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); 482 482 if (!dev_priv->ring_rptr) { 483 483 DRM_ERROR("could not find ring read pointer!\n"); 484 484 dev->dev_private = (void *)dev_priv; 485 485 r128_do_cleanup_cce(dev); 486 - return DRM_ERR(EINVAL); 486 + return -EINVAL; 487 487 } 488 488 dev->agp_buffer_token = init->buffers_offset; 489 489 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); ··· 491 491 DRM_ERROR("could not find dma buffer region!\n"); 492 492 dev->dev_private = (void *)dev_priv; 493 493 r128_do_cleanup_cce(dev); 494 - return DRM_ERR(EINVAL); 494 + return -EINVAL; 495 495 } 496 496 497 497 if (!dev_priv->is_pci) { ··· 501 501 DRM_ERROR("could not find agp texture region!\n"); 502 502 dev->dev_private = (void *)dev_priv; 503 503 r128_do_cleanup_cce(dev); 504 - return DRM_ERR(EINVAL); 504 + return -EINVAL; 505 505 } 506 506 } 507 507 ··· 520 520 DRM_ERROR("Could not ioremap agp regions!\n"); 521 521 dev->dev_private = (void *)dev_priv; 522 522 r128_do_cleanup_cce(dev); 523 - return DRM_ERR(ENOMEM); 523 + return -ENOMEM; 524 524 } 525 525 } else 526 526 #endif ··· 567 567 DRM_ERROR("failed to init PCI GART!\n"); 568 568 dev->dev_private = (void *)dev_priv; 569 569 r128_do_cleanup_cce(dev); 570 - return DRM_ERR(ENOMEM); 570 + return -ENOMEM; 571 571 } 572 572 R128_WRITE(R128_PCI_GART_PAGE, 
dev_priv->gart_info.bus_addr); 573 573 #if __OS_HAS_AGP ··· 644 644 return r128_do_cleanup_cce(dev); 645 645 } 646 646 647 - return DRM_ERR(EINVAL); 647 + return -EINVAL; 648 648 } 649 649 650 650 int r128_cce_start(DRM_IOCTL_ARGS) ··· 721 721 722 722 if (!dev_priv) { 723 723 DRM_DEBUG("%s called before init done\n", __FUNCTION__); 724 - return DRM_ERR(EINVAL); 724 + return -EINVAL; 725 725 } 726 726 727 727 r128_do_cce_reset(dev_priv); ··· 759 759 760 760 int r128_fullscreen(DRM_IOCTL_ARGS) 761 761 { 762 - return DRM_ERR(EINVAL); 762 + return -EINVAL; 763 763 } 764 764 765 765 /* ================================================================ ··· 780 780 781 781 dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); 782 782 if (dev_priv->head == NULL) 783 - return DRM_ERR(ENOMEM); 783 + return -ENOMEM; 784 784 785 785 memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t)); 786 786 dev_priv->head->age = R128_BUFFER_USED; ··· 791 791 792 792 entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); 793 793 if (!entry) 794 - return DRM_ERR(ENOMEM); 794 + return -ENOMEM; 795 795 796 796 entry->age = R128_BUFFER_FREE; 797 797 entry->buf = buf; ··· 883 883 884 884 /* FIXME: This is being ignored... 
*/ 885 885 DRM_ERROR("failed!\n"); 886 - return DRM_ERR(EBUSY); 886 + return -EBUSY; 887 887 } 888 888 889 889 static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d) ··· 894 894 for (i = d->granted_count; i < d->request_count; i++) { 895 895 buf = r128_freelist_get(dev); 896 896 if (!buf) 897 - return DRM_ERR(EAGAIN); 897 + return -EAGAIN; 898 898 899 899 buf->filp = filp; 900 900 901 901 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, 902 902 sizeof(buf->idx))) 903 - return DRM_ERR(EFAULT); 903 + return -EFAULT; 904 904 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, 905 905 sizeof(buf->total))) 906 - return DRM_ERR(EFAULT); 906 + return -EFAULT; 907 907 908 908 d->granted_count++; 909 909 } ··· 927 927 if (d.send_count != 0) { 928 928 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 929 929 DRM_CURRENTPID, d.send_count); 930 - return DRM_ERR(EINVAL); 930 + return -EINVAL; 931 931 } 932 932 933 933 /* We'll send you buffers. ··· 935 935 if (d.request_count < 0 || d.request_count > dma->buf_count) { 936 936 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 937 937 DRM_CURRENTPID, d.request_count, dma->buf_count); 938 - return DRM_ERR(EINVAL); 938 + return -EINVAL; 939 939 } 940 940 941 941 d.granted_count = 0;
+1 -1
drivers/char/drm/r128_drv.h
··· 428 428 DRM_UDELAY(1); \ 429 429 } \ 430 430 DRM_ERROR( "ring space check failed!\n" ); \ 431 - return DRM_ERR(EBUSY); \ 431 + return -EBUSY; \ 432 432 } \ 433 433 __ring_space_done: \ 434 434 ; \
+50 -50
drivers/char/drm/r128_state.c
··· 809 809 break; 810 810 default: 811 811 DRM_ERROR("invalid blit format %d\n", blit->format); 812 - return DRM_ERR(EINVAL); 812 + return -EINVAL; 813 813 } 814 814 815 815 /* Flush the pixel cache, and mark the contents as Read Invalid. ··· 832 832 if (buf->filp != filp) { 833 833 DRM_ERROR("process %d using buffer owned by %p\n", 834 834 DRM_CURRENTPID, buf->filp); 835 - return DRM_ERR(EINVAL); 835 + return -EINVAL; 836 836 } 837 837 if (buf->pending) { 838 838 DRM_ERROR("sending pending buffer %d\n", blit->idx); 839 - return DRM_ERR(EINVAL); 839 + return -EINVAL; 840 840 } 841 841 842 842 buf_priv->discard = 1; ··· 900 900 901 901 count = depth->n; 902 902 if (count > 4096 || count <= 0) 903 - return DRM_ERR(EMSGSIZE); 903 + return -EMSGSIZE; 904 904 905 905 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { 906 - return DRM_ERR(EFAULT); 906 + return -EFAULT; 907 907 } 908 908 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { 909 - return DRM_ERR(EFAULT); 909 + return -EFAULT; 910 910 } 911 911 912 912 buffer_size = depth->n * sizeof(u32); 913 913 buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); 914 914 if (buffer == NULL) 915 - return DRM_ERR(ENOMEM); 915 + return -ENOMEM; 916 916 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { 917 917 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 918 - return DRM_ERR(EFAULT); 918 + return -EFAULT; 919 919 } 920 920 921 921 mask_size = depth->n * sizeof(u8); ··· 923 923 mask = drm_alloc(mask_size, DRM_MEM_BUFS); 924 924 if (mask == NULL) { 925 925 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 926 - return DRM_ERR(ENOMEM); 926 + return -ENOMEM; 927 927 } 928 928 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { 929 929 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 930 930 drm_free(mask, mask_size, DRM_MEM_BUFS); 931 - return DRM_ERR(EFAULT); 931 + return -EFAULT; 932 932 } 933 933 934 934 for (i = 0; i < count; i++, x++) { ··· 996 996 997 997 count = depth->n; 998 998 if (count > 4096 || count <= 0) 999 - 
return DRM_ERR(EMSGSIZE); 999 + return -EMSGSIZE; 1000 1000 1001 1001 xbuf_size = count * sizeof(*x); 1002 1002 ybuf_size = count * sizeof(*y); 1003 1003 x = drm_alloc(xbuf_size, DRM_MEM_BUFS); 1004 1004 if (x == NULL) { 1005 - return DRM_ERR(ENOMEM); 1005 + return -ENOMEM; 1006 1006 } 1007 1007 y = drm_alloc(ybuf_size, DRM_MEM_BUFS); 1008 1008 if (y == NULL) { 1009 1009 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1010 - return DRM_ERR(ENOMEM); 1010 + return -ENOMEM; 1011 1011 } 1012 1012 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { 1013 1013 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1014 1014 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1015 - return DRM_ERR(EFAULT); 1015 + return -EFAULT; 1016 1016 } 1017 1017 if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) { 1018 1018 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1019 1019 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1020 - return DRM_ERR(EFAULT); 1020 + return -EFAULT; 1021 1021 } 1022 1022 1023 1023 buffer_size = depth->n * sizeof(u32); ··· 1025 1025 if (buffer == NULL) { 1026 1026 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1027 1027 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1028 - return DRM_ERR(ENOMEM); 1028 + return -ENOMEM; 1029 1029 } 1030 1030 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { 1031 1031 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1032 1032 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1033 1033 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1034 - return DRM_ERR(EFAULT); 1034 + return -EFAULT; 1035 1035 } 1036 1036 1037 1037 if (depth->mask) { ··· 1041 1041 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1042 1042 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1043 1043 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1044 - return DRM_ERR(ENOMEM); 1044 + return -ENOMEM; 1045 1045 } 1046 1046 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { 1047 1047 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1048 1048 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1049 1049 drm_free(buffer, buffer_size, DRM_MEM_BUFS); 1050 1050 drm_free(mask, mask_size, DRM_MEM_BUFS); 1051 
- return DRM_ERR(EFAULT); 1051 + return -EFAULT; 1052 1052 } 1053 1053 1054 1054 for (i = 0; i < count; i++) { ··· 1115 1115 1116 1116 count = depth->n; 1117 1117 if (count > 4096 || count <= 0) 1118 - return DRM_ERR(EMSGSIZE); 1118 + return -EMSGSIZE; 1119 1119 1120 1120 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { 1121 - return DRM_ERR(EFAULT); 1121 + return -EFAULT; 1122 1122 } 1123 1123 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { 1124 - return DRM_ERR(EFAULT); 1124 + return -EFAULT; 1125 1125 } 1126 1126 1127 1127 BEGIN_RING(7); ··· 1159 1159 1160 1160 count = depth->n; 1161 1161 if (count > 4096 || count <= 0) 1162 - return DRM_ERR(EMSGSIZE); 1162 + return -EMSGSIZE; 1163 1163 1164 1164 if (count > dev_priv->depth_pitch) { 1165 1165 count = dev_priv->depth_pitch; ··· 1169 1169 ybuf_size = count * sizeof(*y); 1170 1170 x = drm_alloc(xbuf_size, DRM_MEM_BUFS); 1171 1171 if (x == NULL) { 1172 - return DRM_ERR(ENOMEM); 1172 + return -ENOMEM; 1173 1173 } 1174 1174 y = drm_alloc(ybuf_size, DRM_MEM_BUFS); 1175 1175 if (y == NULL) { 1176 1176 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1177 - return DRM_ERR(ENOMEM); 1177 + return -ENOMEM; 1178 1178 } 1179 1179 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { 1180 1180 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1181 1181 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1182 - return DRM_ERR(EFAULT); 1182 + return -EFAULT; 1183 1183 } 1184 1184 if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) { 1185 1185 drm_free(x, xbuf_size, DRM_MEM_BUFS); 1186 1186 drm_free(y, ybuf_size, DRM_MEM_BUFS); 1187 - return DRM_ERR(EFAULT); 1187 + return -EFAULT; 1188 1188 } 1189 1189 1190 1190 for (i = 0; i < count; i++) { ··· 1363 1363 1364 1364 if (!dev_priv) { 1365 1365 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1366 - return DRM_ERR(EINVAL); 1366 + return -EINVAL; 1367 1367 } 1368 1368 1369 1369 DRM_COPY_FROM_USER_IOCTL(vertex, (drm_r128_vertex_t __user *) data, ··· 1375 1375 if (vertex.idx < 0 || vertex.idx >= dma->buf_count) 
{ 1376 1376 DRM_ERROR("buffer index %d (of %d max)\n", 1377 1377 vertex.idx, dma->buf_count - 1); 1378 - return DRM_ERR(EINVAL); 1378 + return -EINVAL; 1379 1379 } 1380 1380 if (vertex.prim < 0 || 1381 1381 vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { 1382 1382 DRM_ERROR("buffer prim %d\n", vertex.prim); 1383 - return DRM_ERR(EINVAL); 1383 + return -EINVAL; 1384 1384 } 1385 1385 1386 1386 RING_SPACE_TEST_WITH_RETURN(dev_priv); ··· 1392 1392 if (buf->filp != filp) { 1393 1393 DRM_ERROR("process %d using buffer owned by %p\n", 1394 1394 DRM_CURRENTPID, buf->filp); 1395 - return DRM_ERR(EINVAL); 1395 + return -EINVAL; 1396 1396 } 1397 1397 if (buf->pending) { 1398 1398 DRM_ERROR("sending pending buffer %d\n", vertex.idx); 1399 - return DRM_ERR(EINVAL); 1399 + return -EINVAL; 1400 1400 } 1401 1401 1402 1402 buf->used = vertex.count; ··· 1423 1423 1424 1424 if (!dev_priv) { 1425 1425 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1426 - return DRM_ERR(EINVAL); 1426 + return -EINVAL; 1427 1427 } 1428 1428 1429 1429 DRM_COPY_FROM_USER_IOCTL(elts, (drm_r128_indices_t __user *) data, ··· 1435 1435 if (elts.idx < 0 || elts.idx >= dma->buf_count) { 1436 1436 DRM_ERROR("buffer index %d (of %d max)\n", 1437 1437 elts.idx, dma->buf_count - 1); 1438 - return DRM_ERR(EINVAL); 1438 + return -EINVAL; 1439 1439 } 1440 1440 if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { 1441 1441 DRM_ERROR("buffer prim %d\n", elts.prim); 1442 - return DRM_ERR(EINVAL); 1442 + return -EINVAL; 1443 1443 } 1444 1444 1445 1445 RING_SPACE_TEST_WITH_RETURN(dev_priv); ··· 1451 1451 if (buf->filp != filp) { 1452 1452 DRM_ERROR("process %d using buffer owned by %p\n", 1453 1453 DRM_CURRENTPID, buf->filp); 1454 - return DRM_ERR(EINVAL); 1454 + return -EINVAL; 1455 1455 } 1456 1456 if (buf->pending) { 1457 1457 DRM_ERROR("sending pending buffer %d\n", elts.idx); 1458 - return DRM_ERR(EINVAL); 1458 + return -EINVAL; 1459 1459 } 1460 1460 1461 1461 count = 
(elts.end - elts.start) / sizeof(u16); ··· 1463 1463 1464 1464 if (elts.start & 0x7) { 1465 1465 DRM_ERROR("misaligned buffer 0x%x\n", elts.start); 1466 - return DRM_ERR(EINVAL); 1466 + return -EINVAL; 1467 1467 } 1468 1468 if (elts.start < buf->used) { 1469 1469 DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); 1470 - return DRM_ERR(EINVAL); 1470 + return -EINVAL; 1471 1471 } 1472 1472 1473 1473 buf->used = elts.end; ··· 1498 1498 if (blit.idx < 0 || blit.idx >= dma->buf_count) { 1499 1499 DRM_ERROR("buffer index %d (of %d max)\n", 1500 1500 blit.idx, dma->buf_count - 1); 1501 - return DRM_ERR(EINVAL); 1501 + return -EINVAL; 1502 1502 } 1503 1503 1504 1504 RING_SPACE_TEST_WITH_RETURN(dev_priv); ··· 1524 1524 1525 1525 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1526 1526 1527 - ret = DRM_ERR(EINVAL); 1527 + ret = -EINVAL; 1528 1528 switch (depth.func) { 1529 1529 case R128_WRITE_SPAN: 1530 1530 ret = r128_cce_dispatch_write_span(dev, &depth); ··· 1557 1557 sizeof(stipple)); 1558 1558 1559 1559 if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) 1560 - return DRM_ERR(EFAULT); 1560 + return -EFAULT; 1561 1561 1562 1562 RING_SPACE_TEST_WITH_RETURN(dev_priv); 1563 1563 ··· 1583 1583 1584 1584 if (!dev_priv) { 1585 1585 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1586 - return DRM_ERR(EINVAL); 1586 + return -EINVAL; 1587 1587 } 1588 1588 1589 1589 DRM_COPY_FROM_USER_IOCTL(indirect, (drm_r128_indirect_t __user *) data, ··· 1595 1595 if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { 1596 1596 DRM_ERROR("buffer index %d (of %d max)\n", 1597 1597 indirect.idx, dma->buf_count - 1); 1598 - return DRM_ERR(EINVAL); 1598 + return -EINVAL; 1599 1599 } 1600 1600 1601 1601 buf = dma->buflist[indirect.idx]; ··· 1604 1604 if (buf->filp != filp) { 1605 1605 DRM_ERROR("process %d using buffer owned by %p\n", 1606 1606 DRM_CURRENTPID, buf->filp); 1607 - return DRM_ERR(EINVAL); 1607 + return -EINVAL; 1608 1608 } 1609 1609 if (buf->pending) { 
1610 1610 DRM_ERROR("sending pending buffer %d\n", indirect.idx); 1611 - return DRM_ERR(EINVAL); 1611 + return -EINVAL; 1612 1612 } 1613 1613 1614 1614 if (indirect.start < buf->used) { 1615 1615 DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", 1616 1616 indirect.start, buf->used); 1617 - return DRM_ERR(EINVAL); 1617 + return -EINVAL; 1618 1618 } 1619 1619 1620 1620 RING_SPACE_TEST_WITH_RETURN(dev_priv); ··· 1651 1651 1652 1652 if (!dev_priv) { 1653 1653 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 1654 - return DRM_ERR(EINVAL); 1654 + return -EINVAL; 1655 1655 } 1656 1656 1657 1657 DRM_COPY_FROM_USER_IOCTL(param, (drm_r128_getparam_t __user *) data, ··· 1664 1664 value = dev->irq; 1665 1665 break; 1666 1666 default: 1667 - return DRM_ERR(EINVAL); 1667 + return -EINVAL; 1668 1668 } 1669 1669 1670 1670 if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 1671 1671 DRM_ERROR("copy_to_user\n"); 1672 - return DRM_ERR(EFAULT); 1672 + return -EFAULT; 1673 1673 } 1674 1674 1675 1675 return 0;
+30 -30
drivers/char/drm/r300_cmdbuf.c
··· 74 74 if (DRM_COPY_FROM_USER_UNCHECKED 75 75 (&box, &cmdbuf->boxes[n + i], sizeof(box))) { 76 76 DRM_ERROR("copy cliprect faulted\n"); 77 - return DRM_ERR(EFAULT); 77 + return -EFAULT; 78 78 } 79 79 80 80 box.x1 = ··· 263 263 DRM_ERROR 264 264 ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", 265 265 reg, sz); 266 - return DRM_ERR(EINVAL); 266 + return -EINVAL; 267 267 } 268 268 for (i = 0; i < sz; i++) { 269 269 values[i] = ((int *)cmdbuf->buf)[i]; ··· 275 275 DRM_ERROR 276 276 ("Offset failed range check (reg=%04x sz=%d)\n", 277 277 reg, sz); 278 - return DRM_ERR(EINVAL); 278 + return -EINVAL; 279 279 } 280 280 break; 281 281 default: 282 282 DRM_ERROR("Register %04x failed check as flag=%02x\n", 283 283 reg + i * 4, r300_reg_flags[(reg >> 2) + i]); 284 - return DRM_ERR(EINVAL); 284 + return -EINVAL; 285 285 } 286 286 } 287 287 ··· 317 317 return 0; 318 318 319 319 if (sz * 4 > cmdbuf->bufsz) 320 - return DRM_ERR(EINVAL); 320 + return -EINVAL; 321 321 322 322 if (reg + sz * 4 >= 0x10000) { 323 323 DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, 324 324 sz); 325 - return DRM_ERR(EINVAL); 325 + return -EINVAL; 326 326 } 327 327 328 328 if (r300_check_range(reg, sz)) { ··· 362 362 if (!sz) 363 363 return 0; 364 364 if (sz * 16 > cmdbuf->bufsz) 365 - return DRM_ERR(EINVAL); 365 + return -EINVAL; 366 366 367 367 BEGIN_RING(5 + sz * 4); 368 368 /* Wait for VAP to come to senses.. 
*/ ··· 391 391 RING_LOCALS; 392 392 393 393 if (8 * 4 > cmdbuf->bufsz) 394 - return DRM_ERR(EINVAL); 394 + return -EINVAL; 395 395 396 396 BEGIN_RING(10); 397 397 OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8)); ··· 421 421 if ((count + 1) > MAX_ARRAY_PACKET) { 422 422 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", 423 423 count); 424 - return DRM_ERR(EINVAL); 424 + return -EINVAL; 425 425 } 426 426 memset(payload, 0, MAX_ARRAY_PACKET * 4); 427 427 memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4); ··· 437 437 DRM_ERROR 438 438 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", 439 439 k, i); 440 - return DRM_ERR(EINVAL); 440 + return -EINVAL; 441 441 } 442 442 k++; 443 443 i++; ··· 448 448 DRM_ERROR 449 449 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", 450 450 k, i); 451 - return DRM_ERR(EINVAL); 451 + return -EINVAL; 452 452 } 453 453 k++; 454 454 i++; ··· 458 458 DRM_ERROR 459 459 ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", 460 460 k, i, narrays, count + 1); 461 - return DRM_ERR(EINVAL); 461 + return -EINVAL; 462 462 } 463 463 464 464 /* all clear, output packet */ ··· 492 492 ret = !radeon_check_offset(dev_priv, offset); 493 493 if (ret) { 494 494 DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); 495 - return DRM_ERR(EINVAL); 495 + return -EINVAL; 496 496 } 497 497 } 498 498 ··· 502 502 ret = !radeon_check_offset(dev_priv, offset); 503 503 if (ret) { 504 504 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); 505 - return DRM_ERR(EINVAL); 505 + return -EINVAL; 506 506 } 507 507 508 508 } ··· 530 530 531 531 if ((cmd[1] & 0x8000ffff) != 0x80000810) { 532 532 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); 533 - return DRM_ERR(EINVAL); 533 + return -EINVAL; 534 534 } 535 535 ret = !radeon_check_offset(dev_priv, cmd[2]); 536 536 if (ret) { 537 537 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); 538 - return 
DRM_ERR(EINVAL); 538 + return -EINVAL; 539 539 } 540 540 541 541 BEGIN_RING(count+2); ··· 557 557 RING_LOCALS; 558 558 559 559 if (4 > cmdbuf->bufsz) 560 - return DRM_ERR(EINVAL); 560 + return -EINVAL; 561 561 562 562 /* Fixme !! This simply emits a packet without much checking. 563 563 We need to be smarter. */ ··· 568 568 /* Is it packet 3 ? */ 569 569 if ((header >> 30) != 0x3) { 570 570 DRM_ERROR("Not a packet3 header (0x%08x)\n", header); 571 - return DRM_ERR(EINVAL); 571 + return -EINVAL; 572 572 } 573 573 574 574 count = (header >> 16) & 0x3fff; ··· 578 578 DRM_ERROR 579 579 ("Expected packet3 of length %d but have only %d bytes left\n", 580 580 (count + 2) * 4, cmdbuf->bufsz); 581 - return DRM_ERR(EINVAL); 581 + return -EINVAL; 582 582 } 583 583 584 584 /* Is it a packet type we know about ? */ ··· 600 600 break; 601 601 default: 602 602 DRM_ERROR("Unknown packet3 header (0x%08x)\n", header); 603 - return DRM_ERR(EINVAL); 603 + return -EINVAL; 604 604 } 605 605 606 606 BEGIN_RING(count + 2); ··· 664 664 DRM_ERROR("bad packet3 type %i at %p\n", 665 665 header.packet3.packet, 666 666 cmdbuf->buf - sizeof(header)); 667 - return DRM_ERR(EINVAL); 667 + return -EINVAL; 668 668 } 669 669 670 670 n += R300_SIMULTANEOUS_CLIPRECTS; ··· 726 726 727 727 if (cmdbuf->bufsz < 728 728 (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) { 729 - return DRM_ERR(EINVAL); 729 + return -EINVAL; 730 730 } 731 731 732 732 if (header.scratch.reg >= 5) { 733 - return DRM_ERR(EINVAL); 733 + return -EINVAL; 734 734 } 735 735 736 736 dev_priv->scratch_ages[header.scratch.reg]++; ··· 745 745 buf_idx *= 2; /* 8 bytes per buf */ 746 746 747 747 if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) { 748 - return DRM_ERR(EINVAL); 748 + return -EINVAL; 749 749 } 750 750 751 751 if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) { 752 - return DRM_ERR(EINVAL); 752 + return -EINVAL; 753 753 } 754 754 755 755 if 
(h_pending == 0) { 756 - return DRM_ERR(EINVAL); 756 + return -EINVAL; 757 757 } 758 758 759 759 h_pending--; 760 760 761 761 if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) { 762 - return DRM_ERR(EINVAL); 762 + return -EINVAL; 763 763 } 764 764 765 765 cmdbuf->buf += sizeof(buf_idx); ··· 879 879 if (idx < 0 || idx >= dma->buf_count) { 880 880 DRM_ERROR("buffer index %d (of %d max)\n", 881 881 idx, dma->buf_count - 1); 882 - ret = DRM_ERR(EINVAL); 882 + ret = -EINVAL; 883 883 goto cleanup; 884 884 } 885 885 ··· 887 887 if (buf->filp != filp || buf->pending) { 888 888 DRM_ERROR("bad buffer %p %p %d\n", 889 889 buf->filp, filp, buf->pending); 890 - ret = DRM_ERR(EINVAL); 890 + ret = -EINVAL; 891 891 goto cleanup; 892 892 } 893 893 ··· 924 924 DRM_ERROR("bad cmd_type %i at %p\n", 925 925 header.header.cmd_type, 926 926 cmdbuf->buf - sizeof(header)); 927 - ret = DRM_ERR(EINVAL); 927 + ret = -EINVAL; 928 928 goto cleanup; 929 929 } 930 930 }
+25 -25
drivers/char/drm/radeon_cp.c
··· 889 889 DRM_ERROR("failed!\n"); 890 890 radeon_status(dev_priv); 891 891 #endif 892 - return DRM_ERR(EBUSY); 892 + return -EBUSY; 893 893 } 894 894 895 895 static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) ··· 910 910 DRM_ERROR("failed!\n"); 911 911 radeon_status(dev_priv); 912 912 #endif 913 - return DRM_ERR(EBUSY); 913 + return -EBUSY; 914 914 } 915 915 916 916 static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) ··· 936 936 DRM_ERROR("failed!\n"); 937 937 radeon_status(dev_priv); 938 938 #endif 939 - return DRM_ERR(EBUSY); 939 + return -EBUSY; 940 940 } 941 941 942 942 /* ================================================================ ··· 1394 1394 if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { 1395 1395 DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); 1396 1396 radeon_do_cleanup_cp(dev); 1397 - return DRM_ERR(EINVAL); 1397 + return -EINVAL; 1398 1398 } 1399 1399 1400 1400 if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) { ··· 1409 1409 if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) { 1410 1410 DRM_ERROR("PCI GART memory not allocated!\n"); 1411 1411 radeon_do_cleanup_cp(dev); 1412 - return DRM_ERR(EINVAL); 1412 + return -EINVAL; 1413 1413 } 1414 1414 1415 1415 dev_priv->usec_timeout = init->usec_timeout; ··· 1417 1417 dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { 1418 1418 DRM_DEBUG("TIMEOUT problem!\n"); 1419 1419 radeon_do_cleanup_cp(dev); 1420 - return DRM_ERR(EINVAL); 1420 + return -EINVAL; 1421 1421 } 1422 1422 1423 1423 /* Enable vblank on CRTC1 for older X servers ··· 1446 1446 (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { 1447 1447 DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); 1448 1448 radeon_do_cleanup_cp(dev); 1449 - return DRM_ERR(EINVAL); 1449 + return -EINVAL; 1450 1450 } 1451 1451 1452 1452 switch (init->fb_bpp) { ··· 1515 1515 if (!dev_priv->sarea) { 1516 1516 DRM_ERROR("could not find sarea!\n"); 1517 1517 
radeon_do_cleanup_cp(dev); 1518 - return DRM_ERR(EINVAL); 1518 + return -EINVAL; 1519 1519 } 1520 1520 1521 1521 dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); 1522 1522 if (!dev_priv->cp_ring) { 1523 1523 DRM_ERROR("could not find cp ring region!\n"); 1524 1524 radeon_do_cleanup_cp(dev); 1525 - return DRM_ERR(EINVAL); 1525 + return -EINVAL; 1526 1526 } 1527 1527 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); 1528 1528 if (!dev_priv->ring_rptr) { 1529 1529 DRM_ERROR("could not find ring read pointer!\n"); 1530 1530 radeon_do_cleanup_cp(dev); 1531 - return DRM_ERR(EINVAL); 1531 + return -EINVAL; 1532 1532 } 1533 1533 dev->agp_buffer_token = init->buffers_offset; 1534 1534 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 1535 1535 if (!dev->agp_buffer_map) { 1536 1536 DRM_ERROR("could not find dma buffer region!\n"); 1537 1537 radeon_do_cleanup_cp(dev); 1538 - return DRM_ERR(EINVAL); 1538 + return -EINVAL; 1539 1539 } 1540 1540 1541 1541 if (init->gart_textures_offset) { ··· 1544 1544 if (!dev_priv->gart_textures) { 1545 1545 DRM_ERROR("could not find GART texture region!\n"); 1546 1546 radeon_do_cleanup_cp(dev); 1547 - return DRM_ERR(EINVAL); 1547 + return -EINVAL; 1548 1548 } 1549 1549 } 1550 1550 ··· 1562 1562 !dev->agp_buffer_map->handle) { 1563 1563 DRM_ERROR("could not find ioremap agp regions!\n"); 1564 1564 radeon_do_cleanup_cp(dev); 1565 - return DRM_ERR(EINVAL); 1565 + return -EINVAL; 1566 1566 } 1567 1567 } else 1568 1568 #endif ··· 1710 1710 DRM_ERROR 1711 1711 ("Cannot use PCI Express without GART in FB memory\n"); 1712 1712 radeon_do_cleanup_cp(dev); 1713 - return DRM_ERR(EINVAL); 1713 + return -EINVAL; 1714 1714 } 1715 1715 } 1716 1716 1717 1717 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { 1718 1718 DRM_ERROR("failed to init PCI GART!\n"); 1719 1719 radeon_do_cleanup_cp(dev); 1720 - return DRM_ERR(ENOMEM); 1720 + return -ENOMEM; 1721 1721 } 1722 1722 1723 1723 /* Turn on PCI GART */ ··· 
1797 1797 1798 1798 if (!dev_priv) { 1799 1799 DRM_ERROR("Called with no initialization\n"); 1800 - return DRM_ERR(EINVAL); 1800 + return -EINVAL; 1801 1801 } 1802 1802 1803 1803 DRM_DEBUG("Starting radeon_do_resume_cp()\n"); ··· 1845 1845 return radeon_do_cleanup_cp(dev); 1846 1846 } 1847 1847 1848 - return DRM_ERR(EINVAL); 1848 + return -EINVAL; 1849 1849 } 1850 1850 1851 1851 int radeon_cp_start(DRM_IOCTL_ARGS) ··· 1973 1973 1974 1974 if (!dev_priv) { 1975 1975 DRM_DEBUG("%s called before init done\n", __FUNCTION__); 1976 - return DRM_ERR(EINVAL); 1976 + return -EINVAL; 1977 1977 } 1978 1978 1979 1979 radeon_do_cp_reset(dev_priv); ··· 2167 2167 radeon_status(dev_priv); 2168 2168 DRM_ERROR("failed!\n"); 2169 2169 #endif 2170 - return DRM_ERR(EBUSY); 2170 + return -EBUSY; 2171 2171 } 2172 2172 2173 2173 static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev, ··· 2179 2179 for (i = d->granted_count; i < d->request_count; i++) { 2180 2180 buf = radeon_freelist_get(dev); 2181 2181 if (!buf) 2182 - return DRM_ERR(EBUSY); /* NOTE: broken client */ 2182 + return -EBUSY; /* NOTE: broken client */ 2183 2183 2184 2184 buf->filp = filp; 2185 2185 2186 2186 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, 2187 2187 sizeof(buf->idx))) 2188 - return DRM_ERR(EFAULT); 2188 + return -EFAULT; 2189 2189 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, 2190 2190 sizeof(buf->total))) 2191 - return DRM_ERR(EFAULT); 2191 + return -EFAULT; 2192 2192 2193 2193 d->granted_count++; 2194 2194 } ··· 2212 2212 if (d.send_count != 0) { 2213 2213 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 2214 2214 DRM_CURRENTPID, d.send_count); 2215 - return DRM_ERR(EINVAL); 2215 + return -EINVAL; 2216 2216 } 2217 2217 2218 2218 /* We'll send you buffers. 
··· 2220 2220 if (d.request_count < 0 || d.request_count > dma->buf_count) { 2221 2221 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 2222 2222 DRM_CURRENTPID, d.request_count, dma->buf_count); 2223 - return DRM_ERR(EINVAL); 2223 + return -EINVAL; 2224 2224 } 2225 2225 2226 2226 d.granted_count = 0; ··· 2241 2241 2242 2242 dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER); 2243 2243 if (dev_priv == NULL) 2244 - return DRM_ERR(ENOMEM); 2244 + return -ENOMEM; 2245 2245 2246 2246 memset(dev_priv, 0, sizeof(drm_radeon_private_t)); 2247 2247 dev->dev_private = (void *)dev_priv;
+6 -6
drivers/char/drm/radeon_irq.c
··· 155 155 atomic_t *counter; 156 156 if (!dev_priv) { 157 157 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 158 - return DRM_ERR(EINVAL); 158 + return -EINVAL; 159 159 } 160 160 161 161 if (crtc == DRM_RADEON_VBLANK_CRTC1) { ··· 165 165 counter = &dev->vbl_received2; 166 166 ack |= RADEON_CRTC2_VBLANK_STAT; 167 167 } else 168 - return DRM_ERR(EINVAL); 168 + return -EINVAL; 169 169 170 170 radeon_acknowledge_irqs(dev_priv, ack); 171 171 ··· 207 207 208 208 if (!dev_priv) { 209 209 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 210 - return DRM_ERR(EINVAL); 210 + return -EINVAL; 211 211 } 212 212 213 213 DRM_COPY_FROM_USER_IOCTL(emit, (drm_radeon_irq_emit_t __user *) data, ··· 217 217 218 218 if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { 219 219 DRM_ERROR("copy_to_user\n"); 220 - return DRM_ERR(EFAULT); 220 + return -EFAULT; 221 221 } 222 222 223 223 return 0; ··· 233 233 234 234 if (!dev_priv) { 235 235 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 236 - return DRM_ERR(EINVAL); 236 + return -EINVAL; 237 237 } 238 238 239 239 DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_radeon_irq_wait_t __user *) data, ··· 320 320 drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; 321 321 if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { 322 322 DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value); 323 - return DRM_ERR(EINVAL); 323 + return -EINVAL; 324 324 } 325 325 dev_priv->vblank_crtc = (unsigned int)value; 326 326 radeon_enable_interrupt(dev);
+13 -13
drivers/char/drm/radeon_mem.c
··· 137 137 struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); 138 138 139 139 if (!blocks) 140 - return DRM_ERR(ENOMEM); 140 + return -ENOMEM; 141 141 142 142 *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); 143 143 if (!*heap) { 144 144 drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); 145 - return DRM_ERR(ENOMEM); 145 + return -ENOMEM; 146 146 } 147 147 148 148 blocks->start = start; ··· 226 226 227 227 if (!dev_priv) { 228 228 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 229 - return DRM_ERR(EINVAL); 229 + return -EINVAL; 230 230 } 231 231 232 232 DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_mem_alloc_t __user *) data, ··· 234 234 235 235 heap = get_heap(dev_priv, alloc.region); 236 236 if (!heap || !*heap) 237 - return DRM_ERR(EFAULT); 237 + return -EFAULT; 238 238 239 239 /* Make things easier on ourselves: all allocations at least 240 240 * 4k aligned. ··· 245 245 block = alloc_block(*heap, alloc.size, alloc.alignment, filp); 246 246 247 247 if (!block) 248 - return DRM_ERR(ENOMEM); 248 + return -ENOMEM; 249 249 250 250 if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { 251 251 DRM_ERROR("copy_to_user\n"); 252 - return DRM_ERR(EFAULT); 252 + return -EFAULT; 253 253 } 254 254 255 255 return 0; ··· 264 264 265 265 if (!dev_priv) { 266 266 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 267 - return DRM_ERR(EINVAL); 267 + return -EINVAL; 268 268 } 269 269 270 270 DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data, ··· 272 272 273 273 heap = get_heap(dev_priv, memfree.region); 274 274 if (!heap || !*heap) 275 - return DRM_ERR(EFAULT); 275 + return -EFAULT; 276 276 277 277 block = find_block(*heap, memfree.region_offset); 278 278 if (!block) 279 - return DRM_ERR(EFAULT); 279 + return -EFAULT; 280 280 281 281 if (block->filp != filp) 282 - return DRM_ERR(EPERM); 282 + return -EPERM; 283 283 284 284 free_block(block); 285 285 return 0; ··· 294 294 295 295 if (!dev_priv) { 
296 296 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 297 - return DRM_ERR(EINVAL); 297 + return -EINVAL; 298 298 } 299 299 300 300 DRM_COPY_FROM_USER_IOCTL(initheap, ··· 303 303 304 304 heap = get_heap(dev_priv, initheap.region); 305 305 if (!heap) 306 - return DRM_ERR(EFAULT); 306 + return -EFAULT; 307 307 308 308 if (*heap) { 309 309 DRM_ERROR("heap already initialized?"); 310 - return DRM_ERR(EFAULT); 310 + return -EFAULT; 311 311 } 312 312 313 313 return init_heap(heap, initheap.start, initheap.size);
+77 -77
drivers/char/drm/radeon_state.c
··· 85 85 *offset = off; 86 86 return 0; 87 87 } 88 - return DRM_ERR(EINVAL); 88 + return -EINVAL; 89 89 } 90 90 91 91 static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * ··· 99 99 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 100 100 &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { 101 101 DRM_ERROR("Invalid depth buffer offset\n"); 102 - return DRM_ERR(EINVAL); 102 + return -EINVAL; 103 103 } 104 104 break; 105 105 ··· 107 107 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 108 108 &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { 109 109 DRM_ERROR("Invalid colour buffer offset\n"); 110 - return DRM_ERR(EINVAL); 110 + return -EINVAL; 111 111 } 112 112 break; 113 113 ··· 120 120 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 121 121 &data[0])) { 122 122 DRM_ERROR("Invalid R200 texture offset\n"); 123 - return DRM_ERR(EINVAL); 123 + return -EINVAL; 124 124 } 125 125 break; 126 126 ··· 130 130 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 131 131 &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { 132 132 DRM_ERROR("Invalid R100 texture offset\n"); 133 - return DRM_ERR(EINVAL); 133 + return -EINVAL; 134 134 } 135 135 break; 136 136 ··· 147 147 &data[i])) { 148 148 DRM_ERROR 149 149 ("Invalid R200 cubic texture offset\n"); 150 - return DRM_ERR(EINVAL); 150 + return -EINVAL; 151 151 } 152 152 } 153 153 break; ··· 163 163 &data[i])) { 164 164 DRM_ERROR 165 165 ("Invalid R100 cubic texture offset\n"); 166 - return DRM_ERR(EINVAL); 166 + return -EINVAL; 167 167 } 168 168 } 169 169 } ··· 256 256 257 257 default: 258 258 DRM_ERROR("Unknown state packet ID %d\n", id); 259 - return DRM_ERR(EINVAL); 259 + return -EINVAL; 260 260 } 261 261 262 262 return 0; ··· 277 277 278 278 if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) { 279 279 DRM_ERROR("Not a type 3 packet\n"); 280 - return DRM_ERR(EINVAL); 280 + return -EINVAL; 281 281 } 282 282 283 283 if (4 * *cmdsz > cmdbuf->bufsz) { 284 284 
DRM_ERROR("Packet size larger than size of data provided\n"); 285 - return DRM_ERR(EINVAL); 285 + return -EINVAL; 286 286 } 287 287 288 288 switch(cmd[0] & 0xff00) { ··· 307 307 /* safe but r200 only */ 308 308 if (dev_priv->microcode_version != UCODE_R200) { 309 309 DRM_ERROR("Invalid 3d packet for r100-class chip\n"); 310 - return DRM_ERR(EINVAL); 310 + return -EINVAL; 311 311 } 312 312 break; 313 313 ··· 317 317 if (count > 18) { /* 12 arrays max */ 318 318 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", 319 319 count); 320 - return DRM_ERR(EINVAL); 320 + return -EINVAL; 321 321 } 322 322 323 323 /* carefully check packet contents */ ··· 330 330 DRM_ERROR 331 331 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", 332 332 k, i); 333 - return DRM_ERR(EINVAL); 333 + return -EINVAL; 334 334 } 335 335 k++; 336 336 i++; ··· 341 341 DRM_ERROR 342 342 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", 343 343 k, i); 344 - return DRM_ERR(EINVAL); 344 + return -EINVAL; 345 345 } 346 346 k++; 347 347 i++; ··· 351 351 DRM_ERROR 352 352 ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", 353 353 k, i, narrays, count + 1); 354 - return DRM_ERR(EINVAL); 354 + return -EINVAL; 355 355 } 356 356 break; 357 357 358 358 case RADEON_3D_RNDR_GEN_INDX_PRIM: 359 359 if (dev_priv->microcode_version != UCODE_R100) { 360 360 DRM_ERROR("Invalid 3d packet for r200-class chip\n"); 361 - return DRM_ERR(EINVAL); 361 + return -EINVAL; 362 362 } 363 363 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[1])) { 364 364 DRM_ERROR("Invalid rndr_gen_indx offset\n"); 365 - return DRM_ERR(EINVAL); 365 + return -EINVAL; 366 366 } 367 367 break; 368 368 369 369 case RADEON_CP_INDX_BUFFER: 370 370 if (dev_priv->microcode_version != UCODE_R200) { 371 371 DRM_ERROR("Invalid 3d packet for r100-class chip\n"); 372 - return DRM_ERR(EINVAL); 372 + return -EINVAL; 373 373 } 374 374 if ((cmd[1] & 0x8000ffff) != 0x80000810) { 375 375 DRM_ERROR("Invalid 
indx_buffer reg address %08X\n", cmd[1]); 376 - return DRM_ERR(EINVAL); 376 + return -EINVAL; 377 377 } 378 378 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[2])) { 379 379 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); 380 - return DRM_ERR(EINVAL); 380 + return -EINVAL; 381 381 } 382 382 break; 383 383 ··· 391 391 if (radeon_check_and_fixup_offset 392 392 (dev_priv, filp_priv, &offset)) { 393 393 DRM_ERROR("Invalid first packet offset\n"); 394 - return DRM_ERR(EINVAL); 394 + return -EINVAL; 395 395 } 396 396 cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10; 397 397 } ··· 402 402 if (radeon_check_and_fixup_offset 403 403 (dev_priv, filp_priv, &offset)) { 404 404 DRM_ERROR("Invalid second packet offset\n"); 405 - return DRM_ERR(EINVAL); 405 + return -EINVAL; 406 406 } 407 407 cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10; 408 408 } ··· 410 410 411 411 default: 412 412 DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00); 413 - return DRM_ERR(EINVAL); 413 + return -EINVAL; 414 414 } 415 415 416 416 return 0; ··· 451 451 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 452 452 &ctx->rb3d_depthoffset)) { 453 453 DRM_ERROR("Invalid depth buffer offset\n"); 454 - return DRM_ERR(EINVAL); 454 + return -EINVAL; 455 455 } 456 456 457 457 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 458 458 &ctx->rb3d_coloroffset)) { 459 459 DRM_ERROR("Invalid depth buffer offset\n"); 460 - return DRM_ERR(EINVAL); 460 + return -EINVAL; 461 461 } 462 462 463 463 BEGIN_RING(14); ··· 546 546 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 547 547 &tex[0].pp_txoffset)) { 548 548 DRM_ERROR("Invalid texture offset for unit 0\n"); 549 - return DRM_ERR(EINVAL); 549 + return -EINVAL; 550 550 } 551 551 552 552 BEGIN_RING(9); ··· 566 566 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 567 567 &tex[1].pp_txoffset)) { 568 568 DRM_ERROR("Invalid texture offset for unit 1\n"); 569 - return DRM_ERR(EINVAL); 569 + return -EINVAL; 570 570 } 571 571 572 572 
BEGIN_RING(9); ··· 586 586 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 587 587 &tex[2].pp_txoffset)) { 588 588 DRM_ERROR("Invalid texture offset for unit 2\n"); 589 - return DRM_ERR(EINVAL); 589 + return -EINVAL; 590 590 } 591 591 592 592 BEGIN_RING(9); ··· 1668 1668 1669 1669 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &tex->offset)) { 1670 1670 DRM_ERROR("Invalid destination offset\n"); 1671 - return DRM_ERR(EINVAL); 1671 + return -EINVAL; 1672 1672 } 1673 1673 1674 1674 dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD; ··· 1711 1711 break; 1712 1712 default: 1713 1713 DRM_ERROR("invalid texture format %d\n", tex->format); 1714 - return DRM_ERR(EINVAL); 1714 + return -EINVAL; 1715 1715 } 1716 1716 spitch = blit_width >> 6; 1717 1717 if (spitch == 0 && image->height > 1) 1718 - return DRM_ERR(EINVAL); 1718 + return -EINVAL; 1719 1719 1720 1720 texpitch = tex->pitch; 1721 1721 if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { ··· 1760 1760 if (!buf) { 1761 1761 DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n"); 1762 1762 if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image))) 1763 - return DRM_ERR(EFAULT); 1764 - return DRM_ERR(EAGAIN); 1763 + return -EFAULT; 1764 + return -EAGAIN; 1765 1765 } 1766 1766 1767 1767 /* Dispatch the indirect buffer. 
··· 1774 1774 do { \ 1775 1775 if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\ 1776 1776 DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \ 1777 - return DRM_ERR(EFAULT); \ 1777 + return -EFAULT; \ 1778 1778 } \ 1779 1779 } while(0) 1780 1780 ··· 2083 2083 sizeof(alloc)); 2084 2084 2085 2085 if (alloc_surface(&alloc, dev_priv, filp) == -1) 2086 - return DRM_ERR(EINVAL); 2086 + return -EINVAL; 2087 2087 else 2088 2088 return 0; 2089 2089 } ··· 2098 2098 sizeof(memfree)); 2099 2099 2100 2100 if (free_surface(filp, dev_priv, memfree.address)) 2101 - return DRM_ERR(EINVAL); 2101 + return -EINVAL; 2102 2102 else 2103 2103 return 0; 2104 2104 } ··· 2124 2124 2125 2125 if (DRM_COPY_FROM_USER(&depth_boxes, clear.depth_boxes, 2126 2126 sarea_priv->nbox * sizeof(depth_boxes[0]))) 2127 - return DRM_ERR(EFAULT); 2127 + return -EFAULT; 2128 2128 2129 2129 radeon_cp_dispatch_clear(dev, &clear, depth_boxes); 2130 2130 ··· 2226 2226 if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { 2227 2227 DRM_ERROR("buffer index %d (of %d max)\n", 2228 2228 vertex.idx, dma->buf_count - 1); 2229 - return DRM_ERR(EINVAL); 2229 + return -EINVAL; 2230 2230 } 2231 2231 if (vertex.prim < 0 || vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { 2232 2232 DRM_ERROR("buffer prim %d\n", vertex.prim); 2233 - return DRM_ERR(EINVAL); 2233 + return -EINVAL; 2234 2234 } 2235 2235 2236 2236 RING_SPACE_TEST_WITH_RETURN(dev_priv); ··· 2241 2241 if (buf->filp != filp) { 2242 2242 DRM_ERROR("process %d using buffer owned by %p\n", 2243 2243 DRM_CURRENTPID, buf->filp); 2244 - return DRM_ERR(EINVAL); 2244 + return -EINVAL; 2245 2245 } 2246 2246 if (buf->pending) { 2247 2247 DRM_ERROR("sending pending buffer %d\n", vertex.idx); 2248 - return DRM_ERR(EINVAL); 2248 + return -EINVAL; 2249 2249 } 2250 2250 2251 2251 /* Build up a prim_t record: ··· 2259 2259 sarea_priv->tex_state, 2260 2260 sarea_priv->dirty)) { 2261 2261 DRM_ERROR("radeon_emit_state failed\n"); 2262 - return DRM_ERR(EINVAL); 2262 + return -EINVAL; 
2263 2263 } 2264 2264 2265 2265 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | ··· 2310 2310 if (elts.idx < 0 || elts.idx >= dma->buf_count) { 2311 2311 DRM_ERROR("buffer index %d (of %d max)\n", 2312 2312 elts.idx, dma->buf_count - 1); 2313 - return DRM_ERR(EINVAL); 2313 + return -EINVAL; 2314 2314 } 2315 2315 if (elts.prim < 0 || elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { 2316 2316 DRM_ERROR("buffer prim %d\n", elts.prim); 2317 - return DRM_ERR(EINVAL); 2317 + return -EINVAL; 2318 2318 } 2319 2319 2320 2320 RING_SPACE_TEST_WITH_RETURN(dev_priv); ··· 2325 2325 if (buf->filp != filp) { 2326 2326 DRM_ERROR("process %d using buffer owned by %p\n", 2327 2327 DRM_CURRENTPID, buf->filp); 2328 - return DRM_ERR(EINVAL); 2328 + return -EINVAL; 2329 2329 } 2330 2330 if (buf->pending) { 2331 2331 DRM_ERROR("sending pending buffer %d\n", elts.idx); 2332 - return DRM_ERR(EINVAL); 2332 + return -EINVAL; 2333 2333 } 2334 2334 2335 2335 count = (elts.end - elts.start) / sizeof(u16); ··· 2337 2337 2338 2338 if (elts.start & 0x7) { 2339 2339 DRM_ERROR("misaligned buffer 0x%x\n", elts.start); 2340 - return DRM_ERR(EINVAL); 2340 + return -EINVAL; 2341 2341 } 2342 2342 if (elts.start < buf->used) { 2343 2343 DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used); 2344 - return DRM_ERR(EINVAL); 2344 + return -EINVAL; 2345 2345 } 2346 2346 2347 2347 buf->used = elts.end; ··· 2352 2352 sarea_priv->tex_state, 2353 2353 sarea_priv->dirty)) { 2354 2354 DRM_ERROR("radeon_emit_state failed\n"); 2355 - return DRM_ERR(EINVAL); 2355 + return -EINVAL; 2356 2356 } 2357 2357 2358 2358 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | ··· 2394 2394 2395 2395 if (tex.image == NULL) { 2396 2396 DRM_ERROR("null texture image!\n"); 2397 - return DRM_ERR(EINVAL); 2397 + return -EINVAL; 2398 2398 } 2399 2399 2400 2400 if (DRM_COPY_FROM_USER(&image, 2401 2401 (drm_radeon_tex_image_t __user *) tex.image, 2402 2402 sizeof(image))) 2403 - return DRM_ERR(EFAULT); 2403 + return -EFAULT; 2404 2404 
2405 2405 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2406 2406 VB_AGE_TEST_WITH_RETURN(dev_priv); ··· 2424 2424 sizeof(stipple)); 2425 2425 2426 2426 if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32))) 2427 - return DRM_ERR(EFAULT); 2427 + return -EFAULT; 2428 2428 2429 2429 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2430 2430 ··· 2455 2455 if (indirect.idx < 0 || indirect.idx >= dma->buf_count) { 2456 2456 DRM_ERROR("buffer index %d (of %d max)\n", 2457 2457 indirect.idx, dma->buf_count - 1); 2458 - return DRM_ERR(EINVAL); 2458 + return -EINVAL; 2459 2459 } 2460 2460 2461 2461 buf = dma->buflist[indirect.idx]; ··· 2463 2463 if (buf->filp != filp) { 2464 2464 DRM_ERROR("process %d using buffer owned by %p\n", 2465 2465 DRM_CURRENTPID, buf->filp); 2466 - return DRM_ERR(EINVAL); 2466 + return -EINVAL; 2467 2467 } 2468 2468 if (buf->pending) { 2469 2469 DRM_ERROR("sending pending buffer %d\n", indirect.idx); 2470 - return DRM_ERR(EINVAL); 2470 + return -EINVAL; 2471 2471 } 2472 2472 2473 2473 if (indirect.start < buf->used) { 2474 2474 DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", 2475 2475 indirect.start, buf->used); 2476 - return DRM_ERR(EINVAL); 2476 + return -EINVAL; 2477 2477 } 2478 2478 2479 2479 RING_SPACE_TEST_WITH_RETURN(dev_priv); ··· 2528 2528 if (vertex.idx < 0 || vertex.idx >= dma->buf_count) { 2529 2529 DRM_ERROR("buffer index %d (of %d max)\n", 2530 2530 vertex.idx, dma->buf_count - 1); 2531 - return DRM_ERR(EINVAL); 2531 + return -EINVAL; 2532 2532 } 2533 2533 2534 2534 RING_SPACE_TEST_WITH_RETURN(dev_priv); ··· 2539 2539 if (buf->filp != filp) { 2540 2540 DRM_ERROR("process %d using buffer owned by %p\n", 2541 2541 DRM_CURRENTPID, buf->filp); 2542 - return DRM_ERR(EINVAL); 2542 + return -EINVAL; 2543 2543 } 2544 2544 2545 2545 if (buf->pending) { 2546 2546 DRM_ERROR("sending pending buffer %d\n", vertex.idx); 2547 - return DRM_ERR(EINVAL); 2547 + return -EINVAL; 2548 2548 } 2549 2549 2550 2550 if (sarea_priv->nbox > 
RADEON_NR_SAREA_CLIPRECTS) 2551 - return DRM_ERR(EINVAL); 2551 + return -EINVAL; 2552 2552 2553 2553 for (laststate = 0xff, i = 0; i < vertex.nr_prims; i++) { 2554 2554 drm_radeon_prim_t prim; 2555 2555 drm_radeon_tcl_prim_t tclprim; 2556 2556 2557 2557 if (DRM_COPY_FROM_USER(&prim, &vertex.prim[i], sizeof(prim))) 2558 - return DRM_ERR(EFAULT); 2558 + return -EFAULT; 2559 2559 2560 2560 if (prim.stateidx != laststate) { 2561 2561 drm_radeon_state_t state; ··· 2563 2563 if (DRM_COPY_FROM_USER(&state, 2564 2564 &vertex.state[prim.stateidx], 2565 2565 sizeof(state))) 2566 - return DRM_ERR(EFAULT); 2566 + return -EFAULT; 2567 2567 2568 2568 if (radeon_emit_state2(dev_priv, filp_priv, &state)) { 2569 2569 DRM_ERROR("radeon_emit_state2 failed\n"); 2570 - return DRM_ERR(EINVAL); 2570 + return -EINVAL; 2571 2571 } 2572 2572 2573 2573 laststate = prim.stateidx; ··· 2613 2613 RING_LOCALS; 2614 2614 2615 2615 if (id >= RADEON_MAX_STATE_PACKETS) 2616 - return DRM_ERR(EINVAL); 2616 + return -EINVAL; 2617 2617 2618 2618 sz = packet[id].len; 2619 2619 reg = packet[id].start; 2620 2620 2621 2621 if (sz * sizeof(int) > cmdbuf->bufsz) { 2622 2622 DRM_ERROR("Packet size provided larger than data provided\n"); 2623 - return DRM_ERR(EINVAL); 2623 + return -EINVAL; 2624 2624 } 2625 2625 2626 2626 if (radeon_check_and_fixup_packets(dev_priv, filp_priv, id, data)) { 2627 2627 DRM_ERROR("Packet verification failed\n"); 2628 - return DRM_ERR(EINVAL); 2628 + return -EINVAL; 2629 2629 } 2630 2630 2631 2631 BEGIN_RING(sz + 1); ··· 2713 2713 if (!sz) 2714 2714 return 0; 2715 2715 if (sz * 4 > cmdbuf->bufsz) 2716 - return DRM_ERR(EINVAL); 2716 + return -EINVAL; 2717 2717 2718 2718 BEGIN_RING(5 + sz); 2719 2719 OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); ··· 2781 2781 do { 2782 2782 if (i < cmdbuf->nbox) { 2783 2783 if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box))) 2784 - return DRM_ERR(EFAULT); 2784 + return -EFAULT; 2785 2785 /* FIXME The second and subsequent times round 2786 2786 * this 
loop, send a WAIT_UNTIL_3D_IDLE before 2787 2787 * calling emit_clip_rect(). This fixes a ··· 2839 2839 ADVANCE_RING(); 2840 2840 break; 2841 2841 default: 2842 - return DRM_ERR(EINVAL); 2842 + return -EINVAL; 2843 2843 } 2844 2844 2845 2845 return 0; ··· 2870 2870 VB_AGE_TEST_WITH_RETURN(dev_priv); 2871 2871 2872 2872 if (cmdbuf.bufsz > 64 * 1024 || cmdbuf.bufsz < 0) { 2873 - return DRM_ERR(EINVAL); 2873 + return -EINVAL; 2874 2874 } 2875 2875 2876 2876 /* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid ··· 2881 2881 if (orig_bufsz != 0) { 2882 2882 kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER); 2883 2883 if (kbuf == NULL) 2884 - return DRM_ERR(ENOMEM); 2884 + return -ENOMEM; 2885 2885 if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, 2886 2886 cmdbuf.bufsz)) { 2887 2887 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 2888 - return DRM_ERR(EFAULT); 2888 + return -EFAULT; 2889 2889 } 2890 2890 cmdbuf.buf = kbuf; 2891 2891 } ··· 3012 3012 err: 3013 3013 if (orig_bufsz != 0) 3014 3014 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 3015 - return DRM_ERR(EINVAL); 3015 + return -EINVAL; 3016 3016 } 3017 3017 3018 3018 static int radeon_cp_getparam(DRM_IOCTL_ARGS) ··· 3074 3074 break; 3075 3075 case RADEON_PARAM_SCRATCH_OFFSET: 3076 3076 if (!dev_priv->writeback_works) 3077 - return DRM_ERR(EINVAL); 3077 + return -EINVAL; 3078 3078 value = RADEON_SCRATCH_REG_OFFSET; 3079 3079 break; 3080 3080 case RADEON_PARAM_CARD_TYPE: ··· 3090 3090 break; 3091 3091 default: 3092 3092 DRM_DEBUG("Invalid parameter %d\n", param.param); 3093 - return DRM_ERR(EINVAL); 3093 + return -EINVAL; 3094 3094 } 3095 3095 3096 3096 if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) { 3097 3097 DRM_ERROR("copy_to_user\n"); 3098 - return DRM_ERR(EFAULT); 3098 + return -EFAULT; 3099 3099 } 3100 3100 3101 3101 return 0; ··· 3149 3149 break; 3150 3150 default: 3151 3151 DRM_DEBUG("Invalid parameter %d\n", sp.param); 3152 - return DRM_ERR(EINVAL); 3152 + return -EINVAL; 3153 3153 
} 3154 3154 3155 3155 return 0;
+30 -30
drivers/char/drm/savage_bci.c
··· 60 60 DRM_ERROR("failed!\n"); 61 61 DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold); 62 62 #endif 63 - return DRM_ERR(EBUSY); 63 + return -EBUSY; 64 64 } 65 65 66 66 static int ··· 81 81 DRM_ERROR("failed!\n"); 82 82 DRM_INFO(" status=0x%08x\n", status); 83 83 #endif 84 - return DRM_ERR(EBUSY); 84 + return -EBUSY; 85 85 } 86 86 87 87 static int ··· 102 102 DRM_ERROR("failed!\n"); 103 103 DRM_INFO(" status=0x%08x\n", status); 104 104 #endif 105 - return DRM_ERR(EBUSY); 105 + return -EBUSY; 106 106 } 107 107 108 108 /* ··· 136 136 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); 137 137 #endif 138 138 139 - return DRM_ERR(EBUSY); 139 + return -EBUSY; 140 140 } 141 141 142 142 static int ··· 158 158 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); 159 159 #endif 160 160 161 - return DRM_ERR(EBUSY); 161 + return -EBUSY; 162 162 } 163 163 164 164 uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv, ··· 301 301 dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) * 302 302 dev_priv->nr_dma_pages, DRM_MEM_DRIVER); 303 303 if (dev_priv->dma_pages == NULL) 304 - return DRM_ERR(ENOMEM); 304 + return -ENOMEM; 305 305 306 306 for (i = 0; i < dev_priv->nr_dma_pages; ++i) { 307 307 SET_AGE(&dev_priv->dma_pages[i].age, 0, 0); ··· 541 541 542 542 dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER); 543 543 if (dev_priv == NULL) 544 - return DRM_ERR(ENOMEM); 544 + return -ENOMEM; 545 545 546 546 memset(dev_priv, 0, sizeof(drm_savage_private_t)); 547 547 dev->dev_private = (void *)dev_priv; ··· 682 682 683 683 if (init->fb_bpp != 16 && init->fb_bpp != 32) { 684 684 DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp); 685 - return DRM_ERR(EINVAL); 685 + return -EINVAL; 686 686 } 687 687 if (init->depth_bpp != 16 && init->depth_bpp != 32) { 688 688 DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp); 689 - return DRM_ERR(EINVAL); 689 + return -EINVAL; 690 690 } 691 691 if (init->dma_type != SAVAGE_DMA_AGP && 692 
692 init->dma_type != SAVAGE_DMA_PCI) { 693 693 DRM_ERROR("invalid dma memory type %d!\n", init->dma_type); 694 - return DRM_ERR(EINVAL); 694 + return -EINVAL; 695 695 } 696 696 697 697 dev_priv->cob_size = init->cob_size; ··· 715 715 if (!dev_priv->sarea) { 716 716 DRM_ERROR("could not find sarea!\n"); 717 717 savage_do_cleanup_bci(dev); 718 - return DRM_ERR(EINVAL); 718 + return -EINVAL; 719 719 } 720 720 if (init->status_offset != 0) { 721 721 dev_priv->status = drm_core_findmap(dev, init->status_offset); 722 722 if (!dev_priv->status) { 723 723 DRM_ERROR("could not find shadow status region!\n"); 724 724 savage_do_cleanup_bci(dev); 725 - return DRM_ERR(EINVAL); 725 + return -EINVAL; 726 726 } 727 727 } else { 728 728 dev_priv->status = NULL; ··· 734 734 if (!dev->agp_buffer_map) { 735 735 DRM_ERROR("could not find DMA buffer region!\n"); 736 736 savage_do_cleanup_bci(dev); 737 - return DRM_ERR(EINVAL); 737 + return -EINVAL; 738 738 } 739 739 drm_core_ioremap(dev->agp_buffer_map, dev); 740 740 if (!dev->agp_buffer_map) { 741 741 DRM_ERROR("failed to ioremap DMA buffer region!\n"); 742 742 savage_do_cleanup_bci(dev); 743 - return DRM_ERR(ENOMEM); 743 + return -ENOMEM; 744 744 } 745 745 } 746 746 if (init->agp_textures_offset) { ··· 749 749 if (!dev_priv->agp_textures) { 750 750 DRM_ERROR("could not find agp texture region!\n"); 751 751 savage_do_cleanup_bci(dev); 752 - return DRM_ERR(EINVAL); 752 + return -EINVAL; 753 753 } 754 754 } else { 755 755 dev_priv->agp_textures = NULL; ··· 760 760 DRM_ERROR("command DMA not supported on " 761 761 "Savage3D/MX/IX.\n"); 762 762 savage_do_cleanup_bci(dev); 763 - return DRM_ERR(EINVAL); 763 + return -EINVAL; 764 764 } 765 765 if (dev->dma && dev->dma->buflist) { 766 766 DRM_ERROR("command and vertex DMA not supported " 767 767 "at the same time.\n"); 768 768 savage_do_cleanup_bci(dev); 769 - return DRM_ERR(EINVAL); 769 + return -EINVAL; 770 770 } 771 771 dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset); 772 
772 if (!dev_priv->cmd_dma) { 773 773 DRM_ERROR("could not find command DMA region!\n"); 774 774 savage_do_cleanup_bci(dev); 775 - return DRM_ERR(EINVAL); 775 + return -EINVAL; 776 776 } 777 777 if (dev_priv->dma_type == SAVAGE_DMA_AGP) { 778 778 if (dev_priv->cmd_dma->type != _DRM_AGP) { 779 779 DRM_ERROR("AGP command DMA region is not a " 780 780 "_DRM_AGP map!\n"); 781 781 savage_do_cleanup_bci(dev); 782 - return DRM_ERR(EINVAL); 782 + return -EINVAL; 783 783 } 784 784 drm_core_ioremap(dev_priv->cmd_dma, dev); 785 785 if (!dev_priv->cmd_dma->handle) { 786 786 DRM_ERROR("failed to ioremap command " 787 787 "DMA region!\n"); 788 788 savage_do_cleanup_bci(dev); 789 - return DRM_ERR(ENOMEM); 789 + return -ENOMEM; 790 790 } 791 791 } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) { 792 792 DRM_ERROR("PCI command DMA region is not a " 793 793 "_DRM_CONSISTENT map!\n"); 794 794 savage_do_cleanup_bci(dev); 795 - return DRM_ERR(EINVAL); 795 + return -EINVAL; 796 796 } 797 797 } else { 798 798 dev_priv->cmd_dma = NULL; ··· 809 809 if (!dev_priv->fake_dma.handle) { 810 810 DRM_ERROR("could not allocate faked DMA buffer!\n"); 811 811 savage_do_cleanup_bci(dev); 812 - return DRM_ERR(ENOMEM); 812 + return -ENOMEM; 813 813 } 814 814 dev_priv->cmd_dma = &dev_priv->fake_dma; 815 815 dev_priv->dma_flush = savage_fake_dma_flush; ··· 886 886 if (savage_freelist_init(dev) < 0) { 887 887 DRM_ERROR("could not initialize freelist\n"); 888 888 savage_do_cleanup_bci(dev); 889 - return DRM_ERR(ENOMEM); 889 + return -ENOMEM; 890 890 } 891 891 892 892 if (savage_dma_init(dev_priv) < 0) { 893 893 DRM_ERROR("could not initialize command DMA\n"); 894 894 savage_do_cleanup_bci(dev); 895 - return DRM_ERR(ENOMEM); 895 + return -ENOMEM; 896 896 } 897 897 898 898 return 0; ··· 945 945 return savage_do_cleanup_bci(dev); 946 946 } 947 947 948 - return DRM_ERR(EINVAL); 948 + return -EINVAL; 949 949 } 950 950 951 951 static int savage_bci_event_emit(DRM_IOCTL_ARGS) ··· 1015 1015 for (i = 
d->granted_count; i < d->request_count; i++) { 1016 1016 buf = savage_freelist_get(dev); 1017 1017 if (!buf) 1018 - return DRM_ERR(EAGAIN); 1018 + return -EAGAIN; 1019 1019 1020 1020 buf->filp = filp; 1021 1021 1022 1022 if (DRM_COPY_TO_USER(&d->request_indices[i], 1023 1023 &buf->idx, sizeof(buf->idx))) 1024 - return DRM_ERR(EFAULT); 1024 + return -EFAULT; 1025 1025 if (DRM_COPY_TO_USER(&d->request_sizes[i], 1026 1026 &buf->total, sizeof(buf->total))) 1027 - return DRM_ERR(EFAULT); 1027 + return -EFAULT; 1028 1028 1029 1029 d->granted_count++; 1030 1030 } ··· 1047 1047 if (d.send_count != 0) { 1048 1048 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", 1049 1049 DRM_CURRENTPID, d.send_count); 1050 - return DRM_ERR(EINVAL); 1050 + return -EINVAL; 1051 1051 } 1052 1052 1053 1053 /* We'll send you buffers. ··· 1055 1055 if (d.request_count < 0 || d.request_count > dma->buf_count) { 1056 1056 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", 1057 1057 DRM_CURRENTPID, d.request_count, dma->buf_count); 1058 - return DRM_ERR(EINVAL); 1058 + return -EINVAL; 1059 1059 } 1060 1060 1061 1061 d.granted_count = 0;
+48 -48
drivers/char/drm/savage_state.c
··· 83 83 { 84 84 if ((addr & 6) != 2) { /* reserved bits */ 85 85 DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr); 86 - return DRM_ERR(EINVAL); 86 + return -EINVAL; 87 87 } 88 88 if (!(addr & 1)) { /* local */ 89 89 addr &= ~7; ··· 92 92 DRM_ERROR 93 93 ("bad texAddr%d %08x (local addr out of range)\n", 94 94 unit, addr); 95 - return DRM_ERR(EINVAL); 95 + return -EINVAL; 96 96 } 97 97 } else { /* AGP */ 98 98 if (!dev_priv->agp_textures) { 99 99 DRM_ERROR("bad texAddr%d %08x (AGP not available)\n", 100 100 unit, addr); 101 - return DRM_ERR(EINVAL); 101 + return -EINVAL; 102 102 } 103 103 addr &= ~7; 104 104 if (addr < dev_priv->agp_textures->offset || ··· 107 107 DRM_ERROR 108 108 ("bad texAddr%d %08x (AGP addr out of range)\n", 109 109 unit, addr); 110 - return DRM_ERR(EINVAL); 110 + return -EINVAL; 111 111 } 112 112 } 113 113 return 0; ··· 133 133 start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { 134 134 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", 135 135 start, start + count - 1); 136 - return DRM_ERR(EINVAL); 136 + return -EINVAL; 137 137 } 138 138 139 139 SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart, ··· 165 165 start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) { 166 166 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", 167 167 start, start + count - 1); 168 - return DRM_ERR(EINVAL); 168 + return -EINVAL; 169 169 } 170 170 171 171 SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0, ··· 289 289 290 290 if (!dmabuf) { 291 291 DRM_ERROR("called without dma buffers!\n"); 292 - return DRM_ERR(EINVAL); 292 + return -EINVAL; 293 293 } 294 294 295 295 if (!n) ··· 303 303 if (n % 3 != 0) { 304 304 DRM_ERROR("wrong number of vertices %u in TRILIST\n", 305 305 n); 306 - return DRM_ERR(EINVAL); 306 + return -EINVAL; 307 307 } 308 308 break; 309 309 case SAVAGE_PRIM_TRISTRIP: ··· 312 312 DRM_ERROR 313 313 ("wrong number of vertices %u in TRIFAN/STRIP\n", 314 314 n); 315 - return DRM_ERR(EINVAL); 315 + return -EINVAL; 316 316 } 317 317 
break; 318 318 default: 319 319 DRM_ERROR("invalid primitive type %u\n", prim); 320 - return DRM_ERR(EINVAL); 320 + return -EINVAL; 321 321 } 322 322 323 323 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 324 324 if (skip != 0) { 325 325 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); 326 - return DRM_ERR(EINVAL); 326 + return -EINVAL; 327 327 } 328 328 } else { 329 329 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - ··· 331 331 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); 332 332 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { 333 333 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); 334 - return DRM_ERR(EINVAL); 334 + return -EINVAL; 335 335 } 336 336 if (reorder) { 337 337 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); 338 - return DRM_ERR(EINVAL); 338 + return -EINVAL; 339 339 } 340 340 } 341 341 342 342 if (start + n > dmabuf->total / 32) { 343 343 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", 344 344 start, start + n - 1, dmabuf->total / 32); 345 - return DRM_ERR(EINVAL); 345 + return -EINVAL; 346 346 } 347 347 348 348 /* Vertex DMA doesn't work with command DMA at the same time, ··· 440 440 if (n % 3 != 0) { 441 441 DRM_ERROR("wrong number of vertices %u in TRILIST\n", 442 442 n); 443 - return DRM_ERR(EINVAL); 443 + return -EINVAL; 444 444 } 445 445 break; 446 446 case SAVAGE_PRIM_TRISTRIP: ··· 449 449 DRM_ERROR 450 450 ("wrong number of vertices %u in TRIFAN/STRIP\n", 451 451 n); 452 - return DRM_ERR(EINVAL); 452 + return -EINVAL; 453 453 } 454 454 break; 455 455 default: 456 456 DRM_ERROR("invalid primitive type %u\n", prim); 457 - return DRM_ERR(EINVAL); 457 + return -EINVAL; 458 458 } 459 459 460 460 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 461 461 if (skip > SAVAGE_SKIP_ALL_S3D) { 462 462 DRM_ERROR("invalid skip flags 0x%04x\n", skip); 463 - return DRM_ERR(EINVAL); 463 + return -EINVAL; 464 464 } 465 465 vtx_size = 8; /* full vertex */ 466 466 } else { 467 467 if (skip > SAVAGE_SKIP_ALL_S4) { 468 468 
DRM_ERROR("invalid skip flags 0x%04x\n", skip); 469 - return DRM_ERR(EINVAL); 469 + return -EINVAL; 470 470 } 471 471 vtx_size = 10; /* full vertex */ 472 472 } ··· 478 478 if (vtx_size > vb_stride) { 479 479 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", 480 480 vtx_size, vb_stride); 481 - return DRM_ERR(EINVAL); 481 + return -EINVAL; 482 482 } 483 483 484 484 if (start + n > vb_size / (vb_stride * 4)) { 485 485 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", 486 486 start, start + n - 1, vb_size / (vb_stride * 4)); 487 - return DRM_ERR(EINVAL); 487 + return -EINVAL; 488 488 } 489 489 490 490 prim <<= 25; ··· 547 547 548 548 if (!dmabuf) { 549 549 DRM_ERROR("called without dma buffers!\n"); 550 - return DRM_ERR(EINVAL); 550 + return -EINVAL; 551 551 } 552 552 553 553 if (!n) ··· 560 560 case SAVAGE_PRIM_TRILIST: 561 561 if (n % 3 != 0) { 562 562 DRM_ERROR("wrong number of indices %u in TRILIST\n", n); 563 - return DRM_ERR(EINVAL); 563 + return -EINVAL; 564 564 } 565 565 break; 566 566 case SAVAGE_PRIM_TRISTRIP: ··· 568 568 if (n < 3) { 569 569 DRM_ERROR 570 570 ("wrong number of indices %u in TRIFAN/STRIP\n", n); 571 - return DRM_ERR(EINVAL); 571 + return -EINVAL; 572 572 } 573 573 break; 574 574 default: 575 575 DRM_ERROR("invalid primitive type %u\n", prim); 576 - return DRM_ERR(EINVAL); 576 + return -EINVAL; 577 577 } 578 578 579 579 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 580 580 if (skip != 0) { 581 581 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); 582 - return DRM_ERR(EINVAL); 582 + return -EINVAL; 583 583 } 584 584 } else { 585 585 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - ··· 587 587 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); 588 588 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { 589 589 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); 590 - return DRM_ERR(EINVAL); 590 + return -EINVAL; 591 591 } 592 592 if (reorder) { 593 593 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); 594 - 
return DRM_ERR(EINVAL); 594 + return -EINVAL; 595 595 } 596 596 } 597 597 ··· 628 628 if (idx[i] > dmabuf->total / 32) { 629 629 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", 630 630 i, idx[i], dmabuf->total / 32); 631 - return DRM_ERR(EINVAL); 631 + return -EINVAL; 632 632 } 633 633 } 634 634 ··· 698 698 case SAVAGE_PRIM_TRILIST: 699 699 if (n % 3 != 0) { 700 700 DRM_ERROR("wrong number of indices %u in TRILIST\n", n); 701 - return DRM_ERR(EINVAL); 701 + return -EINVAL; 702 702 } 703 703 break; 704 704 case SAVAGE_PRIM_TRISTRIP: ··· 706 706 if (n < 3) { 707 707 DRM_ERROR 708 708 ("wrong number of indices %u in TRIFAN/STRIP\n", n); 709 - return DRM_ERR(EINVAL); 709 + return -EINVAL; 710 710 } 711 711 break; 712 712 default: 713 713 DRM_ERROR("invalid primitive type %u\n", prim); 714 - return DRM_ERR(EINVAL); 714 + return -EINVAL; 715 715 } 716 716 717 717 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 718 718 if (skip > SAVAGE_SKIP_ALL_S3D) { 719 719 DRM_ERROR("invalid skip flags 0x%04x\n", skip); 720 - return DRM_ERR(EINVAL); 720 + return -EINVAL; 721 721 } 722 722 vtx_size = 8; /* full vertex */ 723 723 } else { 724 724 if (skip > SAVAGE_SKIP_ALL_S4) { 725 725 DRM_ERROR("invalid skip flags 0x%04x\n", skip); 726 - return DRM_ERR(EINVAL); 726 + return -EINVAL; 727 727 } 728 728 vtx_size = 10; /* full vertex */ 729 729 } ··· 735 735 if (vtx_size > vb_stride) { 736 736 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", 737 737 vtx_size, vb_stride); 738 - return DRM_ERR(EINVAL); 738 + return -EINVAL; 739 739 } 740 740 741 741 prim <<= 25; ··· 748 748 if (idx[i] > vb_size / (vb_stride * 4)) { 749 749 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", 750 750 i, idx[i], vb_size / (vb_stride * 4)); 751 - return DRM_ERR(EINVAL); 751 + return -EINVAL; 752 752 } 753 753 } 754 754 ··· 942 942 DRM_ERROR("IMPLEMENTATION ERROR: " 943 943 "non-drawing-command %d\n", 944 944 cmd_header.cmd.cmd); 945 - return DRM_ERR(EINVAL); 945 + return -EINVAL; 946 946 } 947 947 948 948 if 
(ret != 0) ··· 979 979 DRM_ERROR 980 980 ("vertex buffer index %u out of range (0-%u)\n", 981 981 cmdbuf.dma_idx, dma->buf_count - 1); 982 - return DRM_ERR(EINVAL); 982 + return -EINVAL; 983 983 } 984 984 dmabuf = dma->buflist[cmdbuf.dma_idx]; 985 985 } else { ··· 994 994 if (cmdbuf.size) { 995 995 kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER); 996 996 if (kcmd_addr == NULL) 997 - return DRM_ERR(ENOMEM); 997 + return -ENOMEM; 998 998 999 999 if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr, 1000 1000 cmdbuf.size * 8)) 1001 1001 { 1002 1002 drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER); 1003 - return DRM_ERR(EFAULT); 1003 + return -EFAULT; 1004 1004 } 1005 1005 cmdbuf.cmd_addr = kcmd_addr; 1006 1006 } 1007 1007 if (cmdbuf.vb_size) { 1008 1008 kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER); 1009 1009 if (kvb_addr == NULL) { 1010 - ret = DRM_ERR(ENOMEM); 1010 + ret = -ENOMEM; 1011 1011 goto done; 1012 1012 } 1013 1013 1014 1014 if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr, 1015 1015 cmdbuf.vb_size)) { 1016 - ret = DRM_ERR(EFAULT); 1016 + ret = -EFAULT; 1017 1017 goto done; 1018 1018 } 1019 1019 cmdbuf.vb_addr = kvb_addr; ··· 1022 1022 kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(struct drm_clip_rect), 1023 1023 DRM_MEM_DRIVER); 1024 1024 if (kbox_addr == NULL) { 1025 - ret = DRM_ERR(ENOMEM); 1025 + ret = -ENOMEM; 1026 1026 goto done; 1027 1027 } 1028 1028 1029 1029 if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr, 1030 1030 cmdbuf.nbox * sizeof(struct drm_clip_rect))) { 1031 - ret = DRM_ERR(EFAULT); 1031 + ret = -EFAULT; 1032 1032 goto done; 1033 1033 } 1034 1034 cmdbuf.box_addr = kbox_addr; ··· 1061 1061 DRM_ERROR("indexed drawing command extends " 1062 1062 "beyond end of command buffer\n"); 1063 1063 DMA_FLUSH(); 1064 - return DRM_ERR(EINVAL); 1064 + return -EINVAL; 1065 1065 } 1066 1066 /* fall through */ 1067 1067 case SAVAGE_CMD_DMA_PRIM: ··· 1094 1094 DRM_ERROR("command SAVAGE_CMD_STATE extends " 1095 1095 "beyond end of command 
buffer\n"); 1096 1096 DMA_FLUSH(); 1097 - ret = DRM_ERR(EINVAL); 1097 + ret = -EINVAL; 1098 1098 goto done; 1099 1099 } 1100 1100 ret = savage_dispatch_state(dev_priv, &cmd_header, ··· 1107 1107 DRM_ERROR("command SAVAGE_CMD_CLEAR extends " 1108 1108 "beyond end of command buffer\n"); 1109 1109 DMA_FLUSH(); 1110 - ret = DRM_ERR(EINVAL); 1110 + ret = -EINVAL; 1111 1111 goto done; 1112 1112 } 1113 1113 ret = savage_dispatch_clear(dev_priv, &cmd_header, ··· 1123 1123 default: 1124 1124 DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); 1125 1125 DMA_FLUSH(); 1126 - ret = DRM_ERR(EINVAL); 1126 + ret = -EINVAL; 1127 1127 goto done; 1128 1128 } 1129 1129
+1 -1
drivers/char/drm/sis_drv.c
··· 42 42 43 43 dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER); 44 44 if (dev_priv == NULL) 45 - return DRM_ERR(ENOMEM); 45 + return -ENOMEM; 46 46 47 47 dev->dev_private = (void *)dev_priv; 48 48 dev_priv->chipset = chipset;
+2 -2
drivers/char/drm/sis_mm.c
··· 140 140 dev_priv->agp_initialized)) { 141 141 DRM_ERROR 142 142 ("Attempt to allocate from uninitialized memory manager.\n"); 143 - return DRM_ERR(EINVAL); 143 + return -EINVAL; 144 144 } 145 145 146 146 mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; ··· 159 159 mem.offset = 0; 160 160 mem.size = 0; 161 161 mem.free = 0; 162 - retval = DRM_ERR(ENOMEM); 162 + retval = -ENOMEM; 163 163 } 164 164 165 165 DRM_COPY_TO_USER_IOCTL(argp, mem, sizeof(mem));
+20 -20
drivers/char/drm/via_dma.c
··· 175 175 { 176 176 if (!dev_priv || !dev_priv->mmio) { 177 177 DRM_ERROR("via_dma_init called before via_map_init\n"); 178 - return DRM_ERR(EFAULT); 178 + return -EFAULT; 179 179 } 180 180 181 181 if (dev_priv->ring.virtual_start != NULL) { 182 182 DRM_ERROR("%s called again without calling cleanup\n", 183 183 __FUNCTION__); 184 - return DRM_ERR(EFAULT); 184 + return -EFAULT; 185 185 } 186 186 187 187 if (!dev->agp || !dev->agp->base) { 188 188 DRM_ERROR("%s called with no agp memory available\n", 189 189 __FUNCTION__); 190 - return DRM_ERR(EFAULT); 190 + return -EFAULT; 191 191 } 192 192 193 193 if (dev_priv->chipset == VIA_DX9_0) { 194 194 DRM_ERROR("AGP DMA is not supported on this chip\n"); 195 - return DRM_ERR(EINVAL); 195 + return -EINVAL; 196 196 } 197 197 198 198 dev_priv->ring.map.offset = dev->agp->base + init->offset; ··· 207 207 via_dma_cleanup(dev); 208 208 DRM_ERROR("can not ioremap virtual address for" 209 209 " ring buffer\n"); 210 - return DRM_ERR(ENOMEM); 210 + return -ENOMEM; 211 211 } 212 212 213 213 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; ··· 240 240 switch (init.func) { 241 241 case VIA_INIT_DMA: 242 242 if (!DRM_SUSER(DRM_CURPROC)) 243 - retcode = DRM_ERR(EPERM); 243 + retcode = -EPERM; 244 244 else 245 245 retcode = via_initialize(dev, dev_priv, &init); 246 246 break; 247 247 case VIA_CLEANUP_DMA: 248 248 if (!DRM_SUSER(DRM_CURPROC)) 249 - retcode = DRM_ERR(EPERM); 249 + retcode = -EPERM; 250 250 else 251 251 retcode = via_dma_cleanup(dev); 252 252 break; 253 253 case VIA_DMA_INITIALIZED: 254 254 retcode = (dev_priv->ring.virtual_start != NULL) ? 
255 - 0 : DRM_ERR(EFAULT); 255 + 0 : -EFAULT; 256 256 break; 257 257 default: 258 - retcode = DRM_ERR(EINVAL); 258 + retcode = -EINVAL; 259 259 break; 260 260 } 261 261 ··· 273 273 if (dev_priv->ring.virtual_start == NULL) { 274 274 DRM_ERROR("%s called without initializing AGP ring buffer.\n", 275 275 __FUNCTION__); 276 - return DRM_ERR(EFAULT); 276 + return -EFAULT; 277 277 } 278 278 279 279 if (cmd->size > VIA_PCI_BUF_SIZE) { 280 - return DRM_ERR(ENOMEM); 280 + return -ENOMEM; 281 281 } 282 282 283 283 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) 284 - return DRM_ERR(EFAULT); 284 + return -EFAULT; 285 285 286 286 /* 287 287 * Running this function on AGP memory is dead slow. Therefore ··· 297 297 298 298 vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size); 299 299 if (vb == NULL) { 300 - return DRM_ERR(EAGAIN); 300 + return -EAGAIN; 301 301 } 302 302 303 303 memcpy(vb, dev_priv->pci_buf, cmd->size); ··· 321 321 drm_via_private_t *dev_priv = dev->dev_private; 322 322 323 323 if (!via_wait_idle(dev_priv)) { 324 - return DRM_ERR(EBUSY); 324 + return -EBUSY; 325 325 } 326 326 return 0; 327 327 } ··· 363 363 int ret; 364 364 365 365 if (cmd->size > VIA_PCI_BUF_SIZE) { 366 - return DRM_ERR(ENOMEM); 366 + return -ENOMEM; 367 367 } 368 368 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) 369 - return DRM_ERR(EFAULT); 369 + return -EFAULT; 370 370 371 371 if ((ret = 372 372 via_verify_command_stream((uint32_t *) dev_priv->pci_buf, ··· 669 669 if (dev_priv->ring.virtual_start == NULL) { 670 670 DRM_ERROR("%s called without initializing AGP ring buffer.\n", 671 671 __FUNCTION__); 672 - return DRM_ERR(EFAULT); 672 + return -EFAULT; 673 673 } 674 674 675 675 DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t __user *) data, ··· 687 687 } 688 688 if (!count) { 689 689 DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n"); 690 - ret = DRM_ERR(EAGAIN); 690 + ret = -EAGAIN; 691 691 } 692 692 break; 693 693 case VIA_CMDBUF_LAG: ··· 699 
699 } 700 700 if (!count) { 701 701 DRM_ERROR("VIA_CMDBUF_LAG timed out.\n"); 702 - ret = DRM_ERR(EAGAIN); 702 + ret = -EAGAIN; 703 703 } 704 704 break; 705 705 default: 706 - ret = DRM_ERR(EFAULT); 706 + ret = -EFAULT; 707 707 } 708 708 d_siz.size = tmp_size; 709 709
+16 -16
drivers/char/drm/via_dmablit.c
··· 237 237 first_pfn + 1; 238 238 239 239 if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) 240 - return DRM_ERR(ENOMEM); 240 + return -ENOMEM; 241 241 memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); 242 242 down_read(&current->mm->mmap_sem); 243 243 ret = get_user_pages(current, current->mm, ··· 251 251 if (ret < 0) 252 252 return ret; 253 253 vsg->state = dr_via_pages_locked; 254 - return DRM_ERR(EINVAL); 254 + return -EINVAL; 255 255 } 256 256 vsg->state = dr_via_pages_locked; 257 257 DRM_DEBUG("DMA pages locked\n"); ··· 274 274 vsg->descriptors_per_page; 275 275 276 276 if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL))) 277 - return DRM_ERR(ENOMEM); 277 + return -ENOMEM; 278 278 279 279 vsg->state = dr_via_desc_pages_alloc; 280 280 for (i=0; i<vsg->num_desc_pages; ++i) { 281 281 if (NULL == (vsg->desc_pages[i] = 282 282 (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL))) 283 - return DRM_ERR(ENOMEM); 283 + return -ENOMEM; 284 284 } 285 285 DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages, 286 286 vsg->num_desc); ··· 593 593 594 594 if (xfer->num_lines <= 0 || xfer->line_length <= 0) { 595 595 DRM_ERROR("Zero size bitblt.\n"); 596 - return DRM_ERR(EINVAL); 596 + return -EINVAL; 597 597 } 598 598 599 599 /* ··· 606 606 if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) { 607 607 DRM_ERROR("Too large system memory stride. 
Stride: %d, " 608 608 "Length: %d\n", xfer->mem_stride, xfer->line_length); 609 - return DRM_ERR(EINVAL); 609 + return -EINVAL; 610 610 } 611 611 612 612 if ((xfer->mem_stride == xfer->line_length) && ··· 624 624 625 625 if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) { 626 626 DRM_ERROR("Too large PCI DMA bitblt.\n"); 627 - return DRM_ERR(EINVAL); 627 + return -EINVAL; 628 628 } 629 629 630 630 /* ··· 635 635 if (xfer->mem_stride < xfer->line_length || 636 636 abs(xfer->fb_stride) < xfer->line_length) { 637 637 DRM_ERROR("Invalid frame-buffer / memory stride.\n"); 638 - return DRM_ERR(EINVAL); 638 + return -EINVAL; 639 639 } 640 640 641 641 /* ··· 648 648 if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) || 649 649 ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) { 650 650 DRM_ERROR("Invalid DRM bitblt alignment.\n"); 651 - return DRM_ERR(EINVAL); 651 + return -EINVAL; 652 652 } 653 653 #else 654 654 if ((((unsigned long)xfer->mem_addr & 15) || ··· 656 656 ((xfer->num_lines > 1) && 657 657 ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) { 658 658 DRM_ERROR("Invalid DRM bitblt alignment.\n"); 659 - return DRM_ERR(EINVAL); 659 + return -EINVAL; 660 660 } 661 661 #endif 662 662 ··· 696 696 697 697 DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0); 698 698 if (ret) { 699 - return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret; 699 + return (-EINTR == ret) ? -EAGAIN : ret; 700 700 } 701 701 702 702 spin_lock_irqsave(&blitq->blit_lock, irqsave); ··· 740 740 741 741 if (dev_priv == NULL) { 742 742 DRM_ERROR("Called without initialization.\n"); 743 - return DRM_ERR(EINVAL); 743 + return -EINVAL; 744 744 } 745 745 746 746 engine = (xfer->to_fb) ? 
0 : 1; ··· 750 750 } 751 751 if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) { 752 752 via_dmablit_release_slot(blitq); 753 - return DRM_ERR(ENOMEM); 753 + return -ENOMEM; 754 754 } 755 755 if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) { 756 756 via_dmablit_release_slot(blitq); ··· 790 790 DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync)); 791 791 792 792 if (sync.engine >= VIA_NUM_BLIT_ENGINES) 793 - return DRM_ERR(EINVAL); 793 + return -EINVAL; 794 794 795 795 err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); 796 796 797 - if (DRM_ERR(EINTR) == err) 798 - err = DRM_ERR(EAGAIN); 797 + if (-EINTR == err) 798 + err = -EAGAIN; 799 799 800 800 return err; 801 801 }
+7 -7
drivers/char/drm/via_irq.c
··· 205 205 206 206 if (!dev_priv) { 207 207 DRM_ERROR("%s called with no initialization\n", __FUNCTION__); 208 - return DRM_ERR(EINVAL); 208 + return -EINVAL; 209 209 } 210 210 211 211 if (irq >= drm_via_irq_num) { 212 212 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, 213 213 irq); 214 - return DRM_ERR(EINVAL); 214 + return -EINVAL; 215 215 } 216 216 217 217 real_irq = dev_priv->irq_map[irq]; ··· 219 219 if (real_irq < 0) { 220 220 DRM_ERROR("%s Video IRQ %d not available on this hardware.\n", 221 221 __FUNCTION__, irq); 222 - return DRM_ERR(EINVAL); 222 + return -EINVAL; 223 223 } 224 224 225 225 masks = dev_priv->irq_masks; ··· 343 343 int force_sequence; 344 344 345 345 if (!dev->irq) 346 - return DRM_ERR(EINVAL); 346 + return -EINVAL; 347 347 348 348 DRM_COPY_FROM_USER_IOCTL(irqwait, argp, sizeof(irqwait)); 349 349 if (irqwait.request.irq >= dev_priv->num_irqs) { 350 350 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, 351 351 irqwait.request.irq); 352 - return DRM_ERR(EINVAL); 352 + return -EINVAL; 353 353 } 354 354 355 355 cur_irq += irqwait.request.irq; ··· 361 361 case VIA_IRQ_ABSOLUTE: 362 362 break; 363 363 default: 364 - return DRM_ERR(EINVAL); 364 + return -EINVAL; 365 365 } 366 366 367 367 if (irqwait.request.type & VIA_IRQ_SIGNAL) { 368 368 DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n", 369 369 __FUNCTION__); 370 - return DRM_ERR(EINVAL); 370 + return -EINVAL; 371 371 } 372 372 373 373 force_sequence = (irqwait.request.type & VIA_IRQ_FORCE_SEQUENCE);
+1 -1
drivers/char/drm/via_map.c
··· 102 102 103 103 dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER); 104 104 if (dev_priv == NULL) 105 - return DRM_ERR(ENOMEM); 105 + return -ENOMEM; 106 106 107 107 dev->dev_private = (void *)dev_priv; 108 108
+3 -3
drivers/char/drm/via_mm.c
··· 136 136 137 137 if (mem.type > VIA_MEM_AGP) { 138 138 DRM_ERROR("Unknown memory type allocation\n"); 139 - return DRM_ERR(EINVAL); 139 + return -EINVAL; 140 140 } 141 141 mutex_lock(&dev->struct_mutex); 142 142 if (0 == ((mem.type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized : ··· 144 144 DRM_ERROR 145 145 ("Attempt to allocate from uninitialized memory manager.\n"); 146 146 mutex_unlock(&dev->struct_mutex); 147 - return DRM_ERR(EINVAL); 147 + return -EINVAL; 148 148 } 149 149 150 150 tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; ··· 162 162 mem.size = 0; 163 163 mem.index = 0; 164 164 DRM_DEBUG("Video memory allocation failed\n"); 165 - retval = DRM_ERR(ENOMEM); 165 + retval = -ENOMEM; 166 166 } 167 167 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, sizeof(mem)); 168 168
+4 -4
drivers/char/drm/via_verifier.c
··· 1026 1026 case state_error: 1027 1027 default: 1028 1028 *hc_state = saved_state; 1029 - return DRM_ERR(EINVAL); 1029 + return -EINVAL; 1030 1030 } 1031 1031 } 1032 1032 if (state == state_error) { 1033 1033 *hc_state = saved_state; 1034 - return DRM_ERR(EINVAL); 1034 + return -EINVAL; 1035 1035 } 1036 1036 return 0; 1037 1037 } ··· 1082 1082 break; 1083 1083 case state_error: 1084 1084 default: 1085 - return DRM_ERR(EINVAL); 1085 + return -EINVAL; 1086 1086 } 1087 1087 } 1088 1088 if (state == state_error) { 1089 - return DRM_ERR(EINVAL); 1089 + return -EINVAL; 1090 1090 } 1091 1091 return 0; 1092 1092 }