Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[POWERPC] Change rheap functions to use ulongs instead of pointers

The rheap allocation functions return a pointer, but the actual value is based
on how the heap was initialized, and so it can be anything, e.g. an offset
into a buffer. A ulong is a better representation of the value returned by
the allocation functions.

This patch changes all of the relevant rheap functions to use unsigned long
integers instead of pointers. In case of an error, the value returned is
a negative error code that has been cast to an unsigned long. The caller can
use the IS_ERR_VALUE() macro to check for this.

All code which calls the rheap functions is updated accordingly. The macros
IS_MURAM_ERR() and IS_DPERR() have been deleted in favor of IS_ERR_VALUE().

Also added error checking to rh_attach_region().

Signed-off-by: Timur Tabi <timur@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>

authored by

Timur Tabi and committed by
Kumar Gala
4c35630c 742226c5

+221 -217
+67 -50
arch/powerpc/lib/rheap.c
··· 133 133 info->empty_slots--; 134 134 135 135 /* Initialize */ 136 - blk->start = NULL; 136 + blk->start = 0; 137 137 blk->size = 0; 138 138 blk->owner = NULL; 139 139 ··· 158 158 159 159 /* We assume that they are aligned properly */ 160 160 size = blkn->size; 161 - s = (unsigned long)blkn->start; 161 + s = blkn->start; 162 162 e = s + size; 163 163 164 164 /* Find the blocks immediately before and after the given one ··· 170 170 list_for_each(l, &info->free_list) { 171 171 blk = list_entry(l, rh_block_t, list); 172 172 173 - bs = (unsigned long)blk->start; 173 + bs = blk->start; 174 174 be = bs + blk->size; 175 175 176 176 if (next == NULL && s >= bs) ··· 188 188 } 189 189 190 190 /* Now check if they are really adjacent */ 191 - if (before != NULL && s != (unsigned long)before->start + before->size) 191 + if (before && s != (before->start + before->size)) 192 192 before = NULL; 193 193 194 - if (after != NULL && e != (unsigned long)after->start) 194 + if (after && e != after->start) 195 195 after = NULL; 196 196 197 197 /* No coalescing; list insert and return */ ··· 216 216 217 217 /* Grow the after block backwards */ 218 218 if (before == NULL && after != NULL) { 219 - after->start = (int8_t *)after->start - size; 219 + after->start -= size; 220 220 after->size += size; 221 221 return; 222 222 } ··· 321 321 } 322 322 323 323 /* Attach a free memory region, coalesces regions if adjuscent */ 324 - int rh_attach_region(rh_info_t * info, void *start, int size) 324 + int rh_attach_region(rh_info_t * info, unsigned long start, int size) 325 325 { 326 326 rh_block_t *blk; 327 327 unsigned long s, e, m; 328 328 int r; 329 329 330 330 /* The region must be aligned */ 331 - s = (unsigned long)start; 331 + s = start; 332 332 e = s + size; 333 333 m = info->alignment - 1; 334 334 ··· 338 338 /* Round end down */ 339 339 e = e & ~m; 340 340 341 + if (IS_ERR_VALUE(e) || (e < s)) 342 + return -ERANGE; 343 + 341 344 /* Take final values */ 342 - start = (void *)s; 343 - 
size = (int)(e - s); 345 + start = s; 346 + size = e - s; 344 347 345 348 /* Grow the blocks, if needed */ 346 349 r = assure_empty(info, 1); ··· 361 358 } 362 359 363 360 /* Detatch given address range, splits free block if needed. */ 364 - void *rh_detach_region(rh_info_t * info, void *start, int size) 361 + unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size) 365 362 { 366 363 struct list_head *l; 367 364 rh_block_t *blk, *newblk; ··· 369 366 370 367 /* Validate size */ 371 368 if (size <= 0) 372 - return ERR_PTR(-EINVAL); 369 + return (unsigned long) -EINVAL; 373 370 374 371 /* The region must be aligned */ 375 - s = (unsigned long)start; 372 + s = start; 376 373 e = s + size; 377 374 m = info->alignment - 1; 378 375 ··· 383 380 e = e & ~m; 384 381 385 382 if (assure_empty(info, 1) < 0) 386 - return ERR_PTR(-ENOMEM); 383 + return (unsigned long) -ENOMEM; 387 384 388 385 blk = NULL; 389 386 list_for_each(l, &info->free_list) { 390 387 blk = list_entry(l, rh_block_t, list); 391 388 /* The range must lie entirely inside one free block */ 392 - bs = (unsigned long)blk->start; 393 - be = (unsigned long)blk->start + blk->size; 389 + bs = blk->start; 390 + be = blk->start + blk->size; 394 391 if (s >= bs && e <= be) 395 392 break; 396 393 blk = NULL; 397 394 } 398 395 399 396 if (blk == NULL) 400 - return ERR_PTR(-ENOMEM); 397 + return (unsigned long) -ENOMEM; 401 398 402 399 /* Perfect fit */ 403 400 if (bs == s && be == e) { 404 401 /* Delete from free list, release slot */ 405 402 list_del(&blk->list); 406 403 release_slot(info, blk); 407 - return (void *)s; 404 + return s; 408 405 } 409 406 410 407 /* blk still in free list, with updated start and/or size */ 411 408 if (bs == s || be == e) { 412 409 if (bs == s) 413 - blk->start = (int8_t *)blk->start + size; 410 + blk->start += size; 414 411 blk->size -= size; 415 412 416 413 } else { ··· 419 416 420 417 /* the back free fragment */ 421 418 newblk = get_slot(info); 422 - newblk->start = 
(void *)e; 419 + newblk->start = e; 423 420 newblk->size = be - e; 424 421 425 422 list_add(&newblk->list, &blk->list); 426 423 } 427 424 428 - return (void *)s; 425 + return s; 429 426 } 430 427 431 - void *rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner) 428 + /* Allocate a block of memory at the specified alignment. The value returned 429 + * is an offset into the buffer initialized by rh_init(), or a negative number 430 + * if there is an error. 431 + */ 432 + unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner) 432 433 { 433 434 struct list_head *l; 434 435 rh_block_t *blk; 435 436 rh_block_t *newblk; 436 - void *start; 437 + unsigned long start; 437 438 438 - /* Validate size, (must be power of two) */ 439 + /* Validate size, and alignment must be power of two */ 439 440 if (size <= 0 || (alignment & (alignment - 1)) != 0) 440 - return ERR_PTR(-EINVAL); 441 + return (unsigned long) -EINVAL; 441 442 442 443 /* given alignment larger that default rheap alignment */ 443 444 if (alignment > info->alignment) ··· 451 444 size = (size + (info->alignment - 1)) & ~(info->alignment - 1); 452 445 453 446 if (assure_empty(info, 1) < 0) 454 - return ERR_PTR(-ENOMEM); 447 + return (unsigned long) -ENOMEM; 455 448 456 449 blk = NULL; 457 450 list_for_each(l, &info->free_list) { ··· 462 455 } 463 456 464 457 if (blk == NULL) 465 - return ERR_PTR(-ENOMEM); 458 + return (unsigned long) -ENOMEM; 466 459 467 460 /* Just fits */ 468 461 if (blk->size == size) { ··· 482 475 newblk->owner = owner; 483 476 484 477 /* blk still in free list, with updated start, size */ 485 - blk->start = (int8_t *)blk->start + size; 478 + blk->start += size; 486 479 blk->size -= size; 487 480 488 481 start = newblk->start; ··· 493 486 /* this is no problem with the deallocator since */ 494 487 /* we scan for pointers that lie in the blocks */ 495 488 if (alignment > info->alignment) 496 - start = (void *)(((unsigned long)start + 
alignment - 1) & 497 - ~(alignment - 1)); 489 + start = (start + alignment - 1) & ~(alignment - 1); 498 490 499 491 return start; 500 492 } 501 493 502 - void *rh_alloc(rh_info_t * info, int size, const char *owner) 494 + /* Allocate a block of memory at the default alignment. The value returned is 495 + * an offset into the buffer initialized by rh_init(), or a negative number if 496 + * there is an error. 497 + */ 498 + unsigned long rh_alloc(rh_info_t * info, int size, const char *owner) 503 499 { 504 500 return rh_alloc_align(info, size, info->alignment, owner); 505 501 } 506 502 507 - /* allocate at precisely the given address */ 508 - void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner) 503 + /* Allocate a block of memory at the given offset, rounded up to the default 504 + * alignment. The value returned is an offset into the buffer initialized by 505 + * rh_init(), or a negative number if there is an error. 506 + */ 507 + unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner) 509 508 { 510 509 struct list_head *l; 511 510 rh_block_t *blk, *newblk1, *newblk2; ··· 519 506 520 507 /* Validate size */ 521 508 if (size <= 0) 522 - return ERR_PTR(-EINVAL); 509 + return (unsigned long) -EINVAL; 523 510 524 511 /* The region must be aligned */ 525 - s = (unsigned long)start; 512 + s = start; 526 513 e = s + size; 527 514 m = info->alignment - 1; 528 515 ··· 533 520 e = e & ~m; 534 521 535 522 if (assure_empty(info, 2) < 0) 536 - return ERR_PTR(-ENOMEM); 523 + return (unsigned long) -ENOMEM; 537 524 538 525 blk = NULL; 539 526 list_for_each(l, &info->free_list) { 540 527 blk = list_entry(l, rh_block_t, list); 541 528 /* The range must lie entirely inside one free block */ 542 - bs = (unsigned long)blk->start; 543 - be = (unsigned long)blk->start + blk->size; 529 + bs = blk->start; 530 + be = blk->start + blk->size; 544 531 if (s >= bs && e <= be) 545 532 break; 546 533 } 547 534 548 535 if (blk == 
NULL) 549 - return ERR_PTR(-ENOMEM); 536 + return (unsigned long) -ENOMEM; 550 537 551 538 /* Perfect fit */ 552 539 if (bs == s && be == e) { ··· 564 551 /* blk still in free list, with updated start and/or size */ 565 552 if (bs == s || be == e) { 566 553 if (bs == s) 567 - blk->start = (int8_t *)blk->start + size; 554 + blk->start += size; 568 555 blk->size -= size; 569 556 570 557 } else { ··· 573 560 574 561 /* The back free fragment */ 575 562 newblk2 = get_slot(info); 576 - newblk2->start = (void *)e; 563 + newblk2->start = e; 577 564 newblk2->size = be - e; 578 565 579 566 list_add(&newblk2->list, &blk->list); 580 567 } 581 568 582 569 newblk1 = get_slot(info); 583 - newblk1->start = (void *)s; 570 + newblk1->start = s; 584 571 newblk1->size = e - s; 585 572 newblk1->owner = owner; 586 573 ··· 590 577 return start; 591 578 } 592 579 593 - int rh_free(rh_info_t * info, void *start) 580 + /* Deallocate the memory previously allocated by one of the rh_alloc functions. 581 + * The return value is the size of the deallocated block, or a negative number 582 + * if there is an error. 
583 + */ 584 + int rh_free(rh_info_t * info, unsigned long start) 594 585 { 595 586 rh_block_t *blk, *blk2; 596 587 struct list_head *l; ··· 659 642 return nr; 660 643 } 661 644 662 - int rh_set_owner(rh_info_t * info, void *start, const char *owner) 645 + int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner) 663 646 { 664 647 rh_block_t *blk, *blk2; 665 648 struct list_head *l; ··· 701 684 nr = maxnr; 702 685 for (i = 0; i < nr; i++) 703 686 printk(KERN_INFO 704 - " 0x%p-0x%p (%u)\n", 705 - st[i].start, (int8_t *) st[i].start + st[i].size, 687 + " 0x%lx-0x%lx (%u)\n", 688 + st[i].start, st[i].start + st[i].size, 706 689 st[i].size); 707 690 printk(KERN_INFO "\n"); 708 691 ··· 712 695 nr = maxnr; 713 696 for (i = 0; i < nr; i++) 714 697 printk(KERN_INFO 715 - " 0x%p-0x%p (%u) %s\n", 716 - st[i].start, (int8_t *) st[i].start + st[i].size, 698 + " 0x%lx-0x%lx (%u) %s\n", 699 + st[i].start, st[i].start + st[i].size, 717 700 st[i].size, st[i].owner != NULL ? st[i].owner : ""); 718 701 printk(KERN_INFO "\n"); 719 702 } ··· 721 704 void rh_dump_blk(rh_info_t * info, rh_block_t * blk) 722 705 { 723 706 printk(KERN_INFO 724 - "blk @0x%p: 0x%p-0x%p (%u)\n", 725 - blk, blk->start, (int8_t *) blk->start + blk->size, blk->size); 707 + "blk @0x%p: 0x%lx-0x%lx (%u)\n", 708 + blk, blk->start, blk->start + blk->size, blk->size); 726 709 }
+10 -10
arch/powerpc/sysdev/commproc.c
··· 330 330 * with the processor and the microcode patches applied / activated. 331 331 * But the following should be at least safe. 332 332 */ 333 - rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE); 333 + rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE); 334 334 } 335 335 336 336 /* ··· 338 338 * This function returns an offset into the DPRAM area. 339 339 * Use cpm_dpram_addr() to get the virtual address of the area. 340 340 */ 341 - uint cpm_dpalloc(uint size, uint align) 341 + unsigned long cpm_dpalloc(uint size, uint align) 342 342 { 343 - void *start; 343 + unsigned long start; 344 344 unsigned long flags; 345 345 346 346 spin_lock_irqsave(&cpm_dpmem_lock, flags); ··· 352 352 } 353 353 EXPORT_SYMBOL(cpm_dpalloc); 354 354 355 - int cpm_dpfree(uint offset) 355 + int cpm_dpfree(unsigned long offset) 356 356 { 357 357 int ret; 358 358 unsigned long flags; 359 359 360 360 spin_lock_irqsave(&cpm_dpmem_lock, flags); 361 - ret = rh_free(&cpm_dpmem_info, (void *)offset); 361 + ret = rh_free(&cpm_dpmem_info, offset); 362 362 spin_unlock_irqrestore(&cpm_dpmem_lock, flags); 363 363 364 364 return ret; 365 365 } 366 366 EXPORT_SYMBOL(cpm_dpfree); 367 367 368 - uint cpm_dpalloc_fixed(uint offset, uint size, uint align) 368 + unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align) 369 369 { 370 - void *start; 370 + unsigned long start; 371 371 unsigned long flags; 372 372 373 373 spin_lock_irqsave(&cpm_dpmem_lock, flags); 374 374 cpm_dpmem_info.alignment = align; 375 - start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc"); 375 + start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc"); 376 376 spin_unlock_irqrestore(&cpm_dpmem_lock, flags); 377 377 378 - return (uint)start; 378 + return start; 379 379 } 380 380 EXPORT_SYMBOL(cpm_dpalloc_fixed); 381 381 ··· 385 385 } 386 386 EXPORT_SYMBOL(cpm_dpdump); 387 387 388 - void *cpm_dpram_addr(uint offset) 388 + void 
*cpm_dpram_addr(unsigned long offset) 389 389 { 390 390 return (void *)(dpram_vbase + offset); 391 391 }
+10 -11
arch/powerpc/sysdev/cpm2_common.c
··· 248 248 * varies with the processor and the microcode patches activated. 249 249 * But the following should be at least safe. 250 250 */ 251 - rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE, 252 - CPM_DATAONLY_SIZE); 251 + rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE); 253 252 } 254 253 255 254 /* This function returns an index into the DPRAM area. 256 255 */ 257 - uint cpm_dpalloc(uint size, uint align) 256 + unsigned long cpm_dpalloc(uint size, uint align) 258 257 { 259 - void *start; 258 + unsigned long start; 260 259 unsigned long flags; 261 260 262 261 spin_lock_irqsave(&cpm_dpmem_lock, flags); ··· 267 268 } 268 269 EXPORT_SYMBOL(cpm_dpalloc); 269 270 270 - int cpm_dpfree(uint offset) 271 + int cpm_dpfree(unsigned long offset) 271 272 { 272 273 int ret; 273 274 unsigned long flags; 274 275 275 276 spin_lock_irqsave(&cpm_dpmem_lock, flags); 276 - ret = rh_free(&cpm_dpmem_info, (void *)offset); 277 + ret = rh_free(&cpm_dpmem_info, offset); 277 278 spin_unlock_irqrestore(&cpm_dpmem_lock, flags); 278 279 279 280 return ret; ··· 281 282 EXPORT_SYMBOL(cpm_dpfree); 282 283 283 284 /* not sure if this is ever needed */ 284 - uint cpm_dpalloc_fixed(uint offset, uint size, uint align) 285 + unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align) 285 286 { 286 - void *start; 287 + unsigned long start; 287 288 unsigned long flags; 288 289 289 290 spin_lock_irqsave(&cpm_dpmem_lock, flags); 290 291 cpm_dpmem_info.alignment = align; 291 - start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc"); 292 + start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc"); 292 293 spin_unlock_irqrestore(&cpm_dpmem_lock, flags); 293 294 294 - return (uint)start; 295 + return start; 295 296 } 296 297 EXPORT_SYMBOL(cpm_dpalloc_fixed); 297 298 ··· 301 302 } 302 303 EXPORT_SYMBOL(cpm_dpdump); 303 304 304 - void *cpm_dpram_addr(uint offset) 305 + void *cpm_dpram_addr(unsigned long offset) 305 306 { 306 307 
return (void *)(im_dprambase + offset); 307 308 }
+14 -15
arch/powerpc/sysdev/qe_lib/qe.c
··· 244 244 static int qe_sdma_init(void) 245 245 { 246 246 struct sdma *sdma = &qe_immr->sdma; 247 - u32 sdma_buf_offset; 247 + unsigned long sdma_buf_offset; 248 248 249 249 if (!sdma) 250 250 return -ENODEV; ··· 252 252 /* allocate 2 internal temporary buffers (512 bytes size each) for 253 253 * the SDMA */ 254 254 sdma_buf_offset = qe_muram_alloc(512 * 2, 4096); 255 - if (IS_MURAM_ERR(sdma_buf_offset)) 255 + if (IS_ERR_VALUE(sdma_buf_offset)) 256 256 return -ENOMEM; 257 257 258 - out_be32(&sdma->sdebcr, sdma_buf_offset & QE_SDEBCR_BA_MASK); 258 + out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK); 259 259 out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK | 260 260 (0x1 << QE_SDMR_CEN_SHIFT))); 261 261 ··· 291 291 if ((np = of_find_node_by_name(NULL, "data-only")) != NULL) { 292 292 address = *of_get_address(np, 0, &size, &flags); 293 293 of_node_put(np); 294 - rh_attach_region(&qe_muram_info, 295 - (void *)address, (int)size); 294 + rh_attach_region(&qe_muram_info, address, (int) size); 296 295 } 297 296 } 298 297 299 298 /* This function returns an index into the MURAM area. 
300 299 */ 301 - u32 qe_muram_alloc(u32 size, u32 align) 300 + unsigned long qe_muram_alloc(int size, int align) 302 301 { 303 - void *start; 302 + unsigned long start; 304 303 unsigned long flags; 305 304 306 305 spin_lock_irqsave(&qe_muram_lock, flags); 307 306 start = rh_alloc_align(&qe_muram_info, size, align, "QE"); 308 307 spin_unlock_irqrestore(&qe_muram_lock, flags); 309 308 310 - return (u32) start; 309 + return start; 311 310 } 312 311 EXPORT_SYMBOL(qe_muram_alloc); 313 312 314 - int qe_muram_free(u32 offset) 313 + int qe_muram_free(unsigned long offset) 315 314 { 316 315 int ret; 317 316 unsigned long flags; 318 317 319 318 spin_lock_irqsave(&qe_muram_lock, flags); 320 - ret = rh_free(&qe_muram_info, (void *)offset); 319 + ret = rh_free(&qe_muram_info, offset); 321 320 spin_unlock_irqrestore(&qe_muram_lock, flags); 322 321 323 322 return ret; ··· 324 325 EXPORT_SYMBOL(qe_muram_free); 325 326 326 327 /* not sure if this is ever needed */ 327 - u32 qe_muram_alloc_fixed(u32 offset, u32 size) 328 + unsigned long qe_muram_alloc_fixed(unsigned long offset, int size) 328 329 { 329 - void *start; 330 + unsigned long start; 330 331 unsigned long flags; 331 332 332 333 spin_lock_irqsave(&qe_muram_lock, flags); 333 - start = rh_alloc_fixed(&qe_muram_info, (void *)offset, size, "commproc"); 334 + start = rh_alloc_fixed(&qe_muram_info, offset, size, "commproc"); 334 335 spin_unlock_irqrestore(&qe_muram_lock, flags); 335 336 336 - return (u32) start; 337 + return start; 337 338 } 338 339 EXPORT_SYMBOL(qe_muram_alloc_fixed); 339 340 ··· 343 344 } 344 345 EXPORT_SYMBOL(qe_muram_dump); 345 346 346 - void *qe_muram_addr(u32 offset) 347 + void *qe_muram_addr(unsigned long offset) 347 348 { 348 349 return (void *)&qe_immr->muram[offset]; 349 350 }
+3 -2
arch/powerpc/sysdev/qe_lib/ucc_fast.c
··· 18 18 #include <linux/slab.h> 19 19 #include <linux/stddef.h> 20 20 #include <linux/interrupt.h> 21 + #include <linux/err.h> 21 22 22 23 #include <asm/io.h> 23 24 #include <asm/immap_qe.h> ··· 269 268 /* Allocate memory for Tx Virtual Fifo */ 270 269 uccf->ucc_fast_tx_virtual_fifo_base_offset = 271 270 qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); 272 - if (IS_MURAM_ERR(uccf->ucc_fast_tx_virtual_fifo_base_offset)) { 271 + if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) { 273 272 printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO", __FUNCTION__); 274 273 uccf->ucc_fast_tx_virtual_fifo_base_offset = 0; 275 274 ucc_fast_free(uccf); ··· 281 280 qe_muram_alloc(uf_info->urfs + 282 281 UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR, 283 282 UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); 284 - if (IS_MURAM_ERR(uccf->ucc_fast_rx_virtual_fifo_base_offset)) { 283 + if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) { 285 284 printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO", __FUNCTION__); 286 285 uccf->ucc_fast_rx_virtual_fifo_base_offset = 0; 287 286 ucc_fast_free(uccf);
+4 -3
arch/powerpc/sysdev/qe_lib/ucc_slow.c
··· 18 18 #include <linux/slab.h> 19 19 #include <linux/stddef.h> 20 20 #include <linux/interrupt.h> 21 + #include <linux/err.h> 21 22 22 23 #include <asm/io.h> 23 24 #include <asm/immap_qe.h> ··· 176 175 /* Get PRAM base */ 177 176 uccs->us_pram_offset = 178 177 qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM); 179 - if (IS_MURAM_ERR(uccs->us_pram_offset)) { 178 + if (IS_ERR_VALUE(uccs->us_pram_offset)) { 180 179 printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __FUNCTION__); 181 180 ucc_slow_free(uccs); 182 181 return -ENOMEM; ··· 211 210 uccs->rx_base_offset = 212 211 qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd), 213 212 QE_ALIGNMENT_OF_BD); 214 - if (IS_MURAM_ERR(uccs->rx_base_offset)) { 213 + if (IS_ERR_VALUE(uccs->rx_base_offset)) { 215 214 printk(KERN_ERR "%s: cannot allocate RX BDs", __FUNCTION__); 216 215 uccs->rx_base_offset = 0; 217 216 ucc_slow_free(uccs); ··· 221 220 uccs->tx_base_offset = 222 221 qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd), 223 222 QE_ALIGNMENT_OF_BD); 224 - if (IS_MURAM_ERR(uccs->tx_base_offset)) { 223 + if (IS_ERR_VALUE(uccs->tx_base_offset)) { 225 224 printk(KERN_ERR "%s: cannot allocate TX BDs", __FUNCTION__); 226 225 uccs->tx_base_offset = 0; 227 226 ucc_slow_free(uccs);
+11 -11
arch/ppc/8xx_io/commproc.c
··· 402 402 * with the processor and the microcode patches applied / activated. 403 403 * But the following should be at least safe. 404 404 */ 405 - rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE); 405 + rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE); 406 406 } 407 407 408 408 /* ··· 410 410 * This function returns an offset into the DPRAM area. 411 411 * Use cpm_dpram_addr() to get the virtual address of the area. 412 412 */ 413 - uint cpm_dpalloc(uint size, uint align) 413 + unsigned long cpm_dpalloc(uint size, uint align) 414 414 { 415 - void *start; 415 + unsigned long start; 416 416 unsigned long flags; 417 417 418 418 spin_lock_irqsave(&cpm_dpmem_lock, flags); ··· 420 420 start = rh_alloc(&cpm_dpmem_info, size, "commproc"); 421 421 spin_unlock_irqrestore(&cpm_dpmem_lock, flags); 422 422 423 - return (uint)start; 423 + return start; 424 424 } 425 425 EXPORT_SYMBOL(cpm_dpalloc); 426 426 427 - int cpm_dpfree(uint offset) 427 + int cpm_dpfree(unsigned long offset) 428 428 { 429 429 int ret; 430 430 unsigned long flags; 431 431 432 432 spin_lock_irqsave(&cpm_dpmem_lock, flags); 433 - ret = rh_free(&cpm_dpmem_info, (void *)offset); 433 + ret = rh_free(&cpm_dpmem_info, offset); 434 434 spin_unlock_irqrestore(&cpm_dpmem_lock, flags); 435 435 436 436 return ret; 437 437 } 438 438 EXPORT_SYMBOL(cpm_dpfree); 439 439 440 - uint cpm_dpalloc_fixed(uint offset, uint size, uint align) 440 + unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align) 441 441 { 442 - void *start; 442 + unsigned long start; 443 443 unsigned long flags; 444 444 445 445 spin_lock_irqsave(&cpm_dpmem_lock, flags); 446 446 cpm_dpmem_info.alignment = align; 447 - start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc"); 447 + start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc"); 448 448 spin_unlock_irqrestore(&cpm_dpmem_lock, flags); 449 449 450 - return (uint)start; 450 + return start; 451 451 } 
452 452 EXPORT_SYMBOL(cpm_dpalloc_fixed); 453 453 ··· 457 457 } 458 458 EXPORT_SYMBOL(cpm_dpdump); 459 459 460 - void *cpm_dpram_addr(uint offset) 460 + void *cpm_dpram_addr(unsigned long offset) 461 461 { 462 462 return ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem + offset; 463 463 }
+49 -46
arch/ppc/lib/rheap.c
··· 132 132 info->empty_slots--; 133 133 134 134 /* Initialize */ 135 - blk->start = NULL; 135 + blk->start = 0; 136 136 blk->size = 0; 137 137 blk->owner = NULL; 138 138 ··· 157 157 158 158 /* We assume that they are aligned properly */ 159 159 size = blkn->size; 160 - s = (unsigned long)blkn->start; 160 + s = blkn->start; 161 161 e = s + size; 162 162 163 163 /* Find the blocks immediately before and after the given one ··· 169 169 list_for_each(l, &info->free_list) { 170 170 blk = list_entry(l, rh_block_t, list); 171 171 172 - bs = (unsigned long)blk->start; 172 + bs = blk->start; 173 173 be = bs + blk->size; 174 174 175 175 if (next == NULL && s >= bs) ··· 187 187 } 188 188 189 189 /* Now check if they are really adjacent */ 190 - if (before != NULL && s != (unsigned long)before->start + before->size) 190 + if (before && s != (before->start + before->size)) 191 191 before = NULL; 192 192 193 - if (after != NULL && e != (unsigned long)after->start) 193 + if (after && e != after->start) 194 194 after = NULL; 195 195 196 196 /* No coalescing; list insert and return */ ··· 215 215 216 216 /* Grow the after block backwards */ 217 217 if (before == NULL && after != NULL) { 218 - after->start = (int8_t *)after->start - size; 218 + after->start -= size; 219 219 after->size += size; 220 220 return; 221 221 } ··· 320 320 } 321 321 322 322 /* Attach a free memory region, coalesces regions if adjuscent */ 323 - int rh_attach_region(rh_info_t * info, void *start, int size) 323 + int rh_attach_region(rh_info_t * info, unsigned long start, int size) 324 324 { 325 325 rh_block_t *blk; 326 326 unsigned long s, e, m; 327 327 int r; 328 328 329 329 /* The region must be aligned */ 330 - s = (unsigned long)start; 330 + s = start; 331 331 e = s + size; 332 332 m = info->alignment - 1; 333 333 ··· 337 337 /* Round end down */ 338 338 e = e & ~m; 339 339 340 + if (IS_ERR_VALUE(e) || (e < s)) 341 + return -ERANGE; 342 + 340 343 /* Take final values */ 341 - start = (void *)s; 342 - 
size = (int)(e - s); 344 + start = s; 345 + size = e - s; 343 346 344 347 /* Grow the blocks, if needed */ 345 348 r = assure_empty(info, 1); ··· 360 357 } 361 358 362 359 /* Detatch given address range, splits free block if needed. */ 363 - void *rh_detach_region(rh_info_t * info, void *start, int size) 360 + unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size) 364 361 { 365 362 struct list_head *l; 366 363 rh_block_t *blk, *newblk; ··· 368 365 369 366 /* Validate size */ 370 367 if (size <= 0) 371 - return ERR_PTR(-EINVAL); 368 + return (unsigned long) -EINVAL; 372 369 373 370 /* The region must be aligned */ 374 - s = (unsigned long)start; 371 + s = start; 375 372 e = s + size; 376 373 m = info->alignment - 1; 377 374 ··· 382 379 e = e & ~m; 383 380 384 381 if (assure_empty(info, 1) < 0) 385 - return ERR_PTR(-ENOMEM); 382 + return (unsigned long) -ENOMEM; 386 383 387 384 blk = NULL; 388 385 list_for_each(l, &info->free_list) { 389 386 blk = list_entry(l, rh_block_t, list); 390 387 /* The range must lie entirely inside one free block */ 391 - bs = (unsigned long)blk->start; 392 - be = (unsigned long)blk->start + blk->size; 388 + bs = blk->start; 389 + be = blk->start + blk->size; 393 390 if (s >= bs && e <= be) 394 391 break; 395 392 blk = NULL; 396 393 } 397 394 398 395 if (blk == NULL) 399 - return ERR_PTR(-ENOMEM); 396 + return (unsigned long) -ENOMEM; 400 397 401 398 /* Perfect fit */ 402 399 if (bs == s && be == e) { 403 400 /* Delete from free list, release slot */ 404 401 list_del(&blk->list); 405 402 release_slot(info, blk); 406 - return (void *)s; 403 + return s; 407 404 } 408 405 409 406 /* blk still in free list, with updated start and/or size */ 410 407 if (bs == s || be == e) { 411 408 if (bs == s) 412 - blk->start = (int8_t *)blk->start + size; 409 + blk->start += size; 413 410 blk->size -= size; 414 411 415 412 } else { ··· 418 415 419 416 /* the back free fragment */ 420 417 newblk = get_slot(info); 421 - newblk->start = 
(void *)e; 418 + newblk->start = e; 422 419 newblk->size = be - e; 423 420 424 421 list_add(&newblk->list, &blk->list); 425 422 } 426 423 427 - return (void *)s; 424 + return s; 428 425 } 429 426 430 - void *rh_alloc(rh_info_t * info, int size, const char *owner) 427 + unsigned long rh_alloc(rh_info_t * info, int size, const char *owner) 431 428 { 432 429 struct list_head *l; 433 430 rh_block_t *blk; 434 431 rh_block_t *newblk; 435 - void *start; 432 + unsigned long start; 436 433 437 434 /* Validate size */ 438 435 if (size <= 0) 439 - return ERR_PTR(-EINVAL); 436 + return (unsigned long) -EINVAL; 440 437 441 438 /* Align to configured alignment */ 442 439 size = (size + (info->alignment - 1)) & ~(info->alignment - 1); 443 440 444 441 if (assure_empty(info, 1) < 0) 445 - return ERR_PTR(-ENOMEM); 442 + return (unsigned long) -ENOMEM; 446 443 447 444 blk = NULL; 448 445 list_for_each(l, &info->free_list) { ··· 453 450 } 454 451 455 452 if (blk == NULL) 456 - return ERR_PTR(-ENOMEM); 453 + return (unsigned long) -ENOMEM; 457 454 458 455 /* Just fits */ 459 456 if (blk->size == size) { ··· 473 470 newblk->owner = owner; 474 471 475 472 /* blk still in free list, with updated start, size */ 476 - blk->start = (int8_t *)blk->start + size; 473 + blk->start += size; 477 474 blk->size -= size; 478 475 479 476 start = newblk->start; ··· 484 481 } 485 482 486 483 /* allocate at precisely the given address */ 487 - void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner) 484 + unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner) 488 485 { 489 486 struct list_head *l; 490 487 rh_block_t *blk, *newblk1, *newblk2; 491 - unsigned long s, e, m, bs, be; 488 + unsigned long s, e, m, bs=0, be=0; 492 489 493 490 /* Validate size */ 494 491 if (size <= 0) 495 - return ERR_PTR(-EINVAL); 492 + return (unsigned long) -EINVAL; 496 493 497 494 /* The region must be aligned */ 498 - s = (unsigned long)start; 495 + s = start; 
499 496 e = s + size; 500 497 m = info->alignment - 1; 501 498 ··· 506 503 e = e & ~m; 507 504 508 505 if (assure_empty(info, 2) < 0) 509 - return ERR_PTR(-ENOMEM); 506 + return (unsigned long) -ENOMEM; 510 507 511 508 blk = NULL; 512 509 list_for_each(l, &info->free_list) { 513 510 blk = list_entry(l, rh_block_t, list); 514 511 /* The range must lie entirely inside one free block */ 515 - bs = (unsigned long)blk->start; 516 - be = (unsigned long)blk->start + blk->size; 512 + bs = blk->start; 513 + be = blk->start + blk->size; 517 514 if (s >= bs && e <= be) 518 515 break; 519 516 } 520 517 521 518 if (blk == NULL) 522 - return ERR_PTR(-ENOMEM); 519 + return (unsigned long) -ENOMEM; 523 520 524 521 /* Perfect fit */ 525 522 if (bs == s && be == e) { ··· 537 534 /* blk still in free list, with updated start and/or size */ 538 535 if (bs == s || be == e) { 539 536 if (bs == s) 540 - blk->start = (int8_t *)blk->start + size; 537 + blk->start += size; 541 538 blk->size -= size; 542 539 543 540 } else { ··· 546 543 547 544 /* The back free fragment */ 548 545 newblk2 = get_slot(info); 549 - newblk2->start = (void *)e; 546 + newblk2->start = e; 550 547 newblk2->size = be - e; 551 548 552 549 list_add(&newblk2->list, &blk->list); 553 550 } 554 551 555 552 newblk1 = get_slot(info); 556 - newblk1->start = (void *)s; 553 + newblk1->start = s; 557 554 newblk1->size = e - s; 558 555 newblk1->owner = owner; 559 556 ··· 563 560 return start; 564 561 } 565 562 566 - int rh_free(rh_info_t * info, void *start) 563 + int rh_free(rh_info_t * info, unsigned long start) 567 564 { 568 565 rh_block_t *blk, *blk2; 569 566 struct list_head *l; ··· 628 625 return nr; 629 626 } 630 627 631 - int rh_set_owner(rh_info_t * info, void *start, const char *owner) 628 + int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner) 632 629 { 633 630 rh_block_t *blk, *blk2; 634 631 struct list_head *l; ··· 670 667 nr = maxnr; 671 668 for (i = 0; i < nr; i++) 672 669 printk(KERN_INFO 673 
- " 0x%p-0x%p (%u)\n", 674 - st[i].start, (int8_t *) st[i].start + st[i].size, 670 + " 0x%lx-0x%lx (%u)\n", 671 + st[i].start, st[i].start + st[i].size, 675 672 st[i].size); 676 673 printk(KERN_INFO "\n"); 677 674 ··· 681 678 nr = maxnr; 682 679 for (i = 0; i < nr; i++) 683 680 printk(KERN_INFO 684 - " 0x%p-0x%p (%u) %s\n", 685 - st[i].start, (int8_t *) st[i].start + st[i].size, 681 + " 0x%lx-0x%lx (%u) %s\n", 682 + st[i].start, st[i].start + st[i].size, 686 683 st[i].size, st[i].owner != NULL ? st[i].owner : ""); 687 684 printk(KERN_INFO "\n"); 688 685 } ··· 690 687 void rh_dump_blk(rh_info_t * info, rh_block_t * blk) 691 688 { 692 689 printk(KERN_INFO 693 - "blk @0x%p: 0x%p-0x%p (%u)\n", 694 - blk, blk->start, (int8_t *) blk->start + blk->size, blk->size); 690 + "blk @0x%p: 0x%lx-0x%lx (%u)\n", 691 + blk, blk->start, blk->start + blk->size, blk->size); 695 692 }
+11 -12
arch/ppc/syslib/cpm2_common.c
··· 136 136 * varies with the processor and the microcode patches activated. 137 137 * But the following should be at least safe. 138 138 */ 139 - rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE, 140 - CPM_DATAONLY_SIZE); 139 + rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE); 141 140 } 142 141 143 142 /* This function returns an index into the DPRAM area. 144 143 */ 145 - uint cpm_dpalloc(uint size, uint align) 144 + unsigned long cpm_dpalloc(uint size, uint align) 146 145 { 147 - void *start; 146 + unsigned long start; 148 147 unsigned long flags; 149 148 150 149 spin_lock_irqsave(&cpm_dpmem_lock, flags); ··· 151 152 start = rh_alloc(&cpm_dpmem_info, size, "commproc"); 152 153 spin_unlock_irqrestore(&cpm_dpmem_lock, flags); 153 154 154 - return (uint)start; 155 + return start; 155 156 } 156 157 EXPORT_SYMBOL(cpm_dpalloc); 157 158 158 - int cpm_dpfree(uint offset) 159 + int cpm_dpfree(unsigned long offset) 159 160 { 160 161 int ret; 161 162 unsigned long flags; 162 163 163 164 spin_lock_irqsave(&cpm_dpmem_lock, flags); 164 - ret = rh_free(&cpm_dpmem_info, (void *)offset); 165 + ret = rh_free(&cpm_dpmem_info, offset); 165 166 spin_unlock_irqrestore(&cpm_dpmem_lock, flags); 166 167 167 168 return ret; ··· 169 170 EXPORT_SYMBOL(cpm_dpfree); 170 171 171 172 /* not sure if this is ever needed */ 172 - uint cpm_dpalloc_fixed(uint offset, uint size, uint align) 173 + unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align) 173 174 { 174 - void *start; 175 + unsigned long start; 175 176 unsigned long flags; 176 177 177 178 spin_lock_irqsave(&cpm_dpmem_lock, flags); 178 179 cpm_dpmem_info.alignment = align; 179 - start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc"); 180 + start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc"); 180 181 spin_unlock_irqrestore(&cpm_dpmem_lock, flags); 181 182 182 - return (uint)start; 183 + return start; 183 184 } 184 185 EXPORT_SYMBOL(cpm_dpalloc_fixed); 
185 186 ··· 189 190 } 190 191 EXPORT_SYMBOL(cpm_dpdump); 191 192 192 - void *cpm_dpram_addr(uint offset) 193 + void *cpm_dpram_addr(unsigned long offset) 193 194 { 194 195 return (void *)&cpm2_immr->im_dprambase[offset]; 195 196 }
+1 -1
drivers/net/fs_enet/mac-scc.c
··· 167 167 168 168 fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) * 169 169 sizeof(cbd_t), 8); 170 - if (IS_DPERR(fep->ring_mem_addr)) 170 + if (IS_ERR_VALUE(fep->ring_mem_addr)) 171 171 return -ENOMEM; 172 172 173 173 fep->ring_base = cpm_dpram_addr(fep->ring_mem_addr);
+15 -15
drivers/net/ucc_geth.c
··· 293 293 else { 294 294 init_enet_offset = 295 295 qe_muram_alloc(thread_size, thread_alignment); 296 - if (IS_MURAM_ERR(init_enet_offset)) { 296 + if (IS_ERR_VALUE(init_enet_offset)) { 297 297 ugeth_err 298 298 ("fill_init_enet_entries: Can not allocate DPRAM memory."); 299 299 qe_put_snum((u8) snum); ··· 2594 2594 ugeth->tx_bd_ring_offset[j] = 2595 2595 qe_muram_alloc(length, 2596 2596 UCC_GETH_TX_BD_RING_ALIGNMENT); 2597 - if (!IS_MURAM_ERR(ugeth->tx_bd_ring_offset[j])) 2597 + if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) 2598 2598 ugeth->p_tx_bd_ring[j] = 2599 2599 (u8 *) qe_muram_addr(ugeth-> 2600 2600 tx_bd_ring_offset[j]); ··· 2629 2629 ugeth->rx_bd_ring_offset[j] = 2630 2630 qe_muram_alloc(length, 2631 2631 UCC_GETH_RX_BD_RING_ALIGNMENT); 2632 - if (!IS_MURAM_ERR(ugeth->rx_bd_ring_offset[j])) 2632 + if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) 2633 2633 ugeth->p_rx_bd_ring[j] = 2634 2634 (u8 *) qe_muram_addr(ugeth-> 2635 2635 rx_bd_ring_offset[j]); ··· 2713 2713 ugeth->tx_glbl_pram_offset = 2714 2714 qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram), 2715 2715 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); 2716 - if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) { 2716 + if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) { 2717 2717 ugeth_err 2718 2718 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", 2719 2719 __FUNCTION__); ··· 2735 2735 sizeof(struct ucc_geth_thread_data_tx) + 2736 2736 32 * (numThreadsTxNumerical == 1), 2737 2737 UCC_GETH_THREAD_DATA_ALIGNMENT); 2738 - if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) { 2738 + if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) { 2739 2739 ugeth_err 2740 2740 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", 2741 2741 __FUNCTION__); ··· 2763 2763 qe_muram_alloc(ug_info->numQueuesTx * 2764 2764 sizeof(struct ucc_geth_send_queue_qd), 2765 2765 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); 2766 - if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) { 2766 + if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) { 
2767 2767 ugeth_err 2768 2768 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", 2769 2769 __FUNCTION__); ··· 2806 2806 ugeth->scheduler_offset = 2807 2807 qe_muram_alloc(sizeof(struct ucc_geth_scheduler), 2808 2808 UCC_GETH_SCHEDULER_ALIGNMENT); 2809 - if (IS_MURAM_ERR(ugeth->scheduler_offset)) { 2809 + if (IS_ERR_VALUE(ugeth->scheduler_offset)) { 2810 2810 ugeth_err 2811 2811 ("%s: Can not allocate DPRAM memory for p_scheduler.", 2812 2812 __FUNCTION__); ··· 2854 2854 qe_muram_alloc(sizeof 2855 2855 (struct ucc_geth_tx_firmware_statistics_pram), 2856 2856 UCC_GETH_TX_STATISTICS_ALIGNMENT); 2857 - if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) { 2857 + if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) { 2858 2858 ugeth_err 2859 2859 ("%s: Can not allocate DPRAM memory for" 2860 2860 " p_tx_fw_statistics_pram.", __FUNCTION__); ··· 2893 2893 ugeth->rx_glbl_pram_offset = 2894 2894 qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram), 2895 2895 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); 2896 - if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) { 2896 + if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) { 2897 2897 ugeth_err 2898 2898 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", 2899 2899 __FUNCTION__); ··· 2914 2914 qe_muram_alloc(numThreadsRxNumerical * 2915 2915 sizeof(struct ucc_geth_thread_data_rx), 2916 2916 UCC_GETH_THREAD_DATA_ALIGNMENT); 2917 - if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) { 2917 + if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) { 2918 2918 ugeth_err 2919 2919 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", 2920 2920 __FUNCTION__); ··· 2937 2937 qe_muram_alloc(sizeof 2938 2938 (struct ucc_geth_rx_firmware_statistics_pram), 2939 2939 UCC_GETH_RX_STATISTICS_ALIGNMENT); 2940 - if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) { 2940 + if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { 2941 2941 ugeth_err 2942 2942 ("%s: Can not allocate DPRAM memory for" 2943 2943 " p_rx_fw_statistics_pram.", 
__FUNCTION__); ··· 2959 2959 qe_muram_alloc(ug_info->numQueuesRx * 2960 2960 sizeof(struct ucc_geth_rx_interrupt_coalescing_entry) 2961 2961 + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); 2962 - if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) { 2962 + if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { 2963 2963 ugeth_err 2964 2964 ("%s: Can not allocate DPRAM memory for" 2965 2965 " p_rx_irq_coalescing_tbl.", __FUNCTION__); ··· 3027 3027 (sizeof(struct ucc_geth_rx_bd_queues_entry) + 3028 3028 sizeof(struct ucc_geth_rx_prefetched_bds)), 3029 3029 UCC_GETH_RX_BD_QUEUES_ALIGNMENT); 3030 - if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) { 3030 + if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { 3031 3031 ugeth_err 3032 3032 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", 3033 3033 __FUNCTION__); ··· 3116 3116 ugeth->exf_glbl_param_offset = 3117 3117 qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram), 3118 3118 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); 3119 - if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) { 3119 + if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { 3120 3120 ugeth_err 3121 3121 ("%s: Can not allocate DPRAM memory for" 3122 3122 " p_exf_glbl_param.", __FUNCTION__); ··· 3258 3258 3259 3259 /* Allocate InitEnet command parameter structure */ 3260 3260 init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4); 3261 - if (IS_MURAM_ERR(init_enet_pram_offset)) { 3261 + if (IS_ERR_VALUE(init_enet_pram_offset)) { 3262 3262 ugeth_err 3263 3263 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", 3264 3264 __FUNCTION__);
+2 -2
drivers/serial/cpm_uart/cpm_uart_cpm1.c
··· 125 125 { 126 126 int dpmemsz, memsz; 127 127 u8 *dp_mem; 128 - uint dp_offset; 128 + unsigned long dp_offset; 129 129 u8 *mem_addr; 130 130 dma_addr_t dma_addr = 0; 131 131 ··· 133 133 134 134 dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos); 135 135 dp_offset = cpm_dpalloc(dpmemsz, 8); 136 - if (IS_DPERR(dp_offset)) { 136 + if (IS_ERR_VALUE(dp_offset)) { 137 137 printk(KERN_ERR 138 138 "cpm_uart_cpm1.c: could not allocate buffer descriptors\n"); 139 139 return -ENOMEM;
+2 -2
drivers/serial/cpm_uart/cpm_uart_cpm2.c
··· 222 222 { 223 223 int dpmemsz, memsz; 224 224 u8 *dp_mem; 225 - uint dp_offset; 225 + unsigned long dp_offset; 226 226 u8 *mem_addr; 227 227 dma_addr_t dma_addr = 0; 228 228 ··· 230 230 231 231 dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos); 232 232 dp_offset = cpm_dpalloc(dpmemsz, 8); 233 - if (IS_DPERR(dp_offset)) { 233 + if (IS_ERR_VALUE(dp_offset)) { 234 234 printk(KERN_ERR 235 235 "cpm_uart_cpm.c: could not allocate buffer descriptors\n"); 236 236 return -ENOMEM;
+4 -9
include/asm-powerpc/qe.h
··· 38 38 void qe_setbrg(u32 brg, u32 rate); 39 39 int qe_get_snum(void); 40 40 void qe_put_snum(u8 snum); 41 - u32 qe_muram_alloc(u32 size, u32 align); 42 - int qe_muram_free(u32 offset); 43 - u32 qe_muram_alloc_fixed(u32 offset, u32 size); 41 + unsigned long qe_muram_alloc(int size, int align); 42 + int qe_muram_free(unsigned long offset); 43 + unsigned long qe_muram_alloc_fixed(unsigned long offset, int size); 44 44 void qe_muram_dump(void); 45 - void *qe_muram_addr(u32 offset); 45 + void *qe_muram_addr(unsigned long offset); 46 46 47 47 /* Buffer descriptors */ 48 48 struct qe_bd { ··· 447 447 #define UCC_FAST_FUNCTION_CODE_GBL 0x20 448 448 #define UCC_FAST_FUNCTION_CODE_DTB_LCL 0x02 449 449 #define UCC_FAST_FUNCTION_CODE_BDB_LCL 0x01 450 - 451 - static inline long IS_MURAM_ERR(const u32 offset) 452 - { 453 - return offset > (u32) - 1000L; 454 - } 455 450 456 451 #endif /* __KERNEL__ */ 457 452 #endif /* _ASM_POWERPC_QE_H */
+4 -9
include/asm-ppc/commproc.h
··· 63 63 #define CPM_DATAONLY_SIZE ((uint)0x0700) 64 64 #define CPM_DP_NOSPACE ((uint)0x7fffffff) 65 65 66 - static inline long IS_DPERR(const uint offset) 67 - { 68 - return (uint)offset > (uint)-1000L; 69 - } 70 - 71 66 /* Export the base address of the communication processor registers 72 67 * and dual port ram. 73 68 */ 74 69 extern cpm8xx_t *cpmp; /* Pointer to comm processor */ 75 - extern uint cpm_dpalloc(uint size, uint align); 76 - extern int cpm_dpfree(uint offset); 77 - extern uint cpm_dpalloc_fixed(uint offset, uint size, uint align); 70 + extern unsigned long cpm_dpalloc(uint size, uint align); 71 + extern int cpm_dpfree(unsigned long offset); 72 + extern unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align); 78 73 extern void cpm_dpdump(void); 79 - extern void *cpm_dpram_addr(uint offset); 74 + extern void *cpm_dpram_addr(unsigned long offset); 80 75 extern uint cpm_dpram_phys(u8* addr); 81 76 extern void cpm_setbrg(uint brg, uint rate); 82 77
+4 -9
include/asm-ppc/cpm2.h
··· 104 104 */ 105 105 #define NUM_CPM_HOST_PAGES 2 106 106 107 - static inline long IS_DPERR(const uint offset) 108 - { 109 - return (uint)offset > (uint)-1000L; 110 - } 111 - 112 107 /* Export the base address of the communication processor registers 113 108 * and dual port ram. 114 109 */ 115 110 extern cpm_cpm2_t *cpmp; /* Pointer to comm processor */ 116 111 117 - extern uint cpm_dpalloc(uint size, uint align); 118 - extern int cpm_dpfree(uint offset); 119 - extern uint cpm_dpalloc_fixed(uint offset, uint size, uint align); 112 + extern unsigned long cpm_dpalloc(uint size, uint align); 113 + extern int cpm_dpfree(unsigned long offset); 114 + extern unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align); 120 115 extern void cpm_dpdump(void); 121 - extern void *cpm_dpram_addr(uint offset); 116 + extern void *cpm_dpram_addr(unsigned long offset); 122 117 extern void cpm_setbrg(uint brg, uint rate); 123 118 extern void cpm2_fastbrg(uint brg, uint rate, int div16); 124 119 extern void cpm2_reset(void);
+10 -10
include/asm-ppc/rheap.h
··· 18 18 19 19 typedef struct _rh_block { 20 20 struct list_head list; 21 - void *start; 21 + unsigned long start; 22 22 int size; 23 23 const char *owner; 24 24 } rh_block_t; ··· 37 37 #define RHIF_STATIC_INFO 0x1 38 38 #define RHIF_STATIC_BLOCK 0x2 39 39 40 - typedef struct rh_stats_t { 41 - void *start; 40 + typedef struct _rh_stats { 41 + unsigned long start; 42 42 int size; 43 43 const char *owner; 44 44 } rh_stats_t; ··· 57 57 rh_block_t * block); 58 58 59 59 /* Attach a free region to manage */ 60 - extern int rh_attach_region(rh_info_t * info, void *start, int size); 60 + extern int rh_attach_region(rh_info_t * info, unsigned long start, int size); 61 61 62 62 /* Detach a free region */ 63 - extern void *rh_detach_region(rh_info_t * info, void *start, int size); 63 + extern unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size); 64 64 65 65 /* Allocate the given size from the remote heap (with alignment) */ 66 - extern void *rh_alloc_align(rh_info_t * info, int size, int alignment, 66 + extern unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, 67 67 const char *owner); 68 68 69 69 /* Allocate the given size from the remote heap */ 70 - extern void *rh_alloc(rh_info_t * info, int size, const char *owner); 70 + extern unsigned long rh_alloc(rh_info_t * info, int size, const char *owner); 71 71 72 72 /* Allocate the given size from the given address */ 73 - extern void *rh_alloc_fixed(rh_info_t * info, void *start, int size, 73 + extern unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, 74 74 const char *owner); 75 75 76 76 /* Free the allocated area */ 77 - extern int rh_free(rh_info_t * info, void *start); 77 + extern int rh_free(rh_info_t * info, unsigned long start); 78 78 79 79 /* Get stats for debugging purposes */ 80 80 extern int rh_get_stats(rh_info_t * info, int what, int max_stats, ··· 84 84 extern void rh_dump(rh_info_t * info); 85 85 86 86 /* Set owner of taken block */ 87 - 
extern int rh_set_owner(rh_info_t * info, void *start, const char *owner); 87 + extern int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner); 88 88 89 89 #endif /* __ASM_PPC_RHEAP_H__ */