Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: tidspbridge: set7 remove hungarian from structs

hungarian notation will be removed from the elements inside
structures, the next variables will be renamed:

Original: Replacement:
ul_gpp_phys gpp_phys
ul_gpp_read_pointer gpp_read_pointer
ul_gpp_size gpp_size
ul_gpp_va gpp_va
ul_heap_size heap_size
ul_internal_mem_size internal_mem_size
ul_in_use_cnt in_use_cnt
ul_len_max_free_block len_max_free_block
ul_max max
ul_min_block_size min_block_size
ul_min min
ul_mpu_addr mpu_addr
ul_n_bytes bytes
ul_num_alloc_blocks num_alloc_blocks
ul_number_bytes number_bytes
ul_num_chnls num_chnls
ul_num_free_blocks num_free_blocks
ul_num_gppsm_segs num_gppsm_segs
ul_pos pos
ul_reserved reserved

Signed-off-by: Rene Sapiens <rene.sapiens@ti.com>
Signed-off-by: Armando Uribe <x0095078@ti.com>
Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>

authored by

Rene Sapiens and committed by
Omar Ramirez Luna
6c66e948 dab7f7fe

+90 -90
+31 -31
drivers/staging/tidspbridge/core/io_sm.c
··· 118 118 u32 ul_trace_buffer_begin; /* Trace message start address */ 119 119 u32 ul_trace_buffer_end; /* Trace message end address */ 120 120 u32 ul_trace_buffer_current; /* Trace message current address */ 121 - u32 ul_gpp_read_pointer; /* GPP Read pointer to Trace buffer */ 121 + u32 gpp_read_pointer; /* GPP Read pointer to Trace buffer */ 122 122 u8 *pmsg; 123 - u32 ul_gpp_va; 123 + u32 gpp_va; 124 124 u32 dsp_va; 125 125 #endif 126 126 /* IO Dpc */ ··· 532 532 * This is the virtual uncached ioremapped 533 533 * address!!! 534 534 */ 535 - ae_proc[ndx].ul_gpp_va = gpp_va_curr; 535 + ae_proc[ndx].gpp_va = gpp_va_curr; 536 536 ae_proc[ndx].dsp_va = 537 537 va_curr / hio_mgr->word_size; 538 538 ae_proc[ndx].ul_size = page_size[i]; ··· 542 542 dev_dbg(bridge, "shm MMU TLB entry PA %x" 543 543 " VA %x DSP_VA %x Size %x\n", 544 544 ae_proc[ndx].gpp_pa, 545 - ae_proc[ndx].ul_gpp_va, 545 + ae_proc[ndx].gpp_va, 546 546 ae_proc[ndx].dsp_va * 547 547 hio_mgr->word_size, page_size[i]); 548 548 ndx++; ··· 557 557 "shm MMU PTE entry PA %x" 558 558 " VA %x DSP_VA %x Size %x\n", 559 559 ae_proc[ndx].gpp_pa, 560 - ae_proc[ndx].ul_gpp_va, 560 + ae_proc[ndx].gpp_va, 561 561 ae_proc[ndx].dsp_va * 562 562 hio_mgr->word_size, page_size[i]); 563 563 if (status) ··· 580 580 * should not conflict with shm entries on MPU or DSP side. 
581 581 */ 582 582 for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) { 583 - if (hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys == 0) 583 + if (hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys == 0) 584 584 continue; 585 585 586 - if ((hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys > 586 + if ((hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys > 587 587 ul_gpp_pa - 0x100000 588 - && hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys <= 588 + && hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys <= 589 589 ul_gpp_pa + ul_seg_size) 590 590 || (hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt > 591 591 ul_dsp_va - 0x100000 / hio_mgr->word_size ··· 595 595 "CDB MMU entry %d conflicts with " 596 596 "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: " 597 597 "GppPa %x, DspVa %x, Bytes %x.\n", i, 598 - hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys, 598 + hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys, 599 599 hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt, 600 600 ul_gpp_pa, ul_dsp_va, ul_seg_size); 601 601 status = -EPERM; ··· 606 606 dsp_virt; 607 607 ae_proc[ndx].gpp_pa = 608 608 hio_mgr->ext_proc_info.ty_tlb[i]. 609 - ul_gpp_phys; 610 - ae_proc[ndx].ul_gpp_va = 0; 609 + gpp_phys; 610 + ae_proc[ndx].gpp_va = 0; 611 611 /* 1 MB */ 612 612 ae_proc[ndx].ul_size = 0x100000; 613 613 dev_dbg(bridge, "shm MMU entry PA %x " ··· 618 618 status = hio_mgr->intf_fxns->brd_mem_map 619 619 (hio_mgr->hbridge_context, 620 620 hio_mgr->ext_proc_info.ty_tlb[i]. 621 - ul_gpp_phys, 621 + gpp_phys, 622 622 hio_mgr->ext_proc_info.ty_tlb[i]. 623 623 dsp_virt, 0x100000, map_attrs, 624 624 NULL); ··· 649 649 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { 650 650 ae_proc[i].dsp_va = 0; 651 651 ae_proc[i].gpp_pa = 0; 652 - ae_proc[i].ul_gpp_va = 0; 652 + ae_proc[i].gpp_va = 0; 653 653 ae_proc[i].ul_size = 0; 654 654 } 655 655 /* ··· 657 657 * to the virtual uncached ioremapped address of shm reserved 658 658 * on MPU. 
659 659 */ 660 - hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys = 660 + hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys = 661 661 (ul_gpp_va + ul_seg1_size + ul_pad_size); 662 662 663 663 /* 664 664 * Need shm Phys addr. IO supports only one DSP for now: 665 665 * num_procs = 1. 666 666 */ 667 - if (!hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys || num_procs != 1) { 667 + if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys || num_procs != 1) { 668 668 status = -EFAULT; 669 669 goto func_end; 670 670 } else { ··· 688 688 ae_proc); 689 689 if (status) 690 690 goto func_end; 691 - ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys; 691 + ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys; 692 692 ul_shm_base += ul_shm_base_offset; 693 693 ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base, 694 694 ul_mem_length); ··· 740 740 goto func_end; 741 741 } 742 742 743 - hio_mgr->ul_gpp_read_pointer = hio_mgr->ul_trace_buffer_begin = 743 + hio_mgr->gpp_read_pointer = hio_mgr->ul_trace_buffer_begin = 744 744 (ul_gpp_va + ul_seg1_size + ul_pad_size) + 745 745 (hio_mgr->ul_trace_buffer_begin - ul_dsp_va); 746 746 /* Get the end address of trace buffer */ ··· 772 772 status = -ENOMEM; 773 773 774 774 hio_mgr->dsp_va = ul_dsp_va; 775 - hio_mgr->ul_gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size); 775 + hio_mgr->gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size); 776 776 777 777 #endif 778 778 func_end: ··· 1541 1541 goto func_end; 1542 1542 } 1543 1543 /* First TLB entry reserved for Bridge SM use. 
*/ 1544 - ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys; 1544 + ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys; 1545 1545 /* Get size in bytes */ 1546 1546 ul_dsp_virt = 1547 1547 hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt * ··· 1693 1693 ul_gpp_cur_pointer = 1694 1694 *(u32 *) (hio_mgr->ul_trace_buffer_current); 1695 1695 ul_gpp_cur_pointer = 1696 - hio_mgr->ul_gpp_va + (ul_gpp_cur_pointer - 1696 + hio_mgr->gpp_va + (ul_gpp_cur_pointer - 1697 1697 hio_mgr->dsp_va); 1698 1698 1699 1699 /* No new debug messages available yet */ 1700 - if (ul_gpp_cur_pointer == hio_mgr->ul_gpp_read_pointer) { 1700 + if (ul_gpp_cur_pointer == hio_mgr->gpp_read_pointer) { 1701 1701 break; 1702 - } else if (ul_gpp_cur_pointer > hio_mgr->ul_gpp_read_pointer) { 1702 + } else if (ul_gpp_cur_pointer > hio_mgr->gpp_read_pointer) { 1703 1703 /* Continuous data */ 1704 1704 ul_new_message_length = 1705 - ul_gpp_cur_pointer - hio_mgr->ul_gpp_read_pointer; 1705 + ul_gpp_cur_pointer - hio_mgr->gpp_read_pointer; 1706 1706 1707 1707 memcpy(hio_mgr->pmsg, 1708 - (char *)hio_mgr->ul_gpp_read_pointer, 1708 + (char *)hio_mgr->gpp_read_pointer, 1709 1709 ul_new_message_length); 1710 1710 hio_mgr->pmsg[ul_new_message_length] = '\0'; 1711 1711 /* 1712 1712 * Advance the GPP trace pointer to DSP current 1713 1713 * pointer. 
1714 1714 */ 1715 - hio_mgr->ul_gpp_read_pointer += ul_new_message_length; 1715 + hio_mgr->gpp_read_pointer += ul_new_message_length; 1716 1716 /* Print the trace messages */ 1717 1717 pr_info("DSPTrace: %s\n", hio_mgr->pmsg); 1718 - } else if (ul_gpp_cur_pointer < hio_mgr->ul_gpp_read_pointer) { 1718 + } else if (ul_gpp_cur_pointer < hio_mgr->gpp_read_pointer) { 1719 1719 /* Handle trace buffer wraparound */ 1720 1720 memcpy(hio_mgr->pmsg, 1721 - (char *)hio_mgr->ul_gpp_read_pointer, 1721 + (char *)hio_mgr->gpp_read_pointer, 1722 1722 hio_mgr->ul_trace_buffer_end - 1723 - hio_mgr->ul_gpp_read_pointer); 1723 + hio_mgr->gpp_read_pointer); 1724 1724 ul_new_message_length = 1725 1725 ul_gpp_cur_pointer - hio_mgr->ul_trace_buffer_begin; 1726 1726 memcpy(&hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end - 1727 - hio_mgr->ul_gpp_read_pointer], 1727 + hio_mgr->gpp_read_pointer], 1728 1728 (char *)hio_mgr->ul_trace_buffer_begin, 1729 1729 ul_new_message_length); 1730 1730 hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end - 1731 - hio_mgr->ul_gpp_read_pointer + 1731 + hio_mgr->gpp_read_pointer + 1732 1732 ul_new_message_length] = '\0'; 1733 1733 /* 1734 1734 * Advance the GPP trace pointer to DSP current 1735 1735 * pointer. 1736 1736 */ 1737 - hio_mgr->ul_gpp_read_pointer = 1737 + hio_mgr->gpp_read_pointer = 1738 1738 hio_mgr->ul_trace_buffer_begin + 1739 1739 ul_new_message_length; 1740 1740 /* Print the trace messages */
+1 -1
drivers/staging/tidspbridge/core/tiomap3430.c
··· 406 406 ul_shm_offset_virt = 407 407 ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE); 408 408 /* Kernel logical address */ 409 - ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt; 409 + ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt; 410 410 411 411 DBC_ASSERT(ul_shm_base != 0); 412 412 /* 2nd wd is used as sync field */
+2 -2
drivers/staging/tidspbridge/core/tiomap_io.c
··· 137 137 dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE; 138 138 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 139 139 dw_ext_prog_virt_mem = 140 - dev_context->atlb_entry[0].ul_gpp_va; 140 + dev_context->atlb_entry[0].gpp_va; 141 141 142 142 if (!trace_read) { 143 143 ul_shm_offset_virt = ··· 337 337 ul_shm_base_virt - ul_tlb_base_virt; 338 338 if (trace_load) { 339 339 dw_ext_prog_virt_mem = 340 - dev_context->atlb_entry[0].ul_gpp_va; 340 + dev_context->atlb_entry[0].gpp_va; 341 341 } else { 342 342 dw_ext_prog_virt_mem = host_res->mem_base[1]; 343 343 dw_ext_prog_virt_mem +=
+1 -1
drivers/staging/tidspbridge/include/dspbridge/cmm.h
··· 81 81 * Requires: 82 82 * cmm_init(void) called. 83 83 * ph_cmm_mgr != NULL. 84 - * mgr_attrts->ul_min_block_size >= 4 bytes. 84 + * mgr_attrts->min_block_size >= 4 bytes. 85 85 * Ensures: 86 86 * 87 87 */
+6 -6
drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h
··· 23 23 /* Cmm attributes used in cmm_create() */ 24 24 struct cmm_mgrattrs { 25 25 /* Minimum SM allocation; default 32 bytes. */ 26 - u32 ul_min_block_size; 26 + u32 min_block_size; 27 27 }; 28 28 29 29 /* Attributes for CMM_AllocBuf() & CMM_AllocDesc() */ 30 30 struct cmm_attrs { 31 31 u32 ul_seg_id; /* 1,2... are SM segments. 0 is not. */ 32 - u32 alignment; /* 0,1,2,4....ul_min_block_size */ 32 + u32 alignment; /* 0,1,2,4....min_block_size */ 33 33 }; 34 34 35 35 /* ··· 55 55 /* Total size in bytes of segment: DSP+GPP */ 56 56 u32 ul_total_seg_size; 57 57 u32 gpp_base_pa; /* Start Phys addr of Gpp SM seg */ 58 - u32 ul_gpp_size; /* Size of Gpp SM seg in bytes */ 58 + u32 gpp_size; /* Size of Gpp SM seg in bytes */ 59 59 u32 dsp_base_va; /* DSP virt base byte address */ 60 60 u32 dsp_size; /* DSP seg size in bytes */ 61 61 /* # of current GPP allocations from this segment */ 62 - u32 ul_in_use_cnt; 62 + u32 in_use_cnt; 63 63 u32 seg_base_va; /* Start Virt address of SM seg */ 64 64 65 65 }; ··· 67 67 /* CMM useful information */ 68 68 struct cmm_info { 69 69 /* # of SM segments registered with this Cmm. */ 70 - u32 ul_num_gppsm_segs; 70 + u32 num_gppsm_segs; 71 71 /* Total # of allocations outstanding for CMM */ 72 72 u32 ul_total_in_use_cnt; 73 73 /* Min SM block size allocation from cmm_create() */ 74 - u32 ul_min_block_size; 74 + u32 min_block_size; 75 75 /* Info per registered SM segment. */ 76 76 struct cmm_seginfo seg_info[CMM_MAXGPPSEGS]; 77 77 };
+6 -6
drivers/staging/tidspbridge/include/dspbridge/dbdefs.h
··· 210 210 struct dsp_memstat { 211 211 u32 ul_size; 212 212 u32 ul_total_free_size; 213 - u32 ul_len_max_free_block; 214 - u32 ul_num_free_blocks; 215 - u32 ul_num_alloc_blocks; 213 + u32 len_max_free_block; 214 + u32 num_free_blocks; 215 + u32 num_alloc_blocks; 216 216 }; 217 217 218 218 /* Processor Load information Values */ ··· 276 276 }; 277 277 278 278 struct dsp_nodeprofs { 279 - u32 ul_heap_size; 279 + u32 heap_size; 280 280 }; 281 281 282 282 /* The dsp_ndbprops structure reports the attributes of a node */ ··· 358 358 int processor_family; 359 359 int processor_type; 360 360 u32 clock_rate; 361 - u32 ul_internal_mem_size; 361 + u32 internal_mem_size; 362 362 u32 external_mem_size; 363 363 u32 processor_id; 364 364 int ty_running_rtos; ··· 425 425 u32 cb_struct; 426 426 u32 number_bufs_allowed; 427 427 u32 number_bufs_in_stream; 428 - u32 ul_number_bytes; 428 + u32 number_bytes; 429 429 void *sync_object_handle; 430 430 enum dsp_streamstate ss_stream_state; 431 431 };
+1 -1
drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
··· 58 58 u32 dsp_va; /* DSP virtual address */ 59 59 u32 gpp_pa; /* GPP physical address */ 60 60 /* GPP virtual address. __va does not work for ioremapped addresses */ 61 - u32 ul_gpp_va; 61 + u32 gpp_va; 62 62 u32 ul_size; /* Size of the mapped memory in bytes */ 63 63 enum hw_endianism_t endianism; 64 64 enum hw_mmu_mixed_size_t mixed_mode;
+1 -1
drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h
··· 29 29 30 30 struct mgr_tlbentry { 31 31 u32 dsp_virt; /* DSP virtual address */ 32 - u32 ul_gpp_phys; /* GPP physical address */ 32 + u32 gpp_phys; /* GPP physical address */ 33 33 }; 34 34 35 35 /*
+15 -15
drivers/staging/tidspbridge/pmgr/cmm.c
··· 98 98 */ 99 99 struct mutex cmm_lock; /* Lock to access cmm mgr */ 100 100 struct list_head node_free_list; /* Free list of memory nodes */ 101 - u32 ul_min_block_size; /* Min SM block; default 16 bytes */ 101 + u32 min_block_size; /* Min SM block; default 16 bytes */ 102 102 u32 page_size; /* Memory Page size (1k/4k) */ 103 103 /* GPP SM segment ptrs */ 104 104 struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS]; ··· 106 106 107 107 /* Default CMM Mgr attributes */ 108 108 static struct cmm_mgrattrs cmm_dfltmgrattrs = { 109 - /* ul_min_block_size, min block size(bytes) allocated by cmm mgr */ 109 + /* min_block_size, min block size(bytes) allocated by cmm mgr */ 110 110 16 111 111 }; 112 112 ··· 185 185 /* get the allocator object for this segment id */ 186 186 allocator = 187 187 get_allocator(cmm_mgr_obj, pattrs->ul_seg_id); 188 - /* keep block size a multiple of ul_min_block_size */ 188 + /* keep block size a multiple of min_block_size */ 189 189 usize = 190 - ((usize - 1) & ~(cmm_mgr_obj->ul_min_block_size - 190 + ((usize - 1) & ~(cmm_mgr_obj->min_block_size - 191 191 1)) 192 - + cmm_mgr_obj->ul_min_block_size; 192 + + cmm_mgr_obj->min_block_size; 193 193 mutex_lock(&cmm_mgr_obj->cmm_lock); 194 194 pnode = get_free_block(allocator, usize); 195 195 } 196 196 if (pnode) { 197 197 delta_size = (pnode->ul_size - usize); 198 - if (delta_size >= cmm_mgr_obj->ul_min_block_size) { 198 + if (delta_size >= cmm_mgr_obj->min_block_size) { 199 199 /* create a new block with the leftovers and 200 200 * add to freelist */ 201 201 new_node = ··· 257 257 mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */ 258 258 259 259 /* 4 bytes minimum */ 260 - DBC_ASSERT(mgr_attrts->ul_min_block_size >= 4); 260 + DBC_ASSERT(mgr_attrts->min_block_size >= 4); 261 261 /* save away smallest block allocation for this cmm mgr */ 262 - cmm_obj->ul_min_block_size = mgr_attrts->ul_min_block_size; 262 + cmm_obj->min_block_size = mgr_attrts->min_block_size; 263 263 cmm_obj->page_size = 
PAGE_SIZE; 264 264 265 265 /* create node free list */ ··· 426 426 return status; 427 427 } 428 428 mutex_lock(&cmm_mgr_obj->cmm_lock); 429 - cmm_info_obj->ul_num_gppsm_segs = 0; /* # of SM segments */ 429 + cmm_info_obj->num_gppsm_segs = 0; /* # of SM segments */ 430 430 /* Total # of outstanding alloc */ 431 431 cmm_info_obj->ul_total_in_use_cnt = 0; 432 432 /* min block size */ 433 - cmm_info_obj->ul_min_block_size = cmm_mgr_obj->ul_min_block_size; 433 + cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size; 434 434 /* check SM memory segments */ 435 435 for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) { 436 436 /* get the allocator object for this segment id */ 437 437 altr = get_allocator(cmm_mgr_obj, ul_seg); 438 438 if (!altr) 439 439 continue; 440 - cmm_info_obj->ul_num_gppsm_segs++; 440 + cmm_info_obj->num_gppsm_segs++; 441 441 cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa = 442 442 altr->shm_base - altr->dsp_size; 443 443 cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size = 444 444 altr->dsp_size + altr->ul_sm_size; 445 445 cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa = 446 446 altr->shm_base; 447 - cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size = 447 + cmm_info_obj->seg_info[ul_seg - 1].gpp_size = 448 448 altr->ul_sm_size; 449 449 cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va = 450 450 altr->dsp_base; ··· 452 452 altr->dsp_size; 453 453 cmm_info_obj->seg_info[ul_seg - 1].seg_base_va = 454 454 altr->vm_base - altr->dsp_size; 455 - cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0; 455 + cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0; 456 456 457 457 list_for_each_entry(curr, &altr->in_use_list, link) { 458 458 cmm_info_obj->ul_total_in_use_cnt++; 459 - cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt++; 459 + cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++; 460 460 } 461 461 } 462 462 mutex_unlock(&cmm_mgr_obj->cmm_lock); ··· 524 524 } 525 525 526 526 /* Check if input ul_size is big enough to alloc at least one block */ 527 - if 
(ul_size < cmm_mgr_obj->ul_min_block_size) { 527 + if (ul_size < cmm_mgr_obj->min_block_size) { 528 528 status = -EINVAL; 529 529 goto func_end; 530 530 }
+6 -6
drivers/staging/tidspbridge/pmgr/dbll.c
··· 123 123 u32 open_ref; /* Number of times opened */ 124 124 u32 load_ref; /* Number of times loaded */ 125 125 struct gh_t_hash_tab *sym_tab; /* Hash table of symbols */ 126 - u32 ul_pos; 126 + u32 pos; 127 127 }; 128 128 129 129 /* ··· 398 398 399 399 } else { 400 400 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, 401 - zl_lib->ul_pos, 401 + zl_lib->pos, 402 402 SEEK_SET); 403 403 } 404 404 } else { ··· 522 522 523 523 } 524 524 if (!status) { 525 - zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell)) 525 + zl_lib->pos = (*(zl_lib->target_obj->attrs.ftell)) 526 526 (zl_lib->fp); 527 527 /* Reset file cursor */ 528 528 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, ··· 599 599 if (zl_lib == NULL) { 600 600 status = -ENOMEM; 601 601 } else { 602 - zl_lib->ul_pos = 0; 602 + zl_lib->pos = 0; 603 603 /* Increment ref count to allow close on failure 604 604 * later on */ 605 605 zl_lib->open_ref++; ··· 649 649 if (!status && zl_lib->fp == NULL) 650 650 status = dof_open(zl_lib); 651 651 652 - zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell)) (zl_lib->fp); 652 + zl_lib->pos = (*(zl_lib->target_obj->attrs.ftell)) (zl_lib->fp); 653 653 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0, SEEK_SET); 654 654 /* Create a hash table for symbols if flag is set */ 655 655 if (zl_lib->sym_tab != NULL || !(flags & DBLL_SYMB)) ··· 738 738 739 739 } else { 740 740 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, 741 - zl_lib->ul_pos, 741 + zl_lib->pos, 742 742 SEEK_SET); 743 743 } 744 744 } else {
+3 -3
drivers/staging/tidspbridge/rmgr/dbdcd.c
··· 1253 1253 /* Heap Size for the node */ 1254 1254 gen_obj->obj_data.node_obj. 1255 1255 ndb_props.node_profiles[i]. 1256 - ul_heap_size = atoi(token); 1256 + heap_size = atoi(token); 1257 1257 } 1258 1258 } 1259 1259 } ··· 1285 1285 gen_obj->obj_data.proc_info.clock_rate = atoi(token); 1286 1286 token = strsep(&psz_cur, seps); 1287 1287 1288 - gen_obj->obj_data.proc_info.ul_internal_mem_size = atoi(token); 1288 + gen_obj->obj_data.proc_info.internal_mem_size = atoi(token); 1289 1289 token = strsep(&psz_cur, seps); 1290 1290 1291 1291 gen_obj->obj_data.proc_info.external_mem_size = atoi(token); ··· 1308 1308 for (entry_id = 0; entry_id < 7; entry_id++) { 1309 1309 token = strsep(&psz_cur, seps); 1310 1310 gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id]. 1311 - ul_gpp_phys = atoi(token); 1311 + gpp_phys = atoi(token); 1312 1312 1313 1313 token = strsep(&psz_cur, seps); 1314 1314 gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id].
+9 -9
drivers/staging/tidspbridge/rmgr/node.c
··· 146 146 struct msg_mgr *msg_mgr_obj; 147 147 148 148 /* Processor properties needed by Node Dispatcher */ 149 - u32 ul_num_chnls; /* Total number of channels */ 149 + u32 num_chnls; /* Total number of channels */ 150 150 u32 chnl_offset; /* Offset of chnl ids rsvd for RMS */ 151 151 u32 chnl_buf_size; /* Buffer size for data to RMS */ 152 152 int proc_family; /* eg, 5000 */ ··· 1003 1003 set_bit(chnl_id, hnode_mgr->dma_chnl_map); 1004 1004 /* dma chans are 2nd transport chnl set 1005 1005 * ids(e.g. 16-31) */ 1006 - chnl_id = chnl_id + hnode_mgr->ul_num_chnls; 1006 + chnl_id = chnl_id + hnode_mgr->num_chnls; 1007 1007 } 1008 1008 break; 1009 1009 case STRMMODE_ZEROCOPY: ··· 1014 1014 /* zero-copy chans are 3nd transport set 1015 1015 * (e.g. 32-47) */ 1016 1016 chnl_id = chnl_id + 1017 - (2 * hnode_mgr->ul_num_chnls); 1017 + (2 * hnode_mgr->num_chnls); 1018 1018 } 1019 1019 break; 1020 1020 case STRMMODE_PROCCOPY: ··· 2723 2723 set_bit(stream.dev_id, hnode_mgr->pipe_done_map); 2724 2724 } 2725 2725 } else if (stream.type == HOSTCONNECT) { 2726 - if (stream.dev_id < hnode_mgr->ul_num_chnls) { 2726 + if (stream.dev_id < hnode_mgr->num_chnls) { 2727 2727 clear_bit(stream.dev_id, hnode_mgr->chnl_map); 2728 - } else if (stream.dev_id < (2 * hnode_mgr->ul_num_chnls)) { 2728 + } else if (stream.dev_id < (2 * hnode_mgr->num_chnls)) { 2729 2729 /* dsp-dma */ 2730 - clear_bit(stream.dev_id - (1 * hnode_mgr->ul_num_chnls), 2730 + clear_bit(stream.dev_id - (1 * hnode_mgr->num_chnls), 2731 2731 hnode_mgr->dma_chnl_map); 2732 - } else if (stream.dev_id < (3 * hnode_mgr->ul_num_chnls)) { 2732 + } else if (stream.dev_id < (3 * hnode_mgr->num_chnls)) { 2733 2733 /* zero-copy */ 2734 - clear_bit(stream.dev_id - (2 * hnode_mgr->ul_num_chnls), 2734 + clear_bit(stream.dev_id - (2 * hnode_mgr->num_chnls), 2735 2735 hnode_mgr->zc_chnl_map); 2736 2736 } 2737 2737 } ··· 2904 2904 return -EPERM; 2905 2905 hnode_mgr->chnl_offset = host_res->chnl_offset; 2906 2906 hnode_mgr->chnl_buf_size 
= host_res->chnl_buf_size; 2907 - hnode_mgr->ul_num_chnls = host_res->num_chnls; 2907 + hnode_mgr->num_chnls = host_res->num_chnls; 2908 2908 2909 2909 /* 2910 2910 * PROC will add an API to get dsp_processorinfo.
+6 -6
drivers/staging/tidspbridge/rmgr/rmm.c
··· 371 371 /* ul_size */ 372 372 mem_stat_buf->ul_size = target->seg_tab[segid].length; 373 373 374 - /* ul_num_free_blocks */ 375 - mem_stat_buf->ul_num_free_blocks = free_blocks; 374 + /* num_free_blocks */ 375 + mem_stat_buf->num_free_blocks = free_blocks; 376 376 377 377 /* ul_total_free_size */ 378 378 mem_stat_buf->ul_total_free_size = total_free_size; 379 379 380 - /* ul_len_max_free_block */ 381 - mem_stat_buf->ul_len_max_free_block = max_free_size; 380 + /* len_max_free_block */ 381 + mem_stat_buf->len_max_free_block = max_free_size; 382 382 383 - /* ul_num_alloc_blocks */ 384 - mem_stat_buf->ul_num_alloc_blocks = 383 + /* num_alloc_blocks */ 384 + mem_stat_buf->num_alloc_blocks = 385 385 target->seg_tab[segid].number; 386 386 387 387 ret = true;
+2 -2
drivers/staging/tidspbridge/rmgr/strm.c
··· 71 71 u32 utimeout; 72 72 u32 num_bufs; /* Max # of bufs allowed in stream */ 73 73 u32 un_bufs_in_strm; /* Current # of bufs in stream */ 74 - u32 ul_n_bytes; /* bytes transferred since idled */ 74 + u32 bytes; /* bytes transferred since idled */ 75 75 /* STREAM_IDLE, STREAM_READY, ... */ 76 76 enum dsp_streamstate strm_state; 77 77 void *user_event; /* Saved for strm_get_info() */ ··· 341 341 stream_info->user_strm->number_bufs_in_stream = chnl_info_obj.cio_cs + 342 342 chnl_info_obj.cio_reqs; 343 343 /* # of bytes transferred since last call to DSPStream_Idle() */ 344 - stream_info->user_strm->ul_number_bytes = chnl_info_obj.bytes_tx; 344 + stream_info->user_strm->number_bytes = chnl_info_obj.bytes_tx; 345 345 stream_info->user_strm->sync_object_handle = chnl_info_obj.event_obj; 346 346 /* Determine stream state based on channel state and info */ 347 347 if (chnl_info_obj.state & CHNL_STATEEOS) {