Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: tidspbridge: set6 remove hungarian from structs

hungarian notation will be removed from the elements inside
structures, the following variables will be renamed:

Original: Replacement:
pfn_write write
pf_phase_split phase_split
ul_alignment alignment
ul_bufsize bufsize
ul_bufsize_rms bufsize_rms
ul_chnl_buf_size chnl_buf_size
ul_chnl_offset chnl_offset
ul_code_mem_seg_mask code_mem_seg_mask
ul_dais_arg dais_arg
ul_data1 data1
ul_data_mem_seg_mask data_mem_seg_mask
ul_dsp_addr dsp_addr
ul_dsp_res_addr dsp_res_addr
ul_dsp_size dsp_size
ul_dsp_va dsp_va
ul_dsp_virt dsp_virt
ul_entry entry
ul_external_mem_size external_mem_size
ul_fxn_addrs fxn_addrs
ul_gpp_pa gpp_pa

Signed-off-by: Rene Sapiens <rene.sapiens@ti.com>
Signed-off-by: Armando Uribe <x0095078@ti.com>
Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>

authored by

Rene Sapiens and committed by
Omar Ramirez Luna
dab7f7fe 09f13304

+123 -123
+25 -25
drivers/staging/tidspbridge/core/io_sm.c
··· 121 121 u32 ul_gpp_read_pointer; /* GPP Read pointer to Trace buffer */ 122 122 u8 *pmsg; 123 123 u32 ul_gpp_va; 124 - u32 ul_dsp_va; 124 + u32 dsp_va; 125 125 #endif 126 126 /* IO Dpc */ 127 127 u32 dpc_req; /* Number of requested DPC's. */ ··· 421 421 ul_gpp_va = host_res->mem_base[1]; 422 422 /* This is the virtual uncached ioremapped address!!! */ 423 423 /* Why can't we directly take the DSPVA from the symbols? */ 424 - ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt; 424 + ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt; 425 425 ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size; 426 426 ul_seg1_size = 427 427 (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size; ··· 527 527 * This is the physical address written to 528 528 * DSP MMU. 529 529 */ 530 - ae_proc[ndx].ul_gpp_pa = pa_curr; 530 + ae_proc[ndx].gpp_pa = pa_curr; 531 531 /* 532 532 * This is the virtual uncached ioremapped 533 533 * address!!! 534 534 */ 535 535 ae_proc[ndx].ul_gpp_va = gpp_va_curr; 536 - ae_proc[ndx].ul_dsp_va = 536 + ae_proc[ndx].dsp_va = 537 537 va_curr / hio_mgr->word_size; 538 538 ae_proc[ndx].ul_size = page_size[i]; 539 539 ae_proc[ndx].endianism = HW_LITTLE_ENDIAN; ··· 541 541 ae_proc[ndx].mixed_mode = HW_MMU_CPUES; 542 542 dev_dbg(bridge, "shm MMU TLB entry PA %x" 543 543 " VA %x DSP_VA %x Size %x\n", 544 - ae_proc[ndx].ul_gpp_pa, 544 + ae_proc[ndx].gpp_pa, 545 545 ae_proc[ndx].ul_gpp_va, 546 - ae_proc[ndx].ul_dsp_va * 546 + ae_proc[ndx].dsp_va * 547 547 hio_mgr->word_size, page_size[i]); 548 548 ndx++; 549 549 } else { ··· 556 556 dev_dbg(bridge, 557 557 "shm MMU PTE entry PA %x" 558 558 " VA %x DSP_VA %x Size %x\n", 559 - ae_proc[ndx].ul_gpp_pa, 559 + ae_proc[ndx].gpp_pa, 560 560 ae_proc[ndx].ul_gpp_va, 561 - ae_proc[ndx].ul_dsp_va * 561 + ae_proc[ndx].dsp_va * 562 562 hio_mgr->word_size, page_size[i]); 563 563 if (status) 564 564 goto func_end; ··· 587 587 ul_gpp_pa - 0x100000 588 588 && hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys <= 589 589 
ul_gpp_pa + ul_seg_size) 590 - || (hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt > 590 + || (hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt > 591 591 ul_dsp_va - 0x100000 / hio_mgr->word_size 592 - && hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt <= 592 + && hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt <= 593 593 ul_dsp_va + ul_seg_size / hio_mgr->word_size)) { 594 594 dev_dbg(bridge, 595 595 "CDB MMU entry %d conflicts with " 596 596 "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: " 597 597 "GppPa %x, DspVa %x, Bytes %x.\n", i, 598 598 hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys, 599 - hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt, 599 + hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt, 600 600 ul_gpp_pa, ul_dsp_va, ul_seg_size); 601 601 status = -EPERM; 602 602 } else { 603 603 if (ndx < MAX_LOCK_TLB_ENTRIES) { 604 - ae_proc[ndx].ul_dsp_va = 604 + ae_proc[ndx].dsp_va = 605 605 hio_mgr->ext_proc_info.ty_tlb[i]. 606 - ul_dsp_virt; 607 - ae_proc[ndx].ul_gpp_pa = 606 + dsp_virt; 607 + ae_proc[ndx].gpp_pa = 608 608 hio_mgr->ext_proc_info.ty_tlb[i]. 609 609 ul_gpp_phys; 610 610 ae_proc[ndx].ul_gpp_va = 0; 611 611 /* 1 MB */ 612 612 ae_proc[ndx].ul_size = 0x100000; 613 613 dev_dbg(bridge, "shm MMU entry PA %x " 614 - "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa, 615 - ae_proc[ndx].ul_dsp_va); 614 + "DSP_VA 0x%x\n", ae_proc[ndx].gpp_pa, 615 + ae_proc[ndx].dsp_va); 616 616 ndx++; 617 617 } else { 618 618 status = hio_mgr->intf_fxns->brd_mem_map ··· 620 620 hio_mgr->ext_proc_info.ty_tlb[i]. 621 621 ul_gpp_phys, 622 622 hio_mgr->ext_proc_info.ty_tlb[i]. 
623 - ul_dsp_virt, 0x100000, map_attrs, 623 + dsp_virt, 0x100000, map_attrs, 624 624 NULL); 625 625 } 626 626 } ··· 647 647 } 648 648 649 649 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { 650 - ae_proc[i].ul_dsp_va = 0; 651 - ae_proc[i].ul_gpp_pa = 0; 650 + ae_proc[i].dsp_va = 0; 651 + ae_proc[i].gpp_pa = 0; 652 652 ae_proc[i].ul_gpp_va = 0; 653 653 ae_proc[i].ul_size = 0; 654 654 } ··· 668 668 status = -EFAULT; 669 669 goto func_end; 670 670 } else { 671 - if (ae_proc[0].ul_dsp_va > ul_shm_base) { 671 + if (ae_proc[0].dsp_va > ul_shm_base) { 672 672 status = -EPERM; 673 673 goto func_end; 674 674 } 675 675 /* ul_shm_base may not be at ul_dsp_va address */ 676 - ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) * 676 + ul_shm_base_offset = (ul_shm_base - ae_proc[0].dsp_va) * 677 677 hio_mgr->word_size; 678 678 /* 679 679 * bridge_dev_ctrl() will set dev context dsp-mmu info. In ··· 698 698 } 699 699 /* Register SM */ 700 700 status = 701 - register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa); 701 + register_shm_segs(hio_mgr, cod_man, ae_proc[0].gpp_pa); 702 702 } 703 703 704 704 hio_mgr->shared_mem = (struct shm *)ul_shm_base; ··· 771 771 if (!hio_mgr->pmsg) 772 772 status = -ENOMEM; 773 773 774 - hio_mgr->ul_dsp_va = ul_dsp_va; 774 + hio_mgr->dsp_va = ul_dsp_va; 775 775 hio_mgr->ul_gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size); 776 776 777 777 #endif ··· 1544 1544 ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys; 1545 1545 /* Get size in bytes */ 1546 1546 ul_dsp_virt = 1547 - hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt * 1547 + hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt * 1548 1548 hio_mgr->word_size; 1549 1549 /* 1550 1550 * Calc byte offset used to convert GPP phys <-> DSP byte ··· 1694 1694 *(u32 *) (hio_mgr->ul_trace_buffer_current); 1695 1695 ul_gpp_cur_pointer = 1696 1696 hio_mgr->ul_gpp_va + (ul_gpp_cur_pointer - 1697 - hio_mgr->ul_dsp_va); 1697 + hio_mgr->dsp_va); 1698 1698 1699 1699 /* No new debug messages available yet */ 
1700 1700 if (ul_gpp_cur_pointer == hio_mgr->ul_gpp_read_pointer) {
+8 -8
drivers/staging/tidspbridge/core/tiomap3430.c
··· 401 401 ul_shm_base_virt *= DSPWORDSIZE; 402 402 DBC_ASSERT(ul_shm_base_virt != 0); 403 403 /* DSP Virtual address */ 404 - ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va; 404 + ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va; 405 405 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 406 406 ul_shm_offset_virt = 407 407 ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE); ··· 466 466 .mixed_size = e->mixed_mode, 467 467 }; 468 468 469 - if (!e->ul_gpp_pa || !e->ul_dsp_va) 469 + if (!e->gpp_pa || !e->dsp_va) 470 470 continue; 471 471 472 472 dev_dbg(bridge, 473 473 "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x", 474 474 itmp_entry_ndx, 475 - e->ul_gpp_pa, 476 - e->ul_dsp_va, 475 + e->gpp_pa, 476 + e->dsp_va, 477 477 e->ul_size); 478 478 479 479 hw_mmu_tlb_add(dev_context->dsp_mmu_base, 480 - e->ul_gpp_pa, 481 - e->ul_dsp_va, 480 + e->gpp_pa, 481 + e->dsp_va, 482 482 e->ul_size, 483 483 itmp_entry_ndx, 484 484 &map_attrs, 1, 1); ··· 771 771 /* Clear dev context MMU table entries. 772 772 * These get set on bridge_io_on_loaded() call after program loaded. */ 773 773 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) { 774 - dev_context->atlb_entry[entry_ndx].ul_gpp_pa = 775 - dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0; 774 + dev_context->atlb_entry[entry_ndx].gpp_pa = 775 + dev_context->atlb_entry[entry_ndx].dsp_va = 0; 776 776 } 777 777 dev_context->dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *) 778 778 (config_param->
+2 -2
drivers/staging/tidspbridge/core/tiomap_io.c
··· 134 134 135 135 if (!status) { 136 136 ul_tlb_base_virt = 137 - dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE; 137 + dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE; 138 138 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 139 139 dw_ext_prog_virt_mem = 140 140 dev_context->atlb_entry[0].ul_gpp_va; ··· 319 319 320 320 if (!ret) { 321 321 ul_tlb_base_virt = 322 - dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE; 322 + dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE; 323 323 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 324 324 325 325 if (symbols_reloaded) {
+2 -2
drivers/staging/tidspbridge/gen/uuidutil.c
··· 45 45 46 46 i = snprintf(sz_uuid, size, 47 47 "%.8X_%.4X_%.4X_%.2X%.2X_%.2X%.2X%.2X%.2X%.2X%.2X", 48 - uuid_obj->ul_data1, uuid_obj->us_data2, uuid_obj->us_data3, 48 + uuid_obj->data1, uuid_obj->us_data2, uuid_obj->us_data3, 49 49 uuid_obj->uc_data4, uuid_obj->uc_data5, 50 50 uuid_obj->uc_data6[0], uuid_obj->uc_data6[1], 51 51 uuid_obj->uc_data6[2], uuid_obj->uc_data6[3], ··· 79 79 { 80 80 s32 j; 81 81 82 - uuid_obj->ul_data1 = uuid_hex_to_bin(sz_uuid, 8); 82 + uuid_obj->data1 = uuid_hex_to_bin(sz_uuid, 8); 83 83 sz_uuid += 8; 84 84 85 85 /* Step over underscore */
+2 -2
drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h
··· 29 29 /* Attributes for CMM_AllocBuf() & CMM_AllocDesc() */ 30 30 struct cmm_attrs { 31 31 u32 ul_seg_id; /* 1,2... are SM segments. 0 is not. */ 32 - u32 ul_alignment; /* 0,1,2,4....ul_min_block_size */ 32 + u32 alignment; /* 0,1,2,4....ul_min_block_size */ 33 33 }; 34 34 35 35 /* ··· 57 57 u32 gpp_base_pa; /* Start Phys addr of Gpp SM seg */ 58 58 u32 ul_gpp_size; /* Size of Gpp SM seg in bytes */ 59 59 u32 dsp_base_va; /* DSP virt base byte address */ 60 - u32 ul_dsp_size; /* DSP seg size in bytes */ 60 + u32 dsp_size; /* DSP seg size in bytes */ 61 61 /* # of current GPP allocations from this segment */ 62 62 u32 ul_in_use_cnt; 63 63 u32 seg_base_va; /* Start Virt address of SM seg */
+2 -2
drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h
··· 55 55 56 56 /* Dynamic load properties */ 57 57 u16 us_load_type; /* Static, dynamic, overlay */ 58 - u32 ul_data_mem_seg_mask; /* Data memory requirements */ 59 - u32 ul_code_mem_seg_mask; /* Code memory requirements */ 58 + u32 data_mem_seg_mask; /* Data memory requirements */ 59 + u32 code_mem_seg_mask; /* Code memory requirements */ 60 60 }; 61 61 62 62 /* DCD Generic Object Type */
+2 -2
drivers/staging/tidspbridge/include/dspbridge/dbdefs.h
··· 99 99 100 100 /* The Node UUID structure */ 101 101 struct dsp_uuid { 102 - u32 ul_data1; 102 + u32 data1; 103 103 u16 us_data2; 104 104 u16 us_data3; 105 105 u8 uc_data4; ··· 359 359 int processor_type; 360 360 u32 clock_rate; 361 361 u32 ul_internal_mem_size; 362 - u32 ul_external_mem_size; 362 + u32 external_mem_size; 363 363 u32 processor_id; 364 364 int ty_running_rtos; 365 365 s32 node_min_priority;
+2 -2
drivers/staging/tidspbridge/include/dspbridge/disp.h
··· 27 27 28 28 /* Node Dispatcher attributes */ 29 29 struct disp_attr { 30 - u32 ul_chnl_offset; /* Offset of channel ids reserved for RMS */ 30 + u32 chnl_offset; /* Offset of channel ids reserved for RMS */ 31 31 /* Size of buffer for sending data to RMS */ 32 - u32 ul_chnl_buf_size; 32 + u32 chnl_buf_size; 33 33 int proc_family; /* eg, 5000 */ 34 34 int proc_type; /* eg, 5510 */ 35 35 void *reserved1; /* Reserved for future use. */
+2 -2
drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
··· 55 55 #define BRDIOCTL_NUMOFMMUTLB 32 56 56 57 57 struct bridge_ioctl_extproc { 58 - u32 ul_dsp_va; /* DSP virtual address */ 59 - u32 ul_gpp_pa; /* GPP physical address */ 58 + u32 dsp_va; /* DSP virtual address */ 59 + u32 gpp_pa; /* GPP physical address */ 60 60 /* GPP virtual address. __va does not work for ioremapped addresses */ 61 61 u32 ul_gpp_va; 62 62 u32 ul_size; /* Size of the mapped memory in bytes */
+1 -1
drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h
··· 28 28 struct mgr_object; 29 29 30 30 struct mgr_tlbentry { 31 - u32 ul_dsp_virt; /* DSP virtual address */ 31 + u32 dsp_virt; /* DSP virtual address */ 32 32 u32 ul_gpp_phys; /* GPP physical address */ 33 33 }; 34 34
+1 -1
drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
··· 83 83 */ 84 84 struct nldr_attrs { 85 85 nldr_ovlyfxn ovly; 86 - nldr_writefxn pfn_write; 86 + nldr_writefxn write; 87 87 u16 us_dsp_word_size; 88 88 u16 us_dsp_mau_size; 89 89 };
+1 -1
drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
··· 62 62 u32 profile_id; /* Profile ID */ 63 63 u32 num_inputs; 64 64 u32 num_outputs; 65 - u32 ul_dais_arg; /* Address of iAlg object */ 65 + u32 dais_arg; /* Address of iAlg object */ 66 66 struct node_strmdef *strm_in_def; 67 67 struct node_strmdef *strm_out_def; 68 68 };
+11 -11
drivers/staging/tidspbridge/pmgr/cmm.c
··· 70 70 * SM space */ 71 71 s8 c_factor; /* DSPPa to GPPPa Conversion Factor */ 72 72 unsigned int dsp_base; /* DSP virt base byte address */ 73 - u32 ul_dsp_size; /* DSP seg size in bytes */ 73 + u32 dsp_size; /* DSP seg size in bytes */ 74 74 struct cmm_object *hcmm_mgr; /* back ref to parent mgr */ 75 75 /* node list of available memory */ 76 76 struct list_head free_list; ··· 439 439 continue; 440 440 cmm_info_obj->ul_num_gppsm_segs++; 441 441 cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa = 442 - altr->shm_base - altr->ul_dsp_size; 442 + altr->shm_base - altr->dsp_size; 443 443 cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size = 444 - altr->ul_dsp_size + altr->ul_sm_size; 444 + altr->dsp_size + altr->ul_sm_size; 445 445 cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa = 446 446 altr->shm_base; 447 447 cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size = 448 448 altr->ul_sm_size; 449 449 cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va = 450 450 altr->dsp_base; 451 - cmm_info_obj->seg_info[ul_seg - 1].ul_dsp_size = 452 - altr->ul_dsp_size; 451 + cmm_info_obj->seg_info[ul_seg - 1].dsp_size = 452 + altr->dsp_size; 453 453 cmm_info_obj->seg_info[ul_seg - 1].seg_base_va = 454 - altr->vm_base - altr->ul_dsp_size; 454 + altr->vm_base - altr->dsp_size; 455 455 cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0; 456 456 457 457 list_for_each_entry(curr, &altr->in_use_list, link) { ··· 543 543 psma->dsp_phys_addr_offset = dsp_addr_offset; 544 544 psma->c_factor = c_factor; 545 545 psma->dsp_base = dw_dsp_base; 546 - psma->ul_dsp_size = ul_dsp_size; 546 + psma->dsp_size = ul_dsp_size; 547 547 if (psma->vm_base == 0) { 548 548 status = -EPERM; 549 549 goto func_end; ··· 968 968 /* Gpp Va = Va Base + offset */ 969 969 dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base - 970 970 allocator-> 971 - ul_dsp_size); 971 + dsp_size); 972 972 dw_addr_xlate = xlator_obj->virt_base + dw_offset; 973 973 /* Check if translated Va base is in range */ 974 974 if ((dw_addr_xlate < 
xlator_obj->virt_base) || ··· 982 982 dw_offset = 983 983 (u8 *) paddr - (u8 *) xlator_obj->virt_base; 984 984 dw_addr_xlate = 985 - allocator->shm_base - allocator->ul_dsp_size + 985 + allocator->shm_base - allocator->dsp_size + 986 986 dw_offset; 987 987 } 988 988 } else { ··· 992 992 if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) { 993 993 /* Got Gpp Pa now, convert to DSP Pa */ 994 994 dw_addr_xlate = 995 - GPPPA2DSPPA((allocator->shm_base - allocator->ul_dsp_size), 995 + GPPPA2DSPPA((allocator->shm_base - allocator->dsp_size), 996 996 dw_addr_xlate, 997 997 allocator->dsp_phys_addr_offset * 998 998 allocator->c_factor); 999 999 } else if (xtype == CMM_DSPPA2PA) { 1000 1000 /* Got DSP Pa, convert to GPP Pa */ 1001 1001 dw_addr_xlate = 1002 - DSPPA2GPPPA(allocator->shm_base - allocator->ul_dsp_size, 1002 + DSPPA2GPPPA(allocator->shm_base - allocator->dsp_size, 1003 1003 dw_addr_xlate, 1004 1004 allocator->dsp_phys_addr_offset * 1005 1005 allocator->c_factor);
+3 -3
drivers/staging/tidspbridge/pmgr/cod.c
··· 47 47 struct dbll_tar_obj *target; 48 48 struct dbll_library_obj *base_lib; 49 49 bool loaded; /* Base library loaded? */ 50 - u32 ul_entry; 50 + u32 entry; 51 51 struct dbll_fxns fxns; 52 52 struct dbll_attrs attrs; 53 53 char sz_zl_file[COD_MAXPATHLENGTH]; ··· 346 346 DBC_REQUIRE(cod_mgr_obj); 347 347 DBC_REQUIRE(entry_pt != NULL); 348 348 349 - *entry_pt = cod_mgr_obj->ul_entry; 349 + *entry_pt = cod_mgr_obj->entry; 350 350 351 351 return 0; 352 352 } ··· 516 516 flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB; 517 517 status = cod_mgr_obj->fxns.load_fxn(cod_mgr_obj->base_lib, flags, 518 518 &new_attrs, 519 - &cod_mgr_obj->ul_entry); 519 + &cod_mgr_obj->entry); 520 520 if (status) 521 521 cod_mgr_obj->fxns.close_fxn(cod_mgr_obj->base_lib); 522 522
+4 -4
drivers/staging/tidspbridge/rmgr/dbdcd.c
··· 1227 1227 1228 1228 /* Dynamic load data requirements */ 1229 1229 if (token) { 1230 - gen_obj->obj_data.node_obj.ul_data_mem_seg_mask = 1230 + gen_obj->obj_data.node_obj.data_mem_seg_mask = 1231 1231 atoi(token); 1232 1232 token = strsep(&psz_cur, seps); 1233 1233 } 1234 1234 1235 1235 /* Dynamic load code requirements */ 1236 1236 if (token) { 1237 - gen_obj->obj_data.node_obj.ul_code_mem_seg_mask = 1237 + gen_obj->obj_data.node_obj.code_mem_seg_mask = 1238 1238 atoi(token); 1239 1239 token = strsep(&psz_cur, seps); 1240 1240 } ··· 1288 1288 gen_obj->obj_data.proc_info.ul_internal_mem_size = atoi(token); 1289 1289 token = strsep(&psz_cur, seps); 1290 1290 1291 - gen_obj->obj_data.proc_info.ul_external_mem_size = atoi(token); 1291 + gen_obj->obj_data.proc_info.external_mem_size = atoi(token); 1292 1292 token = strsep(&psz_cur, seps); 1293 1293 1294 1294 gen_obj->obj_data.proc_info.processor_id = atoi(token); ··· 1312 1312 1313 1313 token = strsep(&psz_cur, seps); 1314 1314 gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id]. 1315 - ul_dsp_virt = atoi(token); 1315 + dsp_virt = atoi(token); 1316 1316 } 1317 1317 #endif 1318 1318
+9 -9
drivers/staging/tidspbridge/rmgr/disp.c
··· 65 65 struct chnl_object *chnl_to_dsp; /* Chnl for commands to RMS */ 66 66 struct chnl_object *chnl_from_dsp; /* Chnl for replies from RMS */ 67 67 u8 *pbuf; /* Buffer for commands, replies */ 68 - u32 ul_bufsize; /* pbuf size in bytes */ 69 - u32 ul_bufsize_rms; /* pbuf size in RMS words */ 68 + u32 bufsize; /* buf size in bytes */ 69 + u32 bufsize_rms; /* buf size in RMS words */ 70 70 u32 char_size; /* Size of DSP character */ 71 71 u32 word_size; /* Size of DSP word */ 72 72 u32 data_mau_size; /* Size of DSP Data MAU */ ··· 140 140 /* Open channels for communicating with the RMS */ 141 141 chnl_attr_obj.uio_reqs = CHNLIOREQS; 142 142 chnl_attr_obj.event_obj = NULL; 143 - ul_chnl_id = disp_attrs->ul_chnl_offset + CHNLTORMSOFFSET; 143 + ul_chnl_id = disp_attrs->chnl_offset + CHNLTORMSOFFSET; 144 144 status = (*intf_fxns->chnl_open) (&(disp_obj->chnl_to_dsp), 145 145 disp_obj->hchnl_mgr, 146 146 CHNL_MODETODSP, ul_chnl_id, 147 147 &chnl_attr_obj); 148 148 149 149 if (!status) { 150 - ul_chnl_id = disp_attrs->ul_chnl_offset + CHNLFROMRMSOFFSET; 150 + ul_chnl_id = disp_attrs->chnl_offset + CHNLFROMRMSOFFSET; 151 151 status = 152 152 (*intf_fxns->chnl_open) (&(disp_obj->chnl_from_dsp), 153 153 disp_obj->hchnl_mgr, ··· 156 156 } 157 157 if (!status) { 158 158 /* Allocate buffer for commands, replies */ 159 - disp_obj->ul_bufsize = disp_attrs->ul_chnl_buf_size; 160 - disp_obj->ul_bufsize_rms = RMS_COMMANDBUFSIZE; 161 - disp_obj->pbuf = kzalloc(disp_obj->ul_bufsize, GFP_KERNEL); 159 + disp_obj->bufsize = disp_attrs->chnl_buf_size; 160 + disp_obj->bufsize_rms = RMS_COMMANDBUFSIZE; 161 + disp_obj->pbuf = kzalloc(disp_obj->bufsize, GFP_KERNEL); 162 162 if (disp_obj->pbuf == NULL) 163 163 status = -ENOMEM; 164 164 } ··· 295 295 DBC_REQUIRE(pargs != NULL); 296 296 node_type = node_get_type(hnode); 297 297 node_msg_args = pargs->asa.node_msg_args; 298 - max = disp_obj->ul_bufsize_rms; /*Max # of RMS words that can be sent */ 298 + max = disp_obj->bufsize_rms; /*Max # of 
RMS words that can be sent */ 299 299 DBC_ASSERT(max == RMS_COMMANDBUFSIZE); 300 300 chars_in_rms_word = sizeof(rms_word) / disp_obj->char_size; 301 301 /* Number of RMS words needed to hold arg data */ ··· 404 404 more_task_args->stack_seg = task_arg_obj.stack_seg; 405 405 more_task_args->heap_addr = task_arg_obj.udsp_heap_addr; 406 406 more_task_args->heap_size = task_arg_obj.heap_size; 407 - more_task_args->misc = task_arg_obj.ul_dais_arg; 407 + more_task_args->misc = task_arg_obj.dais_arg; 408 408 more_task_args->num_input_streams = 409 409 task_arg_obj.num_inputs; 410 410 total +=
+26 -26
drivers/staging/tidspbridge/rmgr/nldr.c
··· 220 220 struct dsp_uuid uuid; /* Node's UUID */ 221 221 bool dynamic; /* Dynamically loaded node? */ 222 222 bool overlay; /* Overlay node? */ 223 - bool *pf_phase_split; /* Multiple phase libraries? */ 223 + bool *phase_split; /* Multiple phase libraries? */ 224 224 struct lib_node root; /* Library containing node phase */ 225 225 struct lib_node create_lib; /* Library with create phase lib */ 226 226 struct lib_node execute_lib; /* Library with execute phase lib */ ··· 326 326 if (nldr_node_obj == NULL) { 327 327 status = -ENOMEM; 328 328 } else { 329 - nldr_node_obj->pf_phase_split = pf_phase_split; 329 + nldr_node_obj->phase_split = pf_phase_split; 330 330 nldr_node_obj->pers_libs = 0; 331 331 nldr_node_obj->nldr_obj = nldr_obj; 332 332 nldr_node_obj->priv_ref = priv_ref; ··· 344 344 */ 345 345 /* Create phase */ 346 346 nldr_node_obj->seg_id[CREATEDATAFLAGBIT] = (u16) 347 - (node_props->ul_data_mem_seg_mask >> CREATEBIT) & 347 + (node_props->data_mem_seg_mask >> CREATEBIT) & 348 348 SEGMASK; 349 349 nldr_node_obj->code_data_flag_mask |= 350 - ((node_props->ul_data_mem_seg_mask >> 350 + ((node_props->data_mem_seg_mask >> 351 351 (CREATEBIT + FLAGBIT)) & 1) << CREATEDATAFLAGBIT; 352 352 nldr_node_obj->seg_id[CREATECODEFLAGBIT] = (u16) 353 - (node_props->ul_code_mem_seg_mask >> 353 + (node_props->code_mem_seg_mask >> 354 354 CREATEBIT) & SEGMASK; 355 355 nldr_node_obj->code_data_flag_mask |= 356 - ((node_props->ul_code_mem_seg_mask >> 356 + ((node_props->code_mem_seg_mask >> 357 357 (CREATEBIT + FLAGBIT)) & 1) << CREATECODEFLAGBIT; 358 358 /* Execute phase */ 359 359 nldr_node_obj->seg_id[EXECUTEDATAFLAGBIT] = (u16) 360 - (node_props->ul_data_mem_seg_mask >> 360 + (node_props->data_mem_seg_mask >> 361 361 EXECUTEBIT) & SEGMASK; 362 362 nldr_node_obj->code_data_flag_mask |= 363 - ((node_props->ul_data_mem_seg_mask >> 363 + ((node_props->data_mem_seg_mask >> 364 364 (EXECUTEBIT + FLAGBIT)) & 1) << 365 365 EXECUTEDATAFLAGBIT; 366 366 
nldr_node_obj->seg_id[EXECUTECODEFLAGBIT] = (u16) 367 - (node_props->ul_code_mem_seg_mask >> 367 + (node_props->code_mem_seg_mask >> 368 368 EXECUTEBIT) & SEGMASK; 369 369 nldr_node_obj->code_data_flag_mask |= 370 - ((node_props->ul_code_mem_seg_mask >> 370 + ((node_props->code_mem_seg_mask >> 371 371 (EXECUTEBIT + FLAGBIT)) & 1) << 372 372 EXECUTECODEFLAGBIT; 373 373 /* Delete phase */ 374 374 nldr_node_obj->seg_id[DELETEDATAFLAGBIT] = (u16) 375 - (node_props->ul_data_mem_seg_mask >> DELETEBIT) & 375 + (node_props->data_mem_seg_mask >> DELETEBIT) & 376 376 SEGMASK; 377 377 nldr_node_obj->code_data_flag_mask |= 378 - ((node_props->ul_data_mem_seg_mask >> 378 + ((node_props->data_mem_seg_mask >> 379 379 (DELETEBIT + FLAGBIT)) & 1) << DELETEDATAFLAGBIT; 380 380 nldr_node_obj->seg_id[DELETECODEFLAGBIT] = (u16) 381 - (node_props->ul_code_mem_seg_mask >> 381 + (node_props->code_mem_seg_mask >> 382 382 DELETEBIT) & SEGMASK; 383 383 nldr_node_obj->code_data_flag_mask |= 384 - ((node_props->ul_code_mem_seg_mask >> 384 + ((node_props->code_mem_seg_mask >> 385 385 (DELETEBIT + FLAGBIT)) & 1) << DELETECODEFLAGBIT; 386 386 } else { 387 387 /* Non-dynamically loaded nodes are part of the ··· 430 430 DBC_REQUIRE(hdev_obj != NULL); 431 431 DBC_REQUIRE(pattrs != NULL); 432 432 DBC_REQUIRE(pattrs->ovly != NULL); 433 - DBC_REQUIRE(pattrs->pfn_write != NULL); 433 + DBC_REQUIRE(pattrs->write != NULL); 434 434 435 435 /* Allocate dynamic loader object */ 436 436 nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL); ··· 533 533 new_attrs.free = (dbll_free_fxn) remote_free; 534 534 new_attrs.sym_lookup = (dbll_sym_lookup) get_symbol_value; 535 535 new_attrs.sym_handle = nldr_obj; 536 - new_attrs.write = (dbll_write_fxn) pattrs->pfn_write; 536 + new_attrs.write = (dbll_write_fxn) pattrs->write; 537 537 nldr_obj->ovly_fxn = pattrs->ovly; 538 - nldr_obj->write_fxn = pattrs->pfn_write; 538 + nldr_obj->write_fxn = pattrs->write; 539 539 nldr_obj->ldr_attrs = new_attrs; 540 540 } 541 541 
kfree(rmm_segs); ··· 678 678 679 679 nldr_obj = nldr_node_obj->nldr_obj; 680 680 /* Called from node_create(), node_delete(), or node_run(). */ 681 - if (nldr_node_obj->dynamic && *nldr_node_obj->pf_phase_split) { 681 + if (nldr_node_obj->dynamic && *nldr_node_obj->phase_split) { 682 682 switch (nldr_node_obj->phase) { 683 683 case NLDR_CREATE: 684 684 root = nldr_node_obj->create_lib; ··· 821 821 false, nldr_node_obj->lib_path, phase, 0); 822 822 823 823 if (!status) { 824 - if (*nldr_node_obj->pf_phase_split) { 824 + if (*nldr_node_obj->phase_split) { 825 825 switch (phase) { 826 826 case NLDR_CREATE: 827 827 nldr_node_obj->create_lib = ··· 868 868 869 869 if (nldr_node_obj != NULL) { 870 870 if (nldr_node_obj->dynamic) { 871 - if (*nldr_node_obj->pf_phase_split) { 871 + if (*nldr_node_obj->phase_split) { 872 872 switch (phase) { 873 873 case NLDR_CREATE: 874 874 root_lib = &nldr_node_obj->create_lib; ··· 1264 1264 dcd_get_library_name(nldr_node_obj->nldr_obj-> 1265 1265 hdcd_mgr, &uuid, psz_file_name, 1266 1266 &dw_buf_size, phase, 1267 - nldr_node_obj->pf_phase_split); 1267 + nldr_node_obj->phase_split); 1268 1268 } else { 1269 1269 /* Dependent libraries are registered with a phase */ 1270 1270 status = ··· 1314 1314 } 1315 1315 DBC_ASSERT(nd_libs >= np_libs); 1316 1316 if (!status) { 1317 - if (!(*nldr_node_obj->pf_phase_split)) 1317 + if (!(*nldr_node_obj->phase_split)) 1318 1318 np_libs = 0; 1319 1319 1320 1320 /* nd_libs = #of dependent libraries */ ··· 1359 1359 * is, then record it. 
If root library IS persistent, 1360 1360 * the deplib is already included */ 1361 1361 if (!root_prstnt && persistent_dep_libs[i] && 1362 - *nldr_node_obj->pf_phase_split) { 1362 + *nldr_node_obj->phase_split) { 1363 1363 if ((nldr_node_obj->pers_libs) >= MAXLIBS) { 1364 1364 status = -EILSEQ; 1365 1365 break; ··· 1385 1385 if (!status) { 1386 1386 if ((status != 0) && 1387 1387 !root_prstnt && persistent_dep_libs[i] && 1388 - *nldr_node_obj->pf_phase_split) { 1388 + *nldr_node_obj->phase_split) { 1389 1389 (nldr_node_obj->pers_libs)++; 1390 1390 } else { 1391 1391 if (!persistent_dep_libs[i] || 1392 - !(*nldr_node_obj->pf_phase_split)) { 1392 + !(*nldr_node_obj->phase_split)) { 1393 1393 nd_libs_loaded++; 1394 1394 } 1395 1395 } ··· 1903 1903 pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, (u32) nldr_node, 1904 1904 sym_addr, offset_range, (u32) offset_output, sym_name); 1905 1905 1906 - if (nldr_node->dynamic && *nldr_node->pf_phase_split) { 1906 + if (nldr_node->dynamic && *nldr_node->phase_split) { 1907 1907 switch (nldr_node->phase) { 1908 1908 case NLDR_CREATE: 1909 1909 root = nldr_node->create_lib;
+20 -20
drivers/staging/tidspbridge/rmgr/node.c
··· 142 142 DECLARE_BITMAP(zc_chnl_map, CHNL_MAXCHANNELS); 143 143 struct ntfy_object *ntfy_obj; /* Manages registered notifications */ 144 144 struct mutex node_mgr_lock; /* For critical sections */ 145 - u32 ul_fxn_addrs[NUMRMSFXNS]; /* RMS function addresses */ 145 + u32 fxn_addrs[NUMRMSFXNS]; /* RMS function addresses */ 146 146 struct msg_mgr *msg_mgr_obj; 147 147 148 148 /* Processor properties needed by Node Dispatcher */ 149 149 u32 ul_num_chnls; /* Total number of channels */ 150 - u32 ul_chnl_offset; /* Offset of chnl ids rsvd for RMS */ 151 - u32 ul_chnl_buf_size; /* Buffer size for data to RMS */ 150 + u32 chnl_offset; /* Offset of chnl ids rsvd for RMS */ 151 + u32 chnl_buf_size; /* Buffer size for data to RMS */ 152 152 int proc_family; /* eg, 5000 */ 153 153 int proc_type; /* eg, 5510 */ 154 154 u32 udsp_word_size; /* Size of DSP word on host bytes */ ··· 367 367 } 368 368 369 369 /* Assuming that 0 is not a valid function address */ 370 - if (hnode_mgr->ul_fxn_addrs[0] == 0) { 370 + if (hnode_mgr->fxn_addrs[0] == 0) { 371 371 /* No RMS on target - we currently can't handle this */ 372 372 pr_err("%s: Failed, no RMS in base image\n", __func__); 373 373 status = -EPERM; ··· 813 813 status = 814 814 disp_node_change_priority(hnode_mgr->disp_obj, 815 815 hnode, 816 - hnode_mgr->ul_fxn_addrs 816 + hnode_mgr->fxn_addrs 817 817 [RMSCHANGENODEPRIORITY], 818 818 hnode->node_env, prio); 819 819 } ··· 1216 1216 hnode->dcd_props.obj_data.node_obj. 1217 1217 pstr_i_alg_name, 1218 1218 &hnode->create_args.asa. 
1219 - task_arg_obj.ul_dais_arg); 1219 + task_arg_obj.dais_arg); 1220 1220 } 1221 1221 } 1222 1222 } 1223 1223 if (!status) { 1224 1224 if (node_type != NODE_DEVICE) { 1225 1225 status = disp_node_create(hnode_mgr->disp_obj, hnode, 1226 - hnode_mgr->ul_fxn_addrs 1226 + hnode_mgr->fxn_addrs 1227 1227 [RMSCREATENODE], 1228 1228 ul_create_fxn, 1229 1229 &(hnode->create_args), ··· 1324 1324 goto out_err; 1325 1325 1326 1326 /* Create NODE Dispatcher */ 1327 - disp_attr_obj.ul_chnl_offset = node_mgr_obj->ul_chnl_offset; 1328 - disp_attr_obj.ul_chnl_buf_size = node_mgr_obj->ul_chnl_buf_size; 1327 + disp_attr_obj.chnl_offset = node_mgr_obj->chnl_offset; 1328 + disp_attr_obj.chnl_buf_size = node_mgr_obj->chnl_buf_size; 1329 1329 disp_attr_obj.proc_family = node_mgr_obj->proc_family; 1330 1330 disp_attr_obj.proc_type = node_mgr_obj->proc_type; 1331 1331 ··· 1344 1344 mutex_init(&node_mgr_obj->node_mgr_lock); 1345 1345 1346 1346 /* Block out reserved channels */ 1347 - for (i = 0; i < node_mgr_obj->ul_chnl_offset; i++) 1347 + for (i = 0; i < node_mgr_obj->chnl_offset; i++) 1348 1348 set_bit(i, node_mgr_obj->chnl_map); 1349 1349 1350 1350 /* Block out channels reserved for RMS */ 1351 - set_bit(node_mgr_obj->ul_chnl_offset, node_mgr_obj->chnl_map); 1352 - set_bit(node_mgr_obj->ul_chnl_offset + 1, node_mgr_obj->chnl_map); 1351 + set_bit(node_mgr_obj->chnl_offset, node_mgr_obj->chnl_map); 1352 + set_bit(node_mgr_obj->chnl_offset + 1, node_mgr_obj->chnl_map); 1353 1353 1354 1354 /* NO RM Server on the IVA */ 1355 1355 if (dev_type != IVA_UNIT) { ··· 1363 1363 node_mgr_obj->nldr_fxns = nldr_fxns; /* Dyn loader funcs */ 1364 1364 1365 1365 nldr_attrs_obj.ovly = ovly; 1366 - nldr_attrs_obj.pfn_write = mem_write; 1366 + nldr_attrs_obj.write = mem_write; 1367 1367 nldr_attrs_obj.us_dsp_word_size = node_mgr_obj->udsp_word_size; 1368 1368 nldr_attrs_obj.us_dsp_mau_size = node_mgr_obj->udsp_mau_size; 1369 1369 node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.init(); ··· 1489 1489 
status = 1490 1490 disp_node_delete(disp_obj, pnode, 1491 1491 hnode_mgr-> 1492 - ul_fxn_addrs 1492 + fxn_addrs 1493 1493 [RMSDELETENODE], 1494 1494 ul_delete_fxn, 1495 1495 pnode->node_env); ··· 2012 2012 } 2013 2013 2014 2014 status = disp_node_change_priority(hnode_mgr->disp_obj, hnode, 2015 - hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY], 2015 + hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY], 2016 2016 hnode->node_env, NODE_SUSPENDEDPRI); 2017 2017 2018 2018 /* Update state */ ··· 2274 2274 } 2275 2275 } 2276 2276 if (!status) { 2277 - ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSEXECUTENODE]; 2277 + ul_fxn_addr = hnode_mgr->fxn_addrs[RMSEXECUTENODE]; 2278 2278 status = 2279 2279 disp_node_run(hnode_mgr->disp_obj, hnode, 2280 2280 ul_fxn_addr, ul_execute_fxn, 2281 2281 hnode->node_env); 2282 2282 } 2283 2283 } else if (state == NODE_PAUSED) { 2284 - ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY]; 2284 + ul_fxn_addr = hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY]; 2285 2285 status = disp_node_change_priority(hnode_mgr->disp_obj, hnode, 2286 2286 ul_fxn_addr, hnode->node_env, 2287 2287 NODE_GET_PRIORITY(hnode)); ··· 2902 2902 host_res = pbridge_context->resources; 2903 2903 if (!host_res) 2904 2904 return -EPERM; 2905 - hnode_mgr->ul_chnl_offset = host_res->chnl_offset; 2906 - hnode_mgr->ul_chnl_buf_size = host_res->chnl_buf_size; 2905 + hnode_mgr->chnl_offset = host_res->chnl_offset; 2906 + hnode_mgr->chnl_buf_size = host_res->chnl_buf_size; 2907 2907 hnode_mgr->ul_num_chnls = host_res->num_chnls; 2908 2908 2909 2909 /* ··· 3024 3024 3025 3025 for (i = 0; i < NUMRMSFXNS; i++) { 3026 3026 status = dev_get_symbol(dev_obj, psz_fxns[i], 3027 - &(hnode_mgr->ul_fxn_addrs[i])); 3027 + &(hnode_mgr->fxn_addrs[i])); 3028 3028 if (status) { 3029 3029 if (status == -ESPIPE) { 3030 3030 /*