Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: tidspbridge: set8 remove hungarian from structs

hungarian notation will be removed from the elements inside
structures, the following variables will be renamed:

Original: Replacement:
hbridge_context bridge_context
hchnl_mgr chnl_mgr
hcmm_mgr cmm_mgr
hdcd_mgr dcd_mgr
hdeh_mgr deh_mgr
hdev_obj dev_obj
hdrv_obj drv_obj
hmgr_obj mgr_obj
hmsg_mgr msg_mgr
hnode_mgr node_mgr
psz_last_coff last_coff
ul_resource resource
ul_seg_id seg_id
ul_size size
ul_sm_size sm_size
ul_total_free_size total_free_size
ul_total_in_use_cnt total_in_use_cnt
ul_total_seg_size total_seg_size
ul_trace_buffer_begin trace_buffer_begin
ul_trace_buffer_current trace_buffer_current
ul_trace_buffer_end trace_buffer_end
ul_unit unit
ul_virt_size virt_size
us_dsp_mau_size dsp_mau_size
us_dsp_word_size dsp_word_size

Signed-off-by: Rene Sapiens <rene.sapiens@ti.com>
Signed-off-by: Armando Uribe <x0095078@ti.com>
Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>

authored by

Rene Sapiens and committed by
Omar Ramirez Luna
085467b8 6c66e948

+405 -405
+1 -1
drivers/staging/tidspbridge/core/_deh.h
··· 25 25 26 26 /* DEH Manager: only one created per board: */ 27 27 struct deh_mgr { 28 - struct bridge_dev_context *hbridge_context; /* Bridge context. */ 28 + struct bridge_dev_context *bridge_context; /* Bridge context. */ 29 29 struct ntfy_object *ntfy_obj; /* NTFY object */ 30 30 31 31 /* MMU Fault DPC */
+1 -1
drivers/staging/tidspbridge/core/_msg_sm.h
··· 108 108 */ 109 109 struct msg_queue { 110 110 struct list_head list_elem; 111 - struct msg_mgr *hmsg_mgr; 111 + struct msg_mgr *msg_mgr; 112 112 u32 max_msgs; /* Node message depth */ 113 113 u32 msgq_id; /* Node environment pointer */ 114 114 struct list_head msg_free_list; /* Free MsgFrames ready to be filled */
+1 -1
drivers/staging/tidspbridge/core/_tiomap.h
··· 319 319 320 320 /* This Bridge driver's device context: */ 321 321 struct bridge_dev_context { 322 - struct dev_object *hdev_obj; /* Handle to Bridge device object. */ 322 + struct dev_object *dev_obj; /* Handle to Bridge device object. */ 323 323 u32 dsp_base_addr; /* Arm's API to DSP virt base addr */ 324 324 /* 325 325 * DSP External memory prog address as seen virtually by the OS on
+3 -3
drivers/staging/tidspbridge/core/chnl_sm.c
··· 388 388 chnl_mgr_obj->open_channels = 0; 389 389 chnl_mgr_obj->output_mask = 0; 390 390 chnl_mgr_obj->last_output = 0; 391 - chnl_mgr_obj->hdev_obj = hdev_obj; 391 + chnl_mgr_obj->dev_obj = hdev_obj; 392 392 spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock); 393 393 } else { 394 394 status = -ENOMEM; ··· 434 434 kfree(chnl_mgr_obj->ap_channel); 435 435 436 436 /* Set hchnl_mgr to NULL in device object. */ 437 - dev_set_chnl_mgr(chnl_mgr_obj->hdev_obj, NULL); 437 + dev_set_chnl_mgr(chnl_mgr_obj->dev_obj, NULL); 438 438 /* Free this Chnl Mgr object: */ 439 439 kfree(hchnl_mgr); 440 440 } else { ··· 508 508 if (channel_info != NULL) { 509 509 if (pchnl) { 510 510 /* Return the requested information: */ 511 - channel_info->hchnl_mgr = pchnl->chnl_mgr_obj; 511 + channel_info->chnl_mgr = pchnl->chnl_mgr_obj; 512 512 channel_info->event_obj = pchnl->user_event; 513 513 channel_info->cnhl_id = pchnl->chnl_id; 514 514 channel_info->mode = pchnl->chnl_mode;
+69 -69
drivers/staging/tidspbridge/core/io_sm.c
··· 89 89 struct io_mgr { 90 90 /* These four fields must be the first fields in a io_mgr_ struct */ 91 91 /* Bridge device context */ 92 - struct bridge_dev_context *hbridge_context; 92 + struct bridge_dev_context *bridge_context; 93 93 /* Function interface to Bridge driver */ 94 94 struct bridge_drv_interface *intf_fxns; 95 - struct dev_object *hdev_obj; /* Device this board represents */ 95 + struct dev_object *dev_obj; /* Device this board represents */ 96 96 97 97 /* These fields initialized in bridge_io_create() */ 98 - struct chnl_mgr *hchnl_mgr; 98 + struct chnl_mgr *chnl_mgr; 99 99 struct shm *shared_mem; /* Shared Memory control */ 100 100 u8 *input; /* Address of input channel */ 101 101 u8 *output; /* Address of output channel */ 102 - struct msg_mgr *hmsg_mgr; /* Message manager */ 102 + struct msg_mgr *msg_mgr; /* Message manager */ 103 103 /* Msg control for from DSP messages */ 104 104 struct msg_ctrl *msg_input_ctrl; 105 105 /* Msg control for to DSP messages */ ··· 112 112 u16 intr_val; /* Interrupt value */ 113 113 /* Private extnd proc info; mmu setup */ 114 114 struct mgr_processorextinfo ext_proc_info; 115 - struct cmm_object *hcmm_mgr; /* Shared Mem Mngr */ 115 + struct cmm_object *cmm_mgr; /* Shared Mem Mngr */ 116 116 struct work_struct io_workq; /* workqueue */ 117 117 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG) 118 - u32 ul_trace_buffer_begin; /* Trace message start address */ 119 - u32 ul_trace_buffer_end; /* Trace message end address */ 120 - u32 ul_trace_buffer_current; /* Trace message current address */ 118 + u32 trace_buffer_begin; /* Trace message start address */ 119 + u32 trace_buffer_end; /* Trace message end address */ 120 + u32 trace_buffer_current; /* Trace message current address */ 121 121 u32 gpp_read_pointer; /* GPP Read pointer to Trace buffer */ 122 122 u8 *pmsg; 123 123 u32 gpp_va; ··· 201 201 return -ENOMEM; 202 202 203 203 /* Initialize chnl_mgr object */ 204 - pio_mgr->hchnl_mgr = 
hchnl_mgr; 204 + pio_mgr->chnl_mgr = hchnl_mgr; 205 205 pio_mgr->word_size = mgr_attrts->word_size; 206 206 207 207 if (dev_type == DSP_UNIT) { ··· 220 220 } 221 221 } 222 222 223 - pio_mgr->hbridge_context = hbridge_context; 223 + pio_mgr->bridge_context = hbridge_context; 224 224 pio_mgr->shared_irq = mgr_attrts->irq_shared; 225 225 if (dsp_wdt_init()) { 226 226 bridge_io_destroy(pio_mgr); ··· 306 306 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB 307 307 }; 308 308 309 - status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context); 309 + status = dev_get_bridge_context(hio_mgr->dev_obj, &pbridge_context); 310 310 if (!pbridge_context) { 311 311 status = -EFAULT; 312 312 goto func_end; ··· 317 317 status = -EFAULT; 318 318 goto func_end; 319 319 } 320 - status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man); 320 + status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man); 321 321 if (!cod_man) { 322 322 status = -EFAULT; 323 323 goto func_end; 324 324 } 325 - hchnl_mgr = hio_mgr->hchnl_mgr; 325 + hchnl_mgr = hio_mgr->chnl_mgr; 326 326 /* The message manager is destroyed when the board is stopped. 
*/ 327 - dev_get_msg_mgr(hio_mgr->hdev_obj, &hio_mgr->hmsg_mgr); 328 - hmsg_mgr = hio_mgr->hmsg_mgr; 327 + dev_get_msg_mgr(hio_mgr->dev_obj, &hio_mgr->msg_mgr); 328 + hmsg_mgr = hio_mgr->msg_mgr; 329 329 if (!hchnl_mgr || !hmsg_mgr) { 330 330 status = -EFAULT; 331 331 goto func_end; ··· 483 483 1)) == 0)) { 484 484 status = 485 485 hio_mgr->intf_fxns-> 486 - brd_mem_map(hio_mgr->hbridge_context, 486 + brd_mem_map(hio_mgr->bridge_context, 487 487 pa_curr, va_curr, 488 488 page_size[i], map_attrs, 489 489 NULL); ··· 535 535 ae_proc[ndx].gpp_va = gpp_va_curr; 536 536 ae_proc[ndx].dsp_va = 537 537 va_curr / hio_mgr->word_size; 538 - ae_proc[ndx].ul_size = page_size[i]; 538 + ae_proc[ndx].size = page_size[i]; 539 539 ae_proc[ndx].endianism = HW_LITTLE_ENDIAN; 540 540 ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT; 541 541 ae_proc[ndx].mixed_mode = HW_MMU_CPUES; ··· 549 549 } else { 550 550 status = 551 551 hio_mgr->intf_fxns-> 552 - brd_mem_map(hio_mgr->hbridge_context, 552 + brd_mem_map(hio_mgr->bridge_context, 553 553 pa_curr, va_curr, 554 554 page_size[i], map_attrs, 555 555 NULL); ··· 609 609 gpp_phys; 610 610 ae_proc[ndx].gpp_va = 0; 611 611 /* 1 MB */ 612 - ae_proc[ndx].ul_size = 0x100000; 612 + ae_proc[ndx].size = 0x100000; 613 613 dev_dbg(bridge, "shm MMU entry PA %x " 614 614 "DSP_VA 0x%x\n", ae_proc[ndx].gpp_pa, 615 615 ae_proc[ndx].dsp_va); 616 616 ndx++; 617 617 } else { 618 618 status = hio_mgr->intf_fxns->brd_mem_map 619 - (hio_mgr->hbridge_context, 619 + (hio_mgr->bridge_context, 620 620 hio_mgr->ext_proc_info.ty_tlb[i]. 621 621 gpp_phys, 622 622 hio_mgr->ext_proc_info.ty_tlb[i]. 
··· 638 638 i = 0; 639 639 while (l4_peripheral_table[i].phys_addr) { 640 640 status = hio_mgr->intf_fxns->brd_mem_map 641 - (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr, 641 + (hio_mgr->bridge_context, l4_peripheral_table[i].phys_addr, 642 642 l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB, 643 643 map_attrs, NULL); 644 644 if (status) ··· 650 650 ae_proc[i].dsp_va = 0; 651 651 ae_proc[i].gpp_pa = 0; 652 652 ae_proc[i].gpp_va = 0; 653 - ae_proc[i].ul_size = 0; 653 + ae_proc[i].size = 0; 654 654 } 655 655 /* 656 656 * Set the shm physical address entry (grayed out in CDB file) ··· 683 683 */ 684 684 685 685 status = 686 - hio_mgr->intf_fxns->dev_cntrl(hio_mgr->hbridge_context, 686 + hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context, 687 687 BRDIOCTL_SETMMUCONFIG, 688 688 ae_proc); 689 689 if (status) ··· 734 734 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG) 735 735 /* Get the start address of trace buffer */ 736 736 status = cod_get_sym_value(cod_man, SYS_PUTCBEG, 737 - &hio_mgr->ul_trace_buffer_begin); 737 + &hio_mgr->trace_buffer_begin); 738 738 if (status) { 739 739 status = -EFAULT; 740 740 goto func_end; 741 741 } 742 742 743 - hio_mgr->gpp_read_pointer = hio_mgr->ul_trace_buffer_begin = 743 + hio_mgr->gpp_read_pointer = hio_mgr->trace_buffer_begin = 744 744 (ul_gpp_va + ul_seg1_size + ul_pad_size) + 745 - (hio_mgr->ul_trace_buffer_begin - ul_dsp_va); 745 + (hio_mgr->trace_buffer_begin - ul_dsp_va); 746 746 /* Get the end address of trace buffer */ 747 747 status = cod_get_sym_value(cod_man, SYS_PUTCEND, 748 - &hio_mgr->ul_trace_buffer_end); 748 + &hio_mgr->trace_buffer_end); 749 749 if (status) { 750 750 status = -EFAULT; 751 751 goto func_end; 752 752 } 753 - hio_mgr->ul_trace_buffer_end = 753 + hio_mgr->trace_buffer_end = 754 754 (ul_gpp_va + ul_seg1_size + ul_pad_size) + 755 - (hio_mgr->ul_trace_buffer_end - ul_dsp_va); 755 + (hio_mgr->trace_buffer_end - ul_dsp_va); 756 756 /* Get the current 
address of DSP write pointer */ 757 757 status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT, 758 - &hio_mgr->ul_trace_buffer_current); 758 + &hio_mgr->trace_buffer_current); 759 759 if (status) { 760 760 status = -EFAULT; 761 761 goto func_end; 762 762 } 763 - hio_mgr->ul_trace_buffer_current = 763 + hio_mgr->trace_buffer_current = 764 764 (ul_gpp_va + ul_seg1_size + ul_pad_size) + 765 - (hio_mgr->ul_trace_buffer_current - ul_dsp_va); 765 + (hio_mgr->trace_buffer_current - ul_dsp_va); 766 766 /* Calculate the size of trace buffer */ 767 767 kfree(hio_mgr->pmsg); 768 - hio_mgr->pmsg = kmalloc(((hio_mgr->ul_trace_buffer_end - 769 - hio_mgr->ul_trace_buffer_begin) * 768 + hio_mgr->pmsg = kmalloc(((hio_mgr->trace_buffer_end - 769 + hio_mgr->trace_buffer_begin) * 770 770 hio_mgr->word_size) + 2, GFP_KERNEL); 771 771 if (!hio_mgr->pmsg) 772 772 status = -ENOMEM; ··· 807 807 /* Inform DSP that we have no more buffers on this channel */ 808 808 set_chnl_free(sm, chnl); 809 809 810 - sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS); 810 + sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); 811 811 func_end: 812 812 return; 813 813 } ··· 829 829 if (parg[0] == MBX_PM_HIBERNATE_EN) { 830 830 dev_dbg(bridge, "PM: Hibernate command\n"); 831 831 status = pio_mgr->intf_fxns-> 832 - dev_cntrl(pio_mgr->hbridge_context, 832 + dev_cntrl(pio_mgr->bridge_context, 833 833 BRDIOCTL_PWR_HIBERNATE, parg); 834 834 if (status) 835 835 pr_err("%s: hibernate cmd failed 0x%x\n", ··· 838 838 parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt; 839 839 dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]); 840 840 status = pio_mgr->intf_fxns-> 841 - dev_cntrl(pio_mgr->hbridge_context, 841 + dev_cntrl(pio_mgr->bridge_context, 842 842 BRDIOCTL_CONSTRAINT_REQUEST, parg); 843 843 if (status) 844 844 dev_dbg(bridge, "PM: Failed to set constraint " ··· 847 847 dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n", 848 848 parg[0]); 849 849 status = pio_mgr->intf_fxns-> 
850 - dev_cntrl(pio_mgr->hbridge_context, 850 + dev_cntrl(pio_mgr->bridge_context, 851 851 BRDIOCTL_CLK_CTRL, parg); 852 852 if (status) 853 853 dev_dbg(bridge, "PM: Failed to ctrl the DSP clk" ··· 872 872 873 873 if (!pio_mgr) 874 874 goto func_end; 875 - chnl_mgr_obj = pio_mgr->hchnl_mgr; 876 - dev_get_msg_mgr(pio_mgr->hdev_obj, &msg_mgr_obj); 877 - dev_get_deh_mgr(pio_mgr->hdev_obj, &hdeh_mgr); 875 + chnl_mgr_obj = pio_mgr->chnl_mgr; 876 + dev_get_msg_mgr(pio_mgr->dev_obj, &msg_mgr_obj); 877 + dev_get_deh_mgr(pio_mgr->dev_obj, &hdeh_mgr); 878 878 if (!chnl_mgr_obj) 879 879 goto func_end; 880 880 ··· 970 970 971 971 if (!pchnl || !mbx_val) 972 972 goto func_end; 973 - chnl_mgr_obj = io_manager->hchnl_mgr; 973 + chnl_mgr_obj = io_manager->chnl_mgr; 974 974 sm = io_manager->shared_mem; 975 975 if (io_mode == IO_INPUT) { 976 976 /* ··· 1076 1076 bool notify_client = false; 1077 1077 1078 1078 sm = pio_mgr->shared_mem; 1079 - chnl_mgr_obj = pio_mgr->hchnl_mgr; 1079 + chnl_mgr_obj = pio_mgr->chnl_mgr; 1080 1080 1081 1081 /* Attempt to perform input */ 1082 1082 if (!sm->input_full) ··· 1164 1164 if (clear_chnl) { 1165 1165 /* Indicate to the DSP we have read the input */ 1166 1166 sm->input_full = 0; 1167 - sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS); 1167 + sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); 1168 1168 } 1169 1169 if (notify_client) { 1170 1170 /* Notify client with IO completion record */ ··· 1202 1202 /* Read the next message */ 1203 1203 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.cmd); 1204 1204 msg.msg.cmd = 1205 - read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr); 1205 + read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr); 1206 1206 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg1); 1207 1207 msg.msg.arg1 = 1208 - read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr); 1208 + read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr); 1209 1209 addr = (u32) &(((struct msg_dspmsg 
*)msg_input)->msg.arg2); 1210 1210 msg.msg.arg2 = 1211 - read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr); 1211 + read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr); 1212 1212 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id); 1213 1213 msg.msgq_id = 1214 - read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr); 1214 + read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr); 1215 1215 msg_input += sizeof(struct msg_dspmsg); 1216 1216 1217 1217 /* Determine which queue to put the message in */ ··· 1269 1269 /* Tell the DSP we've read the messages */ 1270 1270 msg_ctr_obj->buf_empty = true; 1271 1271 msg_ctr_obj->post_swi = true; 1272 - sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS); 1272 + sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); 1273 1273 } 1274 1274 } 1275 1275 ··· 1323 1323 struct chnl_irp *chnl_packet_obj; 1324 1324 u32 dw_dsp_f_mask; 1325 1325 1326 - chnl_mgr_obj = pio_mgr->hchnl_mgr; 1326 + chnl_mgr_obj = pio_mgr->chnl_mgr; 1327 1327 sm = pio_mgr->shared_mem; 1328 1328 /* Attempt to perform output */ 1329 1329 if (sm->output_full) ··· 1381 1381 #endif 1382 1382 sm->output_full = 1; 1383 1383 /* Indicate to the DSP we have written the output */ 1384 - sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS); 1384 + sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); 1385 1385 /* Notify client with IO completion record (keep EOS) */ 1386 1386 chnl_packet_obj->status &= CHNL_IOCSTATEOS; 1387 1387 notify_chnl_complete(pchnl, chnl_packet_obj); ··· 1428 1428 1429 1429 val = (pmsg->msg_data).msgq_id; 1430 1430 addr = (u32) &msg_output->msgq_id; 1431 - write_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr, val); 1431 + write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val); 1432 1432 1433 1433 val = (pmsg->msg_data).msg.cmd; 1434 1434 addr = (u32) &msg_output->msg.cmd; 1435 - write_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr, val); 1435 + write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, 
val); 1436 1436 1437 1437 val = (pmsg->msg_data).msg.arg1; 1438 1438 addr = (u32) &msg_output->msg.arg1; 1439 - write_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr, val); 1439 + write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val); 1440 1440 1441 1441 val = (pmsg->msg_data).msg.arg2; 1442 1442 addr = (u32) &msg_output->msg.arg2; 1443 - write_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr, val); 1443 + write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val); 1444 1444 1445 1445 msg_output++; 1446 1446 list_add_tail(&pmsg->list_elem, &hmsg_mgr->msg_free_list); ··· 1462 1462 /* Set the post SWI flag */ 1463 1463 msg_ctr_obj->post_swi = true; 1464 1464 /* Tell the DSP we have written the output. */ 1465 - sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS); 1465 + sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); 1466 1466 } 1467 1467 } 1468 1468 ··· 1518 1518 } 1519 1519 /* Register with CMM */ 1520 1520 if (!status) { 1521 - status = dev_get_cmm_mgr(hio_mgr->hdev_obj, &hio_mgr->hcmm_mgr); 1521 + status = dev_get_cmm_mgr(hio_mgr->dev_obj, &hio_mgr->cmm_mgr); 1522 1522 if (!status) { 1523 - status = cmm_un_register_gppsm_seg(hio_mgr->hcmm_mgr, 1523 + status = cmm_un_register_gppsm_seg(hio_mgr->cmm_mgr, 1524 1524 CMM_ALLSEGMENTS); 1525 1525 } 1526 1526 } ··· 1575 1575 ul_dsp_virt; 1576 1576 /* Register SM Segment 0. */ 1577 1577 status = 1578 - cmm_register_gppsm_seg(hio_mgr->hcmm_mgr, dw_gpp_base_pa, 1578 + cmm_register_gppsm_seg(hio_mgr->cmm_mgr, dw_gpp_base_pa, 1579 1579 ul_rsrvd_size, dw_offset, 1580 1580 (dw_gpp_base_pa > 1581 1581 ul_dsp_virt) ? 
CMM_ADDTODSPPA : ··· 1691 1691 while (true) { 1692 1692 /* Get the DSP current pointer */ 1693 1693 ul_gpp_cur_pointer = 1694 - *(u32 *) (hio_mgr->ul_trace_buffer_current); 1694 + *(u32 *) (hio_mgr->trace_buffer_current); 1695 1695 ul_gpp_cur_pointer = 1696 1696 hio_mgr->gpp_va + (ul_gpp_cur_pointer - 1697 1697 hio_mgr->dsp_va); ··· 1719 1719 /* Handle trace buffer wraparound */ 1720 1720 memcpy(hio_mgr->pmsg, 1721 1721 (char *)hio_mgr->gpp_read_pointer, 1722 - hio_mgr->ul_trace_buffer_end - 1722 + hio_mgr->trace_buffer_end - 1723 1723 hio_mgr->gpp_read_pointer); 1724 1724 ul_new_message_length = 1725 - ul_gpp_cur_pointer - hio_mgr->ul_trace_buffer_begin; 1726 - memcpy(&hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end - 1725 + ul_gpp_cur_pointer - hio_mgr->trace_buffer_begin; 1726 + memcpy(&hio_mgr->pmsg[hio_mgr->trace_buffer_end - 1727 1727 hio_mgr->gpp_read_pointer], 1728 - (char *)hio_mgr->ul_trace_buffer_begin, 1728 + (char *)hio_mgr->trace_buffer_begin, 1729 1729 ul_new_message_length); 1730 - hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end - 1730 + hio_mgr->pmsg[hio_mgr->trace_buffer_end - 1731 1731 hio_mgr->gpp_read_pointer + 1732 1732 ul_new_message_length] = '\0'; 1733 1733 /* ··· 1735 1735 * pointer. 
1736 1736 */ 1737 1737 hio_mgr->gpp_read_pointer = 1738 - hio_mgr->ul_trace_buffer_begin + 1738 + hio_mgr->trace_buffer_begin + 1739 1739 ul_new_message_length; 1740 1740 /* Print the trace messages */ 1741 1741 pr_info("DSPTrace: %s\n", hio_mgr->pmsg); ··· 1776 1776 struct bridge_dev_context *pbridge_context = hbridge_context; 1777 1777 struct bridge_drv_interface *intf_fxns; 1778 1778 struct dev_object *dev_obj = (struct dev_object *) 1779 - pbridge_context->hdev_obj; 1779 + pbridge_context->dev_obj; 1780 1780 1781 1781 status = dev_get_cod_mgr(dev_obj, &cod_mgr); 1782 1782 ··· 1949 1949 "ILC", "RILC", "IER", "CSR"}; 1950 1950 const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"}; 1951 1951 struct bridge_drv_interface *intf_fxns; 1952 - struct dev_object *dev_object = bridge_context->hdev_obj; 1952 + struct dev_object *dev_object = bridge_context->dev_obj; 1953 1953 1954 1954 status = dev_get_cod_mgr(dev_object, &code_mgr); 1955 1955 if (!code_mgr) { ··· 2155 2155 struct cod_manager *code_mgr; 2156 2156 struct bridge_drv_interface *intf_fxns; 2157 2157 struct bridge_dev_context *bridge_ctxt = bridge_context; 2158 - struct dev_object *dev_object = bridge_ctxt->hdev_obj; 2158 + struct dev_object *dev_object = bridge_ctxt->dev_obj; 2159 2159 struct modules_header modules_hdr; 2160 2160 struct dll_module *module_struct = NULL; 2161 2161 u32 module_dsp_addr;
+8 -8
drivers/staging/tidspbridge/core/msg_sm.c
··· 121 121 return -ENOMEM; 122 122 123 123 msg_q->max_msgs = max_msgs; 124 - msg_q->hmsg_mgr = hmsg_mgr; 124 + msg_q->msg_mgr = hmsg_mgr; 125 125 msg_q->arg = arg; /* Node handle */ 126 126 msg_q->msgq_id = msgq_id; /* Node env (not valid yet) */ 127 127 /* Queues of Message frames for messages from the DSP */ ··· 214 214 struct msg_mgr *hmsg_mgr; 215 215 u32 io_msg_pend; 216 216 217 - if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr) 217 + if (!msg_queue_obj || !msg_queue_obj->msg_mgr) 218 218 return; 219 219 220 - hmsg_mgr = msg_queue_obj->hmsg_mgr; 220 + hmsg_mgr = msg_queue_obj->msg_mgr; 221 221 msg_queue_obj->done = true; 222 222 /* Unblock all threads blocked in MSG_Get() or MSG_Put(). */ 223 223 io_msg_pend = msg_queue_obj->io_msg_pend; ··· 254 254 if (!msg_queue_obj || pmsg == NULL) 255 255 return -ENOMEM; 256 256 257 - hmsg_mgr = msg_queue_obj->hmsg_mgr; 257 + hmsg_mgr = msg_queue_obj->msg_mgr; 258 258 259 259 spin_lock_bh(&hmsg_mgr->msg_mgr_lock); 260 260 /* If a message is already there, get it */ ··· 331 331 u32 index; 332 332 int status; 333 333 334 - if (!msg_queue_obj || !pmsg || !msg_queue_obj->hmsg_mgr) 334 + if (!msg_queue_obj || !pmsg || !msg_queue_obj->msg_mgr) 335 335 return -EFAULT; 336 336 337 - hmsg_mgr = msg_queue_obj->hmsg_mgr; 337 + hmsg_mgr = msg_queue_obj->msg_mgr; 338 338 339 339 spin_lock_bh(&hmsg_mgr->msg_mgr_lock); 340 340 ··· 521 521 struct msg_frame *pmsg, *tmp; 522 522 u32 i; 523 523 524 - if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr) 524 + if (!msg_queue_obj || !msg_queue_obj->msg_mgr) 525 525 return; 526 526 527 - hmsg_mgr = msg_queue_obj->hmsg_mgr; 527 + hmsg_mgr = msg_queue_obj->msg_mgr; 528 528 529 529 /* Pull off num_to_dsp message frames from Msg manager and free */ 530 530 i = 0;
+8 -8
drivers/staging/tidspbridge/core/tiomap3430.c
··· 396 396 * last dsp base image was loaded. The first entry is always 397 397 * SHMMEM base. */ 398 398 /* Get SHM_BEG - convert to byte address */ 399 - (void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME, 399 + (void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME, 400 400 &ul_shm_base_virt); 401 401 ul_shm_base_virt *= DSPWORDSIZE; 402 402 DBC_ASSERT(ul_shm_base_virt != 0); ··· 474 474 itmp_entry_ndx, 475 475 e->gpp_pa, 476 476 e->dsp_va, 477 - e->ul_size); 477 + e->size); 478 478 479 479 hw_mmu_tlb_add(dev_context->dsp_mmu_base, 480 480 e->gpp_pa, 481 481 e->dsp_va, 482 - e->ul_size, 482 + e->size, 483 483 itmp_entry_ndx, 484 484 &map_attrs, 1, 1); 485 485 ··· 505 505 hw_mmu_enable(resources->dmmu_base); 506 506 507 507 /* Enable the BIOS clock */ 508 - (void)dev_get_symbol(dev_context->hdev_obj, 508 + (void)dev_get_symbol(dev_context->dev_obj, 509 509 BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer); 510 - (void)dev_get_symbol(dev_context->hdev_obj, 510 + (void)dev_get_symbol(dev_context->dev_obj, 511 511 BRIDGEINIT_LOADMON_GPTIMER, 512 512 &ul_load_monitor_timer); 513 513 } ··· 536 536 537 537 if (!status) { 538 538 /* Set the DSP clock rate */ 539 - (void)dev_get_symbol(dev_context->hdev_obj, 539 + (void)dev_get_symbol(dev_context->dev_obj, 540 540 "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); 541 541 /*Set Autoidle Mode for IVA2 PLL */ 542 542 (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT, ··· 607 607 dsp_wdt_sm_set((void *)ul_shm_base); 608 608 dsp_wdt_enable(true); 609 609 610 - status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr); 610 + status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr); 611 611 if (hio_mgr) { 612 612 io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL); 613 613 /* Write the synchronization bit to indicate the ··· 872 872 dev_context->dsp_mmu_base = resources->dmmu_base; 873 873 } 874 874 if (!status) { 875 - dev_context->hdev_obj = hdev_obj; 875 + dev_context->dev_obj = hdev_obj; 876 876 /* Store current board state. 
*/ 877 877 dev_context->brd_state = BRD_UNKNOWN; 878 878 dev_context->resources = resources;
+3 -3
drivers/staging/tidspbridge/core/tiomap3430_pwr.c
··· 121 121 dev_context->brd_state = BRD_DSP_HIBERNATION; 122 122 #ifdef CONFIG_TIDSPBRIDGE_DVFS 123 123 status = 124 - dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr); 124 + dev_get_io_mgr(dev_context->dev_obj, &hio_mgr); 125 125 if (!hio_mgr) { 126 126 status = DSP_EHANDLE; 127 127 return status; ··· 216 216 pr_err("%s: Timed out waiting for DSP off mode, state %x\n", 217 217 __func__, pwr_state); 218 218 #ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR 219 - dev_get_deh_mgr(dev_context->hdev_obj, &hdeh_mgr); 219 + dev_get_deh_mgr(dev_context->dev_obj, &hdeh_mgr); 220 220 bridge_deh_notify(hdeh_mgr, DSP_PWRERROR, 0); 221 221 #endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */ 222 222 return -ETIMEDOUT; ··· 382 382 u32 voltage_domain; 383 383 struct io_mgr *hio_mgr; 384 384 385 - status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr); 385 + status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr); 386 386 if (!hio_mgr) 387 387 return -EFAULT; 388 388
+15 -15
drivers/staging/tidspbridge/core/tiomap_io.c
··· 65 65 bool trace_read = false; 66 66 67 67 if (!ul_shm_base_virt) { 68 - status = dev_get_symbol(dev_context->hdev_obj, 68 + status = dev_get_symbol(dev_context->dev_obj, 69 69 SHMBASENAME, &ul_shm_base_virt); 70 70 } 71 71 DBC_ASSERT(ul_shm_base_virt != 0); 72 72 73 73 /* Check if it is a read of Trace section */ 74 74 if (!status && !ul_trace_sec_beg) { 75 - status = dev_get_symbol(dev_context->hdev_obj, 75 + status = dev_get_symbol(dev_context->dev_obj, 76 76 DSP_TRACESEC_BEG, &ul_trace_sec_beg); 77 77 } 78 78 DBC_ASSERT(ul_trace_sec_beg != 0); 79 79 80 80 if (!status && !ul_trace_sec_end) { 81 - status = dev_get_symbol(dev_context->hdev_obj, 81 + status = dev_get_symbol(dev_context->dev_obj, 82 82 DSP_TRACESEC_END, &ul_trace_sec_end); 83 83 } 84 84 DBC_ASSERT(ul_trace_sec_end != 0); ··· 102 102 103 103 /* Get DYNEXT_BEG, EXT_BEG and EXT_END. */ 104 104 if (!status && !ul_dyn_ext_base) { 105 - status = dev_get_symbol(dev_context->hdev_obj, 105 + status = dev_get_symbol(dev_context->dev_obj, 106 106 DYNEXTBASE, &ul_dyn_ext_base); 107 107 } 108 108 DBC_ASSERT(ul_dyn_ext_base != 0); 109 109 110 110 if (!status) { 111 - status = dev_get_symbol(dev_context->hdev_obj, 111 + status = dev_get_symbol(dev_context->dev_obj, 112 112 EXTBASE, &ul_ext_base); 113 113 } 114 114 DBC_ASSERT(ul_ext_base != 0); 115 115 116 116 if (!status) { 117 - status = dev_get_symbol(dev_context->hdev_obj, 117 + status = dev_get_symbol(dev_context->dev_obj, 118 118 EXTEND, &ul_ext_end); 119 119 } 120 120 DBC_ASSERT(ul_ext_end != 0); ··· 246 246 247 247 if (symbols_reloaded) { 248 248 /* Check if it is a load to Trace section */ 249 - ret = dev_get_symbol(dev_context->hdev_obj, 249 + ret = dev_get_symbol(dev_context->dev_obj, 250 250 DSP_TRACESEC_BEG, &ul_trace_sec_beg); 251 251 if (!ret) 252 - ret = dev_get_symbol(dev_context->hdev_obj, 252 + ret = dev_get_symbol(dev_context->dev_obj, 253 253 DSP_TRACESEC_END, 254 254 &ul_trace_sec_end); 255 255 } ··· 269 269 if (!dw_base_addr) { 270 270 if 
(symbols_reloaded) 271 271 /* Get SHM_BEG EXT_BEG and EXT_END. */ 272 - ret = dev_get_symbol(dev_context->hdev_obj, 272 + ret = dev_get_symbol(dev_context->dev_obj, 273 273 SHMBASENAME, &ul_shm_base_virt); 274 274 DBC_ASSERT(ul_shm_base_virt != 0); 275 275 if (dynamic_load) { ··· 277 277 if (symbols_reloaded) 278 278 ret = 279 279 dev_get_symbol 280 - (dev_context->hdev_obj, DYNEXTBASE, 280 + (dev_context->dev_obj, DYNEXTBASE, 281 281 &ul_ext_base); 282 282 } 283 283 DBC_ASSERT(ul_ext_base != 0); ··· 289 289 if (symbols_reloaded) 290 290 ret = 291 291 dev_get_symbol 292 - (dev_context->hdev_obj, EXTEND, 292 + (dev_context->dev_obj, EXTEND, 293 293 &ul_ext_end); 294 294 } 295 295 } else { ··· 297 297 if (!ret) 298 298 ret = 299 299 dev_get_symbol 300 - (dev_context->hdev_obj, EXTBASE, 300 + (dev_context->dev_obj, EXTBASE, 301 301 &ul_ext_base); 302 302 DBC_ASSERT(ul_ext_base != 0); 303 303 if (!ret) 304 304 ret = 305 305 dev_get_symbol 306 - (dev_context->hdev_obj, EXTEND, 306 + (dev_context->dev_obj, EXTEND, 307 307 &ul_ext_end); 308 308 } 309 309 } ··· 324 324 325 325 if (symbols_reloaded) { 326 326 ret = dev_get_symbol 327 - (dev_context->hdev_obj, 327 + (dev_context->dev_obj, 328 328 DSP_TRACESEC_END, &shm0_end); 329 329 if (!ret) { 330 330 ret = 331 331 dev_get_symbol 332 - (dev_context->hdev_obj, DYNEXTBASE, 332 + (dev_context->dev_obj, DYNEXTBASE, 333 333 &ul_dyn_ext_base); 334 334 } 335 335 }
+3 -3
drivers/staging/tidspbridge/core/ue_deh.c
··· 52 52 if (!deh) 53 53 return IRQ_HANDLED; 54 54 55 - resources = deh->hbridge_context->resources; 55 + resources = deh->bridge_context->resources; 56 56 if (!resources) { 57 57 dev_dbg(bridge, "%s: Failed to get Host Resources\n", 58 58 __func__); ··· 113 113 tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh); 114 114 115 115 /* Fill in context structure */ 116 - deh->hbridge_context = hbridge_context; 116 + deh->bridge_context = hbridge_context; 117 117 118 118 /* Install ISR function for DSP MMU fault */ 119 119 status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0, ··· 228 228 return; 229 229 230 230 dev_dbg(bridge, "%s: device exception", __func__); 231 - dev_context = deh->hbridge_context; 231 + dev_context = deh->bridge_context; 232 232 233 233 switch (event) { 234 234 case DSP_SYSERROR:
+1 -1
drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
··· 116 116 struct bridge_drv_interface *intf_fxns; 117 117 struct io_mgr *hio_mgr; /* IO manager */ 118 118 /* Device this board represents */ 119 - struct dev_object *hdev_obj; 119 + struct dev_object *dev_obj; 120 120 121 121 /* These fields initialized in bridge_chnl_create(): */ 122 122 u32 output_mask; /* Host output channels w/ full buffers */
+1 -1
drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h
··· 53 53 54 54 /* Channel info. */ 55 55 struct chnl_info { 56 - struct chnl_mgr *hchnl_mgr; /* Owning channel manager. */ 56 + struct chnl_mgr *chnl_mgr; /* Owning channel manager. */ 57 57 u32 cnhl_id; /* Channel ID. */ 58 58 void *event_obj; /* Channel I/O completion event. */ 59 59 /*Abstraction of I/O completion event. */
+4 -4
drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h
··· 28 28 29 29 /* Attributes for CMM_AllocBuf() & CMM_AllocDesc() */ 30 30 struct cmm_attrs { 31 - u32 ul_seg_id; /* 1,2... are SM segments. 0 is not. */ 31 + u32 seg_id; /* 1,2... are SM segments. 0 is not. */ 32 32 u32 alignment; /* 0,1,2,4....min_block_size */ 33 33 }; 34 34 ··· 53 53 struct cmm_seginfo { 54 54 u32 seg_base_pa; /* Start Phys address of SM segment */ 55 55 /* Total size in bytes of segment: DSP+GPP */ 56 - u32 ul_total_seg_size; 56 + u32 total_seg_size; 57 57 u32 gpp_base_pa; /* Start Phys addr of Gpp SM seg */ 58 58 u32 gpp_size; /* Size of Gpp SM seg in bytes */ 59 59 u32 dsp_base_va; /* DSP virt base byte address */ ··· 69 69 /* # of SM segments registered with this Cmm. */ 70 70 u32 num_gppsm_segs; 71 71 /* Total # of allocations outstanding for CMM */ 72 - u32 ul_total_in_use_cnt; 72 + u32 total_in_use_cnt; 73 73 /* Min SM block size allocation from cmm_create() */ 74 74 u32 min_block_size; 75 75 /* Info per registered SM segment. */ ··· 78 78 79 79 /* XlatorCreate attributes */ 80 80 struct cmm_xlatorattrs { 81 - u32 ul_seg_id; /* segment Id used for SM allocations */ 81 + u32 seg_id; /* segment Id used for SM allocations */ 82 82 u32 dsp_bufs; /* # of DSP-side bufs */ 83 83 u32 dsp_buf_size; /* size of DSP-side bufs in GPP bytes */ 84 84 /* Vm base address alloc'd in client process context */
+3 -3
drivers/staging/tidspbridge/include/dspbridge/dbdefs.h
··· 208 208 209 209 /* Memory Segment Status Values */ 210 210 struct dsp_memstat { 211 - u32 ul_size; 212 - u32 ul_total_free_size; 211 + u32 size; 212 + u32 total_free_size; 213 213 u32 len_max_free_block; 214 214 u32 num_free_blocks; 215 215 u32 num_alloc_blocks; ··· 388 388 u32 cb_struct; 389 389 enum dsp_resourceinfotype resource_type; 390 390 union { 391 - u32 ul_resource; 391 + u32 resource; 392 392 struct dsp_memstat mem_stat; 393 393 struct dsp_procloadstat proc_load_stat; 394 394 } result;
+3 -3
drivers/staging/tidspbridge/include/dspbridge/dev.h
··· 109 109 * DEV Initialized 110 110 * Valid hdev_obj 111 111 * Ensures: 112 - * 0 and hdev_obj->hnode_mgr != NULL 113 - * else hdev_obj->hnode_mgr == NULL 112 + * 0 and hdev_obj->node_mgr != NULL 113 + * else hdev_obj->node_mgr == NULL 114 114 */ 115 115 extern int dev_create2(struct dev_object *hdev_obj); 116 116 ··· 127 127 * DEV Initialized 128 128 * Valid hdev_obj 129 129 * Ensures: 130 - * 0 and hdev_obj->hnode_mgr == NULL 130 + * 0 and hdev_obj->node_mgr == NULL 131 131 * else -EPERM. 132 132 */ 133 133 extern int dev_destroy2(struct dev_object *hdev_obj);
+10 -10
drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h
··· 120 120 121 121 struct { 122 122 void *hprocessor; 123 - u32 ul_size; 123 + u32 size; 124 124 void *__user *pp_rsv_addr; 125 125 } args_proc_rsvmem; 126 126 127 127 struct { 128 128 void *hprocessor; 129 - u32 ul_size; 129 + u32 size; 130 130 void *prsv_addr; 131 131 } args_proc_unrsvmem; 132 132 133 133 struct { 134 134 void *hprocessor; 135 135 void *pmpu_addr; 136 - u32 ul_size; 136 + u32 size; 137 137 void *req_addr; 138 138 void *__user *pp_map_addr; 139 139 u32 ul_map_attr; ··· 141 141 142 142 struct { 143 143 void *hprocessor; 144 - u32 ul_size; 144 + u32 size; 145 145 void *map_addr; 146 146 } args_proc_unmapmem; 147 147 148 148 struct { 149 149 void *hprocessor; 150 150 void *pmpu_addr; 151 - u32 ul_size; 151 + u32 size; 152 152 u32 dir; 153 153 } args_proc_dma; 154 154 155 155 struct { 156 156 void *hprocessor; 157 157 void *pmpu_addr; 158 - u32 ul_size; 158 + u32 size; 159 159 u32 ul_flags; 160 160 } args_proc_flushmemory; 161 161 162 162 struct { 163 163 void *hprocessor; 164 164 void *pmpu_addr; 165 - u32 ul_size; 165 + u32 size; 166 166 } args_proc_invalidatememory; 167 167 168 168 /* NODE Module */ ··· 328 328 329 329 /* CMM Module */ 330 330 struct { 331 - struct cmm_object *hcmm_mgr; 331 + struct cmm_object *cmm_mgr; 332 332 u32 usize; 333 333 struct cmm_attrs *pattrs; 334 334 void **pp_buf_va; 335 335 } args_cmm_allocbuf; 336 336 337 337 struct { 338 - struct cmm_object *hcmm_mgr; 338 + struct cmm_object *cmm_mgr; 339 339 void *buf_pa; 340 340 u32 ul_seg_id; 341 341 } args_cmm_freebuf; ··· 346 346 } args_cmm_gethandle; 347 347 348 348 struct { 349 - struct cmm_object *hcmm_mgr; 349 + struct cmm_object *cmm_mgr; 350 350 struct cmm_info __user *cmm_info_obj; 351 351 } args_cmm_getinfo; 352 352
+1 -1
drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
··· 59 59 u32 gpp_pa; /* GPP physical address */ 60 60 /* GPP virtual address. __va does not work for ioremapped addresses */ 61 61 u32 gpp_va; 62 - u32 ul_size; /* Size of the mapped memory in bytes */ 62 + u32 size; /* Size of the mapped memory in bytes */ 63 63 enum hw_endianism_t endianism; 64 64 enum hw_mmu_mixed_size_t mixed_mode; 65 65 enum hw_element_size_t elem_size;
+2 -2
drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
··· 84 84 struct nldr_attrs { 85 85 nldr_ovlyfxn ovly; 86 86 nldr_writefxn write; 87 - u16 us_dsp_word_size; 88 - u16 us_dsp_mau_size; 87 + u16 dsp_word_size; 88 + u16 dsp_mau_size; 89 89 }; 90 90 91 91 /*
+1 -1
drivers/staging/tidspbridge/include/dspbridge/strmdefs.h
··· 28 28 char *pstr_event_name; 29 29 void *virt_base; /* Process virtual base address of 30 30 * mapped SM */ 31 - u32 ul_virt_size; /* Size of virtual space in bytes */ 31 + u32 virt_size; /* Size of virtual space in bytes */ 32 32 struct dsp_streamattrin *stream_attr_in; 33 33 }; 34 34
+44 -44
drivers/staging/tidspbridge/pmgr/cmm.c
··· 49 49 #include <dspbridge/cmm.h> 50 50 51 51 /* ----------------------------------- Defines, Data Structures, Typedefs */ 52 - #define NEXT_PA(pnode) (pnode->pa + pnode->ul_size) 52 + #define NEXT_PA(pnode) (pnode->pa + pnode->size) 53 53 54 54 /* Other bus/platform translations */ 55 55 #define DSPPA2GPPPA(base, x, y) ((x)+(y)) ··· 63 63 */ 64 64 struct cmm_allocator { /* sma */ 65 65 unsigned int shm_base; /* Start of physical SM block */ 66 - u32 ul_sm_size; /* Size of SM block in bytes */ 66 + u32 sm_size; /* Size of SM block in bytes */ 67 67 unsigned int vm_base; /* Start of VM block. (Dev driver 68 68 * context for 'sma') */ 69 69 u32 dsp_phys_addr_offset; /* DSP PA to GPP PA offset for this ··· 71 71 s8 c_factor; /* DSPPa to GPPPa Conversion Factor */ 72 72 unsigned int dsp_base; /* DSP virt base byte address */ 73 73 u32 dsp_size; /* DSP seg size in bytes */ 74 - struct cmm_object *hcmm_mgr; /* back ref to parent mgr */ 74 + struct cmm_object *cmm_mgr; /* back ref to parent mgr */ 75 75 /* node list of available memory */ 76 76 struct list_head free_list; 77 77 /* node list of memory in use */ ··· 80 80 81 81 struct cmm_xlator { /* Pa<->Va translator object */ 82 82 /* CMM object this translator associated */ 83 - struct cmm_object *hcmm_mgr; 83 + struct cmm_object *cmm_mgr; 84 84 /* 85 85 * Client process virtual base address that corresponds to phys SM 86 - * base address for translator's ul_seg_id. 86 + * base address for translator's seg_id. 87 87 * Only 1 segment ID currently supported. 
88 88 */ 89 89 unsigned int virt_base; /* virtual base address */ 90 - u32 ul_virt_size; /* size of virt space in bytes */ 91 - u32 ul_seg_id; /* Segment Id */ 90 + u32 virt_size; /* size of virt space in bytes */ 91 + u32 seg_id; /* Segment Id */ 92 92 }; 93 93 94 94 /* CMM Mgr */ ··· 112 112 113 113 /* Default allocation attributes */ 114 114 static struct cmm_attrs cmm_dfltalctattrs = { 115 - 1 /* ul_seg_id, default segment Id for allocator */ 115 + 1 /* seg_id, default segment Id for allocator */ 116 116 }; 117 117 118 118 /* Address translator default attrs */ 119 119 static struct cmm_xlatorattrs cmm_dfltxlatorattrs = { 120 - /* ul_seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */ 120 + /* seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */ 121 121 1, 122 122 0, /* dsp_bufs */ 123 123 0, /* dsp_buf_size */ ··· 130 130 struct list_head link; /* must be 1st element */ 131 131 u32 pa; /* Phys addr */ 132 132 u32 va; /* Virtual address in device process context */ 133 - u32 ul_size; /* SM block size in bytes */ 133 + u32 size; /* SM block size in bytes */ 134 134 u32 client_proc; /* Process that allocated this mem block */ 135 135 }; 136 136 ··· 180 180 *pp_buf_va = NULL; 181 181 182 182 if (cmm_mgr_obj && (usize != 0)) { 183 - if (pattrs->ul_seg_id > 0) { 183 + if (pattrs->seg_id > 0) { 184 184 /* SegId > 0 is SM */ 185 185 /* get the allocator object for this segment id */ 186 186 allocator = 187 - get_allocator(cmm_mgr_obj, pattrs->ul_seg_id); 187 + get_allocator(cmm_mgr_obj, pattrs->seg_id); 188 188 /* keep block size a multiple of min_block_size */ 189 189 usize = 190 190 ((usize - 1) & ~(cmm_mgr_obj->min_block_size - ··· 194 194 pnode = get_free_block(allocator, usize); 195 195 } 196 196 if (pnode) { 197 - delta_size = (pnode->ul_size - usize); 197 + delta_size = (pnode->size - usize); 198 198 if (delta_size >= cmm_mgr_obj->min_block_size) { 199 199 /* create a new block with the leftovers and 200 200 * add to freelist */ ··· 205 205 
/* leftovers go free */ 206 206 add_to_free_list(allocator, new_node); 207 207 /* adjust our node's size */ 208 - pnode->ul_size = usize; 208 + pnode->size = usize; 209 209 } 210 210 /* Tag node with client process requesting allocation 211 211 * We'll need to free up a process's alloc'd SM if the ··· 294 294 /* Check for outstanding memory allocations */ 295 295 status = cmm_get_info(hcmm_mgr, &temp_info); 296 296 if (!status) { 297 - if (temp_info.ul_total_in_use_cnt > 0) { 297 + if (temp_info.total_in_use_cnt > 0) { 298 298 /* outstanding allocations */ 299 299 status = -EPERM; 300 300 } ··· 356 356 357 357 if (ul_seg_id == 0) { 358 358 pattrs = &cmm_dfltalctattrs; 359 - ul_seg_id = pattrs->ul_seg_id; 359 + ul_seg_id = pattrs->seg_id; 360 360 } 361 361 if (!hcmm_mgr || !(ul_seg_id > 0)) { 362 362 status = -EFAULT; ··· 428 428 mutex_lock(&cmm_mgr_obj->cmm_lock); 429 429 cmm_info_obj->num_gppsm_segs = 0; /* # of SM segments */ 430 430 /* Total # of outstanding alloc */ 431 - cmm_info_obj->ul_total_in_use_cnt = 0; 431 + cmm_info_obj->total_in_use_cnt = 0; 432 432 /* min block size */ 433 433 cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size; 434 434 /* check SM memory segments */ ··· 440 440 cmm_info_obj->num_gppsm_segs++; 441 441 cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa = 442 442 altr->shm_base - altr->dsp_size; 443 - cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size = 444 - altr->dsp_size + altr->ul_sm_size; 443 + cmm_info_obj->seg_info[ul_seg - 1].total_seg_size = 444 + altr->dsp_size + altr->sm_size; 445 445 cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa = 446 446 altr->shm_base; 447 447 cmm_info_obj->seg_info[ul_seg - 1].gpp_size = 448 - altr->ul_sm_size; 448 + altr->sm_size; 449 449 cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va = 450 450 altr->dsp_base; 451 451 cmm_info_obj->seg_info[ul_seg - 1].dsp_size = ··· 455 455 cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0; 456 456 457 457 list_for_each_entry(curr, &altr->in_use_list, link) { 458 
- cmm_info_obj->ul_total_in_use_cnt++; 458 + cmm_info_obj->total_in_use_cnt++; 459 459 cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++; 460 460 } 461 461 } ··· 536 536 goto func_end; 537 537 } 538 538 539 - psma->hcmm_mgr = hcmm_mgr; /* ref to parent */ 539 + psma->cmm_mgr = hcmm_mgr; /* ref to parent */ 540 540 psma->shm_base = dw_gpp_base_pa; /* SM Base phys */ 541 - psma->ul_sm_size = ul_size; /* SM segment size in bytes */ 541 + psma->sm_size = ul_size; /* SM segment size in bytes */ 542 542 psma->vm_base = gpp_base_va; 543 543 psma->dsp_phys_addr_offset = dsp_addr_offset; 544 544 psma->c_factor = c_factor; ··· 706 706 707 707 pnode->pa = dw_pa; 708 708 pnode->va = dw_va; 709 - pnode->ul_size = ul_size; 709 + pnode->size = ul_size; 710 710 711 711 return pnode; 712 712 } ··· 738 738 return NULL; 739 739 740 740 list_for_each_entry_safe(node, tmp, &allocator->free_list, link) { 741 - if (usize <= node->ul_size) { 741 + if (usize <= node->size) { 742 742 list_del(&node->link); 743 743 return node; 744 744 } ··· 764 764 765 765 list_for_each_entry(curr, &allocator->free_list, link) { 766 766 if (NEXT_PA(curr) == node->pa) { 767 - curr->ul_size += node->ul_size; 768 - delete_node(allocator->hcmm_mgr, node); 767 + curr->size += node->size; 768 + delete_node(allocator->cmm_mgr, node); 769 769 return; 770 770 } 771 771 if (curr->pa == NEXT_PA(node)) { 772 772 curr->pa = node->pa; 773 773 curr->va = node->va; 774 - curr->ul_size += node->ul_size; 775 - delete_node(allocator->hcmm_mgr, node); 774 + curr->size += node->size; 775 + delete_node(allocator->cmm_mgr, node); 776 776 return; 777 777 } 778 778 } 779 779 list_for_each_entry(curr, &allocator->free_list, link) { 780 - if (curr->ul_size >= node->ul_size) { 780 + if (curr->size >= node->size) { 781 781 list_add_tail(&node->link, &curr->link); 782 782 return; 783 783 } ··· 828 828 829 829 xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL); 830 830 if (xlator_object != NULL) { 831 - xlator_object->hcmm_mgr 
= hcmm_mgr; /* ref back to CMM */ 831 + xlator_object->cmm_mgr = hcmm_mgr; /* ref back to CMM */ 832 832 /* SM seg_id */ 833 - xlator_object->ul_seg_id = xlator_attrs->ul_seg_id; 833 + xlator_object->seg_id = xlator_attrs->seg_id; 834 834 } else { 835 835 status = -ENOMEM; 836 836 } ··· 853 853 854 854 DBC_REQUIRE(refs > 0); 855 855 DBC_REQUIRE(xlator != NULL); 856 - DBC_REQUIRE(xlator_obj->hcmm_mgr != NULL); 856 + DBC_REQUIRE(xlator_obj->cmm_mgr != NULL); 857 857 DBC_REQUIRE(va_buf != NULL); 858 858 DBC_REQUIRE(pa_size > 0); 859 - DBC_REQUIRE(xlator_obj->ul_seg_id > 0); 859 + DBC_REQUIRE(xlator_obj->seg_id > 0); 860 860 861 861 if (xlator_obj) { 862 - attrs.ul_seg_id = xlator_obj->ul_seg_id; 862 + attrs.seg_id = xlator_obj->seg_id; 863 863 __raw_writel(0, va_buf); 864 864 /* Alloc SM */ 865 865 pbuf = 866 - cmm_calloc_buf(xlator_obj->hcmm_mgr, pa_size, &attrs, NULL); 866 + cmm_calloc_buf(xlator_obj->cmm_mgr, pa_size, &attrs, NULL); 867 867 if (pbuf) { 868 868 /* convert to translator(node/strm) process Virtual 869 869 * address */ ··· 889 889 890 890 DBC_REQUIRE(refs > 0); 891 891 DBC_REQUIRE(buf_va != NULL); 892 - DBC_REQUIRE(xlator_obj->ul_seg_id > 0); 892 + DBC_REQUIRE(xlator_obj->seg_id > 0); 893 893 894 894 if (xlator_obj) { 895 895 /* convert Va to Pa so we can free it. */ 896 896 buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA); 897 897 if (buf_pa) { 898 - status = cmm_free_buf(xlator_obj->hcmm_mgr, buf_pa, 899 - xlator_obj->ul_seg_id); 898 + status = cmm_free_buf(xlator_obj->cmm_mgr, buf_pa, 899 + xlator_obj->seg_id); 900 900 if (status) { 901 901 /* Uh oh, this shouldn't happen. Descriptor 902 902 * gone! 
*/ ··· 926 926 if (set_info) { 927 927 /* set translators virtual address range */ 928 928 xlator_obj->virt_base = (u32) *paddr; 929 - xlator_obj->ul_virt_size = ul_size; 929 + xlator_obj->virt_size = ul_size; 930 930 } else { /* return virt base address */ 931 931 *paddr = (u8 *) xlator_obj->virt_base; 932 932 } ··· 955 955 if (!xlator_obj) 956 956 goto loop_cont; 957 957 958 - cmm_mgr_obj = (struct cmm_object *)xlator_obj->hcmm_mgr; 958 + cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr; 959 959 /* get this translator's default SM allocator */ 960 - DBC_ASSERT(xlator_obj->ul_seg_id > 0); 961 - allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->ul_seg_id - 1]; 960 + DBC_ASSERT(xlator_obj->seg_id > 0); 961 + allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1]; 962 962 if (!allocator) 963 963 goto loop_cont; 964 964 ··· 974 974 if ((dw_addr_xlate < xlator_obj->virt_base) || 975 975 (dw_addr_xlate >= 976 976 (xlator_obj->virt_base + 977 - xlator_obj->ul_virt_size))) { 977 + xlator_obj->virt_size))) { 978 978 dw_addr_xlate = 0; /* bad address */ 979 979 } 980 980 } else {
+55 -55
drivers/staging/tidspbridge/pmgr/dev.c
··· 61 61 u8 dev_type; /* Device Type */ 62 62 struct cfg_devnode *dev_node_obj; /* Platform specific dev id */ 63 63 /* Bridge Context Handle */ 64 - struct bridge_dev_context *hbridge_context; 64 + struct bridge_dev_context *bridge_context; 65 65 /* Function interface to Bridge driver. */ 66 66 struct bridge_drv_interface bridge_interface; 67 67 struct brd_object *lock_owner; /* Client with exclusive access. */ 68 68 struct cod_manager *cod_mgr; /* Code manager handle. */ 69 - struct chnl_mgr *hchnl_mgr; /* Channel manager. */ 70 - struct deh_mgr *hdeh_mgr; /* DEH manager. */ 71 - struct msg_mgr *hmsg_mgr; /* Message manager. */ 69 + struct chnl_mgr *chnl_mgr; /* Channel manager. */ 70 + struct deh_mgr *deh_mgr; /* DEH manager. */ 71 + struct msg_mgr *msg_mgr; /* Message manager. */ 72 72 struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */ 73 - struct cmm_object *hcmm_mgr; /* SM memory manager. */ 73 + struct cmm_object *cmm_mgr; /* SM memory manager. */ 74 74 struct dmm_object *dmm_mgr; /* Dynamic memory manager. */ 75 75 u32 word_size; /* DSP word size: quick access. */ 76 - struct drv_object *hdrv_obj; /* Driver Object */ 76 + struct drv_object *drv_obj; /* Driver Object */ 77 77 /* List of Processors attached to this device */ 78 78 struct list_head proc_list; 79 - struct node_mgr *hnode_mgr; 79 + struct node_mgr *node_mgr; 80 80 }; 81 81 82 82 struct drv_ext { ··· 110 110 DBC_REQUIRE(host_buf != NULL); /* Required of BrdWrite(). 
*/ 111 111 if (dev_obj) { 112 112 /* Require of BrdWrite() */ 113 - DBC_ASSERT(dev_obj->hbridge_context != NULL); 113 + DBC_ASSERT(dev_obj->bridge_context != NULL); 114 114 status = (*dev_obj->bridge_interface.brd_write) ( 115 - dev_obj->hbridge_context, host_buf, 115 + dev_obj->bridge_context, host_buf, 116 116 dsp_add, ul_num_bytes, mem_space); 117 117 /* Special case of getting the address only */ 118 118 if (ul_num_bytes == 0) ··· 175 175 /* Fill out the rest of the Dev Object structure: */ 176 176 dev_obj->dev_node_obj = dev_node_obj; 177 177 dev_obj->cod_mgr = NULL; 178 - dev_obj->hchnl_mgr = NULL; 179 - dev_obj->hdeh_mgr = NULL; 178 + dev_obj->chnl_mgr = NULL; 179 + dev_obj->deh_mgr = NULL; 180 180 dev_obj->lock_owner = NULL; 181 181 dev_obj->word_size = DSPWORDSIZE; 182 - dev_obj->hdrv_obj = hdrv_obj; 182 + dev_obj->drv_obj = hdrv_obj; 183 183 dev_obj->dev_type = DSP_UNIT; 184 184 /* Store this Bridge's interface functions, based on its 185 185 * version. */ ··· 189 189 /* Call fxn_dev_create() to get the Bridge's device 190 190 * context handle. */ 191 191 status = (dev_obj->bridge_interface.dev_create) 192 - (&dev_obj->hbridge_context, dev_obj, 192 + (&dev_obj->bridge_context, dev_obj, 193 193 host_res); 194 194 /* Assert bridge_dev_create()'s ensure clause: */ 195 195 DBC_ASSERT(status 196 - || (dev_obj->hbridge_context != NULL)); 196 + || (dev_obj->bridge_context != NULL)); 197 197 } else { 198 198 status = -ENOMEM; 199 199 } ··· 224 224 pr_err("%s: No memory reserved for shared structures\n", 225 225 __func__); 226 226 } 227 - status = chnl_create(&dev_obj->hchnl_mgr, dev_obj, &mgr_attrs); 227 + status = chnl_create(&dev_obj->chnl_mgr, dev_obj, &mgr_attrs); 228 228 if (status == -ENOSYS) { 229 229 /* It's OK for a device not to have a channel 230 230 * manager: */ 231 231 status = 0; 232 232 } 233 233 /* Create CMM mgr even if Msg Mgr not impl. 
*/ 234 - status = cmm_create(&dev_obj->hcmm_mgr, 234 + status = cmm_create(&dev_obj->cmm_mgr, 235 235 (struct dev_object *)dev_obj, NULL); 236 236 /* Only create IO manager if we have a channel manager */ 237 - if (!status && dev_obj->hchnl_mgr) { 237 + if (!status && dev_obj->chnl_mgr) { 238 238 status = io_create(&dev_obj->hio_mgr, dev_obj, 239 239 &io_mgr_attrs); 240 240 } 241 241 /* Only create DEH manager if we have an IO manager */ 242 242 if (!status) { 243 243 /* Instantiate the DEH module */ 244 - status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj); 244 + status = bridge_deh_create(&dev_obj->deh_mgr, dev_obj); 245 245 } 246 246 /* Create DMM mgr . */ 247 247 status = dmm_create(&dev_obj->dmm_mgr, ··· 291 291 DBC_REQUIRE(hdev_obj); 292 292 293 293 /* There can be only one Node Manager per DEV object */ 294 - DBC_ASSERT(!dev_obj->hnode_mgr); 295 - status = node_create_mgr(&dev_obj->hnode_mgr, hdev_obj); 294 + DBC_ASSERT(!dev_obj->node_mgr); 295 + status = node_create_mgr(&dev_obj->node_mgr, hdev_obj); 296 296 if (status) 297 - dev_obj->hnode_mgr = NULL; 297 + dev_obj->node_mgr = NULL; 298 298 299 - DBC_ENSURE((!status && dev_obj->hnode_mgr != NULL) 300 - || (status && dev_obj->hnode_mgr == NULL)); 299 + DBC_ENSURE((!status && dev_obj->node_mgr != NULL) 300 + || (status && dev_obj->node_mgr == NULL)); 301 301 return status; 302 302 } 303 303 ··· 314 314 DBC_REQUIRE(refs > 0); 315 315 DBC_REQUIRE(hdev_obj); 316 316 317 - if (dev_obj->hnode_mgr) { 318 - if (node_delete_mgr(dev_obj->hnode_mgr)) 317 + if (dev_obj->node_mgr) { 318 + if (node_delete_mgr(dev_obj->node_mgr)) 319 319 status = -EPERM; 320 320 else 321 - dev_obj->hnode_mgr = NULL; 321 + dev_obj->node_mgr = NULL; 322 322 323 323 } 324 324 325 - DBC_ENSURE((!status && dev_obj->hnode_mgr == NULL) || status); 325 + DBC_ENSURE((!status && dev_obj->node_mgr == NULL) || status); 326 326 return status; 327 327 } 328 328 ··· 345 345 dev_obj->cod_mgr = NULL; 346 346 } 347 347 348 - if (dev_obj->hnode_mgr) { 
349 - node_delete_mgr(dev_obj->hnode_mgr); 350 - dev_obj->hnode_mgr = NULL; 348 + if (dev_obj->node_mgr) { 349 + node_delete_mgr(dev_obj->node_mgr); 350 + dev_obj->node_mgr = NULL; 351 351 } 352 352 353 353 /* Free the io, channel, and message managers for this board: */ ··· 355 355 io_destroy(dev_obj->hio_mgr); 356 356 dev_obj->hio_mgr = NULL; 357 357 } 358 - if (dev_obj->hchnl_mgr) { 359 - chnl_destroy(dev_obj->hchnl_mgr); 360 - dev_obj->hchnl_mgr = NULL; 358 + if (dev_obj->chnl_mgr) { 359 + chnl_destroy(dev_obj->chnl_mgr); 360 + dev_obj->chnl_mgr = NULL; 361 361 } 362 - if (dev_obj->hmsg_mgr) { 363 - msg_delete(dev_obj->hmsg_mgr); 364 - dev_obj->hmsg_mgr = NULL; 362 + if (dev_obj->msg_mgr) { 363 + msg_delete(dev_obj->msg_mgr); 364 + dev_obj->msg_mgr = NULL; 365 365 } 366 366 367 - if (dev_obj->hdeh_mgr) { 367 + if (dev_obj->deh_mgr) { 368 368 /* Uninitialize DEH module. */ 369 - bridge_deh_destroy(dev_obj->hdeh_mgr); 370 - dev_obj->hdeh_mgr = NULL; 369 + bridge_deh_destroy(dev_obj->deh_mgr); 370 + dev_obj->deh_mgr = NULL; 371 371 } 372 - if (dev_obj->hcmm_mgr) { 373 - cmm_destroy(dev_obj->hcmm_mgr, true); 374 - dev_obj->hcmm_mgr = NULL; 372 + if (dev_obj->cmm_mgr) { 373 + cmm_destroy(dev_obj->cmm_mgr, true); 374 + dev_obj->cmm_mgr = NULL; 375 375 } 376 376 377 377 if (dev_obj->dmm_mgr) { ··· 381 381 382 382 /* Call the driver's bridge_dev_destroy() function: */ 383 383 /* Require of DevDestroy */ 384 - if (dev_obj->hbridge_context) { 384 + if (dev_obj->bridge_context) { 385 385 status = (*dev_obj->bridge_interface.dev_destroy) 386 - (dev_obj->hbridge_context); 387 - dev_obj->hbridge_context = NULL; 386 + (dev_obj->bridge_context); 387 + dev_obj->bridge_context = NULL; 388 388 } else 389 389 status = -EPERM; 390 390 if (!status) { 391 391 /* Remove this DEV_Object from the global list: */ 392 - drv_remove_dev_object(dev_obj->hdrv_obj, dev_obj); 392 + drv_remove_dev_object(dev_obj->drv_obj, dev_obj); 393 393 /* Free The library * LDR_FreeModule 394 394 * 
(dev_obj->module_obj); */ 395 395 /* Free this dev object: */ ··· 419 419 DBC_REQUIRE(mgr != NULL); 420 420 421 421 if (hdev_obj) { 422 - *mgr = dev_obj->hchnl_mgr; 422 + *mgr = dev_obj->chnl_mgr; 423 423 } else { 424 424 *mgr = NULL; 425 425 status = -EFAULT; ··· 445 445 DBC_REQUIRE(mgr != NULL); 446 446 447 447 if (hdev_obj) { 448 - *mgr = dev_obj->hcmm_mgr; 448 + *mgr = dev_obj->cmm_mgr; 449 449 } else { 450 450 *mgr = NULL; 451 451 status = -EFAULT; ··· 518 518 DBC_REQUIRE(deh_manager != NULL); 519 519 DBC_REQUIRE(hdev_obj); 520 520 if (hdev_obj) { 521 - *deh_manager = hdev_obj->hdeh_mgr; 521 + *deh_manager = hdev_obj->deh_mgr; 522 522 } else { 523 523 *deh_manager = NULL; 524 524 status = -EFAULT; ··· 642 642 DBC_REQUIRE(msg_man != NULL); 643 643 DBC_REQUIRE(hdev_obj); 644 644 645 - *msg_man = hdev_obj->hmsg_mgr; 645 + *msg_man = hdev_obj->msg_mgr; 646 646 } 647 647 648 648 /* ··· 660 660 DBC_REQUIRE(node_man != NULL); 661 661 662 662 if (hdev_obj) { 663 - *node_man = dev_obj->hnode_mgr; 663 + *node_man = dev_obj->node_mgr; 664 664 } else { 665 665 *node_man = NULL; 666 666 status = -EFAULT; ··· 710 710 DBC_REQUIRE(phbridge_context != NULL); 711 711 712 712 if (hdev_obj) { 713 - *phbridge_context = dev_obj->hbridge_context; 713 + *phbridge_context = dev_obj->bridge_context; 714 714 } else { 715 715 *phbridge_context = NULL; 716 716 status = -EFAULT; ··· 844 844 DBC_REQUIRE(refs > 0); 845 845 846 846 if (hdev_obj) 847 - dev_obj->hchnl_mgr = hmgr; 847 + dev_obj->chnl_mgr = hmgr; 848 848 else 849 849 status = -EFAULT; 850 850 851 - DBC_ENSURE(status || (dev_obj->hchnl_mgr == hmgr)); 851 + DBC_ENSURE(status || (dev_obj->chnl_mgr == hmgr)); 852 852 return status; 853 853 } 854 854 ··· 862 862 DBC_REQUIRE(refs > 0); 863 863 DBC_REQUIRE(hdev_obj); 864 864 865 - hdev_obj->hmsg_mgr = hmgr; 865 + hdev_obj->msg_mgr = hmgr; 866 866 } 867 867 868 868 /*
+10 -10
drivers/staging/tidspbridge/pmgr/dspapi.c
··· 695 695 696 696 status = proc_end_dma(pr_ctxt, 697 697 args->args_proc_dma.pmpu_addr, 698 - args->args_proc_dma.ul_size, 698 + args->args_proc_dma.size, 699 699 args->args_proc_dma.dir); 700 700 return status; 701 701 } ··· 709 709 710 710 status = proc_begin_dma(pr_ctxt, 711 711 args->args_proc_dma.pmpu_addr, 712 - args->args_proc_dma.ul_size, 712 + args->args_proc_dma.size, 713 713 args->args_proc_dma.dir); 714 714 return status; 715 715 } ··· 727 727 728 728 status = proc_flush_memory(pr_ctxt, 729 729 args->args_proc_flushmemory.pmpu_addr, 730 - args->args_proc_flushmemory.ul_size, 730 + args->args_proc_flushmemory.size, 731 731 args->args_proc_flushmemory.ul_flags); 732 732 return status; 733 733 } ··· 742 742 status = 743 743 proc_invalidate_memory(pr_ctxt, 744 744 args->args_proc_invalidatememory.pmpu_addr, 745 - args->args_proc_invalidatememory.ul_size); 745 + args->args_proc_invalidatememory.size); 746 746 return status; 747 747 } 748 748 ··· 950 950 void *map_addr; 951 951 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; 952 952 953 - if (!args->args_proc_mapmem.ul_size) 953 + if (!args->args_proc_mapmem.size) 954 954 return -EINVAL; 955 955 956 956 status = proc_map(args->args_proc_mapmem.hprocessor, 957 957 args->args_proc_mapmem.pmpu_addr, 958 - args->args_proc_mapmem.ul_size, 958 + args->args_proc_mapmem.size, 959 959 args->args_proc_mapmem.req_addr, &map_addr, 960 960 args->args_proc_mapmem.ul_map_attr, pr_ctxt); 961 961 if (!status) { ··· 999 999 void *prsv_addr; 1000 1000 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; 1001 1001 1002 - if ((args->args_proc_rsvmem.ul_size <= 0) || 1003 - (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0) 1002 + if ((args->args_proc_rsvmem.size <= 0) || 1003 + (args->args_proc_rsvmem.size & (PG_SIZE4K - 1)) != 0) 1004 1004 return -EINVAL; 1005 1005 1006 1006 status = proc_reserve_memory(hprocessor, 1007 - args->args_proc_rsvmem.ul_size, &prsv_addr, 1007 + 
args->args_proc_rsvmem.size, &prsv_addr, 1008 1008 pr_ctxt); 1009 1009 if (!status) { 1010 1010 if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) { ··· 1905 1905 int status = 0; 1906 1906 struct cmm_info cmm_info_obj; 1907 1907 1908 - status = cmm_get_info(args->args_cmm_getinfo.hcmm_mgr, &cmm_info_obj); 1908 + status = cmm_get_info(args->args_cmm_getinfo.cmm_mgr, &cmm_info_obj); 1909 1909 1910 1910 CP_TO_USR(args->args_cmm_getinfo.cmm_info_obj, &cmm_info_obj, status, 1911 1911 1);
+1 -1
drivers/staging/tidspbridge/pmgr/io.c
··· 73 73 if (!status) { 74 74 pio_mgr = (struct io_mgr_ *)hio_mgr; 75 75 pio_mgr->intf_fxns = intf_fxns; 76 - pio_mgr->hdev_obj = hdev_obj; 76 + pio_mgr->dev_obj = hdev_obj; 77 77 78 78 /* Return the new channel manager handle: */ 79 79 *io_man = hio_mgr;
+2 -2
drivers/staging/tidspbridge/pmgr/ioobj.h
··· 29 29 */ 30 30 struct io_mgr_ { 31 31 /* These must be the first fields in a io_mgr struct: */ 32 - struct bridge_dev_context *hbridge_context; /* Bridge context. */ 32 + struct bridge_dev_context *bridge_context; /* Bridge context. */ 33 33 /* Function interface to Bridge driver. */ 34 34 struct bridge_drv_interface *intf_fxns; 35 - struct dev_object *hdev_obj; /* Device this board represents. */ 35 + struct dev_object *dev_obj; /* Device this board represents. */ 36 36 }; 37 37 38 38 #endif /* IOOBJ_ */
+9 -9
drivers/staging/tidspbridge/rmgr/disp.c
··· 58 58 * ======== disp_object ======== 59 59 */ 60 60 struct disp_object { 61 - struct dev_object *hdev_obj; /* Device for this processor */ 61 + struct dev_object *dev_obj; /* Device for this processor */ 62 62 /* Function interface to Bridge driver */ 63 63 struct bridge_drv_interface *intf_fxns; 64 - struct chnl_mgr *hchnl_mgr; /* Channel manager */ 64 + struct chnl_mgr *chnl_mgr; /* Channel manager */ 65 65 struct chnl_object *chnl_to_dsp; /* Chnl for commands to RMS */ 66 66 struct chnl_object *chnl_from_dsp; /* Chnl for replies from RMS */ 67 67 u8 *pbuf; /* Buffer for commands, replies */ ··· 108 108 if (disp_obj == NULL) 109 109 status = -ENOMEM; 110 110 else 111 - disp_obj->hdev_obj = hdev_obj; 111 + disp_obj->dev_obj = hdev_obj; 112 112 113 113 /* Get Channel manager and Bridge function interface */ 114 114 if (!status) { 115 - status = dev_get_chnl_mgr(hdev_obj, &(disp_obj->hchnl_mgr)); 115 + status = dev_get_chnl_mgr(hdev_obj, &(disp_obj->chnl_mgr)); 116 116 if (!status) { 117 117 (void)dev_get_intf_fxns(hdev_obj, &intf_fxns); 118 118 disp_obj->intf_fxns = intf_fxns; ··· 142 142 chnl_attr_obj.event_obj = NULL; 143 143 ul_chnl_id = disp_attrs->chnl_offset + CHNLTORMSOFFSET; 144 144 status = (*intf_fxns->chnl_open) (&(disp_obj->chnl_to_dsp), 145 - disp_obj->hchnl_mgr, 145 + disp_obj->chnl_mgr, 146 146 CHNL_MODETODSP, ul_chnl_id, 147 147 &chnl_attr_obj); 148 148 ··· 150 150 ul_chnl_id = disp_attrs->chnl_offset + CHNLFROMRMSOFFSET; 151 151 status = 152 152 (*intf_fxns->chnl_open) (&(disp_obj->chnl_from_dsp), 153 - disp_obj->hchnl_mgr, 153 + disp_obj->chnl_mgr, 154 154 CHNL_MODEFROMDSP, ul_chnl_id, 155 155 &chnl_attr_obj); 156 156 } ··· 282 282 DBC_REQUIRE(node_get_type(hnode) != NODE_DEVICE); 283 283 DBC_REQUIRE(node_env != NULL); 284 284 285 - status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type); 285 + status = dev_get_dev_type(disp_obj->dev_obj, &dev_type); 286 286 287 287 if (status) 288 288 goto func_end; ··· 484 484 DBC_REQUIRE(disp_obj); 485 
485 DBC_REQUIRE(hnode != NULL); 486 486 487 - status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type); 487 + status = dev_get_dev_type(disp_obj->dev_obj, &dev_type); 488 488 489 489 if (!status) { 490 490 ··· 525 525 DBC_REQUIRE(disp_obj); 526 526 DBC_REQUIRE(hnode != NULL); 527 527 528 - status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type); 528 + status = dev_get_dev_type(disp_obj->dev_obj, &dev_type); 529 529 530 530 if (!status) { 531 531
+8 -8
drivers/staging/tidspbridge/rmgr/mgr.c
··· 44 44 #define ZLDLLNAME "" 45 45 46 46 struct mgr_object { 47 - struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */ 47 + struct dcd_manager *dcd_mgr; /* Proc/Node data manager */ 48 48 }; 49 49 50 50 /* ----------------------------------- Globals */ ··· 67 67 68 68 pmgr_obj = kzalloc(sizeof(struct mgr_object), GFP_KERNEL); 69 69 if (pmgr_obj) { 70 - status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->hdcd_mgr); 70 + status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->dcd_mgr); 71 71 if (!status) { 72 72 /* If succeeded store the handle in the MGR Object */ 73 73 if (drv_datap) { ··· 81 81 if (!status) { 82 82 *mgr_obj = pmgr_obj; 83 83 } else { 84 - dcd_destroy_manager(pmgr_obj->hdcd_mgr); 84 + dcd_destroy_manager(pmgr_obj->dcd_mgr); 85 85 kfree(pmgr_obj); 86 86 } 87 87 } else { ··· 110 110 DBC_REQUIRE(hmgr_obj); 111 111 112 112 /* Free resources */ 113 - if (hmgr_obj->hdcd_mgr) 114 - dcd_destroy_manager(hmgr_obj->hdcd_mgr); 113 + if (hmgr_obj->dcd_mgr) 114 + dcd_destroy_manager(hmgr_obj->dcd_mgr); 115 115 116 116 kfree(pmgr_obj); 117 117 /* Update the driver data with NULL for MGR Object */ ··· 163 163 break; 164 164 *pu_num_nodes = node_index; 165 165 if (node_id == (node_index - 1)) { 166 - status = dcd_get_object_def(pmgr_obj->hdcd_mgr, 166 + status = dcd_get_object_def(pmgr_obj->dcd_mgr, 167 167 &node_uuid, DSP_DCDNODETYPE, &gen_obj); 168 168 if (status) 169 169 break; ··· 258 258 if (proc_detect != false) 259 259 continue; 260 260 261 - status2 = dcd_get_object_def(pmgr_obj->hdcd_mgr, 261 + status2 = dcd_get_object_def(pmgr_obj->dcd_mgr, 262 262 (struct dsp_uuid *)&temp_uuid, 263 263 DSP_DCDPROCESSORTYPE, &gen_obj); 264 264 if (!status2) { ··· 333 333 334 334 *dcd_handle = (u32) NULL; 335 335 if (pmgr_obj) { 336 - *dcd_handle = (u32) pmgr_obj->hdcd_mgr; 336 + *dcd_handle = (u32) pmgr_obj->dcd_mgr; 337 337 status = 0; 338 338 } 339 339 DBC_ENSURE((!status && *dcd_handle != (u32) NULL) ||
+22 -22
drivers/staging/tidspbridge/rmgr/nldr.c
··· 190 190 * Overlay loader object. 191 191 */ 192 192 struct nldr_object { 193 - struct dev_object *hdev_obj; /* Device object */ 194 - struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */ 193 + struct dev_object *dev_obj; /* Device object */ 194 + struct dcd_manager *dcd_mgr; /* Proc/Node data manager */ 195 195 struct dbll_tar_obj *dbll; /* The DBL loader */ 196 196 struct dbll_library_obj *base_lib; /* Base image library */ 197 197 struct rmm_target_obj *rmm; /* Remote memory manager for DSP */ ··· 206 206 u32 *seg_table; /* memtypes of dynamic memory segs 207 207 * indexed by segid 208 208 */ 209 - u16 us_dsp_mau_size; /* Size of DSP MAU */ 210 - u16 us_dsp_word_size; /* Size of DSP word */ 209 + u16 dsp_mau_size; /* Size of DSP MAU */ 210 + u16 dsp_word_size; /* Size of DSP word */ 211 211 }; 212 212 213 213 /* ··· 435 435 /* Allocate dynamic loader object */ 436 436 nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL); 437 437 if (nldr_obj) { 438 - nldr_obj->hdev_obj = hdev_obj; 438 + nldr_obj->dev_obj = hdev_obj; 439 439 /* warning, lazy status checking alert! 
*/ 440 440 dev_get_cod_mgr(hdev_obj, &cod_mgr); 441 441 if (cod_mgr) { ··· 450 450 } 451 451 status = 0; 452 452 /* end lazy status checking */ 453 - nldr_obj->us_dsp_mau_size = pattrs->us_dsp_mau_size; 454 - nldr_obj->us_dsp_word_size = pattrs->us_dsp_word_size; 453 + nldr_obj->dsp_mau_size = pattrs->dsp_mau_size; 454 + nldr_obj->dsp_word_size = pattrs->dsp_word_size; 455 455 nldr_obj->ldr_fxns = ldr_fxns; 456 456 if (!(nldr_obj->ldr_fxns.init_fxn())) 457 457 status = -ENOMEM; ··· 461 461 } 462 462 /* Create the DCD Manager */ 463 463 if (!status) 464 - status = dcd_create_manager(NULL, &nldr_obj->hdcd_mgr); 464 + status = dcd_create_manager(NULL, &nldr_obj->dcd_mgr); 465 465 466 466 /* Get dynamic loading memory sections from base lib */ 467 467 if (!status) { ··· 471 471 &ul_len); 472 472 if (!status) { 473 473 psz_coff_buf = 474 - kzalloc(ul_len * nldr_obj->us_dsp_mau_size, 474 + kzalloc(ul_len * nldr_obj->dsp_mau_size, 475 475 GFP_KERNEL); 476 476 if (!psz_coff_buf) 477 477 status = -ENOMEM; ··· 550 550 DBC_ASSERT(!status); 551 551 /* First count number of overlay nodes */ 552 552 status = 553 - dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file, 553 + dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file, 554 554 add_ovly_node, (void *)nldr_obj); 555 555 /* Now build table of overlay nodes */ 556 556 if (!status && nldr_obj->ovly_nodes > 0) { ··· 560 560 nldr_obj->ovly_nodes, GFP_KERNEL); 561 561 /* Put overlay nodes in the table */ 562 562 nldr_obj->ovly_nid = 0; 563 - status = dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file, 563 + status = dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file, 564 564 add_ovly_node, 565 565 (void *)nldr_obj); 566 566 } ··· 604 604 605 605 kfree(nldr_obj->seg_table); 606 606 607 - if (nldr_obj->hdcd_mgr) 608 - dcd_destroy_manager(nldr_obj->hdcd_mgr); 607 + if (nldr_obj->dcd_mgr) 608 + dcd_destroy_manager(nldr_obj->dcd_mgr); 609 609 610 610 /* Free overlay node information */ 611 611 if (nldr_obj->ovly_table) { ··· 1005 1005 goto func_end; 1006 
1006 1007 1007 status = 1008 - dcd_get_object_def(nldr_obj->hdcd_mgr, uuid_obj, obj_type, 1008 + dcd_get_object_def(nldr_obj->dcd_mgr, uuid_obj, obj_type, 1009 1009 &obj_def); 1010 1010 if (status) 1011 1011 goto func_end; ··· 1262 1262 if (depth == 0) { 1263 1263 status = 1264 1264 dcd_get_library_name(nldr_node_obj->nldr_obj-> 1265 - hdcd_mgr, &uuid, psz_file_name, 1265 + dcd_mgr, &uuid, psz_file_name, 1266 1266 &dw_buf_size, phase, 1267 1267 nldr_node_obj->phase_split); 1268 1268 } else { 1269 1269 /* Dependent libraries are registered with a phase */ 1270 1270 status = 1271 1271 dcd_get_library_name(nldr_node_obj->nldr_obj-> 1272 - hdcd_mgr, &uuid, psz_file_name, 1272 + dcd_mgr, &uuid, psz_file_name, 1273 1273 &dw_buf_size, NLDR_NOPHASE, 1274 1274 NULL); 1275 1275 } ··· 1309 1309 depth++; 1310 1310 /* Get number of dependent libraries */ 1311 1311 status = 1312 - dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->hdcd_mgr, 1312 + dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->dcd_mgr, 1313 1313 &uuid, &nd_libs, &np_libs, phase); 1314 1314 } 1315 1315 DBC_ASSERT(nd_libs >= np_libs); ··· 1342 1342 /* Get the dependent library UUIDs */ 1343 1343 status = 1344 1344 dcd_get_dep_libs(nldr_node_obj-> 1345 - nldr_obj->hdcd_mgr, &uuid, 1345 + nldr_obj->dcd_mgr, &uuid, 1346 1346 nd_libs, dep_lib_uui_ds, 1347 1347 persistent_dep_libs, 1348 1348 phase); ··· 1630 1630 rmm = nldr_obj->rmm; 1631 1631 /* Convert size to DSP words */ 1632 1632 word_size = 1633 - (size + nldr_obj->us_dsp_word_size - 1634 - 1) / nldr_obj->us_dsp_word_size; 1633 + (size + nldr_obj->dsp_word_size - 1634 + 1) / nldr_obj->dsp_word_size; 1635 1635 /* Modify memory 'align' to account for DSP cache line size */ 1636 1636 align = lcm(GEM_CACHE_LINE_SIZE, align); 1637 1637 dev_dbg(bridge, "%s: memory align to 0x%x\n", __func__, align); ··· 1742 1742 1743 1743 /* Convert size to DSP words */ 1744 1744 word_size = 1745 - (size + nldr_obj->us_dsp_word_size - 1746 - 1) / nldr_obj->us_dsp_word_size; 1745 + (size + 
nldr_obj->dsp_word_size - 1746 + 1) / nldr_obj->dsp_word_size; 1747 1747 1748 1748 if (rmm_free(rmm, space, dsp_address, word_size, reserve)) 1749 1749 status = 0;
+44 -44
drivers/staging/tidspbridge/rmgr/node.c
··· 124 124 * ======== node_mgr ======== 125 125 */ 126 126 struct node_mgr { 127 - struct dev_object *hdev_obj; /* Device object */ 127 + struct dev_object *dev_obj; /* Device object */ 128 128 /* Function interface to Bridge driver */ 129 129 struct bridge_drv_interface *intf_fxns; 130 - struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */ 130 + struct dcd_manager *dcd_mgr; /* Proc/Node data manager */ 131 131 struct disp_object *disp_obj; /* Node dispatcher */ 132 132 struct list_head node_list; /* List of all allocated nodes */ 133 133 u32 num_nodes; /* Number of nodes in node_list */ ··· 188 188 */ 189 189 struct node_object { 190 190 struct list_head list_elem; 191 - struct node_mgr *hnode_mgr; /* The manager of this node */ 191 + struct node_mgr *node_mgr; /* The manager of this node */ 192 192 struct proc_object *hprocessor; /* Back pointer to processor */ 193 193 struct dsp_uuid node_uuid; /* Node's ID */ 194 194 s32 prio; /* Node's current priority */ ··· 389 389 status = -ENOMEM; 390 390 goto func_end; 391 391 } 392 - pnode->hnode_mgr = hnode_mgr; 392 + pnode->node_mgr = hnode_mgr; 393 393 /* This critical section protects get_node_props */ 394 394 mutex_lock(&hnode_mgr->node_mgr_lock); 395 395 396 396 /* Get dsp_ndbprops from node database */ 397 - status = get_node_props(hnode_mgr->hdcd_mgr, pnode, node_uuid, 397 + status = get_node_props(hnode_mgr->dcd_mgr, pnode, node_uuid, 398 398 &(pnode->dcd_props)); 399 399 if (status) 400 400 goto func_cont; ··· 784 784 785 785 DBC_REQUIRE(refs > 0); 786 786 787 - if (!hnode || !hnode->hnode_mgr) { 787 + if (!hnode || !hnode->node_mgr) { 788 788 status = -EFAULT; 789 789 } else { 790 - hnode_mgr = hnode->hnode_mgr; 790 + hnode_mgr = hnode->node_mgr; 791 791 node_type = node_get_type(hnode); 792 792 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) 793 793 status = -EPERM; ··· 862 862 /* The two nodes must be on the same processor */ 863 863 if (node1 != (struct node_object *)DSP_HGPPNODE && 864 
864 node2 != (struct node_object *)DSP_HGPPNODE && 865 - node1->hnode_mgr != node2->hnode_mgr) 865 + node1->node_mgr != node2->node_mgr) 866 866 return -EPERM; 867 867 868 868 /* Cannot connect a node to itself */ ··· 901 901 return -EPERM; /* illegal stream mode */ 902 902 903 903 if (node1_type != NODE_GPP) { 904 - hnode_mgr = node1->hnode_mgr; 904 + hnode_mgr = node1->node_mgr; 905 905 } else { 906 906 DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE); 907 - hnode_mgr = node2->hnode_mgr; 907 + hnode_mgr = node2->node_mgr; 908 908 } 909 909 910 910 /* Enter critical section */ ··· 1158 1158 /* create struct dsp_cbdata struct for PWR calls */ 1159 1159 cb_data.cb_data = PWR_TIMEOUT; 1160 1160 node_type = node_get_type(hnode); 1161 - hnode_mgr = hnode->hnode_mgr; 1161 + hnode_mgr = hnode->node_mgr; 1162 1162 intf_fxns = hnode_mgr->intf_fxns; 1163 1163 /* Get access to node dispatcher */ 1164 1164 mutex_lock(&hnode_mgr->node_mgr_lock); ··· 1301 1301 if (!node_mgr_obj) 1302 1302 return -ENOMEM; 1303 1303 1304 - node_mgr_obj->hdev_obj = hdev_obj; 1304 + node_mgr_obj->dev_obj = hdev_obj; 1305 1305 1306 1306 node_mgr_obj->ntfy_obj = kmalloc(sizeof(struct ntfy_object), 1307 1307 GFP_KERNEL); ··· 1315 1315 1316 1316 dev_get_dev_type(hdev_obj, &dev_type); 1317 1317 1318 - status = dcd_create_manager(sz_zl_file, &node_mgr_obj->hdcd_mgr); 1318 + status = dcd_create_manager(sz_zl_file, &node_mgr_obj->dcd_mgr); 1319 1319 if (status) 1320 1320 goto out_err; 1321 1321 ··· 1364 1364 1365 1365 nldr_attrs_obj.ovly = ovly; 1366 1366 nldr_attrs_obj.write = mem_write; 1367 - nldr_attrs_obj.us_dsp_word_size = node_mgr_obj->udsp_word_size; 1368 - nldr_attrs_obj.us_dsp_mau_size = node_mgr_obj->udsp_mau_size; 1367 + nldr_attrs_obj.dsp_word_size = node_mgr_obj->udsp_word_size; 1368 + nldr_attrs_obj.dsp_mau_size = node_mgr_obj->udsp_mau_size; 1369 1369 node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.init(); 1370 1370 status = node_mgr_obj->nldr_fxns.create(&node_mgr_obj->nldr_obj, 
1371 1371 hdev_obj, ··· 1417 1417 } 1418 1418 /* create struct dsp_cbdata struct for PWR call */ 1419 1419 cb_data.cb_data = PWR_TIMEOUT; 1420 - hnode_mgr = pnode->hnode_mgr; 1420 + hnode_mgr = pnode->node_mgr; 1421 1421 hprocessor = pnode->hprocessor; 1422 1422 disp_obj = hnode_mgr->disp_obj; 1423 1423 node_type = node_get_type(pnode); ··· 1676 1676 if (!hnode) 1677 1677 return -EFAULT; 1678 1678 1679 - hnode_mgr = hnode->hnode_mgr; 1679 + hnode_mgr = hnode->node_mgr; 1680 1680 /* Enter hnode_mgr critical section (since we're accessing 1681 1681 * data that could be changed by node_change_priority() and 1682 1682 * node_connect(). */ ··· 1779 1779 status = -EPERM; 1780 1780 goto func_end; 1781 1781 } 1782 - hnode_mgr = hnode->hnode_mgr; 1782 + hnode_mgr = hnode->node_mgr; 1783 1783 node_type = node_get_type(hnode); 1784 1784 if (node_type != NODE_MESSAGE && node_type != NODE_TASK && 1785 1785 node_type != NODE_DAISSOCKET) { ··· 1801 1801 /* Translate DSP byte addr to GPP Va. */ 1802 1802 tmp_buf = cmm_xlator_translate(hnode->xlator, 1803 1803 (void *)(message->arg1 * 1804 - hnode->hnode_mgr-> 1804 + hnode->node_mgr-> 1805 1805 udsp_word_size), CMM_DSPPA2PA); 1806 1806 if (tmp_buf != NULL) { 1807 1807 /* now convert this GPP Pa to Va */ ··· 1810 1810 if (tmp_buf != NULL) { 1811 1811 /* Adjust SM size in msg */ 1812 1812 message->arg1 = (u32) tmp_buf; 1813 - message->arg2 *= hnode->hnode_mgr->udsp_word_size; 1813 + message->arg2 *= hnode->node_mgr->udsp_word_size; 1814 1814 } else { 1815 1815 status = -ESRCH; 1816 1816 } ··· 1857 1857 if (!hnode) 1858 1858 status = -EFAULT; 1859 1859 else 1860 - *strm_man = hnode->hnode_mgr->strm_mgr_obj; 1860 + *strm_man = hnode->node_mgr->strm_mgr_obj; 1861 1861 1862 1862 return status; 1863 1863 } ··· 1942 1942 NODE_SET_STATE(hnode, NODE_DONE); 1943 1943 hnode->exit_status = node_status; 1944 1944 if (hnode->loaded && hnode->phase_split) { 1945 - (void)hnode->hnode_mgr->nldr_fxns.unload(hnode-> 1945 + 
(void)hnode->node_mgr->nldr_fxns.unload(hnode-> 1946 1946 nldr_node_obj, 1947 1947 NLDR_EXECUTE); 1948 1948 hnode->loaded = false; ··· 1988 1988 status = -ENOSYS; 1989 1989 1990 1990 if (!status) { 1991 - hnode_mgr = hnode->hnode_mgr; 1991 + hnode_mgr = hnode->node_mgr; 1992 1992 1993 1993 /* Enter critical section */ 1994 1994 mutex_lock(&hnode_mgr->node_mgr_lock); ··· 2072 2072 status = -EPERM; 2073 2073 goto func_end; 2074 2074 } 2075 - hnode_mgr = hnode->hnode_mgr; 2075 + hnode_mgr = hnode->node_mgr; 2076 2076 node_type = node_get_type(hnode); 2077 2077 if (node_type != NODE_MESSAGE && node_type != NODE_TASK && 2078 2078 node_type != NODE_DAISSOCKET) ··· 2107 2107 CMM_VA2DSPPA); 2108 2108 if (tmp_buf != NULL) { 2109 2109 /* got translation, convert to MAUs in msg */ 2110 - if (hnode->hnode_mgr->udsp_word_size != 0) { 2110 + if (hnode->node_mgr->udsp_word_size != 0) { 2111 2111 new_msg.arg1 = 2112 2112 (u32) tmp_buf / 2113 - hnode->hnode_mgr->udsp_word_size; 2113 + hnode->node_mgr->udsp_word_size; 2114 2114 /* MAUs */ 2115 - new_msg.arg2 /= hnode->hnode_mgr-> 2115 + new_msg.arg2 /= hnode->node_mgr-> 2116 2116 udsp_word_size; 2117 2117 } else { 2118 2118 pr_err("%s: udsp_word_size is zero!\n", ··· 2172 2172 notify_type); 2173 2173 } else { 2174 2174 /* Send Message part of event mask to msg_ctrl */ 2175 - intf_fxns = hnode->hnode_mgr->intf_fxns; 2175 + intf_fxns = hnode->node_mgr->intf_fxns; 2176 2176 status = (*intf_fxns->msg_register_notify) 2177 2177 (hnode->msg_queue_obj, 2178 2178 event_mask & DSP_NODEMESSAGEREADY, notify_type, ··· 2229 2229 if (status) 2230 2230 goto func_end; 2231 2231 2232 - hnode_mgr = hnode->hnode_mgr; 2232 + hnode_mgr = hnode->node_mgr; 2233 2233 if (!hnode_mgr) { 2234 2234 status = -EFAULT; 2235 2235 goto func_end; ··· 2329 2329 DBC_REQUIRE(refs > 0); 2330 2330 DBC_REQUIRE(pstatus != NULL); 2331 2331 2332 - if (!hnode || !hnode->hnode_mgr) { 2332 + if (!hnode || !hnode->node_mgr) { 2333 2333 status = -EFAULT; 2334 2334 goto func_end; 
2335 2335 } ··· 2340 2340 status = proc_get_processor_id(pnode->hprocessor, &proc_id); 2341 2341 2342 2342 if (!status) { 2343 - hnode_mgr = hnode->hnode_mgr; 2343 + hnode_mgr = hnode->node_mgr; 2344 2344 node_type = node_get_type(hnode); 2345 2345 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) 2346 2346 status = -EPERM; ··· 2416 2416 * Here it goes the part of the simulation of 2417 2417 * the DSP exception. 2418 2418 */ 2419 - dev_get_deh_mgr(hnode_mgr->hdev_obj, &hdeh_mgr); 2419 + dev_get_deh_mgr(hnode_mgr->dev_obj, &hdeh_mgr); 2420 2420 if (!hdeh_mgr) 2421 2421 goto func_cont; 2422 2422 ··· 2465 2465 int status; 2466 2466 if (!hnode) 2467 2467 goto func_end; 2468 - hnode_mgr = hnode->hnode_mgr; 2468 + hnode_mgr = hnode->node_mgr; 2469 2469 if (!hnode_mgr) 2470 2470 goto func_end; 2471 2471 ··· 2567 2567 kfree(hnode->xlator); 2568 2568 kfree(hnode->nldr_node_obj); 2569 2569 hnode->nldr_node_obj = NULL; 2570 - hnode->hnode_mgr = NULL; 2570 + hnode->node_mgr = NULL; 2571 2571 kfree(hnode); 2572 2572 hnode = NULL; 2573 2573 func_end: ··· 2585 2585 2586 2586 if (hnode_mgr) { 2587 2587 /* Free resources */ 2588 - if (hnode_mgr->hdcd_mgr) 2589 - dcd_destroy_manager(hnode_mgr->hdcd_mgr); 2588 + if (hnode_mgr->dcd_mgr) 2589 + dcd_destroy_manager(hnode_mgr->dcd_mgr); 2590 2590 2591 2591 /* Remove any elements remaining in lists */ 2592 2592 list_for_each_entry_safe(hnode, tmp, &hnode_mgr->node_list, ··· 2686 2686 struct node_strmdef *pstrm_def, 2687 2687 struct dsp_strmattr *pattrs) 2688 2688 { 2689 - struct node_mgr *hnode_mgr = hnode->hnode_mgr; 2689 + struct node_mgr *hnode_mgr = hnode->node_mgr; 2690 2690 2691 2691 if (pattrs != NULL) { 2692 2692 pstrm_def->num_bufs = pattrs->num_bufs; ··· 2746 2746 u32 phase) 2747 2747 { 2748 2748 char *pstr_fxn_name = NULL; 2749 - struct node_mgr *hnode_mgr = hnode->hnode_mgr; 2749 + struct node_mgr *hnode_mgr = hnode->node_mgr; 2750 2750 int status = 0; 2751 2751 DBC_REQUIRE(node_get_type(hnode) == NODE_TASK || 2752 
2752 node_get_type(hnode) == NODE_DAISSOCKET || ··· 2979 2979 dcd_node_props.pstr_delete_phase_fxn = NULL; 2980 2980 dcd_node_props.pstr_i_alg_name = NULL; 2981 2981 2982 - status = dcd_get_object_def(hnode_mgr->hdcd_mgr, 2982 + status = dcd_get_object_def(hnode_mgr->dcd_mgr, 2983 2983 (struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE, 2984 2984 (struct dcd_genericobj *)&dcd_node_props); 2985 2985 ··· 3007 3007 static int get_rms_fxns(struct node_mgr *hnode_mgr) 3008 3008 { 3009 3009 s32 i; 3010 - struct dev_object *dev_obj = hnode_mgr->hdev_obj; 3010 + struct dev_object *dev_obj = hnode_mgr->dev_obj; 3011 3011 int status = 0; 3012 3012 3013 3013 static char *psz_fxns[NUMRMSFXNS] = { ··· 3065 3065 3066 3066 DBC_REQUIRE(hnode); 3067 3067 3068 - hnode_mgr = hnode->hnode_mgr; 3068 + hnode_mgr = hnode->node_mgr; 3069 3069 3070 3070 ul_size = ul_num_bytes / hnode_mgr->udsp_word_size; 3071 3071 ul_timeout = hnode->utimeout; 3072 3072 3073 3073 /* Call new MemCopy function */ 3074 3074 intf_fxns = hnode_mgr->intf_fxns; 3075 - status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context); 3075 + status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context); 3076 3076 if (!status) { 3077 3077 status = 3078 3078 (*intf_fxns->brd_mem_copy) (hbridge_context, ··· 3109 3109 DBC_REQUIRE(hnode); 3110 3110 DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA); 3111 3111 3112 - hnode_mgr = hnode->hnode_mgr; 3112 + hnode_mgr = hnode->node_mgr; 3113 3113 3114 3114 ul_timeout = hnode->utimeout; 3115 3115 mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA; 3116 3116 3117 3117 /* Call new MemWrite function */ 3118 3118 intf_fxns = hnode_mgr->intf_fxns; 3119 - status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context); 3119 + status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context); 3120 3120 status = (*intf_fxns->brd_mem_write) (hbridge_context, pbuf, 3121 3121 dsp_add, ul_num_bytes, mem_sect_type); 3122 3122
+63 -63
drivers/staging/tidspbridge/rmgr/proc.c
··· 80 80 /* The proc_object structure. */ 81 81 struct proc_object { 82 82 struct list_head link; /* Link to next proc_object */ 83 - struct dev_object *hdev_obj; /* Device this PROC represents */ 83 + struct dev_object *dev_obj; /* Device this PROC represents */ 84 84 u32 process; /* Process owning this Processor */ 85 - struct mgr_object *hmgr_obj; /* Manager Object Handle */ 85 + struct mgr_object *mgr_obj; /* Manager Object Handle */ 86 86 u32 attach_count; /* Processor attach count */ 87 87 u32 processor_id; /* Processor number */ 88 88 u32 utimeout; /* Time out count */ 89 89 enum dsp_procstate proc_state; /* Processor state */ 90 - u32 ul_unit; /* DDSP unit number */ 90 + u32 unit; /* DDSP unit number */ 91 91 bool is_already_attached; /* 92 92 * True if the Device below has 93 93 * GPP Client attached 94 94 */ 95 95 struct ntfy_object *ntfy_obj; /* Manages notifications */ 96 96 /* Bridge Context Handle */ 97 - struct bridge_dev_context *hbridge_context; 97 + struct bridge_dev_context *bridge_context; 98 98 /* Function interface to Bridge driver */ 99 99 struct bridge_drv_interface *intf_fxns; 100 - char *psz_last_coff; 100 + char *last_coff; 101 101 struct list_head proc_list; 102 102 }; 103 103 ··· 315 315 status = -ENOMEM; 316 316 goto func_end; 317 317 } 318 - p_proc_object->hdev_obj = hdev_obj; 319 - p_proc_object->hmgr_obj = hmgr_obj; 318 + p_proc_object->dev_obj = hdev_obj; 319 + p_proc_object->mgr_obj = hmgr_obj; 320 320 p_proc_object->processor_id = dev_type; 321 321 /* Store TGID instead of process handle */ 322 322 p_proc_object->process = current->tgid; ··· 331 331 status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns); 332 332 if (!status) { 333 333 status = dev_get_bridge_context(hdev_obj, 334 - &p_proc_object->hbridge_context); 334 + &p_proc_object->bridge_context); 335 335 if (status) 336 336 kfree(p_proc_object); 337 337 } else ··· 356 356 * Return handle to this Processor Object: 357 357 * Find out if the Device is already 
attached to a 358 358 * Processor. If so, return AlreadyAttached status */ 359 - status = dev_insert_proc_object(p_proc_object->hdev_obj, 359 + status = dev_insert_proc_object(p_proc_object->dev_obj, 360 360 (u32) p_proc_object, 361 361 &p_proc_object-> 362 362 is_already_attached); ··· 463 463 status = -ENOMEM; 464 464 goto func_end; 465 465 } 466 - p_proc_object->hdev_obj = hdev_obj; 467 - p_proc_object->hmgr_obj = hmgr_obj; 466 + p_proc_object->dev_obj = hdev_obj; 467 + p_proc_object->mgr_obj = hmgr_obj; 468 468 status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns); 469 469 if (!status) 470 470 status = dev_get_bridge_context(hdev_obj, 471 - &p_proc_object->hbridge_context); 471 + &p_proc_object->bridge_context); 472 472 if (status) 473 473 goto func_cont; 474 474 ··· 491 491 if (!status) 492 492 status = proc_start(p_proc_object); 493 493 } 494 - kfree(p_proc_object->psz_last_coff); 495 - p_proc_object->psz_last_coff = NULL; 494 + kfree(p_proc_object->last_coff); 495 + p_proc_object->last_coff = NULL; 496 496 func_cont: 497 497 kfree(p_proc_object); 498 498 func_end: ··· 541 541 status = pwr_wake_dsp(timeout); 542 542 } else 543 543 if (!((*p_proc_object->intf_fxns->dev_cntrl) 544 - (p_proc_object->hbridge_context, dw_cmd, 544 + (p_proc_object->bridge_context, dw_cmd, 545 545 arg))) { 546 546 status = 0; 547 547 } else { ··· 578 578 kfree(p_proc_object->ntfy_obj); 579 579 } 580 580 581 - kfree(p_proc_object->psz_last_coff); 582 - p_proc_object->psz_last_coff = NULL; 581 + kfree(p_proc_object->last_coff); 582 + p_proc_object->last_coff = NULL; 583 583 /* Remove the Proc from the DEV List */ 584 - (void)dev_remove_proc_object(p_proc_object->hdev_obj, 584 + (void)dev_remove_proc_object(p_proc_object->dev_obj, 585 585 (u32) p_proc_object); 586 586 /* Free the Processor Object */ 587 587 kfree(p_proc_object); ··· 613 613 DBC_REQUIRE(pu_allocated != NULL); 614 614 615 615 if (p_proc_object) { 616 - if (!(dev_get_node_manager(p_proc_object->hdev_obj, 616 + 
if (!(dev_get_node_manager(p_proc_object->dev_obj, 617 617 &hnode_mgr))) { 618 618 if (hnode_mgr) { 619 619 status = node_enum_nodes(hnode_mgr, node_tab, ··· 890 890 case DSP_RESOURCE_DYNSARAM: 891 891 case DSP_RESOURCE_DYNEXTERNAL: 892 892 case DSP_RESOURCE_DYNSRAM: 893 - status = dev_get_node_manager(p_proc_object->hdev_obj, 893 + status = dev_get_node_manager(p_proc_object->dev_obj, 894 894 &hnode_mgr); 895 895 if (!hnode_mgr) { 896 896 status = -EFAULT; ··· 913 913 } 914 914 break; 915 915 case DSP_RESOURCE_PROCLOAD: 916 - status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr); 916 + status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr); 917 917 if (hio_mgr) 918 918 status = 919 919 p_proc_object->intf_fxns-> ··· 963 963 DBC_REQUIRE(device_obj != NULL); 964 964 965 965 if (p_proc_object) { 966 - *device_obj = p_proc_object->hdev_obj; 966 + *device_obj = p_proc_object->dev_obj; 967 967 status = 0; 968 968 } else { 969 969 *device_obj = NULL; ··· 996 996 if (p_proc_object) { 997 997 /* First, retrieve BRD state information */ 998 998 status = (*p_proc_object->intf_fxns->brd_status) 999 - (p_proc_object->hbridge_context, &brd_status); 999 + (p_proc_object->bridge_context, &brd_status); 1000 1000 if (!status) { 1001 1001 switch (brd_status) { 1002 1002 case BRD_STOPPED: ··· 1115 1115 status = -EFAULT; 1116 1116 goto func_end; 1117 1117 } 1118 - dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr); 1118 + dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr); 1119 1119 if (!cod_mgr) { 1120 1120 status = -EPERM; 1121 1121 goto func_end; ··· 1147 1147 prepend_envp(new_envp, (char **)user_envp, 1148 1148 envp_elems, cnew_envp, sz_proc_id); 1149 1149 /* Get the DCD Handle */ 1150 - status = mgr_get_dcd_handle(p_proc_object->hmgr_obj, 1150 + status = mgr_get_dcd_handle(p_proc_object->mgr_obj, 1151 1151 (u32 *) &hdcd_handle); 1152 1152 if (!status) { 1153 1153 /* Before proceeding with new load, ··· 1156 1156 * If yes, unregister nodes in previously 1157 1157 * 
registered COFF. If any error occurred, 1158 1158 * set previously registered COFF to NULL. */ 1159 - if (p_proc_object->psz_last_coff != NULL) { 1159 + if (p_proc_object->last_coff != NULL) { 1160 1160 status = 1161 1161 dcd_auto_unregister(hdcd_handle, 1162 1162 p_proc_object-> 1163 - psz_last_coff); 1163 + last_coff); 1164 1164 /* Regardless of auto unregister status, 1165 1165 * free previously allocated 1166 1166 * memory. */ 1167 - kfree(p_proc_object->psz_last_coff); 1168 - p_proc_object->psz_last_coff = NULL; 1167 + kfree(p_proc_object->last_coff); 1168 + p_proc_object->last_coff = NULL; 1169 1169 } 1170 1170 } 1171 1171 /* On success, do cod_open_base() */ ··· 1178 1178 if (!status) { 1179 1179 /* Auto-register data base */ 1180 1180 /* Get the DCD Handle */ 1181 - status = mgr_get_dcd_handle(p_proc_object->hmgr_obj, 1181 + status = mgr_get_dcd_handle(p_proc_object->mgr_obj, 1182 1182 (u32 *) &hdcd_handle); 1183 1183 if (!status) { 1184 1184 /* Auto register nodes in specified COFF ··· 1195 1195 if (status) { 1196 1196 status = -EPERM; 1197 1197 } else { 1198 - DBC_ASSERT(p_proc_object->psz_last_coff == 1198 + DBC_ASSERT(p_proc_object->last_coff == 1199 1199 NULL); 1200 1200 /* Allocate memory for pszLastCoff */ 1201 - p_proc_object->psz_last_coff = 1201 + p_proc_object->last_coff = 1202 1202 kzalloc((strlen(user_args[0]) + 1203 1203 1), GFP_KERNEL); 1204 1204 /* If memory allocated, save COFF file name */ 1205 - if (p_proc_object->psz_last_coff) { 1206 - strncpy(p_proc_object->psz_last_coff, 1205 + if (p_proc_object->last_coff) { 1206 + strncpy(p_proc_object->last_coff, 1207 1207 (char *)user_args[0], 1208 1208 (strlen((char *)user_args[0]) + 1209 1209 1)); ··· 1215 1215 if (!status) { 1216 1216 /* Create the message manager. This must be done 1217 1217 * before calling the IOOnLoaded function. 
*/ 1218 - dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr); 1218 + dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr); 1219 1219 if (!hmsg_mgr) { 1220 - status = msg_create(&hmsg_mgr, p_proc_object->hdev_obj, 1220 + status = msg_create(&hmsg_mgr, p_proc_object->dev_obj, 1221 1221 (msg_onexit) node_on_exit); 1222 1222 DBC_ASSERT(!status); 1223 - dev_set_msg_mgr(p_proc_object->hdev_obj, hmsg_mgr); 1223 + dev_set_msg_mgr(p_proc_object->dev_obj, hmsg_mgr); 1224 1224 } 1225 1225 } 1226 1226 if (!status) { 1227 1227 /* Set the Device object's message manager */ 1228 - status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr); 1228 + status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr); 1229 1229 if (hio_mgr) 1230 1230 status = (*p_proc_object->intf_fxns->io_on_loaded) 1231 1231 (hio_mgr); ··· 1242 1242 #endif 1243 1243 status = cod_load_base(cod_mgr, argc_index, (char **)user_args, 1244 1244 dev_brd_write_fxn, 1245 - p_proc_object->hdev_obj, NULL); 1245 + p_proc_object->dev_obj, NULL); 1246 1246 if (status) { 1247 1247 if (status == -EBADF) { 1248 1248 dev_dbg(bridge, "%s: Failure to Load the EXE\n", ··· 1263 1263 if (!status) { 1264 1264 /* Update the Processor status to loaded */ 1265 1265 status = (*p_proc_object->intf_fxns->brd_set_state) 1266 - (p_proc_object->hbridge_context, BRD_LOADED); 1266 + (p_proc_object->bridge_context, BRD_LOADED); 1267 1267 if (!status) { 1268 1268 p_proc_object->proc_state = PROC_LOADED; 1269 1269 if (p_proc_object->ntfy_obj) ··· 1283 1283 /* Reset DMM structs and add an initial free chunk */ 1284 1284 if (!status) { 1285 1285 status = 1286 - dev_get_dmm_mgr(p_proc_object->hdev_obj, 1286 + dev_get_dmm_mgr(p_proc_object->dev_obj, 1287 1287 &dmm_mgr); 1288 1288 if (dmm_mgr) { 1289 1289 /* Set dw_ext_end to DMM START u8 ··· 1305 1305 user_args[0] = pargv0; 1306 1306 if (!status) { 1307 1307 if (!((*p_proc_object->intf_fxns->brd_status) 1308 - (p_proc_object->hbridge_context, &brd_state))) { 1308 + (p_proc_object->bridge_context, 
&brd_state))) { 1309 1309 pr_info("%s: Processor Loaded %s\n", __func__, pargv0); 1310 1310 kfree(drv_datap->base_img); 1311 1311 drv_datap->base_img = kmalloc(strlen(pargv0) + 1, ··· 1398 1398 status = -ENOMEM; 1399 1399 else 1400 1400 status = (*p_proc_object->intf_fxns->brd_mem_map) 1401 - (p_proc_object->hbridge_context, pa_align, va_align, 1401 + (p_proc_object->bridge_context, pa_align, va_align, 1402 1402 size_align, ul_map_attr, map_obj->pages); 1403 1403 } 1404 1404 if (!status) { ··· 1475 1475 */ 1476 1476 if ((event_mask == 0) && status) { 1477 1477 status = 1478 - dev_get_deh_mgr(p_proc_object->hdev_obj, 1478 + dev_get_deh_mgr(p_proc_object->dev_obj, 1479 1479 &hdeh_mgr); 1480 1480 status = 1481 1481 bridge_deh_register_notify(hdeh_mgr, ··· 1484 1484 hnotification); 1485 1485 } 1486 1486 } else { 1487 - status = dev_get_deh_mgr(p_proc_object->hdev_obj, 1487 + status = dev_get_deh_mgr(p_proc_object->dev_obj, 1488 1488 &hdeh_mgr); 1489 1489 status = 1490 1490 bridge_deh_register_notify(hdeh_mgr, ··· 1570 1570 status = -EBADR; 1571 1571 goto func_end; 1572 1572 } 1573 - status = dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr); 1573 + status = dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr); 1574 1574 if (!cod_mgr) { 1575 1575 status = -EFAULT; 1576 1576 goto func_cont; ··· 1581 1581 goto func_cont; 1582 1582 1583 1583 status = (*p_proc_object->intf_fxns->brd_start) 1584 - (p_proc_object->hbridge_context, dw_dsp_addr); 1584 + (p_proc_object->bridge_context, dw_dsp_addr); 1585 1585 if (status) 1586 1586 goto func_cont; 1587 1587 1588 1588 /* Call dev_create2 */ 1589 - status = dev_create2(p_proc_object->hdev_obj); 1589 + status = dev_create2(p_proc_object->dev_obj); 1590 1590 if (!status) { 1591 1591 p_proc_object->proc_state = PROC_RUNNING; 1592 1592 /* Deep sleep switces off the peripheral clocks. ··· 1601 1601 /* Failed to Create Node Manager and DISP Object 1602 1602 * Stop the Processor from running. 
Put it in STOPPED State */ 1603 1603 (void)(*p_proc_object->intf_fxns-> 1604 - brd_stop) (p_proc_object->hbridge_context); 1604 + brd_stop) (p_proc_object->bridge_context); 1605 1605 p_proc_object->proc_state = PROC_STOPPED; 1606 1606 } 1607 1607 func_cont: 1608 1608 if (!status) { 1609 1609 if (!((*p_proc_object->intf_fxns->brd_status) 1610 - (p_proc_object->hbridge_context, &brd_state))) { 1610 + (p_proc_object->bridge_context, &brd_state))) { 1611 1611 pr_info("%s: dsp in running state\n", __func__); 1612 1612 DBC_ASSERT(brd_state != BRD_HIBERNATION); 1613 1613 } ··· 1645 1645 goto func_end; 1646 1646 } 1647 1647 /* check if there are any running nodes */ 1648 - status = dev_get_node_manager(p_proc_object->hdev_obj, &hnode_mgr); 1648 + status = dev_get_node_manager(p_proc_object->dev_obj, &hnode_mgr); 1649 1649 if (!status && hnode_mgr) { 1650 1650 status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size, 1651 1651 &num_nodes, &nodes_allocated); ··· 1659 1659 /* It is OK to stop a device that does n't have nodes OR not started */ 1660 1660 status = 1661 1661 (*p_proc_object->intf_fxns-> 1662 - brd_stop) (p_proc_object->hbridge_context); 1662 + brd_stop) (p_proc_object->bridge_context); 1663 1663 if (!status) { 1664 1664 dev_dbg(bridge, "%s: processor in standby mode\n", __func__); 1665 1665 p_proc_object->proc_state = PROC_STOPPED; 1666 1666 /* Destory the Node Manager, msg_ctrl Manager */ 1667 - if (!(dev_destroy2(p_proc_object->hdev_obj))) { 1667 + if (!(dev_destroy2(p_proc_object->dev_obj))) { 1668 1668 /* Destroy the msg_ctrl by calling msg_delete */ 1669 - dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr); 1669 + dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr); 1670 1670 if (hmsg_mgr) { 1671 1671 msg_delete(hmsg_mgr); 1672 - dev_set_msg_mgr(p_proc_object->hdev_obj, NULL); 1672 + dev_set_msg_mgr(p_proc_object->dev_obj, NULL); 1673 1673 } 1674 1674 if (!((*p_proc_object-> 1675 1675 intf_fxns->brd_status) (p_proc_object-> 1676 - hbridge_context, 1676 + 
bridge_context, 1677 1677 &brd_state))) 1678 1678 DBC_ASSERT(brd_state == BRD_STOPPED); 1679 1679 } ··· 1721 1721 /* Remove mapping from the page tables. */ 1722 1722 if (!status) { 1723 1723 status = (*p_proc_object->intf_fxns->brd_mem_un_map) 1724 - (p_proc_object->hbridge_context, va_align, size_align); 1724 + (p_proc_object->bridge_context, va_align, size_align); 1725 1725 } 1726 1726 1727 1727 mutex_unlock(&proc_lock); ··· 1819 1819 /* This is needed only when Device is loaded when it is 1820 1820 * already 'ACTIVE' */ 1821 1821 /* Destory the Node Manager, msg_ctrl Manager */ 1822 - if (!dev_destroy2(proc_obj->hdev_obj)) { 1822 + if (!dev_destroy2(proc_obj->dev_obj)) { 1823 1823 /* Destroy the msg_ctrl by calling msg_delete */ 1824 - dev_get_msg_mgr(proc_obj->hdev_obj, &hmsg_mgr); 1824 + dev_get_msg_mgr(proc_obj->dev_obj, &hmsg_mgr); 1825 1825 if (hmsg_mgr) { 1826 1826 msg_delete(hmsg_mgr); 1827 - dev_set_msg_mgr(proc_obj->hdev_obj, NULL); 1827 + dev_set_msg_mgr(proc_obj->dev_obj, NULL); 1828 1828 } 1829 1829 } 1830 1830 /* Place the Board in the Monitor State */ 1831 1831 if (!((*proc_obj->intf_fxns->brd_monitor) 1832 - (proc_obj->hbridge_context))) { 1832 + (proc_obj->bridge_context))) { 1833 1833 status = 0; 1834 1834 if (!((*proc_obj->intf_fxns->brd_status) 1835 - (proc_obj->hbridge_context, &brd_state))) 1835 + (proc_obj->bridge_context, &brd_state))) 1836 1836 DBC_ASSERT(brd_state == BRD_IDLE); 1837 1837 } 1838 1838 ··· 1929 1929 goto func_end; 1930 1930 } 1931 1931 1932 - dev_notify_clients(p_proc_object->hdev_obj, events); 1932 + dev_notify_clients(p_proc_object->dev_obj, events); 1933 1933 1934 1934 func_end: 1935 1935 return status;
+3 -3
drivers/staging/tidspbridge/rmgr/rmm.c
··· 369 369 } 370 370 371 371 /* ul_size */ 372 - mem_stat_buf->ul_size = target->seg_tab[segid].length; 372 + mem_stat_buf->size = target->seg_tab[segid].length; 373 373 374 374 /* num_free_blocks */ 375 375 mem_stat_buf->num_free_blocks = free_blocks; 376 376 377 - /* ul_total_free_size */ 378 - mem_stat_buf->ul_total_free_size = total_free_size; 377 + /* total_free_size */ 378 + mem_stat_buf->total_free_size = total_free_size; 379 379 380 380 /* len_max_free_block */ 381 381 mem_stat_buf->len_max_free_block = max_free_size;
+6 -6
drivers/staging/tidspbridge/rmgr/strm.c
··· 55 55 */ 56 56 struct strm_mgr { 57 57 struct dev_object *dev_obj; /* Device for this processor */ 58 - struct chnl_mgr *hchnl_mgr; /* Channel manager */ 58 + struct chnl_mgr *chnl_mgr; /* Channel manager */ 59 59 /* Function interface to Bridge driver */ 60 60 struct bridge_drv_interface *intf_fxns; 61 61 }; ··· 213 213 214 214 /* Get Channel manager and Bridge function interface */ 215 215 if (!status) { 216 - status = dev_get_chnl_mgr(dev_obj, &(strm_mgr_obj->hchnl_mgr)); 216 + status = dev_get_chnl_mgr(dev_obj, &(strm_mgr_obj->chnl_mgr)); 217 217 if (!status) { 218 218 (void)dev_get_intf_fxns(dev_obj, 219 219 &(strm_mgr_obj->intf_fxns)); ··· 532 532 if (status) 533 533 goto func_cont; 534 534 535 - if ((pattr->virt_base == NULL) || !(pattr->ul_virt_size > 0)) 535 + if ((pattr->virt_base == NULL) || !(pattr->virt_size > 0)) 536 536 goto func_cont; 537 537 538 538 /* No System DMA */ ··· 547 547 /* Set translators Virt Addr attributes */ 548 548 status = cmm_xlator_info(strm_obj->xlator, 549 549 (u8 **) &pattr->virt_base, 550 - pattr->ul_virt_size, 550 + pattr->virt_size, 551 551 strm_obj->segment_id, true); 552 552 } 553 553 } ··· 558 558 CHNL_MODETODSP : CHNL_MODEFROMDSP; 559 559 intf_fxns = strm_mgr_obj->intf_fxns; 560 560 status = (*intf_fxns->chnl_open) (&(strm_obj->chnl_obj), 561 - strm_mgr_obj->hchnl_mgr, 561 + strm_mgr_obj->chnl_mgr, 562 562 chnl_mode, ul_chnl_id, 563 563 &chnl_attr_obj); 564 564 if (status) { ··· 572 572 * We got a status that's not return-able. 573 573 * Assert that we got something we were 574 574 * expecting (-EFAULT isn't acceptable, 575 - * strm_mgr_obj->hchnl_mgr better be valid or we 575 + * strm_mgr_obj->chnl_mgr better be valid or we 576 576 * assert here), and then return -EPERM. 577 577 */ 578 578 DBC_ASSERT(status == -ENOSR ||