Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: ti dspbridge: add platform manager code

Add TI's DSP Bridge platform manager driver sources

Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>
Signed-off-by: Kanigeri, Hari <h-kanigeri2@ti.com>
Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
Signed-off-by: Guzman Lugo, Fernando <fernando.lugo@ti.com>
Signed-off-by: Hebbar, Shivananda <x0hebbar@ti.com>
Signed-off-by: Ramos Falcon, Ernesto <ernesto@ti.com>
Signed-off-by: Felipe Contreras <felipe.contreras@gmail.com>
Signed-off-by: Anna, Suman <s-anna@ti.com>
Signed-off-by: Gupta, Ramesh <grgupta@ti.com>
Signed-off-by: Gomez Castellanos, Ivan <ivan.gomez@ti.com>
Signed-off-by: Andy Shevchenko <ext-andriy.shevchenko@nokia.com>
Signed-off-by: Armando Uribe De Leon <x0095078@ti.com>
Signed-off-by: Deepak Chitriki <deepak.chitriki@ti.com>
Signed-off-by: Menon, Nishanth <nm@ti.com>
Signed-off-by: Phil Carmody <ext-phil.2.carmody@nokia.com>
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

Authored by Omar Ramirez Luna and committed by Greg Kroah-Hartman (commits: c4ca3d5a 999e07d6).

+7360
+163
drivers/staging/tidspbridge/pmgr/chnl.c
/*
 * chnl.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * DSP API channel interface: multiplexes data streams through the single
 * physical link managed by a Bridge driver.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/cfg.h>
#include <dspbridge/sync.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/proc.h>
#include <dspbridge/dev.h>

/* ----------------------------------- Others */
#include <dspbridge/chnlpriv.h>
#include <chnlobj.h>

/* ----------------------------------- This */
#include <dspbridge/chnl.h>

/* ----------------------------------- Globals */
/* Module reference count: incremented by chnl_init(), decremented by
 * chnl_exit().  Plain u32, not atomic -- presumably serialized by the
 * module load/unload path; TODO confirm. */
static u32 refs;

/*
 * ======== chnl_create ========
 * Purpose:
 *      Create a channel manager object, responsible for opening new channels
 *      and closing old ones for a given 'Bridge board.
 *
 *      phChnlMgr: out parameter; receives the new manager handle on success,
 *                 NULL otherwise.
 *      hdev_obj:  device object the manager is bound to.
 *      pMgrAttrs: creation attributes (max_channels, word_size, ...).
 *      Returns 0 on success, -EINVAL for zero max_channels or word_size,
 *      -ECHRNG for max_channels out of range, -EEXIST if the device already
 *      has a channel manager, or the Bridge driver's create status.
 */
int chnl_create(OUT struct chnl_mgr **phChnlMgr,
                struct dev_object *hdev_obj,
                IN CONST struct chnl_mgrattrs *pMgrAttrs)
{
        int status;
        struct chnl_mgr *hchnl_mgr;
        struct chnl_mgr_ *chnl_mgr_obj = NULL;

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(phChnlMgr != NULL);
        DBC_REQUIRE(pMgrAttrs != NULL);

        *phChnlMgr = NULL;

        /* Validate args: max_channels must be in (0, CHNL_MAXCHANNELS]. */
        if ((0 < pMgrAttrs->max_channels) &&
            (pMgrAttrs->max_channels <= CHNL_MAXCHANNELS))
                status = 0;
        else if (pMgrAttrs->max_channels == 0)
                status = -EINVAL;
        else
                status = -ECHRNG;       /* out of range */

        /* A zero word size is also invalid. */
        if (pMgrAttrs->word_size == 0)
                status = -EINVAL;

        if (DSP_SUCCEEDED(status)) {
                /* Refuse to create a second manager for the same device. */
                status = dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
                if (DSP_SUCCEEDED(status) && hchnl_mgr != NULL)
                        status = -EEXIST;

        }

        if (DSP_SUCCEEDED(status)) {
                struct bridge_drv_interface *intf_fxns;
                dev_get_intf_fxns(hdev_obj, &intf_fxns);
                /* Let Bridge channel module finish the create: */
                status = (*intf_fxns->pfn_chnl_create) (&hchnl_mgr, hdev_obj,
                                                        pMgrAttrs);
                if (DSP_SUCCEEDED(status)) {
                        /* Fill in DSP API channel module's fields of the
                         * chnl_mgr structure */
                        chnl_mgr_obj = (struct chnl_mgr_ *)hchnl_mgr;
                        chnl_mgr_obj->intf_fxns = intf_fxns;
                        /* Finally, return the new channel manager handle: */
                        *phChnlMgr = hchnl_mgr;
                }
        }

        DBC_ENSURE(DSP_FAILED(status) || chnl_mgr_obj);

        return status;
}

/*
 * ======== chnl_destroy ========
 * Purpose:
 *      Close all open channels, and destroy the channel manager.
 *      Delegates to the Bridge driver's destroy function.
 *      Returns 0 on success, -EFAULT for a NULL manager handle.
 */
int chnl_destroy(struct chnl_mgr *hchnl_mgr)
{
        struct chnl_mgr_ *chnl_mgr_obj = (struct chnl_mgr_ *)hchnl_mgr;
        struct bridge_drv_interface *intf_fxns;
        int status;

        DBC_REQUIRE(refs > 0);

        if (chnl_mgr_obj) {
                intf_fxns = chnl_mgr_obj->intf_fxns;
                /* Let Bridge channel module destroy the chnl_mgr: */
                status = (*intf_fxns->pfn_chnl_destroy) (hchnl_mgr);
        } else {
                status = -EFAULT;
        }

        return status;
}

/*
 * ======== chnl_exit ========
 * Purpose:
 *      Discontinue usage of the CHNL module; drops one module reference.
 */
void chnl_exit(void)
{
        DBC_REQUIRE(refs > 0);

        refs--;

        /* refs is unsigned, so this ENSURE can never fail; kept for
         * symmetry with the module's other DBC contracts. */
        DBC_ENSURE(refs >= 0);
}

/*
 * ======== chnl_init ========
 * Purpose:
 *      Initialize the CHNL module's private state; takes one module
 *      reference.  Always succeeds (returns true).
 */
bool chnl_init(void)
{
        bool ret = true;

        DBC_REQUIRE(refs >= 0);

        if (ret)
                refs++;

        DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

        return ret;
}
+46
drivers/staging/tidspbridge/pmgr/chnlobj.h
··· 1 + /* 2 + * chnlobj.h 3 + * 4 + * DSP-BIOS Bridge driver support functions for TI OMAP processors. 5 + * 6 + * Structure subcomponents of channel class library channel objects which 7 + * are exposed to DSP API from Bridge driver. 8 + * 9 + * Copyright (C) 2005-2006 Texas Instruments, Inc. 10 + * 11 + * This package is free software; you can redistribute it and/or modify 12 + * it under the terms of the GNU General Public License version 2 as 13 + * published by the Free Software Foundation. 14 + * 15 + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 16 + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 17 + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 18 + */ 19 + 20 + #ifndef CHNLOBJ_ 21 + #define CHNLOBJ_ 22 + 23 + #include <dspbridge/chnldefs.h> 24 + #include <dspbridge/dspdefs.h> 25 + 26 + /* 27 + * This struct is the first field in a chnl_mgr struct. Other. implementation 28 + * specific fields follow this structure in memory. 29 + */ 30 + struct chnl_mgr_ { 31 + /* These must be the first fields in a chnl_mgr struct: */ 32 + 33 + /* Function interface to Bridge driver. */ 34 + struct bridge_drv_interface *intf_fxns; 35 + }; 36 + 37 + /* 38 + * This struct is the first field in a chnl_object struct. Other, 39 + * implementation specific fields follow this structure in memory. 40 + */ 41 + struct chnl_object_ { 42 + /* These must be the first fields in a chnl_object struct: */ 43 + struct chnl_mgr_ *chnl_mgr_obj; /* Pointer back to channel manager. */ 44 + }; 45 + 46 + #endif /* CHNLOBJ_ */
+1172
drivers/staging/tidspbridge/pmgr/cmm.c
/*
 * cmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Communication(Shared) Memory Management(CMM) module provides
 * shared memory management services for DSP/BIOS Bridge data streaming
 * and messaging.
 *
 * Multiple shared memory segments can be registered with CMM.
 * Each registered SM segment is represented by a SM "allocator" that
 * describes a block of physically contiguous shared memory used for
 * future allocations by CMM.
 *
 * Memory is coalesced back to the appropriate heap when a buffer is
 * freed.
 *
 * Notes:
 *      Va: Virtual address.
 *      Pa: Physical or kernel system address.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/cfg.h>
#include <dspbridge/list.h>
#include <dspbridge/sync.h>
#include <dspbridge/utildefs.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/* ----------------------------------- This */
#include <dspbridge/cmm.h>

/* ----------------------------------- Defines, Data Structures, Typedefs */
/* Physical address one past the end of a node's block. */
#define NEXT_PA(pnode)   (pnode->dw_pa + pnode->ul_size)

/* Other bus/platform translations (base is unused on this platform). */
#define DSPPA2GPPPA(base, x, y)  ((x)+(y))
#define GPPPA2DSPPA(base, x, y)  ((x)-(y))

/*
 * Allocators define a block of contiguous memory used for future allocations.
 *
 *      sma - shared memory allocator.
 *      vma - virtual memory allocator.(not used).
 */
struct cmm_allocator {		/* sma */
	unsigned int shm_base;	/* Start of physical SM block */
	u32 ul_sm_size;		/* Size of SM block in bytes */
	unsigned int dw_vm_base;	/* Start of VM block. (Dev driver
					 * context for 'sma') */
	u32 dw_dsp_phys_addr_offset;	/* DSP PA to GPP PA offset for this
					 * SM space */
	s8 c_factor;		/* DSPPa to GPPPa Conversion Factor */
	unsigned int dw_dsp_base;	/* DSP virt base byte address */
	u32 ul_dsp_size;	/* DSP seg size in bytes */
	struct cmm_object *hcmm_mgr;	/* back ref to parent mgr */
	/* node list of available memory */
	struct lst_list *free_list_head;
	/* node list of memory in use */
	struct lst_list *in_use_list_head;
};

struct cmm_xlator {		/* Pa<->Va translator object */
	/* CMM object this translator associated */
	struct cmm_object *hcmm_mgr;
	/*
	 * Client process virtual base address that corresponds to phys SM
	 * base address for translator's ul_seg_id.
	 * Only 1 segment ID currently supported.
	 */
	unsigned int dw_virt_base;	/* virtual base address */
	u32 ul_virt_size;	/* size of virt space in bytes */
	u32 ul_seg_id;		/* Segment Id */
};

/* CMM Mgr */
struct cmm_object {
	/*
	 * Cmm Lock is used to serialize access mem manager for multi-threads.
	 */
	struct mutex cmm_lock;	/* Lock to access cmm mgr */
	struct lst_list *node_free_list_head;	/* Free list of memory nodes */
	u32 ul_min_block_size;	/* Min SM block; default 16 bytes */
	u32 dw_page_size;	/* Memory Page size (1k/4k) */
	/* GPP SM segment ptrs */
	struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};

/* Default CMM Mgr attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
	/* ul_min_block_size, min block size(bytes) allocated by cmm mgr */
	16
};

/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
	1		/* ul_seg_id, default segment Id for allocator */
};

/* Address translator default attrs */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
	/* ul_seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
	1,
	0,			/* dw_dsp_bufs */
	0,			/* dw_dsp_buf_size */
	NULL,			/* vm_base */
	0,			/* dw_vm_size */
};

/* SM node representing a block of memory. */
struct cmm_mnode {
	struct list_head link;	/* must be 1st element */
	u32 dw_pa;		/* Phys addr */
	u32 dw_va;		/* Virtual address in device process context */
	u32 ul_size;		/* SM block size in bytes */
	u32 client_proc;	/* Process that allocated this mem block */
};

/* ----------------------------------- Globals */
static u32 refs;		/* module reference count */

/* ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode);
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id);
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize);
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size);
/* get available slot for new allocator */
static s32 get_slot(struct cmm_object *hcmm_mgr);
static void un_register_gppsm_seg(struct cmm_allocator *psma);

/*
 * ======== cmm_calloc_buf ========
 * Purpose:
 *      Allocate a SM buffer, zero contents, and return the physical address
 *      and optional driver context virtual address(pp_buf_va).
 *
 *      The freelist is sorted in increasing size order. Get the first
 *      block that satisfies the request and sort the remaining back on
 *      the freelist; if large enough. The kept block is placed on the
 *      inUseList.
165 + */ 166 + void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize, 167 + struct cmm_attrs *pattrs, OUT void **pp_buf_va) 168 + { 169 + struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; 170 + void *buf_pa = NULL; 171 + struct cmm_mnode *pnode = NULL; 172 + struct cmm_mnode *new_node = NULL; 173 + struct cmm_allocator *allocator = NULL; 174 + u32 delta_size; 175 + u8 *pbyte = NULL; 176 + s32 cnt; 177 + 178 + if (pattrs == NULL) 179 + pattrs = &cmm_dfltalctattrs; 180 + 181 + if (pp_buf_va != NULL) 182 + *pp_buf_va = NULL; 183 + 184 + if (cmm_mgr_obj && (usize != 0)) { 185 + if (pattrs->ul_seg_id > 0) { 186 + /* SegId > 0 is SM */ 187 + /* get the allocator object for this segment id */ 188 + allocator = 189 + get_allocator(cmm_mgr_obj, pattrs->ul_seg_id); 190 + /* keep block size a multiple of ul_min_block_size */ 191 + usize = 192 + ((usize - 1) & ~(cmm_mgr_obj->ul_min_block_size - 193 + 1)) 194 + + cmm_mgr_obj->ul_min_block_size; 195 + mutex_lock(&cmm_mgr_obj->cmm_lock); 196 + pnode = get_free_block(allocator, usize); 197 + } 198 + if (pnode) { 199 + delta_size = (pnode->ul_size - usize); 200 + if (delta_size >= cmm_mgr_obj->ul_min_block_size) { 201 + /* create a new block with the leftovers and 202 + * add to freelist */ 203 + new_node = 204 + get_node(cmm_mgr_obj, pnode->dw_pa + usize, 205 + pnode->dw_va + usize, 206 + (u32) delta_size); 207 + /* leftovers go free */ 208 + add_to_free_list(allocator, new_node); 209 + /* adjust our node's size */ 210 + pnode->ul_size = usize; 211 + } 212 + /* Tag node with client process requesting allocation 213 + * We'll need to free up a process's alloc'd SM if the 214 + * client process goes away. 
215 + */ 216 + /* Return TGID instead of process handle */ 217 + pnode->client_proc = current->tgid; 218 + 219 + /* put our node on InUse list */ 220 + lst_put_tail(allocator->in_use_list_head, 221 + (struct list_head *)pnode); 222 + buf_pa = (void *)pnode->dw_pa; /* physical address */ 223 + /* clear mem */ 224 + pbyte = (u8 *) pnode->dw_va; 225 + for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++) 226 + *pbyte = 0; 227 + 228 + if (pp_buf_va != NULL) { 229 + /* Virtual address */ 230 + *pp_buf_va = (void *)pnode->dw_va; 231 + } 232 + } 233 + mutex_unlock(&cmm_mgr_obj->cmm_lock); 234 + } 235 + return buf_pa; 236 + } 237 + 238 + /* 239 + * ======== cmm_create ======== 240 + * Purpose: 241 + * Create a communication memory manager object. 242 + */ 243 + int cmm_create(OUT struct cmm_object **ph_cmm_mgr, 244 + struct dev_object *hdev_obj, 245 + IN CONST struct cmm_mgrattrs *pMgrAttrs) 246 + { 247 + struct cmm_object *cmm_obj = NULL; 248 + int status = 0; 249 + struct util_sysinfo sys_info; 250 + 251 + DBC_REQUIRE(refs > 0); 252 + DBC_REQUIRE(ph_cmm_mgr != NULL); 253 + 254 + *ph_cmm_mgr = NULL; 255 + /* create, zero, and tag a cmm mgr object */ 256 + cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL); 257 + if (cmm_obj != NULL) { 258 + if (pMgrAttrs == NULL) 259 + pMgrAttrs = &cmm_dfltmgrattrs; /* set defaults */ 260 + 261 + /* 4 bytes minimum */ 262 + DBC_ASSERT(pMgrAttrs->ul_min_block_size >= 4); 263 + /* save away smallest block allocation for this cmm mgr */ 264 + cmm_obj->ul_min_block_size = pMgrAttrs->ul_min_block_size; 265 + /* save away the systems memory page size */ 266 + sys_info.dw_page_size = PAGE_SIZE; 267 + sys_info.dw_allocation_granularity = PAGE_SIZE; 268 + sys_info.dw_number_of_processors = 1; 269 + if (DSP_SUCCEEDED(status)) { 270 + cmm_obj->dw_page_size = sys_info.dw_page_size; 271 + } else { 272 + cmm_obj->dw_page_size = 0; 273 + status = -EPERM; 274 + } 275 + /* Note: DSP SM seg table(aDSPSMSegTab[]) zero'd by 276 + * MEM_ALLOC_OBJECT */ 277 + 
if (DSP_SUCCEEDED(status)) { 278 + /* create node free list */ 279 + cmm_obj->node_free_list_head = 280 + kzalloc(sizeof(struct lst_list), 281 + GFP_KERNEL); 282 + if (cmm_obj->node_free_list_head == NULL) 283 + status = -ENOMEM; 284 + else 285 + INIT_LIST_HEAD(&cmm_obj-> 286 + node_free_list_head->head); 287 + } 288 + if (DSP_SUCCEEDED(status)) 289 + mutex_init(&cmm_obj->cmm_lock); 290 + 291 + if (DSP_SUCCEEDED(status)) 292 + *ph_cmm_mgr = cmm_obj; 293 + else 294 + cmm_destroy(cmm_obj, true); 295 + 296 + } else { 297 + status = -ENOMEM; 298 + } 299 + return status; 300 + } 301 + 302 + /* 303 + * ======== cmm_destroy ======== 304 + * Purpose: 305 + * Release the communication memory manager resources. 306 + */ 307 + int cmm_destroy(struct cmm_object *hcmm_mgr, bool bForce) 308 + { 309 + struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; 310 + struct cmm_info temp_info; 311 + int status = 0; 312 + s32 slot_seg; 313 + struct cmm_mnode *pnode; 314 + 315 + DBC_REQUIRE(refs > 0); 316 + if (!hcmm_mgr) { 317 + status = -EFAULT; 318 + return status; 319 + } 320 + mutex_lock(&cmm_mgr_obj->cmm_lock); 321 + /* If not force then fail if outstanding allocations exist */ 322 + if (!bForce) { 323 + /* Check for outstanding memory allocations */ 324 + status = cmm_get_info(hcmm_mgr, &temp_info); 325 + if (DSP_SUCCEEDED(status)) { 326 + if (temp_info.ul_total_in_use_cnt > 0) { 327 + /* outstanding allocations */ 328 + status = -EPERM; 329 + } 330 + } 331 + } 332 + if (DSP_SUCCEEDED(status)) { 333 + /* UnRegister SM allocator */ 334 + for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) { 335 + if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) { 336 + un_register_gppsm_seg 337 + (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]); 338 + /* Set slot to NULL for future reuse */ 339 + cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL; 340 + } 341 + } 342 + } 343 + if (cmm_mgr_obj->node_free_list_head != NULL) { 344 + /* Free the free nodes */ 345 + while 
(!LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) { 346 + pnode = (struct cmm_mnode *) 347 + lst_get_head(cmm_mgr_obj->node_free_list_head); 348 + kfree(pnode); 349 + } 350 + /* delete NodeFreeList list */ 351 + kfree(cmm_mgr_obj->node_free_list_head); 352 + } 353 + mutex_unlock(&cmm_mgr_obj->cmm_lock); 354 + if (DSP_SUCCEEDED(status)) { 355 + /* delete CS & cmm mgr object */ 356 + mutex_destroy(&cmm_mgr_obj->cmm_lock); 357 + kfree(cmm_mgr_obj); 358 + } 359 + return status; 360 + } 361 + 362 + /* 363 + * ======== cmm_exit ======== 364 + * Purpose: 365 + * Discontinue usage of module; free resources when reference count 366 + * reaches 0. 367 + */ 368 + void cmm_exit(void) 369 + { 370 + DBC_REQUIRE(refs > 0); 371 + 372 + refs--; 373 + } 374 + 375 + /* 376 + * ======== cmm_free_buf ======== 377 + * Purpose: 378 + * Free the given buffer. 379 + */ 380 + int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, 381 + u32 ul_seg_id) 382 + { 383 + struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; 384 + int status = -EFAULT; 385 + struct cmm_mnode *mnode_obj = NULL; 386 + struct cmm_allocator *allocator = NULL; 387 + struct cmm_attrs *pattrs; 388 + 389 + DBC_REQUIRE(refs > 0); 390 + DBC_REQUIRE(buf_pa != NULL); 391 + 392 + if (ul_seg_id == 0) { 393 + pattrs = &cmm_dfltalctattrs; 394 + ul_seg_id = pattrs->ul_seg_id; 395 + } 396 + if (!hcmm_mgr || !(ul_seg_id > 0)) { 397 + status = -EFAULT; 398 + return status; 399 + } 400 + /* get the allocator for this segment id */ 401 + allocator = get_allocator(cmm_mgr_obj, ul_seg_id); 402 + if (allocator != NULL) { 403 + mutex_lock(&cmm_mgr_obj->cmm_lock); 404 + mnode_obj = 405 + (struct cmm_mnode *)lst_first(allocator->in_use_list_head); 406 + while (mnode_obj) { 407 + if ((u32) buf_pa == mnode_obj->dw_pa) { 408 + /* Found it */ 409 + lst_remove_elem(allocator->in_use_list_head, 410 + (struct list_head *)mnode_obj); 411 + /* back to freelist */ 412 + add_to_free_list(allocator, mnode_obj); 413 + status = 0; /* all 
right! */ 414 + break; 415 + } 416 + /* next node. */ 417 + mnode_obj = (struct cmm_mnode *) 418 + lst_next(allocator->in_use_list_head, 419 + (struct list_head *)mnode_obj); 420 + } 421 + mutex_unlock(&cmm_mgr_obj->cmm_lock); 422 + } 423 + return status; 424 + } 425 + 426 + /* 427 + * ======== cmm_get_handle ======== 428 + * Purpose: 429 + * Return the communication memory manager object for this device. 430 + * This is typically called from the client process. 431 + */ 432 + int cmm_get_handle(void *hprocessor, OUT struct cmm_object ** ph_cmm_mgr) 433 + { 434 + int status = 0; 435 + struct dev_object *hdev_obj; 436 + 437 + DBC_REQUIRE(refs > 0); 438 + DBC_REQUIRE(ph_cmm_mgr != NULL); 439 + if (hprocessor != NULL) 440 + status = proc_get_dev_object(hprocessor, &hdev_obj); 441 + else 442 + hdev_obj = dev_get_first(); /* default */ 443 + 444 + if (DSP_SUCCEEDED(status)) 445 + status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr); 446 + 447 + return status; 448 + } 449 + 450 + /* 451 + * ======== cmm_get_info ======== 452 + * Purpose: 453 + * Return the current memory utilization information. 
 */
int cmm_get_info(struct cmm_object *hcmm_mgr,
		 OUT struct cmm_info *cmm_info_obj)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	u32 ul_seg;
	int status = 0;
	struct cmm_allocator *altr;
	struct cmm_mnode *mnode_obj = NULL;

	DBC_REQUIRE(cmm_info_obj != NULL);

	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	cmm_info_obj->ul_num_gppsm_segs = 0;	/* # of SM segments */
	/* Total # of outstanding alloc */
	cmm_info_obj->ul_total_in_use_cnt = 0;
	/* min block size */
	cmm_info_obj->ul_min_block_size = cmm_mgr_obj->ul_min_block_size;
	/* check SM memory segments; segment ids are 1-based, the seg_info
	 * table is 0-based */
	for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
		/* get the allocator object for this segment id */
		altr = get_allocator(cmm_mgr_obj, ul_seg);
		if (altr != NULL) {
			cmm_info_obj->ul_num_gppsm_segs++;
			cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_pa =
			    altr->shm_base - altr->ul_dsp_size;
			cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
			    altr->ul_dsp_size + altr->ul_sm_size;
			cmm_info_obj->seg_info[ul_seg - 1].dw_gpp_base_pa =
			    altr->shm_base;
			cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size =
			    altr->ul_sm_size;
			cmm_info_obj->seg_info[ul_seg - 1].dw_dsp_base_va =
			    altr->dw_dsp_base;
			cmm_info_obj->seg_info[ul_seg - 1].ul_dsp_size =
			    altr->ul_dsp_size;
			cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_va =
			    altr->dw_vm_base - altr->ul_dsp_size;
			cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0;
			mnode_obj = (struct cmm_mnode *)
			    lst_first(altr->in_use_list_head);
			/* Count inUse blocks */
			while (mnode_obj) {
				cmm_info_obj->ul_total_in_use_cnt++;
				cmm_info_obj->seg_info[ul_seg -
						       1].ul_in_use_cnt++;
				/* next node. */
				mnode_obj = (struct cmm_mnode *)
				    lst_next(altr->in_use_list_head,
					     (struct list_head *)mnode_obj);
			}
		}
	}			/* end for */
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}

/*
 * ======== cmm_init ========
 * Purpose:
 *      Initializes private state of CMM module; takes one module
 *      reference.  Always succeeds (returns true).
 */
bool cmm_init(void)
{
	bool ret = true;

	DBC_REQUIRE(refs >= 0);
	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	return ret;
}

/*
 * ======== cmm_register_gppsm_seg ========
 * Purpose:
 *      Register a block of SM with the CMM to be used for later GPP SM
 *      allocations.
 *
 *      On success *pulSegId receives the 1-based segment id for the new
 *      allocator.  Returns 0 on success, -EFAULT for a NULL manager,
 *      -EPERM when no slot is free or dw_gpp_base_va is 0, -EINVAL when
 *      ul_size is below the manager's minimum block size, -ENOMEM on
 *      allocation failure.
 */
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			   u32 dw_gpp_base_pa, u32 ul_size,
			   u32 dwDSPAddrOffset, s8 c_factor,
			   u32 dw_dsp_base, u32 ul_dsp_size,
			   u32 *pulSegId, u32 dw_gpp_base_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_allocator *psma = NULL;
	int status = 0;
	struct cmm_mnode *new_node;
	s32 slot_seg;

	DBC_REQUIRE(ul_size > 0);
	DBC_REQUIRE(pulSegId != NULL);
	DBC_REQUIRE(dw_gpp_base_pa != 0);
	DBC_REQUIRE(dw_gpp_base_va != 0);
	DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
		    (c_factor >= CMM_SUBFROMDSPPA));
	dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dwDSPAddrOffset %x "
		"dw_dsp_base %x ul_dsp_size %x dw_gpp_base_va %x\n", __func__,
		dw_gpp_base_pa, ul_size, dwDSPAddrOffset, dw_dsp_base,
		ul_dsp_size, dw_gpp_base_va);
	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	/* make sure we have room for another allocator */
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	slot_seg = get_slot(cmm_mgr_obj);
	if (slot_seg < 0) {
		/* get a slot number */
		status = -EPERM;
		goto func_end;
	}
	/* Check if input ul_size is big enough to alloc at least one block.
	 * status is necessarily 0 past the goto above, so the remaining
	 * DSP_SUCCEEDED(status) checks are trivially true. */
	if (DSP_SUCCEEDED(status)) {
		if (ul_size < cmm_mgr_obj->ul_min_block_size) {
			status = -EINVAL;
			goto func_end;
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* create, zero, and tag an SM allocator object */
		psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
	}
	if (psma != NULL) {
		psma->hcmm_mgr = hcmm_mgr;	/* ref to parent */
		psma->shm_base = dw_gpp_base_pa;	/* SM Base phys */
		psma->ul_sm_size = ul_size;	/* SM segment size in bytes */
		psma->dw_vm_base = dw_gpp_base_va;
		psma->dw_dsp_phys_addr_offset = dwDSPAddrOffset;
		psma->c_factor = c_factor;
		psma->dw_dsp_base = dw_dsp_base;
		psma->ul_dsp_size = ul_dsp_size;
		if (psma->dw_vm_base == 0) {
			status = -EPERM;
			goto func_end;
		}
		if (DSP_SUCCEEDED(status)) {
			/* return the actual segment identifier */
			*pulSegId = (u32) slot_seg + 1;
			/* create memory free list */
			psma->free_list_head = kzalloc(sizeof(struct lst_list),
						       GFP_KERNEL);
			if (psma->free_list_head == NULL) {
				status = -ENOMEM;
				goto func_end;
			}
			INIT_LIST_HEAD(&psma->free_list_head->head);
		}
		if (DSP_SUCCEEDED(status)) {
			/* create memory in-use list */
			psma->in_use_list_head = kzalloc(sizeof(struct
								lst_list),
							 GFP_KERNEL);
			if (psma->in_use_list_head == NULL) {
				status = -ENOMEM;
				goto func_end;
			}
			INIT_LIST_HEAD(&psma->in_use_list_head->head);
		}
		if (DSP_SUCCEEDED(status)) {
			/* Get a mem node for this hunk-o-memory */
			new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
					    psma->dw_vm_base, ul_size);
			/* Place node on the SM allocator's free list */
			if (new_node) {
				lst_put_tail(psma->free_list_head,
					     (struct list_head *)new_node);
			} else {
				status = -ENOMEM;
				goto func_end;
			}
		}
		if (DSP_FAILED(status)) {
			/* Cleanup allocator */
			un_register_gppsm_seg(psma);
		}
	} else {
		status = -ENOMEM;
		goto func_end;
	}
	/* make entry */
	if (DSP_SUCCEEDED(status))
		cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;

func_end:
	/* NOTE(review): the -EPERM/-ENOMEM goto paths above leave a
	 * partially built psma unfreed -- verify against the cleanup
	 * expectations of callers. */
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}

/*
 * ======== cmm_un_register_gppsm_seg ========
 * Purpose:
 *      UnRegister GPP SM segments with the CMM.
 *      ul_seg_id may be a single 1-based id or CMM_ALLSEGMENTS to tear
 *      down every registered segment.
 */
int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			      u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = 0;
	struct cmm_allocator *psma;
	u32 ul_id = ul_seg_id;

	DBC_REQUIRE(ul_seg_id > 0);
	if (hcmm_mgr) {
		if (ul_seg_id == CMM_ALLSEGMENTS)
			ul_id = 1;

		if ((ul_id > 0) && (ul_id <= CMM_MAXGPPSEGS)) {
			while (ul_id <= CMM_MAXGPPSEGS) {
				mutex_lock(&cmm_mgr_obj->cmm_lock);
				/* slot = seg_id-1 */
				psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
				if (psma != NULL) {
					un_register_gppsm_seg(psma);
					/* Set alctr ptr to NULL for future
					 * reuse */
					cmm_mgr_obj->pa_gppsm_seg_tab[ul_id -
								      1] = NULL;
				} else if (ul_seg_id != CMM_ALLSEGMENTS) {
					status = -EPERM;
				}
				mutex_unlock(&cmm_mgr_obj->cmm_lock);
				if (ul_seg_id != CMM_ALLSEGMENTS)
					break;

				ul_id++;
			}	/* end while */
		} else {
			status = -EINVAL;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 * ======== un_register_gppsm_seg ========
 * Purpose:
 *      UnRegister the SM allocator by freeing all its resources and
 *      nulling cmm mgr table entry.
 * Note:
 *      This routine is always called within cmm lock crit sect.
703 + */ 704 + static void un_register_gppsm_seg(struct cmm_allocator *psma) 705 + { 706 + struct cmm_mnode *mnode_obj = NULL; 707 + struct cmm_mnode *next_node = NULL; 708 + 709 + DBC_REQUIRE(psma != NULL); 710 + if (psma->free_list_head != NULL) { 711 + /* free nodes on free list */ 712 + mnode_obj = (struct cmm_mnode *)lst_first(psma->free_list_head); 713 + while (mnode_obj) { 714 + next_node = 715 + (struct cmm_mnode *)lst_next(psma->free_list_head, 716 + (struct list_head *) 717 + mnode_obj); 718 + lst_remove_elem(psma->free_list_head, 719 + (struct list_head *)mnode_obj); 720 + kfree((void *)mnode_obj); 721 + /* next node. */ 722 + mnode_obj = next_node; 723 + } 724 + kfree(psma->free_list_head); /* delete freelist */ 725 + /* free nodes on InUse list */ 726 + mnode_obj = 727 + (struct cmm_mnode *)lst_first(psma->in_use_list_head); 728 + while (mnode_obj) { 729 + next_node = 730 + (struct cmm_mnode *)lst_next(psma->in_use_list_head, 731 + (struct list_head *) 732 + mnode_obj); 733 + lst_remove_elem(psma->in_use_list_head, 734 + (struct list_head *)mnode_obj); 735 + kfree((void *)mnode_obj); 736 + /* next node. */ 737 + mnode_obj = next_node; 738 + } 739 + kfree(psma->in_use_list_head); /* delete InUse list */ 740 + } 741 + if ((void *)psma->dw_vm_base != NULL) 742 + MEM_UNMAP_LINEAR_ADDRESS((void *)psma->dw_vm_base); 743 + 744 + /* Free allocator itself */ 745 + kfree(psma); 746 + } 747 + 748 + /* 749 + * ======== get_slot ======== 750 + * Purpose: 751 + * An available slot # is returned. Returns negative on failure. 
752 + */ 753 + static s32 get_slot(struct cmm_object *cmm_mgr_obj) 754 + { 755 + s32 slot_seg = -1; /* neg on failure */ 756 + DBC_REQUIRE(cmm_mgr_obj != NULL); 757 + /* get first available slot in cmm mgr SMSegTab[] */ 758 + for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) { 759 + if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL) 760 + break; 761 + 762 + } 763 + if (slot_seg == CMM_MAXGPPSEGS) 764 + slot_seg = -1; /* failed */ 765 + 766 + return slot_seg; 767 + } 768 + 769 + /* 770 + * ======== get_node ======== 771 + * Purpose: 772 + * Get a memory node from freelist or create a new one. 773 + */ 774 + static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa, 775 + u32 dw_va, u32 ul_size) 776 + { 777 + struct cmm_mnode *pnode = NULL; 778 + 779 + DBC_REQUIRE(cmm_mgr_obj != NULL); 780 + DBC_REQUIRE(dw_pa != 0); 781 + DBC_REQUIRE(dw_va != 0); 782 + DBC_REQUIRE(ul_size != 0); 783 + /* Check cmm mgr's node freelist */ 784 + if (LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) { 785 + pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL); 786 + } else { 787 + /* surely a valid element */ 788 + pnode = (struct cmm_mnode *) 789 + lst_get_head(cmm_mgr_obj->node_free_list_head); 790 + } 791 + if (pnode) { 792 + lst_init_elem((struct list_head *)pnode); /* set self */ 793 + pnode->dw_pa = dw_pa; /* Physical addr of start of block */ 794 + pnode->dw_va = dw_va; /* Virtual " " */ 795 + pnode->ul_size = ul_size; /* Size of block */ 796 + } 797 + return pnode; 798 + } 799 + 800 + /* 801 + * ======== delete_node ======== 802 + * Purpose: 803 + * Put a memory node on the cmm nodelist for later use. 804 + * Doesn't actually delete the node. Heap thrashing friendly. 
805 + */ 806 + static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode) 807 + { 808 + DBC_REQUIRE(pnode != NULL); 809 + lst_init_elem((struct list_head *)pnode); /* init .self ptr */ 810 + lst_put_tail(cmm_mgr_obj->node_free_list_head, 811 + (struct list_head *)pnode); 812 + } 813 + 814 + /* 815 + * ====== get_free_block ======== 816 + * Purpose: 817 + * Scan the free block list and return the first block that satisfies 818 + * the size. 819 + */ 820 + static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator, 821 + u32 usize) 822 + { 823 + if (allocator) { 824 + struct cmm_mnode *mnode_obj = (struct cmm_mnode *) 825 + lst_first(allocator->free_list_head); 826 + while (mnode_obj) { 827 + if (usize <= (u32) mnode_obj->ul_size) { 828 + lst_remove_elem(allocator->free_list_head, 829 + (struct list_head *)mnode_obj); 830 + return mnode_obj; 831 + } 832 + /* next node. */ 833 + mnode_obj = (struct cmm_mnode *) 834 + lst_next(allocator->free_list_head, 835 + (struct list_head *)mnode_obj); 836 + } 837 + } 838 + return NULL; 839 + } 840 + 841 + /* 842 + * ======== add_to_free_list ======== 843 + * Purpose: 844 + * Coelesce node into the freelist in ascending size order. 
845 + */ 846 + static void add_to_free_list(struct cmm_allocator *allocator, 847 + struct cmm_mnode *pnode) 848 + { 849 + struct cmm_mnode *node_prev = NULL; 850 + struct cmm_mnode *node_next = NULL; 851 + struct cmm_mnode *mnode_obj; 852 + u32 dw_this_pa; 853 + u32 dw_next_pa; 854 + 855 + DBC_REQUIRE(pnode != NULL); 856 + DBC_REQUIRE(allocator != NULL); 857 + dw_this_pa = pnode->dw_pa; 858 + dw_next_pa = NEXT_PA(pnode); 859 + mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head); 860 + while (mnode_obj) { 861 + if (dw_this_pa == NEXT_PA(mnode_obj)) { 862 + /* found the block ahead of this one */ 863 + node_prev = mnode_obj; 864 + } else if (dw_next_pa == mnode_obj->dw_pa) { 865 + node_next = mnode_obj; 866 + } 867 + if ((node_prev == NULL) || (node_next == NULL)) { 868 + /* next node. */ 869 + mnode_obj = (struct cmm_mnode *) 870 + lst_next(allocator->free_list_head, 871 + (struct list_head *)mnode_obj); 872 + } else { 873 + /* got 'em */ 874 + break; 875 + } 876 + } /* while */ 877 + if (node_prev != NULL) { 878 + /* combine with previous block */ 879 + lst_remove_elem(allocator->free_list_head, 880 + (struct list_head *)node_prev); 881 + /* grow node to hold both */ 882 + pnode->ul_size += node_prev->ul_size; 883 + pnode->dw_pa = node_prev->dw_pa; 884 + pnode->dw_va = node_prev->dw_va; 885 + /* place node on mgr nodeFreeList */ 886 + delete_node((struct cmm_object *)allocator->hcmm_mgr, 887 + node_prev); 888 + } 889 + if (node_next != NULL) { 890 + /* combine with next block */ 891 + lst_remove_elem(allocator->free_list_head, 892 + (struct list_head *)node_next); 893 + /* grow da node */ 894 + pnode->ul_size += node_next->ul_size; 895 + /* place node on mgr nodeFreeList */ 896 + delete_node((struct cmm_object *)allocator->hcmm_mgr, 897 + node_next); 898 + } 899 + /* Now, let's add to freelist in increasing size order */ 900 + mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head); 901 + while (mnode_obj) { 902 + if (pnode->ul_size <= 
mnode_obj->ul_size) 903 + break; 904 + 905 + /* next node. */ 906 + mnode_obj = 907 + (struct cmm_mnode *)lst_next(allocator->free_list_head, 908 + (struct list_head *)mnode_obj); 909 + } 910 + /* if mnode_obj is NULL then add our pnode to the end of the freelist */ 911 + if (mnode_obj == NULL) { 912 + lst_put_tail(allocator->free_list_head, 913 + (struct list_head *)pnode); 914 + } else { 915 + /* insert our node before the current traversed node */ 916 + lst_insert_before(allocator->free_list_head, 917 + (struct list_head *)pnode, 918 + (struct list_head *)mnode_obj); 919 + } 920 + } 921 + 922 + /* 923 + * ======== get_allocator ======== 924 + * Purpose: 925 + * Return the allocator for the given SM Segid. 926 + * SegIds: 1,2,3..max. 927 + */ 928 + static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj, 929 + u32 ul_seg_id) 930 + { 931 + struct cmm_allocator *allocator = NULL; 932 + 933 + DBC_REQUIRE(cmm_mgr_obj != NULL); 934 + DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS)); 935 + allocator = cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1]; 936 + if (allocator != NULL) { 937 + /* make sure it's for real */ 938 + if (!allocator) { 939 + allocator = NULL; 940 + DBC_ASSERT(false); 941 + } 942 + } 943 + return allocator; 944 + } 945 + 946 + /* 947 + * The CMM_Xlator[xxx] routines below are used by Node and Stream 948 + * to perform SM address translation to the client process address space. 949 + * A "translator" object is created by a node/stream for each SM seg used. 950 + */ 951 + 952 + /* 953 + * ======== cmm_xlator_create ======== 954 + * Purpose: 955 + * Create an address translator object. 
956 + */ 957 + int cmm_xlator_create(OUT struct cmm_xlatorobject **phXlator, 958 + struct cmm_object *hcmm_mgr, 959 + struct cmm_xlatorattrs *pXlatorAttrs) 960 + { 961 + struct cmm_xlator *xlator_object = NULL; 962 + int status = 0; 963 + 964 + DBC_REQUIRE(refs > 0); 965 + DBC_REQUIRE(phXlator != NULL); 966 + DBC_REQUIRE(hcmm_mgr != NULL); 967 + 968 + *phXlator = NULL; 969 + if (pXlatorAttrs == NULL) 970 + pXlatorAttrs = &cmm_dfltxlatorattrs; /* set defaults */ 971 + 972 + xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL); 973 + if (xlator_object != NULL) { 974 + xlator_object->hcmm_mgr = hcmm_mgr; /* ref back to CMM */ 975 + /* SM seg_id */ 976 + xlator_object->ul_seg_id = pXlatorAttrs->ul_seg_id; 977 + } else { 978 + status = -ENOMEM; 979 + } 980 + if (DSP_SUCCEEDED(status)) 981 + *phXlator = (struct cmm_xlatorobject *)xlator_object; 982 + 983 + return status; 984 + } 985 + 986 + /* 987 + * ======== cmm_xlator_delete ======== 988 + * Purpose: 989 + * Free the Xlator resources. 990 + * VM gets freed later. 
991 + */ 992 + int cmm_xlator_delete(struct cmm_xlatorobject *xlator, bool bForce) 993 + { 994 + struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator; 995 + int status = 0; 996 + 997 + DBC_REQUIRE(refs > 0); 998 + 999 + if (xlator_obj) 1000 + kfree(xlator_obj); 1001 + else 1002 + status = -EFAULT; 1003 + 1004 + return status; 1005 + } 1006 + 1007 + /* 1008 + * ======== cmm_xlator_alloc_buf ======== 1009 + */ 1010 + void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *pVaBuf, 1011 + u32 uPaSize) 1012 + { 1013 + struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator; 1014 + void *pbuf = NULL; 1015 + struct cmm_attrs attrs; 1016 + 1017 + DBC_REQUIRE(refs > 0); 1018 + DBC_REQUIRE(xlator != NULL); 1019 + DBC_REQUIRE(xlator_obj->hcmm_mgr != NULL); 1020 + DBC_REQUIRE(pVaBuf != NULL); 1021 + DBC_REQUIRE(uPaSize > 0); 1022 + DBC_REQUIRE(xlator_obj->ul_seg_id > 0); 1023 + 1024 + if (xlator_obj) { 1025 + attrs.ul_seg_id = xlator_obj->ul_seg_id; 1026 + *(volatile u32 *)pVaBuf = 0; 1027 + /* Alloc SM */ 1028 + pbuf = 1029 + cmm_calloc_buf(xlator_obj->hcmm_mgr, uPaSize, &attrs, NULL); 1030 + if (pbuf) { 1031 + /* convert to translator(node/strm) process Virtual 1032 + * address */ 1033 + *(volatile u32 **)pVaBuf = 1034 + (u32 *) cmm_xlator_translate(xlator, 1035 + pbuf, CMM_PA2VA); 1036 + } 1037 + } 1038 + return pbuf; 1039 + } 1040 + 1041 + /* 1042 + * ======== cmm_xlator_free_buf ======== 1043 + * Purpose: 1044 + * Free the given SM buffer and descriptor. 1045 + * Does not free virtual memory. 1046 + */ 1047 + int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *pBufVa) 1048 + { 1049 + struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator; 1050 + int status = -EPERM; 1051 + void *buf_pa = NULL; 1052 + 1053 + DBC_REQUIRE(refs > 0); 1054 + DBC_REQUIRE(pBufVa != NULL); 1055 + DBC_REQUIRE(xlator_obj->ul_seg_id > 0); 1056 + 1057 + if (xlator_obj) { 1058 + /* convert Va to Pa so we can free it. 
*/ 1059 + buf_pa = cmm_xlator_translate(xlator, pBufVa, CMM_VA2PA); 1060 + if (buf_pa) { 1061 + status = cmm_free_buf(xlator_obj->hcmm_mgr, buf_pa, 1062 + xlator_obj->ul_seg_id); 1063 + if (DSP_FAILED(status)) { 1064 + /* Uh oh, this shouldn't happen. Descriptor 1065 + * gone! */ 1066 + DBC_ASSERT(false); /* CMM is leaking mem */ 1067 + } 1068 + } 1069 + } 1070 + return status; 1071 + } 1072 + 1073 + /* 1074 + * ======== cmm_xlator_info ======== 1075 + * Purpose: 1076 + * Set/Get translator info. 1077 + */ 1078 + int cmm_xlator_info(struct cmm_xlatorobject *xlator, IN OUT u8 ** paddr, 1079 + u32 ul_size, u32 uSegId, bool set_info) 1080 + { 1081 + struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator; 1082 + int status = 0; 1083 + 1084 + DBC_REQUIRE(refs > 0); 1085 + DBC_REQUIRE(paddr != NULL); 1086 + DBC_REQUIRE((uSegId > 0) && (uSegId <= CMM_MAXGPPSEGS)); 1087 + 1088 + if (xlator_obj) { 1089 + if (set_info) { 1090 + /* set translators virtual address range */ 1091 + xlator_obj->dw_virt_base = (u32) *paddr; 1092 + xlator_obj->ul_virt_size = ul_size; 1093 + } else { /* return virt base address */ 1094 + *paddr = (u8 *) xlator_obj->dw_virt_base; 1095 + } 1096 + } else { 1097 + status = -EFAULT; 1098 + } 1099 + return status; 1100 + } 1101 + 1102 + /* 1103 + * ======== cmm_xlator_translate ======== 1104 + */ 1105 + void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr, 1106 + enum cmm_xlatetype xType) 1107 + { 1108 + u32 dw_addr_xlate = 0; 1109 + struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator; 1110 + struct cmm_object *cmm_mgr_obj = NULL; 1111 + struct cmm_allocator *allocator = NULL; 1112 + u32 dw_offset = 0; 1113 + 1114 + DBC_REQUIRE(refs > 0); 1115 + DBC_REQUIRE(paddr != NULL); 1116 + DBC_REQUIRE((xType >= CMM_VA2PA) && (xType <= CMM_DSPPA2PA)); 1117 + 1118 + if (!xlator_obj) 1119 + goto loop_cont; 1120 + 1121 + cmm_mgr_obj = (struct cmm_object *)xlator_obj->hcmm_mgr; 1122 + /* get this translator's default SM allocator */ 1123 
+ DBC_ASSERT(xlator_obj->ul_seg_id > 0); 1124 + allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->ul_seg_id - 1]; 1125 + if (!allocator) 1126 + goto loop_cont; 1127 + 1128 + if ((xType == CMM_VA2DSPPA) || (xType == CMM_VA2PA) || 1129 + (xType == CMM_PA2VA)) { 1130 + if (xType == CMM_PA2VA) { 1131 + /* Gpp Va = Va Base + offset */ 1132 + dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base - 1133 + allocator-> 1134 + ul_dsp_size); 1135 + dw_addr_xlate = xlator_obj->dw_virt_base + dw_offset; 1136 + /* Check if translated Va base is in range */ 1137 + if ((dw_addr_xlate < xlator_obj->dw_virt_base) || 1138 + (dw_addr_xlate >= 1139 + (xlator_obj->dw_virt_base + 1140 + xlator_obj->ul_virt_size))) { 1141 + dw_addr_xlate = 0; /* bad address */ 1142 + } 1143 + } else { 1144 + /* Gpp PA = Gpp Base + offset */ 1145 + dw_offset = 1146 + (u8 *) paddr - (u8 *) xlator_obj->dw_virt_base; 1147 + dw_addr_xlate = 1148 + allocator->shm_base - allocator->ul_dsp_size + 1149 + dw_offset; 1150 + } 1151 + } else { 1152 + dw_addr_xlate = (u32) paddr; 1153 + } 1154 + /*Now convert address to proper target physical address if needed */ 1155 + if ((xType == CMM_VA2DSPPA) || (xType == CMM_PA2DSPPA)) { 1156 + /* Got Gpp Pa now, convert to DSP Pa */ 1157 + dw_addr_xlate = 1158 + GPPPA2DSPPA((allocator->shm_base - allocator->ul_dsp_size), 1159 + dw_addr_xlate, 1160 + allocator->dw_dsp_phys_addr_offset * 1161 + allocator->c_factor); 1162 + } else if (xType == CMM_DSPPA2PA) { 1163 + /* Got DSP Pa, convert to GPP Pa */ 1164 + dw_addr_xlate = 1165 + DSPPA2GPPPA(allocator->shm_base - allocator->ul_dsp_size, 1166 + dw_addr_xlate, 1167 + allocator->dw_dsp_phys_addr_offset * 1168 + allocator->c_factor); 1169 + } 1170 + loop_cont: 1171 + return (void *)dw_addr_xlate; 1172 + }
+658
drivers/staging/tidspbridge/pmgr/cod.c
··· 1 + /* 2 + * cod.c 3 + * 4 + * DSP-BIOS Bridge driver support functions for TI OMAP processors. 5 + * 6 + * This module implements DSP code management for the DSP/BIOS Bridge 7 + * environment. It is mostly a thin wrapper. 8 + * 9 + * This module provides an interface for loading both static and 10 + * dynamic code objects onto DSP systems. 11 + * 12 + * Copyright (C) 2005-2006 Texas Instruments, Inc. 13 + * 14 + * This package is free software; you can redistribute it and/or modify 15 + * it under the terms of the GNU General Public License version 2 as 16 + * published by the Free Software Foundation. 17 + * 18 + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 19 + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 20 + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 21 + */ 22 + 23 + /* ----------------------------------- Host OS */ 24 + #include <dspbridge/host_os.h> 25 + #include <linux/fs.h> 26 + #include <linux/uaccess.h> 27 + 28 + /* ----------------------------------- DSP/BIOS Bridge */ 29 + #include <dspbridge/std.h> 30 + #include <dspbridge/dbdefs.h> 31 + 32 + /* ----------------------------------- Trace & Debug */ 33 + #include <dspbridge/dbc.h> 34 + 35 + /* ----------------------------------- OS Adaptation Layer */ 36 + #include <dspbridge/ldr.h> 37 + 38 + /* ----------------------------------- Platform Manager */ 39 + /* Include appropriate loader header file */ 40 + #include <dspbridge/dbll.h> 41 + 42 + /* ----------------------------------- This */ 43 + #include <dspbridge/cod.h> 44 + 45 + /* magic number for handle validation */ 46 + #define MAGIC 0xc001beef 47 + 48 + /* macro to validate COD manager handles */ 49 + #define IS_VALID(h) ((h) != NULL && (h)->ul_magic == MAGIC) 50 + 51 + /* 52 + * ======== cod_manager ======== 53 + */ 54 + struct cod_manager { 55 + struct dbll_tar_obj *target; 56 + struct dbll_library_obj *base_lib; 57 + bool loaded; /* Base library loaded? 
*/ 58 + u32 ul_entry; 59 + struct ldr_module *dll_obj; 60 + struct dbll_fxns fxns; 61 + struct dbll_attrs attrs; 62 + char sz_zl_file[COD_MAXPATHLENGTH]; 63 + u32 ul_magic; 64 + }; 65 + 66 + /* 67 + * ======== cod_libraryobj ======== 68 + */ 69 + struct cod_libraryobj { 70 + struct dbll_library_obj *dbll_lib; 71 + struct cod_manager *cod_mgr; 72 + }; 73 + 74 + static u32 refs = 0L; 75 + 76 + static struct dbll_fxns ldr_fxns = { 77 + (dbll_close_fxn) dbll_close, 78 + (dbll_create_fxn) dbll_create, 79 + (dbll_delete_fxn) dbll_delete, 80 + (dbll_exit_fxn) dbll_exit, 81 + (dbll_get_attrs_fxn) dbll_get_attrs, 82 + (dbll_get_addr_fxn) dbll_get_addr, 83 + (dbll_get_c_addr_fxn) dbll_get_c_addr, 84 + (dbll_get_sect_fxn) dbll_get_sect, 85 + (dbll_init_fxn) dbll_init, 86 + (dbll_load_fxn) dbll_load, 87 + (dbll_load_sect_fxn) dbll_load_sect, 88 + (dbll_open_fxn) dbll_open, 89 + (dbll_read_sect_fxn) dbll_read_sect, 90 + (dbll_set_attrs_fxn) dbll_set_attrs, 91 + (dbll_unload_fxn) dbll_unload, 92 + (dbll_unload_sect_fxn) dbll_unload_sect, 93 + }; 94 + 95 + static bool no_op(void); 96 + 97 + /* 98 + * File operations (originally were under kfile.c) 99 + */ 100 + static s32 cod_f_close(struct file *filp) 101 + { 102 + /* Check for valid handle */ 103 + if (!filp) 104 + return -EFAULT; 105 + 106 + filp_close(filp, NULL); 107 + 108 + /* we can't use 0 here */ 109 + return 0; 110 + } 111 + 112 + static struct file *cod_f_open(CONST char *psz_file_name, CONST char *pszMode) 113 + { 114 + mm_segment_t fs; 115 + struct file *filp; 116 + 117 + fs = get_fs(); 118 + set_fs(get_ds()); 119 + 120 + /* ignore given mode and open file as read-only */ 121 + filp = filp_open(psz_file_name, O_RDONLY, 0); 122 + 123 + if (IS_ERR(filp)) 124 + filp = NULL; 125 + 126 + set_fs(fs); 127 + 128 + return filp; 129 + } 130 + 131 + static s32 cod_f_read(void __user *pbuffer, s32 size, s32 cCount, 132 + struct file *filp) 133 + { 134 + /* check for valid file handle */ 135 + if (!filp) 136 + return -EFAULT; 137 
+ 138 + if ((size > 0) && (cCount > 0) && pbuffer) { 139 + u32 dw_bytes_read; 140 + mm_segment_t fs; 141 + 142 + /* read from file */ 143 + fs = get_fs(); 144 + set_fs(get_ds()); 145 + dw_bytes_read = filp->f_op->read(filp, pbuffer, size * cCount, 146 + &(filp->f_pos)); 147 + set_fs(fs); 148 + 149 + if (!dw_bytes_read) 150 + return -EBADF; 151 + 152 + return dw_bytes_read / size; 153 + } 154 + 155 + return -EINVAL; 156 + } 157 + 158 + static s32 cod_f_seek(struct file *filp, s32 lOffset, s32 cOrigin) 159 + { 160 + loff_t dw_cur_pos; 161 + 162 + /* check for valid file handle */ 163 + if (!filp) 164 + return -EFAULT; 165 + 166 + /* based on the origin flag, move the internal pointer */ 167 + dw_cur_pos = filp->f_op->llseek(filp, lOffset, cOrigin); 168 + 169 + if ((s32) dw_cur_pos < 0) 170 + return -EPERM; 171 + 172 + /* we can't use 0 here */ 173 + return 0; 174 + } 175 + 176 + static s32 cod_f_tell(struct file *filp) 177 + { 178 + loff_t dw_cur_pos; 179 + 180 + if (!filp) 181 + return -EFAULT; 182 + 183 + /* Get current position */ 184 + dw_cur_pos = filp->f_op->llseek(filp, 0, SEEK_CUR); 185 + 186 + if ((s32) dw_cur_pos < 0) 187 + return -EPERM; 188 + 189 + return dw_cur_pos; 190 + } 191 + 192 + /* 193 + * ======== cod_close ======== 194 + */ 195 + void cod_close(struct cod_libraryobj *lib) 196 + { 197 + struct cod_manager *hmgr; 198 + 199 + DBC_REQUIRE(refs > 0); 200 + DBC_REQUIRE(lib != NULL); 201 + DBC_REQUIRE(IS_VALID(((struct cod_libraryobj *)lib)->cod_mgr)); 202 + 203 + hmgr = lib->cod_mgr; 204 + hmgr->fxns.close_fxn(lib->dbll_lib); 205 + 206 + kfree(lib); 207 + } 208 + 209 + /* 210 + * ======== cod_create ======== 211 + * Purpose: 212 + * Create an object to manage code on a DSP system. 213 + * This object can be used to load an initial program image with 214 + * arguments that can later be expanded with 215 + * dynamically loaded object files. 
216 + * 217 + */ 218 + int cod_create(OUT struct cod_manager **phMgr, char *pstrDummyFile, 219 + IN OPTIONAL CONST struct cod_attrs *attrs) 220 + { 221 + struct cod_manager *mgr_new; 222 + struct dbll_attrs zl_attrs; 223 + int status = 0; 224 + 225 + DBC_REQUIRE(refs > 0); 226 + DBC_REQUIRE(phMgr != NULL); 227 + 228 + /* assume failure */ 229 + *phMgr = NULL; 230 + 231 + /* we don't support non-default attrs yet */ 232 + if (attrs != NULL) 233 + return -ENOSYS; 234 + 235 + mgr_new = kzalloc(sizeof(struct cod_manager), GFP_KERNEL); 236 + if (mgr_new == NULL) 237 + return -ENOMEM; 238 + 239 + mgr_new->ul_magic = MAGIC; 240 + 241 + /* Set up loader functions */ 242 + mgr_new->fxns = ldr_fxns; 243 + 244 + /* initialize the ZL module */ 245 + mgr_new->fxns.init_fxn(); 246 + 247 + zl_attrs.alloc = (dbll_alloc_fxn) no_op; 248 + zl_attrs.free = (dbll_free_fxn) no_op; 249 + zl_attrs.fread = (dbll_read_fxn) cod_f_read; 250 + zl_attrs.fseek = (dbll_seek_fxn) cod_f_seek; 251 + zl_attrs.ftell = (dbll_tell_fxn) cod_f_tell; 252 + zl_attrs.fclose = (dbll_f_close_fxn) cod_f_close; 253 + zl_attrs.fopen = (dbll_f_open_fxn) cod_f_open; 254 + zl_attrs.sym_lookup = NULL; 255 + zl_attrs.base_image = true; 256 + zl_attrs.log_write = NULL; 257 + zl_attrs.log_write_handle = NULL; 258 + zl_attrs.write = NULL; 259 + zl_attrs.rmm_handle = NULL; 260 + zl_attrs.input_params = NULL; 261 + zl_attrs.sym_handle = NULL; 262 + zl_attrs.sym_arg = NULL; 263 + 264 + mgr_new->attrs = zl_attrs; 265 + 266 + status = mgr_new->fxns.create_fxn(&mgr_new->target, &zl_attrs); 267 + 268 + if (DSP_FAILED(status)) { 269 + cod_delete(mgr_new); 270 + return -ESPIPE; 271 + } 272 + 273 + /* return the new manager */ 274 + *phMgr = mgr_new; 275 + 276 + return 0; 277 + } 278 + 279 + /* 280 + * ======== cod_delete ======== 281 + * Purpose: 282 + * Delete a code manager object. 
283 + */ 284 + void cod_delete(struct cod_manager *hmgr) 285 + { 286 + DBC_REQUIRE(refs > 0); 287 + DBC_REQUIRE(IS_VALID(hmgr)); 288 + 289 + if (hmgr->base_lib) { 290 + if (hmgr->loaded) 291 + hmgr->fxns.unload_fxn(hmgr->base_lib, &hmgr->attrs); 292 + 293 + hmgr->fxns.close_fxn(hmgr->base_lib); 294 + } 295 + if (hmgr->target) { 296 + hmgr->fxns.delete_fxn(hmgr->target); 297 + hmgr->fxns.exit_fxn(); 298 + } 299 + hmgr->ul_magic = ~MAGIC; 300 + kfree(hmgr); 301 + } 302 + 303 + /* 304 + * ======== cod_exit ======== 305 + * Purpose: 306 + * Discontinue usage of the COD module. 307 + * 308 + */ 309 + void cod_exit(void) 310 + { 311 + DBC_REQUIRE(refs > 0); 312 + 313 + refs--; 314 + 315 + DBC_ENSURE(refs >= 0); 316 + } 317 + 318 + /* 319 + * ======== cod_get_base_lib ======== 320 + * Purpose: 321 + * Get handle to the base image DBL library. 322 + */ 323 + int cod_get_base_lib(struct cod_manager *cod_mgr_obj, 324 + struct dbll_library_obj **plib) 325 + { 326 + int status = 0; 327 + 328 + DBC_REQUIRE(refs > 0); 329 + DBC_REQUIRE(IS_VALID(cod_mgr_obj)); 330 + DBC_REQUIRE(plib != NULL); 331 + 332 + *plib = (struct dbll_library_obj *)cod_mgr_obj->base_lib; 333 + 334 + return status; 335 + } 336 + 337 + /* 338 + * ======== cod_get_base_name ======== 339 + */ 340 + int cod_get_base_name(struct cod_manager *cod_mgr_obj, char *pszName, 341 + u32 usize) 342 + { 343 + int status = 0; 344 + 345 + DBC_REQUIRE(refs > 0); 346 + DBC_REQUIRE(IS_VALID(cod_mgr_obj)); 347 + DBC_REQUIRE(pszName != NULL); 348 + 349 + if (usize <= COD_MAXPATHLENGTH) 350 + strncpy(pszName, cod_mgr_obj->sz_zl_file, usize); 351 + else 352 + status = -EPERM; 353 + 354 + return status; 355 + } 356 + 357 + /* 358 + * ======== cod_get_entry ======== 359 + * Purpose: 360 + * Retrieve the entry point of a loaded DSP program image 361 + * 362 + */ 363 + int cod_get_entry(struct cod_manager *cod_mgr_obj, u32 *pulEntry) 364 + { 365 + DBC_REQUIRE(refs > 0); 366 + DBC_REQUIRE(IS_VALID(cod_mgr_obj)); 367 + 
DBC_REQUIRE(pulEntry != NULL); 368 + 369 + *pulEntry = cod_mgr_obj->ul_entry; 370 + 371 + return 0; 372 + } 373 + 374 + /* 375 + * ======== cod_get_loader ======== 376 + * Purpose: 377 + * Get handle to the DBLL loader. 378 + */ 379 + int cod_get_loader(struct cod_manager *cod_mgr_obj, 380 + struct dbll_tar_obj **phLoader) 381 + { 382 + int status = 0; 383 + 384 + DBC_REQUIRE(refs > 0); 385 + DBC_REQUIRE(IS_VALID(cod_mgr_obj)); 386 + DBC_REQUIRE(phLoader != NULL); 387 + 388 + *phLoader = (struct dbll_tar_obj *)cod_mgr_obj->target; 389 + 390 + return status; 391 + } 392 + 393 + /* 394 + * ======== cod_get_section ======== 395 + * Purpose: 396 + * Retrieve the starting address and length of a section in the COFF file 397 + * given the section name. 398 + */ 399 + int cod_get_section(struct cod_libraryobj *lib, IN char *pstrSect, 400 + OUT u32 *puAddr, OUT u32 *puLen) 401 + { 402 + struct cod_manager *cod_mgr_obj; 403 + int status = 0; 404 + 405 + DBC_REQUIRE(refs > 0); 406 + DBC_REQUIRE(lib != NULL); 407 + DBC_REQUIRE(IS_VALID(lib->cod_mgr)); 408 + DBC_REQUIRE(pstrSect != NULL); 409 + DBC_REQUIRE(puAddr != NULL); 410 + DBC_REQUIRE(puLen != NULL); 411 + 412 + *puAddr = 0; 413 + *puLen = 0; 414 + if (lib != NULL) { 415 + cod_mgr_obj = lib->cod_mgr; 416 + status = cod_mgr_obj->fxns.get_sect_fxn(lib->dbll_lib, pstrSect, 417 + puAddr, puLen); 418 + } else { 419 + status = -ESPIPE; 420 + } 421 + 422 + DBC_ENSURE(DSP_SUCCEEDED(status) || ((*puAddr == 0) && (*puLen == 0))); 423 + 424 + return status; 425 + } 426 + 427 + /* 428 + * ======== cod_get_sym_value ======== 429 + * Purpose: 430 + * Retrieve the value for the specified symbol. The symbol is first 431 + * searched for literally and then, if not found, searched for as a 432 + * C symbol. 
433 + * 434 + */ 435 + int cod_get_sym_value(struct cod_manager *hmgr, char *pstrSym, 436 + u32 *pul_value) 437 + { 438 + struct dbll_sym_val *dbll_sym; 439 + 440 + DBC_REQUIRE(refs > 0); 441 + DBC_REQUIRE(IS_VALID(hmgr)); 442 + DBC_REQUIRE(pstrSym != NULL); 443 + DBC_REQUIRE(pul_value != NULL); 444 + 445 + dev_dbg(bridge, "%s: hmgr: %p pstrSym: %s pul_value: %p\n", 446 + __func__, hmgr, pstrSym, pul_value); 447 + if (hmgr->base_lib) { 448 + if (!hmgr->fxns. 449 + get_addr_fxn(hmgr->base_lib, pstrSym, &dbll_sym)) { 450 + if (!hmgr->fxns. 451 + get_c_addr_fxn(hmgr->base_lib, pstrSym, &dbll_sym)) 452 + return -ESPIPE; 453 + } 454 + } else { 455 + return -ESPIPE; 456 + } 457 + 458 + *pul_value = dbll_sym->value; 459 + 460 + return 0; 461 + } 462 + 463 + /* 464 + * ======== cod_init ======== 465 + * Purpose: 466 + * Initialize the COD module's private state. 467 + * 468 + */ 469 + bool cod_init(void) 470 + { 471 + bool ret = true; 472 + 473 + DBC_REQUIRE(refs >= 0); 474 + 475 + if (ret) 476 + refs++; 477 + 478 + DBC_ENSURE((ret && refs > 0) || (!ret && refs >= 0)); 479 + return ret; 480 + } 481 + 482 + /* 483 + * ======== cod_load_base ======== 484 + * Purpose: 485 + * Load the initial program image, optionally with command-line arguments, 486 + * on the DSP system managed by the supplied handle. The program to be 487 + * loaded must be the first element of the args array and must be a fully 488 + * qualified pathname. 489 + * Details: 490 + * if nArgc doesn't match the number of arguments in the aArgs array, the 491 + * aArgs array is searched for a NULL terminating entry, and argc is 492 + * recalculated to reflect this. In this way, we can support NULL 493 + * terminating aArgs arrays, if nArgc is very large. 
494 + */ 495 + int cod_load_base(struct cod_manager *hmgr, u32 nArgc, char *aArgs[], 496 + cod_writefxn pfn_write, void *pArb, char *envp[]) 497 + { 498 + dbll_flags flags; 499 + struct dbll_attrs save_attrs; 500 + struct dbll_attrs new_attrs; 501 + int status; 502 + u32 i; 503 + 504 + DBC_REQUIRE(refs > 0); 505 + DBC_REQUIRE(IS_VALID(hmgr)); 506 + DBC_REQUIRE(nArgc > 0); 507 + DBC_REQUIRE(aArgs != NULL); 508 + DBC_REQUIRE(aArgs[0] != NULL); 509 + DBC_REQUIRE(pfn_write != NULL); 510 + DBC_REQUIRE(hmgr->base_lib != NULL); 511 + 512 + /* 513 + * Make sure every argv[] stated in argc has a value, or change argc to 514 + * reflect true number in NULL terminated argv array. 515 + */ 516 + for (i = 0; i < nArgc; i++) { 517 + if (aArgs[i] == NULL) { 518 + nArgc = i; 519 + break; 520 + } 521 + } 522 + 523 + /* set the write function for this operation */ 524 + hmgr->fxns.get_attrs_fxn(hmgr->target, &save_attrs); 525 + 526 + new_attrs = save_attrs; 527 + new_attrs.write = (dbll_write_fxn) pfn_write; 528 + new_attrs.input_params = pArb; 529 + new_attrs.alloc = (dbll_alloc_fxn) no_op; 530 + new_attrs.free = (dbll_free_fxn) no_op; 531 + new_attrs.log_write = NULL; 532 + new_attrs.log_write_handle = NULL; 533 + 534 + /* Load the image */ 535 + flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB; 536 + status = hmgr->fxns.load_fxn(hmgr->base_lib, flags, &new_attrs, 537 + &hmgr->ul_entry); 538 + if (DSP_FAILED(status)) 539 + hmgr->fxns.close_fxn(hmgr->base_lib); 540 + 541 + if (DSP_SUCCEEDED(status)) 542 + hmgr->loaded = true; 543 + else 544 + hmgr->base_lib = NULL; 545 + 546 + return status; 547 + } 548 + 549 + /* 550 + * ======== cod_open ======== 551 + * Open library for reading sections. 
552 + */ 553 + int cod_open(struct cod_manager *hmgr, IN char *pszCoffPath, 554 + u32 flags, struct cod_libraryobj **pLib) 555 + { 556 + int status = 0; 557 + struct cod_libraryobj *lib = NULL; 558 + 559 + DBC_REQUIRE(refs > 0); 560 + DBC_REQUIRE(IS_VALID(hmgr)); 561 + DBC_REQUIRE(pszCoffPath != NULL); 562 + DBC_REQUIRE(flags == COD_NOLOAD || flags == COD_SYMB); 563 + DBC_REQUIRE(pLib != NULL); 564 + 565 + *pLib = NULL; 566 + 567 + lib = kzalloc(sizeof(struct cod_libraryobj), GFP_KERNEL); 568 + if (lib == NULL) 569 + status = -ENOMEM; 570 + 571 + if (DSP_SUCCEEDED(status)) { 572 + lib->cod_mgr = hmgr; 573 + status = hmgr->fxns.open_fxn(hmgr->target, pszCoffPath, flags, 574 + &lib->dbll_lib); 575 + if (DSP_SUCCEEDED(status)) 576 + *pLib = lib; 577 + } 578 + 579 + if (DSP_FAILED(status)) 580 + pr_err("%s: error status 0x%x, pszCoffPath: %s flags: 0x%x\n", 581 + __func__, status, pszCoffPath, flags); 582 + return status; 583 + } 584 + 585 + /* 586 + * ======== cod_open_base ======== 587 + * Purpose: 588 + * Open base image for reading sections. 
589 + */ 590 + int cod_open_base(struct cod_manager *hmgr, IN char *pszCoffPath, 591 + dbll_flags flags) 592 + { 593 + int status = 0; 594 + struct dbll_library_obj *lib; 595 + 596 + DBC_REQUIRE(refs > 0); 597 + DBC_REQUIRE(IS_VALID(hmgr)); 598 + DBC_REQUIRE(pszCoffPath != NULL); 599 + 600 + /* if we previously opened a base image, close it now */ 601 + if (hmgr->base_lib) { 602 + if (hmgr->loaded) { 603 + hmgr->fxns.unload_fxn(hmgr->base_lib, &hmgr->attrs); 604 + hmgr->loaded = false; 605 + } 606 + hmgr->fxns.close_fxn(hmgr->base_lib); 607 + hmgr->base_lib = NULL; 608 + } 609 + status = hmgr->fxns.open_fxn(hmgr->target, pszCoffPath, flags, &lib); 610 + if (DSP_SUCCEEDED(status)) { 611 + /* hang onto the library for subsequent sym table usage */ 612 + hmgr->base_lib = lib; 613 + strncpy(hmgr->sz_zl_file, pszCoffPath, COD_MAXPATHLENGTH - 1); 614 + hmgr->sz_zl_file[COD_MAXPATHLENGTH - 1] = '\0'; 615 + } 616 + 617 + if (DSP_FAILED(status)) 618 + pr_err("%s: error status 0x%x pszCoffPath: %s\n", __func__, 619 + status, pszCoffPath); 620 + return status; 621 + } 622 + 623 + /* 624 + * ======== cod_read_section ======== 625 + * Purpose: 626 + * Retrieve the content of a code section given the section name. 627 + */ 628 + int cod_read_section(struct cod_libraryobj *lib, IN char *pstrSect, 629 + OUT char *pstrContent, IN u32 cContentSize) 630 + { 631 + int status = 0; 632 + 633 + DBC_REQUIRE(refs > 0); 634 + DBC_REQUIRE(lib != NULL); 635 + DBC_REQUIRE(IS_VALID(lib->cod_mgr)); 636 + DBC_REQUIRE(pstrSect != NULL); 637 + DBC_REQUIRE(pstrContent != NULL); 638 + 639 + if (lib != NULL) 640 + status = 641 + lib->cod_mgr->fxns.read_sect_fxn(lib->dbll_lib, pstrSect, 642 + pstrContent, cContentSize); 643 + else 644 + status = -ESPIPE; 645 + 646 + return status; 647 + } 648 + 649 + /* 650 + * ======== no_op ======== 651 + * Purpose: 652 + * No Operation. 653 + * 654 + */ 655 + static bool no_op(void) 656 + { 657 + return true; 658 + }
+1585
drivers/staging/tidspbridge/pmgr/dbll.c
/*
 * dbll.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

/*  ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
#include <dspbridge/gh.h>

/*  ----------------------------------- OS Adaptation Layer */

/* Dynamic loader library interface */
#include <dspbridge/dynamic_loader.h>
#include <dspbridge/getsection.h>

/*  ----------------------------------- This */
#include <dspbridge/dbll.h>
#include <dspbridge/rmm.h>

/* Number of buckets for symbol hash table */
#define MAXBUCKETS 211

/* Max buffer length (also bounds error-report formatting) */
#define MAXEXPR 128

#ifndef UINT32_C
#define UINT32_C(zzz) ((uint32_t)zzz)
#endif
/* Round x up to the next multiple of 4 (DOFF sections are word-aligned) */
#define DOFF_ALIGN(x) (((x) + 3) & ~UINT32_C(3))

/*
 *  ======== struct dbll_tar_obj* ========
 *  A target may have one or more libraries of symbols/code/data loaded
 *  onto it, where a library is simply the symbols/code/data contained
 *  in a DOFF file.
 */
/*
 *  ======== dbll_tar_obj ========
 */
struct dbll_tar_obj {
	struct dbll_attrs attrs;	/* file/memory callbacks for this target */
	struct dbll_library_obj *head;	/* List of all opened libraries */
};

/*
 *  The following 4 typedefs are "super classes" of the dynamic loader
 *  library types used in dynamic loader functions (dynamic_loader.h).
 */
/*
 *  ======== dbll_stream ========
 *  Contains dynamic_loader_stream
 */
struct dbll_stream {
	struct dynamic_loader_stream dl_stream;
	struct dbll_library_obj *lib;	/* back-pointer to owning library */
};

/*
 *  ======== ldr_symbol ========
 */
struct ldr_symbol {
	struct dynamic_loader_sym dl_symbol;
	struct dbll_library_obj *lib;	/* back-pointer to owning library */
};

/*
 *  ======== dbll_alloc ========
 */
struct dbll_alloc {
	struct dynamic_loader_allocate dl_alloc;
	struct dbll_library_obj *lib;	/* back-pointer to owning library */
};

/*
 *  ======== dbll_init_obj ========
 */
struct dbll_init_obj {
	struct dynamic_loader_initialize dl_init;
	struct dbll_library_obj *lib;	/* back-pointer to owning library */
};

/*
 *  ======== DBLL_Library ========
 *  A library handle is returned by DBLL_Open() and is passed to dbll_load()
 *  to load symbols/code/data, and to dbll_unload(), to remove the
 *  symbols/code/data loaded by dbll_load().
 */

/*
 *  ======== dbll_library_obj ========
 */
struct dbll_library_obj {
	struct dbll_library_obj *next;	/* Next library in target's list */
	struct dbll_library_obj *prev;	/* Previous in the list */
	struct dbll_tar_obj *target_obj;	/* target for this library */

	/* Objects needed by dynamic loader */
	struct dbll_stream stream;
	struct ldr_symbol symbol;
	struct dbll_alloc allocate;
	struct dbll_init_obj init;
	void *dload_mod_obj;	/* opaque module handle from dynamic loader */

	char *file_name;	/* COFF file name */
	void *fp;		/* Opaque file handle */
	u32 entry;		/* Entry point */
	void *desc;		/* desc of DOFF file loaded */
	u32 open_ref;		/* Number of times opened */
	u32 load_ref;		/* Number of times loaded */
	struct gh_t_hash_tab *sym_tab;	/* Hash table of symbols */
	u32 ul_pos;		/* saved file position for re-reads */
};

/*
 *  ======== dbll_symbol ========
 *  Hash table entry: symbol value plus its heap-allocated name.
 */
struct dbll_symbol {
	struct dbll_sym_val value;
	char *name;
};

static void dof_close(struct dbll_library_obj *zl_lib);
static int dof_open(struct dbll_library_obj *zl_lib);
static s32 no_op(struct dynamic_loader_initialize *thisptr, void *bufr,
		 ldr_addr locn, struct ldr_section_info *info, unsigned bytsiz);

/*
 *  Functions called by dynamic loader
 *
 */
/* dynamic_loader_stream */
static int dbll_read_buffer(struct dynamic_loader_stream *this, void *buffer,
			    unsigned bufsize);
static int dbll_set_file_posn(struct dynamic_loader_stream *this,
			      unsigned int pos);
/* dynamic_loader_sym */
static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this,
					       const char *name);
static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym
						       *this, const char *name,
						       unsigned moduleId);
static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym
161 + *this, const char *name, 162 + unsigned moduleid); 163 + static void dbll_purge_symbol_table(struct dynamic_loader_sym *this, 164 + unsigned moduleId); 165 + static void *allocate(struct dynamic_loader_sym *this, unsigned memsize); 166 + static void deallocate(struct dynamic_loader_sym *this, void *memPtr); 167 + static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr, 168 + va_list args); 169 + /* dynamic_loader_allocate */ 170 + static int dbll_rmm_alloc(struct dynamic_loader_allocate *this, 171 + struct ldr_section_info *info, unsigned align); 172 + static void rmm_dealloc(struct dynamic_loader_allocate *this, 173 + struct ldr_section_info *info); 174 + 175 + /* dynamic_loader_initialize */ 176 + static int connect(struct dynamic_loader_initialize *this); 177 + static int read_mem(struct dynamic_loader_initialize *this, void *buf, 178 + ldr_addr addr, struct ldr_section_info *info, 179 + unsigned nbytes); 180 + static int write_mem(struct dynamic_loader_initialize *this, void *buf, 181 + ldr_addr addr, struct ldr_section_info *info, 182 + unsigned nbytes); 183 + static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr, 184 + struct ldr_section_info *info, unsigned nbytes, 185 + unsigned val); 186 + static int execute(struct dynamic_loader_initialize *this, ldr_addr start); 187 + static void release(struct dynamic_loader_initialize *this); 188 + 189 + /* symbol table hash functions */ 190 + static u16 name_hash(void *name, u16 max_bucket); 191 + static bool name_match(void *name, void *sp); 192 + static void sym_delete(void *sp); 193 + 194 + static u32 refs; /* module reference count */ 195 + 196 + /* Symbol Redefinition */ 197 + static int redefined_symbol; 198 + static int gbl_search = 1; 199 + 200 + /* 201 + * ======== dbll_close ======== 202 + */ 203 + void dbll_close(struct dbll_library_obj *zl_lib) 204 + { 205 + struct dbll_tar_obj *zl_target; 206 + 207 + DBC_REQUIRE(refs > 0); 208 + DBC_REQUIRE(zl_lib); 209 + 
DBC_REQUIRE(zl_lib->open_ref > 0); 210 + zl_target = zl_lib->target_obj; 211 + zl_lib->open_ref--; 212 + if (zl_lib->open_ref == 0) { 213 + /* Remove library from list */ 214 + if (zl_target->head == zl_lib) 215 + zl_target->head = zl_lib->next; 216 + 217 + if (zl_lib->prev) 218 + (zl_lib->prev)->next = zl_lib->next; 219 + 220 + if (zl_lib->next) 221 + (zl_lib->next)->prev = zl_lib->prev; 222 + 223 + /* Free DOF resources */ 224 + dof_close(zl_lib); 225 + kfree(zl_lib->file_name); 226 + 227 + /* remove symbols from symbol table */ 228 + if (zl_lib->sym_tab) 229 + gh_delete(zl_lib->sym_tab); 230 + 231 + /* remove the library object itself */ 232 + kfree(zl_lib); 233 + zl_lib = NULL; 234 + } 235 + } 236 + 237 + /* 238 + * ======== dbll_create ======== 239 + */ 240 + int dbll_create(struct dbll_tar_obj **target_obj, 241 + struct dbll_attrs *pattrs) 242 + { 243 + struct dbll_tar_obj *pzl_target; 244 + int status = 0; 245 + 246 + DBC_REQUIRE(refs > 0); 247 + DBC_REQUIRE(pattrs != NULL); 248 + DBC_REQUIRE(target_obj != NULL); 249 + 250 + /* Allocate DBL target object */ 251 + pzl_target = kzalloc(sizeof(struct dbll_tar_obj), GFP_KERNEL); 252 + if (target_obj != NULL) { 253 + if (pzl_target == NULL) { 254 + *target_obj = NULL; 255 + status = -ENOMEM; 256 + } else { 257 + pzl_target->attrs = *pattrs; 258 + *target_obj = (struct dbll_tar_obj *)pzl_target; 259 + } 260 + DBC_ENSURE((DSP_SUCCEEDED(status) && *target_obj) || 261 + (DSP_FAILED(status) && *target_obj == NULL)); 262 + } 263 + 264 + return status; 265 + } 266 + 267 + /* 268 + * ======== dbll_delete ======== 269 + */ 270 + void dbll_delete(struct dbll_tar_obj *target) 271 + { 272 + struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target; 273 + 274 + DBC_REQUIRE(refs > 0); 275 + DBC_REQUIRE(zl_target); 276 + 277 + if (zl_target != NULL) 278 + kfree(zl_target); 279 + 280 + } 281 + 282 + /* 283 + * ======== dbll_exit ======== 284 + * Discontinue usage of DBL module. 
285 + */ 286 + void dbll_exit(void) 287 + { 288 + DBC_REQUIRE(refs > 0); 289 + 290 + refs--; 291 + 292 + if (refs == 0) 293 + gh_exit(); 294 + 295 + DBC_ENSURE(refs >= 0); 296 + } 297 + 298 + /* 299 + * ======== dbll_get_addr ======== 300 + * Get address of name in the specified library. 301 + */ 302 + bool dbll_get_addr(struct dbll_library_obj *zl_lib, char *name, 303 + struct dbll_sym_val **ppSym) 304 + { 305 + struct dbll_symbol *sym; 306 + bool status = false; 307 + 308 + DBC_REQUIRE(refs > 0); 309 + DBC_REQUIRE(zl_lib); 310 + DBC_REQUIRE(name != NULL); 311 + DBC_REQUIRE(ppSym != NULL); 312 + DBC_REQUIRE(zl_lib->sym_tab != NULL); 313 + 314 + sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, name); 315 + if (sym != NULL) { 316 + *ppSym = &sym->value; 317 + status = true; 318 + } 319 + 320 + dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p, status 0x%x\n", 321 + __func__, zl_lib, name, ppSym, status); 322 + return status; 323 + } 324 + 325 + /* 326 + * ======== dbll_get_attrs ======== 327 + * Retrieve the attributes of the target. 328 + */ 329 + void dbll_get_attrs(struct dbll_tar_obj *target, struct dbll_attrs *pattrs) 330 + { 331 + struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target; 332 + 333 + DBC_REQUIRE(refs > 0); 334 + DBC_REQUIRE(zl_target); 335 + DBC_REQUIRE(pattrs != NULL); 336 + 337 + if ((pattrs != NULL) && (zl_target != NULL)) 338 + *pattrs = zl_target->attrs; 339 + 340 + } 341 + 342 + /* 343 + * ======== dbll_get_c_addr ======== 344 + * Get address of a "C" name in the specified library. 
345 + */ 346 + bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name, 347 + struct dbll_sym_val **ppSym) 348 + { 349 + struct dbll_symbol *sym; 350 + char cname[MAXEXPR + 1]; 351 + bool status = false; 352 + 353 + DBC_REQUIRE(refs > 0); 354 + DBC_REQUIRE(zl_lib); 355 + DBC_REQUIRE(ppSym != NULL); 356 + DBC_REQUIRE(zl_lib->sym_tab != NULL); 357 + DBC_REQUIRE(name != NULL); 358 + 359 + cname[0] = '_'; 360 + 361 + strncpy(cname + 1, name, sizeof(cname) - 2); 362 + cname[MAXEXPR] = '\0'; /* insure '\0' string termination */ 363 + 364 + /* Check for C name, if not found */ 365 + sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, cname); 366 + 367 + if (sym != NULL) { 368 + *ppSym = &sym->value; 369 + status = true; 370 + } 371 + 372 + return status; 373 + } 374 + 375 + /* 376 + * ======== dbll_get_sect ======== 377 + * Get the base address and size (in bytes) of a COFF section. 378 + */ 379 + int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr, 380 + u32 *psize) 381 + { 382 + u32 byte_size; 383 + bool opened_doff = false; 384 + const struct ldr_section_info *sect = NULL; 385 + struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib; 386 + int status = 0; 387 + 388 + DBC_REQUIRE(refs > 0); 389 + DBC_REQUIRE(name != NULL); 390 + DBC_REQUIRE(paddr != NULL); 391 + DBC_REQUIRE(psize != NULL); 392 + DBC_REQUIRE(zl_lib); 393 + 394 + /* If DOFF file is not open, we open it. 
*/ 395 + if (zl_lib != NULL) { 396 + if (zl_lib->fp == NULL) { 397 + status = dof_open(zl_lib); 398 + if (DSP_SUCCEEDED(status)) 399 + opened_doff = true; 400 + 401 + } else { 402 + (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, 403 + zl_lib->ul_pos, 404 + SEEK_SET); 405 + } 406 + } else { 407 + status = -EFAULT; 408 + } 409 + if (DSP_SUCCEEDED(status)) { 410 + byte_size = 1; 411 + if (dload_get_section_info(zl_lib->desc, name, &sect)) { 412 + *paddr = sect->load_addr; 413 + *psize = sect->size * byte_size; 414 + /* Make sure size is even for good swap */ 415 + if (*psize % 2) 416 + (*psize)++; 417 + 418 + /* Align size */ 419 + *psize = DOFF_ALIGN(*psize); 420 + } else { 421 + status = -ENXIO; 422 + } 423 + } 424 + if (opened_doff) { 425 + dof_close(zl_lib); 426 + opened_doff = false; 427 + } 428 + 429 + dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p psize: %p, " 430 + "status 0x%x\n", __func__, lib, name, paddr, psize, status); 431 + 432 + return status; 433 + } 434 + 435 + /* 436 + * ======== dbll_init ======== 437 + */ 438 + bool dbll_init(void) 439 + { 440 + DBC_REQUIRE(refs >= 0); 441 + 442 + if (refs == 0) 443 + gh_init(); 444 + 445 + refs++; 446 + 447 + return true; 448 + } 449 + 450 + /* 451 + * ======== dbll_load ======== 452 + */ 453 + int dbll_load(struct dbll_library_obj *lib, dbll_flags flags, 454 + struct dbll_attrs *attrs, u32 *pEntry) 455 + { 456 + struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib; 457 + struct dbll_tar_obj *dbzl; 458 + bool got_symbols = true; 459 + s32 err; 460 + int status = 0; 461 + bool opened_doff = false; 462 + DBC_REQUIRE(refs > 0); 463 + DBC_REQUIRE(zl_lib); 464 + DBC_REQUIRE(pEntry != NULL); 465 + DBC_REQUIRE(attrs != NULL); 466 + 467 + /* 468 + * Load if not already loaded. 
469 + */ 470 + if (zl_lib->load_ref == 0 || !(flags & DBLL_DYNAMIC)) { 471 + dbzl = zl_lib->target_obj; 472 + dbzl->attrs = *attrs; 473 + /* Create a hash table for symbols if not already created */ 474 + if (zl_lib->sym_tab == NULL) { 475 + got_symbols = false; 476 + zl_lib->sym_tab = gh_create(MAXBUCKETS, 477 + sizeof(struct dbll_symbol), 478 + name_hash, 479 + name_match, sym_delete); 480 + if (zl_lib->sym_tab == NULL) 481 + status = -ENOMEM; 482 + 483 + } 484 + /* 485 + * Set up objects needed by the dynamic loader 486 + */ 487 + /* Stream */ 488 + zl_lib->stream.dl_stream.read_buffer = dbll_read_buffer; 489 + zl_lib->stream.dl_stream.set_file_posn = dbll_set_file_posn; 490 + zl_lib->stream.lib = zl_lib; 491 + /* Symbol */ 492 + zl_lib->symbol.dl_symbol.find_matching_symbol = 493 + dbll_find_symbol; 494 + if (got_symbols) { 495 + zl_lib->symbol.dl_symbol.add_to_symbol_table = 496 + find_in_symbol_table; 497 + } else { 498 + zl_lib->symbol.dl_symbol.add_to_symbol_table = 499 + dbll_add_to_symbol_table; 500 + } 501 + zl_lib->symbol.dl_symbol.purge_symbol_table = 502 + dbll_purge_symbol_table; 503 + zl_lib->symbol.dl_symbol.dload_allocate = allocate; 504 + zl_lib->symbol.dl_symbol.dload_deallocate = deallocate; 505 + zl_lib->symbol.dl_symbol.error_report = dbll_err_report; 506 + zl_lib->symbol.lib = zl_lib; 507 + /* Allocate */ 508 + zl_lib->allocate.dl_alloc.dload_allocate = dbll_rmm_alloc; 509 + zl_lib->allocate.dl_alloc.dload_deallocate = rmm_dealloc; 510 + zl_lib->allocate.lib = zl_lib; 511 + /* Init */ 512 + zl_lib->init.dl_init.connect = connect; 513 + zl_lib->init.dl_init.readmem = read_mem; 514 + zl_lib->init.dl_init.writemem = write_mem; 515 + zl_lib->init.dl_init.fillmem = fill_mem; 516 + zl_lib->init.dl_init.execute = execute; 517 + zl_lib->init.dl_init.release = release; 518 + zl_lib->init.lib = zl_lib; 519 + /* If COFF file is not open, we open it. 
*/ 520 + if (zl_lib->fp == NULL) { 521 + status = dof_open(zl_lib); 522 + if (DSP_SUCCEEDED(status)) 523 + opened_doff = true; 524 + 525 + } 526 + if (DSP_SUCCEEDED(status)) { 527 + zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell)) 528 + (zl_lib->fp); 529 + /* Reset file cursor */ 530 + (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, 531 + (long)0, 532 + SEEK_SET); 533 + symbols_reloaded = true; 534 + /* The 5th argument, DLOAD_INITBSS, tells the DLL 535 + * module to zero-init all BSS sections. In general, 536 + * this is not necessary and also increases load time. 537 + * We may want to make this configurable by the user */ 538 + err = dynamic_load_module(&zl_lib->stream.dl_stream, 539 + &zl_lib->symbol.dl_symbol, 540 + &zl_lib->allocate.dl_alloc, 541 + &zl_lib->init.dl_init, 542 + DLOAD_INITBSS, 543 + &zl_lib->dload_mod_obj); 544 + 545 + if (err != 0) { 546 + status = -EILSEQ; 547 + } else if (redefined_symbol) { 548 + zl_lib->load_ref++; 549 + dbll_unload(zl_lib, (struct dbll_attrs *)attrs); 550 + redefined_symbol = false; 551 + status = -EILSEQ; 552 + } else { 553 + *pEntry = zl_lib->entry; 554 + } 555 + } 556 + } 557 + if (DSP_SUCCEEDED(status)) 558 + zl_lib->load_ref++; 559 + 560 + /* Clean up DOFF resources */ 561 + if (opened_doff) 562 + dof_close(zl_lib); 563 + 564 + DBC_ENSURE(DSP_FAILED(status) || zl_lib->load_ref > 0); 565 + 566 + dev_dbg(bridge, "%s: lib: %p flags: 0x%x pEntry: %p, status 0x%x\n", 567 + __func__, lib, flags, pEntry, status); 568 + 569 + return status; 570 + } 571 + 572 + /* 573 + * ======== dbll_load_sect ======== 574 + * Not supported for COFF. 
575 + */ 576 + int dbll_load_sect(struct dbll_library_obj *zl_lib, char *sectName, 577 + struct dbll_attrs *attrs) 578 + { 579 + DBC_REQUIRE(zl_lib); 580 + 581 + return -ENOSYS; 582 + } 583 + 584 + /* 585 + * ======== dbll_open ======== 586 + */ 587 + int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags, 588 + struct dbll_library_obj **pLib) 589 + { 590 + struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target; 591 + struct dbll_library_obj *zl_lib = NULL; 592 + s32 err; 593 + int status = 0; 594 + 595 + DBC_REQUIRE(refs > 0); 596 + DBC_REQUIRE(zl_target); 597 + DBC_REQUIRE(zl_target->attrs.fopen != NULL); 598 + DBC_REQUIRE(file != NULL); 599 + DBC_REQUIRE(pLib != NULL); 600 + 601 + zl_lib = zl_target->head; 602 + while (zl_lib != NULL) { 603 + if (strcmp(zl_lib->file_name, file) == 0) { 604 + /* Library is already opened */ 605 + zl_lib->open_ref++; 606 + break; 607 + } 608 + zl_lib = zl_lib->next; 609 + } 610 + if (zl_lib == NULL) { 611 + /* Allocate DBL library object */ 612 + zl_lib = kzalloc(sizeof(struct dbll_library_obj), GFP_KERNEL); 613 + if (zl_lib == NULL) { 614 + status = -ENOMEM; 615 + } else { 616 + zl_lib->ul_pos = 0; 617 + /* Increment ref count to allow close on failure 618 + * later on */ 619 + zl_lib->open_ref++; 620 + zl_lib->target_obj = zl_target; 621 + /* Keep a copy of the file name */ 622 + zl_lib->file_name = kzalloc(strlen(file) + 1, 623 + GFP_KERNEL); 624 + if (zl_lib->file_name == NULL) { 625 + status = -ENOMEM; 626 + } else { 627 + strncpy(zl_lib->file_name, file, 628 + strlen(file) + 1); 629 + } 630 + zl_lib->sym_tab = NULL; 631 + } 632 + } 633 + /* 634 + * Set up objects needed by the dynamic loader 635 + */ 636 + if (DSP_FAILED(status)) 637 + goto func_cont; 638 + 639 + /* Stream */ 640 + zl_lib->stream.dl_stream.read_buffer = dbll_read_buffer; 641 + zl_lib->stream.dl_stream.set_file_posn = dbll_set_file_posn; 642 + zl_lib->stream.lib = zl_lib; 643 + /* Symbol */ 644 + 
zl_lib->symbol.dl_symbol.add_to_symbol_table = dbll_add_to_symbol_table; 645 + zl_lib->symbol.dl_symbol.find_matching_symbol = dbll_find_symbol; 646 + zl_lib->symbol.dl_symbol.purge_symbol_table = dbll_purge_symbol_table; 647 + zl_lib->symbol.dl_symbol.dload_allocate = allocate; 648 + zl_lib->symbol.dl_symbol.dload_deallocate = deallocate; 649 + zl_lib->symbol.dl_symbol.error_report = dbll_err_report; 650 + zl_lib->symbol.lib = zl_lib; 651 + /* Allocate */ 652 + zl_lib->allocate.dl_alloc.dload_allocate = dbll_rmm_alloc; 653 + zl_lib->allocate.dl_alloc.dload_deallocate = rmm_dealloc; 654 + zl_lib->allocate.lib = zl_lib; 655 + /* Init */ 656 + zl_lib->init.dl_init.connect = connect; 657 + zl_lib->init.dl_init.readmem = read_mem; 658 + zl_lib->init.dl_init.writemem = write_mem; 659 + zl_lib->init.dl_init.fillmem = fill_mem; 660 + zl_lib->init.dl_init.execute = execute; 661 + zl_lib->init.dl_init.release = release; 662 + zl_lib->init.lib = zl_lib; 663 + if (DSP_SUCCEEDED(status) && zl_lib->fp == NULL) 664 + status = dof_open(zl_lib); 665 + 666 + zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell)) (zl_lib->fp); 667 + (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0, SEEK_SET); 668 + /* Create a hash table for symbols if flag is set */ 669 + if (zl_lib->sym_tab != NULL || !(flags & DBLL_SYMB)) 670 + goto func_cont; 671 + 672 + zl_lib->sym_tab = 673 + gh_create(MAXBUCKETS, sizeof(struct dbll_symbol), name_hash, 674 + name_match, sym_delete); 675 + if (zl_lib->sym_tab == NULL) { 676 + status = -ENOMEM; 677 + } else { 678 + /* Do a fake load to get symbols - set write func to no_op */ 679 + zl_lib->init.dl_init.writemem = no_op; 680 + err = dynamic_open_module(&zl_lib->stream.dl_stream, 681 + &zl_lib->symbol.dl_symbol, 682 + &zl_lib->allocate.dl_alloc, 683 + &zl_lib->init.dl_init, 0, 684 + &zl_lib->dload_mod_obj); 685 + if (err != 0) { 686 + status = -EILSEQ; 687 + } else { 688 + /* Now that we have the symbol table, we can unload */ 689 + err = 
dynamic_unload_module(zl_lib->dload_mod_obj, 690 + &zl_lib->symbol.dl_symbol, 691 + &zl_lib->allocate.dl_alloc, 692 + &zl_lib->init.dl_init); 693 + if (err != 0) 694 + status = -EILSEQ; 695 + 696 + zl_lib->dload_mod_obj = NULL; 697 + } 698 + } 699 + func_cont: 700 + if (DSP_SUCCEEDED(status)) { 701 + if (zl_lib->open_ref == 1) { 702 + /* First time opened - insert in list */ 703 + if (zl_target->head) 704 + (zl_target->head)->prev = zl_lib; 705 + 706 + zl_lib->prev = NULL; 707 + zl_lib->next = zl_target->head; 708 + zl_target->head = zl_lib; 709 + } 710 + *pLib = (struct dbll_library_obj *)zl_lib; 711 + } else { 712 + *pLib = NULL; 713 + if (zl_lib != NULL) 714 + dbll_close((struct dbll_library_obj *)zl_lib); 715 + 716 + } 717 + DBC_ENSURE((DSP_SUCCEEDED(status) && (zl_lib->open_ref > 0) && *pLib) 718 + || (DSP_FAILED(status) && *pLib == NULL)); 719 + 720 + dev_dbg(bridge, "%s: target: %p file: %s pLib: %p, status 0x%x\n", 721 + __func__, target, file, pLib, status); 722 + 723 + return status; 724 + } 725 + 726 + /* 727 + * ======== dbll_read_sect ======== 728 + * Get the content of a COFF section. 729 + */ 730 + int dbll_read_sect(struct dbll_library_obj *lib, char *name, 731 + char *pContent, u32 size) 732 + { 733 + struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib; 734 + bool opened_doff = false; 735 + u32 byte_size; /* size of bytes */ 736 + u32 ul_sect_size; /* size of section */ 737 + const struct ldr_section_info *sect = NULL; 738 + int status = 0; 739 + 740 + DBC_REQUIRE(refs > 0); 741 + DBC_REQUIRE(zl_lib); 742 + DBC_REQUIRE(name != NULL); 743 + DBC_REQUIRE(pContent != NULL); 744 + DBC_REQUIRE(size != 0); 745 + 746 + /* If DOFF file is not open, we open it. 
*/ 747 + if (zl_lib != NULL) { 748 + if (zl_lib->fp == NULL) { 749 + status = dof_open(zl_lib); 750 + if (DSP_SUCCEEDED(status)) 751 + opened_doff = true; 752 + 753 + } else { 754 + (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, 755 + zl_lib->ul_pos, 756 + SEEK_SET); 757 + } 758 + } else { 759 + status = -EFAULT; 760 + } 761 + if (DSP_FAILED(status)) 762 + goto func_cont; 763 + 764 + byte_size = 1; 765 + if (!dload_get_section_info(zl_lib->desc, name, &sect)) { 766 + status = -ENXIO; 767 + goto func_cont; 768 + } 769 + /* 770 + * Ensure the supplied buffer size is sufficient to store 771 + * the section content to be read. 772 + */ 773 + ul_sect_size = sect->size * byte_size; 774 + /* Make sure size is even for good swap */ 775 + if (ul_sect_size % 2) 776 + ul_sect_size++; 777 + 778 + /* Align size */ 779 + ul_sect_size = DOFF_ALIGN(ul_sect_size); 780 + if (ul_sect_size > size) { 781 + status = -EPERM; 782 + } else { 783 + if (!dload_get_section(zl_lib->desc, sect, pContent)) 784 + status = -EBADF; 785 + 786 + } 787 + func_cont: 788 + if (opened_doff) { 789 + dof_close(zl_lib); 790 + opened_doff = false; 791 + } 792 + 793 + dev_dbg(bridge, "%s: lib: %p name: %s pContent: %p size: 0x%x, " 794 + "status 0x%x\n", __func__, lib, name, pContent, size, status); 795 + return status; 796 + } 797 + 798 + /* 799 + * ======== dbll_set_attrs ======== 800 + * Set the attributes of the target. 
801 + */ 802 + void dbll_set_attrs(struct dbll_tar_obj *target, struct dbll_attrs *pattrs) 803 + { 804 + struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target; 805 + DBC_REQUIRE(refs > 0); 806 + DBC_REQUIRE(zl_target); 807 + DBC_REQUIRE(pattrs != NULL); 808 + 809 + if ((pattrs != NULL) && (zl_target != NULL)) 810 + zl_target->attrs = *pattrs; 811 + 812 + } 813 + 814 + /* 815 + * ======== dbll_unload ======== 816 + */ 817 + void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs) 818 + { 819 + struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib; 820 + s32 err = 0; 821 + 822 + DBC_REQUIRE(refs > 0); 823 + DBC_REQUIRE(zl_lib); 824 + DBC_REQUIRE(zl_lib->load_ref > 0); 825 + dev_dbg(bridge, "%s: lib: %p\n", __func__, lib); 826 + zl_lib->load_ref--; 827 + /* Unload only if reference count is 0 */ 828 + if (zl_lib->load_ref != 0) 829 + goto func_end; 830 + 831 + zl_lib->target_obj->attrs = *attrs; 832 + if (zl_lib->dload_mod_obj) { 833 + err = dynamic_unload_module(zl_lib->dload_mod_obj, 834 + &zl_lib->symbol.dl_symbol, 835 + &zl_lib->allocate.dl_alloc, 836 + &zl_lib->init.dl_init); 837 + if (err != 0) 838 + dev_dbg(bridge, "%s: failed: 0x%x\n", __func__, err); 839 + } 840 + /* remove symbols from symbol table */ 841 + if (zl_lib->sym_tab != NULL) { 842 + gh_delete(zl_lib->sym_tab); 843 + zl_lib->sym_tab = NULL; 844 + } 845 + /* delete DOFF desc since it holds *lots* of host OS 846 + * resources */ 847 + dof_close(zl_lib); 848 + func_end: 849 + DBC_ENSURE(zl_lib->load_ref >= 0); 850 + } 851 + 852 + /* 853 + * ======== dbll_unload_sect ======== 854 + * Not supported for COFF. 
855 + */ 856 + int dbll_unload_sect(struct dbll_library_obj *lib, char *sectName, 857 + struct dbll_attrs *attrs) 858 + { 859 + DBC_REQUIRE(refs > 0); 860 + DBC_REQUIRE(sectName != NULL); 861 + 862 + return -ENOSYS; 863 + } 864 + 865 + /* 866 + * ======== dof_close ======== 867 + */ 868 + static void dof_close(struct dbll_library_obj *zl_lib) 869 + { 870 + if (zl_lib->desc) { 871 + dload_module_close(zl_lib->desc); 872 + zl_lib->desc = NULL; 873 + } 874 + /* close file */ 875 + if (zl_lib->fp) { 876 + (zl_lib->target_obj->attrs.fclose) (zl_lib->fp); 877 + zl_lib->fp = NULL; 878 + } 879 + } 880 + 881 + /* 882 + * ======== dof_open ======== 883 + */ 884 + static int dof_open(struct dbll_library_obj *zl_lib) 885 + { 886 + void *open = *(zl_lib->target_obj->attrs.fopen); 887 + int status = 0; 888 + 889 + /* First open the file for the dynamic loader, then open COF */ 890 + zl_lib->fp = 891 + (void *)((dbll_f_open_fxn) (open)) (zl_lib->file_name, "rb"); 892 + 893 + /* Open DOFF module */ 894 + if (zl_lib->fp && zl_lib->desc == NULL) { 895 + (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0, 896 + SEEK_SET); 897 + zl_lib->desc = 898 + dload_module_open(&zl_lib->stream.dl_stream, 899 + &zl_lib->symbol.dl_symbol); 900 + if (zl_lib->desc == NULL) { 901 + (zl_lib->target_obj->attrs.fclose) (zl_lib->fp); 902 + zl_lib->fp = NULL; 903 + status = -EBADF; 904 + } 905 + } else { 906 + status = -EBADF; 907 + } 908 + 909 + return status; 910 + } 911 + 912 + /* 913 + * ======== name_hash ======== 914 + */ 915 + static u16 name_hash(void *key, u16 max_bucket) 916 + { 917 + u16 ret; 918 + u16 hash; 919 + char *name = (char *)key; 920 + 921 + DBC_REQUIRE(name != NULL); 922 + 923 + hash = 0; 924 + 925 + while (*name) { 926 + hash <<= 1; 927 + hash ^= *name++; 928 + } 929 + 930 + ret = hash % max_bucket; 931 + 932 + return ret; 933 + } 934 + 935 + /* 936 + * ======== name_match ======== 937 + */ 938 + static bool name_match(void *key, void *value) 939 + { 940 + DBC_REQUIRE(key != 
NULL); 941 + DBC_REQUIRE(value != NULL); 942 + 943 + if ((key != NULL) && (value != NULL)) { 944 + if (strcmp((char *)key, ((struct dbll_symbol *)value)->name) == 945 + 0) 946 + return true; 947 + } 948 + return false; 949 + } 950 + 951 + /* 952 + * ======== no_op ======== 953 + */ 954 + static int no_op(struct dynamic_loader_initialize *thisptr, void *bufr, 955 + ldr_addr locn, struct ldr_section_info *info, unsigned bytsize) 956 + { 957 + return 1; 958 + } 959 + 960 + /* 961 + * ======== sym_delete ======== 962 + */ 963 + static void sym_delete(void *value) 964 + { 965 + struct dbll_symbol *sp = (struct dbll_symbol *)value; 966 + 967 + kfree(sp->name); 968 + } 969 + 970 + /* 971 + * Dynamic Loader Functions 972 + */ 973 + 974 + /* dynamic_loader_stream */ 975 + /* 976 + * ======== dbll_read_buffer ======== 977 + */ 978 + static int dbll_read_buffer(struct dynamic_loader_stream *this, void *buffer, 979 + unsigned bufsize) 980 + { 981 + struct dbll_stream *pstream = (struct dbll_stream *)this; 982 + struct dbll_library_obj *lib; 983 + int bytes_read = 0; 984 + 985 + DBC_REQUIRE(this != NULL); 986 + lib = pstream->lib; 987 + DBC_REQUIRE(lib); 988 + 989 + if (lib != NULL) { 990 + bytes_read = 991 + (*(lib->target_obj->attrs.fread)) (buffer, 1, bufsize, 992 + lib->fp); 993 + } 994 + return bytes_read; 995 + } 996 + 997 + /* 998 + * ======== dbll_set_file_posn ======== 999 + */ 1000 + static int dbll_set_file_posn(struct dynamic_loader_stream *this, 1001 + unsigned int pos) 1002 + { 1003 + struct dbll_stream *pstream = (struct dbll_stream *)this; 1004 + struct dbll_library_obj *lib; 1005 + int status = 0; /* Success */ 1006 + 1007 + DBC_REQUIRE(this != NULL); 1008 + lib = pstream->lib; 1009 + DBC_REQUIRE(lib); 1010 + 1011 + if (lib != NULL) { 1012 + status = (*(lib->target_obj->attrs.fseek)) (lib->fp, (long)pos, 1013 + SEEK_SET); 1014 + } 1015 + 1016 + return status; 1017 + } 1018 + 1019 + /* dynamic_loader_sym */ 1020 + 1021 + /* 1022 + * ======== dbll_find_symbol 
======== 1023 + */ 1024 + static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this, 1025 + const char *name) 1026 + { 1027 + struct dynload_symbol *ret_sym; 1028 + struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; 1029 + struct dbll_library_obj *lib; 1030 + struct dbll_sym_val *dbll_sym = NULL; 1031 + bool status = false; /* Symbol not found yet */ 1032 + 1033 + DBC_REQUIRE(this != NULL); 1034 + lib = ldr_sym->lib; 1035 + DBC_REQUIRE(lib); 1036 + 1037 + if (lib != NULL) { 1038 + if (lib->target_obj->attrs.sym_lookup) { 1039 + /* Check current lib + base lib + dep lib + 1040 + * persistent lib */ 1041 + status = (*(lib->target_obj->attrs.sym_lookup)) 1042 + (lib->target_obj->attrs.sym_handle, 1043 + lib->target_obj->attrs.sym_arg, 1044 + lib->target_obj->attrs.rmm_handle, name, 1045 + &dbll_sym); 1046 + } else { 1047 + /* Just check current lib for symbol */ 1048 + status = dbll_get_addr((struct dbll_library_obj *)lib, 1049 + (char *)name, &dbll_sym); 1050 + if (!status) { 1051 + status = 1052 + dbll_get_c_addr((struct dbll_library_obj *) 1053 + lib, (char *)name, 1054 + &dbll_sym); 1055 + } 1056 + } 1057 + } 1058 + 1059 + if (!status && gbl_search) 1060 + dev_dbg(bridge, "%s: Symbol not found: %s\n", __func__, name); 1061 + 1062 + DBC_ASSERT((status && (dbll_sym != NULL)) 1063 + || (!status && (dbll_sym == NULL))); 1064 + 1065 + ret_sym = (struct dynload_symbol *)dbll_sym; 1066 + return ret_sym; 1067 + } 1068 + 1069 + /* 1070 + * ======== find_in_symbol_table ======== 1071 + */ 1072 + static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym 1073 + *this, const char *name, 1074 + unsigned moduleid) 1075 + { 1076 + struct dynload_symbol *ret_sym; 1077 + struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; 1078 + struct dbll_library_obj *lib; 1079 + struct dbll_symbol *sym; 1080 + 1081 + DBC_REQUIRE(this != NULL); 1082 + lib = ldr_sym->lib; 1083 + DBC_REQUIRE(lib); 1084 + DBC_REQUIRE(lib->sym_tab != NULL); 1085 + 1086 
+ sym = (struct dbll_symbol *)gh_find(lib->sym_tab, (char *)name); 1087 + 1088 + ret_sym = (struct dynload_symbol *)&sym->value; 1089 + return ret_sym; 1090 + } 1091 + 1092 + /* 1093 + * ======== dbll_add_to_symbol_table ======== 1094 + */ 1095 + static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym 1096 + *this, const char *name, 1097 + unsigned moduleId) 1098 + { 1099 + struct dbll_symbol *sym_ptr = NULL; 1100 + struct dbll_symbol symbol; 1101 + struct dynload_symbol *dbll_sym = NULL; 1102 + struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; 1103 + struct dbll_library_obj *lib; 1104 + struct dynload_symbol *ret; 1105 + 1106 + DBC_REQUIRE(this != NULL); 1107 + DBC_REQUIRE(name); 1108 + lib = ldr_sym->lib; 1109 + DBC_REQUIRE(lib); 1110 + 1111 + /* Check to see if symbol is already defined in symbol table */ 1112 + if (!(lib->target_obj->attrs.base_image)) { 1113 + gbl_search = false; 1114 + dbll_sym = dbll_find_symbol(this, name); 1115 + gbl_search = true; 1116 + if (dbll_sym) { 1117 + redefined_symbol = true; 1118 + dev_dbg(bridge, "%s already defined in symbol table\n", 1119 + name); 1120 + return NULL; 1121 + } 1122 + } 1123 + /* Allocate string to copy symbol name */ 1124 + symbol.name = kzalloc(strlen((char *const)name) + 1, GFP_KERNEL); 1125 + if (symbol.name == NULL) 1126 + return NULL; 1127 + 1128 + if (symbol.name != NULL) { 1129 + /* Just copy name (value will be filled in by dynamic loader) */ 1130 + strncpy(symbol.name, (char *const)name, 1131 + strlen((char *const)name) + 1); 1132 + 1133 + /* Add symbol to symbol table */ 1134 + sym_ptr = 1135 + (struct dbll_symbol *)gh_insert(lib->sym_tab, (void *)name, 1136 + (void *)&symbol); 1137 + if (sym_ptr == NULL) 1138 + kfree(symbol.name); 1139 + 1140 + } 1141 + if (sym_ptr != NULL) 1142 + ret = (struct dynload_symbol *)&sym_ptr->value; 1143 + else 1144 + ret = NULL; 1145 + 1146 + return ret; 1147 + } 1148 + 1149 + /* 1150 + * ======== dbll_purge_symbol_table ======== 1151 + */ 
1152 + static void dbll_purge_symbol_table(struct dynamic_loader_sym *this, 1153 + unsigned moduleId) 1154 + { 1155 + struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; 1156 + struct dbll_library_obj *lib; 1157 + 1158 + DBC_REQUIRE(this != NULL); 1159 + lib = ldr_sym->lib; 1160 + DBC_REQUIRE(lib); 1161 + 1162 + /* May not need to do anything */ 1163 + } 1164 + 1165 + /* 1166 + * ======== allocate ======== 1167 + */ 1168 + static void *allocate(struct dynamic_loader_sym *this, unsigned memsize) 1169 + { 1170 + struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; 1171 + struct dbll_library_obj *lib; 1172 + void *buf; 1173 + 1174 + DBC_REQUIRE(this != NULL); 1175 + lib = ldr_sym->lib; 1176 + DBC_REQUIRE(lib); 1177 + 1178 + buf = kzalloc(memsize, GFP_KERNEL); 1179 + 1180 + return buf; 1181 + } 1182 + 1183 + /* 1184 + * ======== deallocate ======== 1185 + */ 1186 + static void deallocate(struct dynamic_loader_sym *this, void *memPtr) 1187 + { 1188 + struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; 1189 + struct dbll_library_obj *lib; 1190 + 1191 + DBC_REQUIRE(this != NULL); 1192 + lib = ldr_sym->lib; 1193 + DBC_REQUIRE(lib); 1194 + 1195 + kfree(memPtr); 1196 + } 1197 + 1198 + /* 1199 + * ======== dbll_err_report ======== 1200 + */ 1201 + static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr, 1202 + va_list args) 1203 + { 1204 + struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; 1205 + struct dbll_library_obj *lib; 1206 + char temp_buf[MAXEXPR]; 1207 + 1208 + DBC_REQUIRE(this != NULL); 1209 + lib = ldr_sym->lib; 1210 + DBC_REQUIRE(lib); 1211 + vsnprintf((char *)temp_buf, MAXEXPR, (char *)errstr, args); 1212 + dev_dbg(bridge, "%s\n", temp_buf); 1213 + } 1214 + 1215 + /* dynamic_loader_allocate */ 1216 + 1217 + /* 1218 + * ======== dbll_rmm_alloc ======== 1219 + */ 1220 + static int dbll_rmm_alloc(struct dynamic_loader_allocate *this, 1221 + struct ldr_section_info *info, unsigned align) 1222 + { 1223 + struct dbll_alloc 
/*
 *  ======== dbll_rmm_alloc ========
 *  Loader callback: allocate DSP memory for a section via the target's
 *  RMM allocator.  Segment hints ("DYN_DARAM"/"DYN_SARAM"/"DYN_EXTERNAL"
 *  plus a 0/1 requirement flag) may be encoded as colon-separated tokens
 *  in the section name; they are parsed out here before allocation.
 *  Returns TRUE on success, false on failure (dynamic-loader convention).
 */
static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
			  struct ldr_section_info *info, unsigned align)
{
	struct dbll_alloc *dbll_alloc_obj = (struct dbll_alloc *)this;
	struct dbll_library_obj *lib;
	int status = 0;
	u32 mem_sect_type;
	struct rmm_addr rmm_addr_obj;
	s32 ret = TRUE;
	unsigned stype = DLOAD_SECTION_TYPE(info->type);
	char *token = NULL;
	char *sz_sec_last_token = NULL;	/* second-to-last token seen */
	char *sz_last_token = NULL;	/* last token seen */
	char *sz_sect_name = NULL;	/* mutable copy for strsep() */
	char *psz_cur;
	s32 token_len = 0;
	s32 seg_id = -1;	/* -1: no segment hint in the name */
	s32 req = -1;		/* -1: no requirement hint in the name */
	s32 count = 0;
	u32 alloc_size = 0;
	u32 run_addr_flag = 0;	/* set when load and run addresses differ */

	DBC_REQUIRE(this != NULL);
	lib = dbll_alloc_obj->lib;
	DBC_REQUIRE(lib);

	/* Map loader section type to the bridge memory-section type */
	mem_sect_type =
	    (stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
						 DLOAD_BSS) ? DBLL_BSS :
	    DBLL_DATA;

	/* Attempt to extract the segment ID and requirement information from
	   the name of the section */
	DBC_REQUIRE(info->name);
	token_len = strlen((char *)(info->name)) + 1;

	sz_sect_name = kzalloc(token_len, GFP_KERNEL);
	sz_last_token = kzalloc(token_len, GFP_KERNEL);
	sz_sec_last_token = kzalloc(token_len, GFP_KERNEL);

	if (sz_sect_name == NULL || sz_sec_last_token == NULL ||
	    sz_last_token == NULL) {
		/* NOTE(review): on -ENOMEM we still fall through to the
		 * allocation below with default seg_id/req (-1), and status
		 * is overwritten by the alloc call — confirm this
		 * best-effort fallback is intended */
		status = -ENOMEM;
		goto func_cont;
	}
	strncpy(sz_sect_name, (char *)(info->name), token_len);
	psz_cur = sz_sect_name;
	/* Walk colon-separated tokens, remembering the last two */
	while ((token = strsep(&psz_cur, ":")) && *token != '\0') {
		strncpy(sz_sec_last_token, sz_last_token,
			strlen(sz_last_token) + 1);
		strncpy(sz_last_token, token, strlen(token) + 1);
		token = strsep(&psz_cur, ":");
		count++;	/* optimizes processing */
	}
	/* If token is 0 or 1, and sz_sec_last_token is DYN_DARAM or DYN_SARAM,
	   or DYN_EXTERNAL, then mem granularity information is present
	   within the section name - only process if there are at least three
	   tokens within the section name (just a minor optimization) */
	if (count >= 3)
		strict_strtol(sz_last_token, 10, (long *)&req);
		/* NOTE(review): strict_strtol return value ignored; on parse
		 * failure req keeps its -1 default, which is handled below */

	if ((req == 0) || (req == 1)) {
		if (strcmp(sz_sec_last_token, "DYN_DARAM") == 0) {
			seg_id = 0;
		} else {
			if (strcmp(sz_sec_last_token, "DYN_SARAM") == 0) {
				seg_id = 1;
			} else {
				if (strcmp(sz_sec_last_token,
					   "DYN_EXTERNAL") == 0)
					seg_id = 2;
			}
		}
	}
func_cont:
	/* Common cleanup for both the parsed and the -ENOMEM path */
	kfree(sz_sect_name);
	sz_sect_name = NULL;
	kfree(sz_last_token);
	sz_last_token = NULL;
	kfree(sz_sec_last_token);
	sz_sec_last_token = NULL;

	/* Code sections get extra room for the GEM L1P prefetch window */
	if (mem_sect_type == DBLL_CODE)
		alloc_size = info->size + GEM_L1P_PREFETCH_SIZE;
	else
		alloc_size = info->size;

	if (info->load_addr != info->run_addr)
		run_addr_flag = 1;
	/* TODO - ideally, we can pass the alignment requirement also
	 * from here */
	if (lib != NULL) {
		status =
		    (lib->target_obj->attrs.alloc) (lib->target_obj->attrs.
						    rmm_handle, mem_sect_type,
						    alloc_size, align,
						    (u32 *) &rmm_addr_obj,
						    seg_id, req, FALSE);
	}
	if (DSP_FAILED(status)) {
		ret = false;
	} else {
		/* RMM gives word address. Need to convert to byte address */
		info->load_addr = rmm_addr_obj.addr * DSPWORDSIZE;
		if (!run_addr_flag)
			info->run_addr = info->load_addr;
		info->context = (u32) rmm_addr_obj.segid;
		dev_dbg(bridge, "%s: %s base = 0x%x len = 0x%x, "
			"info->run_addr 0x%x, info->load_addr 0x%x\n",
			__func__, info->name, info->load_addr / DSPWORDSIZE,
			info->size / DSPWORDSIZE, info->run_addr,
			info->load_addr);
	}
	return ret;
}

/*
 *  ======== rmm_dealloc ========
 *  Loader callback: return a section's DSP memory to the RMM allocator.
 *  The segment id was stashed in info->context by dbll_rmm_alloc().
 */
static void rmm_dealloc(struct dynamic_loader_allocate *this,
			struct ldr_section_info *info)
{
	struct dbll_alloc *dbll_alloc_obj = (struct dbll_alloc *)this;
	struct dbll_library_obj *lib;
	u32 segid;
	int status = 0;
	unsigned stype = DLOAD_SECTION_TYPE(info->type);
	u32 mem_sect_type;
	u32 free_size = 0;

	mem_sect_type =
	    (stype == DLOAD_TEXT) ? DBLL_CODE : (stype ==
						 DLOAD_BSS) ? DBLL_BSS :
	    DBLL_DATA;
	DBC_REQUIRE(this != NULL);
	lib = dbll_alloc_obj->lib;
	DBC_REQUIRE(lib);
	/* segid was set by alloc function */
	segid = (u32) info->context;
	/* Must mirror the prefetch padding added at allocation time */
	if (mem_sect_type == DBLL_CODE)
		free_size = info->size + GEM_L1P_PREFETCH_SIZE;
	else
		free_size = info->size;
	if (lib != NULL) {
		/* NOTE(review): free status is captured but never checked —
		 * the callback has a void return, so failure is silent */
		status =
		    (lib->target_obj->attrs.free) (lib->target_obj->attrs.
						   sym_handle, segid,
						   info->load_addr /
						   DSPWORDSIZE, free_size,
						   false);
	}
}

/* dynamic_loader_initialize */
/*
 *  ======== connect ========
 *  Loader callback: nothing to set up; always reports success.
 */
static int connect(struct dynamic_loader_initialize *this)
{
	return true;
}
/*
 *  ======== read_mem ========
 *  This function does not need to be implemented.
 *  Always reports 0 bytes read; a real implementation would need the
 *  bridge_brd_read function (see comment below).
 */
static int read_mem(struct dynamic_loader_initialize *this, void *buf,
		    ldr_addr addr, struct ldr_section_info *info,
		    unsigned nbytes)
{
	struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
	struct dbll_library_obj *lib;
	int bytes_read = 0;

	DBC_REQUIRE(this != NULL);
	lib = init_obj->lib;
	DBC_REQUIRE(lib);
	/* Need bridge_brd_read function */
	return bytes_read;
}

/*
 *  ======== write_mem ========
 *  Loader callback: write @bytes from @buf to DSP address @addr via the
 *  target's write function, then (optionally) report the write to the
 *  target's log_write hook.  Returns false if no library is attached,
 *  otherwise the write function's result (true if no write fn exists).
 */
static int write_mem(struct dynamic_loader_initialize *this, void *buf,
		     ldr_addr addr, struct ldr_section_info *info,
		     unsigned bytes)
{
	struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
	struct dbll_library_obj *lib;
	struct dbll_tar_obj *target_obj;
	struct dbll_sect_info sect_info;
	u32 mem_sect_type;
	bool ret = true;

	DBC_REQUIRE(this != NULL);
	lib = init_obj->lib;
	if (!lib)
		return false;

	target_obj = lib->target_obj;

	/* BSS is treated as data here (only text vs. data matters) */
	mem_sect_type =
	    (DLOAD_SECTION_TYPE(info->type) ==
	     DLOAD_TEXT) ? DBLL_CODE : DBLL_DATA;
	if (target_obj && target_obj->attrs.write) {
		ret =
		    (*target_obj->attrs.write) (target_obj->attrs.input_params,
						addr, buf, bytes,
						mem_sect_type);

		if (target_obj->attrs.log_write) {
			sect_info.name = info->name;
			sect_info.sect_run_addr = info->run_addr;
			sect_info.sect_load_addr = info->load_addr;
			sect_info.size = info->size;
			sect_info.type = mem_sect_type;
			/* Pass the information about what we've written to
			 * another module */
			(*target_obj->attrs.log_write) (target_obj->attrs.
							log_write_handle,
							&sect_info, addr,
							bytes);
		}
	}
	return ret;
}

/*
 *  ======== fill_mem ========
 *  Fill bytes of memory at a given address with a given value by
 *  writing from a buffer containing the given value. Write in
 *  sets of MAXEXPR (128) bytes to avoid large stack buffer issues.
 */
static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr,
		    struct ldr_section_info *info, unsigned bytes, unsigned val)
{
	bool ret = true;
	char *pbuf;
	struct dbll_library_obj *lib;
	struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;

	DBC_REQUIRE(this != NULL);
	lib = init_obj->lib;
	/* NOTE(review): lib is dereferenced below without a NULL check,
	 * unlike write_mem() which bails out — confirm callers guarantee
	 * a non-NULL library here */
	pbuf = NULL;
	/* Pass the NULL pointer to write_mem to get the start address of Shared
	   memory. This is a trick to just get the start address, there is no
	   writing taking place with this Writemem
	 */
	if ((lib->target_obj->attrs.write) != (dbll_write_fxn) no_op)
		write_mem(this, &pbuf, addr, info, 0);
	/* pbuf now points into shared memory (if the trick succeeded);
	 * the fill is done directly on the host-mapped region */
	if (pbuf)
		memset(pbuf, val, bytes);

	return ret;
}

/*
 *  ======== execute ========
 *  Loader callback: record the image entry point on the library.
 *  No code is actually started here.
 */
static int execute(struct dynamic_loader_initialize *this, ldr_addr start)
{
	struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this;
	struct dbll_library_obj *lib;
	bool ret = true;

	DBC_REQUIRE(this != NULL);
	lib = init_obj->lib;
	DBC_REQUIRE(lib);
	/* Save entry point */
	if (lib != NULL)
		lib->entry = (u32) start;

	return ret;
}

/*
 *  ======== release ========
 *  Loader callback: nothing to tear down.
 */
static void release(struct dynamic_loader_initialize *this)
{
}
the search for the DSP symbol 1507 + * started. 1508 + * @cur_best_offset: Best offset to start looking for the DSP symbol 1509 + * @sym_addr: Address of the DSP symbol 1510 + * @name: Symbol name 1511 + * 1512 + */ 1513 + struct find_symbol_context { 1514 + /* input */ 1515 + u32 address; 1516 + u32 offset_range; 1517 + /* state */ 1518 + u32 cur_best_offset; 1519 + /* output */ 1520 + u32 sym_addr; 1521 + char name[120]; 1522 + }; 1523 + 1524 + /** 1525 + * find_symbol_callback() - Validates symbol address and copies the symbol name 1526 + * to the user data. 1527 + * @elem: dsp library context 1528 + * @user_data: Find symbol context 1529 + * 1530 + */ 1531 + void find_symbol_callback(void *elem, void *user_data) 1532 + { 1533 + struct dbll_symbol *symbol = elem; 1534 + struct find_symbol_context *context = user_data; 1535 + u32 symbol_addr = symbol->value.value; 1536 + u32 offset = context->address - symbol_addr; 1537 + 1538 + /* 1539 + * Address given should be greater than symbol address, 1540 + * symbol address should be within specified range 1541 + * and the offset should be better than previous one 1542 + */ 1543 + if (context->address >= symbol_addr && symbol_addr < (u32)-1 && 1544 + offset < context->cur_best_offset) { 1545 + context->cur_best_offset = offset; 1546 + context->sym_addr = symbol_addr; 1547 + strncpy(context->name, symbol->name, sizeof(context->name)); 1548 + } 1549 + 1550 + return; 1551 + } 1552 + 1553 + /** 1554 + * dbll_find_dsp_symbol() - This function retrieves the dsp symbol from the dsp binary. 1555 + * @zl_lib: DSP binary obj library pointer 1556 + * @address: Given address to find the dsp symbol 1557 + * @offset_range: offset range to look for dsp symbol 1558 + * @sym_addr_output: Symbol Output address 1559 + * @name_output: String with the dsp symbol 1560 + * 1561 + * This function retrieves the dsp symbol from the dsp binary. 
1562 + */ 1563 + bool dbll_find_dsp_symbol(struct dbll_library_obj *zl_lib, u32 address, 1564 + u32 offset_range, u32 *sym_addr_output, 1565 + char *name_output) 1566 + { 1567 + bool status = false; 1568 + struct find_symbol_context context; 1569 + 1570 + context.address = address; 1571 + context.offset_range = offset_range; 1572 + context.cur_best_offset = offset_range; 1573 + context.sym_addr = 0; 1574 + context.name[0] = '\0'; 1575 + 1576 + gh_iterate(zl_lib->sym_tab, find_symbol_callback, &context); 1577 + 1578 + if (context.name[0]) { 1579 + status = true; 1580 + strcpy(name_output, context.name); 1581 + *sym_addr_output = context.sym_addr; 1582 + } 1583 + 1584 + return status; 1585 + }
+1171
drivers/staging/tidspbridge/pmgr/dev.c
··· 1 + /* 2 + * dev.c 3 + * 4 + * DSP-BIOS Bridge driver support functions for TI OMAP processors. 5 + * 6 + * Implementation of Bridge Bridge driver device operations. 7 + * 8 + * Copyright (C) 2005-2006 Texas Instruments, Inc. 9 + * 10 + * This package is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License version 2 as 12 + * published by the Free Software Foundation. 13 + * 14 + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 15 + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 16 + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 17 + */ 18 + 19 + /* ----------------------------------- Host OS */ 20 + #include <dspbridge/host_os.h> 21 + 22 + /* ----------------------------------- DSP/BIOS Bridge */ 23 + #include <dspbridge/std.h> 24 + #include <dspbridge/dbdefs.h> 25 + 26 + /* ----------------------------------- Trace & Debug */ 27 + #include <dspbridge/dbc.h> 28 + 29 + /* ----------------------------------- OS Adaptation Layer */ 30 + #include <dspbridge/cfg.h> 31 + #include <dspbridge/ldr.h> 32 + #include <dspbridge/list.h> 33 + 34 + /* ----------------------------------- Platform Manager */ 35 + #include <dspbridge/cod.h> 36 + #include <dspbridge/drv.h> 37 + #include <dspbridge/proc.h> 38 + #include <dspbridge/dmm.h> 39 + 40 + /* ----------------------------------- Resource Manager */ 41 + #include <dspbridge/mgr.h> 42 + #include <dspbridge/node.h> 43 + 44 + /* ----------------------------------- Others */ 45 + #include <dspbridge/dspapi.h> /* DSP API version info. 
*/ 46 + 47 + #include <dspbridge/chnl.h> 48 + #include <dspbridge/io.h> 49 + #include <dspbridge/msg.h> 50 + #include <dspbridge/cmm.h> 51 + 52 + /* ----------------------------------- This */ 53 + #include <dspbridge/dev.h> 54 + 55 + /* ----------------------------------- Defines, Data Structures, Typedefs */ 56 + 57 + #define MAKEVERSION(major, minor) (major * 10 + minor) 58 + #define BRD_API_VERSION MAKEVERSION(BRD_API_MAJOR_VERSION, \ 59 + BRD_API_MINOR_VERSION) 60 + 61 + /* The Bridge device object: */ 62 + struct dev_object { 63 + /* LST requires "link" to be first field! */ 64 + struct list_head link; /* Link to next dev_object. */ 65 + u8 dev_type; /* Device Type */ 66 + struct cfg_devnode *dev_node_obj; /* Platform specific dev id */ 67 + /* Bridge Context Handle */ 68 + struct bridge_dev_context *hbridge_context; 69 + /* Function interface to Bridge driver. */ 70 + struct bridge_drv_interface bridge_interface; 71 + struct brd_object *lock_owner; /* Client with exclusive access. */ 72 + struct cod_manager *cod_mgr; /* Code manager handle. */ 73 + struct chnl_mgr *hchnl_mgr; /* Channel manager. */ 74 + struct deh_mgr *hdeh_mgr; /* DEH manager. */ 75 + struct msg_mgr *hmsg_mgr; /* Message manager. */ 76 + struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */ 77 + struct cmm_object *hcmm_mgr; /* SM memory manager. */ 78 + struct dmm_object *dmm_mgr; /* Dynamic memory manager. */ 79 + struct ldr_module *module_obj; /* Bridge Module handle. */ 80 + u32 word_size; /* DSP word size: quick access. 
*/ 81 + struct drv_object *hdrv_obj; /* Driver Object */ 82 + struct lst_list *proc_list; /* List of Proceeosr attached to 83 + * this device */ 84 + struct node_mgr *hnode_mgr; 85 + }; 86 + 87 + /* ----------------------------------- Globals */ 88 + static u32 refs; /* Module reference count */ 89 + 90 + /* ----------------------------------- Function Prototypes */ 91 + static int fxn_not_implemented(int arg, ...); 92 + static int init_cod_mgr(struct dev_object *dev_obj); 93 + static void store_interface_fxns(struct bridge_drv_interface *drv_fxns, 94 + OUT struct bridge_drv_interface *intf_fxns); 95 + /* 96 + * ======== dev_brd_write_fxn ======== 97 + * Purpose: 98 + * Exported function to be used as the COD write function. This function 99 + * is passed a handle to a DEV_hObject, then calls the 100 + * device's bridge_brd_write() function. 101 + */ 102 + u32 dev_brd_write_fxn(void *pArb, u32 ulDspAddr, void *pHostBuf, 103 + u32 ul_num_bytes, u32 nMemSpace) 104 + { 105 + struct dev_object *dev_obj = (struct dev_object *)pArb; 106 + u32 ul_written = 0; 107 + int status; 108 + 109 + DBC_REQUIRE(refs > 0); 110 + DBC_REQUIRE(pHostBuf != NULL); /* Required of BrdWrite(). */ 111 + if (dev_obj) { 112 + /* Require of BrdWrite() */ 113 + DBC_ASSERT(dev_obj->hbridge_context != NULL); 114 + status = (*dev_obj->bridge_interface.pfn_brd_write) ( 115 + dev_obj->hbridge_context, pHostBuf, 116 + ulDspAddr, ul_num_bytes, nMemSpace); 117 + /* Special case of getting the address only */ 118 + if (ul_num_bytes == 0) 119 + ul_num_bytes = 1; 120 + if (DSP_SUCCEEDED(status)) 121 + ul_written = ul_num_bytes; 122 + 123 + } 124 + return ul_written; 125 + } 126 + 127 + /* 128 + * ======== dev_create_device ======== 129 + * Purpose: 130 + * Called by the operating system to load the PM Bridge Driver for a 131 + * PM board (device). 
/*
 *  ======== dev_create_device ========
 *  Purpose:
 *      Called by the operating system to load the PM Bridge Driver for a
 *      PM board (device).  Reserves bridge resources, creates the device
 *      object and, in order: COD manager, channel manager, CMM manager,
 *      IO manager, DEH manager, DMM manager; then links the device into
 *      the global driver list and allocates its processor list.
 *  On failure everything created so far is torn down and *phDevObject
 *  is set to NULL.
 */
int dev_create_device(OUT struct dev_object **phDevObject,
		      IN CONST char *driver_file_name,
		      struct cfg_devnode *dev_node_obj)
{
	struct cfg_hostres *host_res;
	struct ldr_module *module_obj = NULL;
	struct bridge_drv_interface *drv_fxns = NULL;
	struct dev_object *dev_obj = NULL;
	struct chnl_mgrattrs mgr_attrs;
	struct io_attrs io_mgr_attrs;
	u32 num_windows;
	struct drv_object *hdrv_obj = NULL;
	int status = 0;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(phDevObject != NULL);
	DBC_REQUIRE(driver_file_name != NULL);

	status = drv_request_bridge_res_dsp((void *)&host_res);

	if (DSP_FAILED(status)) {
		dev_dbg(bridge, "%s: Failed to reserve bridge resources\n",
			__func__);
		goto leave;
	}

	/* Get the Bridge driver interface functions */
	bridge_drv_entry(&drv_fxns, driver_file_name);
	if (DSP_FAILED(cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT))) {
		/* don't propogate CFG errors from this PROC function */
		status = -EPERM;
	}
	/* Create the device object, and pass a handle to the Bridge driver for
	 * storage. */
	if (DSP_SUCCEEDED(status)) {
		DBC_ASSERT(drv_fxns);
		dev_obj = kzalloc(sizeof(struct dev_object), GFP_KERNEL);
		if (dev_obj) {
			/* Fill out the rest of the Dev Object structure: */
			dev_obj->dev_node_obj = dev_node_obj;
			dev_obj->module_obj = module_obj;
			dev_obj->cod_mgr = NULL;
			dev_obj->hchnl_mgr = NULL;
			dev_obj->hdeh_mgr = NULL;
			dev_obj->lock_owner = NULL;
			dev_obj->word_size = DSPWORDSIZE;
			dev_obj->hdrv_obj = hdrv_obj;
			dev_obj->dev_type = DSP_UNIT;
			/* Store this Bridge's interface functions, based on its
			 * version. */
			store_interface_fxns(drv_fxns,
					     &dev_obj->bridge_interface);

			/* Call fxn_dev_create() to get the Bridge's device
			 * context handle. */
			status = (dev_obj->bridge_interface.pfn_dev_create)
			    (&dev_obj->hbridge_context, dev_obj,
			     host_res);
			/* Assert bridge_dev_create()'s ensure clause: */
			DBC_ASSERT(DSP_FAILED(status)
				   || (dev_obj->hbridge_context != NULL));
		} else {
			status = -ENOMEM;
		}
	}
	/* Attempt to create the COD manager for this device: */
	if (DSP_SUCCEEDED(status))
		status = init_cod_mgr(dev_obj);

	/* Attempt to create the channel manager for this device: */
	if (DSP_SUCCEEDED(status)) {
		mgr_attrs.max_channels = CHNL_MAXCHANNELS;
		io_mgr_attrs.birq = host_res->birq_registers;
		io_mgr_attrs.irq_shared =
		    (host_res->birq_attrib & CFG_IRQSHARED);
		io_mgr_attrs.word_size = DSPWORDSIZE;
		mgr_attrs.word_size = DSPWORDSIZE;
		num_windows = host_res->num_mem_windows;
		if (num_windows) {
			/* Assume last memory window is for CHNL */
			io_mgr_attrs.shm_base = host_res->dw_mem_base[1] +
			    host_res->dw_offset_for_monitor;
			io_mgr_attrs.usm_length =
			    host_res->dw_mem_length[1] -
			    host_res->dw_offset_for_monitor;
		} else {
			io_mgr_attrs.shm_base = 0;
			io_mgr_attrs.usm_length = 0;
			pr_err("%s: No memory reserved for shared structures\n",
			       __func__);
		}
		status = chnl_create(&dev_obj->hchnl_mgr, dev_obj, &mgr_attrs);
		if (status == -ENOSYS) {
			/* It's OK for a device not to have a channel
			 * manager: */
			status = 0;
		}
		/* Create CMM mgr even if Msg Mgr not impl. */
		/* NOTE(review): cmm_create unconditionally overwrites any
		 * prior chnl_create failure status — confirm intended */
		status = cmm_create(&dev_obj->hcmm_mgr,
				    (struct dev_object *)dev_obj, NULL);
		/* Only create IO manager if we have a channel manager */
		if (DSP_SUCCEEDED(status) && dev_obj->hchnl_mgr) {
			status = io_create(&dev_obj->hio_mgr, dev_obj,
					   &io_mgr_attrs);
		}
		/* Only create DEH manager if we have an IO manager */
		if (DSP_SUCCEEDED(status)) {
			/* Instantiate the DEH module */
			status = (*dev_obj->bridge_interface.pfn_deh_create)
			    (&dev_obj->hdeh_mgr, dev_obj);
		}
		/* Create DMM mgr . */
		/* NOTE(review): dmm_create likewise overwrites the previous
		 * status unconditionally */
		status = dmm_create(&dev_obj->dmm_mgr,
				    (struct dev_object *)dev_obj, NULL);
	}
	/* Add the new DEV_Object to the global list: */
	if (DSP_SUCCEEDED(status)) {
		lst_init_elem(&dev_obj->link);
		status = drv_insert_dev_object(hdrv_obj, dev_obj);
	}
	/* Create the Processor List */
	if (DSP_SUCCEEDED(status)) {
		dev_obj->proc_list = kzalloc(sizeof(struct lst_list),
					     GFP_KERNEL);
		if (!(dev_obj->proc_list))
			status = -EPERM;
		else
			INIT_LIST_HEAD(&dev_obj->proc_list->head);
	}
leave:
	/* If all went well, return a handle to the dev object;
	 * else, cleanup and return NULL in the OUT parameter. */
	if (DSP_SUCCEEDED(status)) {
		*phDevObject = dev_obj;
	} else {
		if (dev_obj) {
			kfree(dev_obj->proc_list);
			if (dev_obj->cod_mgr)
				cod_delete(dev_obj->cod_mgr);
			if (dev_obj->dmm_mgr)
				dmm_destroy(dev_obj->dmm_mgr);
			kfree(dev_obj);
		}

		*phDevObject = NULL;
	}

	DBC_ENSURE((DSP_SUCCEEDED(status) && *phDevObject) ||
		   (DSP_FAILED(status) && !*phDevObject));
	return status;
}
This creates 289 + * the Node Manager and updates the DEV Object. 290 + */ 291 + int dev_create2(struct dev_object *hdev_obj) 292 + { 293 + int status = 0; 294 + struct dev_object *dev_obj = hdev_obj; 295 + 296 + DBC_REQUIRE(refs > 0); 297 + DBC_REQUIRE(hdev_obj); 298 + 299 + /* There can be only one Node Manager per DEV object */ 300 + DBC_ASSERT(!dev_obj->hnode_mgr); 301 + status = node_create_mgr(&dev_obj->hnode_mgr, hdev_obj); 302 + if (DSP_FAILED(status)) 303 + dev_obj->hnode_mgr = NULL; 304 + 305 + DBC_ENSURE((DSP_SUCCEEDED(status) && dev_obj->hnode_mgr != NULL) 306 + || (DSP_FAILED(status) && dev_obj->hnode_mgr == NULL)); 307 + return status; 308 + } 309 + 310 + /* 311 + * ======== dev_destroy2 ======== 312 + * Purpose: 313 + * Destroys the Node manager for this device. 314 + */ 315 + int dev_destroy2(struct dev_object *hdev_obj) 316 + { 317 + int status = 0; 318 + struct dev_object *dev_obj = hdev_obj; 319 + 320 + DBC_REQUIRE(refs > 0); 321 + DBC_REQUIRE(hdev_obj); 322 + 323 + if (dev_obj->hnode_mgr) { 324 + if (DSP_FAILED(node_delete_mgr(dev_obj->hnode_mgr))) 325 + status = -EPERM; 326 + else 327 + dev_obj->hnode_mgr = NULL; 328 + 329 + } 330 + 331 + DBC_ENSURE((DSP_SUCCEEDED(status) && dev_obj->hnode_mgr == NULL) || 332 + DSP_FAILED(status)); 333 + return status; 334 + } 335 + 336 + /* 337 + * ======== dev_destroy_device ======== 338 + * Purpose: 339 + * Destroys the channel manager for this device, if any, calls 340 + * bridge_dev_destroy(), and then attempts to unload the Bridge module. 
/*
 *  ======== dev_destroy_device ========
 *  Purpose:
 *      Destroys the channel manager for this device, if any, calls
 *      bridge_dev_destroy(), and then attempts to unload the Bridge module.
 *  Teardown order matters: node manager first, then io/chnl/msg managers,
 *  then DEH, CMM and DMM, and only then the bridge device context.
 */
int dev_destroy_device(struct dev_object *hdev_obj)
{
	int status = 0;
	struct dev_object *dev_obj = hdev_obj;

	DBC_REQUIRE(refs > 0);

	if (hdev_obj) {
		if (dev_obj->cod_mgr) {
			cod_delete(dev_obj->cod_mgr);
			dev_obj->cod_mgr = NULL;
		}

		if (dev_obj->hnode_mgr) {
			node_delete_mgr(dev_obj->hnode_mgr);
			dev_obj->hnode_mgr = NULL;
		}

		/* Free the io, channel, and message managers for this board: */
		if (dev_obj->hio_mgr) {
			io_destroy(dev_obj->hio_mgr);
			dev_obj->hio_mgr = NULL;
		}
		if (dev_obj->hchnl_mgr) {
			chnl_destroy(dev_obj->hchnl_mgr);
			dev_obj->hchnl_mgr = NULL;
		}
		if (dev_obj->hmsg_mgr) {
			msg_delete(dev_obj->hmsg_mgr);
			dev_obj->hmsg_mgr = NULL;
		}

		if (dev_obj->hdeh_mgr) {
			/* Uninitialize DEH module. */
			(*dev_obj->bridge_interface.pfn_deh_destroy)
			    (dev_obj->hdeh_mgr);
			dev_obj->hdeh_mgr = NULL;
		}
		if (dev_obj->hcmm_mgr) {
			cmm_destroy(dev_obj->hcmm_mgr, true);
			dev_obj->hcmm_mgr = NULL;
		}

		if (dev_obj->dmm_mgr) {
			dmm_destroy(dev_obj->dmm_mgr);
			dev_obj->dmm_mgr = NULL;
		}

		/* Call the driver's bridge_dev_destroy() function: */
		/* Require of DevDestroy */
		if (dev_obj->hbridge_context) {
			status = (*dev_obj->bridge_interface.pfn_dev_destroy)
			    (dev_obj->hbridge_context);
			dev_obj->hbridge_context = NULL;
		} else
			status = -EPERM;
		/* NOTE(review): if pfn_dev_destroy fails, dev_obj and its
		 * proc_list are not freed and the object stays on the global
		 * list — confirm this leak-on-failure is intended */
		if (DSP_SUCCEEDED(status)) {
			kfree(dev_obj->proc_list);
			dev_obj->proc_list = NULL;

			/* Remove this DEV_Object from the global list: */
			drv_remove_dev_object(dev_obj->hdrv_obj, dev_obj);
			/* Free The library * LDR_FreeModule
			 * (dev_obj->module_obj); */
			/* Free this dev object: */
			kfree(dev_obj);
			dev_obj = NULL;
		}
	} else {
		status = -EFAULT;
	}

	return status;
}
414 + return status; 415 + } 416 + 417 + /* 418 + * ======== dev_get_chnl_mgr ======== 419 + * Purpose: 420 + * Retrieve the handle to the channel manager handle created for this 421 + * device. 422 + */ 423 + int dev_get_chnl_mgr(struct dev_object *hdev_obj, 424 + OUT struct chnl_mgr **phMgr) 425 + { 426 + int status = 0; 427 + struct dev_object *dev_obj = hdev_obj; 428 + 429 + DBC_REQUIRE(refs > 0); 430 + DBC_REQUIRE(phMgr != NULL); 431 + 432 + if (hdev_obj) { 433 + *phMgr = dev_obj->hchnl_mgr; 434 + } else { 435 + *phMgr = NULL; 436 + status = -EFAULT; 437 + } 438 + 439 + DBC_ENSURE(DSP_SUCCEEDED(status) || ((phMgr != NULL) && 440 + (*phMgr == NULL))); 441 + return status; 442 + } 443 + 444 + /* 445 + * ======== dev_get_cmm_mgr ======== 446 + * Purpose: 447 + * Retrieve the handle to the shared memory manager created for this 448 + * device. 449 + */ 450 + int dev_get_cmm_mgr(struct dev_object *hdev_obj, 451 + OUT struct cmm_object **phMgr) 452 + { 453 + int status = 0; 454 + struct dev_object *dev_obj = hdev_obj; 455 + 456 + DBC_REQUIRE(refs > 0); 457 + DBC_REQUIRE(phMgr != NULL); 458 + 459 + if (hdev_obj) { 460 + *phMgr = dev_obj->hcmm_mgr; 461 + } else { 462 + *phMgr = NULL; 463 + status = -EFAULT; 464 + } 465 + 466 + DBC_ENSURE(DSP_SUCCEEDED(status) || ((phMgr != NULL) && 467 + (*phMgr == NULL))); 468 + return status; 469 + } 470 + 471 + /* 472 + * ======== dev_get_dmm_mgr ======== 473 + * Purpose: 474 + * Retrieve the handle to the dynamic memory manager created for this 475 + * device. 
476 + */ 477 + int dev_get_dmm_mgr(struct dev_object *hdev_obj, 478 + OUT struct dmm_object **phMgr) 479 + { 480 + int status = 0; 481 + struct dev_object *dev_obj = hdev_obj; 482 + 483 + DBC_REQUIRE(refs > 0); 484 + DBC_REQUIRE(phMgr != NULL); 485 + 486 + if (hdev_obj) { 487 + *phMgr = dev_obj->dmm_mgr; 488 + } else { 489 + *phMgr = NULL; 490 + status = -EFAULT; 491 + } 492 + 493 + DBC_ENSURE(DSP_SUCCEEDED(status) || ((phMgr != NULL) && 494 + (*phMgr == NULL))); 495 + return status; 496 + } 497 + 498 + /* 499 + * ======== dev_get_cod_mgr ======== 500 + * Purpose: 501 + * Retrieve the COD manager create for this device. 502 + */ 503 + int dev_get_cod_mgr(struct dev_object *hdev_obj, 504 + OUT struct cod_manager **phCodMgr) 505 + { 506 + int status = 0; 507 + struct dev_object *dev_obj = hdev_obj; 508 + 509 + DBC_REQUIRE(refs > 0); 510 + DBC_REQUIRE(phCodMgr != NULL); 511 + 512 + if (hdev_obj) { 513 + *phCodMgr = dev_obj->cod_mgr; 514 + } else { 515 + *phCodMgr = NULL; 516 + status = -EFAULT; 517 + } 518 + 519 + DBC_ENSURE(DSP_SUCCEEDED(status) || ((phCodMgr != NULL) && 520 + (*phCodMgr == NULL))); 521 + return status; 522 + } 523 + 524 + /* 525 + * ========= dev_get_deh_mgr ======== 526 + */ 527 + int dev_get_deh_mgr(struct dev_object *hdev_obj, 528 + OUT struct deh_mgr **phDehMgr) 529 + { 530 + int status = 0; 531 + 532 + DBC_REQUIRE(refs > 0); 533 + DBC_REQUIRE(phDehMgr != NULL); 534 + DBC_REQUIRE(hdev_obj); 535 + if (hdev_obj) { 536 + *phDehMgr = hdev_obj->hdeh_mgr; 537 + } else { 538 + *phDehMgr = NULL; 539 + status = -EFAULT; 540 + } 541 + return status; 542 + } 543 + 544 + /* 545 + * ======== dev_get_dev_node ======== 546 + * Purpose: 547 + * Retrieve the platform specific device ID for this device. 
548 + */ 549 + int dev_get_dev_node(struct dev_object *hdev_obj, 550 + OUT struct cfg_devnode **phDevNode) 551 + { 552 + int status = 0; 553 + struct dev_object *dev_obj = hdev_obj; 554 + 555 + DBC_REQUIRE(refs > 0); 556 + DBC_REQUIRE(phDevNode != NULL); 557 + 558 + if (hdev_obj) { 559 + *phDevNode = dev_obj->dev_node_obj; 560 + } else { 561 + *phDevNode = NULL; 562 + status = -EFAULT; 563 + } 564 + 565 + DBC_ENSURE(DSP_SUCCEEDED(status) || ((phDevNode != NULL) && 566 + (*phDevNode == NULL))); 567 + return status; 568 + } 569 + 570 + /* 571 + * ======== dev_get_first ======== 572 + * Purpose: 573 + * Retrieve the first Device Object handle from an internal linked list 574 + * DEV_OBJECTs maintained by DEV. 575 + */ 576 + struct dev_object *dev_get_first(void) 577 + { 578 + struct dev_object *dev_obj = NULL; 579 + 580 + dev_obj = (struct dev_object *)drv_get_first_dev_object(); 581 + 582 + return dev_obj; 583 + } 584 + 585 + /* 586 + * ======== dev_get_intf_fxns ======== 587 + * Purpose: 588 + * Retrieve the Bridge interface function structure for the loaded driver. 589 + * ppIntfFxns != NULL. 
590 + */ 591 + int dev_get_intf_fxns(struct dev_object *hdev_obj, 592 + OUT struct bridge_drv_interface **ppIntfFxns) 593 + { 594 + int status = 0; 595 + struct dev_object *dev_obj = hdev_obj; 596 + 597 + DBC_REQUIRE(refs > 0); 598 + DBC_REQUIRE(ppIntfFxns != NULL); 599 + 600 + if (hdev_obj) { 601 + *ppIntfFxns = &dev_obj->bridge_interface; 602 + } else { 603 + *ppIntfFxns = NULL; 604 + status = -EFAULT; 605 + } 606 + 607 + DBC_ENSURE(DSP_SUCCEEDED(status) || ((ppIntfFxns != NULL) && 608 + (*ppIntfFxns == NULL))); 609 + return status; 610 + } 611 + 612 + /* 613 + * ========= dev_get_io_mgr ======== 614 + */ 615 + int dev_get_io_mgr(struct dev_object *hdev_obj, 616 + OUT struct io_mgr **phIOMgr) 617 + { 618 + int status = 0; 619 + 620 + DBC_REQUIRE(refs > 0); 621 + DBC_REQUIRE(phIOMgr != NULL); 622 + DBC_REQUIRE(hdev_obj); 623 + 624 + if (hdev_obj) { 625 + *phIOMgr = hdev_obj->hio_mgr; 626 + } else { 627 + *phIOMgr = NULL; 628 + status = -EFAULT; 629 + } 630 + 631 + return status; 632 + } 633 + 634 + /* 635 + * ======== dev_get_next ======== 636 + * Purpose: 637 + * Retrieve the next Device Object handle from an internal linked list 638 + * of DEV_OBJECTs maintained by DEV, after having previously called 639 + * dev_get_first() and zero or more dev_get_next 640 + */ 641 + struct dev_object *dev_get_next(struct dev_object *hdev_obj) 642 + { 643 + struct dev_object *next_dev_object = NULL; 644 + 645 + if (hdev_obj) { 646 + next_dev_object = (struct dev_object *) 647 + drv_get_next_dev_object((u32) hdev_obj); 648 + } 649 + 650 + return next_dev_object; 651 + } 652 + 653 + /* 654 + * ========= dev_get_msg_mgr ======== 655 + */ 656 + void dev_get_msg_mgr(struct dev_object *hdev_obj, OUT struct msg_mgr **phMsgMgr) 657 + { 658 + DBC_REQUIRE(refs > 0); 659 + DBC_REQUIRE(phMsgMgr != NULL); 660 + DBC_REQUIRE(hdev_obj); 661 + 662 + *phMsgMgr = hdev_obj->hmsg_mgr; 663 + } 664 + 665 + /* 666 + * ======== dev_get_node_manager ======== 667 + * Purpose: 668 + * Retrieve the Node 
Manager Handle 669 + */ 670 + int dev_get_node_manager(struct dev_object *hdev_obj, 671 + OUT struct node_mgr **phNodeMgr) 672 + { 673 + int status = 0; 674 + struct dev_object *dev_obj = hdev_obj; 675 + 676 + DBC_REQUIRE(refs > 0); 677 + DBC_REQUIRE(phNodeMgr != NULL); 678 + 679 + if (hdev_obj) { 680 + *phNodeMgr = dev_obj->hnode_mgr; 681 + } else { 682 + *phNodeMgr = NULL; 683 + status = -EFAULT; 684 + } 685 + 686 + DBC_ENSURE(DSP_SUCCEEDED(status) || ((phNodeMgr != NULL) && 687 + (*phNodeMgr == NULL))); 688 + return status; 689 + } 690 + 691 + /* 692 + * ======== dev_get_symbol ======== 693 + */ 694 + int dev_get_symbol(struct dev_object *hdev_obj, 695 + IN CONST char *pstrSym, OUT u32 * pul_value) 696 + { 697 + int status = 0; 698 + struct cod_manager *cod_mgr; 699 + 700 + DBC_REQUIRE(refs > 0); 701 + DBC_REQUIRE(pstrSym != NULL && pul_value != NULL); 702 + 703 + if (hdev_obj) { 704 + status = dev_get_cod_mgr(hdev_obj, &cod_mgr); 705 + if (cod_mgr) 706 + status = cod_get_sym_value(cod_mgr, (char *)pstrSym, 707 + pul_value); 708 + else 709 + status = -EFAULT; 710 + } 711 + 712 + return status; 713 + } 714 + 715 + /* 716 + * ======== dev_get_bridge_context ======== 717 + * Purpose: 718 + * Retrieve the Bridge Context handle, as returned by the 719 + * bridge_dev_create fxn. 
720 + */ 721 + int dev_get_bridge_context(struct dev_object *hdev_obj, 722 + OUT struct bridge_dev_context **phbridge_context) 723 + { 724 + int status = 0; 725 + struct dev_object *dev_obj = hdev_obj; 726 + 727 + DBC_REQUIRE(refs > 0); 728 + DBC_REQUIRE(phbridge_context != NULL); 729 + 730 + if (hdev_obj) { 731 + *phbridge_context = dev_obj->hbridge_context; 732 + } else { 733 + *phbridge_context = NULL; 734 + status = -EFAULT; 735 + } 736 + 737 + DBC_ENSURE(DSP_SUCCEEDED(status) || ((phbridge_context != NULL) && 738 + (*phbridge_context == NULL))); 739 + return status; 740 + } 741 + 742 + /* 743 + * ======== dev_exit ======== 744 + * Purpose: 745 + * Decrement reference count, and free resources when reference count is 746 + * 0. 747 + */ 748 + void dev_exit(void) 749 + { 750 + DBC_REQUIRE(refs > 0); 751 + 752 + refs--; 753 + 754 + if (refs == 0) { 755 + cmm_exit(); 756 + dmm_exit(); 757 + } 758 + 759 + DBC_ENSURE(refs >= 0); 760 + } 761 + 762 + /* 763 + * ======== dev_init ======== 764 + * Purpose: 765 + * Initialize DEV's private state, keeping a reference count on each call. 766 + */ 767 + bool dev_init(void) 768 + { 769 + bool cmm_ret, dmm_ret, ret = true; 770 + 771 + DBC_REQUIRE(refs >= 0); 772 + 773 + if (refs == 0) { 774 + cmm_ret = cmm_init(); 775 + dmm_ret = dmm_init(); 776 + 777 + ret = cmm_ret && dmm_ret; 778 + 779 + if (!ret) { 780 + if (cmm_ret) 781 + cmm_exit(); 782 + 783 + if (dmm_ret) 784 + dmm_exit(); 785 + 786 + } 787 + } 788 + 789 + if (ret) 790 + refs++; 791 + 792 + DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0))); 793 + 794 + return ret; 795 + } 796 + 797 + /* 798 + * ======== dev_notify_clients ======== 799 + * Purpose: 800 + * Notify all clients of this device of a change in device status. 
801 + */ 802 + int dev_notify_clients(struct dev_object *hdev_obj, u32 ulStatus) 803 + { 804 + int status = 0; 805 + 806 + struct dev_object *dev_obj = hdev_obj; 807 + void *proc_obj; 808 + 809 + for (proc_obj = (void *)lst_first(dev_obj->proc_list); 810 + proc_obj != NULL; 811 + proc_obj = (void *)lst_next(dev_obj->proc_list, 812 + (struct list_head *)proc_obj)) 813 + proc_notify_clients(proc_obj, (u32) ulStatus); 814 + 815 + return status; 816 + } 817 + 818 + /* 819 + * ======== dev_remove_device ======== 820 + */ 821 + int dev_remove_device(struct cfg_devnode *dev_node_obj) 822 + { 823 + struct dev_object *hdev_obj; /* handle to device object */ 824 + int status = 0; 825 + struct dev_object *dev_obj; 826 + 827 + /* Retrieve the device object handle originaly stored with 828 + * the dev_node: */ 829 + status = cfg_get_dev_object(dev_node_obj, (u32 *) &hdev_obj); 830 + if (DSP_SUCCEEDED(status)) { 831 + /* Remove the Processor List */ 832 + dev_obj = (struct dev_object *)hdev_obj; 833 + /* Destroy the device object. */ 834 + status = dev_destroy_device(hdev_obj); 835 + } 836 + 837 + return status; 838 + } 839 + 840 + /* 841 + * ======== dev_set_chnl_mgr ======== 842 + * Purpose: 843 + * Set the channel manager for this device. 844 + */ 845 + int dev_set_chnl_mgr(struct dev_object *hdev_obj, 846 + struct chnl_mgr *hmgr) 847 + { 848 + int status = 0; 849 + struct dev_object *dev_obj = hdev_obj; 850 + 851 + DBC_REQUIRE(refs > 0); 852 + 853 + if (hdev_obj) 854 + dev_obj->hchnl_mgr = hmgr; 855 + else 856 + status = -EFAULT; 857 + 858 + DBC_ENSURE(DSP_FAILED(status) || (dev_obj->hchnl_mgr == hmgr)); 859 + return status; 860 + } 861 + 862 + /* 863 + * ======== dev_set_msg_mgr ======== 864 + * Purpose: 865 + * Set the message manager for this device. 
866 + */ 867 + void dev_set_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr *hmgr) 868 + { 869 + DBC_REQUIRE(refs > 0); 870 + DBC_REQUIRE(hdev_obj); 871 + 872 + hdev_obj->hmsg_mgr = hmgr; 873 + } 874 + 875 + /* 876 + * ======== dev_start_device ======== 877 + * Purpose: 878 + * Initializes the new device with the BRIDGE environment. 879 + */ 880 + int dev_start_device(struct cfg_devnode *dev_node_obj) 881 + { 882 + struct dev_object *hdev_obj = NULL; /* handle to 'Bridge Device */ 883 + /* Bridge driver filename */ 884 + char bridge_file_name[CFG_MAXSEARCHPATHLEN] = "UMA"; 885 + int status; 886 + struct mgr_object *hmgr_obj = NULL; 887 + 888 + DBC_REQUIRE(refs > 0); 889 + 890 + /* Given all resources, create a device object. */ 891 + status = dev_create_device(&hdev_obj, bridge_file_name, 892 + dev_node_obj); 893 + if (DSP_SUCCEEDED(status)) { 894 + /* Store away the hdev_obj with the DEVNODE */ 895 + status = cfg_set_dev_object(dev_node_obj, (u32) hdev_obj); 896 + if (DSP_FAILED(status)) { 897 + /* Clean up */ 898 + dev_destroy_device(hdev_obj); 899 + hdev_obj = NULL; 900 + } 901 + } 902 + if (DSP_SUCCEEDED(status)) { 903 + /* Create the Manager Object */ 904 + status = mgr_create(&hmgr_obj, dev_node_obj); 905 + } 906 + if (DSP_FAILED(status)) { 907 + if (hdev_obj) 908 + dev_destroy_device(hdev_obj); 909 + 910 + /* Ensure the device extension is NULL */ 911 + cfg_set_dev_object(dev_node_obj, 0L); 912 + } 913 + 914 + return status; 915 + } 916 + 917 + /* 918 + * ======== fxn_not_implemented ======== 919 + * Purpose: 920 + * Takes the place of a Bridge Null Function. 921 + * Parameters: 922 + * Multiple, optional. 923 + * Returns: 924 + * -ENOSYS: Always. 925 + */ 926 + static int fxn_not_implemented(int arg, ...) 927 + { 928 + return -ENOSYS; 929 + } 930 + 931 + /* 932 + * ======== init_cod_mgr ======== 933 + * Purpose: 934 + * Create a COD manager for this device. 
935 + * Parameters: 936 + * dev_obj: Pointer to device object created with 937 + * dev_create_device() 938 + * Returns: 939 + * 0: Success. 940 + * -EFAULT: Invalid hdev_obj. 941 + * Requires: 942 + * Should only be called once by dev_create_device() for a given DevObject. 943 + * Ensures: 944 + */ 945 + static int init_cod_mgr(struct dev_object *dev_obj) 946 + { 947 + int status = 0; 948 + char *sz_dummy_file = "dummy"; 949 + 950 + DBC_REQUIRE(refs > 0); 951 + DBC_REQUIRE(!dev_obj || (dev_obj->cod_mgr == NULL)); 952 + 953 + status = cod_create(&dev_obj->cod_mgr, sz_dummy_file, NULL); 954 + 955 + return status; 956 + } 957 + 958 + /* 959 + * ======== dev_insert_proc_object ======== 960 + * Purpose: 961 + * Insert a ProcObject into the list maintained by DEV. 962 + * Parameters: 963 + * p_proc_object: Ptr to ProcObject to insert. 964 + * dev_obj: Ptr to Dev Object where the list is. 965 + * pbAlreadyAttached: Ptr to return the bool 966 + * Returns: 967 + * 0: If successful. 968 + * Requires: 969 + * List Exists 970 + * hdev_obj is Valid handle 971 + * DEV Initialized 972 + * pbAlreadyAttached != NULL 973 + * proc_obj != 0 974 + * Ensures: 975 + * 0 and List is not Empty. 976 + */ 977 + int dev_insert_proc_object(struct dev_object *hdev_obj, 978 + u32 proc_obj, OUT bool *pbAlreadyAttached) 979 + { 980 + int status = 0; 981 + struct dev_object *dev_obj = (struct dev_object *)hdev_obj; 982 + 983 + DBC_REQUIRE(refs > 0); 984 + DBC_REQUIRE(dev_obj); 985 + DBC_REQUIRE(proc_obj != 0); 986 + DBC_REQUIRE(dev_obj->proc_list != NULL); 987 + DBC_REQUIRE(pbAlreadyAttached != NULL); 988 + if (!LST_IS_EMPTY(dev_obj->proc_list)) 989 + *pbAlreadyAttached = true; 990 + 991 + /* Add DevObject to tail. 
*/ 992 + lst_put_tail(dev_obj->proc_list, (struct list_head *)proc_obj); 993 + 994 + DBC_ENSURE(DSP_SUCCEEDED(status) && !LST_IS_EMPTY(dev_obj->proc_list)); 995 + 996 + return status; 997 + } 998 + 999 + /* 1000 + * ======== dev_remove_proc_object ======== 1001 + * Purpose: 1002 + * Search for and remove a Proc object from the given list maintained 1003 + * by the DEV 1004 + * Parameters: 1005 + * p_proc_object: Ptr to ProcObject to insert. 1006 + * dev_obj Ptr to Dev Object where the list is. 1007 + * Returns: 1008 + * 0: If successful. 1009 + * Requires: 1010 + * List exists and is not empty 1011 + * proc_obj != 0 1012 + * hdev_obj is a valid Dev handle. 1013 + * Ensures: 1014 + * Details: 1015 + * List will be deleted when the DEV is destroyed. 1016 + */ 1017 + int dev_remove_proc_object(struct dev_object *hdev_obj, u32 proc_obj) 1018 + { 1019 + int status = -EPERM; 1020 + struct list_head *cur_elem; 1021 + struct dev_object *dev_obj = (struct dev_object *)hdev_obj; 1022 + 1023 + DBC_REQUIRE(dev_obj); 1024 + DBC_REQUIRE(proc_obj != 0); 1025 + DBC_REQUIRE(dev_obj->proc_list != NULL); 1026 + DBC_REQUIRE(!LST_IS_EMPTY(dev_obj->proc_list)); 1027 + 1028 + /* Search list for dev_obj: */ 1029 + for (cur_elem = lst_first(dev_obj->proc_list); cur_elem != NULL; 1030 + cur_elem = lst_next(dev_obj->proc_list, cur_elem)) { 1031 + /* If found, remove it. 
*/ 1032 + if ((u32) cur_elem == proc_obj) { 1033 + lst_remove_elem(dev_obj->proc_list, cur_elem); 1034 + status = 0; 1035 + break; 1036 + } 1037 + } 1038 + 1039 + return status; 1040 + } 1041 + 1042 + int dev_get_dev_type(struct dev_object *hdevObject, u8 *dev_type) 1043 + { 1044 + int status = 0; 1045 + struct dev_object *dev_obj = (struct dev_object *)hdevObject; 1046 + 1047 + *dev_type = dev_obj->dev_type; 1048 + 1049 + return status; 1050 + } 1051 + 1052 + /* 1053 + * ======== store_interface_fxns ======== 1054 + * Purpose: 1055 + * Copy the Bridge's interface functions into the device object, 1056 + * ensuring that fxn_not_implemented() is set for: 1057 + * 1058 + * 1. All Bridge function pointers which are NULL; and 1059 + * 2. All function slots in the struct dev_object structure which have no 1060 + * corresponding slots in the the Bridge's interface, because the Bridge 1061 + * is of an *older* version. 1062 + * Parameters: 1063 + * intf_fxns: Interface fxn Structure of the Bridge's Dev Object. 1064 + * drv_fxns: Interface Fxns offered by the Bridge during DEV_Create(). 1065 + * Returns: 1066 + * Requires: 1067 + * Input pointers are valid. 1068 + * Bridge driver is *not* written for a newer DSP API. 1069 + * Ensures: 1070 + * All function pointers in the dev object's fxn interface are not NULL. 1071 + */ 1072 + static void store_interface_fxns(struct bridge_drv_interface *drv_fxns, 1073 + OUT struct bridge_drv_interface *intf_fxns) 1074 + { 1075 + u32 bridge_version; 1076 + 1077 + /* Local helper macro: */ 1078 + #define STORE_FXN(cast, pfn) \ 1079 + (intf_fxns->pfn = ((drv_fxns->pfn != NULL) ? 
drv_fxns->pfn : \ 1080 + (cast)fxn_not_implemented)) 1081 + 1082 + DBC_REQUIRE(intf_fxns != NULL); 1083 + DBC_REQUIRE(drv_fxns != NULL); 1084 + DBC_REQUIRE(MAKEVERSION(drv_fxns->brd_api_major_version, 1085 + drv_fxns->brd_api_minor_version) <= BRD_API_VERSION); 1086 + bridge_version = MAKEVERSION(drv_fxns->brd_api_major_version, 1087 + drv_fxns->brd_api_minor_version); 1088 + intf_fxns->brd_api_major_version = drv_fxns->brd_api_major_version; 1089 + intf_fxns->brd_api_minor_version = drv_fxns->brd_api_minor_version; 1090 + /* Install functions up to DSP API version .80 (first alpha): */ 1091 + if (bridge_version > 0) { 1092 + STORE_FXN(fxn_dev_create, pfn_dev_create); 1093 + STORE_FXN(fxn_dev_destroy, pfn_dev_destroy); 1094 + STORE_FXN(fxn_dev_ctrl, pfn_dev_cntrl); 1095 + STORE_FXN(fxn_brd_monitor, pfn_brd_monitor); 1096 + STORE_FXN(fxn_brd_start, pfn_brd_start); 1097 + STORE_FXN(fxn_brd_stop, pfn_brd_stop); 1098 + STORE_FXN(fxn_brd_status, pfn_brd_status); 1099 + STORE_FXN(fxn_brd_read, pfn_brd_read); 1100 + STORE_FXN(fxn_brd_write, pfn_brd_write); 1101 + STORE_FXN(fxn_brd_setstate, pfn_brd_set_state); 1102 + STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy); 1103 + STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write); 1104 + STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map); 1105 + STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map); 1106 + STORE_FXN(fxn_chnl_create, pfn_chnl_create); 1107 + STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy); 1108 + STORE_FXN(fxn_chnl_open, pfn_chnl_open); 1109 + STORE_FXN(fxn_chnl_close, pfn_chnl_close); 1110 + STORE_FXN(fxn_chnl_addioreq, pfn_chnl_add_io_req); 1111 + STORE_FXN(fxn_chnl_getioc, pfn_chnl_get_ioc); 1112 + STORE_FXN(fxn_chnl_cancelio, pfn_chnl_cancel_io); 1113 + STORE_FXN(fxn_chnl_flushio, pfn_chnl_flush_io); 1114 + STORE_FXN(fxn_chnl_getinfo, pfn_chnl_get_info); 1115 + STORE_FXN(fxn_chnl_getmgrinfo, pfn_chnl_get_mgr_info); 1116 + STORE_FXN(fxn_chnl_idle, pfn_chnl_idle); 1117 + STORE_FXN(fxn_chnl_registernotify, pfn_chnl_register_notify); 
1118 + STORE_FXN(fxn_deh_create, pfn_deh_create); 1119 + STORE_FXN(fxn_deh_destroy, pfn_deh_destroy); 1120 + STORE_FXN(fxn_deh_notify, pfn_deh_notify); 1121 + STORE_FXN(fxn_deh_registernotify, pfn_deh_register_notify); 1122 + STORE_FXN(fxn_deh_getinfo, pfn_deh_get_info); 1123 + STORE_FXN(fxn_io_create, pfn_io_create); 1124 + STORE_FXN(fxn_io_destroy, pfn_io_destroy); 1125 + STORE_FXN(fxn_io_onloaded, pfn_io_on_loaded); 1126 + STORE_FXN(fxn_io_getprocload, pfn_io_get_proc_load); 1127 + STORE_FXN(fxn_msg_create, pfn_msg_create); 1128 + STORE_FXN(fxn_msg_createqueue, pfn_msg_create_queue); 1129 + STORE_FXN(fxn_msg_delete, pfn_msg_delete); 1130 + STORE_FXN(fxn_msg_deletequeue, pfn_msg_delete_queue); 1131 + STORE_FXN(fxn_msg_get, pfn_msg_get); 1132 + STORE_FXN(fxn_msg_put, pfn_msg_put); 1133 + STORE_FXN(fxn_msg_registernotify, pfn_msg_register_notify); 1134 + STORE_FXN(fxn_msg_setqueueid, pfn_msg_set_queue_id); 1135 + } 1136 + /* Add code for any additional functions in newerBridge versions here */ 1137 + /* Ensure postcondition: */ 1138 + DBC_ENSURE(intf_fxns->pfn_dev_create != NULL); 1139 + DBC_ENSURE(intf_fxns->pfn_dev_destroy != NULL); 1140 + DBC_ENSURE(intf_fxns->pfn_dev_cntrl != NULL); 1141 + DBC_ENSURE(intf_fxns->pfn_brd_monitor != NULL); 1142 + DBC_ENSURE(intf_fxns->pfn_brd_start != NULL); 1143 + DBC_ENSURE(intf_fxns->pfn_brd_stop != NULL); 1144 + DBC_ENSURE(intf_fxns->pfn_brd_status != NULL); 1145 + DBC_ENSURE(intf_fxns->pfn_brd_read != NULL); 1146 + DBC_ENSURE(intf_fxns->pfn_brd_write != NULL); 1147 + DBC_ENSURE(intf_fxns->pfn_chnl_create != NULL); 1148 + DBC_ENSURE(intf_fxns->pfn_chnl_destroy != NULL); 1149 + DBC_ENSURE(intf_fxns->pfn_chnl_open != NULL); 1150 + DBC_ENSURE(intf_fxns->pfn_chnl_close != NULL); 1151 + DBC_ENSURE(intf_fxns->pfn_chnl_add_io_req != NULL); 1152 + DBC_ENSURE(intf_fxns->pfn_chnl_get_ioc != NULL); 1153 + DBC_ENSURE(intf_fxns->pfn_chnl_cancel_io != NULL); 1154 + DBC_ENSURE(intf_fxns->pfn_chnl_flush_io != NULL); 1155 + 
DBC_ENSURE(intf_fxns->pfn_chnl_get_info != NULL); 1156 + DBC_ENSURE(intf_fxns->pfn_chnl_get_mgr_info != NULL); 1157 + DBC_ENSURE(intf_fxns->pfn_chnl_idle != NULL); 1158 + DBC_ENSURE(intf_fxns->pfn_chnl_register_notify != NULL); 1159 + DBC_ENSURE(intf_fxns->pfn_deh_create != NULL); 1160 + DBC_ENSURE(intf_fxns->pfn_deh_destroy != NULL); 1161 + DBC_ENSURE(intf_fxns->pfn_deh_notify != NULL); 1162 + DBC_ENSURE(intf_fxns->pfn_deh_register_notify != NULL); 1163 + DBC_ENSURE(intf_fxns->pfn_deh_get_info != NULL); 1164 + DBC_ENSURE(intf_fxns->pfn_io_create != NULL); 1165 + DBC_ENSURE(intf_fxns->pfn_io_destroy != NULL); 1166 + DBC_ENSURE(intf_fxns->pfn_io_on_loaded != NULL); 1167 + DBC_ENSURE(intf_fxns->pfn_io_get_proc_load != NULL); 1168 + DBC_ENSURE(intf_fxns->pfn_msg_set_queue_id != NULL); 1169 + 1170 + #undef STORE_FXN 1171 + }
+533
drivers/staging/tidspbridge/pmgr/dmm.c
··· 1 + /* 2 + * dmm.c 3 + * 4 + * DSP-BIOS Bridge driver support functions for TI OMAP processors. 5 + * 6 + * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address 7 + * space that can be directly mapped to any MPU buffer or memory region 8 + * 9 + * Notes: 10 + * Region: Generic memory entitiy having a start address and a size 11 + * Chunk: Reserved region 12 + * 13 + * Copyright (C) 2005-2006 Texas Instruments, Inc. 14 + * 15 + * This package is free software; you can redistribute it and/or modify 16 + * it under the terms of the GNU General Public License version 2 as 17 + * published by the Free Software Foundation. 18 + * 19 + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 20 + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 21 + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 22 + */ 23 + 24 + /* ----------------------------------- Host OS */ 25 + #include <dspbridge/host_os.h> 26 + 27 + /* ----------------------------------- DSP/BIOS Bridge */ 28 + #include <dspbridge/std.h> 29 + #include <dspbridge/dbdefs.h> 30 + 31 + /* ----------------------------------- Trace & Debug */ 32 + #include <dspbridge/dbc.h> 33 + 34 + /* ----------------------------------- OS Adaptation Layer */ 35 + #include <dspbridge/sync.h> 36 + 37 + /* ----------------------------------- Platform Manager */ 38 + #include <dspbridge/dev.h> 39 + #include <dspbridge/proc.h> 40 + 41 + /* ----------------------------------- This */ 42 + #include <dspbridge/dmm.h> 43 + 44 + /* ----------------------------------- Defines, Data Structures, Typedefs */ 45 + #define DMM_ADDR_VIRTUAL(a) \ 46 + (((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\ 47 + dyn_mem_map_beg) 48 + #define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K) 49 + 50 + /* DMM Mgr */ 51 + struct dmm_object { 52 + /* Dmm Lock is used to serialize access mem manager for 53 + * multi-threads. 
*/ 54 + spinlock_t dmm_lock; /* Lock to access dmm mgr */ 55 + }; 56 + 57 + /* ----------------------------------- Globals */ 58 + static u32 refs; /* module reference count */ 59 + struct map_page { 60 + u32 region_size:15; 61 + u32 mapped_size:15; 62 + u32 reserved:1; 63 + u32 mapped:1; 64 + }; 65 + 66 + /* Create the free list */ 67 + static struct map_page *virtual_mapping_table; 68 + static u32 free_region; /* The index of free region */ 69 + static u32 free_size; 70 + static u32 dyn_mem_map_beg; /* The Beginning of dynamic memory mapping */ 71 + static u32 table_size; /* The size of virt and phys pages tables */ 72 + 73 + /* ----------------------------------- Function Prototypes */ 74 + static struct map_page *get_region(u32 addr); 75 + static struct map_page *get_free_region(u32 aSize); 76 + static struct map_page *get_mapped_region(u32 aAddr); 77 + 78 + /* ======== dmm_create_tables ======== 79 + * Purpose: 80 + * Create table to hold the information of physical address 81 + * the buffer pages that is passed by the user, and the table 82 + * to hold the information of the virtual memory that is reserved 83 + * for DSP. 
84 + */ 85 + int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size) 86 + { 87 + struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; 88 + int status = 0; 89 + 90 + status = dmm_delete_tables(dmm_obj); 91 + if (DSP_SUCCEEDED(status)) { 92 + dyn_mem_map_beg = addr; 93 + table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K; 94 + /* Create the free list */ 95 + virtual_mapping_table = __vmalloc(table_size * 96 + sizeof(struct map_page), GFP_KERNEL | 97 + __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); 98 + if (virtual_mapping_table == NULL) 99 + status = -ENOMEM; 100 + else { 101 + /* On successful allocation, 102 + * all entries are zero ('free') */ 103 + free_region = 0; 104 + free_size = table_size * PG_SIZE4K; 105 + virtual_mapping_table[0].region_size = table_size; 106 + } 107 + } 108 + 109 + if (DSP_FAILED(status)) 110 + pr_err("%s: failure, status 0x%x\n", __func__, status); 111 + 112 + return status; 113 + } 114 + 115 + /* 116 + * ======== dmm_create ======== 117 + * Purpose: 118 + * Create a dynamic memory manager object. 119 + */ 120 + int dmm_create(OUT struct dmm_object **phDmmMgr, 121 + struct dev_object *hdev_obj, 122 + IN CONST struct dmm_mgrattrs *pMgrAttrs) 123 + { 124 + struct dmm_object *dmm_obj = NULL; 125 + int status = 0; 126 + DBC_REQUIRE(refs > 0); 127 + DBC_REQUIRE(phDmmMgr != NULL); 128 + 129 + *phDmmMgr = NULL; 130 + /* create, zero, and tag a cmm mgr object */ 131 + dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL); 132 + if (dmm_obj != NULL) { 133 + spin_lock_init(&dmm_obj->dmm_lock); 134 + *phDmmMgr = dmm_obj; 135 + } else { 136 + status = -ENOMEM; 137 + } 138 + 139 + return status; 140 + } 141 + 142 + /* 143 + * ======== dmm_destroy ======== 144 + * Purpose: 145 + * Release the communication memory manager resources. 
146 + */ 147 + int dmm_destroy(struct dmm_object *dmm_mgr) 148 + { 149 + struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; 150 + int status = 0; 151 + 152 + DBC_REQUIRE(refs > 0); 153 + if (dmm_mgr) { 154 + status = dmm_delete_tables(dmm_obj); 155 + if (DSP_SUCCEEDED(status)) 156 + kfree(dmm_obj); 157 + } else 158 + status = -EFAULT; 159 + 160 + return status; 161 + } 162 + 163 + /* 164 + * ======== dmm_delete_tables ======== 165 + * Purpose: 166 + * Delete DMM Tables. 167 + */ 168 + int dmm_delete_tables(struct dmm_object *dmm_mgr) 169 + { 170 + int status = 0; 171 + 172 + DBC_REQUIRE(refs > 0); 173 + /* Delete all DMM tables */ 174 + if (dmm_mgr) 175 + vfree(virtual_mapping_table); 176 + else 177 + status = -EFAULT; 178 + return status; 179 + } 180 + 181 + /* 182 + * ======== dmm_exit ======== 183 + * Purpose: 184 + * Discontinue usage of module; free resources when reference count 185 + * reaches 0. 186 + */ 187 + void dmm_exit(void) 188 + { 189 + DBC_REQUIRE(refs > 0); 190 + 191 + refs--; 192 + } 193 + 194 + /* 195 + * ======== dmm_get_handle ======== 196 + * Purpose: 197 + * Return the dynamic memory manager object for this device. 198 + * This is typically called from the client process. 199 + */ 200 + int dmm_get_handle(void *hprocessor, OUT struct dmm_object **phDmmMgr) 201 + { 202 + int status = 0; 203 + struct dev_object *hdev_obj; 204 + 205 + DBC_REQUIRE(refs > 0); 206 + DBC_REQUIRE(phDmmMgr != NULL); 207 + if (hprocessor != NULL) 208 + status = proc_get_dev_object(hprocessor, &hdev_obj); 209 + else 210 + hdev_obj = dev_get_first(); /* default */ 211 + 212 + if (DSP_SUCCEEDED(status)) 213 + status = dev_get_dmm_mgr(hdev_obj, phDmmMgr); 214 + 215 + return status; 216 + } 217 + 218 + /* 219 + * ======== dmm_init ======== 220 + * Purpose: 221 + * Initializes private state of DMM module. 
222 + */ 223 + bool dmm_init(void) 224 + { 225 + bool ret = true; 226 + 227 + DBC_REQUIRE(refs >= 0); 228 + 229 + if (ret) 230 + refs++; 231 + 232 + DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0))); 233 + 234 + virtual_mapping_table = NULL; 235 + table_size = 0; 236 + 237 + return ret; 238 + } 239 + 240 + /* 241 + * ======== dmm_map_memory ======== 242 + * Purpose: 243 + * Add a mapping block to the reserved chunk. DMM assumes that this block 244 + * will be mapped in the DSP/IVA's address space. DMM returns an error if a 245 + * mapping overlaps another one. This function stores the info that will be 246 + * required later while unmapping the block. 247 + */ 248 + int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size) 249 + { 250 + struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; 251 + struct map_page *chunk; 252 + int status = 0; 253 + 254 + spin_lock(&dmm_obj->dmm_lock); 255 + /* Find the Reserved memory chunk containing the DSP block to 256 + * be mapped */ 257 + chunk = (struct map_page *)get_region(addr); 258 + if (chunk != NULL) { 259 + /* Mark the region 'mapped', leave the 'reserved' info as-is */ 260 + chunk->mapped = true; 261 + chunk->mapped_size = (size / PG_SIZE4K); 262 + } else 263 + status = -ENOENT; 264 + spin_unlock(&dmm_obj->dmm_lock); 265 + 266 + dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, " 267 + "chunk %p", __func__, dmm_mgr, addr, size, status, chunk); 268 + 269 + return status; 270 + } 271 + 272 + /* 273 + * ======== dmm_reserve_memory ======== 274 + * Purpose: 275 + * Reserve a chunk of virtually contiguous DSP/IVA address space. 
276 + */ 277 + int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size, 278 + u32 *prsv_addr) 279 + { 280 + int status = 0; 281 + struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; 282 + struct map_page *node; 283 + u32 rsv_addr = 0; 284 + u32 rsv_size = 0; 285 + 286 + spin_lock(&dmm_obj->dmm_lock); 287 + 288 + /* Try to get a DSP chunk from the free list */ 289 + node = get_free_region(size); 290 + if (node != NULL) { 291 + /* DSP chunk of given size is available. */ 292 + rsv_addr = DMM_ADDR_VIRTUAL(node); 293 + /* Calculate the number entries to use */ 294 + rsv_size = size / PG_SIZE4K; 295 + if (rsv_size < node->region_size) { 296 + /* Mark remainder of free region */ 297 + node[rsv_size].mapped = false; 298 + node[rsv_size].reserved = false; 299 + node[rsv_size].region_size = 300 + node->region_size - rsv_size; 301 + node[rsv_size].mapped_size = 0; 302 + } 303 + /* get_region will return first fit chunk. But we only use what 304 + is requested. */ 305 + node->mapped = false; 306 + node->reserved = true; 307 + node->region_size = rsv_size; 308 + node->mapped_size = 0; 309 + /* Return the chunk's starting address */ 310 + *prsv_addr = rsv_addr; 311 + } else 312 + /*dSP chunk of given size is not available */ 313 + status = -ENOMEM; 314 + 315 + spin_unlock(&dmm_obj->dmm_lock); 316 + 317 + dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, " 318 + "rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size, 319 + prsv_addr, status, rsv_addr, rsv_size); 320 + 321 + return status; 322 + } 323 + 324 + /* 325 + * ======== dmm_un_map_memory ======== 326 + * Purpose: 327 + * Remove the mapped block from the reserved chunk. 
328 + */ 329 + int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize) 330 + { 331 + struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; 332 + struct map_page *chunk; 333 + int status = 0; 334 + 335 + spin_lock(&dmm_obj->dmm_lock); 336 + chunk = get_mapped_region(addr); 337 + if (chunk == NULL) 338 + status = -ENOENT; 339 + 340 + if (DSP_SUCCEEDED(status)) { 341 + /* Unmap the region */ 342 + *psize = chunk->mapped_size * PG_SIZE4K; 343 + chunk->mapped = false; 344 + chunk->mapped_size = 0; 345 + } 346 + spin_unlock(&dmm_obj->dmm_lock); 347 + 348 + dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, " 349 + "chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk); 350 + 351 + return status; 352 + } 353 + 354 + /* 355 + * ======== dmm_un_reserve_memory ======== 356 + * Purpose: 357 + * Free a chunk of reserved DSP/IVA address space. 358 + */ 359 + int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr) 360 + { 361 + struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; 362 + struct map_page *chunk; 363 + u32 i; 364 + int status = 0; 365 + u32 chunk_size; 366 + 367 + spin_lock(&dmm_obj->dmm_lock); 368 + 369 + /* Find the chunk containing the reserved address */ 370 + chunk = get_mapped_region(rsv_addr); 371 + if (chunk == NULL) 372 + status = -ENOENT; 373 + 374 + if (DSP_SUCCEEDED(status)) { 375 + /* Free all the mapped pages for this reserved region */ 376 + i = 0; 377 + while (i < chunk->region_size) { 378 + if (chunk[i].mapped) { 379 + /* Remove mapping from the page tables. */ 380 + chunk_size = chunk[i].mapped_size; 381 + /* Clear the mapping flags */ 382 + chunk[i].mapped = false; 383 + chunk[i].mapped_size = 0; 384 + i += chunk_size; 385 + } else 386 + i++; 387 + } 388 + /* Clear the flags (mark the region 'free') */ 389 + chunk->reserved = false; 390 + /* NOTE: We do NOT coalesce free regions here. 
391 + * Free regions are coalesced in get_region(), as it traverses 392 + *the whole mapping table 393 + */ 394 + } 395 + spin_unlock(&dmm_obj->dmm_lock); 396 + 397 + dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p", 398 + __func__, dmm_mgr, rsv_addr, status, chunk); 399 + 400 + return status; 401 + } 402 + 403 + /* 404 + * ======== get_region ======== 405 + * Purpose: 406 + * Returns a region containing the specified memory region 407 + */ 408 + static struct map_page *get_region(u32 aAddr) 409 + { 410 + struct map_page *curr_region = NULL; 411 + u32 i = 0; 412 + 413 + if (virtual_mapping_table != NULL) { 414 + /* find page mapped by this address */ 415 + i = DMM_ADDR_TO_INDEX(aAddr); 416 + if (i < table_size) 417 + curr_region = virtual_mapping_table + i; 418 + } 419 + 420 + dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n", 421 + __func__, curr_region, free_region, free_size); 422 + return curr_region; 423 + } 424 + 425 + /* 426 + * ======== get_free_region ======== 427 + * Purpose: 428 + * Returns the requested free region 429 + */ 430 + static struct map_page *get_free_region(u32 aSize) 431 + { 432 + struct map_page *curr_region = NULL; 433 + u32 i = 0; 434 + u32 region_size = 0; 435 + u32 next_i = 0; 436 + 437 + if (virtual_mapping_table == NULL) 438 + return curr_region; 439 + if (aSize > free_size) { 440 + /* Find the largest free region 441 + * (coalesce during the traversal) */ 442 + while (i < table_size) { 443 + region_size = virtual_mapping_table[i].region_size; 444 + next_i = i + region_size; 445 + if (virtual_mapping_table[i].reserved == false) { 446 + /* Coalesce, if possible */ 447 + if (next_i < table_size && 448 + virtual_mapping_table[next_i].reserved 449 + == false) { 450 + virtual_mapping_table[i].region_size += 451 + virtual_mapping_table 452 + [next_i].region_size; 453 + continue; 454 + } 455 + region_size *= PG_SIZE4K; 456 + if (region_size > free_size) { 457 + free_region = i; 458 + free_size = 
region_size; 459 + } 460 + } 461 + i = next_i; 462 + } 463 + } 464 + if (aSize <= free_size) { 465 + curr_region = virtual_mapping_table + free_region; 466 + free_region += (aSize / PG_SIZE4K); 467 + free_size -= aSize; 468 + } 469 + return curr_region; 470 + } 471 + 472 + /* 473 + * ======== get_mapped_region ======== 474 + * Purpose: 475 + * Returns the requestedmapped region 476 + */ 477 + static struct map_page *get_mapped_region(u32 aAddr) 478 + { 479 + u32 i = 0; 480 + struct map_page *curr_region = NULL; 481 + 482 + if (virtual_mapping_table == NULL) 483 + return curr_region; 484 + 485 + i = DMM_ADDR_TO_INDEX(aAddr); 486 + if (i < table_size && (virtual_mapping_table[i].mapped || 487 + virtual_mapping_table[i].reserved)) 488 + curr_region = virtual_mapping_table + i; 489 + return curr_region; 490 + } 491 + 492 + #ifdef DSP_DMM_DEBUG 493 + u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr) 494 + { 495 + struct map_page *curr_node = NULL; 496 + u32 i; 497 + u32 freemem = 0; 498 + u32 bigsize = 0; 499 + 500 + spin_lock(&dmm_mgr->dmm_lock); 501 + 502 + if (virtual_mapping_table != NULL) { 503 + for (i = 0; i < table_size; i += 504 + virtual_mapping_table[i].region_size) { 505 + curr_node = virtual_mapping_table + i; 506 + if (curr_node->reserved == TRUE) { 507 + /*printk("RESERVED size = 0x%x, " 508 + "Map size = 0x%x\n", 509 + (curr_node->region_size * PG_SIZE4K), 510 + (curr_node->mapped == false) ? 
0 : 511 + (curr_node->mapped_size * PG_SIZE4K)); 512 + */ 513 + } else { 514 + /* printk("UNRESERVED size = 0x%x\n", 515 + (curr_node->region_size * PG_SIZE4K)); 516 + */ 517 + freemem += (curr_node->region_size * PG_SIZE4K); 518 + if (curr_node->region_size > bigsize) 519 + bigsize = curr_node->region_size; 520 + } 521 + } 522 + } 523 + spin_unlock(&dmm_mgr->dmm_lock); 524 + printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n", 525 + freemem / (1024 * 1024)); 526 + printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n", 527 + (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024)); 528 + printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n", 529 + (bigsize * PG_SIZE4K / (1024 * 1024))); 530 + 531 + return 0; 532 + } 533 + #endif
+1685
drivers/staging/tidspbridge/pmgr/dspapi.c
··· 1 + /* 2 + * dspapi.c 3 + * 4 + * DSP-BIOS Bridge driver support functions for TI OMAP processors. 5 + * 6 + * Common DSP API functions, also includes the wrapper 7 + * functions called directly by the DeviceIOControl interface. 8 + * 9 + * Copyright (C) 2005-2006 Texas Instruments, Inc. 10 + * 11 + * This package is free software; you can redistribute it and/or modify 12 + * it under the terms of the GNU General Public License version 2 as 13 + * published by the Free Software Foundation. 14 + * 15 + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 16 + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 17 + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 18 + */ 19 + 20 + /* ----------------------------------- Host OS */ 21 + #include <dspbridge/host_os.h> 22 + 23 + /* ----------------------------------- DSP/BIOS Bridge */ 24 + #include <dspbridge/std.h> 25 + #include <dspbridge/dbdefs.h> 26 + 27 + /* ----------------------------------- Trace & Debug */ 28 + #include <dspbridge/dbc.h> 29 + 30 + /* ----------------------------------- OS Adaptation Layer */ 31 + #include <dspbridge/cfg.h> 32 + #include <dspbridge/ntfy.h> 33 + #include <dspbridge/services.h> 34 + 35 + /* ----------------------------------- Platform Manager */ 36 + #include <dspbridge/chnl.h> 37 + #include <dspbridge/dev.h> 38 + #include <dspbridge/drv.h> 39 + 40 + #include <dspbridge/proc.h> 41 + #include <dspbridge/strm.h> 42 + 43 + /* ----------------------------------- Resource Manager */ 44 + #include <dspbridge/disp.h> 45 + #include <dspbridge/mgr.h> 46 + #include <dspbridge/node.h> 47 + #include <dspbridge/rmm.h> 48 + 49 + /* ----------------------------------- Others */ 50 + #include <dspbridge/msg.h> 51 + #include <dspbridge/cmm.h> 52 + #include <dspbridge/io.h> 53 + 54 + /* ----------------------------------- This */ 55 + #include <dspbridge/dspapi.h> 56 + #include <dspbridge/dbdcd.h> 57 + 58 + #include <dspbridge/resourcecleanup.h> 59 
/* ----------------------------------- Defines, Data Structures, Typedefs */
#define MAX_TRACEBUFLEN 255
#define MAX_LOADARGS 16
#define MAX_NODES 64
#define MAX_STREAMS 16
#define MAX_BUFS 64

/* Used to get dspbridge ioctl table */
#define DB_GET_IOC_TABLE(cmd)	(DB_GET_MODULE(cmd) >> DB_MODULE_SHIFT)

/* Device IOCtl function pointer */
struct api_cmd {
	u32(*fxn) (union Trapped_Args *args, void *pr_ctxt);
	u32 dw_index;	/* NOTE(review): never initialized or read here */
};

/* ----------------------------------- Globals */
/* Reference count pairing api_init() with api_exit() */
static u32 api_c_refs;

/*
 * Function tables.
 * The order of these functions MUST be the same as the order of the command
 * numbers defined in dspapi-ioctl.h This is how an IOCTL number in user mode
 * turns into a function call in kernel mode.
 */

/* MGR wrapper functions */
static struct api_cmd mgr_cmd[] = {
	{mgrwrap_enum_node_info},	/* MGR_ENUMNODE_INFO */
	{mgrwrap_enum_proc_info},	/* MGR_ENUMPROC_INFO */
	{mgrwrap_register_object},	/* MGR_REGISTEROBJECT */
	{mgrwrap_unregister_object},	/* MGR_UNREGISTEROBJECT */
	{mgrwrap_wait_for_bridge_events},	/* MGR_WAIT */
	{mgrwrap_get_process_resources_info},	/* MGR_GET_PROC_RES */
};

/* PROC wrapper functions */
static struct api_cmd proc_cmd[] = {
	{procwrap_attach},	/* PROC_ATTACH */
	{procwrap_ctrl},	/* PROC_CTRL */
	{procwrap_detach},	/* PROC_DETACH */
	{procwrap_enum_node_info},	/* PROC_ENUMNODE */
	{procwrap_enum_resources},	/* PROC_ENUMRESOURCES */
	{procwrap_get_state},	/* PROC_GET_STATE */
	{procwrap_get_trace},	/* PROC_GET_TRACE */
	{procwrap_load},	/* PROC_LOAD */
	{procwrap_register_notify},	/* PROC_REGISTERNOTIFY */
	{procwrap_start},	/* PROC_START */
	{procwrap_reserve_memory},	/* PROC_RSVMEM */
	{procwrap_un_reserve_memory},	/* PROC_UNRSVMEM */
	{procwrap_map},		/* PROC_MAPMEM */
	{procwrap_un_map},	/* PROC_UNMAPMEM */
	{procwrap_flush_memory},	/* PROC_FLUSHMEMORY */
	{procwrap_stop},	/* PROC_STOP */
	{procwrap_invalidate_memory},	/* PROC_INVALIDATEMEMORY */
	{procwrap_begin_dma},	/* PROC_BEGINDMA */
	{procwrap_end_dma},	/* PROC_ENDDMA */
};

/* NODE wrapper functions */
static struct api_cmd node_cmd[] = {
	{nodewrap_allocate},	/* NODE_ALLOCATE */
	{nodewrap_alloc_msg_buf},	/* NODE_ALLOCMSGBUF */
	{nodewrap_change_priority},	/* NODE_CHANGEPRIORITY */
	{nodewrap_connect},	/* NODE_CONNECT */
	{nodewrap_create},	/* NODE_CREATE */
	{nodewrap_delete},	/* NODE_DELETE */
	{nodewrap_free_msg_buf},	/* NODE_FREEMSGBUF */
	{nodewrap_get_attr},	/* NODE_GETATTR */
	{nodewrap_get_message},	/* NODE_GETMESSAGE */
	{nodewrap_pause},	/* NODE_PAUSE */
	{nodewrap_put_message},	/* NODE_PUTMESSAGE */
	{nodewrap_register_notify},	/* NODE_REGISTERNOTIFY */
	{nodewrap_run},		/* NODE_RUN */
	{nodewrap_terminate},	/* NODE_TERMINATE */
	{nodewrap_get_uuid_props},	/* NODE_GETUUIDPROPS */
};

/* STRM wrapper functions */
static struct api_cmd strm_cmd[] = {
	{strmwrap_allocate_buffer},	/* STRM_ALLOCATEBUFFER */
	{strmwrap_close},	/* STRM_CLOSE */
	{strmwrap_free_buffer},	/* STRM_FREEBUFFER */
	{strmwrap_get_event_handle},	/* STRM_GETEVENTHANDLE */
	{strmwrap_get_info},	/* STRM_GETINFO */
	{strmwrap_idle},	/* STRM_IDLE */
	{strmwrap_issue},	/* STRM_ISSUE */
	{strmwrap_open},	/* STRM_OPEN */
	{strmwrap_reclaim},	/* STRM_RECLAIM */
	{strmwrap_register_notify},	/* STRM_REGISTERNOTIFY */
	{strmwrap_select},	/* STRM_SELECT */
};

/* CMM wrapper functions */
static struct api_cmd cmm_cmd[] = {
	{cmmwrap_calloc_buf},	/* CMM_ALLOCBUF */
	{cmmwrap_free_buf},	/* CMM_FREEBUF */
	{cmmwrap_get_handle},	/* CMM_GETHANDLE */
	{cmmwrap_get_info},	/* CMM_GETINFO */
};

/* Array used to store ioctl table sizes. It can hold up to 8 entries */
static u8 size_cmd[] = {
	ARRAY_SIZE(mgr_cmd),
	ARRAY_SIZE(proc_cmd),
	ARRAY_SIZE(node_cmd),
	ARRAY_SIZE(strm_cmd),
	ARRAY_SIZE(cmm_cmd),
};

/*
 * Copy 'bytes' from user space into 'to', but only if no earlier step
 * already failed (*err holds the running status).  A NULL user pointer
 * or a faulting copy sets *err to -EFAULT.
 */
static inline void _cp_fm_usr(void *to, const void __user * from,
			      int *err, unsigned long bytes)
{
	if (DSP_FAILED(*err))
		return;

	if (unlikely(!from)) {
		*err = -EFAULT;
		return;
	}

	if (unlikely(copy_from_user(to, from, bytes)))
		*err = -EFAULT;
}

/* Copy n elements from user space; element size taken from 'to' */
#define CP_FM_USR(to, from, err, n)				\
	_cp_fm_usr(to, from, &(err), (n) * sizeof(*(to)))

/*
 * Mirror of _cp_fm_usr for the kernel-to-user direction: skipped when
 * *err is already a failure, sets -EFAULT on NULL pointer or fault.
 */
static inline void _cp_to_usr(void __user *to, const void *from,
			      int *err, unsigned long bytes)
{
	if (DSP_FAILED(*err))
		return;

	if (unlikely(!to)) {
		*err = -EFAULT;
		return;
	}

	if (unlikely(copy_to_user(to, from, bytes)))
		*err = -EFAULT;
}

/* Copy n elements to user space; element size taken from 'from' */
#define CP_TO_USR(to, from, err, n)				\
	_cp_to_usr(to, from, &(err), (n) * sizeof(*(from)))

/*
 * ======== api_call_dev_ioctl ========
 * Purpose:
 *      Call the (wrapper) function for the corresponding API IOCTL.
210 + */ 211 + inline int api_call_dev_ioctl(u32 cmd, union Trapped_Args *args, 212 + u32 *result, void *pr_ctxt) 213 + { 214 + u32(*ioctl_cmd) (union Trapped_Args *args, void *pr_ctxt) = NULL; 215 + int i; 216 + 217 + if (_IOC_TYPE(cmd) != DB) { 218 + pr_err("%s: Incompatible dspbridge ioctl number\n", __func__); 219 + goto err; 220 + } 221 + 222 + if (DB_GET_IOC_TABLE(cmd) > ARRAY_SIZE(size_cmd)) { 223 + pr_err("%s: undefined ioctl module\n", __func__); 224 + goto err; 225 + } 226 + 227 + /* Check the size of the required cmd table */ 228 + i = DB_GET_IOC(cmd); 229 + if (i > size_cmd[DB_GET_IOC_TABLE(cmd)]) { 230 + pr_err("%s: requested ioctl %d out of bounds for table %d\n", 231 + __func__, i, DB_GET_IOC_TABLE(cmd)); 232 + goto err; 233 + } 234 + 235 + switch (DB_GET_MODULE(cmd)) { 236 + case DB_MGR: 237 + ioctl_cmd = mgr_cmd[i].fxn; 238 + break; 239 + case DB_PROC: 240 + ioctl_cmd = proc_cmd[i].fxn; 241 + break; 242 + case DB_NODE: 243 + ioctl_cmd = node_cmd[i].fxn; 244 + break; 245 + case DB_STRM: 246 + ioctl_cmd = strm_cmd[i].fxn; 247 + break; 248 + case DB_CMM: 249 + ioctl_cmd = cmm_cmd[i].fxn; 250 + break; 251 + } 252 + 253 + if (!ioctl_cmd) { 254 + pr_err("%s: requested ioctl not defined\n", __func__); 255 + goto err; 256 + } else { 257 + *result = (*ioctl_cmd) (args, pr_ctxt); 258 + } 259 + 260 + return 0; 261 + 262 + err: 263 + return -EINVAL; 264 + } 265 + 266 + /* 267 + * ======== api_exit ======== 268 + */ 269 + void api_exit(void) 270 + { 271 + DBC_REQUIRE(api_c_refs > 0); 272 + api_c_refs--; 273 + 274 + if (api_c_refs == 0) { 275 + /* Release all modules initialized in api_init(). 
*/ 276 + cod_exit(); 277 + dev_exit(); 278 + chnl_exit(); 279 + msg_exit(); 280 + io_exit(); 281 + strm_exit(); 282 + disp_exit(); 283 + node_exit(); 284 + proc_exit(); 285 + mgr_exit(); 286 + rmm_exit(); 287 + drv_exit(); 288 + } 289 + DBC_ENSURE(api_c_refs >= 0); 290 + } 291 + 292 + /* 293 + * ======== api_init ======== 294 + * Purpose: 295 + * Module initialization used by Bridge API. 296 + */ 297 + bool api_init(void) 298 + { 299 + bool ret = true; 300 + bool fdrv, fdev, fcod, fchnl, fmsg, fio; 301 + bool fmgr, fproc, fnode, fdisp, fstrm, frmm; 302 + 303 + if (api_c_refs == 0) { 304 + /* initialize driver and other modules */ 305 + fdrv = drv_init(); 306 + fmgr = mgr_init(); 307 + fproc = proc_init(); 308 + fnode = node_init(); 309 + fdisp = disp_init(); 310 + fstrm = strm_init(); 311 + frmm = rmm_init(); 312 + fchnl = chnl_init(); 313 + fmsg = msg_mod_init(); 314 + fio = io_init(); 315 + fdev = dev_init(); 316 + fcod = cod_init(); 317 + ret = fdrv && fdev && fchnl && fcod && fmsg && fio; 318 + ret = ret && fmgr && fproc && frmm; 319 + if (!ret) { 320 + if (fdrv) 321 + drv_exit(); 322 + 323 + if (fmgr) 324 + mgr_exit(); 325 + 326 + if (fstrm) 327 + strm_exit(); 328 + 329 + if (fproc) 330 + proc_exit(); 331 + 332 + if (fnode) 333 + node_exit(); 334 + 335 + if (fdisp) 336 + disp_exit(); 337 + 338 + if (fchnl) 339 + chnl_exit(); 340 + 341 + if (fmsg) 342 + msg_exit(); 343 + 344 + if (fio) 345 + io_exit(); 346 + 347 + if (fdev) 348 + dev_exit(); 349 + 350 + if (fcod) 351 + cod_exit(); 352 + 353 + if (frmm) 354 + rmm_exit(); 355 + 356 + } 357 + } 358 + if (ret) 359 + api_c_refs++; 360 + 361 + return ret; 362 + } 363 + 364 + /* 365 + * ======== api_init_complete2 ======== 366 + * Purpose: 367 + * Perform any required bridge initialization which cannot 368 + * be performed in api_init() or dev_start_device() due 369 + * to the fact that some services are not yet 370 + * completely initialized. 
371 + * Parameters: 372 + * Returns: 373 + * 0: Allow this device to load 374 + * -EPERM: Failure. 375 + * Requires: 376 + * Bridge API initialized. 377 + * Ensures: 378 + */ 379 + int api_init_complete2(void) 380 + { 381 + int status = 0; 382 + struct cfg_devnode *dev_node; 383 + struct dev_object *hdev_obj; 384 + u8 dev_type; 385 + u32 tmp; 386 + 387 + DBC_REQUIRE(api_c_refs > 0); 388 + 389 + /* Walk the list of DevObjects, get each devnode, and attempting to 390 + * autostart the board. Note that this requires COF loading, which 391 + * requires KFILE. */ 392 + for (hdev_obj = dev_get_first(); hdev_obj != NULL; 393 + hdev_obj = dev_get_next(hdev_obj)) { 394 + if (DSP_FAILED(dev_get_dev_node(hdev_obj, &dev_node))) 395 + continue; 396 + 397 + if (DSP_FAILED(dev_get_dev_type(hdev_obj, &dev_type))) 398 + continue; 399 + 400 + if ((dev_type == DSP_UNIT) || (dev_type == IVA_UNIT)) 401 + if (cfg_get_auto_start(dev_node, &tmp) == 0 402 + && tmp) 403 + proc_auto_start(dev_node, hdev_obj); 404 + } 405 + 406 + return status; 407 + } 408 + 409 + /* TODO: Remove deprecated and not implemented ioctl wrappers */ 410 + 411 + /* 412 + * ======== mgrwrap_enum_node_info ======== 413 + */ 414 + u32 mgrwrap_enum_node_info(union Trapped_Args *args, void *pr_ctxt) 415 + { 416 + u8 *pndb_props; 417 + u32 num_nodes; 418 + int status = 0; 419 + u32 size = args->args_mgr_enumnode_info.undb_props_size; 420 + 421 + if (size < sizeof(struct dsp_ndbprops)) 422 + return -EINVAL; 423 + 424 + pndb_props = kmalloc(size, GFP_KERNEL); 425 + if (pndb_props == NULL) 426 + status = -ENOMEM; 427 + 428 + if (DSP_SUCCEEDED(status)) { 429 + status = 430 + mgr_enum_node_info(args->args_mgr_enumnode_info.node_id, 431 + (struct dsp_ndbprops *)pndb_props, size, 432 + &num_nodes); 433 + } 434 + CP_TO_USR(args->args_mgr_enumnode_info.pndb_props, pndb_props, status, 435 + size); 436 + CP_TO_USR(args->args_mgr_enumnode_info.pu_num_nodes, &num_nodes, status, 437 + 1); 438 + kfree(pndb_props); 439 + 440 + return 
status; 441 + } 442 + 443 + /* 444 + * ======== mgrwrap_enum_proc_info ======== 445 + */ 446 + u32 mgrwrap_enum_proc_info(union Trapped_Args *args, void *pr_ctxt) 447 + { 448 + u8 *processor_info; 449 + u8 num_procs; 450 + int status = 0; 451 + u32 size = args->args_mgr_enumproc_info.processor_info_size; 452 + 453 + if (size < sizeof(struct dsp_processorinfo)) 454 + return -EINVAL; 455 + 456 + processor_info = kmalloc(size, GFP_KERNEL); 457 + if (processor_info == NULL) 458 + status = -ENOMEM; 459 + 460 + if (DSP_SUCCEEDED(status)) { 461 + status = 462 + mgr_enum_processor_info(args->args_mgr_enumproc_info. 463 + processor_id, 464 + (struct dsp_processorinfo *) 465 + processor_info, size, &num_procs); 466 + } 467 + CP_TO_USR(args->args_mgr_enumproc_info.processor_info, processor_info, 468 + status, size); 469 + CP_TO_USR(args->args_mgr_enumproc_info.pu_num_procs, &num_procs, 470 + status, 1); 471 + kfree(processor_info); 472 + 473 + return status; 474 + } 475 + 476 + #define WRAP_MAP2CALLER(x) x 477 + /* 478 + * ======== mgrwrap_register_object ======== 479 + */ 480 + u32 mgrwrap_register_object(union Trapped_Args *args, void *pr_ctxt) 481 + { 482 + u32 ret; 483 + struct dsp_uuid uuid_obj; 484 + u32 path_size = 0; 485 + char *psz_path_name = NULL; 486 + int status = 0; 487 + 488 + CP_FM_USR(&uuid_obj, args->args_mgr_registerobject.uuid_obj, status, 1); 489 + if (DSP_FAILED(status)) 490 + goto func_end; 491 + /* path_size is increased by 1 to accommodate NULL */ 492 + path_size = strlen_user((char *) 493 + args->args_mgr_registerobject.psz_path_name) + 494 + 1; 495 + psz_path_name = kmalloc(path_size, GFP_KERNEL); 496 + if (!psz_path_name) 497 + goto func_end; 498 + ret = strncpy_from_user(psz_path_name, 499 + (char *)args->args_mgr_registerobject. 
500 + psz_path_name, path_size); 501 + if (!ret) { 502 + status = -EFAULT; 503 + goto func_end; 504 + } 505 + 506 + if (args->args_mgr_registerobject.obj_type >= DSP_DCDMAXOBJTYPE) 507 + return -EINVAL; 508 + 509 + status = dcd_register_object(&uuid_obj, 510 + args->args_mgr_registerobject.obj_type, 511 + (char *)psz_path_name); 512 + func_end: 513 + kfree(psz_path_name); 514 + return status; 515 + } 516 + 517 + /* 518 + * ======== mgrwrap_unregister_object ======== 519 + */ 520 + u32 mgrwrap_unregister_object(union Trapped_Args *args, void *pr_ctxt) 521 + { 522 + int status = 0; 523 + struct dsp_uuid uuid_obj; 524 + 525 + CP_FM_USR(&uuid_obj, args->args_mgr_registerobject.uuid_obj, status, 1); 526 + if (DSP_FAILED(status)) 527 + goto func_end; 528 + 529 + status = dcd_unregister_object(&uuid_obj, 530 + args->args_mgr_unregisterobject. 531 + obj_type); 532 + func_end: 533 + return status; 534 + 535 + } 536 + 537 + /* 538 + * ======== mgrwrap_wait_for_bridge_events ======== 539 + */ 540 + u32 mgrwrap_wait_for_bridge_events(union Trapped_Args *args, void *pr_ctxt) 541 + { 542 + int status = 0, real_status = 0; 543 + struct dsp_notification *anotifications[MAX_EVENTS]; 544 + struct dsp_notification notifications[MAX_EVENTS]; 545 + u32 index, i; 546 + u32 count = args->args_mgr_wait.count; 547 + 548 + if (count > MAX_EVENTS) 549 + status = -EINVAL; 550 + 551 + /* get the array of pointers to user structures */ 552 + CP_FM_USR(anotifications, args->args_mgr_wait.anotifications, 553 + status, count); 554 + /* get the events */ 555 + for (i = 0; i < count; i++) { 556 + CP_FM_USR(&notifications[i], anotifications[i], status, 1); 557 + if (DSP_SUCCEEDED(status)) { 558 + /* set the array of pointers to kernel structures */ 559 + anotifications[i] = &notifications[i]; 560 + } 561 + } 562 + if (DSP_SUCCEEDED(status)) { 563 + real_status = mgr_wait_for_bridge_events(anotifications, count, 564 + &index, 565 + args->args_mgr_wait. 
566 + utimeout); 567 + } 568 + CP_TO_USR(args->args_mgr_wait.pu_index, &index, status, 1); 569 + return real_status; 570 + } 571 + 572 + /* 573 + * ======== MGRWRAP_GetProcessResourceInfo ======== 574 + */ 575 + u32 __deprecated mgrwrap_get_process_resources_info(union Trapped_Args * args, 576 + void *pr_ctxt) 577 + { 578 + pr_err("%s: deprecated dspbridge ioctl\n", __func__); 579 + return 0; 580 + } 581 + 582 + /* 583 + * ======== procwrap_attach ======== 584 + */ 585 + u32 procwrap_attach(union Trapped_Args *args, void *pr_ctxt) 586 + { 587 + void *processor; 588 + int status = 0; 589 + struct dsp_processorattrin proc_attr_in, *attr_in = NULL; 590 + 591 + /* Optional argument */ 592 + if (args->args_proc_attach.attr_in) { 593 + CP_FM_USR(&proc_attr_in, args->args_proc_attach.attr_in, status, 594 + 1); 595 + if (DSP_SUCCEEDED(status)) 596 + attr_in = &proc_attr_in; 597 + else 598 + goto func_end; 599 + 600 + } 601 + status = proc_attach(args->args_proc_attach.processor_id, attr_in, 602 + &processor, pr_ctxt); 603 + CP_TO_USR(args->args_proc_attach.ph_processor, &processor, status, 1); 604 + func_end: 605 + return status; 606 + } 607 + 608 + /* 609 + * ======== procwrap_ctrl ======== 610 + */ 611 + u32 procwrap_ctrl(union Trapped_Args *args, void *pr_ctxt) 612 + { 613 + u32 cb_data_size, __user * psize = (u32 __user *) 614 + args->args_proc_ctrl.pargs; 615 + u8 *pargs = NULL; 616 + int status = 0; 617 + 618 + if (psize) { 619 + if (get_user(cb_data_size, psize)) { 620 + status = -EPERM; 621 + goto func_end; 622 + } 623 + cb_data_size += sizeof(u32); 624 + pargs = kmalloc(cb_data_size, GFP_KERNEL); 625 + if (pargs == NULL) { 626 + status = -ENOMEM; 627 + goto func_end; 628 + } 629 + 630 + CP_FM_USR(pargs, args->args_proc_ctrl.pargs, status, 631 + cb_data_size); 632 + } 633 + if (DSP_SUCCEEDED(status)) { 634 + status = proc_ctrl(args->args_proc_ctrl.hprocessor, 635 + args->args_proc_ctrl.dw_cmd, 636 + (struct dsp_cbdata *)pargs); 637 + } 638 + 639 + /* 
CP_TO_USR(args->args_proc_ctrl.pargs, pargs, status, 1); */ 640 + kfree(pargs); 641 + func_end: 642 + return status; 643 + } 644 + 645 + /* 646 + * ======== procwrap_detach ======== 647 + */ 648 + u32 __deprecated procwrap_detach(union Trapped_Args * args, void *pr_ctxt) 649 + { 650 + /* proc_detach called at bridge_release only */ 651 + pr_err("%s: deprecated dspbridge ioctl\n", __func__); 652 + return 0; 653 + } 654 + 655 + /* 656 + * ======== procwrap_enum_node_info ======== 657 + */ 658 + u32 procwrap_enum_node_info(union Trapped_Args *args, void *pr_ctxt) 659 + { 660 + int status; 661 + void *node_tab[MAX_NODES]; 662 + u32 num_nodes; 663 + u32 alloc_cnt; 664 + 665 + if (!args->args_proc_enumnode_info.node_tab_size) 666 + return -EINVAL; 667 + 668 + status = proc_enum_nodes(args->args_proc_enumnode_info.hprocessor, 669 + node_tab, 670 + args->args_proc_enumnode_info.node_tab_size, 671 + &num_nodes, &alloc_cnt); 672 + CP_TO_USR(args->args_proc_enumnode_info.node_tab, node_tab, status, 673 + num_nodes); 674 + CP_TO_USR(args->args_proc_enumnode_info.pu_num_nodes, &num_nodes, 675 + status, 1); 676 + CP_TO_USR(args->args_proc_enumnode_info.pu_allocated, &alloc_cnt, 677 + status, 1); 678 + return status; 679 + } 680 + 681 + u32 procwrap_end_dma(union Trapped_Args *args, void *pr_ctxt) 682 + { 683 + int status; 684 + 685 + if (args->args_proc_dma.dir >= DMA_NONE) 686 + return -EINVAL; 687 + 688 + status = proc_end_dma(pr_ctxt, 689 + args->args_proc_dma.pmpu_addr, 690 + args->args_proc_dma.ul_size, 691 + args->args_proc_dma.dir); 692 + return status; 693 + } 694 + 695 + u32 procwrap_begin_dma(union Trapped_Args *args, void *pr_ctxt) 696 + { 697 + int status; 698 + 699 + if (args->args_proc_dma.dir >= DMA_NONE) 700 + return -EINVAL; 701 + 702 + status = proc_begin_dma(pr_ctxt, 703 + args->args_proc_dma.pmpu_addr, 704 + args->args_proc_dma.ul_size, 705 + args->args_proc_dma.dir); 706 + return status; 707 + } 708 + 709 + /* 710 + * ======== procwrap_flush_memory ======== 
 */
/* Flush (and optionally invalidate) cached user memory for DSP use */
u32 procwrap_flush_memory(union Trapped_Args *args, void *pr_ctxt)
{
	int status;

	if (args->args_proc_flushmemory.ul_flags >
	    PROC_WRITEBACK_INVALIDATE_MEM)
		return -EINVAL;

	status = proc_flush_memory(pr_ctxt,
				   args->args_proc_flushmemory.pmpu_addr,
				   args->args_proc_flushmemory.ul_size,
				   args->args_proc_flushmemory.ul_flags);
	return status;
}

/*
 * ======== procwrap_invalidate_memory ========
 * Purpose:
 *      Invalidate cached user memory so the CPU re-reads DSP output.
 */
u32 procwrap_invalidate_memory(union Trapped_Args *args, void *pr_ctxt)
{
	int status;

	status =
	    proc_invalidate_memory(pr_ctxt,
				   args->args_proc_invalidatememory.pmpu_addr,
				   args->args_proc_invalidatememory.ul_size);
	return status;
}

/*
 * ======== procwrap_enum_resources ========
 * Purpose:
 *      Query one resource-info record and copy it to user space.
 */
u32 procwrap_enum_resources(union Trapped_Args *args, void *pr_ctxt)
{
	int status = 0;
	struct dsp_resourceinfo resource_info;

	if (args->args_proc_enumresources.resource_info_size <
	    sizeof(struct dsp_resourceinfo))
		return -EINVAL;

	status =
	    proc_get_resource_info(args->args_proc_enumresources.hprocessor,
				   args->args_proc_enumresources.resource_type,
				   &resource_info,
				   args->args_proc_enumresources.
				   resource_info_size);

	/* CP_TO_USR is a no-op when status already failed */
	CP_TO_USR(args->args_proc_enumresources.resource_info, &resource_info,
		  status, 1);

	return status;

}

/*
 * ======== procwrap_get_state ========
 * Purpose:
 *      Query the processor state and copy it to user space.
 */
u32 procwrap_get_state(union Trapped_Args *args, void *pr_ctxt)
{
	int status;
	struct dsp_processorstate proc_state;

	if (args->args_proc_getstate.state_info_size <
	    sizeof(struct dsp_processorstate))
		return -EINVAL;

	status =
	    proc_get_state(args->args_proc_getstate.hprocessor, &proc_state,
			   args->args_proc_getstate.state_info_size);
	CP_TO_USR(args->args_proc_getstate.proc_state_obj, &proc_state, status,
		  1);
	return status;

}

/*
 * ======== procwrap_get_trace ========
 * Purpose:
 *      Copy up to MAX_TRACEBUFLEN bytes of DSP trace buffer to user
 *      space.  NOTE(review): max_size == 0 passes the bound check and
 *      reaches kzalloc(0) — confirm proc_get_trace tolerates it.
 */
u32 procwrap_get_trace(union Trapped_Args *args, void *pr_ctxt)
{
	int status;
	u8 *pbuf;

	if (args->args_proc_gettrace.max_size > MAX_TRACEBUFLEN)
		return -EINVAL;

	pbuf = kzalloc(args->args_proc_gettrace.max_size, GFP_KERNEL);
	if (pbuf != NULL) {
		status = proc_get_trace(args->args_proc_gettrace.hprocessor,
					pbuf,
					args->args_proc_gettrace.max_size);
	} else {
		status = -ENOMEM;
	}
	CP_TO_USR(args->args_proc_gettrace.pbuf, pbuf, status,
		  args->args_proc_gettrace.max_size);
	kfree(pbuf);

	return status;
}

/*
 * ======== procwrap_load ========
 * Purpose:
 *      Copy argv[] (bounded by MAX_LOADARGS) and the optional envp[]
 *      from user space into kernel buffers and load the DSP program.
 *      All intermediate allocations are released at func_cont.
 */
u32 procwrap_load(union Trapped_Args *args, void *pr_ctxt)
{
	s32 i, len;
	int status = 0;
	char *temp;
	s32 count = args->args_proc_load.argc_index;
	u8 **argv = NULL, **envp = NULL;

	if (count <= 0 || count > MAX_LOADARGS) {
		status = -EINVAL;
		goto func_cont;
	}

	argv = kmalloc(count * sizeof(u8 *), GFP_KERNEL);
	if (!argv) {
		status = -ENOMEM;
		goto func_cont;
	}

	CP_FM_USR(argv, args->args_proc_load.user_args, status, count);
	if (DSP_FAILED(status)) {
		kfree(argv);
		argv = NULL;
		goto func_cont;
	}

	for (i = 0; i < count; i++) {
		if (argv[i]) {
			/* User space pointer to argument */
			temp = (char *)argv[i];
			/* len is increased by 1 to accommodate NULL */
			len = strlen_user((char *)temp) + 1;
			/* Kernel space pointer to argument */
			argv[i] = kmalloc(len, GFP_KERNEL);
			if (argv[i]) {
				CP_FM_USR(argv[i], temp, status, len);
				if (DSP_FAILED(status)) {
					kfree(argv[i]);
					argv[i] = NULL;
					goto func_cont;
				}
			} else {
				status = -ENOMEM;
				goto func_cont;
			}
		}
	}
	/* TODO: validate this */
	if (args->args_proc_load.user_envp) {
		/* number of elements in the envp array including NULL */
		/* NOTE(review): this scan is unbounded (no MAX_LOADARGS
		 * cap) and the get_user() return value is ignored — a bad
		 * user pointer leaves 'temp' stale; needs hardening. */
		count = 0;
		do {
			get_user(temp, args->args_proc_load.user_envp + count);
			count++;
		} while (temp);
		envp = kmalloc(count * sizeof(u8 *), GFP_KERNEL);
		if (!envp) {
			status = -ENOMEM;
			goto func_cont;
		}

		CP_FM_USR(envp, args->args_proc_load.user_envp, status, count);
		if (DSP_FAILED(status)) {
			kfree(envp);
			envp = NULL;
			goto func_cont;
		}
		for (i = 0; envp[i]; i++) {
			/* User space pointer to argument */
			temp = (char *)envp[i];
			/* len is increased by 1 to accommodate NULL */
			len = strlen_user((char *)temp) + 1;
			/* Kernel space pointer to argument */
			envp[i] = kmalloc(len, GFP_KERNEL);
			if (envp[i]) {
				CP_FM_USR(envp[i], temp, status, len);
				if (DSP_FAILED(status)) {
					kfree(envp[i]);
					envp[i] = NULL;
					goto func_cont;
				}
			} else {
				status = -ENOMEM;
				goto func_cont;
			}
		}
	}

	if (DSP_SUCCEEDED(status)) {
		status = proc_load(args->args_proc_load.hprocessor,
				   args->args_proc_load.argc_index,
				   (CONST char **)argv, (CONST char **)envp);
	}
func_cont:
	/* envp[] is NULL-terminated by construction above */
	if (envp) {
		i = 0;
		while (envp[i])
			kfree(envp[i++]);

		kfree(envp);
	}

	/* argv[] may be only partially populated on the error paths */
	if (argv) {
		count = args->args_proc_load.argc_index;
		for (i = 0; (i < count) && argv[i]; i++)
			kfree(argv[i]);

		kfree(argv);
	}

	return status;
}

/*
 * ======== procwrap_map ========
 * Purpose:
 *      Map a user buffer into the DSP address space; on failure to
 *      report the mapped address back to user space, the mapping is
 *      undone so nothing leaks.
 */
u32 procwrap_map(union Trapped_Args *args, void *pr_ctxt)
{
	int status;
	void *map_addr;

	if (!args->args_proc_mapmem.ul_size)
		return -EINVAL;

	status = proc_map(args->args_proc_mapmem.hprocessor,
			  args->args_proc_mapmem.pmpu_addr,
			  args->args_proc_mapmem.ul_size,
			  args->args_proc_mapmem.req_addr, &map_addr,
			  args->args_proc_mapmem.ul_map_attr, pr_ctxt);
	if (DSP_SUCCEEDED(status)) {
		if (put_user(map_addr, args->args_proc_mapmem.pp_map_addr)) {
			status = -EINVAL;
			/* roll back the mapping we can no longer report */
			proc_un_map(args->args_proc_mapmem.hprocessor,
				    map_addr, pr_ctxt);
		}

	}
	return status;
}

/*
 * ======== procwrap_register_notify ========
 * Purpose:
 *      Register for processor event notification and copy the resulting
 *      notification object back to user space.
 */
u32 procwrap_register_notify(union Trapped_Args *args, void *pr_ctxt)
{
	int status;
	struct dsp_notification notification;

	/* Initialize the notification data structure */
	notification.ps_name = NULL;
	notification.handle = NULL;

	status =
	    proc_register_notify(args->args_proc_register_notify.hprocessor,
				 args->args_proc_register_notify.event_mask,
				 args->args_proc_register_notify.notify_type,
				 &notification);
	CP_TO_USR(args->args_proc_register_notify.hnotification, &notification,
		  status, 1);
	return status;
}

/*
 * ======== procwrap_reserve_memory ========
 * Purpose:
 *      Reserve a 4K-aligned chunk of DSP virtual address space.  On
 *      failure to report the address to user space, the reservation is
 *      released.  NOTE(review): ul_size is u32, so 'ul_size <= 0' is
 *      effectively just 'ul_size == 0'.
 */
u32 procwrap_reserve_memory(union Trapped_Args *args, void *pr_ctxt)
{
	int status;
	void *prsv_addr;

	if ((args->args_proc_rsvmem.ul_size <= 0) ||
	    (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0)
		return -EINVAL;

	status = proc_reserve_memory(args->args_proc_rsvmem.hprocessor,
				     args->args_proc_rsvmem.ul_size, &prsv_addr,
				     pr_ctxt);
	if (DSP_SUCCEEDED(status)) {
		if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) {
			status = -EINVAL;
			/* roll back the reservation we cannot report */
			proc_un_reserve_memory(args->args_proc_rsvmem.
					       hprocessor, prsv_addr, pr_ctxt);
		}
	}
	return status;
}

/*
 * ======== procwrap_start ========
 * Purpose:
 *      Start execution of the loaded DSP program.
 */
u32 procwrap_start(union Trapped_Args *args, void *pr_ctxt)
{
	u32 ret;

	ret = proc_start(args->args_proc_start.hprocessor);
	return ret;
}

/*
 * ======== procwrap_un_map ========
 * Purpose:
 *      Remove a mapping created by procwrap_map().
 */
u32 procwrap_un_map(union Trapped_Args *args, void *pr_ctxt)
{
	int status;

	status = proc_un_map(args->args_proc_unmapmem.hprocessor,
			     args->args_proc_unmapmem.map_addr, pr_ctxt);
	return status;
}

/*
 * ======== procwrap_un_reserve_memory ========
 * Purpose:
 *      Release DSP virtual address space reserved by
 *      procwrap_reserve_memory().
 */
u32 procwrap_un_reserve_memory(union Trapped_Args *args, void *pr_ctxt)
{
	int status;

	status = proc_un_reserve_memory(args->args_proc_unrsvmem.hprocessor,
					args->args_proc_unrsvmem.prsv_addr,
					pr_ctxt);
	return status;
}

/*
 * ======== procwrap_stop ========
 * Purpose:
 *      Stop execution of the DSP program.
 */
u32 procwrap_stop(union Trapped_Args *args, void *pr_ctxt)
{
	u32 ret;

	ret = proc_stop(args->args_proc_stop.hprocessor);

	return ret;
}

/*
 * ======== nodewrap_allocate ========
 */
u32 nodewrap_allocate(union Trapped_Args *args, void *pr_ctxt)
{
	int status = 0;
	struct
dsp_uuid node_uuid; 1059 + u32 cb_data_size = 0; 1060 + u32 __user *psize = (u32 __user *) args->args_node_allocate.pargs; 1061 + u8 *pargs = NULL; 1062 + struct dsp_nodeattrin proc_attr_in, *attr_in = NULL; 1063 + struct node_object *hnode; 1064 + 1065 + /* Optional argument */ 1066 + if (psize) { 1067 + if (get_user(cb_data_size, psize)) 1068 + status = -EPERM; 1069 + 1070 + cb_data_size += sizeof(u32); 1071 + if (DSP_SUCCEEDED(status)) { 1072 + pargs = kmalloc(cb_data_size, GFP_KERNEL); 1073 + if (pargs == NULL) 1074 + status = -ENOMEM; 1075 + 1076 + } 1077 + CP_FM_USR(pargs, args->args_node_allocate.pargs, status, 1078 + cb_data_size); 1079 + } 1080 + CP_FM_USR(&node_uuid, args->args_node_allocate.node_id_ptr, status, 1); 1081 + if (DSP_FAILED(status)) 1082 + goto func_cont; 1083 + /* Optional argument */ 1084 + if (args->args_node_allocate.attr_in) { 1085 + CP_FM_USR(&proc_attr_in, args->args_node_allocate.attr_in, 1086 + status, 1); 1087 + if (DSP_SUCCEEDED(status)) 1088 + attr_in = &proc_attr_in; 1089 + else 1090 + status = -ENOMEM; 1091 + 1092 + } 1093 + if (DSP_SUCCEEDED(status)) { 1094 + status = node_allocate(args->args_node_allocate.hprocessor, 1095 + &node_uuid, (struct dsp_cbdata *)pargs, 1096 + attr_in, &hnode, pr_ctxt); 1097 + } 1098 + if (DSP_SUCCEEDED(status)) { 1099 + CP_TO_USR(args->args_node_allocate.ph_node, &hnode, status, 1); 1100 + if (DSP_FAILED(status)) { 1101 + status = -EFAULT; 1102 + node_delete(hnode, pr_ctxt); 1103 + } 1104 + } 1105 + func_cont: 1106 + kfree(pargs); 1107 + 1108 + return status; 1109 + } 1110 + 1111 + /* 1112 + * ======== nodewrap_alloc_msg_buf ======== 1113 + */ 1114 + u32 nodewrap_alloc_msg_buf(union Trapped_Args *args, void *pr_ctxt) 1115 + { 1116 + int status = 0; 1117 + struct dsp_bufferattr *pattr = NULL; 1118 + struct dsp_bufferattr attr; 1119 + u8 *pbuffer = NULL; 1120 + 1121 + if (!args->args_node_allocmsgbuf.usize) 1122 + return -EINVAL; 1123 + 1124 + if (args->args_node_allocmsgbuf.pattr) { /* Optional 
argument */ 1125 + CP_FM_USR(&attr, args->args_node_allocmsgbuf.pattr, status, 1); 1126 + if (DSP_SUCCEEDED(status)) 1127 + pattr = &attr; 1128 + 1129 + } 1130 + /* IN OUT argument */ 1131 + CP_FM_USR(&pbuffer, args->args_node_allocmsgbuf.pbuffer, status, 1); 1132 + if (DSP_SUCCEEDED(status)) { 1133 + status = node_alloc_msg_buf(args->args_node_allocmsgbuf.hnode, 1134 + args->args_node_allocmsgbuf.usize, 1135 + pattr, &pbuffer); 1136 + } 1137 + CP_TO_USR(args->args_node_allocmsgbuf.pbuffer, &pbuffer, status, 1); 1138 + return status; 1139 + } 1140 + 1141 + /* 1142 + * ======== nodewrap_change_priority ======== 1143 + */ 1144 + u32 nodewrap_change_priority(union Trapped_Args *args, void *pr_ctxt) 1145 + { 1146 + u32 ret; 1147 + 1148 + ret = node_change_priority(args->args_node_changepriority.hnode, 1149 + args->args_node_changepriority.prio); 1150 + 1151 + return ret; 1152 + } 1153 + 1154 + /* 1155 + * ======== nodewrap_connect ======== 1156 + */ 1157 + u32 nodewrap_connect(union Trapped_Args *args, void *pr_ctxt) 1158 + { 1159 + int status = 0; 1160 + struct dsp_strmattr attrs; 1161 + struct dsp_strmattr *pattrs = NULL; 1162 + u32 cb_data_size; 1163 + u32 __user *psize = (u32 __user *) args->args_node_connect.conn_param; 1164 + u8 *pargs = NULL; 1165 + 1166 + /* Optional argument */ 1167 + if (psize) { 1168 + if (get_user(cb_data_size, psize)) 1169 + status = -EPERM; 1170 + 1171 + cb_data_size += sizeof(u32); 1172 + if (DSP_SUCCEEDED(status)) { 1173 + pargs = kmalloc(cb_data_size, GFP_KERNEL); 1174 + if (pargs == NULL) { 1175 + status = -ENOMEM; 1176 + goto func_cont; 1177 + } 1178 + 1179 + } 1180 + CP_FM_USR(pargs, args->args_node_connect.conn_param, status, 1181 + cb_data_size); 1182 + if (DSP_FAILED(status)) 1183 + goto func_cont; 1184 + } 1185 + if (args->args_node_connect.pattrs) { /* Optional argument */ 1186 + CP_FM_USR(&attrs, args->args_node_connect.pattrs, status, 1); 1187 + if (DSP_SUCCEEDED(status)) 1188 + pattrs = &attrs; 1189 + 1190 + } 1191 + if 
(DSP_SUCCEEDED(status)) { 1192 + status = node_connect(args->args_node_connect.hnode, 1193 + args->args_node_connect.stream_id, 1194 + args->args_node_connect.other_node, 1195 + args->args_node_connect.other_stream, 1196 + pattrs, (struct dsp_cbdata *)pargs); 1197 + } 1198 + func_cont: 1199 + kfree(pargs); 1200 + 1201 + return status; 1202 + } 1203 + 1204 + /* 1205 + * ======== nodewrap_create ======== 1206 + */ 1207 + u32 nodewrap_create(union Trapped_Args *args, void *pr_ctxt) 1208 + { 1209 + u32 ret; 1210 + 1211 + ret = node_create(args->args_node_create.hnode); 1212 + 1213 + return ret; 1214 + } 1215 + 1216 + /* 1217 + * ======== nodewrap_delete ======== 1218 + */ 1219 + u32 nodewrap_delete(union Trapped_Args *args, void *pr_ctxt) 1220 + { 1221 + u32 ret; 1222 + 1223 + ret = node_delete(args->args_node_delete.hnode, pr_ctxt); 1224 + 1225 + return ret; 1226 + } 1227 + 1228 + /* 1229 + * ======== nodewrap_free_msg_buf ======== 1230 + */ 1231 + u32 nodewrap_free_msg_buf(union Trapped_Args *args, void *pr_ctxt) 1232 + { 1233 + int status = 0; 1234 + struct dsp_bufferattr *pattr = NULL; 1235 + struct dsp_bufferattr attr; 1236 + if (args->args_node_freemsgbuf.pattr) { /* Optional argument */ 1237 + CP_FM_USR(&attr, args->args_node_freemsgbuf.pattr, status, 1); 1238 + if (DSP_SUCCEEDED(status)) 1239 + pattr = &attr; 1240 + 1241 + } 1242 + 1243 + if (!args->args_node_freemsgbuf.pbuffer) 1244 + return -EFAULT; 1245 + 1246 + if (DSP_SUCCEEDED(status)) { 1247 + status = node_free_msg_buf(args->args_node_freemsgbuf.hnode, 1248 + args->args_node_freemsgbuf.pbuffer, 1249 + pattr); 1250 + } 1251 + 1252 + return status; 1253 + } 1254 + 1255 + /* 1256 + * ======== nodewrap_get_attr ======== 1257 + */ 1258 + u32 nodewrap_get_attr(union Trapped_Args *args, void *pr_ctxt) 1259 + { 1260 + int status = 0; 1261 + struct dsp_nodeattr attr; 1262 + 1263 + status = node_get_attr(args->args_node_getattr.hnode, &attr, 1264 + args->args_node_getattr.attr_size); 1265 + 
CP_TO_USR(args->args_node_getattr.pattr, &attr, status, 1); 1266 + 1267 + return status; 1268 + } 1269 + 1270 + /* 1271 + * ======== nodewrap_get_message ======== 1272 + */ 1273 + u32 nodewrap_get_message(union Trapped_Args *args, void *pr_ctxt) 1274 + { 1275 + int status; 1276 + struct dsp_msg msg; 1277 + 1278 + status = node_get_message(args->args_node_getmessage.hnode, &msg, 1279 + args->args_node_getmessage.utimeout); 1280 + 1281 + CP_TO_USR(args->args_node_getmessage.message, &msg, status, 1); 1282 + 1283 + return status; 1284 + } 1285 + 1286 + /* 1287 + * ======== nodewrap_pause ======== 1288 + */ 1289 + u32 nodewrap_pause(union Trapped_Args *args, void *pr_ctxt) 1290 + { 1291 + u32 ret; 1292 + 1293 + ret = node_pause(args->args_node_pause.hnode); 1294 + 1295 + return ret; 1296 + } 1297 + 1298 + /* 1299 + * ======== nodewrap_put_message ======== 1300 + */ 1301 + u32 nodewrap_put_message(union Trapped_Args *args, void *pr_ctxt) 1302 + { 1303 + int status = 0; 1304 + struct dsp_msg msg; 1305 + 1306 + CP_FM_USR(&msg, args->args_node_putmessage.message, status, 1); 1307 + 1308 + if (DSP_SUCCEEDED(status)) { 1309 + status = 1310 + node_put_message(args->args_node_putmessage.hnode, &msg, 1311 + args->args_node_putmessage.utimeout); 1312 + } 1313 + 1314 + return status; 1315 + } 1316 + 1317 + /* 1318 + * ======== nodewrap_register_notify ======== 1319 + */ 1320 + u32 nodewrap_register_notify(union Trapped_Args *args, void *pr_ctxt) 1321 + { 1322 + int status = 0; 1323 + struct dsp_notification notification; 1324 + 1325 + /* Initialize the notification data structure */ 1326 + notification.ps_name = NULL; 1327 + notification.handle = NULL; 1328 + 1329 + if (!args->args_proc_register_notify.event_mask) 1330 + CP_FM_USR(&notification, 1331 + args->args_proc_register_notify.hnotification, 1332 + status, 1); 1333 + 1334 + status = node_register_notify(args->args_node_registernotify.hnode, 1335 + args->args_node_registernotify.event_mask, 1336 + 
args->args_node_registernotify. 1337 + notify_type, &notification); 1338 + CP_TO_USR(args->args_node_registernotify.hnotification, &notification, 1339 + status, 1); 1340 + return status; 1341 + } 1342 + 1343 + /* 1344 + * ======== nodewrap_run ======== 1345 + */ 1346 + u32 nodewrap_run(union Trapped_Args *args, void *pr_ctxt) 1347 + { 1348 + u32 ret; 1349 + 1350 + ret = node_run(args->args_node_run.hnode); 1351 + 1352 + return ret; 1353 + } 1354 + 1355 + /* 1356 + * ======== nodewrap_terminate ======== 1357 + */ 1358 + u32 nodewrap_terminate(union Trapped_Args *args, void *pr_ctxt) 1359 + { 1360 + int status; 1361 + int tempstatus; 1362 + 1363 + status = node_terminate(args->args_node_terminate.hnode, &tempstatus); 1364 + 1365 + CP_TO_USR(args->args_node_terminate.pstatus, &tempstatus, status, 1); 1366 + 1367 + return status; 1368 + } 1369 + 1370 + /* 1371 + * ======== nodewrap_get_uuid_props ======== 1372 + */ 1373 + u32 nodewrap_get_uuid_props(union Trapped_Args *args, void *pr_ctxt) 1374 + { 1375 + int status = 0; 1376 + struct dsp_uuid node_uuid; 1377 + struct dsp_ndbprops *pnode_props = NULL; 1378 + 1379 + CP_FM_USR(&node_uuid, args->args_node_getuuidprops.node_id_ptr, status, 1380 + 1); 1381 + if (DSP_FAILED(status)) 1382 + goto func_cont; 1383 + pnode_props = kmalloc(sizeof(struct dsp_ndbprops), GFP_KERNEL); 1384 + if (pnode_props != NULL) { 1385 + status = 1386 + node_get_uuid_props(args->args_node_getuuidprops.hprocessor, 1387 + &node_uuid, pnode_props); 1388 + CP_TO_USR(args->args_node_getuuidprops.node_props, pnode_props, 1389 + status, 1); 1390 + } else 1391 + status = -ENOMEM; 1392 + func_cont: 1393 + kfree(pnode_props); 1394 + return status; 1395 + } 1396 + 1397 + /* 1398 + * ======== strmwrap_allocate_buffer ======== 1399 + */ 1400 + u32 strmwrap_allocate_buffer(union Trapped_Args *args, void *pr_ctxt) 1401 + { 1402 + int status; 1403 + u8 **ap_buffer = NULL; 1404 + u32 num_bufs = args->args_strm_allocatebuffer.num_bufs; 1405 + 1406 + if (num_bufs > 
MAX_BUFS) 1407 + return -EINVAL; 1408 + 1409 + ap_buffer = kmalloc((num_bufs * sizeof(u8 *)), GFP_KERNEL); 1410 + 1411 + status = strm_allocate_buffer(args->args_strm_allocatebuffer.hstream, 1412 + args->args_strm_allocatebuffer.usize, 1413 + ap_buffer, num_bufs, pr_ctxt); 1414 + if (DSP_SUCCEEDED(status)) { 1415 + CP_TO_USR(args->args_strm_allocatebuffer.ap_buffer, ap_buffer, 1416 + status, num_bufs); 1417 + if (DSP_FAILED(status)) { 1418 + status = -EFAULT; 1419 + strm_free_buffer(args->args_strm_allocatebuffer.hstream, 1420 + ap_buffer, num_bufs, pr_ctxt); 1421 + } 1422 + } 1423 + kfree(ap_buffer); 1424 + 1425 + return status; 1426 + } 1427 + 1428 + /* 1429 + * ======== strmwrap_close ======== 1430 + */ 1431 + u32 strmwrap_close(union Trapped_Args *args, void *pr_ctxt) 1432 + { 1433 + return strm_close(args->args_strm_close.hstream, pr_ctxt); 1434 + } 1435 + 1436 + /* 1437 + * ======== strmwrap_free_buffer ======== 1438 + */ 1439 + u32 strmwrap_free_buffer(union Trapped_Args *args, void *pr_ctxt) 1440 + { 1441 + int status = 0; 1442 + u8 **ap_buffer = NULL; 1443 + u32 num_bufs = args->args_strm_freebuffer.num_bufs; 1444 + 1445 + if (num_bufs > MAX_BUFS) 1446 + return -EINVAL; 1447 + 1448 + ap_buffer = kmalloc((num_bufs * sizeof(u8 *)), GFP_KERNEL); 1449 + 1450 + CP_FM_USR(ap_buffer, args->args_strm_freebuffer.ap_buffer, status, 1451 + num_bufs); 1452 + 1453 + if (DSP_SUCCEEDED(status)) { 1454 + status = strm_free_buffer(args->args_strm_freebuffer.hstream, 1455 + ap_buffer, num_bufs, pr_ctxt); 1456 + } 1457 + CP_TO_USR(args->args_strm_freebuffer.ap_buffer, ap_buffer, status, 1458 + num_bufs); 1459 + kfree(ap_buffer); 1460 + 1461 + return status; 1462 + } 1463 + 1464 + /* 1465 + * ======== strmwrap_get_event_handle ======== 1466 + */ 1467 + u32 __deprecated strmwrap_get_event_handle(union Trapped_Args * args, 1468 + void *pr_ctxt) 1469 + { 1470 + pr_err("%s: deprecated dspbridge ioctl\n", __func__); 1471 + return -ENOSYS; 1472 + } 1473 + 1474 + /* 1475 + * 
======== strmwrap_get_info ======== 1476 + */ 1477 + u32 strmwrap_get_info(union Trapped_Args *args, void *pr_ctxt) 1478 + { 1479 + int status = 0; 1480 + struct stream_info strm_info; 1481 + struct dsp_streaminfo user; 1482 + struct dsp_streaminfo *temp; 1483 + 1484 + CP_FM_USR(&strm_info, args->args_strm_getinfo.stream_info, status, 1); 1485 + temp = strm_info.user_strm; 1486 + 1487 + strm_info.user_strm = &user; 1488 + 1489 + if (DSP_SUCCEEDED(status)) { 1490 + status = strm_get_info(args->args_strm_getinfo.hstream, 1491 + &strm_info, 1492 + args->args_strm_getinfo. 1493 + stream_info_size); 1494 + } 1495 + CP_TO_USR(temp, strm_info.user_strm, status, 1); 1496 + strm_info.user_strm = temp; 1497 + CP_TO_USR(args->args_strm_getinfo.stream_info, &strm_info, status, 1); 1498 + return status; 1499 + } 1500 + 1501 + /* 1502 + * ======== strmwrap_idle ======== 1503 + */ 1504 + u32 strmwrap_idle(union Trapped_Args *args, void *pr_ctxt) 1505 + { 1506 + u32 ret; 1507 + 1508 + ret = strm_idle(args->args_strm_idle.hstream, 1509 + args->args_strm_idle.flush_flag); 1510 + 1511 + return ret; 1512 + } 1513 + 1514 + /* 1515 + * ======== strmwrap_issue ======== 1516 + */ 1517 + u32 strmwrap_issue(union Trapped_Args *args, void *pr_ctxt) 1518 + { 1519 + int status = 0; 1520 + 1521 + if (!args->args_strm_issue.pbuffer) 1522 + return -EFAULT; 1523 + 1524 + /* No need of doing CP_FM_USR for the user buffer (pbuffer) 1525 + as this is done in Bridge internal function bridge_chnl_add_io_req 1526 + in chnl_sm.c */ 1527 + status = strm_issue(args->args_strm_issue.hstream, 1528 + args->args_strm_issue.pbuffer, 1529 + args->args_strm_issue.dw_bytes, 1530 + args->args_strm_issue.dw_buf_size, 1531 + args->args_strm_issue.dw_arg); 1532 + 1533 + return status; 1534 + } 1535 + 1536 + /* 1537 + * ======== strmwrap_open ======== 1538 + */ 1539 + u32 strmwrap_open(union Trapped_Args *args, void *pr_ctxt) 1540 + { 1541 + int status = 0; 1542 + struct strm_attr attr; 1543 + struct strm_object 
*strm_obj; 1544 + struct dsp_streamattrin strm_attr_in; 1545 + 1546 + CP_FM_USR(&attr, args->args_strm_open.attr_in, status, 1); 1547 + 1548 + if (attr.stream_attr_in != NULL) { /* Optional argument */ 1549 + CP_FM_USR(&strm_attr_in, attr.stream_attr_in, status, 1); 1550 + if (DSP_SUCCEEDED(status)) { 1551 + attr.stream_attr_in = &strm_attr_in; 1552 + if (attr.stream_attr_in->strm_mode == STRMMODE_LDMA) 1553 + return -ENOSYS; 1554 + } 1555 + 1556 + } 1557 + status = strm_open(args->args_strm_open.hnode, 1558 + args->args_strm_open.direction, 1559 + args->args_strm_open.index, &attr, &strm_obj, 1560 + pr_ctxt); 1561 + CP_TO_USR(args->args_strm_open.ph_stream, &strm_obj, status, 1); 1562 + return status; 1563 + } 1564 + 1565 + /* 1566 + * ======== strmwrap_reclaim ======== 1567 + */ 1568 + u32 strmwrap_reclaim(union Trapped_Args *args, void *pr_ctxt) 1569 + { 1570 + int status = 0; 1571 + u8 *buf_ptr; 1572 + u32 ul_bytes; 1573 + u32 dw_arg; 1574 + u32 ul_buf_size; 1575 + 1576 + status = strm_reclaim(args->args_strm_reclaim.hstream, &buf_ptr, 1577 + &ul_bytes, &ul_buf_size, &dw_arg); 1578 + CP_TO_USR(args->args_strm_reclaim.buf_ptr, &buf_ptr, status, 1); 1579 + CP_TO_USR(args->args_strm_reclaim.bytes, &ul_bytes, status, 1); 1580 + CP_TO_USR(args->args_strm_reclaim.pdw_arg, &dw_arg, status, 1); 1581 + 1582 + if (args->args_strm_reclaim.buf_size_ptr != NULL) { 1583 + CP_TO_USR(args->args_strm_reclaim.buf_size_ptr, &ul_buf_size, 1584 + status, 1); 1585 + } 1586 + 1587 + return status; 1588 + } 1589 + 1590 + /* 1591 + * ======== strmwrap_register_notify ======== 1592 + */ 1593 + u32 strmwrap_register_notify(union Trapped_Args *args, void *pr_ctxt) 1594 + { 1595 + int status = 0; 1596 + struct dsp_notification notification; 1597 + 1598 + /* Initialize the notification data structure */ 1599 + notification.ps_name = NULL; 1600 + notification.handle = NULL; 1601 + 1602 + status = strm_register_notify(args->args_strm_registernotify.hstream, 1603 + 
args->args_strm_registernotify.event_mask, 1604 + args->args_strm_registernotify. 1605 + notify_type, &notification); 1606 + CP_TO_USR(args->args_strm_registernotify.hnotification, &notification, 1607 + status, 1); 1608 + 1609 + return status; 1610 + } 1611 + 1612 + /* 1613 + * ======== strmwrap_select ======== 1614 + */ 1615 + u32 strmwrap_select(union Trapped_Args *args, void *pr_ctxt) 1616 + { 1617 + u32 mask; 1618 + struct strm_object *strm_tab[MAX_STREAMS]; 1619 + int status = 0; 1620 + 1621 + if (args->args_strm_select.strm_num > MAX_STREAMS) 1622 + return -EINVAL; 1623 + 1624 + CP_FM_USR(strm_tab, args->args_strm_select.stream_tab, status, 1625 + args->args_strm_select.strm_num); 1626 + if (DSP_SUCCEEDED(status)) { 1627 + status = strm_select(strm_tab, args->args_strm_select.strm_num, 1628 + &mask, args->args_strm_select.utimeout); 1629 + } 1630 + CP_TO_USR(args->args_strm_select.pmask, &mask, status, 1); 1631 + return status; 1632 + } 1633 + 1634 + /* CMM */ 1635 + 1636 + /* 1637 + * ======== cmmwrap_calloc_buf ======== 1638 + */ 1639 + u32 __deprecated cmmwrap_calloc_buf(union Trapped_Args * args, void *pr_ctxt) 1640 + { 1641 + /* This operation is done in kernel */ 1642 + pr_err("%s: deprecated dspbridge ioctl\n", __func__); 1643 + return -ENOSYS; 1644 + } 1645 + 1646 + /* 1647 + * ======== cmmwrap_free_buf ======== 1648 + */ 1649 + u32 __deprecated cmmwrap_free_buf(union Trapped_Args * args, void *pr_ctxt) 1650 + { 1651 + /* This operation is done in kernel */ 1652 + pr_err("%s: deprecated dspbridge ioctl\n", __func__); 1653 + return -ENOSYS; 1654 + } 1655 + 1656 + /* 1657 + * ======== cmmwrap_get_handle ======== 1658 + */ 1659 + u32 cmmwrap_get_handle(union Trapped_Args *args, void *pr_ctxt) 1660 + { 1661 + int status = 0; 1662 + struct cmm_object *hcmm_mgr; 1663 + 1664 + status = cmm_get_handle(args->args_cmm_gethandle.hprocessor, &hcmm_mgr); 1665 + 1666 + CP_TO_USR(args->args_cmm_gethandle.ph_cmm_mgr, &hcmm_mgr, status, 1); 1667 + 1668 + return 
status; 1669 + } 1670 + 1671 + /* 1672 + * ======== cmmwrap_get_info ======== 1673 + */ 1674 + u32 cmmwrap_get_info(union Trapped_Args *args, void *pr_ctxt) 1675 + { 1676 + int status = 0; 1677 + struct cmm_info cmm_info_obj; 1678 + 1679 + status = cmm_get_info(args->args_cmm_getinfo.hcmm_mgr, &cmm_info_obj); 1680 + 1681 + CP_TO_USR(args->args_cmm_getinfo.cmm_info_obj, &cmm_info_obj, status, 1682 + 1); 1683 + 1684 + return status; 1685 + }
+142
drivers/staging/tidspbridge/pmgr/io.c
··· 1 + /* 2 + * io.c 3 + * 4 + * DSP-BIOS Bridge driver support functions for TI OMAP processors. 5 + * 6 + * IO manager interface: Manages IO between CHNL and msg_ctrl. 7 + * 8 + * Copyright (C) 2005-2006 Texas Instruments, Inc. 9 + * 10 + * This package is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License version 2 as 12 + * published by the Free Software Foundation. 13 + * 14 + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 15 + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 16 + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 17 + */ 18 + 19 + /* ----------------------------------- Host OS */ 20 + #include <dspbridge/host_os.h> 21 + 22 + /* ----------------------------------- DSP/BIOS Bridge */ 23 + #include <dspbridge/std.h> 24 + #include <dspbridge/dbdefs.h> 25 + 26 + /* ----------------------------------- Trace & Debug */ 27 + #include <dspbridge/dbc.h> 28 + 29 + /* ----------------------------------- OS Adaptation Layer */ 30 + #include <dspbridge/cfg.h> 31 + 32 + /* ----------------------------------- Platform Manager */ 33 + #include <dspbridge/dev.h> 34 + 35 + /* ----------------------------------- This */ 36 + #include <ioobj.h> 37 + #include <dspbridge/iodefs.h> 38 + #include <dspbridge/io.h> 39 + 40 + /* ----------------------------------- Globals */ 41 + static u32 refs; 42 + 43 + /* 44 + * ======== io_create ======== 45 + * Purpose: 46 + * Create an IO manager object, responsible for managing IO between 47 + * CHNL and msg_ctrl 48 + */ 49 + int io_create(OUT struct io_mgr **phIOMgr, struct dev_object *hdev_obj, 50 + IN CONST struct io_attrs *pMgrAttrs) 51 + { 52 + struct bridge_drv_interface *intf_fxns; 53 + struct io_mgr *hio_mgr = NULL; 54 + struct io_mgr_ *pio_mgr = NULL; 55 + int status = 0; 56 + 57 + DBC_REQUIRE(refs > 0); 58 + DBC_REQUIRE(phIOMgr != NULL); 59 + DBC_REQUIRE(pMgrAttrs != NULL); 60 + 61 + *phIOMgr = NULL; 62 + 
63 + /* A memory base of 0 implies no memory base: */ 64 + if ((pMgrAttrs->shm_base != 0) && (pMgrAttrs->usm_length == 0)) 65 + status = -EINVAL; 66 + 67 + if (pMgrAttrs->word_size == 0) 68 + status = -EINVAL; 69 + 70 + if (DSP_SUCCEEDED(status)) { 71 + dev_get_intf_fxns(hdev_obj, &intf_fxns); 72 + 73 + /* Let Bridge channel module finish the create: */ 74 + status = (*intf_fxns->pfn_io_create) (&hio_mgr, hdev_obj, 75 + pMgrAttrs); 76 + 77 + if (DSP_SUCCEEDED(status)) { 78 + pio_mgr = (struct io_mgr_ *)hio_mgr; 79 + pio_mgr->intf_fxns = intf_fxns; 80 + pio_mgr->hdev_obj = hdev_obj; 81 + 82 + /* Return the new channel manager handle: */ 83 + *phIOMgr = hio_mgr; 84 + } 85 + } 86 + 87 + return status; 88 + } 89 + 90 + /* 91 + * ======== io_destroy ======== 92 + * Purpose: 93 + * Delete IO manager. 94 + */ 95 + int io_destroy(struct io_mgr *hio_mgr) 96 + { 97 + struct bridge_drv_interface *intf_fxns; 98 + struct io_mgr_ *pio_mgr = (struct io_mgr_ *)hio_mgr; 99 + int status; 100 + 101 + DBC_REQUIRE(refs > 0); 102 + 103 + intf_fxns = pio_mgr->intf_fxns; 104 + 105 + /* Let Bridge channel module destroy the io_mgr: */ 106 + status = (*intf_fxns->pfn_io_destroy) (hio_mgr); 107 + 108 + return status; 109 + } 110 + 111 + /* 112 + * ======== io_exit ======== 113 + * Purpose: 114 + * Discontinue usage of the IO module. 115 + */ 116 + void io_exit(void) 117 + { 118 + DBC_REQUIRE(refs > 0); 119 + 120 + refs--; 121 + 122 + DBC_ENSURE(refs >= 0); 123 + } 124 + 125 + /* 126 + * ======== io_init ======== 127 + * Purpose: 128 + * Initialize the IO module's private state. 129 + */ 130 + bool io_init(void) 131 + { 132 + bool ret = true; 133 + 134 + DBC_REQUIRE(refs >= 0); 135 + 136 + if (ret) 137 + refs++; 138 + 139 + DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0))); 140 + 141 + return ret; 142 + }
+38
drivers/staging/tidspbridge/pmgr/ioobj.h
··· 1 + /* 2 + * ioobj.h 3 + * 4 + * DSP-BIOS Bridge driver support functions for TI OMAP processors. 5 + * 6 + * Structure subcomponents of channel class library IO objects which 7 + * are exposed to DSP API from Bridge driver. 8 + * 9 + * Copyright (C) 2005-2006 Texas Instruments, Inc. 10 + * 11 + * This package is free software; you can redistribute it and/or modify 12 + * it under the terms of the GNU General Public License version 2 as 13 + * published by the Free Software Foundation. 14 + * 15 + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 16 + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 17 + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 18 + */ 19 + 20 + #ifndef IOOBJ_ 21 + #define IOOBJ_ 22 + 23 + #include <dspbridge/devdefs.h> 24 + #include <dspbridge/dspdefs.h> 25 + 26 + /* 27 + * This struct is the first field in a io_mgr struct. Other, implementation 28 + * specific fields follow this structure in memory. 29 + */ 30 + struct io_mgr_ { 31 + /* These must be the first fields in a io_mgr struct: */ 32 + struct bridge_dev_context *hbridge_context; /* Bridge context. */ 33 + /* Function interface to Bridge driver. */ 34 + struct bridge_drv_interface *intf_fxns; 35 + struct dev_object *hdev_obj; /* Device this board represents. */ 36 + }; 37 + 38 + #endif /* IOOBJ_ */
+129
drivers/staging/tidspbridge/pmgr/msg.c
··· 1 + /* 2 + * msg.c 3 + * 4 + * DSP-BIOS Bridge driver support functions for TI OMAP processors. 5 + * 6 + * DSP/BIOS Bridge msg_ctrl Module. 7 + * 8 + * Copyright (C) 2005-2006 Texas Instruments, Inc. 9 + * 10 + * This package is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License version 2 as 12 + * published by the Free Software Foundation. 13 + * 14 + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 15 + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 16 + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 17 + */ 18 + 19 + /* ----------------------------------- Host OS */ 20 + #include <dspbridge/host_os.h> 21 + 22 + /* ----------------------------------- DSP/BIOS Bridge */ 23 + #include <dspbridge/std.h> 24 + #include <dspbridge/dbdefs.h> 25 + 26 + /* ----------------------------------- Trace & Debug */ 27 + #include <dspbridge/dbc.h> 28 + 29 + /* ----------------------------------- Bridge Driver */ 30 + #include <dspbridge/dspdefs.h> 31 + 32 + /* ----------------------------------- Platform Manager */ 33 + #include <dspbridge/dev.h> 34 + 35 + /* ----------------------------------- This */ 36 + #include <msgobj.h> 37 + #include <dspbridge/msg.h> 38 + 39 + /* ----------------------------------- Globals */ 40 + static u32 refs; /* module reference count */ 41 + 42 + /* 43 + * ======== msg_create ======== 44 + * Purpose: 45 + * Create an object to manage message queues. Only one of these objects 46 + * can exist per device object. 
47 + */ 48 + int msg_create(OUT struct msg_mgr **phMsgMgr, 49 + struct dev_object *hdev_obj, msg_onexit msgCallback) 50 + { 51 + struct bridge_drv_interface *intf_fxns; 52 + struct msg_mgr_ *msg_mgr_obj; 53 + struct msg_mgr *hmsg_mgr; 54 + int status = 0; 55 + 56 + DBC_REQUIRE(refs > 0); 57 + DBC_REQUIRE(phMsgMgr != NULL); 58 + DBC_REQUIRE(msgCallback != NULL); 59 + DBC_REQUIRE(hdev_obj != NULL); 60 + 61 + *phMsgMgr = NULL; 62 + 63 + dev_get_intf_fxns(hdev_obj, &intf_fxns); 64 + 65 + /* Let Bridge message module finish the create: */ 66 + status = 67 + (*intf_fxns->pfn_msg_create) (&hmsg_mgr, hdev_obj, msgCallback); 68 + 69 + if (DSP_SUCCEEDED(status)) { 70 + /* Fill in DSP API message module's fields of the msg_mgr 71 + * structure */ 72 + msg_mgr_obj = (struct msg_mgr_ *)hmsg_mgr; 73 + msg_mgr_obj->intf_fxns = intf_fxns; 74 + 75 + /* Finally, return the new message manager handle: */ 76 + *phMsgMgr = hmsg_mgr; 77 + } else { 78 + status = -EPERM; 79 + } 80 + return status; 81 + } 82 + 83 + /* 84 + * ======== msg_delete ======== 85 + * Purpose: 86 + * Delete a msg_ctrl manager allocated in msg_create(). 
87 + */ 88 + void msg_delete(struct msg_mgr *hmsg_mgr) 89 + { 90 + struct msg_mgr_ *msg_mgr_obj = (struct msg_mgr_ *)hmsg_mgr; 91 + struct bridge_drv_interface *intf_fxns; 92 + 93 + DBC_REQUIRE(refs > 0); 94 + 95 + if (msg_mgr_obj) { 96 + intf_fxns = msg_mgr_obj->intf_fxns; 97 + 98 + /* Let Bridge message module destroy the msg_mgr: */ 99 + (*intf_fxns->pfn_msg_delete) (hmsg_mgr); 100 + } else { 101 + dev_dbg(bridge, "%s: Error hmsg_mgr handle: %p\n", 102 + __func__, hmsg_mgr); 103 + } 104 + } 105 + 106 + /* 107 + * ======== msg_exit ======== 108 + */ 109 + void msg_exit(void) 110 + { 111 + DBC_REQUIRE(refs > 0); 112 + refs--; 113 + 114 + DBC_ENSURE(refs >= 0); 115 + } 116 + 117 + /* 118 + * ======== msg_mod_init ======== 119 + */ 120 + bool msg_mod_init(void) 121 + { 122 + DBC_REQUIRE(refs >= 0); 123 + 124 + refs++; 125 + 126 + DBC_ENSURE(refs >= 0); 127 + 128 + return true; 129 + }
+38
drivers/staging/tidspbridge/pmgr/msgobj.h
··· 1 + /* 2 + * msgobj.h 3 + * 4 + * DSP-BIOS Bridge driver support functions for TI OMAP processors. 5 + * 6 + * Structure subcomponents of channel class library msg_ctrl objects which 7 + * are exposed to DSP API from Bridge driver. 8 + * 9 + * Copyright (C) 2005-2006 Texas Instruments, Inc. 10 + * 11 + * This package is free software; you can redistribute it and/or modify 12 + * it under the terms of the GNU General Public License version 2 as 13 + * published by the Free Software Foundation. 14 + * 15 + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 16 + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 17 + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 18 + */ 19 + 20 + #ifndef MSGOBJ_ 21 + #define MSGOBJ_ 22 + 23 + #include <dspbridge/dspdefs.h> 24 + 25 + #include <dspbridge/msgdefs.h> 26 + 27 + /* 28 + * This struct is the first field in a msg_mgr struct. Other, implementation 29 + * specific fields follow this structure in memory. 30 + */ 31 + struct msg_mgr_ { 32 + /* The first field must match that in _msg_sm.h */ 33 + 34 + /* Function interface to Bridge driver. */ 35 + struct bridge_drv_interface *intf_fxns; 36 + }; 37 + 38 + #endif /* MSGOBJ_ */