Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Revert "staging: tidspbridge - move all iommu related code to a new file"

This reverts commit f94378f9f9a897fc08e9d12733401ae52466e408.

Signed-off-by: Felipe Contreras <felipe.contreras@gmail.com>
Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>

Authored by Felipe Contreras and committed by Omar Ramirez Luna
f5bd96bb 9d4f81a7

+289 -391
+1 -1
drivers/staging/tidspbridge/Makefile
··· 2 2 3 3 libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o 4 4 libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \ 5 - core/tiomap3430_pwr.o core/tiomap_io.o core/dsp-mmu.o \ 5 + core/tiomap3430_pwr.o core/tiomap_io.o \ 6 6 core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o 7 7 libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \ 8 8 pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o
+3
drivers/staging/tidspbridge/core/_deh.h
··· 27 27 struct deh_mgr { 28 28 struct bridge_dev_context *hbridge_context; /* Bridge context. */ 29 29 struct ntfy_object *ntfy_obj; /* NTFY object */ 30 + 31 + /* MMU Fault DPC */ 32 + struct tasklet_struct dpc_tasklet; 30 33 }; 31 34 32 35 int mmu_fault_isr(struct iommu *mmu);
+26 -1
drivers/staging/tidspbridge/core/_tiomap.h
··· 23 23 #include <plat/clockdomain.h> 24 24 #include <mach-omap2/prm-regbits-34xx.h> 25 25 #include <mach-omap2/cm-regbits-34xx.h> 26 - #include <dspbridge/dsp-mmu.h> 26 + #include <plat/iommu.h> 27 + #include <plat/iovmm.h> 27 28 #include <dspbridge/devdefs.h> 28 29 #include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */ 29 30 #include <dspbridge/sync.h> ··· 379 378 * Ensures: 380 379 */ 381 380 int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val); 381 + 382 + /** 383 + * user_to_dsp_map() - maps user to dsp virtual address 384 + * @mmu: Pointer to iommu handle. 385 + * @uva: Virtual user space address. 386 + * @da DSP address 387 + * @size Buffer size to map. 388 + * @usr_pgs struct page array pointer where the user pages will be stored 389 + * 390 + * This function maps a user space buffer into DSP virtual address. 391 + * 392 + */ 393 + u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size, 394 + struct page **usr_pgs); 395 + 396 + /** 397 + * user_to_dsp_unmap() - unmaps DSP virtual buffer. 398 + * @mmu: Pointer to iommu handle. 399 + * @da DSP address 400 + * 401 + * This function unmaps a user space buffer into DSP virtual address. 402 + * 403 + */ 404 + int user_to_dsp_unmap(struct iommu *mmu, u32 da); 382 405 383 406 #endif /* _TIOMAP_ */
-317
drivers/staging/tidspbridge/core/dsp-mmu.c
··· 1 - /* 2 - * dsp-mmu.c 3 - * 4 - * DSP-BIOS Bridge driver support functions for TI OMAP processors. 5 - * 6 - * DSP iommu. 7 - * 8 - * Copyright (C) 2010 Texas Instruments, Inc. 9 - * 10 - * This package is free software; you can redistribute it and/or modify 11 - * it under the terms of the GNU General Public License version 2 as 12 - * published by the Free Software Foundation. 13 - * 14 - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 15 - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 16 - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 17 - */ 18 - 19 - #include <dspbridge/host_os.h> 20 - #include <plat/dmtimer.h> 21 - #include <dspbridge/dbdefs.h> 22 - #include <dspbridge/dev.h> 23 - #include <dspbridge/io_sm.h> 24 - #include <dspbridge/dspdeh.h> 25 - #include "_tiomap.h" 26 - 27 - #include <dspbridge/dsp-mmu.h> 28 - 29 - #define MMU_CNTL_TWL_EN (1 << 2) 30 - 31 - static struct tasklet_struct mmu_tasklet; 32 - 33 - #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE 34 - static void mmu_fault_print_stack(struct bridge_dev_context *dev_context) 35 - { 36 - void *dummy_addr; 37 - u32 fa, tmp; 38 - struct iotlb_entry e; 39 - struct iommu *mmu = dev_context->dsp_mmu; 40 - dummy_addr = (void *)__get_free_page(GFP_ATOMIC); 41 - 42 - /* 43 - * Before acking the MMU fault, let's make sure MMU can only 44 - * access entry #0. Then add a new entry so that the DSP OS 45 - * can continue in order to dump the stack. 
46 - */ 47 - tmp = iommu_read_reg(mmu, MMU_CNTL); 48 - tmp &= ~MMU_CNTL_TWL_EN; 49 - iommu_write_reg(mmu, tmp, MMU_CNTL); 50 - fa = iommu_read_reg(mmu, MMU_FAULT_AD); 51 - e.da = fa & PAGE_MASK; 52 - e.pa = virt_to_phys(dummy_addr); 53 - e.valid = 1; 54 - e.prsvd = 1; 55 - e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK; 56 - e.endian = MMU_RAM_ENDIAN_LITTLE; 57 - e.elsz = MMU_RAM_ELSZ_32; 58 - e.mixed = 0; 59 - 60 - load_iotlb_entry(mmu, &e); 61 - 62 - dsp_clk_enable(DSP_CLK_GPT8); 63 - 64 - dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); 65 - 66 - /* Clear MMU interrupt */ 67 - tmp = iommu_read_reg(mmu, MMU_IRQSTATUS); 68 - iommu_write_reg(mmu, tmp, MMU_IRQSTATUS); 69 - 70 - dump_dsp_stack(dev_context); 71 - dsp_clk_disable(DSP_CLK_GPT8); 72 - 73 - iopgtable_clear_entry(mmu, fa); 74 - free_page((unsigned long)dummy_addr); 75 - } 76 - #endif 77 - 78 - 79 - static void fault_tasklet(unsigned long data) 80 - { 81 - struct iommu *mmu = (struct iommu *)data; 82 - struct bridge_dev_context *dev_ctx; 83 - struct deh_mgr *dm; 84 - u32 fa; 85 - dev_get_deh_mgr(dev_get_first(), &dm); 86 - dev_get_bridge_context(dev_get_first(), &dev_ctx); 87 - 88 - if (!dm || !dev_ctx) 89 - return; 90 - 91 - fa = iommu_read_reg(mmu, MMU_FAULT_AD); 92 - 93 - #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE 94 - print_dsp_trace_buffer(dev_ctx); 95 - dump_dl_modules(dev_ctx); 96 - mmu_fault_print_stack(dev_ctx); 97 - #endif 98 - 99 - bridge_deh_notify(dm, DSP_MMUFAULT, fa); 100 - } 101 - 102 - /* 103 - * ======== mmu_fault_isr ======== 104 - * ISR to be triggered by a DSP MMU fault interrupt. 
105 - */ 106 - static int mmu_fault_callback(struct iommu *mmu) 107 - { 108 - if (!mmu) 109 - return -EPERM; 110 - 111 - iommu_write_reg(mmu, 0, MMU_IRQENABLE); 112 - tasklet_schedule(&mmu_tasklet); 113 - return 0; 114 - } 115 - 116 - /** 117 - * dsp_mmu_init() - initialize dsp_mmu module and returns a handle 118 - * 119 - * This function initialize dsp mmu module and returns a struct iommu 120 - * handle to use it for dsp maps. 121 - * 122 - */ 123 - struct iommu *dsp_mmu_init() 124 - { 125 - struct iommu *mmu; 126 - 127 - mmu = iommu_get("iva2"); 128 - 129 - if (!IS_ERR(mmu)) { 130 - tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu); 131 - mmu->isr = mmu_fault_callback; 132 - } 133 - 134 - return mmu; 135 - } 136 - 137 - /** 138 - * dsp_mmu_exit() - destroy dsp mmu module 139 - * @mmu: Pointer to iommu handle. 140 - * 141 - * This function destroys dsp mmu module. 142 - * 143 - */ 144 - void dsp_mmu_exit(struct iommu *mmu) 145 - { 146 - if (mmu) 147 - iommu_put(mmu); 148 - tasklet_kill(&mmu_tasklet); 149 - } 150 - 151 - /** 152 - * user_va2_pa() - get physical address from userspace address. 153 - * @mm: mm_struct Pointer of the process. 154 - * @address: Virtual user space address. 155 - * 156 - */ 157 - static u32 user_va2_pa(struct mm_struct *mm, u32 address) 158 - { 159 - pgd_t *pgd; 160 - pmd_t *pmd; 161 - pte_t *ptep, pte; 162 - 163 - pgd = pgd_offset(mm, address); 164 - if (!(pgd_none(*pgd) || pgd_bad(*pgd))) { 165 - pmd = pmd_offset(pgd, address); 166 - if (!(pmd_none(*pmd) || pmd_bad(*pmd))) { 167 - ptep = pte_offset_map(pmd, address); 168 - if (ptep) { 169 - pte = *ptep; 170 - if (pte_present(pte)) 171 - return pte & PAGE_MASK; 172 - } 173 - } 174 - } 175 - 176 - return 0; 177 - } 178 - 179 - /** 180 - * get_io_pages() - pin and get pages of io user's buffer. 181 - * @mm: mm_struct Pointer of the process. 182 - * @uva: Virtual user space address. 183 - * @pages Pages to be pined. 
184 - * @usr_pgs struct page array pointer where the user pages will be stored 185 - * 186 - */ 187 - static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages, 188 - struct page **usr_pgs) 189 - { 190 - u32 pa; 191 - int i; 192 - struct page *pg; 193 - 194 - for (i = 0; i < pages; i++) { 195 - pa = user_va2_pa(mm, uva); 196 - 197 - if (!pfn_valid(__phys_to_pfn(pa))) 198 - break; 199 - 200 - pg = phys_to_page(pa); 201 - usr_pgs[i] = pg; 202 - get_page(pg); 203 - } 204 - return i; 205 - } 206 - 207 - /** 208 - * user_to_dsp_map() - maps user to dsp virtual address 209 - * @mmu: Pointer to iommu handle. 210 - * @uva: Virtual user space address. 211 - * @da DSP address 212 - * @size Buffer size to map. 213 - * @usr_pgs struct page array pointer where the user pages will be stored 214 - * 215 - * This function maps a user space buffer into DSP virtual address. 216 - * 217 - */ 218 - u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size, 219 - struct page **usr_pgs) 220 - { 221 - int res, w; 222 - unsigned pages; 223 - int i; 224 - struct vm_area_struct *vma; 225 - struct mm_struct *mm = current->mm; 226 - struct sg_table *sgt; 227 - struct scatterlist *sg; 228 - 229 - if (!size || !usr_pgs) 230 - return -EINVAL; 231 - 232 - pages = size / PG_SIZE4K; 233 - 234 - down_read(&mm->mmap_sem); 235 - vma = find_vma(mm, uva); 236 - while (vma && (uva + size > vma->vm_end)) 237 - vma = find_vma(mm, vma->vm_end + 1); 238 - 239 - if (!vma) { 240 - pr_err("%s: Failed to get VMA region for 0x%x (%d)\n", 241 - __func__, uva, size); 242 - up_read(&mm->mmap_sem); 243 - return -EINVAL; 244 - } 245 - if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) 246 - w = 1; 247 - 248 - if (vma->vm_flags & VM_IO) 249 - i = get_io_pages(mm, uva, pages, usr_pgs); 250 - else 251 - i = get_user_pages(current, mm, uva, pages, w, 1, 252 - usr_pgs, NULL); 253 - up_read(&mm->mmap_sem); 254 - 255 - if (i < 0) 256 - return i; 257 - 258 - if (i < pages) { 259 - res = -EFAULT; 260 - goto 
err_pages; 261 - } 262 - 263 - sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 264 - if (!sgt) { 265 - res = -ENOMEM; 266 - goto err_pages; 267 - } 268 - 269 - res = sg_alloc_table(sgt, pages, GFP_KERNEL); 270 - 271 - if (res < 0) 272 - goto err_sg; 273 - 274 - for_each_sg(sgt->sgl, sg, sgt->nents, i) 275 - sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0); 276 - 277 - da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); 278 - 279 - if (!IS_ERR_VALUE(da)) 280 - return da; 281 - res = (int)da; 282 - 283 - sg_free_table(sgt); 284 - err_sg: 285 - kfree(sgt); 286 - i = pages; 287 - err_pages: 288 - while (i--) 289 - put_page(usr_pgs[i]); 290 - return res; 291 - } 292 - 293 - /** 294 - * user_to_dsp_unmap() - unmaps DSP virtual buffer. 295 - * @mmu: Pointer to iommu handle. 296 - * @da DSP address 297 - * 298 - * This function unmaps a user space buffer into DSP virtual address. 299 - * 300 - */ 301 - int user_to_dsp_unmap(struct iommu *mmu, u32 da) 302 - { 303 - unsigned i; 304 - struct sg_table *sgt; 305 - struct scatterlist *sg; 306 - 307 - sgt = iommu_vunmap(mmu, da); 308 - if (!sgt) 309 - return -EFAULT; 310 - 311 - for_each_sg(sgt->sgl, sg, sgt->nents, i) 312 - put_page(sg_page(sg)); 313 - sg_free_table(sgt); 314 - kfree(sgt); 315 - 316 - return 0; 317 - }
+174 -4
drivers/staging/tidspbridge/core/tiomap3430.c
··· 53 53 #include "_tiomap.h" 54 54 #include "_tiomap_pwr.h" 55 55 #include "tiomap_io.h" 56 + #include "_deh.h" 56 57 57 58 /* Offset in shared mem to write to in order to synchronize start with DSP */ 58 59 #define SHMSYNCOFFSET 4 /* GPP byte offset */ ··· 68 67 #define MMU_SMALL_PAGE_MASK 0xFFFFF000 69 68 #define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00 70 69 #define PAGES_II_LVL_TABLE 512 70 + #define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT) 71 71 72 72 /* 73 73 * This is a totally ugly layer violation, but needed until ··· 366 364 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); 367 365 mmu = dev_context->dsp_mmu; 368 366 if (mmu) 369 - dsp_mmu_exit(mmu); 370 - mmu = dsp_mmu_init(); 367 + iommu_put(mmu); 368 + mmu = iommu_get("iva2"); 371 369 if (IS_ERR(mmu)) { 372 - dev_err(bridge, "dsp_mmu_init failed!\n"); 370 + dev_err(bridge, "iommu_get failed!\n"); 373 371 dev_context->dsp_mmu = NULL; 374 372 status = (int)mmu; 375 373 } 376 374 } 377 375 if (!status) { 378 376 dev_context->dsp_mmu = mmu; 377 + mmu->isr = mmu_fault_isr; 379 378 sm_sg = &dev_context->sh_s; 380 379 sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa, 381 380 sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); ··· 632 629 } 633 630 iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da); 634 631 iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da); 635 - dsp_mmu_exit(dev_context->dsp_mmu); 632 + iommu_put(dev_context->dsp_mmu); 636 633 dev_context->dsp_mmu = NULL; 637 634 } 638 635 /* Reset IVA IOMMU*/ ··· 944 941 host_buff = host_buff + ul_bytes; 945 942 } 946 943 return status; 944 + } 945 + 946 + /* 947 + * ======== user_va2_pa ======== 948 + * Purpose: 949 + * This function walks through the page tables to convert a userland 950 + * virtual address to physical address 951 + */ 952 + static u32 user_va2_pa(struct mm_struct *mm, u32 address) 953 + { 954 + pgd_t *pgd; 955 + pmd_t *pmd; 956 + pte_t *ptep, pte; 957 + 958 + pgd = pgd_offset(mm, address); 959 + if 
(!(pgd_none(*pgd) || pgd_bad(*pgd))) { 960 + pmd = pmd_offset(pgd, address); 961 + if (!(pmd_none(*pmd) || pmd_bad(*pmd))) { 962 + ptep = pte_offset_map(pmd, address); 963 + if (ptep) { 964 + pte = *ptep; 965 + if (pte_present(pte)) 966 + return pte & PAGE_MASK; 967 + } 968 + } 969 + } 970 + 971 + return 0; 972 + } 973 + 974 + /** 975 + * get_io_pages() - pin and get pages of io user's buffer. 976 + * @mm: mm_struct Pointer of the process. 977 + * @uva: Virtual user space address. 978 + * @pages Pages to be pined. 979 + * @usr_pgs struct page array pointer where the user pages will be stored 980 + * 981 + */ 982 + static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages, 983 + struct page **usr_pgs) 984 + { 985 + u32 pa; 986 + int i; 987 + struct page *pg; 988 + 989 + for (i = 0; i < pages; i++) { 990 + pa = user_va2_pa(mm, uva); 991 + 992 + if (!pfn_valid(__phys_to_pfn(pa))) 993 + break; 994 + 995 + pg = PHYS_TO_PAGE(pa); 996 + usr_pgs[i] = pg; 997 + get_page(pg); 998 + } 999 + return i; 1000 + } 1001 + 1002 + /** 1003 + * user_to_dsp_map() - maps user to dsp virtual address 1004 + * @mmu: Pointer to iommu handle. 1005 + * @uva: Virtual user space address. 1006 + * @da DSP address 1007 + * @size Buffer size to map. 1008 + * @usr_pgs struct page array pointer where the user pages will be stored 1009 + * 1010 + * This function maps a user space buffer into DSP virtual address. 
1011 + * 1012 + */ 1013 + u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size, 1014 + struct page **usr_pgs) 1015 + { 1016 + int res, w; 1017 + unsigned pages, i; 1018 + struct vm_area_struct *vma; 1019 + struct mm_struct *mm = current->mm; 1020 + struct sg_table *sgt; 1021 + struct scatterlist *sg; 1022 + 1023 + if (!size || !usr_pgs) 1024 + return -EINVAL; 1025 + 1026 + pages = size / PG_SIZE4K; 1027 + 1028 + down_read(&mm->mmap_sem); 1029 + vma = find_vma(mm, uva); 1030 + while (vma && (uva + size > vma->vm_end)) 1031 + vma = find_vma(mm, vma->vm_end + 1); 1032 + 1033 + if (!vma) { 1034 + pr_err("%s: Failed to get VMA region for 0x%x (%d)\n", 1035 + __func__, uva, size); 1036 + up_read(&mm->mmap_sem); 1037 + return -EINVAL; 1038 + } 1039 + if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) 1040 + w = 1; 1041 + 1042 + if (vma->vm_flags & VM_IO) 1043 + i = get_io_pages(mm, uva, pages, usr_pgs); 1044 + else 1045 + i = get_user_pages(current, mm, uva, pages, w, 1, 1046 + usr_pgs, NULL); 1047 + up_read(&mm->mmap_sem); 1048 + 1049 + if (i < 0) 1050 + return i; 1051 + 1052 + if (i < pages) { 1053 + res = -EFAULT; 1054 + goto err_pages; 1055 + } 1056 + 1057 + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 1058 + if (!sgt) { 1059 + res = -ENOMEM; 1060 + goto err_pages; 1061 + } 1062 + 1063 + res = sg_alloc_table(sgt, pages, GFP_KERNEL); 1064 + 1065 + if (res < 0) 1066 + goto err_sg; 1067 + 1068 + for_each_sg(sgt->sgl, sg, sgt->nents, i) 1069 + sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0); 1070 + 1071 + da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); 1072 + 1073 + if (!IS_ERR_VALUE(da)) 1074 + return da; 1075 + res = (int)da; 1076 + 1077 + sg_free_table(sgt); 1078 + err_sg: 1079 + kfree(sgt); 1080 + i = pages; 1081 + err_pages: 1082 + while (i--) 1083 + put_page(usr_pgs[i]); 1084 + return res; 1085 + } 1086 + 1087 + /** 1088 + * user_to_dsp_unmap() - unmaps DSP virtual buffer. 1089 + * @mmu: Pointer to iommu handle. 
1090 + * @da DSP address 1091 + * 1092 + * This function unmaps a user space buffer into DSP virtual address. 1093 + * 1094 + */ 1095 + int user_to_dsp_unmap(struct iommu *mmu, u32 da) 1096 + { 1097 + unsigned i; 1098 + struct sg_table *sgt; 1099 + struct scatterlist *sg; 1100 + 1101 + sgt = iommu_vunmap(mmu, da); 1102 + if (!sgt) 1103 + return -EFAULT; 1104 + 1105 + for_each_sg(sgt->sgl, sg, sgt->nents, i) 1106 + put_page(sg_page(sg)); 1107 + sg_free_table(sgt); 1108 + kfree(sgt); 1109 + 1110 + return 0; 947 1111 } 948 1112 949 1113 /*
+85 -1
drivers/staging/tidspbridge/core/ue_deh.c
··· 31 31 #include <dspbridge/drv.h> 32 32 #include <dspbridge/wdt.h> 33 33 34 + #define MMU_CNTL_TWL_EN (1 << 2) 35 + 36 + static void mmu_fault_dpc(unsigned long data) 37 + { 38 + struct deh_mgr *deh = (void *)data; 39 + 40 + if (!deh) 41 + return; 42 + 43 + bridge_deh_notify(deh, DSP_MMUFAULT, 0); 44 + } 45 + 46 + int mmu_fault_isr(struct iommu *mmu) 47 + { 48 + struct deh_mgr *dm; 49 + 50 + dev_get_deh_mgr(dev_get_first(), &dm); 51 + 52 + if (!dm) 53 + return -EPERM; 54 + 55 + iommu_write_reg(mmu, 0, MMU_IRQENABLE); 56 + tasklet_schedule(&dm->dpc_tasklet); 57 + return 0; 58 + } 59 + 34 60 int bridge_deh_create(struct deh_mgr **ret_deh, 35 61 struct dev_object *hdev_obj) 36 62 { ··· 84 58 } 85 59 ntfy_init(deh->ntfy_obj); 86 60 61 + /* Create a MMUfault DPC */ 62 + tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh); 63 + 87 64 /* Fill in context structure */ 88 65 deh->hbridge_context = hbridge_context; 89 66 ··· 110 81 kfree(deh->ntfy_obj); 111 82 } 112 83 84 + /* Free DPC object */ 85 + tasklet_kill(&deh->dpc_tasklet); 86 + 113 87 /* Deallocate the DEH manager object */ 114 88 kfree(deh); 115 89 ··· 133 101 return ntfy_unregister(deh->ntfy_obj, hnotification); 134 102 } 135 103 104 + #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE 105 + static void mmu_fault_print_stack(struct bridge_dev_context *dev_context) 106 + { 107 + void *dummy_addr; 108 + u32 fa, tmp; 109 + struct iotlb_entry e; 110 + struct iommu *mmu = dev_context->dsp_mmu; 111 + dummy_addr = (void *)__get_free_page(GFP_ATOMIC); 112 + 113 + /* 114 + * Before acking the MMU fault, let's make sure MMU can only 115 + * access entry #0. Then add a new entry so that the DSP OS 116 + * can continue in order to dump the stack. 
117 + */ 118 + tmp = iommu_read_reg(mmu, MMU_CNTL); 119 + tmp &= ~MMU_CNTL_TWL_EN; 120 + iommu_write_reg(mmu, tmp, MMU_CNTL); 121 + fa = iommu_read_reg(mmu, MMU_FAULT_AD); 122 + e.da = fa & PAGE_MASK; 123 + e.pa = virt_to_phys(dummy_addr); 124 + e.valid = 1; 125 + e.prsvd = 1; 126 + e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK; 127 + e.endian = MMU_RAM_ENDIAN_LITTLE; 128 + e.elsz = MMU_RAM_ELSZ_32; 129 + e.mixed = 0; 130 + 131 + load_iotlb_entry(dev_context->dsp_mmu, &e); 132 + 133 + dsp_clk_enable(DSP_CLK_GPT8); 134 + 135 + dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); 136 + 137 + /* Clear MMU interrupt */ 138 + tmp = iommu_read_reg(mmu, MMU_IRQSTATUS); 139 + iommu_write_reg(mmu, tmp, MMU_IRQSTATUS); 140 + 141 + dump_dsp_stack(dev_context); 142 + dsp_clk_disable(DSP_CLK_GPT8); 143 + 144 + iopgtable_clear_entry(mmu, fa); 145 + free_page((unsigned long)dummy_addr); 146 + } 147 + #endif 148 + 136 149 static inline const char *event_to_string(int event) 137 150 { 138 151 switch (event) { ··· 193 116 { 194 117 struct bridge_dev_context *dev_context; 195 118 const char *str = event_to_string(event); 119 + u32 fa; 196 120 197 121 if (!deh) 198 122 return; ··· 211 133 #endif 212 134 break; 213 135 case DSP_MMUFAULT: 214 - dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, info); 136 + fa = iommu_read_reg(dev_context->dsp_mmu, MMU_FAULT_AD); 137 + dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, fa); 138 + #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE 139 + print_dsp_trace_buffer(dev_context); 140 + dump_dl_modules(dev_context); 141 + mmu_fault_print_stack(dev_context); 142 + #endif 215 143 break; 216 144 default: 217 145 dev_err(bridge, "%s: %s", __func__, str);
-67
drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h
··· 1 - /* 2 - * dsp-mmu.h 3 - * 4 - * DSP-BIOS Bridge driver support functions for TI OMAP processors. 5 - * 6 - * DSP iommu. 7 - * 8 - * Copyright (C) 2005-2010 Texas Instruments, Inc. 9 - * 10 - * This package is free software; you can redistribute it and/or modify 11 - * it under the terms of the GNU General Public License version 2 as 12 - * published by the Free Software Foundation. 13 - * 14 - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 15 - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 16 - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 17 - */ 18 - 19 - #ifndef _DSP_MMU_ 20 - #define _DSP_MMU_ 21 - 22 - #include <plat/iommu.h> 23 - #include <plat/iovmm.h> 24 - 25 - /** 26 - * dsp_mmu_init() - initialize dsp_mmu module and returns a handle 27 - * 28 - * This function initialize dsp mmu module and returns a struct iommu 29 - * handle to use it for dsp maps. 30 - * 31 - */ 32 - struct iommu *dsp_mmu_init(void); 33 - 34 - /** 35 - * dsp_mmu_exit() - destroy dsp mmu module 36 - * @mmu: Pointer to iommu handle. 37 - * 38 - * This function destroys dsp mmu module. 39 - * 40 - */ 41 - void dsp_mmu_exit(struct iommu *mmu); 42 - 43 - /** 44 - * user_to_dsp_map() - maps user to dsp virtual address 45 - * @mmu: Pointer to iommu handle. 46 - * @uva: Virtual user space address. 47 - * @da DSP address 48 - * @size Buffer size to map. 49 - * @usr_pgs struct page array pointer where the user pages will be stored 50 - * 51 - * This function maps a user space buffer into DSP virtual address. 52 - * 53 - */ 54 - u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size, 55 - struct page **usr_pgs); 56 - 57 - /** 58 - * user_to_dsp_unmap() - unmaps DSP virtual buffer. 59 - * @mmu: Pointer to iommu handle. 60 - * @da DSP address 61 - * 62 - * This function unmaps a user space buffer into DSP virtual address. 63 - * 64 - */ 65 - int user_to_dsp_unmap(struct iommu *mmu, u32 da); 66 - 67 - #endif