Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh: Fix up more 64-bit pgprot truncation on SH-X2 TLB.

Both the store queue API and the PMB remapping take unsigned long for
their pgprot flags, which cuts off the extended protection bits. In the
case of the PMB this isn't really a problem, since the cache attribute
bits that we care about are all in the lower 32 bits, but we make the
conversion anyway to be safe. The store queue remapping, on the other
hand, depends on the
extended prot bits for enabling userspace access to the mappings.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

+18 -13
+3 -2
arch/sh/include/asm/mmu.h
··· 33 33 #ifndef __ASSEMBLY__ 34 34 #include <linux/errno.h> 35 35 #include <linux/threads.h> 36 + #include <asm/page.h> 36 37 37 38 /* Default "unsigned long" context */ 38 39 typedef unsigned long mm_context_id_t[NR_CPUS]; ··· 72 71 #ifdef CONFIG_PMB 73 72 /* arch/sh/mm/pmb.c */ 74 73 long pmb_remap(unsigned long virt, unsigned long phys, 75 - unsigned long size, unsigned long flags); 74 + unsigned long size, pgprot_t prot); 76 75 void pmb_unmap(unsigned long addr); 77 76 int pmb_init(void); 78 77 bool __in_29bit_mode(void); 79 78 #else 80 79 static inline long pmb_remap(unsigned long virt, unsigned long phys, 81 - unsigned long size, unsigned long flags) 80 + unsigned long size, pgprot_t prot) 82 81 { 83 82 return -EINVAL; 84 83 }
+2 -1
arch/sh/include/cpu-sh4/cpu/sq.h
··· 12 12 #define __ASM_CPU_SH4_SQ_H 13 13 14 14 #include <asm/addrspace.h> 15 + #include <asm/page.h> 15 16 16 17 /* 17 18 * Store queues range from e0000000-e3fffffc, allowing approx. 64MB to be ··· 29 28 30 29 /* arch/sh/kernel/cpu/sh4/sq.c */ 31 30 unsigned long sq_remap(unsigned long phys, unsigned int size, 32 - const char *name, unsigned long flags); 31 + const char *name, pgprot_t prot); 33 32 void sq_unmap(unsigned long vaddr); 34 33 void sq_flush_range(unsigned long start, unsigned int len); 35 34
+6 -7
arch/sh/kernel/cpu/sh4/sq.c
··· 100 100 spin_unlock_irq(&sq_mapping_lock); 101 101 } 102 102 103 - static int __sq_remap(struct sq_mapping *map, unsigned long flags) 103 + static int __sq_remap(struct sq_mapping *map, pgprot_t prot) 104 104 { 105 105 #if defined(CONFIG_MMU) 106 106 struct vm_struct *vma; ··· 113 113 114 114 if (ioremap_page_range((unsigned long)vma->addr, 115 115 (unsigned long)vma->addr + map->size, 116 - vma->phys_addr, __pgprot(flags))) { 116 + vma->phys_addr, prot)) { 117 117 vunmap(vma->addr); 118 118 return -EAGAIN; 119 119 } ··· 135 135 * @phys: Physical address of mapping. 136 136 * @size: Length of mapping. 137 137 * @name: User invoking mapping. 138 - * @flags: Protection flags. 138 + * @prot: Protection bits. 139 139 * 140 140 * Remaps the physical address @phys through the next available store queue 141 141 * address of @size length. @name is logged at boot time as well as through 142 142 * the sysfs interface. 143 143 */ 144 144 unsigned long sq_remap(unsigned long phys, unsigned int size, 145 - const char *name, unsigned long flags) 145 + const char *name, pgprot_t prot) 146 146 { 147 147 struct sq_mapping *map; 148 148 unsigned long end; ··· 177 177 178 178 map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT); 179 179 180 - ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags); 180 + ret = __sq_remap(map, prot); 181 181 if (unlikely(ret != 0)) 182 182 goto out; 183 183 ··· 309 309 return -EIO; 310 310 311 311 if (likely(len)) { 312 - int ret = sq_remap(base, len, "Userspace", 313 - pgprot_val(PAGE_SHARED)); 312 + int ret = sq_remap(base, len, "Userspace", PAGE_SHARED); 314 313 if (ret < 0) 315 314 return ret; 316 315 } else
+1 -1
arch/sh/mm/ioremap.c
··· 80 80 if (unlikely(phys_addr >= P1SEG)) { 81 81 unsigned long mapped; 82 82 83 - mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot)); 83 + mapped = pmb_remap(addr, phys_addr, size, pgprot); 84 84 if (likely(mapped)) { 85 85 addr += mapped; 86 86 phys_addr += mapped;
+5 -1
arch/sh/mm/pmb.c
··· 24 24 #include <asm/system.h> 25 25 #include <asm/uaccess.h> 26 26 #include <asm/pgtable.h> 27 + #include <asm/page.h> 27 28 #include <asm/mmu.h> 28 29 #include <asm/io.h> 29 30 #include <asm/mmu_context.h> ··· 167 166 }; 168 167 169 168 long pmb_remap(unsigned long vaddr, unsigned long phys, 170 - unsigned long size, unsigned long flags) 169 + unsigned long size, pgprot_t prot) 171 170 { 172 171 struct pmb_entry *pmbp, *pmbe; 173 172 unsigned long wanted; 174 173 int pmb_flags, i; 175 174 long err; 175 + u64 flags; 176 + 177 + flags = pgprot_val(prot); 176 178 177 179 /* Convert typical pgprot value to the PMB equivalent */ 178 180 if (flags & _PAGE_CACHABLE) {
+1 -1
drivers/video/pvr2fb.c
··· 831 831 printk(KERN_NOTICE "fb%d: registering with SQ API\n", fb_info->node); 832 832 833 833 pvr2fb_map = sq_remap(fb_info->fix.smem_start, fb_info->fix.smem_len, 834 - fb_info->fix.id, pgprot_val(PAGE_SHARED)); 834 + fb_info->fix.id, PAGE_SHARED); 835 835 836 836 printk(KERN_NOTICE "fb%d: Mapped video memory to SQ addr 0x%lx\n", 837 837 fb_info->node, pvr2fb_map);