Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

nommu: add support for Memory Protection Units (MPU)

Some architectures (like the Blackfin arch) implement some of the
"simpler" features that one would expect out of a MMU such as memory
protection.

In our case, we actually get read/write/exec protection down to the page
boundary, so processes can't stomp on each other, let alone the kernel.

There is, however, a performance decrease (which depends greatly on the
workload), as the hardware/software interaction was not optimized at
design time.

Signed-off-by: Bernd Schmidt <bernds_cb1@t-online.de>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Acked-by: David Howells <dhowells@redhat.com>
Acked-by: Greg Ungerer <gerg@snapgear.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Bernd Schmidt and committed by
Linus Torvalds
eb8cdec4 02e87d1a

+26
+5
kernel/module.c
··· 47 47 #include <linux/rculist.h> 48 48 #include <asm/uaccess.h> 49 49 #include <asm/cacheflush.h> 50 + #include <asm/mmu_context.h> 50 51 #include <linux/license.h> 51 52 #include <asm/sections.h> 52 53 #include <linux/tracepoint.h> ··· 1536 1535 1537 1536 /* Finally, free the core (containing the module structure) */ 1538 1537 module_free(mod, mod->module_core); 1538 + 1539 + #ifdef CONFIG_MPU 1540 + update_protections(current->mm); 1541 + #endif 1539 1542 } 1540 1543 1541 1544 void *__symbol_get(const char *symbol)
+21
mm/nommu.c
··· 33 33 #include <asm/uaccess.h> 34 34 #include <asm/tlb.h> 35 35 #include <asm/tlbflush.h> 36 + #include <asm/mmu_context.h> 36 37 #include "internal.h" 37 38 38 39 static inline __attribute__((format(printf, 1, 2))) ··· 624 623 } 625 624 626 625 /* 626 + * update protection on a vma 627 + */ 628 + static void protect_vma(struct vm_area_struct *vma, unsigned long flags) 629 + { 630 + #ifdef CONFIG_MPU 631 + struct mm_struct *mm = vma->vm_mm; 632 + long start = vma->vm_start & PAGE_MASK; 633 + while (start < vma->vm_end) { 634 + protect_page(mm, start, flags); 635 + start += PAGE_SIZE; 636 + } 637 + update_protections(mm); 638 + #endif 639 + } 640 + 641 + /* 627 642 * add a VMA into a process's mm_struct in the appropriate place in the list 628 643 * and tree and add to the address space's page tree also if not an anonymous 629 644 * page ··· 657 640 658 641 mm->map_count++; 659 642 vma->vm_mm = mm; 643 + 644 + protect_vma(vma, vma->vm_flags); 660 645 661 646 /* add the VMA to the mapping */ 662 647 if (vma->vm_file) { ··· 721 702 struct mm_struct *mm = vma->vm_mm; 722 703 723 704 kenter("%p", vma); 705 + 706 + protect_vma(vma, 0); 724 707 725 708 mm->map_count--; 726 709 if (mm->mmap_cache == vma)