arch/parisc/include/asm/elf.h  +4
arch/parisc/include/asm/pgtable.h  +1

arch/parisc/include/asm/processor.h  +2
···
 #endif
 #define current_text_addr() ({ void *pc; current_ia(pc); pc; })
 
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+
 #define TASK_SIZE_OF(tsk)	((tsk)->thread.task_size)
 #define TASK_SIZE		TASK_SIZE_OF(current)
 #define TASK_UNMAPPED_BASE	(current->thread.map_base)
arch/parisc/include/asm/thread_info.h  +10
···
 #define _TIF_SYSCALL_TRACE_MASK	(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
 					 _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT)
 
+#ifdef CONFIG_64BIT
+# ifdef CONFIG_COMPAT
+#  define is_32bit_task()	(test_thread_flag(TIF_32BIT))
+# else
+#  define is_32bit_task()	(0)
+# endif
+#else
+# define is_32bit_task()	(1)
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_PARISC_THREAD_INFO_H */
arch/parisc/kernel/process.c  +20 -1
···
  * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
  * Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
  * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
- * Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org>
+ * Copyright (C) 2001-2014 Helge Deller <deller@gmx.de>
  * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
  *
  *
···
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
 #include <linux/rcupdate.h>
+#include <linux/random.h>
 
 #include <asm/io.h>
 #include <asm/asm-offsets.h>
···
 	return ptr;
 }
 #endif
+
+static inline unsigned long brk_rnd(void)
+{
+	/* 8MB for 32bit, 1GB for 64bit */
+	if (is_32bit_task())
+		return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+	else
+		return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+}
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+
+	if (ret < mm->brk)
+		return mm->brk;
+	return ret;
+}
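
The two masks set the size of the randomization window: 0x7ff pages of 4 KB allow at most 8 MB of brk offset for 32-bit tasks, and 0x3ffff pages roughly 1 GB for 64-bit ones. A minimal userspace sketch of the same arithmetic, with rand() and a hard-coded PAGE_SHIFT standing in for get_random_int() and the kernel's page size (illustrative only, not the kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12   /* assumes 4 KB pages, as on parisc */

    int main(void)
    {
        unsigned long r32 = ((unsigned long)rand() & 0x7ffUL)   << PAGE_SHIFT;
        unsigned long r64 = ((unsigned long)rand() & 0x3ffffUL) << PAGE_SHIFT;

        /* 0x7ff << 12 = 8 MB - 4 KB; 0x3ffff << 12 = 1 GB - 4 KB */
        printf("32-bit brk offset %#lx (max %#lx)\n", r32, 0x7ffUL << PAGE_SHIFT);
        printf("64-bit brk offset %#lx (max %#lx)\n", r64, 0x3ffffUL << PAGE_SHIFT);
        return 0;
    }
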
arch/parisc/kernel/sys_parisc.c  +206 -52
···
  * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
  * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
  * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
+ * Copyright (C) 1999-2014 Helge Deller <deller@gmx.de>
  *
  *
  * This program is free software; you can redistribute it and/or modify
···
  */
 
 #include <asm/uaccess.h>
+#include <asm/elf.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/linkage.h>
···
 #include <linux/syscalls.h>
 #include <linux/utsname.h>
 #include <linux/personality.h>
+#include <linux/random.h>
 
-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
+/* we construct an artificial offset for the mapping based on the physical
+ * address of the kernel mapping variable */
+#define GET_LAST_MMAP(filp)		\
+	(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
+#define SET_LAST_MMAP(filp, val)	\
+	{ /* nothing */ }
+
+static int get_offset(unsigned int last_mmap)
 {
-	struct vm_unmapped_area_info info;
+	return (last_mmap & (SHMLBA-1)) >> PAGE_SHIFT;
+}
 
-	info.flags = 0;
-	info.length = len;
-	info.low_limit = PAGE_ALIGN(addr);
-	info.high_limit = TASK_SIZE;
-	info.align_mask = 0;
-	info.align_offset = 0;
-	return vm_unmapped_area(&info);
+static unsigned long shared_align_offset(unsigned int last_mmap,
+					 unsigned long pgoff)
+{
+	return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
+}
+
+static inline unsigned long COLOR_ALIGN(unsigned long addr,
+			 unsigned int last_mmap, unsigned long pgoff)
+{
+	unsigned long base = (addr+SHMLBA-1) & ~(SHMLBA-1);
+	unsigned long off = (SHMLBA-1) &
+		(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);
+
+	return base + off;
 }
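
GET_LAST_MMAP() hashes the file's f_mapping pointer into a per-file cache colour, and COLOR_ALIGN() rounds a hint address up to an SHMLBA boundary and re-adds that colour, so every mapping of the same file page lands at the same offset within its stripe (parisc's virtually-indexed caches require SHMLBA congruence for shared pages). A standalone sketch of the idea, assuming the 4 MB parisc SHMLBA and applying the page shift once (note that the COLOR_ALIGN above shifts the already page-shifted offset a second time before masking); the cookie value is hypothetical:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define SHMLBA (4UL << 20)  /* assumed 4 MB SHMLBA, as on parisc */

    /* colour: byte offset within an SHMLBA stripe for a mapping cookie
     * and file page offset, mirroring get_offset()+shared_align_offset() */
    static unsigned long colour(unsigned long last_mmap, unsigned long pgoff)
    {
        unsigned long pages = (last_mmap & (SHMLBA - 1)) >> PAGE_SHIFT;
        return ((pages + pgoff) << PAGE_SHIFT) & (SHMLBA - 1);
    }

    /* round a hint up to the stripe boundary, then add the colour */
    static unsigned long colour_align(unsigned long addr,
                                      unsigned long last_mmap,
                                      unsigned long pgoff)
    {
        unsigned long base = (addr + SHMLBA - 1) & ~(SHMLBA - 1);
        return base + colour(last_mmap, pgoff);
    }

    int main(void)
    {
        unsigned long cookie = 0x00123000;  /* hypothetical f_mapping >> 8 */
        unsigned long a = colour_align(0x40001000, cookie, 0);
        unsigned long b = colour_align(0x7345f000, cookie, 0);

        /* both mappings get the same offset within their 4 MB stripe,
         * so they can alias safely in a virtually-indexed cache */
        printf("a=%#lx b=%#lx congruent=%d\n", a, b,
               ((a ^ b) & (SHMLBA - 1)) == 0);
        return 0;
    }
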
 
 /*
- * We need to know the offset to use. Old scheme was to look for
- * existing mapping and use the same offset. New scheme is to use the
- * address of the kernel data structure as the seed for the offset.
- * We'll see how that works...
- *
- * The mapping is cacheline aligned, so there's no information in the bottom
- * few bits of the address. We're looking for 10 bits (4MB / 4k), so let's
- * drop the bottom 8 bits and use bits 8-17.
+ * Top of mmap area (just below the process stack).
  */
-static int get_offset(struct address_space *mapping)
+
+static unsigned long mmap_upper_limit(void)
 {
-	return (unsigned long) mapping >> 8;
+	unsigned long stack_base;
+
+	/* Limit stack size to 1GB - see setup_arg_pages() in fs/exec.c */
+	stack_base = rlimit_max(RLIMIT_STACK);
+	if (stack_base > (1 << 30))
+		stack_base = 1 << 30;
+
+	return PAGE_ALIGN(STACK_TOP - stack_base);
 }
 
-static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
-{
-	struct address_space *mapping = filp ? filp->f_mapping : NULL;
-
-	return (get_offset(mapping) + pgoff) << PAGE_SHIFT;
-}
-
-static unsigned long get_shared_area(struct file *filp, unsigned long addr,
-		unsigned long len, unsigned long pgoff)
-{
-	struct vm_unmapped_area_info info;
-
-	info.flags = 0;
-	info.length = len;
-	info.low_limit = PAGE_ALIGN(addr);
-	info.high_limit = TASK_SIZE;
-	info.align_mask = PAGE_MASK & (SHMLBA - 1);
-	info.align_offset = shared_align_offset(filp, pgoff);
-	return vm_unmapped_area(&info);
-}
 
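mmap_upper_limit() pins the top of the mmap area just below the largest stack the process could ever get, clamped to 1 GB to match setup_arg_pages(). The same computation in userspace terms, with a hypothetical STACK_TOP since the real value is configuration-dependent:

    #include <stdio.h>
    #include <sys/resource.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
    #define STACK_TOP 0xf8000000UL  /* hypothetical; config-dependent on parisc */

    int main(void)
    {
        struct rlimit rl;
        unsigned long stack_base;

        (void)getrlimit(RLIMIT_STACK, &rl);
        stack_base = rl.rlim_max;
        if (stack_base > (1UL << 30))   /* clamp to 1 GB, as the patch does */
            stack_base = 1UL << 30;

        printf("mmap upper limit: %#lx\n", PAGE_ALIGN(STACK_TOP - stack_base));
        return 0;
    }
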
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
-	if (len > TASK_SIZE)
-		return -ENOMEM;
-	if (flags & MAP_FIXED) {
-		if ((flags & MAP_SHARED) &&
-		    (addr - shared_align_offset(filp, pgoff)) & (SHMLBA - 1))
-			return -EINVAL;
-		return addr;
-	}
-	if (!addr)
-		addr = TASK_UNMAPPED_BASE;
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long task_size = TASK_SIZE;
+	int do_color_align, last_mmap;
+	struct vm_unmapped_area_info info;
 
+	if (len > task_size)
+		return -ENOMEM;
+
+	do_color_align = 0;
 	if (filp || (flags & MAP_SHARED))
-		addr = get_shared_area(filp, addr, len, pgoff);
-	else
-		addr = get_unshared_area(addr, len);
+		do_color_align = 1;
+	last_mmap = GET_LAST_MMAP(filp);
+
+	if (flags & MAP_FIXED) {
+		if ((flags & MAP_SHARED) && last_mmap &&
+		    (addr - shared_align_offset(last_mmap, pgoff))
+				& (SHMLBA - 1))
+			return -EINVAL;
+		goto found_addr;
+	}
+
+	if (addr) {
+		if (do_color_align && last_mmap)
+			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (task_size - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			goto found_addr;
+	}
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_legacy_base;
+	info.high_limit = mmap_upper_limit();
+	info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = shared_align_offset(last_mmap, pgoff);
+	addr = vm_unmapped_area(&info);
+
+found_addr:
+	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
+		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
 
 	return addr;
 }
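
With align_mask and align_offset filled in, vm_unmapped_area() returns the lowest free gap whose start is congruent to the colour modulo SHMLBA. A toy bottom-up first-fit over a sorted list of busy ranges shows that contract (a simplification of the real gap search, which walks the VMA tree; assumes page-aligned inputs):

    #include <stdio.h>

    struct span { unsigned long start, end; };  /* existing mappings, sorted */

    static unsigned long toy_unmapped_area(const struct span *busy, int n,
            unsigned long low, unsigned long high, unsigned long len,
            unsigned long align_mask, unsigned long align_offset)
    {
        unsigned long addr = low;
        int i = 0;

        for (;;) {
            /* bump addr up to the required colour/alignment */
            if ((addr & align_mask) != (align_offset & align_mask))
                addr += (align_offset - addr) & align_mask;
            if (addr + len > high)
                return -1UL;            /* -ENOMEM in the kernel */
            while (i < n && busy[i].end <= addr)
                i++;
            if (i == n || addr + len <= busy[i].start)
                return addr;            /* gap found */
            addr = busy[i].end;         /* skip past the collision */
        }
    }

    int main(void)
    {
        struct span busy[] = { { 0x40000000, 0x40400000 } };
        /* 4 MB colouring (PAGE_MASK & (SHMLBA-1)), colour 0x123000 */
        unsigned long a = toy_unmapped_area(busy, 1, 0x40000000, 0x80000000,
                                            0x10000, 0x3ff000, 0x123000);
        printf("found %#lx\n", a);  /* 0x40523000: above the busy span */
        return 0;
    }
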
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			  const unsigned long len, const unsigned long pgoff,
+			  const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	int do_color_align, last_mmap;
+	struct vm_unmapped_area_info info;
+
+#ifdef CONFIG_64BIT
+	/* This should only ever run for 32-bit processes. */
+	BUG_ON(!test_thread_flag(TIF_32BIT));
+#endif
+
+	/* requested length too big for entire address space */
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = 1;
+	last_mmap = GET_LAST_MMAP(filp);
+
+	if (flags & MAP_FIXED) {
+		if ((flags & MAP_SHARED) && last_mmap &&
+		    (addr - shared_align_offset(last_mmap, pgoff))
+			& (SHMLBA - 1))
+			return -EINVAL;
+		goto found_addr;
+	}
+
+	/* requesting a specific address */
+	if (addr) {
+		if (do_color_align && last_mmap)
+			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			goto found_addr;
+	}
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = shared_align_offset(last_mmap, pgoff);
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		goto found_addr;
+	VM_BUG_ON(addr != -ENOMEM);
+
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+
+found_addr:
+	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
+		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
+
+	return addr;
+}
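
The `!(addr & ~PAGE_MASK)` success test works because vm_unmapped_area() returns either a page-aligned address or a negative errno cast to unsigned long, whose low bits are never all zero:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long ok  = 0x40523000;         /* page-aligned address */
        unsigned long err = (unsigned long)-12; /* -ENOMEM */

        printf("ok:  error=%d\n", (ok  & ~PAGE_MASK) != 0);  /* 0 */
        printf("err: error=%d\n", (err & ~PAGE_MASK) != 0);  /* 1 */
        return 0;
    }
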
+
+static int mmap_is_legacy(void)
+{
+	if (current->personality & ADDR_COMPAT_LAYOUT)
+		return 1;
+
+	/* parisc stack always grows up - so a unlimited stack should
+	 * not be an indicator to use the legacy memory layout.
+	 * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+	 *	return 1;
+	 */
+
+	return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_rnd(void)
+{
+	unsigned long rnd = 0;
+
+	/*
+	 *  8 bits of randomness in 32bit mmaps, 20 address space bits
+	 * 28 bits of randomness in 64bit mmaps, 40 address space bits
+	 */
+	if (current->flags & PF_RANDOMIZE) {
+		if (is_32bit_task())
+			rnd = get_random_int() % (1<<8);
+		else
+			rnd = get_random_int() % (1<<28);
+	}
+	return rnd << PAGE_SHIFT;
+}
+
+static unsigned long mmap_legacy_base(void)
+{
+	return TASK_UNMAPPED_BASE + mmap_rnd();
+}
+
+/*
+ * This function, called very early during the creation of a new
+ * process VM image, sets up which VM layout function to use:
+ */
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+	mm->mmap_legacy_base = mmap_legacy_base();
+	mm->mmap_base = mmap_upper_limit();
+
+	if (mmap_is_legacy()) {
+		mm->mmap_base = mm->mmap_legacy_base;
+		mm->get_unmapped_area = arch_get_unmapped_area;
+	} else {
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+	}
+}
+
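
mmap_rnd() keeps 8 random page bits for 32-bit tasks and 28 for 64-bit ones, which with 4 KB pages spans a 1 MB or 1 TB randomization window (20 or 40 address-space bits, as the comment says). A quick check of those windows (userspace sketch; assumes an LP64 host and 4 KB pages):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        /* offsets drawn from 2^8 pages vs 2^28 pages */
        unsigned long r32 = ((unsigned long)rand() % (1 << 8))  << PAGE_SHIFT;
        unsigned long r64 = ((unsigned long)rand() % (1 << 28)) << PAGE_SHIFT;

        printf("32-bit offset %#lx of %#lx (1 MB window, 20 bits)\n",
               r32, 1UL << (8 + PAGE_SHIFT));
        printf("64-bit offset %#lx of %#lx (1 TB window, 40 bits)\n",
               r64, 1UL << (28 + PAGE_SHIFT));
        return 0;
    }
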
 
 asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
 	unsigned long prot, unsigned long flags, unsigned long fd,