@@ -91,9 +91,7 @@
  * of this define that was meant to.
  * Fortunately, there is no reference for this in noMMU mode, for now.
  */
-#ifndef TASK_SIZE
-#define TASK_SIZE (CONFIG_DRAM_SIZE)
-#endif
+#define TASK_SIZE UL(0xffffffff)
 
 #ifndef TASK_UNMAPPED_BASE
 #define TASK_UNMAPPED_BASE UL(0x00000000)
arch/arm/include/asm/perf_event.h  (9 deletions)
@@ -12,15 +12,6 @@
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/*
- * The ARMv7 CPU PMU supports up to 32 event counters.
- */
-#define ARMPMU_MAX_HWEVENTS 32
-
-#define HW_OP_UNSUPPORTED 0xFFFF
-#define C(_x) PERF_COUNT_HW_CACHE_##_x
-#define CACHE_OP_UNSUPPORTED 0xFFFF
-
 #ifdef CONFIG_HW_PERF_EVENTS
 struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
arch/arm/include/asm/pmu.h  (19 additions)
@@ -42,6 +42,25 @@
 
 #ifdef CONFIG_HW_PERF_EVENTS
 
+/*
+ * The ARMv7 CPU PMU supports up to 32 event counters.
+ */
+#define ARMPMU_MAX_HWEVENTS 32
+
+#define HW_OP_UNSUPPORTED 0xFFFF
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED 0xFFFF
+
+#define PERF_MAP_ALL_UNSUPPORTED \
+	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
+[0 ... C(MAX) - 1] = { \
+	[0 ... C(OP_MAX) - 1] = { \
+		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
+	}, \
+}
+
 /* The events for a given PMU register set. */
 struct pmu_hw_events {
 	/*
arch/arm/include/asm/uaccess.h  (1 addition, 1 deletion)
@@ -242,7 +242,7 @@
 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
 
 #define user_addr_max() \
-	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+	(segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
 
 /*
  * The "__xxx" versions of the user access functions do not verify the
arch/arm/kernel/perf_event.c  (11 additions, 2 deletions)
@@ -560,11 +560,16 @@
 			      struct perf_callchain_entry *entry)
 {
 	struct frame_tail buftail;
+	unsigned long err;
 
-	/* Also check accessibility of one struct frame_tail beyond */
 	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
 		return NULL;
-	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
+
+	pagefault_disable();
+	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+	pagefault_enable();
+
+	if (err)
 		return NULL;
 
 	perf_callchain_store(entry, buftail.lr);
@@ -595,6 +590,10 @@
 	}
 
 	perf_callchain_store(entry, regs->ARM_pc);
+
+	if (!current->mm)
+		return;
+
 	tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
 	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&