···2020# $4 - default install path (blank if root directory)2121#22222323+verify () {2424+ if [ ! -f "$1" ]; then2525+ echo "" 1>&22626+ echo " *** Missing file: $1" 1>&22727+ echo ' *** You need to run "make" before "make install".' 1>&22828+ echo "" 1>&22929+ exit 13030+ fi3131+}3232+3333+# Make sure the files actually exist3434+verify "$2"3535+verify "$3"3636+2337# User may have a custom install script2438if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi2539if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
···332332 unsigned long hva;333333 int pfnmap = 0;334334 int tsize = BOOK3E_PAGESZ_4K;335335+ int ret = 0;336336+ unsigned long mmu_seq;337337+ struct kvm *kvm = vcpu_e500->vcpu.kvm;338338+339339+ /* used to check for invalidations in progress */340340+ mmu_seq = kvm->mmu_notifier_seq;341341+ smp_rmb();335342336343 /*337344 * Translate guest physical to true physical, acquiring···456449 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);457450 }458451452452+ spin_lock(&kvm->mmu_lock);453453+ if (mmu_notifier_retry(kvm, mmu_seq)) {454454+ ret = -EAGAIN;455455+ goto out;456456+ }457457+459458 kvmppc_e500_ref_setup(ref, gtlbe, pfn);460459461460 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,···470457 /* Clear i-cache for new pages */471458 kvmppc_mmu_flush_icache(pfn);472459460460+out:461461+ spin_unlock(&kvm->mmu_lock);462462+473463 /* Drop refcount on page, so that mmu notifiers can clear it */474464 kvm_release_pfn_clean(pfn);475465476476- return 0;466466+ return ret;477467}478468479469/* XXX only map the one-one case, for now use TLB0 */
···166166 *167167 * Atomically sets @v to @i and returns old @v168168 */169169-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)169169+static inline long long atomic64_xchg(atomic64_t *v, long long n)170170{171171 return xchg64(&v->counter, n);172172}···180180 * Atomically checks if @v holds @o and replaces it with @n if so.181181 * Returns the old value at @v.182182 */183183-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)183183+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,184184+ long long n)184185{185186 return cmpxchg64(&v->counter, o, n);186187}
+15-12
arch/tile/include/asm/atomic_32.h
···8080/* A 64bit atomic type */81818282typedef struct {8383- u64 __aligned(8) counter;8383+ long long counter;8484} atomic64_t;85858686#define ATOMIC64_INIT(val) { (val) }···9191 *9292 * Atomically reads the value of @v.9393 */9494-static inline u64 atomic64_read(const atomic64_t *v)9494+static inline long long atomic64_read(const atomic64_t *v)9595{9696 /*9797 * Requires an atomic op to read both 32-bit parts consistently.9898 * Casting away const is safe since the atomic support routines9999 * do not write to memory if the value has not been modified.100100 */101101- return _atomic64_xchg_add((u64 *)&v->counter, 0);101101+ return _atomic64_xchg_add((long long *)&v->counter, 0);102102}103103104104/**···108108 *109109 * Atomically adds @i to @v.110110 */111111-static inline void atomic64_add(u64 i, atomic64_t *v)111111+static inline void atomic64_add(long long i, atomic64_t *v)112112{113113 _atomic64_xchg_add(&v->counter, i);114114}···120120 *121121 * Atomically adds @i to @v and returns @i + @v122122 */123123-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)123123+static inline long long atomic64_add_return(long long i, atomic64_t *v)124124{125125 smp_mb(); /* barrier for proper semantics */126126 return _atomic64_xchg_add(&v->counter, i) + i;···135135 * Atomically adds @a to @v, so long as @v was not already @u.136136 * Returns non-zero if @v was not @u, and zero otherwise.137137 */138138-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)138138+static inline long long atomic64_add_unless(atomic64_t *v, long long a,139139+ long long u)139140{140141 smp_mb(); /* barrier for proper semantics */141142 return _atomic64_xchg_add_unless(&v->counter, a, u) != u;···152151 * atomic64_set() can't be just a raw store, since it would be lost if it153152 * fell between the load and store of one of the other atomic ops.154153 */155155-static inline void atomic64_set(atomic64_t *v, u64 n)154154+static inline void atomic64_set(atomic64_t *v, long long 
n)156155{157156 _atomic64_xchg(&v->counter, n);158157}···237236extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);238237extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);239238extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);240240-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);241241-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);242242-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);243243-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,244244- int *lock, u64 o, u64 n);239239+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,240240+ long long o, long long n);241241+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);242242+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,243243+ long long n);244244+extern long long __atomic64_xchg_add_unless(volatile long long *p,245245+ int *lock, long long o, long long n);245246246247/* Return failure from the atomic wrappers. */247248struct __get_user __atomic_bad_address(int __user *addr);
+17-11
arch/tile/include/asm/cmpxchg.h
···3535int _atomic_xchg_add(int *v, int i);3636int _atomic_xchg_add_unless(int *v, int a, int u);3737int _atomic_cmpxchg(int *ptr, int o, int n);3838-u64 _atomic64_xchg(u64 *v, u64 n);3939-u64 _atomic64_xchg_add(u64 *v, u64 i);4040-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);4141-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);3838+long long _atomic64_xchg(long long *v, long long n);3939+long long _atomic64_xchg_add(long long *v, long long i);4040+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);4141+long long _atomic64_cmpxchg(long long *v, long long o, long long n);42424343#define xchg(ptr, n) \4444 ({ \···5353 if (sizeof(*(ptr)) != 4) \5454 __cmpxchg_called_with_bad_pointer(); \5555 smp_mb(); \5656- (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \5656+ (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \5757+ (int)n); \5758 })58595960#define xchg64(ptr, n) \···6261 if (sizeof(*(ptr)) != 8) \6362 __xchg_called_with_bad_pointer(); \6463 smp_mb(); \6565- (typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \6464+ (typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \6565+ (long long)(n)); \6666 })67676868#define cmpxchg64(ptr, o, n) \···7169 if (sizeof(*(ptr)) != 8) \7270 __cmpxchg_called_with_bad_pointer(); \7371 smp_mb(); \7474- (typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \7272+ (typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \7373+ (long long)o, (long long)n); \7574 })76757776#else···8481 switch (sizeof(*(ptr))) { \8582 case 4: \8683 __x = (typeof(__x))(unsigned long) \8787- __insn_exch4((ptr), (u32)(unsigned long)(n)); \8484+ __insn_exch4((ptr), \8585+ (u32)(unsigned long)(n)); \8886 break; \8987 case 8: \9090- __x = (typeof(__x)) \8888+ __x = (typeof(__x)) \9189 __insn_exch((ptr), (unsigned long)(n)); \9290 break; \9391 default: \···107103 switch (sizeof(*(ptr))) { \108104 case 4: \109105 __x = (typeof(__x))(unsigned long) \110110- __insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \106106+ 
__insn_cmpexch4((ptr), \107107+ (u32)(unsigned long)(n)); \111108 break; \112109 case 8: \113113- __x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \110110+ __x = (typeof(__x))__insn_cmpexch((ptr), \111111+ (long long)(n)); \114112 break; \115113 default: \116114 __cmpxchg_called_with_bad_pointer(); \
+31-3
arch/tile/include/asm/percpu.h
···1515#ifndef _ASM_TILE_PERCPU_H1616#define _ASM_TILE_PERCPU_H17171818-register unsigned long __my_cpu_offset __asm__("tp");1919-#define __my_cpu_offset __my_cpu_offset2020-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))1818+register unsigned long my_cpu_offset_reg asm("tp");1919+2020+#ifdef CONFIG_PREEMPT2121+/*2222+ * For full preemption, we can't just use the register variable2323+ * directly, since we need barrier() to hazard against it, causing the2424+ * compiler to reload anything computed from a previous "tp" value.2525+ * But we also don't want to use volatile asm, since we'd like the2626+ * compiler to be able to cache the value across multiple percpu reads.2727+ * So we use a fake stack read as a hazard against barrier().2828+ * The 'U' constraint is like 'm' but disallows postincrement.2929+ */3030+static inline unsigned long __my_cpu_offset(void)3131+{3232+ unsigned long tp;3333+ register unsigned long *sp asm("sp");3434+ asm("move %0, tp" : "=r" (tp) : "U" (*sp));3535+ return tp;3636+}3737+#define __my_cpu_offset __my_cpu_offset()3838+#else3939+/*4040+ * We don't need to hazard against barrier() since "tp" doesn't ever4141+ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only4242+ * changes at function call points, at which we are already re-reading4343+ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.4444+ */4545+#define __my_cpu_offset my_cpu_offset_reg4646+#endif4747+4848+#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))21492250#include <asm-generic/percpu.h>2351
···2323#include <linux/mmzone.h>2424#include <linux/dcache.h>2525#include <linux/fs.h>2626+#include <linux/string.h>2627#include <asm/backtrace.h>2728#include <asm/page.h>2829#include <asm/ucontext.h>···333332 }334333335334 if (vma->vm_file) {336336- char *s;337335 p = d_path(&vma->vm_file->f_path, buf, bufsize);338336 if (IS_ERR(p))339337 p = "?";340340- s = strrchr(p, '/');341341- if (s)342342- p = s+1;338338+ name = kbasename(p);343339 } else {344344- p = "anon";340340+ name = "anon";345341 }346342347343 /* Generate a string description of the vma info. */348348- namelen = strlen(p);344344+ namelen = strlen(name);349345 remaining = (bufsize - 1) - namelen;350350- memmove(buf, p, namelen);346346+ memmove(buf, name, namelen);351347 snprintf(buf + namelen, remaining, "[%lx+%lx] ",352348 vma->vm_start, vma->vm_end - vma->vm_start);353349}
+4-4
arch/tile/lib/atomic_32.c
···107107EXPORT_SYMBOL(_atomic_xor);108108109109110110-u64 _atomic64_xchg(u64 *v, u64 n)110110+long long _atomic64_xchg(long long *v, long long n)111111{112112 return __atomic64_xchg(v, __atomic_setup(v), n);113113}114114EXPORT_SYMBOL(_atomic64_xchg);115115116116-u64 _atomic64_xchg_add(u64 *v, u64 i)116116+long long _atomic64_xchg_add(long long *v, long long i)117117{118118 return __atomic64_xchg_add(v, __atomic_setup(v), i);119119}120120EXPORT_SYMBOL(_atomic64_xchg_add);121121122122-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)122122+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)123123{124124 /*125125 * Note: argument order is switched here since it is easier···130130}131131EXPORT_SYMBOL(_atomic64_xchg_add_unless);132132133133-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)133133+long long _atomic64_cmpxchg(long long *v, long long o, long long n)134134{135135 return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);136136}
+3-3
arch/x86/include/asm/cpufeature.h
···374374 * Catch too early usage of this before alternatives375375 * have run.376376 */377377- asm goto("1: jmp %l[t_warn]\n"377377+ asm_volatile_goto("1: jmp %l[t_warn]\n"378378 "2:\n"379379 ".section .altinstructions,\"a\"\n"380380 " .long 1b - .\n"···388388389389#endif390390391391- asm goto("1: jmp %l[t_no]\n"391391+ asm_volatile_goto("1: jmp %l[t_no]\n"392392 "2:\n"393393 ".section .altinstructions,\"a\"\n"394394 " .long 1b - .\n"···453453 * have. Thus, we force the jump to the widest, 4-byte, signed relative454454 * offset even though the last would often fit in less bytes.455455 */456456- asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"456456+ asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"457457 "2:\n"458458 ".section .altinstructions,\"a\"\n"459459 " .long 1b - .\n" /* src offset */
···2727 * - USB ID 04d9:a067, sold as Sharkoon Drakonia and Perixx MX-20002828 * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X2002929 * and Zalman ZM-GM13030+ * - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse3031 */31323233static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,···4746 }4847 break;4948 case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A:4949+ case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081:5050 if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f5151 && rdesc[111] == 0xff && rdesc[112] == 0x7f) {5252 hid_info(hdev, "Fixing up report descriptor\n");···6563 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },6664 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,6765 USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },6666+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,6767+ USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },6868 { }6969};7070MODULE_DEVICE_TABLE(hid, holtek_mouse_devices);
···230230231231static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)232232{233233+ u8 status, data = 0;233234 int i;234235235236 if (send_command(cmd) || send_argument(key)) {···238237 return -EIO;239238 }240239240240+ /* This has no effect on newer (2012) SMCs */241241 if (send_byte(len, APPLESMC_DATA_PORT)) {242242 pr_warn("%.4s: read len fail\n", key);243243 return -EIO;···251249 }252250 buffer[i] = inb(APPLESMC_DATA_PORT);253251 }252252+253253+ /* Read the data port until bit0 is cleared */254254+ for (i = 0; i < 16; i++) {255255+ udelay(APPLESMC_MIN_WAIT);256256+ status = inb(APPLESMC_CMD_PORT);257257+ if (!(status & 0x01))258258+ break;259259+ data = inb(APPLESMC_DATA_PORT);260260+ }261261+ if (i)262262+ pr_warn("flushed %d bytes, last value is: %d\n", i, data);254263255264 return 0;256265}
···476476 master->bus_num = bus_num;477477478478 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);479479- if (!res) {480480- dev_err(&pdev->dev, "can't get platform resource\n");481481- ret = -EINVAL;482482- goto out_master_put;483483- }484484-485479 dspi->base = devm_ioremap_resource(&pdev->dev, res);486486- if (!dspi->base) {487487- ret = -EINVAL;480480+ if (IS_ERR(dspi->base)) {481481+ ret = PTR_ERR(dspi->base);488482 goto out_master_put;489483 }490484
+3-1
drivers/spi/spi-mpc512x-psc.c
···522522 psc_num = master->bus_num;523523 snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num);524524 clk = devm_clk_get(dev, clk_name);525525- if (IS_ERR(clk))525525+ if (IS_ERR(clk)) {526526+ ret = PTR_ERR(clk);526527 goto free_irq;528528+ }527529 ret = clk_prepare_enable(clk);528530 if (ret)529531 goto free_irq;
+10-1
drivers/spi/spi-pxa2xx.c
···546546 if (pm_runtime_suspended(&drv_data->pdev->dev))547547 return IRQ_NONE;548548549549- sccr1_reg = read_SSCR1(reg);549549+ /*550550+ * If the device is not yet in RPM suspended state and we get an551551+ * interrupt that is meant for another device, check if status bits552552+ * are all set to one. That means that the device is already553553+ * powered off.554554+ */550555 status = read_SSSR(reg);556556+ if (status == ~0)557557+ return IRQ_NONE;558558+559559+ sccr1_reg = read_SSCR1(reg);551560552561 /* Ignore possible writes if we don't need to write */553562 if (!(sccr1_reg & SSCR1_TIE))
···802802 return -ENODEV;803803 }804804805805+ /*806806+ * Ignore all auxiliary iLO devices with the following PCI ID807807+ */808808+ if (dev->subsystem_device == 0x1979)809809+ return -ENODEV;810810+805811 if (pci_enable_device(dev)) {806812 dev_warn(&dev->dev,807813 "Not possible to enable PCI Device: 0x%x:0x%x.\n",
+15
include/linux/compiler-gcc4.h
···6565#define __visible __attribute__((externally_visible))6666#endif67676868+/*6969+ * GCC 'asm goto' miscompiles certain code sequences:7070+ *7171+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=586707272+ *7373+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.7474+ * Fixed in GCC 4.8.2 and later versions.7575+ *7676+ * (asm goto is automatically volatile - the naming reflects this.)7777+ */7878+#if GCC_VERSION <= 408017979+# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)8080+#else8181+# define asm_volatile_goto(x...) do { asm goto(x); } while (0)8282+#endif68836984#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP7085#if GCC_VERSION >= 40400
···294294 */295295struct perf_event {296296#ifdef CONFIG_PERF_EVENTS297297- struct list_head group_entry;297297+ /*298298+ * entry onto perf_event_context::event_list;299299+ * modifications require ctx->lock300300+ * RCU safe iterations.301301+ */298302 struct list_head event_entry;303303+304304+ /*305305+ * XXX: group_entry and sibling_list should be mutually exclusive;306306+ * either you're a sibling on a group, or you're the group leader.307307+ * Rework the code to always use the same list element.308308+ *309309+ * Locked for modification by both ctx->mutex and ctx->lock; holding310310+ * either sufficies for read.311311+ */312312+ struct list_head group_entry;299313 struct list_head sibling_list;314314+315315+ /*316316+ * We need storage to track the entries in perf_pmu_migrate_context; we317317+ * cannot use the event_entry because of RCU and we want to keep the318318+ * group in tact which avoids us using the other two entries.319319+ */320320+ struct list_head migrate_entry;321321+300322 struct hlist_node hlist_entry;301323 int nr_siblings;302324 int group_flags;
+1
include/linux/random.h
···1717extern void get_random_bytes(void *buf, int nbytes);1818extern void get_random_bytes_arch(void *buf, int nbytes);1919void generate_random_uuid(unsigned char uuid_out[16]);2020+extern int random_int_secret_init(void);20212122#ifndef MODULE2223extern const struct file_operations random_fops, urandom_fops;
+14
include/linux/timex.h
···64646565#include <asm/timex.h>66666767+#ifndef random_get_entropy6868+/*6969+ * The random_get_entropy() function is used by the /dev/random driver7070+ * in order to extract entropy via the relative unpredictability of7171+ * when an interrupt takes place versus a high speed, fine-grained7272+ * timing source or cycle counter. Since it will occur on every7373+ * single interrupt, it must have a very low cost/overhead.7474+ *7575+ * By default we use get_cycles() for this purpose, but individual7676+ * architectures may override this in their asm/timex.h header file.7777+ */7878+#define random_get_entropy() get_cycles()7979+#endif8080+6781/*6882 * SHIFT_PLL is used as a dampening factor to define how much we6983 * adjust the frequency correction for a given offset in PLL mode.
···426426 * @die_mem: a buffer for result DIE427427 *428428 * Search a non-inlined function DIE which includes @addr. Stores the429429- * DIE to @die_mem and returns it if found. Returns NULl if failed.429429+ * DIE to @die_mem and returns it if found. Returns NULL if failed.430430 */431431Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,432432 Dwarf_Die *die_mem)···454454}455455456456/**457457- * die_find_inlinefunc - Search an inlined function at given address458458- * @cu_die: a CU DIE which including @addr457457+ * die_find_top_inlinefunc - Search the top inlined function at given address458458+ * @sp_die: a subprogram DIE which including @addr459459 * @addr: target address460460 * @die_mem: a buffer for result DIE461461 *462462 * Search an inlined function DIE which includes @addr. Stores the463463- * DIE to @die_mem and returns it if found. Returns NULl if failed.463463+ * DIE to @die_mem and returns it if found. Returns NULL if failed.464464+ * Even if several inlined functions are expanded recursively, this465465+ * doesn't trace it down, and returns the topmost one.466466+ */467467+Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,468468+ Dwarf_Die *die_mem)469469+{470470+ return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);471471+}472472+473473+/**474474+ * die_find_inlinefunc - Search an inlined function at given address475475+ * @sp_die: a subprogram DIE which including @addr476476+ * @addr: target address477477+ * @die_mem: a buffer for result DIE478478+ *479479+ * Search an inlined function DIE which includes @addr. Stores the480480+ * DIE to @die_mem and returns it if found. Returns NULL if failed.464481 * If several inlined functions are expanded recursively, this trace465465- * it and returns deepest one.482482+ * it down and returns deepest one.466483 */467484Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,468485 Dwarf_Die *die_mem)
+5-1
tools/perf/util/dwarf-aux.h
···7979extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,8080 Dwarf_Die *die_mem);81818282-/* Search an inlined function including given address */8282+/* Search the top inlined function including given address */8383+extern Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,8484+ Dwarf_Die *die_mem);8585+8686+/* Search the deepest inlined function including given address */8387extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,8488 Dwarf_Die *die_mem);8589
+12
tools/perf/util/header.c
···27682768 if (perf_file_header__read(&f_header, header, fd) < 0)27692769 return -EINVAL;2770277027712771+ /*27722772+ * Sanity check that perf.data was written cleanly; data size is27732773+ * initialized to 0 and updated only if the on_exit function is run.27742774+ * If data size is still 0 then the file contains only partial27752775+ * information. Just warn user and process it as much as it can.27762776+ */27772777+ if (f_header.data.size == 0) {27782778+ pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"27792779+ "Was the 'perf record' command properly terminated?\n",27802780+ session->filename);27812781+ }27822782+27712783 nr_attrs = f_header.attrs.size / f_header.attr_size;27722784 lseek(fd, f_header.attrs.offset, SEEK_SET);27732785
+33-16
tools/perf/util/probe-finder.c
···13271327 struct perf_probe_point *ppt)13281328{13291329 Dwarf_Die cudie, spdie, indie;13301330- Dwarf_Addr _addr, baseaddr;13311331- const char *fname = NULL, *func = NULL, *tmp;13301330+ Dwarf_Addr _addr = 0, baseaddr = 0;13311331+ const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;13321332 int baseline = 0, lineno = 0, ret = 0;1333133313341334 /* Adjust address with bias */···13491349 /* Find a corresponding function (name, baseline and baseaddr) */13501350 if (die_find_realfunc(&cudie, (Dwarf_Addr)addr, &spdie)) {13511351 /* Get function entry information */13521352- tmp = dwarf_diename(&spdie);13531353- if (!tmp ||13521352+ func = basefunc = dwarf_diename(&spdie);13531353+ if (!func ||13541354 dwarf_entrypc(&spdie, &baseaddr) != 0 ||13551355- dwarf_decl_line(&spdie, &baseline) != 0)13551355+ dwarf_decl_line(&spdie, &baseline) != 0) {13561356+ lineno = 0;13561357 goto post;13571357- func = tmp;13581358+ }1358135913591359- if (addr == (unsigned long)baseaddr)13601360+ if (addr == (unsigned long)baseaddr) {13601361 /* Function entry - Relative line number is 0 */13611362 lineno = baseline;13621362- else if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr,13631363- &indie)) {13631363+ fname = dwarf_decl_file(&spdie);13641364+ goto post;13651365+ }13661366+13671367+ /* Track down the inline functions step by step */13681368+ while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr,13691369+ &indie)) {13701370+ /* There is an inline function */13641371 if (dwarf_entrypc(&indie, &_addr) == 0 &&13651365- _addr == addr)13721372+ _addr == addr) {13661373 /*13671374 * addr is at an inline function entry.13681375 * In this case, lineno should be the call-site13691369- * line number.13761376+ * line number. 
(overwrite lineinfo)13701377 */13711378 lineno = die_get_call_lineno(&indie);13721372- else {13791379+ fname = die_get_call_file(&indie);13801380+ break;13811381+ } else {13731382 /*13741383 * addr is in an inline function body.13751384 * Since lineno points one of the lines···13861377 * be the entry line of the inline function.13871378 */13881379 tmp = dwarf_diename(&indie);13891389- if (tmp &&13901390- dwarf_decl_line(&spdie, &baseline) == 0)13911391- func = tmp;13801380+ if (!tmp ||13811381+ dwarf_decl_line(&indie, &baseline) != 0)13821382+ break;13831383+ func = tmp;13841384+ spdie = indie;13921385 }13931386 }13871387+ /* Verify the lineno and baseline are in a same file */13881388+ tmp = dwarf_decl_file(&spdie);13891389+ if (!tmp || strcmp(tmp, fname) != 0)13901390+ lineno = 0;13941391 }1395139213961393post:13971394 /* Make a relative line number or an offset */13981395 if (lineno)13991396 ppt->line = lineno - baseline;14001400- else if (func)13971397+ else if (basefunc) {14011398 ppt->offset = addr - (unsigned long)baseaddr;13991399+ func = basefunc;14001400+ }1402140114031402 /* Duplicate strings */14041403 if (func) {
+3-1
tools/perf/util/session.c
···256256 tool->sample = process_event_sample_stub;257257 if (tool->mmap == NULL)258258 tool->mmap = process_event_stub;259259+ if (tool->mmap2 == NULL)260260+ tool->mmap2 = process_event_stub;259261 if (tool->comm == NULL)260262 tool->comm = process_event_stub;261263 if (tool->fork == NULL)···13121310 file_offset = page_offset;13131311 head = data_offset - page_offset;1314131213151315- if (data_offset + data_size < file_size)13131313+ if (data_size && (data_offset + data_size < file_size))13161314 file_size = data_offset + data_size;1317131513181316 progress_next = file_size / 16;