Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
"These are late by a week; they should have been merged during the
merge window, but unfortunately, the ARM kernel build/boot farms were
indicating random failures, and it wasn't clear whether the cause was
something in these changes or something during the merge window.

This is a set of merge window fixes with some documentation additions"

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
ARM: avoid unwanted GCC memset()/memcpy() optimisations for IO variants
ARM: pgtable: document mapping types
ARM: io: convert ioremap*() to functions
ARM: io: fix ioremap_wt() implementation
ARM: io: document ARM specific behaviour of ioremap*() implementations
ARM: fix lockdep unannotated irqs-off warning
ARM: 8397/1: fix vdsomunge not to depend on glibc specific error.h
ARM: add helpful message when truncating physical memory
ARM: add help text for HIGHPTE configuration entry
ARM: fix DEBUG_SET_MODULE_RONX build dependencies
ARM: 8396/1: use phys_addr_t in pfn_to_kaddr()
ARM: 8394/1: update memblock limit after mapping lowmem
ARM: 8393/1: smp: Fix suspicious RCU usage with ipi tracepoints

+203 -74
+6
arch/arm/Kconfig
···
1693 1693   config HIGHPTE
1694 1694   	bool "Allocate 2nd-level pagetables from highmem"
1695 1695   	depends on HIGHMEM
     1696 + 	help
     1697 + 	  The VM uses one page of physical memory for each page table.
     1698 + 	  For systems with a lot of processes, this can use a lot of
     1699 + 	  precious low memory, eventually leading to low memory being
     1700 + 	  consumed by page tables. Setting this option will allow
     1701 + 	  user-space 2nd level page tables to reside in high memory.
1696 1702   
1697 1703   config HW_PERF_EVENTS
1698 1704   	bool "Enable hardware performance counter support for perf events"
+1 -1
arch/arm/Kconfig.debug
···
1635 1635   
1636 1636   config DEBUG_SET_MODULE_RONX
1637 1637   	bool "Set loadable kernel module data as NX and text as RO"
1638      - 	depends on MODULES
     1638 + 	depends on MODULES && MMU
1639 1639   	---help---
1640 1640   	  This option helps catch unintended modifications to loadable
1641 1641   	  kernel module's text and read-only data. It also prevents execution
+58 -17
arch/arm/include/asm/io.h
···
140 140    * The _caller variety takes a __builtin_return_address(0) value for
141 141    * /proc/vmalloc to use - and should only be used in non-inline functions.
142 142    */
143     - extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
144     - 	size_t, unsigned int, void *);
145 143   extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
146 144   	void *);
147     - 
148 145   extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
149     - extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
150 146   extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
151 147   extern void __iounmap(volatile void __iomem *addr);
152     - extern void __arm_iounmap(volatile void __iomem *addr);
153 148   
154 149   extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
155 150   	unsigned int, void *);
···
316 321   static inline void memset_io(volatile void __iomem *dst, unsigned c,
317 322   	size_t count)
318 323   {
319     - 	memset((void __force *)dst, c, count);
    324 + 	extern void mmioset(void *, unsigned int, size_t);
    325 + 	mmioset((void __force *)dst, c, count);
320 326   }
321 327   #define memset_io(dst,c,count) memset_io(dst,c,count)
322 328   
323 329   static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
324 330   	size_t count)
325 331   {
326     - 	memcpy(to, (const void __force *)from, count);
    332 + 	extern void mmiocpy(void *, const void *, size_t);
    333 + 	mmiocpy(to, (const void __force *)from, count);
327 334   }
328 335   #define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
329 336   
330 337   static inline void memcpy_toio(volatile void __iomem *to, const void *from,
331 338   	size_t count)
332 339   {
333     - 	memcpy((void __force *)to, from, count);
    340 + 	extern void mmiocpy(void *, const void *, size_t);
    341 + 	mmiocpy((void __force *)to, from, count);
334 342   }
335 343   #define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
336 344   
···
346 348   #endif	/* readl */
347 349   
348 350   /*
349     -  * ioremap and friends.
    351 +  * ioremap() and friends.
350 352    *
351     -  * ioremap takes a PCI memory address, as specified in
352     -  * Documentation/io-mapping.txt.
    353 +  * ioremap() takes a resource address, and size. Due to the ARM memory
    354 +  * types, it is important to use the correct ioremap() function as each
    355 +  * mapping has specific properties.
353 356    *
    357 +  * Function		Memory type	Cacheability	Cache hint
    358 +  * ioremap()		Device		n/a		n/a
    359 +  * ioremap_nocache()	Device		n/a		n/a
    360 +  * ioremap_cache()	Normal		Writeback	Read allocate
    361 +  * ioremap_wc()		Normal		Non-cacheable	n/a
    362 +  * ioremap_wt()		Normal		Non-cacheable	n/a
    363 +  *
    364 +  * All device mappings have the following properties:
    365 +  * - no access speculation
    366 +  * - no repetition (eg, on return from an exception)
    367 +  * - number, order and size of accesses are maintained
    368 +  * - unaligned accesses are "unpredictable"
    369 +  * - writes may be delayed before they hit the endpoint device
    370 +  *
    371 +  * ioremap_nocache() is the same as ioremap() as there are too many device
    372 +  * drivers using this for device registers, and documentation which tells
    373 +  * people to use it for such for this to be any different. This is not a
    374 +  * safe fallback for memory-like mappings, or memory regions where the
    375 +  * compiler may generate unaligned accesses - eg, via inlining its own
    376 +  * memcpy.
    377 +  *
    378 +  * All normal memory mappings have the following properties:
    379 +  * - reads can be repeated with no side effects
    380 +  * - repeated reads return the last value written
    381 +  * - reads can fetch additional locations without side effects
    382 +  * - writes can be repeated (in certain cases) with no side effects
    383 +  * - writes can be merged before accessing the target
    384 +  * - unaligned accesses can be supported
    385 +  * - ordering is not guaranteed without explicit dependencies or barrier
    386 +  *   instructions
    387 +  * - writes may be delayed before they hit the endpoint memory
    388 +  *
    389 +  * The cache hint is only a performance hint: CPUs may alias these hints.
    390 +  * Eg, a CPU not implementing read allocate but implementing write allocate
    391 +  * will provide a write allocate mapping instead.
354 392    */
355     - #define ioremap(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)
356     - #define ioremap_nocache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE)
357     - #define ioremap_cache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
358     - #define ioremap_wc(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_WC)
359     - #define ioremap_wt(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE)
360     - #define iounmap			__arm_iounmap
    393 + void __iomem *ioremap(resource_size_t res_cookie, size_t size);
    394 + #define ioremap ioremap
    395 + #define ioremap_nocache ioremap
    396 + 
    397 + void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
    398 + #define ioremap_cache ioremap_cache
    399 + 
    400 + void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
    401 + #define ioremap_wc ioremap_wc
    402 + #define ioremap_wt ioremap_wc
    403 + 
    404 + void iounmap(volatile void __iomem *iomem_cookie);
    405 + #define iounmap iounmap
361 406   
362 407   /*
363 408    * io{read,write}{16,32}be() macros
+1 -1
arch/arm/include/asm/memory.h
···
275 275    */
276 276   #define __pa(x)			__virt_to_phys((unsigned long)(x))
277 277   #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
278     - #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
    278 + #define pfn_to_kaddr(pfn)	__va((phys_addr_t)(pfn) << PAGE_SHIFT)
279 279   
280 280   extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
281 281   
+30 -1
arch/arm/include/asm/pgtable-2level.h
···
129 129   
130 130   /*
131 131    * These are the memory types, defined to be compatible with
132     -  * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
    132 +  * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
    133 +  * ARMv6+ without TEX remapping, they are a table index.
    134 +  * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B
    135 +  *
    136 +  * MT type		Pre-ARMv6	ARMv6+ type / cacheable status
    137 +  * UNCACHED		Uncached	Strongly ordered
    138 +  * BUFFERABLE		Bufferable	Normal memory / non-cacheable
    139 +  * WRITETHROUGH	Writethrough	Normal memory / write through
    140 +  * WRITEBACK		Writeback	Normal memory / write back, read alloc
    141 +  * MINICACHE		Minicache	N/A
    142 +  * WRITEALLOC		Writeback	Normal memory / write back, write alloc
    143 +  * DEV_SHARED		Uncached	Device memory (shared)
    144 +  * DEV_NONSHARED	Uncached	Device memory (non-shared)
    145 +  * DEV_WC		Bufferable	Normal memory / non-cacheable
    146 +  * DEV_CACHED		Writeback	Normal memory / write back, read alloc
    147 +  * VECTORS		Variable	Normal memory / variable
    148 +  *
    149 +  * All normal memory mappings have the following properties:
    150 +  * - reads can be repeated with no side effects
    151 +  * - repeated reads return the last value written
    152 +  * - reads can fetch additional locations without side effects
    153 +  * - writes can be repeated (in certain cases) with no side effects
    154 +  * - writes can be merged before accessing the target
    155 +  * - unaligned accesses can be supported
    156 +  *
    157 +  * All device mappings have the following properties:
    158 +  * - no access speculation
    159 +  * - no repetition (eg, on return from an exception)
    160 +  * - number, order and size of accesses are maintained
    161 +  * - unaligned accesses are "unpredictable"
133 162    */
134 163   #define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
135 164   #define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */
+6
arch/arm/kernel/armksyms.c
···
50 50   
51 51   extern void fpundefinstr(void);
52 52   
   53 + void mmioset(void *, unsigned int, size_t);
   54 + void mmiocpy(void *, const void *, size_t);
   55 + 
53 56   	/* platform dependent support */
54 57   EXPORT_SYMBOL(arm_delay_ops);
55 58   
···
90 87   EXPORT_SYMBOL(memmove);
91 88   EXPORT_SYMBOL(memchr);
92 89   EXPORT_SYMBOL(__memzero);
   90 + 
   91 + EXPORT_SYMBOL(mmioset);
   92 + EXPORT_SYMBOL(mmiocpy);
93 93   
94 94   #ifdef CONFIG_MMU
95 95   EXPORT_SYMBOL(copy_page);
+1 -1
arch/arm/kernel/entry-armv.S
···
410 410   	zero_fp
411 411   
412 412   	.if	\trace
413     - #ifdef CONFIG_IRQSOFF_TRACER
    413 + #ifdef CONFIG_TRACE_IRQFLAGS
414 414   	bl	trace_hardirqs_off
415 415   #endif
416 416   	ct_user_exit save = 0
+2 -2
arch/arm/kernel/smp.c
···
578 578   	struct pt_regs *old_regs = set_irq_regs(regs);
579 579   
580 580   	if ((unsigned)ipinr < NR_IPI) {
581     - 		trace_ipi_entry(ipi_types[ipinr]);
    581 + 		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
582 582   		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
583 583   	}
584 584   
···
637 637   	}
638 638   
639 639   	if ((unsigned)ipinr < NR_IPI)
640     - 		trace_ipi_exit(ipi_types[ipinr]);
    640 + 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
641 641   	set_irq_regs(old_regs);
642 642   }
643 643   
+2
arch/arm/lib/memcpy.S
···
61 61   
62 62   /* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
63 63   
   64 + ENTRY(mmiocpy)
64 65   ENTRY(memcpy)
65 66   
66 67   #include "copy_template.S"
67 68   
68 69   ENDPROC(memcpy)
   70 + ENDPROC(mmiocpy)
+2
arch/arm/lib/memset.S
···
16 16   	.text
17 17   	.align	5
18 18   
   19 + ENTRY(mmioset)
19 20   ENTRY(memset)
20 21   UNWIND( .fnstart )
21 22   	ands	r3, r0, #3		@ 1 unaligned?
···
134 133   	b	1b
135 134   UNWIND( .fnend   )
136 135   ENDPROC(memset)
    136 + ENDPROC(mmioset)
+23 -10
arch/arm/mm/ioremap.c
···
255 255   }
256 256   #endif
257 257   
258     - void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
    258 + static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
259 259   	unsigned long offset, size_t size, unsigned int mtype, void *caller)
260 260   {
261 261   	const struct mem_type *type;
···
363 363   	unsigned int mtype)
364 364   {
365 365   	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
366     - 			__builtin_return_address(0));
    366 + 					__builtin_return_address(0));
367 367   }
368 368   EXPORT_SYMBOL(__arm_ioremap_pfn);
369 369   
···
371 371   	unsigned int, void *) =
372 372   	__arm_ioremap_caller;
373 373   
374     - void __iomem *
375     - __arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype)
    374 + void __iomem *ioremap(resource_size_t res_cookie, size_t size)
376 375   {
377     - 	return arch_ioremap_caller(phys_addr, size, mtype,
378     - 		__builtin_return_address(0));
    376 + 	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
    377 + 				   __builtin_return_address(0));
379 378   }
380     - EXPORT_SYMBOL(__arm_ioremap);
    379 + EXPORT_SYMBOL(ioremap);
    380 + 
    381 + void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
    382 + {
    383 + 	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
    384 + 				   __builtin_return_address(0));
    385 + }
    386 + EXPORT_SYMBOL(ioremap_cache);
    387 + 
    388 + void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
    389 + {
    390 + 	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
    391 + 				   __builtin_return_address(0));
    392 + }
    393 + EXPORT_SYMBOL(ioremap_wc);
381 394   
382 395   /*
383 396    * Remap an arbitrary physical address space into the kernel virtual
···
444 431   
445 432   void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
446 433   
447     - void __arm_iounmap(volatile void __iomem *io_addr)
    434 + void iounmap(volatile void __iomem *cookie)
448 435   {
449     - 	arch_iounmap(io_addr);
    436 + 	arch_iounmap(cookie);
450 437   }
451     - EXPORT_SYMBOL(__arm_iounmap);
    438 + EXPORT_SYMBOL(iounmap);
452 439   
453 440   #ifdef CONFIG_PCI
454 441   static int pci_ioremap_mem_type = MT_DEVICE;
+7
arch/arm/mm/mmu.c
···
1072 1072   	int highmem = 0;
1073 1073   	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
1074 1074   	struct memblock_region *reg;
     1075 + 	bool should_use_highmem = false;
1075 1076   
1076 1077   	for_each_memblock(memory, reg) {
1077 1078   		phys_addr_t block_start = reg->base;
···
1091 1090   			pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
1092 1091   				  &block_start, &block_end);
1093 1092   			memblock_remove(reg->base, reg->size);
     1093 + 			should_use_highmem = true;
1094 1094   			continue;
1095 1095   		}
1096 1096   
···
1102 1100   				  &block_start, &block_end, &vmalloc_limit);
1103 1101   			memblock_remove(vmalloc_limit, overlap_size);
1104 1102   			block_end = vmalloc_limit;
     1103 + 			should_use_highmem = true;
1105 1104   		}
1106 1105   	}
1107 1106   
···
1136 1133   
1137 1134   		}
1138 1135   	}
     1136 + 
     1137 + 	if (should_use_highmem)
     1138 + 		pr_notice("Consider using a HIGHMEM enabled kernel.\n");
1139 1139   
1140 1140   	high_memory = __va(arm_lowmem_limit - 1) + 1;
1141 1141   
···
1500 1494   	build_mem_type_table();
1501 1495   	prepare_page_table();
1502 1496   	map_lowmem();
     1497 + 	memblock_set_current_limit(arm_lowmem_limit);
1503 1498   	dma_contiguous_remap();
1504 1499   	devicemaps_init(mdesc);
1505 1500   	kmap_init();
+31 -18
arch/arm/mm/nommu.c
···
351 351   }
352 352   EXPORT_SYMBOL(__arm_ioremap_pfn);
353 353   
354     - void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
355     - 	size_t size, unsigned int mtype, void *caller)
356     - {
357     - 	return __arm_ioremap_pfn(pfn, offset, size, mtype);
358     - }
359     - 
360     - void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size,
361     - 	unsigned int mtype)
362     - {
363     - 	return (void __iomem *)phys_addr;
364     - }
365     - EXPORT_SYMBOL(__arm_ioremap);
366     - 
367     - void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
368     - 
369 354   void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
370 355   	unsigned int mtype, void *caller)
371 356   {
372     - 	return __arm_ioremap(phys_addr, size, mtype);
    357 + 	return (void __iomem *)phys_addr;
373 358   }
    359 + 
    360 + void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
    361 + 
    362 + void __iomem *ioremap(resource_size_t res_cookie, size_t size)
    363 + {
    364 + 	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
    365 + 				    __builtin_return_address(0));
    366 + }
    367 + EXPORT_SYMBOL(ioremap);
    368 + 
    369 + void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
    370 + {
    371 + 	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
    372 + 				    __builtin_return_address(0));
    373 + }
    374 + EXPORT_SYMBOL(ioremap_cache);
    375 + 
    376 + void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
    377 + {
    378 + 	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
    379 + 				    __builtin_return_address(0));
    380 + }
    381 + EXPORT_SYMBOL(ioremap_wc);
    382 + 
    383 + void __iounmap(volatile void __iomem *addr)
    384 + {
    385 + }
    386 + EXPORT_SYMBOL(__iounmap);
374 387   
375 388   void (*arch_iounmap)(volatile void __iomem *);
376 389   
377     - void __arm_iounmap(volatile void __iomem *addr)
    390 + void iounmap(volatile void __iomem *addr)
378 391   {
379 392   }
380     - EXPORT_SYMBOL(__arm_iounmap);
    393 + EXPORT_SYMBOL(iounmap);
+33 -23
arch/arm/vdso/vdsomunge.c
···
45 45    * it does.
46 46    */
47 47   
48     - #define _GNU_SOURCE
49     - 
50 48   #include <byteswap.h>
51 49   #include <elf.h>
52 50   #include <errno.h>
53     - #include <error.h>
54 51   #include <fcntl.h>
   52 + #include <stdarg.h>
55 53   #include <stdbool.h>
56 54   #include <stdio.h>
57 55   #include <stdlib.h>
···
80 82   #define EF_ARM_ABI_FLOAT_HARD 0x400
81 83   #endif
82 84   
   85 + static int failed;
   86 + static const char *argv0;
83 87   static const char *outfile;
   88 + 
   89 + static void fail(const char *fmt, ...)
   90 + {
   91 + 	va_list ap;
   92 + 
   93 + 	failed = 1;
   94 + 	fprintf(stderr, "%s: ", argv0);
   95 + 	va_start(ap, fmt);
   96 + 	vfprintf(stderr, fmt, ap);
   97 + 	va_end(ap);
   98 + 	exit(EXIT_FAILURE);
   99 + }
84 100   
85 101   static void cleanup(void)
86 102   {
87     - 	if (error_message_count > 0 && outfile != NULL)
    103 + 	if (failed && outfile != NULL)
88 104   		unlink(outfile);
89 105   }
90 106   
···
131 119   	int infd;
132 120   
133 121   	atexit(cleanup);
    122 + 	argv0 = argv[0];
134 123   
135 124   	if (argc != 3)
136     - 		error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]);
    125 + 		fail("Usage: %s [infile] [outfile]\n", argv[0]);
137 126   
138 127   	infile = argv[1];
139 128   	outfile = argv[2];
140 129   
141 130   	infd = open(infile, O_RDONLY);
142 131   	if (infd < 0)
143     - 		error(EXIT_FAILURE, errno, "Cannot open %s", infile);
    132 + 		fail("Cannot open %s: %s\n", infile, strerror(errno));
144 133   
145 134   	if (fstat(infd, &stat) != 0)
146     - 		error(EXIT_FAILURE, errno, "Failed stat for %s", infile);
    135 + 		fail("Failed stat for %s: %s\n", infile, strerror(errno));
147 136   
148 137   	inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
149 138   	if (inbuf == MAP_FAILED)
150     - 		error(EXIT_FAILURE, errno, "Failed to map %s", infile);
    139 + 		fail("Failed to map %s: %s\n", infile, strerror(errno));
151 140   
152 141   	close(infd);
153 142   
154 143   	inhdr = inbuf;
155 144   
156 145   	if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
157     - 		error(EXIT_FAILURE, 0, "Not an ELF file");
    146 + 		fail("Not an ELF file\n");
158 147   
159 148   	if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
160     - 		error(EXIT_FAILURE, 0, "Unsupported ELF class");
    149 + 		fail("Unsupported ELF class\n");
161 150   
162 151   	swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;
163 152   
164 153   	if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
165     - 		error(EXIT_FAILURE, 0, "Not a shared object");
    154 + 		fail("Not a shared object\n");
166 155   
167     - 	if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) {
168     - 		error(EXIT_FAILURE, 0, "Unsupported architecture %#x",
169     - 		      inhdr->e_machine);
170     - 	}
    156 + 	if (read_elf_half(inhdr->e_machine, swap) != EM_ARM)
    157 + 		fail("Unsupported architecture %#x\n", inhdr->e_machine);
171 158   
172 159   	e_flags = read_elf_word(inhdr->e_flags, swap);
173 160   
174 161   	if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
175     - 		error(EXIT_FAILURE, 0, "Unsupported EABI version %#x",
176     - 		      EF_ARM_EABI_VERSION(e_flags));
    162 + 		fail("Unsupported EABI version %#x\n",
    163 + 		     EF_ARM_EABI_VERSION(e_flags));
177 164   	}
178 165   
179 166   	if (e_flags & EF_ARM_ABI_FLOAT_HARD)
180     - 		error(EXIT_FAILURE, 0,
181     - 		      "Unexpected hard-float flag set in e_flags");
    167 + 		fail("Unexpected hard-float flag set in e_flags\n");
182 168   
183 169   	clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);
184 170   
185 171   	outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
186 172   	if (outfd < 0)
187     - 		error(EXIT_FAILURE, errno, "Cannot open %s", outfile);
    173 + 		fail("Cannot open %s: %s\n", outfile, strerror(errno));
188 174   
189 175   	if (ftruncate(outfd, stat.st_size) != 0)
190     - 		error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile);
    176 + 		fail("Cannot truncate %s: %s\n", outfile, strerror(errno));
191 177   
192 178   	outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
193 179   		      outfd, 0);
194 180   	if (outbuf == MAP_FAILED)
195     - 		error(EXIT_FAILURE, errno, "Failed to map %s", outfile);
    181 + 		fail("Failed to map %s: %s\n", outfile, strerror(errno));
196 182   
197 183   	close(outfd);
198 184   
···
205 195   	}
206 196   
207 197   	if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
208     - 		error(EXIT_FAILURE, errno, "Failed to sync %s", outfile);
    198 + 		fail("Failed to sync %s: %s\n", outfile, strerror(errno));
209 199   
210 200   	return EXIT_SUCCESS;
211 201   }