Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

powerpc: handover page flags with a pgprot_t parameter

In order to avoid multiple conversions, handover directly a
pgprot_t to map_kernel_page() as already done for radix.

Do the same for __ioremap_caller() and __ioremap_at().

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

Authored by Christophe Leroy and committed by Michael Ellerman.
Commit: c766ee72 (parent: 56f3c141)

Total diffstat: +64 -77
+1 -1
arch/powerpc/include/asm/book3s/32/pgtable.h
··· 292 292 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) 293 293 #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) 294 294 295 - int map_kernel_page(unsigned long va, phys_addr_t pa, int flags); 295 + int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); 296 296 297 297 /* Generic accessors to PTE bits */ 298 298 static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);}
+1 -2
arch/powerpc/include/asm/book3s/64/hash.h
··· 201 201 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 202 202 203 203 204 - extern int hash__map_kernel_page(unsigned long ea, unsigned long pa, 205 - unsigned long flags); 204 + int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot); 206 205 extern int __meminit hash__vmemmap_create_mapping(unsigned long start, 207 206 unsigned long page_size, 208 207 unsigned long phys);
+3 -4
arch/powerpc/include/asm/book3s/64/pgtable.h
··· 1030 1030 #define pgd_ERROR(e) \ 1031 1031 pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) 1032 1032 1033 - static inline int map_kernel_page(unsigned long ea, unsigned long pa, 1034 - unsigned long flags) 1033 + static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) 1035 1034 { 1036 1035 if (radix_enabled()) { 1037 1036 #if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM) 1038 1037 unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift; 1039 1038 WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE"); 1040 1039 #endif 1041 - return radix__map_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE); 1040 + return radix__map_kernel_page(ea, pa, prot, PAGE_SIZE); 1042 1041 } 1043 - return hash__map_kernel_page(ea, pa, flags); 1042 + return hash__map_kernel_page(ea, pa, prot); 1044 1043 } 1045 1044 1046 1045 static inline int __meminit vmemmap_create_mapping(unsigned long start,
+1 -1
arch/powerpc/include/asm/fixmap.h
··· 72 72 static inline void __set_fixmap(enum fixed_addresses idx, 73 73 phys_addr_t phys, pgprot_t flags) 74 74 { 75 - map_kernel_page(fix_to_virt(idx), phys, pgprot_val(flags)); 75 + map_kernel_page(fix_to_virt(idx), phys, flags); 76 76 } 77 77 78 78 #endif /* !__ASSEMBLY__ */
+2 -2
arch/powerpc/include/asm/io.h
··· 786 786 extern void __iomem *__ioremap(phys_addr_t, unsigned long size, 787 787 unsigned long flags); 788 788 extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size, 789 - unsigned long flags, void *caller); 789 + pgprot_t prot, void *caller); 790 790 791 791 extern void __iounmap(volatile void __iomem *addr); 792 792 793 793 extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea, 794 - unsigned long size, unsigned long flags); 794 + unsigned long size, pgprot_t prot); 795 795 extern void __iounmap_at(void *ea, unsigned long size); 796 796 797 797 /*
+1 -1
arch/powerpc/include/asm/machdep.h
··· 35 35 char *name; 36 36 #ifdef CONFIG_PPC64 37 37 void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size, 38 - unsigned long flags, void *caller); 38 + pgprot_t prot, void *caller); 39 39 void (*iounmap)(volatile void __iomem *token); 40 40 41 41 #ifdef CONFIG_PM
+1 -1
arch/powerpc/include/asm/nohash/32/pgtable.h
··· 323 323 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) 324 324 #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) 325 325 326 - int map_kernel_page(unsigned long va, phys_addr_t pa, int flags); 326 + int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); 327 327 328 328 #endif /* !__ASSEMBLY__ */ 329 329
+1 -2
arch/powerpc/include/asm/nohash/64/pgtable.h
··· 327 327 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) 328 328 #define __swp_entry_to_pte(x) __pte((x).val) 329 329 330 - extern int map_kernel_page(unsigned long ea, unsigned long pa, 331 - unsigned long flags); 330 + int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot); 332 331 extern int __meminit vmemmap_create_mapping(unsigned long start, 333 332 unsigned long page_size, 334 333 unsigned long phys);
+2 -2
arch/powerpc/kernel/io-workarounds.c
··· 153 153 154 154 #ifdef CONFIG_PPC_INDIRECT_MMIO 155 155 static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, 156 - unsigned long flags, void *caller) 156 + pgprot_t prot, void *caller) 157 157 { 158 158 struct iowa_bus *bus; 159 - void __iomem *res = __ioremap_caller(addr, size, flags, caller); 159 + void __iomem *res = __ioremap_caller(addr, size, prot, caller); 160 160 int busno; 161 161 162 162 bus = iowa_pci_find(0, (unsigned long)addr);
+3 -3
arch/powerpc/kernel/isa-bridge.c
··· 110 110 size = 0x10000; 111 111 112 112 __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE, 113 - size, pgprot_val(pgprot_noncached(PAGE_KERNEL))); 113 + size, pgprot_noncached(PAGE_KERNEL)); 114 114 return; 115 115 116 116 inval_range: 117 117 printk(KERN_ERR "no ISA IO ranges or unexpected isa range, " 118 118 "mapping 64k\n"); 119 119 __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE, 120 - 0x10000, pgprot_val(pgprot_noncached(PAGE_KERNEL))); 120 + 0x10000, pgprot_noncached(PAGE_KERNEL)); 121 121 } 122 122 123 123 ··· 253 253 */ 254 254 isa_io_base = ISA_IO_BASE; 255 255 __ioremap_at(pbase, (void *)ISA_IO_BASE, 256 - size, pgprot_val(pgprot_noncached(PAGE_KERNEL))); 256 + size, pgprot_noncached(PAGE_KERNEL)); 257 257 258 258 pr_debug("ISA: Non-PCI bridge is %pOF\n", np); 259 259 }
+1 -1
arch/powerpc/kernel/pci_64.c
··· 159 159 160 160 /* Establish the mapping */ 161 161 if (__ioremap_at(phys_page, area->addr, size_page, 162 - pgprot_val(pgprot_noncached(PAGE_KERNEL))) == NULL) 162 + pgprot_noncached(PAGE_KERNEL)) == NULL) 163 163 return -ENOMEM; 164 164 165 165 /* Fixup hose IO resource */
+1 -2
arch/powerpc/lib/code-patching.c
··· 98 98 else 99 99 pfn = __pa_symbol(addr) >> PAGE_SHIFT; 100 100 101 - err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), 102 - pgprot_val(PAGE_KERNEL)); 101 + err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL); 103 102 104 103 pr_devel("Mapped addr %lx with pfn %lx:%d\n", text_poke_addr, pfn, err); 105 104 if (err)
+1 -2
arch/powerpc/mm/8xx_mmu.c
··· 91 91 { 92 92 unsigned long p = PHYS_IMMR_BASE; 93 93 unsigned long v = VIRT_IMMR_BASE; 94 - unsigned long f = pgprot_val(PAGE_KERNEL_NCG); 95 94 int offset; 96 95 97 96 for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE) 98 - map_kernel_page(v + offset, p + offset, f); 97 + map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG); 99 98 } 100 99 101 100 /* Address of instructions to patch */
+1 -1
arch/powerpc/mm/dma-noncoherent.c
··· 228 228 do { 229 229 SetPageReserved(page); 230 230 map_kernel_page(vaddr, page_to_phys(page), 231 - pgprot_val(pgprot_noncached(PAGE_KERNEL))); 231 + pgprot_noncached(PAGE_KERNEL)); 232 232 page++; 233 233 vaddr += PAGE_SIZE; 234 234 } while (size -= PAGE_SIZE);
+2 -2
arch/powerpc/mm/mem.c
··· 309 309 unsigned long end = __fix_to_virt(FIX_HOLE); 310 310 311 311 for (; v < end; v += PAGE_SIZE) 312 - map_kernel_page(v, 0, 0); /* XXX gross */ 312 + map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */ 313 313 #endif 314 314 315 315 #ifdef CONFIG_HIGHMEM 316 - map_kernel_page(PKMAP_BASE, 0, 0); /* XXX gross */ 316 + map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */ 317 317 pkmap_page_table = virt_to_kpte(PKMAP_BASE); 318 318 319 319 kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
+3 -6
arch/powerpc/mm/pgtable-book3e.c
··· 42 42 * thus must have the low bits clear 43 43 */ 44 44 for (i = 0; i < page_size; i += PAGE_SIZE) 45 - BUG_ON(map_kernel_page(start + i, phys, flags)); 45 + BUG_ON(map_kernel_page(start + i, phys, __pgprot(flags))); 46 46 47 47 return 0; 48 48 } ··· 70 70 * map_kernel_page adds an entry to the ioremap page table 71 71 * and adds an entry to the HPT, possibly bolting it 72 72 */ 73 - int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags) 73 + int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) 74 74 { 75 75 pgd_t *pgdp; 76 76 pud_t *pudp; ··· 89 89 ptep = pte_alloc_kernel(pmdp, ea); 90 90 if (!ptep) 91 91 return -ENOMEM; 92 - set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, 93 - __pgprot(flags))); 94 92 } else { 95 93 pgdp = pgd_offset_k(ea); 96 94 #ifndef __PAGETABLE_PUD_FOLDED ··· 111 113 pmd_populate_kernel(&init_mm, pmdp, ptep); 112 114 } 113 115 ptep = pte_offset_kernel(pmdp, ea); 114 - set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, 115 - __pgprot(flags))); 116 116 } 117 + set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot)); 117 118 118 119 smp_wmb(); 119 120 return 0;
+3 -4
arch/powerpc/mm/pgtable-hash64.c
··· 142 142 * map_kernel_page adds an entry to the ioremap page table 143 143 * and adds an entry to the HPT, possibly bolting it 144 144 */ 145 - int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags) 145 + int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) 146 146 { 147 147 pgd_t *pgdp; 148 148 pud_t *pudp; ··· 161 161 ptep = pte_alloc_kernel(pmdp, ea); 162 162 if (!ptep) 163 163 return -ENOMEM; 164 - set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, 165 - __pgprot(flags))); 164 + set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot)); 166 165 } else { 167 166 /* 168 167 * If the mm subsystem is not fully up, we cannot create a ··· 169 170 * entry in the hardware page table. 170 171 * 171 172 */ 172 - if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags, 173 + if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot), 173 174 mmu_io_psize, mmu_kernel_ssize)) { 174 175 printk(KERN_ERR "Failed to do bolted mapping IO " 175 176 "memory at %016lx !\n", pa);
+17 -20
arch/powerpc/mm/pgtable_32.c
··· 76 76 void __iomem * 77 77 ioremap(phys_addr_t addr, unsigned long size) 78 78 { 79 - unsigned long flags = pgprot_val(pgprot_noncached(PAGE_KERNEL)); 79 + pgprot_t prot = pgprot_noncached(PAGE_KERNEL); 80 80 81 - return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); 81 + return __ioremap_caller(addr, size, prot, __builtin_return_address(0)); 82 82 } 83 83 EXPORT_SYMBOL(ioremap); 84 84 85 85 void __iomem * 86 86 ioremap_wc(phys_addr_t addr, unsigned long size) 87 87 { 88 - unsigned long flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL)); 88 + pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL); 89 89 90 - return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); 90 + return __ioremap_caller(addr, size, prot, __builtin_return_address(0)); 91 91 } 92 92 EXPORT_SYMBOL(ioremap_wc); 93 93 94 94 void __iomem * 95 95 ioremap_wt(phys_addr_t addr, unsigned long size) 96 96 { 97 - unsigned long flags = pgprot_val(pgprot_cached_wthru(PAGE_KERNEL)); 97 + pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL); 98 98 99 - return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); 99 + return __ioremap_caller(addr, size, prot, __builtin_return_address(0)); 100 100 } 101 101 EXPORT_SYMBOL(ioremap_wt); 102 102 103 103 void __iomem * 104 104 ioremap_coherent(phys_addr_t addr, unsigned long size) 105 105 { 106 - unsigned long flags = pgprot_val(pgprot_cached(PAGE_KERNEL)); 106 + pgprot_t prot = pgprot_cached(PAGE_KERNEL); 107 107 108 - return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); 108 + return __ioremap_caller(addr, size, prot, __builtin_return_address(0)); 109 109 } 110 110 EXPORT_SYMBOL(ioremap_coherent); 111 111 ··· 120 120 flags &= ~(_PAGE_USER | _PAGE_EXEC); 121 121 flags |= _PAGE_PRIVILEGED; 122 122 123 - return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); 123 + return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0)); 124 124 } 125 125 
EXPORT_SYMBOL(ioremap_prot); 126 126 127 127 void __iomem * 128 128 __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags) 129 129 { 130 - return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); 130 + return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0)); 131 131 } 132 132 133 133 void __iomem * 134 - __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags, 135 - void *caller) 134 + __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller) 136 135 { 137 136 unsigned long v, i; 138 137 phys_addr_t p; ··· 194 195 195 196 err = 0; 196 197 for (i = 0; i < size && err == 0; i += PAGE_SIZE) 197 - err = map_kernel_page(v+i, p+i, flags); 198 + err = map_kernel_page(v + i, p + i, prot); 198 199 if (err) { 199 200 if (slab_is_available()) 200 201 vunmap((void *)v); ··· 220 221 } 221 222 EXPORT_SYMBOL(iounmap); 222 223 223 - int map_kernel_page(unsigned long va, phys_addr_t pa, int flags) 224 + int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot) 224 225 { 225 226 pmd_t *pd; 226 227 pte_t *pg; ··· 236 237 * hash table 237 238 */ 238 239 BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) && 239 - flags); 240 - set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, 241 - __pgprot(flags))); 240 + pgprot_val(prot)); 241 + set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot)); 242 242 } 243 243 smp_wmb(); 244 244 return err; ··· 248 250 */ 249 251 static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) 250 252 { 251 - unsigned long v, s, f; 253 + unsigned long v, s; 252 254 phys_addr_t p; 253 255 int ktext; 254 256 ··· 258 260 for (; s < top; s += PAGE_SIZE) { 259 261 ktext = ((char *)v >= _stext && (char *)v < etext) || 260 262 ((char *)v >= _sinittext && (char *)v < _einittext); 261 - f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL); 262 - map_kernel_page(v, p, f); 263 + map_kernel_page(v, p, ktext ? 
PAGE_KERNEL_TEXT : PAGE_KERNEL); 263 264 #ifdef CONFIG_PPC_STD_MMU_32 264 265 if (ktext) 265 266 hash_preload(&init_mm, v, 0, 0x300);
+18 -19
arch/powerpc/mm/pgtable_64.c
··· 113 113 * __ioremap_at - Low level function to establish the page tables 114 114 * for an IO mapping 115 115 */ 116 - void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size, 117 - unsigned long flags) 116 + void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot) 118 117 { 119 118 unsigned long i; 120 119 121 120 /* We don't support the 4K PFN hack with ioremap */ 122 - if (flags & H_PAGE_4K_PFN) 121 + if (pgprot_val(prot) & H_PAGE_4K_PFN) 123 122 return NULL; 124 123 125 124 WARN_ON(pa & ~PAGE_MASK); ··· 126 127 WARN_ON(size & ~PAGE_MASK); 127 128 128 129 for (i = 0; i < size; i += PAGE_SIZE) 129 - if (map_kernel_page((unsigned long)ea+i, pa+i, flags)) 130 + if (map_kernel_page((unsigned long)ea + i, pa + i, prot)) 130 131 return NULL; 131 132 132 133 return (void __iomem *)ea; ··· 147 148 } 148 149 149 150 void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size, 150 - unsigned long flags, void *caller) 151 + pgprot_t prot, void *caller) 151 152 { 152 153 phys_addr_t paligned; 153 154 void __iomem *ret; ··· 177 178 return NULL; 178 179 179 180 area->phys_addr = paligned; 180 - ret = __ioremap_at(paligned, area->addr, size, flags); 181 + ret = __ioremap_at(paligned, area->addr, size, prot); 181 182 if (!ret) 182 183 vunmap(area->addr); 183 184 } else { 184 - ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags); 185 + ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot); 185 186 if (ret) 186 187 ioremap_bot += size; 187 188 } ··· 194 195 void __iomem * __ioremap(phys_addr_t addr, unsigned long size, 195 196 unsigned long flags) 196 197 { 197 - return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); 198 + return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0)); 198 199 } 199 200 200 201 void __iomem * ioremap(phys_addr_t addr, unsigned long size) 201 202 { 202 - unsigned long flags = pgprot_val(pgprot_noncached(PAGE_KERNEL)); 203 + pgprot_t 
prot = pgprot_noncached(PAGE_KERNEL); 203 204 void *caller = __builtin_return_address(0); 204 205 205 206 if (ppc_md.ioremap) 206 - return ppc_md.ioremap(addr, size, flags, caller); 207 - return __ioremap_caller(addr, size, flags, caller); 207 + return ppc_md.ioremap(addr, size, prot, caller); 208 + return __ioremap_caller(addr, size, prot, caller); 208 209 } 209 210 210 211 void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size) 211 212 { 212 - unsigned long flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL)); 213 + pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL); 213 214 void *caller = __builtin_return_address(0); 214 215 215 216 if (ppc_md.ioremap) 216 - return ppc_md.ioremap(addr, size, flags, caller); 217 - return __ioremap_caller(addr, size, flags, caller); 217 + return ppc_md.ioremap(addr, size, prot, caller); 218 + return __ioremap_caller(addr, size, prot, caller); 218 219 } 219 220 220 221 void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size) 221 222 { 222 - unsigned long flags = pgprot_val(pgprot_cached(PAGE_KERNEL)); 223 + pgprot_t prot = pgprot_cached(PAGE_KERNEL); 223 224 void *caller = __builtin_return_address(0); 224 225 225 226 if (ppc_md.ioremap) 226 - return ppc_md.ioremap(addr, size, flags, caller); 227 - return __ioremap_caller(addr, size, flags, caller); 227 + return ppc_md.ioremap(addr, size, prot, caller); 228 + return __ioremap_caller(addr, size, prot, caller); 228 229 } 229 230 230 231 void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size, ··· 245 246 flags |= _PAGE_PRIVILEGED; 246 247 247 248 if (ppc_md.ioremap) 248 - return ppc_md.ioremap(addr, size, flags, caller); 249 - return __ioremap_caller(addr, size, flags, caller); 249 + return ppc_md.ioremap(addr, size, __pgprot(flags), caller); 250 + return __ioremap_caller(addr, size, __pgprot(flags), caller); 250 251 } 251 252 252 253
+1 -1
drivers/pcmcia/electra_cf.c
··· 230 230 231 231 if (!cf->mem_base || !cf->io_virt || !cf->gpio_base || 232 232 (__ioremap_at(io.start, cf->io_virt, cf->io_size, 233 - pgprot_val(pgprot_noncached(PAGE_KERNEL))) == NULL)) { 233 + pgprot_noncached(PAGE_KERNEL)) == NULL)) { 234 234 dev_err(device, "can't ioremap ranges\n"); 235 235 status = -ENOMEM; 236 236 goto fail1;