Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'akpm' (fixes from Andrew)

Merge misc fixes from Andrew Morton:

- A bunch of fixes

- Finish off the idr API conversions before someone starts to use the
old interfaces again.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
idr: idr_alloc() shouldn't trigger lowmem warning when preloaded
UAPI: fix endianness conditionals in M32R's asm/stat.h
UAPI: fix endianness conditionals in linux/raid/md_p.h
UAPI: fix endianness conditionals in linux/acct.h
UAPI: fix endianness conditionals in linux/aio_abi.h
decompressors: fix typo "POWERPC"
mm/fremap.c: fix oops on error path
idr: deprecate idr_pre_get() and idr_get_new[_above]()
tidspbridge: convert to idr_alloc()
zcache: convert to idr_alloc()
mlx4: remove leftover idr_pre_get() call
workqueue: convert to idr_alloc()
nfsd: convert to idr_alloc()
nfsd: remove unused get_new_stid()
kernel/signal.c: use __ARCH_HAS_SA_RESTORER instead of SA_RESTORER
signal: always clear sa_restorer on execve
mm: remove_memory(): fix end_pfn setting
include/linux/res_counter.h needs errno.h

+144 -174
+2 -2
arch/m32r/include/uapi/asm/stat.h
··· 63 63 long long st_size; 64 64 unsigned long st_blksize; 65 65 66 - #if defined(__BIG_ENDIAN) 66 + #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) 67 67 unsigned long __pad4; /* future possible st_blocks high bits */ 68 68 unsigned long st_blocks; /* Number 512-byte blocks allocated. */ 69 - #elif defined(__LITTLE_ENDIAN) 69 + #elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) 70 70 unsigned long st_blocks; /* Number 512-byte blocks allocated. */ 71 71 unsigned long __pad4; /* future possible st_blocks high bits */ 72 72 #else
-1
drivers/infiniband/hw/mlx4/cm.c
··· 362 362 INIT_LIST_HEAD(&dev->sriov.cm_list); 363 363 dev->sriov.sl_id_map = RB_ROOT; 364 364 idr_init(&dev->sriov.pv_id_table); 365 - idr_pre_get(&dev->sriov.pv_id_table, GFP_KERNEL); 366 365 } 367 366 368 367 /* slave = -1 ==> all slaves */
+26 -44
drivers/staging/tidspbridge/rmgr/drv.c
··· 76 76 struct node_res_object **node_res_obj = 77 77 (struct node_res_object **)node_resource; 78 78 struct process_context *ctxt = (struct process_context *)process_ctxt; 79 - int status = 0; 80 79 int retval; 81 80 82 81 *node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL); 83 - if (!*node_res_obj) { 84 - status = -ENOMEM; 85 - goto func_end; 86 - } 82 + if (!*node_res_obj) 83 + return -ENOMEM; 87 84 88 85 (*node_res_obj)->node = hnode; 89 - retval = idr_get_new(ctxt->node_id, *node_res_obj, 90 - &(*node_res_obj)->id); 91 - if (retval == -EAGAIN) { 92 - if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) { 93 - pr_err("%s: OUT OF MEMORY\n", __func__); 94 - status = -ENOMEM; 95 - goto func_end; 96 - } 97 - 98 - retval = idr_get_new(ctxt->node_id, *node_res_obj, 99 - &(*node_res_obj)->id); 86 + retval = idr_alloc(ctxt->node_id, *node_res_obj, 0, 0, GFP_KERNEL); 87 + if (retval >= 0) { 88 + (*node_res_obj)->id = retval; 89 + return 0; 100 90 } 101 - if (retval) { 91 + 92 + kfree(*node_res_obj); 93 + 94 + if (retval == -ENOSPC) { 102 95 pr_err("%s: FAILED, IDR is FULL\n", __func__); 103 - status = -EFAULT; 96 + return -EFAULT; 97 + } else { 98 + pr_err("%s: OUT OF MEMORY\n", __func__); 99 + return -ENOMEM; 104 100 } 105 - func_end: 106 - if (status) 107 - kfree(*node_res_obj); 108 - 109 - return status; 110 101 } 111 102 112 103 /* Release all Node resources and its context ··· 192 201 struct strm_res_object **pstrm_res = 193 202 (struct strm_res_object **)strm_res; 194 203 struct process_context *ctxt = (struct process_context *)process_ctxt; 195 - int status = 0; 196 204 int retval; 197 205 198 206 *pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL); 199 - if (*pstrm_res == NULL) { 200 - status = -EFAULT; 201 - goto func_end; 202 - } 207 + if (*pstrm_res == NULL) 208 + return -EFAULT; 203 209 204 210 (*pstrm_res)->stream = stream_obj; 205 - retval = idr_get_new(ctxt->stream_id, *pstrm_res, 206 - &(*pstrm_res)->id); 207 - if (retval == -EAGAIN) { 208 - if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) { 209 - pr_err("%s: OUT OF MEMORY\n", __func__); 210 - status = -ENOMEM; 211 - goto func_end; 212 - } 213 - 214 - retval = idr_get_new(ctxt->stream_id, *pstrm_res, 215 - &(*pstrm_res)->id); 211 + retval = idr_alloc(ctxt->stream_id, *pstrm_res, 0, 0, GFP_KERNEL); 212 + if (retval >= 0) { 213 + (*pstrm_res)->id = retval; 214 + return 0; 216 215 } 217 - if (retval) { 216 + 217 + if (retval == -ENOSPC) { 218 218 pr_err("%s: FAILED, IDR is FULL\n", __func__); 219 - status = -EPERM; 219 + return -EPERM; 220 + } else { 221 + pr_err("%s: OUT OF MEMORY\n", __func__); 222 + return -ENOMEM; 220 223 } 221 - 222 - func_end: 223 - return status; 224 224 } 225 225 226 226 static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
+10 -15
drivers/staging/zcache/ramster/tcp.c
··· 300 300 301 301 static int r2net_prep_nsw(struct r2net_node *nn, struct r2net_status_wait *nsw) 302 302 { 303 - int ret = 0; 303 + int ret; 304 304 305 - do { 306 - if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) { 307 - ret = -EAGAIN; 308 - break; 309 - } 310 - spin_lock(&nn->nn_lock); 311 - ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id); 312 - if (ret == 0) 313 - list_add_tail(&nsw->ns_node_item, 314 - &nn->nn_status_list); 315 - spin_unlock(&nn->nn_lock); 316 - } while (ret == -EAGAIN); 305 + spin_lock(&nn->nn_lock); 306 + ret = idr_alloc(&nn->nn_status_idr, nsw, 0, 0, GFP_ATOMIC); 307 + if (ret >= 0) { 308 + nsw->ns_id = ret; 309 + list_add_tail(&nsw->ns_node_item, &nn->nn_status_list); 310 + } 311 + spin_unlock(&nn->nn_lock); 317 312 318 - if (ret == 0) { 313 + if (ret >= 0) { 319 314 init_waitqueue_head(&nsw->ns_wq); 320 315 nsw->ns_sys_status = R2NET_ERR_NONE; 321 316 nsw->ns_status = 0; 317 + return 0; 322 318 } 323 - 324 319 return ret; 325 320 } 326 321
+2 -34
fs/nfsd/nfs4state.c
··· 230 230 __nfs4_file_put_access(fp, oflag); 231 231 } 232 232 233 - static inline int get_new_stid(struct nfs4_stid *stid) 234 - { 235 - static int min_stateid = 0; 236 - struct idr *stateids = &stid->sc_client->cl_stateids; 237 - int new_stid; 238 - int error; 239 - 240 - error = idr_get_new_above(stateids, stid, min_stateid, &new_stid); 241 - /* 242 - * Note: the necessary preallocation was done in 243 - * nfs4_alloc_stateid(). The idr code caps the number of 244 - * preallocations that can exist at a time, but the state lock 245 - * prevents anyone from using ours before we get here: 246 - */ 247 - WARN_ON_ONCE(error); 248 - /* 249 - * It shouldn't be a problem to reuse an opaque stateid value. 250 - * I don't think it is for 4.1. But with 4.0 I worry that, for 251 - * example, a stray write retransmission could be accepted by 252 - * the server when it should have been rejected. Therefore, 253 - * adopt a trick from the sctp code to attempt to maximize the 254 - * amount of time until an id is reused, by ensuring they always 255 - * "increase" (mod INT_MAX): 256 - */ 257 - 258 - min_stateid = new_stid+1; 259 - if (min_stateid == INT_MAX) 260 - min_stateid = 0; 261 - return new_stid; 262 - } 263 - 264 233 static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct 265 234 kmem_cache *slab) 266 235 { ··· 242 273 if (!stid) 243 274 return NULL; 244 275 245 - if (!idr_pre_get(stateids, GFP_KERNEL)) 246 - goto out_free; 247 - if (idr_get_new_above(stateids, stid, min_stateid, &new_id)) 276 + new_id = idr_alloc(stateids, stid, min_stateid, 0, GFP_KERNEL); 277 + if (new_id < 0) 248 278 goto out_free; 249 279 stid->sc_client = cl; 250 280 stid->sc_type = 0;
+50 -16
include/linux/idr.h
··· 73 73 */ 74 74 75 75 void *idr_find_slowpath(struct idr *idp, int id); 76 - int idr_pre_get(struct idr *idp, gfp_t gfp_mask); 77 - int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); 78 76 void idr_preload(gfp_t gfp_mask); 79 77 int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); 80 78 int idr_for_each(struct idr *idp, ··· 118 120 } 119 121 120 122 /** 121 - * idr_get_new - allocate new idr entry 122 - * @idp: idr handle 123 - * @ptr: pointer you want associated with the id 124 - * @id: pointer to the allocated handle 125 - * 126 - * Simple wrapper around idr_get_new_above() w/ @starting_id of zero. 127 - */ 128 - static inline int idr_get_new(struct idr *idp, void *ptr, int *id) 129 - { 130 - return idr_get_new_above(idp, ptr, 0, id); 131 - } 132 - 133 - /** 134 123 * idr_for_each_entry - iterate over an idr's elements of a given type 135 124 * @idp: idr handle 136 125 * @entry: the type * to use as cursor ··· 128 143 entry != NULL; \ 129 144 ++id, entry = (typeof(entry))idr_get_next((idp), &(id))) 130 145 131 - void __idr_remove_all(struct idr *idp); /* don't use */ 146 + /* 147 + * Don't use the following functions. These exist only to suppress 148 + * deprecated warnings on EXPORT_SYMBOL()s. 149 + */ 150 + int __idr_pre_get(struct idr *idp, gfp_t gfp_mask); 151 + int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); 152 + void __idr_remove_all(struct idr *idp); 153 + 154 + /** 155 + * idr_pre_get - reserve resources for idr allocation 156 + * @idp: idr handle 157 + * @gfp_mask: memory allocation flags 158 + * 159 + * Part of old alloc interface. This is going away. Use 160 + * idr_preload[_end]() and idr_alloc() instead. 161 + */ 162 + static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask) 163 + { 164 + return __idr_pre_get(idp, gfp_mask); 165 + } 166 + 167 + /** 168 + * idr_get_new_above - allocate new idr entry above or equal to a start id 169 + * @idp: idr handle 170 + * @ptr: pointer you want associated with the id 171 + * @starting_id: id to start search at 172 + * @id: pointer to the allocated handle 173 + * 174 + * Part of old alloc interface. This is going away. Use 175 + * idr_preload[_end]() and idr_alloc() instead. 176 + */ 177 + static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr, 178 + int starting_id, int *id) 179 + { 180 + return __idr_get_new_above(idp, ptr, starting_id, id); 181 + } 182 + 183 + /** 184 + * idr_get_new - allocate new idr entry 185 + * @idp: idr handle 186 + * @ptr: pointer you want associated with the id 187 + * @id: pointer to the allocated handle 188 + * 189 + * Part of old alloc interface. This is going away. Use 190 + * idr_preload[_end]() and idr_alloc() instead. 191 + */ 192 + static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id) 193 + { 194 + return __idr_get_new_above(idp, ptr, 0, id); 195 + } 132 196 133 197 /** 134 198 * idr_remove_all - remove all ids from the given idr tree
+1
include/linux/res_counter.h
··· 14 14 */ 15 15 16 16 #include <linux/cgroup.h> 17 + #include <linux/errno.h> 17 18 18 19 /* 19 20 * The core object. the cgroup that wishes to account for some
+4 -2
include/uapi/linux/acct.h
··· 107 107 #define ACORE 0x08 /* ... dumped core */ 108 108 #define AXSIG 0x10 /* ... was killed by a signal */ 109 109 110 - #ifdef __BIG_ENDIAN 110 + #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) 111 111 #define ACCT_BYTEORDER 0x80 /* accounting file is big endian */ 112 - #else 112 + #elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) 113 113 #define ACCT_BYTEORDER 0x00 /* accounting file is little endian */ 114 + #else 115 + #error unspecified endianness 114 116 #endif 115 117 116 118 #ifndef __KERNEL__
+2 -2
include/uapi/linux/aio_abi.h
··· 62 62 __s64 res2; /* secondary result */ 63 63 }; 64 64 65 - #if defined(__LITTLE_ENDIAN) 65 + #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) 66 66 #define PADDED(x,y) x, y 67 - #elif defined(__BIG_ENDIAN) 67 + #elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) 68 68 #define PADDED(x,y) y, x 69 69 #else 70 70 #error edit for your odd byteorder.
+4 -2
include/uapi/linux/raid/md_p.h
··· 145 145 __u32 failed_disks; /* 4 Number of failed disks */ 146 146 __u32 spare_disks; /* 5 Number of spare disks */ 147 147 __u32 sb_csum; /* 6 checksum of the whole superblock */ 148 - #ifdef __BIG_ENDIAN 148 + #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) 149 149 __u32 events_hi; /* 7 high-order of superblock update count */ 150 150 __u32 events_lo; /* 8 low-order of superblock update count */ 151 151 __u32 cp_events_hi; /* 9 high-order of checkpoint update count */ 152 152 __u32 cp_events_lo; /* 10 low-order of checkpoint update count */ 153 - #else 153 + #elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) 154 154 __u32 events_lo; /* 7 low-order of superblock update count */ 155 155 __u32 events_hi; /* 8 high-order of superblock update count */ 156 156 __u32 cp_events_lo; /* 9 low-order of checkpoint update count */ 157 157 __u32 cp_events_hi; /* 10 high-order of checkpoint update count */ 158 + #else 159 + #error unspecified endianness 158 160 #endif 159 161 __u32 recovery_cp; /* 11 recovery checkpoint sector count */ 160 162 /* There are only valid for minor_version > 90 */
+3
kernel/signal.c
··· 485 485 if (force_default || ka->sa.sa_handler != SIG_IGN) 486 486 ka->sa.sa_handler = SIG_DFL; 487 487 ka->sa.sa_flags = 0; 488 + #ifdef __ARCH_HAS_SA_RESTORER 489 + ka->sa.sa_restorer = NULL; 490 + #endif 488 491 sigemptyset(&ka->sa.sa_mask); 489 492 ka++; 490 493 }
+4 -3
kernel/workqueue.c
··· 457 457 int ret; 458 458 459 459 mutex_lock(&worker_pool_idr_mutex); 460 - idr_pre_get(&worker_pool_idr, GFP_KERNEL); 461 - ret = idr_get_new(&worker_pool_idr, pool, &pool->id); 460 + ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL); 461 + if (ret >= 0) 462 + pool->id = ret; 462 463 mutex_unlock(&worker_pool_idr_mutex); 463 464 464 - return ret; 465 + return ret < 0 ? ret : 0; 465 466 } 466 467 467 468 /*
+30 -49
lib/idr.c
··· 106 106 if (layer_idr) 107 107 return get_from_free_list(layer_idr); 108 108 109 - /* try to allocate directly from kmem_cache */ 110 - new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); 109 + /* 110 + * Try to allocate directly from kmem_cache. We want to try this 111 + * before preload buffer; otherwise, non-preloading idr_alloc() 112 + * users will end up taking advantage of preloading ones. As the 113 + * following is allowed to fail for preloaded cases, suppress 114 + * warning this time. 115 + */ 116 + new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN); 111 117 if (new) 112 118 return new; 113 119 ··· 121 115 * Try to fetch one from the per-cpu preload buffer if in process 122 116 * context. See idr_preload() for details. 123 117 */ 124 - if (in_interrupt()) 125 - return NULL; 126 - 127 - preempt_disable(); 128 - new = __this_cpu_read(idr_preload_head); 129 - if (new) { 130 - __this_cpu_write(idr_preload_head, new->ary[0]); 131 - __this_cpu_dec(idr_preload_cnt); 132 - new->ary[0] = NULL; 118 + if (!in_interrupt()) { 119 + preempt_disable(); 120 + new = __this_cpu_read(idr_preload_head); 121 + if (new) { 122 + __this_cpu_write(idr_preload_head, new->ary[0]); 123 + __this_cpu_dec(idr_preload_cnt); 124 + new->ary[0] = NULL; 125 + } 126 + preempt_enable(); 127 + if (new) 128 + return new; 133 129 } 134 - preempt_enable(); 135 - return new; 130 + 131 + /* 132 + * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so 133 + * that memory allocation failure warning is printed as intended. 134 + */ 135 + return kmem_cache_zalloc(idr_layer_cache, gfp_mask); 136 136 } 137 137 138 138 static void idr_layer_rcu_free(struct rcu_head *head) ··· 196 184 } 197 185 } 198 186 199 - /** 200 - * idr_pre_get - reserve resources for idr allocation 201 - * @idp: idr handle 202 - * @gfp_mask: memory allocation flags 203 - * 204 - * This function should be called prior to calling the idr_get_new* functions. 205 - * It preallocates enough memory to satisfy the worst possible allocation. The 206 - * caller should pass in GFP_KERNEL if possible. This of course requires that 207 - * no spinning locks be held. 208 - * 209 - * If the system is REALLY out of memory this function returns %0, 210 - * otherwise %1. 211 - */ 212 - int idr_pre_get(struct idr *idp, gfp_t gfp_mask) 187 + int __idr_pre_get(struct idr *idp, gfp_t gfp_mask) 213 188 { 214 189 while (idp->id_free_cnt < MAX_IDR_FREE) { 215 190 struct idr_layer *new; ··· 207 208 } 208 209 return 1; 209 210 } 210 - EXPORT_SYMBOL(idr_pre_get); 211 + EXPORT_SYMBOL(__idr_pre_get); 211 212 212 213 /** 213 214 * sub_alloc - try to allocate an id without growing the tree depth ··· 374 375 idr_mark_full(pa, id); 375 376 } 376 377 377 - /** 378 - * idr_get_new_above - allocate new idr entry above or equal to a start id 379 - * @idp: idr handle 380 - * @ptr: pointer you want associated with the id 381 - * @starting_id: id to start search at 382 - * @id: pointer to the allocated handle 383 - * 384 - * This is the allocate id function. It should be called with any 385 - * required locks. 386 - * 387 - * If allocation from IDR's private freelist fails, idr_get_new_above() will 388 - * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill 389 - * IDR's preallocation and then retry the idr_get_new_above() call. 390 - * 391 - * If the idr is full idr_get_new_above() will return %-ENOSPC. 392 - * 393 - * @id returns a value in the range @starting_id ... %0x7fffffff 394 - */ 395 - int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) 378 + int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) 396 379 { 397 380 struct idr_layer *pa[MAX_IDR_LEVEL + 1]; 398 381 int rv; ··· 387 406 *id = rv; 388 407 return 0; 389 408 } 390 - EXPORT_SYMBOL(idr_get_new_above); 409 + EXPORT_SYMBOL(__idr_get_new_above); 391 410 392 411 /** 393 412 * idr_preload - preload for idr_alloc() ··· 888 907 int ida_pre_get(struct ida *ida, gfp_t gfp_mask) 889 908 { 890 909 /* allocate idr_layers */ 891 - if (!idr_pre_get(&ida->idr, gfp_mask)) 910 + if (!__idr_pre_get(&ida->idr, gfp_mask)) 892 911 return 0; 893 912 894 913 /* allocate free_bitmap */
+1 -1
lib/xz/Kconfig
··· 15 15 16 16 config XZ_DEC_POWERPC 17 17 bool "PowerPC BCJ filter decoder" 18 - default y if POWERPC 18 + default y if PPC 19 19 select XZ_DEC_BCJ 20 20 21 21 config XZ_DEC_IA64
+4 -2
mm/fremap.c
··· 163 163 * and that the remapped range is valid and fully within 164 164 * the single existing vma. 165 165 */ 166 - if (!vma || !(vma->vm_flags & VM_SHARED)) 166 + vm_flags = vma->vm_flags; 167 + if (!vma || !(vm_flags & VM_SHARED)) 167 168 goto out; 168 169 169 170 if (!vma->vm_ops || !vma->vm_ops->remap_pages) ··· 255 254 */ 256 255 257 256 out: 258 - vm_flags = vma->vm_flags; 257 + if (vma) 258 + vm_flags = vma->vm_flags; 259 259 if (likely(!has_write_lock)) 260 260 up_read(&mm->mmap_sem); 261 261 else
+1 -1
mm/memory_hotplug.c
··· 1801 1801 int retry = 1; 1802 1802 1803 1803 start_pfn = PFN_DOWN(start); 1804 - end_pfn = start_pfn + PFN_DOWN(size); 1804 + end_pfn = PFN_UP(start + size - 1); 1805 1805 1806 1806 /* 1807 1807 * When CONFIG_MEMCG is on, one memory block may be used by other