Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
"A few leftovers"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, page_owner: skip unnecessary stack_trace entries
  arm64: stacktrace: avoid listing stacktrace functions in stacktrace
  mm: treewide: remove GFP_TEMPORARY allocation flag
  IB/mlx4: fix sprintf format warning
  fscache: fix fscache_objlist_show format processing
  lib/test_bitmap.c: use ULL suffix for 64-bit constants
  procfs: remove unused variable
  drivers/media/cec/cec-adap.c: fix build with gcc-4.4.4
  idr: remove WARN_ON_ONCE() when trying to replace negative ID

+83 -76
+1 -1
arch/arc/kernel/setup.c
··· 510 goto done; 511 } 512 513 - str = (char *)__get_free_page(GFP_TEMPORARY); 514 if (!str) 515 goto done; 516
··· 510 goto done; 511 } 512 513 + str = (char *)__get_free_page(GFP_KERNEL); 514 if (!str) 515 goto done; 516
+1 -1
arch/arc/kernel/troubleshoot.c
··· 178 struct callee_regs *cregs; 179 char *buf; 180 181 - buf = (char *)__get_free_page(GFP_TEMPORARY); 182 if (!buf) 183 return; 184
··· 178 struct callee_regs *cregs; 179 char *buf; 180 181 + buf = (char *)__get_free_page(GFP_KERNEL); 182 if (!buf) 183 return; 184
+13 -5
arch/arm64/kernel/stacktrace.c
··· 140 trace->entries[trace->nr_entries++] = ULONG_MAX; 141 } 142 143 - void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 144 { 145 struct stack_trace_data data; 146 struct stackframe frame; ··· 151 152 data.trace = trace; 153 data.skip = trace->skip; 154 155 if (tsk != current) { 156 - data.no_sched_functions = 1; 157 frame.fp = thread_saved_fp(tsk); 158 frame.pc = thread_saved_pc(tsk); 159 } else { 160 - data.no_sched_functions = 0; 161 frame.fp = (unsigned long)__builtin_frame_address(0); 162 - frame.pc = (unsigned long)save_stack_trace_tsk; 163 } 164 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 165 frame.graph = tsk->curr_ret_stack; ··· 174 } 175 EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 176 177 void save_stack_trace(struct stack_trace *trace) 178 { 179 - save_stack_trace_tsk(current, trace); 180 } 181 EXPORT_SYMBOL_GPL(save_stack_trace); 182 #endif
··· 140 trace->entries[trace->nr_entries++] = ULONG_MAX; 141 } 142 143 + static noinline void __save_stack_trace(struct task_struct *tsk, 144 + struct stack_trace *trace, unsigned int nosched) 145 { 146 struct stack_trace_data data; 147 struct stackframe frame; ··· 150 151 data.trace = trace; 152 data.skip = trace->skip; 153 + data.no_sched_functions = nosched; 154 155 if (tsk != current) { 156 frame.fp = thread_saved_fp(tsk); 157 frame.pc = thread_saved_pc(tsk); 158 } else { 159 + /* We don't want this function nor the caller */ 160 + data.skip += 2; 161 frame.fp = (unsigned long)__builtin_frame_address(0); 162 + frame.pc = (unsigned long)__save_stack_trace; 163 } 164 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 165 frame.graph = tsk->curr_ret_stack; ··· 172 } 173 EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 174 175 + void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 176 + { 177 + __save_stack_trace(tsk, trace, 1); 178 + } 179 + 180 void save_stack_trace(struct stack_trace *trace) 181 { 182 + __save_stack_trace(current, trace, 0); 183 } 184 + 185 EXPORT_SYMBOL_GPL(save_stack_trace); 186 #endif
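Note: the idea behind the noinline __save_stack_trace() wrapper and the "data.skip += 2" above can be sketched in plain userspace C with glibc's backtrace(): the capture helpers contribute the innermost frames, so a fixed, known skip count removes them before the trace is reported. This is only an illustrative analogue, with hypothetical helper names and an assumed skip count, not the kernel code.

/*
 * Illustrative userspace analogue of the skip logic, using glibc backtrace().
 * The helper names and the skip count of 2 are assumptions of this sketch.
 * Build with something like: gcc -rdynamic trace.c
 */
#include <execinfo.h>
#include <unistd.h>

static void __attribute__((noinline)) save_stack(void)
{
	void *entries[16];
	int n = backtrace(entries, 16);

	/* entries[0] is save_stack() itself, entries[1] is capture_stack();
	 * drop both so only the caller's frames are reported */
	if (n > 2)
		backtrace_symbols_fd(entries + 2, n - 2, STDOUT_FILENO);
}

static void __attribute__((noinline)) capture_stack(void)
{
	save_stack();
}

int main(void)
{
	capture_stack();	/* reports main() and below, not the helpers */
	return 0;
}

Marking the wrappers noinline is what keeps the frame count, and therefore the hard-coded skip, stable; the page_owner hunk further down relies on the same reasoning when it sets .skip = 2.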
+2 -2
arch/powerpc/kernel/rtas.c
··· 914 if (ret) { 915 cpumask_var_t tmp_mask; 916 917 - if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY)) 918 return ret; 919 920 /* Use tmp_mask to preserve cpus mask from first failure */ ··· 962 return -EIO; 963 } 964 965 - if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) 966 return -ENOMEM; 967 968 atomic_set(&data.working, 0);
··· 914 if (ret) { 915 cpumask_var_t tmp_mask; 916 917 + if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL)) 918 return ret; 919 920 /* Use tmp_mask to preserve cpus mask from first failure */ ··· 962 return -EIO; 963 } 964 965 + if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL)) 966 return -ENOMEM; 967 968 atomic_set(&data.working, 0);
+1 -1
arch/powerpc/platforms/pseries/suspend.c
··· 151 if (!capable(CAP_SYS_ADMIN)) 152 return -EPERM; 153 154 - if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) 155 return -ENOMEM; 156 157 stream_id = simple_strtoul(buf, NULL, 16);
··· 151 if (!capable(CAP_SYS_ADMIN)) 152 return -EPERM; 153 154 + if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL)) 155 return -ENOMEM; 156 157 stream_id = simple_strtoul(buf, NULL, 16);
+1 -1
drivers/gpu/drm/drm_blend.c
··· 319 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n", 320 crtc->base.id, crtc->name); 321 322 - states = kmalloc_array(total_planes, sizeof(*states), GFP_TEMPORARY); 323 if (!states) 324 return -ENOMEM; 325
··· 319 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n", 320 crtc->base.id, crtc->name); 321 322 + states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL); 323 if (!states) 324 return -ENOMEM; 325
+1 -1
drivers/gpu/drm/drm_dp_dual_mode_helper.c
··· 111 void *data; 112 int ret; 113 114 - data = kmalloc(msg.len, GFP_TEMPORARY); 115 if (!data) 116 return -ENOMEM; 117
··· 111 void *data; 112 int ret; 113 114 + data = kmalloc(msg.len, GFP_KERNEL); 115 if (!data) 116 return -ENOMEM; 117
+1 -1
drivers/gpu/drm/drm_scdc_helper.c
··· 102 void *data; 103 int err; 104 105 - data = kmalloc(1 + size, GFP_TEMPORARY); 106 if (!data) 107 return -ENOMEM; 108
··· 102 void *data; 103 int err; 104 105 + data = kmalloc(1 + size, GFP_KERNEL); 106 if (!data) 107 return -ENOMEM; 108
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
··· 37 struct etnaviv_gem_submit *submit; 38 size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit)); 39 40 - submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); 41 if (submit) { 42 submit->dev = dev; 43 submit->gpu = gpu;
··· 37 struct etnaviv_gem_submit *submit; 38 size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit)); 39 40 + submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); 41 if (submit) { 42 submit->dev = dev; 43 submit->gpu = gpu;
+1 -1
drivers/gpu/drm/i915/i915_gem.c
··· 2540 2541 if (n_pages > ARRAY_SIZE(stack_pages)) { 2542 /* Too big for stack -- allocate temporary array instead */ 2543 - pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_TEMPORARY); 2544 if (!pages) 2545 return NULL; 2546 }
··· 2540 2541 if (n_pages > ARRAY_SIZE(stack_pages)) { 2542 /* Too big for stack -- allocate temporary array instead */ 2543 + pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); 2544 if (!pages) 2545 return NULL; 2546 }
+6 -6
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 293 * as possible to perform the allocation and warn 294 * if it fails. 295 */ 296 - flags = GFP_TEMPORARY; 297 if (size > 1) 298 flags |= __GFP_NORETRY | __GFP_NOWARN; 299 ··· 1515 urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr); 1516 size = nreloc * sizeof(*relocs); 1517 1518 - relocs = kvmalloc_array(size, 1, GFP_TEMPORARY); 1519 if (!relocs) { 1520 kvfree(relocs); 1521 err = -ENOMEM; ··· 2077 return ERR_PTR(-EFAULT); 2078 2079 fences = kvmalloc_array(args->num_cliprects, sizeof(*fences), 2080 - __GFP_NOWARN | GFP_TEMPORARY); 2081 if (!fences) 2082 return ERR_PTR(-ENOMEM); 2083 ··· 2463 2464 /* Copy in the exec list from userland */ 2465 exec_list = kvmalloc_array(args->buffer_count, sizeof(*exec_list), 2466 - __GFP_NOWARN | GFP_TEMPORARY); 2467 exec2_list = kvmalloc_array(args->buffer_count + 1, sz, 2468 - __GFP_NOWARN | GFP_TEMPORARY); 2469 if (exec_list == NULL || exec2_list == NULL) { 2470 DRM_DEBUG("Failed to allocate exec list for %d buffers\n", 2471 args->buffer_count); ··· 2543 2544 /* Allocate an extra slot for use by the command parser */ 2545 exec2_list = kvmalloc_array(args->buffer_count + 1, sz, 2546 - __GFP_NOWARN | GFP_TEMPORARY); 2547 if (exec2_list == NULL) { 2548 DRM_DEBUG("Failed to allocate exec list for %d buffers\n", 2549 args->buffer_count);
··· 293 * as possible to perform the allocation and warn 294 * if it fails. 295 */ 296 + flags = GFP_KERNEL; 297 if (size > 1) 298 flags |= __GFP_NORETRY | __GFP_NOWARN; 299 ··· 1515 urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr); 1516 size = nreloc * sizeof(*relocs); 1517 1518 + relocs = kvmalloc_array(size, 1, GFP_KERNEL); 1519 if (!relocs) { 1520 kvfree(relocs); 1521 err = -ENOMEM; ··· 2077 return ERR_PTR(-EFAULT); 2078 2079 fences = kvmalloc_array(args->num_cliprects, sizeof(*fences), 2080 + __GFP_NOWARN | GFP_KERNEL); 2081 if (!fences) 2082 return ERR_PTR(-ENOMEM); 2083 ··· 2463 2464 /* Copy in the exec list from userland */ 2465 exec_list = kvmalloc_array(args->buffer_count, sizeof(*exec_list), 2466 + __GFP_NOWARN | GFP_KERNEL); 2467 exec2_list = kvmalloc_array(args->buffer_count + 1, sz, 2468 + __GFP_NOWARN | GFP_KERNEL); 2469 if (exec_list == NULL || exec2_list == NULL) { 2470 DRM_DEBUG("Failed to allocate exec list for %d buffers\n", 2471 args->buffer_count); ··· 2543 2544 /* Allocate an extra slot for use by the command parser */ 2545 exec2_list = kvmalloc_array(args->buffer_count + 1, sz, 2546 + __GFP_NOWARN | GFP_KERNEL); 2547 if (exec2_list == NULL) { 2548 DRM_DEBUG("Failed to allocate exec list for %d buffers\n", 2549 args->buffer_count);
+1 -1
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 3231 /* Allocate a temporary list of source pages for random access. */ 3232 page_addr_list = kvmalloc_array(n_pages, 3233 sizeof(dma_addr_t), 3234 - GFP_TEMPORARY); 3235 if (!page_addr_list) 3236 return ERR_PTR(ret); 3237
··· 3231 /* Allocate a temporary list of source pages for random access. */ 3232 page_addr_list = kvmalloc_array(n_pages, 3233 sizeof(dma_addr_t), 3234 + GFP_KERNEL); 3235 if (!page_addr_list) 3236 return ERR_PTR(ret); 3237
+2 -2
drivers/gpu/drm/i915/i915_gem_userptr.c
··· 507 ret = -ENOMEM; 508 pinned = 0; 509 510 - pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_TEMPORARY); 511 if (pvec != NULL) { 512 struct mm_struct *mm = obj->userptr.mm->mm; 513 unsigned int flags = 0; ··· 643 644 if (mm == current->mm) { 645 pvec = kvmalloc_array(num_pages, sizeof(struct page *), 646 - GFP_TEMPORARY | 647 __GFP_NORETRY | 648 __GFP_NOWARN); 649 if (pvec) /* defer to worker if malloc fails */
··· 507 ret = -ENOMEM; 508 pinned = 0; 509 510 + pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 511 if (pvec != NULL) { 512 struct mm_struct *mm = obj->userptr.mm->mm; 513 unsigned int flags = 0; ··· 643 644 if (mm == current->mm) { 645 pvec = kvmalloc_array(num_pages, sizeof(struct page *), 646 + GFP_KERNEL | 647 __GFP_NORETRY | 648 __GFP_NOWARN); 649 if (pvec) /* defer to worker if malloc fails */
+3 -3
drivers/gpu/drm/i915/i915_gpu_error.c
··· 787 */ 788 ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE; 789 ebuf->buf = kmalloc(ebuf->size, 790 - GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN); 791 792 if (ebuf->buf == NULL) { 793 ebuf->size = PAGE_SIZE; 794 - ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); 795 } 796 797 if (ebuf->buf == NULL) { 798 ebuf->size = 128; 799 - ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); 800 } 801 802 if (ebuf->buf == NULL)
··· 787 */ 788 ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE; 789 ebuf->buf = kmalloc(ebuf->size, 790 + GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); 791 792 if (ebuf->buf == NULL) { 793 ebuf->size = PAGE_SIZE; 794 + ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL); 795 } 796 797 if (ebuf->buf == NULL) { 798 ebuf->size = 128; 799 + ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL); 800 } 801 802 if (ebuf->buf == NULL)
+1 -1
drivers/gpu/drm/i915/selftests/i915_random.c
··· 62 { 63 unsigned int *order, i; 64 65 - order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY); 66 if (!order) 67 return order; 68
··· 62 { 63 unsigned int *order, i; 64 65 + order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); 66 if (!order) 67 return order; 68
+5 -5
drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
··· 117 118 mock_engine_reset(engine); 119 120 - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY); 121 if (!waiters) 122 goto out_engines; 123 124 bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), 125 - GFP_TEMPORARY); 126 if (!bitmap) 127 goto out_waiters; 128 ··· 187 188 mock_engine_reset(engine); 189 190 - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY); 191 if (!waiters) 192 goto out_engines; 193 194 bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), 195 - GFP_TEMPORARY); 196 if (!bitmap) 197 goto out_waiters; 198 ··· 368 369 mock_engine_reset(engine); 370 371 - waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY); 372 if (!waiters) 373 goto out_engines; 374
··· 117 118 mock_engine_reset(engine); 119 120 + waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); 121 if (!waiters) 122 goto out_engines; 123 124 bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), 125 + GFP_KERNEL); 126 if (!bitmap) 127 goto out_waiters; 128 ··· 187 188 mock_engine_reset(engine); 189 190 + waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); 191 if (!waiters) 192 goto out_engines; 193 194 bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap), 195 + GFP_KERNEL); 196 if (!bitmap) 197 goto out_waiters; 198 ··· 368 369 mock_engine_reset(engine); 370 371 + waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL); 372 if (!waiters) 373 goto out_engines; 374
+1 -1
drivers/gpu/drm/i915/selftests/intel_uncore.c
··· 127 return 0; 128 129 valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid), 130 - GFP_TEMPORARY); 131 if (!valid) 132 return -ENOMEM; 133
··· 127 return 0; 128 129 valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid), 130 + GFP_KERNEL); 131 if (!valid) 132 return -ENOMEM; 133
+1 -1
drivers/gpu/drm/lib/drm_random.c
··· 28 { 29 unsigned int *order, i; 30 31 - order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY); 32 if (!order) 33 return order; 34
··· 28 { 29 unsigned int *order, i; 30 31 + order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); 32 if (!order) 33 return order; 34
+1 -1
drivers/gpu/drm/msm/msm_gem_submit.c
··· 40 if (sz > SIZE_MAX) 41 return NULL; 42 43 - submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); 44 if (!submit) 45 return NULL; 46
··· 40 if (sz > SIZE_MAX) 41 return NULL; 42 43 + submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); 44 if (!submit) 45 return NULL; 46
+2 -2
drivers/gpu/drm/selftests/test-drm_mm.c
··· 1627 goto err; 1628 1629 bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long), 1630 - GFP_TEMPORARY); 1631 if (!bitmap) 1632 goto err_nodes; 1633 ··· 1741 goto err; 1742 1743 bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long), 1744 - GFP_TEMPORARY); 1745 if (!bitmap) 1746 goto err_nodes; 1747
··· 1627 goto err; 1628 1629 bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long), 1630 + GFP_KERNEL); 1631 if (!bitmap) 1632 goto err_nodes; 1633 ··· 1741 goto err; 1742 1743 bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long), 1744 + GFP_KERNEL); 1745 if (!bitmap) 1746 goto err_nodes; 1747
+1 -1
drivers/infiniband/hw/mlx4/sysfs.c
··· 221 static int add_port_entries(struct mlx4_ib_dev *device, int port_num) 222 { 223 int i; 224 - char buff[10]; 225 struct mlx4_ib_iov_port *port = NULL; 226 int ret = 0 ; 227 struct ib_port_attr attr;
··· 221 static int add_port_entries(struct mlx4_ib_dev *device, int port_num) 222 { 223 int i; 224 + char buff[11]; 225 struct mlx4_ib_iov_port *port = NULL; 226 int ret = 0 ; 227 struct ib_port_attr attr;
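For reference, the warning class addressed here comes from newer gcc checking format output against the size of the destination buffer. A small standalone sketch (buffer sizes and values are illustrative, not the mlx4 ones) shows the worst case a plain "%d" can produce and how an explicit bound, or a sufficiently large buffer as in the patch, avoids the overflow.

/*
 * Standalone illustration of the format-overflow class of warning: the
 * compiler sizes the destination against the worst case "%d" can produce.
 * Buffer sizes and values are illustrative, not the mlx4 ones.
 */
#include <stdio.h>
#include <limits.h>

int main(void)
{
	char small[10];
	char big[12];	/* "-2147483648" is 11 characters plus the trailing NUL */

	/* sprintf(small, "%d", INT_MIN) could overflow small[]; an explicit
	 * bound (or a large enough buffer) avoids that */
	snprintf(small, sizeof(small), "%d", INT_MIN);	/* truncated, no overflow */
	snprintf(big, sizeof(big), "%d", INT_MIN);	/* fits */

	printf("%s / %s\n", small, big);
	return 0;
}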
+4 -1
drivers/media/cec/cec-adap.c
··· 181 { 182 static const struct cec_event ev_lost_msgs = { 183 .event = CEC_EVENT_LOST_MSGS, 184 - .lost_msgs.lost_msgs = 1, 185 }; 186 struct cec_msg_entry *entry; 187
··· 181 { 182 static const struct cec_event ev_lost_msgs = { 183 .event = CEC_EVENT_LOST_MSGS, 184 + .flags = 0, 185 + { 186 + .lost_msgs = { 1 }, 187 + }, 188 }; 189 struct cec_msg_entry *entry; 190
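The gcc-4.4.4 build failure here is the old compiler rejecting a designated initializer that reaches through an anonymous union, which is why the fix switches to a positional brace level for the union member. A standalone sketch of the same pattern, with made-up struct names rather than the real cec uapi layout:

#include <stdio.h>

struct lost_msgs_ev {
	unsigned int lost_msgs;
};

struct event {
	unsigned int event;
	unsigned int flags;
	union {
		struct lost_msgs_ev lost_msgs;
		unsigned int raw[4];
	};			/* anonymous union, as in struct cec_event */
};

/* Old gcc rejects ".lost_msgs.lost_msgs = 1" through the anonymous union,
 * so the union is initialized positionally with an extra brace level. */
static const struct event ev_lost_msgs = {
	.event = 1,		/* stands in for CEC_EVENT_LOST_MSGS */
	.flags = 0,
	{
		.lost_msgs = { 1 },
	},
};

int main(void)
{
	printf("lost %u message(s)\n", ev_lost_msgs.lost_msgs.lost_msgs);
	return 0;
}

Both initializer forms are equivalent at runtime; only the older compiler's handling of designators through anonymous unions differs.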
+1 -1
drivers/misc/cxl/pci.c
··· 1279 } 1280 1281 /* use bounce buffer for copy */ 1282 - tbuf = (void *)__get_free_page(GFP_TEMPORARY); 1283 if (!tbuf) 1284 return -ENOMEM; 1285
··· 1279 } 1280 1281 /* use bounce buffer for copy */ 1282 + tbuf = (void *)__get_free_page(GFP_KERNEL); 1283 if (!tbuf) 1284 return -ENOMEM; 1285
+1 -1
drivers/xen/gntalloc.c
··· 294 goto out; 295 } 296 297 - gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_TEMPORARY); 298 if (!gref_ids) { 299 rc = -ENOMEM; 300 goto out;
··· 294 goto out; 295 } 296 297 + gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_KERNEL); 298 if (!gref_ids) { 299 rc = -ENOMEM; 300 goto out;
+1 -1
fs/coredump.c
··· 161 if (!exe_file) 162 return cn_esc_printf(cn, "%s (path unknown)", current->comm); 163 164 - pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY); 165 if (!pathbuf) { 166 ret = -ENOMEM; 167 goto put_exe_file;
··· 161 if (!exe_file) 162 return cn_esc_printf(cn, "%s (path unknown)", current->comm); 163 164 + pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); 165 if (!pathbuf) { 166 ret = -ENOMEM; 167 goto put_exe_file;
+2 -2
fs/exec.c
··· 1763 bprm->filename = filename->name; 1764 } else { 1765 if (filename->name[0] == '\0') 1766 - pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d", fd); 1767 else 1768 - pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d/%s", 1769 fd, filename->name); 1770 if (!pathbuf) { 1771 retval = -ENOMEM;
··· 1763 bprm->filename = filename->name; 1764 } else { 1765 if (filename->name[0] == '\0') 1766 + pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd); 1767 else 1768 + pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s", 1769 fd, filename->name); 1770 if (!pathbuf) { 1771 retval = -ENOMEM;
+2 -1
fs/fscache/object-list.c
··· 262 type = "DT"; 263 break; 264 default: 265 - sprintf(_type, "%02u", cookie->def->type); 266 type = _type; 267 break; 268 }
··· 262 type = "DT"; 263 break; 264 default: 265 + snprintf(_type, sizeof(_type), "%02u", 266 + cookie->def->type); 267 type = _type; 268 break; 269 }
+1 -1
fs/overlayfs/copy_up.c
··· 241 int buflen = MAX_HANDLE_SZ; 242 uuid_t *uuid = &lower->d_sb->s_uuid; 243 244 - buf = kmalloc(buflen, GFP_TEMPORARY); 245 if (!buf) 246 return ERR_PTR(-ENOMEM); 247
··· 241 int buflen = MAX_HANDLE_SZ; 242 uuid_t *uuid = &lower->d_sb->s_uuid; 243 244 + buf = kmalloc(buflen, GFP_KERNEL); 245 if (!buf) 246 return ERR_PTR(-ENOMEM); 247
+1 -1
fs/overlayfs/dir.c
··· 833 goto out; 834 } 835 836 - buf = ret = kmalloc(buflen, GFP_TEMPORARY); 837 if (!buf) 838 goto out; 839
··· 833 goto out; 834 } 835 836 + buf = ret = kmalloc(buflen, GFP_KERNEL); 837 if (!buf) 838 goto out; 839
+6 -6
fs/overlayfs/namei.c
··· 38 return 0; 39 goto fail; 40 } 41 - buf = kzalloc(prelen + res + strlen(post) + 1, GFP_TEMPORARY); 42 if (!buf) 43 return -ENOMEM; 44 ··· 103 if (res == 0) 104 return NULL; 105 106 - fh = kzalloc(res, GFP_TEMPORARY); 107 if (!fh) 108 return ERR_PTR(-ENOMEM); 109 ··· 309 310 BUG_ON(*ctrp); 311 if (!*stackp) 312 - *stackp = kmalloc(sizeof(struct path), GFP_TEMPORARY); 313 if (!*stackp) { 314 dput(origin); 315 return -ENOMEM; ··· 418 419 err = -ENOMEM; 420 len = index->d_name.len / 2; 421 - fh = kzalloc(len, GFP_TEMPORARY); 422 if (!fh) 423 goto fail; 424 ··· 478 return PTR_ERR(fh); 479 480 err = -ENOMEM; 481 - n = kzalloc(fh->len * 2, GFP_TEMPORARY); 482 if (n) { 483 s = bin2hex(n, fh, fh->len); 484 *name = (struct qstr) QSTR_INIT(n, s - n); ··· 646 if (!d.stop && poe->numlower) { 647 err = -ENOMEM; 648 stack = kcalloc(ofs->numlower, sizeof(struct path), 649 - GFP_TEMPORARY); 650 if (!stack) 651 goto out_put_upper; 652 }
··· 38 return 0; 39 goto fail; 40 } 41 + buf = kzalloc(prelen + res + strlen(post) + 1, GFP_KERNEL); 42 if (!buf) 43 return -ENOMEM; 44 ··· 103 if (res == 0) 104 return NULL; 105 106 + fh = kzalloc(res, GFP_KERNEL); 107 if (!fh) 108 return ERR_PTR(-ENOMEM); 109 ··· 309 310 BUG_ON(*ctrp); 311 if (!*stackp) 312 + *stackp = kmalloc(sizeof(struct path), GFP_KERNEL); 313 if (!*stackp) { 314 dput(origin); 315 return -ENOMEM; ··· 418 419 err = -ENOMEM; 420 len = index->d_name.len / 2; 421 + fh = kzalloc(len, GFP_KERNEL); 422 if (!fh) 423 goto fail; 424 ··· 478 return PTR_ERR(fh); 479 480 err = -ENOMEM; 481 + n = kzalloc(fh->len * 2, GFP_KERNEL); 482 if (n) { 483 s = bin2hex(n, fh, fh->len); 484 *name = (struct qstr) QSTR_INIT(n, s - n); ··· 646 if (!d.stop && poe->numlower) { 647 err = -ENOMEM; 648 stack = kcalloc(ofs->numlower, sizeof(struct path), 649 + GFP_KERNEL); 650 if (!stack) 651 goto out_put_upper; 652 }
+4 -4
fs/proc/base.c
··· 232 goto out_mmput; 233 } 234 235 - page = (char *)__get_free_page(GFP_TEMPORARY); 236 if (!page) { 237 rv = -ENOMEM; 238 goto out_mmput; ··· 813 if (!mm) 814 return 0; 815 816 - page = (char *)__get_free_page(GFP_TEMPORARY); 817 if (!page) 818 return -ENOMEM; 819 ··· 918 if (!mm || !mm->env_end) 919 return 0; 920 921 - page = (char *)__get_free_page(GFP_TEMPORARY); 922 if (!page) 923 return -ENOMEM; 924 ··· 1630 1631 static int do_proc_readlink(struct path *path, char __user *buffer, int buflen) 1632 { 1633 - char *tmp = (char*)__get_free_page(GFP_TEMPORARY); 1634 char *pathname; 1635 int len; 1636
··· 232 goto out_mmput; 233 } 234 235 + page = (char *)__get_free_page(GFP_KERNEL); 236 if (!page) { 237 rv = -ENOMEM; 238 goto out_mmput; ··· 813 if (!mm) 814 return 0; 815 816 + page = (char *)__get_free_page(GFP_KERNEL); 817 if (!page) 818 return -ENOMEM; 819 ··· 918 if (!mm || !mm->env_end) 919 return 0; 920 921 + page = (char *)__get_free_page(GFP_KERNEL); 922 if (!page) 923 return -ENOMEM; 924 ··· 1630 1631 static int do_proc_readlink(struct path *path, char __user *buffer, int buflen) 1632 { 1633 + char *tmp = (char *)__get_free_page(GFP_KERNEL); 1634 char *pathname; 1635 int len; 1636
+1 -1
fs/proc/task_mmu.c
··· 1474 pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN); 1475 1476 pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); 1477 - pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY); 1478 ret = -ENOMEM; 1479 if (!pm.buffer) 1480 goto out_mm;
··· 1474 pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN); 1475 1476 pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); 1477 + pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_KERNEL); 1478 ret = -ENOMEM; 1479 if (!pm.buffer) 1480 goto out_mm;
-1
fs/proc/task_nommu.c
··· 145 int is_pid) 146 { 147 struct mm_struct *mm = vma->vm_mm; 148 - struct proc_maps_private *priv = m->private; 149 unsigned long ino = 0; 150 struct file *file; 151 dev_t dev = 0;
··· 145 int is_pid) 146 { 147 struct mm_struct *mm = vma->vm_mm; 148 unsigned long ino = 0; 149 struct file *file; 150 dev_t dev = 0;
-2
include/linux/gfp.h
··· 288 #define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) 289 #define GFP_NOIO (__GFP_RECLAIM) 290 #define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) 291 - #define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \ 292 - __GFP_RECLAIMABLE) 293 #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL) 294 #define GFP_DMA __GFP_DMA 295 #define GFP_DMA32 __GFP_DMA32
··· 288 #define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) 289 #define GFP_NOIO (__GFP_RECLAIM) 290 #define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) 291 #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL) 292 #define GFP_DMA __GFP_DMA 293 #define GFP_DMA32 __GFP_DMA32
-1
include/trace/events/mmflags.h
··· 18 {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"},\ 19 {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ 20 {(unsigned long)GFP_USER, "GFP_USER"}, \ 21 - {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \ 22 {(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \ 23 {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \ 24 {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
··· 18 {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"},\ 19 {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ 20 {(unsigned long)GFP_USER, "GFP_USER"}, \ 21 {(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \ 22 {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \ 23 {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
+1 -1
kernel/locking/test-ww_mutex.c
··· 362 int *order; 363 int n, r, tmp; 364 365 - order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY); 366 if (!order) 367 return order; 368
··· 362 int *order; 363 int n, r, tmp; 364 365 + order = kmalloc_array(count, sizeof(*order), GFP_KERNEL); 366 if (!order) 367 return order; 368
+1 -1
kernel/trace/trace_events_filter.c
··· 702 int pos = ps->lasterr_pos; 703 char *buf, *pbuf; 704 705 - buf = (char *)__get_free_page(GFP_TEMPORARY); 706 if (!buf) 707 return; 708
··· 702 int pos = ps->lasterr_pos; 703 char *buf, *pbuf; 704 705 + buf = (char *)__get_free_page(GFP_KERNEL); 706 if (!buf) 707 return; 708
+1 -1
lib/idr.c
··· 151 */ 152 void *idr_replace(struct idr *idr, void *ptr, int id) 153 { 154 - if (WARN_ON_ONCE(id < 0)) 155 return ERR_PTR(-EINVAL); 156 157 return idr_replace_ext(idr, ptr, id);
··· 151 */ 152 void *idr_replace(struct idr *idr, void *ptr, int id) 153 { 154 + if (id < 0) 155 return ERR_PTR(-EINVAL); 156 157 return idr_replace_ext(idr, ptr, id);
+2 -2
lib/string_helpers.c
··· 576 char *buffer, *quoted; 577 int i, res; 578 579 - buffer = kmalloc(PAGE_SIZE, GFP_TEMPORARY); 580 if (!buffer) 581 return NULL; 582 ··· 612 return kstrdup("<unknown>", gfp); 613 614 /* We add 11 spaces for ' (deleted)' to be appended */ 615 - temp = kmalloc(PATH_MAX + 11, GFP_TEMPORARY); 616 if (!temp) 617 return kstrdup("<no_memory>", gfp); 618
··· 576 char *buffer, *quoted; 577 int i, res; 578 579 + buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); 580 if (!buffer) 581 return NULL; 582 ··· 612 return kstrdup("<unknown>", gfp); 613 614 /* We add 11 spaces for ' (deleted)' to be appended */ 615 + temp = kmalloc(PATH_MAX + 11, GFP_KERNEL); 616 if (!temp) 617 return kstrdup("<no_memory>", gfp); 618
+4 -4
lib/test_bitmap.c
··· 186 BITMAP_FROM_U64(0x22222222), 187 BITMAP_FROM_U64(0xffffffff), 188 BITMAP_FROM_U64(0xfffffffe), 189 - BITMAP_FROM_U64(0x3333333311111111), 190 - BITMAP_FROM_U64(0xffffffff77777777) 191 }; 192 193 static const unsigned long exp2[] __initconst = { 194 - BITMAP_FROM_U64(0x3333333311111111), 195 - BITMAP_FROM_U64(0xffffffff77777777) 196 }; 197 198 static const struct test_bitmap_parselist parselist_tests[] __initconst = {
··· 186 BITMAP_FROM_U64(0x22222222), 187 BITMAP_FROM_U64(0xffffffff), 188 BITMAP_FROM_U64(0xfffffffe), 189 + BITMAP_FROM_U64(0x3333333311111111ULL), 190 + BITMAP_FROM_U64(0xffffffff77777777ULL) 191 }; 192 193 static const unsigned long exp2[] __initconst = { 194 + BITMAP_FROM_U64(0x3333333311111111ULL), 195 + BITMAP_FROM_U64(0xffffffff77777777ULL) 196 }; 197 198 static const struct test_bitmap_parselist parselist_tests[] __initconst = {
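The warning being silenced here is the one older compilers emit in their default C90 mode, where an unsuffixed integer constant can only be given an int or long type (there is no long long), so a 64-bit value on a 32-bit build draws a warning along the lines of "integer constant is too large for 'long' type". A tiny standalone example of the corrected form:

/*
 * The ULL suffix names the intended 64-bit type explicitly; it is harmless
 * on 64-bit builds and avoids the C90-mode warning on 32-bit builds.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long a = 0x3333333311111111ULL;
	unsigned long long b = 0xffffffff77777777ULL;

	printf("%llx %llx\n", a, b);
	return 0;
}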
+1 -1
mm/page_owner.c
··· 142 .nr_entries = 0, 143 .entries = entries, 144 .max_entries = PAGE_OWNER_STACK_DEPTH, 145 - .skip = 0 146 }; 147 depot_stack_handle_t handle; 148
··· 142 .nr_entries = 0, 143 .entries = entries, 144 .max_entries = PAGE_OWNER_STACK_DEPTH, 145 + .skip = 2 146 }; 147 depot_stack_handle_t handle; 148
+1 -1
mm/shmem.c
··· 3685 if (len > MFD_NAME_MAX_LEN + 1) 3686 return -EINVAL; 3687 3688 - name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_TEMPORARY); 3689 if (!name) 3690 return -ENOMEM; 3691
··· 3685 if (len > MFD_NAME_MAX_LEN + 1) 3686 return -EINVAL; 3687 3688 + name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_KERNEL); 3689 if (!name) 3690 return -ENOMEM; 3691
+1 -1
mm/slub.c
··· 4597 struct kmem_cache_node *n; 4598 4599 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), 4600 - GFP_TEMPORARY)) { 4601 kfree(map); 4602 return sprintf(buf, "Out of memory\n"); 4603 }
··· 4597 struct kmem_cache_node *n; 4598 4599 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), 4600 + GFP_KERNEL)) { 4601 kfree(map); 4602 return sprintf(buf, "Out of memory\n"); 4603 }
-1
tools/perf/builtin-kmem.c
··· 627 { "GFP_HIGHUSER_MOVABLE", "HUM" }, 628 { "GFP_HIGHUSER", "HU" }, 629 { "GFP_USER", "U" }, 630 - { "GFP_TEMPORARY", "TMP" }, 631 { "GFP_KERNEL_ACCOUNT", "KAC" }, 632 { "GFP_KERNEL", "K" }, 633 { "GFP_NOFS", "NF" },
··· 627 { "GFP_HIGHUSER_MOVABLE", "HUM" }, 628 { "GFP_HIGHUSER", "HU" }, 629 { "GFP_USER", "U" }, 630 { "GFP_KERNEL_ACCOUNT", "KAC" }, 631 { "GFP_KERNEL", "K" }, 632 { "GFP_NOFS", "NF" },