Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

proc/maps: make vm_is_stack() logic namespace-friendly

- Rename vm_is_stack() to task_of_stack() and change it to return
"struct task_struct *" rather than the global (and thus wrong in
general) pid_t.

- Add the new pid_of_stack() helper which calls task_of_stack() and
uses the right namespace to report the correct pid_t.

Unfortunately we need to define this helper twice, in task_mmu.c
and in task_nommu.c. Perhaps it makes sense to add fs/proc/util.c
and move at least pid_of_stack/task_of_stack there to avoid the
code duplication.

- Change show_map_vma() and show_numa_map() to use the new helper.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Greg Ungerer <gerg@uclinux.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Oleg Nesterov and committed by Linus Torvalds
58cb6548 2c03376d

+51 -22
+21 -4
fs/proc/task_mmu.c
··· 261 261 sizeof(struct proc_maps_private)); 262 262 } 263 263 264 + static pid_t pid_of_stack(struct proc_maps_private *priv, 265 + struct vm_area_struct *vma, bool is_pid) 266 + { 267 + struct inode *inode = priv->inode; 268 + struct task_struct *task; 269 + pid_t ret = 0; 270 + 271 + rcu_read_lock(); 272 + task = pid_task(proc_pid(inode), PIDTYPE_PID); 273 + if (task) { 274 + task = task_of_stack(task, vma, is_pid); 275 + if (task) 276 + ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info); 277 + } 278 + rcu_read_unlock(); 279 + 280 + return ret; 281 + } 282 + 264 283 static void 265 284 show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) 266 285 { 267 286 struct mm_struct *mm = vma->vm_mm; 268 287 struct file *file = vma->vm_file; 269 288 struct proc_maps_private *priv = m->private; 270 - struct task_struct *task = priv->task; 271 289 vm_flags_t flags = vma->vm_flags; 272 290 unsigned long ino = 0; 273 291 unsigned long long pgoff = 0; ··· 350 332 goto done; 351 333 } 352 334 353 - tid = vm_is_stack(task, vma, is_pid); 354 - 335 + tid = pid_of_stack(priv, vma, is_pid); 355 336 if (tid != 0) { 356 337 /* 357 338 * Thread stack in /proc/PID/task/TID/maps or ··· 1463 1446 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { 1464 1447 seq_puts(m, " heap"); 1465 1448 } else { 1466 - pid_t tid = vm_is_stack(task, vma, is_pid); 1449 + pid_t tid = pid_of_stack(proc_priv, vma, is_pid); 1467 1450 if (tid != 0) { 1468 1451 /* 1469 1452 * Thread stack in /proc/PID/task/TID/maps or
+20 -1
fs/proc/task_nommu.c
··· 123 123 return size; 124 124 } 125 125 126 + static pid_t pid_of_stack(struct proc_maps_private *priv, 127 + struct vm_area_struct *vma, bool is_pid) 128 + { 129 + struct inode *inode = priv->inode; 130 + struct task_struct *task; 131 + pid_t ret = 0; 132 + 133 + rcu_read_lock(); 134 + task = pid_task(proc_pid(inode), PIDTYPE_PID); 135 + if (task) { 136 + task = task_of_stack(task, vma, is_pid); 137 + if (task) 138 + ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info); 139 + } 140 + rcu_read_unlock(); 141 + 142 + return ret; 143 + } 144 + 126 145 /* 127 146 * display a single VMA to a sequenced file 128 147 */ ··· 182 163 seq_pad(m, ' '); 183 164 seq_path(m, &file->f_path, ""); 184 165 } else if (mm) { 185 - pid_t tid = vm_is_stack(priv->task, vma, is_pid); 166 + pid_t tid = pid_of_stack(priv, vma, is_pid); 186 167 187 168 if (tid != 0) { 188 169 seq_pad(m, ' ');
+2 -2
include/linux/mm.h
··· 1247 1247 !vma_growsup(vma->vm_next, addr); 1248 1248 } 1249 1249 1250 - extern pid_t 1251 - vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group); 1250 + extern struct task_struct *task_of_stack(struct task_struct *task, 1251 + struct vm_area_struct *vma, bool in_group); 1252 1252 1253 1253 extern unsigned long move_page_tables(struct vm_area_struct *vma, 1254 1254 unsigned long old_addr, struct vm_area_struct *new_vma,
+8 -15
mm/util.c
··· 170 170 /* 171 171 * Check if the vma is being used as a stack. 172 172 * If is_group is non-zero, check in the entire thread group or else 173 - * just check in the current task. Returns the pid of the task that 174 - * the vma is stack for. 173 + * just check in the current task. Returns the task_struct of the task 174 + * that the vma is stack for. Must be called under rcu_read_lock(). 175 175 */ 176 - pid_t vm_is_stack(struct task_struct *task, 177 - struct vm_area_struct *vma, int in_group) 176 + struct task_struct *task_of_stack(struct task_struct *task, 177 + struct vm_area_struct *vma, bool in_group) 178 178 { 179 - pid_t ret = 0; 180 - 181 179 if (vm_is_stack_for_task(task, vma)) 182 - return task->pid; 180 + return task; 183 181 184 182 if (in_group) { 185 183 struct task_struct *t; 186 184 187 - rcu_read_lock(); 188 185 for_each_thread(task, t) { 189 - if (vm_is_stack_for_task(t, vma)) { 190 - ret = t->pid; 191 - goto done; 192 - } 186 + if (vm_is_stack_for_task(t, vma)) 187 + return t; 193 188 } 194 - done: 195 - rcu_read_unlock(); 196 189 } 197 190 198 - return ret; 191 + return NULL; 199 192 } 200 193 201 194 #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)