Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Use helpers to obtain task pid in printks

The task_struct->pid member is going to be deprecated, so start
using the helpers (task_pid_nr/task_pid_vnr/task_pid_nr_ns) in
the kernel.

The first thing to start with is the pid, printed to dmesg - in
this case we may safely use task_pid_nr(). Besides, printks account
for more (much more) than half of all the explicit pid usages.

[akpm@linux-foundation.org: git-drm went and changed lots of stuff]
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Cc: Dave Airlie <airlied@linux.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Pavel Emelyanov; committed by Linus Torvalds.
ba25f9dc 9a2e7057

+97 -90
+1 -1
block/ll_rw_blk.c
··· 3367 3367 if (unlikely(block_dump)) { 3368 3368 char b[BDEVNAME_SIZE]; 3369 3369 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n", 3370 - current->comm, current->pid, 3370 + current->comm, task_pid_nr(current), 3371 3371 (rw & WRITE) ? "WRITE" : "READ", 3372 3372 (unsigned long long)bio->bi_sector, 3373 3373 bdevname(bio->bi_bdev,b));
+1 -1
drivers/block/nbd.c
··· 188 188 if (signal_pending(current)) { 189 189 siginfo_t info; 190 190 printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n", 191 - current->pid, current->comm, 191 + task_pid_nr(current), current->comm, 192 192 dequeue_signal_lock(current, &current->blocked, &info)); 193 193 result = -EINTR; 194 194 sock_shutdown(lo, !send);
+1 -1
drivers/cdrom/cdrom.c
··· 1107 1107 is the default case! */ 1108 1108 cdinfo(CD_OPEN, "bummer. wrong media type.\n"); 1109 1109 cdinfo(CD_WARNING, "pid %d must open device O_NONBLOCK!\n", 1110 - (unsigned int)current->pid); 1110 + (unsigned int)task_pid_nr(current)); 1111 1111 ret=-EMEDIUMTYPE; 1112 1112 goto clean_up_and_return; 1113 1113 }
+1 -1
drivers/char/drm/drm_bufs.c
··· 1456 1456 buf = dma->buflist[idx]; 1457 1457 if (buf->file_priv != file_priv) { 1458 1458 DRM_ERROR("Process %d freeing buffer not owned\n", 1459 - current->pid); 1459 + task_pid_nr(current)); 1460 1460 return -EINVAL; 1461 1461 } 1462 1462 drm_free_buffer(dev, buf);
+1 -1
drivers/char/drm/drm_drv.c
··· 463 463 ++file_priv->ioctl_count; 464 464 465 465 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", 466 - current->pid, cmd, nr, 466 + task_pid_nr(current), cmd, nr, 467 467 (long)old_encode_dev(file_priv->head->device), 468 468 file_priv->authenticated); 469 469
+4 -3
drivers/char/drm/drm_fops.c
··· 234 234 if (!drm_cpu_valid()) 235 235 return -EINVAL; 236 236 237 - DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor); 237 + DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor); 238 238 239 239 priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES); 240 240 if (!priv) ··· 244 244 filp->private_data = priv; 245 245 priv->filp = filp; 246 246 priv->uid = current->euid; 247 - priv->pid = current->pid; 247 + priv->pid = task_pid_nr(current); 248 248 priv->minor = minor; 249 249 priv->head = drm_heads[minor]; 250 250 priv->ioctl_count = 0; ··· 339 339 */ 340 340 341 341 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", 342 - current->pid, (long)old_encode_dev(file_priv->head->device), 342 + task_pid_nr(current), 343 + (long)old_encode_dev(file_priv->head->device), 343 344 dev->open_count); 344 345 345 346 if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
+3 -3
drivers/char/drm/drm_lock.c
··· 58 58 59 59 if (lock->context == DRM_KERNEL_CONTEXT) { 60 60 DRM_ERROR("Process %d using kernel context %d\n", 61 - current->pid, lock->context); 61 + task_pid_nr(current), lock->context); 62 62 return -EINVAL; 63 63 } 64 64 65 65 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", 66 - lock->context, current->pid, 66 + lock->context, task_pid_nr(current), 67 67 dev->lock.hw_lock->lock, lock->flags); 68 68 69 69 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) ··· 153 153 154 154 if (lock->context == DRM_KERNEL_CONTEXT) { 155 155 DRM_ERROR("Process %d using kernel context %d\n", 156 - current->pid, lock->context); 156 + task_pid_nr(current), lock->context); 157 157 return -EINVAL; 158 158 } 159 159
+1 -1
drivers/char/drm/drm_os_linux.h
··· 7 7 #include <linux/delay.h> 8 8 9 9 /** Current process ID */ 10 - #define DRM_CURRENTPID current->pid 10 + #define DRM_CURRENTPID task_pid_nr(current) 11 11 #define DRM_SUSER(p) capable(CAP_SYS_ADMIN) 12 12 #define DRM_UDELAY(d) udelay(d) 13 13 /** Read a byte from a MMIO region */
+1 -1
drivers/char/drm/i810_dma.c
··· 1024 1024 retcode = i810_dma_get_buffer(dev, d, file_priv); 1025 1025 1026 1026 DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", 1027 - current->pid, retcode, d->granted); 1027 + task_pid_nr(current), retcode, d->granted); 1028 1028 1029 1029 sarea_priv->last_dispatch = (int)hw_status[5]; 1030 1030
+1 -1
drivers/char/drm/i830_dma.c
··· 1409 1409 retcode = i830_dma_get_buffer(dev, d, file_priv); 1410 1410 1411 1411 DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n", 1412 - current->pid, retcode, d->granted); 1412 + task_pid_nr(current), retcode, d->granted); 1413 1413 1414 1414 sarea_priv->last_dispatch = (int)hw_status[5]; 1415 1415
+1 -1
drivers/char/sx.c
··· 1467 1467 1468 1468 line = tty->index; 1469 1469 sx_dprintk(SX_DEBUG_OPEN, "%d: opening line %d. tty=%p ctty=%p, " 1470 - "np=%d)\n", current->pid, line, tty, 1470 + "np=%d)\n", task_pid_nr(current), line, tty, 1471 1471 current->signal->tty, sx_nports); 1472 1472 1473 1473 if ((line < 0) || (line >= SX_NPORTS) || (line >= sx_nports))
+3 -3
drivers/char/tty_io.c
··· 3530 3530 do_each_pid_task(session, PIDTYPE_SID, p) { 3531 3531 printk(KERN_NOTICE "SAK: killed process %d" 3532 3532 " (%s): task_session_nr(p)==tty->session\n", 3533 - p->pid, p->comm); 3533 + task_pid_nr(p), p->comm); 3534 3534 send_sig(SIGKILL, p, 1); 3535 3535 } while_each_pid_task(session, PIDTYPE_SID, p); 3536 3536 /* Now kill any processes that happen to have the ··· 3540 3540 if (p->signal->tty == tty) { 3541 3541 printk(KERN_NOTICE "SAK: killed process %d" 3542 3542 " (%s): task_session_nr(p)==tty->session\n", 3543 - p->pid, p->comm); 3543 + task_pid_nr(p), p->comm); 3544 3544 send_sig(SIGKILL, p, 1); 3545 3545 continue; 3546 3546 } ··· 3560 3560 filp->private_data == tty) { 3561 3561 printk(KERN_NOTICE "SAK: killed process %d" 3562 3562 " (%s): fd#%d opened to the tty\n", 3563 - p->pid, p->comm, i); 3563 + task_pid_nr(p), p->comm, i); 3564 3564 force_sig(SIGKILL, p); 3565 3565 break; 3566 3566 }
+2 -2
drivers/hid/hidraw.c
··· 113 113 114 114 if (count > HID_MIN_BUFFER_SIZE) { 115 115 printk(KERN_WARNING "hidraw: pid %d passed too large report\n", 116 - current->pid); 116 + task_pid_nr(current)); 117 117 return -EINVAL; 118 118 } 119 119 120 120 if (count < 2) { 121 121 printk(KERN_WARNING "hidraw: pid %d passed too short report\n", 122 - current->pid); 122 + task_pid_nr(current)); 123 123 return -EINVAL; 124 124 } 125 125
+1 -1
drivers/md/md.c
··· 4717 4717 4718 4718 void md_unregister_thread(mdk_thread_t *thread) 4719 4719 { 4720 - dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid); 4720 + dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 4721 4721 4722 4722 kthread_stop(thread->tsk); 4723 4723 kfree(thread);
+2 -2
drivers/media/video/zoran_driver.c
··· 1285 1285 } 1286 1286 1287 1287 dprintk(1, KERN_INFO "%s: zoran_open(%s, pid=[%d]), users(-)=%d\n", 1288 - ZR_DEVNAME(zr), current->comm, current->pid, zr->user); 1288 + ZR_DEVNAME(zr), current->comm, task_pid_nr(current), zr->user); 1289 1289 1290 1290 /* now, create the open()-specific file_ops struct */ 1291 1291 fh = kzalloc(sizeof(struct zoran_fh), GFP_KERNEL); ··· 1358 1358 struct zoran *zr = fh->zr; 1359 1359 1360 1360 dprintk(1, KERN_INFO "%s: zoran_close(%s, pid=[%d]), users(+)=%d\n", 1361 - ZR_DEVNAME(zr), current->comm, current->pid, zr->user); 1361 + ZR_DEVNAME(zr), current->comm, task_pid_nr(current), zr->user); 1362 1362 1363 1363 /* kernel locks (fs/device.c), so don't do that ourselves 1364 1364 * (prevents deadlocks) */
+1 -1
drivers/mtd/ubi/wl.c
··· 1309 1309 struct ubi_device *ubi = u; 1310 1310 1311 1311 ubi_msg("background thread \"%s\" started, PID %d", 1312 - ubi->bgt_name, current->pid); 1312 + ubi->bgt_name, task_pid_nr(current)); 1313 1313 1314 1314 set_freezable(); 1315 1315 for (;;) {
+1 -1
drivers/net/wireless/hostap/hostap_ioctl.c
··· 2920 2920 2921 2921 printk(KERN_DEBUG "%s: process %d (%s) used deprecated iwpriv monitor " 2922 2922 "- update software to use iwconfig mode monitor\n", 2923 - dev->name, current->pid, current->comm); 2923 + dev->name, task_pid_nr(current), current->comm); 2924 2924 2925 2925 /* Backward compatibility code - this can be removed at some point */ 2926 2926
+4 -4
drivers/scsi/libsas/sas_discover.c
··· 285 285 dev = port->port_dev; 286 286 287 287 SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id, 288 - current->pid); 288 + task_pid_nr(current)); 289 289 290 290 switch (dev->dev_type) { 291 291 case SAS_END_DEV: ··· 320 320 } 321 321 322 322 SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id, 323 - current->pid, error); 323 + task_pid_nr(current), error); 324 324 } 325 325 326 326 static void sas_revalidate_domain(struct work_struct *work) ··· 334 334 &port->disc.pending); 335 335 336 336 SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, 337 - current->pid); 337 + task_pid_nr(current)); 338 338 if (port->port_dev) 339 339 res = sas_ex_revalidate_domain(port->port_dev); 340 340 341 341 SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", 342 - port->id, current->pid, res); 342 + port->id, task_pid_nr(current), res); 343 343 } 344 344 345 345 /* ---------- Events ---------- */
+1 -1
drivers/usb/core/devio.c
··· 460 460 return 0; 461 461 /* if not yet claimed, claim it for the driver */ 462 462 dev_warn(&ps->dev->dev, "usbfs: process %d (%s) did not claim interface %u before use\n", 463 - current->pid, current->comm, ifnum); 463 + task_pid_nr(current), current->comm, ifnum); 464 464 return claimintf(ps, ifnum); 465 465 } 466 466
+1 -1
drivers/usb/gadget/file_storage.c
··· 4006 4006 DBG(fsg, "removable=%d, stall=%d, buflen=%u\n", 4007 4007 mod_data.removable, mod_data.can_stall, 4008 4008 mod_data.buflen); 4009 - DBG(fsg, "I/O thread pid: %d\n", fsg->thread_task->pid); 4009 + DBG(fsg, "I/O thread pid: %d\n", task_pid_nr(fsg->thread_task)); 4010 4010 4011 4011 set_bit(REGISTERED, &fsg->atomic_bitflags); 4012 4012
+1 -1
fs/cifs/connect.c
··· 352 352 353 353 current->flags |= PF_MEMALLOC; 354 354 server->tsk = current; /* save process info to wake at shutdown */ 355 - cFYI(1, ("Demultiplex PID: %d", current->pid)); 355 + cFYI(1, ("Demultiplex PID: %d", task_pid_nr(current))); 356 356 write_lock(&GlobalSMBSeslock); 357 357 atomic_inc(&tcpSesAllocCount); 358 358 length = tcpSesAllocCount.counter;
+1 -1
fs/dlm/user.c
··· 456 456 printk(KERN_DEBUG "dlm: process %s (%d) version mismatch " 457 457 "user (%d.%d.%d) kernel (%d.%d.%d)\n", 458 458 current->comm, 459 - current->pid, 459 + task_pid_nr(current), 460 460 req->version[0], 461 461 req->version[1], 462 462 req->version[2],
+1 -1
fs/fs-writeback.c
··· 89 89 if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) 90 90 printk(KERN_DEBUG 91 91 "%s(%d): dirtied inode %lu (%s) on %s\n", 92 - current->comm, current->pid, inode->i_ino, 92 + current->comm, task_pid_nr(current), inode->i_ino, 93 93 name, inode->i_sb->s_id); 94 94 } 95 95
+4 -4
fs/jffs2/debug.h
··· 80 80 #define JFFS2_ERROR(fmt, ...) \ 81 81 do { \ 82 82 printk(JFFS2_ERR_MSG_PREFIX \ 83 - " (%d) %s: " fmt, current->pid, \ 83 + " (%d) %s: " fmt, task_pid_nr(current), \ 84 84 __FUNCTION__ , ##__VA_ARGS__); \ 85 85 } while(0) 86 86 87 87 #define JFFS2_WARNING(fmt, ...) \ 88 88 do { \ 89 89 printk(JFFS2_WARN_MSG_PREFIX \ 90 - " (%d) %s: " fmt, current->pid, \ 90 + " (%d) %s: " fmt, task_pid_nr(current), \ 91 91 __FUNCTION__ , ##__VA_ARGS__); \ 92 92 } while(0) 93 93 94 94 #define JFFS2_NOTICE(fmt, ...) \ 95 95 do { \ 96 96 printk(JFFS2_NOTICE_MSG_PREFIX \ 97 - " (%d) %s: " fmt, current->pid, \ 97 + " (%d) %s: " fmt, task_pid_nr(current), \ 98 98 __FUNCTION__ , ##__VA_ARGS__); \ 99 99 } while(0) 100 100 101 101 #define JFFS2_DEBUG(fmt, ...) \ 102 102 do { \ 103 103 printk(JFFS2_DBG_MSG_PREFIX \ 104 - " (%d) %s: " fmt, current->pid, \ 104 + " (%d) %s: " fmt, task_pid_nr(current), \ 105 105 __FUNCTION__ , ##__VA_ARGS__); \ 106 106 } while(0) 107 107
+3 -3
fs/nfsd/vfs.c
··· 1029 1029 if (EX_WGATHER(exp)) { 1030 1030 if (atomic_read(&inode->i_writecount) > 1 1031 1031 || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) { 1032 - dprintk("nfsd: write defer %d\n", current->pid); 1032 + dprintk("nfsd: write defer %d\n", task_pid_nr(current)); 1033 1033 msleep(10); 1034 - dprintk("nfsd: write resume %d\n", current->pid); 1034 + dprintk("nfsd: write resume %d\n", task_pid_nr(current)); 1035 1035 } 1036 1036 1037 1037 if (inode->i_state & I_DIRTY) { 1038 - dprintk("nfsd: write sync %d\n", current->pid); 1038 + dprintk("nfsd: write sync %d\n", task_pid_nr(current)); 1039 1039 host_err=nfsd_sync(file); 1040 1040 } 1041 1041 #if 0
+1 -1
fs/ocfs2/cluster/heartbeat.c
··· 1372 1372 1373 1373 spin_lock(&o2hb_live_lock); 1374 1374 if (reg->hr_task) 1375 - pid = reg->hr_task->pid; 1375 + pid = task_pid_nr(reg->hr_task); 1376 1376 spin_unlock(&o2hb_live_lock); 1377 1377 1378 1378 if (!pid)
+1 -1
fs/ocfs2/cluster/masklog.h
··· 192 192 * previous token if args expands to nothing. 193 193 */ 194 194 #define __mlog_printk(level, fmt, args...) \ 195 - printk(level "(%u,%lu):%s:%d " fmt, current->pid, \ 195 + printk(level "(%u,%lu):%s:%d " fmt, task_pid_nr(current), \ 196 196 __mlog_cpu_guess, __PRETTY_FUNCTION__, __LINE__ , \ 197 197 ##args) 198 198
+5 -5
fs/ocfs2/dlm/dlmrecovery.c
··· 259 259 struct dlm_lock_resource *res; 260 260 261 261 mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n", 262 - dlm->name, dlm->dlm_reco_thread_task->pid, 262 + dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), 263 263 dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive", 264 264 dlm->reco.dead_node, dlm->reco.new_master); 265 265 ··· 420 420 if (dlm_in_recovery(dlm)) { 421 421 mlog(0, "%s: reco thread %d in recovery: " 422 422 "state=%d, master=%u, dead=%u\n", 423 - dlm->name, dlm->dlm_reco_thread_task->pid, 423 + dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), 424 424 dlm->reco.state, dlm->reco.new_master, 425 425 dlm->reco.dead_node); 426 426 } ··· 483 483 return 0; 484 484 } 485 485 mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n", 486 - dlm->name, dlm->dlm_reco_thread_task->pid, 486 + dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), 487 487 dlm->reco.dead_node); 488 488 spin_unlock(&dlm->spinlock); 489 489 ··· 507 507 mlog(0, "another node will master this recovery session.\n"); 508 508 } 509 509 mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n", 510 - dlm->name, dlm->dlm_reco_thread_task->pid, dlm->reco.new_master, 510 + dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master, 511 511 dlm->node_num, dlm->reco.dead_node); 512 512 513 513 /* it is safe to start everything back up here ··· 520 520 521 521 master_here: 522 522 mlog(0, "(%d) mastering recovery of %s:%u here(this=%u)!\n", 523 - dlm->dlm_reco_thread_task->pid, 523 + task_pid_nr(dlm->dlm_reco_thread_task), 524 524 dlm->name, dlm->reco.dead_node, dlm->node_num); 525 525 526 526 status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
+1 -1
include/linux/reiserfs_fs.h
··· 85 85 if( !( cond ) ) \ 86 86 reiserfs_panic( NULL, "reiserfs[%i]: assertion " scond " failed at " \ 87 87 __FILE__ ":%i:%s: " format "\n", \ 88 - in_interrupt() ? -1 : current -> pid, __LINE__ , __FUNCTION__ , ##args ) 88 + in_interrupt() ? -1 : task_pid_nr(current), __LINE__ , __FUNCTION__ , ##args ) 89 89 90 90 #define RASSERT(cond, format, args...) __RASSERT(cond, #cond, format, ##args) 91 91
+2 -2
include/net/9p/9p.h
··· 44 44 do { \ 45 45 if ((p9_debug_level & level) == level) \ 46 46 printk(KERN_NOTICE "-- %s (%d): " \ 47 - format , __FUNCTION__, current->pid , ## arg); \ 47 + format , __FUNCTION__, task_pid_nr(current) , ## arg); \ 48 48 } while (0) 49 49 50 50 #define PRINT_FCALL_ERROR(s, fcall) P9_DPRINTK(P9_DEBUG_ERROR, \ ··· 59 59 #define P9_EPRINTK(level, format, arg...) \ 60 60 do { \ 61 61 printk(level "9p: %s (%d): " \ 62 - format , __FUNCTION__, current->pid , ## arg); \ 62 + format , __FUNCTION__, task_pid_nr(current), ## arg); \ 63 63 } while (0) 64 64 65 65
+2 -1
kernel/cpu.c
··· 98 98 !cputime_eq(p->stime, cputime_zero))) 99 99 printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\ 100 100 (state = %ld, flags = %x) \n", 101 - p->comm, p->pid, cpu, p->state, p->flags); 101 + p->comm, task_pid_nr(p), cpu, 102 + p->state, p->flags); 102 103 } 103 104 write_unlock_irq(&tasklist_lock); 104 105 }
+1 -1
kernel/exit.c
··· 959 959 960 960 if (unlikely(in_atomic())) 961 961 printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", 962 - current->comm, current->pid, 962 + current->comm, task_pid_nr(current), 963 963 preempt_count()); 964 964 965 965 acct_update_integrals(tsk);
+11 -11
kernel/lockdep.c
··· 511 511 int i, depth = curr->lockdep_depth; 512 512 513 513 if (!depth) { 514 - printk("no locks held by %s/%d.\n", curr->comm, curr->pid); 514 + printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr)); 515 515 return; 516 516 } 517 517 printk("%d lock%s held by %s/%d:\n", 518 - depth, depth > 1 ? "s" : "", curr->comm, curr->pid); 518 + depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr)); 519 519 520 520 for (i = 0; i < depth; i++) { 521 521 printk(" #%d: ", i); ··· 904 904 print_kernel_version(); 905 905 printk( "-------------------------------------------------------\n"); 906 906 printk("%s/%d is trying to acquire lock:\n", 907 - curr->comm, curr->pid); 907 + curr->comm, task_pid_nr(curr)); 908 908 print_lock(check_source); 909 909 printk("\nbut task is already holding lock:\n"); 910 910 print_lock(check_target); ··· 1085 1085 print_kernel_version(); 1086 1086 printk( "------------------------------------------------------\n"); 1087 1087 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", 1088 - curr->comm, curr->pid, 1088 + curr->comm, task_pid_nr(curr), 1089 1089 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, 1090 1090 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, 1091 1091 curr->hardirqs_enabled, ··· 1237 1237 print_kernel_version(); 1238 1238 printk( "---------------------------------------------\n"); 1239 1239 printk("%s/%d is trying to acquire lock:\n", 1240 - curr->comm, curr->pid); 1240 + curr->comm, task_pid_nr(curr)); 1241 1241 print_lock(next); 1242 1242 printk("\nbut task is already holding lock:\n"); 1243 1243 print_lock(prev); ··· 1641 1641 usage_str[prev_bit], usage_str[new_bit]); 1642 1642 1643 1643 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", 1644 - curr->comm, curr->pid, 1644 + curr->comm, task_pid_nr(curr), 1645 1645 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, 1646 1646 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, 1647 1647 
trace_hardirqs_enabled(curr), ··· 1694 1694 print_kernel_version(); 1695 1695 printk( "---------------------------------------------------------\n"); 1696 1696 printk("%s/%d just changed the state of lock:\n", 1697 - curr->comm, curr->pid); 1697 + curr->comm, task_pid_nr(curr)); 1698 1698 print_lock(this); 1699 1699 if (forwards) 1700 1700 printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass); ··· 2487 2487 printk( "[ BUG: bad unlock balance detected! ]\n"); 2488 2488 printk( "-------------------------------------\n"); 2489 2489 printk("%s/%d is trying to release lock (", 2490 - curr->comm, curr->pid); 2490 + curr->comm, task_pid_nr(curr)); 2491 2491 print_lockdep_cache(lock); 2492 2492 printk(") at:\n"); 2493 2493 print_ip_sym(ip); ··· 2737 2737 printk( "[ BUG: bad contention detected! ]\n"); 2738 2738 printk( "---------------------------------\n"); 2739 2739 printk("%s/%d is trying to contend lock (", 2740 - curr->comm, curr->pid); 2740 + curr->comm, task_pid_nr(curr)); 2741 2741 print_lockdep_cache(lock); 2742 2742 printk(") at:\n"); 2743 2743 print_ip_sym(ip); ··· 3072 3072 printk( "[ BUG: held lock freed! ]\n"); 3073 3073 printk( "-------------------------\n"); 3074 3074 printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n", 3075 - curr->comm, curr->pid, mem_from, mem_to-1); 3075 + curr->comm, task_pid_nr(curr), mem_from, mem_to-1); 3076 3076 print_lock(hlock); 3077 3077 lockdep_print_held_locks(curr); 3078 3078 ··· 3125 3125 printk( "[ BUG: lock held at task exit time! ]\n"); 3126 3126 printk( "-------------------------------------\n"); 3127 3127 printk("%s/%d is exiting with locks still held!\n", 3128 - curr->comm, curr->pid); 3128 + curr->comm, task_pid_nr(curr)); 3129 3129 lockdep_print_held_locks(curr); 3130 3130 3131 3131 printk("\nstack backtrace:\n");
+9 -6
kernel/rtmutex-debug.c
··· 87 87 static void printk_task(struct task_struct *p) 88 88 { 89 89 if (p) 90 - printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio); 90 + printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio); 91 91 else 92 92 printk("<none>"); 93 93 } ··· 152 152 printk( "[ BUG: circular locking deadlock detected! ]\n"); 153 153 printk( "--------------------------------------------\n"); 154 154 printk("%s/%d is deadlocking current task %s/%d\n\n", 155 - task->comm, task->pid, current->comm, current->pid); 155 + task->comm, task_pid_nr(task), 156 + current->comm, task_pid_nr(current)); 156 157 157 158 printk("\n1) %s/%d is trying to acquire this lock:\n", 158 - current->comm, current->pid); 159 + current->comm, task_pid_nr(current)); 159 160 printk_lock(waiter->lock, 1); 160 161 161 - printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid); 162 + printk("\n2) %s/%d is blocked on this lock:\n", 163 + task->comm, task_pid_nr(task)); 162 164 printk_lock(waiter->deadlock_lock, 1); 163 165 164 166 debug_show_held_locks(current); 165 167 debug_show_held_locks(task); 166 168 167 - printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid); 169 + printk("\n%s/%d's [blocked] stackdump:\n\n", 170 + task->comm, task_pid_nr(task)); 168 171 show_stack(task, NULL); 169 172 printk("\n%s/%d's [current] stackdump:\n\n", 170 - current->comm, current->pid); 173 + current->comm, task_pid_nr(current)); 171 174 dump_stack(); 172 175 debug_show_all_locks(); 173 176
+1 -1
kernel/rtmutex.c
··· 185 185 prev_max = max_lock_depth; 186 186 printk(KERN_WARNING "Maximum lock depth %d reached " 187 187 "task: %s (%d)\n", max_lock_depth, 188 - top_task->comm, top_task->pid); 188 + top_task->comm, task_pid_nr(top_task)); 189 189 } 190 190 put_task_struct(task); 191 191
+4 -3
kernel/sched.c
··· 3502 3502 static noinline void __schedule_bug(struct task_struct *prev) 3503 3503 { 3504 3504 printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n", 3505 - prev->comm, preempt_count(), prev->pid); 3505 + prev->comm, preempt_count(), task_pid_nr(prev)); 3506 3506 debug_show_held_locks(prev); 3507 3507 if (irqs_disabled()) 3508 3508 print_irqtrace_events(prev); ··· 4865 4865 free = (unsigned long)n - (unsigned long)end_of_stack(p); 4866 4866 } 4867 4867 #endif 4868 - printk(KERN_CONT "%5lu %5d %6d\n", free, p->pid, p->parent->pid); 4868 + printk(KERN_CONT "%5lu %5d %6d\n", free, 4869 + task_pid_nr(p), task_pid_nr(p->parent)); 4869 4870 4870 4871 if (state != TASK_RUNNING) 4871 4872 show_stack(p, NULL); ··· 5173 5172 if (p->mm && printk_ratelimit()) 5174 5173 printk(KERN_INFO "process %d (%s) no " 5175 5174 "longer affine to cpu%d\n", 5176 - p->pid, p->comm, dead_cpu); 5175 + task_pid_nr(p), p->comm, dead_cpu); 5177 5176 } 5178 5177 } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); 5179 5178 }
+1 -1
kernel/signal.c
··· 730 730 static void print_fatal_signal(struct pt_regs *regs, int signr) 731 731 { 732 732 printk("%s/%d: potentially unexpected fatal signal %d.\n", 733 - current->comm, current->pid, signr); 733 + current->comm, task_pid_nr(current), signr); 734 734 735 735 #ifdef __i386__ 736 736 printk("code at %08lx: ", regs->eip);
+1 -1
kernel/softlockup.c
··· 113 113 spin_lock(&print_lock); 114 114 printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n", 115 115 this_cpu, now - touch_timestamp, 116 - current->comm, current->pid); 116 + current->comm, task_pid_nr(current)); 117 117 if (regs) 118 118 show_regs(regs); 119 119 else
+1 -1
kernel/workqueue.c
··· 282 282 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " 283 283 "%s/0x%08x/%d\n", 284 284 current->comm, preempt_count(), 285 - current->pid); 285 + task_pid_nr(current)); 286 286 printk(KERN_ERR " last function: "); 287 287 print_symbol("%s\n", (unsigned long)f); 288 288 debug_show_held_locks(current);
+4 -4
lib/spinlock_debug.c
··· 60 60 owner = lock->owner; 61 61 printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", 62 62 msg, raw_smp_processor_id(), 63 - current->comm, current->pid); 63 + current->comm, task_pid_nr(current)); 64 64 printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, " 65 65 ".owner_cpu: %d\n", 66 66 lock, lock->magic, 67 67 owner ? owner->comm : "<none>", 68 - owner ? owner->pid : -1, 68 + owner ? task_pid_nr(owner) : -1, 69 69 lock->owner_cpu); 70 70 dump_stack(); 71 71 } ··· 116 116 printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, " 117 117 "%s/%d, %p\n", 118 118 raw_smp_processor_id(), current->comm, 119 - current->pid, lock); 119 + task_pid_nr(current), lock); 120 120 dump_stack(); 121 121 #ifdef CONFIG_SMP 122 122 trigger_all_cpu_backtrace(); ··· 161 161 162 162 printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n", 163 163 msg, raw_smp_processor_id(), current->comm, 164 - current->pid, lock); 164 + task_pid_nr(current), lock); 165 165 dump_stack(); 166 166 } 167 167
+3 -2
mm/oom_kill.c
··· 278 278 } 279 279 280 280 if (verbose) 281 - printk(KERN_ERR "Killed process %d (%s)\n", p->pid, p->comm); 281 + printk(KERN_ERR "Killed process %d (%s)\n", 282 + task_pid_nr(p), p->comm); 282 283 283 284 /* 284 285 * We give our sacrificial lamb high priority and access to ··· 357 356 } 358 357 359 358 printk(KERN_ERR "%s: kill process %d (%s) score %li or a child\n", 360 - message, p->pid, p->comm, points); 359 + message, task_pid_nr(p), p->comm, points); 361 360 362 361 /* Try to kill a child first */ 363 362 list_for_each_entry(c, &p->children, sibling) {
+1 -1
net/core/pktgen.c
··· 3514 3514 3515 3515 init_waitqueue_head(&t->queue); 3516 3516 3517 - pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid); 3517 + pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current)); 3518 3518 3519 3519 set_current_state(TASK_INTERRUPTIBLE); 3520 3520
+1 -1
net/core/sock.c
··· 232 232 warned++; 233 233 printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) " 234 234 "tries to set negative timeout\n", 235 - current->comm, current->pid); 235 + current->comm, task_pid_nr(current)); 236 236 return 0; 237 237 } 238 238 *timeo_p = MAX_SCHEDULE_TIMEOUT;
+2 -2
net/ipv4/ipvs/ip_vs_sync.c
··· 877 877 if (!tinfo) 878 878 return -ENOMEM; 879 879 880 - IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid); 880 + IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, task_pid_nr(current)); 881 881 IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n", 882 882 sizeof(struct ip_vs_sync_conn)); 883 883 ··· 917 917 (state == IP_VS_STATE_BACKUP && !sync_backup_pid)) 918 918 return -ESRCH; 919 919 920 - IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid); 920 + IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, task_pid_nr(current)); 921 921 IP_VS_INFO("stopping sync thread %d ...\n", 922 922 (state == IP_VS_STATE_MASTER) ? 923 923 sync_master_pid : sync_backup_pid);
+1 -1
net/ipv4/tcp.c
··· 1334 1334 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) { 1335 1335 if (net_ratelimit()) 1336 1336 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n", 1337 - current->comm, current->pid); 1337 + current->comm, task_pid_nr(current)); 1338 1338 peek_seq = tp->copied_seq; 1339 1339 } 1340 1340 continue;
+1 -1
net/llc/af_llc.c
··· 762 762 if (net_ratelimit()) 763 763 printk(KERN_DEBUG "LLC(%s:%d): Application " 764 764 "bug, race in MSG_PEEK.\n", 765 - current->comm, current->pid); 765 + current->comm, task_pid_nr(current)); 766 766 peek_seq = llc->copied_seq; 767 767 } 768 768 continue;
+1 -1
net/sunrpc/sched.c
··· 847 847 task->tk_start = jiffies; 848 848 849 849 dprintk("RPC: new task initialized, procpid %u\n", 850 - current->pid); 850 + task_pid_nr(current)); 851 851 } 852 852 853 853 static struct rpc_task *