Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sparse pointer use of zero as null

Get rid of sparse-related warnings from places that use an integer as a NULL
pointer.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Cc: Andi Kleen <ak@suse.de>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Ian Kent <raven@themaw.net>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Davide Libenzi <davidel@xmailserver.org>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Stephen Hemminger and committed by
Linus Torvalds
c80544dc 0e9663ee

+34 -33
+1 -1
arch/x86/kernel/vsyscall_64.c
··· 172 172 if (unlikely(!__vsyscall_gtod_data.sysctl_enabled)) 173 173 return time_syscall(t); 174 174 175 - vgettimeofday(&tv, 0); 175 + vgettimeofday(&tv, NULL); 176 176 result = tv.tv_sec; 177 177 if (t) 178 178 *t = result;
+1 -1
drivers/ata/libata-sff.c
··· 882 882 /* Filter out DMA modes if the device has been configured by 883 883 the BIOS as PIO only */ 884 884 885 - if (adev->link->ap->ioaddr.bmdma_addr == 0) 885 + if (adev->link->ap->ioaddr.bmdma_addr == NULL) 886 886 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 887 887 return xfer_mask; 888 888 }
+1 -1
drivers/base/dmapool.c
··· 366 366 unsigned long flags; 367 367 int map, block; 368 368 369 - if ((page = pool_find_page (pool, dma)) == 0) { 369 + if ((page = pool_find_page(pool, dma)) == NULL) { 370 370 if (pool->dev) 371 371 dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n", 372 372 pool->name, vaddr, (unsigned long) dma);
+1 -1
drivers/char/random.c
··· 649 649 650 650 void add_interrupt_randomness(int irq) 651 651 { 652 - if (irq >= NR_IRQS || irq_timer_state[irq] == 0) 652 + if (irq >= NR_IRQS || irq_timer_state[irq] == NULL) 653 653 return; 654 654 655 655 DEBUG_ENT("irq event %d\n", irq);
+1 -1
fs/autofs/waitq.c
··· 182 182 { 183 183 struct autofs_wait_queue *wq, **wql; 184 184 185 - for ( wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next ) { 185 + for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) { 186 186 if ( wq->wait_queue_token == wait_queue_token ) 187 187 break; 188 188 }
+1 -1
fs/autofs4/waitq.c
··· 376 376 struct autofs_wait_queue *wq, **wql; 377 377 378 378 mutex_lock(&sbi->wq_mutex); 379 - for (wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next) { 379 + for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) { 380 380 if (wq->wait_queue_token == wait_queue_token) 381 381 break; 382 382 }
+1 -1
fs/compat_ioctl.c
··· 3001 3001 int i; 3002 3002 3003 3003 for (i = 0; i < ARRAY_SIZE(ioctl_start); i++) { 3004 - if (ioctl_start[i].next != 0) { 3004 + if (ioctl_start[i].next) { 3005 3005 printk("ioctl translation %d bad\n",i); 3006 3006 return -1; 3007 3007 }
+1 -1
fs/eventpoll.c
··· 463 463 * holding "epmutex" we can be sure that no file cleanup code will hit 464 464 * us during this operation. So we can avoid the lock on "ep->lock". 465 465 */ 466 - while ((rbp = rb_first(&ep->rbr)) != 0) { 466 + while ((rbp = rb_first(&ep->rbr)) != NULL) { 467 467 epi = rb_entry(rbp, struct epitem, rbn); 468 468 ep_remove(ep, epi); 469 469 }
+1 -1
fs/ext3/fsync.c
··· 47 47 struct inode *inode = dentry->d_inode; 48 48 int ret = 0; 49 49 50 - J_ASSERT(ext3_journal_current_handle() == 0); 50 + J_ASSERT(ext3_journal_current_handle() == NULL); 51 51 52 52 /* 53 53 * data=writeback:
+1 -1
fs/ext3/inode.c
··· 1028 1028 } 1029 1029 if (buffer_new(&dummy)) { 1030 1030 J_ASSERT(create != 0); 1031 - J_ASSERT(handle != 0); 1031 + J_ASSERT(handle != NULL); 1032 1032 1033 1033 /* 1034 1034 * Now that we do not always journal data, we should
+5 -4
fs/jbd/journal.c
··· 217 217 if (IS_ERR(t)) 218 218 return PTR_ERR(t); 219 219 220 - wait_event(journal->j_wait_done_commit, journal->j_task != 0); 220 + wait_event(journal->j_wait_done_commit, journal->j_task != NULL); 221 221 return 0; 222 222 } 223 223 ··· 229 229 while (journal->j_task) { 230 230 wake_up(&journal->j_wait_commit); 231 231 spin_unlock(&journal->j_state_lock); 232 - wait_event(journal->j_wait_done_commit, journal->j_task == 0); 232 + wait_event(journal->j_wait_done_commit, 233 + journal->j_task == NULL); 233 234 spin_lock(&journal->j_state_lock); 234 235 } 235 236 spin_unlock(&journal->j_state_lock); ··· 1652 1651 atomic_inc(&nr_journal_heads); 1653 1652 #endif 1654 1653 ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS); 1655 - if (ret == 0) { 1654 + if (ret == NULL) { 1656 1655 jbd_debug(1, "out of memory for journal_head\n"); 1657 1656 if (time_after(jiffies, last_warning + 5*HZ)) { 1658 1657 printk(KERN_NOTICE "ENOMEM in %s, retrying.\n", 1659 1658 __FUNCTION__); 1660 1659 last_warning = jiffies; 1661 1660 } 1662 - while (ret == 0) { 1661 + while (ret == NULL) { 1663 1662 yield(); 1664 1663 ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS); 1665 1664 }
+6 -6
fs/jbd/transaction.c
··· 1172 1172 } 1173 1173 1174 1174 /* That test should have eliminated the following case: */ 1175 - J_ASSERT_JH(jh, jh->b_frozen_data == 0); 1175 + J_ASSERT_JH(jh, jh->b_frozen_data == NULL); 1176 1176 1177 1177 JBUFFER_TRACE(jh, "file as BJ_Metadata"); 1178 1178 spin_lock(&journal->j_list_lock); ··· 1522 1522 1523 1523 J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); 1524 1524 if (jh->b_jlist != BJ_None) 1525 - J_ASSERT_JH(jh, transaction != 0); 1525 + J_ASSERT_JH(jh, transaction != NULL); 1526 1526 1527 1527 switch (jh->b_jlist) { 1528 1528 case BJ_None: ··· 1591 1591 if (buffer_locked(bh) || buffer_dirty(bh)) 1592 1592 goto out; 1593 1593 1594 - if (jh->b_next_transaction != 0) 1594 + if (jh->b_next_transaction != NULL) 1595 1595 goto out; 1596 1596 1597 1597 spin_lock(&journal->j_list_lock); 1598 - if (jh->b_transaction != 0 && jh->b_cp_transaction == 0) { 1598 + if (jh->b_transaction != NULL && jh->b_cp_transaction == NULL) { 1599 1599 if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) { 1600 1600 /* A written-back ordered data buffer */ 1601 1601 JBUFFER_TRACE(jh, "release data"); ··· 1603 1603 journal_remove_journal_head(bh); 1604 1604 __brelse(bh); 1605 1605 } 1606 - } else if (jh->b_cp_transaction != 0 && jh->b_transaction == 0) { 1606 + } else if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) { 1607 1607 /* written-back checkpointed metadata buffer */ 1608 1608 if (jh->b_jlist == BJ_None) { 1609 1609 JBUFFER_TRACE(jh, "remove from checkpoint list"); ··· 1963 1963 1964 1964 J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); 1965 1965 J_ASSERT_JH(jh, jh->b_transaction == transaction || 1966 - jh->b_transaction == 0); 1966 + jh->b_transaction == NULL); 1967 1967 1968 1968 if (jh->b_transaction && jh->b_jlist == jlist) 1969 1969 return;
+1 -1
fs/nls/nls_base.c
··· 111 111 int c, nc; 112 112 const struct utf8_table *t; 113 113 114 - if (s == 0) 114 + if (!s) 115 115 return 0; 116 116 117 117 l = wc;
+3 -3
init/do_mounts_rd.c
··· 57 57 unsigned char *buf; 58 58 59 59 buf = kmalloc(size, GFP_KERNEL); 60 - if (buf == 0) 60 + if (!buf) 61 61 return -1; 62 62 63 63 minixsb = (struct minix_super_block *) buf; ··· 407 407 crd_infd = in_fd; 408 408 crd_outfd = out_fd; 409 409 inbuf = kmalloc(INBUFSIZ, GFP_KERNEL); 410 - if (inbuf == 0) { 410 + if (!inbuf) { 411 411 printk(KERN_ERR "RAMDISK: Couldn't allocate gzip buffer\n"); 412 412 return -1; 413 413 } 414 414 window = kmalloc(WSIZE, GFP_KERNEL); 415 - if (window == 0) { 415 + if (!window) { 416 416 printk(KERN_ERR "RAMDISK: Couldn't allocate gzip window\n"); 417 417 kfree(inbuf); 418 418 return -1;
+2 -2
kernel/futex.c
··· 293 293 */ 294 294 void drop_futex_key_refs(union futex_key *key) 295 295 { 296 - if (key->both.ptr == 0) 296 + if (!key->both.ptr) 297 297 return; 298 298 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { 299 299 case FUT_OFF_INODE: ··· 1046 1046 retry: 1047 1047 lock_ptr = q->lock_ptr; 1048 1048 barrier(); 1049 - if (lock_ptr != 0) { 1049 + if (lock_ptr != NULL) { 1050 1050 spin_lock(lock_ptr); 1051 1051 /* 1052 1052 * q->lock_ptr can change between reading it and
+2 -2
kernel/kexec.c
··· 785 785 size_t uchunk, mchunk; 786 786 787 787 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); 788 - if (page == 0) { 788 + if (!page) { 789 789 result = -ENOMEM; 790 790 goto out; 791 791 } ··· 844 844 size_t uchunk, mchunk; 845 845 846 846 page = pfn_to_page(maddr >> PAGE_SHIFT); 847 - if (page == 0) { 847 + if (!page) { 848 848 result = -ENOMEM; 849 849 goto out; 850 850 }
+1 -1
mm/hugetlb.c
··· 1020 1020 * size such that we can guarentee to record the reservation. */ 1021 1021 if (&rg->link == head || t < rg->from) { 1022 1022 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); 1023 - if (nrg == 0) 1023 + if (!nrg) 1024 1024 return -ENOMEM; 1025 1025 nrg->from = f; 1026 1026 nrg->to = f;
+2 -2
mm/mremap.c
··· 291 291 if ((addr <= new_addr) && (addr+old_len) > new_addr) 292 292 goto out; 293 293 294 - ret = security_file_mmap(0, 0, 0, 0, new_addr, 1); 294 + ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); 295 295 if (ret) 296 296 goto out; 297 297 ··· 399 399 goto out; 400 400 } 401 401 402 - ret = security_file_mmap(0, 0, 0, 0, new_addr, 1); 402 + ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); 403 403 if (ret) 404 404 goto out; 405 405 }
+1 -1
mm/vmscan.c
··· 1282 1282 */ 1283 1283 if (priority < 0) 1284 1284 priority = 0; 1285 - for (i = 0; zones[i] != 0; i++) { 1285 + for (i = 0; zones[i] != NULL; i++) { 1286 1286 struct zone *zone = zones[i]; 1287 1287 1288 1288 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+1 -1
security/selinux/xfrm.c
··· 448 448 if (dst) { 449 449 struct dst_entry *dst_test; 450 450 451 - for (dst_test = dst; dst_test != 0; 451 + for (dst_test = dst; dst_test != NULL; 452 452 dst_test = dst_test->child) { 453 453 struct xfrm_state *x = dst_test->xfrm; 454 454