Merge branch 'master' of /home/trondmy/repositories/git/linux-2.6/

+650 -672
+80 -47
Documentation/DocBook/kernel-locking.tmpl
··· 551 551 <function>spin_lock_irqsave()</function>, which is a superset 552 552 of all other spinlock primitives. 553 553 </para> 554 + 554 555 <table> 555 556 <title>Table of Locking Requirements</title> 556 557 <tgroup cols="11"> 557 558 <tbody> 559 + 558 560 <row> 559 561 <entry></entry> 560 562 <entry>IRQ Handler A</entry> ··· 578 576 579 577 <row> 580 578 <entry>IRQ Handler B</entry> 581 - <entry>spin_lock_irqsave</entry> 579 + <entry>SLIS</entry> 582 580 <entry>None</entry> 583 581 </row> 584 582 585 583 <row> 586 584 <entry>Softirq A</entry> 587 - <entry>spin_lock_irq</entry> 588 - <entry>spin_lock_irq</entry> 589 - <entry>spin_lock</entry> 585 + <entry>SLI</entry> 586 + <entry>SLI</entry> 587 + <entry>SL</entry> 590 588 </row> 591 589 592 590 <row> 593 591 <entry>Softirq B</entry> 594 - <entry>spin_lock_irq</entry> 595 - <entry>spin_lock_irq</entry> 596 - <entry>spin_lock</entry> 597 - <entry>spin_lock</entry> 592 + <entry>SLI</entry> 593 + <entry>SLI</entry> 594 + <entry>SL</entry> 595 + <entry>SL</entry> 598 596 </row> 599 597 600 598 <row> 601 599 <entry>Tasklet A</entry> 602 - <entry>spin_lock_irq</entry> 603 - <entry>spin_lock_irq</entry> 604 - <entry>spin_lock</entry> 605 - <entry>spin_lock</entry> 600 + <entry>SLI</entry> 601 + <entry>SLI</entry> 602 + <entry>SL</entry> 603 + <entry>SL</entry> 606 604 <entry>None</entry> 607 605 </row> 608 606 609 607 <row> 610 608 <entry>Tasklet B</entry> 611 - <entry>spin_lock_irq</entry> 612 - <entry>spin_lock_irq</entry> 613 - <entry>spin_lock</entry> 614 - <entry>spin_lock</entry> 615 - <entry>spin_lock</entry> 609 + <entry>SLI</entry> 610 + <entry>SLI</entry> 611 + <entry>SL</entry> 612 + <entry>SL</entry> 613 + <entry>SL</entry> 616 614 <entry>None</entry> 617 615 </row> 618 616 619 617 <row> 620 618 <entry>Timer A</entry> 621 - <entry>spin_lock_irq</entry> 622 - <entry>spin_lock_irq</entry> 623 - <entry>spin_lock</entry> 624 - <entry>spin_lock</entry> 625 - <entry>spin_lock</entry> 626 - <entry>spin_lock</entry> 619 + <entry>SLI</entry> 620 + <entry>SLI</entry> 621 + <entry>SL</entry> 622 + <entry>SL</entry> 623 + <entry>SL</entry> 624 + <entry>SL</entry> 627 625 <entry>None</entry> 628 626 </row> 629 627 630 628 <row> 631 629 <entry>Timer B</entry> 632 - <entry>spin_lock_irq</entry> 633 - <entry>spin_lock_irq</entry> 634 - <entry>spin_lock</entry> 635 - <entry>spin_lock</entry> 636 - <entry>spin_lock</entry> 637 - <entry>spin_lock</entry> 638 - <entry>spin_lock</entry> 630 + <entry>SLI</entry> 631 + <entry>SLI</entry> 632 + <entry>SL</entry> 633 + <entry>SL</entry> 634 + <entry>SL</entry> 635 + <entry>SL</entry> 636 + <entry>SL</entry> 639 637 <entry>None</entry> 640 638 </row> 641 639 642 640 <row> 643 641 <entry>User Context A</entry> 644 - <entry>spin_lock_irq</entry> 645 - <entry>spin_lock_irq</entry> 646 - <entry>spin_lock_bh</entry> 647 - <entry>spin_lock_bh</entry> 648 - <entry>spin_lock_bh</entry> 649 - <entry>spin_lock_bh</entry> 650 - <entry>spin_lock_bh</entry> 651 - <entry>spin_lock_bh</entry> 642 + <entry>SLI</entry> 643 + <entry>SLI</entry> 644 + <entry>SLBH</entry> 645 + <entry>SLBH</entry> 646 + <entry>SLBH</entry> 647 + <entry>SLBH</entry> 648 + <entry>SLBH</entry> 649 + <entry>SLBH</entry> 652 650 <entry>None</entry> 653 651 </row> 654 652 655 653 <row> 656 654 <entry>User Context B</entry> 657 - <entry>spin_lock_irq</entry> 658 - <entry>spin_lock_irq</entry> 659 - <entry>spin_lock_bh</entry> 660 - <entry>spin_lock_bh</entry> 661 - <entry>spin_lock_bh</entry> 662 - <entry>spin_lock_bh</entry> 663 - 
<entry>spin_lock_bh</entry> 664 - <entry>spin_lock_bh</entry> 665 - <entry>down_interruptible</entry> 655 + <entry>SLI</entry> 656 + <entry>SLI</entry> 657 + <entry>SLBH</entry> 658 + <entry>SLBH</entry> 659 + <entry>SLBH</entry> 660 + <entry>SLBH</entry> 661 + <entry>SLBH</entry> 662 + <entry>SLBH</entry> 663 + <entry>DI</entry> 666 664 <entry>None</entry> 667 665 </row> 668 666 669 667 </tbody> 670 668 </tgroup> 671 669 </table> 670 + 671 + <table> 672 + <title>Legend for Locking Requirements Table</title> 673 + <tgroup cols="2"> 674 + <tbody> 675 + 676 + <row> 677 + <entry>SLIS</entry> 678 + <entry>spin_lock_irqsave</entry> 679 + </row> 680 + <row> 681 + <entry>SLI</entry> 682 + <entry>spin_lock_irq</entry> 683 + </row> 684 + <row> 685 + <entry>SL</entry> 686 + <entry>spin_lock</entry> 687 + </row> 688 + <row> 689 + <entry>SLBH</entry> 690 + <entry>spin_lock_bh</entry> 691 + </row> 692 + <row> 693 + <entry>DI</entry> 694 + <entry>down_interruptible</entry> 695 + </row> 696 + 697 + </tbody> 698 + </tgroup> 699 + </table> 700 + 672 701 </sect1> 673 702 </chapter> 674 703
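As a quick illustration of what the table and its new legend encode: for data shared between user context and an IRQ handler, the table prescribes SLI (spin_lock_irq) on the user-context side, with plain spin_lock() inside the handler, and spin_lock_irqsave() remaining the always-safe superset mentioned above. A minimal sketch with a hypothetical lock and counter:

    #include <linux/spinlock.h>
    #include <linux/interrupt.h>

    static DEFINE_SPINLOCK(demo_lock);   /* hypothetical lock */
    static int demo_count;               /* hypothetical shared data */

    /* User context: IRQs are known to be enabled here, so SLI suffices. */
    void demo_user_path(void)
    {
            spin_lock_irq(&demo_lock);
            demo_count++;
            spin_unlock_irq(&demo_lock);
    }

    /* IRQ handler: its own line is masked while it runs, so a plain
     * spin_lock() protects against the user-context path on another CPU. */
    irqreturn_t demo_irq_handler(int irq, void *dev_id)
    {
            spin_lock(&demo_lock);
            demo_count++;
            spin_unlock(&demo_lock);
            return IRQ_HANDLED;
    }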
+6 -2
Documentation/gpio.txt
··· 111 111 112 112 The return value is zero for success, else a negative errno. It should 113 113 be checked, since the get/set calls don't have error returns and since 114 - misconfiguration is possible. (These calls could sleep.) 114 + misconfiguration is possible. You should normally issue these calls from 115 + a task context. However, for spinlock-safe GPIOs it's OK to use them 116 + before tasking is enabled, as part of early board setup. 115 117 116 118 For output GPIOs, the value provided becomes the initial output value. 117 119 This helps avoid signal glitching during system startup. ··· 199 197 200 198 Passing invalid GPIO numbers to gpio_request() will fail, as will requesting 201 199 GPIOs that have already been claimed with that call. The return value of 202 - gpio_request() must be checked. (These calls could sleep.) 200 + gpio_request() must be checked. You should normally issue these calls from 201 + a task context. However, for spinlock-safe GPIOs it's OK to request GPIOs 202 + before tasking is enabled, as part of early board setup. 203 203 204 204 These calls serve two basic purposes. One is marking the signals which 205 205 are actually in use as GPIOs, for better diagnostics; systems may have
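A minimal sketch of the early-board-setup case the reworded text allows, using only the gpio_request()/gpio_direction_output() calls documented here; the GPIO number and label are hypothetical and the GPIO is assumed to be spinlock-safe:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <asm/gpio.h>

    #define STATUS_LED_GPIO 42              /* hypothetical, board-specific */

    static void __init board_claim_status_led(void)
    {
            /* the return value must be checked, as noted above */
            if (gpio_request(STATUS_LED_GPIO, "status-led") < 0) {
                    printk(KERN_ERR "status-led GPIO unavailable\n");
                    return;
            }
            /* providing the initial value here avoids signal glitching */
            gpio_direction_output(STATUS_LED_GPIO, 0);
    }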
+12 -5
Documentation/vm/slabinfo.c
··· 242 242 243 243 memset(numa, 0, MAX_NODES * sizeof(int)); 244 244 245 + if (!t) 246 + return; 247 + 245 248 while (*t == 'N') { 246 249 t++; 247 250 node = strtoul(t, &t, 10); ··· 389 386 { 390 387 if (strcmp(s->name, "*") == 0) 391 388 return; 392 - printf("\nSlabcache: %-20s Aliases: %2d Order : %2d\n", s->name, s->aliases, s->order); 389 + 390 + printf("\nSlabcache: %-20s Aliases: %2d Order : %2d Objects: %d\n", 391 + s->name, s->aliases, s->order, s->objects); 393 392 if (s->hwcache_align) 394 393 printf("** Hardware cacheline aligned\n"); 395 394 if (s->cache_dma) ··· 796 791 797 792 store_size(b1, total_size);store_size(b2, total_waste); 798 793 store_size(b3, total_waste * 100 / total_used); 799 - printf("Memory used: %6s # Loss : %6s MRatio: %6s%%\n", b1, b2, b3); 794 + printf("Memory used: %6s # Loss : %6s MRatio:%6s%%\n", b1, b2, b3); 800 795 801 796 store_size(b1, total_objects);store_size(b2, total_partobj); 802 797 store_size(b3, total_partobj * 100 / total_objects); 803 - printf("# Objects : %6s # PartObj: %6s ORatio: %6s%%\n", b1, b2, b3); 798 + printf("# Objects : %6s # PartObj: %6s ORatio:%6s%%\n", b1, b2, b3); 804 799 805 800 printf("\n"); 806 801 printf("Per Cache Average Min Max Total\n"); ··· 823 818 store_size(b1, avg_ppart);store_size(b2, min_ppart); 824 819 store_size(b3, max_ppart); 825 820 store_size(b4, total_partial * 100 / total_slabs); 826 - printf("%%PartSlab %10s%% %10s%% %10s%% %10s%%\n", 821 + printf("%%PartSlab%10s%% %10s%% %10s%% %10s%%\n", 827 822 b1, b2, b3, b4); 828 823 829 824 store_size(b1, avg_partobj);store_size(b2, min_partobj); ··· 835 830 store_size(b1, avg_ppartobj);store_size(b2, min_ppartobj); 836 831 store_size(b3, max_ppartobj); 837 832 store_size(b4, total_partobj * 100 / total_objects); 838 - printf("%% PartObj %10s%% %10s%% %10s%% %10s%%\n", 833 + printf("%% PartObj%10s%% %10s%% %10s%% %10s%%\n", 839 834 b1, b2, b3, b4); 840 835 841 836 store_size(b1, avg_size);store_size(b2, min_size); ··· 1105 1100 ops(slab); 1106 1101 else if (show_slab) 1107 1102 slabcache(slab); 1103 + else if (show_report) 1104 + report(slab); 1108 1105 } 1109 1106 } 1110 1107
+2 -2
MAINTAINERS
··· 2689 2689 S: Maintained 2690 2690 2691 2691 PARALLEL PORT SUPPORT 2692 - L: linux-parport@lists.infradead.org 2692 + L: linux-parport@lists.infradead.org (subscribers-only) 2693 2693 S: Orphan 2694 2694 2695 2695 PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES 2696 2696 P: Tim Waugh 2697 2697 M: tim@cyberelk.net 2698 - L: linux-parport@lists.infradead.org 2698 + L: linux-parport@lists.infradead.org (subscribers-only) 2699 2699 W: http://www.torque.net/linux-pp.html 2700 2700 S: Maintained 2701 2701
-8
arch/blackfin/Kconfig
··· 560 560 561 561 source "mm/Kconfig" 562 562 563 - config LARGE_ALLOCS 564 - bool "Allow allocating large blocks (> 1MB) of memory" 565 - help 566 - Allow the slab memory allocator to keep chains for very large 567 - memory sizes - upto 32MB. You may need this if your system has 568 - a lot of RAM, and you need to able to allocate very large 569 - contiguous chunks. If unsure, say N. 570 - 571 563 config BFIN_DMA_5XX 572 564 bool "Enable DMA Support" 573 565 depends on (BF533 || BF532 || BF531 || BF537 || BF536 || BF534 || BF561)
-8
arch/frv/Kconfig
··· 102 102 with a lot of RAM, this can be wasteful of precious low memory. 103 103 Setting this option will put user-space page tables in high memory. 104 104 105 - config LARGE_ALLOCS 106 - bool "Allow allocating large blocks (> 1MB) of memory" 107 - help 108 - Allow the slab memory allocator to keep chains for very large memory 109 - sizes - up to 32MB. You may need this if your system has a lot of 110 - RAM, and you need to able to allocate very large contiguous chunks. 111 - If unsure, say N. 112 - 113 105 source "mm/Kconfig" 114 106 115 107 choice
+1 -1
arch/i386/kernel/cpu/mtrr/generic.c
··· 78 78 } 79 79 80 80 /* Grab all of the MTRR state for this CPU into *state */ 81 - void __init get_mtrr_state(void) 81 + void get_mtrr_state(void) 82 82 { 83 83 unsigned int i; 84 84 struct mtrr_var_range *vrs;
+1 -1
arch/i386/kernel/cpu/mtrr/main.c
··· 639 639 * initialized (i.e. before smp_init()). 640 640 * 641 641 */ 642 - void __init mtrr_bp_init(void) 642 + void mtrr_bp_init(void) 643 643 { 644 644 init_ifs(); 645 645
+1 -1
arch/i386/kernel/smp.c
··· 421 421 } 422 422 if (!cpus_empty(cpu_mask)) 423 423 flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); 424 - check_pgt_cache(); 424 + 425 425 preempt_enable(); 426 426 } 427 427
-8
arch/m68knommu/Kconfig
··· 470 470 default y 471 471 depends on (AVNET5282) 472 472 473 - config LARGE_ALLOCS 474 - bool "Allow allocating large blocks (> 1MB) of memory" 475 - help 476 - Allow the slab memory allocator to keep chains for very large 477 - memory sizes - upto 32MB. You may need this if your system has 478 - a lot of RAM, and you need to able to allocate very large 479 - contiguous chunks. If unsure, say N. 480 - 481 473 config 4KSTACKS 482 474 bool "Use 4Kb for kernel stacks instead of 8Kb" 483 475 default y
+1 -3
arch/powerpc/platforms/cell/spufs/inode.c
··· 71 71 { 72 72 struct spufs_inode_info *ei = p; 73 73 74 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 75 - inode_init_once(&ei->vfs_inode); 76 - } 74 + inode_init_once(&ei->vfs_inode); 77 75 } 78 76 79 77 static struct inode *
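This is the first of many identical simplifications in this merge: with the SLAB_CTOR_CONSTRUCTOR flag gone, a slab constructor is only ever invoked as a constructor and may initialize unconditionally. A sketch of the resulting pattern, with hypothetical names, against the six-argument kmem_cache_create() still used in this tree (the dtor argument must now be NULL; see the mm/slab.c hunk at the end):

    #include <linux/errno.h>
    #include <linux/fs.h>
    #include <linux/init.h>
    #include <linux/slab.h>

    struct foo_inode_info {                 /* hypothetical per-fs inode */
            struct inode vfs_inode;
    };

    static struct kmem_cache *foo_inode_cachep;

    static void foo_init_once(void *obj, struct kmem_cache *cachep,
                              unsigned long flags)
    {
            struct foo_inode_info *fi = obj;

            /* no SLAB_CTOR_CONSTRUCTOR test any more: always initialize */
            inode_init_once(&fi->vfs_inode);
    }

    static int __init foo_init_inodecache(void)
    {
            foo_inode_cachep = kmem_cache_create("foo_inode_cache",
                                    sizeof(struct foo_inode_info), 0,
                                    SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
                                    foo_init_once, NULL);
            return foo_inode_cachep ? 0 : -ENOMEM;
    }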
-8
arch/v850/Kconfig
··· 240 240 config RESET_GUARD 241 241 bool "Reset Guard" 242 242 243 - config LARGE_ALLOCS 244 - bool "Allow allocating large blocks (> 1MB) of memory" 245 - help 246 - Allow the slab memory allocator to keep chains for very large 247 - memory sizes - upto 32MB. You may need this if your system has 248 - a lot of RAM, and you need to able to allocate very large 249 - contiguous chunks. If unsure, say N. 250 - 251 243 source "mm/Kconfig" 252 244 253 245 endmenu
+4 -4
drivers/acpi/numa.c
··· 40 40 #define NID_INVAL -1 41 41 42 42 /* maps to convert between proximity domain and logical node ID */ 43 - int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS] 43 + static int pxm_to_node_map[MAX_PXM_DOMAINS] 44 44 = { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL }; 45 - int __cpuinitdata node_to_pxm_map[MAX_NUMNODES] 45 + static int node_to_pxm_map[MAX_NUMNODES] 46 46 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL }; 47 47 48 - int __cpuinit pxm_to_node(int pxm) 48 + int pxm_to_node(int pxm) 49 49 { 50 50 if (pxm < 0) 51 51 return NID_INVAL; 52 52 return pxm_to_node_map[pxm]; 53 53 } 54 54 55 - int __cpuinit node_to_pxm(int node) 55 + int node_to_pxm(int node) 56 56 { 57 57 if (node < 0) 58 58 return PXM_INVAL;
-3
drivers/mtd/ubi/eba.c
··· 940 940 { 941 941 struct ltree_entry *le = obj; 942 942 943 - if (flags & SLAB_CTOR_CONSTRUCTOR) 944 - return; 945 - 946 943 le->users = 0; 947 944 init_rwsem(&le->mutex); 948 945 }
+3 -3
drivers/rtc/Kconfig
··· 59 59 depends on RTC_CLASS 60 60 61 61 config RTC_INTF_SYSFS 62 - boolean "sysfs" 62 + boolean "/sys/class/rtc/rtcN (sysfs)" 63 63 depends on RTC_CLASS && SYSFS 64 64 default RTC_CLASS 65 65 help ··· 70 70 will be called rtc-sysfs. 71 71 72 72 config RTC_INTF_PROC 73 - boolean "proc" 73 + boolean "/proc/driver/rtc (procfs for rtc0)" 74 74 depends on RTC_CLASS && PROC_FS 75 75 default RTC_CLASS 76 76 help ··· 82 82 will be called rtc-proc. 83 83 84 84 config RTC_INTF_DEV 85 - boolean "dev" 85 + boolean "/dev/rtcN (character devices)" 86 86 depends on RTC_CLASS 87 87 default RTC_CLASS 88 88 help
+2 -2
drivers/rtc/rtc-omap.c
··· 371 371 goto fail; 372 372 } 373 373 platform_set_drvdata(pdev, rtc); 374 - dev_set_devdata(&rtc->dev, mem); 374 + dev_set_drvdata(&rtc->dev, mem); 375 375 376 376 /* clear pending irqs, and set 1/second periodic, 377 377 * which we'll use instead of update irqs ··· 453 453 free_irq(omap_rtc_timer, rtc); 454 454 free_irq(omap_rtc_alarm, rtc); 455 455 456 - release_resource(dev_get_devdata(&rtc->dev)); 456 + release_resource(dev_get_drvdata(&rtc->dev)); 457 457 rtc_device_unregister(rtc); 458 458 return 0; 459 459 }
+18 -3
drivers/serial/8250.c
··· 894 894 quot = serial_dl_read(up); 895 895 quot <<= 3; 896 896 897 - status1 = serial_in(up, 0x04); /* EXCR1 */ 897 + status1 = serial_in(up, 0x04); /* EXCR2 */ 898 898 status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ 899 899 status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ 900 900 serial_outp(up, 0x04, status1); ··· 2617 2617 */ 2618 2618 void serial8250_resume_port(int line) 2619 2619 { 2620 - uart_resume_port(&serial8250_reg, &serial8250_ports[line].port); 2620 + struct uart_8250_port *up = &serial8250_ports[line]; 2621 + 2622 + if (up->capabilities & UART_NATSEMI) { 2623 + unsigned char tmp; 2624 + 2625 + /* Ensure it's still in high speed mode */ 2626 + serial_outp(up, UART_LCR, 0xE0); 2627 + 2628 + tmp = serial_in(up, 0x04); /* EXCR2 */ 2629 + tmp &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ 2630 + tmp |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ 2631 + serial_outp(up, 0x04, tmp); 2632 + 2633 + serial_outp(up, UART_LCR, 0); 2634 + } 2635 + uart_resume_port(&serial8250_reg, &up->port); 2621 2636 } 2622 2637 2623 2638 /* ··· 2709 2694 struct uart_8250_port *up = &serial8250_ports[i]; 2710 2695 2711 2696 if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev) 2712 - uart_resume_port(&serial8250_reg, &up->port); 2697 + serial8250_resume_port(i); 2713 2698 } 2714 2699 2715 2700 return 0;
+31 -24
drivers/serial/icom.c
··· 69 69 70 70 static const struct pci_device_id icom_pci_table[] = { 71 71 { 72 - .vendor = PCI_VENDOR_ID_IBM, 73 - .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1, 74 - .subvendor = PCI_ANY_ID, 75 - .subdevice = PCI_ANY_ID, 76 - .driver_data = ADAPTER_V1, 77 - }, 72 + .vendor = PCI_VENDOR_ID_IBM, 73 + .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1, 74 + .subvendor = PCI_ANY_ID, 75 + .subdevice = PCI_ANY_ID, 76 + .driver_data = ADAPTER_V1, 77 + }, 78 78 { 79 - .vendor = PCI_VENDOR_ID_IBM, 80 - .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, 81 - .subvendor = PCI_VENDOR_ID_IBM, 82 - .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX, 83 - .driver_data = ADAPTER_V2, 84 - }, 79 + .vendor = PCI_VENDOR_ID_IBM, 80 + .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, 81 + .subvendor = PCI_VENDOR_ID_IBM, 82 + .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX, 83 + .driver_data = ADAPTER_V2, 84 + }, 85 85 { 86 - .vendor = PCI_VENDOR_ID_IBM, 87 - .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, 88 - .subvendor = PCI_VENDOR_ID_IBM, 89 - .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM, 90 - .driver_data = ADAPTER_V2, 91 - }, 86 + .vendor = PCI_VENDOR_ID_IBM, 87 + .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, 88 + .subvendor = PCI_VENDOR_ID_IBM, 89 + .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM, 90 + .driver_data = ADAPTER_V2, 91 + }, 92 92 { 93 - .vendor = PCI_VENDOR_ID_IBM, 94 - .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, 95 - .subvendor = PCI_VENDOR_ID_IBM, 96 - .subdevice = PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL, 97 - .driver_data = ADAPTER_V2, 98 - }, 93 + .vendor = PCI_VENDOR_ID_IBM, 94 + .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, 95 + .subvendor = PCI_VENDOR_ID_IBM, 96 + .subdevice = PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL, 97 + .driver_data = ADAPTER_V2, 98 + }, 99 + { 100 + .vendor = PCI_VENDOR_ID_IBM, 101 + .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, 102 + .subvendor = PCI_VENDOR_ID_IBM, 103 + .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE, 104 + .driver_data = ADAPTER_V2, 105 + }, 99 106 {} 100 107 }; 101 108
+7 -2
drivers/video/console/vgacon.c
··· 368 368 #endif 369 369 } 370 370 371 + /* SCREEN_INFO initialized? */ 372 + if ((ORIG_VIDEO_MODE == 0) && 373 + (ORIG_VIDEO_LINES == 0) && 374 + (ORIG_VIDEO_COLS == 0)) 375 + goto no_vga; 376 + 371 377 /* VGA16 modes are not handled by VGACON */ 372 - if ((ORIG_VIDEO_MODE == 0x00) || /* SCREEN_INFO not initialized */ 373 - (ORIG_VIDEO_MODE == 0x0D) || /* 320x200/4 */ 378 + if ((ORIG_VIDEO_MODE == 0x0D) || /* 320x200/4 */ 374 379 (ORIG_VIDEO_MODE == 0x0E) || /* 640x200/4 */ 375 380 (ORIG_VIDEO_MODE == 0x10) || /* 640x350/4 */ 376 381 (ORIG_VIDEO_MODE == 0x12) || /* 640x480/4 */
+1 -2
fs/adfs/super.c
··· 232 232 { 233 233 struct adfs_inode_info *ei = (struct adfs_inode_info *) foo; 234 234 235 - if (flags & SLAB_CTOR_CONSTRUCTOR) 236 - inode_init_once(&ei->vfs_inode); 235 + inode_init_once(&ei->vfs_inode); 237 236 } 238 237 239 238 static int init_inodecache(void)
+3 -5
fs/affs/super.c
··· 87 87 { 88 88 struct affs_inode_info *ei = (struct affs_inode_info *) foo; 89 89 90 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 91 - init_MUTEX(&ei->i_link_lock); 92 - init_MUTEX(&ei->i_ext_lock); 93 - inode_init_once(&ei->vfs_inode); 94 - } 90 + init_MUTEX(&ei->i_link_lock); 91 + init_MUTEX(&ei->i_ext_lock); 92 + inode_init_once(&ei->vfs_inode); 95 93 } 96 94 97 95 static int init_inodecache(void)
+9 -11
fs/afs/super.c
··· 451 451 { 452 452 struct afs_vnode *vnode = _vnode; 453 453 454 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 455 - memset(vnode, 0, sizeof(*vnode)); 456 - inode_init_once(&vnode->vfs_inode); 457 - init_waitqueue_head(&vnode->update_waitq); 458 - mutex_init(&vnode->permits_lock); 459 - mutex_init(&vnode->validate_lock); 460 - spin_lock_init(&vnode->writeback_lock); 461 - spin_lock_init(&vnode->lock); 462 - INIT_LIST_HEAD(&vnode->writebacks); 463 - INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work); 464 - } 454 + memset(vnode, 0, sizeof(*vnode)); 455 + inode_init_once(&vnode->vfs_inode); 456 + init_waitqueue_head(&vnode->update_waitq); 457 + mutex_init(&vnode->permits_lock); 458 + mutex_init(&vnode->validate_lock); 459 + spin_lock_init(&vnode->writeback_lock); 460 + spin_lock_init(&vnode->lock); 461 + INIT_LIST_HEAD(&vnode->writebacks); 462 + INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work); 465 463 } 466 464 467 465 /*
+2 -4
fs/befs/linuxvfs.c
··· 292 292 static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 293 293 { 294 294 struct befs_inode_info *bi = (struct befs_inode_info *) foo; 295 - 296 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 297 - inode_init_once(&bi->vfs_inode); 298 - } 295 + 296 + inode_init_once(&bi->vfs_inode); 299 297 } 300 298 301 299 static void
+1 -2
fs/bfs/inode.c
··· 248 248 { 249 249 struct bfs_inode_info *bi = foo; 250 250 251 - if (flags & SLAB_CTOR_CONSTRUCTOR) 252 - inode_init_once(&bi->vfs_inode); 251 + inode_init_once(&bi->vfs_inode); 253 252 } 254 253 255 254 static int init_inodecache(void)
+7 -9
fs/block_dev.c
··· 458 458 struct bdev_inode *ei = (struct bdev_inode *) foo; 459 459 struct block_device *bdev = &ei->bdev; 460 460 461 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 462 - memset(bdev, 0, sizeof(*bdev)); 463 - mutex_init(&bdev->bd_mutex); 464 - sema_init(&bdev->bd_mount_sem, 1); 465 - INIT_LIST_HEAD(&bdev->bd_inodes); 466 - INIT_LIST_HEAD(&bdev->bd_list); 461 + memset(bdev, 0, sizeof(*bdev)); 462 + mutex_init(&bdev->bd_mutex); 463 + sema_init(&bdev->bd_mount_sem, 1); 464 + INIT_LIST_HEAD(&bdev->bd_inodes); 465 + INIT_LIST_HEAD(&bdev->bd_list); 467 466 #ifdef CONFIG_SYSFS 468 - INIT_LIST_HEAD(&bdev->bd_holder_list); 467 + INIT_LIST_HEAD(&bdev->bd_holder_list); 469 468 #endif 470 - inode_init_once(&ei->vfs_inode); 471 - } 469 + inode_init_once(&ei->vfs_inode); 472 470 } 473 471 474 472 static inline void __bd_forget(struct inode *inode)
+6 -19
fs/buffer.c
··· 981 981 struct page *page; 982 982 struct buffer_head *bh; 983 983 984 - page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); 984 + page = find_or_create_page(inode->i_mapping, index, 985 + mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); 985 986 if (!page) 986 987 return NULL; 987 988 ··· 2899 2898 2900 2899 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 2901 2900 { 2902 - struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); 2901 + struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); 2903 2902 if (ret) { 2903 + INIT_LIST_HEAD(&ret->b_assoc_buffers); 2904 2904 get_cpu_var(bh_accounting).nr++; 2905 2905 recalc_bh_state(); 2906 2906 put_cpu_var(bh_accounting); ··· 2919 2917 put_cpu_var(bh_accounting); 2920 2918 } 2921 2919 EXPORT_SYMBOL(free_buffer_head); 2922 - 2923 - static void 2924 - init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags) 2925 - { 2926 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 2927 - struct buffer_head * bh = (struct buffer_head *)data; 2928 - 2929 - memset(bh, 0, sizeof(*bh)); 2930 - INIT_LIST_HEAD(&bh->b_assoc_buffers); 2931 - } 2932 - } 2933 2920 2934 2921 static void buffer_exit_cpu(int cpu) 2935 2922 { ··· 2946 2955 { 2947 2956 int nrpages; 2948 2957 2949 - bh_cachep = kmem_cache_create("buffer_head", 2950 - sizeof(struct buffer_head), 0, 2951 - (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| 2952 - SLAB_MEM_SPREAD), 2953 - init_buffer_head, 2954 - NULL); 2958 + bh_cachep = KMEM_CACHE(buffer_head, 2959 + SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 2955 2960 2956 2961 /* 2957 2962 * Limit the bh occupancy to 10% of ZONE_NORMAL
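For reference, the KMEM_CACHE() helper introduced here expands, roughly per linux/slab.h in this tree, to a kmem_cache_create() keyed on the struct name, so the replaced open-coded call becomes approximately:

    bh_cachep = kmem_cache_create("buffer_head",
                                  sizeof(struct buffer_head),
                                  __alignof__(struct buffer_head),
                                  SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | SLAB_MEM_SPREAD,
                                  NULL, NULL);
    /* no constructor any more: kmem_cache_zalloc() zeroes each buffer_head
     * and alloc_buffer_head() sets up b_assoc_buffers explicitly */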
+2 -4
fs/cifs/cifsfs.c
··· 701 701 { 702 702 struct cifsInodeInfo *cifsi = inode; 703 703 704 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 705 - inode_init_once(&cifsi->vfs_inode); 706 - INIT_LIST_HEAD(&cifsi->lockList); 707 - } 704 + inode_init_once(&cifsi->vfs_inode); 705 + INIT_LIST_HEAD(&cifsi->lockList); 708 706 } 709 707 710 708 static int
+1 -2
fs/coda/inode.c
··· 62 62 { 63 63 struct coda_inode_info *ei = (struct coda_inode_info *) foo; 64 64 65 - if (flags & SLAB_CTOR_CONSTRUCTOR) 66 - inode_init_once(&ei->vfs_inode); 65 + inode_init_once(&ei->vfs_inode); 67 66 } 68 67 69 68 int coda_init_inodecache(void)
+4 -9
fs/compat.c
··· 2230 2230 asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags, 2231 2231 const struct compat_itimerspec __user *utmr) 2232 2232 { 2233 - long res; 2234 2233 struct itimerspec t; 2235 2234 struct itimerspec __user *ut; 2236 2235 2237 - res = -EFAULT; 2238 2236 if (get_compat_itimerspec(&t, utmr)) 2239 - goto err_exit; 2237 + return -EFAULT; 2240 2238 ut = compat_alloc_user_space(sizeof(*ut)); 2241 - if (copy_to_user(ut, &t, sizeof(t)) ) 2242 - goto err_exit; 2239 + if (copy_to_user(ut, &t, sizeof(t))) 2240 + return -EFAULT; 2243 2241 2244 - res = sys_timerfd(ufd, clockid, flags, ut); 2245 - err_exit: 2246 - return res; 2242 + return sys_timerfd(ufd, clockid, flags, ut); 2247 2243 } 2248 2244 2249 2245 #endif /* CONFIG_TIMERFD */ 2250 -
+1 -1
fs/dquot.c
··· 1421 1421 /* If quota was reenabled in the meantime, we have 1422 1422 * nothing to do */ 1423 1423 if (!sb_has_quota_enabled(sb, cnt)) { 1424 - mutex_lock(&toputinode[cnt]->i_mutex); 1424 + mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA); 1425 1425 toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | 1426 1426 S_NOATIME | S_NOQUOTA); 1427 1427 truncate_inode_pages(&toputinode[cnt]->i_data, 0);
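I_MUTEX_QUOTA is one of the i_mutex lockdep subclasses defined in linux/fs.h; the annotation tells lockdep that taking a quota file's i_mutex here is a distinct nesting class rather than a recursion on an ordinary inode mutex. The fs/quota.c hunk below uses the same annotation. A sketch of the pattern, with a hypothetical quota_inode:

    mutex_lock_nested(&quota_inode->i_mutex, I_MUTEX_QUOTA);
    truncate_inode_pages(&quota_inode->i_data, 0);
    mutex_unlock(&quota_inode->i_mutex);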
+1 -2
fs/ecryptfs/main.c
··· 583 583 { 584 584 struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr; 585 585 586 - if (flags & SLAB_CTOR_CONSTRUCTOR) 587 - inode_init_once(&ei->vfs_inode); 586 + inode_init_once(&ei->vfs_inode); 588 587 } 589 588 590 589 static struct ecryptfs_cache_info {
+3 -11
fs/ecryptfs/mmap.c
··· 364 364 { 365 365 struct inode *inode = page->mapping->host; 366 366 int end_byte_in_page; 367 - char *page_virt; 368 367 369 368 if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index) 370 369 goto out; 371 370 end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE; 372 371 if (to > end_byte_in_page) 373 372 end_byte_in_page = to; 374 - page_virt = kmap_atomic(page, KM_USER0); 375 - memset((page_virt + end_byte_in_page), 0, 376 - (PAGE_CACHE_SIZE - end_byte_in_page)); 377 - kunmap_atomic(page_virt, KM_USER0); 378 - flush_dcache_page(page); 373 + zero_user_page(page, end_byte_in_page, 374 + PAGE_CACHE_SIZE - end_byte_in_page, KM_USER0); 379 375 out: 380 376 return 0; 381 377 } ··· 736 740 { 737 741 int rc = 0; 738 742 struct page *tmp_page; 739 - char *tmp_page_virt; 740 743 741 744 tmp_page = ecryptfs_get1page(file, index); 742 745 if (IS_ERR(tmp_page)) { ··· 752 757 page_cache_release(tmp_page); 753 758 goto out; 754 759 } 755 - tmp_page_virt = kmap_atomic(tmp_page, KM_USER0); 756 - memset(((char *)tmp_page_virt + start), 0, num_zeros); 757 - kunmap_atomic(tmp_page_virt, KM_USER0); 758 - flush_dcache_page(tmp_page); 760 + zero_user_page(tmp_page, start, num_zeros, KM_USER0); 759 761 rc = ecryptfs_commit_write(file, tmp_page, start, start + num_zeros); 760 762 if (rc < 0) { 761 763 ecryptfs_printk(KERN_ERR, "Error attempting to write zero's "
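zero_user_page() bundles the open-coded sequence removed above; its effect is roughly:

    /* zero_user_page(page, offset, len, KM_USER0) is approximately: */
    char *kaddr = kmap_atomic(page, KM_USER0);
    memset(kaddr + offset, 0, len);
    kunmap_atomic(kaddr, KM_USER0);
    flush_dcache_page(page);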
+1 -2
fs/efs/super.c
··· 72 72 { 73 73 struct efs_inode_info *ei = (struct efs_inode_info *) foo; 74 74 75 - if (flags & SLAB_CTOR_CONSTRUCTOR) 76 - inode_init_once(&ei->vfs_inode); 75 + inode_init_once(&ei->vfs_inode); 77 76 } 78 77 79 78 static int init_inodecache(void)
+1 -3
fs/exec.c
··· 60 60 #endif 61 61 62 62 int core_uses_pid; 63 - char core_pattern[128] = "core"; 63 + char core_pattern[CORENAME_MAX_SIZE] = "core"; 64 64 int suid_dumpable = 0; 65 65 66 66 EXPORT_SYMBOL(suid_dumpable); ··· 1263 1263 } 1264 1264 1265 1265 EXPORT_SYMBOL(set_binfmt); 1266 - 1267 - #define CORENAME_MAX_SIZE 64 1268 1266 1269 1267 /* format_corename will inspect the pattern parameter, and output a 1270 1268 * name into corename, which must have space for at least
+3 -5
fs/ext2/super.c
··· 160 160 { 161 161 struct ext2_inode_info *ei = (struct ext2_inode_info *) foo; 162 162 163 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 164 - rwlock_init(&ei->i_meta_lock); 163 + rwlock_init(&ei->i_meta_lock); 165 164 #ifdef CONFIG_EXT2_FS_XATTR 166 - init_rwsem(&ei->xattr_sem); 165 + init_rwsem(&ei->xattr_sem); 167 166 #endif 168 - inode_init_once(&ei->vfs_inode); 169 - } 167 + inode_init_once(&ei->vfs_inode); 170 168 } 171 169 172 170 static int init_inodecache(void)
+4 -6
fs/ext3/super.c
··· 466 466 { 467 467 struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; 468 468 469 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 470 - INIT_LIST_HEAD(&ei->i_orphan); 469 + INIT_LIST_HEAD(&ei->i_orphan); 471 470 #ifdef CONFIG_EXT3_FS_XATTR 472 - init_rwsem(&ei->xattr_sem); 471 + init_rwsem(&ei->xattr_sem); 473 472 #endif 474 - mutex_init(&ei->truncate_mutex); 475 - inode_init_once(&ei->vfs_inode); 476 - } 473 + mutex_init(&ei->truncate_mutex); 474 + inode_init_once(&ei->vfs_inode); 477 475 } 478 476 479 477 static int init_inodecache(void)
+4 -6
fs/ext4/super.c
··· 517 517 { 518 518 struct ext4_inode_info *ei = (struct ext4_inode_info *) foo; 519 519 520 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 521 - INIT_LIST_HEAD(&ei->i_orphan); 520 + INIT_LIST_HEAD(&ei->i_orphan); 522 521 #ifdef CONFIG_EXT4DEV_FS_XATTR 523 - init_rwsem(&ei->xattr_sem); 522 + init_rwsem(&ei->xattr_sem); 524 523 #endif 525 - mutex_init(&ei->truncate_mutex); 526 - inode_init_once(&ei->vfs_inode); 527 - } 524 + mutex_init(&ei->truncate_mutex); 525 + inode_init_once(&ei->vfs_inode); 528 526 } 529 527 530 528 static int init_inodecache(void)
+1 -2
fs/fat/cache.c
··· 40 40 { 41 41 struct fat_cache *cache = (struct fat_cache *)foo; 42 42 43 - if (flags & SLAB_CTOR_CONSTRUCTOR) 44 - INIT_LIST_HEAD(&cache->cache_list); 43 + INIT_LIST_HEAD(&cache->cache_list); 45 44 } 46 45 47 46 int __init fat_cache_init(void)
+6 -8
fs/fat/inode.c
··· 500 500 { 501 501 struct msdos_inode_info *ei = (struct msdos_inode_info *)foo; 502 502 503 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 504 - spin_lock_init(&ei->cache_lru_lock); 505 - ei->nr_caches = 0; 506 - ei->cache_valid_id = FAT_CACHE_VALID + 1; 507 - INIT_LIST_HEAD(&ei->cache_lru); 508 - INIT_HLIST_NODE(&ei->i_fat_hash); 509 - inode_init_once(&ei->vfs_inode); 510 - } 503 + spin_lock_init(&ei->cache_lru_lock); 504 + ei->nr_caches = 0; 505 + ei->cache_valid_id = FAT_CACHE_VALID + 1; 506 + INIT_LIST_HEAD(&ei->cache_lru); 507 + INIT_HLIST_NODE(&ei->i_fat_hash); 508 + inode_init_once(&ei->vfs_inode); 511 509 } 512 510 513 511 static int __init fat_init_inodecache(void)
+1 -2
fs/fuse/inode.c
··· 687 687 { 688 688 struct inode * inode = foo; 689 689 690 - if (flags & SLAB_CTOR_CONSTRUCTOR) 691 - inode_init_once(inode); 690 + inode_init_once(inode); 692 691 } 693 692 694 693 static int __init fuse_fs_init(void)
+16 -18
fs/gfs2/main.c
··· 27 27 static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags) 28 28 { 29 29 struct gfs2_inode *ip = foo; 30 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 31 - inode_init_once(&ip->i_inode); 32 - spin_lock_init(&ip->i_spin); 33 - init_rwsem(&ip->i_rw_mutex); 34 - memset(ip->i_cache, 0, sizeof(ip->i_cache)); 35 - } 30 + 31 + inode_init_once(&ip->i_inode); 32 + spin_lock_init(&ip->i_spin); 33 + init_rwsem(&ip->i_rw_mutex); 34 + memset(ip->i_cache, 0, sizeof(ip->i_cache)); 36 35 } 37 36 38 37 static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags) 39 38 { 40 39 struct gfs2_glock *gl = foo; 41 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 42 - INIT_HLIST_NODE(&gl->gl_list); 43 - spin_lock_init(&gl->gl_spin); 44 - INIT_LIST_HEAD(&gl->gl_holders); 45 - INIT_LIST_HEAD(&gl->gl_waiters1); 46 - INIT_LIST_HEAD(&gl->gl_waiters3); 47 - gl->gl_lvb = NULL; 48 - atomic_set(&gl->gl_lvb_count, 0); 49 - INIT_LIST_HEAD(&gl->gl_reclaim); 50 - INIT_LIST_HEAD(&gl->gl_ail_list); 51 - atomic_set(&gl->gl_ail_count, 0); 52 - } 40 + 41 + INIT_HLIST_NODE(&gl->gl_list); 42 + spin_lock_init(&gl->gl_spin); 43 + INIT_LIST_HEAD(&gl->gl_holders); 44 + INIT_LIST_HEAD(&gl->gl_waiters1); 45 + INIT_LIST_HEAD(&gl->gl_waiters3); 46 + gl->gl_lvb = NULL; 47 + atomic_set(&gl->gl_lvb_count, 0); 48 + INIT_LIST_HEAD(&gl->gl_reclaim); 49 + INIT_LIST_HEAD(&gl->gl_ail_list); 50 + atomic_set(&gl->gl_ail_count, 0); 53 51 } 54 52 55 53 /**
+1 -2
fs/hfs/super.c
··· 434 434 { 435 435 struct hfs_inode_info *i = p; 436 436 437 - if (flags & SLAB_CTOR_CONSTRUCTOR) 438 - inode_init_once(&i->vfs_inode); 437 + inode_init_once(&i->vfs_inode); 439 438 } 440 439 441 440 static int __init init_hfs_fs(void)
+1 -2
fs/hfsplus/super.c
··· 470 470 { 471 471 struct hfsplus_inode_info *i = p; 472 472 473 - if (flags & SLAB_CTOR_CONSTRUCTOR) 474 - inode_init_once(&i->vfs_inode); 473 + inode_init_once(&i->vfs_inode); 475 474 } 476 475 477 476 static int __init init_hfsplus_fs(void)
+3 -5
fs/hpfs/super.c
··· 176 176 { 177 177 struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; 178 178 179 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 180 - mutex_init(&ei->i_mutex); 181 - mutex_init(&ei->i_parent_mutex); 182 - inode_init_once(&ei->vfs_inode); 183 - } 179 + mutex_init(&ei->i_mutex); 180 + mutex_init(&ei->i_parent_mutex); 181 + inode_init_once(&ei->vfs_inode); 184 182 } 185 183 186 184 static int init_inodecache(void)
+1 -2
fs/hugetlbfs/inode.c
··· 556 556 { 557 557 struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; 558 558 559 - if (flags & SLAB_CTOR_CONSTRUCTOR) 560 - inode_init_once(&ei->vfs_inode); 559 + inode_init_once(&ei->vfs_inode); 561 560 } 562 561 563 562 const struct file_operations hugetlbfs_file_operations = {
+1 -2
fs/inode.c
··· 213 213 { 214 214 struct inode * inode = (struct inode *) foo; 215 215 216 - if (flags & SLAB_CTOR_CONSTRUCTOR) 217 - inode_init_once(inode); 216 + inode_init_once(inode); 218 217 } 219 218 220 219 /*
+1 -2
fs/isofs/inode.c
··· 77 77 { 78 78 struct iso_inode_info *ei = foo; 79 79 80 - if (flags & SLAB_CTOR_CONSTRUCTOR) 81 - inode_init_once(&ei->vfs_inode); 80 + inode_init_once(&ei->vfs_inode); 82 81 } 83 82 84 83 static int init_inodecache(void)
+2 -4
fs/jffs2/super.c
··· 47 47 { 48 48 struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo; 49 49 50 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 51 - init_MUTEX(&ei->sem); 52 - inode_init_once(&ei->vfs_inode); 53 - } 50 + init_MUTEX(&ei->sem); 51 + inode_init_once(&ei->vfs_inode); 54 52 } 55 53 56 54 static int jffs2_sync_fs(struct super_block *sb, int wait)
+8 -10
fs/jfs/jfs_metapage.c
··· 184 184 { 185 185 struct metapage *mp = (struct metapage *)foo; 186 186 187 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 188 - mp->lid = 0; 189 - mp->lsn = 0; 190 - mp->flag = 0; 191 - mp->data = NULL; 192 - mp->clsn = 0; 193 - mp->log = NULL; 194 - set_bit(META_free, &mp->flag); 195 - init_waitqueue_head(&mp->wait); 196 - } 187 + mp->lid = 0; 188 + mp->lsn = 0; 189 + mp->flag = 0; 190 + mp->data = NULL; 191 + mp->clsn = 0; 192 + mp->log = NULL; 193 + set_bit(META_free, &mp->flag); 194 + init_waitqueue_head(&mp->wait); 197 195 } 198 196 199 197 static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
+10 -12
fs/jfs/super.c
··· 752 752 { 753 753 struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; 754 754 755 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 756 - memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); 757 - INIT_LIST_HEAD(&jfs_ip->anon_inode_list); 758 - init_rwsem(&jfs_ip->rdwrlock); 759 - mutex_init(&jfs_ip->commit_mutex); 760 - init_rwsem(&jfs_ip->xattr_sem); 761 - spin_lock_init(&jfs_ip->ag_lock); 762 - jfs_ip->active_ag = -1; 755 + memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); 756 + INIT_LIST_HEAD(&jfs_ip->anon_inode_list); 757 + init_rwsem(&jfs_ip->rdwrlock); 758 + mutex_init(&jfs_ip->commit_mutex); 759 + init_rwsem(&jfs_ip->xattr_sem); 760 + spin_lock_init(&jfs_ip->ag_lock); 761 + jfs_ip->active_ag = -1; 763 762 #ifdef CONFIG_JFS_POSIX_ACL 764 - jfs_ip->i_acl = JFS_ACL_NOT_CACHED; 765 - jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED; 763 + jfs_ip->i_acl = JFS_ACL_NOT_CACHED; 764 + jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED; 766 765 #endif 767 - inode_init_once(&jfs_ip->vfs_inode); 768 - } 766 + inode_init_once(&jfs_ip->vfs_inode); 769 767 } 770 768 771 769 static int __init init_jfs_fs(void)
-3
fs/locks.c
··· 203 203 { 204 204 struct file_lock *lock = (struct file_lock *) foo; 205 205 206 - if (!(flags & SLAB_CTOR_CONSTRUCTOR)) 207 - return; 208 - 209 206 locks_init_lock(lock); 210 207 } 211 208
+1 -2
fs/minix/inode.c
··· 73 73 { 74 74 struct minix_inode_info *ei = (struct minix_inode_info *) foo; 75 75 76 - if (flags & SLAB_CTOR_CONSTRUCTOR) 77 - inode_init_once(&ei->vfs_inode); 76 + inode_init_once(&ei->vfs_inode); 78 77 } 79 78 80 79 static int init_inodecache(void)
+2 -4
fs/ncpfs/inode.c
··· 60 60 { 61 61 struct ncp_inode_info *ei = (struct ncp_inode_info *) foo; 62 62 63 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 64 - mutex_init(&ei->open_mutex); 65 - inode_init_once(&ei->vfs_inode); 66 - } 63 + mutex_init(&ei->open_mutex); 64 + inode_init_once(&ei->vfs_inode); 67 65 } 68 66 69 67 static int init_inodecache(void)
+13 -15
fs/nfs/inode.c
··· 1164 1164 { 1165 1165 struct nfs_inode *nfsi = (struct nfs_inode *) foo; 1166 1166 1167 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 1168 - inode_init_once(&nfsi->vfs_inode); 1169 - spin_lock_init(&nfsi->req_lock); 1170 - INIT_LIST_HEAD(&nfsi->dirty); 1171 - INIT_LIST_HEAD(&nfsi->commit); 1172 - INIT_LIST_HEAD(&nfsi->open_files); 1173 - INIT_LIST_HEAD(&nfsi->access_cache_entry_lru); 1174 - INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); 1175 - INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC); 1176 - atomic_set(&nfsi->data_updates, 0); 1177 - nfsi->ndirty = 0; 1178 - nfsi->ncommit = 0; 1179 - nfsi->npages = 0; 1180 - nfs4_init_once(nfsi); 1181 - } 1167 + inode_init_once(&nfsi->vfs_inode); 1168 + spin_lock_init(&nfsi->req_lock); 1169 + INIT_LIST_HEAD(&nfsi->dirty); 1170 + INIT_LIST_HEAD(&nfsi->commit); 1171 + INIT_LIST_HEAD(&nfsi->open_files); 1172 + INIT_LIST_HEAD(&nfsi->access_cache_entry_lru); 1173 + INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); 1174 + INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC); 1175 + atomic_set(&nfsi->data_updates, 0); 1176 + nfsi->ndirty = 0; 1177 + nfsi->ncommit = 0; 1178 + nfsi->npages = 0; 1179 + nfs4_init_once(nfsi); 1182 1180 } 1183 1181 1184 1182 static int __init nfs_init_inodecache(void)
+1 -2
fs/ntfs/super.c
··· 3085 3085 { 3086 3086 ntfs_inode *ni = (ntfs_inode *)foo; 3087 3087 3088 - if (flags & SLAB_CTOR_CONSTRUCTOR) 3089 - inode_init_once(VFS_I(ni)); 3088 + inode_init_once(VFS_I(ni)); 3090 3089 } 3091 3090 3092 3091 /*
+3 -5
fs/ocfs2/dlm/dlmfs.c
··· 262 262 struct dlmfs_inode_private *ip = 263 263 (struct dlmfs_inode_private *) foo; 264 264 265 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 266 - ip->ip_dlm = NULL; 267 - ip->ip_parent = NULL; 265 + ip->ip_dlm = NULL; 266 + ip->ip_parent = NULL; 268 267 269 - inode_init_once(&ip->ip_vfs_inode); 270 - } 268 + inode_init_once(&ip->ip_vfs_inode); 271 269 } 272 270 273 271 static struct inode *dlmfs_alloc_inode(struct super_block *sb)
+18 -20
fs/ocfs2/super.c
··· 937 937 { 938 938 struct ocfs2_inode_info *oi = data; 939 939 940 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 941 - oi->ip_flags = 0; 942 - oi->ip_open_count = 0; 943 - spin_lock_init(&oi->ip_lock); 944 - ocfs2_extent_map_init(&oi->vfs_inode); 945 - INIT_LIST_HEAD(&oi->ip_io_markers); 946 - oi->ip_created_trans = 0; 947 - oi->ip_last_trans = 0; 948 - oi->ip_dir_start_lookup = 0; 940 + oi->ip_flags = 0; 941 + oi->ip_open_count = 0; 942 + spin_lock_init(&oi->ip_lock); 943 + ocfs2_extent_map_init(&oi->vfs_inode); 944 + INIT_LIST_HEAD(&oi->ip_io_markers); 945 + oi->ip_created_trans = 0; 946 + oi->ip_last_trans = 0; 947 + oi->ip_dir_start_lookup = 0; 949 948 950 - init_rwsem(&oi->ip_alloc_sem); 951 - mutex_init(&oi->ip_io_mutex); 949 + init_rwsem(&oi->ip_alloc_sem); 950 + mutex_init(&oi->ip_io_mutex); 952 951 953 - oi->ip_blkno = 0ULL; 954 - oi->ip_clusters = 0; 952 + oi->ip_blkno = 0ULL; 953 + oi->ip_clusters = 0; 955 954 956 - ocfs2_lock_res_init_once(&oi->ip_rw_lockres); 957 - ocfs2_lock_res_init_once(&oi->ip_meta_lockres); 958 - ocfs2_lock_res_init_once(&oi->ip_data_lockres); 959 - ocfs2_lock_res_init_once(&oi->ip_open_lockres); 955 + ocfs2_lock_res_init_once(&oi->ip_rw_lockres); 956 + ocfs2_lock_res_init_once(&oi->ip_meta_lockres); 957 + ocfs2_lock_res_init_once(&oi->ip_data_lockres); 958 + ocfs2_lock_res_init_once(&oi->ip_open_lockres); 960 959 961 - ocfs2_metadata_cache_init(&oi->vfs_inode); 960 + ocfs2_metadata_cache_init(&oi->vfs_inode); 962 961 963 - inode_init_once(&oi->vfs_inode); 964 - } 962 + inode_init_once(&oi->vfs_inode); 965 963 } 966 964 967 965 static int ocfs2_initialize_mem_caches(void)
+1 -2
fs/openpromfs/inode.c
··· 419 419 { 420 420 struct op_inode_info *oi = (struct op_inode_info *) data; 421 421 422 - if (flags & SLAB_CTOR_CONSTRUCTOR) 423 - inode_init_once(&oi->vfs_inode); 422 + inode_init_once(&oi->vfs_inode); 424 423 } 425 424 426 425 static int __init init_openprom_fs(void)
+1 -2
fs/proc/inode.c
··· 109 109 { 110 110 struct proc_inode *ei = (struct proc_inode *) foo; 111 111 112 - if (flags & SLAB_CTOR_CONSTRUCTOR) 113 - inode_init_once(&ei->vfs_inode); 112 + inode_init_once(&ei->vfs_inode); 114 113 } 115 114 116 115 int __init proc_init_inodecache(void)
+1 -2
fs/qnx4/inode.c
··· 536 536 { 537 537 struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo; 538 538 539 - if (flags & SLAB_CTOR_CONSTRUCTOR) 540 - inode_init_once(&ei->vfs_inode); 539 + inode_init_once(&ei->vfs_inode); 541 540 } 542 541 543 542 static int init_inodecache(void)
+7 -16
fs/quota.c
··· 157 157 static void quota_sync_sb(struct super_block *sb, int type) 158 158 { 159 159 int cnt; 160 - struct inode *discard[MAXQUOTAS]; 161 160 162 161 sb->s_qcop->quota_sync(sb, type); 163 162 /* This is not very clever (and fast) but currently I don't know about ··· 166 167 sb->s_op->sync_fs(sb, 1); 167 168 sync_blockdev(sb->s_bdev); 168 169 169 - /* Now when everything is written we can discard the pagecache so 170 - * that userspace sees the changes. We need i_mutex and so we could 171 - * not do it inside dqonoff_mutex. Moreover we need to be carefull 172 - * about races with quotaoff() (that is the reason why we have own 173 - * reference to inode). */ 170 + /* 171 + * Now when everything is written we can discard the pagecache so 172 + * that userspace sees the changes. 173 + */ 174 174 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); 175 175 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 176 - discard[cnt] = NULL; 177 176 if (type != -1 && cnt != type) 178 177 continue; 179 178 if (!sb_has_quota_enabled(sb, cnt)) 180 179 continue; 181 - discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]); 180 + mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA); 181 + truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); 182 + mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); 182 183 } 183 184 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 184 - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 185 - if (discard[cnt]) { 186 - mutex_lock(&discard[cnt]->i_mutex); 187 - truncate_inode_pages(&discard[cnt]->i_data, 0); 188 - mutex_unlock(&discard[cnt]->i_mutex); 189 - iput(discard[cnt]); 190 - } 191 - } 192 185 } 193 186 194 187 void sync_dquots(struct super_block *sb, int type)
+4 -6
fs/reiserfs/super.c
··· 511 511 { 512 512 struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo; 513 513 514 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 515 - INIT_LIST_HEAD(&ei->i_prealloc_list); 516 - inode_init_once(&ei->vfs_inode); 514 + INIT_LIST_HEAD(&ei->i_prealloc_list); 515 + inode_init_once(&ei->vfs_inode); 517 516 #ifdef CONFIG_REISERFS_FS_POSIX_ACL 518 - ei->i_acl_access = NULL; 519 - ei->i_acl_default = NULL; 517 + ei->i_acl_access = NULL; 518 + ei->i_acl_default = NULL; 520 519 #endif 521 - } 522 520 } 523 521 524 522 static int init_inodecache(void)
+3 -4
fs/romfs/inode.c
··· 566 566 kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode)); 567 567 } 568 568 569 - static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 569 + static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) 570 570 { 571 - struct romfs_inode_info *ei = (struct romfs_inode_info *) foo; 571 + struct romfs_inode_info *ei = foo; 572 572 573 - if (flags & SLAB_CTOR_CONSTRUCTOR) 574 - inode_init_once(&ei->vfs_inode); 573 + inode_init_once(&ei->vfs_inode); 575 574 } 576 575 577 576 static int init_inodecache(void)
+1 -2
fs/smbfs/inode.c
··· 70 70 { 71 71 struct smb_inode_info *ei = (struct smb_inode_info *) foo; 72 72 73 - if (flags & SLAB_CTOR_CONSTRUCTOR) 74 - inode_init_once(&ei->vfs_inode); 73 + inode_init_once(&ei->vfs_inode); 75 74 } 76 75 77 76 static int init_inodecache(void)
+1 -2
fs/sysv/inode.c
··· 322 322 { 323 323 struct sysv_inode_info *si = (struct sysv_inode_info *)p; 324 324 325 - if (flags & SLAB_CTOR_CONSTRUCTOR) 326 - inode_init_once(&si->vfs_inode); 325 + inode_init_once(&si->vfs_inode); 327 326 } 328 327 329 328 const struct super_operations sysv_sops = {
+2 -4
fs/udf/super.c
··· 134 134 { 135 135 struct udf_inode_info *ei = (struct udf_inode_info *) foo; 136 136 137 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 138 - ei->i_ext.i_data = NULL; 139 - inode_init_once(&ei->vfs_inode); 140 - } 137 + ei->i_ext.i_data = NULL; 138 + inode_init_once(&ei->vfs_inode); 141 139 } 142 140 143 141 static int init_inodecache(void)
+1 -2
fs/ufs/super.c
··· 1237 1237 { 1238 1238 struct ufs_inode_info *ei = (struct ufs_inode_info *) foo; 1239 1239 1240 - if (flags & SLAB_CTOR_CONSTRUCTOR) 1241 - inode_init_once(&ei->vfs_inode); 1240 + inode_init_once(&ei->vfs_inode); 1242 1241 } 1243 1242 1244 1243 static int init_inodecache(void)
+1 -2
fs/xfs/linux-2.6/xfs_super.c
··· 360 360 kmem_zone_t *zonep, 361 361 unsigned long flags) 362 362 { 363 - if (flags & SLAB_CTOR_CONSTRUCTOR) 364 - inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); 363 + inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); 365 364 } 366 365 367 366 STATIC int
+2 -5
include/acpi/acpi_numa.h
··· 11 11 #define MAX_PXM_DOMAINS (256) /* Old pxm spec is defined 8 bit */ 12 12 #endif 13 13 14 - extern int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]; 15 - extern int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]; 16 - 17 - extern int __cpuinit pxm_to_node(int); 18 - extern int __cpuinit node_to_pxm(int); 14 + extern int pxm_to_node(int); 15 + extern int node_to_pxm(int); 19 16 extern int __cpuinit acpi_map_pxm_to_node(int); 20 17 extern void __cpuinit acpi_unmap_pxm_to_node(int); 21 18
+2
include/linux/binfmts.h
··· 17 17 18 18 #ifdef __KERNEL__ 19 19 20 + #define CORENAME_MAX_SIZE 128 21 + 20 22 /* 21 23 * This structure is used to hold the arguments that are used when loading binaries. 22 24 */
+15 -5
include/linux/kmalloc_sizes.h
··· 19 19 CACHE(32768) 20 20 CACHE(65536) 21 21 CACHE(131072) 22 - #if (NR_CPUS > 512) || (MAX_NUMNODES > 256) || !defined(CONFIG_MMU) 22 + #if KMALLOC_MAX_SIZE >= 262144 23 23 CACHE(262144) 24 24 #endif 25 - #ifndef CONFIG_MMU 25 + #if KMALLOC_MAX_SIZE >= 524288 26 26 CACHE(524288) 27 + #endif 28 + #if KMALLOC_MAX_SIZE >= 1048576 27 29 CACHE(1048576) 28 - #ifdef CONFIG_LARGE_ALLOCS 30 + #endif 31 + #if KMALLOC_MAX_SIZE >= 2097152 29 32 CACHE(2097152) 33 + #endif 34 + #if KMALLOC_MAX_SIZE >= 4194304 30 35 CACHE(4194304) 36 + #endif 37 + #if KMALLOC_MAX_SIZE >= 8388608 31 38 CACHE(8388608) 39 + #endif 40 + #if KMALLOC_MAX_SIZE >= 16777216 32 41 CACHE(16777216) 42 + #endif 43 + #if KMALLOC_MAX_SIZE >= 33554432 33 44 CACHE(33554432) 34 - #endif /* CONFIG_LARGE_ALLOCS */ 35 - #endif /* CONFIG_MMU */ 45 + #endif
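With the new KMALLOC_MAX_SIZE guards the size table follows the per-arch limit instead of CONFIG_LARGE_ALLOCS. For context, the consumer in mm/slab.c builds its table by defining CACHE() and including this header, roughly:

    struct cache_sizes malloc_sizes[] = {
    #define CACHE(x) { .cs_size = (x) },
    #include <linux/kmalloc_sizes.h>
            CACHE(ULONG_MAX)
    #undef CACHE
    };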
+1
include/linux/pci_ids.h
··· 471 471 #define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2 0x0219 472 472 #define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX 0x021A 473 473 #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251 474 + #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361 474 475 #define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252 475 476 476 477 #define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */
+5 -8
include/linux/rmap.h
··· 74 74 void page_add_file_rmap(struct page *); 75 75 void page_remove_rmap(struct page *, struct vm_area_struct *); 76 76 77 - /** 78 - * page_dup_rmap - duplicate pte mapping to a page 79 - * @page: the page to add the mapping to 80 - * 81 - * For copy_page_range only: minimal extract from page_add_rmap, 82 - * avoiding unnecessary tests (already checked) so it's quicker. 83 - */ 84 - static inline void page_dup_rmap(struct page *page) 77 + #ifdef CONFIG_DEBUG_VM 78 + void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address); 79 + #else 80 + static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) 85 81 { 86 82 atomic_inc(&page->_mapcount); 87 83 } 84 + #endif 88 85 89 86 /* 90 87 * Called from mm/vmscan.c to handle paging out
+15 -6
include/linux/slab.h
··· 32 32 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ 33 33 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ 34 34 35 - /* Flags passed to a constructor functions */ 36 - #define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then deconstructor */ 37 - 38 35 /* 39 36 * struct kmem_cache related prototypes 40 37 */ ··· 72 75 return kmem_cache_alloc(cachep, flags); 73 76 } 74 77 #endif 78 + 79 + /* 80 + * The largest kmalloc size supported by the slab allocators is 81 + * 32 megabyte (2^25) or the maximum allocatable page order if that is 82 + * less than 32 MB. 83 + * 84 + * WARNING: Its not easy to increase this value since the allocators have 85 + * to do various tricks to work around compiler limitations in order to 86 + * ensure proper constant folding. 87 + */ 88 + #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 25 ? \ 89 + (MAX_ORDER + PAGE_SHIFT) : 25) 90 + 91 + #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH) 92 + #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT) 75 93 76 94 /* 77 95 * Common kmalloc functions provided by all allocators ··· 244 232 kmalloc_track_caller(size, flags) 245 233 246 234 #endif /* DEBUG_SLAB */ 247 - 248 - extern const struct seq_operations slabinfo_op; 249 - ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *); 250 235 251 236 #endif /* __KERNEL__ */ 252 237 #endif /* _LINUX_SLAB_H */
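A worked example of the new limit, using assumed (typical i386) values:

    /* Assuming PAGE_SHIFT = 12 and MAX_ORDER = 11:
     *      MAX_ORDER + PAGE_SHIFT = 23, which is <= 25, so
     *      KMALLOC_SHIFT_HIGH = 23
     *      KMALLOC_MAX_SIZE   = 1UL << 23          (8 MB)
     *      KMALLOC_MAX_ORDER  = 23 - 12 = 11
     * Only configurations where MAX_ORDER + PAGE_SHIFT exceeds 25 hit the
     * 32 MB (2^25) ceiling described in the comment above.
     */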
+3
include/linux/slab_def.h
··· 109 109 110 110 #endif /* CONFIG_NUMA */ 111 111 112 + extern const struct seq_operations slabinfo_op; 113 + ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *); 114 + 112 115 #endif /* _LINUX_SLAB_DEF_H */
+8 -19
include/linux/slub_def.h
··· 40 40 int objects; /* Number of objects in slab */ 41 41 int refcount; /* Refcount for slab cache destroy */ 42 42 void (*ctor)(void *, struct kmem_cache *, unsigned long); 43 - void (*dtor)(void *, struct kmem_cache *, unsigned long); 44 43 int inuse; /* Offset to metadata */ 45 44 int align; /* Alignment */ 46 45 const char *name; /* Name (only for display!) */ ··· 58 59 */ 59 60 #define KMALLOC_SHIFT_LOW 3 60 61 61 - #ifdef CONFIG_LARGE_ALLOCS 62 - #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) =< 25 ? \ 63 - (MAX_ORDER + PAGE_SHIFT - 1) : 25) 64 - #else 65 - #if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256 66 - #define KMALLOC_SHIFT_HIGH 20 67 - #else 68 - #define KMALLOC_SHIFT_HIGH 18 69 - #endif 70 - #endif 71 - 72 62 /* 73 63 * We keep the general caches in an array of slab caches that are used for 74 64 * 2^x bytes of allocations. ··· 68 80 * Sorry that the following has to be that ugly but some versions of GCC 69 81 * have trouble with constant propagation and loops. 70 82 */ 71 - static inline int kmalloc_index(int size) 83 + static inline int kmalloc_index(size_t size) 72 84 { 73 85 /* 74 86 * We should return 0 if size == 0 but we use the smallest object ··· 76 88 */ 77 89 WARN_ON_ONCE(size == 0); 78 90 79 - if (size > (1 << KMALLOC_SHIFT_HIGH)) 91 + if (size > KMALLOC_MAX_SIZE) 80 92 return -1; 81 93 82 94 if (size > 64 && size <= 96) ··· 99 111 if (size <= 64 * 1024) return 16; 100 112 if (size <= 128 * 1024) return 17; 101 113 if (size <= 256 * 1024) return 18; 102 - #if KMALLOC_SHIFT_HIGH > 18 103 114 if (size <= 512 * 1024) return 19; 104 115 if (size <= 1024 * 1024) return 20; 105 - #endif 106 - #if KMALLOC_SHIFT_HIGH > 20 107 116 if (size <= 2 * 1024 * 1024) return 21; 108 117 if (size <= 4 * 1024 * 1024) return 22; 109 118 if (size <= 8 * 1024 * 1024) return 23; 110 119 if (size <= 16 * 1024 * 1024) return 24; 111 120 if (size <= 32 * 1024 * 1024) return 25; 112 - #endif 113 121 return -1; 114 122 115 123 /* ··· 130 146 if (index == 0) 131 147 return NULL; 132 148 133 - if (index < 0) { 149 + /* 150 + * This function only gets expanded if __builtin_constant_p(size), so 151 + * testing it here shouldn't be needed. But some versions of gcc need 152 + * help. 153 + */ 154 + if (__builtin_constant_p(size) && index < 0) { 134 155 /* 135 156 * Generate a link failure. Would be great if we could 136 157 * do something to stop the compile here.
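Reading the ladder above, a few example results (sizes chosen to match branches visible in this hunk):

    /*      kmalloc_index(80)         -> 1    (the special 96-byte cache)
     *      kmalloc_index(200 * 1024) -> 18   (256 KB cache)
     *      kmalloc_index(64 << 20)   -> -1   (above KMALLOC_MAX_SIZE; with a
     *                                         constant size this now turns
     *                                         into a link failure)
     */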
+3 -4
include/linux/smp.h
··· 6 6 * Alan Cox. <alan@redhat.com> 7 7 */ 8 8 9 + #include <linux/errno.h> 9 10 10 11 extern void cpu_idle(void); 11 12 ··· 100 99 #define num_booting_cpus() 1 101 100 #define smp_prepare_boot_cpu() do {} while (0) 102 101 static inline int smp_call_function_single(int cpuid, void (*func) (void *info), 103 - void *info, int retry, int wait) 102 + void *info, int retry, int wait) 104 103 { 105 - /* Disable interrupts here? */ 106 - func(info); 107 - return 0; 104 + return -EBUSY; 108 105 } 109 106 110 107 #endif /* !SMP */
+1 -1
include/linux/workqueue.h
··· 122 122 int singlethread, 123 123 int freezeable); 124 124 #define create_workqueue(name) __create_workqueue((name), 0, 0) 125 - #define create_freezeable_workqueue(name) __create_workqueue((name), 0, 1) 125 + #define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1) 126 126 #define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0) 127 127 128 128 extern void destroy_workqueue(struct workqueue_struct *wq);
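With this fix a freezeable workqueue is single-threaded, like create_singlethread_workqueue(), and its worker thread freezes across suspend. Usage is unchanged; a sketch with hypothetical driver names:

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static void mydrv_work_fn(struct work_struct *work)
    {
            /* deferred work; the worker now freezes for suspend/hibernate */
    }

    static DECLARE_WORK(mydrv_work, mydrv_work_fn);
    static struct workqueue_struct *mydrv_wq;

    static int mydrv_start(void)
    {
            mydrv_wq = create_freezeable_workqueue("mydrv");
            if (!mydrv_wq)
                    return -ENOMEM;
            queue_work(mydrv_wq, &mydrv_work);
            return 0;
    }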
+2 -6
init/Kconfig
··· 567 567 a slab allocator. 568 568 569 569 config SLUB 570 - depends on EXPERIMENTAL && !ARCH_USES_SLAB_PAGE_STRUCT 571 570 bool "SLUB (Unqueued Allocator)" 572 571 help 573 572 SLUB is a slab allocator that minimizes cache line usage ··· 576 577 and has enhanced diagnostics. 577 578 578 579 config SLOB 579 - # 580 - # SLOB does not support SMP because SLAB_DESTROY_BY_RCU is unsupported 581 - # 582 - depends on EMBEDDED && !SMP && !SPARSEMEM 580 + depends on EMBEDDED && !SPARSEMEM 583 581 bool "SLOB (Simple Allocator)" 584 582 help 585 583 SLOB replaces the SLAB allocator with a drastically simpler 586 - allocator. SLOB is more space efficient that SLAB but does not 584 + allocator. SLOB is more space efficient than SLAB but does not 587 585 scale well (single lock for all operations) and is also highly 588 586 susceptible to fragmentation. SLUB can accomplish a higher object 589 587 density. It is usually better to use SLUB instead of SLOB.
+1 -2
ipc/mqueue.c
··· 215 215 { 216 216 struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; 217 217 218 - if (flags & SLAB_CTOR_CONSTRUCTOR) 219 - inode_init_once(&p->vfs_inode); 218 + inode_init_once(&p->vfs_inode); 220 219 } 221 220 222 221 static struct inode *mqueue_alloc_inode(struct super_block *sb)
+2 -4
kernel/fork.c
··· 1427 1427 { 1428 1428 struct sighand_struct *sighand = data; 1429 1429 1430 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 1431 - spin_lock_init(&sighand->siglock); 1432 - INIT_LIST_HEAD(&sighand->signalfd_list); 1433 - } 1430 + spin_lock_init(&sighand->siglock); 1431 + INIT_LIST_HEAD(&sighand->signalfd_list); 1434 1432 } 1435 1433 1436 1434 void __init proc_caches_init(void)
+2 -1
kernel/power/disk.c
··· 416 416 417 417 mutex_lock(&pm_mutex); 418 418 for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) { 419 - if (!strncmp(buf, hibernation_modes[i], len)) { 419 + if (len == strlen(hibernation_modes[i]) 420 + && !strncmp(buf, hibernation_modes[i], len)) { 420 421 mode = i; 421 422 break; 422 423 }
+2 -2
kernel/power/main.c
··· 290 290 len = p ? p - buf : n; 291 291 292 292 /* First, check if we are requested to hibernate */ 293 - if (!strncmp(buf, "disk", len)) { 293 + if (len == 4 && !strncmp(buf, "disk", len)) { 294 294 error = hibernate(); 295 295 return error ? error : n; 296 296 } 297 297 298 298 for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) { 299 - if (*s && !strncmp(buf, *s, len)) 299 + if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) 300 300 break; 301 301 } 302 302 if (state < PM_SUSPEND_MAX && *s)
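Concretely, before this change (and the matching disk.c fix above) the comparison used only the length of the user's write, so a prefix was enough:

    /* "echo d > /sys/power/state" gives len == 1, and
     *      !strncmp("d\n", "disk", 1)
     * is true, so hibernation started.  Requiring len == strlen("disk")
     * means only an exact "disk" write (with or without a trailing
     * newline) matches. */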
+1 -1
kernel/sysctl.c
··· 227 227 .ctl_name = KERN_CORE_PATTERN, 228 228 .procname = "core_pattern", 229 229 .data = core_pattern, 230 - .maxlen = 128, 230 + .maxlen = CORENAME_MAX_SIZE, 231 231 .mode = 0644, 232 232 .proc_handler = &proc_dostring, 233 233 .strategy = &sysctl_string,
+1 -1
mm/memory.c
··· 481 481 page = vm_normal_page(vma, addr, pte); 482 482 if (page) { 483 483 get_page(page); 484 - page_dup_rmap(page); 484 + page_dup_rmap(page, vma, addr); 485 485 rss[!!PageAnon(page)]++; 486 486 } 487 487
+59 -7
mm/rmap.c
··· 162 162 static void anon_vma_ctor(void *data, struct kmem_cache *cachep, 163 163 unsigned long flags) 164 164 { 165 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 166 - struct anon_vma *anon_vma = data; 165 + struct anon_vma *anon_vma = data; 167 166 168 - spin_lock_init(&anon_vma->lock); 169 - INIT_LIST_HEAD(&anon_vma->head); 170 - } 167 + spin_lock_init(&anon_vma->lock); 168 + INIT_LIST_HEAD(&anon_vma->head); 171 169 } 172 170 173 171 void __init anon_vma_init(void) ··· 530 532 } 531 533 532 534 /** 535 + * page_set_anon_rmap - sanity check anonymous rmap addition 536 + * @page: the page to add the mapping to 537 + * @vma: the vm area in which the mapping is added 538 + * @address: the user virtual address mapped 539 + */ 540 + static void __page_check_anon_rmap(struct page *page, 541 + struct vm_area_struct *vma, unsigned long address) 542 + { 543 + #ifdef CONFIG_DEBUG_VM 544 + /* 545 + * The page's anon-rmap details (mapping and index) are guaranteed to 546 + * be set up correctly at this point. 547 + * 548 + * We have exclusion against page_add_anon_rmap because the caller 549 + * always holds the page locked, except if called from page_dup_rmap, 550 + * in which case the page is already known to be setup. 551 + * 552 + * We have exclusion against page_add_new_anon_rmap because those pages 553 + * are initially only visible via the pagetables, and the pte is locked 554 + * over the call to page_add_new_anon_rmap. 555 + */ 556 + struct anon_vma *anon_vma = vma->anon_vma; 557 + anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 558 + BUG_ON(page->mapping != (struct address_space *)anon_vma); 559 + BUG_ON(page->index != linear_page_index(vma, address)); 560 + #endif 561 + } 562 + 563 + /** 533 564 * page_add_anon_rmap - add pte mapping to an anonymous page 534 565 * @page: the page to add the mapping to 535 566 * @vma: the vm area in which the mapping is added 536 567 * @address: the user virtual address mapped 537 568 * 538 - * The caller needs to hold the pte lock. 569 + * The caller needs to hold the pte lock and the page must be locked. 539 570 */ 540 571 void page_add_anon_rmap(struct page *page, 541 572 struct vm_area_struct *vma, unsigned long address) 542 573 { 574 + VM_BUG_ON(!PageLocked(page)); 575 + VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 543 576 if (atomic_inc_and_test(&page->_mapcount)) 544 577 __page_set_anon_rmap(page, vma, address); 545 - /* else checking page index and mapping is racy */ 578 + else 579 + __page_check_anon_rmap(page, vma, address); 546 580 } 547 581 548 582 /* ··· 585 555 * 586 556 * Same as page_add_anon_rmap but must only be called on *new* pages. 587 557 * This means the inc-and-test can be bypassed. 558 + * Page does not have to be locked. 588 559 */ 589 560 void page_add_new_anon_rmap(struct page *page, 590 561 struct vm_area_struct *vma, unsigned long address) 591 562 { 563 + BUG_ON(address < vma->vm_start || address >= vma->vm_end); 592 564 atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */ 593 565 __page_set_anon_rmap(page, vma, address); 594 566 } ··· 606 574 if (atomic_inc_and_test(&page->_mapcount)) 607 575 __inc_zone_page_state(page, NR_FILE_MAPPED); 608 576 } 577 + 578 + #ifdef CONFIG_DEBUG_VM 579 + /** 580 + * page_dup_rmap - duplicate pte mapping to a page 581 + * @page: the page to add the mapping to 582 + * 583 + * For copy_page_range only: minimal extract from page_add_file_rmap / 584 + * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's 585 + * quicker. 
586 + * 587 + * The caller needs to hold the pte lock. 588 + */ 589 + void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) 590 + { 591 + BUG_ON(page_mapcount(page) == 0); 592 + if (PageAnon(page)) 593 + __page_check_anon_rmap(page, vma, address); 594 + atomic_inc(&page->_mapcount); 595 + } 596 + #endif 609 597 610 598 /** 611 599 * page_remove_rmap - take down pte mapping from a page
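The new __page_check_anon_rmap() (CONFIG_DEBUG_VM only) verifies that an already-mapped anonymous page still carries the rmap details a fresh __page_set_anon_rmap() would have given it. A small restatement of the invariant being checked, outside the diff (illustrative only, not kernel code; the helper name is made up):

/*
 * Illustrative restatement of the DEBUG_VM check: an anon page's
 * ->mapping is the vma's anon_vma tagged with PAGE_MAPPING_ANON, and
 * ->index is the linear page index of the mapped address.
 */
static bool anon_rmap_is_consistent(struct page *page,
		struct vm_area_struct *vma, unsigned long address)
{
	void *expected = (void *)vma->anon_vma + PAGE_MAPPING_ANON;

	return page->mapping == (struct address_space *)expected &&
	       page->index == linear_page_index(vma, address);
}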
+3 -5
mm/shmem.c
··· 2358 2358 { 2359 2359 struct shmem_inode_info *p = (struct shmem_inode_info *) foo; 2360 2360 2361 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 2362 - inode_init_once(&p->vfs_inode); 2361 + inode_init_once(&p->vfs_inode); 2363 2362 #ifdef CONFIG_TMPFS_POSIX_ACL 2364 - p->i_acl = NULL; 2365 - p->i_default_acl = NULL; 2363 + p->i_acl = NULL; 2364 + p->i_default_acl = NULL; 2366 2365 #endif 2367 - } 2368 2366 } 2369 2367 2370 2368 static int init_inodecache(void)
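This is one of several constructor cleanups in this merge: slab constructors are now called only when an object is first constructed, so the SLAB_CTOR_CONSTRUCTOR guard (and the indentation it forced) can go away. A hypothetical constructor written against the new rule, for a made-up cache, looks like:

/* Hypothetical example, not taken from this diff. */
struct my_obj {
	spinlock_t lock;
	struct list_head list;
};

static void my_obj_ctor(void *data, struct kmem_cache *cachep,
			unsigned long flags)
{
	struct my_obj *obj = data;

	/* unconditional: the ctor only runs at first construction */
	spin_lock_init(&obj->lock);
	INIT_LIST_HEAD(&obj->list);
}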
+9 -48
mm/slab.c
··· 409 409 /* constructor func */ 410 410 void (*ctor) (void *, struct kmem_cache *, unsigned long); 411 411 412 - /* de-constructor func */ 413 - void (*dtor) (void *, struct kmem_cache *, unsigned long); 414 - 415 412 /* 5) cache creation/removal */ 416 413 const char *name; 417 414 struct list_head next; ··· 566 569 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) 567 570 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) 568 571 569 - #endif 570 - 571 - /* 572 - * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp 573 - * order. 574 - */ 575 - #if defined(CONFIG_LARGE_ALLOCS) 576 - #define MAX_OBJ_ORDER 13 /* up to 32Mb */ 577 - #define MAX_GFP_ORDER 13 /* up to 32Mb */ 578 - #elif defined(CONFIG_MMU) 579 - #define MAX_OBJ_ORDER 5 /* 32 pages */ 580 - #define MAX_GFP_ORDER 5 /* 32 pages */ 581 - #else 582 - #define MAX_OBJ_ORDER 8 /* up to 1Mb */ 583 - #define MAX_GFP_ORDER 8 /* up to 1Mb */ 584 572 #endif 585 573 586 574 /* ··· 774 792 */ 775 793 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); 776 794 #endif 795 + WARN_ON_ONCE(size == 0); 777 796 while (size > csizep->cs_size) 778 797 csizep++; 779 798 ··· 1894 1911 slab_error(cachep, "end of a freed object " 1895 1912 "was overwritten"); 1896 1913 } 1897 - if (cachep->dtor && !(cachep->flags & SLAB_POISON)) 1898 - (cachep->dtor) (objp + obj_offset(cachep), cachep, 0); 1899 1914 } 1900 1915 } 1901 1916 #else 1902 1917 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) 1903 1918 { 1904 - if (cachep->dtor) { 1905 - int i; 1906 - for (i = 0; i < cachep->num; i++) { 1907 - void *objp = index_to_obj(cachep, slabp, i); 1908 - (cachep->dtor) (objp, cachep, 0); 1909 - } 1910 - } 1911 1919 } 1912 1920 #endif 1913 1921 ··· 1987 2013 size_t left_over = 0; 1988 2014 int gfporder; 1989 2015 1990 - for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) { 2016 + for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) { 1991 2017 unsigned int num; 1992 2018 size_t remainder; 1993 2019 ··· 2098 2124 * @align: The required alignment for the objects. 2099 2125 * @flags: SLAB flags 2100 2126 * @ctor: A constructor for the objects. 2101 - * @dtor: A destructor for the objects. 2127 + * @dtor: A destructor for the objects (not implemented anymore). 2102 2128 * 2103 2129 * Returns a ptr to the cache on success, NULL on failure. 2104 2130 * Cannot be called within a int, but can be interrupted. ··· 2133 2159 * Sanity checks... these are all serious usage bugs. 2134 2160 */ 2135 2161 if (!name || in_interrupt() || (size < BYTES_PER_WORD) || 2136 - (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) { 2162 + size > KMALLOC_MAX_SIZE || dtor) { 2137 2163 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, 2138 2164 name); 2139 2165 BUG(); ··· 2187 2213 if (flags & SLAB_DESTROY_BY_RCU) 2188 2214 BUG_ON(flags & SLAB_POISON); 2189 2215 #endif 2190 - if (flags & SLAB_DESTROY_BY_RCU) 2191 - BUG_ON(dtor); 2192 - 2193 2216 /* 2194 2217 * Always checks flags, a caller might be expecting debug support which 2195 2218 * isn't available. 
··· 2341 2370 BUG_ON(!cachep->slabp_cache); 2342 2371 } 2343 2372 cachep->ctor = ctor; 2344 - cachep->dtor = dtor; 2345 2373 cachep->name = name; 2346 2374 2347 2375 if (setup_cpu_cache(cachep)) { ··· 2595 2625 } 2596 2626 2597 2627 static void cache_init_objs(struct kmem_cache *cachep, 2598 - struct slab *slabp, unsigned long ctor_flags) 2628 + struct slab *slabp) 2599 2629 { 2600 2630 int i; 2601 2631 ··· 2619 2649 */ 2620 2650 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) 2621 2651 cachep->ctor(objp + obj_offset(cachep), cachep, 2622 - ctor_flags); 2652 + 0); 2623 2653 2624 2654 if (cachep->flags & SLAB_RED_ZONE) { 2625 2655 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) ··· 2635 2665 cachep->buffer_size / PAGE_SIZE, 0); 2636 2666 #else 2637 2667 if (cachep->ctor) 2638 - cachep->ctor(objp, cachep, ctor_flags); 2668 + cachep->ctor(objp, cachep, 0); 2639 2669 #endif 2640 2670 slab_bufctl(slabp)[i] = i + 1; 2641 2671 } ··· 2724 2754 struct slab *slabp; 2725 2755 size_t offset; 2726 2756 gfp_t local_flags; 2727 - unsigned long ctor_flags; 2728 2757 struct kmem_list3 *l3; 2729 2758 2730 2759 /* ··· 2732 2763 */ 2733 2764 BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)); 2734 2765 2735 - ctor_flags = SLAB_CTOR_CONSTRUCTOR; 2736 2766 local_flags = (flags & GFP_LEVEL_MASK); 2737 2767 /* Take the l3 list lock to change the colour_next on this node */ 2738 2768 check_irq_off(); ··· 2776 2808 slabp->nodeid = nodeid; 2777 2809 slab_map_pages(cachep, slabp, objp); 2778 2810 2779 - cache_init_objs(cachep, slabp, ctor_flags); 2811 + cache_init_objs(cachep, slabp); 2780 2812 2781 2813 if (local_flags & __GFP_WAIT) 2782 2814 local_irq_disable(); ··· 2803 2835 * Perform extra freeing checks: 2804 2836 * - detect bad pointers. 2805 2837 * - POISON/RED_ZONE checking 2806 - * - destructor calls, for caches with POISON+dtor 2807 2838 */ 2808 2839 static void kfree_debugcheck(const void *objp) 2809 2840 { ··· 2861 2894 BUG_ON(objnr >= cachep->num); 2862 2895 BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); 2863 2896 2864 - if (cachep->flags & SLAB_POISON && cachep->dtor) { 2865 - /* we want to cache poison the object, 2866 - * call the destruction callback 2867 - */ 2868 - cachep->dtor(objp + obj_offset(cachep), cachep, 0); 2869 - } 2870 2897 #ifdef CONFIG_DEBUG_SLAB_LEAK 2871 2898 slab_bufctl(slabp)[objnr] = BUFCTL_FREE; 2872 2899 #endif ··· 3060 3099 #endif 3061 3100 objp += obj_offset(cachep); 3062 3101 if (cachep->ctor && cachep->flags & SLAB_POISON) 3063 - cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR); 3102 + cachep->ctor(objp, cachep, 0); 3064 3103 #if ARCH_SLAB_MINALIGN 3065 3104 if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { 3066 3105 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
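kmem_cache_create() keeps its dtor parameter for now, but passing anything other than NULL is treated as an early usage bug, and the object-size limit is expressed through KMALLOC_MAX_SIZE rather than the removed MAX_OBJ_ORDER. A hypothetical caller under the new rules, reusing the made-up my_obj/my_obj_ctor from the sketch above:

/* Hypothetical caller, not part of this diff: dtor must be NULL. */
static struct kmem_cache *my_cachep;

static int __init my_cache_init(void)
{
	my_cachep = kmem_cache_create("my_obj_cache", sizeof(struct my_obj),
				      0, SLAB_HWCACHE_ALIGN,
				      my_obj_ctor, NULL);
	return my_cachep ? 0 : -ENOMEM;
}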
+43 -10
mm/slob.c
··· 35 35 #include <linux/init.h> 36 36 #include <linux/module.h> 37 37 #include <linux/timer.h> 38 + #include <linux/rcupdate.h> 38 39 39 40 struct slob_block { 40 41 int units; ··· 53 52 struct bigblock *next; 54 53 }; 55 54 typedef struct bigblock bigblock_t; 55 + 56 + /* 57 + * struct slob_rcu is inserted at the tail of allocated slob blocks, which 58 + * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free 59 + * the block using call_rcu. 60 + */ 61 + struct slob_rcu { 62 + struct rcu_head head; 63 + int size; 64 + }; 56 65 57 66 static slob_t arena = { .next = &arena, .units = 1 }; 58 67 static slob_t *slobfree = &arena; ··· 277 266 278 267 struct kmem_cache { 279 268 unsigned int size, align; 269 + unsigned long flags; 280 270 const char *name; 281 271 void (*ctor)(void *, struct kmem_cache *, unsigned long); 282 - void (*dtor)(void *, struct kmem_cache *, unsigned long); 283 272 }; 284 273 285 274 struct kmem_cache *kmem_cache_create(const char *name, size_t size, ··· 294 283 if (c) { 295 284 c->name = name; 296 285 c->size = size; 286 + if (flags & SLAB_DESTROY_BY_RCU) { 287 + /* leave room for rcu footer at the end of object */ 288 + c->size += sizeof(struct slob_rcu); 289 + } 290 + c->flags = flags; 297 291 c->ctor = ctor; 298 - c->dtor = dtor; 299 292 /* ignore alignment unless it's forced */ 300 293 c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0; 301 294 if (c->align < align) ··· 327 312 b = (void *)__get_free_pages(flags, get_order(c->size)); 328 313 329 314 if (c->ctor) 330 - c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR); 315 + c->ctor(b, c, 0); 331 316 332 317 return b; 333 318 } ··· 343 328 } 344 329 EXPORT_SYMBOL(kmem_cache_zalloc); 345 330 331 + static void __kmem_cache_free(void *b, int size) 332 + { 333 + if (size < PAGE_SIZE) 334 + slob_free(b, size); 335 + else 336 + free_pages((unsigned long)b, get_order(size)); 337 + } 338 + 339 + static void kmem_rcu_free(struct rcu_head *head) 340 + { 341 + struct slob_rcu *slob_rcu = (struct slob_rcu *)head; 342 + void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu)); 343 + 344 + __kmem_cache_free(b, slob_rcu->size); 345 + } 346 + 346 347 void kmem_cache_free(struct kmem_cache *c, void *b) 347 348 { 348 - if (c->dtor) 349 - c->dtor(b, c, 0); 350 - 351 - if (c->size < PAGE_SIZE) 352 - slob_free(b, c->size); 353 - else 354 - free_pages((unsigned long)b, get_order(c->size)); 349 + if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) { 350 + struct slob_rcu *slob_rcu; 351 + slob_rcu = b + (c->size - sizeof(struct slob_rcu)); 352 + INIT_RCU_HEAD(&slob_rcu->head); 353 + slob_rcu->size = c->size; 354 + call_rcu(&slob_rcu->head, kmem_rcu_free); 355 + } else { 356 + __kmem_cache_free(b, c->size); 357 + } 355 358 } 356 359 EXPORT_SYMBOL(kmem_cache_free); 357 360
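SLOB now implements SLAB_DESTROY_BY_RCU by reserving a struct slob_rcu footer behind each object and deferring the real free through call_rcu(). The caller-side contract is the usual one for RCU-freed slabs (a general rule, not something added by this hunk): the memory cannot return to the page allocator during a grace period, but the object itself may be recycled immediately, so RCU readers must take a reference and then revalidate what they found. A hypothetical lookup following that discipline (the structure and index are made up for illustration):

/*
 * Hypothetical reader for an object allocated from a
 * SLAB_DESTROY_BY_RCU cache.
 */
struct my_obj {
	unsigned long key;
	atomic_t refcnt;
};

static RADIX_TREE(obj_tree, GFP_ATOMIC);

static struct my_obj *lookup_obj(unsigned long key)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = radix_tree_lookup(&obj_tree, key);
	if (obj) {
		if (!atomic_inc_not_zero(&obj->refcnt))
			obj = NULL;		/* being freed right now */
		else if (obj->key != key) {
			/* slot was recycled for a different object */
			atomic_dec(&obj->refcnt);
			obj = NULL;
		}
	}
	rcu_read_unlock();
	return obj;
}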
+120 -116
mm/slub.c
··· 78 78 * 79 79 * Overloading of page flags that are otherwise used for LRU management. 80 80 * 81 - * PageActive The slab is used as a cpu cache. Allocations 82 - * may be performed from the slab. The slab is not 83 - * on any slab list and cannot be moved onto one. 84 - * The cpu slab may be equipped with an additioanl 81 + * PageActive The slab is frozen and exempt from list processing. 82 + * This means that the slab is dedicated to a purpose 83 + * such as satisfying allocations for a specific 84 + * processor. Objects may be freed in the slab while 85 + * it is frozen but slab_free will then skip the usual 86 + * list operations. It is up to the processor holding 87 + * the slab to integrate the slab into the slab lists 88 + * when the slab is no longer needed. 89 + * 90 + * One use of this flag is to mark slabs that are 91 + * used for allocations. Then such a slab becomes a cpu 92 + * slab. The cpu slab may be equipped with an additional 85 93 * lockless_freelist that allows lockless access to 86 94 * free objects in addition to the regular freelist 87 95 * that requires the slab lock. ··· 99 91 * the fast path and disables lockless freelists. 100 92 */ 101 93 94 + #define FROZEN (1 << PG_active) 95 + 96 + #ifdef CONFIG_SLUB_DEBUG 97 + #define SLABDEBUG (1 << PG_error) 98 + #else 99 + #define SLABDEBUG 0 100 + #endif 101 + 102 + static inline int SlabFrozen(struct page *page) 103 + { 104 + return page->flags & FROZEN; 105 + } 106 + 107 + static inline void SetSlabFrozen(struct page *page) 108 + { 109 + page->flags |= FROZEN; 110 + } 111 + 112 + static inline void ClearSlabFrozen(struct page *page) 113 + { 114 + page->flags &= ~FROZEN; 115 + } 116 + 102 117 static inline int SlabDebug(struct page *page) 103 118 { 104 - #ifdef CONFIG_SLUB_DEBUG 105 - return PageError(page); 106 - #else 107 - return 0; 108 - #endif 119 + return page->flags & SLABDEBUG; 109 120 } 110 121 111 122 static inline void SetSlabDebug(struct page *page) 112 123 { 113 - #ifdef CONFIG_SLUB_DEBUG 114 - SetPageError(page); 115 - #endif 124 + page->flags |= SLABDEBUG; 116 125 } 117 126 118 127 static inline void ClearSlabDebug(struct page *page) 119 128 { 120 - #ifdef CONFIG_SLUB_DEBUG 121 - ClearPageError(page); 122 - #endif 129 + page->flags &= ~SLABDEBUG; 123 130 } 124 131 125 132 /* ··· 742 719 return search == NULL; 743 720 } 744 721 722 + static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc) 723 + { 724 + if (s->flags & SLAB_TRACE) { 725 + printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 726 + s->name, 727 + alloc ? "alloc" : "free", 728 + object, page->inuse, 729 + page->freelist); 730 + 731 + if (!alloc) 732 + print_section("Object", (void *)object, s->objsize); 733 + 734 + dump_stack(); 735 + } 736 + } 737 + 745 738 /* 746 739 * Tracking of fully allocated slabs for debugging purposes. 
747 740 */ ··· 782 743 spin_unlock(&n->list_lock); 783 744 } 784 745 785 - static int alloc_object_checks(struct kmem_cache *s, struct page *page, 786 - void *object) 746 + static void setup_object_debug(struct kmem_cache *s, struct page *page, 747 + void *object) 748 + { 749 + if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) 750 + return; 751 + 752 + init_object(s, object, 0); 753 + init_tracking(s, object); 754 + } 755 + 756 + static int alloc_debug_processing(struct kmem_cache *s, struct page *page, 757 + void *object, void *addr) 787 758 { 788 759 if (!check_slab(s, page)) 789 760 goto bad; ··· 808 759 goto bad; 809 760 } 810 761 811 - if (!object) 812 - return 1; 813 - 814 - if (!check_object(s, page, object, 0)) 762 + if (object && !check_object(s, page, object, 0)) 815 763 goto bad; 816 764 765 + /* Success perform special debug activities for allocs */ 766 + if (s->flags & SLAB_STORE_USER) 767 + set_track(s, object, TRACK_ALLOC, addr); 768 + trace(s, page, object, 1); 769 + init_object(s, object, 1); 817 770 return 1; 771 + 818 772 bad: 819 773 if (PageSlab(page)) { 820 774 /* ··· 835 783 return 0; 836 784 } 837 785 838 - static int free_object_checks(struct kmem_cache *s, struct page *page, 839 - void *object) 786 + static int free_debug_processing(struct kmem_cache *s, struct page *page, 787 + void *object, void *addr) 840 788 { 841 789 if (!check_slab(s, page)) 842 790 goto fail; ··· 870 818 "to slab %s", object, page->slab->name); 871 819 goto fail; 872 820 } 821 + 822 + /* Special debug activities for freeing objects */ 823 + if (!SlabFrozen(page) && !page->freelist) 824 + remove_full(s, page); 825 + if (s->flags & SLAB_STORE_USER) 826 + set_track(s, object, TRACK_FREE, addr); 827 + trace(s, page, object, 0); 828 + init_object(s, object, 0); 873 829 return 1; 830 + 874 831 fail: 875 832 printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n", 876 833 s->name, page, object); 877 834 return 0; 878 - } 879 - 880 - static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc) 881 - { 882 - if (s->flags & SLAB_TRACE) { 883 - printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 884 - s->name, 885 - alloc ? "alloc" : "free", 886 - object, page->inuse, 887 - page->freelist); 888 - 889 - if (!alloc) 890 - print_section("Object", (void *)object, s->objsize); 891 - 892 - dump_stack(); 893 - } 894 835 } 895 836 896 837 static int __init setup_slub_debug(char *str) ··· 936 891 * On 32 bit platforms the limit is 256k. On 64bit platforms 937 892 * the limit is 512k. 938 893 * 939 - * Debugging or ctor/dtors may create a need to move the free 894 + * Debugging or ctor may create a need to move the free 940 895 * pointer. Fail if this happens. 
941 896 */ 942 897 if (s->size >= 65535 * sizeof(void *)) { 943 898 BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON | 944 899 SLAB_STORE_USER | SLAB_DESTROY_BY_RCU)); 945 - BUG_ON(s->ctor || s->dtor); 900 + BUG_ON(s->ctor); 946 901 } 947 902 else 948 903 /* ··· 954 909 s->flags |= slub_debug; 955 910 } 956 911 #else 912 + static inline void setup_object_debug(struct kmem_cache *s, 913 + struct page *page, void *object) {} 957 914 958 - static inline int alloc_object_checks(struct kmem_cache *s, 959 - struct page *page, void *object) { return 0; } 915 + static inline int alloc_debug_processing(struct kmem_cache *s, 916 + struct page *page, void *object, void *addr) { return 0; } 960 917 961 - static inline int free_object_checks(struct kmem_cache *s, 962 - struct page *page, void *object) { return 0; } 918 + static inline int free_debug_processing(struct kmem_cache *s, 919 + struct page *page, void *object, void *addr) { return 0; } 963 920 964 - static inline void add_full(struct kmem_cache_node *n, struct page *page) {} 965 - static inline void remove_full(struct kmem_cache *s, struct page *page) {} 966 - static inline void trace(struct kmem_cache *s, struct page *page, 967 - void *object, int alloc) {} 968 - static inline void init_object(struct kmem_cache *s, 969 - void *object, int active) {} 970 - static inline void init_tracking(struct kmem_cache *s, void *object) {} 971 921 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 972 922 { return 1; } 973 923 static inline int check_object(struct kmem_cache *s, struct page *page, 974 924 void *object, int active) { return 1; } 975 - static inline void set_track(struct kmem_cache *s, void *object, 976 - enum track_item alloc, void *addr) {} 925 + static inline void add_full(struct kmem_cache_node *n, struct page *page) {} 977 926 static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {} 978 927 #define slub_debug 0 979 928 #endif ··· 1004 965 static void setup_object(struct kmem_cache *s, struct page *page, 1005 966 void *object) 1006 967 { 1007 - if (SlabDebug(page)) { 1008 - init_object(s, object, 0); 1009 - init_tracking(s, object); 1010 - } 1011 - 968 + setup_object_debug(s, page, object); 1012 969 if (unlikely(s->ctor)) 1013 - s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR); 970 + s->ctor(object, s, 0); 1014 971 } 1015 972 1016 973 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) ··· 1065 1030 { 1066 1031 int pages = 1 << s->order; 1067 1032 1068 - if (unlikely(SlabDebug(page) || s->dtor)) { 1033 + if (unlikely(SlabDebug(page))) { 1069 1034 void *p; 1070 1035 1071 1036 slab_pad_check(s, page); 1072 - for_each_object(p, s, page_address(page)) { 1073 - if (s->dtor) 1074 - s->dtor(p, s, 0); 1037 + for_each_object(p, s, page_address(page)) 1075 1038 check_object(s, page, p, 0); 1076 - } 1077 1039 } 1078 1040 1079 1041 mod_zone_page_state(page_zone(page), ··· 1170 1138 * 1171 1139 * Must hold list_lock. 
1172 1140 */ 1173 - static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page) 1141 + static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page) 1174 1142 { 1175 1143 if (slab_trylock(page)) { 1176 1144 list_del(&page->lru); 1177 1145 n->nr_partial--; 1146 + SetSlabFrozen(page); 1178 1147 return 1; 1179 1148 } 1180 1149 return 0; ··· 1199 1166 1200 1167 spin_lock(&n->list_lock); 1201 1168 list_for_each_entry(page, &n->partial, lru) 1202 - if (lock_and_del_slab(n, page)) 1169 + if (lock_and_freeze_slab(n, page)) 1203 1170 goto out; 1204 1171 page = NULL; 1205 1172 out: ··· 1278 1245 * 1279 1246 * On exit the slab lock will have been dropped. 1280 1247 */ 1281 - static void putback_slab(struct kmem_cache *s, struct page *page) 1248 + static void unfreeze_slab(struct kmem_cache *s, struct page *page) 1282 1249 { 1283 1250 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1284 1251 1252 + ClearSlabFrozen(page); 1285 1253 if (page->inuse) { 1286 1254 1287 1255 if (page->freelist) ··· 1333 1299 page->inuse--; 1334 1300 } 1335 1301 s->cpu_slab[cpu] = NULL; 1336 - ClearPageActive(page); 1337 - 1338 - putback_slab(s, page); 1302 + unfreeze_slab(s, page); 1339 1303 } 1340 1304 1341 1305 static void flush_slab(struct kmem_cache *s, struct page *page, int cpu) ··· 1424 1392 new_slab: 1425 1393 page = get_partial(s, gfpflags, node); 1426 1394 if (page) { 1427 - have_slab: 1428 1395 s->cpu_slab[cpu] = page; 1429 - SetPageActive(page); 1430 1396 goto load_freelist; 1431 1397 } 1432 1398 ··· 1454 1424 flush_slab(s, s->cpu_slab[cpu], cpu); 1455 1425 } 1456 1426 slab_lock(page); 1457 - goto have_slab; 1427 + SetSlabFrozen(page); 1428 + s->cpu_slab[cpu] = page; 1429 + goto load_freelist; 1458 1430 } 1459 1431 return NULL; 1460 1432 debug: 1461 1433 object = page->freelist; 1462 - if (!alloc_object_checks(s, page, object)) 1434 + if (!alloc_debug_processing(s, page, object, addr)) 1463 1435 goto another_slab; 1464 - if (s->flags & SLAB_STORE_USER) 1465 - set_track(s, object, TRACK_ALLOC, addr); 1466 - trace(s, page, object, 1); 1467 - init_object(s, object, 1); 1468 1436 1469 1437 page->inuse++; 1470 1438 page->freelist = object[page->offset]; ··· 1539 1511 page->freelist = object; 1540 1512 page->inuse--; 1541 1513 1542 - if (unlikely(PageActive(page))) 1543 - /* 1544 - * Cpu slabs are never on partial lists and are 1545 - * never freed. 1546 - */ 1514 + if (unlikely(SlabFrozen(page))) 1547 1515 goto out_unlock; 1548 1516 1549 1517 if (unlikely(!page->inuse)) ··· 1569 1545 return; 1570 1546 1571 1547 debug: 1572 - if (!free_object_checks(s, page, x)) 1548 + if (!free_debug_processing(s, page, x, addr)) 1573 1549 goto out_unlock; 1574 - if (!PageActive(page) && !page->freelist) 1575 - remove_full(s, page); 1576 - if (s->flags & SLAB_STORE_USER) 1577 - set_track(s, x, TRACK_FREE, addr); 1578 - trace(s, page, object, 0); 1579 - init_object(s, object, 0); 1580 1550 goto checks_ok; 1581 1551 } 1582 1552 ··· 1807 1789 page->freelist = get_freepointer(kmalloc_caches, n); 1808 1790 page->inuse++; 1809 1791 kmalloc_caches->node[node] = n; 1810 - init_object(kmalloc_caches, n, 1); 1792 + setup_object_debug(kmalloc_caches, page, n); 1811 1793 init_kmem_cache_node(n); 1812 1794 atomic_long_inc(&n->nr_slabs); 1813 1795 add_partial(n, page); ··· 1889 1871 * then we should never poison the object itself. 
1890 1872 */ 1891 1873 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && 1892 - !s->ctor && !s->dtor) 1874 + !s->ctor) 1893 1875 s->flags |= __OBJECT_POISON; 1894 1876 else 1895 1877 s->flags &= ~__OBJECT_POISON; ··· 1919 1901 1920 1902 #ifdef CONFIG_SLUB_DEBUG 1921 1903 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || 1922 - s->ctor || s->dtor)) { 1904 + s->ctor)) { 1923 1905 /* 1924 1906 * Relocate free pointer after the object if it is not 1925 1907 * permitted to overwrite the first word of the object on ··· 1988 1970 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, 1989 1971 const char *name, size_t size, 1990 1972 size_t align, unsigned long flags, 1991 - void (*ctor)(void *, struct kmem_cache *, unsigned long), 1992 - void (*dtor)(void *, struct kmem_cache *, unsigned long)) 1973 + void (*ctor)(void *, struct kmem_cache *, unsigned long)) 1993 1974 { 1994 1975 memset(s, 0, kmem_size); 1995 1976 s->name = name; 1996 1977 s->ctor = ctor; 1997 - s->dtor = dtor; 1998 1978 s->objsize = size; 1999 1979 s->flags = flags; 2000 1980 s->align = align; ··· 2177 2161 2178 2162 down_write(&slub_lock); 2179 2163 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, 2180 - flags, NULL, NULL)) 2164 + flags, NULL)) 2181 2165 goto panic; 2182 2166 2183 2167 list_add(&s->list, &slab_caches); ··· 2479 2463 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) 2480 2464 return 1; 2481 2465 2482 - if (s->ctor || s->dtor) 2466 + if (s->ctor) 2483 2467 return 1; 2484 2468 2485 2469 return 0; ··· 2487 2471 2488 2472 static struct kmem_cache *find_mergeable(size_t size, 2489 2473 size_t align, unsigned long flags, 2490 - void (*ctor)(void *, struct kmem_cache *, unsigned long), 2491 - void (*dtor)(void *, struct kmem_cache *, unsigned long)) 2474 + void (*ctor)(void *, struct kmem_cache *, unsigned long)) 2492 2475 { 2493 2476 struct list_head *h; 2494 2477 2495 2478 if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) 2496 2479 return NULL; 2497 2480 2498 - if (ctor || dtor) 2481 + if (ctor) 2499 2482 return NULL; 2500 2483 2501 2484 size = ALIGN(size, sizeof(void *)); ··· 2536 2521 { 2537 2522 struct kmem_cache *s; 2538 2523 2524 + BUG_ON(dtor); 2539 2525 down_write(&slub_lock); 2540 - s = find_mergeable(size, align, flags, ctor, dtor); 2526 + s = find_mergeable(size, align, flags, ctor); 2541 2527 if (s) { 2542 2528 s->refcount++; 2543 2529 /* ··· 2552 2536 } else { 2553 2537 s = kmalloc(kmem_size, GFP_KERNEL); 2554 2538 if (s && kmem_cache_open(s, GFP_KERNEL, name, 2555 - size, align, flags, ctor, dtor)) { 2539 + size, align, flags, ctor)) { 2556 2540 if (sysfs_slab_add(s)) { 2557 2541 kfree(s); 2558 2542 goto err; ··· 3193 3177 } 3194 3178 SLAB_ATTR_RO(ctor); 3195 3179 3196 - static ssize_t dtor_show(struct kmem_cache *s, char *buf) 3197 - { 3198 - if (s->dtor) { 3199 - int n = sprint_symbol(buf, (unsigned long)s->dtor); 3200 - 3201 - return n + sprintf(buf + n, "\n"); 3202 - } 3203 - return 0; 3204 - } 3205 - SLAB_ATTR_RO(dtor); 3206 - 3207 3180 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 3208 3181 { 3209 3182 return sprintf(buf, "%d\n", s->refcount - 1); ··· 3424 3419 &partial_attr.attr, 3425 3420 &cpu_slabs_attr.attr, 3426 3421 &ctor_attr.attr, 3427 - &dtor_attr.attr, 3428 3422 &aliases_attr.attr, 3429 3423 &align_attr.attr, 3430 3424 &sanity_checks_attr.attr,
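Two things fall out of the slub.c changes above: the scattered debug hooks are folded into alloc_debug_processing()/free_debug_processing(), and a cpu slab is now marked with an explicit FROZEN flag (overlaid on PG_active) instead of ad-hoc PageActive() tests, so the free path can simply skip list maintenance for frozen slabs. A simplified sketch of the resulting structure (illustrative only; locking and list handling omitted, not the literal slub.c code):

/* Simplified sketch of the slow free path after this change. */
static void sketch_slab_free(struct kmem_cache *s, struct page *page,
			     void *object, void *addr)
{
	if (SlabDebug(page) && !free_debug_processing(s, page, object, addr))
		return;			/* bad free detected, leave object alone */

	/* ... link object back into page->freelist ... */

	if (SlabFrozen(page))
		return;		/* the owning cpu will fix up list state later */

	/* ... otherwise adjust the partial/full lists as before ... */
}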
+1 -1
mm/vmalloc.c
··· 311 311 return v;
312 312 }
313 313 
314 - void __vunmap(void *addr, int deallocate_pages)
314 + static void __vunmap(void *addr, int deallocate_pages)
315 315 {
316 316 struct vm_struct *area;
317 317 
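With __vunmap() now static, code outside mm/vmalloc.c goes through the exported wrappers: vmalloc() pairs with vfree(), and vmap() pairs with vunmap(). A small illustrative user (hypothetical function, not from this diff):

/* Hypothetical example of the exported pairings. */
static int example_mappings(struct page **pages, unsigned int nr_pages)
{
	void *buf = vmalloc(4 * PAGE_SIZE);
	void *win = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);

	if (!buf || !win) {
		if (win)
			vunmap(win);
		if (buf)
			vfree(buf);
		return -ENOMEM;
	}

	/* ... use the mappings ... */

	vunmap(win);
	vfree(buf);
	return 0;
}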
+1 -1
net/ipx/af_ipx.c
··· 87 87 unsigned char *node); 88 88 extern void ipxrtr_del_routes(struct ipx_interface *intrfc); 89 89 extern int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, 90 - struct iovec *iov, int len, int noblock); 90 + struct iovec *iov, size_t len, int noblock); 91 91 extern int ipxrtr_route_skb(struct sk_buff *skb); 92 92 extern struct ipx_route *ipxrtr_lookup(__be32 net); 93 93 extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
+1 -2
net/socket.c
··· 261 261 { 262 262 struct socket_alloc *ei = (struct socket_alloc *)foo; 263 263 264 - if (flags & SLAB_CTOR_CONSTRUCTOR) 265 - inode_init_once(&ei->vfs_inode); 264 + inode_init_once(&ei->vfs_inode); 266 265 } 267 266 268 267 static int init_inodecache(void)
+11 -13
net/sunrpc/rpc_pipe.c
··· 828 828 { 829 829 struct rpc_inode *rpci = (struct rpc_inode *) foo; 830 830 831 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 832 - inode_init_once(&rpci->vfs_inode); 833 - rpci->private = NULL; 834 - rpci->nreaders = 0; 835 - rpci->nwriters = 0; 836 - INIT_LIST_HEAD(&rpci->in_upcall); 837 - INIT_LIST_HEAD(&rpci->pipe); 838 - rpci->pipelen = 0; 839 - init_waitqueue_head(&rpci->waitq); 840 - INIT_DELAYED_WORK(&rpci->queue_timeout, 841 - rpc_timeout_upcall_queue); 842 - rpci->ops = NULL; 843 - } 831 + inode_init_once(&rpci->vfs_inode); 832 + rpci->private = NULL; 833 + rpci->nreaders = 0; 834 + rpci->nwriters = 0; 835 + INIT_LIST_HEAD(&rpci->in_upcall); 836 + INIT_LIST_HEAD(&rpci->pipe); 837 + rpci->pipelen = 0; 838 + init_waitqueue_head(&rpci->waitq); 839 + INIT_DELAYED_WORK(&rpci->queue_timeout, 840 + rpc_timeout_upcall_queue); 841 + rpci->ops = NULL; 844 842 } 845 843 846 844 int register_rpc_pipefs(void)