Merge branch 'master' of /home/trondmy/repositories/git/linux-2.6/

+650 -672
+80 -47
Documentation/DocBook/kernel-locking.tmpl
··· 551 <function>spin_lock_irqsave()</function>, which is a superset 552 of all other spinlock primitives. 553 </para> 554 <table> 555 <title>Table of Locking Requirements</title> 556 <tgroup cols="11"> 557 <tbody> 558 <row> 559 <entry></entry> 560 <entry>IRQ Handler A</entry> ··· 578 579 <row> 580 <entry>IRQ Handler B</entry> 581 - <entry>spin_lock_irqsave</entry> 582 <entry>None</entry> 583 </row> 584 585 <row> 586 <entry>Softirq A</entry> 587 - <entry>spin_lock_irq</entry> 588 - <entry>spin_lock_irq</entry> 589 - <entry>spin_lock</entry> 590 </row> 591 592 <row> 593 <entry>Softirq B</entry> 594 - <entry>spin_lock_irq</entry> 595 - <entry>spin_lock_irq</entry> 596 - <entry>spin_lock</entry> 597 - <entry>spin_lock</entry> 598 </row> 599 600 <row> 601 <entry>Tasklet A</entry> 602 - <entry>spin_lock_irq</entry> 603 - <entry>spin_lock_irq</entry> 604 - <entry>spin_lock</entry> 605 - <entry>spin_lock</entry> 606 <entry>None</entry> 607 </row> 608 609 <row> 610 <entry>Tasklet B</entry> 611 - <entry>spin_lock_irq</entry> 612 - <entry>spin_lock_irq</entry> 613 - <entry>spin_lock</entry> 614 - <entry>spin_lock</entry> 615 - <entry>spin_lock</entry> 616 <entry>None</entry> 617 </row> 618 619 <row> 620 <entry>Timer A</entry> 621 - <entry>spin_lock_irq</entry> 622 - <entry>spin_lock_irq</entry> 623 - <entry>spin_lock</entry> 624 - <entry>spin_lock</entry> 625 - <entry>spin_lock</entry> 626 - <entry>spin_lock</entry> 627 <entry>None</entry> 628 </row> 629 630 <row> 631 <entry>Timer B</entry> 632 - <entry>spin_lock_irq</entry> 633 - <entry>spin_lock_irq</entry> 634 - <entry>spin_lock</entry> 635 - <entry>spin_lock</entry> 636 - <entry>spin_lock</entry> 637 - <entry>spin_lock</entry> 638 - <entry>spin_lock</entry> 639 <entry>None</entry> 640 </row> 641 642 <row> 643 <entry>User Context A</entry> 644 - <entry>spin_lock_irq</entry> 645 - <entry>spin_lock_irq</entry> 646 - <entry>spin_lock_bh</entry> 647 - <entry>spin_lock_bh</entry> 648 - <entry>spin_lock_bh</entry> 649 - <entry>spin_lock_bh</entry> 650 - <entry>spin_lock_bh</entry> 651 - <entry>spin_lock_bh</entry> 652 <entry>None</entry> 653 </row> 654 655 <row> 656 <entry>User Context B</entry> 657 - <entry>spin_lock_irq</entry> 658 - <entry>spin_lock_irq</entry> 659 - <entry>spin_lock_bh</entry> 660 - <entry>spin_lock_bh</entry> 661 - <entry>spin_lock_bh</entry> 662 - <entry>spin_lock_bh</entry> 663 - <entry>spin_lock_bh</entry> 664 - <entry>spin_lock_bh</entry> 665 - <entry>down_interruptible</entry> 666 <entry>None</entry> 667 </row> 668 669 </tbody> 670 </tgroup> 671 </table> 672 </sect1> 673 </chapter> 674
··· 551 <function>spin_lock_irqsave()</function>, which is a superset 552 of all other spinlock primitives. 553 </para> 554 + 555 <table> 556 <title>Table of Locking Requirements</title> 557 <tgroup cols="11"> 558 <tbody> 559 + 560 <row> 561 <entry></entry> 562 <entry>IRQ Handler A</entry> ··· 576 577 <row> 578 <entry>IRQ Handler B</entry> 579 + <entry>SLIS</entry> 580 <entry>None</entry> 581 </row> 582 583 <row> 584 <entry>Softirq A</entry> 585 + <entry>SLI</entry> 586 + <entry>SLI</entry> 587 + <entry>SL</entry> 588 </row> 589 590 <row> 591 <entry>Softirq B</entry> 592 + <entry>SLI</entry> 593 + <entry>SLI</entry> 594 + <entry>SL</entry> 595 + <entry>SL</entry> 596 </row> 597 598 <row> 599 <entry>Tasklet A</entry> 600 + <entry>SLI</entry> 601 + <entry>SLI</entry> 602 + <entry>SL</entry> 603 + <entry>SL</entry> 604 <entry>None</entry> 605 </row> 606 607 <row> 608 <entry>Tasklet B</entry> 609 + <entry>SLI</entry> 610 + <entry>SLI</entry> 611 + <entry>SL</entry> 612 + <entry>SL</entry> 613 + <entry>SL</entry> 614 <entry>None</entry> 615 </row> 616 617 <row> 618 <entry>Timer A</entry> 619 + <entry>SLI</entry> 620 + <entry>SLI</entry> 621 + <entry>SL</entry> 622 + <entry>SL</entry> 623 + <entry>SL</entry> 624 + <entry>SL</entry> 625 <entry>None</entry> 626 </row> 627 628 <row> 629 <entry>Timer B</entry> 630 + <entry>SLI</entry> 631 + <entry>SLI</entry> 632 + <entry>SL</entry> 633 + <entry>SL</entry> 634 + <entry>SL</entry> 635 + <entry>SL</entry> 636 + <entry>SL</entry> 637 <entry>None</entry> 638 </row> 639 640 <row> 641 <entry>User Context A</entry> 642 + <entry>SLI</entry> 643 + <entry>SLI</entry> 644 + <entry>SLBH</entry> 645 + <entry>SLBH</entry> 646 + <entry>SLBH</entry> 647 + <entry>SLBH</entry> 648 + <entry>SLBH</entry> 649 + <entry>SLBH</entry> 650 <entry>None</entry> 651 </row> 652 653 <row> 654 <entry>User Context B</entry> 655 + <entry>SLI</entry> 656 + <entry>SLI</entry> 657 + <entry>SLBH</entry> 658 + <entry>SLBH</entry> 659 + <entry>SLBH</entry> 660 + <entry>SLBH</entry> 661 + <entry>SLBH</entry> 662 + <entry>SLBH</entry> 663 + <entry>DI</entry> 664 <entry>None</entry> 665 </row> 666 667 </tbody> 668 </tgroup> 669 </table> 670 + 671 + <table> 672 + <title>Legend for Locking Requirements Table</title> 673 + <tgroup cols="2"> 674 + <tbody> 675 + 676 + <row> 677 + <entry>SLIS</entry> 678 + <entry>spin_lock_irqsave</entry> 679 + </row> 680 + <row> 681 + <entry>SLI</entry> 682 + <entry>spin_lock_irq</entry> 683 + </row> 684 + <row> 685 + <entry>SL</entry> 686 + <entry>spin_lock</entry> 687 + </row> 688 + <row> 689 + <entry>SLBH</entry> 690 + <entry>spin_lock_bh</entry> 691 + </row> 692 + <row> 693 + <entry>DI</entry> 694 + <entry>down_interruptible</entry> 695 + </row> 696 + 697 + </tbody> 698 + </tgroup> 699 + </table> 700 + 701 </sect1> 702 </chapter> 703
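The legend table added above maps the abbreviations back to the locking primitives (SLIS, SLI, SL, SLBH, DI). As a minimal sketch, not taken from this patch, of the strongest case the table covers, here is user context sharing a counter with an IRQ handler; the lock, counter, and handler names are all made up.

/* Minimal sketch (all names hypothetical): a counter shared between an
 * IRQ handler and user context.  User context uses spin_lock_irqsave(),
 * the superset primitive the paragraph above recommends; the handler,
 * being the only interrupt-context user of the lock, takes plain
 * spin_lock().
 */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_events;

static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
        spin_lock(&demo_lock);
        demo_events++;
        spin_unlock(&demo_lock);
        return IRQ_HANDLED;
}

static unsigned long demo_read_events(void)     /* user context */
{
        unsigned long flags, n;

        spin_lock_irqsave(&demo_lock, flags);
        n = demo_events;
        spin_unlock_irqrestore(&demo_lock, flags);
        return n;
}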
+6 -2
Documentation/gpio.txt
··· 111 112 The return value is zero for success, else a negative errno. It should 113 be checked, since the get/set calls don't have error returns and since 114 - misconfiguration is possible. (These calls could sleep.) 115 116 For output GPIOs, the value provided becomes the initial output value. 117 This helps avoid signal glitching during system startup. ··· 199 200 Passing invalid GPIO numbers to gpio_request() will fail, as will requesting 201 GPIOs that have already been claimed with that call. The return value of 202 - gpio_request() must be checked. (These calls could sleep.) 203 204 These calls serve two basic purposes. One is marking the signals which 205 are actually in use as GPIOs, for better diagnostics; systems may have
··· 111 112 The return value is zero for success, else a negative errno. It should 113 be checked, since the get/set calls don't have error returns and since 114 + misconfiguration is possible. You should normally issue these calls from 115 + a task context. However, for spinlock-safe GPIOs it's OK to use them 116 + before tasking is enabled, as part of early board setup. 117 118 For output GPIOs, the value provided becomes the initial output value. 119 This helps avoid signal glitching during system startup. ··· 197 198 Passing invalid GPIO numbers to gpio_request() will fail, as will requesting 199 GPIOs that have already been claimed with that call. The return value of 200 + gpio_request() must be checked. You should normally issue these calls from 201 + a task context. However, for spinlock-safe GPIOs it's OK to request GPIOs 202 + before tasking is enabled, as part of early board setup. 203 204 These calls serve two basic purposes. One is marking the signals which 205 are actually in use as GPIOs, for better diagnostics; systems may have
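A hedged sketch of the task-context usage both amended paragraphs describe; the GPIO number and label are made up, and only calls documented in this file are used.

/* Hypothetical example: request a GPIO and configure it as an output
 * from task context (e.g. driver probe), checking every return value.
 * LED_GPIO and the "demo-led" label are invented for illustration.
 */
#include <asm/gpio.h>

#define LED_GPIO        42

static int demo_led_setup(void)
{
        int err;

        err = gpio_request(LED_GPIO, "demo-led");
        if (err)
                return err;

        /* the initial value of 0 avoids glitching the signal at startup */
        err = gpio_direction_output(LED_GPIO, 0);
        if (err)
                gpio_free(LED_GPIO);

        return err;
}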
+12 -5
Documentation/vm/slabinfo.c
··· 242 243 memset(numa, 0, MAX_NODES * sizeof(int)); 244 245 while (*t == 'N') { 246 t++; 247 node = strtoul(t, &t, 10); ··· 389 { 390 if (strcmp(s->name, "*") == 0) 391 return; 392 - printf("\nSlabcache: %-20s Aliases: %2d Order : %2d\n", s->name, s->aliases, s->order); 393 if (s->hwcache_align) 394 printf("** Hardware cacheline aligned\n"); 395 if (s->cache_dma) ··· 796 797 store_size(b1, total_size);store_size(b2, total_waste); 798 store_size(b3, total_waste * 100 / total_used); 799 - printf("Memory used: %6s # Loss : %6s MRatio: %6s%%\n", b1, b2, b3); 800 801 store_size(b1, total_objects);store_size(b2, total_partobj); 802 store_size(b3, total_partobj * 100 / total_objects); 803 - printf("# Objects : %6s # PartObj: %6s ORatio: %6s%%\n", b1, b2, b3); 804 805 printf("\n"); 806 printf("Per Cache Average Min Max Total\n"); ··· 823 store_size(b1, avg_ppart);store_size(b2, min_ppart); 824 store_size(b3, max_ppart); 825 store_size(b4, total_partial * 100 / total_slabs); 826 - printf("%%PartSlab %10s%% %10s%% %10s%% %10s%%\n", 827 b1, b2, b3, b4); 828 829 store_size(b1, avg_partobj);store_size(b2, min_partobj); ··· 835 store_size(b1, avg_ppartobj);store_size(b2, min_ppartobj); 836 store_size(b3, max_ppartobj); 837 store_size(b4, total_partobj * 100 / total_objects); 838 - printf("%% PartObj %10s%% %10s%% %10s%% %10s%%\n", 839 b1, b2, b3, b4); 840 841 store_size(b1, avg_size);store_size(b2, min_size); ··· 1105 ops(slab); 1106 else if (show_slab) 1107 slabcache(slab); 1108 } 1109 } 1110
··· 242 243 memset(numa, 0, MAX_NODES * sizeof(int)); 244 245 + if (!t) 246 + return; 247 + 248 while (*t == 'N') { 249 t++; 250 node = strtoul(t, &t, 10); ··· 386 { 387 if (strcmp(s->name, "*") == 0) 388 return; 389 + 390 + printf("\nSlabcache: %-20s Aliases: %2d Order : %2d Objects: %d\n", 391 + s->name, s->aliases, s->order, s->objects); 392 if (s->hwcache_align) 393 printf("** Hardware cacheline aligned\n"); 394 if (s->cache_dma) ··· 791 792 store_size(b1, total_size);store_size(b2, total_waste); 793 store_size(b3, total_waste * 100 / total_used); 794 + printf("Memory used: %6s # Loss : %6s MRatio:%6s%%\n", b1, b2, b3); 795 796 store_size(b1, total_objects);store_size(b2, total_partobj); 797 store_size(b3, total_partobj * 100 / total_objects); 798 + printf("# Objects : %6s # PartObj: %6s ORatio:%6s%%\n", b1, b2, b3); 799 800 printf("\n"); 801 printf("Per Cache Average Min Max Total\n"); ··· 818 store_size(b1, avg_ppart);store_size(b2, min_ppart); 819 store_size(b3, max_ppart); 820 store_size(b4, total_partial * 100 / total_slabs); 821 + printf("%%PartSlab%10s%% %10s%% %10s%% %10s%%\n", 822 b1, b2, b3, b4); 823 824 store_size(b1, avg_partobj);store_size(b2, min_partobj); ··· 830 store_size(b1, avg_ppartobj);store_size(b2, min_ppartobj); 831 store_size(b3, max_ppartobj); 832 store_size(b4, total_partobj * 100 / total_objects); 833 + printf("%% PartObj%10s%% %10s%% %10s%% %10s%%\n", 834 b1, b2, b3, b4); 835 836 store_size(b1, avg_size);store_size(b2, min_size); ··· 1100 ops(slab); 1101 else if (show_slab) 1102 slabcache(slab); 1103 + else if (show_report) 1104 + report(slab); 1105 } 1106 } 1107
+2 -2
MAINTAINERS
··· 2689 S: Maintained 2690 2691 PARALLEL PORT SUPPORT 2692 - L: linux-parport@lists.infradead.org 2693 S: Orphan 2694 2695 PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES 2696 P: Tim Waugh 2697 M: tim@cyberelk.net 2698 - L: linux-parport@lists.infradead.org 2699 W: http://www.torque.net/linux-pp.html 2700 S: Maintained 2701
··· 2689 S: Maintained 2690 2691 PARALLEL PORT SUPPORT 2692 + L: linux-parport@lists.infradead.org (subscribers-only) 2693 S: Orphan 2694 2695 PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES 2696 P: Tim Waugh 2697 M: tim@cyberelk.net 2698 + L: linux-parport@lists.infradead.org (subscribers-only) 2699 W: http://www.torque.net/linux-pp.html 2700 S: Maintained 2701
-8
arch/blackfin/Kconfig
··· 560 561 source "mm/Kconfig" 562 563 - config LARGE_ALLOCS 564 - bool "Allow allocating large blocks (> 1MB) of memory" 565 - help 566 - Allow the slab memory allocator to keep chains for very large 567 - memory sizes - upto 32MB. You may need this if your system has 568 - a lot of RAM, and you need to able to allocate very large 569 - contiguous chunks. If unsure, say N. 570 - 571 config BFIN_DMA_5XX 572 bool "Enable DMA Support" 573 depends on (BF533 || BF532 || BF531 || BF537 || BF536 || BF534 || BF561)
··· 560 561 source "mm/Kconfig" 562 563 config BFIN_DMA_5XX 564 bool "Enable DMA Support" 565 depends on (BF533 || BF532 || BF531 || BF537 || BF536 || BF534 || BF561)
-8
arch/frv/Kconfig
··· 102 with a lot of RAM, this can be wasteful of precious low memory. 103 Setting this option will put user-space page tables in high memory. 104 105 - config LARGE_ALLOCS 106 - bool "Allow allocating large blocks (> 1MB) of memory" 107 - help 108 - Allow the slab memory allocator to keep chains for very large memory 109 - sizes - up to 32MB. You may need this if your system has a lot of 110 - RAM, and you need to able to allocate very large contiguous chunks. 111 - If unsure, say N. 112 - 113 source "mm/Kconfig" 114 115 choice
··· 102 with a lot of RAM, this can be wasteful of precious low memory. 103 Setting this option will put user-space page tables in high memory. 104 105 source "mm/Kconfig" 106 107 choice
+1 -1
arch/i386/kernel/cpu/mtrr/generic.c
··· 78 } 79 80 /* Grab all of the MTRR state for this CPU into *state */ 81 - void __init get_mtrr_state(void) 82 { 83 unsigned int i; 84 struct mtrr_var_range *vrs;
··· 78 } 79 80 /* Grab all of the MTRR state for this CPU into *state */ 81 + void get_mtrr_state(void) 82 { 83 unsigned int i; 84 struct mtrr_var_range *vrs;
+1 -1
arch/i386/kernel/cpu/mtrr/main.c
··· 639 * initialized (i.e. before smp_init()). 640 * 641 */ 642 - void __init mtrr_bp_init(void) 643 { 644 init_ifs(); 645
··· 639 * initialized (i.e. before smp_init()). 640 * 641 */ 642 + void mtrr_bp_init(void) 643 { 644 init_ifs(); 645
+1 -1
arch/i386/kernel/smp.c
··· 421 } 422 if (!cpus_empty(cpu_mask)) 423 flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); 424 - check_pgt_cache(); 425 preempt_enable(); 426 } 427
··· 421 } 422 if (!cpus_empty(cpu_mask)) 423 flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); 424 + 425 preempt_enable(); 426 } 427
-8
arch/m68knommu/Kconfig
··· 470 default y 471 depends on (AVNET5282) 472 473 - config LARGE_ALLOCS 474 - bool "Allow allocating large blocks (> 1MB) of memory" 475 - help 476 - Allow the slab memory allocator to keep chains for very large 477 - memory sizes - upto 32MB. You may need this if your system has 478 - a lot of RAM, and you need to able to allocate very large 479 - contiguous chunks. If unsure, say N. 480 - 481 config 4KSTACKS 482 bool "Use 4Kb for kernel stacks instead of 8Kb" 483 default y
··· 470 default y 471 depends on (AVNET5282) 472 473 config 4KSTACKS 474 bool "Use 4Kb for kernel stacks instead of 8Kb" 475 default y
+1 -3
arch/powerpc/platforms/cell/spufs/inode.c
··· 71 { 72 struct spufs_inode_info *ei = p; 73 74 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 75 - inode_init_once(&ei->vfs_inode); 76 - } 77 } 78 79 static struct inode *
··· 71 { 72 struct spufs_inode_info *ei = p; 73 74 + inode_init_once(&ei->vfs_inode); 75 } 76 77 static struct inode *
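This is the first of many hunks in this merge that simply drop the SLAB_CTOR_CONSTRUCTOR test; a sketch of the constructor shape they all converge on, using a hypothetical inode type:

/* Sketch (foo_inode_info is hypothetical): with SLAB_CTOR_CONSTRUCTOR
 * gone, the slab constructor is called exactly once per object, so the
 * flag test and the extra level of indentation disappear.
 */
static void foo_init_once(void *p, struct kmem_cache *cachep,
                          unsigned long flags)
{
        struct foo_inode_info *ei = p;

        inode_init_once(&ei->vfs_inode);
}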
-8
arch/v850/Kconfig
··· 240 config RESET_GUARD 241 bool "Reset Guard" 242 243 - config LARGE_ALLOCS 244 - bool "Allow allocating large blocks (> 1MB) of memory" 245 - help 246 - Allow the slab memory allocator to keep chains for very large 247 - memory sizes - upto 32MB. You may need this if your system has 248 - a lot of RAM, and you need to able to allocate very large 249 - contiguous chunks. If unsure, say N. 250 - 251 source "mm/Kconfig" 252 253 endmenu
··· 240 config RESET_GUARD 241 bool "Reset Guard" 242 243 source "mm/Kconfig" 244 245 endmenu
+4 -4
drivers/acpi/numa.c
··· 40 #define NID_INVAL -1 41 42 /* maps to convert between proximity domain and logical node ID */ 43 - int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS] 44 = { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL }; 45 - int __cpuinitdata node_to_pxm_map[MAX_NUMNODES] 46 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL }; 47 48 - int __cpuinit pxm_to_node(int pxm) 49 { 50 if (pxm < 0) 51 return NID_INVAL; 52 return pxm_to_node_map[pxm]; 53 } 54 55 - int __cpuinit node_to_pxm(int node) 56 { 57 if (node < 0) 58 return PXM_INVAL;
··· 40 #define NID_INVAL -1 41 42 /* maps to convert between proximity domain and logical node ID */ 43 + static int pxm_to_node_map[MAX_PXM_DOMAINS] 44 = { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL }; 45 + static int node_to_pxm_map[MAX_NUMNODES] 46 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL }; 47 48 + int pxm_to_node(int pxm) 49 { 50 if (pxm < 0) 51 return NID_INVAL; 52 return pxm_to_node_map[pxm]; 53 } 54 55 + int node_to_pxm(int node) 56 { 57 if (node < 0) 58 return PXM_INVAL;
-3
drivers/mtd/ubi/eba.c
··· 940 { 941 struct ltree_entry *le = obj; 942 943 - if (flags & SLAB_CTOR_CONSTRUCTOR) 944 - return; 945 - 946 le->users = 0; 947 init_rwsem(&le->mutex); 948 }
··· 940 { 941 struct ltree_entry *le = obj; 942 943 le->users = 0; 944 init_rwsem(&le->mutex); 945 }
+3 -3
drivers/rtc/Kconfig
··· 59 depends on RTC_CLASS 60 61 config RTC_INTF_SYSFS 62 - boolean "sysfs" 63 depends on RTC_CLASS && SYSFS 64 default RTC_CLASS 65 help ··· 70 will be called rtc-sysfs. 71 72 config RTC_INTF_PROC 73 - boolean "proc" 74 depends on RTC_CLASS && PROC_FS 75 default RTC_CLASS 76 help ··· 82 will be called rtc-proc. 83 84 config RTC_INTF_DEV 85 - boolean "dev" 86 depends on RTC_CLASS 87 default RTC_CLASS 88 help
··· 59 depends on RTC_CLASS 60 61 config RTC_INTF_SYSFS 62 + boolean "/sys/class/rtc/rtcN (sysfs)" 63 depends on RTC_CLASS && SYSFS 64 default RTC_CLASS 65 help ··· 70 will be called rtc-sysfs. 71 72 config RTC_INTF_PROC 73 + boolean "/proc/driver/rtc (procfs for rtc0)" 74 depends on RTC_CLASS && PROC_FS 75 default RTC_CLASS 76 help ··· 82 will be called rtc-proc. 83 84 config RTC_INTF_DEV 85 + boolean "/dev/rtcN (character devices)" 86 depends on RTC_CLASS 87 default RTC_CLASS 88 help
+2 -2
drivers/rtc/rtc-omap.c
··· 371 goto fail; 372 } 373 platform_set_drvdata(pdev, rtc); 374 - dev_set_devdata(&rtc->dev, mem); 375 376 /* clear pending irqs, and set 1/second periodic, 377 * which we'll use instead of update irqs ··· 453 free_irq(omap_rtc_timer, rtc); 454 free_irq(omap_rtc_alarm, rtc); 455 456 - release_resource(dev_get_devdata(&rtc->dev)); 457 rtc_device_unregister(rtc); 458 return 0; 459 }
··· 371 goto fail; 372 } 373 platform_set_drvdata(pdev, rtc); 374 + dev_set_drvdata(&rtc->dev, mem); 375 376 /* clear pending irqs, and set 1/second periodic, 377 * which we'll use instead of update irqs ··· 453 free_irq(omap_rtc_timer, rtc); 454 free_irq(omap_rtc_alarm, rtc); 455 456 + release_resource(dev_get_drvdata(&rtc->dev)); 457 rtc_device_unregister(rtc); 458 return 0; 459 }
+18 -3
drivers/serial/8250.c
··· 894 quot = serial_dl_read(up); 895 quot <<= 3; 896 897 - status1 = serial_in(up, 0x04); /* EXCR1 */ 898 status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ 899 status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ 900 serial_outp(up, 0x04, status1); ··· 2617 */ 2618 void serial8250_resume_port(int line) 2619 { 2620 - uart_resume_port(&serial8250_reg, &serial8250_ports[line].port); 2621 } 2622 2623 /* ··· 2709 struct uart_8250_port *up = &serial8250_ports[i]; 2710 2711 if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev) 2712 - uart_resume_port(&serial8250_reg, &up->port); 2713 } 2714 2715 return 0;
··· 894 quot = serial_dl_read(up); 895 quot <<= 3; 896 897 + status1 = serial_in(up, 0x04); /* EXCR2 */ 898 status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ 899 status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ 900 serial_outp(up, 0x04, status1); ··· 2617 */ 2618 void serial8250_resume_port(int line) 2619 { 2620 + struct uart_8250_port *up = &serial8250_ports[line]; 2621 + 2622 + if (up->capabilities & UART_NATSEMI) { 2623 + unsigned char tmp; 2624 + 2625 + /* Ensure it's still in high speed mode */ 2626 + serial_outp(up, UART_LCR, 0xE0); 2627 + 2628 + tmp = serial_in(up, 0x04); /* EXCR2 */ 2629 + tmp &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ 2630 + tmp |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ 2631 + serial_outp(up, 0x04, tmp); 2632 + 2633 + serial_outp(up, UART_LCR, 0); 2634 + } 2635 + uart_resume_port(&serial8250_reg, &up->port); 2636 } 2637 2638 /* ··· 2694 struct uart_8250_port *up = &serial8250_ports[i]; 2695 2696 if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev) 2697 + serial8250_resume_port(i); 2698 } 2699 2700 return 0;
+31 -24
drivers/serial/icom.c
··· 69 70 static const struct pci_device_id icom_pci_table[] = { 71 { 72 - .vendor = PCI_VENDOR_ID_IBM, 73 - .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1, 74 - .subvendor = PCI_ANY_ID, 75 - .subdevice = PCI_ANY_ID, 76 - .driver_data = ADAPTER_V1, 77 - }, 78 { 79 - .vendor = PCI_VENDOR_ID_IBM, 80 - .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, 81 - .subvendor = PCI_VENDOR_ID_IBM, 82 - .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX, 83 - .driver_data = ADAPTER_V2, 84 - }, 85 { 86 - .vendor = PCI_VENDOR_ID_IBM, 87 - .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, 88 - .subvendor = PCI_VENDOR_ID_IBM, 89 - .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM, 90 - .driver_data = ADAPTER_V2, 91 - }, 92 { 93 - .vendor = PCI_VENDOR_ID_IBM, 94 - .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, 95 - .subvendor = PCI_VENDOR_ID_IBM, 96 - .subdevice = PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL, 97 - .driver_data = ADAPTER_V2, 98 - }, 99 {} 100 }; 101
··· 69 70 static const struct pci_device_id icom_pci_table[] = { 71 { 72 + .vendor = PCI_VENDOR_ID_IBM, 73 + .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1, 74 + .subvendor = PCI_ANY_ID, 75 + .subdevice = PCI_ANY_ID, 76 + .driver_data = ADAPTER_V1, 77 + }, 78 { 79 + .vendor = PCI_VENDOR_ID_IBM, 80 + .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, 81 + .subvendor = PCI_VENDOR_ID_IBM, 82 + .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX, 83 + .driver_data = ADAPTER_V2, 84 + }, 85 { 86 + .vendor = PCI_VENDOR_ID_IBM, 87 + .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, 88 + .subvendor = PCI_VENDOR_ID_IBM, 89 + .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM, 90 + .driver_data = ADAPTER_V2, 91 + }, 92 { 93 + .vendor = PCI_VENDOR_ID_IBM, 94 + .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, 95 + .subvendor = PCI_VENDOR_ID_IBM, 96 + .subdevice = PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL, 97 + .driver_data = ADAPTER_V2, 98 + }, 99 + { 100 + .vendor = PCI_VENDOR_ID_IBM, 101 + .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2, 102 + .subvendor = PCI_VENDOR_ID_IBM, 103 + .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE, 104 + .driver_data = ADAPTER_V2, 105 + }, 106 {} 107 }; 108
+7 -2
drivers/video/console/vgacon.c
··· 368 #endif 369 } 370 371 /* VGA16 modes are not handled by VGACON */ 372 - if ((ORIG_VIDEO_MODE == 0x00) || /* SCREEN_INFO not initialized */ 373 - (ORIG_VIDEO_MODE == 0x0D) || /* 320x200/4 */ 374 (ORIG_VIDEO_MODE == 0x0E) || /* 640x200/4 */ 375 (ORIG_VIDEO_MODE == 0x10) || /* 640x350/4 */ 376 (ORIG_VIDEO_MODE == 0x12) || /* 640x480/4 */
··· 368 #endif 369 } 370 371 + /* SCREEN_INFO initialized? */ 372 + if ((ORIG_VIDEO_MODE == 0) && 373 + (ORIG_VIDEO_LINES == 0) && 374 + (ORIG_VIDEO_COLS == 0)) 375 + goto no_vga; 376 + 377 /* VGA16 modes are not handled by VGACON */ 378 + if ((ORIG_VIDEO_MODE == 0x0D) || /* 320x200/4 */ 379 (ORIG_VIDEO_MODE == 0x0E) || /* 640x200/4 */ 380 (ORIG_VIDEO_MODE == 0x10) || /* 640x350/4 */ 381 (ORIG_VIDEO_MODE == 0x12) || /* 640x480/4 */
+1 -2
fs/adfs/super.c
··· 232 { 233 struct adfs_inode_info *ei = (struct adfs_inode_info *) foo; 234 235 - if (flags & SLAB_CTOR_CONSTRUCTOR) 236 - inode_init_once(&ei->vfs_inode); 237 } 238 239 static int init_inodecache(void)
··· 232 { 233 struct adfs_inode_info *ei = (struct adfs_inode_info *) foo; 234 235 + inode_init_once(&ei->vfs_inode); 236 } 237 238 static int init_inodecache(void)
+3 -5
fs/affs/super.c
··· 87 { 88 struct affs_inode_info *ei = (struct affs_inode_info *) foo; 89 90 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 91 - init_MUTEX(&ei->i_link_lock); 92 - init_MUTEX(&ei->i_ext_lock); 93 - inode_init_once(&ei->vfs_inode); 94 - } 95 } 96 97 static int init_inodecache(void)
··· 87 { 88 struct affs_inode_info *ei = (struct affs_inode_info *) foo; 89 90 + init_MUTEX(&ei->i_link_lock); 91 + init_MUTEX(&ei->i_ext_lock); 92 + inode_init_once(&ei->vfs_inode); 93 } 94 95 static int init_inodecache(void)
+9 -11
fs/afs/super.c
··· 451 { 452 struct afs_vnode *vnode = _vnode; 453 454 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 455 - memset(vnode, 0, sizeof(*vnode)); 456 - inode_init_once(&vnode->vfs_inode); 457 - init_waitqueue_head(&vnode->update_waitq); 458 - mutex_init(&vnode->permits_lock); 459 - mutex_init(&vnode->validate_lock); 460 - spin_lock_init(&vnode->writeback_lock); 461 - spin_lock_init(&vnode->lock); 462 - INIT_LIST_HEAD(&vnode->writebacks); 463 - INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work); 464 - } 465 } 466 467 /*
··· 451 { 452 struct afs_vnode *vnode = _vnode; 453 454 + memset(vnode, 0, sizeof(*vnode)); 455 + inode_init_once(&vnode->vfs_inode); 456 + init_waitqueue_head(&vnode->update_waitq); 457 + mutex_init(&vnode->permits_lock); 458 + mutex_init(&vnode->validate_lock); 459 + spin_lock_init(&vnode->writeback_lock); 460 + spin_lock_init(&vnode->lock); 461 + INIT_LIST_HEAD(&vnode->writebacks); 462 + INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work); 463 } 464 465 /*
+2 -4
fs/befs/linuxvfs.c
··· 292 static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 293 { 294 struct befs_inode_info *bi = (struct befs_inode_info *) foo; 295 - 296 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 297 - inode_init_once(&bi->vfs_inode); 298 - } 299 } 300 301 static void
··· 292 static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 293 { 294 struct befs_inode_info *bi = (struct befs_inode_info *) foo; 295 + 296 + inode_init_once(&bi->vfs_inode); 297 } 298 299 static void
+1 -2
fs/bfs/inode.c
··· 248 { 249 struct bfs_inode_info *bi = foo; 250 251 - if (flags & SLAB_CTOR_CONSTRUCTOR) 252 - inode_init_once(&bi->vfs_inode); 253 } 254 255 static int init_inodecache(void)
··· 248 { 249 struct bfs_inode_info *bi = foo; 250 251 + inode_init_once(&bi->vfs_inode); 252 } 253 254 static int init_inodecache(void)
+7 -9
fs/block_dev.c
··· 458 struct bdev_inode *ei = (struct bdev_inode *) foo; 459 struct block_device *bdev = &ei->bdev; 460 461 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 462 - memset(bdev, 0, sizeof(*bdev)); 463 - mutex_init(&bdev->bd_mutex); 464 - sema_init(&bdev->bd_mount_sem, 1); 465 - INIT_LIST_HEAD(&bdev->bd_inodes); 466 - INIT_LIST_HEAD(&bdev->bd_list); 467 #ifdef CONFIG_SYSFS 468 - INIT_LIST_HEAD(&bdev->bd_holder_list); 469 #endif 470 - inode_init_once(&ei->vfs_inode); 471 - } 472 } 473 474 static inline void __bd_forget(struct inode *inode)
··· 458 struct bdev_inode *ei = (struct bdev_inode *) foo; 459 struct block_device *bdev = &ei->bdev; 460 461 + memset(bdev, 0, sizeof(*bdev)); 462 + mutex_init(&bdev->bd_mutex); 463 + sema_init(&bdev->bd_mount_sem, 1); 464 + INIT_LIST_HEAD(&bdev->bd_inodes); 465 + INIT_LIST_HEAD(&bdev->bd_list); 466 #ifdef CONFIG_SYSFS 467 + INIT_LIST_HEAD(&bdev->bd_holder_list); 468 #endif 469 + inode_init_once(&ei->vfs_inode); 470 } 471 472 static inline void __bd_forget(struct inode *inode)
+6 -19
fs/buffer.c
··· 981 struct page *page; 982 struct buffer_head *bh; 983 984 - page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); 985 if (!page) 986 return NULL; 987 ··· 2899 2900 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 2901 { 2902 - struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); 2903 if (ret) { 2904 get_cpu_var(bh_accounting).nr++; 2905 recalc_bh_state(); 2906 put_cpu_var(bh_accounting); ··· 2919 put_cpu_var(bh_accounting); 2920 } 2921 EXPORT_SYMBOL(free_buffer_head); 2922 - 2923 - static void 2924 - init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags) 2925 - { 2926 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 2927 - struct buffer_head * bh = (struct buffer_head *)data; 2928 - 2929 - memset(bh, 0, sizeof(*bh)); 2930 - INIT_LIST_HEAD(&bh->b_assoc_buffers); 2931 - } 2932 - } 2933 2934 static void buffer_exit_cpu(int cpu) 2935 { ··· 2946 { 2947 int nrpages; 2948 2949 - bh_cachep = kmem_cache_create("buffer_head", 2950 - sizeof(struct buffer_head), 0, 2951 - (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| 2952 - SLAB_MEM_SPREAD), 2953 - init_buffer_head, 2954 - NULL); 2955 2956 /* 2957 * Limit the bh occupancy to 10% of ZONE_NORMAL
··· 981 struct page *page; 982 struct buffer_head *bh; 983 984 + page = find_or_create_page(inode->i_mapping, index, 985 + mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); 986 if (!page) 987 return NULL; 988 ··· 2898 2899 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 2900 { 2901 + struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); 2902 if (ret) { 2903 + INIT_LIST_HEAD(&ret->b_assoc_buffers); 2904 get_cpu_var(bh_accounting).nr++; 2905 recalc_bh_state(); 2906 put_cpu_var(bh_accounting); ··· 2917 put_cpu_var(bh_accounting); 2918 } 2919 EXPORT_SYMBOL(free_buffer_head); 2920 2921 static void buffer_exit_cpu(int cpu) 2922 { ··· 2955 { 2956 int nrpages; 2957 2958 + bh_cachep = KMEM_CACHE(buffer_head, 2959 + SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 2960 2961 /* 2962 * Limit the bh occupancy to 10% of ZONE_NORMAL
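Two helpers carry the buffer.c change: KMEM_CACHE() builds the cache from the struct definition and kmem_cache_zalloc() returns zeroed objects, which is what made the old init_buffer_head() constructor unnecessary. A small sketch with a hypothetical struct and cache:

/* Sketch (struct foo and foo_cachep are hypothetical): KMEM_CACHE()
 * derives the cache name, size and alignment from the struct itself;
 * kmem_cache_zalloc() hands back zero-filled objects, so per-object
 * setup can move to allocation time instead of a slab constructor.
 */
#include <linux/slab.h>
#include <linux/list.h>

struct foo {
        struct list_head list;
        int value;
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
        foo_cachep = KMEM_CACHE(foo, SLAB_PANIC);
        return foo_cachep ? 0 : -ENOMEM;
}

static struct foo *foo_alloc(gfp_t gfp)
{
        struct foo *f = kmem_cache_zalloc(foo_cachep, gfp);

        if (f)
                INIT_LIST_HEAD(&f->list);
        return f;
}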
+2 -4
fs/cifs/cifsfs.c
··· 701 { 702 struct cifsInodeInfo *cifsi = inode; 703 704 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 705 - inode_init_once(&cifsi->vfs_inode); 706 - INIT_LIST_HEAD(&cifsi->lockList); 707 - } 708 } 709 710 static int
··· 701 { 702 struct cifsInodeInfo *cifsi = inode; 703 704 + inode_init_once(&cifsi->vfs_inode); 705 + INIT_LIST_HEAD(&cifsi->lockList); 706 } 707 708 static int
+1 -2
fs/coda/inode.c
··· 62 { 63 struct coda_inode_info *ei = (struct coda_inode_info *) foo; 64 65 - if (flags & SLAB_CTOR_CONSTRUCTOR) 66 - inode_init_once(&ei->vfs_inode); 67 } 68 69 int coda_init_inodecache(void)
··· 62 { 63 struct coda_inode_info *ei = (struct coda_inode_info *) foo; 64 65 + inode_init_once(&ei->vfs_inode); 66 } 67 68 int coda_init_inodecache(void)
+4 -9
fs/compat.c
··· 2230 asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags, 2231 const struct compat_itimerspec __user *utmr) 2232 { 2233 - long res; 2234 struct itimerspec t; 2235 struct itimerspec __user *ut; 2236 2237 - res = -EFAULT; 2238 if (get_compat_itimerspec(&t, utmr)) 2239 - goto err_exit; 2240 ut = compat_alloc_user_space(sizeof(*ut)); 2241 - if (copy_to_user(ut, &t, sizeof(t)) ) 2242 - goto err_exit; 2243 2244 - res = sys_timerfd(ufd, clockid, flags, ut); 2245 - err_exit: 2246 - return res; 2247 } 2248 2249 #endif /* CONFIG_TIMERFD */ 2250 -
··· 2230 asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags, 2231 const struct compat_itimerspec __user *utmr) 2232 { 2233 struct itimerspec t; 2234 struct itimerspec __user *ut; 2235 2236 if (get_compat_itimerspec(&t, utmr)) 2237 + return -EFAULT; 2238 ut = compat_alloc_user_space(sizeof(*ut)); 2239 + if (copy_to_user(ut, &t, sizeof(t))) 2240 + return -EFAULT; 2241 2242 + return sys_timerfd(ufd, clockid, flags, ut); 2243 } 2244 2245 #endif /* CONFIG_TIMERFD */
+1 -1
fs/dquot.c
··· 1421 /* If quota was reenabled in the meantime, we have 1422 * nothing to do */ 1423 if (!sb_has_quota_enabled(sb, cnt)) { 1424 - mutex_lock(&toputinode[cnt]->i_mutex); 1425 toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | 1426 S_NOATIME | S_NOQUOTA); 1427 truncate_inode_pages(&toputinode[cnt]->i_data, 0);
··· 1421 /* If quota was reenabled in the meantime, we have 1422 * nothing to do */ 1423 if (!sb_has_quota_enabled(sb, cnt)) { 1424 + mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA); 1425 toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | 1426 S_NOATIME | S_NOQUOTA); 1427 truncate_inode_pages(&toputinode[cnt]->i_data, 0);
+1 -2
fs/ecryptfs/main.c
··· 583 { 584 struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr; 585 586 - if (flags & SLAB_CTOR_CONSTRUCTOR) 587 - inode_init_once(&ei->vfs_inode); 588 } 589 590 static struct ecryptfs_cache_info {
··· 583 { 584 struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr; 585 586 + inode_init_once(&ei->vfs_inode); 587 } 588 589 static struct ecryptfs_cache_info {
+3 -11
fs/ecryptfs/mmap.c
··· 364 { 365 struct inode *inode = page->mapping->host; 366 int end_byte_in_page; 367 - char *page_virt; 368 369 if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index) 370 goto out; 371 end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE; 372 if (to > end_byte_in_page) 373 end_byte_in_page = to; 374 - page_virt = kmap_atomic(page, KM_USER0); 375 - memset((page_virt + end_byte_in_page), 0, 376 - (PAGE_CACHE_SIZE - end_byte_in_page)); 377 - kunmap_atomic(page_virt, KM_USER0); 378 - flush_dcache_page(page); 379 out: 380 return 0; 381 } ··· 736 { 737 int rc = 0; 738 struct page *tmp_page; 739 - char *tmp_page_virt; 740 741 tmp_page = ecryptfs_get1page(file, index); 742 if (IS_ERR(tmp_page)) { ··· 752 page_cache_release(tmp_page); 753 goto out; 754 } 755 - tmp_page_virt = kmap_atomic(tmp_page, KM_USER0); 756 - memset(((char *)tmp_page_virt + start), 0, num_zeros); 757 - kunmap_atomic(tmp_page_virt, KM_USER0); 758 - flush_dcache_page(tmp_page); 759 rc = ecryptfs_commit_write(file, tmp_page, start, start + num_zeros); 760 if (rc < 0) { 761 ecryptfs_printk(KERN_ERR, "Error attempting to write zero's "
··· 364 { 365 struct inode *inode = page->mapping->host; 366 int end_byte_in_page; 367 368 if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index) 369 goto out; 370 end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE; 371 if (to > end_byte_in_page) 372 end_byte_in_page = to; 373 + zero_user_page(page, end_byte_in_page, 374 + PAGE_CACHE_SIZE - end_byte_in_page, KM_USER0); 375 out: 376 return 0; 377 } ··· 740 { 741 int rc = 0; 742 struct page *tmp_page; 743 744 tmp_page = ecryptfs_get1page(file, index); 745 if (IS_ERR(tmp_page)) { ··· 757 page_cache_release(tmp_page); 758 goto out; 759 } 760 + zero_user_page(tmp_page, start, num_zeros, KM_USER0); 761 rc = ecryptfs_commit_write(file, tmp_page, start, start + num_zeros); 762 if (rc < 0) { 763 ecryptfs_printk(KERN_ERR, "Error attempting to write zero's "
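zero_user_page() replaces the open-coded kmap_atomic/memset/kunmap_atomic/flush_dcache_page sequence; a one-function usage sketch following the argument order shown in the hunk (page, byte offset, length, kmap slot):

/* Sketch: zero a page from byte 'from' to the end, as the ecryptfs
 * hunk above does for the tail beyond i_size.
 */
static void demo_zero_tail(struct page *page, unsigned int from)
{
        zero_user_page(page, from, PAGE_CACHE_SIZE - from, KM_USER0);
}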
+1 -2
fs/efs/super.c
··· 72 { 73 struct efs_inode_info *ei = (struct efs_inode_info *) foo; 74 75 - if (flags & SLAB_CTOR_CONSTRUCTOR) 76 - inode_init_once(&ei->vfs_inode); 77 } 78 79 static int init_inodecache(void)
··· 72 { 73 struct efs_inode_info *ei = (struct efs_inode_info *) foo; 74 75 + inode_init_once(&ei->vfs_inode); 76 } 77 78 static int init_inodecache(void)
+1 -3
fs/exec.c
··· 60 #endif 61 62 int core_uses_pid; 63 - char core_pattern[128] = "core"; 64 int suid_dumpable = 0; 65 66 EXPORT_SYMBOL(suid_dumpable); ··· 1263 } 1264 1265 EXPORT_SYMBOL(set_binfmt); 1266 - 1267 - #define CORENAME_MAX_SIZE 64 1268 1269 /* format_corename will inspect the pattern parameter, and output a 1270 * name into corename, which must have space for at least
··· 60 #endif 61 62 int core_uses_pid; 63 + char core_pattern[CORENAME_MAX_SIZE] = "core"; 64 int suid_dumpable = 0; 65 66 EXPORT_SYMBOL(suid_dumpable); ··· 1263 } 1264 1265 EXPORT_SYMBOL(set_binfmt); 1266 1267 /* format_corename will inspect the pattern parameter, and output a 1268 * name into corename, which must have space for at least
+3 -5
fs/ext2/super.c
··· 160 { 161 struct ext2_inode_info *ei = (struct ext2_inode_info *) foo; 162 163 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 164 - rwlock_init(&ei->i_meta_lock); 165 #ifdef CONFIG_EXT2_FS_XATTR 166 - init_rwsem(&ei->xattr_sem); 167 #endif 168 - inode_init_once(&ei->vfs_inode); 169 - } 170 } 171 172 static int init_inodecache(void)
··· 160 { 161 struct ext2_inode_info *ei = (struct ext2_inode_info *) foo; 162 163 + rwlock_init(&ei->i_meta_lock); 164 #ifdef CONFIG_EXT2_FS_XATTR 165 + init_rwsem(&ei->xattr_sem); 166 #endif 167 + inode_init_once(&ei->vfs_inode); 168 } 169 170 static int init_inodecache(void)
+4 -6
fs/ext3/super.c
··· 466 { 467 struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; 468 469 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 470 - INIT_LIST_HEAD(&ei->i_orphan); 471 #ifdef CONFIG_EXT3_FS_XATTR 472 - init_rwsem(&ei->xattr_sem); 473 #endif 474 - mutex_init(&ei->truncate_mutex); 475 - inode_init_once(&ei->vfs_inode); 476 - } 477 } 478 479 static int init_inodecache(void)
··· 466 { 467 struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; 468 469 + INIT_LIST_HEAD(&ei->i_orphan); 470 #ifdef CONFIG_EXT3_FS_XATTR 471 + init_rwsem(&ei->xattr_sem); 472 #endif 473 + mutex_init(&ei->truncate_mutex); 474 + inode_init_once(&ei->vfs_inode); 475 } 476 477 static int init_inodecache(void)
+4 -6
fs/ext4/super.c
··· 517 { 518 struct ext4_inode_info *ei = (struct ext4_inode_info *) foo; 519 520 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 521 - INIT_LIST_HEAD(&ei->i_orphan); 522 #ifdef CONFIG_EXT4DEV_FS_XATTR 523 - init_rwsem(&ei->xattr_sem); 524 #endif 525 - mutex_init(&ei->truncate_mutex); 526 - inode_init_once(&ei->vfs_inode); 527 - } 528 } 529 530 static int init_inodecache(void)
··· 517 { 518 struct ext4_inode_info *ei = (struct ext4_inode_info *) foo; 519 520 + INIT_LIST_HEAD(&ei->i_orphan); 521 #ifdef CONFIG_EXT4DEV_FS_XATTR 522 + init_rwsem(&ei->xattr_sem); 523 #endif 524 + mutex_init(&ei->truncate_mutex); 525 + inode_init_once(&ei->vfs_inode); 526 } 527 528 static int init_inodecache(void)
+1 -2
fs/fat/cache.c
··· 40 { 41 struct fat_cache *cache = (struct fat_cache *)foo; 42 43 - if (flags & SLAB_CTOR_CONSTRUCTOR) 44 - INIT_LIST_HEAD(&cache->cache_list); 45 } 46 47 int __init fat_cache_init(void)
··· 40 { 41 struct fat_cache *cache = (struct fat_cache *)foo; 42 43 + INIT_LIST_HEAD(&cache->cache_list); 44 } 45 46 int __init fat_cache_init(void)
+6 -8
fs/fat/inode.c
··· 500 { 501 struct msdos_inode_info *ei = (struct msdos_inode_info *)foo; 502 503 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 504 - spin_lock_init(&ei->cache_lru_lock); 505 - ei->nr_caches = 0; 506 - ei->cache_valid_id = FAT_CACHE_VALID + 1; 507 - INIT_LIST_HEAD(&ei->cache_lru); 508 - INIT_HLIST_NODE(&ei->i_fat_hash); 509 - inode_init_once(&ei->vfs_inode); 510 - } 511 } 512 513 static int __init fat_init_inodecache(void)
··· 500 { 501 struct msdos_inode_info *ei = (struct msdos_inode_info *)foo; 502 503 + spin_lock_init(&ei->cache_lru_lock); 504 + ei->nr_caches = 0; 505 + ei->cache_valid_id = FAT_CACHE_VALID + 1; 506 + INIT_LIST_HEAD(&ei->cache_lru); 507 + INIT_HLIST_NODE(&ei->i_fat_hash); 508 + inode_init_once(&ei->vfs_inode); 509 } 510 511 static int __init fat_init_inodecache(void)
+1 -2
fs/fuse/inode.c
··· 687 { 688 struct inode * inode = foo; 689 690 - if (flags & SLAB_CTOR_CONSTRUCTOR) 691 - inode_init_once(inode); 692 } 693 694 static int __init fuse_fs_init(void)
··· 687 { 688 struct inode * inode = foo; 689 690 + inode_init_once(inode); 691 } 692 693 static int __init fuse_fs_init(void)
+16 -18
fs/gfs2/main.c
··· 27 static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags) 28 { 29 struct gfs2_inode *ip = foo; 30 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 31 - inode_init_once(&ip->i_inode); 32 - spin_lock_init(&ip->i_spin); 33 - init_rwsem(&ip->i_rw_mutex); 34 - memset(ip->i_cache, 0, sizeof(ip->i_cache)); 35 - } 36 } 37 38 static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags) 39 { 40 struct gfs2_glock *gl = foo; 41 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 42 - INIT_HLIST_NODE(&gl->gl_list); 43 - spin_lock_init(&gl->gl_spin); 44 - INIT_LIST_HEAD(&gl->gl_holders); 45 - INIT_LIST_HEAD(&gl->gl_waiters1); 46 - INIT_LIST_HEAD(&gl->gl_waiters3); 47 - gl->gl_lvb = NULL; 48 - atomic_set(&gl->gl_lvb_count, 0); 49 - INIT_LIST_HEAD(&gl->gl_reclaim); 50 - INIT_LIST_HEAD(&gl->gl_ail_list); 51 - atomic_set(&gl->gl_ail_count, 0); 52 - } 53 } 54 55 /**
··· 27 static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags) 28 { 29 struct gfs2_inode *ip = foo; 30 + 31 + inode_init_once(&ip->i_inode); 32 + spin_lock_init(&ip->i_spin); 33 + init_rwsem(&ip->i_rw_mutex); 34 + memset(ip->i_cache, 0, sizeof(ip->i_cache)); 35 } 36 37 static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags) 38 { 39 struct gfs2_glock *gl = foo; 40 + 41 + INIT_HLIST_NODE(&gl->gl_list); 42 + spin_lock_init(&gl->gl_spin); 43 + INIT_LIST_HEAD(&gl->gl_holders); 44 + INIT_LIST_HEAD(&gl->gl_waiters1); 45 + INIT_LIST_HEAD(&gl->gl_waiters3); 46 + gl->gl_lvb = NULL; 47 + atomic_set(&gl->gl_lvb_count, 0); 48 + INIT_LIST_HEAD(&gl->gl_reclaim); 49 + INIT_LIST_HEAD(&gl->gl_ail_list); 50 + atomic_set(&gl->gl_ail_count, 0); 51 } 52 53 /**
+1 -2
fs/hfs/super.c
··· 434 { 435 struct hfs_inode_info *i = p; 436 437 - if (flags & SLAB_CTOR_CONSTRUCTOR) 438 - inode_init_once(&i->vfs_inode); 439 } 440 441 static int __init init_hfs_fs(void)
··· 434 { 435 struct hfs_inode_info *i = p; 436 437 + inode_init_once(&i->vfs_inode); 438 } 439 440 static int __init init_hfs_fs(void)
+1 -2
fs/hfsplus/super.c
··· 470 { 471 struct hfsplus_inode_info *i = p; 472 473 - if (flags & SLAB_CTOR_CONSTRUCTOR) 474 - inode_init_once(&i->vfs_inode); 475 } 476 477 static int __init init_hfsplus_fs(void)
··· 470 { 471 struct hfsplus_inode_info *i = p; 472 473 + inode_init_once(&i->vfs_inode); 474 } 475 476 static int __init init_hfsplus_fs(void)
+3 -5
fs/hpfs/super.c
··· 176 { 177 struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; 178 179 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 180 - mutex_init(&ei->i_mutex); 181 - mutex_init(&ei->i_parent_mutex); 182 - inode_init_once(&ei->vfs_inode); 183 - } 184 } 185 186 static int init_inodecache(void)
··· 176 { 177 struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; 178 179 + mutex_init(&ei->i_mutex); 180 + mutex_init(&ei->i_parent_mutex); 181 + inode_init_once(&ei->vfs_inode); 182 } 183 184 static int init_inodecache(void)
+1 -2
fs/hugetlbfs/inode.c
··· 556 { 557 struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; 558 559 - if (flags & SLAB_CTOR_CONSTRUCTOR) 560 - inode_init_once(&ei->vfs_inode); 561 } 562 563 const struct file_operations hugetlbfs_file_operations = {
··· 556 { 557 struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; 558 559 + inode_init_once(&ei->vfs_inode); 560 } 561 562 const struct file_operations hugetlbfs_file_operations = {
+1 -2
fs/inode.c
··· 213 { 214 struct inode * inode = (struct inode *) foo; 215 216 - if (flags & SLAB_CTOR_CONSTRUCTOR) 217 - inode_init_once(inode); 218 } 219 220 /*
··· 213 { 214 struct inode * inode = (struct inode *) foo; 215 216 + inode_init_once(inode); 217 } 218 219 /*
+1 -2
fs/isofs/inode.c
··· 77 { 78 struct iso_inode_info *ei = foo; 79 80 - if (flags & SLAB_CTOR_CONSTRUCTOR) 81 - inode_init_once(&ei->vfs_inode); 82 } 83 84 static int init_inodecache(void)
··· 77 { 78 struct iso_inode_info *ei = foo; 79 80 + inode_init_once(&ei->vfs_inode); 81 } 82 83 static int init_inodecache(void)
+2 -4
fs/jffs2/super.c
··· 47 { 48 struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo; 49 50 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 51 - init_MUTEX(&ei->sem); 52 - inode_init_once(&ei->vfs_inode); 53 - } 54 } 55 56 static int jffs2_sync_fs(struct super_block *sb, int wait)
··· 47 { 48 struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo; 49 50 + init_MUTEX(&ei->sem); 51 + inode_init_once(&ei->vfs_inode); 52 } 53 54 static int jffs2_sync_fs(struct super_block *sb, int wait)
+8 -10
fs/jfs/jfs_metapage.c
··· 184 { 185 struct metapage *mp = (struct metapage *)foo; 186 187 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 188 - mp->lid = 0; 189 - mp->lsn = 0; 190 - mp->flag = 0; 191 - mp->data = NULL; 192 - mp->clsn = 0; 193 - mp->log = NULL; 194 - set_bit(META_free, &mp->flag); 195 - init_waitqueue_head(&mp->wait); 196 - } 197 } 198 199 static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
··· 184 { 185 struct metapage *mp = (struct metapage *)foo; 186 187 + mp->lid = 0; 188 + mp->lsn = 0; 189 + mp->flag = 0; 190 + mp->data = NULL; 191 + mp->clsn = 0; 192 + mp->log = NULL; 193 + set_bit(META_free, &mp->flag); 194 + init_waitqueue_head(&mp->wait); 195 } 196 197 static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
+10 -12
fs/jfs/super.c
··· 752 { 753 struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; 754 755 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 756 - memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); 757 - INIT_LIST_HEAD(&jfs_ip->anon_inode_list); 758 - init_rwsem(&jfs_ip->rdwrlock); 759 - mutex_init(&jfs_ip->commit_mutex); 760 - init_rwsem(&jfs_ip->xattr_sem); 761 - spin_lock_init(&jfs_ip->ag_lock); 762 - jfs_ip->active_ag = -1; 763 #ifdef CONFIG_JFS_POSIX_ACL 764 - jfs_ip->i_acl = JFS_ACL_NOT_CACHED; 765 - jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED; 766 #endif 767 - inode_init_once(&jfs_ip->vfs_inode); 768 - } 769 } 770 771 static int __init init_jfs_fs(void)
··· 752 { 753 struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; 754 755 + memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); 756 + INIT_LIST_HEAD(&jfs_ip->anon_inode_list); 757 + init_rwsem(&jfs_ip->rdwrlock); 758 + mutex_init(&jfs_ip->commit_mutex); 759 + init_rwsem(&jfs_ip->xattr_sem); 760 + spin_lock_init(&jfs_ip->ag_lock); 761 + jfs_ip->active_ag = -1; 762 #ifdef CONFIG_JFS_POSIX_ACL 763 + jfs_ip->i_acl = JFS_ACL_NOT_CACHED; 764 + jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED; 765 #endif 766 + inode_init_once(&jfs_ip->vfs_inode); 767 } 768 769 static int __init init_jfs_fs(void)
-3
fs/locks.c
··· 203 { 204 struct file_lock *lock = (struct file_lock *) foo; 205 206 - if (!(flags & SLAB_CTOR_CONSTRUCTOR)) 207 - return; 208 - 209 locks_init_lock(lock); 210 } 211
··· 203 { 204 struct file_lock *lock = (struct file_lock *) foo; 205 206 locks_init_lock(lock); 207 } 208
+1 -2
fs/minix/inode.c
··· 73 { 74 struct minix_inode_info *ei = (struct minix_inode_info *) foo; 75 76 - if (flags & SLAB_CTOR_CONSTRUCTOR) 77 - inode_init_once(&ei->vfs_inode); 78 } 79 80 static int init_inodecache(void)
··· 73 { 74 struct minix_inode_info *ei = (struct minix_inode_info *) foo; 75 76 + inode_init_once(&ei->vfs_inode); 77 } 78 79 static int init_inodecache(void)
+2 -4
fs/ncpfs/inode.c
··· 60 { 61 struct ncp_inode_info *ei = (struct ncp_inode_info *) foo; 62 63 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 64 - mutex_init(&ei->open_mutex); 65 - inode_init_once(&ei->vfs_inode); 66 - } 67 } 68 69 static int init_inodecache(void)
··· 60 { 61 struct ncp_inode_info *ei = (struct ncp_inode_info *) foo; 62 63 + mutex_init(&ei->open_mutex); 64 + inode_init_once(&ei->vfs_inode); 65 } 66 67 static int init_inodecache(void)
+13 -15
fs/nfs/inode.c
··· 1164 { 1165 struct nfs_inode *nfsi = (struct nfs_inode *) foo; 1166 1167 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 1168 - inode_init_once(&nfsi->vfs_inode); 1169 - spin_lock_init(&nfsi->req_lock); 1170 - INIT_LIST_HEAD(&nfsi->dirty); 1171 - INIT_LIST_HEAD(&nfsi->commit); 1172 - INIT_LIST_HEAD(&nfsi->open_files); 1173 - INIT_LIST_HEAD(&nfsi->access_cache_entry_lru); 1174 - INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); 1175 - INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC); 1176 - atomic_set(&nfsi->data_updates, 0); 1177 - nfsi->ndirty = 0; 1178 - nfsi->ncommit = 0; 1179 - nfsi->npages = 0; 1180 - nfs4_init_once(nfsi); 1181 - } 1182 } 1183 1184 static int __init nfs_init_inodecache(void)
··· 1164 { 1165 struct nfs_inode *nfsi = (struct nfs_inode *) foo; 1166 1167 + inode_init_once(&nfsi->vfs_inode); 1168 + spin_lock_init(&nfsi->req_lock); 1169 + INIT_LIST_HEAD(&nfsi->dirty); 1170 + INIT_LIST_HEAD(&nfsi->commit); 1171 + INIT_LIST_HEAD(&nfsi->open_files); 1172 + INIT_LIST_HEAD(&nfsi->access_cache_entry_lru); 1173 + INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); 1174 + INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC); 1175 + atomic_set(&nfsi->data_updates, 0); 1176 + nfsi->ndirty = 0; 1177 + nfsi->ncommit = 0; 1178 + nfsi->npages = 0; 1179 + nfs4_init_once(nfsi); 1180 } 1181 1182 static int __init nfs_init_inodecache(void)
+1 -2
fs/ntfs/super.c
··· 3085 { 3086 ntfs_inode *ni = (ntfs_inode *)foo; 3087 3088 - if (flags & SLAB_CTOR_CONSTRUCTOR) 3089 - inode_init_once(VFS_I(ni)); 3090 } 3091 3092 /*
··· 3085 { 3086 ntfs_inode *ni = (ntfs_inode *)foo; 3087 3088 + inode_init_once(VFS_I(ni)); 3089 } 3090 3091 /*
+3 -5
fs/ocfs2/dlm/dlmfs.c
··· 262 struct dlmfs_inode_private *ip = 263 (struct dlmfs_inode_private *) foo; 264 265 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 266 - ip->ip_dlm = NULL; 267 - ip->ip_parent = NULL; 268 269 - inode_init_once(&ip->ip_vfs_inode); 270 - } 271 } 272 273 static struct inode *dlmfs_alloc_inode(struct super_block *sb)
··· 262 struct dlmfs_inode_private *ip = 263 (struct dlmfs_inode_private *) foo; 264 265 + ip->ip_dlm = NULL; 266 + ip->ip_parent = NULL; 267 268 + inode_init_once(&ip->ip_vfs_inode); 269 } 270 271 static struct inode *dlmfs_alloc_inode(struct super_block *sb)
+18 -20
fs/ocfs2/super.c
··· 937 { 938 struct ocfs2_inode_info *oi = data; 939 940 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 941 - oi->ip_flags = 0; 942 - oi->ip_open_count = 0; 943 - spin_lock_init(&oi->ip_lock); 944 - ocfs2_extent_map_init(&oi->vfs_inode); 945 - INIT_LIST_HEAD(&oi->ip_io_markers); 946 - oi->ip_created_trans = 0; 947 - oi->ip_last_trans = 0; 948 - oi->ip_dir_start_lookup = 0; 949 950 - init_rwsem(&oi->ip_alloc_sem); 951 - mutex_init(&oi->ip_io_mutex); 952 953 - oi->ip_blkno = 0ULL; 954 - oi->ip_clusters = 0; 955 956 - ocfs2_lock_res_init_once(&oi->ip_rw_lockres); 957 - ocfs2_lock_res_init_once(&oi->ip_meta_lockres); 958 - ocfs2_lock_res_init_once(&oi->ip_data_lockres); 959 - ocfs2_lock_res_init_once(&oi->ip_open_lockres); 960 961 - ocfs2_metadata_cache_init(&oi->vfs_inode); 962 963 - inode_init_once(&oi->vfs_inode); 964 - } 965 } 966 967 static int ocfs2_initialize_mem_caches(void)
··· 937 { 938 struct ocfs2_inode_info *oi = data; 939 940 + oi->ip_flags = 0; 941 + oi->ip_open_count = 0; 942 + spin_lock_init(&oi->ip_lock); 943 + ocfs2_extent_map_init(&oi->vfs_inode); 944 + INIT_LIST_HEAD(&oi->ip_io_markers); 945 + oi->ip_created_trans = 0; 946 + oi->ip_last_trans = 0; 947 + oi->ip_dir_start_lookup = 0; 948 949 + init_rwsem(&oi->ip_alloc_sem); 950 + mutex_init(&oi->ip_io_mutex); 951 952 + oi->ip_blkno = 0ULL; 953 + oi->ip_clusters = 0; 954 955 + ocfs2_lock_res_init_once(&oi->ip_rw_lockres); 956 + ocfs2_lock_res_init_once(&oi->ip_meta_lockres); 957 + ocfs2_lock_res_init_once(&oi->ip_data_lockres); 958 + ocfs2_lock_res_init_once(&oi->ip_open_lockres); 959 960 + ocfs2_metadata_cache_init(&oi->vfs_inode); 961 962 + inode_init_once(&oi->vfs_inode); 963 } 964 965 static int ocfs2_initialize_mem_caches(void)
+1 -2
fs/openpromfs/inode.c
··· 419 { 420 struct op_inode_info *oi = (struct op_inode_info *) data; 421 422 - if (flags & SLAB_CTOR_CONSTRUCTOR) 423 - inode_init_once(&oi->vfs_inode); 424 } 425 426 static int __init init_openprom_fs(void)
··· 419 { 420 struct op_inode_info *oi = (struct op_inode_info *) data; 421 422 + inode_init_once(&oi->vfs_inode); 423 } 424 425 static int __init init_openprom_fs(void)
+1 -2
fs/proc/inode.c
··· 109 { 110 struct proc_inode *ei = (struct proc_inode *) foo; 111 112 - if (flags & SLAB_CTOR_CONSTRUCTOR) 113 - inode_init_once(&ei->vfs_inode); 114 } 115 116 int __init proc_init_inodecache(void)
··· 109 { 110 struct proc_inode *ei = (struct proc_inode *) foo; 111 112 + inode_init_once(&ei->vfs_inode); 113 } 114 115 int __init proc_init_inodecache(void)
+1 -2
fs/qnx4/inode.c
··· 536 { 537 struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo; 538 539 - if (flags & SLAB_CTOR_CONSTRUCTOR) 540 - inode_init_once(&ei->vfs_inode); 541 } 542 543 static int init_inodecache(void)
··· 536 { 537 struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo; 538 539 + inode_init_once(&ei->vfs_inode); 540 } 541 542 static int init_inodecache(void)
+7 -16
fs/quota.c
··· 157 static void quota_sync_sb(struct super_block *sb, int type) 158 { 159 int cnt; 160 - struct inode *discard[MAXQUOTAS]; 161 162 sb->s_qcop->quota_sync(sb, type); 163 /* This is not very clever (and fast) but currently I don't know about ··· 166 sb->s_op->sync_fs(sb, 1); 167 sync_blockdev(sb->s_bdev); 168 169 - /* Now when everything is written we can discard the pagecache so 170 - * that userspace sees the changes. We need i_mutex and so we could 171 - * not do it inside dqonoff_mutex. Moreover we need to be carefull 172 - * about races with quotaoff() (that is the reason why we have own 173 - * reference to inode). */ 174 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); 175 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 176 - discard[cnt] = NULL; 177 if (type != -1 && cnt != type) 178 continue; 179 if (!sb_has_quota_enabled(sb, cnt)) 180 continue; 181 - discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]); 182 } 183 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 184 - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 185 - if (discard[cnt]) { 186 - mutex_lock(&discard[cnt]->i_mutex); 187 - truncate_inode_pages(&discard[cnt]->i_data, 0); 188 - mutex_unlock(&discard[cnt]->i_mutex); 189 - iput(discard[cnt]); 190 - } 191 - } 192 } 193 194 void sync_dquots(struct super_block *sb, int type)
··· 157 static void quota_sync_sb(struct super_block *sb, int type) 158 { 159 int cnt; 160 161 sb->s_qcop->quota_sync(sb, type); 162 /* This is not very clever (and fast) but currently I don't know about ··· 167 sb->s_op->sync_fs(sb, 1); 168 sync_blockdev(sb->s_bdev); 169 170 + /* 171 + * Now when everything is written we can discard the pagecache so 172 + * that userspace sees the changes. 173 + */ 174 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); 175 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 176 if (type != -1 && cnt != type) 177 continue; 178 if (!sb_has_quota_enabled(sb, cnt)) 179 continue; 180 + mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA); 181 + truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); 182 + mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); 183 } 184 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 185 } 186 187 void sync_dquots(struct super_block *sb, int type)
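Both quota hunks in this merge (here and in fs/dquot.c) rely on lockdep's nesting annotation rather than taking a private inode reference; a minimal sketch of the idiom, assuming 'inode' is the quota file's inode:

/* Sketch: mutex_lock_nested() assigns this i_mutex acquisition to the
 * I_MUTEX_QUOTA subclass, so lockdep does not flag it as recursive
 * while other inode locks are held.
 */
static void demo_truncate_quota_file(struct inode *inode)
{
        mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
        truncate_inode_pages(&inode->i_data, 0);
        mutex_unlock(&inode->i_mutex);
}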
+4 -6
fs/reiserfs/super.c
··· 511 { 512 struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo; 513 514 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 515 - INIT_LIST_HEAD(&ei->i_prealloc_list); 516 - inode_init_once(&ei->vfs_inode); 517 #ifdef CONFIG_REISERFS_FS_POSIX_ACL 518 - ei->i_acl_access = NULL; 519 - ei->i_acl_default = NULL; 520 #endif 521 - } 522 } 523 524 static int init_inodecache(void)
··· 511 { 512 struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo; 513 514 + INIT_LIST_HEAD(&ei->i_prealloc_list); 515 + inode_init_once(&ei->vfs_inode); 516 #ifdef CONFIG_REISERFS_FS_POSIX_ACL 517 + ei->i_acl_access = NULL; 518 + ei->i_acl_default = NULL; 519 #endif 520 } 521 522 static int init_inodecache(void)
+3 -4
fs/romfs/inode.c
··· 566 kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode)); 567 } 568 569 - static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 570 { 571 - struct romfs_inode_info *ei = (struct romfs_inode_info *) foo; 572 573 - if (flags & SLAB_CTOR_CONSTRUCTOR) 574 - inode_init_once(&ei->vfs_inode); 575 } 576 577 static int init_inodecache(void)
··· 566 kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode)); 567 } 568 569 + static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) 570 { 571 + struct romfs_inode_info *ei = foo; 572 573 + inode_init_once(&ei->vfs_inode); 574 } 575 576 static int init_inodecache(void)
+1 -2
fs/smbfs/inode.c
··· 70 { 71 struct smb_inode_info *ei = (struct smb_inode_info *) foo; 72 73 - if (flags & SLAB_CTOR_CONSTRUCTOR) 74 - inode_init_once(&ei->vfs_inode); 75 } 76 77 static int init_inodecache(void)
··· 70 { 71 struct smb_inode_info *ei = (struct smb_inode_info *) foo; 72 73 + inode_init_once(&ei->vfs_inode); 74 } 75 76 static int init_inodecache(void)
+1 -2
fs/sysv/inode.c
··· 322 { 323 struct sysv_inode_info *si = (struct sysv_inode_info *)p; 324 325 - if (flags & SLAB_CTOR_CONSTRUCTOR) 326 - inode_init_once(&si->vfs_inode); 327 } 328 329 const struct super_operations sysv_sops = {
··· 322 { 323 struct sysv_inode_info *si = (struct sysv_inode_info *)p; 324 325 + inode_init_once(&si->vfs_inode); 326 } 327 328 const struct super_operations sysv_sops = {
+2 -4
fs/udf/super.c
··· 134 { 135 struct udf_inode_info *ei = (struct udf_inode_info *) foo; 136 137 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 138 - ei->i_ext.i_data = NULL; 139 - inode_init_once(&ei->vfs_inode); 140 - } 141 } 142 143 static int init_inodecache(void)
··· 134 { 135 struct udf_inode_info *ei = (struct udf_inode_info *) foo; 136 137 + ei->i_ext.i_data = NULL; 138 + inode_init_once(&ei->vfs_inode); 139 } 140 141 static int init_inodecache(void)
+1 -2
fs/ufs/super.c
··· 1237 { 1238 struct ufs_inode_info *ei = (struct ufs_inode_info *) foo; 1239 1240 - if (flags & SLAB_CTOR_CONSTRUCTOR) 1241 - inode_init_once(&ei->vfs_inode); 1242 } 1243 1244 static int init_inodecache(void)
··· 1237 { 1238 struct ufs_inode_info *ei = (struct ufs_inode_info *) foo; 1239 1240 + inode_init_once(&ei->vfs_inode); 1241 } 1242 1243 static int init_inodecache(void)
+1 -2
fs/xfs/linux-2.6/xfs_super.c
··· 360 kmem_zone_t *zonep, 361 unsigned long flags) 362 { 363 - if (flags & SLAB_CTOR_CONSTRUCTOR) 364 - inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); 365 } 366 367 STATIC int
··· 360 kmem_zone_t *zonep, 361 unsigned long flags) 362 { 363 + inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); 364 } 365 366 STATIC int
+2 -5
include/acpi/acpi_numa.h
··· 11 #define MAX_PXM_DOMAINS (256) /* Old pxm spec is defined 8 bit */ 12 #endif 13 14 - extern int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]; 15 - extern int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]; 16 - 17 - extern int __cpuinit pxm_to_node(int); 18 - extern int __cpuinit node_to_pxm(int); 19 extern int __cpuinit acpi_map_pxm_to_node(int); 20 extern void __cpuinit acpi_unmap_pxm_to_node(int); 21
··· 11 #define MAX_PXM_DOMAINS (256) /* Old pxm spec is defined 8 bit */ 12 #endif 13 14 + extern int pxm_to_node(int); 15 + extern int node_to_pxm(int); 16 extern int __cpuinit acpi_map_pxm_to_node(int); 17 extern void __cpuinit acpi_unmap_pxm_to_node(int); 18
+2
include/linux/binfmts.h
··· 17 18 #ifdef __KERNEL__ 19 20 /* 21 * This structure is used to hold the arguments that are used when loading binaries. 22 */
··· 17 18 #ifdef __KERNEL__ 19 20 + #define CORENAME_MAX_SIZE 128 21 + 22 /* 23 * This structure is used to hold the arguments that are used when loading binaries. 24 */
+15 -5
include/linux/kmalloc_sizes.h
··· 19 CACHE(32768) 20 CACHE(65536) 21 CACHE(131072) 22 - #if (NR_CPUS > 512) || (MAX_NUMNODES > 256) || !defined(CONFIG_MMU) 23 CACHE(262144) 24 #endif 25 - #ifndef CONFIG_MMU 26 CACHE(524288) 27 CACHE(1048576) 28 - #ifdef CONFIG_LARGE_ALLOCS 29 CACHE(2097152) 30 CACHE(4194304) 31 CACHE(8388608) 32 CACHE(16777216) 33 CACHE(33554432) 34 - #endif /* CONFIG_LARGE_ALLOCS */ 35 - #endif /* CONFIG_MMU */
··· 19 CACHE(32768) 20 CACHE(65536) 21 CACHE(131072) 22 + #if KMALLOC_MAX_SIZE >= 262144 23 CACHE(262144) 24 #endif 25 + #if KMALLOC_MAX_SIZE >= 524288 26 CACHE(524288) 27 + #endif 28 + #if KMALLOC_MAX_SIZE >= 1048576 29 CACHE(1048576) 30 + #endif 31 + #if KMALLOC_MAX_SIZE >= 2097152 32 CACHE(2097152) 33 + #endif 34 + #if KMALLOC_MAX_SIZE >= 4194304 35 CACHE(4194304) 36 + #endif 37 + #if KMALLOC_MAX_SIZE >= 8388608 38 CACHE(8388608) 39 + #endif 40 + #if KMALLOC_MAX_SIZE >= 16777216 41 CACHE(16777216) 42 + #endif 43 + #if KMALLOC_MAX_SIZE >= 33554432 44 CACHE(33554432) 45 + #endif
+1
include/linux/pci_ids.h
··· 471 #define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2 0x0219 472 #define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX 0x021A 473 #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251 474 #define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252 475 476 #define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */
··· 471 #define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2 0x0219 472 #define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX 0x021A 473 #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251 474 + #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361 475 #define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252 476 477 #define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */
+5 -8
include/linux/rmap.h
··· 74 void page_add_file_rmap(struct page *); 75 void page_remove_rmap(struct page *, struct vm_area_struct *); 76 77 - /** 78 - * page_dup_rmap - duplicate pte mapping to a page 79 - * @page: the page to add the mapping to 80 - * 81 - * For copy_page_range only: minimal extract from page_add_rmap, 82 - * avoiding unnecessary tests (already checked) so it's quicker. 83 - */ 84 - static inline void page_dup_rmap(struct page *page) 85 { 86 atomic_inc(&page->_mapcount); 87 } 88 89 /* 90 * Called from mm/vmscan.c to handle paging out
··· 74 void page_add_file_rmap(struct page *); 75 void page_remove_rmap(struct page *, struct vm_area_struct *); 76 77 + #ifdef CONFIG_DEBUG_VM 78 + void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address); 79 + #else 80 + static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) 81 { 82 atomic_inc(&page->_mapcount); 83 } 84 + #endif 85 86 /* 87 * Called from mm/vmscan.c to handle paging out
+15 -6
include/linux/slab.h
··· 32 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ 33 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ 34 35 - /* Flags passed to a constructor functions */ 36 - #define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then deconstructor */ 37 - 38 /* 39 * struct kmem_cache related prototypes 40 */ ··· 72 return kmem_cache_alloc(cachep, flags); 73 } 74 #endif 75 76 /* 77 * Common kmalloc functions provided by all allocators ··· 244 kmalloc_track_caller(size, flags) 245 246 #endif /* DEBUG_SLAB */ 247 - 248 - extern const struct seq_operations slabinfo_op; 249 - ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *); 250 251 #endif /* __KERNEL__ */ 252 #endif /* _LINUX_SLAB_H */
··· 32 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ 33 #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ 34 35 /* 36 * struct kmem_cache related prototypes 37 */ ··· 75 return kmem_cache_alloc(cachep, flags); 76 } 77 #endif 78 + 79 + /* 80 + * The largest kmalloc size supported by the slab allocators is 81 + * 32 megabyte (2^25) or the maximum allocatable page order if that is 82 + * less than 32 MB. 83 + * 84 + * WARNING: Its not easy to increase this value since the allocators have 85 + * to do various tricks to work around compiler limitations in order to 86 + * ensure proper constant folding. 87 + */ 88 + #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 25 ? \ 89 + (MAX_ORDER + PAGE_SHIFT) : 25) 90 + 91 + #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH) 92 + #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT) 93 94 /* 95 * Common kmalloc functions provided by all allocators ··· 232 kmalloc_track_caller(size, flags) 233 234 #endif /* DEBUG_SLAB */ 235 236 #endif /* __KERNEL__ */ 237 #endif /* _LINUX_SLAB_H */
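
KMALLOC_SHIFT_HIGH, KMALLOC_MAX_SIZE and KMALLOC_MAX_ORDER give all three allocators (and the CACHE() table in kmalloc_sizes.h above) a single compile-time ceiling in place of the old CONFIG_MMU / CONFIG_LARGE_ALLOCS special cases. A minimal sketch of how a caller can lean on the new constant, assuming a hypothetical helper name that is not part of this merge:

    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /*
     * Hypothetical helper: use kmalloc() while the request fits under the
     * allocator-independent ceiling, otherwise fall back to vmalloc().
     */
    static void *big_buffer_alloc(size_t len)
    {
            if (len <= KMALLOC_MAX_SIZE)
                    return kmalloc(len, GFP_KERNEL);
            return vmalloc(len);
    }

The matching free path would of course have to remember which allocator was used (kfree() vs. vfree()); the sketch only shows the size test.
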
+3
include/linux/slab_def.h
··· 109 110 #endif /* CONFIG_NUMA */ 111 112 #endif /* _LINUX_SLAB_DEF_H */
··· 109 110 #endif /* CONFIG_NUMA */ 111 112 + extern const struct seq_operations slabinfo_op; 113 + ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *); 114 + 115 #endif /* _LINUX_SLAB_DEF_H */
+8 -19
include/linux/slub_def.h
··· 40 int objects; /* Number of objects in slab */ 41 int refcount; /* Refcount for slab cache destroy */ 42 void (*ctor)(void *, struct kmem_cache *, unsigned long); 43 - void (*dtor)(void *, struct kmem_cache *, unsigned long); 44 int inuse; /* Offset to metadata */ 45 int align; /* Alignment */ 46 const char *name; /* Name (only for display!) */ ··· 58 */ 59 #define KMALLOC_SHIFT_LOW 3 60 61 - #ifdef CONFIG_LARGE_ALLOCS 62 - #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) =< 25 ? \ 63 - (MAX_ORDER + PAGE_SHIFT - 1) : 25) 64 - #else 65 - #if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256 66 - #define KMALLOC_SHIFT_HIGH 20 67 - #else 68 - #define KMALLOC_SHIFT_HIGH 18 69 - #endif 70 - #endif 71 - 72 /* 73 * We keep the general caches in an array of slab caches that are used for 74 * 2^x bytes of allocations. ··· 68 * Sorry that the following has to be that ugly but some versions of GCC 69 * have trouble with constant propagation and loops. 70 */ 71 - static inline int kmalloc_index(int size) 72 { 73 /* 74 * We should return 0 if size == 0 but we use the smallest object ··· 76 */ 77 WARN_ON_ONCE(size == 0); 78 79 - if (size > (1 << KMALLOC_SHIFT_HIGH)) 80 return -1; 81 82 if (size > 64 && size <= 96) ··· 99 if (size <= 64 * 1024) return 16; 100 if (size <= 128 * 1024) return 17; 101 if (size <= 256 * 1024) return 18; 102 - #if KMALLOC_SHIFT_HIGH > 18 103 if (size <= 512 * 1024) return 19; 104 if (size <= 1024 * 1024) return 20; 105 - #endif 106 - #if KMALLOC_SHIFT_HIGH > 20 107 if (size <= 2 * 1024 * 1024) return 21; 108 if (size <= 4 * 1024 * 1024) return 22; 109 if (size <= 8 * 1024 * 1024) return 23; 110 if (size <= 16 * 1024 * 1024) return 24; 111 if (size <= 32 * 1024 * 1024) return 25; 112 - #endif 113 return -1; 114 115 /* ··· 130 if (index == 0) 131 return NULL; 132 133 - if (index < 0) { 134 /* 135 * Generate a link failure. Would be great if we could 136 * do something to stop the compile here.
··· 40 int objects; /* Number of objects in slab */ 41 int refcount; /* Refcount for slab cache destroy */ 42 void (*ctor)(void *, struct kmem_cache *, unsigned long); 43 int inuse; /* Offset to metadata */ 44 int align; /* Alignment */ 45 const char *name; /* Name (only for display!) */ ··· 59 */ 60 #define KMALLOC_SHIFT_LOW 3 61 62 /* 63 * We keep the general caches in an array of slab caches that are used for 64 * 2^x bytes of allocations. ··· 80 * Sorry that the following has to be that ugly but some versions of GCC 81 * have trouble with constant propagation and loops. 82 */ 83 + static inline int kmalloc_index(size_t size) 84 { 85 /* 86 * We should return 0 if size == 0 but we use the smallest object ··· 88 */ 89 WARN_ON_ONCE(size == 0); 90 91 + if (size > KMALLOC_MAX_SIZE) 92 return -1; 93 94 if (size > 64 && size <= 96) ··· 111 if (size <= 64 * 1024) return 16; 112 if (size <= 128 * 1024) return 17; 113 if (size <= 256 * 1024) return 18; 114 if (size <= 512 * 1024) return 19; 115 if (size <= 1024 * 1024) return 20; 116 if (size <= 2 * 1024 * 1024) return 21; 117 if (size <= 4 * 1024 * 1024) return 22; 118 if (size <= 8 * 1024 * 1024) return 23; 119 if (size <= 16 * 1024 * 1024) return 24; 120 if (size <= 32 * 1024 * 1024) return 25; 121 return -1; 122 123 /* ··· 146 if (index == 0) 147 return NULL; 148 149 + /* 150 + * This function only gets expanded if __builtin_constant_p(size), so 151 + * testing it here shouldn't be needed. But some versions of gcc need 152 + * help. 153 + */ 154 + if (__builtin_constant_p(size) && index < 0) { 155 /* 156 * Generate a link failure. Would be great if we could 157 * do something to stop the compile here.
+3 -4
include/linux/smp.h
··· 6 * Alan Cox. <alan@redhat.com> 7 */ 8 9 10 extern void cpu_idle(void); 11 ··· 100 #define num_booting_cpus() 1 101 #define smp_prepare_boot_cpu() do {} while (0) 102 static inline int smp_call_function_single(int cpuid, void (*func) (void *info), 103 - void *info, int retry, int wait) 104 { 105 - /* Disable interrupts here? */ 106 - func(info); 107 - return 0; 108 } 109 110 #endif /* !SMP */
··· 6 * Alan Cox. <alan@redhat.com> 7 */ 8 9 + #include <linux/errno.h> 10 11 extern void cpu_idle(void); 12 ··· 99 #define num_booting_cpus() 1 100 #define smp_prepare_boot_cpu() do {} while (0) 101 static inline int smp_call_function_single(int cpuid, void (*func) (void *info), 102 + void *info, int retry, int wait) 103 { 104 + return -EBUSY; 105 } 106 107 #endif /* !SMP */
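
On uniprocessor builds the stub used to call func(info) directly; it now reports failure instead (hence the new <linux/errno.h> include for -EBUSY), bringing it in line with the SMP version, which refuses to target the local CPU. A hedged sketch of how a caller copes, with hypothetical do_work()/run_work_on() names:

    #include <linux/smp.h>

    static void do_work(void *info)
    {
            /* ... */
    }

    /*
     * Hypothetical caller: ask `cpu` to run do_work(); if the call is refused
     * (the UP stub above now returns -EBUSY), run it locally instead.
     */
    static void run_work_on(int cpu, void *arg)
    {
            if (smp_call_function_single(cpu, do_work, arg, 0, 1) != 0)
                    do_work(arg);
    }
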
+1 -1
include/linux/workqueue.h
··· 122 int singlethread, 123 int freezeable); 124 #define create_workqueue(name) __create_workqueue((name), 0, 0) 125 - #define create_freezeable_workqueue(name) __create_workqueue((name), 0, 1) 126 #define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0) 127 128 extern void destroy_workqueue(struct workqueue_struct *wq);
··· 122 int singlethread, 123 int freezeable); 124 #define create_workqueue(name) __create_workqueue((name), 0, 0) 125 + #define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1) 126 #define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0) 127 128 extern void destroy_workqueue(struct workqueue_struct *wq);
+2 -6
init/Kconfig
··· 567 a slab allocator. 568 569 config SLUB 570 - depends on EXPERIMENTAL && !ARCH_USES_SLAB_PAGE_STRUCT 571 bool "SLUB (Unqueued Allocator)" 572 help 573 SLUB is a slab allocator that minimizes cache line usage ··· 576 and has enhanced diagnostics. 577 578 config SLOB 579 - # 580 - # SLOB does not support SMP because SLAB_DESTROY_BY_RCU is unsupported 581 - # 582 - depends on EMBEDDED && !SMP && !SPARSEMEM 583 bool "SLOB (Simple Allocator)" 584 help 585 SLOB replaces the SLAB allocator with a drastically simpler 586 - allocator. SLOB is more space efficient that SLAB but does not 587 scale well (single lock for all operations) and is also highly 588 susceptible to fragmentation. SLUB can accomplish a higher object 589 density. It is usually better to use SLUB instead of SLOB.
··· 567 a slab allocator. 568 569 config SLUB 570 bool "SLUB (Unqueued Allocator)" 571 help 572 SLUB is a slab allocator that minimizes cache line usage ··· 577 and has enhanced diagnostics. 578 579 config SLOB 580 + depends on EMBEDDED && !SPARSEMEM 581 bool "SLOB (Simple Allocator)" 582 help 583 SLOB replaces the SLAB allocator with a drastically simpler 584 + allocator. SLOB is more space efficient than SLAB but does not 585 scale well (single lock for all operations) and is also highly 586 susceptible to fragmentation. SLUB can accomplish a higher object 587 density. It is usually better to use SLUB instead of SLOB.
+1 -2
ipc/mqueue.c
··· 215 { 216 struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; 217 218 - if (flags & SLAB_CTOR_CONSTRUCTOR) 219 - inode_init_once(&p->vfs_inode); 220 } 221 222 static struct inode *mqueue_alloc_inode(struct super_block *sb)
··· 215 { 216 struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; 217 218 + inode_init_once(&p->vfs_inode); 219 } 220 221 static struct inode *mqueue_alloc_inode(struct super_block *sb)
+2 -4
kernel/fork.c
··· 1427 { 1428 struct sighand_struct *sighand = data; 1429 1430 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 1431 - spin_lock_init(&sighand->siglock); 1432 - INIT_LIST_HEAD(&sighand->signalfd_list); 1433 - } 1434 } 1435 1436 void __init proc_caches_init(void)
··· 1427 { 1428 struct sighand_struct *sighand = data; 1429 1430 + spin_lock_init(&sighand->siglock); 1431 + INIT_LIST_HEAD(&sighand->signalfd_list); 1432 } 1433 1434 void __init proc_caches_init(void)
+2 -1
kernel/power/disk.c
··· 416 417 mutex_lock(&pm_mutex); 418 for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) { 419 - if (!strncmp(buf, hibernation_modes[i], len)) { 420 mode = i; 421 break; 422 }
··· 416 417 mutex_lock(&pm_mutex); 418 for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) { 419 + if (len == strlen(hibernation_modes[i]) 420 + && !strncmp(buf, hibernation_modes[i], len)) { 421 mode = i; 422 break; 423 }
+2 -2
kernel/power/main.c
··· 290 len = p ? p - buf : n; 291 292 /* First, check if we are requested to hibernate */ 293 - if (!strncmp(buf, "disk", len)) { 294 error = hibernate(); 295 return error ? error : n; 296 } 297 298 for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) { 299 - if (*s && !strncmp(buf, *s, len)) 300 break; 301 } 302 if (state < PM_SUSPEND_MAX && *s)
··· 290 len = p ? p - buf : n; 291 292 /* First, check if we are requested to hibernate */ 293 + if (len == 4 && !strncmp(buf, "disk", len)) { 294 error = hibernate(); 295 return error ? error : n; 296 } 297 298 for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) { 299 + if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) 300 break; 301 } 302 if (state < PM_SUSPEND_MAX && *s)
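
Both store handlers above (and the hibernation one in kernel/power/disk.c) move from a bare strncmp() prefix test to an exact-length match, so a write of "d" no longer selects "disk". A small stand-alone illustration of the pitfall being closed; this is user-space C for demonstration only, not kernel code:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *buf = "d";  /* what `echo d` delivers once the newline is trimmed */
            size_t len = strlen(buf);

            /* old check: any prefix of "disk" matched, so this prints 1 */
            printf("old match: %d\n", !strncmp(buf, "disk", len));
            /* new check: the length must match too, so this prints 0 */
            printf("new match: %d\n",
                   len == strlen("disk") && !strncmp(buf, "disk", len));
            return 0;
    }
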
+1 -1
kernel/sysctl.c
··· 227 .ctl_name = KERN_CORE_PATTERN, 228 .procname = "core_pattern", 229 .data = core_pattern, 230 - .maxlen = 128, 231 .mode = 0644, 232 .proc_handler = &proc_dostring, 233 .strategy = &sysctl_string,
··· 227 .ctl_name = KERN_CORE_PATTERN, 228 .procname = "core_pattern", 229 .data = core_pattern, 230 + .maxlen = CORENAME_MAX_SIZE, 231 .mode = 0644, 232 .proc_handler = &proc_dostring, 233 .strategy = &sysctl_string,
+1 -1
mm/memory.c
··· 481 page = vm_normal_page(vma, addr, pte); 482 if (page) { 483 get_page(page); 484 - page_dup_rmap(page); 485 rss[!!PageAnon(page)]++; 486 } 487
··· 481 page = vm_normal_page(vma, addr, pte); 482 if (page) { 483 get_page(page); 484 + page_dup_rmap(page, vma, addr); 485 rss[!!PageAnon(page)]++; 486 } 487
+59 -7
mm/rmap.c
··· 162 static void anon_vma_ctor(void *data, struct kmem_cache *cachep, 163 unsigned long flags) 164 { 165 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 166 - struct anon_vma *anon_vma = data; 167 168 - spin_lock_init(&anon_vma->lock); 169 - INIT_LIST_HEAD(&anon_vma->head); 170 - } 171 } 172 173 void __init anon_vma_init(void) ··· 530 } 531 532 /** 533 * page_add_anon_rmap - add pte mapping to an anonymous page 534 * @page: the page to add the mapping to 535 * @vma: the vm area in which the mapping is added 536 * @address: the user virtual address mapped 537 * 538 - * The caller needs to hold the pte lock. 539 */ 540 void page_add_anon_rmap(struct page *page, 541 struct vm_area_struct *vma, unsigned long address) 542 { 543 if (atomic_inc_and_test(&page->_mapcount)) 544 __page_set_anon_rmap(page, vma, address); 545 - /* else checking page index and mapping is racy */ 546 } 547 548 /* ··· 585 * 586 * Same as page_add_anon_rmap but must only be called on *new* pages. 587 * This means the inc-and-test can be bypassed. 588 */ 589 void page_add_new_anon_rmap(struct page *page, 590 struct vm_area_struct *vma, unsigned long address) 591 { 592 atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */ 593 __page_set_anon_rmap(page, vma, address); 594 } ··· 606 if (atomic_inc_and_test(&page->_mapcount)) 607 __inc_zone_page_state(page, NR_FILE_MAPPED); 608 } 609 610 /** 611 * page_remove_rmap - take down pte mapping from a page
··· 162 static void anon_vma_ctor(void *data, struct kmem_cache *cachep, 163 unsigned long flags) 164 { 165 + struct anon_vma *anon_vma = data; 166 167 + spin_lock_init(&anon_vma->lock); 168 + INIT_LIST_HEAD(&anon_vma->head); 169 } 170 171 void __init anon_vma_init(void) ··· 532 } 533 534 /** 535 + * page_set_anon_rmap - sanity check anonymous rmap addition 536 + * @page: the page to add the mapping to 537 + * @vma: the vm area in which the mapping is added 538 + * @address: the user virtual address mapped 539 + */ 540 + static void __page_check_anon_rmap(struct page *page, 541 + struct vm_area_struct *vma, unsigned long address) 542 + { 543 + #ifdef CONFIG_DEBUG_VM 544 + /* 545 + * The page's anon-rmap details (mapping and index) are guaranteed to 546 + * be set up correctly at this point. 547 + * 548 + * We have exclusion against page_add_anon_rmap because the caller 549 + * always holds the page locked, except if called from page_dup_rmap, 550 + * in which case the page is already known to be setup. 551 + * 552 + * We have exclusion against page_add_new_anon_rmap because those pages 553 + * are initially only visible via the pagetables, and the pte is locked 554 + * over the call to page_add_new_anon_rmap. 555 + */ 556 + struct anon_vma *anon_vma = vma->anon_vma; 557 + anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 558 + BUG_ON(page->mapping != (struct address_space *)anon_vma); 559 + BUG_ON(page->index != linear_page_index(vma, address)); 560 + #endif 561 + } 562 + 563 + /** 564 * page_add_anon_rmap - add pte mapping to an anonymous page 565 * @page: the page to add the mapping to 566 * @vma: the vm area in which the mapping is added 567 * @address: the user virtual address mapped 568 * 569 + * The caller needs to hold the pte lock and the page must be locked. 570 */ 571 void page_add_anon_rmap(struct page *page, 572 struct vm_area_struct *vma, unsigned long address) 573 { 574 + VM_BUG_ON(!PageLocked(page)); 575 + VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 576 if (atomic_inc_and_test(&page->_mapcount)) 577 __page_set_anon_rmap(page, vma, address); 578 + else 579 + __page_check_anon_rmap(page, vma, address); 580 } 581 582 /* ··· 555 * 556 * Same as page_add_anon_rmap but must only be called on *new* pages. 557 * This means the inc-and-test can be bypassed. 558 + * Page does not have to be locked. 559 */ 560 void page_add_new_anon_rmap(struct page *page, 561 struct vm_area_struct *vma, unsigned long address) 562 { 563 + BUG_ON(address < vma->vm_start || address >= vma->vm_end); 564 atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */ 565 __page_set_anon_rmap(page, vma, address); 566 } ··· 574 if (atomic_inc_and_test(&page->_mapcount)) 575 __inc_zone_page_state(page, NR_FILE_MAPPED); 576 } 577 + 578 + #ifdef CONFIG_DEBUG_VM 579 + /** 580 + * page_dup_rmap - duplicate pte mapping to a page 581 + * @page: the page to add the mapping to 582 + * 583 + * For copy_page_range only: minimal extract from page_add_file_rmap / 584 + * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's 585 + * quicker. 586 + * 587 + * The caller needs to hold the pte lock. 588 + */ 589 + void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) 590 + { 591 + BUG_ON(page_mapcount(page) == 0); 592 + if (PageAnon(page)) 593 + __page_check_anon_rmap(page, vma, address); 594 + atomic_inc(&page->_mapcount); 595 + } 596 + #endif 597 598 /** 599 * page_remove_rmap - take down pte mapping from a page
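
The new __page_check_anon_rmap() spells out the invariant being checked: an anonymous page's ->mapping holds the owning anon_vma with PAGE_MAPPING_ANON folded in, and ->index equals linear_page_index(vma, address). For orientation, a hedged sketch of how that encoding is read back; anon_vma_of() is a hypothetical name, and the real lookup paths in mm/rmap.c also guard against the page being unmapped concurrently, which this sketch ignores:

    #include <linux/mm.h>
    #include <linux/rmap.h>

    /* hypothetical: recover the anon_vma encoded in page->mapping, or NULL */
    static inline struct anon_vma *anon_vma_of(struct page *page)
    {
            unsigned long mapping = (unsigned long)page->mapping;

            if (!(mapping & PAGE_MAPPING_ANON))
                    return NULL;            /* file-backed or not anonymously mapped */
            return (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
    }
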
+3 -5
mm/shmem.c
··· 2358 { 2359 struct shmem_inode_info *p = (struct shmem_inode_info *) foo; 2360 2361 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 2362 - inode_init_once(&p->vfs_inode); 2363 #ifdef CONFIG_TMPFS_POSIX_ACL 2364 - p->i_acl = NULL; 2365 - p->i_default_acl = NULL; 2366 #endif 2367 - } 2368 } 2369 2370 static int init_inodecache(void)
··· 2358 { 2359 struct shmem_inode_info *p = (struct shmem_inode_info *) foo; 2360 2361 + inode_init_once(&p->vfs_inode); 2362 #ifdef CONFIG_TMPFS_POSIX_ACL 2363 + p->i_acl = NULL; 2364 + p->i_default_acl = NULL; 2365 #endif 2366 } 2367 2368 static int init_inodecache(void)
+9 -48
mm/slab.c
··· 409 /* constructor func */ 410 void (*ctor) (void *, struct kmem_cache *, unsigned long); 411 412 - /* de-constructor func */ 413 - void (*dtor) (void *, struct kmem_cache *, unsigned long); 414 - 415 /* 5) cache creation/removal */ 416 const char *name; 417 struct list_head next; ··· 566 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) 567 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) 568 569 - #endif 570 - 571 - /* 572 - * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp 573 - * order. 574 - */ 575 - #if defined(CONFIG_LARGE_ALLOCS) 576 - #define MAX_OBJ_ORDER 13 /* up to 32Mb */ 577 - #define MAX_GFP_ORDER 13 /* up to 32Mb */ 578 - #elif defined(CONFIG_MMU) 579 - #define MAX_OBJ_ORDER 5 /* 32 pages */ 580 - #define MAX_GFP_ORDER 5 /* 32 pages */ 581 - #else 582 - #define MAX_OBJ_ORDER 8 /* up to 1Mb */ 583 - #define MAX_GFP_ORDER 8 /* up to 1Mb */ 584 #endif 585 586 /* ··· 774 */ 775 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); 776 #endif 777 while (size > csizep->cs_size) 778 csizep++; 779 ··· 1894 slab_error(cachep, "end of a freed object " 1895 "was overwritten"); 1896 } 1897 - if (cachep->dtor && !(cachep->flags & SLAB_POISON)) 1898 - (cachep->dtor) (objp + obj_offset(cachep), cachep, 0); 1899 } 1900 } 1901 #else 1902 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) 1903 { 1904 - if (cachep->dtor) { 1905 - int i; 1906 - for (i = 0; i < cachep->num; i++) { 1907 - void *objp = index_to_obj(cachep, slabp, i); 1908 - (cachep->dtor) (objp, cachep, 0); 1909 - } 1910 - } 1911 } 1912 #endif 1913 ··· 1987 size_t left_over = 0; 1988 int gfporder; 1989 1990 - for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) { 1991 unsigned int num; 1992 size_t remainder; 1993 ··· 2098 * @align: The required alignment for the objects. 2099 * @flags: SLAB flags 2100 * @ctor: A constructor for the objects. 2101 - * @dtor: A destructor for the objects. 2102 * 2103 * Returns a ptr to the cache on success, NULL on failure. 2104 * Cannot be called within a int, but can be interrupted. ··· 2133 * Sanity checks... these are all serious usage bugs. 2134 */ 2135 if (!name || in_interrupt() || (size < BYTES_PER_WORD) || 2136 - (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) { 2137 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, 2138 name); 2139 BUG(); ··· 2187 if (flags & SLAB_DESTROY_BY_RCU) 2188 BUG_ON(flags & SLAB_POISON); 2189 #endif 2190 - if (flags & SLAB_DESTROY_BY_RCU) 2191 - BUG_ON(dtor); 2192 - 2193 /* 2194 * Always checks flags, a caller might be expecting debug support which 2195 * isn't available. 
··· 2341 BUG_ON(!cachep->slabp_cache); 2342 } 2343 cachep->ctor = ctor; 2344 - cachep->dtor = dtor; 2345 cachep->name = name; 2346 2347 if (setup_cpu_cache(cachep)) { ··· 2595 } 2596 2597 static void cache_init_objs(struct kmem_cache *cachep, 2598 - struct slab *slabp, unsigned long ctor_flags) 2599 { 2600 int i; 2601 ··· 2619 */ 2620 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) 2621 cachep->ctor(objp + obj_offset(cachep), cachep, 2622 - ctor_flags); 2623 2624 if (cachep->flags & SLAB_RED_ZONE) { 2625 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) ··· 2635 cachep->buffer_size / PAGE_SIZE, 0); 2636 #else 2637 if (cachep->ctor) 2638 - cachep->ctor(objp, cachep, ctor_flags); 2639 #endif 2640 slab_bufctl(slabp)[i] = i + 1; 2641 } ··· 2724 struct slab *slabp; 2725 size_t offset; 2726 gfp_t local_flags; 2727 - unsigned long ctor_flags; 2728 struct kmem_list3 *l3; 2729 2730 /* ··· 2732 */ 2733 BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)); 2734 2735 - ctor_flags = SLAB_CTOR_CONSTRUCTOR; 2736 local_flags = (flags & GFP_LEVEL_MASK); 2737 /* Take the l3 list lock to change the colour_next on this node */ 2738 check_irq_off(); ··· 2776 slabp->nodeid = nodeid; 2777 slab_map_pages(cachep, slabp, objp); 2778 2779 - cache_init_objs(cachep, slabp, ctor_flags); 2780 2781 if (local_flags & __GFP_WAIT) 2782 local_irq_disable(); ··· 2803 * Perform extra freeing checks: 2804 * - detect bad pointers. 2805 * - POISON/RED_ZONE checking 2806 - * - destructor calls, for caches with POISON+dtor 2807 */ 2808 static void kfree_debugcheck(const void *objp) 2809 { ··· 2861 BUG_ON(objnr >= cachep->num); 2862 BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); 2863 2864 - if (cachep->flags & SLAB_POISON && cachep->dtor) { 2865 - /* we want to cache poison the object, 2866 - * call the destruction callback 2867 - */ 2868 - cachep->dtor(objp + obj_offset(cachep), cachep, 0); 2869 - } 2870 #ifdef CONFIG_DEBUG_SLAB_LEAK 2871 slab_bufctl(slabp)[objnr] = BUFCTL_FREE; 2872 #endif ··· 3060 #endif 3061 objp += obj_offset(cachep); 3062 if (cachep->ctor && cachep->flags & SLAB_POISON) 3063 - cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR); 3064 #if ARCH_SLAB_MINALIGN 3065 if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { 3066 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
··· 409 /* constructor func */ 410 void (*ctor) (void *, struct kmem_cache *, unsigned long); 411 412 /* 5) cache creation/removal */ 413 const char *name; 414 struct list_head next; ··· 569 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) 570 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) 571 572 #endif 573 574 /* ··· 792 */ 793 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); 794 #endif 795 + WARN_ON_ONCE(size == 0); 796 while (size > csizep->cs_size) 797 csizep++; 798 ··· 1911 slab_error(cachep, "end of a freed object " 1912 "was overwritten"); 1913 } 1914 } 1915 } 1916 #else 1917 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) 1918 { 1919 } 1920 #endif 1921 ··· 2013 size_t left_over = 0; 2014 int gfporder; 2015 2016 + for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) { 2017 unsigned int num; 2018 size_t remainder; 2019 ··· 2124 * @align: The required alignment for the objects. 2125 * @flags: SLAB flags 2126 * @ctor: A constructor for the objects. 2127 + * @dtor: A destructor for the objects (not implemented anymore). 2128 * 2129 * Returns a ptr to the cache on success, NULL on failure. 2130 * Cannot be called within a int, but can be interrupted. ··· 2159 * Sanity checks... these are all serious usage bugs. 2160 */ 2161 if (!name || in_interrupt() || (size < BYTES_PER_WORD) || 2162 + size > KMALLOC_MAX_SIZE || dtor) { 2163 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, 2164 name); 2165 BUG(); ··· 2213 if (flags & SLAB_DESTROY_BY_RCU) 2214 BUG_ON(flags & SLAB_POISON); 2215 #endif 2216 /* 2217 * Always checks flags, a caller might be expecting debug support which 2218 * isn't available. ··· 2370 BUG_ON(!cachep->slabp_cache); 2371 } 2372 cachep->ctor = ctor; 2373 cachep->name = name; 2374 2375 if (setup_cpu_cache(cachep)) { ··· 2625 } 2626 2627 static void cache_init_objs(struct kmem_cache *cachep, 2628 + struct slab *slabp) 2629 { 2630 int i; 2631 ··· 2649 */ 2650 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) 2651 cachep->ctor(objp + obj_offset(cachep), cachep, 2652 + 0); 2653 2654 if (cachep->flags & SLAB_RED_ZONE) { 2655 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) ··· 2665 cachep->buffer_size / PAGE_SIZE, 0); 2666 #else 2667 if (cachep->ctor) 2668 + cachep->ctor(objp, cachep, 0); 2669 #endif 2670 slab_bufctl(slabp)[i] = i + 1; 2671 } ··· 2754 struct slab *slabp; 2755 size_t offset; 2756 gfp_t local_flags; 2757 struct kmem_list3 *l3; 2758 2759 /* ··· 2763 */ 2764 BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)); 2765 2766 local_flags = (flags & GFP_LEVEL_MASK); 2767 /* Take the l3 list lock to change the colour_next on this node */ 2768 check_irq_off(); ··· 2808 slabp->nodeid = nodeid; 2809 slab_map_pages(cachep, slabp, objp); 2810 2811 + cache_init_objs(cachep, slabp); 2812 2813 if (local_flags & __GFP_WAIT) 2814 local_irq_disable(); ··· 2835 * Perform extra freeing checks: 2836 * - detect bad pointers. 2837 * - POISON/RED_ZONE checking 2838 */ 2839 static void kfree_debugcheck(const void *objp) 2840 { ··· 2894 BUG_ON(objnr >= cachep->num); 2895 BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); 2896 2897 #ifdef CONFIG_DEBUG_SLAB_LEAK 2898 slab_bufctl(slabp)[objnr] = BUFCTL_FREE; 2899 #endif ··· 3099 #endif 3100 objp += obj_offset(cachep); 3101 if (cachep->ctor && cachep->flags & SLAB_POISON) 3102 + cachep->ctor(objp, cachep, 0); 3103 #if ARCH_SLAB_MINALIGN 3104 if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { 3105 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
+43 -10
mm/slob.c
··· 35 #include <linux/init.h> 36 #include <linux/module.h> 37 #include <linux/timer.h> 38 39 struct slob_block { 40 int units; ··· 53 struct bigblock *next; 54 }; 55 typedef struct bigblock bigblock_t; 56 57 static slob_t arena = { .next = &arena, .units = 1 }; 58 static slob_t *slobfree = &arena; ··· 277 278 struct kmem_cache { 279 unsigned int size, align; 280 const char *name; 281 void (*ctor)(void *, struct kmem_cache *, unsigned long); 282 - void (*dtor)(void *, struct kmem_cache *, unsigned long); 283 }; 284 285 struct kmem_cache *kmem_cache_create(const char *name, size_t size, ··· 294 if (c) { 295 c->name = name; 296 c->size = size; 297 c->ctor = ctor; 298 - c->dtor = dtor; 299 /* ignore alignment unless it's forced */ 300 c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0; 301 if (c->align < align) ··· 327 b = (void *)__get_free_pages(flags, get_order(c->size)); 328 329 if (c->ctor) 330 - c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR); 331 332 return b; 333 } ··· 343 } 344 EXPORT_SYMBOL(kmem_cache_zalloc); 345 346 void kmem_cache_free(struct kmem_cache *c, void *b) 347 { 348 - if (c->dtor) 349 - c->dtor(b, c, 0); 350 - 351 - if (c->size < PAGE_SIZE) 352 - slob_free(b, c->size); 353 - else 354 - free_pages((unsigned long)b, get_order(c->size)); 355 } 356 EXPORT_SYMBOL(kmem_cache_free); 357
··· 35 #include <linux/init.h> 36 #include <linux/module.h> 37 #include <linux/timer.h> 38 + #include <linux/rcupdate.h> 39 40 struct slob_block { 41 int units; ··· 52 struct bigblock *next; 53 }; 54 typedef struct bigblock bigblock_t; 55 + 56 + /* 57 + * struct slob_rcu is inserted at the tail of allocated slob blocks, which 58 + * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free 59 + * the block using call_rcu. 60 + */ 61 + struct slob_rcu { 62 + struct rcu_head head; 63 + int size; 64 + }; 65 66 static slob_t arena = { .next = &arena, .units = 1 }; 67 static slob_t *slobfree = &arena; ··· 266 267 struct kmem_cache { 268 unsigned int size, align; 269 + unsigned long flags; 270 const char *name; 271 void (*ctor)(void *, struct kmem_cache *, unsigned long); 272 }; 273 274 struct kmem_cache *kmem_cache_create(const char *name, size_t size, ··· 283 if (c) { 284 c->name = name; 285 c->size = size; 286 + if (flags & SLAB_DESTROY_BY_RCU) { 287 + /* leave room for rcu footer at the end of object */ 288 + c->size += sizeof(struct slob_rcu); 289 + } 290 + c->flags = flags; 291 c->ctor = ctor; 292 /* ignore alignment unless it's forced */ 293 c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0; 294 if (c->align < align) ··· 312 b = (void *)__get_free_pages(flags, get_order(c->size)); 313 314 if (c->ctor) 315 + c->ctor(b, c, 0); 316 317 return b; 318 } ··· 328 } 329 EXPORT_SYMBOL(kmem_cache_zalloc); 330 331 + static void __kmem_cache_free(void *b, int size) 332 + { 333 + if (size < PAGE_SIZE) 334 + slob_free(b, size); 335 + else 336 + free_pages((unsigned long)b, get_order(size)); 337 + } 338 + 339 + static void kmem_rcu_free(struct rcu_head *head) 340 + { 341 + struct slob_rcu *slob_rcu = (struct slob_rcu *)head; 342 + void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu)); 343 + 344 + __kmem_cache_free(b, slob_rcu->size); 345 + } 346 + 347 void kmem_cache_free(struct kmem_cache *c, void *b) 348 { 349 + if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) { 350 + struct slob_rcu *slob_rcu; 351 + slob_rcu = b + (c->size - sizeof(struct slob_rcu)); 352 + INIT_RCU_HEAD(&slob_rcu->head); 353 + slob_rcu->size = c->size; 354 + call_rcu(&slob_rcu->head, kmem_rcu_free); 355 + } else { 356 + __kmem_cache_free(b, c->size); 357 + } 358 } 359 EXPORT_SYMBOL(kmem_cache_free); 360
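
With deferred freeing through call_rcu(), SLOB can now honour SLAB_DESTROY_BY_RCU, which is what lets the !SMP dependency disappear from its Kconfig entry above. A minimal sketch of the usual caller-side contract for such a cache, with hypothetical conn names (these are the long-standing SLAB_DESTROY_BY_RCU rules, not something introduced by this merge):

    #include <linux/slab.h>
    #include <linux/init.h>
    #include <linux/errno.h>
    #include <asm/atomic.h>

    struct conn {
            atomic_t refcnt;
            /* ... */
    };

    static struct kmem_cache *conn_cachep;

    static int __init conn_cache_init(void)
    {
            /* pages go back to the page allocator only after an RCU grace period */
            conn_cachep = kmem_cache_create("conn_cache", sizeof(struct conn),
                                            0, SLAB_DESTROY_BY_RCU, NULL, NULL);
            return conn_cachep ? 0 : -ENOMEM;
    }

The usual caveat still applies: SLAB_DESTROY_BY_RCU only keeps the memory type-stable across the grace period, so an object found under rcu_read_lock() may already have been freed and reused, and lookups must revalidate it (for example via the reference count) after finding it.
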
+120 -116
mm/slub.c
··· 78 * 79 * Overloading of page flags that are otherwise used for LRU management. 80 * 81 - * PageActive The slab is used as a cpu cache. Allocations 82 - * may be performed from the slab. The slab is not 83 - * on any slab list and cannot be moved onto one. 84 - * The cpu slab may be equipped with an additioanl 85 * lockless_freelist that allows lockless access to 86 * free objects in addition to the regular freelist 87 * that requires the slab lock. ··· 99 * the fast path and disables lockless freelists. 100 */ 101 102 static inline int SlabDebug(struct page *page) 103 { 104 - #ifdef CONFIG_SLUB_DEBUG 105 - return PageError(page); 106 - #else 107 - return 0; 108 - #endif 109 } 110 111 static inline void SetSlabDebug(struct page *page) 112 { 113 - #ifdef CONFIG_SLUB_DEBUG 114 - SetPageError(page); 115 - #endif 116 } 117 118 static inline void ClearSlabDebug(struct page *page) 119 { 120 - #ifdef CONFIG_SLUB_DEBUG 121 - ClearPageError(page); 122 - #endif 123 } 124 125 /* ··· 742 return search == NULL; 743 } 744 745 /* 746 * Tracking of fully allocated slabs for debugging purposes. 747 */ ··· 782 spin_unlock(&n->list_lock); 783 } 784 785 - static int alloc_object_checks(struct kmem_cache *s, struct page *page, 786 - void *object) 787 { 788 if (!check_slab(s, page)) 789 goto bad; ··· 808 goto bad; 809 } 810 811 - if (!object) 812 - return 1; 813 - 814 - if (!check_object(s, page, object, 0)) 815 goto bad; 816 817 return 1; 818 bad: 819 if (PageSlab(page)) { 820 /* ··· 835 return 0; 836 } 837 838 - static int free_object_checks(struct kmem_cache *s, struct page *page, 839 - void *object) 840 { 841 if (!check_slab(s, page)) 842 goto fail; ··· 870 "to slab %s", object, page->slab->name); 871 goto fail; 872 } 873 return 1; 874 fail: 875 printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n", 876 s->name, page, object); 877 return 0; 878 - } 879 - 880 - static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc) 881 - { 882 - if (s->flags & SLAB_TRACE) { 883 - printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 884 - s->name, 885 - alloc ? "alloc" : "free", 886 - object, page->inuse, 887 - page->freelist); 888 - 889 - if (!alloc) 890 - print_section("Object", (void *)object, s->objsize); 891 - 892 - dump_stack(); 893 - } 894 } 895 896 static int __init setup_slub_debug(char *str) ··· 936 * On 32 bit platforms the limit is 256k. On 64bit platforms 937 * the limit is 512k. 938 * 939 - * Debugging or ctor/dtors may create a need to move the free 940 * pointer. Fail if this happens. 
941 */ 942 if (s->size >= 65535 * sizeof(void *)) { 943 BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON | 944 SLAB_STORE_USER | SLAB_DESTROY_BY_RCU)); 945 - BUG_ON(s->ctor || s->dtor); 946 } 947 else 948 /* ··· 954 s->flags |= slub_debug; 955 } 956 #else 957 958 - static inline int alloc_object_checks(struct kmem_cache *s, 959 - struct page *page, void *object) { return 0; } 960 961 - static inline int free_object_checks(struct kmem_cache *s, 962 - struct page *page, void *object) { return 0; } 963 964 - static inline void add_full(struct kmem_cache_node *n, struct page *page) {} 965 - static inline void remove_full(struct kmem_cache *s, struct page *page) {} 966 - static inline void trace(struct kmem_cache *s, struct page *page, 967 - void *object, int alloc) {} 968 - static inline void init_object(struct kmem_cache *s, 969 - void *object, int active) {} 970 - static inline void init_tracking(struct kmem_cache *s, void *object) {} 971 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 972 { return 1; } 973 static inline int check_object(struct kmem_cache *s, struct page *page, 974 void *object, int active) { return 1; } 975 - static inline void set_track(struct kmem_cache *s, void *object, 976 - enum track_item alloc, void *addr) {} 977 static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {} 978 #define slub_debug 0 979 #endif ··· 1004 static void setup_object(struct kmem_cache *s, struct page *page, 1005 void *object) 1006 { 1007 - if (SlabDebug(page)) { 1008 - init_object(s, object, 0); 1009 - init_tracking(s, object); 1010 - } 1011 - 1012 if (unlikely(s->ctor)) 1013 - s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR); 1014 } 1015 1016 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) ··· 1065 { 1066 int pages = 1 << s->order; 1067 1068 - if (unlikely(SlabDebug(page) || s->dtor)) { 1069 void *p; 1070 1071 slab_pad_check(s, page); 1072 - for_each_object(p, s, page_address(page)) { 1073 - if (s->dtor) 1074 - s->dtor(p, s, 0); 1075 check_object(s, page, p, 0); 1076 - } 1077 } 1078 1079 mod_zone_page_state(page_zone(page), ··· 1170 * 1171 * Must hold list_lock. 1172 */ 1173 - static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page) 1174 { 1175 if (slab_trylock(page)) { 1176 list_del(&page->lru); 1177 n->nr_partial--; 1178 return 1; 1179 } 1180 return 0; ··· 1199 1200 spin_lock(&n->list_lock); 1201 list_for_each_entry(page, &n->partial, lru) 1202 - if (lock_and_del_slab(n, page)) 1203 goto out; 1204 page = NULL; 1205 out: ··· 1278 * 1279 * On exit the slab lock will have been dropped. 
1280 */ 1281 - static void putback_slab(struct kmem_cache *s, struct page *page) 1282 { 1283 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1284 1285 if (page->inuse) { 1286 1287 if (page->freelist) ··· 1333 page->inuse--; 1334 } 1335 s->cpu_slab[cpu] = NULL; 1336 - ClearPageActive(page); 1337 - 1338 - putback_slab(s, page); 1339 } 1340 1341 static void flush_slab(struct kmem_cache *s, struct page *page, int cpu) ··· 1424 new_slab: 1425 page = get_partial(s, gfpflags, node); 1426 if (page) { 1427 - have_slab: 1428 s->cpu_slab[cpu] = page; 1429 - SetPageActive(page); 1430 goto load_freelist; 1431 } 1432 ··· 1454 flush_slab(s, s->cpu_slab[cpu], cpu); 1455 } 1456 slab_lock(page); 1457 - goto have_slab; 1458 } 1459 return NULL; 1460 debug: 1461 object = page->freelist; 1462 - if (!alloc_object_checks(s, page, object)) 1463 goto another_slab; 1464 - if (s->flags & SLAB_STORE_USER) 1465 - set_track(s, object, TRACK_ALLOC, addr); 1466 - trace(s, page, object, 1); 1467 - init_object(s, object, 1); 1468 1469 page->inuse++; 1470 page->freelist = object[page->offset]; ··· 1539 page->freelist = object; 1540 page->inuse--; 1541 1542 - if (unlikely(PageActive(page))) 1543 - /* 1544 - * Cpu slabs are never on partial lists and are 1545 - * never freed. 1546 - */ 1547 goto out_unlock; 1548 1549 if (unlikely(!page->inuse)) ··· 1569 return; 1570 1571 debug: 1572 - if (!free_object_checks(s, page, x)) 1573 goto out_unlock; 1574 - if (!PageActive(page) && !page->freelist) 1575 - remove_full(s, page); 1576 - if (s->flags & SLAB_STORE_USER) 1577 - set_track(s, x, TRACK_FREE, addr); 1578 - trace(s, page, object, 0); 1579 - init_object(s, object, 0); 1580 goto checks_ok; 1581 } 1582 ··· 1807 page->freelist = get_freepointer(kmalloc_caches, n); 1808 page->inuse++; 1809 kmalloc_caches->node[node] = n; 1810 - init_object(kmalloc_caches, n, 1); 1811 init_kmem_cache_node(n); 1812 atomic_long_inc(&n->nr_slabs); 1813 add_partial(n, page); ··· 1889 * then we should never poison the object itself. 
1890 */ 1891 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && 1892 - !s->ctor && !s->dtor) 1893 s->flags |= __OBJECT_POISON; 1894 else 1895 s->flags &= ~__OBJECT_POISON; ··· 1919 1920 #ifdef CONFIG_SLUB_DEBUG 1921 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || 1922 - s->ctor || s->dtor)) { 1923 /* 1924 * Relocate free pointer after the object if it is not 1925 * permitted to overwrite the first word of the object on ··· 1988 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, 1989 const char *name, size_t size, 1990 size_t align, unsigned long flags, 1991 - void (*ctor)(void *, struct kmem_cache *, unsigned long), 1992 - void (*dtor)(void *, struct kmem_cache *, unsigned long)) 1993 { 1994 memset(s, 0, kmem_size); 1995 s->name = name; 1996 s->ctor = ctor; 1997 - s->dtor = dtor; 1998 s->objsize = size; 1999 s->flags = flags; 2000 s->align = align; ··· 2177 2178 down_write(&slub_lock); 2179 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, 2180 - flags, NULL, NULL)) 2181 goto panic; 2182 2183 list_add(&s->list, &slab_caches); ··· 2479 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) 2480 return 1; 2481 2482 - if (s->ctor || s->dtor) 2483 return 1; 2484 2485 return 0; ··· 2487 2488 static struct kmem_cache *find_mergeable(size_t size, 2489 size_t align, unsigned long flags, 2490 - void (*ctor)(void *, struct kmem_cache *, unsigned long), 2491 - void (*dtor)(void *, struct kmem_cache *, unsigned long)) 2492 { 2493 struct list_head *h; 2494 2495 if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) 2496 return NULL; 2497 2498 - if (ctor || dtor) 2499 return NULL; 2500 2501 size = ALIGN(size, sizeof(void *)); ··· 2536 { 2537 struct kmem_cache *s; 2538 2539 down_write(&slub_lock); 2540 - s = find_mergeable(size, align, flags, ctor, dtor); 2541 if (s) { 2542 s->refcount++; 2543 /* ··· 2552 } else { 2553 s = kmalloc(kmem_size, GFP_KERNEL); 2554 if (s && kmem_cache_open(s, GFP_KERNEL, name, 2555 - size, align, flags, ctor, dtor)) { 2556 if (sysfs_slab_add(s)) { 2557 kfree(s); 2558 goto err; ··· 3193 } 3194 SLAB_ATTR_RO(ctor); 3195 3196 - static ssize_t dtor_show(struct kmem_cache *s, char *buf) 3197 - { 3198 - if (s->dtor) { 3199 - int n = sprint_symbol(buf, (unsigned long)s->dtor); 3200 - 3201 - return n + sprintf(buf + n, "\n"); 3202 - } 3203 - return 0; 3204 - } 3205 - SLAB_ATTR_RO(dtor); 3206 - 3207 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 3208 { 3209 return sprintf(buf, "%d\n", s->refcount - 1); ··· 3424 &partial_attr.attr, 3425 &cpu_slabs_attr.attr, 3426 &ctor_attr.attr, 3427 - &dtor_attr.attr, 3428 &aliases_attr.attr, 3429 &align_attr.attr, 3430 &sanity_checks_attr.attr,
··· 78 * 79 * Overloading of page flags that are otherwise used for LRU management. 80 * 81 + * PageActive The slab is frozen and exempt from list processing. 82 + * This means that the slab is dedicated to a purpose 83 + * such as satisfying allocations for a specific 84 + * processor. Objects may be freed in the slab while 85 + * it is frozen but slab_free will then skip the usual 86 + * list operations. It is up to the processor holding 87 + * the slab to integrate the slab into the slab lists 88 + * when the slab is no longer needed. 89 + * 90 + * One use of this flag is to mark slabs that are 91 + * used for allocations. Then such a slab becomes a cpu 92 + * slab. The cpu slab may be equipped with an additional 93 * lockless_freelist that allows lockless access to 94 * free objects in addition to the regular freelist 95 * that requires the slab lock. ··· 91 * the fast path and disables lockless freelists. 92 */ 93 94 + #define FROZEN (1 << PG_active) 95 + 96 + #ifdef CONFIG_SLUB_DEBUG 97 + #define SLABDEBUG (1 << PG_error) 98 + #else 99 + #define SLABDEBUG 0 100 + #endif 101 + 102 + static inline int SlabFrozen(struct page *page) 103 + { 104 + return page->flags & FROZEN; 105 + } 106 + 107 + static inline void SetSlabFrozen(struct page *page) 108 + { 109 + page->flags |= FROZEN; 110 + } 111 + 112 + static inline void ClearSlabFrozen(struct page *page) 113 + { 114 + page->flags &= ~FROZEN; 115 + } 116 + 117 static inline int SlabDebug(struct page *page) 118 { 119 + return page->flags & SLABDEBUG; 120 } 121 122 static inline void SetSlabDebug(struct page *page) 123 { 124 + page->flags |= SLABDEBUG; 125 } 126 127 static inline void ClearSlabDebug(struct page *page) 128 { 129 + page->flags &= ~SLABDEBUG; 130 } 131 132 /* ··· 719 return search == NULL; 720 } 721 722 + static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc) 723 + { 724 + if (s->flags & SLAB_TRACE) { 725 + printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 726 + s->name, 727 + alloc ? "alloc" : "free", 728 + object, page->inuse, 729 + page->freelist); 730 + 731 + if (!alloc) 732 + print_section("Object", (void *)object, s->objsize); 733 + 734 + dump_stack(); 735 + } 736 + } 737 + 738 /* 739 * Tracking of fully allocated slabs for debugging purposes. 
740 */ ··· 743 spin_unlock(&n->list_lock); 744 } 745 746 + static void setup_object_debug(struct kmem_cache *s, struct page *page, 747 + void *object) 748 + { 749 + if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) 750 + return; 751 + 752 + init_object(s, object, 0); 753 + init_tracking(s, object); 754 + } 755 + 756 + static int alloc_debug_processing(struct kmem_cache *s, struct page *page, 757 + void *object, void *addr) 758 { 759 if (!check_slab(s, page)) 760 goto bad; ··· 759 goto bad; 760 } 761 762 + if (object && !check_object(s, page, object, 0)) 763 goto bad; 764 765 + /* Success perform special debug activities for allocs */ 766 + if (s->flags & SLAB_STORE_USER) 767 + set_track(s, object, TRACK_ALLOC, addr); 768 + trace(s, page, object, 1); 769 + init_object(s, object, 1); 770 return 1; 771 + 772 bad: 773 if (PageSlab(page)) { 774 /* ··· 783 return 0; 784 } 785 786 + static int free_debug_processing(struct kmem_cache *s, struct page *page, 787 + void *object, void *addr) 788 { 789 if (!check_slab(s, page)) 790 goto fail; ··· 818 "to slab %s", object, page->slab->name); 819 goto fail; 820 } 821 + 822 + /* Special debug activities for freeing objects */ 823 + if (!SlabFrozen(page) && !page->freelist) 824 + remove_full(s, page); 825 + if (s->flags & SLAB_STORE_USER) 826 + set_track(s, object, TRACK_FREE, addr); 827 + trace(s, page, object, 0); 828 + init_object(s, object, 0); 829 return 1; 830 + 831 fail: 832 printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n", 833 s->name, page, object); 834 return 0; 835 } 836 837 static int __init setup_slub_debug(char *str) ··· 891 * On 32 bit platforms the limit is 256k. On 64bit platforms 892 * the limit is 512k. 893 * 894 + * Debugging or ctor may create a need to move the free 895 * pointer. Fail if this happens. 896 */ 897 if (s->size >= 65535 * sizeof(void *)) { 898 BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON | 899 SLAB_STORE_USER | SLAB_DESTROY_BY_RCU)); 900 + BUG_ON(s->ctor); 901 } 902 else 903 /* ··· 909 s->flags |= slub_debug; 910 } 911 #else 912 + static inline void setup_object_debug(struct kmem_cache *s, 913 + struct page *page, void *object) {} 914 915 + static inline int alloc_debug_processing(struct kmem_cache *s, 916 + struct page *page, void *object, void *addr) { return 0; } 917 918 + static inline int free_debug_processing(struct kmem_cache *s, 919 + struct page *page, void *object, void *addr) { return 0; } 920 921 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 922 { return 1; } 923 static inline int check_object(struct kmem_cache *s, struct page *page, 924 void *object, int active) { return 1; } 925 + static inline void add_full(struct kmem_cache_node *n, struct page *page) {} 926 static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {} 927 #define slub_debug 0 928 #endif ··· 965 static void setup_object(struct kmem_cache *s, struct page *page, 966 void *object) 967 { 968 + setup_object_debug(s, page, object); 969 if (unlikely(s->ctor)) 970 + s->ctor(object, s, 0); 971 } 972 973 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) ··· 1030 { 1031 int pages = 1 << s->order; 1032 1033 + if (unlikely(SlabDebug(page))) { 1034 void *p; 1035 1036 slab_pad_check(s, page); 1037 + for_each_object(p, s, page_address(page)) 1038 check_object(s, page, p, 0); 1039 } 1040 1041 mod_zone_page_state(page_zone(page), ··· 1138 * 1139 * Must hold list_lock. 
1140 */ 1141 + static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page) 1142 { 1143 if (slab_trylock(page)) { 1144 list_del(&page->lru); 1145 n->nr_partial--; 1146 + SetSlabFrozen(page); 1147 return 1; 1148 } 1149 return 0; ··· 1166 1167 spin_lock(&n->list_lock); 1168 list_for_each_entry(page, &n->partial, lru) 1169 + if (lock_and_freeze_slab(n, page)) 1170 goto out; 1171 page = NULL; 1172 out: ··· 1245 * 1246 * On exit the slab lock will have been dropped. 1247 */ 1248 + static void unfreeze_slab(struct kmem_cache *s, struct page *page) 1249 { 1250 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1251 1252 + ClearSlabFrozen(page); 1253 if (page->inuse) { 1254 1255 if (page->freelist) ··· 1299 page->inuse--; 1300 } 1301 s->cpu_slab[cpu] = NULL; 1302 + unfreeze_slab(s, page); 1303 } 1304 1305 static void flush_slab(struct kmem_cache *s, struct page *page, int cpu) ··· 1392 new_slab: 1393 page = get_partial(s, gfpflags, node); 1394 if (page) { 1395 s->cpu_slab[cpu] = page; 1396 goto load_freelist; 1397 } 1398 ··· 1424 flush_slab(s, s->cpu_slab[cpu], cpu); 1425 } 1426 slab_lock(page); 1427 + SetSlabFrozen(page); 1428 + s->cpu_slab[cpu] = page; 1429 + goto load_freelist; 1430 } 1431 return NULL; 1432 debug: 1433 object = page->freelist; 1434 + if (!alloc_debug_processing(s, page, object, addr)) 1435 goto another_slab; 1436 1437 page->inuse++; 1438 page->freelist = object[page->offset]; ··· 1511 page->freelist = object; 1512 page->inuse--; 1513 1514 + if (unlikely(SlabFrozen(page))) 1515 goto out_unlock; 1516 1517 if (unlikely(!page->inuse)) ··· 1545 return; 1546 1547 debug: 1548 + if (!free_debug_processing(s, page, x, addr)) 1549 goto out_unlock; 1550 goto checks_ok; 1551 } 1552 ··· 1789 page->freelist = get_freepointer(kmalloc_caches, n); 1790 page->inuse++; 1791 kmalloc_caches->node[node] = n; 1792 + setup_object_debug(kmalloc_caches, page, n); 1793 init_kmem_cache_node(n); 1794 atomic_long_inc(&n->nr_slabs); 1795 add_partial(n, page); ··· 1871 * then we should never poison the object itself. 
1872 */ 1873 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && 1874 + !s->ctor) 1875 s->flags |= __OBJECT_POISON; 1876 else 1877 s->flags &= ~__OBJECT_POISON; ··· 1901 1902 #ifdef CONFIG_SLUB_DEBUG 1903 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || 1904 + s->ctor)) { 1905 /* 1906 * Relocate free pointer after the object if it is not 1907 * permitted to overwrite the first word of the object on ··· 1970 static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, 1971 const char *name, size_t size, 1972 size_t align, unsigned long flags, 1973 + void (*ctor)(void *, struct kmem_cache *, unsigned long)) 1974 { 1975 memset(s, 0, kmem_size); 1976 s->name = name; 1977 s->ctor = ctor; 1978 s->objsize = size; 1979 s->flags = flags; 1980 s->align = align; ··· 2161 2162 down_write(&slub_lock); 2163 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, 2164 + flags, NULL)) 2165 goto panic; 2166 2167 list_add(&s->list, &slab_caches); ··· 2463 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE)) 2464 return 1; 2465 2466 + if (s->ctor) 2467 return 1; 2468 2469 return 0; ··· 2471 2472 static struct kmem_cache *find_mergeable(size_t size, 2473 size_t align, unsigned long flags, 2474 + void (*ctor)(void *, struct kmem_cache *, unsigned long)) 2475 { 2476 struct list_head *h; 2477 2478 if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) 2479 return NULL; 2480 2481 + if (ctor) 2482 return NULL; 2483 2484 size = ALIGN(size, sizeof(void *)); ··· 2521 { 2522 struct kmem_cache *s; 2523 2524 + BUG_ON(dtor); 2525 down_write(&slub_lock); 2526 + s = find_mergeable(size, align, flags, ctor); 2527 if (s) { 2528 s->refcount++; 2529 /* ··· 2536 } else { 2537 s = kmalloc(kmem_size, GFP_KERNEL); 2538 if (s && kmem_cache_open(s, GFP_KERNEL, name, 2539 + size, align, flags, ctor)) { 2540 if (sysfs_slab_add(s)) { 2541 kfree(s); 2542 goto err; ··· 3177 } 3178 SLAB_ATTR_RO(ctor); 3179 3180 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 3181 { 3182 return sprintf(buf, "%d\n", s->refcount - 1); ··· 3419 &partial_attr.attr, 3420 &cpu_slabs_attr.attr, 3421 &ctor_attr.attr, 3422 &aliases_attr.attr, 3423 &align_attr.attr, 3424 &sanity_checks_attr.attr,
+1 -1
mm/vmalloc.c
··· 311 return v; 312 } 313 314 - void __vunmap(void *addr, int deallocate_pages) 315 { 316 struct vm_struct *area; 317
··· 311 return v; 312 } 313 314 + static void __vunmap(void *addr, int deallocate_pages) 315 { 316 struct vm_struct *area; 317
+1 -1
net/ipx/af_ipx.c
··· 87 unsigned char *node); 88 extern void ipxrtr_del_routes(struct ipx_interface *intrfc); 89 extern int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, 90 - struct iovec *iov, int len, int noblock); 91 extern int ipxrtr_route_skb(struct sk_buff *skb); 92 extern struct ipx_route *ipxrtr_lookup(__be32 net); 93 extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
··· 87 unsigned char *node); 88 extern void ipxrtr_del_routes(struct ipx_interface *intrfc); 89 extern int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, 90 + struct iovec *iov, size_t len, int noblock); 91 extern int ipxrtr_route_skb(struct sk_buff *skb); 92 extern struct ipx_route *ipxrtr_lookup(__be32 net); 93 extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
+1 -2
net/socket.c
··· 261 { 262 struct socket_alloc *ei = (struct socket_alloc *)foo; 263 264 - if (flags & SLAB_CTOR_CONSTRUCTOR) 265 - inode_init_once(&ei->vfs_inode); 266 } 267 268 static int init_inodecache(void)
··· 261 { 262 struct socket_alloc *ei = (struct socket_alloc *)foo; 263 264 + inode_init_once(&ei->vfs_inode); 265 } 266 267 static int init_inodecache(void)
+11 -13
net/sunrpc/rpc_pipe.c
··· 828 { 829 struct rpc_inode *rpci = (struct rpc_inode *) foo; 830 831 - if (flags & SLAB_CTOR_CONSTRUCTOR) { 832 - inode_init_once(&rpci->vfs_inode); 833 - rpci->private = NULL; 834 - rpci->nreaders = 0; 835 - rpci->nwriters = 0; 836 - INIT_LIST_HEAD(&rpci->in_upcall); 837 - INIT_LIST_HEAD(&rpci->pipe); 838 - rpci->pipelen = 0; 839 - init_waitqueue_head(&rpci->waitq); 840 - INIT_DELAYED_WORK(&rpci->queue_timeout, 841 - rpc_timeout_upcall_queue); 842 - rpci->ops = NULL; 843 - } 844 } 845 846 int register_rpc_pipefs(void)
··· 828 { 829 struct rpc_inode *rpci = (struct rpc_inode *) foo; 830 831 + inode_init_once(&rpci->vfs_inode); 832 + rpci->private = NULL; 833 + rpci->nreaders = 0; 834 + rpci->nwriters = 0; 835 + INIT_LIST_HEAD(&rpci->in_upcall); 836 + INIT_LIST_HEAD(&rpci->pipe); 837 + rpci->pipelen = 0; 838 + init_waitqueue_head(&rpci->waitq); 839 + INIT_DELAYED_WORK(&rpci->queue_timeout, 840 + rpc_timeout_upcall_queue); 841 + rpci->ops = NULL; 842 } 843 844 int register_rpc_pipefs(void)