Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'sh/alphaproject' into sh-latest

+6694 -1000
+3
Documentation/filesystems/ntfs.txt
··· 457 457 458 458 Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog. 459 459 460 + 2.1.30: 461 + - Fix writev() (it kept writing the first segment over and over again 462 + instead of moving onto subsequent segments). 460 463 2.1.29: 461 464 - Fix a deadlock when mounting read-write. 462 465 2.1.28:
+3 -3
MAINTAINERS
··· 4383 4383 F: drivers/scsi/nsp32* 4384 4384 4385 4385 NTFS FILESYSTEM 4386 - M: Anton Altaparmakov <aia21@cantab.net> 4386 + M: Anton Altaparmakov <anton@tuxera.com> 4387 4387 L: linux-ntfs-dev@lists.sourceforge.net 4388 - W: http://www.linux-ntfs.org/ 4388 + W: http://www.tuxera.com/ 4389 4389 T: git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git 4390 - S: Maintained 4390 + S: Supported 4391 4391 F: Documentation/filesystems/ntfs.txt 4392 4392 F: fs/ntfs/ 4393 4393
+18
arch/sh/boards/Kconfig
··· 3 3 config SOLUTION_ENGINE 4 4 bool 5 5 6 + config SH_ALPHA_BOARD 7 + bool 8 + 6 9 config SH_SOLUTION_ENGINE 7 10 bool "SolutionEngine" 8 11 select SOLUTION_ENGINE ··· 322 319 It has an Ethernet interface (SMC9118), direct connected 323 320 Compact Flash socket, two serial ports and PC-104 bus. 324 321 More information at <http://sh2000.sh-linux.org>. 322 + 323 + config SH_APSH4A3A 324 + bool "AP-SH4A-3A" 325 + select SH_ALPHA_BOARD 326 + depends on CPU_SUBTYPE_SH7785 327 + help 328 + Select AP-SH4A-3A if configuring for an ALPHAPROJECT AP-SH4A-3A. 329 + 330 + config SH_APSH4AD0A 331 + bool "AP-SH4AD-0A" 332 + select SH_ALPHA_BOARD 333 + select SYS_SUPPORTS_PCI 334 + depends on CPU_SUBTYPE_SH7786 335 + help 336 + Select AP-SH4AD-0A if configuring for an ALPHAPROJECT AP-SH4AD-0A. 325 337 326 338 endmenu 327 339
+2
arch/sh/boards/Makefile
··· 13 13 obj-$(CONFIG_SH_POLARIS) += board-polaris.o 14 14 obj-$(CONFIG_SH_TITAN) += board-titan.o 15 15 obj-$(CONFIG_SH_SH7757LCR) += board-sh7757lcr.o 16 + obj-$(CONFIG_SH_APSH4A3A) += board-apsh4a3a.o 17 + obj-$(CONFIG_SH_APSH4AD0A) += board-apsh4ad0a.o
+175
arch/sh/boards/board-apsh4a3a.c
··· 1 + /* 2 + * ALPHAPROJECT AP-SH4A-3A Support. 3 + * 4 + * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd. 5 + * Copyright (C) 2008 Yoshihiro Shimoda 6 + * Copyright (C) 2009 Paul Mundt 7 + * 8 + * This file is subject to the terms and conditions of the GNU General Public 9 + * License. See the file "COPYING" in the main directory of this archive 10 + * for more details. 11 + */ 12 + #include <linux/init.h> 13 + #include <linux/platform_device.h> 14 + #include <linux/io.h> 15 + #include <linux/mtd/physmap.h> 16 + #include <linux/smsc911x.h> 17 + #include <linux/irq.h> 18 + #include <linux/clk.h> 19 + #include <asm/machvec.h> 20 + #include <asm/sizes.h> 21 + #include <asm/clock.h> 22 + 23 + static struct mtd_partition nor_flash_partitions[] = { 24 + { 25 + .name = "loader", 26 + .offset = 0x00000000, 27 + .size = 512 * 1024, 28 + }, 29 + { 30 + .name = "bootenv", 31 + .offset = MTDPART_OFS_APPEND, 32 + .size = 512 * 1024, 33 + }, 34 + { 35 + .name = "kernel", 36 + .offset = MTDPART_OFS_APPEND, 37 + .size = 4 * 1024 * 1024, 38 + }, 39 + { 40 + .name = "data", 41 + .offset = MTDPART_OFS_APPEND, 42 + .size = MTDPART_SIZ_FULL, 43 + }, 44 + }; 45 + 46 + static struct physmap_flash_data nor_flash_data = { 47 + .width = 4, 48 + .parts = nor_flash_partitions, 49 + .nr_parts = ARRAY_SIZE(nor_flash_partitions), 50 + }; 51 + 52 + static struct resource nor_flash_resources[] = { 53 + [0] = { 54 + .start = 0x00000000, 55 + .end = 0x01000000 - 1, 56 + .flags = IORESOURCE_MEM, 57 + } 58 + }; 59 + 60 + static struct platform_device nor_flash_device = { 61 + .name = "physmap-flash", 62 + .dev = { 63 + .platform_data = &nor_flash_data, 64 + }, 65 + .num_resources = ARRAY_SIZE(nor_flash_resources), 66 + .resource = nor_flash_resources, 67 + }; 68 + 69 + static struct resource smsc911x_resources[] = { 70 + [0] = { 71 + .name = "smsc911x-memory", 72 + .start = 0xA4000000, 73 + .end = 0xA4000000 + SZ_256 - 1, 74 + .flags = IORESOURCE_MEM, 75 + }, 76 + [1] = { 77 + .name = "smsc911x-irq", 78 
+ .start = evt2irq(0x200), 79 + .end = evt2irq(0x200), 80 + .flags = IORESOURCE_IRQ, 81 + }, 82 + }; 83 + 84 + static struct smsc911x_platform_config smsc911x_config = { 85 + .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, 86 + .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, 87 + .flags = SMSC911X_USE_16BIT, 88 + .phy_interface = PHY_INTERFACE_MODE_MII, 89 + }; 90 + 91 + static struct platform_device smsc911x_device = { 92 + .name = "smsc911x", 93 + .id = -1, 94 + .num_resources = ARRAY_SIZE(smsc911x_resources), 95 + .resource = smsc911x_resources, 96 + .dev = { 97 + .platform_data = &smsc911x_config, 98 + }, 99 + }; 100 + 101 + static struct platform_device *apsh4a3a_devices[] __initdata = { 102 + &nor_flash_device, 103 + &smsc911x_device, 104 + }; 105 + 106 + static int __init apsh4a3a_devices_setup(void) 107 + { 108 + return platform_add_devices(apsh4a3a_devices, 109 + ARRAY_SIZE(apsh4a3a_devices)); 110 + } 111 + device_initcall(apsh4a3a_devices_setup); 112 + 113 + static int apsh4a3a_clk_init(void) 114 + { 115 + struct clk *clk; 116 + int ret; 117 + 118 + clk = clk_get(NULL, "extal"); 119 + if (!clk || IS_ERR(clk)) 120 + return PTR_ERR(clk); 121 + ret = clk_set_rate(clk, 33333000); 122 + clk_put(clk); 123 + 124 + return ret; 125 + } 126 + 127 + /* Initialize the board */ 128 + static void __init apsh4a3a_setup(char **cmdline_p) 129 + { 130 + printk(KERN_INFO "Alpha Project AP-SH4A-3A support:\n"); 131 + } 132 + 133 + static void __init apsh4a3a_init_irq(void) 134 + { 135 + plat_irq_setup_pins(IRQ_MODE_IRQ7654); 136 + } 137 + 138 + /* Return the board specific boot mode pin configuration */ 139 + static int apsh4a3a_mode_pins(void) 140 + { 141 + int value = 0; 142 + 143 + /* These are the factory default settings of SW1 and SW2. 144 + * If you change these dip switches then you will need to 145 + * adjust the values below as well. 
146 + */ 147 + value &= ~MODE_PIN0; /* Clock Mode 16 */ 148 + value &= ~MODE_PIN1; 149 + value &= ~MODE_PIN2; 150 + value &= ~MODE_PIN3; 151 + value |= MODE_PIN4; 152 + value &= ~MODE_PIN5; /* 16-bit Area0 bus width */ 153 + value |= MODE_PIN6; /* Area 0 SRAM interface */ 154 + value |= MODE_PIN7; 155 + value |= MODE_PIN8; /* Little Endian */ 156 + value |= MODE_PIN9; /* Master Mode */ 157 + value |= MODE_PIN10; /* Crystal resonator */ 158 + value |= MODE_PIN11; /* Display Unit */ 159 + value |= MODE_PIN12; 160 + value &= ~MODE_PIN13; /* 29-bit address mode */ 161 + value |= MODE_PIN14; /* No PLL step-up */ 162 + 163 + return value; 164 + } 165 + 166 + /* 167 + * The Machine Vector 168 + */ 169 + static struct sh_machine_vector mv_apsh4a3a __initmv = { 170 + .mv_name = "AP-SH4A-3A", 171 + .mv_setup = apsh4a3a_setup, 172 + .mv_clk_init = apsh4a3a_clk_init, 173 + .mv_init_irq = apsh4a3a_init_irq, 174 + .mv_mode_pins = apsh4a3a_mode_pins, 175 + };
+125
arch/sh/boards/board-apsh4ad0a.c
··· 1 + /* 2 + * ALPHAPROJECT AP-SH4AD-0A Support. 3 + * 4 + * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd. 5 + * Copyright (C) 2010 Matt Fleming 6 + * Copyright (C) 2010 Paul Mundt 7 + * 8 + * This file is subject to the terms and conditions of the GNU General Public 9 + * License. See the file "COPYING" in the main directory of this archive 10 + * for more details. 11 + */ 12 + #include <linux/init.h> 13 + #include <linux/platform_device.h> 14 + #include <linux/io.h> 15 + #include <linux/smsc911x.h> 16 + #include <linux/irq.h> 17 + #include <linux/clk.h> 18 + #include <asm/machvec.h> 19 + #include <asm/sizes.h> 20 + 21 + static struct resource smsc911x_resources[] = { 22 + [0] = { 23 + .name = "smsc911x-memory", 24 + .start = 0xA4000000, 25 + .end = 0xA4000000 + SZ_256 - 1, 26 + .flags = IORESOURCE_MEM, 27 + }, 28 + [1] = { 29 + .name = "smsc911x-irq", 30 + .start = evt2irq(0x200), 31 + .end = evt2irq(0x200), 32 + .flags = IORESOURCE_IRQ, 33 + }, 34 + }; 35 + 36 + static struct smsc911x_platform_config smsc911x_config = { 37 + .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, 38 + .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, 39 + .flags = SMSC911X_USE_16BIT, 40 + .phy_interface = PHY_INTERFACE_MODE_MII, 41 + }; 42 + 43 + static struct platform_device smsc911x_device = { 44 + .name = "smsc911x", 45 + .id = -1, 46 + .num_resources = ARRAY_SIZE(smsc911x_resources), 47 + .resource = smsc911x_resources, 48 + .dev = { 49 + .platform_data = &smsc911x_config, 50 + }, 51 + }; 52 + 53 + static struct platform_device *apsh4ad0a_devices[] __initdata = { 54 + &smsc911x_device, 55 + }; 56 + 57 + static int __init apsh4ad0a_devices_setup(void) 58 + { 59 + return platform_add_devices(apsh4ad0a_devices, 60 + ARRAY_SIZE(apsh4ad0a_devices)); 61 + } 62 + device_initcall(apsh4ad0a_devices_setup); 63 + 64 + static int apsh4ad0a_mode_pins(void) 65 + { 66 + int value = 0; 67 + 68 + /* These are the factory default settings of SW1 and SW2. 
69 + * If you change these dip switches then you will need to 70 + * adjust the values below as well. 71 + */ 72 + value |= MODE_PIN0; /* Clock Mode 3 */ 73 + value |= MODE_PIN1; 74 + value &= ~MODE_PIN2; 75 + value &= ~MODE_PIN3; 76 + value &= ~MODE_PIN4; /* 16-bit Area0 bus width */ 77 + value |= MODE_PIN5; 78 + value |= MODE_PIN6; 79 + value |= MODE_PIN7; /* Normal mode */ 80 + value |= MODE_PIN8; /* Little Endian */ 81 + value |= MODE_PIN9; /* Crystal resonator */ 82 + value &= ~MODE_PIN10; /* 29-bit address mode */ 83 + value &= ~MODE_PIN11; /* PCI-E Root port */ 84 + value &= ~MODE_PIN12; /* 4 lane + 1 lane */ 85 + value |= MODE_PIN13; /* AUD Enable */ 86 + value &= ~MODE_PIN14; /* Normal Operation */ 87 + 88 + return value; 89 + } 90 + 91 + static int apsh4ad0a_clk_init(void) 92 + { 93 + struct clk *clk; 94 + int ret; 95 + 96 + clk = clk_get(NULL, "extal"); 97 + if (!clk || IS_ERR(clk)) 98 + return PTR_ERR(clk); 99 + ret = clk_set_rate(clk, 33333000); 100 + clk_put(clk); 101 + 102 + return ret; 103 + } 104 + 105 + /* Initialize the board */ 106 + static void __init apsh4ad0a_setup(char **cmdline_p) 107 + { 108 + pr_info("Alpha Project AP-SH4AD-0A support:\n"); 109 + } 110 + 111 + static void __init apsh4ad0a_init_irq(void) 112 + { 113 + plat_irq_setup_pins(IRQ_MODE_IRQ3210); 114 + } 115 + 116 + /* 117 + * The Machine Vector 118 + */ 119 + static struct sh_machine_vector mv_apsh4ad0a __initmv = { 120 + .mv_name = "AP-SH4AD-0A", 121 + .mv_setup = apsh4ad0a_setup, 122 + .mv_mode_pins = apsh4ad0a_mode_pins, 123 + .mv_clk_init = apsh4ad0a_clk_init, 124 + .mv_init_irq = apsh4ad0a_init_irq, 125 + };
+102
arch/sh/configs/apsh4a3a_defconfig
··· 1 + CONFIG_EXPERIMENTAL=y 2 + CONFIG_SYSVIPC=y 3 + CONFIG_BSD_PROCESS_ACCT=y 4 + CONFIG_IKCONFIG=y 5 + CONFIG_IKCONFIG_PROC=y 6 + CONFIG_LOG_BUF_SHIFT=14 7 + CONFIG_SYSFS_DEPRECATED=y 8 + CONFIG_SYSFS_DEPRECATED_V2=y 9 + CONFIG_BLK_DEV_INITRD=y 10 + CONFIG_SLAB=y 11 + CONFIG_PROFILING=y 12 + CONFIG_MODULES=y 13 + CONFIG_MODULE_UNLOAD=y 14 + # CONFIG_BLK_DEV_BSG is not set 15 + CONFIG_CPU_SUBTYPE_SH7785=y 16 + CONFIG_MEMORY_START=0x0C000000 17 + CONFIG_FLATMEM_MANUAL=y 18 + CONFIG_SH_STORE_QUEUES=y 19 + CONFIG_SH_APSH4A3A=y 20 + CONFIG_HIGH_RES_TIMERS=y 21 + CONFIG_KEXEC=y 22 + CONFIG_PREEMPT=y 23 + # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 24 + CONFIG_NET=y 25 + CONFIG_PACKET=y 26 + CONFIG_UNIX=y 27 + CONFIG_INET=y 28 + CONFIG_IP_ADVANCED_ROUTER=y 29 + CONFIG_IP_PNP=y 30 + CONFIG_IP_PNP_DHCP=y 31 + # CONFIG_INET_LRO is not set 32 + # CONFIG_IPV6 is not set 33 + # CONFIG_WIRELESS is not set 34 + CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 35 + # CONFIG_FW_LOADER is not set 36 + CONFIG_MTD=y 37 + CONFIG_MTD_CONCAT=y 38 + CONFIG_MTD_PARTITIONS=y 39 + CONFIG_MTD_CHAR=y 40 + CONFIG_MTD_BLOCK=y 41 + CONFIG_MTD_CFI=y 42 + CONFIG_MTD_CFI_AMDSTD=y 43 + CONFIG_MTD_PHYSMAP=y 44 + CONFIG_BLK_DEV_RAM=y 45 + CONFIG_BLK_DEV_RAM_SIZE=16384 46 + CONFIG_NETDEVICES=y 47 + CONFIG_NET_ETHERNET=y 48 + CONFIG_SMSC911X=y 49 + # CONFIG_NETDEV_1000 is not set 50 + # CONFIG_NETDEV_10000 is not set 51 + # CONFIG_WLAN is not set 52 + # CONFIG_INPUT_MOUSEDEV is not set 53 + # CONFIG_INPUT_KEYBOARD is not set 54 + # CONFIG_INPUT_MOUSE is not set 55 + # CONFIG_SERIO is not set 56 + CONFIG_VT_HW_CONSOLE_BINDING=y 57 + CONFIG_SERIAL_SH_SCI=y 58 + CONFIG_SERIAL_SH_SCI_NR_UARTS=6 59 + CONFIG_SERIAL_SH_SCI_CONSOLE=y 60 + CONFIG_HW_RANDOM=y 61 + # CONFIG_HWMON is not set 62 + CONFIG_FB=y 63 + CONFIG_FB_SH7785FB=y 64 + CONFIG_FRAMEBUFFER_CONSOLE=y 65 + CONFIG_FONTS=y 66 + CONFIG_FONT_8x8=y 67 + CONFIG_FONT_8x16=y 68 + CONFIG_LOGO=y 69 + # CONFIG_HID_SUPPORT is not set 70 + # CONFIG_USB_SUPPORT 
is not set 71 + CONFIG_EXT2_FS=y 72 + CONFIG_EXT3_FS=y 73 + # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 74 + CONFIG_MSDOS_FS=y 75 + CONFIG_VFAT_FS=y 76 + CONFIG_NTFS_FS=y 77 + CONFIG_NTFS_RW=y 78 + CONFIG_PROC_KCORE=y 79 + CONFIG_TMPFS=y 80 + CONFIG_JFFS2_FS=y 81 + CONFIG_CRAMFS=y 82 + CONFIG_NFS_FS=y 83 + CONFIG_NFS_V3=y 84 + CONFIG_NFS_V4=y 85 + CONFIG_CIFS=y 86 + CONFIG_NLS_DEFAULT="utf8" 87 + CONFIG_NLS_CODEPAGE_437=y 88 + CONFIG_NLS_CODEPAGE_932=y 89 + CONFIG_NLS_ASCII=y 90 + CONFIG_NLS_ISO8859_1=y 91 + CONFIG_NLS_UTF8=y 92 + # CONFIG_ENABLE_WARN_DEPRECATED is not set 93 + # CONFIG_ENABLE_MUST_CHECK is not set 94 + CONFIG_DEBUG_FS=y 95 + CONFIG_DEBUG_KERNEL=y 96 + # CONFIG_DEBUG_PREEMPT is not set 97 + # CONFIG_DEBUG_BUGVERBOSE is not set 98 + CONFIG_DEBUG_INFO=y 99 + # CONFIG_RCU_CPU_STALL_DETECTOR is not set 100 + # CONFIG_FTRACE is not set 101 + # CONFIG_CRYPTO_ANSI_CPRNG is not set 102 + # CONFIG_CRYPTO_HW is not set
+133
arch/sh/configs/apsh4ad0a_defconfig
··· 1 + CONFIG_EXPERIMENTAL=y 2 + CONFIG_SYSVIPC=y 3 + CONFIG_POSIX_MQUEUE=y 4 + CONFIG_BSD_PROCESS_ACCT=y 5 + CONFIG_RCU_TRACE=y 6 + CONFIG_IKCONFIG=y 7 + CONFIG_IKCONFIG_PROC=y 8 + CONFIG_LOG_BUF_SHIFT=14 9 + CONFIG_CGROUPS=y 10 + CONFIG_CGROUP_NS=y 11 + CONFIG_CGROUP_FREEZER=y 12 + CONFIG_CGROUP_DEVICE=y 13 + CONFIG_CGROUP_CPUACCT=y 14 + CONFIG_RESOURCE_COUNTERS=y 15 + CONFIG_CGROUP_MEM_RES_CTLR=y 16 + CONFIG_BLK_CGROUP=y 17 + CONFIG_NAMESPACES=y 18 + CONFIG_BLK_DEV_INITRD=y 19 + CONFIG_KALLSYMS_ALL=y 20 + # CONFIG_COMPAT_BRK is not set 21 + CONFIG_SLAB=y 22 + CONFIG_PROFILING=y 23 + CONFIG_MODULES=y 24 + CONFIG_MODULE_UNLOAD=y 25 + # CONFIG_LBDAF is not set 26 + # CONFIG_BLK_DEV_BSG is not set 27 + CONFIG_CFQ_GROUP_IOSCHED=y 28 + CONFIG_CPU_SUBTYPE_SH7786=y 29 + CONFIG_MEMORY_SIZE=0x10000000 30 + CONFIG_HUGETLB_PAGE_SIZE_1MB=y 31 + CONFIG_MEMORY_HOTPLUG=y 32 + CONFIG_MEMORY_HOTREMOVE=y 33 + CONFIG_KSM=y 34 + CONFIG_SH_STORE_QUEUES=y 35 + CONFIG_SH_APSH4AD0A=y 36 + CONFIG_NO_HZ=y 37 + CONFIG_HIGH_RES_TIMERS=y 38 + CONFIG_CPU_FREQ=y 39 + CONFIG_CPU_FREQ_GOV_POWERSAVE=m 40 + CONFIG_CPU_FREQ_GOV_USERSPACE=m 41 + CONFIG_CPU_FREQ_GOV_ONDEMAND=m 42 + CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m 43 + CONFIG_SH_CPU_FREQ=y 44 + CONFIG_KEXEC=y 45 + CONFIG_SECCOMP=y 46 + CONFIG_PREEMPT=y 47 + # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 48 + CONFIG_BINFMT_MISC=y 49 + CONFIG_PM=y 50 + CONFIG_PM_DEBUG=y 51 + CONFIG_PM_VERBOSE=y 52 + CONFIG_PM_RUNTIME=y 53 + CONFIG_CPU_IDLE=y 54 + CONFIG_NET=y 55 + CONFIG_PACKET=y 56 + CONFIG_UNIX=y 57 + CONFIG_NET_KEY=y 58 + CONFIG_INET=y 59 + # CONFIG_INET_LRO is not set 60 + # CONFIG_IPV6 is not set 61 + # CONFIG_WIRELESS is not set 62 + CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 63 + # CONFIG_FW_LOADER is not set 64 + CONFIG_MTD=y 65 + CONFIG_MTD_CFI=y 66 + CONFIG_BLK_DEV_RAM=y 67 + CONFIG_BLK_DEV_RAM_SIZE=16384 68 + CONFIG_SCSI=y 69 + CONFIG_BLK_DEV_SD=y 70 + CONFIG_SCSI_MULTI_LUN=y 71 + # CONFIG_SCSI_LOWLEVEL is not set 72 + 
CONFIG_NETDEVICES=y 73 + CONFIG_MDIO_BITBANG=y 74 + CONFIG_NET_ETHERNET=y 75 + CONFIG_SMSC911X=y 76 + # CONFIG_NETDEV_1000 is not set 77 + # CONFIG_NETDEV_10000 is not set 78 + # CONFIG_WLAN is not set 79 + CONFIG_INPUT_EVDEV=y 80 + # CONFIG_INPUT_KEYBOARD is not set 81 + # CONFIG_INPUT_MOUSE is not set 82 + # CONFIG_SERIO is not set 83 + CONFIG_SERIAL_SH_SCI=y 84 + CONFIG_SERIAL_SH_SCI_NR_UARTS=6 85 + CONFIG_SERIAL_SH_SCI_CONSOLE=y 86 + # CONFIG_LEGACY_PTYS is not set 87 + # CONFIG_HW_RANDOM is not set 88 + # CONFIG_HWMON is not set 89 + CONFIG_VIDEO_OUTPUT_CONTROL=y 90 + CONFIG_FB=y 91 + CONFIG_FB_SH7785FB=y 92 + CONFIG_FRAMEBUFFER_CONSOLE=y 93 + CONFIG_FONTS=y 94 + CONFIG_FONT_8x8=y 95 + CONFIG_FONT_8x16=y 96 + CONFIG_LOGO=y 97 + CONFIG_USB=y 98 + CONFIG_USB_DEBUG=y 99 + CONFIG_USB_MON=y 100 + CONFIG_USB_OHCI_HCD=y 101 + CONFIG_USB_STORAGE=y 102 + CONFIG_EXT2_FS=y 103 + CONFIG_EXT3_FS=y 104 + # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 105 + CONFIG_MSDOS_FS=y 106 + CONFIG_VFAT_FS=y 107 + CONFIG_NTFS_FS=y 108 + CONFIG_NTFS_RW=y 109 + CONFIG_PROC_KCORE=y 110 + CONFIG_TMPFS=y 111 + CONFIG_HUGETLBFS=y 112 + CONFIG_JFFS2_FS=y 113 + CONFIG_CRAMFS=y 114 + CONFIG_NFS_FS=y 115 + CONFIG_NFS_V3=y 116 + CONFIG_NFS_V4=y 117 + CONFIG_CIFS=y 118 + CONFIG_NLS_DEFAULT="utf8" 119 + CONFIG_NLS_CODEPAGE_437=y 120 + CONFIG_NLS_CODEPAGE_932=y 121 + CONFIG_NLS_ASCII=y 122 + CONFIG_NLS_ISO8859_1=y 123 + CONFIG_NLS_UTF8=y 124 + # CONFIG_ENABLE_MUST_CHECK is not set 125 + CONFIG_MAGIC_SYSRQ=y 126 + CONFIG_DEBUG_KERNEL=y 127 + CONFIG_DEBUG_SHIRQ=y 128 + CONFIG_DETECT_HUNG_TASK=y 129 + CONFIG_DEBUG_INFO=y 130 + CONFIG_DEBUG_VM=y 131 + # CONFIG_RCU_CPU_STALL_DETECTOR is not set 132 + CONFIG_DWARF_UNWINDER=y 133 + # CONFIG_CRYPTO_ANSI_CPRNG is not set
+3
arch/sh/tools/mach-types
··· 9 9 HIGHLANDER SH_HIGHLANDER 10 10 RTS7751R2D SH_RTS7751R2D 11 11 RSK SH_RSK 12 + ALPHA_BOARD SH_ALPHA_BOARD 12 13 13 14 # 14 15 # List of companion chips / MFDs. ··· 62 61 POLARIS SH_POLARIS 63 62 KFR2R09 SH_KFR2R09 64 63 ECOVEC SH_ECOVEC 64 + APSH4A3A SH_APSH4A3A 65 + APSH4AD0A SH_APSH4AD0A
+1
drivers/block/Kconfig
··· 464 464 tristate "Xen virtual block device support" 465 465 depends on XEN 466 466 default y 467 + select XEN_XENBUS_FRONTEND 467 468 help 468 469 This driver implements the front-end of the Xen virtual 469 470 block device driver. It communicates with a back-end driver
+2
drivers/char/agp/intel-agp.h
··· 94 94 #define G4x_GMCH_SIZE_VT_1_5M (0xa << 8) 95 95 #define G4x_GMCH_SIZE_VT_2M (0xc << 8) 96 96 97 + #define GFX_FLSH_CNTL 0x2170 /* 915+ */ 98 + 97 99 #define I810_DRAM_CTL 0x3000 98 100 #define I810_DRAM_ROW_0 0x00000001 99 101 #define I810_DRAM_ROW_0_SDRAM 0x00000001
+13 -4
drivers/char/agp/intel-gtt.c
··· 688 688 689 689 intel_private.base.stolen_size = intel_gtt_stolen_size(); 690 690 691 + intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; 692 + 691 693 ret = intel_gtt_setup_scratch_page(); 692 694 if (ret != 0) { 693 695 intel_gtt_cleanup(); 694 696 return ret; 695 697 } 696 - 697 - intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; 698 698 699 699 return 0; 700 700 } ··· 814 814 } 815 815 } 816 816 817 + /* On the resume path we may be adjusting the PGTBL value, so 818 + * be paranoid and flush all chipset write buffers... 819 + */ 820 + if (INTEL_GTT_GEN >= 3) 821 + writel(0, intel_private.registers+GFX_FLSH_CNTL); 822 + 817 823 reg = intel_private.registers+I810_PGETBL_CTL; 818 824 writel(intel_private.PGETBL_save, reg); 819 825 if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) { ··· 828 822 readl(reg), intel_private.PGETBL_save); 829 823 return false; 830 824 } 825 + 826 + if (INTEL_GTT_GEN >= 3) 827 + writel(0, intel_private.registers+GFX_FLSH_CNTL); 831 828 832 829 return true; 833 830 } ··· 1000 991 if (mem->page_count == 0) 1001 992 return 0; 1002 993 994 + intel_gtt_clear_range(pg_start, mem->page_count); 995 + 1003 996 if (intel_private.base.needs_dmar) { 1004 997 intel_gtt_unmap_memory(mem->sg_list, mem->num_sg); 1005 998 mem->sg_list = NULL; 1006 999 mem->num_sg = 0; 1007 1000 } 1008 - 1009 - intel_gtt_clear_range(pg_start, mem->page_count); 1010 1001 1011 1002 return 0; 1012 1003 }
+81 -6
drivers/gpu/drm/i915/i915_debugfs.c
··· 106 106 } 107 107 } 108 108 109 + static const char *agp_type_str(int type) 110 + { 111 + switch (type) { 112 + case 0: return " uncached"; 113 + case 1: return " snooped"; 114 + default: return ""; 115 + } 116 + } 117 + 109 118 static void 110 119 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 111 120 { 112 - seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s", 121 + seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s", 113 122 &obj->base, 114 123 get_pin_flag(obj), 115 124 get_tiling_flag(obj), ··· 127 118 obj->base.write_domain, 128 119 obj->last_rendering_seqno, 129 120 obj->last_fenced_seqno, 121 + agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY), 130 122 obj->dirty ? " dirty" : "", 131 123 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 132 124 if (obj->base.name) ··· 282 272 dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); 283 273 284 274 mutex_unlock(&dev->struct_mutex); 275 + 276 + return 0; 277 + } 278 + 279 + static int i915_gem_gtt_info(struct seq_file *m, void* data) 280 + { 281 + struct drm_info_node *node = (struct drm_info_node *) m->private; 282 + struct drm_device *dev = node->minor->dev; 283 + struct drm_i915_private *dev_priv = dev->dev_private; 284 + struct drm_i915_gem_object *obj; 285 + size_t total_obj_size, total_gtt_size; 286 + int count, ret; 287 + 288 + ret = mutex_lock_interruptible(&dev->struct_mutex); 289 + if (ret) 290 + return ret; 291 + 292 + total_obj_size = total_gtt_size = count = 0; 293 + list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 294 + seq_printf(m, " "); 295 + describe_obj(m, obj); 296 + seq_printf(m, "\n"); 297 + total_obj_size += obj->base.size; 298 + total_gtt_size += obj->gtt_space->size; 299 + count++; 300 + } 301 + 302 + mutex_unlock(&dev->struct_mutex); 303 + 304 + seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 305 + count, total_obj_size, total_gtt_size); 285 306 286 307 return 0; 287 308 } ··· 497 456 } 498 457 seq_printf(m, "Interrupts received: 
%d\n", 499 458 atomic_read(&dev_priv->irq_received)); 500 - for (i = 0; i < I915_NUM_RINGS; i++) 459 + for (i = 0; i < I915_NUM_RINGS; i++) { 460 + if (IS_GEN6(dev)) { 461 + seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", 462 + dev_priv->ring[i].name, 463 + I915_READ_IMR(&dev_priv->ring[i])); 464 + } 501 465 i915_ring_seqno_info(m, &dev_priv->ring[i]); 466 + } 502 467 mutex_unlock(&dev->struct_mutex); 503 468 504 469 return 0; ··· 703 656 seq_printf(m, "%s [%d]:\n", name, count); 704 657 705 658 while (count--) { 706 - seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s", 659 + seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s%s", 707 660 err->gtt_offset, 708 661 err->size, 709 662 err->read_domains, ··· 713 666 tiling_flag(err->tiling), 714 667 dirty_flag(err->dirty), 715 668 purgeable_flag(err->purgeable), 716 - ring_str(err->ring)); 669 + ring_str(err->ring), 670 + agp_type_str(err->agp_type)); 717 671 718 672 if (err->name) 719 673 seq_printf(m, " (name: %d)", err->name); ··· 792 744 if (error->batchbuffer[i]) { 793 745 struct drm_i915_error_object *obj = error->batchbuffer[i]; 794 746 795 - seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); 747 + seq_printf(m, "%s --- gtt_offset = 0x%08x\n", 748 + dev_priv->ring[i].name, 749 + obj->gtt_offset); 796 750 offset = 0; 797 751 for (page = 0; page < obj->page_count; page++) { 798 752 for (elt = 0; elt < PAGE_SIZE/4; elt++) { ··· 940 890 struct drm_device *dev = node->minor->dev; 941 891 drm_i915_private_t *dev_priv = dev->dev_private; 942 892 u32 rgvmodectl = I915_READ(MEMMODECTL); 943 - u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY); 893 + u32 rstdbyctl = I915_READ(RSTDBYCTL); 944 894 u16 crstandvid = I915_READ16(CRSTANDVID); 945 895 946 896 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? ··· 963 913 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 964 914 seq_printf(m, "Render standby enabled: %s\n", 965 915 (rstdbyctl & RCX_SW_EXIT) ? 
"no" : "yes"); 916 + seq_printf(m, "Current RS state: "); 917 + switch (rstdbyctl & RSX_STATUS_MASK) { 918 + case RSX_STATUS_ON: 919 + seq_printf(m, "on\n"); 920 + break; 921 + case RSX_STATUS_RC1: 922 + seq_printf(m, "RC1\n"); 923 + break; 924 + case RSX_STATUS_RC1E: 925 + seq_printf(m, "RC1E\n"); 926 + break; 927 + case RSX_STATUS_RS1: 928 + seq_printf(m, "RS1\n"); 929 + break; 930 + case RSX_STATUS_RS2: 931 + seq_printf(m, "RS2 (RC6)\n"); 932 + break; 933 + case RSX_STATUS_RS3: 934 + seq_printf(m, "RC3 (RC6+)\n"); 935 + break; 936 + default: 937 + seq_printf(m, "unknown\n"); 938 + break; 939 + } 966 940 967 941 return 0; 968 942 } ··· 1261 1187 static struct drm_info_list i915_debugfs_list[] = { 1262 1188 {"i915_capabilities", i915_capabilities, 0, 0}, 1263 1189 {"i915_gem_objects", i915_gem_object_info, 0}, 1190 + {"i915_gem_gtt", i915_gem_gtt_info, 0}, 1264 1191 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 1265 1192 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, 1266 1193 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
-8
drivers/gpu/drm/i915/i915_dma.c
··· 1962 1962 /* enable GEM by default */ 1963 1963 dev_priv->has_gem = 1; 1964 1964 1965 - if (dev_priv->has_gem == 0 && 1966 - drm_core_check_feature(dev, DRIVER_MODESET)) { 1967 - DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); 1968 - ret = -ENODEV; 1969 - goto out_workqueue_free; 1970 - } 1971 - 1972 1965 dev->driver->get_vblank_counter = i915_get_vblank_counter; 1973 1966 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 1974 1967 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { ··· 2048 2055 2049 2056 intel_teardown_gmbus(dev); 2050 2057 intel_teardown_mchbar(dev); 2051 - out_workqueue_free: 2052 2058 destroy_workqueue(dev_priv->wq); 2053 2059 out_iomapfree: 2054 2060 io_mapping_free(dev_priv->mm.gtt_mapping);
+9
drivers/gpu/drm/i915/i915_drv.c
··· 49 49 unsigned int i915_lvds_downclock = 0; 50 50 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); 51 51 52 + bool i915_try_reset = true; 53 + module_param_named(reset, i915_try_reset, bool, 0600); 54 + 52 55 static struct drm_driver driver; 53 56 extern int intel_agp_enabled; 54 57 ··· 355 352 356 353 /* Resume the modeset for every activated CRTC */ 357 354 drm_helper_resume_force_mode(dev); 355 + 356 + if (dev_priv->renderctx && dev_priv->pwrctx) 357 + ironlake_enable_rc6(dev); 358 358 } 359 359 360 360 intel_opregion_init(dev); ··· 480 474 */ 481 475 bool need_display = true; 482 476 int ret; 477 + 478 + if (!i915_try_reset) 479 + return 0; 483 480 484 481 if (!mutex_trylock(&dev->struct_mutex)) 485 482 return -EBUSY;
+11 -13
drivers/gpu/drm/i915/i915_drv.h
··· 172 172 int page_count; 173 173 u32 gtt_offset; 174 174 u32 *pages[0]; 175 - } *ringbuffer, *batchbuffer[2]; 175 + } *ringbuffer, *batchbuffer[I915_NUM_RINGS]; 176 176 struct drm_i915_error_buffer { 177 - size_t size; 177 + u32 size; 178 178 u32 name; 179 179 u32 seqno; 180 180 u32 gtt_offset; 181 181 u32 read_domains; 182 182 u32 write_domain; 183 - u32 fence_reg; 183 + s32 fence_reg:5; 184 184 s32 pinned:2; 185 185 u32 tiling:2; 186 186 u32 dirty:1; 187 187 u32 purgeable:1; 188 188 u32 ring:4; 189 + u32 agp_type:1; 189 190 } *active_bo, *pinned_bo; 190 191 u32 active_bo_count, pinned_bo_count; 191 192 struct intel_overlay_error_state *overlay; ··· 333 332 334 333 /* LVDS info */ 335 334 int backlight_level; /* restore backlight to this value */ 335 + bool backlight_enabled; 336 336 struct drm_display_mode *panel_fixed_mode; 337 337 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 338 338 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ ··· 796 794 */ 797 795 struct hlist_node exec_node; 798 796 unsigned long exec_handle; 797 + struct drm_i915_gem_exec_object2 *exec_entry; 799 798 800 799 /** 801 800 * Current offset of the object in GTT space. 
··· 1009 1006 extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc); 1010 1007 extern int i915_vblank_swap(struct drm_device *dev, void *data, 1011 1008 struct drm_file *file_priv); 1012 - extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); 1013 - extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask); 1014 - extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, 1015 - u32 mask); 1016 - extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, 1017 - u32 mask); 1018 1009 1019 1010 void 1020 1011 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); ··· 1088 1091 struct drm_file *file_priv); 1089 1092 void i915_gem_load(struct drm_device *dev); 1090 1093 int i915_gem_init_object(struct drm_gem_object *obj); 1091 - void i915_gem_flush_ring(struct drm_device *dev, 1092 - struct intel_ring_buffer *ring, 1093 - uint32_t invalidate_domains, 1094 - uint32_t flush_domains); 1094 + int __must_check i915_gem_flush_ring(struct drm_device *dev, 1095 + struct intel_ring_buffer *ring, 1096 + uint32_t invalidate_domains, 1097 + uint32_t flush_domains); 1095 1098 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 1096 1099 size_t size); 1097 1100 void i915_gem_free_object(struct drm_gem_object *obj); ··· 1262 1265 extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); 1263 1266 extern bool intel_fbc_enabled(struct drm_device *dev); 1264 1267 extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 1268 + extern void ironlake_enable_rc6(struct drm_device *dev); 1265 1269 extern void gen6_set_rps(struct drm_device *dev, u8 val); 1266 1270 extern void intel_detect_pch (struct drm_device *dev); 1267 1271 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
+110 -46
drivers/gpu/drm/i915/i915_gem.c
··· 35 35 #include <linux/swap.h> 36 36 #include <linux/pci.h> 37 37 38 - static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); 38 + static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); 39 39 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); 40 40 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); 41 - static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, 42 - bool write); 43 - static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, 44 - uint64_t offset, 45 - uint64_t size); 41 + static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, 42 + bool write); 43 + static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, 44 + uint64_t offset, 45 + uint64_t size); 46 46 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj); 47 - static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 48 - unsigned alignment, 49 - bool map_and_fenceable); 47 + static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 48 + unsigned alignment, 49 + bool map_and_fenceable); 50 50 static void i915_gem_clear_fence_reg(struct drm_device *dev, 51 51 struct drm_i915_fence_reg *reg); 52 52 static int i915_gem_phys_pwrite(struct drm_device *dev, ··· 1935 1935 { 1936 1936 drm_i915_private_t *dev_priv; 1937 1937 struct drm_device *dev; 1938 + bool idle; 1939 + int i; 1938 1940 1939 1941 dev_priv = container_of(work, drm_i915_private_t, 1940 1942 mm.retire_work.work); ··· 1950 1948 1951 1949 i915_gem_retire_requests(dev); 1952 1950 1953 - if (!dev_priv->mm.suspended && 1954 - (!list_empty(&dev_priv->ring[RCS].request_list) || 1955 - !list_empty(&dev_priv->ring[VCS].request_list) || 1956 - !list_empty(&dev_priv->ring[BCS].request_list))) 1951 + /* Send a 
periodic flush down the ring so we don't hold onto GEM 1952 + * objects indefinitely. 1953 + */ 1954 + idle = true; 1955 + for (i = 0; i < I915_NUM_RINGS; i++) { 1956 + struct intel_ring_buffer *ring = &dev_priv->ring[i]; 1957 + 1958 + if (!list_empty(&ring->gpu_write_list)) { 1959 + struct drm_i915_gem_request *request; 1960 + int ret; 1961 + 1962 + ret = i915_gem_flush_ring(dev, ring, 0, 1963 + I915_GEM_GPU_DOMAINS); 1964 + request = kzalloc(sizeof(*request), GFP_KERNEL); 1965 + if (ret || request == NULL || 1966 + i915_add_request(dev, NULL, request, ring)) 1967 + kfree(request); 1968 + } 1969 + 1970 + idle &= list_empty(&ring->request_list); 1971 + } 1972 + 1973 + if (!dev_priv->mm.suspended && !idle) 1957 1974 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 1975 + 1958 1976 mutex_unlock(&dev->struct_mutex); 1959 1977 } 1960 1978 ··· 2164 2142 return ret; 2165 2143 } 2166 2144 2167 - void 2145 + int 2168 2146 i915_gem_flush_ring(struct drm_device *dev, 2169 2147 struct intel_ring_buffer *ring, 2170 2148 uint32_t invalidate_domains, 2171 2149 uint32_t flush_domains) 2172 2150 { 2173 - ring->flush(ring, invalidate_domains, flush_domains); 2151 + int ret; 2152 + 2153 + ret = ring->flush(ring, invalidate_domains, flush_domains); 2154 + if (ret) 2155 + return ret; 2156 + 2174 2157 i915_gem_process_flushing_list(dev, flush_domains, ring); 2158 + return 0; 2175 2159 } 2176 2160 2177 2161 static int i915_ring_idle(struct drm_device *dev, 2178 2162 struct intel_ring_buffer *ring) 2179 2163 { 2164 + int ret; 2165 + 2180 2166 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) 2181 2167 return 0; 2182 2168 2183 - if (!list_empty(&ring->gpu_write_list)) 2184 - i915_gem_flush_ring(dev, ring, 2169 + if (!list_empty(&ring->gpu_write_list)) { 2170 + ret = i915_gem_flush_ring(dev, ring, 2185 2171 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 2172 + if (ret) 2173 + return ret; 2174 + } 2175 + 2186 2176 return i915_wait_request(dev, 2187 2177 
i915_gem_next_request_seqno(dev, ring), 2188 2178 ring); ··· 2404 2370 int ret; 2405 2371 2406 2372 if (obj->fenced_gpu_access) { 2407 - if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) 2408 - i915_gem_flush_ring(obj->base.dev, 2409 - obj->last_fenced_ring, 2410 - 0, obj->base.write_domain); 2373 + if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 2374 + ret = i915_gem_flush_ring(obj->base.dev, 2375 + obj->last_fenced_ring, 2376 + 0, obj->base.write_domain); 2377 + if (ret) 2378 + return ret; 2379 + } 2411 2380 2412 2381 obj->fenced_gpu_access = false; 2413 2382 } ··· 2429 2392 obj->last_fenced_seqno = 0; 2430 2393 obj->last_fenced_ring = NULL; 2431 2394 } 2395 + 2396 + /* Ensure that all CPU reads are completed before installing a fence 2397 + * and all writes before removing the fence. 2398 + */ 2399 + if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) 2400 + mb(); 2432 2401 2433 2402 return 0; 2434 2403 } ··· 2566 2523 return ret; 2567 2524 } else if (obj->tiling_changed) { 2568 2525 if (obj->fenced_gpu_access) { 2569 - if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) 2570 - i915_gem_flush_ring(obj->base.dev, obj->ring, 2571 - 0, obj->base.write_domain); 2526 + if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 2527 + ret = i915_gem_flush_ring(obj->base.dev, obj->ring, 2528 + 0, obj->base.write_domain); 2529 + if (ret) 2530 + return ret; 2531 + } 2572 2532 2573 2533 obj->fenced_gpu_access = false; 2574 2534 } ··· 2782 2736 obj->gtt_space = NULL; 2783 2737 2784 2738 if (ret == -ENOMEM) { 2785 - /* first try to clear up some space from the GTT */ 2786 - ret = i915_gem_evict_something(dev, size, 2787 - alignment, 2788 - map_and_fenceable); 2739 + /* first try to reclaim some memory by clearing the GTT */ 2740 + ret = i915_gem_evict_everything(dev, false); 2789 2741 if (ret) { 2790 2742 /* now try to shrink everyone else */ 2791 2743 if (gfpmask) { ··· 2791 2747 goto search_free; 2792 2748 } 2793 2749 2794 - return ret; 2750 + return -ENOMEM; 2795 2751 } 
2796 2752 2797 2753 goto search_free; ··· 2806 2762 drm_mm_put_block(obj->gtt_space); 2807 2763 obj->gtt_space = NULL; 2808 2764 2809 - ret = i915_gem_evict_something(dev, size, 2810 - alignment, map_and_fenceable); 2811 - if (ret) 2765 + if (i915_gem_evict_everything(dev, false)) 2812 2766 return ret; 2813 2767 2814 2768 goto search_free; ··· 2853 2811 } 2854 2812 2855 2813 /** Flushes any GPU write domain for the object if it's dirty. */ 2856 - static void 2814 + static int 2857 2815 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj) 2858 2816 { 2859 2817 struct drm_device *dev = obj->base.dev; 2860 2818 2861 2819 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) 2862 - return; 2820 + return 0; 2863 2821 2864 2822 /* Queue the GPU write cache flushing we need. */ 2865 - i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain); 2866 - BUG_ON(obj->base.write_domain); 2823 + return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain); 2867 2824 } 2868 2825 2869 2826 /** Flushes the GTT write domain for the object if it's dirty. */ ··· 2874 2833 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) 2875 2834 return; 2876 2835 2877 - /* No actual flushing is required for the GTT write domain. Writes 2836 + /* No actual flushing is required for the GTT write domain. Writes 2878 2837 * to it immediately go to main memory as far as we know, so there's 2879 2838 * no chipset flush. It also doesn't land in render cache. 2839 + * 2840 + * However, we do have to enforce the order so that all writes through 2841 + * the GTT land before any writes to the device, such as updates to 2842 + * the GATT itself. 
2880 2843 */ 2844 + wmb(); 2845 + 2881 2846 i915_gem_release_mmap(obj); 2882 2847 2883 2848 old_write_domain = obj->base.write_domain; ··· 2929 2882 if (obj->gtt_space == NULL) 2930 2883 return -EINVAL; 2931 2884 2932 - i915_gem_object_flush_gpu_write_domain(obj); 2885 + ret = i915_gem_object_flush_gpu_write_domain(obj); 2886 + if (ret) 2887 + return ret; 2888 + 2933 2889 if (obj->pending_gpu_write || write) { 2934 2890 ret = i915_gem_object_wait_rendering(obj, true); 2935 2891 if (ret) ··· 2977 2927 if (obj->gtt_space == NULL) 2978 2928 return -EINVAL; 2979 2929 2980 - i915_gem_object_flush_gpu_write_domain(obj); 2930 + ret = i915_gem_object_flush_gpu_write_domain(obj); 2931 + if (ret) 2932 + return ret; 2933 + 2981 2934 2982 2935 /* Currently, we are always called from an non-interruptible context. */ 2983 2936 if (pipelined != obj->ring) { ··· 3005 2952 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, 3006 2953 bool interruptible) 3007 2954 { 2955 + int ret; 2956 + 3008 2957 if (!obj->active) 3009 2958 return 0; 3010 2959 3011 - if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) 3012 - i915_gem_flush_ring(obj->base.dev, obj->ring, 3013 - 0, obj->base.write_domain); 2960 + if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 2961 + ret = i915_gem_flush_ring(obj->base.dev, obj->ring, 2962 + 0, obj->base.write_domain); 2963 + if (ret) 2964 + return ret; 2965 + } 3014 2966 3015 2967 return i915_gem_object_wait_rendering(obj, interruptible); 3016 2968 } ··· 3032 2974 uint32_t old_write_domain, old_read_domains; 3033 2975 int ret; 3034 2976 3035 - i915_gem_object_flush_gpu_write_domain(obj); 2977 + ret = i915_gem_object_flush_gpu_write_domain(obj); 2978 + if (ret) 2979 + return ret; 2980 + 3036 2981 ret = i915_gem_object_wait_rendering(obj, true); 3037 2982 if (ret) 3038 2983 return ret; ··· 3130 3069 if (offset == 0 && size == obj->base.size) 3131 3070 return i915_gem_object_set_to_cpu_domain(obj, 0); 3132 3071 3133 - 
i915_gem_object_flush_gpu_write_domain(obj); 3072 + ret = i915_gem_object_flush_gpu_write_domain(obj); 3073 + if (ret) 3074 + return ret; 3075 + 3134 3076 ret = i915_gem_object_wait_rendering(obj, true); 3135 3077 if (ret) 3136 3078 return ret; ··· 3426 3362 * flush earlier is beneficial. 3427 3363 */ 3428 3364 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { 3429 - i915_gem_flush_ring(dev, obj->ring, 3430 - 0, obj->base.write_domain); 3365 + ret = i915_gem_flush_ring(dev, obj->ring, 3366 + 0, obj->base.write_domain); 3431 3367 } else if (obj->ring->outstanding_lazy_request == 3432 3368 obj->last_rendering_seqno) { 3433 3369 struct drm_i915_gem_request *request;
+8 -1
drivers/gpu/drm/i915/i915_gem_evict.c
··· 127 127 } 128 128 129 129 /* Nothing found, clean up and bail out! */ 130 - list_for_each_entry(obj, &unwind_list, exec_list) { 130 + while (!list_empty(&unwind_list)) { 131 + obj = list_first_entry(&unwind_list, 132 + struct drm_i915_gem_object, 133 + exec_list); 134 + 131 135 ret = drm_mm_scan_remove_block(obj->gtt_space); 132 136 BUG_ON(ret); 137 + 138 + list_del_init(&obj->exec_list); 133 139 drm_gem_object_unreference(&obj->base); 134 140 } 135 141 ··· 168 162 exec_list); 169 163 if (ret == 0) 170 164 ret = i915_gem_object_unbind(obj); 165 + 171 166 list_del_init(&obj->exec_list); 172 167 drm_gem_object_unreference(&obj->base); 173 168 }
+75 -44
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 268 268 static int 269 269 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, 270 270 struct eb_objects *eb, 271 - struct drm_i915_gem_exec_object2 *entry, 272 271 struct drm_i915_gem_relocation_entry *reloc) 273 272 { 274 273 struct drm_device *dev = obj->base.dev; ··· 410 411 411 412 static int 412 413 i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, 413 - struct eb_objects *eb, 414 - struct drm_i915_gem_exec_object2 *entry) 414 + struct eb_objects *eb) 415 415 { 416 416 struct drm_i915_gem_relocation_entry __user *user_relocs; 417 + struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 417 418 int i, ret; 418 419 419 420 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; ··· 425 426 sizeof(reloc))) 426 427 return -EFAULT; 427 428 428 - ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc); 429 + ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc); 429 430 if (ret) 430 431 return ret; 431 432 ··· 441 442 static int 442 443 i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, 443 444 struct eb_objects *eb, 444 - struct drm_i915_gem_exec_object2 *entry, 445 445 struct drm_i915_gem_relocation_entry *relocs) 446 446 { 447 + const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 447 448 int i, ret; 448 449 449 450 for (i = 0; i < entry->relocation_count; i++) { 450 - ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]); 451 + ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]); 451 452 if (ret) 452 453 return ret; 453 454 } ··· 458 459 static int 459 460 i915_gem_execbuffer_relocate(struct drm_device *dev, 460 461 struct eb_objects *eb, 461 - struct list_head *objects, 462 - struct drm_i915_gem_exec_object2 *exec) 462 + struct list_head *objects) 463 463 { 464 464 struct drm_i915_gem_object *obj; 465 465 int ret; ··· 466 468 list_for_each_entry(obj, objects, exec_list) { 467 469 obj->base.pending_read_domains = 0; 468 470 
obj->base.pending_write_domain = 0; 469 - ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++); 471 + ret = i915_gem_execbuffer_relocate_object(obj, eb); 470 472 if (ret) 471 473 return ret; 472 474 } ··· 477 479 static int 478 480 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 479 481 struct drm_file *file, 480 - struct list_head *objects, 481 - struct drm_i915_gem_exec_object2 *exec) 482 + struct list_head *objects) 482 483 { 483 484 struct drm_i915_gem_object *obj; 484 - struct drm_i915_gem_exec_object2 *entry; 485 485 int ret, retry; 486 486 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; 487 + struct list_head ordered_objects; 488 + 489 + INIT_LIST_HEAD(&ordered_objects); 490 + while (!list_empty(objects)) { 491 + struct drm_i915_gem_exec_object2 *entry; 492 + bool need_fence, need_mappable; 493 + 494 + obj = list_first_entry(objects, 495 + struct drm_i915_gem_object, 496 + exec_list); 497 + entry = obj->exec_entry; 498 + 499 + need_fence = 500 + has_fenced_gpu_access && 501 + entry->flags & EXEC_OBJECT_NEEDS_FENCE && 502 + obj->tiling_mode != I915_TILING_NONE; 503 + need_mappable = 504 + entry->relocation_count ? true : need_fence; 505 + 506 + if (need_mappable) 507 + list_move(&obj->exec_list, &ordered_objects); 508 + else 509 + list_move_tail(&obj->exec_list, &ordered_objects); 510 + } 511 + list_splice(&ordered_objects, objects); 487 512 488 513 /* Attempt to pin all of the buffers into the GTT. 489 514 * This is done in 3 phases: ··· 525 504 ret = 0; 526 505 527 506 /* Unbind any ill-fitting objects or pin. 
*/ 528 - entry = exec; 529 507 list_for_each_entry(obj, objects, exec_list) { 508 + struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 530 509 bool need_fence, need_mappable; 531 - 532 - if (!obj->gtt_space) { 533 - entry++; 510 + if (!obj->gtt_space) 534 511 continue; 535 - } 536 512 537 513 need_fence = 538 514 has_fenced_gpu_access && ··· 552 534 } 553 535 554 536 /* Bind fresh objects */ 555 - entry = exec; 556 537 list_for_each_entry(obj, objects, exec_list) { 538 + struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 557 539 bool need_fence; 558 540 559 541 need_fence = ··· 588 570 } 589 571 590 572 entry->offset = obj->gtt_offset; 591 - entry++; 592 573 } 593 574 594 575 /* Decrement pin count for bound objects */ ··· 639 622 int i, total, ret; 640 623 641 624 /* We may process another execbuffer during the unlock... */ 642 - while (list_empty(objects)) { 625 + while (!list_empty(objects)) { 643 626 obj = list_first_entry(objects, 644 627 struct drm_i915_gem_object, 645 628 exec_list); ··· 682 665 } 683 666 684 667 /* reacquire the objects */ 685 - INIT_LIST_HEAD(objects); 686 668 eb_reset(eb); 687 669 for (i = 0; i < count; i++) { 688 670 struct drm_i915_gem_object *obj; ··· 697 681 698 682 list_add_tail(&obj->exec_list, objects); 699 683 obj->exec_handle = exec[i].handle; 684 + obj->exec_entry = &exec[i]; 700 685 eb_add_object(eb, obj); 701 686 } 702 687 703 - ret = i915_gem_execbuffer_reserve(ring, file, objects, exec); 688 + ret = i915_gem_execbuffer_reserve(ring, file, objects); 704 689 if (ret) 705 690 goto err; 706 691 ··· 710 693 obj->base.pending_read_domains = 0; 711 694 obj->base.pending_write_domain = 0; 712 695 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, 713 - exec, 714 696 reloc + total); 715 697 if (ret) 716 698 goto err; ··· 729 713 return ret; 730 714 } 731 715 732 - static void 716 + static int 733 717 i915_gem_execbuffer_flush(struct drm_device *dev, 734 718 uint32_t invalidate_domains, 735 719 uint32_t 
flush_domains, 736 720 uint32_t flush_rings) 737 721 { 738 722 drm_i915_private_t *dev_priv = dev->dev_private; 739 - int i; 723 + int i, ret; 740 724 741 725 if (flush_domains & I915_GEM_DOMAIN_CPU) 742 726 intel_gtt_chipset_flush(); 743 727 728 + if (flush_domains & I915_GEM_DOMAIN_GTT) 729 + wmb(); 730 + 744 731 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { 745 732 for (i = 0; i < I915_NUM_RINGS; i++) 746 - if (flush_rings & (1 << i)) 747 - i915_gem_flush_ring(dev, &dev_priv->ring[i], 748 - invalidate_domains, 749 - flush_domains); 733 + if (flush_rings & (1 << i)) { 734 + ret = i915_gem_flush_ring(dev, 735 + &dev_priv->ring[i], 736 + invalidate_domains, 737 + flush_domains); 738 + if (ret) 739 + return ret; 740 + } 750 741 } 742 + 743 + return 0; 751 744 } 752 745 753 746 static int ··· 820 795 cd.invalidate_domains, 821 796 cd.flush_domains); 822 797 #endif 823 - i915_gem_execbuffer_flush(ring->dev, 824 - cd.invalidate_domains, 825 - cd.flush_domains, 826 - cd.flush_rings); 798 + ret = i915_gem_execbuffer_flush(ring->dev, 799 + cd.invalidate_domains, 800 + cd.flush_domains, 801 + cd.flush_rings); 802 + if (ret) 803 + return ret; 827 804 } 828 805 829 806 list_for_each_entry(obj, objects, exec_list) { ··· 948 921 struct intel_ring_buffer *ring) 949 922 { 950 923 struct drm_i915_gem_request *request; 951 - u32 flush_domains; 924 + u32 invalidate; 952 925 953 926 /* 954 927 * Ensure that the commands in the batch buffer are ··· 956 929 * 957 930 * The sampler always gets flushed on i965 (sigh). 
958 931 */ 959 - flush_domains = 0; 932 + invalidate = I915_GEM_DOMAIN_COMMAND; 960 933 if (INTEL_INFO(dev)->gen >= 4) 961 - flush_domains |= I915_GEM_DOMAIN_SAMPLER; 962 - 963 - ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains); 934 + invalidate |= I915_GEM_DOMAIN_SAMPLER; 935 + if (ring->flush(ring, invalidate, 0)) { 936 + i915_gem_next_request_seqno(dev, ring); 937 + return; 938 + } 964 939 965 940 /* Add a breadcrumb for the completion of the batch buffer */ 966 941 request = kzalloc(sizeof(*request), GFP_KERNEL); ··· 1127 1098 1128 1099 list_add_tail(&obj->exec_list, &objects); 1129 1100 obj->exec_handle = exec[i].handle; 1101 + obj->exec_entry = &exec[i]; 1130 1102 eb_add_object(eb, obj); 1131 1103 } 1132 1104 1105 + /* take note of the batch buffer before we might reorder the lists */ 1106 + batch_obj = list_entry(objects.prev, 1107 + struct drm_i915_gem_object, 1108 + exec_list); 1109 + 1133 1110 /* Move the objects en-masse into the GTT, evicting if necessary. */ 1134 - ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec); 1111 + ret = i915_gem_execbuffer_reserve(ring, file, &objects); 1135 1112 if (ret) 1136 1113 goto err; 1137 1114 1138 1115 /* The objects are in their final locations, apply the relocations. */ 1139 - ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec); 1116 + ret = i915_gem_execbuffer_relocate(dev, eb, &objects); 1140 1117 if (ret) { 1141 1118 if (ret == -EFAULT) { 1142 1119 ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, ··· 1156 1121 } 1157 1122 1158 1123 /* Set the pending read domains for the batch buffer to COMMAND */ 1159 - batch_obj = list_entry(objects.prev, 1160 - struct drm_i915_gem_object, 1161 - exec_list); 1162 1124 if (batch_obj->base.pending_write_domain) { 1163 1125 DRM_ERROR("Attempting to use self-modifying batch buffer\n"); 1164 1126 ret = -EINVAL; ··· 1372 1340 drm_free_large(exec2_list); 1373 1341 return ret; 1374 1342 } 1375 -
+5 -9
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 85 85 86 86 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) 87 87 { 88 - struct drm_device *dev = obj->base.dev; 89 - struct drm_i915_private *dev_priv = dev->dev_private; 90 - 91 - if (dev_priv->mm.gtt->needs_dmar) { 92 - intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); 93 - obj->sg_list = NULL; 94 - obj->num_sg = 0; 95 - } 96 - 97 88 intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, 98 89 obj->base.size >> PAGE_SHIFT); 90 + 91 + if (obj->sg_list) { 92 + intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); 93 + obj->sg_list = NULL; 94 + } 99 95 }
+103 -166
drivers/gpu/drm/i915/i915_irq.c
··· 64 64 #define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ 65 65 DRM_I915_VBLANK_PIPE_B) 66 66 67 - void 68 - ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) 69 - { 70 - if ((dev_priv->gt_irq_mask & mask) != 0) { 71 - dev_priv->gt_irq_mask &= ~mask; 72 - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 73 - POSTING_READ(GTIMR); 74 - } 75 - } 76 - 77 - void 78 - ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) 79 - { 80 - if ((dev_priv->gt_irq_mask & mask) != mask) { 81 - dev_priv->gt_irq_mask |= mask; 82 - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 83 - POSTING_READ(GTIMR); 84 - } 85 - } 86 - 87 67 /* For display hotplug interrupt */ 88 68 static void 89 69 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) ··· 82 102 dev_priv->irq_mask |= mask; 83 103 I915_WRITE(DEIMR, dev_priv->irq_mask); 84 104 POSTING_READ(DEIMR); 85 - } 86 - } 87 - 88 - void 89 - i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) 90 - { 91 - if ((dev_priv->irq_mask & mask) != 0) { 92 - dev_priv->irq_mask &= ~mask; 93 - I915_WRITE(IMR, dev_priv->irq_mask); 94 - POSTING_READ(IMR); 95 - } 96 - } 97 - 98 - void 99 - i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) 100 - { 101 - if ((dev_priv->irq_mask & mask) != mask) { 102 - dev_priv->irq_mask |= mask; 103 - I915_WRITE(IMR, dev_priv->irq_mask); 104 - POSTING_READ(IMR); 105 105 } 106 106 } 107 107 ··· 349 389 { 350 390 struct drm_i915_private *dev_priv = dev->dev_private; 351 391 u32 seqno = ring->get_seqno(ring); 352 - ring->irq_seqno = seqno; 392 + 353 393 trace_i915_gem_request_complete(dev, seqno); 394 + 395 + ring->irq_seqno = seqno; 354 396 wake_up_all(&ring->irq_queue); 397 + 355 398 dev_priv->hangcheck_count = 0; 356 399 mod_timer(&dev_priv->hangcheck_timer, 357 400 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); ··· 396 433 dev_priv->cur_delay = new_delay; 397 434 398 435 I915_WRITE(GEN6_PMIIR, pm_iir); 436 + } 437 + 438 + static void 
pch_irq_handler(struct drm_device *dev) 439 + { 440 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 441 + u32 pch_iir; 442 + 443 + pch_iir = I915_READ(SDEIIR); 444 + 445 + if (pch_iir & SDE_AUDIO_POWER_MASK) 446 + DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 447 + (pch_iir & SDE_AUDIO_POWER_MASK) >> 448 + SDE_AUDIO_POWER_SHIFT); 449 + 450 + if (pch_iir & SDE_GMBUS) 451 + DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); 452 + 453 + if (pch_iir & SDE_AUDIO_HDCP_MASK) 454 + DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 455 + 456 + if (pch_iir & SDE_AUDIO_TRANS_MASK) 457 + DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 458 + 459 + if (pch_iir & SDE_POISON) 460 + DRM_ERROR("PCH poison interrupt\n"); 461 + 462 + if (pch_iir & SDE_FDI_MASK) { 463 + u32 fdia, fdib; 464 + 465 + fdia = I915_READ(FDI_RXA_IIR); 466 + fdib = I915_READ(FDI_RXB_IIR); 467 + DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib); 468 + } 469 + 470 + if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 471 + DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 472 + 473 + if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 474 + DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 475 + 476 + if (pch_iir & SDE_TRANSB_FIFO_UNDER) 477 + DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); 478 + if (pch_iir & SDE_TRANSA_FIFO_UNDER) 479 + DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); 399 480 } 400 481 401 482 static irqreturn_t ironlake_irq_handler(struct drm_device *dev) ··· 509 502 drm_handle_vblank(dev, 1); 510 503 511 504 /* check event from PCH */ 512 - if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask)) 513 - queue_work(dev_priv->wq, &dev_priv->hotplug_work); 505 + if (de_iir & DE_PCH_EVENT) { 506 + if (pch_iir & hotplug_mask) 507 + queue_work(dev_priv->wq, &dev_priv->hotplug_work); 508 + pch_irq_handler(dev); 509 + } 514 510 515 511 if (de_iir & DE_PCU_EVENT) { 516 512 
I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); ··· 566 556 567 557 #ifdef CONFIG_DEBUG_FS 568 558 static struct drm_i915_error_object * 569 - i915_error_object_create(struct drm_device *dev, 559 + i915_error_object_create(struct drm_i915_private *dev_priv, 570 560 struct drm_i915_gem_object *src) 571 561 { 572 - drm_i915_private_t *dev_priv = dev->dev_private; 573 562 struct drm_i915_error_object *dst; 574 563 int page, page_count; 575 564 u32 reloc_offset; ··· 641 632 kfree(error); 642 633 } 643 634 644 - static u32 645 - i915_get_bbaddr(struct drm_device *dev, u32 *ring) 646 - { 647 - u32 cmd; 648 - 649 - if (IS_I830(dev) || IS_845G(dev)) 650 - cmd = MI_BATCH_BUFFER; 651 - else if (INTEL_INFO(dev)->gen >= 4) 652 - cmd = (MI_BATCH_BUFFER_START | (2 << 6) | 653 - MI_BATCH_NON_SECURE_I965); 654 - else 655 - cmd = (MI_BATCH_BUFFER_START | (2 << 6)); 656 - 657 - return ring[0] == cmd ? ring[1] : 0; 658 - } 659 - 660 - static u32 661 - i915_ringbuffer_last_batch(struct drm_device *dev, 662 - struct intel_ring_buffer *ring) 663 - { 664 - struct drm_i915_private *dev_priv = dev->dev_private; 665 - u32 head, bbaddr; 666 - u32 *val; 667 - 668 - /* Locate the current position in the ringbuffer and walk back 669 - * to find the most recently dispatched batch buffer. 
670 - */ 671 - head = I915_READ_HEAD(ring) & HEAD_ADDR; 672 - 673 - val = (u32 *)(ring->virtual_start + head); 674 - while (--val >= (u32 *)ring->virtual_start) { 675 - bbaddr = i915_get_bbaddr(dev, val); 676 - if (bbaddr) 677 - return bbaddr; 678 - } 679 - 680 - val = (u32 *)(ring->virtual_start + ring->size); 681 - while (--val >= (u32 *)ring->virtual_start) { 682 - bbaddr = i915_get_bbaddr(dev, val); 683 - if (bbaddr) 684 - return bbaddr; 685 - } 686 - 687 - return 0; 688 - } 689 - 690 635 static u32 capture_bo_list(struct drm_i915_error_buffer *err, 691 636 int count, 692 637 struct list_head *head) ··· 665 702 err->dirty = obj->dirty; 666 703 err->purgeable = obj->madv != I915_MADV_WILLNEED; 667 704 err->ring = obj->ring ? obj->ring->id : 0; 705 + err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY; 668 706 669 707 if (++i == count) 670 708 break; ··· 705 741 } 706 742 } 707 743 744 + static struct drm_i915_error_object * 745 + i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, 746 + struct intel_ring_buffer *ring) 747 + { 748 + struct drm_i915_gem_object *obj; 749 + u32 seqno; 750 + 751 + if (!ring->get_seqno) 752 + return NULL; 753 + 754 + seqno = ring->get_seqno(ring); 755 + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 756 + if (obj->ring != ring) 757 + continue; 758 + 759 + if (!i915_seqno_passed(obj->last_rendering_seqno, seqno)) 760 + continue; 761 + 762 + if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) 763 + continue; 764 + 765 + /* We need to copy these to an anonymous buffer as the simplest 766 + * method to avoid being overwritten by userspace. 
767 + */ 768 + return i915_error_object_create(dev_priv, obj); 769 + } 770 + 771 + return NULL; 772 + } 773 + 708 774 /** 709 775 * i915_capture_error_state - capture an error record for later analysis 710 776 * @dev: drm device ··· 749 755 struct drm_i915_private *dev_priv = dev->dev_private; 750 756 struct drm_i915_gem_object *obj; 751 757 struct drm_i915_error_state *error; 752 - struct drm_i915_gem_object *batchbuffer[2]; 753 758 unsigned long flags; 754 - u32 bbaddr; 755 - int count; 759 + int i; 756 760 757 761 spin_lock_irqsave(&dev_priv->error_lock, flags); 758 762 error = dev_priv->first_error; ··· 809 817 } 810 818 i915_gem_record_fences(dev, error); 811 819 812 - bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]); 813 - 814 - /* Grab the current batchbuffer, most likely to have crashed. */ 815 - batchbuffer[0] = NULL; 816 - batchbuffer[1] = NULL; 817 - count = 0; 818 - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 819 - if (batchbuffer[0] == NULL && 820 - bbaddr >= obj->gtt_offset && 821 - bbaddr < obj->gtt_offset + obj->base.size) 822 - batchbuffer[0] = obj; 823 - 824 - if (batchbuffer[1] == NULL && 825 - error->acthd >= obj->gtt_offset && 826 - error->acthd < obj->gtt_offset + obj->base.size) 827 - batchbuffer[1] = obj; 828 - 829 - count++; 830 - } 831 - /* Scan the other lists for completeness for those bizarre errors. 
*/ 832 - if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { 833 - list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { 834 - if (batchbuffer[0] == NULL && 835 - bbaddr >= obj->gtt_offset && 836 - bbaddr < obj->gtt_offset + obj->base.size) 837 - batchbuffer[0] = obj; 838 - 839 - if (batchbuffer[1] == NULL && 840 - error->acthd >= obj->gtt_offset && 841 - error->acthd < obj->gtt_offset + obj->base.size) 842 - batchbuffer[1] = obj; 843 - 844 - if (batchbuffer[0] && batchbuffer[1]) 845 - break; 846 - } 847 - } 848 - if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { 849 - list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { 850 - if (batchbuffer[0] == NULL && 851 - bbaddr >= obj->gtt_offset && 852 - bbaddr < obj->gtt_offset + obj->base.size) 853 - batchbuffer[0] = obj; 854 - 855 - if (batchbuffer[1] == NULL && 856 - error->acthd >= obj->gtt_offset && 857 - error->acthd < obj->gtt_offset + obj->base.size) 858 - batchbuffer[1] = obj; 859 - 860 - if (batchbuffer[0] && batchbuffer[1]) 861 - break; 862 - } 863 - } 864 - 865 - /* We need to copy these to an anonymous buffer as the simplest 866 - * method to avoid being overwritten by userspace. 867 - */ 868 - error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); 869 - if (batchbuffer[1] != batchbuffer[0]) 870 - error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); 871 - else 872 - error->batchbuffer[1] = NULL; 820 + /* Record the active batchbuffers */ 821 + for (i = 0; i < I915_NUM_RINGS; i++) 822 + error->batchbuffer[i] = 823 + i915_error_first_batchbuffer(dev_priv, 824 + &dev_priv->ring[i]); 873 825 874 826 /* Record the ringbuffer */ 875 - error->ringbuffer = i915_error_object_create(dev, 827 + error->ringbuffer = i915_error_object_create(dev_priv, 876 828 dev_priv->ring[RCS].obj); 877 829 878 830 /* Record buffers on the active and pinned lists. 
*/ 879 831 error->active_bo = NULL; 880 832 error->pinned_bo = NULL; 881 833 882 - error->active_bo_count = count; 834 + i = 0; 835 + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 836 + i++; 837 + error->active_bo_count = i; 883 838 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) 884 - count++; 885 - error->pinned_bo_count = count - error->active_bo_count; 839 + i++; 840 + error->pinned_bo_count = i - error->active_bo_count; 886 841 887 - if (count) { 888 - error->active_bo = kmalloc(sizeof(*error->active_bo)*count, 842 + if (i) { 843 + error->active_bo = kmalloc(sizeof(*error->active_bo)*i, 889 844 GFP_ATOMIC); 890 845 if (error->active_bo) 891 846 error->pinned_bo = ··· 1612 1673 1613 1674 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1614 1675 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1615 - if (IS_GEN6(dev)) { 1616 - I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT); 1617 - I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT); 1618 - I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT); 1619 - } 1620 1676 1621 1677 if (IS_GEN6(dev)) 1622 1678 render_irqs = ··· 1632 1698 } else { 1633 1699 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1634 1700 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1701 + hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK; 1702 + I915_WRITE(FDI_RXA_IMR, 0); 1703 + I915_WRITE(FDI_RXB_IMR, 0); 1635 1704 } 1636 1705 1637 1706 dev_priv->pch_irq_mask = ~hotplug_mask;
+88 -7
drivers/gpu/drm/i915/i915_reg.h
··· 145 145 #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ 146 146 #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ 147 147 #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) 148 + #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) 149 + #define MI_SUSPEND_FLUSH_EN (1<<0) 148 150 #define MI_REPORT_HEAD MI_INSTR(0x07, 0) 149 151 #define MI_OVERLAY_FLIP MI_INSTR(0x11,0) 150 152 #define MI_OVERLAY_CONTINUE (0x0<<21) ··· 161 159 #define MI_MM_SPACE_PHYSICAL (0<<8) 162 160 #define MI_SAVE_EXT_STATE_EN (1<<3) 163 161 #define MI_RESTORE_EXT_STATE_EN (1<<2) 162 + #define MI_FORCE_RESTORE (1<<1) 164 163 #define MI_RESTORE_INHIBIT (1<<0) 165 164 #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) 166 165 #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ ··· 291 288 #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 292 289 #define RING_ACTHD(base) ((base)+0x74) 293 290 #define RING_NOPID(base) ((base)+0x94) 291 + #define RING_IMR(base) ((base)+0xa8) 294 292 #define TAIL_ADDR 0x001FFFF8 295 293 #define HEAD_WRAP_COUNT 0xFFE00000 296 294 #define HEAD_WRAP_ONE 0x00200000 ··· 1134 1130 #define RCBMINAVG 0x111a0 1135 1131 #define RCUPEI 0x111b0 1136 1132 #define RCDNEI 0x111b4 1137 - #define MCHBAR_RENDER_STANDBY 0x111b8 1138 - #define RCX_SW_EXIT (1<<23) 1139 - #define RSX_STATUS_MASK 0x00700000 1133 + #define RSTDBYCTL 0x111b8 1134 + #define RS1EN (1<<31) 1135 + #define RS2EN (1<<30) 1136 + #define RS3EN (1<<29) 1137 + #define D3RS3EN (1<<28) /* Display D3 imlies RS3 */ 1138 + #define SWPROMORSX (1<<27) /* RSx promotion timers ignored */ 1139 + #define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */ 1140 + #define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */ 1141 + #define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */ 1142 + #define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */ 1143 + #define RSX_STATUS_MASK (7<<20) 1144 + #define RSX_STATUS_ON (0<<20) 1145 + #define RSX_STATUS_RC1 (1<<20) 1146 + #define 
RSX_STATUS_RC1E (2<<20) 1147 + #define RSX_STATUS_RS1 (3<<20) 1148 + #define RSX_STATUS_RS2 (4<<20) /* aka rc6 */ 1149 + #define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */ 1150 + #define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */ 1151 + #define RSX_STATUS_RSVD2 (7<<20) 1152 + #define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */ 1153 + #define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */ 1154 + #define JRSC (1<<17) /* rsx coupled to cpu c-state */ 1155 + #define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */ 1156 + #define RS1CONTSAV_MASK (3<<14) 1157 + #define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */ 1158 + #define RS1CONTSAV_RSVD (1<<14) 1159 + #define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */ 1160 + #define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */ 1161 + #define NORMSLEXLAT_MASK (3<<12) 1162 + #define SLOW_RS123 (0<<12) 1163 + #define SLOW_RS23 (1<<12) 1164 + #define SLOW_RS3 (2<<12) 1165 + #define NORMAL_RS123 (3<<12) 1166 + #define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */ 1167 + #define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */ 1168 + #define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */ 1169 + #define STATELOCK (1<<7) /* locked to rs_cstate if 0 */ 1170 + #define RS_CSTATE_MASK (3<<4) 1171 + #define RS_CSTATE_C367_RS1 (0<<4) 1172 + #define RS_CSTATE_C36_RS1_C7_RS2 (1<<4) 1173 + #define RS_CSTATE_RSVD (2<<4) 1174 + #define RS_CSTATE_C367_RS2 (3<<4) 1175 + #define REDSAVES (1<<3) /* no context save if was idle during rs0 */ 1176 + #define REDRESTORES (1<<2) /* no restore if was idle during rs0 */ 1140 1177 #define VIDCTL 0x111c0 1141 1178 #define VIDSTS 0x111c8 1142 1179 #define VIDSTART 0x111cc /* 8 bits */ ··· 2390 2345 2391 2346 /* Memory latency timer register */ 2392 2347 #define MLTR_ILK 0x11222 2348 + #define MLTR_WM1_SHIFT 0 2349 + #define MLTR_WM2_SHIFT 8 2393 2350 
/* the unit of memory self-refresh latency time is 0.5us */ 2394 2351 #define ILK_SRLT_MASK 0x3f 2352 + #define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK) 2353 + #define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT) 2354 + #define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT) 2395 2355 2396 2356 /* define the fifo size on Ironlake */ 2397 2357 #define ILK_DISPLAY_FIFO 128 ··· 2778 2728 /* PCH */ 2779 2729 2780 2730 /* south display engine interrupt */ 2731 + #define SDE_AUDIO_POWER_D (1 << 27) 2732 + #define SDE_AUDIO_POWER_C (1 << 26) 2733 + #define SDE_AUDIO_POWER_B (1 << 25) 2734 + #define SDE_AUDIO_POWER_SHIFT (25) 2735 + #define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT) 2736 + #define SDE_GMBUS (1 << 24) 2737 + #define SDE_AUDIO_HDCP_TRANSB (1 << 23) 2738 + #define SDE_AUDIO_HDCP_TRANSA (1 << 22) 2739 + #define SDE_AUDIO_HDCP_MASK (3 << 22) 2740 + #define SDE_AUDIO_TRANSB (1 << 21) 2741 + #define SDE_AUDIO_TRANSA (1 << 20) 2742 + #define SDE_AUDIO_TRANS_MASK (3 << 20) 2743 + #define SDE_POISON (1 << 19) 2744 + /* 18 reserved */ 2745 + #define SDE_FDI_RXB (1 << 17) 2746 + #define SDE_FDI_RXA (1 << 16) 2747 + #define SDE_FDI_MASK (3 << 16) 2748 + #define SDE_AUXD (1 << 15) 2749 + #define SDE_AUXC (1 << 14) 2750 + #define SDE_AUXB (1 << 13) 2751 + #define SDE_AUX_MASK (7 << 13) 2752 + /* 12 reserved */ 2781 2753 #define SDE_CRT_HOTPLUG (1 << 11) 2782 2754 #define SDE_PORTD_HOTPLUG (1 << 10) 2783 2755 #define SDE_PORTC_HOTPLUG (1 << 9) 2784 2756 #define SDE_PORTB_HOTPLUG (1 << 8) 2785 2757 #define SDE_SDVOB_HOTPLUG (1 << 6) 2786 2758 #define SDE_HOTPLUG_MASK (0xf << 8) 2759 + #define SDE_TRANSB_CRC_DONE (1 << 5) 2760 + #define SDE_TRANSB_CRC_ERR (1 << 4) 2761 + #define SDE_TRANSB_FIFO_UNDER (1 << 3) 2762 + #define SDE_TRANSA_CRC_DONE (1 << 2) 2763 + #define SDE_TRANSA_CRC_ERR (1 << 1) 2764 + #define SDE_TRANSA_FIFO_UNDER (1 << 0) 2765 + #define SDE_TRANS_MASK (0x3f) 2787 2766 /* CPT */ 2788 2767 #define 
SDE_CRT_HOTPLUG_CPT (1 << 19) 2789 2768 #define SDE_PORTD_HOTPLUG_CPT (1 << 23) ··· 3253 3174 #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) 3254 3175 #define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) 3255 3176 /* SNB B-stepping */ 3256 - #define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) 3257 - #define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) 3258 - #define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) 3259 - #define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) 3177 + #define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22) 3178 + #define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22) 3179 + #define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22) 3180 + #define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22) 3181 + #define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22) 3260 3182 #define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) 3261 3183 3262 3184 #define FORCEWAKE 0xA18C ··· 3319 3239 3320 3240 #define GEN6_PCODE_MAILBOX 0x138124 3321 3241 #define GEN6_PCODE_READY (1<<31) 3242 + #define GEN6_READ_OC_PARAMS 0xc 3322 3243 #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9 3323 3244 #define GEN6_PCODE_DATA 0x138128 3324 3245
+2 -6
drivers/gpu/drm/i915/i915_suspend.c
··· 740 740 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); 741 741 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); 742 742 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); 743 - I915_WRITE(MCHBAR_RENDER_STANDBY, 743 + I915_WRITE(RSTDBYCTL, 744 744 dev_priv->saveMCHBAR_RENDER_STANDBY); 745 745 } else { 746 746 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); ··· 811 811 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); 812 812 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); 813 813 dev_priv->saveMCHBAR_RENDER_STANDBY = 814 - I915_READ(MCHBAR_RENDER_STANDBY); 814 + I915_READ(RSTDBYCTL); 815 815 } else { 816 816 dev_priv->saveIER = I915_READ(IER); 817 817 dev_priv->saveIMR = I915_READ(IMR); ··· 821 821 ironlake_disable_drps(dev); 822 822 if (IS_GEN6(dev)) 823 823 gen6_disable_rps(dev); 824 - 825 - /* XXX disabling the clock gating breaks suspend on gm45 826 - intel_disable_clock_gating(dev); 827 - */ 828 824 829 825 /* Cache mode state */ 830 826 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+25 -5
drivers/gpu/drm/i915/intel_crt.c
··· 30 30 #include "drm.h" 31 31 #include "drm_crtc.h" 32 32 #include "drm_crtc_helper.h" 33 + #include "drm_edid.h" 33 34 #include "intel_drv.h" 34 35 #include "i915_drm.h" 35 36 #include "i915_drv.h" ··· 288 287 return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1; 289 288 } 290 289 291 - static bool intel_crt_detect_ddc(struct intel_crt *crt) 290 + static bool intel_crt_detect_ddc(struct drm_connector *connector) 292 291 { 292 + struct intel_crt *crt = intel_attached_crt(connector); 293 293 struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; 294 294 295 295 /* CRT should always be at 0, but check anyway */ ··· 303 301 } 304 302 305 303 if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { 306 - DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); 307 - return true; 304 + struct edid *edid; 305 + bool is_digital = false; 306 + 307 + edid = drm_get_edid(connector, 308 + &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); 309 + /* 310 + * This may be a DVI-I connector with a shared DDC 311 + * link between analog and digital outputs, so we 312 + * have to check the EDID input spec of the attached device. 313 + */ 314 + if (edid != NULL) { 315 + is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; 316 + connector->display_info.raw_edid = NULL; 317 + kfree(edid); 318 + } 319 + 320 + if (!is_digital) { 321 + DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); 322 + return true; 323 + } 308 324 } 309 325 310 326 return false; ··· 478 458 } 479 459 } 480 460 481 - if (intel_crt_detect_ddc(crt)) 461 + if (intel_crt_detect_ddc(connector)) 482 462 return connector_status_connected; 483 463 484 464 if (!force) ··· 492 472 crtc = intel_get_load_detect_pipe(&crt->base, connector, 493 473 NULL, &dpms_mode); 494 474 if (crtc) { 495 - if (intel_crt_detect_ddc(crt)) 475 + if (intel_crt_detect_ddc(connector)) 496 476 status = connector_status_connected; 497 477 else 498 478 status = intel_crt_load_detect(crtc, crt);
+269 -207
drivers/gpu/drm/i915/intel_display.c
··· 3418 3418 static bool ironlake_compute_wm0(struct drm_device *dev, 3419 3419 int pipe, 3420 3420 const struct intel_watermark_params *display, 3421 - int display_latency, 3421 + int display_latency_ns, 3422 3422 const struct intel_watermark_params *cursor, 3423 - int cursor_latency, 3423 + int cursor_latency_ns, 3424 3424 int *plane_wm, 3425 3425 int *cursor_wm) 3426 3426 { 3427 3427 struct drm_crtc *crtc; 3428 - int htotal, hdisplay, clock, pixel_size = 0; 3429 - int line_time_us, line_count, entries; 3428 + int htotal, hdisplay, clock, pixel_size; 3429 + int line_time_us, line_count; 3430 + int entries, tlb_miss; 3430 3431 3431 3432 crtc = intel_get_crtc_for_pipe(dev, pipe); 3432 3433 if (crtc->fb == NULL || !crtc->enabled) ··· 3439 3438 pixel_size = crtc->fb->bits_per_pixel / 8; 3440 3439 3441 3440 /* Use the small buffer method to calculate plane watermark */ 3442 - entries = ((clock * pixel_size / 1000) * display_latency * 100) / 1000; 3441 + entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; 3442 + tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; 3443 + if (tlb_miss > 0) 3444 + entries += tlb_miss; 3443 3445 entries = DIV_ROUND_UP(entries, display->cacheline_size); 3444 3446 *plane_wm = entries + display->guard_size; 3445 3447 if (*plane_wm > (int)display->max_wm) ··· 3450 3446 3451 3447 /* Use the large buffer method to calculate cursor watermark */ 3452 3448 line_time_us = ((htotal * 1000) / clock); 3453 - line_count = (cursor_latency * 100 / line_time_us + 1000) / 1000; 3449 + line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; 3454 3450 entries = line_count * 64 * pixel_size; 3451 + tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; 3452 + if (tlb_miss > 0) 3453 + entries += tlb_miss; 3455 3454 entries = DIV_ROUND_UP(entries, cursor->cacheline_size); 3456 3455 *cursor_wm = entries + cursor->guard_size; 3457 3456 if (*cursor_wm > (int)cursor->max_wm) ··· 3463 3456 return true; 3464 3457 
} 3465 3458 3459 + /* 3460 + * Check the wm result. 3461 + * 3462 + * If any calculated watermark values is larger than the maximum value that 3463 + * can be programmed into the associated watermark register, that watermark 3464 + * must be disabled. 3465 + */ 3466 + static bool ironlake_check_srwm(struct drm_device *dev, int level, 3467 + int fbc_wm, int display_wm, int cursor_wm, 3468 + const struct intel_watermark_params *display, 3469 + const struct intel_watermark_params *cursor) 3470 + { 3471 + struct drm_i915_private *dev_priv = dev->dev_private; 3472 + 3473 + DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," 3474 + " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); 3475 + 3476 + if (fbc_wm > SNB_FBC_MAX_SRWM) { 3477 + DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", 3478 + fbc_wm, SNB_FBC_MAX_SRWM, level); 3479 + 3480 + /* fbc has it's own way to disable FBC WM */ 3481 + I915_WRITE(DISP_ARB_CTL, 3482 + I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); 3483 + return false; 3484 + } 3485 + 3486 + if (display_wm > display->max_wm) { 3487 + DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", 3488 + display_wm, SNB_DISPLAY_MAX_SRWM, level); 3489 + return false; 3490 + } 3491 + 3492 + if (cursor_wm > cursor->max_wm) { 3493 + DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", 3494 + cursor_wm, SNB_CURSOR_MAX_SRWM, level); 3495 + return false; 3496 + } 3497 + 3498 + if (!(fbc_wm || display_wm || cursor_wm)) { 3499 + DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); 3500 + return false; 3501 + } 3502 + 3503 + return true; 3504 + } 3505 + 3506 + /* 3507 + * Compute watermark values of WM[1-3], 3508 + */ 3509 + static bool ironlake_compute_srwm(struct drm_device *dev, int level, 3510 + int hdisplay, int htotal, 3511 + int pixel_size, int clock, int latency_ns, 3512 + const struct intel_watermark_params *display, 3513 + const struct intel_watermark_params *cursor, 3514 + int 
*fbc_wm, int *display_wm, int *cursor_wm) 3515 + { 3516 + 3517 + unsigned long line_time_us; 3518 + int line_count, line_size; 3519 + int small, large; 3520 + int entries; 3521 + 3522 + if (!latency_ns) { 3523 + *fbc_wm = *display_wm = *cursor_wm = 0; 3524 + return false; 3525 + } 3526 + 3527 + line_time_us = (htotal * 1000) / clock; 3528 + line_count = (latency_ns / line_time_us + 1000) / 1000; 3529 + line_size = hdisplay * pixel_size; 3530 + 3531 + /* Use the minimum of the small and large buffer method for primary */ 3532 + small = ((clock * pixel_size / 1000) * latency_ns) / 1000; 3533 + large = line_count * line_size; 3534 + 3535 + entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); 3536 + *display_wm = entries + display->guard_size; 3537 + 3538 + /* 3539 + * Spec says: 3540 + * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 3541 + */ 3542 + *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; 3543 + 3544 + /* calculate the self-refresh watermark for display cursor */ 3545 + entries = line_count * pixel_size * 64; 3546 + entries = DIV_ROUND_UP(entries, cursor->cacheline_size); 3547 + *cursor_wm = entries + cursor->guard_size; 3548 + 3549 + return ironlake_check_srwm(dev, level, 3550 + *fbc_wm, *display_wm, *cursor_wm, 3551 + display, cursor); 3552 + } 3553 + 3466 3554 static void ironlake_update_wm(struct drm_device *dev, 3467 3555 int planea_clock, int planeb_clock, 3468 - int sr_hdisplay, int sr_htotal, 3556 + int hdisplay, int htotal, 3469 3557 int pixel_size) 3470 3558 { 3471 3559 struct drm_i915_private *dev_priv = dev->dev_private; 3472 - int plane_wm, cursor_wm, enabled; 3473 - int tmp; 3560 + int fbc_wm, plane_wm, cursor_wm, enabled; 3561 + int clock; 3474 3562 3475 3563 enabled = 0; 3476 3564 if (ironlake_compute_wm0(dev, 0, ··· 3600 3498 * Calculate and update the self-refresh watermark only when one 3601 3499 * display plane is used. 
3602 3500 */ 3603 - tmp = 0; 3604 - if (enabled == 1) { 3605 - unsigned long line_time_us; 3606 - int small, large, plane_fbc; 3607 - int sr_clock, entries; 3608 - int line_count, line_size; 3609 - /* Read the self-refresh latency. The unit is 0.5us */ 3610 - int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; 3501 + I915_WRITE(WM3_LP_ILK, 0); 3502 + I915_WRITE(WM2_LP_ILK, 0); 3503 + I915_WRITE(WM1_LP_ILK, 0); 3611 3504 3612 - sr_clock = planea_clock ? planea_clock : planeb_clock; 3613 - line_time_us = (sr_htotal * 1000) / sr_clock; 3505 + if (enabled != 1) 3506 + return; 3614 3507 3615 - /* Use ns/us then divide to preserve precision */ 3616 - line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) 3617 - / 1000; 3618 - line_size = sr_hdisplay * pixel_size; 3508 + clock = planea_clock ? planea_clock : planeb_clock; 3619 3509 3620 - /* Use the minimum of the small and large buffer method for primary */ 3621 - small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000; 3622 - large = line_count * line_size; 3510 + /* WM1 */ 3511 + if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, 3512 + clock, ILK_READ_WM1_LATENCY() * 500, 3513 + &ironlake_display_srwm_info, 3514 + &ironlake_cursor_srwm_info, 3515 + &fbc_wm, &plane_wm, &cursor_wm)) 3516 + return; 3623 3517 3624 - entries = DIV_ROUND_UP(min(small, large), 3625 - ironlake_display_srwm_info.cacheline_size); 3518 + I915_WRITE(WM1_LP_ILK, 3519 + WM1_LP_SR_EN | 3520 + (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | 3521 + (fbc_wm << WM1_LP_FBC_SHIFT) | 3522 + (plane_wm << WM1_LP_SR_SHIFT) | 3523 + cursor_wm); 3626 3524 3627 - plane_fbc = entries * 64; 3628 - plane_fbc = DIV_ROUND_UP(plane_fbc, line_size); 3525 + /* WM2 */ 3526 + if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size, 3527 + clock, ILK_READ_WM2_LATENCY() * 500, 3528 + &ironlake_display_srwm_info, 3529 + &ironlake_cursor_srwm_info, 3530 + &fbc_wm, &plane_wm, &cursor_wm)) 3531 + return; 3629 3532 3630 - 
plane_wm = entries + ironlake_display_srwm_info.guard_size; 3631 - if (plane_wm > (int)ironlake_display_srwm_info.max_wm) 3632 - plane_wm = ironlake_display_srwm_info.max_wm; 3633 - 3634 - /* calculate the self-refresh watermark for display cursor */ 3635 - entries = line_count * pixel_size * 64; 3636 - entries = DIV_ROUND_UP(entries, 3637 - ironlake_cursor_srwm_info.cacheline_size); 3638 - 3639 - cursor_wm = entries + ironlake_cursor_srwm_info.guard_size; 3640 - if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm) 3641 - cursor_wm = ironlake_cursor_srwm_info.max_wm; 3642 - 3643 - /* configure watermark and enable self-refresh */ 3644 - tmp = (WM1_LP_SR_EN | 3645 - (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | 3646 - (plane_fbc << WM1_LP_FBC_SHIFT) | 3647 - (plane_wm << WM1_LP_SR_SHIFT) | 3648 - cursor_wm); 3649 - DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d," 3650 - " cursor %d\n", plane_wm, plane_fbc, cursor_wm); 3651 - } 3652 - I915_WRITE(WM1_LP_ILK, tmp); 3653 - /* XXX setup WM2 and WM3 */ 3654 - } 3655 - 3656 - /* 3657 - * Check the wm result. 3658 - * 3659 - * If any calculated watermark values is larger than the maximum value that 3660 - * can be programmed into the associated watermark register, that watermark 3661 - * must be disabled. 3662 - * 3663 - * Also return true if all of those watermark values is 0, which is set by 3664 - * sandybridge_compute_srwm, to indicate the latency is ZERO. 
3665 - */ 3666 - static bool sandybridge_check_srwm(struct drm_device *dev, int level, 3667 - int fbc_wm, int display_wm, int cursor_wm) 3668 - { 3669 - struct drm_i915_private *dev_priv = dev->dev_private; 3670 - 3671 - DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," 3672 - " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); 3673 - 3674 - if (fbc_wm > SNB_FBC_MAX_SRWM) { 3675 - DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", 3676 - fbc_wm, SNB_FBC_MAX_SRWM, level); 3677 - 3678 - /* fbc has it's own way to disable FBC WM */ 3679 - I915_WRITE(DISP_ARB_CTL, 3680 - I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); 3681 - return false; 3682 - } 3683 - 3684 - if (display_wm > SNB_DISPLAY_MAX_SRWM) { 3685 - DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", 3686 - display_wm, SNB_DISPLAY_MAX_SRWM, level); 3687 - return false; 3688 - } 3689 - 3690 - if (cursor_wm > SNB_CURSOR_MAX_SRWM) { 3691 - DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", 3692 - cursor_wm, SNB_CURSOR_MAX_SRWM, level); 3693 - return false; 3694 - } 3695 - 3696 - if (!(fbc_wm || display_wm || cursor_wm)) { 3697 - DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); 3698 - return false; 3699 - } 3700 - 3701 - return true; 3702 - } 3703 - 3704 - /* 3705 - * Compute watermark values of WM[1-3], 3706 - */ 3707 - static bool sandybridge_compute_srwm(struct drm_device *dev, int level, 3708 - int hdisplay, int htotal, int pixel_size, 3709 - int clock, int latency_ns, int *fbc_wm, 3710 - int *display_wm, int *cursor_wm) 3711 - { 3712 - 3713 - unsigned long line_time_us; 3714 - int small, large; 3715 - int entries; 3716 - int line_count, line_size; 3717 - 3718 - if (!latency_ns) { 3719 - *fbc_wm = *display_wm = *cursor_wm = 0; 3720 - return false; 3721 - } 3722 - 3723 - line_time_us = (htotal * 1000) / clock; 3724 - line_count = (latency_ns / line_time_us + 1000) / 1000; 3725 - line_size = hdisplay * pixel_size; 3726 
- 3727 - /* Use the minimum of the small and large buffer method for primary */ 3728 - small = ((clock * pixel_size / 1000) * latency_ns) / 1000; 3729 - large = line_count * line_size; 3730 - 3731 - entries = DIV_ROUND_UP(min(small, large), 3732 - sandybridge_display_srwm_info.cacheline_size); 3733 - *display_wm = entries + sandybridge_display_srwm_info.guard_size; 3533 + I915_WRITE(WM2_LP_ILK, 3534 + WM2_LP_EN | 3535 + (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | 3536 + (fbc_wm << WM1_LP_FBC_SHIFT) | 3537 + (plane_wm << WM1_LP_SR_SHIFT) | 3538 + cursor_wm); 3734 3539 3735 3540 /* 3736 - * Spec said: 3737 - * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 3541 + * WM3 is unsupported on ILK, probably because we don't have latency 3542 + * data for that power state 3738 3543 */ 3739 - *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; 3740 - 3741 - /* calculate the self-refresh watermark for display cursor */ 3742 - entries = line_count * pixel_size * 64; 3743 - entries = DIV_ROUND_UP(entries, 3744 - sandybridge_cursor_srwm_info.cacheline_size); 3745 - *cursor_wm = entries + sandybridge_cursor_srwm_info.guard_size; 3746 - 3747 - return sandybridge_check_srwm(dev, level, 3748 - *fbc_wm, *display_wm, *cursor_wm); 3749 3544 } 3750 3545 3751 3546 static void sandybridge_update_wm(struct drm_device *dev, ··· 3651 3652 int pixel_size) 3652 3653 { 3653 3654 struct drm_i915_private *dev_priv = dev->dev_private; 3654 - int latency = SNB_READ_WM0_LATENCY(); 3655 + int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ 3655 3656 int fbc_wm, plane_wm, cursor_wm, enabled; 3656 3657 int clock; 3657 3658 ··· 3700 3701 clock = planea_clock ? 
planea_clock : planeb_clock; 3701 3702 3702 3703 /* WM1 */ 3703 - if (!sandybridge_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, 3704 - clock, SNB_READ_WM1_LATENCY() * 500, 3705 - &fbc_wm, &plane_wm, &cursor_wm)) 3704 + if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, 3705 + clock, SNB_READ_WM1_LATENCY() * 500, 3706 + &sandybridge_display_srwm_info, 3707 + &sandybridge_cursor_srwm_info, 3708 + &fbc_wm, &plane_wm, &cursor_wm)) 3706 3709 return; 3707 3710 3708 3711 I915_WRITE(WM1_LP_ILK, ··· 3715 3714 cursor_wm); 3716 3715 3717 3716 /* WM2 */ 3718 - if (!sandybridge_compute_srwm(dev, 2, 3719 - hdisplay, htotal, pixel_size, 3720 - clock, SNB_READ_WM2_LATENCY() * 500, 3721 - &fbc_wm, &plane_wm, &cursor_wm)) 3717 + if (!ironlake_compute_srwm(dev, 2, 3718 + hdisplay, htotal, pixel_size, 3719 + clock, SNB_READ_WM2_LATENCY() * 500, 3720 + &sandybridge_display_srwm_info, 3721 + &sandybridge_cursor_srwm_info, 3722 + &fbc_wm, &plane_wm, &cursor_wm)) 3722 3723 return; 3723 3724 3724 3725 I915_WRITE(WM2_LP_ILK, ··· 3731 3728 cursor_wm); 3732 3729 3733 3730 /* WM3 */ 3734 - if (!sandybridge_compute_srwm(dev, 3, 3735 - hdisplay, htotal, pixel_size, 3736 - clock, SNB_READ_WM3_LATENCY() * 500, 3737 - &fbc_wm, &plane_wm, &cursor_wm)) 3731 + if (!ironlake_compute_srwm(dev, 3, 3732 + hdisplay, htotal, pixel_size, 3733 + clock, SNB_READ_WM3_LATENCY() * 500, 3734 + &sandybridge_display_srwm_info, 3735 + &sandybridge_cursor_srwm_info, 3736 + &fbc_wm, &plane_wm, &cursor_wm)) 3738 3737 return; 3739 3738 3740 3739 I915_WRITE(WM3_LP_ILK, ··· 3956 3951 int lane = 0, link_bw, bpp; 3957 3952 /* CPU eDP doesn't require FDI link, so just set DP M/N 3958 3953 according to current link config */ 3959 - if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) { 3954 + if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 3960 3955 target_clock = mode->clock; 3961 3956 intel_edp_link_config(has_edp_encoder, 3962 3957 &lane, &link_bw); ··· 5043 5038 
drm_i915_private_t *dev_priv = dev->dev_private; 5044 5039 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5045 5040 int pipe = intel_crtc->pipe; 5046 - int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 5047 - int dpll = I915_READ(dpll_reg); 5041 + int dpll_reg = DPLL(pipe); 5042 + int dpll; 5048 5043 5049 5044 if (HAS_PCH_SPLIT(dev)) 5050 5045 return; ··· 5052 5047 if (!dev_priv->lvds_downclock_avail) 5053 5048 return; 5054 5049 5050 + dpll = I915_READ(dpll_reg); 5055 5051 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { 5056 5052 DRM_DEBUG_DRIVER("upclocking LVDS\n"); 5057 5053 5058 5054 /* Unlock panel regs */ 5059 - I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | 5060 - PANEL_UNLOCK_REGS); 5055 + I915_WRITE(PP_CONTROL, 5056 + I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); 5061 5057 5062 5058 dpll &= ~DISPLAY_RATE_SELECT_FPA1; 5063 5059 I915_WRITE(dpll_reg, dpll); 5064 - dpll = I915_READ(dpll_reg); 5060 + POSTING_READ(dpll_reg); 5065 5061 intel_wait_for_vblank(dev, pipe); 5062 + 5066 5063 dpll = I915_READ(dpll_reg); 5067 5064 if (dpll & DISPLAY_RATE_SELECT_FPA1) 5068 5065 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); ··· 5809 5802 encoder->base.possible_clones = 5810 5803 intel_encoder_clones(dev, encoder->clone_mask); 5811 5804 } 5805 + 5806 + intel_panel_setup_backlight(dev); 5812 5807 } 5813 5808 5814 5809 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) ··· 6154 6145 6155 6146 void gen6_enable_rps(struct drm_i915_private *dev_priv) 6156 6147 { 6148 + u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 6149 + u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 6150 + u32 pcu_mbox; 6151 + int cur_freq, min_freq, max_freq; 6157 6152 int i; 6158 6153 6159 6154 /* Here begins a magic sequence of register writes to enable ··· 6228 6215 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 6229 6216 500)) 6230 6217 DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); 6218 + 6219 + min_freq = (rp_state_cap & 
0xff0000) >> 16; 6220 + max_freq = rp_state_cap & 0xff; 6221 + cur_freq = (gt_perf_status & 0xff00) >> 8; 6222 + 6223 + /* Check for overclock support */ 6224 + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 6225 + 500)) 6226 + DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); 6227 + I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); 6228 + pcu_mbox = I915_READ(GEN6_PCODE_DATA); 6229 + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 6230 + 500)) 6231 + DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); 6232 + if (pcu_mbox & (1<<31)) { /* OC supported */ 6233 + max_freq = pcu_mbox & 0xff; 6234 + DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 100); 6235 + } 6236 + 6237 + /* In units of 100MHz */ 6238 + dev_priv->max_delay = max_freq; 6239 + dev_priv->min_delay = min_freq; 6240 + dev_priv->cur_delay = cur_freq; 6231 6241 6232 6242 /* requires MSI enabled */ 6233 6243 I915_WRITE(GEN6_PMIER, ··· 6422 6386 } else if (IS_I830(dev)) { 6423 6387 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 6424 6388 } 6425 - 6426 - /* 6427 - * GPU can automatically power down the render unit if given a page 6428 - * to save state. 6429 - */ 6430 - if (IS_IRONLAKE_M(dev) && 0) { /* XXX causes a failure during suspend */ 6431 - if (dev_priv->renderctx == NULL) 6432 - dev_priv->renderctx = intel_alloc_context_page(dev); 6433 - if (dev_priv->renderctx) { 6434 - struct drm_i915_gem_object *obj = dev_priv->renderctx; 6435 - if (BEGIN_LP_RING(4) == 0) { 6436 - OUT_RING(MI_SET_CONTEXT); 6437 - OUT_RING(obj->gtt_offset | 6438 - MI_MM_SPACE_GTT | 6439 - MI_SAVE_EXT_STATE_EN | 6440 - MI_RESTORE_EXT_STATE_EN | 6441 - MI_RESTORE_INHIBIT); 6442 - OUT_RING(MI_NOOP); 6443 - OUT_RING(MI_FLUSH); 6444 - ADVANCE_LP_RING(); 6445 - } 6446 - } else 6447 - DRM_DEBUG_KMS("Failed to allocate render context." 
6448 - "Disable RC6\n"); 6449 - } 6450 - 6451 - if (IS_GEN4(dev) && IS_MOBILE(dev)) { 6452 - if (dev_priv->pwrctx == NULL) 6453 - dev_priv->pwrctx = intel_alloc_context_page(dev); 6454 - if (dev_priv->pwrctx) { 6455 - struct drm_i915_gem_object *obj = dev_priv->pwrctx; 6456 - I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN); 6457 - I915_WRITE(MCHBAR_RENDER_STANDBY, 6458 - I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); 6459 - } 6460 - } 6461 6389 } 6462 6390 6463 6391 void intel_disable_clock_gating(struct drm_device *dev) ··· 6449 6449 drm_gem_object_unreference(&obj->base); 6450 6450 dev_priv->pwrctx = NULL; 6451 6451 } 6452 + } 6453 + 6454 + static void ironlake_disable_rc6(struct drm_device *dev) 6455 + { 6456 + struct drm_i915_private *dev_priv = dev->dev_private; 6457 + 6458 + /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ 6459 + I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); 6460 + wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), 6461 + 10); 6462 + POSTING_READ(CCID); 6463 + I915_WRITE(PWRCTXA, 0); 6464 + POSTING_READ(PWRCTXA); 6465 + I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 6466 + POSTING_READ(RSTDBYCTL); 6467 + i915_gem_object_unpin(dev_priv->renderctx); 6468 + drm_gem_object_unreference(&dev_priv->renderctx->base); 6469 + dev_priv->renderctx = NULL; 6470 + i915_gem_object_unpin(dev_priv->pwrctx); 6471 + drm_gem_object_unreference(&dev_priv->pwrctx->base); 6472 + dev_priv->pwrctx = NULL; 6473 + } 6474 + 6475 + void ironlake_enable_rc6(struct drm_device *dev) 6476 + { 6477 + struct drm_i915_private *dev_priv = dev->dev_private; 6478 + int ret; 6479 + 6480 + /* 6481 + * GPU can automatically power down the render unit if given a page 6482 + * to save state. 
6483 + */ 6484 + ret = BEGIN_LP_RING(6); 6485 + if (ret) { 6486 + ironlake_disable_rc6(dev); 6487 + return; 6488 + } 6489 + OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); 6490 + OUT_RING(MI_SET_CONTEXT); 6491 + OUT_RING(dev_priv->renderctx->gtt_offset | 6492 + MI_MM_SPACE_GTT | 6493 + MI_SAVE_EXT_STATE_EN | 6494 + MI_RESTORE_EXT_STATE_EN | 6495 + MI_RESTORE_INHIBIT); 6496 + OUT_RING(MI_SUSPEND_FLUSH); 6497 + OUT_RING(MI_NOOP); 6498 + OUT_RING(MI_FLUSH); 6499 + ADVANCE_LP_RING(); 6500 + 6501 + I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); 6502 + I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 6452 6503 } 6453 6504 6454 6505 /* Set up chip specific display functions */ ··· 6716 6665 dev->mode_config.max_width = 8192; 6717 6666 dev->mode_config.max_height = 8192; 6718 6667 } 6719 - 6720 - /* set memory base */ 6721 - if (IS_GEN2(dev)) 6722 - dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); 6723 - else 6724 - dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2); 6668 + dev->mode_config.fb_base = dev->agp->base; 6725 6669 6726 6670 if (IS_MOBILE(dev) || !IS_GEN2(dev)) 6727 6671 dev_priv->num_pipe = 2; ··· 6744 6698 if (IS_GEN6(dev)) 6745 6699 gen6_enable_rps(dev_priv); 6746 6700 6701 + if (IS_IRONLAKE_M(dev)) { 6702 + dev_priv->renderctx = intel_alloc_context_page(dev); 6703 + if (!dev_priv->renderctx) 6704 + goto skip_rc6; 6705 + dev_priv->pwrctx = intel_alloc_context_page(dev); 6706 + if (!dev_priv->pwrctx) { 6707 + i915_gem_object_unpin(dev_priv->renderctx); 6708 + drm_gem_object_unreference(&dev_priv->renderctx->base); 6709 + dev_priv->renderctx = NULL; 6710 + goto skip_rc6; 6711 + } 6712 + ironlake_enable_rc6(dev); 6713 + } 6714 + 6715 + skip_rc6: 6747 6716 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 6748 6717 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 6749 6718 (unsigned long)dev); ··· 6795 6734 if (IS_GEN6(dev)) 6796 6735 gen6_disable_rps(dev); 6797 6736 6798 - 
intel_disable_clock_gating(dev); 6737 + if (IS_IRONLAKE_M(dev)) 6738 + ironlake_disable_rc6(dev); 6799 6739 6800 6740 mutex_unlock(&dev->struct_mutex); 6801 6741
+39 -11
drivers/gpu/drm/i915/intel_dp.c
··· 1153 1153 static uint32_t 1154 1154 intel_gen6_edp_signal_levels(uint8_t train_set) 1155 1155 { 1156 - switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) { 1156 + int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1157 + DP_TRAIN_PRE_EMPHASIS_MASK); 1158 + switch (signal_levels) { 1157 1159 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 1158 - return EDP_LINK_TRAIN_400MV_0DB_SNB_B; 1160 + case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 1161 + return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 1162 + case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 1163 + return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 1159 1164 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 1160 - return EDP_LINK_TRAIN_400MV_6DB_SNB_B; 1165 + case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 1166 + return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 1161 1167 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 1162 - return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B; 1168 + case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 1169 + return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 1163 1170 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 1164 - return EDP_LINK_TRAIN_800MV_0DB_SNB_B; 1171 + case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: 1172 + return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 1165 1173 default: 1166 - DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n"); 1167 - return EDP_LINK_TRAIN_400MV_0DB_SNB_B; 1174 + DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 1175 + "0x%x\n", signal_levels); 1176 + return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 1168 1177 } 1169 1178 } 1170 1179 ··· 1343 1334 struct drm_device *dev = intel_dp->base.base.dev; 1344 1335 struct drm_i915_private *dev_priv = dev->dev_private; 1345 1336 bool channel_eq = false; 1346 - int tries; 1337 + int tries, cr_tries; 1347 1338 u32 reg; 1348 1339 uint32_t DP = intel_dp->DP; 1349 1340 1350 1341 /* channel 
equalization */ 1351 1342 tries = 0; 1343 + cr_tries = 0; 1352 1344 channel_eq = false; 1353 1345 for (;;) { 1354 1346 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1355 1347 uint32_t signal_levels; 1348 + 1349 + if (cr_tries > 5) { 1350 + DRM_ERROR("failed to train DP, aborting\n"); 1351 + intel_dp_link_down(intel_dp); 1352 + break; 1353 + } 1356 1354 1357 1355 if (IS_GEN6(dev) && is_edp(intel_dp)) { 1358 1356 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); ··· 1383 1367 if (!intel_dp_get_link_status(intel_dp)) 1384 1368 break; 1385 1369 1370 + /* Make sure clock is still ok */ 1371 + if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { 1372 + intel_dp_start_link_train(intel_dp); 1373 + cr_tries++; 1374 + continue; 1375 + } 1376 + 1386 1377 if (intel_channel_eq_ok(intel_dp)) { 1387 1378 channel_eq = true; 1388 1379 break; 1389 1380 } 1390 1381 1391 - /* Try 5 times */ 1392 - if (tries > 5) 1393 - break; 1382 + /* Try 5 times, then try clock recovery if that fails */ 1383 + if (tries > 5) { 1384 + intel_dp_link_down(intel_dp); 1385 + intel_dp_start_link_train(intel_dp); 1386 + tries = 0; 1387 + cr_tries++; 1388 + continue; 1389 + } 1394 1390 1395 1391 /* Compute new intel_dp->train_set as requested by target */ 1396 1392 intel_get_adjust_train(intel_dp);
+3
drivers/gpu/drm/i915/intel_drv.h
··· 257 257 extern u32 intel_panel_get_max_backlight(struct drm_device *dev); 258 258 extern u32 intel_panel_get_backlight(struct drm_device *dev); 259 259 extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); 260 + extern void intel_panel_setup_backlight(struct drm_device *dev); 261 + extern void intel_panel_enable_backlight(struct drm_device *dev); 262 + extern void intel_panel_disable_backlight(struct drm_device *dev); 260 263 261 264 extern void intel_crtc_load_lut(struct drm_crtc *crtc); 262 265 extern void intel_encoder_prepare (struct drm_encoder *encoder);
+9 -11
drivers/gpu/drm/i915/intel_fb.c
··· 62 62 struct drm_fb_helper_surface_size *sizes) 63 63 { 64 64 struct drm_device *dev = ifbdev->helper.dev; 65 + struct drm_i915_private *dev_priv = dev->dev_private; 65 66 struct fb_info *info; 66 67 struct drm_framebuffer *fb; 67 68 struct drm_mode_fb_cmd mode_cmd; ··· 78 77 mode_cmd.height = sizes->surface_height; 79 78 80 79 mode_cmd.bpp = sizes->surface_bpp; 81 - mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); 80 + mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64); 82 81 mode_cmd.depth = sizes->surface_depth; 83 82 84 83 size = mode_cmd.pitch * mode_cmd.height; ··· 121 120 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; 122 121 info->fbops = &intelfb_ops; 123 122 123 + ret = fb_alloc_cmap(&info->cmap, 256, 0); 124 + if (ret) { 125 + ret = -ENOMEM; 126 + goto out_unpin; 127 + } 124 128 /* setup aperture base/size for vesafb takeover */ 125 129 info->apertures = alloc_apertures(1); 126 130 if (!info->apertures) { ··· 133 127 goto out_unpin; 134 128 } 135 129 info->apertures->ranges[0].base = dev->mode_config.fb_base; 136 - if (!IS_GEN2(dev)) 137 - info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2); 138 - else 139 - info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); 130 + info->apertures->ranges[0].size = 131 + dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 140 132 141 133 info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; 142 134 info->fix.smem_len = size; ··· 142 138 info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size); 143 139 if (!info->screen_base) { 144 140 ret = -ENOSPC; 145 - goto out_unpin; 146 - } 147 - 148 - ret = fb_alloc_cmap(&info->cmap, 256, 0); 149 - if (ret) { 150 - ret = -ENOMEM; 151 141 goto out_unpin; 152 142 } 153 143 info->screen_size = size;
+6 -8
drivers/gpu/drm/i915/intel_lvds.c
··· 106 106 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); 107 107 POSTING_READ(lvds_reg); 108 108 109 - intel_panel_set_backlight(dev, dev_priv->backlight_level); 109 + intel_panel_enable_backlight(dev); 110 110 } 111 111 112 112 static void intel_lvds_disable(struct intel_lvds *intel_lvds) ··· 123 123 lvds_reg = LVDS; 124 124 } 125 125 126 - dev_priv->backlight_level = intel_panel_get_backlight(dev); 127 - intel_panel_set_backlight(dev, 0); 126 + intel_panel_disable_backlight(dev); 128 127 129 128 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); 130 129 ··· 374 375 } 375 376 376 377 out: 378 + if ((pfit_control & PFIT_ENABLE) == 0) { 379 + pfit_control = 0; 380 + pfit_pgm_ratios = 0; 381 + } 377 382 if (pfit_control != intel_lvds->pfit_control || 378 383 pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { 379 384 intel_lvds->pfit_control = pfit_control; ··· 400 397 struct drm_device *dev = encoder->dev; 401 398 struct drm_i915_private *dev_priv = dev->dev_private; 402 399 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 403 - 404 - dev_priv->backlight_level = intel_panel_get_backlight(dev); 405 400 406 401 /* We try to do the minimum that is necessary in order to unlock 407 402 * the registers for mode setting. ··· 430 429 struct drm_device *dev = encoder->dev; 431 430 struct drm_i915_private *dev_priv = dev->dev_private; 432 431 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 433 - 434 - if (dev_priv->backlight_level == 0) 435 - dev_priv->backlight_level = intel_panel_get_max_backlight(dev); 436 432 437 433 /* Undo any unlocking done in prepare to prevent accidental 438 434 * adjustment of the registers.
+31
drivers/gpu/drm/i915/intel_panel.c
··· 250 250 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; 251 251 I915_WRITE(BLC_PWM_CTL, tmp | level); 252 252 } 253 + 254 + void intel_panel_disable_backlight(struct drm_device *dev) 255 + { 256 + struct drm_i915_private *dev_priv = dev->dev_private; 257 + 258 + if (dev_priv->backlight_enabled) { 259 + dev_priv->backlight_level = intel_panel_get_backlight(dev); 260 + dev_priv->backlight_enabled = false; 261 + } 262 + 263 + intel_panel_set_backlight(dev, 0); 264 + } 265 + 266 + void intel_panel_enable_backlight(struct drm_device *dev) 267 + { 268 + struct drm_i915_private *dev_priv = dev->dev_private; 269 + 270 + if (dev_priv->backlight_level == 0) 271 + dev_priv->backlight_level = intel_panel_get_max_backlight(dev); 272 + 273 + intel_panel_set_backlight(dev, dev_priv->backlight_level); 274 + dev_priv->backlight_enabled = true; 275 + } 276 + 277 + void intel_panel_setup_backlight(struct drm_device *dev) 278 + { 279 + struct drm_i915_private *dev_priv = dev->dev_private; 280 + 281 + dev_priv->backlight_level = intel_panel_get_max_backlight(dev); 282 + dev_priv->backlight_enabled = dev_priv->backlight_level != 0; 283 + }
+187 -74
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 48 48 return seqno; 49 49 } 50 50 51 - static void 51 + static int 52 52 render_ring_flush(struct intel_ring_buffer *ring, 53 53 u32 invalidate_domains, 54 54 u32 flush_domains) ··· 56 56 struct drm_device *dev = ring->dev; 57 57 drm_i915_private_t *dev_priv = dev->dev_private; 58 58 u32 cmd; 59 + int ret; 59 60 60 61 #if WATCH_EXEC 61 62 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, ··· 117 116 #if WATCH_EXEC 118 117 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); 119 118 #endif 120 - if (intel_ring_begin(ring, 2) == 0) { 121 - intel_ring_emit(ring, cmd); 122 - intel_ring_emit(ring, MI_NOOP); 123 - intel_ring_advance(ring); 124 - } 119 + ret = intel_ring_begin(ring, 2); 120 + if (ret) 121 + return ret; 122 + 123 + intel_ring_emit(ring, cmd); 124 + intel_ring_emit(ring, MI_NOOP); 125 + intel_ring_advance(ring); 125 126 } 127 + 128 + return 0; 126 129 } 127 130 128 131 static void ring_write_tail(struct intel_ring_buffer *ring, ··· 485 480 return pc->cpu_page[0]; 486 481 } 487 482 483 + static void 484 + ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask) 485 + { 486 + dev_priv->gt_irq_mask &= ~mask; 487 + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 488 + POSTING_READ(GTIMR); 489 + } 490 + 491 + static void 492 + ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask) 493 + { 494 + dev_priv->gt_irq_mask |= mask; 495 + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 496 + POSTING_READ(GTIMR); 497 + } 498 + 499 + static void 500 + i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) 501 + { 502 + dev_priv->irq_mask &= ~mask; 503 + I915_WRITE(IMR, dev_priv->irq_mask); 504 + POSTING_READ(IMR); 505 + } 506 + 507 + static void 508 + i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) 509 + { 510 + dev_priv->irq_mask |= mask; 511 + I915_WRITE(IMR, dev_priv->irq_mask); 512 + POSTING_READ(IMR); 513 + } 514 + 488 515 static bool 489 516 render_ring_get_irq(struct intel_ring_buffer *ring) 490 517 { 491 518 struct drm_device *dev = 
ring->dev; 519 + drm_i915_private_t *dev_priv = dev->dev_private; 492 520 493 521 if (!dev->irq_enabled) 494 522 return false; 495 523 496 - if (atomic_inc_return(&ring->irq_refcount) == 1) { 497 - drm_i915_private_t *dev_priv = dev->dev_private; 498 - unsigned long irqflags; 499 - 500 - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 524 + spin_lock(&ring->irq_lock); 525 + if (ring->irq_refcount++ == 0) { 501 526 if (HAS_PCH_SPLIT(dev)) 502 - ironlake_enable_graphics_irq(dev_priv, 503 - GT_PIPE_NOTIFY | GT_USER_INTERRUPT); 527 + ironlake_enable_irq(dev_priv, 528 + GT_PIPE_NOTIFY | GT_USER_INTERRUPT); 504 529 else 505 530 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 506 - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 507 531 } 532 + spin_unlock(&ring->irq_lock); 508 533 509 534 return true; 510 535 } ··· 543 508 render_ring_put_irq(struct intel_ring_buffer *ring) 544 509 { 545 510 struct drm_device *dev = ring->dev; 511 + drm_i915_private_t *dev_priv = dev->dev_private; 546 512 547 - if (atomic_dec_and_test(&ring->irq_refcount)) { 548 - drm_i915_private_t *dev_priv = dev->dev_private; 549 - unsigned long irqflags; 550 - 551 - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 513 + spin_lock(&ring->irq_lock); 514 + if (--ring->irq_refcount == 0) { 552 515 if (HAS_PCH_SPLIT(dev)) 553 - ironlake_disable_graphics_irq(dev_priv, 554 - GT_USER_INTERRUPT | 555 - GT_PIPE_NOTIFY); 516 + ironlake_disable_irq(dev_priv, 517 + GT_USER_INTERRUPT | 518 + GT_PIPE_NOTIFY); 556 519 else 557 520 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 558 - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 559 521 } 522 + spin_unlock(&ring->irq_lock); 560 523 } 561 524 562 525 void intel_ring_setup_status_page(struct intel_ring_buffer *ring) ··· 567 534 POSTING_READ(mmio); 568 535 } 569 536 570 - static void 537 + static int 571 538 bsd_ring_flush(struct intel_ring_buffer *ring, 572 539 u32 invalidate_domains, 573 540 u32 flush_domains) 574 541 { 575 - if ((flush_domains & 
I915_GEM_DOMAIN_RENDER) == 0) 576 - return; 542 + int ret; 577 543 578 - if (intel_ring_begin(ring, 2) == 0) { 579 - intel_ring_emit(ring, MI_FLUSH); 580 - intel_ring_emit(ring, MI_NOOP); 581 - intel_ring_advance(ring); 582 - } 544 + if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 545 + return 0; 546 + 547 + ret = intel_ring_begin(ring, 2); 548 + if (ret) 549 + return ret; 550 + 551 + intel_ring_emit(ring, MI_FLUSH); 552 + intel_ring_emit(ring, MI_NOOP); 553 + intel_ring_advance(ring); 554 + return 0; 583 555 } 584 556 585 557 static int ··· 615 577 ring_get_irq(struct intel_ring_buffer *ring, u32 flag) 616 578 { 617 579 struct drm_device *dev = ring->dev; 580 + drm_i915_private_t *dev_priv = dev->dev_private; 618 581 619 582 if (!dev->irq_enabled) 620 583 return false; 621 584 622 - if (atomic_inc_return(&ring->irq_refcount) == 1) { 623 - drm_i915_private_t *dev_priv = dev->dev_private; 624 - unsigned long irqflags; 625 - 626 - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 627 - ironlake_enable_graphics_irq(dev_priv, flag); 628 - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 629 - } 585 + spin_lock(&ring->irq_lock); 586 + if (ring->irq_refcount++ == 0) 587 + ironlake_enable_irq(dev_priv, flag); 588 + spin_unlock(&ring->irq_lock); 630 589 631 590 return true; 632 591 } ··· 632 597 ring_put_irq(struct intel_ring_buffer *ring, u32 flag) 633 598 { 634 599 struct drm_device *dev = ring->dev; 600 + drm_i915_private_t *dev_priv = dev->dev_private; 635 601 636 - if (atomic_dec_and_test(&ring->irq_refcount)) { 637 - drm_i915_private_t *dev_priv = dev->dev_private; 638 - unsigned long irqflags; 602 + spin_lock(&ring->irq_lock); 603 + if (--ring->irq_refcount == 0) 604 + ironlake_disable_irq(dev_priv, flag); 605 + spin_unlock(&ring->irq_lock); 606 + } 639 607 640 - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 641 - ironlake_disable_graphics_irq(dev_priv, flag); 642 - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 608 + static bool 609 + 
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) 610 + { 611 + struct drm_device *dev = ring->dev; 612 + drm_i915_private_t *dev_priv = dev->dev_private; 613 + 614 + if (!dev->irq_enabled) 615 + return false; 616 + 617 + spin_lock(&ring->irq_lock); 618 + if (ring->irq_refcount++ == 0) { 619 + ring->irq_mask &= ~rflag; 620 + I915_WRITE_IMR(ring, ring->irq_mask); 621 + ironlake_enable_irq(dev_priv, gflag); 643 622 } 623 + spin_unlock(&ring->irq_lock); 624 + 625 + return true; 626 + } 627 + 628 + static void 629 + gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) 630 + { 631 + struct drm_device *dev = ring->dev; 632 + drm_i915_private_t *dev_priv = dev->dev_private; 633 + 634 + spin_lock(&ring->irq_lock); 635 + if (--ring->irq_refcount == 0) { 636 + ring->irq_mask |= rflag; 637 + I915_WRITE_IMR(ring, ring->irq_mask); 638 + ironlake_disable_irq(dev_priv, gflag); 639 + } 640 + spin_unlock(&ring->irq_lock); 644 641 } 645 642 646 643 static bool ··· 815 748 INIT_LIST_HEAD(&ring->request_list); 816 749 INIT_LIST_HEAD(&ring->gpu_write_list); 817 750 751 + spin_lock_init(&ring->irq_lock); 752 + ring->irq_mask = ~0; 753 + 818 754 if (I915_NEED_GFX_HWS(dev)) { 819 755 ret = init_status_page(ring); 820 756 if (ret) ··· 854 784 ret = ring->init(ring); 855 785 if (ret) 856 786 goto err_unmap; 787 + 788 + /* Workaround an erratum on the i830 which causes a hang if 789 + * the TAIL pointer points to within the last 2 cachelines 790 + * of the buffer. 
791 + */ 792 + ring->effective_size = ring->size; 793 + if (IS_I830(ring->dev)) 794 + ring->effective_size -= 128; 857 795 858 796 return 0; 859 797 ··· 905 827 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) 906 828 { 907 829 unsigned int *virt; 908 - int rem; 909 - rem = ring->size - ring->tail; 830 + int rem = ring->size - ring->tail; 910 831 911 832 if (ring->space < rem) { 912 833 int ret = intel_wait_ring_buffer(ring, rem); ··· 972 895 int n = 4*num_dwords; 973 896 int ret; 974 897 975 - if (unlikely(ring->tail + n > ring->size)) { 898 + if (unlikely(ring->tail + n > ring->effective_size)) { 976 899 ret = intel_wrap_ring_buffer(ring); 977 900 if (unlikely(ret)) 978 901 return ret; ··· 1050 973 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); 1051 974 } 1052 975 1053 - static void gen6_ring_flush(struct intel_ring_buffer *ring, 1054 - u32 invalidate_domains, 1055 - u32 flush_domains) 976 + static int gen6_ring_flush(struct intel_ring_buffer *ring, 977 + u32 invalidate_domains, 978 + u32 flush_domains) 1056 979 { 1057 - if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 1058 - return; 980 + int ret; 1059 981 1060 - if (intel_ring_begin(ring, 4) == 0) { 1061 - intel_ring_emit(ring, MI_FLUSH_DW); 1062 - intel_ring_emit(ring, 0); 1063 - intel_ring_emit(ring, 0); 1064 - intel_ring_emit(ring, 0); 1065 - intel_ring_advance(ring); 1066 - } 982 + if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 983 + return 0; 984 + 985 + ret = intel_ring_begin(ring, 4); 986 + if (ret) 987 + return ret; 988 + 989 + intel_ring_emit(ring, MI_FLUSH_DW); 990 + intel_ring_emit(ring, 0); 991 + intel_ring_emit(ring, 0); 992 + intel_ring_emit(ring, 0); 993 + intel_ring_advance(ring); 994 + return 0; 1067 995 } 1068 996 1069 997 static int ··· 1090 1008 } 1091 1009 1092 1010 static bool 1011 + gen6_render_ring_get_irq(struct intel_ring_buffer *ring) 1012 + { 1013 + return gen6_ring_get_irq(ring, 1014 + GT_USER_INTERRUPT, 1015 + GEN6_RENDER_USER_INTERRUPT); 1016 + } 1017 
+ 1018 + static void 1019 + gen6_render_ring_put_irq(struct intel_ring_buffer *ring) 1020 + { 1021 + return gen6_ring_put_irq(ring, 1022 + GT_USER_INTERRUPT, 1023 + GEN6_RENDER_USER_INTERRUPT); 1024 + } 1025 + 1026 + static bool 1093 1027 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) 1094 1028 { 1095 - return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); 1029 + return gen6_ring_get_irq(ring, 1030 + GT_GEN6_BSD_USER_INTERRUPT, 1031 + GEN6_BSD_USER_INTERRUPT); 1096 1032 } 1097 1033 1098 1034 static void 1099 1035 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) 1100 1036 { 1101 - ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); 1037 + return gen6_ring_put_irq(ring, 1038 + GT_GEN6_BSD_USER_INTERRUPT, 1039 + GEN6_BSD_USER_INTERRUPT); 1102 1040 } 1103 1041 1104 1042 /* ring buffer for Video Codec for Gen6+ */ ··· 1142 1040 static bool 1143 1041 blt_ring_get_irq(struct intel_ring_buffer *ring) 1144 1042 { 1145 - return ring_get_irq(ring, GT_BLT_USER_INTERRUPT); 1043 + return gen6_ring_get_irq(ring, 1044 + GT_BLT_USER_INTERRUPT, 1045 + GEN6_BLITTER_USER_INTERRUPT); 1146 1046 } 1147 1047 1148 1048 static void 1149 1049 blt_ring_put_irq(struct intel_ring_buffer *ring) 1150 1050 { 1151 - ring_put_irq(ring, GT_BLT_USER_INTERRUPT); 1051 + gen6_ring_put_irq(ring, 1052 + GT_BLT_USER_INTERRUPT, 1053 + GEN6_BLITTER_USER_INTERRUPT); 1152 1054 } 1153 1055 1154 1056 ··· 1221 1115 return intel_ring_begin(ring, 4); 1222 1116 } 1223 1117 1224 - static void blt_ring_flush(struct intel_ring_buffer *ring, 1118 + static int blt_ring_flush(struct intel_ring_buffer *ring, 1225 1119 u32 invalidate_domains, 1226 1120 u32 flush_domains) 1227 1121 { 1228 - if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 1229 - return; 1122 + int ret; 1230 1123 1231 - if (blt_ring_begin(ring, 4) == 0) { 1232 - intel_ring_emit(ring, MI_FLUSH_DW); 1233 - intel_ring_emit(ring, 0); 1234 - intel_ring_emit(ring, 0); 1235 - intel_ring_emit(ring, 0); 1236 - intel_ring_advance(ring); 1237 - } 1124 + if 
((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 1125 + return 0; 1126 + 1127 + ret = blt_ring_begin(ring, 4); 1128 + if (ret) 1129 + return ret; 1130 + 1131 + intel_ring_emit(ring, MI_FLUSH_DW); 1132 + intel_ring_emit(ring, 0); 1133 + intel_ring_emit(ring, 0); 1134 + intel_ring_emit(ring, 0); 1135 + intel_ring_advance(ring); 1136 + return 0; 1238 1137 } 1239 1138 1240 1139 static void blt_ring_cleanup(struct intel_ring_buffer *ring) ··· 1276 1165 *ring = render_ring; 1277 1166 if (INTEL_INFO(dev)->gen >= 6) { 1278 1167 ring->add_request = gen6_add_request; 1168 + ring->irq_get = gen6_render_ring_get_irq; 1169 + ring->irq_put = gen6_render_ring_put_irq; 1279 1170 } else if (IS_GEN5(dev)) { 1280 1171 ring->add_request = pc_render_add_request; 1281 1172 ring->get_seqno = pc_render_get_seqno;
+21 -15
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 16 16 17 17 #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) 18 18 19 - #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base)) 20 - #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val) 19 + #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) 20 + #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) 21 21 22 - #define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base)) 23 - #define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val) 22 + #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) 23 + #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) 24 24 25 - #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base)) 26 - #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val) 25 + #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) 26 + #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) 27 27 28 - #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base)) 29 - #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val) 28 + #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) 29 + #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) 30 30 31 - #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base)) 32 - #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base)) 33 - #define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base)) 31 + #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) 32 + #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) 33 + 34 + #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) 35 + #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) 36 + #define 
I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base)) 34 37 35 38 struct intel_ring_buffer { 36 39 const char *name; ··· 52 49 u32 tail; 53 50 int space; 54 51 int size; 52 + int effective_size; 55 53 struct intel_hw_status_page status_page; 56 54 55 + spinlock_t irq_lock; 56 + u32 irq_refcount; 57 + u32 irq_mask; 57 58 u32 irq_seqno; /* last seq seem at irq time */ 58 59 u32 waiting_seqno; 59 60 u32 sync_seqno[I915_NUM_RINGS-1]; 60 - atomic_t irq_refcount; 61 61 bool __must_check (*irq_get)(struct intel_ring_buffer *ring); 62 62 void (*irq_put)(struct intel_ring_buffer *ring); 63 63 ··· 68 62 69 63 void (*write_tail)(struct intel_ring_buffer *ring, 70 64 u32 value); 71 - void (*flush)(struct intel_ring_buffer *ring, 72 - u32 invalidate_domains, 73 - u32 flush_domains); 65 + int __must_check (*flush)(struct intel_ring_buffer *ring, 66 + u32 invalidate_domains, 67 + u32 flush_domains); 74 68 int (*add_request)(struct intel_ring_buffer *ring, 75 69 u32 *seqno); 76 70 u32 (*get_seqno)(struct intel_ring_buffer *ring);
+11 -22
drivers/gpu/drm/i915/intel_sdvo.c
··· 1024 1024 if (!intel_sdvo_set_target_input(intel_sdvo)) 1025 1025 return; 1026 1026 1027 - if (intel_sdvo->has_hdmi_monitor && 1028 - !intel_sdvo_set_avi_infoframe(intel_sdvo)) 1029 - return; 1027 + if (intel_sdvo->has_hdmi_monitor) { 1028 + intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); 1029 + intel_sdvo_set_colorimetry(intel_sdvo, 1030 + SDVO_COLORIMETRY_RGB256); 1031 + intel_sdvo_set_avi_infoframe(intel_sdvo); 1032 + } else 1033 + intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); 1030 1034 1031 1035 if (intel_sdvo->is_tv && 1032 1036 !intel_sdvo_set_tv_format(intel_sdvo)) ··· 1401 1397 return connector_status_disconnected; 1402 1398 1403 1399 intel_sdvo->attached_output = response; 1400 + 1401 + intel_sdvo->has_hdmi_monitor = false; 1402 + intel_sdvo->has_hdmi_audio = false; 1404 1403 1405 1404 if ((intel_sdvo_connector->output_flag & response) == 0) 1406 1405 ret = connector_status_disconnected; ··· 1929 1922 static bool 1930 1923 intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) 1931 1924 { 1932 - int is_hdmi; 1933 - 1934 - if (!intel_sdvo_check_supp_encode(intel_sdvo)) 1935 - return false; 1936 - 1937 - if (!intel_sdvo_set_target_output(intel_sdvo, 1938 - device == 0 ? 
SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1)) 1939 - return false; 1940 - 1941 - is_hdmi = 0; 1942 - if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1)) 1943 - return false; 1944 - 1945 - return !!is_hdmi; 1925 + return intel_sdvo_check_supp_encode(intel_sdvo); 1946 1926 } 1947 1927 1948 1928 static u8 ··· 2031 2037 connector->connector_type = DRM_MODE_CONNECTOR_DVID; 2032 2038 2033 2039 if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { 2034 - /* enable hdmi encoding mode if supported */ 2035 - intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); 2036 - intel_sdvo_set_colorimetry(intel_sdvo, 2037 - SDVO_COLORIMETRY_RGB256); 2038 2040 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 2039 - 2040 2041 intel_sdvo->is_hdmi = true; 2041 2042 } 2042 2043 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+1
drivers/net/Kconfig
··· 2963 2963 config XEN_NETDEV_FRONTEND 2964 2964 tristate "Xen network device frontend driver" 2965 2965 depends on XEN 2966 + select XEN_XENBUS_FRONTEND 2966 2967 default y 2967 2968 help 2968 2969 The network device frontend driver allows the kernel to
+1
drivers/pci/Kconfig
··· 45 45 depends on PCI && X86 && XEN 46 46 select HOTPLUG 47 47 select PCI_XEN 48 + select XEN_XENBUS_FRONTEND 48 49 default y 49 50 help 50 51 The PCI device frontend driver allows the kernel to import arbitrary
+11
drivers/xen/Kconfig
··· 29 29 firing. 30 30 If in doubt, say yes. 31 31 32 + config XEN_BACKEND 33 + bool "Backend driver support" 34 + depends on XEN_DOM0 35 + default y 36 + help 37 + Support for backend device drivers that provide I/O services 38 + to other virtual machines. 39 + 32 40 config XENFS 33 41 tristate "Xen filesystem" 34 42 default y ··· 69 61 hypervisor environment. When running native or in another 70 62 virtual environment, /sys/hypervisor will still be present, 71 63 but will have no xen contents. 64 + 65 + config XEN_XENBUS_FRONTEND 66 + tristate 72 67 73 68 config XEN_PLATFORM_PCI 74 69 tristate "xen platform pci device driver"
+5
drivers/xen/xenbus/Makefile
··· 5 5 xenbus-objs += xenbus_comms.o 6 6 xenbus-objs += xenbus_xs.o 7 7 xenbus-objs += xenbus_probe.o 8 + 9 + xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o 10 + xenbus-objs += $(xenbus-be-objs-y) 11 + 12 + obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o
+55 -296
drivers/xen/xenbus/xenbus_probe.c
··· 56 56 #include <xen/events.h> 57 57 #include <xen/page.h> 58 58 59 - #include <xen/platform_pci.h> 60 59 #include <xen/hvm.h> 61 60 62 61 #include "xenbus_comms.h" ··· 71 72 static unsigned long xen_store_mfn; 72 73 73 74 static BLOCKING_NOTIFIER_HEAD(xenstore_chain); 74 - 75 - static void wait_for_devices(struct xenbus_driver *xendrv); 76 - 77 - static int xenbus_probe_frontend(const char *type, const char *name); 78 - 79 - static void xenbus_dev_shutdown(struct device *_dev); 80 - 81 - static int xenbus_dev_suspend(struct device *dev, pm_message_t state); 82 - static int xenbus_dev_resume(struct device *dev); 83 75 84 76 /* If something in array of ids matches this device, return it. */ 85 77 static const struct xenbus_device_id * ··· 92 102 93 103 return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; 94 104 } 95 - 96 - static int xenbus_uevent(struct device *_dev, struct kobj_uevent_env *env) 97 - { 98 - struct xenbus_device *dev = to_xenbus_device(_dev); 99 - 100 - if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype)) 101 - return -ENOMEM; 102 - 103 - return 0; 104 - } 105 - 106 - /* device/<type>/<id> => <type>-<id> */ 107 - static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) 108 - { 109 - nodename = strchr(nodename, '/'); 110 - if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { 111 - printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); 112 - return -EINVAL; 113 - } 114 - 115 - strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); 116 - if (!strchr(bus_id, '/')) { 117 - printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); 118 - return -EINVAL; 119 - } 120 - *strchr(bus_id, '/') = '-'; 121 - return 0; 122 - } 105 + EXPORT_SYMBOL_GPL(xenbus_match); 123 106 124 107 125 108 static void free_otherend_details(struct xenbus_device *dev) ··· 112 149 } 113 150 114 151 115 - int read_otherend_details(struct xenbus_device *xendev, 152 + static int talk_to_otherend(struct xenbus_device *dev) 153 + { 154 + 
struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); 155 + 156 + free_otherend_watch(dev); 157 + free_otherend_details(dev); 158 + 159 + return drv->read_otherend_details(dev); 160 + } 161 + 162 + 163 + 164 + static int watch_otherend(struct xenbus_device *dev) 165 + { 166 + struct xen_bus_type *bus = 167 + container_of(dev->dev.bus, struct xen_bus_type, bus); 168 + 169 + return xenbus_watch_pathfmt(dev, &dev->otherend_watch, 170 + bus->otherend_changed, 171 + "%s/%s", dev->otherend, "state"); 172 + } 173 + 174 + 175 + int xenbus_read_otherend_details(struct xenbus_device *xendev, 116 176 char *id_node, char *path_node) 117 177 { 118 178 int err = xenbus_gather(XBT_NIL, xendev->nodename, ··· 160 174 161 175 return 0; 162 176 } 177 + EXPORT_SYMBOL_GPL(xenbus_read_otherend_details); 163 178 164 - 165 - static int read_backend_details(struct xenbus_device *xendev) 166 - { 167 - return read_otherend_details(xendev, "backend-id", "backend"); 168 - } 169 - 170 - static struct device_attribute xenbus_dev_attrs[] = { 171 - __ATTR_NULL 172 - }; 173 - 174 - /* Bus type for frontend drivers. 
*/ 175 - static struct xen_bus_type xenbus_frontend = { 176 - .root = "device", 177 - .levels = 2, /* device/type/<id> */ 178 - .get_bus_id = frontend_bus_id, 179 - .probe = xenbus_probe_frontend, 180 - .bus = { 181 - .name = "xen", 182 - .match = xenbus_match, 183 - .uevent = xenbus_uevent, 184 - .probe = xenbus_dev_probe, 185 - .remove = xenbus_dev_remove, 186 - .shutdown = xenbus_dev_shutdown, 187 - .dev_attrs = xenbus_dev_attrs, 188 - 189 - .suspend = xenbus_dev_suspend, 190 - .resume = xenbus_dev_resume, 191 - }, 192 - }; 193 - 194 - static void otherend_changed(struct xenbus_watch *watch, 195 - const char **vec, unsigned int len) 179 + void xenbus_otherend_changed(struct xenbus_watch *watch, 180 + const char **vec, unsigned int len, 181 + int ignore_on_shutdown) 196 182 { 197 183 struct xenbus_device *dev = 198 184 container_of(watch, struct xenbus_device, otherend_watch); ··· 192 234 * work that can fail e.g., when the rootfs is gone. 193 235 */ 194 236 if (system_state > SYSTEM_RUNNING) { 195 - struct xen_bus_type *bus = bus; 196 - bus = container_of(dev->dev.bus, struct xen_bus_type, bus); 197 - /* If we're frontend, drive the state machine to Closed. */ 198 - /* This should cause the backend to release our resources. 
*/ 199 - if ((bus == &xenbus_frontend) && (state == XenbusStateClosing)) 237 + if (ignore_on_shutdown && (state == XenbusStateClosing)) 200 238 xenbus_frontend_closed(dev); 201 239 return; 202 240 } ··· 200 246 if (drv->otherend_changed) 201 247 drv->otherend_changed(dev, state); 202 248 } 203 - 204 - 205 - static int talk_to_otherend(struct xenbus_device *dev) 206 - { 207 - struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); 208 - 209 - free_otherend_watch(dev); 210 - free_otherend_details(dev); 211 - 212 - return drv->read_otherend_details(dev); 213 - } 214 - 215 - 216 - static int watch_otherend(struct xenbus_device *dev) 217 - { 218 - return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed, 219 - "%s/%s", dev->otherend, "state"); 220 - } 221 - 249 + EXPORT_SYMBOL_GPL(xenbus_otherend_changed); 222 250 223 251 int xenbus_dev_probe(struct device *_dev) 224 252 { ··· 244 308 fail: 245 309 xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename); 246 310 xenbus_switch_state(dev, XenbusStateClosed); 247 - return -ENODEV; 311 + return err; 248 312 } 313 + EXPORT_SYMBOL_GPL(xenbus_dev_probe); 249 314 250 315 int xenbus_dev_remove(struct device *_dev) 251 316 { ··· 264 327 xenbus_switch_state(dev, XenbusStateClosed); 265 328 return 0; 266 329 } 330 + EXPORT_SYMBOL_GPL(xenbus_dev_remove); 267 331 268 - static void xenbus_dev_shutdown(struct device *_dev) 332 + void xenbus_dev_shutdown(struct device *_dev) 269 333 { 270 334 struct xenbus_device *dev = to_xenbus_device(_dev); 271 335 unsigned long timeout = 5*HZ; ··· 287 349 out: 288 350 put_device(&dev->dev); 289 351 } 352 + EXPORT_SYMBOL_GPL(xenbus_dev_shutdown); 290 353 291 354 int xenbus_register_driver_common(struct xenbus_driver *drv, 292 355 struct xen_bus_type *bus, ··· 301 362 302 363 return driver_register(&drv->driver); 303 364 } 304 - 305 - int __xenbus_register_frontend(struct xenbus_driver *drv, 306 - struct module *owner, const char *mod_name) 307 - { 308 - int ret; 309 
- 310 - drv->read_otherend_details = read_backend_details; 311 - 312 - ret = xenbus_register_driver_common(drv, &xenbus_frontend, 313 - owner, mod_name); 314 - if (ret) 315 - return ret; 316 - 317 - /* If this driver is loaded as a module wait for devices to attach. */ 318 - wait_for_devices(drv); 319 - 320 - return 0; 321 - } 322 - EXPORT_SYMBOL_GPL(__xenbus_register_frontend); 365 + EXPORT_SYMBOL_GPL(xenbus_register_driver_common); 323 366 324 367 void xenbus_unregister_driver(struct xenbus_driver *drv) 325 368 { ··· 472 551 kfree(xendev); 473 552 return err; 474 553 } 475 - 476 - /* device/<typename>/<name> */ 477 - static int xenbus_probe_frontend(const char *type, const char *name) 478 - { 479 - char *nodename; 480 - int err; 481 - 482 - nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", 483 - xenbus_frontend.root, type, name); 484 - if (!nodename) 485 - return -ENOMEM; 486 - 487 - DPRINTK("%s", nodename); 488 - 489 - err = xenbus_probe_node(&xenbus_frontend, type, nodename); 490 - kfree(nodename); 491 - return err; 492 - } 554 + EXPORT_SYMBOL_GPL(xenbus_probe_node); 493 555 494 556 static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) 495 557 { ··· 486 582 return PTR_ERR(dir); 487 583 488 584 for (i = 0; i < dir_n; i++) { 489 - err = bus->probe(type, dir[i]); 585 + err = bus->probe(bus, type, dir[i]); 490 586 if (err) 491 587 break; 492 588 } 589 + 493 590 kfree(dir); 494 591 return err; 495 592 } ··· 510 605 if (err) 511 606 break; 512 607 } 608 + 513 609 kfree(dir); 514 610 return err; 515 611 } 612 + EXPORT_SYMBOL_GPL(xenbus_probe_devices); 516 613 517 614 static unsigned int char_count(const char *str, char c) 518 615 { ··· 577 670 } 578 671 EXPORT_SYMBOL_GPL(xenbus_dev_changed); 579 672 580 - static void frontend_changed(struct xenbus_watch *watch, 581 - const char **vec, unsigned int len) 582 - { 583 - DPRINTK(""); 584 - 585 - xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); 586 - } 587 - 588 - /* We watch for devices 
appearing and vanishing. */ 589 - static struct xenbus_watch fe_watch = { 590 - .node = "device", 591 - .callback = frontend_changed, 592 - }; 593 - 594 - static int xenbus_dev_suspend(struct device *dev, pm_message_t state) 673 + int xenbus_dev_suspend(struct device *dev, pm_message_t state) 595 674 { 596 675 int err = 0; 597 676 struct xenbus_driver *drv; 598 - struct xenbus_device *xdev; 677 + struct xenbus_device *xdev 678 + = container_of(dev, struct xenbus_device, dev); 599 679 600 - DPRINTK(""); 680 + DPRINTK("%s", xdev->nodename); 601 681 602 682 if (dev->driver == NULL) 603 683 return 0; 604 684 drv = to_xenbus_driver(dev->driver); 605 - xdev = container_of(dev, struct xenbus_device, dev); 606 685 if (drv->suspend) 607 686 err = drv->suspend(xdev, state); 608 687 if (err) ··· 596 703 "xenbus: suspend %s failed: %i\n", dev_name(dev), err); 597 704 return 0; 598 705 } 706 + EXPORT_SYMBOL_GPL(xenbus_dev_suspend); 599 707 600 - static int xenbus_dev_resume(struct device *dev) 708 + int xenbus_dev_resume(struct device *dev) 601 709 { 602 710 int err; 603 711 struct xenbus_driver *drv; 604 - struct xenbus_device *xdev; 712 + struct xenbus_device *xdev 713 + = container_of(dev, struct xenbus_device, dev); 605 714 606 - DPRINTK(""); 715 + DPRINTK("%s", xdev->nodename); 607 716 608 717 if (dev->driver == NULL) 609 718 return 0; 610 - 611 719 drv = to_xenbus_driver(dev->driver); 612 - xdev = container_of(dev, struct xenbus_device, dev); 613 - 614 720 err = talk_to_otherend(xdev); 615 721 if (err) { 616 722 printk(KERN_WARNING ··· 640 748 641 749 return 0; 642 750 } 751 + EXPORT_SYMBOL_GPL(xenbus_dev_resume); 643 752 644 753 /* A flag to determine if xenstored is 'ready' (i.e. has started) */ 645 754 int xenstored_ready = 0; ··· 668 775 void xenbus_probe(struct work_struct *unused) 669 776 { 670 777 xenstored_ready = 1; 671 - 672 - /* Enumerate devices in xenstore and watch for changes. 
*/ 673 - xenbus_probe_devices(&xenbus_frontend); 674 - register_xenbus_watch(&fe_watch); 675 - xenbus_backend_probe_and_watch(); 676 778 677 779 /* Notify others that xenstore is up */ 678 780 blocking_notifier_call_chain(&xenstore_chain, 0, NULL); ··· 697 809 698 810 err = -ENODEV; 699 811 if (!xen_domain()) 700 - goto out_error; 701 - 702 - /* Register ourselves with the kernel bus subsystem */ 703 - err = bus_register(&xenbus_frontend.bus); 704 - if (err) 705 - goto out_error; 706 - 707 - err = xenbus_backend_bus_register(); 708 - if (err) 709 - goto out_unreg_front; 812 + return err; 710 813 711 814 /* 712 815 * Domain0 doesn't have a store_evtchn or store_mfn yet. ··· 753 874 if (err) { 754 875 printk(KERN_WARNING 755 876 "XENBUS: Error initializing xenstore comms: %i\n", err); 756 - goto out_unreg_back; 877 + goto out_error; 757 878 } 758 879 759 880 #ifdef CONFIG_XEN_COMPAT_XENFS ··· 766 887 767 888 return 0; 768 889 769 - out_unreg_back: 770 - xenbus_backend_bus_unregister(); 771 - 772 - out_unreg_front: 773 - bus_unregister(&xenbus_frontend.bus); 774 - 775 890 out_error: 776 891 if (page != 0) 777 892 free_page(page); 893 + 778 894 return err; 779 895 } 780 896 781 897 postcore_initcall(xenbus_init); 782 898 783 899 MODULE_LICENSE("GPL"); 784 - 785 - static int is_device_connecting(struct device *dev, void *data) 786 - { 787 - struct xenbus_device *xendev = to_xenbus_device(dev); 788 - struct device_driver *drv = data; 789 - struct xenbus_driver *xendrv; 790 - 791 - /* 792 - * A device with no driver will never connect. We care only about 793 - * devices which should currently be in the process of connecting. 794 - */ 795 - if (!dev->driver) 796 - return 0; 797 - 798 - /* Is this search limited to a particular driver? 
*/ 799 - if (drv && (dev->driver != drv)) 800 - return 0; 801 - 802 - xendrv = to_xenbus_driver(dev->driver); 803 - return (xendev->state < XenbusStateConnected || 804 - (xendev->state == XenbusStateConnected && 805 - xendrv->is_ready && !xendrv->is_ready(xendev))); 806 - } 807 - 808 - static int exists_connecting_device(struct device_driver *drv) 809 - { 810 - return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, 811 - is_device_connecting); 812 - } 813 - 814 - static int print_device_status(struct device *dev, void *data) 815 - { 816 - struct xenbus_device *xendev = to_xenbus_device(dev); 817 - struct device_driver *drv = data; 818 - 819 - /* Is this operation limited to a particular driver? */ 820 - if (drv && (dev->driver != drv)) 821 - return 0; 822 - 823 - if (!dev->driver) { 824 - /* Information only: is this too noisy? */ 825 - printk(KERN_INFO "XENBUS: Device with no driver: %s\n", 826 - xendev->nodename); 827 - } else if (xendev->state < XenbusStateConnected) { 828 - enum xenbus_state rstate = XenbusStateUnknown; 829 - if (xendev->otherend) 830 - rstate = xenbus_read_driver_state(xendev->otherend); 831 - printk(KERN_WARNING "XENBUS: Timeout connecting " 832 - "to device: %s (local state %d, remote state %d)\n", 833 - xendev->nodename, xendev->state, rstate); 834 - } 835 - 836 - return 0; 837 - } 838 - 839 - /* We only wait for device setup after most initcalls have run. */ 840 - static int ready_to_wait_for_devices; 841 - 842 - /* 843 - * On a 5-minute timeout, wait for all devices currently configured. We need 844 - * to do this to guarantee that the filesystems and / or network devices 845 - * needed for boot are available, before we can allow the boot to proceed. 846 - * 847 - * This needs to be on a late_initcall, to happen after the frontend device 848 - * drivers have been initialised, but before the root fs is mounted. 
849 - * 850 - * A possible improvement here would be to have the tools add a per-device 851 - * flag to the store entry, indicating whether it is needed at boot time. 852 - * This would allow people who knew what they were doing to accelerate their 853 - * boot slightly, but of course needs tools or manual intervention to set up 854 - * those flags correctly. 855 - */ 856 - static void wait_for_devices(struct xenbus_driver *xendrv) 857 - { 858 - unsigned long start = jiffies; 859 - struct device_driver *drv = xendrv ? &xendrv->driver : NULL; 860 - unsigned int seconds_waited = 0; 861 - 862 - if (!ready_to_wait_for_devices || !xen_domain()) 863 - return; 864 - 865 - while (exists_connecting_device(drv)) { 866 - if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { 867 - if (!seconds_waited) 868 - printk(KERN_WARNING "XENBUS: Waiting for " 869 - "devices to initialise: "); 870 - seconds_waited += 5; 871 - printk("%us...", 300 - seconds_waited); 872 - if (seconds_waited == 300) 873 - break; 874 - } 875 - 876 - schedule_timeout_interruptible(HZ/10); 877 - } 878 - 879 - if (seconds_waited) 880 - printk("\n"); 881 - 882 - bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, 883 - print_device_status); 884 - } 885 - 886 - #ifndef MODULE 887 - static int __init boot_wait_for_devices(void) 888 - { 889 - if (xen_hvm_domain() && !xen_platform_pci_unplug) 890 - return -ENODEV; 891 - 892 - ready_to_wait_for_devices = 1; 893 - wait_for_devices(NULL); 894 - return 0; 895 - } 896 - 897 - late_initcall(boot_wait_for_devices); 898 - #endif
+16 -15
drivers/xen/xenbus/xenbus_probe.h
··· 36 36 37 37 #define XEN_BUS_ID_SIZE 20 38 38 39 - #ifdef CONFIG_XEN_BACKEND 40 - extern void xenbus_backend_suspend(int (*fn)(struct device *, void *)); 41 - extern void xenbus_backend_resume(int (*fn)(struct device *, void *)); 42 - extern void xenbus_backend_probe_and_watch(void); 43 - extern int xenbus_backend_bus_register(void); 44 - extern void xenbus_backend_bus_unregister(void); 45 - #else 46 - static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {} 47 - static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {} 48 - static inline void xenbus_backend_probe_and_watch(void) {} 49 - static inline int xenbus_backend_bus_register(void) { return 0; } 50 - static inline void xenbus_backend_bus_unregister(void) {} 51 - #endif 52 - 53 39 struct xen_bus_type 54 40 { 55 41 char *root; 56 42 unsigned int levels; 57 43 int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); 58 - int (*probe)(const char *type, const char *dir); 44 + int (*probe)(struct xen_bus_type *bus, const char *type, 45 + const char *dir); 46 + void (*otherend_changed)(struct xenbus_watch *watch, const char **vec, 47 + unsigned int len); 59 48 struct bus_type bus; 60 49 }; 61 50 ··· 61 72 extern int xenbus_probe_devices(struct xen_bus_type *bus); 62 73 63 74 extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); 75 + 76 + extern void xenbus_dev_shutdown(struct device *_dev); 77 + 78 + extern int xenbus_dev_suspend(struct device *dev, pm_message_t state); 79 + extern int xenbus_dev_resume(struct device *dev); 80 + 81 + extern void xenbus_otherend_changed(struct xenbus_watch *watch, 82 + const char **vec, unsigned int len, 83 + int ignore_on_shutdown); 84 + 85 + extern int xenbus_read_otherend_details(struct xenbus_device *xendev, 86 + char *id_node, char *path_node); 64 87 65 88 #endif
+276
drivers/xen/xenbus/xenbus_probe_backend.c
··· 1 + /****************************************************************************** 2 + * Talks to Xen Store to figure out what devices we have (backend half). 3 + * 4 + * Copyright (C) 2005 Rusty Russell, IBM Corporation 5 + * Copyright (C) 2005 Mike Wray, Hewlett-Packard 6 + * Copyright (C) 2005, 2006 XenSource Ltd 7 + * Copyright (C) 2007 Solarflare Communications, Inc. 8 + * 9 + * This program is free software; you can redistribute it and/or 10 + * modify it under the terms of the GNU General Public License version 2 11 + * as published by the Free Software Foundation; or, when distributed 12 + * separately from the Linux kernel or incorporated into other 13 + * software packages, subject to the following license: 14 + * 15 + * Permission is hereby granted, free of charge, to any person obtaining a copy 16 + * of this source file (the "Software"), to deal in the Software without 17 + * restriction, including without limitation the rights to use, copy, modify, 18 + * merge, publish, distribute, sublicense, and/or sell copies of the Software, 19 + * and to permit persons to whom the Software is furnished to do so, subject to 20 + * the following conditions: 21 + * 22 + * The above copyright notice and this permission notice shall be included in 23 + * all copies or substantial portions of the Software. 24 + * 25 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 26 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 27 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 28 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 29 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 30 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 31 + * IN THE SOFTWARE. 32 + */ 33 + 34 + #define DPRINTK(fmt, args...) 
\ 35 + pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ 36 + __func__, __LINE__, ##args) 37 + 38 + #include <linux/kernel.h> 39 + #include <linux/err.h> 40 + #include <linux/string.h> 41 + #include <linux/ctype.h> 42 + #include <linux/fcntl.h> 43 + #include <linux/mm.h> 44 + #include <linux/notifier.h> 45 + 46 + #include <asm/page.h> 47 + #include <asm/pgtable.h> 48 + #include <asm/xen/hypervisor.h> 49 + #include <asm/hypervisor.h> 50 + #include <xen/xenbus.h> 51 + #include <xen/features.h> 52 + 53 + #include "xenbus_comms.h" 54 + #include "xenbus_probe.h" 55 + 56 + /* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */ 57 + static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) 58 + { 59 + int domid, err; 60 + const char *devid, *type, *frontend; 61 + unsigned int typelen; 62 + 63 + type = strchr(nodename, '/'); 64 + if (!type) 65 + return -EINVAL; 66 + type++; 67 + typelen = strcspn(type, "/"); 68 + if (!typelen || type[typelen] != '/') 69 + return -EINVAL; 70 + 71 + devid = strrchr(nodename, '/') + 1; 72 + 73 + err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid, 74 + "frontend", NULL, &frontend, 75 + NULL); 76 + if (err) 77 + return err; 78 + if (strlen(frontend) == 0) 79 + err = -ERANGE; 80 + if (!err && !xenbus_exists(XBT_NIL, frontend, "")) 81 + err = -ENOENT; 82 + kfree(frontend); 83 + 84 + if (err) 85 + return err; 86 + 87 + if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s", 88 + typelen, type, domid, devid) >= XEN_BUS_ID_SIZE) 89 + return -ENOSPC; 90 + return 0; 91 + } 92 + 93 + static int xenbus_uevent_backend(struct device *dev, 94 + struct kobj_uevent_env *env) 95 + { 96 + struct xenbus_device *xdev; 97 + struct xenbus_driver *drv; 98 + struct xen_bus_type *bus; 99 + 100 + DPRINTK(""); 101 + 102 + if (dev == NULL) 103 + return -ENODEV; 104 + 105 + xdev = to_xenbus_device(dev); 106 + bus = container_of(xdev->dev.bus, struct xen_bus_type, bus); 107 + if (xdev == NULL) 108 + return -ENODEV; 109 + 110 + /* stuff 
we want to pass to /sbin/hotplug */ 111 + if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype)) 112 + return -ENOMEM; 113 + 114 + if (add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename)) 115 + return -ENOMEM; 116 + 117 + if (add_uevent_var(env, "XENBUS_BASE_PATH=%s", bus->root)) 118 + return -ENOMEM; 119 + 120 + if (dev->driver) { 121 + drv = to_xenbus_driver(dev->driver); 122 + if (drv && drv->uevent) 123 + return drv->uevent(xdev, env); 124 + } 125 + 126 + return 0; 127 + } 128 + 129 + /* backend/<typename>/<frontend-uuid>/<name> */ 130 + static int xenbus_probe_backend_unit(struct xen_bus_type *bus, 131 + const char *dir, 132 + const char *type, 133 + const char *name) 134 + { 135 + char *nodename; 136 + int err; 137 + 138 + nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name); 139 + if (!nodename) 140 + return -ENOMEM; 141 + 142 + DPRINTK("%s\n", nodename); 143 + 144 + err = xenbus_probe_node(bus, type, nodename); 145 + kfree(nodename); 146 + return err; 147 + } 148 + 149 + /* backend/<typename>/<frontend-domid> */ 150 + static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type, 151 + const char *domid) 152 + { 153 + char *nodename; 154 + int err = 0; 155 + char **dir; 156 + unsigned int i, dir_n = 0; 157 + 158 + DPRINTK(""); 159 + 160 + nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, domid); 161 + if (!nodename) 162 + return -ENOMEM; 163 + 164 + dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n); 165 + if (IS_ERR(dir)) { 166 + kfree(nodename); 167 + return PTR_ERR(dir); 168 + } 169 + 170 + for (i = 0; i < dir_n; i++) { 171 + err = xenbus_probe_backend_unit(bus, nodename, type, dir[i]); 172 + if (err) 173 + break; 174 + } 175 + kfree(dir); 176 + kfree(nodename); 177 + return err; 178 + } 179 + 180 + static void frontend_changed(struct xenbus_watch *watch, 181 + const char **vec, unsigned int len) 182 + { 183 + xenbus_otherend_changed(watch, vec, len, 0); 184 + } 185 + 186 + static struct device_attribute 
xenbus_backend_dev_attrs[] = { 187 + __ATTR_NULL 188 + }; 189 + 190 + static struct xen_bus_type xenbus_backend = { 191 + .root = "backend", 192 + .levels = 3, /* backend/type/<frontend>/<id> */ 193 + .get_bus_id = backend_bus_id, 194 + .probe = xenbus_probe_backend, 195 + .otherend_changed = frontend_changed, 196 + .bus = { 197 + .name = "xen-backend", 198 + .match = xenbus_match, 199 + .uevent = xenbus_uevent_backend, 200 + .probe = xenbus_dev_probe, 201 + .remove = xenbus_dev_remove, 202 + .shutdown = xenbus_dev_shutdown, 203 + .dev_attrs = xenbus_backend_dev_attrs, 204 + }, 205 + }; 206 + 207 + static void backend_changed(struct xenbus_watch *watch, 208 + const char **vec, unsigned int len) 209 + { 210 + DPRINTK(""); 211 + 212 + xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend); 213 + } 214 + 215 + static struct xenbus_watch be_watch = { 216 + .node = "backend", 217 + .callback = backend_changed, 218 + }; 219 + 220 + static int read_frontend_details(struct xenbus_device *xendev) 221 + { 222 + return xenbus_read_otherend_details(xendev, "frontend-id", "frontend"); 223 + } 224 + 225 + int xenbus_dev_is_online(struct xenbus_device *dev) 226 + { 227 + int rc, val; 228 + 229 + rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); 230 + if (rc != 1) 231 + val = 0; /* no online node present */ 232 + 233 + return val; 234 + } 235 + EXPORT_SYMBOL_GPL(xenbus_dev_is_online); 236 + 237 + int __xenbus_register_backend(struct xenbus_driver *drv, 238 + struct module *owner, const char *mod_name) 239 + { 240 + drv->read_otherend_details = read_frontend_details; 241 + 242 + return xenbus_register_driver_common(drv, &xenbus_backend, 243 + owner, mod_name); 244 + } 245 + EXPORT_SYMBOL_GPL(__xenbus_register_backend); 246 + 247 + static int backend_probe_and_watch(struct notifier_block *notifier, 248 + unsigned long event, 249 + void *data) 250 + { 251 + /* Enumerate devices in xenstore and watch for changes. 
*/ 252 + xenbus_probe_devices(&xenbus_backend); 253 + register_xenbus_watch(&be_watch); 254 + 255 + return NOTIFY_DONE; 256 + } 257 + 258 + static int __init xenbus_probe_backend_init(void) 259 + { 260 + static struct notifier_block xenstore_notifier = { 261 + .notifier_call = backend_probe_and_watch 262 + }; 263 + int err; 264 + 265 + DPRINTK(""); 266 + 267 + /* Register ourselves with the kernel bus subsystem */ 268 + err = bus_register(&xenbus_backend.bus); 269 + if (err) 270 + return err; 271 + 272 + register_xenstore_notifier(&xenstore_notifier); 273 + 274 + return 0; 275 + } 276 + subsys_initcall(xenbus_probe_backend_init);
+294
drivers/xen/xenbus/xenbus_probe_frontend.c
··· 1 + #define DPRINTK(fmt, args...) \ 2 + pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ 3 + __func__, __LINE__, ##args) 4 + 5 + #include <linux/kernel.h> 6 + #include <linux/err.h> 7 + #include <linux/string.h> 8 + #include <linux/ctype.h> 9 + #include <linux/fcntl.h> 10 + #include <linux/mm.h> 11 + #include <linux/proc_fs.h> 12 + #include <linux/notifier.h> 13 + #include <linux/kthread.h> 14 + #include <linux/mutex.h> 15 + #include <linux/io.h> 16 + 17 + #include <asm/page.h> 18 + #include <asm/pgtable.h> 19 + #include <asm/xen/hypervisor.h> 20 + #include <xen/xenbus.h> 21 + #include <xen/events.h> 22 + #include <xen/page.h> 23 + 24 + #include <xen/platform_pci.h> 25 + 26 + #include "xenbus_comms.h" 27 + #include "xenbus_probe.h" 28 + 29 + 30 + /* device/<type>/<id> => <type>-<id> */ 31 + static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) 32 + { 33 + nodename = strchr(nodename, '/'); 34 + if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { 35 + printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); 36 + return -EINVAL; 37 + } 38 + 39 + strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); 40 + if (!strchr(bus_id, '/')) { 41 + printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); 42 + return -EINVAL; 43 + } 44 + *strchr(bus_id, '/') = '-'; 45 + return 0; 46 + } 47 + 48 + /* device/<typename>/<name> */ 49 + static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type, 50 + const char *name) 51 + { 52 + char *nodename; 53 + int err; 54 + 55 + nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name); 56 + if (!nodename) 57 + return -ENOMEM; 58 + 59 + DPRINTK("%s", nodename); 60 + 61 + err = xenbus_probe_node(bus, type, nodename); 62 + kfree(nodename); 63 + return err; 64 + } 65 + 66 + static int xenbus_uevent_frontend(struct device *_dev, 67 + struct kobj_uevent_env *env) 68 + { 69 + struct xenbus_device *dev = to_xenbus_device(_dev); 70 + 71 + if (add_uevent_var(env, "MODALIAS=xen:%s", 
dev->devicetype)) 72 + return -ENOMEM; 73 + 74 + return 0; 75 + } 76 + 77 + 78 + static void backend_changed(struct xenbus_watch *watch, 79 + const char **vec, unsigned int len) 80 + { 81 + xenbus_otherend_changed(watch, vec, len, 1); 82 + } 83 + 84 + static struct device_attribute xenbus_frontend_dev_attrs[] = { 85 + __ATTR_NULL 86 + }; 87 + 88 + static struct xen_bus_type xenbus_frontend = { 89 + .root = "device", 90 + .levels = 2, /* device/type/<id> */ 91 + .get_bus_id = frontend_bus_id, 92 + .probe = xenbus_probe_frontend, 93 + .otherend_changed = backend_changed, 94 + .bus = { 95 + .name = "xen", 96 + .match = xenbus_match, 97 + .uevent = xenbus_uevent_frontend, 98 + .probe = xenbus_dev_probe, 99 + .remove = xenbus_dev_remove, 100 + .shutdown = xenbus_dev_shutdown, 101 + .dev_attrs = xenbus_frontend_dev_attrs, 102 + 103 + .suspend = xenbus_dev_suspend, 104 + .resume = xenbus_dev_resume, 105 + }, 106 + }; 107 + 108 + static void frontend_changed(struct xenbus_watch *watch, 109 + const char **vec, unsigned int len) 110 + { 111 + DPRINTK(""); 112 + 113 + xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); 114 + } 115 + 116 + 117 + /* We watch for devices appearing and vanishing. */ 118 + static struct xenbus_watch fe_watch = { 119 + .node = "device", 120 + .callback = frontend_changed, 121 + }; 122 + 123 + static int read_backend_details(struct xenbus_device *xendev) 124 + { 125 + return xenbus_read_otherend_details(xendev, "backend-id", "backend"); 126 + } 127 + 128 + static int is_device_connecting(struct device *dev, void *data) 129 + { 130 + struct xenbus_device *xendev = to_xenbus_device(dev); 131 + struct device_driver *drv = data; 132 + struct xenbus_driver *xendrv; 133 + 134 + /* 135 + * A device with no driver will never connect. We care only about 136 + * devices which should currently be in the process of connecting. 137 + */ 138 + if (!dev->driver) 139 + return 0; 140 + 141 + /* Is this search limited to a particular driver? 
*/ 142 + if (drv && (dev->driver != drv)) 143 + return 0; 144 + 145 + xendrv = to_xenbus_driver(dev->driver); 146 + return (xendev->state < XenbusStateConnected || 147 + (xendev->state == XenbusStateConnected && 148 + xendrv->is_ready && !xendrv->is_ready(xendev))); 149 + } 150 + 151 + static int exists_connecting_device(struct device_driver *drv) 152 + { 153 + return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, 154 + is_device_connecting); 155 + } 156 + 157 + static int print_device_status(struct device *dev, void *data) 158 + { 159 + struct xenbus_device *xendev = to_xenbus_device(dev); 160 + struct device_driver *drv = data; 161 + 162 + /* Is this operation limited to a particular driver? */ 163 + if (drv && (dev->driver != drv)) 164 + return 0; 165 + 166 + if (!dev->driver) { 167 + /* Information only: is this too noisy? */ 168 + printk(KERN_INFO "XENBUS: Device with no driver: %s\n", 169 + xendev->nodename); 170 + } else if (xendev->state < XenbusStateConnected) { 171 + enum xenbus_state rstate = XenbusStateUnknown; 172 + if (xendev->otherend) 173 + rstate = xenbus_read_driver_state(xendev->otherend); 174 + printk(KERN_WARNING "XENBUS: Timeout connecting " 175 + "to device: %s (local state %d, remote state %d)\n", 176 + xendev->nodename, xendev->state, rstate); 177 + } 178 + 179 + return 0; 180 + } 181 + 182 + /* We only wait for device setup after most initcalls have run. */ 183 + static int ready_to_wait_for_devices; 184 + 185 + /* 186 + * On a 5-minute timeout, wait for all devices currently configured. We need 187 + * to do this to guarantee that the filesystems and / or network devices 188 + * needed for boot are available, before we can allow the boot to proceed. 189 + * 190 + * This needs to be on a late_initcall, to happen after the frontend device 191 + * drivers have been initialised, but before the root fs is mounted. 
192 + * 193 + * A possible improvement here would be to have the tools add a per-device 194 + * flag to the store entry, indicating whether it is needed at boot time. 195 + * This would allow people who knew what they were doing to accelerate their 196 + * boot slightly, but of course needs tools or manual intervention to set up 197 + * those flags correctly. 198 + */ 199 + static void wait_for_devices(struct xenbus_driver *xendrv) 200 + { 201 + unsigned long start = jiffies; 202 + struct device_driver *drv = xendrv ? &xendrv->driver : NULL; 203 + unsigned int seconds_waited = 0; 204 + 205 + if (!ready_to_wait_for_devices || !xen_domain()) 206 + return; 207 + 208 + while (exists_connecting_device(drv)) { 209 + if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { 210 + if (!seconds_waited) 211 + printk(KERN_WARNING "XENBUS: Waiting for " 212 + "devices to initialise: "); 213 + seconds_waited += 5; 214 + printk("%us...", 300 - seconds_waited); 215 + if (seconds_waited == 300) 216 + break; 217 + } 218 + 219 + schedule_timeout_interruptible(HZ/10); 220 + } 221 + 222 + if (seconds_waited) 223 + printk("\n"); 224 + 225 + bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, 226 + print_device_status); 227 + } 228 + 229 + int __xenbus_register_frontend(struct xenbus_driver *drv, 230 + struct module *owner, const char *mod_name) 231 + { 232 + int ret; 233 + 234 + drv->read_otherend_details = read_backend_details; 235 + 236 + ret = xenbus_register_driver_common(drv, &xenbus_frontend, 237 + owner, mod_name); 238 + if (ret) 239 + return ret; 240 + 241 + /* If this driver is loaded as a module wait for devices to attach. */ 242 + wait_for_devices(drv); 243 + 244 + return 0; 245 + } 246 + EXPORT_SYMBOL_GPL(__xenbus_register_frontend); 247 + 248 + static int frontend_probe_and_watch(struct notifier_block *notifier, 249 + unsigned long event, 250 + void *data) 251 + { 252 + /* Enumerate devices in xenstore and watch for changes. 
*/ 253 + xenbus_probe_devices(&xenbus_frontend); 254 + register_xenbus_watch(&fe_watch); 255 + 256 + return NOTIFY_DONE; 257 + } 258 + 259 + 260 + static int __init xenbus_probe_frontend_init(void) 261 + { 262 + static struct notifier_block xenstore_notifier = { 263 + .notifier_call = frontend_probe_and_watch 264 + }; 265 + int err; 266 + 267 + DPRINTK(""); 268 + 269 + /* Register ourselves with the kernel bus subsystem */ 270 + err = bus_register(&xenbus_frontend.bus); 271 + if (err) 272 + return err; 273 + 274 + register_xenstore_notifier(&xenstore_notifier); 275 + 276 + return 0; 277 + } 278 + subsys_initcall(xenbus_probe_frontend_init); 279 + 280 + #ifndef MODULE 281 + static int __init boot_wait_for_devices(void) 282 + { 283 + if (xen_hvm_domain() && !xen_platform_pci_unplug) 284 + return -ENODEV; 285 + 286 + ready_to_wait_for_devices = 1; 287 + wait_for_devices(NULL); 288 + return 0; 289 + } 290 + 291 + late_initcall(boot_wait_for_devices); 292 + #endif 293 + 294 + MODULE_LICENSE("GPL");
+1 -1
fs/ntfs/Makefile
··· 6 6 index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \ 7 7 unistr.o upcase.o 8 8 9 - EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.29\" 9 + EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.30\" 10 10 11 11 ifeq ($(CONFIG_NTFS_DEBUG),y) 12 12 EXTRA_CFLAGS += -DDEBUG
+17 -18
fs/ntfs/file.c
··· 1 1 /* 2 2 * file.c - NTFS kernel file operations. Part of the Linux-NTFS project. 3 3 * 4 - * Copyright (c) 2001-2007 Anton Altaparmakov 4 + * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc. 5 5 * 6 6 * This program/include file is free software; you can redistribute it and/or 7 7 * modify it under the terms of the GNU General Public License as published ··· 1380 1380 * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s 1381 1381 * single-segment behaviour. 1382 1382 * 1383 - * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both 1384 - * when atomic and when not atomic. This is ok because 1385 - * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic() 1386 - * and it is ok to call this when non-atomic. 1387 - * Infact, the only difference between __copy_from_user_inatomic() and 1383 + * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when 1384 + * atomic and when not atomic. This is ok because it calls 1385 + * __copy_from_user_inatomic() and it is ok to call this when non-atomic. In 1386 + * fact, the only difference between __copy_from_user_inatomic() and 1388 1387 * __copy_from_user() is that the latter calls might_sleep() and the former 1389 - * should not zero the tail of the buffer on error. And on many 1390 - * architectures __copy_from_user_inatomic() is just defined to 1391 - * __copy_from_user() so it makes no difference at all on those architectures. 1388 + * should not zero the tail of the buffer on error. And on many architectures 1389 + * __copy_from_user_inatomic() is just defined to __copy_from_user() so it 1390 + * makes no difference at all on those architectures. 1392 1391 */ 1393 1392 static inline size_t ntfs_copy_from_user_iovec(struct page **pages, 1394 1393 unsigned nr_pages, unsigned ofs, const struct iovec **iov, ··· 1408 1409 if (unlikely(copied != len)) { 1409 1410 /* Do it the slow way. 
*/ 1410 1411 addr = kmap(*pages); 1411 - copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs, 1412 - *iov, *iov_ofs, len); 1413 - /* 1414 - * Zero the rest of the target like __copy_from_user(). 1415 - */ 1416 - memset(addr + ofs + copied, 0, len - copied); 1417 - kunmap(*pages); 1412 + copied = __ntfs_copy_from_user_iovec_inatomic(addr + 1413 + ofs, *iov, *iov_ofs, len); 1418 1414 if (unlikely(copied != len)) 1419 1415 goto err_out; 1416 + kunmap(*pages); 1420 1417 } 1421 1418 total += len; 1419 + ntfs_set_next_iovec(iov, iov_ofs, len); 1422 1420 bytes -= len; 1423 1421 if (!bytes) 1424 1422 break; 1425 - ntfs_set_next_iovec(iov, iov_ofs, len); 1426 1423 ofs = 0; 1427 1424 } while (++pages < last_page); 1428 1425 out: 1429 1426 return total; 1430 1427 err_out: 1431 - total += copied; 1428 + BUG_ON(copied > len); 1432 1429 /* Zero the rest of the target like __copy_from_user(). */ 1430 + memset(addr + ofs + copied, 0, len - copied); 1431 + kunmap(*pages); 1432 + total += copied; 1433 + ntfs_set_next_iovec(iov, iov_ofs, copied); 1433 1434 while (++pages < last_page) { 1434 1435 bytes -= len; 1435 1436 if (!bytes)
+3 -3
fs/ntfs/super.c
··· 1 1 /* 2 2 * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project. 3 3 * 4 - * Copyright (c) 2001-2007 Anton Altaparmakov 4 + * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc. 5 5 * Copyright (c) 2001,2002 Richard Russon 6 6 * 7 7 * This program/include file is free software; you can redistribute it and/or ··· 3193 3193 ntfs_sysctl(0); 3194 3194 } 3195 3195 3196 - MODULE_AUTHOR("Anton Altaparmakov <aia21@cantab.net>"); 3197 - MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2007 Anton Altaparmakov"); 3196 + MODULE_AUTHOR("Anton Altaparmakov <anton@tuxera.com>"); 3197 + MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc."); 3198 3198 MODULE_VERSION(NTFS_VERSION); 3199 3199 MODULE_LICENSE("GPL"); 3200 3200 #ifdef DEBUG
+1 -1
include/xen/xenbus.h
··· 94 94 int (*remove)(struct xenbus_device *dev); 95 95 int (*suspend)(struct xenbus_device *dev, pm_message_t state); 96 96 int (*resume)(struct xenbus_device *dev); 97 - int (*uevent)(struct xenbus_device *, char **, int, char *, int); 97 + int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *); 98 98 struct device_driver driver; 99 99 int (*read_otherend_details)(struct xenbus_device *dev); 100 100 int (*is_ready)(struct xenbus_device *dev);
+8
tools/power/x86/turbostat/Makefile
··· 1 + turbostat : turbostat.c 2 + 3 + clean : 4 + rm -f turbostat 5 + 6 + install : 7 + install turbostat /usr/bin/turbostat 8 + install turbostat.8 /usr/share/man/man8
+172
tools/power/x86/turbostat/turbostat.8
··· 1 + .TH TURBOSTAT 8 2 + .SH NAME 3 + turbostat \- Report processor frequency and idle statistics 4 + .SH SYNOPSIS 5 + .ft B 6 + .B turbostat 7 + .RB [ "\-v" ] 8 + .RB [ "\-M MSR#" ] 9 + .RB command 10 + .br 11 + .B turbostat 12 + .RB [ "\-v" ] 13 + .RB [ "\-M MSR#" ] 14 + .RB [ "\-i interval_sec" ] 15 + .SH DESCRIPTION 16 + \fBturbostat \fP reports processor topology, frequency 17 + and idle power state statistics on modern X86 processors. 18 + Either \fBcommand\fP is forked and statistics are printed 19 + upon its completion, or statistics are printed periodically. 20 + 21 + \fBturbostat \fP 22 + requires that the processor 23 + supports an "invariant" TSC, plus the APERF and MPERF MSRs. 24 + \fBturbostat \fP will report idle cpu power state residency 25 + on processors that additionally support C-state residency counters. 26 + 27 + .SS Options 28 + The \fB-v\fP option increases verbosity. 29 + .PP 30 + The \fB-M MSR#\fP option dumps the specified MSR, 31 + in addition to the usual frequency and idle statistics. 32 + .PP 33 + The \fB-i interval_sec\fP option prints statistics every \fIinterval_sec\fP seconds. 34 + The default is 5 seconds. 35 + .PP 36 + The \fBcommand\fP parameter forks \fBcommand\fP and upon its exit, 37 + displays the statistics gathered since it was forked. 38 + .PP 39 + .SH FIELD DESCRIPTIONS 40 + .nf 41 + \fBpkg\fP processor package number. 42 + \fBcore\fP processor core number. 43 + \fBCPU\fP Linux CPU (logical processor) number. 44 + \fB%c0\fP percent of the interval that the CPU retired instructions. 45 + \fBGHz\fP average clock rate while the CPU was in c0 state. 46 + \fBTSC\fP average GHz that the TSC ran during the entire interval. 47 + \fB%c1, %c3, %c6\fP show the percentage residency in hardware core idle states. 48 + \fB%pc3, %pc6\fP percentage residency in hardware package idle states. 49 + .fi 50 + .PP 51 + .SH EXAMPLE 52 + Without any parameters, turbostat prints out counters every 5 seconds. 
53 + (override interval with "-i sec" option, or specify a command 54 + for turbostat to fork). 55 + 56 + The first row of statistics reflect the average for the entire system. 57 + Subsequent rows show per-CPU statistics. 58 + 59 + .nf 60 + [root@x980]# ./turbostat 61 + core CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6 62 + 0.04 1.62 3.38 0.11 0.00 99.85 0.00 95.07 63 + 0 0 0.04 1.62 3.38 0.06 0.00 99.90 0.00 95.07 64 + 0 6 0.02 1.62 3.38 0.08 0.00 99.90 0.00 95.07 65 + 1 2 0.10 1.62 3.38 0.29 0.00 99.61 0.00 95.07 66 + 1 8 0.11 1.62 3.38 0.28 0.00 99.61 0.00 95.07 67 + 2 4 0.01 1.62 3.38 0.01 0.00 99.98 0.00 95.07 68 + 2 10 0.01 1.61 3.38 0.02 0.00 99.98 0.00 95.07 69 + 8 1 0.07 1.62 3.38 0.15 0.00 99.78 0.00 95.07 70 + 8 7 0.03 1.62 3.38 0.19 0.00 99.78 0.00 95.07 71 + 9 3 0.01 1.62 3.38 0.02 0.00 99.98 0.00 95.07 72 + 9 9 0.01 1.62 3.38 0.02 0.00 99.98 0.00 95.07 73 + 10 5 0.01 1.62 3.38 0.13 0.00 99.86 0.00 95.07 74 + 10 11 0.08 1.62 3.38 0.05 0.00 99.86 0.00 95.07 75 + .fi 76 + .SH VERBOSE EXAMPLE 77 + The "-v" option adds verbosity to the output: 78 + 79 + .nf 80 + GenuineIntel 11 CPUID levels; family:model:stepping 0x6:2c:2 (6:44:2) 81 + 12 * 133 = 1600 MHz max efficiency 82 + 25 * 133 = 3333 MHz TSC frequency 83 + 26 * 133 = 3467 MHz max turbo 4 active cores 84 + 26 * 133 = 3467 MHz max turbo 3 active cores 85 + 27 * 133 = 3600 MHz max turbo 2 active cores 86 + 27 * 133 = 3600 MHz max turbo 1 active cores 87 + 88 + .fi 89 + The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency 90 + available at the minimum package voltage. The \fBTSC frequency\fP is the nominal 91 + maximum frequency of the processor if turbo-mode were not available. This frequency 92 + should be sustainable on all CPUs indefinitely, given nominal power and cooling. 93 + The remaining rows show what maximum turbo frequency is possible 94 + depending on the number of idle cores. Note that this information is 95 + not available on all processors. 
96 + .SH FORK EXAMPLE 97 + If turbostat is invoked with a command, it will fork that command 98 + and output the statistics gathered when the command exits. 99 + e.g. Here a cycle soaker is run on 1 CPU (see %c0) for a few seconds 100 + until ^C while the other CPUs are mostly idle: 101 + 102 + .nf 103 + [root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null 104 + 105 + ^Ccore CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6 106 + 8.49 3.63 3.38 16.23 0.66 74.63 0.00 0.00 107 + 0 0 1.22 3.62 3.38 32.18 0.00 66.60 0.00 0.00 108 + 0 6 0.40 3.61 3.38 33.00 0.00 66.60 0.00 0.00 109 + 1 2 0.11 3.14 3.38 0.19 3.95 95.75 0.00 0.00 110 + 1 8 0.05 2.88 3.38 0.25 3.95 95.75 0.00 0.00 111 + 2 4 0.00 3.13 3.38 0.02 0.00 99.98 0.00 0.00 112 + 2 10 0.00 3.09 3.38 0.02 0.00 99.98 0.00 0.00 113 + 8 1 0.04 3.50 3.38 14.43 0.00 85.54 0.00 0.00 114 + 8 7 0.03 2.98 3.38 14.43 0.00 85.54 0.00 0.00 115 + 9 3 0.00 3.16 3.38 100.00 0.00 0.00 0.00 0.00 116 + 9 9 99.93 3.63 3.38 0.06 0.00 0.00 0.00 0.00 117 + 10 5 0.01 2.82 3.38 0.08 0.00 99.91 0.00 0.00 118 + 10 11 0.02 3.36 3.38 0.06 0.00 99.91 0.00 0.00 119 + 6.950866 sec 120 + 121 + .fi 122 + Above the cycle soaker drives cpu9 up to the 3.6 GHz turbo limit 123 + while the other processors are generally in various states of idle. 124 + 125 + Note that cpu3 is an HT sibling sharing core9 126 + with cpu9, and thus it is unable to get to an idle state 127 + deeper than c1 while cpu9 is busy. 128 + 129 + Note that turbostat reports average GHz of 3.61, while 130 + the arithmetic average of the GHz column above is 3.24. 131 + This is a weighted average, where the weight is %c0. i.e. it is the total number of 132 + un-halted cycles elapsed per time divided by the number of CPUs. 133 + .SH NOTES 134 + 135 + .B "turbostat " 136 + must be run as root. 137 + 138 + .B "turbostat " 139 + reads hardware counters, but doesn't write them. 140 + So it will not interfere with the OS or other programs, including 141 + multiple invocations of itself. 
142 + 143 + \fBturbostat \fP 144 + may work poorly on Linux-2.6.20 through 2.6.29, 145 + as \fBacpi-cpufreq \fPperiodically cleared the APERF and MPERF 146 + in those kernels. 147 + 148 + The APERF, MPERF MSRs are defined to count non-halted cycles. 149 + Although it is not guaranteed by the architecture, turbostat assumes 150 + that they count at TSC rate, which is true on all processors tested to date. 151 + 152 + .SH REFERENCES 153 + "Intel® Turbo Boost Technology 154 + in Intel® Core™ Microarchitecture (Nehalem) Based Processors" 155 + http://download.intel.com/design/processor/applnots/320354.pdf 156 + 157 + "Intel® 64 and IA-32 Architectures Software Developer's Manual 158 + Volume 3B: System Programming Guide" 159 + http://www.intel.com/products/processor/manuals/ 160 + 161 + .SH FILES 162 + .ta 163 + .nf 164 + /dev/cpu/*/msr 165 + .fi 166 + 167 + .SH "SEE ALSO" 168 + msr(4), vmstat(8) 169 + .PP 170 + .SH AUTHORS 171 + .nf 172 + Written by Len Brown <len.brown@intel.com>
+1048
tools/power/x86/turbostat/turbostat.c
··· 1 + /* 2 + * turbostat -- show CPU frequency and C-state residency 3 + * on modern Intel turbo-capable processors. 4 + * 5 + * Copyright (c) 2010, Intel Corporation. 6 + * Len Brown <len.brown@intel.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms and conditions of the GNU General Public License, 10 + * version 2, as published by the Free Software Foundation. 11 + * 12 + * This program is distributed in the hope it will be useful, but WITHOUT 13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 + * more details. 16 + * 17 + * You should have received a copy of the GNU General Public License along with 18 + * this program; if not, write to the Free Software Foundation, Inc., 19 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 + */ 21 + 22 + #include <stdio.h> 23 + #include <unistd.h> 24 + #include <sys/types.h> 25 + #include <sys/wait.h> 26 + #include <sys/stat.h> 27 + #include <sys/resource.h> 28 + #include <fcntl.h> 29 + #include <signal.h> 30 + #include <sys/time.h> 31 + #include <stdlib.h> 32 + #include <dirent.h> 33 + #include <string.h> 34 + #include <ctype.h> 35 + 36 + #define MSR_TSC 0x10 37 + #define MSR_NEHALEM_PLATFORM_INFO 0xCE 38 + #define MSR_NEHALEM_TURBO_RATIO_LIMIT 0x1AD 39 + #define MSR_APERF 0xE8 40 + #define MSR_MPERF 0xE7 41 + #define MSR_PKG_C2_RESIDENCY 0x60D /* SNB only */ 42 + #define MSR_PKG_C3_RESIDENCY 0x3F8 43 + #define MSR_PKG_C6_RESIDENCY 0x3F9 44 + #define MSR_PKG_C7_RESIDENCY 0x3FA /* SNB only */ 45 + #define MSR_CORE_C3_RESIDENCY 0x3FC 46 + #define MSR_CORE_C6_RESIDENCY 0x3FD 47 + #define MSR_CORE_C7_RESIDENCY 0x3FE /* SNB only */ 48 + 49 + char *proc_stat = "/proc/stat"; 50 + unsigned int interval_sec = 5; /* set with -i interval_sec */ 51 + unsigned int verbose; /* set with -v */ 52 + unsigned int skip_c0; 53 + unsigned int skip_c1; 54 + unsigned 
int do_nhm_cstates; 55 + unsigned int do_snb_cstates; 56 + unsigned int has_aperf; 57 + unsigned int units = 1000000000; /* Ghz etc */ 58 + unsigned int genuine_intel; 59 + unsigned int has_invariant_tsc; 60 + unsigned int do_nehalem_platform_info; 61 + unsigned int do_nehalem_turbo_ratio_limit; 62 + unsigned int extra_msr_offset; 63 + double bclk; 64 + unsigned int show_pkg; 65 + unsigned int show_core; 66 + unsigned int show_cpu; 67 + 68 + int aperf_mperf_unstable; 69 + int backwards_count; 70 + char *progname; 71 + int need_reinitialize; 72 + 73 + int num_cpus; 74 + 75 + typedef struct per_cpu_counters { 76 + unsigned long long tsc; /* per thread */ 77 + unsigned long long aperf; /* per thread */ 78 + unsigned long long mperf; /* per thread */ 79 + unsigned long long c1; /* per thread (calculated) */ 80 + unsigned long long c3; /* per core */ 81 + unsigned long long c6; /* per core */ 82 + unsigned long long c7; /* per core */ 83 + unsigned long long pc2; /* per package */ 84 + unsigned long long pc3; /* per package */ 85 + unsigned long long pc6; /* per package */ 86 + unsigned long long pc7; /* per package */ 87 + unsigned long long extra_msr; /* per thread */ 88 + int pkg; 89 + int core; 90 + int cpu; 91 + struct per_cpu_counters *next; 92 + } PCC; 93 + 94 + PCC *pcc_even; 95 + PCC *pcc_odd; 96 + PCC *pcc_delta; 97 + PCC *pcc_average; 98 + struct timeval tv_even; 99 + struct timeval tv_odd; 100 + struct timeval tv_delta; 101 + 102 + unsigned long long get_msr(int cpu, off_t offset) 103 + { 104 + ssize_t retval; 105 + unsigned long long msr; 106 + char pathname[32]; 107 + int fd; 108 + 109 + sprintf(pathname, "/dev/cpu/%d/msr", cpu); 110 + fd = open(pathname, O_RDONLY); 111 + if (fd < 0) { 112 + perror(pathname); 113 + need_reinitialize = 1; 114 + return 0; 115 + } 116 + 117 + retval = pread(fd, &msr, sizeof msr, offset); 118 + if (retval != sizeof msr) { 119 + fprintf(stderr, "cpu%d pread(..., 0x%zx) = %jd\n", 120 + cpu, offset, retval); 121 + exit(-2); 122 + 
} 123 + 124 + close(fd); 125 + return msr; 126 + } 127 + 128 + void print_header() 129 + { 130 + if (show_pkg) 131 + fprintf(stderr, "pkg "); 132 + if (show_core) 133 + fprintf(stderr, "core"); 134 + if (show_cpu) 135 + fprintf(stderr, " CPU"); 136 + if (do_nhm_cstates) 137 + fprintf(stderr, " %%c0 "); 138 + if (has_aperf) 139 + fprintf(stderr, " GHz"); 140 + fprintf(stderr, " TSC"); 141 + if (do_nhm_cstates) 142 + fprintf(stderr, " %%c1 "); 143 + if (do_nhm_cstates) 144 + fprintf(stderr, " %%c3 "); 145 + if (do_nhm_cstates) 146 + fprintf(stderr, " %%c6 "); 147 + if (do_snb_cstates) 148 + fprintf(stderr, " %%c7 "); 149 + if (do_snb_cstates) 150 + fprintf(stderr, " %%pc2 "); 151 + if (do_nhm_cstates) 152 + fprintf(stderr, " %%pc3 "); 153 + if (do_nhm_cstates) 154 + fprintf(stderr, " %%pc6 "); 155 + if (do_snb_cstates) 156 + fprintf(stderr, " %%pc7 "); 157 + if (extra_msr_offset) 158 + fprintf(stderr, " MSR 0x%x ", extra_msr_offset); 159 + 160 + putc('\n', stderr); 161 + } 162 + 163 + void dump_pcc(PCC *pcc) 164 + { 165 + fprintf(stderr, "package: %d ", pcc->pkg); 166 + fprintf(stderr, "core:: %d ", pcc->core); 167 + fprintf(stderr, "CPU: %d ", pcc->cpu); 168 + fprintf(stderr, "TSC: %016llX\n", pcc->tsc); 169 + fprintf(stderr, "c3: %016llX\n", pcc->c3); 170 + fprintf(stderr, "c6: %016llX\n", pcc->c6); 171 + fprintf(stderr, "c7: %016llX\n", pcc->c7); 172 + fprintf(stderr, "aperf: %016llX\n", pcc->aperf); 173 + fprintf(stderr, "pc2: %016llX\n", pcc->pc2); 174 + fprintf(stderr, "pc3: %016llX\n", pcc->pc3); 175 + fprintf(stderr, "pc6: %016llX\n", pcc->pc6); 176 + fprintf(stderr, "pc7: %016llX\n", pcc->pc7); 177 + fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, pcc->extra_msr); 178 + } 179 + 180 + void dump_list(PCC *pcc) 181 + { 182 + printf("dump_list 0x%p\n", pcc); 183 + 184 + for (; pcc; pcc = pcc->next) 185 + dump_pcc(pcc); 186 + } 187 + 188 + void print_pcc(PCC *p) 189 + { 190 + double interval_float; 191 + 192 + interval_float = tv_delta.tv_sec + 
tv_delta.tv_usec/1000000.0; 193 + 194 + /* topology columns, print blanks on 1st (average) line */ 195 + if (p == pcc_average) { 196 + if (show_pkg) 197 + fprintf(stderr, " "); 198 + if (show_core) 199 + fprintf(stderr, " "); 200 + if (show_cpu) 201 + fprintf(stderr, " "); 202 + } else { 203 + if (show_pkg) 204 + fprintf(stderr, "%4d", p->pkg); 205 + if (show_core) 206 + fprintf(stderr, "%4d", p->core); 207 + if (show_cpu) 208 + fprintf(stderr, "%4d", p->cpu); 209 + } 210 + 211 + /* %c0 */ 212 + if (do_nhm_cstates) { 213 + if (!skip_c0) 214 + fprintf(stderr, "%7.2f", 100.0 * p->mperf/p->tsc); 215 + else 216 + fprintf(stderr, " ****"); 217 + } 218 + 219 + /* GHz */ 220 + if (has_aperf) { 221 + if (!aperf_mperf_unstable) { 222 + fprintf(stderr, "%5.2f", 223 + 1.0 * p->tsc / units * p->aperf / 224 + p->mperf / interval_float); 225 + } else { 226 + if (p->aperf > p->tsc || p->mperf > p->tsc) { 227 + fprintf(stderr, " ****"); 228 + } else { 229 + fprintf(stderr, "%4.1f*", 230 + 1.0 * p->tsc / 231 + units * p->aperf / 232 + p->mperf / interval_float); 233 + } 234 + } 235 + } 236 + 237 + /* TSC */ 238 + fprintf(stderr, "%5.2f", 1.0 * p->tsc/units/interval_float); 239 + 240 + if (do_nhm_cstates) { 241 + if (!skip_c1) 242 + fprintf(stderr, "%7.2f", 100.0 * p->c1/p->tsc); 243 + else 244 + fprintf(stderr, " ****"); 245 + } 246 + if (do_nhm_cstates) 247 + fprintf(stderr, "%7.2f", 100.0 * p->c3/p->tsc); 248 + if (do_nhm_cstates) 249 + fprintf(stderr, "%7.2f", 100.0 * p->c6/p->tsc); 250 + if (do_snb_cstates) 251 + fprintf(stderr, "%7.2f", 100.0 * p->c7/p->tsc); 252 + if (do_snb_cstates) 253 + fprintf(stderr, "%7.2f", 100.0 * p->pc2/p->tsc); 254 + if (do_nhm_cstates) 255 + fprintf(stderr, "%7.2f", 100.0 * p->pc3/p->tsc); 256 + if (do_nhm_cstates) 257 + fprintf(stderr, "%7.2f", 100.0 * p->pc6/p->tsc); 258 + if (do_snb_cstates) 259 + fprintf(stderr, "%7.2f", 100.0 * p->pc7/p->tsc); 260 + if (extra_msr_offset) 261 + fprintf(stderr, " 0x%016llx", p->extra_msr); 262 + putc('\n', 
stderr); 263 + } 264 + 265 + void print_counters(PCC *cnt) 266 + { 267 + PCC *pcc; 268 + 269 + print_header(); 270 + 271 + if (num_cpus > 1) 272 + print_pcc(pcc_average); 273 + 274 + for (pcc = cnt; pcc != NULL; pcc = pcc->next) 275 + print_pcc(pcc); 276 + 277 + } 278 + 279 + #define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after)) 280 + 281 + 282 + int compute_delta(PCC *after, PCC *before, PCC *delta) 283 + { 284 + int errors = 0; 285 + int perf_err = 0; 286 + 287 + skip_c0 = skip_c1 = 0; 288 + 289 + for ( ; after && before && delta; 290 + after = after->next, before = before->next, delta = delta->next) { 291 + if (before->cpu != after->cpu) { 292 + printf("cpu configuration changed: %d != %d\n", 293 + before->cpu, after->cpu); 294 + return -1; 295 + } 296 + 297 + if (SUBTRACT_COUNTER(after->tsc, before->tsc, delta->tsc)) { 298 + fprintf(stderr, "cpu%d TSC went backwards %llX to %llX\n", 299 + before->cpu, before->tsc, after->tsc); 300 + errors++; 301 + } 302 + /* check for TSC < 1 Mcycles over interval */ 303 + if (delta->tsc < (1000 * 1000)) { 304 + fprintf(stderr, "Insanely slow TSC rate," 305 + " TSC stops in idle?\n"); 306 + fprintf(stderr, "You can disable all c-states" 307 + " by booting with \"idle=poll\"\n"); 308 + fprintf(stderr, "or just the deep ones with" 309 + " \"processor.max_cstate=1\"\n"); 310 + exit(-3); 311 + } 312 + if (SUBTRACT_COUNTER(after->c3, before->c3, delta->c3)) { 313 + fprintf(stderr, "cpu%d c3 counter went backwards %llX to %llX\n", 314 + before->cpu, before->c3, after->c3); 315 + errors++; 316 + } 317 + if (SUBTRACT_COUNTER(after->c6, before->c6, delta->c6)) { 318 + fprintf(stderr, "cpu%d c6 counter went backwards %llX to %llX\n", 319 + before->cpu, before->c6, after->c6); 320 + errors++; 321 + } 322 + if (SUBTRACT_COUNTER(after->c7, before->c7, delta->c7)) { 323 + fprintf(stderr, "cpu%d c7 counter went backwards %llX to %llX\n", 324 + before->cpu, before->c7, after->c7); 325 + errors++; 326 + } 
327 + if (SUBTRACT_COUNTER(after->pc2, before->pc2, delta->pc2)) { 328 + fprintf(stderr, "cpu%d pc2 counter went backwards %llX to %llX\n", 329 + before->cpu, before->pc2, after->pc2); 330 + errors++; 331 + } 332 + if (SUBTRACT_COUNTER(after->pc3, before->pc3, delta->pc3)) { 333 + fprintf(stderr, "cpu%d pc3 counter went backwards %llX to %llX\n", 334 + before->cpu, before->pc3, after->pc3); 335 + errors++; 336 + } 337 + if (SUBTRACT_COUNTER(after->pc6, before->pc6, delta->pc6)) { 338 + fprintf(stderr, "cpu%d pc6 counter went backwards %llX to %llX\n", 339 + before->cpu, before->pc6, after->pc6); 340 + errors++; 341 + } 342 + if (SUBTRACT_COUNTER(after->pc7, before->pc7, delta->pc7)) { 343 + fprintf(stderr, "cpu%d pc7 counter went backwards %llX to %llX\n", 344 + before->cpu, before->pc7, after->pc7); 345 + errors++; 346 + } 347 + 348 + perf_err = SUBTRACT_COUNTER(after->aperf, before->aperf, delta->aperf); 349 + if (perf_err) { 350 + fprintf(stderr, "cpu%d aperf counter went backwards %llX to %llX\n", 351 + before->cpu, before->aperf, after->aperf); 352 + } 353 + perf_err |= SUBTRACT_COUNTER(after->mperf, before->mperf, delta->mperf); 354 + if (perf_err) { 355 + fprintf(stderr, "cpu%d mperf counter went backwards %llX to %llX\n", 356 + before->cpu, before->mperf, after->mperf); 357 + } 358 + if (perf_err) { 359 + if (!aperf_mperf_unstable) { 360 + fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname); 361 + fprintf(stderr, "* Frequency results do not cover entire interval *\n"); 362 + fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n"); 363 + 364 + aperf_mperf_unstable = 1; 365 + } 366 + /* 367 + * mperf delta is likely a huge "positive" number 368 + * can not use it for calculating c0 time 369 + */ 370 + skip_c0 = 1; 371 + skip_c1 = 1; 372 + } 373 + 374 + /* 375 + * As mperf and tsc collection are not atomic, 376 + * it is possible for mperf's non-halted cycles 377 + * to exceed TSC's all cycles: show c1 = 0% in that case. 
378 + */ 379 + if (delta->mperf > delta->tsc) 380 + delta->c1 = 0; 381 + else /* normal case, derive c1 */ 382 + delta->c1 = delta->tsc - delta->mperf 383 + - delta->c3 - delta->c6 - delta->c7; 384 + 385 + if (delta->mperf == 0) 386 + delta->mperf = 1; /* divide by 0 protection */ 387 + 388 + /* 389 + * for "extra msr", just copy the latest w/o subtracting 390 + */ 391 + delta->extra_msr = after->extra_msr; 392 + if (errors) { 393 + fprintf(stderr, "ERROR cpu%d before:\n", before->cpu); 394 + dump_pcc(before); 395 + fprintf(stderr, "ERROR cpu%d after:\n", before->cpu); 396 + dump_pcc(after); 397 + errors = 0; 398 + } 399 + } 400 + return 0; 401 + } 402 + 403 + void compute_average(PCC *delta, PCC *avg) 404 + { 405 + PCC *sum; 406 + 407 + sum = calloc(1, sizeof(PCC)); 408 + if (sum == NULL) { 409 + perror("calloc sum"); 410 + exit(1); 411 + } 412 + 413 + for (; delta; delta = delta->next) { 414 + sum->tsc += delta->tsc; 415 + sum->c1 += delta->c1; 416 + sum->c3 += delta->c3; 417 + sum->c6 += delta->c6; 418 + sum->c7 += delta->c7; 419 + sum->aperf += delta->aperf; 420 + sum->mperf += delta->mperf; 421 + sum->pc2 += delta->pc2; 422 + sum->pc3 += delta->pc3; 423 + sum->pc6 += delta->pc6; 424 + sum->pc7 += delta->pc7; 425 + } 426 + avg->tsc = sum->tsc/num_cpus; 427 + avg->c1 = sum->c1/num_cpus; 428 + avg->c3 = sum->c3/num_cpus; 429 + avg->c6 = sum->c6/num_cpus; 430 + avg->c7 = sum->c7/num_cpus; 431 + avg->aperf = sum->aperf/num_cpus; 432 + avg->mperf = sum->mperf/num_cpus; 433 + avg->pc2 = sum->pc2/num_cpus; 434 + avg->pc3 = sum->pc3/num_cpus; 435 + avg->pc6 = sum->pc6/num_cpus; 436 + avg->pc7 = sum->pc7/num_cpus; 437 + 438 + free(sum); 439 + } 440 + 441 + void get_counters(PCC *pcc) 442 + { 443 + for ( ; pcc; pcc = pcc->next) { 444 + pcc->tsc = get_msr(pcc->cpu, MSR_TSC); 445 + if (do_nhm_cstates) 446 + pcc->c3 = get_msr(pcc->cpu, MSR_CORE_C3_RESIDENCY); 447 + if (do_nhm_cstates) 448 + pcc->c6 = get_msr(pcc->cpu, MSR_CORE_C6_RESIDENCY); 449 + if (do_snb_cstates) 450 + 
pcc->c7 = get_msr(pcc->cpu, MSR_CORE_C7_RESIDENCY); 451 + if (has_aperf) 452 + pcc->aperf = get_msr(pcc->cpu, MSR_APERF); 453 + if (has_aperf) 454 + pcc->mperf = get_msr(pcc->cpu, MSR_MPERF); 455 + if (do_snb_cstates) 456 + pcc->pc2 = get_msr(pcc->cpu, MSR_PKG_C2_RESIDENCY); 457 + if (do_nhm_cstates) 458 + pcc->pc3 = get_msr(pcc->cpu, MSR_PKG_C3_RESIDENCY); 459 + if (do_nhm_cstates) 460 + pcc->pc6 = get_msr(pcc->cpu, MSR_PKG_C6_RESIDENCY); 461 + if (do_snb_cstates) 462 + pcc->pc7 = get_msr(pcc->cpu, MSR_PKG_C7_RESIDENCY); 463 + if (extra_msr_offset) 464 + pcc->extra_msr = get_msr(pcc->cpu, extra_msr_offset); 465 + } 466 + } 467 + 468 + 469 + void print_nehalem_info() 470 + { 471 + unsigned long long msr; 472 + unsigned int ratio; 473 + 474 + if (!do_nehalem_platform_info) 475 + return; 476 + 477 + msr = get_msr(0, MSR_NEHALEM_PLATFORM_INFO); 478 + 479 + ratio = (msr >> 40) & 0xFF; 480 + fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n", 481 + ratio, bclk, ratio * bclk); 482 + 483 + ratio = (msr >> 8) & 0xFF; 484 + fprintf(stderr, "%d * %.0f = %.0f MHz TSC frequency\n", 485 + ratio, bclk, ratio * bclk); 486 + 487 + if (verbose > 1) 488 + fprintf(stderr, "MSR_NEHALEM_PLATFORM_INFO: 0x%llx\n", msr); 489 + 490 + if (!do_nehalem_turbo_ratio_limit) 491 + return; 492 + 493 + msr = get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT); 494 + 495 + ratio = (msr >> 24) & 0xFF; 496 + if (ratio) 497 + fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n", 498 + ratio, bclk, ratio * bclk); 499 + 500 + ratio = (msr >> 16) & 0xFF; 501 + if (ratio) 502 + fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n", 503 + ratio, bclk, ratio * bclk); 504 + 505 + ratio = (msr >> 8) & 0xFF; 506 + if (ratio) 507 + fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n", 508 + ratio, bclk, ratio * bclk); 509 + 510 + ratio = (msr >> 0) & 0xFF; 511 + if (ratio) 512 + fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n", 513 + ratio, bclk, ratio * bclk); 
514 + 515 + } 516 + 517 + void free_counter_list(PCC *list) 518 + { 519 + PCC *p; 520 + 521 + for (p = list; p; ) { 522 + PCC *free_me; 523 + 524 + free_me = p; 525 + p = p->next; 526 + free(free_me); 527 + } 528 + return; 529 + } 530 + 531 + void free_all_counters(void) 532 + { 533 + free_counter_list(pcc_even); 534 + pcc_even = NULL; 535 + 536 + free_counter_list(pcc_odd); 537 + pcc_odd = NULL; 538 + 539 + free_counter_list(pcc_delta); 540 + pcc_delta = NULL; 541 + 542 + free_counter_list(pcc_average); 543 + pcc_average = NULL; 544 + } 545 + 546 + void insert_cpu_counters(PCC **list, PCC *new) 547 + { 548 + PCC *prev; 549 + 550 + /* 551 + * list was empty 552 + */ 553 + if (*list == NULL) { 554 + new->next = *list; 555 + *list = new; 556 + return; 557 + } 558 + 559 + show_cpu = 1; /* there is more than one CPU */ 560 + 561 + /* 562 + * insert on front of list. 563 + * It is sorted by ascending package#, core#, cpu# 564 + */ 565 + if (((*list)->pkg > new->pkg) || 566 + (((*list)->pkg == new->pkg) && ((*list)->core > new->core)) || 567 + (((*list)->pkg == new->pkg) && ((*list)->core == new->core) && ((*list)->cpu > new->cpu))) { 568 + new->next = *list; 569 + *list = new; 570 + return; 571 + } 572 + 573 + prev = *list; 574 + 575 + while (prev->next && (prev->next->pkg < new->pkg)) { 576 + prev = prev->next; 577 + show_pkg = 1; /* there is more than 1 package */ 578 + } 579 + 580 + while (prev->next && (prev->next->pkg == new->pkg) 581 + && (prev->next->core < new->core)) { 582 + prev = prev->next; 583 + show_core = 1; /* there is more than 1 core */ 584 + } 585 + 586 + while (prev->next && (prev->next->pkg == new->pkg) 587 + && (prev->next->core == new->core) 588 + && (prev->next->cpu < new->cpu)) { 589 + prev = prev->next; 590 + } 591 + 592 + /* 593 + * insert after "prev" 594 + */ 595 + new->next = prev->next; 596 + prev->next = new; 597 + 598 + return; 599 + } 600 + 601 + void alloc_new_cpu_counters(int pkg, int core, int cpu) 602 + { 603 + PCC *new; 604 + 605 + 
if (verbose > 1) 606 + printf("pkg%d core%d, cpu%d\n", pkg, core, cpu); 607 + 608 + new = (PCC *)calloc(1, sizeof(PCC)); 609 + if (new == NULL) { 610 + perror("calloc"); 611 + exit(1); 612 + } 613 + new->pkg = pkg; 614 + new->core = core; 615 + new->cpu = cpu; 616 + insert_cpu_counters(&pcc_odd, new); 617 + 618 + new = (PCC *)calloc(1, sizeof(PCC)); 619 + if (new == NULL) { 620 + perror("calloc"); 621 + exit(1); 622 + } 623 + new->pkg = pkg; 624 + new->core = core; 625 + new->cpu = cpu; 626 + insert_cpu_counters(&pcc_even, new); 627 + 628 + new = (PCC *)calloc(1, sizeof(PCC)); 629 + if (new == NULL) { 630 + perror("calloc"); 631 + exit(1); 632 + } 633 + new->pkg = pkg; 634 + new->core = core; 635 + new->cpu = cpu; 636 + insert_cpu_counters(&pcc_delta, new); 637 + 638 + new = (PCC *)calloc(1, sizeof(PCC)); 639 + if (new == NULL) { 640 + perror("calloc"); 641 + exit(1); 642 + } 643 + new->pkg = pkg; 644 + new->core = core; 645 + new->cpu = cpu; 646 + pcc_average = new; 647 + } 648 + 649 + int get_physical_package_id(int cpu) 650 + { 651 + char path[64]; 652 + FILE *filep; 653 + int pkg; 654 + 655 + sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu); 656 + filep = fopen(path, "r"); 657 + if (filep == NULL) { 658 + perror(path); 659 + exit(1); 660 + } 661 + fscanf(filep, "%d", &pkg); 662 + fclose(filep); 663 + return pkg; 664 + } 665 + 666 + int get_core_id(int cpu) 667 + { 668 + char path[64]; 669 + FILE *filep; 670 + int core; 671 + 672 + sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); 673 + filep = fopen(path, "r"); 674 + if (filep == NULL) { 675 + perror(path); 676 + exit(1); 677 + } 678 + fscanf(filep, "%d", &core); 679 + fclose(filep); 680 + return core; 681 + } 682 + 683 + /* 684 + * run func(index, cpu) on every cpu in /proc/stat 685 + */ 686 + 687 + int for_all_cpus(void (func)(int, int, int)) 688 + { 689 + FILE *fp; 690 + int cpu_count; 691 + int retval; 692 + 693 + fp = fopen(proc_stat, "r"); 694 + if (fp 
== NULL) { 695 + perror(proc_stat); 696 + exit(1); 697 + } 698 + 699 + retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n"); 700 + if (retval != 0) { 701 + perror("/proc/stat format"); 702 + exit(1); 703 + } 704 + 705 + for (cpu_count = 0; ; cpu_count++) { 706 + int cpu; 707 + 708 + retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu); 709 + if (retval != 1) 710 + break; 711 + 712 + func(get_physical_package_id(cpu), get_core_id(cpu), cpu); 713 + } 714 + fclose(fp); 715 + return cpu_count; 716 + } 717 + 718 + void re_initialize(void) 719 + { 720 + printf("turbostat: topology changed, re-initializing.\n"); 721 + free_all_counters(); 722 + num_cpus = for_all_cpus(alloc_new_cpu_counters); 723 + need_reinitialize = 0; 724 + printf("num_cpus is now %d\n", num_cpus); 725 + } 726 + 727 + void dummy(int pkg, int core, int cpu) { return; } 728 + /* 729 + * check to see if a cpu came on-line 730 + */ 731 + void verify_num_cpus() 732 + { 733 + int new_num_cpus; 734 + 735 + new_num_cpus = for_all_cpus(dummy); 736 + 737 + if (new_num_cpus != num_cpus) { 738 + if (verbose) 739 + printf("num_cpus was %d, is now %d\n", 740 + num_cpus, new_num_cpus); 741 + need_reinitialize = 1; 742 + } 743 + 744 + return; 745 + } 746 + 747 + void turbostat_loop() 748 + { 749 + restart: 750 + get_counters(pcc_even); 751 + gettimeofday(&tv_even, (struct timezone *)NULL); 752 + 753 + while (1) { 754 + verify_num_cpus(); 755 + if (need_reinitialize) { 756 + re_initialize(); 757 + goto restart; 758 + } 759 + sleep(interval_sec); 760 + get_counters(pcc_odd); 761 + gettimeofday(&tv_odd, (struct timezone *)NULL); 762 + 763 + compute_delta(pcc_odd, pcc_even, pcc_delta); 764 + timersub(&tv_odd, &tv_even, &tv_delta); 765 + compute_average(pcc_delta, pcc_average); 766 + print_counters(pcc_delta); 767 + if (need_reinitialize) { 768 + re_initialize(); 769 + goto restart; 770 + } 771 + sleep(interval_sec); 772 + get_counters(pcc_even); 773 + gettimeofday(&tv_even, (struct 
timezone *)NULL); 774 + compute_delta(pcc_even, pcc_odd, pcc_delta); 775 + timersub(&tv_even, &tv_odd, &tv_delta); 776 + compute_average(pcc_delta, pcc_average); 777 + print_counters(pcc_delta); 778 + } 779 + } 780 + 781 + void check_dev_msr() 782 + { 783 + struct stat sb; 784 + 785 + if (stat("/dev/cpu/0/msr", &sb)) { 786 + fprintf(stderr, "no /dev/cpu/0/msr\n"); 787 + fprintf(stderr, "Try \"# modprobe msr\"\n"); 788 + exit(-5); 789 + } 790 + } 791 + 792 + void check_super_user() 793 + { 794 + if (getuid() != 0) { 795 + fprintf(stderr, "must be root\n"); 796 + exit(-6); 797 + } 798 + } 799 + 800 + int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model) 801 + { 802 + if (!genuine_intel) 803 + return 0; 804 + 805 + if (family != 6) 806 + return 0; 807 + 808 + switch (model) { 809 + case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */ 810 + case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */ 811 + case 0x1F: /* Core i7 and i5 Processor - Nehalem */ 812 + case 0x25: /* Westmere Client - Clarkdale, Arrandale */ 813 + case 0x2C: /* Westmere EP - Gulftown */ 814 + case 0x2A: /* SNB */ 815 + case 0x2D: /* SNB Xeon */ 816 + return 1; 817 + case 0x2E: /* Nehalem-EX Xeon - Beckton */ 818 + case 0x2F: /* Westmere-EX Xeon - Eagleton */ 819 + default: 820 + return 0; 821 + } 822 + } 823 + 824 + int is_snb(unsigned int family, unsigned int model) 825 + { 826 + if (!genuine_intel) 827 + return 0; 828 + 829 + switch (model) { 830 + case 0x2A: 831 + case 0x2D: 832 + return 1; 833 + } 834 + return 0; 835 + } 836 + 837 + double discover_bclk(unsigned int family, unsigned int model) 838 + { 839 + if (is_snb(family, model)) 840 + return 100.00; 841 + else 842 + return 133.33; 843 + } 844 + 845 + void check_cpuid() 846 + { 847 + unsigned int eax, ebx, ecx, edx, max_level; 848 + unsigned int fms, family, model, stepping; 849 + 850 + eax = ebx = ecx = edx = 0; 851 + 852 + asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" 
(ecx), "=d" (edx) : "a" (0)); 853 + 854 + if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e) 855 + genuine_intel = 1; 856 + 857 + if (verbose) 858 + fprintf(stderr, "%.4s%.4s%.4s ", 859 + (char *)&ebx, (char *)&edx, (char *)&ecx); 860 + 861 + asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx"); 862 + family = (fms >> 8) & 0xf; 863 + model = (fms >> 4) & 0xf; 864 + stepping = fms & 0xf; 865 + if (family == 6 || family == 0xf) 866 + model += ((fms >> 16) & 0xf) << 4; 867 + 868 + if (verbose) 869 + fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n", 870 + max_level, family, model, stepping, family, model, stepping); 871 + 872 + if (!(edx & (1 << 5))) { 873 + fprintf(stderr, "CPUID: no MSR\n"); 874 + exit(1); 875 + } 876 + 877 + /* 878 + * check max extended function levels of CPUID. 879 + * This is needed to check for invariant TSC. 880 + * This check is valid for both Intel and AMD. 881 + */ 882 + ebx = ecx = edx = 0; 883 + asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000)); 884 + 885 + if (max_level < 0x80000007) { 886 + fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level); 887 + exit(1); 888 + } 889 + 890 + /* 891 + * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8 892 + * this check is valid for both Intel and AMD 893 + */ 894 + asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007)); 895 + has_invariant_tsc = edx && (1 << 8); 896 + 897 + if (!has_invariant_tsc) { 898 + fprintf(stderr, "No invariant TSC\n"); 899 + exit(1); 900 + } 901 + 902 + /* 903 + * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0 904 + * this check is valid for both Intel and AMD 905 + */ 906 + 907 + asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6)); 908 + has_aperf = ecx && (1 << 0); 909 + if (!has_aperf) { 910 + fprintf(stderr, "No APERF MSR\n"); 911 + exit(1); 912 + } 913 + 914 + do_nehalem_platform_info = 
genuine_intel && has_invariant_tsc; 915 + do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */ 916 + do_snb_cstates = is_snb(family, model); 917 + bclk = discover_bclk(family, model); 918 + 919 + do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model); 920 + } 921 + 922 + 923 + void usage() 924 + { 925 + fprintf(stderr, "%s: [-v] [-M MSR#] [-i interval_sec | command ...]\n", 926 + progname); 927 + exit(1); 928 + } 929 + 930 + 931 + /* 932 + * in /dev/cpu/ return success for names that are numbers 933 + * ie. filter out ".", "..", "microcode". 934 + */ 935 + int dir_filter(const struct dirent *dirp) 936 + { 937 + if (isdigit(dirp->d_name[0])) 938 + return 1; 939 + else 940 + return 0; 941 + } 942 + 943 + int open_dev_cpu_msr(int dummy1) 944 + { 945 + return 0; 946 + } 947 + 948 + void turbostat_init() 949 + { 950 + check_cpuid(); 951 + 952 + check_dev_msr(); 953 + check_super_user(); 954 + 955 + num_cpus = for_all_cpus(alloc_new_cpu_counters); 956 + 957 + if (verbose) 958 + print_nehalem_info(); 959 + } 960 + 961 + int fork_it(char **argv) 962 + { 963 + int retval; 964 + pid_t child_pid; 965 + get_counters(pcc_even); 966 + gettimeofday(&tv_even, (struct timezone *)NULL); 967 + 968 + child_pid = fork(); 969 + if (!child_pid) { 970 + /* child */ 971 + execvp(argv[0], argv); 972 + } else { 973 + int status; 974 + 975 + /* parent */ 976 + if (child_pid == -1) { 977 + perror("fork"); 978 + exit(1); 979 + } 980 + 981 + signal(SIGINT, SIG_IGN); 982 + signal(SIGQUIT, SIG_IGN); 983 + if (waitpid(child_pid, &status, 0) == -1) { 984 + perror("wait"); 985 + exit(1); 986 + } 987 + } 988 + get_counters(pcc_odd); 989 + gettimeofday(&tv_odd, (struct timezone *)NULL); 990 + retval = compute_delta(pcc_odd, pcc_even, pcc_delta); 991 + 992 + timersub(&tv_odd, &tv_even, &tv_delta); 993 + compute_average(pcc_delta, pcc_average); 994 + if (!retval) 995 + print_counters(pcc_delta); 996 + 997 + fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + 
tv_delta.tv_usec/1000000.0);; 998 + 999 + return 0; 1000 + } 1001 + 1002 + void cmdline(int argc, char **argv) 1003 + { 1004 + int opt; 1005 + 1006 + progname = argv[0]; 1007 + 1008 + while ((opt = getopt(argc, argv, "+vi:M:")) != -1) { 1009 + switch (opt) { 1010 + case 'v': 1011 + verbose++; 1012 + break; 1013 + case 'i': 1014 + interval_sec = atoi(optarg); 1015 + break; 1016 + case 'M': 1017 + sscanf(optarg, "%x", &extra_msr_offset); 1018 + if (verbose > 1) 1019 + fprintf(stderr, "MSR 0x%X\n", extra_msr_offset); 1020 + break; 1021 + default: 1022 + usage(); 1023 + } 1024 + } 1025 + } 1026 + 1027 + int main(int argc, char **argv) 1028 + { 1029 + cmdline(argc, argv); 1030 + 1031 + if (verbose > 1) 1032 + fprintf(stderr, "turbostat Dec 6, 2010" 1033 + " - Len Brown <lenb@kernel.org>\n"); 1034 + if (verbose > 1) 1035 + fprintf(stderr, "http://userweb.kernel.org/~lenb/acpi/utils/pmtools/turbostat/\n"); 1036 + 1037 + turbostat_init(); 1038 + 1039 + /* 1040 + * if any params left, it must be a command to fork 1041 + */ 1042 + if (argc - optind) 1043 + return fork_it(argv + optind); 1044 + else 1045 + turbostat_loop(); 1046 + 1047 + return 0; 1048 + }
+8
tools/power/x86/x86_energy_perf_policy/Makefile
··· 1 + x86_energy_perf_policy : x86_energy_perf_policy.c 2 + 3 + clean : 4 + rm -f x86_energy_perf_policy 5 + 6 + install : 7 + install x86_energy_perf_policy /usr/bin/ 8 + install x86_energy_perf_policy.8 /usr/share/man/man8/
+104
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
··· 1 + .\" This page Copyright (C) 2010 Len Brown <len.brown@intel.com> 2 + .\" Distributed under the GPL, Copyleft 1994. 3 + .TH X86_ENERGY_PERF_POLICY 8 4 + .SH NAME 5 + x86_energy_perf_policy \- read or write MSR_IA32_ENERGY_PERF_BIAS 6 + .SH SYNOPSIS 7 + .ft B 8 + .B x86_energy_perf_policy 9 + .RB [ "\-c cpu" ] 10 + .RB [ "\-v" ] 11 + .RB "\-r" 12 + .br 13 + .B x86_energy_perf_policy 14 + .RB [ "\-c cpu" ] 15 + .RB [ "\-v" ] 16 + .RB 'performance' 17 + .br 18 + .B x86_energy_perf_policy 19 + .RB [ "\-c cpu" ] 20 + .RB [ "\-v" ] 21 + .RB 'normal' 22 + .br 23 + .B x86_energy_perf_policy 24 + .RB [ "\-c cpu" ] 25 + .RB [ "\-v" ] 26 + .RB 'powersave' 27 + .br 28 + .B x86_energy_perf_policy 29 + .RB [ "\-c cpu" ] 30 + .RB [ "\-v" ] 31 + .RB n 32 + .br 33 + .SH DESCRIPTION 34 + \fBx86_energy_perf_policy\fP 35 + allows software to convey 36 + its policy for the relative importance of performance 37 + versus energy savings to the processor. 38 + 39 + The processor uses this information in model-specific ways 40 + when it must select trade-offs between performance and 41 + energy efficiency. 42 + 43 + This policy hint does not supersede Processor Performance states 44 + (P-states) or CPU Idle power states (C-states), but allows 45 + software to have influence where it would otherwise be unable 46 + to express a preference. 47 + 48 + For example, this setting may tell the hardware how 49 + aggressively or conservatively to control frequency 50 + in the "turbo range" above the explicitly OS-controlled 51 + P-state frequency range. It may also tell the hardware 52 + how aggressively it should enter the OS requested C-states. 53 + 54 + Support for this feature is indicated by CPUID.06H.ECX.bit3 55 + per the Intel Architectures Software Developer's Manual. 56 + 57 + .SS Options 58 + \fB-c\fP limits operation to a single CPU. 59 + The default is to operate on all CPUs. 
60 + Note that MSR_IA32_ENERGY_PERF_BIAS is defined per 61 + logical processor, but that the initial implementations 62 + of the MSR were shared among all processors in each package. 63 + .PP 64 + \fB-v\fP increases verbosity. By default 65 + x86_energy_perf_policy is silent. 66 + .PP 67 + \fB-r\fP is for "read-only" mode - the unchanged state 68 + is read and displayed. 69 + .PP 70 + .I performance 71 + Set a policy where performance is paramount. 72 + The processor will be unwilling to sacrifice any performance 73 + for the sake of energy saving. This is the hardware default. 74 + .PP 75 + .I normal 76 + Set a policy with a normal balance between performance and energy efficiency. 77 + The processor will tolerate minor performance compromise 78 + for potentially significant energy savings. 79 + This is a reasonable default for most desktops and servers. 80 + .PP 81 + .I powersave 82 + Set a policy where the processor can accept 83 + a measurable performance hit to maximize energy efficiency. 84 + .PP 85 + .I n 86 + Set MSR_IA32_ENERGY_PERF_BIAS to the specified number. 87 + The range of valid numbers is 0-15, where 0 is maximum 88 + performance and 15 is maximum energy efficiency. 89 + 90 + .SH NOTES 91 + .B "x86_energy_perf_policy " 92 + runs only as root. 93 + .SH FILES 94 + .ta 95 + .nf 96 + /dev/cpu/*/msr 97 + .fi 98 + 99 + .SH "SEE ALSO" 100 + msr(4) 101 + .PP 102 + .SH AUTHORS 103 + .nf 104 + Written by Len Brown <len.brown@intel.com>
+325
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
··· 1 + /* 2 + * x86_energy_perf_policy -- set the energy versus performance 3 + * policy preference bias on recent X86 processors. 4 + */ 5 + /* 6 + * Copyright (c) 2010, Intel Corporation. 7 + * Len Brown <len.brown@intel.com> 8 + * 9 + * This program is free software; you can redistribute it and/or modify it 10 + * under the terms and conditions of the GNU General Public License, 11 + * version 2, as published by the Free Software Foundation. 12 + * 13 + * This program is distributed in the hope it will be useful, but WITHOUT 14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 16 + * more details. 17 + * 18 + * You should have received a copy of the GNU General Public License along with 19 + * this program; if not, write to the Free Software Foundation, Inc., 20 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 21 + */ 22 + 23 + #include <stdio.h> 24 + #include <unistd.h> 25 + #include <sys/types.h> 26 + #include <sys/stat.h> 27 + #include <sys/resource.h> 28 + #include <fcntl.h> 29 + #include <signal.h> 30 + #include <sys/time.h> 31 + #include <stdlib.h> 32 + #include <string.h> 33 + 34 + unsigned int verbose; /* set with -v */ 35 + unsigned int read_only; /* set with -r */ 36 + char *progname; 37 + unsigned long long new_bias; 38 + int cpu = -1; 39 + 40 + /* 41 + * Usage: 42 + * 43 + * -c cpu: limit action to a single CPU (default is all CPUs) 44 + * -v: verbose output (can invoke more than once) 45 + * -r: read-only, don't change any settings 46 + * 47 + * performance 48 + * Performance is paramount. 49 + * Unwilling to sacrafice any performance 50 + * for the sake of energy saving. (hardware default) 51 + * 52 + * normal 53 + * Can tolerate minor performance compromise 54 + * for potentially significant energy savings. 
55 + * (reasonable default for most desktops and servers) 56 + * 57 + * powersave 58 + * Can tolerate significant performance hit 59 + * to maximize energy savings. 60 + * 61 + * n 62 + * a numerical value to write to the underlying MSR. 63 + */ 64 + void usage(void) 65 + { 66 + printf("%s: [-c cpu] [-v] " 67 + "(-r | 'performance' | 'normal' | 'powersave' | n)\n", 68 + progname); 69 + exit(1); 70 + } 71 + 72 + #define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 73 + 74 + #define BIAS_PERFORMANCE 0 75 + #define BIAS_BALANCE 6 76 + #define BIAS_POWERSAVE 15 77 + 78 + void cmdline(int argc, char **argv) 79 + { 80 + int opt; 81 + 82 + progname = argv[0]; 83 + 84 + while ((opt = getopt(argc, argv, "+rvc:")) != -1) { 85 + switch (opt) { 86 + case 'c': 87 + cpu = atoi(optarg); 88 + break; 89 + case 'r': 90 + read_only = 1; 91 + break; 92 + case 'v': 93 + verbose++; 94 + break; 95 + default: 96 + usage(); 97 + } 98 + } 99 + /* if -r, then should be no additional optind */ 100 + if (read_only && (argc > optind)) 101 + usage(); 102 + 103 + /* 104 + * if no -r , then must be one additional optind 105 + */ 106 + if (!read_only) { 107 + 108 + if (argc != optind + 1) { 109 + printf("must supply -r or policy param\n"); 110 + usage(); 111 + } 112 + 113 + if (!strcmp("performance", argv[optind])) { 114 + new_bias = BIAS_PERFORMANCE; 115 + } else if (!strcmp("normal", argv[optind])) { 116 + new_bias = BIAS_BALANCE; 117 + } else if (!strcmp("powersave", argv[optind])) { 118 + new_bias = BIAS_POWERSAVE; 119 + } else { 120 + char *endptr; 121 + 122 + new_bias = strtoull(argv[optind], &endptr, 0); 123 + if (endptr == argv[optind] || 124 + new_bias > BIAS_POWERSAVE) { 125 + fprintf(stderr, "invalid value: %s\n", 126 + argv[optind]); 127 + usage(); 128 + } 129 + } 130 + } 131 + } 132 + 133 + /* 134 + * validate_cpuid() 135 + * returns on success, quietly exits on failure (make verbose with -v) 136 + */ 137 + void validate_cpuid(void) 138 + { 139 + unsigned int eax, ebx, ecx, edx, max_level; 140 
+ char brand[16]; 141 + unsigned int fms, family, model, stepping; 142 + 143 + eax = ebx = ecx = edx = 0; 144 + 145 + asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), 146 + "=d" (edx) : "a" (0)); 147 + 148 + if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) { 149 + if (verbose) 150 + fprintf(stderr, "%.4s%.4s%.4s != GenuineIntel", 151 + (char *)&ebx, (char *)&edx, (char *)&ecx); 152 + exit(1); 153 + } 154 + 155 + asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx"); 156 + family = (fms >> 8) & 0xf; 157 + model = (fms >> 4) & 0xf; 158 + stepping = fms & 0xf; 159 + if (family == 6 || family == 0xf) 160 + model += ((fms >> 16) & 0xf) << 4; 161 + 162 + if (verbose > 1) 163 + printf("CPUID %s %d levels family:model:stepping " 164 + "0x%x:%x:%x (%d:%d:%d)\n", brand, max_level, 165 + family, model, stepping, family, model, stepping); 166 + 167 + if (!(edx & (1 << 5))) { 168 + if (verbose) 169 + printf("CPUID: no MSR\n"); 170 + exit(1); 171 + } 172 + 173 + /* 174 + * Support for MSR_IA32_ENERGY_PERF_BIAS 175 + * is indicated by CPUID.06H.ECX.bit3 176 + */ 177 + asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (6)); 178 + if (verbose) 179 + printf("CPUID.06H.ECX: 0x%x\n", ecx); 180 + if (!(ecx & (1 << 3))) { 181 + if (verbose) 182 + printf("CPUID: No MSR_IA32_ENERGY_PERF_BIAS\n"); 183 + exit(1); 184 + } 185 + return; /* success */ 186 + } 187 + 188 + unsigned long long get_msr(int cpu, int offset) 189 + { 190 + unsigned long long msr; 191 + char msr_path[32]; 192 + int retval; 193 + int fd; 194 + 195 + sprintf(msr_path, "/dev/cpu/%d/msr", cpu); 196 + fd = open(msr_path, O_RDONLY); 197 + if (fd < 0) { 198 + printf("Try \"# modprobe msr\"\n"); 199 + perror(msr_path); 200 + exit(1); 201 + } 202 + 203 + retval = pread(fd, &msr, sizeof msr, offset); 204 + 205 + if (retval != sizeof msr) { 206 + printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval); 207 + exit(-2); 208 + } 209 + close(fd); 210 + return msr; 211 + } 212 + 213 
+ unsigned long long put_msr(int cpu, unsigned long long new_msr, int offset) 214 + { 215 + unsigned long long old_msr; 216 + char msr_path[32]; 217 + int retval; 218 + int fd; 219 + 220 + sprintf(msr_path, "/dev/cpu/%d/msr", cpu); 221 + fd = open(msr_path, O_RDWR); 222 + if (fd < 0) { 223 + perror(msr_path); 224 + exit(1); 225 + } 226 + 227 + retval = pread(fd, &old_msr, sizeof old_msr, offset); 228 + if (retval != sizeof old_msr) { 229 + perror("pwrite"); 230 + printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval); 231 + exit(-2); 232 + } 233 + 234 + retval = pwrite(fd, &new_msr, sizeof new_msr, offset); 235 + if (retval != sizeof new_msr) { 236 + perror("pwrite"); 237 + printf("pwrite cpu%d 0x%x = %d\n", cpu, offset, retval); 238 + exit(-2); 239 + } 240 + 241 + close(fd); 242 + 243 + return old_msr; 244 + } 245 + 246 + void print_msr(int cpu) 247 + { 248 + printf("cpu%d: 0x%016llx\n", 249 + cpu, get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS)); 250 + } 251 + 252 + void update_msr(int cpu) 253 + { 254 + unsigned long long previous_msr; 255 + 256 + previous_msr = put_msr(cpu, new_bias, MSR_IA32_ENERGY_PERF_BIAS); 257 + 258 + if (verbose) 259 + printf("cpu%d msr0x%x 0x%016llx -> 0x%016llx\n", 260 + cpu, MSR_IA32_ENERGY_PERF_BIAS, previous_msr, new_bias); 261 + 262 + return; 263 + } 264 + 265 + char *proc_stat = "/proc/stat"; 266 + /* 267 + * run func() on every cpu in /dev/cpu 268 + */ 269 + void for_every_cpu(void (func)(int)) 270 + { 271 + FILE *fp; 272 + int retval; 273 + 274 + fp = fopen(proc_stat, "r"); 275 + if (fp == NULL) { 276 + perror(proc_stat); 277 + exit(1); 278 + } 279 + 280 + retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n"); 281 + if (retval != 0) { 282 + perror("/proc/stat format"); 283 + exit(1); 284 + } 285 + 286 + while (1) { 287 + int cpu; 288 + 289 + retval = fscanf(fp, 290 + "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", 291 + &cpu); 292 + if (retval != 1) 293 + return; 294 + 295 + func(cpu); 296 + } 297 + fclose(fp); 298 + } 
299 + 300 + int main(int argc, char **argv) 301 + { 302 + cmdline(argc, argv); 303 + 304 + if (verbose > 1) 305 + printf("x86_energy_perf_policy Nov 24, 2010" 306 + " - Len Brown <lenb@kernel.org>\n"); 307 + if (verbose > 1 && !read_only) 308 + printf("new_bias %lld\n", new_bias); 309 + 310 + validate_cpuid(); 311 + 312 + if (cpu != -1) { 313 + if (read_only) 314 + print_msr(cpu); 315 + else 316 + update_msr(cpu); 317 + } else { 318 + if (read_only) 319 + for_every_cpu(print_msr); 320 + else 321 + for_every_cpu(update_msr); 322 + } 323 + 324 + return 0; 325 + }
+30
tools/testing/ktest/compare-ktest-sample.pl
··· 1 + #!/usr/bin/perl 2 + 3 + open (IN,"ktest.pl"); 4 + while (<IN>) { 5 + if (/\$opt\{"?([A-Z].*?)(\[.*\])?"?\}/ || 6 + /set_test_option\("(.*?)"/) { 7 + $opt{$1} = 1; 8 + } 9 + } 10 + close IN; 11 + 12 + open (IN, "sample.conf"); 13 + while (<IN>) { 14 + if (/^\s*#?\s*(\S+)\s*=/) { 15 + $samp{$1} = 1; 16 + } 17 + } 18 + close IN; 19 + 20 + foreach $opt (keys %opt) { 21 + if (!defined($samp{$opt})) { 22 + print "opt = $opt\n"; 23 + } 24 + } 25 + 26 + foreach $samp (keys %samp) { 27 + if (!defined($opt{$samp})) { 28 + print "samp = $samp\n"; 29 + } 30 + }
+2023
tools/testing/ktest/ktest.pl
··· 1 + #!/usr/bin/perl -w 2 + # 3 + # Copywrite 2010 - Steven Rostedt <srostedt@redhat.com>, Red Hat Inc. 4 + # Licensed under the terms of the GNU GPL License version 2 5 + # 6 + 7 + use strict; 8 + use IPC::Open2; 9 + use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK); 10 + use File::Path qw(mkpath); 11 + use File::Copy qw(cp); 12 + use FileHandle; 13 + 14 + my $VERSION = "0.2"; 15 + 16 + $| = 1; 17 + 18 + my %opt; 19 + my %repeat_tests; 20 + my %repeats; 21 + my %default; 22 + 23 + #default opts 24 + $default{"NUM_TESTS"} = 1; 25 + $default{"REBOOT_TYPE"} = "grub"; 26 + $default{"TEST_TYPE"} = "test"; 27 + $default{"BUILD_TYPE"} = "randconfig"; 28 + $default{"MAKE_CMD"} = "make"; 29 + $default{"TIMEOUT"} = 120; 30 + $default{"TMP_DIR"} = "/tmp/ktest"; 31 + $default{"SLEEP_TIME"} = 60; # sleep time between tests 32 + $default{"BUILD_NOCLEAN"} = 0; 33 + $default{"REBOOT_ON_ERROR"} = 0; 34 + $default{"POWEROFF_ON_ERROR"} = 0; 35 + $default{"REBOOT_ON_SUCCESS"} = 1; 36 + $default{"POWEROFF_ON_SUCCESS"} = 0; 37 + $default{"BUILD_OPTIONS"} = ""; 38 + $default{"BISECT_SLEEP_TIME"} = 60; # sleep time between bisects 39 + $default{"CLEAR_LOG"} = 0; 40 + $default{"SUCCESS_LINE"} = "login:"; 41 + $default{"BOOTED_TIMEOUT"} = 1; 42 + $default{"DIE_ON_FAILURE"} = 1; 43 + $default{"SSH_EXEC"} = "ssh \$SSH_USER\@\$MACHINE \$SSH_COMMAND"; 44 + $default{"SCP_TO_TARGET"} = "scp \$SRC_FILE \$SSH_USER\@\$MACHINE:\$DST_FILE"; 45 + $default{"REBOOT"} = "ssh \$SSH_USER\@\$MACHINE reboot"; 46 + $default{"STOP_AFTER_SUCCESS"} = 10; 47 + $default{"STOP_AFTER_FAILURE"} = 60; 48 + $default{"LOCALVERSION"} = "-test"; 49 + 50 + my $ktest_config; 51 + my $version; 52 + my $machine; 53 + my $ssh_user; 54 + my $tmpdir; 55 + my $builddir; 56 + my $outputdir; 57 + my $output_config; 58 + my $test_type; 59 + my $build_type; 60 + my $build_options; 61 + my $reboot_type; 62 + my $reboot_script; 63 + my $power_cycle; 64 + my $reboot; 65 + my $reboot_on_error; 66 + my $poweroff_on_error; 67 + my 
$die_on_failure; 68 + my $powercycle_after_reboot; 69 + my $poweroff_after_halt; 70 + my $ssh_exec; 71 + my $scp_to_target; 72 + my $power_off; 73 + my $grub_menu; 74 + my $grub_number; 75 + my $target; 76 + my $make; 77 + my $post_install; 78 + my $noclean; 79 + my $minconfig; 80 + my $addconfig; 81 + my $in_bisect = 0; 82 + my $bisect_bad = ""; 83 + my $reverse_bisect; 84 + my $in_patchcheck = 0; 85 + my $run_test; 86 + my $redirect; 87 + my $buildlog; 88 + my $dmesg; 89 + my $monitor_fp; 90 + my $monitor_pid; 91 + my $monitor_cnt = 0; 92 + my $sleep_time; 93 + my $bisect_sleep_time; 94 + my $store_failures; 95 + my $timeout; 96 + my $booted_timeout; 97 + my $console; 98 + my $success_line; 99 + my $stop_after_success; 100 + my $stop_after_failure; 101 + my $build_target; 102 + my $target_image; 103 + my $localversion; 104 + my $iteration = 0; 105 + my $successes = 0; 106 + 107 + my %entered_configs; 108 + my %config_help; 109 + 110 + $config_help{"MACHINE"} = << "EOF" 111 + The machine hostname that you will test. 112 + EOF 113 + ; 114 + $config_help{"SSH_USER"} = << "EOF" 115 + The box is expected to have ssh on normal bootup, provide the user 116 + (most likely root, since you need privileged operations) 117 + EOF 118 + ; 119 + $config_help{"BUILD_DIR"} = << "EOF" 120 + The directory that contains the Linux source code (full path). 121 + EOF 122 + ; 123 + $config_help{"OUTPUT_DIR"} = << "EOF" 124 + The directory that the objects will be built (full path). 125 + (can not be same as BUILD_DIR) 126 + EOF 127 + ; 128 + $config_help{"BUILD_TARGET"} = << "EOF" 129 + The location of the compiled file to copy to the target. 130 + (relative to OUTPUT_DIR) 131 + EOF 132 + ; 133 + $config_help{"TARGET_IMAGE"} = << "EOF" 134 + The place to put your image on the test machine. 135 + EOF 136 + ; 137 + $config_help{"POWER_CYCLE"} = << "EOF" 138 + A script or command to reboot the box. 
139 + 140 + Here is a digital loggers power switch example 141 + POWER_CYCLE = wget --no-proxy -O /dev/null -q --auth-no-challenge 'http://admin:admin\@power/outlet?5=CCL' 142 + 143 + Here is an example to reboot a virtual box on the current host 144 + with the name "Guest". 145 + POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest 146 + EOF 147 + ; 148 + $config_help{"CONSOLE"} = << "EOF" 149 + The script or command that reads the console 150 + 151 + If you use ttywatch server, something like the following would work. 152 + CONSOLE = nc -d localhost 3001 153 + 154 + For a virtual machine with guest name "Guest". 155 + CONSOLE = virsh console Guest 156 + EOF 157 + ; 158 + $config_help{"LOCALVERSION"} = << "EOF" 159 + Required version ending to differentiate the test 160 + from other linux builds on the system. 161 + EOF 162 + ; 163 + $config_help{"REBOOT_TYPE"} = << "EOF" 164 + Way to reboot the box to the test kernel. 165 + Only valid options so far are "grub" and "script". 166 + 167 + If you specify grub, it will assume grub version 1 168 + and will search in /boot/grub/menu.lst for the title \$GRUB_MENU 169 + and select that target to reboot to the kernel. If this is not 170 + your setup, then specify "script" and have a command or script 171 + specified in REBOOT_SCRIPT to boot to the target. 172 + 173 + The entry in /boot/grub/menu.lst must be entered in manually. 174 + The test will not modify that file. 175 + EOF 176 + ; 177 + $config_help{"GRUB_MENU"} = << "EOF" 178 + The grub title name for the test kernel to boot 179 + (Only mandatory if REBOOT_TYPE = grub) 180 + 181 + Note, ktest.pl will not update the grub menu.lst, you need to 182 + manually add an option for the test. ktest.pl will search 183 + the grub menu.lst for this option to find what kernel to 184 + reboot into. 
185 + 186 + For example, if in the /boot/grub/menu.lst the test kernel title has: 187 + title Test Kernel 188 + kernel vmlinuz-test 189 + GRUB_MENU = Test Kernel 190 + EOF 191 + ; 192 + $config_help{"REBOOT_SCRIPT"} = << "EOF" 193 + A script to reboot the target into the test kernel 194 + (Only mandatory if REBOOT_TYPE = script) 195 + EOF 196 + ; 197 + 198 + 199 + sub get_ktest_config { 200 + my ($config) = @_; 201 + 202 + return if (defined($opt{$config})); 203 + 204 + if (defined($config_help{$config})) { 205 + print "\n"; 206 + print $config_help{$config}; 207 + } 208 + 209 + for (;;) { 210 + print "$config = "; 211 + if (defined($default{$config})) { 212 + print "\[$default{$config}\] "; 213 + } 214 + $entered_configs{$config} = <STDIN>; 215 + $entered_configs{$config} =~ s/^\s*(.*\S)\s*$/$1/; 216 + if ($entered_configs{$config} =~ /^\s*$/) { 217 + if ($default{$config}) { 218 + $entered_configs{$config} = $default{$config}; 219 + } else { 220 + print "Your answer can not be blank\n"; 221 + next; 222 + } 223 + } 224 + last; 225 + } 226 + } 227 + 228 + sub get_ktest_configs { 229 + get_ktest_config("MACHINE"); 230 + get_ktest_config("SSH_USER"); 231 + get_ktest_config("BUILD_DIR"); 232 + get_ktest_config("OUTPUT_DIR"); 233 + get_ktest_config("BUILD_TARGET"); 234 + get_ktest_config("TARGET_IMAGE"); 235 + get_ktest_config("POWER_CYCLE"); 236 + get_ktest_config("CONSOLE"); 237 + get_ktest_config("LOCALVERSION"); 238 + 239 + my $rtype = $opt{"REBOOT_TYPE"}; 240 + 241 + if (!defined($rtype)) { 242 + if (!defined($opt{"GRUB_MENU"})) { 243 + get_ktest_config("REBOOT_TYPE"); 244 + $rtype = $entered_configs{"REBOOT_TYPE"}; 245 + } else { 246 + $rtype = "grub"; 247 + } 248 + } 249 + 250 + if ($rtype eq "grub") { 251 + get_ktest_config("GRUB_MENU"); 252 + } else { 253 + get_ktest_config("REBOOT_SCRIPT"); 254 + } 255 + } 256 + 257 + sub set_value { 258 + my ($lvalue, $rvalue) = @_; 259 + 260 + if (defined($opt{$lvalue})) { 261 + die "Error: Option $lvalue defined more than 
once!\n"; 262 + } 263 + if ($rvalue =~ /^\s*$/) { 264 + delete $opt{$lvalue}; 265 + } else { 266 + $opt{$lvalue} = $rvalue; 267 + } 268 + } 269 + 270 + sub read_config { 271 + my ($config) = @_; 272 + 273 + open(IN, $config) || die "can't read file $config"; 274 + 275 + my $name = $config; 276 + $name =~ s,.*/(.*),$1,; 277 + 278 + my $test_num = 0; 279 + my $default = 1; 280 + my $repeat = 1; 281 + my $num_tests_set = 0; 282 + my $skip = 0; 283 + my $rest; 284 + 285 + while (<IN>) { 286 + 287 + # ignore blank lines and comments 288 + next if (/^\s*$/ || /\s*\#/); 289 + 290 + if (/^\s*TEST_START(.*)/) { 291 + 292 + $rest = $1; 293 + 294 + if ($num_tests_set) { 295 + die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n"; 296 + } 297 + 298 + my $old_test_num = $test_num; 299 + my $old_repeat = $repeat; 300 + 301 + $test_num += $repeat; 302 + $default = 0; 303 + $repeat = 1; 304 + 305 + if ($rest =~ /\s+SKIP(.*)/) { 306 + $rest = $1; 307 + $skip = 1; 308 + } else { 309 + $skip = 0; 310 + } 311 + 312 + if ($rest =~ /\s+ITERATE\s+(\d+)(.*)$/) { 313 + $repeat = $1; 314 + $rest = $2; 315 + $repeat_tests{"$test_num"} = $repeat; 316 + } 317 + 318 + if ($rest =~ /\s+SKIP(.*)/) { 319 + $rest = $1; 320 + $skip = 1; 321 + } 322 + 323 + if ($rest !~ /^\s*$/) { 324 + die "$name: $.: Gargbage found after TEST_START\n$_"; 325 + } 326 + 327 + if ($skip) { 328 + $test_num = $old_test_num; 329 + $repeat = $old_repeat; 330 + } 331 + 332 + } elsif (/^\s*DEFAULTS(.*)$/) { 333 + $default = 1; 334 + 335 + $rest = $1; 336 + 337 + if ($rest =~ /\s+SKIP(.*)/) { 338 + $rest = $1; 339 + $skip = 1; 340 + } else { 341 + $skip = 0; 342 + } 343 + 344 + if ($rest !~ /^\s*$/) { 345 + die "$name: $.: Gargbage found after DEFAULTS\n$_"; 346 + } 347 + 348 + } elsif (/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) { 349 + 350 + next if ($skip); 351 + 352 + my $lvalue = $1; 353 + my $rvalue = $2; 354 + 355 + if (!$default && 356 + ($lvalue eq "NUM_TESTS" || 357 + $lvalue eq "LOG_FILE" || 358 + $lvalue eq 
"CLEAR_LOG")) { 359 + die "$name: $.: $lvalue must be set in DEFAULTS section\n"; 360 + } 361 + 362 + if ($lvalue eq "NUM_TESTS") { 363 + if ($test_num) { 364 + die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n"; 365 + } 366 + if (!$default) { 367 + die "$name: $.: NUM_TESTS must be set in default section\n"; 368 + } 369 + $num_tests_set = 1; 370 + } 371 + 372 + if ($default || $lvalue =~ /\[\d+\]$/) { 373 + set_value($lvalue, $rvalue); 374 + } else { 375 + my $val = "$lvalue\[$test_num\]"; 376 + set_value($val, $rvalue); 377 + 378 + if ($repeat > 1) { 379 + $repeats{$val} = $repeat; 380 + } 381 + } 382 + } else { 383 + die "$name: $.: Garbage found in config\n$_"; 384 + } 385 + } 386 + 387 + close(IN); 388 + 389 + if ($test_num) { 390 + $test_num += $repeat - 1; 391 + $opt{"NUM_TESTS"} = $test_num; 392 + } 393 + 394 + # make sure we have all mandatory configs 395 + get_ktest_configs; 396 + 397 + # set any defaults 398 + 399 + foreach my $default (keys %default) { 400 + if (!defined($opt{$default})) { 401 + $opt{$default} = $default{$default}; 402 + } 403 + } 404 + } 405 + 406 + sub _logit { 407 + if (defined($opt{"LOG_FILE"})) { 408 + open(OUT, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}"; 409 + print OUT @_; 410 + close(OUT); 411 + } 412 + } 413 + 414 + sub logit { 415 + if (defined($opt{"LOG_FILE"})) { 416 + _logit @_; 417 + } else { 418 + print @_; 419 + } 420 + } 421 + 422 + sub doprint { 423 + print @_; 424 + _logit @_; 425 + } 426 + 427 + sub run_command; 428 + 429 + sub reboot { 430 + # try to reboot normally 431 + if (run_command $reboot) { 432 + if (defined($powercycle_after_reboot)) { 433 + sleep $powercycle_after_reboot; 434 + run_command "$power_cycle"; 435 + } 436 + } else { 437 + # nope? power cycle it. 
438 + run_command "$power_cycle"; 439 + } 440 + } 441 + 442 + sub do_not_reboot { 443 + my $i = $iteration; 444 + 445 + return $test_type eq "build" || 446 + ($test_type eq "patchcheck" && $opt{"PATCHCHECK_TYPE[$i]"} eq "build") || 447 + ($test_type eq "bisect" && $opt{"BISECT_TYPE[$i]"} eq "build"); 448 + } 449 + 450 + sub dodie { 451 + doprint "CRITICAL FAILURE... ", @_, "\n"; 452 + 453 + my $i = $iteration; 454 + 455 + if ($reboot_on_error && !do_not_reboot) { 456 + 457 + doprint "REBOOTING\n"; 458 + reboot; 459 + 460 + } elsif ($poweroff_on_error && defined($power_off)) { 461 + doprint "POWERING OFF\n"; 462 + `$power_off`; 463 + } 464 + 465 + die @_, "\n"; 466 + } 467 + 468 + sub open_console { 469 + my ($fp) = @_; 470 + 471 + my $flags; 472 + 473 + my $pid = open($fp, "$console|") or 474 + dodie "Can't open console $console"; 475 + 476 + $flags = fcntl($fp, F_GETFL, 0) or 477 + dodie "Can't get flags for the socket: $!"; 478 + $flags = fcntl($fp, F_SETFL, $flags | O_NONBLOCK) or 479 + dodie "Can't set flags for the socket: $!"; 480 + 481 + return $pid; 482 + } 483 + 484 + sub close_console { 485 + my ($fp, $pid) = @_; 486 + 487 + doprint "kill child process $pid\n"; 488 + kill 2, $pid; 489 + 490 + print "closing!\n"; 491 + close($fp); 492 + } 493 + 494 + sub start_monitor { 495 + if ($monitor_cnt++) { 496 + return; 497 + } 498 + $monitor_fp = \*MONFD; 499 + $monitor_pid = open_console $monitor_fp; 500 + 501 + return; 502 + 503 + open(MONFD, "Stop perl from warning about single use of MONFD"); 504 + } 505 + 506 + sub end_monitor { 507 + if (--$monitor_cnt) { 508 + return; 509 + } 510 + close_console($monitor_fp, $monitor_pid); 511 + } 512 + 513 + sub wait_for_monitor { 514 + my ($time) = @_; 515 + my $line; 516 + 517 + doprint "** Wait for monitor to settle down **\n"; 518 + 519 + # read the monitor and wait for the system to calm down 520 + do { 521 + $line = wait_for_input($monitor_fp, $time); 522 + print "$line" if (defined($line)); 523 + } while 
(defined($line)); 524 + print "** Monitor flushed **\n"; 525 + } 526 + 527 + sub fail { 528 + 529 + if ($die_on_failure) { 530 + dodie @_; 531 + } 532 + 533 + doprint "FAILED\n"; 534 + 535 + my $i = $iteration; 536 + 537 + # no need to reboot for just building. 538 + if (!do_not_reboot) { 539 + doprint "REBOOTING\n"; 540 + reboot; 541 + start_monitor; 542 + wait_for_monitor $sleep_time; 543 + end_monitor; 544 + } 545 + 546 + doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; 547 + doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; 548 + doprint "KTEST RESULT: TEST $i Failed: ", @_, "\n"; 549 + doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; 550 + doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; 551 + 552 + return 1 if (!defined($store_failures)); 553 + 554 + my @t = localtime; 555 + my $date = sprintf "%04d%02d%02d%02d%02d%02d", 556 + 1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0]; 557 + 558 + my $type = $build_type; 559 + if ($type =~ /useconfig/) { 560 + $type = "useconfig"; 561 + } 562 + 563 + my $dir = "$machine-$test_type-$type-fail-$date"; 564 + my $faildir = "$store_failures/$dir"; 565 + 566 + if (!-d $faildir) { 567 + mkpath($faildir) or 568 + die "can't create $faildir"; 569 + } 570 + if (-f "$output_config") { 571 + cp "$output_config", "$faildir/config" or 572 + die "failed to copy .config"; 573 + } 574 + if (-f $buildlog) { 575 + cp $buildlog, "$faildir/buildlog" or 576 + die "failed to move $buildlog"; 577 + } 578 + if (-f $dmesg) { 579 + cp $dmesg, "$faildir/dmesg" or 580 + die "failed to move $dmesg"; 581 + } 582 + 583 + doprint "*** Saved info to $faildir ***\n"; 584 + 585 + return 1; 586 + } 587 + 588 + sub run_command { 589 + my ($command) = @_; 590 + my $dolog = 0; 591 + my $dord = 0; 592 + my $pid; 593 + 594 + $command =~ s/\$SSH_USER/$ssh_user/g; 595 + $command =~ s/\$MACHINE/$machine/g; 596 + 597 + doprint("$command ... 
"); 598 + 599 + $pid = open(CMD, "$command 2>&1 |") or 600 + (fail "unable to exec $command" and return 0); 601 + 602 + if (defined($opt{"LOG_FILE"})) { 603 + open(LOG, ">>$opt{LOG_FILE}") or 604 + dodie "failed to write to log"; 605 + $dolog = 1; 606 + } 607 + 608 + if (defined($redirect)) { 609 + open (RD, ">$redirect") or 610 + dodie "failed to write to redirect $redirect"; 611 + $dord = 1; 612 + } 613 + 614 + while (<CMD>) { 615 + print LOG if ($dolog); 616 + print RD if ($dord); 617 + } 618 + 619 + waitpid($pid, 0); 620 + my $failed = $?; 621 + 622 + close(CMD); 623 + close(LOG) if ($dolog); 624 + close(RD) if ($dord); 625 + 626 + if ($failed) { 627 + doprint "FAILED!\n"; 628 + } else { 629 + doprint "SUCCESS\n"; 630 + } 631 + 632 + return !$failed; 633 + } 634 + 635 + sub run_ssh { 636 + my ($cmd) = @_; 637 + my $cp_exec = $ssh_exec; 638 + 639 + $cp_exec =~ s/\$SSH_COMMAND/$cmd/g; 640 + return run_command "$cp_exec"; 641 + } 642 + 643 + sub run_scp { 644 + my ($src, $dst) = @_; 645 + my $cp_scp = $scp_to_target; 646 + 647 + $cp_scp =~ s/\$SRC_FILE/$src/g; 648 + $cp_scp =~ s/\$DST_FILE/$dst/g; 649 + 650 + return run_command "$cp_scp"; 651 + } 652 + 653 + sub get_grub_index { 654 + 655 + if ($reboot_type ne "grub") { 656 + return; 657 + } 658 + return if (defined($grub_number)); 659 + 660 + doprint "Find grub menu ... 
"; 661 + $grub_number = -1; 662 + 663 + my $ssh_grub = $ssh_exec; 664 + $ssh_grub =~ s,\$SSH_COMMAND,cat /boot/grub/menu.lst,g; 665 + 666 + open(IN, "$ssh_grub |") 667 + or die "unable to get menu.lst"; 668 + 669 + while (<IN>) { 670 + if (/^\s*title\s+$grub_menu\s*$/) { 671 + $grub_number++; 672 + last; 673 + } elsif (/^\s*title\s/) { 674 + $grub_number++; 675 + } 676 + } 677 + close(IN); 678 + 679 + die "Could not find '$grub_menu' in /boot/grub/menu on $machine" 680 + if ($grub_number < 0); 681 + doprint "$grub_number\n"; 682 + } 683 + 684 + sub wait_for_input 685 + { 686 + my ($fp, $time) = @_; 687 + my $rin; 688 + my $ready; 689 + my $line; 690 + my $ch; 691 + 692 + if (!defined($time)) { 693 + $time = $timeout; 694 + } 695 + 696 + $rin = ''; 697 + vec($rin, fileno($fp), 1) = 1; 698 + $ready = select($rin, undef, undef, $time); 699 + 700 + $line = ""; 701 + 702 + # try to read one char at a time 703 + while (sysread $fp, $ch, 1) { 704 + $line .= $ch; 705 + last if ($ch eq "\n"); 706 + } 707 + 708 + if (!length($line)) { 709 + return undef; 710 + } 711 + 712 + return $line; 713 + } 714 + 715 + sub reboot_to { 716 + if ($reboot_type eq "grub") { 717 + run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch; reboot)'"; 718 + return; 719 + } 720 + 721 + run_command "$reboot_script"; 722 + } 723 + 724 + sub get_sha1 { 725 + my ($commit) = @_; 726 + 727 + doprint "git rev-list --max-count=1 $commit ... 
"; 728 + my $sha1 = `git rev-list --max-count=1 $commit`; 729 + my $ret = $?; 730 + 731 + logit $sha1; 732 + 733 + if ($ret) { 734 + doprint "FAILED\n"; 735 + dodie "Failed to get git $commit"; 736 + } 737 + 738 + print "SUCCESS\n"; 739 + 740 + chomp $sha1; 741 + 742 + return $sha1; 743 + } 744 + 745 + sub monitor { 746 + my $booted = 0; 747 + my $bug = 0; 748 + my $skip_call_trace = 0; 749 + my $loops; 750 + 751 + wait_for_monitor 5; 752 + 753 + my $line; 754 + my $full_line = ""; 755 + 756 + open(DMESG, "> $dmesg") or 757 + die "unable to write to $dmesg"; 758 + 759 + reboot_to; 760 + 761 + my $success_start; 762 + my $failure_start; 763 + 764 + for (;;) { 765 + 766 + if ($booted) { 767 + $line = wait_for_input($monitor_fp, $booted_timeout); 768 + } else { 769 + $line = wait_for_input($monitor_fp); 770 + } 771 + 772 + last if (!defined($line)); 773 + 774 + doprint $line; 775 + print DMESG $line; 776 + 777 + # we are not guaranteed to get a full line 778 + $full_line .= $line; 779 + 780 + if ($full_line =~ /$success_line/) { 781 + $booted = 1; 782 + $success_start = time; 783 + } 784 + 785 + if ($booted && defined($stop_after_success) && 786 + $stop_after_success >= 0) { 787 + my $now = time; 788 + if ($now - $success_start >= $stop_after_success) { 789 + doprint "Test forced to stop after $stop_after_success seconds after success\n"; 790 + last; 791 + } 792 + } 793 + 794 + if ($full_line =~ /\[ backtrace testing \]/) { 795 + $skip_call_trace = 1; 796 + } 797 + 798 + if ($full_line =~ /call trace:/i) { 799 + if (!$skip_call_trace) { 800 + $bug = 1; 801 + $failure_start = time; 802 + } 803 + } 804 + 805 + if ($bug && defined($stop_after_failure) && 806 + $stop_after_failure >= 0) { 807 + my $now = time; 808 + if ($now - $failure_start >= $stop_after_failure) { 809 + doprint "Test forced to stop after $stop_after_failure seconds after failure\n"; 810 + last; 811 + } 812 + } 813 + 814 + if ($full_line =~ /\[ end of backtrace testing \]/) { 815 + $skip_call_trace = 0; 
816 + } 817 + 818 + if ($full_line =~ /Kernel panic -/) { 819 + $bug = 1; 820 + } 821 + 822 + if ($line =~ /\n/) { 823 + $full_line = ""; 824 + } 825 + } 826 + 827 + close(DMESG); 828 + 829 + if ($bug) { 830 + return 0 if ($in_bisect); 831 + fail "failed - got a bug report" and return 0; 832 + } 833 + 834 + if (!$booted) { 835 + return 0 if ($in_bisect); 836 + fail "failed - never got a boot prompt." and return 0; 837 + } 838 + 839 + return 1; 840 + } 841 + 842 + sub install { 843 + 844 + run_scp "$outputdir/$build_target", "$target_image" or 845 + dodie "failed to copy image"; 846 + 847 + my $install_mods = 0; 848 + 849 + # should we process modules? 850 + $install_mods = 0; 851 + open(IN, "$output_config") or dodie("Can't read config file"); 852 + while (<IN>) { 853 + if (/CONFIG_MODULES(=y)?/) { 854 + $install_mods = 1 if (defined($1)); 855 + last; 856 + } 857 + } 858 + close(IN); 859 + 860 + if (!$install_mods) { 861 + doprint "No modules needed\n"; 862 + return; 863 + } 864 + 865 + run_command "$make INSTALL_MOD_PATH=$tmpdir modules_install" or 866 + dodie "Failed to install modules"; 867 + 868 + my $modlib = "/lib/modules/$version"; 869 + my $modtar = "ktest-mods.tar.bz2"; 870 + 871 + run_ssh "rm -rf $modlib" or 872 + dodie "failed to remove old mods: $modlib"; 873 + 874 + # would be nice if scp -r did not follow symbolic links 875 + run_command "cd $tmpdir && tar -cjf $modtar lib/modules/$version" or 876 + dodie "making tarball"; 877 + 878 + run_scp "$tmpdir/$modtar", "/tmp" or 879 + dodie "failed to copy modules"; 880 + 881 + unlink "$tmpdir/$modtar"; 882 + 883 + run_ssh "'(cd / && tar xf /tmp/$modtar)'" or 884 + dodie "failed to tar modules"; 885 + 886 + run_ssh "rm -f /tmp/$modtar"; 887 + 888 + return if (!defined($post_install)); 889 + 890 + my $cp_post_install = $post_install; 891 + $cp_post_install = s/\$KERNEL_VERSION/$version/g; 892 + run_command "$cp_post_install" or 893 + dodie "Failed to run post install"; 894 + } 895 + 896 + sub check_buildlog { 
897 + my ($patch) = @_; 898 + 899 + my @files = `git show $patch | diffstat -l`; 900 + 901 + open(IN, "git show $patch |") or 902 + dodie "failed to show $patch"; 903 + while (<IN>) { 904 + if (m,^--- a/(.*),) { 905 + chomp $1; 906 + $files[$#files] = $1; 907 + } 908 + } 909 + close(IN); 910 + 911 + open(IN, $buildlog) or dodie "Can't open $buildlog"; 912 + while (<IN>) { 913 + if (/^\s*(.*?):.*(warning|error)/) { 914 + my $err = $1; 915 + foreach my $file (@files) { 916 + my $fullpath = "$builddir/$file"; 917 + if ($file eq $err || $fullpath eq $err) { 918 + fail "$file built with warnings" and return 0; 919 + } 920 + } 921 + } 922 + } 923 + close(IN); 924 + 925 + return 1; 926 + } 927 + 928 + sub build { 929 + my ($type) = @_; 930 + my $defconfig = ""; 931 + 932 + unlink $buildlog; 933 + 934 + if ($type =~ /^useconfig:(.*)/) { 935 + run_command "cp $1 $output_config" or 936 + dodie "could not copy $1 to .config"; 937 + 938 + $type = "oldconfig"; 939 + } 940 + 941 + # old config can ask questions 942 + if ($type eq "oldconfig") { 943 + $type = "oldnoconfig"; 944 + 945 + # allow for empty configs 946 + run_command "touch $output_config"; 947 + 948 + run_command "mv $output_config $outputdir/config_temp" or 949 + dodie "moving .config"; 950 + 951 + if (!$noclean && !run_command "$make mrproper") { 952 + dodie "make mrproper"; 953 + } 954 + 955 + run_command "mv $outputdir/config_temp $output_config" or 956 + dodie "moving config_temp"; 957 + 958 + } elsif (!$noclean) { 959 + unlink "$output_config"; 960 + run_command "$make mrproper" or 961 + dodie "make mrproper"; 962 + } 963 + 964 + # add something to distinguish this build 965 + open(OUT, "> $outputdir/localversion") or dodie("Can't make localversion file"); 966 + print OUT "$localversion\n"; 967 + close(OUT); 968 + 969 + if (defined($minconfig)) { 970 + $defconfig = "KCONFIG_ALLCONFIG=$minconfig"; 971 + } 972 + 973 + run_command "$defconfig $make $type" or 974 + dodie "failed make config"; 975 + 976 + $redirect 
= "$buildlog"; 977 + if (!run_command "$make $build_options") { 978 + undef $redirect; 979 + # bisect may need this to pass 980 + return 0 if ($in_bisect); 981 + fail "failed build" and return 0; 982 + } 983 + undef $redirect; 984 + 985 + return 1; 986 + } 987 + 988 + sub halt { 989 + if (!run_ssh "halt" or defined($power_off)) { 990 + if (defined($poweroff_after_halt)) { 991 + sleep $poweroff_after_halt; 992 + run_command "$power_off"; 993 + } 994 + } else { 995 + # nope? the zap it! 996 + run_command "$power_off"; 997 + } 998 + } 999 + 1000 + sub success { 1001 + my ($i) = @_; 1002 + 1003 + $successes++; 1004 + 1005 + doprint "\n\n*******************************************\n"; 1006 + doprint "*******************************************\n"; 1007 + doprint "KTEST RESULT: TEST $i SUCCESS!!!! **\n"; 1008 + doprint "*******************************************\n"; 1009 + doprint "*******************************************\n"; 1010 + 1011 + if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) { 1012 + doprint "Reboot and wait $sleep_time seconds\n"; 1013 + reboot; 1014 + start_monitor; 1015 + wait_for_monitor $sleep_time; 1016 + end_monitor; 1017 + } 1018 + } 1019 + 1020 + sub get_version { 1021 + # get the release name 1022 + doprint "$make kernelrelease ... 
"; 1023 + $version = `$make kernelrelease | tail -1`; 1024 + chomp($version); 1025 + doprint "$version\n"; 1026 + } 1027 + 1028 + sub child_run_test { 1029 + my $failed = 0; 1030 + 1031 + # child should have no power 1032 + $reboot_on_error = 0; 1033 + $poweroff_on_error = 0; 1034 + $die_on_failure = 1; 1035 + 1036 + run_command $run_test or $failed = 1; 1037 + exit $failed; 1038 + } 1039 + 1040 + my $child_done; 1041 + 1042 + sub child_finished { 1043 + $child_done = 1; 1044 + } 1045 + 1046 + sub do_run_test { 1047 + my $child_pid; 1048 + my $child_exit; 1049 + my $line; 1050 + my $full_line; 1051 + my $bug = 0; 1052 + 1053 + wait_for_monitor 1; 1054 + 1055 + doprint "run test $run_test\n"; 1056 + 1057 + $child_done = 0; 1058 + 1059 + $SIG{CHLD} = qw(child_finished); 1060 + 1061 + $child_pid = fork; 1062 + 1063 + child_run_test if (!$child_pid); 1064 + 1065 + $full_line = ""; 1066 + 1067 + do { 1068 + $line = wait_for_input($monitor_fp, 1); 1069 + if (defined($line)) { 1070 + 1071 + # we are not guaranteed to get a full line 1072 + $full_line .= $line; 1073 + 1074 + if ($full_line =~ /call trace:/i) { 1075 + $bug = 1; 1076 + } 1077 + 1078 + if ($full_line =~ /Kernel panic -/) { 1079 + $bug = 1; 1080 + } 1081 + 1082 + if ($line =~ /\n/) { 1083 + $full_line = ""; 1084 + } 1085 + } 1086 + } while (!$child_done && !$bug); 1087 + 1088 + if ($bug) { 1089 + doprint "Detected kernel crash!\n"; 1090 + # kill the child with extreme prejudice 1091 + kill 9, $child_pid; 1092 + } 1093 + 1094 + waitpid $child_pid, 0; 1095 + $child_exit = $?; 1096 + 1097 + if ($bug || $child_exit) { 1098 + return 0 if $in_bisect; 1099 + fail "test failed" and return 0; 1100 + } 1101 + return 1; 1102 + } 1103 + 1104 + sub run_git_bisect { 1105 + my ($command) = @_; 1106 + 1107 + doprint "$command ... 
"; 1108 + 1109 + my $output = `$command 2>&1`; 1110 + my $ret = $?; 1111 + 1112 + logit $output; 1113 + 1114 + if ($ret) { 1115 + doprint "FAILED\n"; 1116 + dodie "Failed to git bisect"; 1117 + } 1118 + 1119 + doprint "SUCCESS\n"; 1120 + if ($output =~ m/^(Bisecting: .*\(roughly \d+ steps?\))\s+\[([[:xdigit:]]+)\]/) { 1121 + doprint "$1 [$2]\n"; 1122 + } elsif ($output =~ m/^([[:xdigit:]]+) is the first bad commit/) { 1123 + $bisect_bad = $1; 1124 + doprint "Found bad commit... $1\n"; 1125 + return 0; 1126 + } else { 1127 + # we already logged it, just print it now. 1128 + print $output; 1129 + } 1130 + 1131 + return 1; 1132 + } 1133 + 1134 + # returns 1 on success, 0 on failure 1135 + sub run_bisect_test { 1136 + my ($type, $buildtype) = @_; 1137 + 1138 + my $failed = 0; 1139 + my $result; 1140 + my $output; 1141 + my $ret; 1142 + 1143 + $in_bisect = 1; 1144 + 1145 + build $buildtype or $failed = 1; 1146 + 1147 + if ($type ne "build") { 1148 + dodie "Failed on build" if $failed; 1149 + 1150 + # Now boot the box 1151 + get_grub_index; 1152 + get_version; 1153 + install; 1154 + 1155 + start_monitor; 1156 + monitor or $failed = 1; 1157 + 1158 + if ($type ne "boot") { 1159 + dodie "Failed on boot" if $failed; 1160 + 1161 + do_run_test or $failed = 1; 1162 + } 1163 + end_monitor; 1164 + } 1165 + 1166 + if ($failed) { 1167 + $result = 0; 1168 + 1169 + # reboot the box to a good kernel 1170 + if ($type ne "build") { 1171 + doprint "Reboot and sleep $bisect_sleep_time seconds\n"; 1172 + reboot; 1173 + start_monitor; 1174 + wait_for_monitor $bisect_sleep_time; 1175 + end_monitor; 1176 + } 1177 + } else { 1178 + $result = 1; 1179 + } 1180 + $in_bisect = 0; 1181 + 1182 + return $result; 1183 + } 1184 + 1185 + sub run_bisect { 1186 + my ($type) = @_; 1187 + my $buildtype = "oldconfig"; 1188 + 1189 + # We should have a minconfig to use? 
1190 + if (defined($minconfig)) { 1191 + $buildtype = "useconfig:$minconfig"; 1192 + } 1193 + 1194 + my $ret = run_bisect_test $type, $buildtype; 1195 + 1196 + 1197 + # Are we looking for where it worked, not failed? 1198 + if ($reverse_bisect) { 1199 + $ret = !$ret; 1200 + } 1201 + 1202 + if ($ret) { 1203 + return "good"; 1204 + } else { 1205 + return "bad"; 1206 + } 1207 + } 1208 + 1209 + sub bisect { 1210 + my ($i) = @_; 1211 + 1212 + my $result; 1213 + 1214 + die "BISECT_GOOD[$i] not defined\n" if (!defined($opt{"BISECT_GOOD[$i]"})); 1215 + die "BISECT_BAD[$i] not defined\n" if (!defined($opt{"BISECT_BAD[$i]"})); 1216 + die "BISECT_TYPE[$i] not defined\n" if (!defined($opt{"BISECT_TYPE[$i]"})); 1217 + 1218 + my $good = $opt{"BISECT_GOOD[$i]"}; 1219 + my $bad = $opt{"BISECT_BAD[$i]"}; 1220 + my $type = $opt{"BISECT_TYPE[$i]"}; 1221 + my $start = $opt{"BISECT_START[$i]"}; 1222 + my $replay = $opt{"BISECT_REPLAY[$i]"}; 1223 + 1224 + # convert to true sha1's 1225 + $good = get_sha1($good); 1226 + $bad = get_sha1($bad); 1227 + 1228 + if (defined($opt{"BISECT_REVERSE[$i]"}) && 1229 + $opt{"BISECT_REVERSE[$i]"} == 1) { 1230 + doprint "Performing a reverse bisect (bad is good, good is bad!)\n"; 1231 + $reverse_bisect = 1; 1232 + } else { 1233 + $reverse_bisect = 0; 1234 + } 1235 + 1236 + # Can't have a test without having a test to run 1237 + if ($type eq "test" && !defined($run_test)) { 1238 + $type = "boot"; 1239 + } 1240 + 1241 + my $check = $opt{"BISECT_CHECK[$i]"}; 1242 + if (defined($check) && $check ne "0") { 1243 + 1244 + # get current HEAD 1245 + my $head = get_sha1("HEAD"); 1246 + 1247 + if ($check ne "good") { 1248 + doprint "TESTING BISECT BAD [$bad]\n"; 1249 + run_command "git checkout $bad" or 1250 + die "Failed to checkout $bad"; 1251 + 1252 + $result = run_bisect $type; 1253 + 1254 + if ($result ne "bad") { 1255 + fail "Tested BISECT_BAD [$bad] and it succeeded" and return 0; 1256 + } 1257 + } 1258 + 1259 + if ($check ne "bad") { 1260 + doprint "TESTING 
BISECT GOOD [$good]\n"; 1261 + run_command "git checkout $good" or 1262 + die "Failed to checkout $good"; 1263 + 1264 + $result = run_bisect $type; 1265 + 1266 + if ($result ne "good") { 1267 + fail "Tested BISECT_GOOD [$good] and it failed" and return 0; 1268 + } 1269 + } 1270 + 1271 + # checkout where we started 1272 + run_command "git checkout $head" or 1273 + die "Failed to checkout $head"; 1274 + } 1275 + 1276 + run_command "git bisect start" or 1277 + dodie "could not start bisect"; 1278 + 1279 + run_command "git bisect good $good" or 1280 + dodie "could not set bisect good to $good"; 1281 + 1282 + run_git_bisect "git bisect bad $bad" or 1283 + dodie "could not set bisect bad to $bad"; 1284 + 1285 + if (defined($replay)) { 1286 + run_command "git bisect replay $replay" or 1287 + dodie "failed to run replay"; 1288 + } 1289 + 1290 + if (defined($start)) { 1291 + run_command "git checkout $start" or 1292 + dodie "failed to checkout $start"; 1293 + } 1294 + 1295 + my $test; 1296 + do { 1297 + $result = run_bisect $type; 1298 + $test = run_git_bisect "git bisect $result"; 1299 + } while ($test); 1300 + 1301 + run_command "git bisect log" or 1302 + dodie "could not capture git bisect log"; 1303 + 1304 + run_command "git bisect reset" or 1305 + dodie "could not reset git bisect"; 1306 + 1307 + doprint "Bad commit was [$bisect_bad]\n"; 1308 + 1309 + success $i; 1310 + } 1311 + 1312 + my %config_ignore; 1313 + my %config_set; 1314 + 1315 + my %config_list; 1316 + my %null_config; 1317 + 1318 + my %dependency; 1319 + 1320 + sub process_config_ignore { 1321 + my ($config) = @_; 1322 + 1323 + open (IN, $config) 1324 + or dodie "Failed to read $config"; 1325 + 1326 + while (<IN>) { 1327 + if (/^(.*?(CONFIG\S*)(=.*| is not set))/) { 1328 + $config_ignore{$2} = $1; 1329 + } 1330 + } 1331 + 1332 + close(IN); 1333 + } 1334 + 1335 + sub read_current_config { 1336 + my ($config_ref) = @_; 1337 + 1338 + %{$config_ref} = (); 1339 + undef %{$config_ref}; 1340 + 1341 + my @key = 
keys %{$config_ref}; 1342 + if ($#key >= 0) { 1343 + print "did not delete!\n"; 1344 + exit; 1345 + } 1346 + open (IN, "$output_config"); 1347 + 1348 + while (<IN>) { 1349 + if (/^(CONFIG\S+)=(.*)/) { 1350 + ${$config_ref}{$1} = $2; 1351 + } 1352 + } 1353 + close(IN); 1354 + } 1355 + 1356 + sub get_dependencies { 1357 + my ($config) = @_; 1358 + 1359 + my $arr = $dependency{$config}; 1360 + if (!defined($arr)) { 1361 + return (); 1362 + } 1363 + 1364 + my @deps = @{$arr}; 1365 + 1366 + foreach my $dep (@{$arr}) { 1367 + print "ADD DEP $dep\n"; 1368 + @deps = (@deps, get_dependencies $dep); 1369 + } 1370 + 1371 + return @deps; 1372 + } 1373 + 1374 + sub create_config { 1375 + my @configs = @_; 1376 + 1377 + open(OUT, ">$output_config") or dodie "Can not write to $output_config"; 1378 + 1379 + foreach my $config (@configs) { 1380 + print OUT "$config_set{$config}\n"; 1381 + my @deps = get_dependencies $config; 1382 + foreach my $dep (@deps) { 1383 + print OUT "$config_set{$dep}\n"; 1384 + } 1385 + } 1386 + 1387 + foreach my $config (keys %config_ignore) { 1388 + print OUT "$config_ignore{$config}\n"; 1389 + } 1390 + close(OUT); 1391 + 1392 + # exit; 1393 + run_command "$make oldnoconfig" or 1394 + dodie "failed make config oldconfig"; 1395 + 1396 + } 1397 + 1398 + sub compare_configs { 1399 + my (%a, %b) = @_; 1400 + 1401 + foreach my $item (keys %a) { 1402 + if (!defined($b{$item})) { 1403 + print "diff $item\n"; 1404 + return 1; 1405 + } 1406 + delete $b{$item}; 1407 + } 1408 + 1409 + my @keys = keys %b; 1410 + if ($#keys) { 1411 + print "diff2 $keys[0]\n"; 1412 + } 1413 + return -1 if ($#keys >= 0); 1414 + 1415 + return 0; 1416 + } 1417 + 1418 + sub run_config_bisect_test { 1419 + my ($type) = @_; 1420 + 1421 + return run_bisect_test $type, "oldconfig"; 1422 + } 1423 + 1424 + sub process_passed { 1425 + my (%configs) = @_; 1426 + 1427 + doprint "These configs had no failure: (Enabling them for further compiles)\n"; 1428 + # Passed! 
All these configs are part of a good compile. 1429 + # Add them to the min options. 1430 + foreach my $config (keys %configs) { 1431 + if (defined($config_list{$config})) { 1432 + doprint " removing $config\n"; 1433 + $config_ignore{$config} = $config_list{$config}; 1434 + delete $config_list{$config}; 1435 + } 1436 + } 1437 + doprint "config copied to $outputdir/config_good\n"; 1438 + run_command "cp -f $output_config $outputdir/config_good"; 1439 + } 1440 + 1441 + sub process_failed { 1442 + my ($config) = @_; 1443 + 1444 + doprint "\n\n***************************************\n"; 1445 + doprint "Found bad config: $config\n"; 1446 + doprint "***************************************\n\n"; 1447 + } 1448 + 1449 + sub run_config_bisect { 1450 + 1451 + my @start_list = keys %config_list; 1452 + 1453 + if ($#start_list < 0) { 1454 + doprint "No more configs to test!!!\n"; 1455 + return -1; 1456 + } 1457 + 1458 + doprint "***** RUN TEST ***\n"; 1459 + my $type = $opt{"CONFIG_BISECT_TYPE[$iteration]"}; 1460 + my $ret; 1461 + my %current_config; 1462 + 1463 + my $count = $#start_list + 1; 1464 + doprint " $count configs to test\n"; 1465 + 1466 + my $half = int($#start_list / 2); 1467 + 1468 + do { 1469 + my @tophalf = @start_list[0 .. $half]; 1470 + 1471 + create_config @tophalf; 1472 + read_current_config \%current_config; 1473 + 1474 + $count = $#tophalf + 1; 1475 + doprint "Testing $count configs\n"; 1476 + my $found = 0; 1477 + # make sure we test something 1478 + foreach my $config (@tophalf) { 1479 + if (defined($current_config{$config})) { 1480 + logit " $config\n"; 1481 + $found = 1; 1482 + } 1483 + } 1484 + if (!$found) { 1485 + # try the other half 1486 + doprint "Top half produced no set configs, trying bottom half\n"; 1487 + @tophalf = @start_list[$half .. 
$#start_list]; 1488 + create_config @tophalf; 1489 + read_current_config \%current_config; 1490 + foreach my $config (@tophalf) { 1491 + if (defined($current_config{$config})) { 1492 + logit " $config\n"; 1493 + $found = 1; 1494 + } 1495 + } 1496 + if (!$found) { 1497 + doprint "Failed: Can't make new config with current configs\n"; 1498 + foreach my $config (@start_list) { 1499 + doprint " CONFIG: $config\n"; 1500 + } 1501 + return -1; 1502 + } 1503 + $count = $#tophalf + 1; 1504 + doprint "Testing $count configs\n"; 1505 + } 1506 + 1507 + $ret = run_config_bisect_test $type; 1508 + 1509 + if ($ret) { 1510 + process_passed %current_config; 1511 + return 0; 1512 + } 1513 + 1514 + doprint "This config had a failure.\n"; 1515 + doprint "Removing these configs that were not set in this config:\n"; 1516 + doprint "config copied to $outputdir/config_bad\n"; 1517 + run_command "cp -f $output_config $outputdir/config_bad"; 1518 + 1519 + # A config exists in this group that was bad. 1520 + foreach my $config (keys %config_list) { 1521 + if (!defined($current_config{$config})) { 1522 + doprint " removing $config\n"; 1523 + delete $config_list{$config}; 1524 + } 1525 + } 1526 + 1527 + @start_list = @tophalf; 1528 + 1529 + if ($#start_list == 0) { 1530 + process_failed $start_list[0]; 1531 + return 1; 1532 + } 1533 + 1534 + # remove half the configs we are looking at and see if 1535 + # they are good. 1536 + $half = int($#start_list / 2); 1537 + } while ($half > 0); 1538 + 1539 + # we found a single config, try it again 1540 + my @tophalf = @start_list[0 .. 
0]; 1541 + 1542 + $ret = run_config_bisect_test $type; 1543 + if ($ret) { 1544 + process_passed %current_config; 1545 + return 0; 1546 + } 1547 + 1548 + process_failed $start_list[0]; 1549 + return 1; 1550 + } 1551 + 1552 + sub config_bisect { 1553 + my ($i) = @_; 1554 + 1555 + my $start_config = $opt{"CONFIG_BISECT[$i]"}; 1556 + 1557 + my $tmpconfig = "$tmpdir/use_config"; 1558 + 1559 + # Make the file with the bad config and the min config 1560 + if (defined($minconfig)) { 1561 + # read the min config for things to ignore 1562 + run_command "cp $minconfig $tmpconfig" or 1563 + dodie "failed to copy $minconfig to $tmpconfig"; 1564 + } else { 1565 + unlink $tmpconfig; 1566 + } 1567 + 1568 + # Add other configs 1569 + if (defined($addconfig)) { 1570 + run_command "cat $addconfig >> $tmpconfig" or 1571 + dodie "failed to append $addconfig"; 1572 + } 1573 + 1574 + my $defconfig = ""; 1575 + if (-f $tmpconfig) { 1576 + $defconfig = "KCONFIG_ALLCONFIG=$tmpconfig"; 1577 + process_config_ignore $tmpconfig; 1578 + } 1579 + 1580 + # now process the start config 1581 + run_command "cp $start_config $output_config" or 1582 + dodie "failed to copy $start_config to $output_config"; 1583 + 1584 + # read directly what we want to check 1585 + my %config_check; 1586 + open (IN, $output_config) 1587 + or dodie "faied to open $output_config"; 1588 + 1589 + while (<IN>) { 1590 + if (/^((CONFIG\S*)=.*)/) { 1591 + $config_check{$2} = $1; 1592 + } 1593 + } 1594 + close(IN); 1595 + 1596 + # Now run oldconfig with the minconfig (and addconfigs) 1597 + run_command "$defconfig $make oldnoconfig" or 1598 + dodie "failed make config oldconfig"; 1599 + 1600 + # check to see what we lost (or gained) 1601 + open (IN, $output_config) 1602 + or dodie "Failed to read $start_config"; 1603 + 1604 + my %removed_configs; 1605 + my %added_configs; 1606 + 1607 + while (<IN>) { 1608 + if (/^((CONFIG\S*)=.*)/) { 1609 + # save off all options 1610 + $config_set{$2} = $1; 1611 + if 
(defined($config_check{$2})) { 1612 + if (defined($config_ignore{$2})) { 1613 + $removed_configs{$2} = $1; 1614 + } else { 1615 + $config_list{$2} = $1; 1616 + } 1617 + } elsif (!defined($config_ignore{$2})) { 1618 + $added_configs{$2} = $1; 1619 + $config_list{$2} = $1; 1620 + } 1621 + } 1622 + } 1623 + close(IN); 1624 + 1625 + my @confs = keys %removed_configs; 1626 + if ($#confs >= 0) { 1627 + doprint "Configs overridden by default configs and removed from check:\n"; 1628 + foreach my $config (@confs) { 1629 + doprint " $config\n"; 1630 + } 1631 + } 1632 + @confs = keys %added_configs; 1633 + if ($#confs >= 0) { 1634 + doprint "Configs appearing in make oldconfig and added:\n"; 1635 + foreach my $config (@confs) { 1636 + doprint " $config\n"; 1637 + } 1638 + } 1639 + 1640 + my %config_test; 1641 + my $once = 0; 1642 + 1643 + # Sometimes kconfig does weird things. We must make sure 1644 + # that the config we autocreate has everything we need 1645 + # to test, otherwise we may miss testing configs, or 1646 + # may not be able to create a new config. 1647 + # Here we create a config with everything set. 
1648 + create_config (keys %config_list); 1649 + read_current_config \%config_test; 1650 + foreach my $config (keys %config_list) { 1651 + if (!defined($config_test{$config})) { 1652 + if (!$once) { 1653 + $once = 1; 1654 + doprint "Configs not produced by kconfig (will not be checked):\n"; 1655 + } 1656 + doprint " $config\n"; 1657 + delete $config_list{$config}; 1658 + } 1659 + } 1660 + my $ret; 1661 + do { 1662 + $ret = run_config_bisect; 1663 + } while (!$ret); 1664 + 1665 + return $ret if ($ret < 0); 1666 + 1667 + success $i; 1668 + } 1669 + 1670 + sub patchcheck { 1671 + my ($i) = @_; 1672 + 1673 + die "PATCHCHECK_START[$i] not defined\n" 1674 + if (!defined($opt{"PATCHCHECK_START[$i]"})); 1675 + die "PATCHCHECK_TYPE[$i] not defined\n" 1676 + if (!defined($opt{"PATCHCHECK_TYPE[$i]"})); 1677 + 1678 + my $start = $opt{"PATCHCHECK_START[$i]"}; 1679 + 1680 + my $end = "HEAD"; 1681 + if (defined($opt{"PATCHCHECK_END[$i]"})) { 1682 + $end = $opt{"PATCHCHECK_END[$i]"}; 1683 + } 1684 + 1685 + # Get the true sha1's since we can use things like HEAD~3 1686 + $start = get_sha1($start); 1687 + $end = get_sha1($end); 1688 + 1689 + my $type = $opt{"PATCHCHECK_TYPE[$i]"}; 1690 + 1691 + # Can't have a test without having a test to run 1692 + if ($type eq "test" && !defined($run_test)) { 1693 + $type = "boot"; 1694 + } 1695 + 1696 + open (IN, "git log --pretty=oneline $end|") or 1697 + dodie "could not get git list"; 1698 + 1699 + my @list; 1700 + 1701 + while (<IN>) { 1702 + chomp; 1703 + $list[$#list+1] = $_; 1704 + last if (/^$start/); 1705 + } 1706 + close(IN); 1707 + 1708 + if ($list[$#list] !~ /^$start/) { 1709 + fail "SHA1 $start not found"; 1710 + } 1711 + 1712 + # go backwards in the list 1713 + @list = reverse @list; 1714 + 1715 + my $save_clean = $noclean; 1716 + 1717 + $in_patchcheck = 1; 1718 + foreach my $item (@list) { 1719 + my $sha1 = $item; 1720 + $sha1 =~ s/^([[:xdigit:]]+).*/$1/; 1721 + 1722 + doprint "\nProcessing commit $item\n\n"; 1723 + 1724 + 
run_command "git checkout $sha1" or 1725 + die "Failed to checkout $sha1"; 1726 + 1727 + # only clean on the first and last patch 1728 + if ($item eq $list[0] || 1729 + $item eq $list[$#list]) { 1730 + $noclean = $save_clean; 1731 + } else { 1732 + $noclean = 1; 1733 + } 1734 + 1735 + if (defined($minconfig)) { 1736 + build "useconfig:$minconfig" or return 0; 1737 + } else { 1738 + # ?? no config to use? 1739 + build "oldconfig" or return 0; 1740 + } 1741 + 1742 + check_buildlog $sha1 or return 0; 1743 + 1744 + next if ($type eq "build"); 1745 + 1746 + get_grub_index; 1747 + get_version; 1748 + install; 1749 + 1750 + my $failed = 0; 1751 + 1752 + start_monitor; 1753 + monitor or $failed = 1; 1754 + 1755 + if (!$failed && $type ne "boot"){ 1756 + do_run_test or $failed = 1; 1757 + } 1758 + end_monitor; 1759 + return 0 if ($failed); 1760 + 1761 + } 1762 + $in_patchcheck = 0; 1763 + success $i; 1764 + 1765 + return 1; 1766 + } 1767 + 1768 + $#ARGV < 1 or die "ktest.pl version: $VERSION\n usage: ktest.pl config-file\n"; 1769 + 1770 + if ($#ARGV == 0) { 1771 + $ktest_config = $ARGV[0]; 1772 + if (! -f $ktest_config) { 1773 + print "$ktest_config does not exist.\n"; 1774 + my $ans; 1775 + for (;;) { 1776 + print "Create it? [Y/n] "; 1777 + $ans = <STDIN>; 1778 + chomp $ans; 1779 + if ($ans =~ /^\s*$/) { 1780 + $ans = "y"; 1781 + } 1782 + last if ($ans =~ /^y$/i || $ans =~ /^n$/i); 1783 + print "Please answer either 'y' or 'n'.\n"; 1784 + } 1785 + if ($ans !~ /^y$/i) { 1786 + exit 0; 1787 + } 1788 + } 1789 + } else { 1790 + $ktest_config = "ktest.conf"; 1791 + } 1792 + 1793 + if (! 
-f $ktest_config) { 1794 + open(OUT, ">$ktest_config") or die "Can not create $ktest_config"; 1795 + print OUT << "EOF" 1796 + # Generated by ktest.pl 1797 + # 1798 + # Define each test with TEST_START 1799 + # The config options below it will override the defaults 1800 + TEST_START 1801 + 1802 + DEFAULTS 1803 + EOF 1804 + ; 1805 + close(OUT); 1806 + } 1807 + read_config $ktest_config; 1808 + 1809 + # Append any configs entered in manually to the config file. 1810 + my @new_configs = keys %entered_configs; 1811 + if ($#new_configs >= 0) { 1812 + print "\nAppending entered in configs to $ktest_config\n"; 1813 + open(OUT, ">>$ktest_config") or die "Can not append to $ktest_config"; 1814 + foreach my $config (@new_configs) { 1815 + print OUT "$config = $entered_configs{$config}\n"; 1816 + $opt{$config} = $entered_configs{$config}; 1817 + } 1818 + } 1819 + 1820 + if ($opt{"CLEAR_LOG"} && defined($opt{"LOG_FILE"})) { 1821 + unlink $opt{"LOG_FILE"}; 1822 + } 1823 + 1824 + doprint "\n\nSTARTING AUTOMATED TESTS\n\n"; 1825 + 1826 + for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) { 1827 + 1828 + if (!$i) { 1829 + doprint "DEFAULT OPTIONS:\n"; 1830 + } else { 1831 + doprint "\nTEST $i OPTIONS"; 1832 + if (defined($repeat_tests{$i})) { 1833 + $repeat = $repeat_tests{$i}; 1834 + doprint " ITERATE $repeat"; 1835 + } 1836 + doprint "\n"; 1837 + } 1838 + 1839 + foreach my $option (sort keys %opt) { 1840 + 1841 + if ($option =~ /\[(\d+)\]$/) { 1842 + next if ($i != $1); 1843 + } else { 1844 + next if ($i); 1845 + } 1846 + 1847 + doprint "$option = $opt{$option}\n"; 1848 + } 1849 + } 1850 + 1851 + sub set_test_option { 1852 + my ($name, $i) = @_; 1853 + 1854 + my $option = "$name\[$i\]"; 1855 + 1856 + if (defined($opt{$option})) { 1857 + return $opt{$option}; 1858 + } 1859 + 1860 + foreach my $test (keys %repeat_tests) { 1861 + if ($i >= $test && 1862 + $i < $test + $repeat_tests{$test}) { 1863 + $option = "$name\[$test\]"; 1864 + if (defined($opt{$option})) 
{ 1865 + return $opt{$option}; 1866 + } 1867 + } 1868 + } 1869 + 1870 + if (defined($opt{$name})) { 1871 + return $opt{$name}; 1872 + } 1873 + 1874 + return undef; 1875 + } 1876 + 1877 + # First we need to do is the builds 1878 + for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { 1879 + 1880 + $iteration = $i; 1881 + 1882 + my $makecmd = set_test_option("MAKE_CMD", $i); 1883 + 1884 + $machine = set_test_option("MACHINE", $i); 1885 + $ssh_user = set_test_option("SSH_USER", $i); 1886 + $tmpdir = set_test_option("TMP_DIR", $i); 1887 + $outputdir = set_test_option("OUTPUT_DIR", $i); 1888 + $builddir = set_test_option("BUILD_DIR", $i); 1889 + $test_type = set_test_option("TEST_TYPE", $i); 1890 + $build_type = set_test_option("BUILD_TYPE", $i); 1891 + $build_options = set_test_option("BUILD_OPTIONS", $i); 1892 + $power_cycle = set_test_option("POWER_CYCLE", $i); 1893 + $reboot = set_test_option("REBOOT", $i); 1894 + $noclean = set_test_option("BUILD_NOCLEAN", $i); 1895 + $minconfig = set_test_option("MIN_CONFIG", $i); 1896 + $run_test = set_test_option("TEST", $i); 1897 + $addconfig = set_test_option("ADD_CONFIG", $i); 1898 + $reboot_type = set_test_option("REBOOT_TYPE", $i); 1899 + $grub_menu = set_test_option("GRUB_MENU", $i); 1900 + $post_install = set_test_option("POST_INSTALL", $i); 1901 + $reboot_script = set_test_option("REBOOT_SCRIPT", $i); 1902 + $reboot_on_error = set_test_option("REBOOT_ON_ERROR", $i); 1903 + $poweroff_on_error = set_test_option("POWEROFF_ON_ERROR", $i); 1904 + $die_on_failure = set_test_option("DIE_ON_FAILURE", $i); 1905 + $power_off = set_test_option("POWER_OFF", $i); 1906 + $powercycle_after_reboot = set_test_option("POWERCYCLE_AFTER_REBOOT", $i); 1907 + $poweroff_after_halt = set_test_option("POWEROFF_AFTER_HALT", $i); 1908 + $sleep_time = set_test_option("SLEEP_TIME", $i); 1909 + $bisect_sleep_time = set_test_option("BISECT_SLEEP_TIME", $i); 1910 + $store_failures = set_test_option("STORE_FAILURES", $i); 1911 + $timeout = 
set_test_option("TIMEOUT", $i); 1912 + $booted_timeout = set_test_option("BOOTED_TIMEOUT", $i); 1913 + $console = set_test_option("CONSOLE", $i); 1914 + $success_line = set_test_option("SUCCESS_LINE", $i); 1915 + $stop_after_success = set_test_option("STOP_AFTER_SUCCESS", $i); 1916 + $stop_after_failure = set_test_option("STOP_AFTER_FAILURE", $i); 1917 + $build_target = set_test_option("BUILD_TARGET", $i); 1918 + $ssh_exec = set_test_option("SSH_EXEC", $i); 1919 + $scp_to_target = set_test_option("SCP_TO_TARGET", $i); 1920 + $target_image = set_test_option("TARGET_IMAGE", $i); 1921 + $localversion = set_test_option("LOCALVERSION", $i); 1922 + 1923 + chdir $builddir || die "can't change directory to $builddir"; 1924 + 1925 + if (!-d $tmpdir) { 1926 + mkpath($tmpdir) or 1927 + die "can't create $tmpdir"; 1928 + } 1929 + 1930 + $ENV{"SSH_USER"} = $ssh_user; 1931 + $ENV{"MACHINE"} = $machine; 1932 + 1933 + $target = "$ssh_user\@$machine"; 1934 + 1935 + $buildlog = "$tmpdir/buildlog-$machine"; 1936 + $dmesg = "$tmpdir/dmesg-$machine"; 1937 + $make = "$makecmd O=$outputdir"; 1938 + $output_config = "$outputdir/.config"; 1939 + 1940 + if ($reboot_type eq "grub") { 1941 + dodie "GRUB_MENU not defined" if (!defined($grub_menu)); 1942 + } elsif (!defined($reboot_script)) { 1943 + dodie "REBOOT_SCRIPT not defined" 1944 + } 1945 + 1946 + my $run_type = $build_type; 1947 + if ($test_type eq "patchcheck") { 1948 + $run_type = $opt{"PATCHCHECK_TYPE[$i]"}; 1949 + } elsif ($test_type eq "bisect") { 1950 + $run_type = $opt{"BISECT_TYPE[$i]"}; 1951 + } elsif ($test_type eq "config_bisect") { 1952 + $run_type = $opt{"CONFIG_BISECT_TYPE[$i]"}; 1953 + } 1954 + 1955 + # mistake in config file? 
1956 + if (!defined($run_type)) { 1957 + $run_type = "ERROR"; 1958 + } 1959 + 1960 + doprint "\n\n"; 1961 + doprint "RUNNING TEST $i of $opt{NUM_TESTS} with option $test_type $run_type\n\n"; 1962 + 1963 + unlink $dmesg; 1964 + unlink $buildlog; 1965 + 1966 + if (!defined($minconfig)) { 1967 + $minconfig = $addconfig; 1968 + 1969 + } elsif (defined($addconfig)) { 1970 + run_command "cat $addconfig $minconfig > $tmpdir/add_config" or 1971 + dodie "Failed to create temp config"; 1972 + $minconfig = "$tmpdir/add_config"; 1973 + } 1974 + 1975 + my $checkout = $opt{"CHECKOUT[$i]"}; 1976 + if (defined($checkout)) { 1977 + run_command "git checkout $checkout" or 1978 + die "failed to checkout $checkout"; 1979 + } 1980 + 1981 + if ($test_type eq "bisect") { 1982 + bisect $i; 1983 + next; 1984 + } elsif ($test_type eq "config_bisect") { 1985 + config_bisect $i; 1986 + next; 1987 + } elsif ($test_type eq "patchcheck") { 1988 + patchcheck $i; 1989 + next; 1990 + } 1991 + 1992 + if ($build_type ne "nobuild") { 1993 + build $build_type or next; 1994 + } 1995 + 1996 + if ($test_type ne "build") { 1997 + get_grub_index; 1998 + get_version; 1999 + install; 2000 + 2001 + my $failed = 0; 2002 + start_monitor; 2003 + monitor or $failed = 1;; 2004 + 2005 + if (!$failed && $test_type ne "boot" && defined($run_test)) { 2006 + do_run_test or $failed = 1; 2007 + } 2008 + end_monitor; 2009 + next if ($failed); 2010 + } 2011 + 2012 + success $i; 2013 + } 2014 + 2015 + if ($opt{"POWEROFF_ON_SUCCESS"}) { 2016 + halt; 2017 + } elsif ($opt{"REBOOT_ON_SUCCESS"} && !do_not_reboot) { 2018 + reboot; 2019 + } 2020 + 2021 + doprint "\n $successes of $opt{NUM_TESTS} tests were successful\n\n"; 2022 + 2023 + exit 0;
+622
tools/testing/ktest/sample.conf
··· 1 + # 2 + # Config file for ktest.pl 3 + # 4 + # Note, all paths must be absolute 5 + # 6 + 7 + # Options set in the beginning of the file are considered to be 8 + # default options. These options can be overriden by test specific 9 + # options, with the following exceptions: 10 + # 11 + # LOG_FILE 12 + # CLEAR_LOG 13 + # POWEROFF_ON_SUCCESS 14 + # REBOOT_ON_SUCCESS 15 + # 16 + # Test specific options are set after the label: 17 + # 18 + # TEST_START 19 + # 20 + # The options after a TEST_START label are specific to that test. 21 + # Each TEST_START label will set up a new test. If you want to 22 + # perform a test more than once, you can add the ITERATE label 23 + # to it followed by the number of times you want that test 24 + # to iterate. If the ITERATE is left off, the test will only 25 + # be performed once. 26 + # 27 + # TEST_START ITERATE 10 28 + # 29 + # You can skip a test by adding SKIP (before or after the ITERATE 30 + # and number) 31 + # 32 + # TEST_START SKIP 33 + # 34 + # TEST_START SKIP ITERATE 10 35 + # 36 + # TEST_START ITERATE 10 SKIP 37 + # 38 + # The SKIP label causes the options and the test itself to be ignored. 39 + # This is useful to set up several different tests in one config file, and 40 + # only enabling the ones you want to use for a current test run. 41 + # 42 + # You can add default options anywhere in the file as well 43 + # with the DEFAULTS tag. This allows you to have default options 44 + # after the test options to keep the test options at the top 45 + # of the file. You can even place the DEFAULTS tag between 46 + # test cases (but not in the middle of a single test case) 47 + # 48 + # TEST_START 49 + # MIN_CONFIG = /home/test/config-test1 50 + # 51 + # DEFAULTS 52 + # MIN_CONFIG = /home/test/config-default 53 + # 54 + # TEST_START ITERATE 10 55 + # 56 + # The above will run the first test with MIN_CONFIG set to 57 + # /home/test/config-test-1. 
Then 10 tests will be executed 58 + # with MIN_CONFIG with /home/test/config-default. 59 + # 60 + # You can also disable defaults with the SKIP option 61 + # 62 + # DEFAULTS SKIP 63 + # MIN_CONFIG = /home/test/config-use-sometimes 64 + # 65 + # DEFAULTS 66 + # MIN_CONFIG = /home/test/config-most-times 67 + # 68 + # The above will ignore the first MIN_CONFIG. If you want to 69 + # use the first MIN_CONFIG, remove the SKIP from the first 70 + # DEFAULTS tag and add it to the second. Be careful, options 71 + # may only be declared once per test or default. If you have 72 + # the same option name under the same test or as default 73 + # ktest will fail to execute, and no tests will run. 74 + # 75 + 76 + 77 + #### Mandatory Default Options #### 78 + 79 + # These options must be in the default section, although most 80 + # may be overridden by test options. 81 + 82 + # The machine hostname that you will test 83 + #MACHINE = target 84 + 85 + # The box is expected to have ssh on normal bootup, provide the user 86 + # (most likely root, since you need privileged operations) 87 + #SSH_USER = root 88 + 89 + # The directory that contains the Linux source code 90 + #BUILD_DIR = /home/test/linux.git 91 + 92 + # The directory that the objects will be built 93 + # (can not be same as BUILD_DIR) 94 + #OUTPUT_DIR = /home/test/build/target 95 + 96 + # The location of the compiled file to copy to the target 97 + # (relative to OUTPUT_DIR) 98 + #BUILD_TARGET = arch/x86/boot/bzImage 99 + 100 + # The place to put your image on the test machine 101 + #TARGET_IMAGE = /boot/vmlinuz-test 102 + 103 + # A script or command to reboot the box 104 + # 105 + # Here is a digital loggers power switch example 106 + #POWER_CYCLE = wget --no-proxy -O /dev/null -q --auth-no-challenge 'http://admin:admin@power/outlet?5=CCL' 107 + # 108 + # Here is an example to reboot a virtual box on the current host 109 + # with the name "Guest". 
110 + #POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest 111 + 112 + # The script or command that reads the console 113 + # 114 + # If you use ttywatch server, something like the following would work. 115 + #CONSOLE = nc -d localhost 3001 116 + # 117 + # For a virtual machine with guest name "Guest". 118 + #CONSOLE = virsh console Guest 119 + 120 + # Required version ending to differentiate the test 121 + # from other linux builds on the system. 122 + #LOCALVERSION = -test 123 + 124 + # The grub title name for the test kernel to boot 125 + # (Only mandatory if REBOOT_TYPE = grub) 126 + # 127 + # Note, ktest.pl will not update the grub menu.lst, you need to 128 + # manually add an option for the test. ktest.pl will search 129 + # the grub menu.lst for this option to find what kernel to 130 + # reboot into. 131 + # 132 + # For example, if in the /boot/grub/menu.lst the test kernel title has: 133 + # title Test Kernel 134 + # kernel vmlinuz-test 135 + #GRUB_MENU = Test Kernel 136 + 137 + # A script to reboot the target into the test kernel 138 + # (Only mandatory if REBOOT_TYPE = script) 139 + #REBOOT_SCRIPT = 140 + 141 + #### Optional Config Options (all have defaults) #### 142 + 143 + # Start a test setup. If you leave this off, all options 144 + # will be default and the test will run once. 145 + # This is a label and not really an option (it takes no value). 146 + # You can append ITERATE and a number after it to iterate the 147 + # test a number of times, or SKIP to ignore this test. 148 + # 149 + #TEST_START 150 + #TEST_START ITERATE 5 151 + #TEST_START SKIP 152 + 153 + # Have the following options as default again. Used after tests 154 + # have already been defined by TEST_START. Optionally, you can 155 + # just define all default options before the first TEST_START 156 + # and you do not need this option. 157 + # 158 + # This is a label and not really an option (it takes no value). 
159 + # You can append SKIP to this label and the options within this 160 + # section will be ignored. 161 + # 162 + # DEFAULTS 163 + # DEFAULTS SKIP 164 + 165 + # The default test type (default test) 166 + # The test types may be: 167 + # build - only build the kernel, do nothing else 168 + # boot - build and boot the kernel 169 + # test - build, boot and if TEST is set, run the test script 170 + # (If TEST is not set, it defaults back to boot) 171 + # bisect - Perform a bisect on the kernel (see BISECT_TYPE below) 172 + # patchcheck - Do a test on a series of commits in git (see PATCHCHECK below) 173 + #TEST_TYPE = test 174 + 175 + # Test to run if there is a successful boot and TEST_TYPE is test. 176 + # Must exit with 0 on success and non zero on error 177 + # default (undefined) 178 + #TEST = ssh user@machine /root/run_test 179 + 180 + # The build type is any make config type or special command 181 + # (default randconfig) 182 + # nobuild - skip the clean and build step 183 + # useconfig:/path/to/config - use the given config and run 184 + # oldconfig on it. 185 + # This option is ignored if TEST_TYPE is patchcheck or bisect 186 + #BUILD_TYPE = randconfig 187 + 188 + # The make command (default make) 189 + # If you are building a 32bit x86 on a 64 bit host 190 + #MAKE_CMD = CC=i386-gcc AS=i386-as make ARCH=i386 191 + 192 + # Any build options for the make of the kernel (not for other makes, like configs) 193 + # (default "") 194 + #BUILD_OPTIONS = -j20 195 + 196 + # If you need an initrd, you can add a script or code here to install 197 + # it. The environment variable KERNEL_VERSION will be set to the 198 + # kernel version that is used. Remember to add the initrd line 199 + # to your grub menu.lst file. 
200 + # 201 + # Here's a couple of examples to use: 202 + #POST_INSTALL = ssh user@target /sbin/mkinitrd --allow-missing -f /boot/initramfs-test.img $KERNEL_VERSION 203 + # 204 + # or on some systems: 205 + #POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION 206 + 207 + # Way to reboot the box to the test kernel. 208 + # Only valid options so far are "grub" and "script" 209 + # (default grub) 210 + # If you specify grub, it will assume grub version 1 211 + # and will search in /boot/grub/menu.lst for the title $GRUB_MENU 212 + # and select that target to reboot to the kernel. If this is not 213 + # your setup, then specify "script" and have a command or script 214 + # specified in REBOOT_SCRIPT to boot to the target. 215 + # 216 + # The entry in /boot/grub/menu.lst must be entered in manually. 217 + # The test will not modify that file. 218 + #REBOOT_TYPE = grub 219 + 220 + # The min config that is needed to build for the machine 221 + # A nice way to create this is with the following: 222 + # 223 + # $ ssh target 224 + # $ lsmod > mymods 225 + # $ scp mymods host:/tmp 226 + # $ exit 227 + # $ cd linux.git 228 + # $ rm .config 229 + # $ make LSMOD=mymods localyesconfig 230 + # $ grep '^CONFIG' .config > /home/test/config-min 231 + # 232 + # If you want even less configs: 233 + # 234 + # log in directly to target (do not ssh) 235 + # 236 + # $ su 237 + # # lsmod | cut -d' ' -f1 | xargs rmmod 238 + # 239 + # repeat the above several times 240 + # 241 + # # lsmod > mymods 242 + # # reboot 243 + # 244 + # May need to reboot to get your network back to copy the mymods 245 + # to the host, and then remove the previous .config and run the 246 + # localyesconfig again. The CONFIG_MIN generated like this will 247 + # not guarantee network activity to the box so the TEST_TYPE of 248 + # test may fail. 
249 + # 250 + # You might also want to set: 251 + # CONFIG_CMDLINE="<your options here>" 252 + # randconfig may set the above and override your real command 253 + # line options. 254 + # (default undefined) 255 + #MIN_CONFIG = /home/test/config-min 256 + 257 + # Sometimes there's options that just break the boot and 258 + # you do not care about. Here are a few: 259 + # # CONFIG_STAGING is not set 260 + # Staging drivers are horrible, and can break the build. 261 + # # CONFIG_SCSI_DEBUG is not set 262 + # SCSI_DEBUG may change your root partition 263 + # # CONFIG_KGDB_SERIAL_CONSOLE is not set 264 + # KGDB may cause oops waiting for a connection that's not there. 265 + # This option points to the file containing config options that will be prepended 266 + # to the MIN_CONFIG (or be the MIN_CONFIG if it is not set) 267 + # 268 + # Note, config options in MIN_CONFIG will override these options. 269 + # 270 + # (default undefined) 271 + #ADD_CONFIG = /home/test/config-broken 272 + 273 + # The location on the host where to write temp files 274 + # (default /tmp/ktest) 275 + #TMP_DIR = /tmp/ktest 276 + 277 + # Optional log file to write the status (recommended) 278 + # Note, this is a DEFAULT section only option. 279 + # (default undefined) 280 + #LOG_FILE = /home/test/logfiles/target.log 281 + 282 + # Remove old logfile if it exists before starting all tests. 283 + # Note, this is a DEFAULT section only option. 284 + # (default 0) 285 + #CLEAR_LOG = 0 286 + 287 + # Line to define a successful boot up in console output. 288 + # This is what the line contains, not the entire line. If you need 289 + # the entire line to match, then use regular expression syntax like: 290 + # (do not add any quotes around it) 291 + # 292 + # SUCCESS_LINE = ^MyBox Login:$ 293 + # 294 + # (default "login:") 295 + #SUCCESS_LINE = login: 296 + 297 + # In case the console constantly fills the screen, having 298 + # a specified time to stop the test after success is recommended. 
299 + # (in seconds) 300 + # (default 10) 301 + #STOP_AFTER_SUCCESS = 10 302 + 303 + # In case the console constantly fills the screen, having 304 + # a specified time to stop the test after failure is recommended. 305 + # (in seconds) 306 + # (default 60) 307 + #STOP_AFTER_FAILURE = 60 308 + 309 + # Stop testing if a build fails. If set, the script will end if 310 + # a failure is detected, otherwise it will save off the .config, 311 + # dmesg and bootlog in a directory called 312 + # MACHINE-TEST_TYPE_BUILD_TYPE-fail-yyyymmddhhmmss 313 + # if the STORE_FAILURES directory is set. 314 + # (default 1) 315 + # Note, even if this is set to zero, there are some errors that still 316 + # stop the tests. 317 + #DIE_ON_FAILURE = 1 318 + 319 + # Directory to store failure directories on failure. If this is not 320 + # set, DIE_ON_FAILURE=0 will not save off the .config, dmesg and 321 + # bootlog. This option is ignored if DIE_ON_FAILURE is not set. 322 + # (default undefined) 323 + #STORE_FAILURES = /home/test/failures 324 + 325 + # Build without doing a make mrproper, or removing .config 326 + # (default 0) 327 + #BUILD_NOCLEAN = 0 328 + 329 + # As the test reads the console, after it hits the SUCCESS_LINE 330 + # the time it waits for the monitor to settle down between reads 331 + # can usually be lowered. 332 + # (in seconds) (default 1) 333 + #BOOTED_TIMEOUT = 1 334 + 335 + # The timeout in seconds when we consider the box hung after 336 + # the console stop producing output. Be sure to leave enough 337 + # time here to get pass a reboot. Some machines may not produce 338 + # any console output for a long time during a reboot. You do 339 + # not want the test to fail just because the system was in 340 + # the process of rebooting to the test kernel. 341 + # (default 120) 342 + #TIMEOUT = 120 343 + 344 + # In between tests, a reboot of the box may occur, and this 345 + # is the time to wait for the console after it stops producing 346 + # output. 
Some machines may not produce a large lag on reboot 347 + # so this should accommodate it. 348 + # The difference between this and TIMEOUT, is that TIMEOUT happens 349 + # when rebooting to the test kernel. This sleep time happens 350 + # after a test has completed and we are about to start running 351 + # another test. If a reboot to the reliable kernel happens, 352 + # we wait SLEEP_TIME for the console to stop producing output 353 + # before starting the next test. 354 + # (default 60) 355 + #SLEEP_TIME = 60 356 + 357 + # The time in between bisects to sleep (in seconds) 358 + # (default 60) 359 + #BISECT_SLEEP_TIME = 60 360 + 361 + # Reboot the target box on error (default 0) 362 + #REBOOT_ON_ERROR = 0 363 + 364 + # Power off the target on error (ignored if REBOOT_ON_ERROR is set) 365 + # Note, this is a DEFAULT section only option. 366 + # (default 0) 367 + #POWEROFF_ON_ERROR = 0 368 + 369 + # Power off the target after all tests have completed successfully 370 + # Note, this is a DEFAULT section only option. 371 + # (default 0) 372 + #POWEROFF_ON_SUCCESS = 0 373 + 374 + # Reboot the target after all test completed successfully (default 1) 375 + # (ignored if POWEROFF_ON_SUCCESS is set) 376 + #REBOOT_ON_SUCCESS = 1 377 + 378 + # In case there are issues with rebooting, you can specify this 379 + # to always powercycle after this amount of time after calling 380 + # reboot. 381 + # Note, POWERCYCLE_AFTER_REBOOT = 0 does NOT disable it. It just 382 + # makes it powercycle immediately after rebooting. Do not define 383 + # it if you do not want it. 384 + # (default undefined) 385 + #POWERCYCLE_AFTER_REBOOT = 5 386 + 387 + # In case there's issues with halting, you can specify this 388 + # to always poweroff after this amount of time after calling 389 + # halt. 390 + # Note, POWEROFF_AFTER_HALT = 0 does NOT disable it. It just 391 + # makes it poweroff immediately after halting. Do not define 392 + # it if you do not want it. 
393 + # (default undefined) 394 + #POWEROFF_AFTER_HALT = 20 395 + 396 + # A script or command to power off the box (default undefined) 397 + # Needed for POWEROFF_ON_ERROR and SUCCESS 398 + # 399 + # Example for digital loggers power switch: 400 + #POWER_OFF = wget --no-proxy -O /dev/null -q --auth-no-challenge 'http://admin:admin@power/outlet?5=OFF' 401 + # 402 + # Example for a virtual guest call "Guest". 403 + #POWER_OFF = virsh destroy Guest 404 + 405 + # The way to execute a command on the target 406 + # (default ssh $SSH_USER@$MACHINE $SSH_COMMAND";) 407 + # The variables SSH_USER, MACHINE and SSH_COMMAND are defined 408 + #SSH_EXEC = ssh $SSH_USER@$MACHINE $SSH_COMMAND"; 409 + 410 + # The way to copy a file to the target 411 + # (default scp $SRC_FILE $SSH_USER@$MACHINE:$DST_FILE) 412 + # The variables SSH_USER, MACHINE, SRC_FILE and DST_FILE are defined. 413 + #SCP_TO_TARGET = scp $SRC_FILE $SSH_USER@$MACHINE:$DST_FILE 414 + 415 + # The nice way to reboot the target 416 + # (default ssh $SSH_USER@$MACHINE reboot) 417 + # The variables SSH_USER and MACHINE are defined. 418 + #REBOOT = ssh $SSH_USER@$MACHINE reboot 419 + 420 + #### Per test run options #### 421 + # The following options are only allowed in TEST_START sections. 422 + # They are ignored in the DEFAULTS sections. 423 + # 424 + # All of these are optional and undefined by default, although 425 + # some of these options are required for TEST_TYPE of patchcheck 426 + # and bisect. 427 + # 428 + # 429 + # CHECKOUT = branch 430 + # 431 + # If the BUILD_DIR is a git repository, then you can set this option 432 + # to checkout the given branch before running the TEST. If you 433 + # specify this for the first run, that branch will be used for 434 + # all preceding tests until a new CHECKOUT is set. 435 + # 436 + # 437 + # 438 + # For TEST_TYPE = patchcheck 439 + # 440 + # This expects the BUILD_DIR to be a git repository, and 441 + # will checkout the PATCHCHECK_START commit. 
442 + # 443 + # The option BUILD_TYPE will be ignored. 444 + # 445 + # The MIN_CONFIG will be used for all builds of the patchcheck. The build type 446 + # used for patchcheck is oldconfig. 447 + # 448 + # PATCHCHECK_START is required and is the first patch to 449 + # test (the SHA1 of the commit). You may also specify anything 450 + # that git checkout allows (branch name, tage, HEAD~3). 451 + # 452 + # PATCHCHECK_END is the last patch to check (default HEAD) 453 + # 454 + # PATCHCHECK_TYPE is required and is the type of test to run: 455 + # build, boot, test. 456 + # 457 + # Note, the build test will look for warnings, if a warning occurred 458 + # in a file that a commit touches, the build will fail. 459 + # 460 + # If BUILD_NOCLEAN is set, then make mrproper will not be run on 461 + # any of the builds, just like all other TEST_TYPE tests. But 462 + # what makes patchcheck different from the other tests, is if 463 + # BUILD_NOCLEAN is not set, only the first and last patch run 464 + # make mrproper. This helps speed up the test. 465 + # 466 + # Example: 467 + # TEST_START 468 + # TEST_TYPE = patchcheck 469 + # CHECKOUT = mybranch 470 + # PATCHCHECK_TYPE = boot 471 + # PATCHCHECK_START = 747e94ae3d1b4c9bf5380e569f614eb9040b79e7 472 + # PATCHCHECK_END = HEAD~2 473 + # 474 + # 475 + # 476 + # For TEST_TYPE = bisect 477 + # 478 + # You can specify a git bisect if the BUILD_DIR is a git repository. 479 + # The MIN_CONFIG will be used for all builds of the bisect. The build type 480 + # used for bisecting is oldconfig. 481 + # 482 + # The option BUILD_TYPE will be ignored. 
483 + # 484 + # BISECT_TYPE is the type of test to perform: 485 + # build - bad fails to build 486 + # boot - bad builds but fails to boot 487 + # test - bad boots but fails a test 488 + # 489 + # BISECT_GOOD is the commit (SHA1) to label as good (accepts all git good commit types) 490 + # BISECT_BAD is the commit to label as bad (accepts all git bad commit types) 491 + # 492 + # The above three options are required for a bisect operation. 493 + # 494 + # BISECT_REPLAY = /path/to/replay/file (optional, default undefined) 495 + # 496 + # If an operation failed in the bisect that was not expected to 497 + # fail. Then the test ends. The state of the BUILD_DIR will be 498 + # left off at where the failure occurred. You can examine the 499 + # reason for the failure, and perhaps even find a git commit 500 + # that would work to continue with. You can run: 501 + # 502 + # git bisect log > /path/to/replay/file 503 + # 504 + # The adding: 505 + # 506 + # BISECT_REPLAY= /path/to/replay/file 507 + # 508 + # And running the test again. The test will perform the initial 509 + # git bisect start, git bisect good, and git bisect bad, and 510 + # then it will run git bisect replay on this file, before 511 + # continuing with the bisect. 512 + # 513 + # BISECT_START = commit (optional, default undefined) 514 + # 515 + # As with BISECT_REPLAY, if the test failed on a commit that 516 + # just happen to have a bad commit in the middle of the bisect, 517 + # and you need to skip it. If BISECT_START is defined, it 518 + # will checkout that commit after doing the initial git bisect start, 519 + # git bisect good, git bisect bad, and running the git bisect replay 520 + # if the BISECT_REPLAY is set. 521 + # 522 + # BISECT_REVERSE = 1 (optional, default 0) 523 + # 524 + # In those strange instances where it was broken forever 525 + # and you are trying to find where it started to work! 
526 + # Set BISECT_GOOD to the commit that was last known to fail 527 + # Set BISECT_BAD to the commit that is known to start working. 528 + # With BISECT_REVERSE = 1, The test will consider failures as 529 + # good, and success as bad. 530 + # 531 + # BISECT_CHECK = 1 (optional, default 0) 532 + # 533 + # Just to be sure the good is good and bad is bad, setting 534 + # BISECT_CHECK to 1 will start the bisect by first checking 535 + # out BISECT_BAD and makes sure it fails, then it will check 536 + # out BISECT_GOOD and makes sure it succeeds before starting 537 + # the bisect (it works for BISECT_REVERSE too). 538 + # 539 + # You can limit the test to just check BISECT_GOOD or 540 + # BISECT_BAD with BISECT_CHECK = good or 541 + # BISECT_CHECK = bad, respectively. 542 + # 543 + # Example: 544 + # TEST_START 545 + # TEST_TYPE = bisect 546 + # BISECT_GOOD = v2.6.36 547 + # BISECT_BAD = b5153163ed580e00c67bdfecb02b2e3843817b3e 548 + # BISECT_TYPE = build 549 + # MIN_CONFIG = /home/test/config-bisect 550 + # 551 + # 552 + # 553 + # For TEST_TYPE = config_bisect 554 + # 555 + # In those cases that you have two different configs. One of them 556 + # work, the other does not, and you do not know what config causes 557 + # the problem. 558 + # The TEST_TYPE config_bisect will bisect the bad config looking for 559 + # what config causes the failure. 560 + # 561 + # The way it works is this: 562 + # 563 + # First it finds a config to work with. Since a different version, or 564 + # MIN_CONFIG may cause different dependecies, it must run through this 565 + # preparation. 566 + # 567 + # Overwrites any config set in the bad config with a config set in 568 + # either the MIN_CONFIG or ADD_CONFIG. Thus, make sure these configs 569 + # are minimal and do not disable configs you want to test: 570 + # (ie. # CONFIG_FOO is not set). 571 + # 572 + # An oldconfig is run on the bad config and any new config that 573 + # appears will be added to the configs to test. 
574 + # 575 + # Finally, it generates a config with the above result and runs it 576 + # again through make oldconfig to produce a config that should be 577 + # satisfied by kconfig. 578 + # 579 + # Then it starts the bisect. 580 + # 581 + # The configs to test are cut in half. If all the configs in this 582 + # half depend on a config in the other half, then the other half 583 + # is tested instead. If no configs are enabled by either half, then 584 + # this means a circular dependency exists and the test fails. 585 + # 586 + # A config is created with the test half, and the bisect test is run. 587 + # 588 + # If the bisect succeeds, then all configs in the generated config 589 + # are removed from the configs to test and added to the configs that 590 + # will be enabled for all builds (they will be enabled, but not be part 591 + # of the configs to examine). 592 + # 593 + # If the bisect fails, then all test configs that were not enabled by 594 + # the config file are removed from the test. These configs will not 595 + # be enabled in future tests. Since current config failed, we consider 596 + # this to be a subset of the config that we started with. 597 + # 598 + # When we are down to one config, it is considered the bad config. 599 + # 600 + # Note, the config chosen may not be the true bad config. Due to 601 + # dependencies and selections of the kbuild system, mulitple 602 + # configs may be needed to cause a failure. If you disable the 603 + # config that was found and restart the test, if the test fails 604 + # again, it is recommended to rerun the config_bisect with a new 605 + # bad config without the found config enabled. 606 + # 607 + # The option BUILD_TYPE will be ignored. 
608 + # 609 + # CONFIG_BISECT_TYPE is the type of test to perform: 610 + # build - bad fails to build 611 + # boot - bad builds but fails to boot 612 + # test - bad boots but fails a test 613 + # 614 + # CONFIG_BISECT is the config that failed to boot 615 + # 616 + # Example: 617 + # TEST_START 618 + # TEST_TYPE = config_bisect 619 + # CONFIG_BISECT_TYPE = build 620 + # CONFIG_BISECT = /home/test/config-bad 621 + # MIN_CONFIG = /home/test/config-min 622 + #