Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge commit 'origin/master' into next

Manual merge of:
drivers/char/hvc_console.c
drivers/char/hvc_console.h

+8635 -3381
-1
Documentation/dontdiff
··· 69 69 bbootsect 70 70 bin2c 71 71 binkernel.spec 72 - binoffset 73 72 bootsect 74 73 bounds.h 75 74 bsetup
+9
Documentation/kernel-parameters.txt
··· 199 199 acpi_display_output=video 200 200 See above. 201 201 202 + acpi_early_pdc_eval [HW,ACPI] Evaluate processor _PDC methods 203 + early. Needed on some platforms to properly 204 + initialize the EC. 205 + 202 206 acpi_irq_balance [HW,ACPI] 203 207 ACPI will balance active IRQs 204 208 default in APIC mode ··· 314 310 315 311 aic79xx= [HW,SCSI] 316 312 See Documentation/scsi/aic79xx.txt. 313 + 314 + alignment= [KNL,ARM] 315 + Allow the default userspace alignment fault handler 316 + behaviour to be specified. Bit 0 enables warnings, 317 + bit 1 enables fixups, and bit 2 sends a segfault. 317 318 318 319 amd_iommu= [HW,X86-84] 319 320 Pass parameters to the AMD IOMMU driver in the system.
-1
Documentation/lguest/lguest.c
··· 34 34 #include <sys/uio.h> 35 35 #include <termios.h> 36 36 #include <getopt.h> 37 - #include <zlib.h> 38 37 #include <assert.h> 39 38 #include <sched.h> 40 39 #include <limits.h>
+4 -4
Documentation/networking/ip-sysctl.txt
··· 1074 1074 Default: 5 1075 1075 1076 1076 max_addresses - INTEGER 1077 - Number of maximum addresses per interface. 0 disables limitation. 1078 - It is recommended not set too large value (or 0) because it would 1079 - be too easy way to crash kernel to allow to create too much of 1080 - autoconfigured addresses. 1077 + Maximum number of autoconfigured addresses per interface. Setting 1078 + to zero disables the limitation. It is not recommended to set this 1079 + value too large (or to zero) because it would be an easy way to 1080 + crash the kernel by allowing too many addresses to be created. 1081 1081 Default: 16 1082 1082 1083 1083 disable_ipv6 - BOOLEAN
+13 -8
MAINTAINERS
··· 616 616 S: Maintained 617 617 618 618 ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE 619 - M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt> 619 + M: Paulius Zaleckas <paulius.zaleckas@gmail.com> 620 620 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 621 621 T: git git://gitorious.org/linux-gemini/mainline.git 622 - S: Maintained 622 + S: Odd Fixes 623 623 F: arch/arm/mach-gemini/ 624 624 625 625 ARM/EBSA110 MACHINE SUPPORT ··· 641 641 F: arch/arm/mach-pxa/ezx.c 642 642 643 643 ARM/FARADAY FA526 PORT 644 - M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt> 644 + M: Paulius Zaleckas <paulius.zaleckas@gmail.com> 645 645 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 646 - S: Maintained 646 + S: Odd Fixes 647 647 F: arch/arm/mm/*-fa* 648 648 649 649 ARM/FOOTBRIDGE ARCHITECTURE ··· 1733 1733 F: net/dccp/ 1734 1734 1735 1735 DECnet NETWORK LAYER 1736 - M: Christine Caulfield <christine.caulfield@googlemail.com> 1737 1736 W: http://linux-decnet.sourceforge.net 1738 1737 L: linux-decnet-user@lists.sourceforge.net 1739 - S: Maintained 1738 + S: Orphan 1740 1739 F: Documentation/networking/decnet.txt 1741 1740 F: net/decnet/ 1742 1741 ··· 2392 2393 L: linuxppc-dev@ozlabs.org 2393 2394 S: Odd Fixes 2394 2395 F: drivers/char/hvc_* 2396 + 2397 + VIRTIO CONSOLE DRIVER 2398 + M: Amit Shah <amit.shah@redhat.com> 2399 + L: virtualization@lists.linux-foundation.org 2400 + S: Maintained 2401 + F: drivers/char/virtio_console.c 2395 2402 2396 2403 GSPCA FINEPIX SUBDRIVER 2397 2404 M: Frank Zago <frank@zago.net> ··· 3495 3490 F: drivers/net/wireless/libertas/ 3496 3491 3497 3492 MARVELL MV643XX ETHERNET DRIVER 3498 - M: Lennert Buytenhek <buytenh@marvell.com> 3493 + M: Lennert Buytenhek <buytenh@wantstofly.org> 3499 3494 L: netdev@vger.kernel.org 3500 - S: Supported 3495 + S: Maintained 3501 3496 F: drivers/net/mv643xx_eth.* 3502 3497 F: include/linux/mv643xx.h 3503 3498
+1 -1
Makefile
··· 1 1 VERSION = 2 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 33 4 - EXTRAVERSION = -rc8 4 + EXTRAVERSION = 5 5 NAME = Man-Eating Seals of Antiquity 6 6 7 7 # *DOCUMENTATION*
+2 -1
arch/arm/include/asm/cacheflush.h
··· 42 42 #endif 43 43 44 44 #if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ 45 - defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) 45 + defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \ 46 + defined(CONFIG_CPU_ARM1026) 46 47 # define MULTI_CACHE 1 47 48 #endif 48 49
+1
arch/arm/kernel/setup.c
··· 102 102 #endif 103 103 #ifdef CONFIG_OUTER_CACHE 104 104 struct outer_cache_fns outer_cache; 105 + EXPORT_SYMBOL(outer_cache); 105 106 #endif 106 107 107 108 struct stack {
+2 -2
arch/arm/mach-gemini/gpio.c
··· 86 86 unsigned int reg_both, reg_level, reg_type; 87 87 88 88 reg_type = __raw_readl(base + GPIO_INT_TYPE); 89 - reg_level = __raw_readl(base + GPIO_INT_BOTH_EDGE); 89 + reg_level = __raw_readl(base + GPIO_INT_LEVEL); 90 90 reg_both = __raw_readl(base + GPIO_INT_BOTH_EDGE); 91 91 92 92 switch (type) { ··· 117 117 } 118 118 119 119 __raw_writel(reg_type, base + GPIO_INT_TYPE); 120 - __raw_writel(reg_level, base + GPIO_INT_BOTH_EDGE); 120 + __raw_writel(reg_level, base + GPIO_INT_LEVEL); 121 121 __raw_writel(reg_both, base + GPIO_INT_BOTH_EDGE); 122 122 123 123 gpio_ack_irq(irq);
+5 -7
arch/arm/mach-omap2/mux.c
··· 961 961 while (superset->reg_offset != OMAP_MUX_TERMINATOR) { 962 962 struct omap_mux *entry; 963 963 964 - #ifndef CONFIG_OMAP_MUX 965 - /* Skip pins that are not muxed as GPIO by bootloader */ 966 - if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) { 964 + #ifdef CONFIG_OMAP_MUX 965 + if (!superset->muxnames || !superset->muxnames[0]) { 967 966 superset++; 968 967 continue; 969 968 } 970 - #endif 971 - 972 - #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS) 973 - if (!superset->muxnames || !superset->muxnames[0]) { 969 + #else 970 + /* Skip pins that are not muxed as GPIO by bootloader */ 971 + if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) { 974 972 superset++; 975 973 continue; 976 974 }
+3
arch/arm/mm/alignment.c
··· 11 11 * it under the terms of the GNU General Public License version 2 as 12 12 * published by the Free Software Foundation. 13 13 */ 14 + #include <linux/moduleparam.h> 14 15 #include <linux/compiler.h> 15 16 #include <linux/kernel.h> 16 17 #include <linux/errno.h> ··· 77 76 static unsigned long ai_dword; 78 77 static unsigned long ai_multi; 79 78 static int ai_usermode; 79 + 80 + core_param(alignment, ai_usermode, int, 0600); 80 81 81 82 #define UM_WARN (1 << 0) 82 83 #define UM_FIXUP (1 << 1)
+44 -2
arch/arm/tools/mach-types
··· 12 12 # 13 13 # http://www.arm.linux.org.uk/developer/machines/?action=new 14 14 # 15 - # Last update: Thu Jan 28 22:15:54 2010 15 + # Last update: Sat Feb 20 14:16:15 2010 16 16 # 17 17 # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number 18 18 # ··· 2257 2257 oratismadi MACH_ORATISMADI ORATISMADI 2269 2258 2258 oratisot16 MACH_ORATISOT16 ORATISOT16 2270 2259 2259 oratisdesk MACH_ORATISDESK ORATISDESK 2271 2260 - v2_ca9 MACH_V2P_CA9 V2P_CA9 2272 2260 + vexpress MACH_VEXPRESS VEXPRESS 2272 2261 2261 sintexo MACH_SINTEXO SINTEXO 2273 2262 2262 cm3389 MACH_CM3389 CM3389 2274 2263 2263 omap3_cio MACH_OMAP3_CIO OMAP3_CIO 2275 ··· 2636 2636 dm365_leopard MACH_DM365_LEOPARD DM365_LEOPARD 2649 2637 2637 mityomapl138 MACH_MITYOMAPL138 MITYOMAPL138 2650 2638 2638 scat110 MACH_SCAT110 SCAT110 2651 2639 + acer_a1 MACH_ACER_A1 ACER_A1 2652 2640 + cmcontrol MACH_CMCONTROL CMCONTROL 2653 2641 + pelco_lamar MACH_PELCO_LAMAR PELCO_LAMAR 2654 2642 + rfp43 MACH_RFP43 RFP43 2655 2643 + sk86r0301 MACH_SK86R0301 SK86R0301 2656 2644 + ctpxa MACH_CTPXA CTPXA 2657 2645 + epb_arm9_a MACH_EPB_ARM9_A EPB_ARM9_A 2658 2646 + guruplug MACH_GURUPLUG GURUPLUG 2659 2647 + spear310 MACH_SPEAR310 SPEAR310 2660 2648 + spear320 MACH_SPEAR320 SPEAR320 2661 2649 + robotx MACH_ROBOTX ROBOTX 2662 2650 + lsxhl MACH_LSXHL LSXHL 2663 2651 + smartlite MACH_SMARTLITE SMARTLITE 2664 2652 + cws2 MACH_CWS2 CWS2 2665 2653 + m619 MACH_M619 M619 2666 2654 + smartview MACH_SMARTVIEW SMARTVIEW 2667 2655 + lsa_salsa MACH_LSA_SALSA LSA_SALSA 2668 2656 + kizbox MACH_KIZBOX KIZBOX 2669 2657 + htccharmer MACH_HTCCHARMER HTCCHARMER 2670 2658 + guf_neso_lt MACH_GUF_NESO_LT GUF_NESO_LT 2671 2659 + pm9g45 MACH_PM9G45 PM9G45 2672 2660 + htcpanther MACH_HTCPANTHER HTCPANTHER 2673 2661 + htcpanther_cdma MACH_HTCPANTHER_CDMA HTCPANTHER_CDMA 2674 2662 + reb01 MACH_REB01 REB01 2675 2663 + aquila MACH_AQUILA AQUILA 2676 2664 + spark_sls_hw2 MACH_SPARK_SLS_HW2 SPARK_SLS_HW2 2677 2665 + sheeva_esata MACH_ESATA_SHEEVAPLUG 
ESATA_SHEEVAPLUG 2678 2666 + surf7x30 MACH_SURF7X30 SURF7X30 2679 2667 + micro2440 MACH_MICRO2440 MICRO2440 2680 2668 + am2440 MACH_AM2440 AM2440 2681 2669 + tq2440 MACH_TQ2440 TQ2440 2682 2670 + lpc2478oem MACH_LPC2478OEM LPC2478OEM 2683 2671 + ak880x MACH_AK880X AK880X 2684 2672 + cobra3530 MACH_COBRA3530 COBRA3530 2685 2673 + pmppb MACH_PMPPB PMPPB 2686 2674 + u6715 MACH_U6715 U6715 2687 2675 + axar1500_sender MACH_AXAR1500_SENDER AXAR1500_SENDER 2688 2676 + g30_dvb MACH_G30_DVB G30_DVB 2689 2677 + vc088x MACH_VC088X VC088X 2690 2678 + mioa702 MACH_MIOA702 MIOA702 2691 2679 + hpmin MACH_HPMIN HPMIN 2692 2680 + ak880xak MACH_AK880XAK AK880XAK 2693
+1
arch/ia64/include/asm/acpi.h
··· 94 94 #define acpi_noirq 0 /* ACPI always enabled on IA64 */ 95 95 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */ 96 96 #define acpi_strict 1 /* no ACPI spec workarounds on IA64 */ 97 + #define acpi_ht 0 /* no HT-only mode on IA64 */ 97 98 #endif 98 99 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */ 99 100 static inline void disable_acpi(void) { }
+1 -1
arch/ia64/sn/kernel/setup.c
··· 71 71 DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); 72 72 EXPORT_PER_CPU_SYMBOL(__sn_hub_info); 73 73 74 - DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid); 74 + DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); 75 75 EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); 76 76 77 77 DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
+1
arch/microblaze/Kconfig
··· 130 130 131 131 config OF 132 132 def_bool y 133 + select OF_FLATTREE 133 134 134 135 config PROC_DEVICETREE 135 136 bool "Support for device tree in /proc"
+1 -1
arch/microblaze/include/asm/io.h
··· 217 217 * Little endian 218 218 */ 219 219 220 - #define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a)); 220 + #define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a)) 221 221 #define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a)) 222 222 223 223 #define in_le32(a) __le32_to_cpu(__raw_readl(a))
-20
arch/microblaze/include/asm/prom.h
··· 26 26 #include <asm/irq.h> 27 27 #include <asm/atomic.h> 28 28 29 - #define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1 30 - #define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1 31 - 32 - #define of_compat_cmp(s1, s2, l) strncasecmp((s1), (s2), (l)) 33 - #define of_prop_cmp(s1, s2) strcmp((s1), (s2)) 34 - #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) 35 - 36 - extern struct device_node *of_chosen; 37 - 38 29 #define HAVE_ARCH_DEVTREE_FIXUPS 39 - 40 - extern struct device_node *allnodes; /* temporary while merging */ 41 - extern rwlock_t devtree_lock; /* temporary while merging */ 42 - 43 - /* For updating the device tree at runtime */ 44 - extern void of_attach_node(struct device_node *); 45 - extern void of_detach_node(struct device_node *); 46 30 47 31 /* Other Prototypes */ 48 32 extern int early_uartlite_console(void); 49 - 50 - extern struct resource *request_OF_resource(struct device_node *node, 51 - int index, const char *name_postfix); 52 - extern int release_OF_resource(struct device_node *node, int index); 53 33 54 34 /* 55 35 * OF address retreival & translation
+8 -19
arch/microblaze/kernel/cpu/cache.c
··· 172 172 /* It is used only first parameter for OP - for wic, wdc */ 173 173 #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \ 174 174 do { \ 175 - int step = -line_length; \ 176 - int count = end - start; \ 177 - BUG_ON(count <= 0); \ 175 + int volatile temp; \ 176 + BUG_ON(end - start <= 0); \ 178 177 \ 179 - __asm__ __volatile__ (" 1: addk %0, %0, %1; \ 180 - " #op " %0, r0; \ 181 - bgtid %1, 1b; \ 182 - addk %1, %1, %2; \ 183 - " : : "r" (start), "r" (count), \ 184 - "r" (step) : "memory"); \ 178 + __asm__ __volatile__ (" 1: " #op " %1, r0; \ 179 + cmpu %0, %1, %2; \ 180 + bgtid %0, 1b; \ 181 + addk %1, %1, %3; \ 182 + " : : "r" (temp), "r" (start), "r" (end),\ 183 + "r" (line_length) : "memory"); \ 185 184 } while (0); 186 185 187 186 static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) ··· 312 313 pr_debug("%s\n", __func__); 313 314 CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length, 314 315 wdc.clear) 315 - 316 - #if 0 317 - unsigned int i; 318 - 319 - pr_debug("%s\n", __func__); 320 - 321 - /* Just loop through cache size and invalidate it */ 322 - for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length) 323 - __invalidate_dcache(0, i); 324 - #endif 325 316 } 326 317 327 318 static void __invalidate_dcache_range_wb(unsigned long start,
+1 -1
arch/microblaze/kernel/of_platform.c
··· 185 185 static int of_dev_phandle_match(struct device *dev, void *data) 186 186 { 187 187 phandle *ph = data; 188 - return to_of_device(dev)->node->linux_phandle == *ph; 188 + return to_of_device(dev)->node->phandle == *ph; 189 189 } 190 190 191 191 struct of_device *of_find_device_by_phandle(phandle ph)
+13 -977
arch/microblaze/kernel/prom.c
··· 42 42 #include <asm/sections.h> 43 43 #include <asm/pci-bridge.h> 44 44 45 - static int __initdata dt_root_addr_cells; 46 - static int __initdata dt_root_size_cells; 47 - 48 - typedef u32 cell_t; 49 - 50 - static struct boot_param_header *initial_boot_params; 51 - 52 - /* export that to outside world */ 53 - struct device_node *of_chosen; 54 - 55 - static inline char *find_flat_dt_string(u32 offset) 45 + void __init early_init_dt_scan_chosen_arch(unsigned long node) 56 46 { 57 - return ((char *)initial_boot_params) + 58 - initial_boot_params->off_dt_strings + offset; 47 + /* No Microblaze specific code here */ 59 48 } 60 49 61 - /** 62 - * This function is used to scan the flattened device-tree, it is 63 - * used to extract the memory informations at boot before we can 64 - * unflatten the tree 65 - */ 66 - int __init of_scan_flat_dt(int (*it)(unsigned long node, 67 - const char *uname, int depth, 68 - void *data), 69 - void *data) 50 + void __init early_init_dt_add_memory_arch(u64 base, u64 size) 70 51 { 71 - unsigned long p = ((unsigned long)initial_boot_params) + 72 - initial_boot_params->off_dt_struct; 73 - int rc = 0; 74 - int depth = -1; 75 - 76 - do { 77 - u32 tag = *((u32 *)p); 78 - char *pathp; 79 - 80 - p += 4; 81 - if (tag == OF_DT_END_NODE) { 82 - depth--; 83 - continue; 84 - } 85 - if (tag == OF_DT_NOP) 86 - continue; 87 - if (tag == OF_DT_END) 88 - break; 89 - if (tag == OF_DT_PROP) { 90 - u32 sz = *((u32 *)p); 91 - p += 8; 92 - if (initial_boot_params->version < 0x10) 93 - p = _ALIGN(p, sz >= 8 ? 
8 : 4); 94 - p += sz; 95 - p = _ALIGN(p, 4); 96 - continue; 97 - } 98 - if (tag != OF_DT_BEGIN_NODE) { 99 - printk(KERN_WARNING "Invalid tag %x scanning flattened" 100 - " device tree !\n", tag); 101 - return -EINVAL; 102 - } 103 - depth++; 104 - pathp = (char *)p; 105 - p = _ALIGN(p + strlen(pathp) + 1, 4); 106 - if ((*pathp) == '/') { 107 - char *lp, *np; 108 - for (lp = NULL, np = pathp; *np; np++) 109 - if ((*np) == '/') 110 - lp = np+1; 111 - if (lp != NULL) 112 - pathp = lp; 113 - } 114 - rc = it(p, pathp, depth, data); 115 - if (rc != 0) 116 - break; 117 - } while (1); 118 - 119 - return rc; 52 + lmb_add(base, size); 120 53 } 121 54 122 - unsigned long __init of_get_flat_dt_root(void) 55 + u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align) 123 56 { 124 - unsigned long p = ((unsigned long)initial_boot_params) + 125 - initial_boot_params->off_dt_struct; 126 - 127 - while (*((u32 *)p) == OF_DT_NOP) 128 - p += 4; 129 - BUG_ON(*((u32 *)p) != OF_DT_BEGIN_NODE); 130 - p += 4; 131 - return _ALIGN(p + strlen((char *)p) + 1, 4); 57 + return lmb_alloc(size, align); 132 58 } 133 - 134 - /** 135 - * This function can be used within scan_flattened_dt callback to get 136 - * access to properties 137 - */ 138 - void *__init of_get_flat_dt_prop(unsigned long node, const char *name, 139 - unsigned long *size) 140 - { 141 - unsigned long p = node; 142 - 143 - do { 144 - u32 tag = *((u32 *)p); 145 - u32 sz, noff; 146 - const char *nstr; 147 - 148 - p += 4; 149 - if (tag == OF_DT_NOP) 150 - continue; 151 - if (tag != OF_DT_PROP) 152 - return NULL; 153 - 154 - sz = *((u32 *)p); 155 - noff = *((u32 *)(p + 4)); 156 - p += 8; 157 - if (initial_boot_params->version < 0x10) 158 - p = _ALIGN(p, sz >= 8 ? 
8 : 4); 159 - 160 - nstr = find_flat_dt_string(noff); 161 - if (nstr == NULL) { 162 - printk(KERN_WARNING "Can't find property index" 163 - " name !\n"); 164 - return NULL; 165 - } 166 - if (strcmp(name, nstr) == 0) { 167 - if (size) 168 - *size = sz; 169 - return (void *)p; 170 - } 171 - p += sz; 172 - p = _ALIGN(p, 4); 173 - } while (1); 174 - } 175 - 176 - int __init of_flat_dt_is_compatible(unsigned long node, const char *compat) 177 - { 178 - const char *cp; 179 - unsigned long cplen, l; 180 - 181 - cp = of_get_flat_dt_prop(node, "compatible", &cplen); 182 - if (cp == NULL) 183 - return 0; 184 - while (cplen > 0) { 185 - if (strncasecmp(cp, compat, strlen(compat)) == 0) 186 - return 1; 187 - l = strlen(cp) + 1; 188 - cp += l; 189 - cplen -= l; 190 - } 191 - 192 - return 0; 193 - } 194 - 195 - static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, 196 - unsigned long align) 197 - { 198 - void *res; 199 - 200 - *mem = _ALIGN(*mem, align); 201 - res = (void *)*mem; 202 - *mem += size; 203 - 204 - return res; 205 - } 206 - 207 - static unsigned long __init unflatten_dt_node(unsigned long mem, 208 - unsigned long *p, 209 - struct device_node *dad, 210 - struct device_node ***allnextpp, 211 - unsigned long fpsize) 212 - { 213 - struct device_node *np; 214 - struct property *pp, **prev_pp = NULL; 215 - char *pathp; 216 - u32 tag; 217 - unsigned int l, allocl; 218 - int has_name = 0; 219 - int new_format = 0; 220 - 221 - tag = *((u32 *)(*p)); 222 - if (tag != OF_DT_BEGIN_NODE) { 223 - printk("Weird tag at start of node: %x\n", tag); 224 - return mem; 225 - } 226 - *p += 4; 227 - pathp = (char *)*p; 228 - l = allocl = strlen(pathp) + 1; 229 - *p = _ALIGN(*p + l, 4); 230 - 231 - /* version 0x10 has a more compact unit name here instead of the full 232 - * path. we accumulate the full path size using "fpsize", we'll rebuild 233 - * it later. We detect this because the first character of the name is 234 - * not '/'. 
235 - */ 236 - if ((*pathp) != '/') { 237 - new_format = 1; 238 - if (fpsize == 0) { 239 - /* root node: special case. fpsize accounts for path 240 - * plus terminating zero. root node only has '/', so 241 - * fpsize should be 2, but we want to avoid the first 242 - * level nodes to have two '/' so we use fpsize 1 here 243 - */ 244 - fpsize = 1; 245 - allocl = 2; 246 - } else { 247 - /* account for '/' and path size minus terminal 0 248 - * already in 'l' 249 - */ 250 - fpsize += l; 251 - allocl = fpsize; 252 - } 253 - } 254 - 255 - np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl, 256 - __alignof__(struct device_node)); 257 - if (allnextpp) { 258 - memset(np, 0, sizeof(*np)); 259 - np->full_name = ((char *)np) + sizeof(struct device_node); 260 - if (new_format) { 261 - char *p2 = np->full_name; 262 - /* rebuild full path for new format */ 263 - if (dad && dad->parent) { 264 - strcpy(p2, dad->full_name); 265 - #ifdef DEBUG 266 - if ((strlen(p2) + l + 1) != allocl) { 267 - pr_debug("%s: p: %d, l: %d, a: %d\n", 268 - pathp, (int)strlen(p2), 269 - l, allocl); 270 - } 271 - #endif 272 - p2 += strlen(p2); 273 - } 274 - *(p2++) = '/'; 275 - memcpy(p2, pathp, l); 276 - } else 277 - memcpy(np->full_name, pathp, l); 278 - prev_pp = &np->properties; 279 - **allnextpp = np; 280 - *allnextpp = &np->allnext; 281 - if (dad != NULL) { 282 - np->parent = dad; 283 - /* we temporarily use the next field as `last_child'*/ 284 - if (dad->next == NULL) 285 - dad->child = np; 286 - else 287 - dad->next->sibling = np; 288 - dad->next = np; 289 - } 290 - kref_init(&np->kref); 291 - } 292 - while (1) { 293 - u32 sz, noff; 294 - char *pname; 295 - 296 - tag = *((u32 *)(*p)); 297 - if (tag == OF_DT_NOP) { 298 - *p += 4; 299 - continue; 300 - } 301 - if (tag != OF_DT_PROP) 302 - break; 303 - *p += 4; 304 - sz = *((u32 *)(*p)); 305 - noff = *((u32 *)((*p) + 4)); 306 - *p += 8; 307 - if (initial_boot_params->version < 0x10) 308 - *p = _ALIGN(*p, sz >= 8 ? 
8 : 4); 309 - 310 - pname = find_flat_dt_string(noff); 311 - if (pname == NULL) { 312 - printk(KERN_INFO 313 - "Can't find property name in list !\n"); 314 - break; 315 - } 316 - if (strcmp(pname, "name") == 0) 317 - has_name = 1; 318 - l = strlen(pname) + 1; 319 - pp = unflatten_dt_alloc(&mem, sizeof(struct property), 320 - __alignof__(struct property)); 321 - if (allnextpp) { 322 - if (strcmp(pname, "linux,phandle") == 0) { 323 - np->node = *((u32 *)*p); 324 - if (np->linux_phandle == 0) 325 - np->linux_phandle = np->node; 326 - } 327 - if (strcmp(pname, "ibm,phandle") == 0) 328 - np->linux_phandle = *((u32 *)*p); 329 - pp->name = pname; 330 - pp->length = sz; 331 - pp->value = (void *)*p; 332 - *prev_pp = pp; 333 - prev_pp = &pp->next; 334 - } 335 - *p = _ALIGN((*p) + sz, 4); 336 - } 337 - /* with version 0x10 we may not have the name property, recreate 338 - * it here from the unit name if absent 339 - */ 340 - if (!has_name) { 341 - char *p1 = pathp, *ps = pathp, *pa = NULL; 342 - int sz; 343 - 344 - while (*p1) { 345 - if ((*p1) == '@') 346 - pa = p1; 347 - if ((*p1) == '/') 348 - ps = p1 + 1; 349 - p1++; 350 - } 351 - if (pa < ps) 352 - pa = p1; 353 - sz = (pa - ps) + 1; 354 - pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz, 355 - __alignof__(struct property)); 356 - if (allnextpp) { 357 - pp->name = "name"; 358 - pp->length = sz; 359 - pp->value = pp + 1; 360 - *prev_pp = pp; 361 - prev_pp = &pp->next; 362 - memcpy(pp->value, ps, sz - 1); 363 - ((char *)pp->value)[sz - 1] = 0; 364 - pr_debug("fixed up name for %s -> %s\n", pathp, 365 - (char *)pp->value); 366 - } 367 - } 368 - if (allnextpp) { 369 - *prev_pp = NULL; 370 - np->name = of_get_property(np, "name", NULL); 371 - np->type = of_get_property(np, "device_type", NULL); 372 - 373 - if (!np->name) 374 - np->name = "<NULL>"; 375 - if (!np->type) 376 - np->type = "<NULL>"; 377 - } 378 - while (tag == OF_DT_BEGIN_NODE) { 379 - mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize); 380 - tag = 
*((u32 *)(*p)); 381 - } 382 - if (tag != OF_DT_END_NODE) { 383 - printk(KERN_INFO "Weird tag at end of node: %x\n", tag); 384 - return mem; 385 - } 386 - *p += 4; 387 - return mem; 388 - } 389 - 390 - /** 391 - * unflattens the device-tree passed by the firmware, creating the 392 - * tree of struct device_node. It also fills the "name" and "type" 393 - * pointers of the nodes so the normal device-tree walking functions 394 - * can be used (this used to be done by finish_device_tree) 395 - */ 396 - void __init unflatten_device_tree(void) 397 - { 398 - unsigned long start, mem, size; 399 - struct device_node **allnextp = &allnodes; 400 - 401 - pr_debug(" -> unflatten_device_tree()\n"); 402 - 403 - /* First pass, scan for size */ 404 - start = ((unsigned long)initial_boot_params) + 405 - initial_boot_params->off_dt_struct; 406 - size = unflatten_dt_node(0, &start, NULL, NULL, 0); 407 - size = (size | 3) + 1; 408 - 409 - pr_debug(" size is %lx, allocating...\n", size); 410 - 411 - /* Allocate memory for the expanded device tree */ 412 - mem = lmb_alloc(size + 4, __alignof__(struct device_node)); 413 - mem = (unsigned long) __va(mem); 414 - 415 - ((u32 *)mem)[size / 4] = 0xdeadbeef; 416 - 417 - pr_debug(" unflattening %lx...\n", mem); 418 - 419 - /* Second pass, do actual unflattening */ 420 - start = ((unsigned long)initial_boot_params) + 421 - initial_boot_params->off_dt_struct; 422 - unflatten_dt_node(mem, &start, NULL, &allnextp, 0); 423 - if (*((u32 *)start) != OF_DT_END) 424 - printk(KERN_WARNING "Weird tag at end of tree: %08x\n", 425 - *((u32 *)start)); 426 - if (((u32 *)mem)[size / 4] != 0xdeadbeef) 427 - printk(KERN_WARNING "End of tree marker overwritten: %08x\n", 428 - ((u32 *)mem)[size / 4]); 429 - *allnextp = NULL; 430 - 431 - /* Get pointer to OF "/chosen" node for use everywhere */ 432 - of_chosen = of_find_node_by_path("/chosen"); 433 - if (of_chosen == NULL) 434 - of_chosen = of_find_node_by_path("/chosen@0"); 435 - 436 - pr_debug(" <- 
unflatten_device_tree()\n"); 437 - } 438 - 439 - #define early_init_dt_scan_drconf_memory(node) 0 440 - 441 - static int __init early_init_dt_scan_cpus(unsigned long node, 442 - const char *uname, int depth, 443 - void *data) 444 - { 445 - static int logical_cpuid; 446 - char *type = of_get_flat_dt_prop(node, "device_type", NULL); 447 - const u32 *intserv; 448 - int i, nthreads; 449 - int found = 0; 450 - 451 - /* We are scanning "cpu" nodes only */ 452 - if (type == NULL || strcmp(type, "cpu") != 0) 453 - return 0; 454 - 455 - /* Get physical cpuid */ 456 - intserv = of_get_flat_dt_prop(node, "reg", NULL); 457 - nthreads = 1; 458 - 459 - /* 460 - * Now see if any of these threads match our boot cpu. 461 - * NOTE: This must match the parsing done in smp_setup_cpu_maps. 462 - */ 463 - for (i = 0; i < nthreads; i++) { 464 - /* 465 - * version 2 of the kexec param format adds the phys cpuid of 466 - * booted proc. 467 - */ 468 - if (initial_boot_params && initial_boot_params->version >= 2) { 469 - if (intserv[i] == 470 - initial_boot_params->boot_cpuid_phys) { 471 - found = 1; 472 - break; 473 - } 474 - } else { 475 - /* 476 - * Check if it's the boot-cpu, set it's hw index now, 477 - * unfortunately this format did not support booting 478 - * off secondary threads. 479 - */ 480 - if (of_get_flat_dt_prop(node, 481 - "linux,boot-cpu", NULL) != NULL) { 482 - found = 1; 483 - break; 484 - } 485 - } 486 - 487 - #ifdef CONFIG_SMP 488 - /* logical cpu id is always 0 on UP kernels */ 489 - logical_cpuid++; 490 - #endif 491 - } 492 - 493 - if (found) { 494 - pr_debug("boot cpu: logical %d physical %d\n", logical_cpuid, 495 - intserv[i]); 496 - boot_cpuid = logical_cpuid; 497 - } 498 - 499 - return 0; 500 - } 501 - 502 - #ifdef CONFIG_BLK_DEV_INITRD 503 - static void __init early_init_dt_check_for_initrd(unsigned long node) 504 - { 505 - unsigned long l; 506 - u32 *prop; 507 - 508 - pr_debug("Looking for initrd properties... 
"); 509 - 510 - prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l); 511 - if (prop) { 512 - initrd_start = (unsigned long) 513 - __va((u32)of_read_ulong(prop, l/4)); 514 - 515 - prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l); 516 - if (prop) { 517 - initrd_end = (unsigned long) 518 - __va((u32)of_read_ulong(prop, 1/4)); 519 - initrd_below_start_ok = 1; 520 - } else { 521 - initrd_start = 0; 522 - } 523 - } 524 - 525 - pr_debug("initrd_start=0x%lx initrd_end=0x%lx\n", 526 - initrd_start, initrd_end); 527 - } 528 - #else 529 - static inline void early_init_dt_check_for_initrd(unsigned long node) 530 - { 531 - } 532 - #endif /* CONFIG_BLK_DEV_INITRD */ 533 - 534 - static int __init early_init_dt_scan_chosen(unsigned long node, 535 - const char *uname, int depth, void *data) 536 - { 537 - unsigned long l; 538 - char *p; 539 - 540 - pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); 541 - 542 - if (depth != 1 || 543 - (strcmp(uname, "chosen") != 0 && 544 - strcmp(uname, "chosen@0") != 0)) 545 - return 0; 546 - 547 - #ifdef CONFIG_KEXEC 548 - lprop = (u64 *)of_get_flat_dt_prop(node, 549 - "linux,crashkernel-base", NULL); 550 - if (lprop) 551 - crashk_res.start = *lprop; 552 - 553 - lprop = (u64 *)of_get_flat_dt_prop(node, 554 - "linux,crashkernel-size", NULL); 555 - if (lprop) 556 - crashk_res.end = crashk_res.start + *lprop - 1; 557 - #endif 558 - 559 - early_init_dt_check_for_initrd(node); 560 - 561 - /* Retreive command line */ 562 - p = of_get_flat_dt_prop(node, "bootargs", &l); 563 - if (p != NULL && l > 0) 564 - strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); 565 - 566 - #ifdef CONFIG_CMDLINE 567 - #ifndef CONFIG_CMDLINE_FORCE 568 - if (p == NULL || l == 0 || (l == 1 && (*p) == 0)) 569 - #endif 570 - strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); 571 - #endif /* CONFIG_CMDLINE */ 572 - 573 - pr_debug("Command line is: %s\n", cmd_line); 574 - 575 - /* break now */ 576 - return 1; 577 - } 578 - 579 - static int __init 
early_init_dt_scan_root(unsigned long node, 580 - const char *uname, int depth, void *data) 581 - { 582 - u32 *prop; 583 - 584 - if (depth != 0) 585 - return 0; 586 - 587 - prop = of_get_flat_dt_prop(node, "#size-cells", NULL); 588 - dt_root_size_cells = (prop == NULL) ? 1 : *prop; 589 - pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells); 590 - 591 - prop = of_get_flat_dt_prop(node, "#address-cells", NULL); 592 - dt_root_addr_cells = (prop == NULL) ? 2 : *prop; 593 - pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells); 594 - 595 - /* break now */ 596 - return 1; 597 - } 598 - 599 - static u64 __init dt_mem_next_cell(int s, cell_t **cellp) 600 - { 601 - cell_t *p = *cellp; 602 - 603 - *cellp = p + s; 604 - return of_read_number(p, s); 605 - } 606 - 607 - static int __init early_init_dt_scan_memory(unsigned long node, 608 - const char *uname, int depth, void *data) 609 - { 610 - char *type = of_get_flat_dt_prop(node, "device_type", NULL); 611 - cell_t *reg, *endp; 612 - unsigned long l; 613 - 614 - /* Look for the ibm,dynamic-reconfiguration-memory node */ 615 - /* if (depth == 1 && 616 - strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0) 617 - return early_init_dt_scan_drconf_memory(node); 618 - */ 619 - /* We are scanning "memory" nodes only */ 620 - if (type == NULL) { 621 - /* 622 - * The longtrail doesn't have a device_type on the 623 - * /memory node, so look for the node called /memory@0. 
624 - */ 625 - if (depth != 1 || strcmp(uname, "memory@0") != 0) 626 - return 0; 627 - } else if (strcmp(type, "memory") != 0) 628 - return 0; 629 - 630 - reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l); 631 - if (reg == NULL) 632 - reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l); 633 - if (reg == NULL) 634 - return 0; 635 - 636 - endp = reg + (l / sizeof(cell_t)); 637 - 638 - pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n", 639 - uname, l, reg[0], reg[1], reg[2], reg[3]); 640 - 641 - while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { 642 - u64 base, size; 643 - 644 - base = dt_mem_next_cell(dt_root_addr_cells, &reg); 645 - size = dt_mem_next_cell(dt_root_size_cells, &reg); 646 - 647 - if (size == 0) 648 - continue; 649 - pr_debug(" - %llx , %llx\n", (unsigned long long)base, 650 - (unsigned long long)size); 651 - 652 - lmb_add(base, size); 653 - } 654 - return 0; 655 - } 656 - 657 - #ifdef CONFIG_PHYP_DUMP 658 - /** 659 - * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg 660 - * 661 - * Function to find the largest size we need to reserve 662 - * during early boot process. 663 - * 664 - * It either looks for boot param and returns that OR 665 - * returns larger of 256 or 5% rounded down to multiples of 256MB. 666 - * 667 - */ 668 - static inline unsigned long phyp_dump_calculate_reserve_size(void) 669 - { 670 - unsigned long tmp; 671 - 672 - if (phyp_dump_info->reserve_bootvar) 673 - return phyp_dump_info->reserve_bootvar; 674 - 675 - /* divide by 20 to get 5% of value */ 676 - tmp = lmb_end_of_DRAM(); 677 - do_div(tmp, 20); 678 - 679 - /* round it down in multiples of 256 */ 680 - tmp = tmp & ~0x0FFFFFFFUL; 681 - 682 - return (tmp > PHYP_DUMP_RMR_END ? 
tmp : PHYP_DUMP_RMR_END); 683 - } 684 - 685 - /** 686 - * phyp_dump_reserve_mem() - reserve all not-yet-dumped mmemory 687 - * 688 - * This routine may reserve memory regions in the kernel only 689 - * if the system is supported and a dump was taken in last 690 - * boot instance or if the hardware is supported and the 691 - * scratch area needs to be setup. In other instances it returns 692 - * without reserving anything. The memory in case of dump being 693 - * active is freed when the dump is collected (by userland tools). 694 - */ 695 - static void __init phyp_dump_reserve_mem(void) 696 - { 697 - unsigned long base, size; 698 - unsigned long variable_reserve_size; 699 - 700 - if (!phyp_dump_info->phyp_dump_configured) { 701 - printk(KERN_ERR "Phyp-dump not supported on this hardware\n"); 702 - return; 703 - } 704 - 705 - if (!phyp_dump_info->phyp_dump_at_boot) { 706 - printk(KERN_INFO "Phyp-dump disabled at boot time\n"); 707 - return; 708 - } 709 - 710 - variable_reserve_size = phyp_dump_calculate_reserve_size(); 711 - 712 - if (phyp_dump_info->phyp_dump_is_active) { 713 - /* Reserve *everything* above RMR.Area freed by userland tools*/ 714 - base = variable_reserve_size; 715 - size = lmb_end_of_DRAM() - base; 716 - 717 - /* XXX crashed_ram_end is wrong, since it may be beyond 718 - * the memory_limit, it will need to be adjusted. 
*/ 719 - lmb_reserve(base, size); 720 - 721 - phyp_dump_info->init_reserve_start = base; 722 - phyp_dump_info->init_reserve_size = size; 723 - } else { 724 - size = phyp_dump_info->cpu_state_size + 725 - phyp_dump_info->hpte_region_size + 726 - variable_reserve_size; 727 - base = lmb_end_of_DRAM() - size; 728 - lmb_reserve(base, size); 729 - phyp_dump_info->init_reserve_start = base; 730 - phyp_dump_info->init_reserve_size = size; 731 - } 732 - } 733 - #else 734 - static inline void __init phyp_dump_reserve_mem(void) {} 735 - #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ 736 59 737 60 #ifdef CONFIG_EARLY_PRINTK 738 61 /* MS this is Microblaze specifig function */ ··· 98 775 /* Setup flat device-tree pointer */ 99 776 initial_boot_params = params; 100 777 101 - #ifdef CONFIG_PHYP_DUMP 102 - /* scan tree to see if dump occured during last boot */ 103 - of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL); 104 - #endif 105 - 106 778 /* Retrieve various informations from the /chosen node of the 107 779 * device-tree, including the platform type, initrd location and 108 780 * size, TCE reserve, and more ... ··· 117 799 118 800 pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size()); 119 801 120 - pr_debug("Scanning CPUs ...\n"); 121 - 122 - /* Retreive CPU related informations from the flat tree 123 - * (altivec support, boot CPU ID, ...) 124 - */ 125 - of_scan_flat_dt(early_init_dt_scan_cpus, NULL); 126 - 127 802 pr_debug(" <- early_init_devtree()\n"); 128 803 } 129 804 130 - /** 131 - * Indicates whether the root node has a given value in its 132 - * compatible property. 
133 - */ 134 - int machine_is_compatible(const char *compat) 805 + #ifdef CONFIG_BLK_DEV_INITRD 806 + void __init early_init_dt_setup_initrd_arch(unsigned long start, 807 + unsigned long end) 135 808 { 136 - struct device_node *root; 137 - int rc = 0; 138 - 139 - root = of_find_node_by_path("/"); 140 - if (root) { 141 - rc = of_device_is_compatible(root, compat); 142 - of_node_put(root); 143 - } 144 - return rc; 809 + initrd_start = (unsigned long)__va(start); 810 + initrd_end = (unsigned long)__va(end); 811 + initrd_below_start_ok = 1; 145 812 } 146 - EXPORT_SYMBOL(machine_is_compatible); 813 + #endif 147 814 148 815 /******* 149 816 * ··· 140 837 * this isn't dealt with yet. 141 838 * 142 839 *******/ 143 - 144 - /** 145 - * of_find_node_by_phandle - Find a node given a phandle 146 - * @handle: phandle of the node to find 147 - * 148 - * Returns a node pointer with refcount incremented, use 149 - * of_node_put() on it when done. 150 - */ 151 - struct device_node *of_find_node_by_phandle(phandle handle) 152 - { 153 - struct device_node *np; 154 - 155 - read_lock(&devtree_lock); 156 - for (np = allnodes; np != NULL; np = np->allnext) 157 - if (np->linux_phandle == handle) 158 - break; 159 - of_node_get(np); 160 - read_unlock(&devtree_lock); 161 - return np; 162 - } 163 - EXPORT_SYMBOL(of_find_node_by_phandle); 164 - 165 - /** 166 - * of_node_get - Increment refcount of a node 167 - * @node: Node to inc refcount, NULL is supported to 168 - * simplify writing of callers 169 - * 170 - * Returns node. 
171 - */ 172 - struct device_node *of_node_get(struct device_node *node) 173 - { 174 - if (node) 175 - kref_get(&node->kref); 176 - return node; 177 - } 178 - EXPORT_SYMBOL(of_node_get); 179 - 180 - static inline struct device_node *kref_to_device_node(struct kref *kref) 181 - { 182 - return container_of(kref, struct device_node, kref); 183 - } 184 - 185 - /** 186 - * of_node_release - release a dynamically allocated node 187 - * @kref: kref element of the node to be released 188 - * 189 - * In of_node_put() this function is passed to kref_put() 190 - * as the destructor. 191 - */ 192 - static void of_node_release(struct kref *kref) 193 - { 194 - struct device_node *node = kref_to_device_node(kref); 195 - struct property *prop = node->properties; 196 - 197 - /* We should never be releasing nodes that haven't been detached. */ 198 - if (!of_node_check_flag(node, OF_DETACHED)) { 199 - printk(KERN_INFO "WARNING: Bad of_node_put() on %s\n", 200 - node->full_name); 201 - dump_stack(); 202 - kref_init(&node->kref); 203 - return; 204 - } 205 - 206 - if (!of_node_check_flag(node, OF_DYNAMIC)) 207 - return; 208 - 209 - while (prop) { 210 - struct property *next = prop->next; 211 - kfree(prop->name); 212 - kfree(prop->value); 213 - kfree(prop); 214 - prop = next; 215 - 216 - if (!prop) { 217 - prop = node->deadprops; 218 - node->deadprops = NULL; 219 - } 220 - } 221 - kfree(node->full_name); 222 - kfree(node->data); 223 - kfree(node); 224 - } 225 - 226 - /** 227 - * of_node_put - Decrement refcount of a node 228 - * @node: Node to dec refcount, NULL is supported to 229 - * simplify writing of callers 230 - * 231 - */ 232 - void of_node_put(struct device_node *node) 233 - { 234 - if (node) 235 - kref_put(&node->kref, of_node_release); 236 - } 237 - EXPORT_SYMBOL(of_node_put); 238 - 239 - /* 240 - * Plug a device node into the tree and global list. 
241 - */ 242 - void of_attach_node(struct device_node *np) 243 - { 244 - unsigned long flags; 245 - 246 - write_lock_irqsave(&devtree_lock, flags); 247 - np->sibling = np->parent->child; 248 - np->allnext = allnodes; 249 - np->parent->child = np; 250 - allnodes = np; 251 - write_unlock_irqrestore(&devtree_lock, flags); 252 - } 253 - 254 - /* 255 - * "Unplug" a node from the device tree. The caller must hold 256 - * a reference to the node. The memory associated with the node 257 - * is not freed until its refcount goes to zero. 258 - */ 259 - void of_detach_node(struct device_node *np) 260 - { 261 - struct device_node *parent; 262 - unsigned long flags; 263 - 264 - write_lock_irqsave(&devtree_lock, flags); 265 - 266 - parent = np->parent; 267 - if (!parent) 268 - goto out_unlock; 269 - 270 - if (allnodes == np) 271 - allnodes = np->allnext; 272 - else { 273 - struct device_node *prev; 274 - for (prev = allnodes; 275 - prev->allnext != np; 276 - prev = prev->allnext) 277 - ; 278 - prev->allnext = np->allnext; 279 - } 280 - 281 - if (parent->child == np) 282 - parent->child = np->sibling; 283 - else { 284 - struct device_node *prevsib; 285 - for (prevsib = np->parent->child; 286 - prevsib->sibling != np; 287 - prevsib = prevsib->sibling) 288 - ; 289 - prevsib->sibling = np->sibling; 290 - } 291 - 292 - of_node_set_flag(np, OF_DETACHED); 293 - 294 - out_unlock: 295 - write_unlock_irqrestore(&devtree_lock, flags); 296 - } 297 - 298 - /* 299 - * Add a property to a node 300 - */ 301 - int prom_add_property(struct device_node *np, struct property *prop) 302 - { 303 - struct property **next; 304 - unsigned long flags; 305 - 306 - prop->next = NULL; 307 - write_lock_irqsave(&devtree_lock, flags); 308 - next = &np->properties; 309 - while (*next) { 310 - if (strcmp(prop->name, (*next)->name) == 0) { 311 - /* duplicate ! 
don't insert it */ 312 - write_unlock_irqrestore(&devtree_lock, flags); 313 - return -1; 314 - } 315 - next = &(*next)->next; 316 - } 317 - *next = prop; 318 - write_unlock_irqrestore(&devtree_lock, flags); 319 - 320 - #ifdef CONFIG_PROC_DEVICETREE 321 - /* try to add to proc as well if it was initialized */ 322 - if (np->pde) 323 - proc_device_tree_add_prop(np->pde, prop); 324 - #endif /* CONFIG_PROC_DEVICETREE */ 325 - 326 - return 0; 327 - } 328 - 329 - /* 330 - * Remove a property from a node. Note that we don't actually 331 - * remove it, since we have given out who-knows-how-many pointers 332 - * to the data using get-property. Instead we just move the property 333 - * to the "dead properties" list, so it won't be found any more. 334 - */ 335 - int prom_remove_property(struct device_node *np, struct property *prop) 336 - { 337 - struct property **next; 338 - unsigned long flags; 339 - int found = 0; 340 - 341 - write_lock_irqsave(&devtree_lock, flags); 342 - next = &np->properties; 343 - while (*next) { 344 - if (*next == prop) { 345 - /* found the node */ 346 - *next = prop->next; 347 - prop->next = np->deadprops; 348 - np->deadprops = prop; 349 - found = 1; 350 - break; 351 - } 352 - next = &(*next)->next; 353 - } 354 - write_unlock_irqrestore(&devtree_lock, flags); 355 - 356 - if (!found) 357 - return -ENODEV; 358 - 359 - #ifdef CONFIG_PROC_DEVICETREE 360 - /* try to remove the proc node as well */ 361 - if (np->pde) 362 - proc_device_tree_remove_prop(np->pde, prop); 363 - #endif /* CONFIG_PROC_DEVICETREE */ 364 - 365 - return 0; 366 - } 367 - 368 - /* 369 - * Update a property in a node. Note that we don't actually 370 - * remove it, since we have given out who-knows-how-many pointers 371 - * to the data using get-property. 
Instead we just move the property 372 - * to the "dead properties" list, and add the new property to the 373 - * property list 374 - */ 375 - int prom_update_property(struct device_node *np, 376 - struct property *newprop, 377 - struct property *oldprop) 378 - { 379 - struct property **next; 380 - unsigned long flags; 381 - int found = 0; 382 - 383 - write_lock_irqsave(&devtree_lock, flags); 384 - next = &np->properties; 385 - while (*next) { 386 - if (*next == oldprop) { 387 - /* found the node */ 388 - newprop->next = oldprop->next; 389 - *next = newprop; 390 - oldprop->next = np->deadprops; 391 - np->deadprops = oldprop; 392 - found = 1; 393 - break; 394 - } 395 - next = &(*next)->next; 396 - } 397 - write_unlock_irqrestore(&devtree_lock, flags); 398 - 399 - if (!found) 400 - return -ENODEV; 401 - 402 - #ifdef CONFIG_PROC_DEVICETREE 403 - /* try to add to proc as well if it was initialized */ 404 - if (np->pde) 405 - proc_device_tree_update_prop(np->pde, newprop, oldprop); 406 - #endif /* CONFIG_PROC_DEVICETREE */ 407 - 408 - return 0; 409 - } 410 840 411 841 #if defined(CONFIG_DEBUG_FS) && defined(DEBUG) 412 842 static struct debugfs_blob_wrapper flat_dt_blob;
+8
arch/mips/bcm47xx/prom.c
··· 141 141 break; 142 142 } 143 143 144 + /* Ignoring the last page when ddr size is 128M. Cached 145 + * accesses to last page is causing the processor to prefetch 146 + * using address above 128M stepping out of the ddr address 147 + * space. 148 + */ 149 + if (mem == 0x8000000) 150 + mem -= 0x1000; 151 + 144 152 add_memory_region(0, mem, BOOT_MEM_RAM); 145 153 } 146 154
+1
arch/mips/mm/highmem.c
··· 1 1 #include <linux/module.h> 2 2 #include <linux/highmem.h> 3 + #include <linux/sched.h> 3 4 #include <linux/smp.h> 4 5 #include <asm/fixmap.h> 5 6 #include <asm/tlbflush.h>
-1
arch/parisc/Kconfig
··· 18 18 select BUG 19 19 select HAVE_PERF_EVENTS 20 20 select GENERIC_ATOMIC64 if !64BIT 21 - select HAVE_ARCH_TRACEHOOK 22 21 help 23 22 The PA-RISC microprocessor is designed by Hewlett-Packard and used 24 23 in many of their workstations & servers (HP9000 700 and 800 series,
+5 -2
arch/parisc/kernel/pci.c
··· 18 18 19 19 #include <asm/io.h> 20 20 #include <asm/system.h> 21 - #include <asm/cache.h> /* for L1_CACHE_BYTES */ 22 21 #include <asm/superio.h> 23 22 24 23 #define DEBUG_RESOURCES 0 ··· 122 123 } else { 123 124 printk(KERN_WARNING "pci_bios != NULL but init() is!\n"); 124 125 } 126 + 127 + /* Set the CLS for PCI as early as possible. */ 128 + pci_cache_line_size = pci_dfl_cache_line_size; 129 + 125 130 return 0; 126 131 } 127 132 ··· 174 171 ** upper byte is PCI_LATENCY_TIMER. 175 172 */ 176 173 pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, 177 - (0x80 << 8) | (L1_CACHE_BYTES / sizeof(u32))); 174 + (0x80 << 8) | pci_cache_line_size); 178 175 } 179 176 180 177
+1
arch/powerpc/Kconfig
··· 173 173 174 174 config OF 175 175 def_bool y 176 + select OF_FLATTREE 176 177 177 178 config PPC_UDBG_16550 178 179 bool
-18
arch/powerpc/include/asm/prom.h
··· 23 23 #include <asm/irq.h> 24 24 #include <asm/atomic.h> 25 25 26 - #define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1 27 - #define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1 28 - 29 - #define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2)) 30 - #define of_prop_cmp(s1, s2) strcmp((s1), (s2)) 31 - #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) 32 - 33 - extern struct device_node *of_chosen; 34 - 35 26 #define HAVE_ARCH_DEVTREE_FIXUPS 36 - 37 - /* For updating the device tree at runtime */ 38 - extern void of_attach_node(struct device_node *); 39 - extern void of_detach_node(struct device_node *); 40 27 41 28 #ifdef CONFIG_PPC32 42 29 /* ··· 38 51 extern struct device_node* pci_device_to_OF_node(struct pci_dev *); 39 52 extern void pci_create_OF_bus_map(void); 40 53 #endif 41 - 42 - extern struct resource *request_OF_resource(struct device_node* node, 43 - int index, const char* name_postfix); 44 - extern int release_OF_resource(struct device_node* node, int index); 45 - 46 54 47 55 /* 48 56 * OF address retreival & translation
+1 -1
arch/powerpc/kernel/of_platform.c
··· 214 214 static int of_dev_phandle_match(struct device *dev, void *data) 215 215 { 216 216 phandle *ph = data; 217 - return to_of_device(dev)->node->linux_phandle == *ph; 217 + return to_of_device(dev)->node->phandle == *ph; 218 218 } 219 219 220 220 struct of_device *of_find_device_by_phandle(phandle ph)
+1 -1
arch/powerpc/kernel/pci_64.c
··· 224 224 * G5 machines... So when something asks for bus 0 io base 225 225 * (bus 0 is HT root), we return the AGP one instead. 226 226 */ 227 - if (in_bus == 0 && machine_is_compatible("MacRISC4")) { 227 + if (in_bus == 0 && of_machine_is_compatible("MacRISC4")) { 228 228 struct device_node *agp; 229 229 230 230 agp = of_find_compatible_node(NULL, NULL, "u3-agp");
+54 -843
arch/powerpc/kernel/prom.c
··· 61 61 #define DBG(fmt...) 62 62 #endif 63 63 64 - 65 - static int __initdata dt_root_addr_cells; 66 - static int __initdata dt_root_size_cells; 67 - 68 64 #ifdef CONFIG_PPC64 69 65 int __initdata iommu_is_off; 70 66 int __initdata iommu_force_on; 71 67 unsigned long tce_alloc_start, tce_alloc_end; 72 68 #endif 73 - 74 - typedef u32 cell_t; 75 - 76 - #if 0 77 - static struct boot_param_header *initial_boot_params __initdata; 78 - #else 79 - struct boot_param_header *initial_boot_params; 80 - #endif 81 - 82 - extern struct device_node *allnodes; /* temporary while merging */ 83 - 84 - extern rwlock_t devtree_lock; /* temporary while merging */ 85 - 86 - /* export that to outside world */ 87 - struct device_node *of_chosen; 88 - 89 - static inline char *find_flat_dt_string(u32 offset) 90 - { 91 - return ((char *)initial_boot_params) + 92 - initial_boot_params->off_dt_strings + offset; 93 - } 94 - 95 - /** 96 - * This function is used to scan the flattened device-tree, it is 97 - * used to extract the memory informations at boot before we can 98 - * unflatten the tree 99 - */ 100 - int __init of_scan_flat_dt(int (*it)(unsigned long node, 101 - const char *uname, int depth, 102 - void *data), 103 - void *data) 104 - { 105 - unsigned long p = ((unsigned long)initial_boot_params) + 106 - initial_boot_params->off_dt_struct; 107 - int rc = 0; 108 - int depth = -1; 109 - 110 - do { 111 - u32 tag = *((u32 *)p); 112 - char *pathp; 113 - 114 - p += 4; 115 - if (tag == OF_DT_END_NODE) { 116 - depth --; 117 - continue; 118 - } 119 - if (tag == OF_DT_NOP) 120 - continue; 121 - if (tag == OF_DT_END) 122 - break; 123 - if (tag == OF_DT_PROP) { 124 - u32 sz = *((u32 *)p); 125 - p += 8; 126 - if (initial_boot_params->version < 0x10) 127 - p = _ALIGN(p, sz >= 8 ? 
8 : 4); 128 - p += sz; 129 - p = _ALIGN(p, 4); 130 - continue; 131 - } 132 - if (tag != OF_DT_BEGIN_NODE) { 133 - printk(KERN_WARNING "Invalid tag %x scanning flattened" 134 - " device tree !\n", tag); 135 - return -EINVAL; 136 - } 137 - depth++; 138 - pathp = (char *)p; 139 - p = _ALIGN(p + strlen(pathp) + 1, 4); 140 - if ((*pathp) == '/') { 141 - char *lp, *np; 142 - for (lp = NULL, np = pathp; *np; np++) 143 - if ((*np) == '/') 144 - lp = np+1; 145 - if (lp != NULL) 146 - pathp = lp; 147 - } 148 - rc = it(p, pathp, depth, data); 149 - if (rc != 0) 150 - break; 151 - } while(1); 152 - 153 - return rc; 154 - } 155 - 156 - unsigned long __init of_get_flat_dt_root(void) 157 - { 158 - unsigned long p = ((unsigned long)initial_boot_params) + 159 - initial_boot_params->off_dt_struct; 160 - 161 - while(*((u32 *)p) == OF_DT_NOP) 162 - p += 4; 163 - BUG_ON (*((u32 *)p) != OF_DT_BEGIN_NODE); 164 - p += 4; 165 - return _ALIGN(p + strlen((char *)p) + 1, 4); 166 - } 167 - 168 - /** 169 - * This function can be used within scan_flattened_dt callback to get 170 - * access to properties 171 - */ 172 - void* __init of_get_flat_dt_prop(unsigned long node, const char *name, 173 - unsigned long *size) 174 - { 175 - unsigned long p = node; 176 - 177 - do { 178 - u32 tag = *((u32 *)p); 179 - u32 sz, noff; 180 - const char *nstr; 181 - 182 - p += 4; 183 - if (tag == OF_DT_NOP) 184 - continue; 185 - if (tag != OF_DT_PROP) 186 - return NULL; 187 - 188 - sz = *((u32 *)p); 189 - noff = *((u32 *)(p + 4)); 190 - p += 8; 191 - if (initial_boot_params->version < 0x10) 192 - p = _ALIGN(p, sz >= 8 ? 
8 : 4); 193 - 194 - nstr = find_flat_dt_string(noff); 195 - if (nstr == NULL) { 196 - printk(KERN_WARNING "Can't find property index" 197 - " name !\n"); 198 - return NULL; 199 - } 200 - if (strcmp(name, nstr) == 0) { 201 - if (size) 202 - *size = sz; 203 - return (void *)p; 204 - } 205 - p += sz; 206 - p = _ALIGN(p, 4); 207 - } while(1); 208 - } 209 - 210 - int __init of_flat_dt_is_compatible(unsigned long node, const char *compat) 211 - { 212 - const char* cp; 213 - unsigned long cplen, l; 214 - 215 - cp = of_get_flat_dt_prop(node, "compatible", &cplen); 216 - if (cp == NULL) 217 - return 0; 218 - while (cplen > 0) { 219 - if (strncasecmp(cp, compat, strlen(compat)) == 0) 220 - return 1; 221 - l = strlen(cp) + 1; 222 - cp += l; 223 - cplen -= l; 224 - } 225 - 226 - return 0; 227 - } 228 - 229 - static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, 230 - unsigned long align) 231 - { 232 - void *res; 233 - 234 - *mem = _ALIGN(*mem, align); 235 - res = (void *)*mem; 236 - *mem += size; 237 - 238 - return res; 239 - } 240 - 241 - static unsigned long __init unflatten_dt_node(unsigned long mem, 242 - unsigned long *p, 243 - struct device_node *dad, 244 - struct device_node ***allnextpp, 245 - unsigned long fpsize) 246 - { 247 - struct device_node *np; 248 - struct property *pp, **prev_pp = NULL; 249 - char *pathp; 250 - u32 tag; 251 - unsigned int l, allocl; 252 - int has_name = 0; 253 - int new_format = 0; 254 - 255 - tag = *((u32 *)(*p)); 256 - if (tag != OF_DT_BEGIN_NODE) { 257 - printk("Weird tag at start of node: %x\n", tag); 258 - return mem; 259 - } 260 - *p += 4; 261 - pathp = (char *)*p; 262 - l = allocl = strlen(pathp) + 1; 263 - *p = _ALIGN(*p + l, 4); 264 - 265 - /* version 0x10 has a more compact unit name here instead of the full 266 - * path. we accumulate the full path size using "fpsize", we'll rebuild 267 - * it later. We detect this because the first character of the name is 268 - * not '/'. 
269 - */ 270 - if ((*pathp) != '/') { 271 - new_format = 1; 272 - if (fpsize == 0) { 273 - /* root node: special case. fpsize accounts for path 274 - * plus terminating zero. root node only has '/', so 275 - * fpsize should be 2, but we want to avoid the first 276 - * level nodes to have two '/' so we use fpsize 1 here 277 - */ 278 - fpsize = 1; 279 - allocl = 2; 280 - } else { 281 - /* account for '/' and path size minus terminal 0 282 - * already in 'l' 283 - */ 284 - fpsize += l; 285 - allocl = fpsize; 286 - } 287 - } 288 - 289 - 290 - np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl, 291 - __alignof__(struct device_node)); 292 - if (allnextpp) { 293 - memset(np, 0, sizeof(*np)); 294 - np->full_name = ((char*)np) + sizeof(struct device_node); 295 - if (new_format) { 296 - char *p = np->full_name; 297 - /* rebuild full path for new format */ 298 - if (dad && dad->parent) { 299 - strcpy(p, dad->full_name); 300 - #ifdef DEBUG 301 - if ((strlen(p) + l + 1) != allocl) { 302 - DBG("%s: p: %d, l: %d, a: %d\n", 303 - pathp, (int)strlen(p), l, allocl); 304 - } 305 - #endif 306 - p += strlen(p); 307 - } 308 - *(p++) = '/'; 309 - memcpy(p, pathp, l); 310 - } else 311 - memcpy(np->full_name, pathp, l); 312 - prev_pp = &np->properties; 313 - **allnextpp = np; 314 - *allnextpp = &np->allnext; 315 - if (dad != NULL) { 316 - np->parent = dad; 317 - /* we temporarily use the next field as `last_child'*/ 318 - if (dad->next == 0) 319 - dad->child = np; 320 - else 321 - dad->next->sibling = np; 322 - dad->next = np; 323 - } 324 - kref_init(&np->kref); 325 - } 326 - while(1) { 327 - u32 sz, noff; 328 - char *pname; 329 - 330 - tag = *((u32 *)(*p)); 331 - if (tag == OF_DT_NOP) { 332 - *p += 4; 333 - continue; 334 - } 335 - if (tag != OF_DT_PROP) 336 - break; 337 - *p += 4; 338 - sz = *((u32 *)(*p)); 339 - noff = *((u32 *)((*p) + 4)); 340 - *p += 8; 341 - if (initial_boot_params->version < 0x10) 342 - *p = _ALIGN(*p, sz >= 8 ? 
8 : 4); 343 - 344 - pname = find_flat_dt_string(noff); 345 - if (pname == NULL) { 346 - printk("Can't find property name in list !\n"); 347 - break; 348 - } 349 - if (strcmp(pname, "name") == 0) 350 - has_name = 1; 351 - l = strlen(pname) + 1; 352 - pp = unflatten_dt_alloc(&mem, sizeof(struct property), 353 - __alignof__(struct property)); 354 - if (allnextpp) { 355 - if (strcmp(pname, "linux,phandle") == 0) { 356 - np->node = *((u32 *)*p); 357 - if (np->linux_phandle == 0) 358 - np->linux_phandle = np->node; 359 - } 360 - if (strcmp(pname, "ibm,phandle") == 0) 361 - np->linux_phandle = *((u32 *)*p); 362 - pp->name = pname; 363 - pp->length = sz; 364 - pp->value = (void *)*p; 365 - *prev_pp = pp; 366 - prev_pp = &pp->next; 367 - } 368 - *p = _ALIGN((*p) + sz, 4); 369 - } 370 - /* with version 0x10 we may not have the name property, recreate 371 - * it here from the unit name if absent 372 - */ 373 - if (!has_name) { 374 - char *p = pathp, *ps = pathp, *pa = NULL; 375 - int sz; 376 - 377 - while (*p) { 378 - if ((*p) == '@') 379 - pa = p; 380 - if ((*p) == '/') 381 - ps = p + 1; 382 - p++; 383 - } 384 - if (pa < ps) 385 - pa = p; 386 - sz = (pa - ps) + 1; 387 - pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz, 388 - __alignof__(struct property)); 389 - if (allnextpp) { 390 - pp->name = "name"; 391 - pp->length = sz; 392 - pp->value = pp + 1; 393 - *prev_pp = pp; 394 - prev_pp = &pp->next; 395 - memcpy(pp->value, ps, sz - 1); 396 - ((char *)pp->value)[sz - 1] = 0; 397 - DBG("fixed up name for %s -> %s\n", pathp, 398 - (char *)pp->value); 399 - } 400 - } 401 - if (allnextpp) { 402 - *prev_pp = NULL; 403 - np->name = of_get_property(np, "name", NULL); 404 - np->type = of_get_property(np, "device_type", NULL); 405 - 406 - if (!np->name) 407 - np->name = "<NULL>"; 408 - if (!np->type) 409 - np->type = "<NULL>"; 410 - } 411 - while (tag == OF_DT_BEGIN_NODE) { 412 - mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize); 413 - tag = *((u32 *)(*p)); 414 - } 415 - 
if (tag != OF_DT_END_NODE) { 416 - printk("Weird tag at end of node: %x\n", tag); 417 - return mem; 418 - } 419 - *p += 4; 420 - return mem; 421 - } 422 69 423 70 static int __init early_parse_mem(char *p) 424 71 { ··· 93 446 DBG("-> move_device_tree\n"); 94 447 95 448 start = __pa(initial_boot_params); 96 - size = initial_boot_params->totalsize; 449 + size = be32_to_cpu(initial_boot_params->totalsize); 97 450 98 451 if ((memory_limit && (start + size) > memory_limit) || 99 452 overlaps_crashkernel(start, size)) { ··· 104 457 } 105 458 106 459 DBG("<- move_device_tree\n"); 107 - } 108 - 109 - /** 110 - * unflattens the device-tree passed by the firmware, creating the 111 - * tree of struct device_node. It also fills the "name" and "type" 112 - * pointers of the nodes so the normal device-tree walking functions 113 - * can be used (this used to be done by finish_device_tree) 114 - */ 115 - void __init unflatten_device_tree(void) 116 - { 117 - unsigned long start, mem, size; 118 - struct device_node **allnextp = &allnodes; 119 - 120 - DBG(" -> unflatten_device_tree()\n"); 121 - 122 - /* First pass, scan for size */ 123 - start = ((unsigned long)initial_boot_params) + 124 - initial_boot_params->off_dt_struct; 125 - size = unflatten_dt_node(0, &start, NULL, NULL, 0); 126 - size = (size | 3) + 1; 127 - 128 - DBG(" size is %lx, allocating...\n", size); 129 - 130 - /* Allocate memory for the expanded device tree */ 131 - mem = lmb_alloc(size + 4, __alignof__(struct device_node)); 132 - mem = (unsigned long) __va(mem); 133 - 134 - ((u32 *)mem)[size / 4] = 0xdeadbeef; 135 - 136 - DBG(" unflattening %lx...\n", mem); 137 - 138 - /* Second pass, do actual unflattening */ 139 - start = ((unsigned long)initial_boot_params) + 140 - initial_boot_params->off_dt_struct; 141 - unflatten_dt_node(mem, &start, NULL, &allnextp, 0); 142 - if (*((u32 *)start) != OF_DT_END) 143 - printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start)); 144 - if (((u32 *)mem)[size / 4] != 
0xdeadbeef) 145 - printk(KERN_WARNING "End of tree marker overwritten: %08x\n", 146 - ((u32 *)mem)[size / 4] ); 147 - *allnextp = NULL; 148 - 149 - /* Get pointer to OF "/chosen" node for use everywhere */ 150 - of_chosen = of_find_node_by_path("/chosen"); 151 - if (of_chosen == NULL) 152 - of_chosen = of_find_node_by_path("/chosen@0"); 153 - 154 - DBG(" <- unflatten_device_tree()\n"); 155 460 } 156 461 157 462 /* ··· 362 763 return 0; 363 764 } 364 765 365 - #ifdef CONFIG_BLK_DEV_INITRD 366 - static void __init early_init_dt_check_for_initrd(unsigned long node) 367 - { 368 - unsigned long l; 369 - u32 *prop; 370 - 371 - DBG("Looking for initrd properties... "); 372 - 373 - prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l); 374 - if (prop) { 375 - initrd_start = (unsigned long)__va(of_read_ulong(prop, l/4)); 376 - 377 - prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l); 378 - if (prop) { 379 - initrd_end = (unsigned long) 380 - __va(of_read_ulong(prop, l/4)); 381 - initrd_below_start_ok = 1; 382 - } else { 383 - initrd_start = 0; 384 - } 385 - } 386 - 387 - DBG("initrd_start=0x%lx initrd_end=0x%lx\n", initrd_start, initrd_end); 388 - } 389 - #else 390 - static inline void early_init_dt_check_for_initrd(unsigned long node) 391 - { 392 - } 393 - #endif /* CONFIG_BLK_DEV_INITRD */ 394 - 395 - static int __init early_init_dt_scan_chosen(unsigned long node, 396 - const char *uname, int depth, void *data) 766 + void __init early_init_dt_scan_chosen_arch(unsigned long node) 397 767 { 398 768 unsigned long *lprop; 399 - unsigned long l; 400 - char *p; 401 - 402 - DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname); 403 - 404 - if (depth != 1 || 405 - (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) 406 - return 0; 407 769 408 770 #ifdef CONFIG_PPC64 409 771 /* check if iommu is forced on or off */ ··· 375 815 #endif 376 816 377 817 /* mem=x on the command line is the preferred mechanism */ 378 - lprop = of_get_flat_dt_prop(node, 
"linux,memory-limit", NULL); 379 - if (lprop) 380 - memory_limit = *lprop; 818 + lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL); 819 + if (lprop) 820 + memory_limit = *lprop; 381 821 382 822 #ifdef CONFIG_PPC64 383 - lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL); 384 - if (lprop) 385 - tce_alloc_start = *lprop; 386 - lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL); 387 - if (lprop) 388 - tce_alloc_end = *lprop; 823 + lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL); 824 + if (lprop) 825 + tce_alloc_start = *lprop; 826 + lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL); 827 + if (lprop) 828 + tce_alloc_end = *lprop; 389 829 #endif 390 830 391 831 #ifdef CONFIG_KEXEC ··· 397 837 if (lprop) 398 838 crashk_res.end = crashk_res.start + *lprop - 1; 399 839 #endif 400 - 401 - early_init_dt_check_for_initrd(node); 402 - 403 - /* Retreive command line */ 404 - p = of_get_flat_dt_prop(node, "bootargs", &l); 405 - if (p != NULL && l > 0) 406 - strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); 407 - 408 - #ifdef CONFIG_CMDLINE 409 - if (p == NULL || l == 0 || (l == 1 && (*p) == 0)) 410 - strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); 411 - #endif /* CONFIG_CMDLINE */ 412 - 413 - DBG("Command line is: %s\n", cmd_line); 414 - 415 - /* break now */ 416 - return 1; 417 - } 418 - 419 - static int __init early_init_dt_scan_root(unsigned long node, 420 - const char *uname, int depth, void *data) 421 - { 422 - u32 *prop; 423 - 424 - if (depth != 0) 425 - return 0; 426 - 427 - prop = of_get_flat_dt_prop(node, "#size-cells", NULL); 428 - dt_root_size_cells = (prop == NULL) ? 1 : *prop; 429 - DBG("dt_root_size_cells = %x\n", dt_root_size_cells); 430 - 431 - prop = of_get_flat_dt_prop(node, "#address-cells", NULL); 432 - dt_root_addr_cells = (prop == NULL) ? 
2 : *prop; 433 - DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells); 434 - 435 - /* break now */ 436 - return 1; 437 - } 438 - 439 - static u64 __init dt_mem_next_cell(int s, cell_t **cellp) 440 - { 441 - cell_t *p = *cellp; 442 - 443 - *cellp = p + s; 444 - return of_read_number(p, s); 445 840 } 446 841 447 842 #ifdef CONFIG_PPC_PSERIES ··· 408 893 */ 409 894 static int __init early_init_dt_scan_drconf_memory(unsigned long node) 410 895 { 411 - cell_t *dm, *ls, *usm; 896 + __be32 *dm, *ls, *usm; 412 897 unsigned long l, n, flags; 413 898 u64 base, size, lmb_size; 414 899 unsigned int is_kexec_kdump = 0, rngs; 415 900 416 901 ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l); 417 - if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t)) 902 + if (ls == NULL || l < dt_root_size_cells * sizeof(__be32)) 418 903 return 0; 419 904 lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls); 420 905 421 906 dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l); 422 - if (dm == NULL || l < sizeof(cell_t)) 907 + if (dm == NULL || l < sizeof(__be32)) 423 908 return 0; 424 909 425 910 n = *dm++; /* number of entries */ 426 - if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(cell_t)) 911 + if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(__be32)) 427 912 return 0; 428 913 429 914 /* check if this is a kexec/kdump kernel. 
*/ ··· 478 963 #define early_init_dt_scan_drconf_memory(node) 0 479 964 #endif /* CONFIG_PPC_PSERIES */ 480 965 481 - static int __init early_init_dt_scan_memory(unsigned long node, 482 - const char *uname, int depth, void *data) 966 + static int __init early_init_dt_scan_memory_ppc(unsigned long node, 967 + const char *uname, 968 + int depth, void *data) 483 969 { 484 - char *type = of_get_flat_dt_prop(node, "device_type", NULL); 485 - cell_t *reg, *endp; 486 - unsigned long l; 487 - 488 - /* Look for the ibm,dynamic-reconfiguration-memory node */ 489 970 if (depth == 1 && 490 971 strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0) 491 972 return early_init_dt_scan_drconf_memory(node); 492 - 493 - /* We are scanning "memory" nodes only */ 494 - if (type == NULL) { 495 - /* 496 - * The longtrail doesn't have a device_type on the 497 - * /memory node, so look for the node called /memory@0. 498 - */ 499 - if (depth != 1 || strcmp(uname, "memory@0") != 0) 500 - return 0; 501 - } else if (strcmp(type, "memory") != 0) 502 - return 0; 503 - 504 - reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l); 505 - if (reg == NULL) 506 - reg = of_get_flat_dt_prop(node, "reg", &l); 507 - if (reg == NULL) 508 - return 0; 509 - 510 - endp = reg + (l / sizeof(cell_t)); 511 - 512 - DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n", 513 - uname, l, reg[0], reg[1], reg[2], reg[3]); 514 - 515 - while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { 516 - u64 base, size; 517 - 518 - base = dt_mem_next_cell(dt_root_addr_cells, &reg); 519 - size = dt_mem_next_cell(dt_root_size_cells, &reg); 520 - 521 - if (size == 0) 522 - continue; 523 - DBG(" - %llx , %llx\n", (unsigned long long)base, 524 - (unsigned long long)size); 525 - #ifdef CONFIG_PPC64 526 - if (iommu_is_off) { 527 - if (base >= 0x80000000ul) 528 - continue; 529 - if ((base + size) > 0x80000000ul) 530 - size = 0x80000000ul - base; 531 - } 532 - #endif 533 - lmb_add(base, size); 534 - 535 - 
memstart_addr = min((u64)memstart_addr, base); 536 - } 537 - 538 - return 0; 973 + 974 + return early_init_dt_scan_memory(node, uname, depth, data); 539 975 } 976 + 977 + void __init early_init_dt_add_memory_arch(u64 base, u64 size) 978 + { 979 + #if defined(CONFIG_PPC64) 980 + if (iommu_is_off) { 981 + if (base >= 0x80000000ul) 982 + return; 983 + if ((base + size) > 0x80000000ul) 984 + size = 0x80000000ul - base; 985 + } 986 + #endif 987 + 988 + lmb_add(base, size); 989 + 990 + memstart_addr = min((u64)memstart_addr, base); 991 + } 992 + 993 + u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align) 994 + { 995 + return lmb_alloc(size, align); 996 + } 997 + 998 + #ifdef CONFIG_BLK_DEV_INITRD 999 + void __init early_init_dt_setup_initrd_arch(unsigned long start, 1000 + unsigned long end) 1001 + { 1002 + initrd_start = (unsigned long)__va(start); 1003 + initrd_end = (unsigned long)__va(end); 1004 + initrd_below_start_ok = 1; 1005 + } 1006 + #endif 540 1007 541 1008 static void __init early_reserve_mem(void) 542 1009 { ··· 683 1186 /* Scan memory nodes and rebuild LMBs */ 684 1187 lmb_init(); 685 1188 of_scan_flat_dt(early_init_dt_scan_root, NULL); 686 - of_scan_flat_dt(early_init_dt_scan_memory, NULL); 1189 + of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); 687 1190 688 1191 /* Save command line for /proc/cmdline and then parse parameters */ 689 1192 strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); ··· 731 1234 DBG(" <- early_init_devtree()\n"); 732 1235 } 733 1236 734 - 735 - /** 736 - * Indicates whether the root node has a given value in its 737 - * compatible property. 
738 - */ 739 - int machine_is_compatible(const char *compat) 740 - { 741 - struct device_node *root; 742 - int rc = 0; 743 - 744 - root = of_find_node_by_path("/"); 745 - if (root) { 746 - rc = of_device_is_compatible(root, compat); 747 - of_node_put(root); 748 - } 749 - return rc; 750 - } 751 - EXPORT_SYMBOL(machine_is_compatible); 752 - 753 1237 /******* 754 1238 * 755 1239 * New implementation of the OF "find" APIs, return a refcounted ··· 741 1263 * this isn't dealt with yet. 742 1264 * 743 1265 *******/ 744 - 745 - /** 746 - * of_find_node_by_phandle - Find a node given a phandle 747 - * @handle: phandle of the node to find 748 - * 749 - * Returns a node pointer with refcount incremented, use 750 - * of_node_put() on it when done. 751 - */ 752 - struct device_node *of_find_node_by_phandle(phandle handle) 753 - { 754 - struct device_node *np; 755 - 756 - read_lock(&devtree_lock); 757 - for (np = allnodes; np != 0; np = np->allnext) 758 - if (np->linux_phandle == handle) 759 - break; 760 - of_node_get(np); 761 - read_unlock(&devtree_lock); 762 - return np; 763 - } 764 - EXPORT_SYMBOL(of_find_node_by_phandle); 765 1266 766 1267 /** 767 1268 * of_find_next_cache_node - Find a node's subsidiary cache ··· 771 1314 return child; 772 1315 773 1316 return NULL; 774 - } 775 - 776 - /** 777 - * of_node_get - Increment refcount of a node 778 - * @node: Node to inc refcount, NULL is supported to 779 - * simplify writing of callers 780 - * 781 - * Returns node. 
782 - */ 783 - struct device_node *of_node_get(struct device_node *node) 784 - { 785 - if (node) 786 - kref_get(&node->kref); 787 - return node; 788 - } 789 - EXPORT_SYMBOL(of_node_get); 790 - 791 - static inline struct device_node * kref_to_device_node(struct kref *kref) 792 - { 793 - return container_of(kref, struct device_node, kref); 794 - } 795 - 796 - /** 797 - * of_node_release - release a dynamically allocated node 798 - * @kref: kref element of the node to be released 799 - * 800 - * In of_node_put() this function is passed to kref_put() 801 - * as the destructor. 802 - */ 803 - static void of_node_release(struct kref *kref) 804 - { 805 - struct device_node *node = kref_to_device_node(kref); 806 - struct property *prop = node->properties; 807 - 808 - /* We should never be releasing nodes that haven't been detached. */ 809 - if (!of_node_check_flag(node, OF_DETACHED)) { 810 - printk("WARNING: Bad of_node_put() on %s\n", node->full_name); 811 - dump_stack(); 812 - kref_init(&node->kref); 813 - return; 814 - } 815 - 816 - if (!of_node_check_flag(node, OF_DYNAMIC)) 817 - return; 818 - 819 - while (prop) { 820 - struct property *next = prop->next; 821 - kfree(prop->name); 822 - kfree(prop->value); 823 - kfree(prop); 824 - prop = next; 825 - 826 - if (!prop) { 827 - prop = node->deadprops; 828 - node->deadprops = NULL; 829 - } 830 - } 831 - kfree(node->full_name); 832 - kfree(node->data); 833 - kfree(node); 834 - } 835 - 836 - /** 837 - * of_node_put - Decrement refcount of a node 838 - * @node: Node to dec refcount, NULL is supported to 839 - * simplify writing of callers 840 - * 841 - */ 842 - void of_node_put(struct device_node *node) 843 - { 844 - if (node) 845 - kref_put(&node->kref, of_node_release); 846 - } 847 - EXPORT_SYMBOL(of_node_put); 848 - 849 - /* 850 - * Plug a device node into the tree and global list. 
851 - */ 852 - void of_attach_node(struct device_node *np) 853 - { 854 - unsigned long flags; 855 - 856 - write_lock_irqsave(&devtree_lock, flags); 857 - np->sibling = np->parent->child; 858 - np->allnext = allnodes; 859 - np->parent->child = np; 860 - allnodes = np; 861 - write_unlock_irqrestore(&devtree_lock, flags); 862 - } 863 - 864 - /* 865 - * "Unplug" a node from the device tree. The caller must hold 866 - * a reference to the node. The memory associated with the node 867 - * is not freed until its refcount goes to zero. 868 - */ 869 - void of_detach_node(struct device_node *np) 870 - { 871 - struct device_node *parent; 872 - unsigned long flags; 873 - 874 - write_lock_irqsave(&devtree_lock, flags); 875 - 876 - parent = np->parent; 877 - if (!parent) 878 - goto out_unlock; 879 - 880 - if (allnodes == np) 881 - allnodes = np->allnext; 882 - else { 883 - struct device_node *prev; 884 - for (prev = allnodes; 885 - prev->allnext != np; 886 - prev = prev->allnext) 887 - ; 888 - prev->allnext = np->allnext; 889 - } 890 - 891 - if (parent->child == np) 892 - parent->child = np->sibling; 893 - else { 894 - struct device_node *prevsib; 895 - for (prevsib = np->parent->child; 896 - prevsib->sibling != np; 897 - prevsib = prevsib->sibling) 898 - ; 899 - prevsib->sibling = np->sibling; 900 - } 901 - 902 - of_node_set_flag(np, OF_DETACHED); 903 - 904 - out_unlock: 905 - write_unlock_irqrestore(&devtree_lock, flags); 906 1317 } 907 1318 908 1319 #ifdef CONFIG_PPC_PSERIES ··· 804 1479 if (machine_is(powermac)) 805 1480 return -ENODEV; 806 1481 807 - /* fix up new node's linux_phandle field */ 1482 + /* fix up new node's phandle field */ 808 1483 if ((ibm_phandle = of_get_property(node, "ibm,phandle", NULL))) 809 - node->linux_phandle = *ibm_phandle; 1484 + node->phandle = *ibm_phandle; 810 1485 811 1486 out: 812 1487 of_node_put(parent); ··· 844 1519 } 845 1520 __initcall(prom_reconfig_setup); 846 1521 #endif 847 - 848 - /* 849 - * Add a property to a node 850 - */ 851 - 
int prom_add_property(struct device_node* np, struct property* prop) 852 - { 853 - struct property **next; 854 - unsigned long flags; 855 - 856 - prop->next = NULL; 857 - write_lock_irqsave(&devtree_lock, flags); 858 - next = &np->properties; 859 - while (*next) { 860 - if (strcmp(prop->name, (*next)->name) == 0) { 861 - /* duplicate ! don't insert it */ 862 - write_unlock_irqrestore(&devtree_lock, flags); 863 - return -1; 864 - } 865 - next = &(*next)->next; 866 - } 867 - *next = prop; 868 - write_unlock_irqrestore(&devtree_lock, flags); 869 - 870 - #ifdef CONFIG_PROC_DEVICETREE 871 - /* try to add to proc as well if it was initialized */ 872 - if (np->pde) 873 - proc_device_tree_add_prop(np->pde, prop); 874 - #endif /* CONFIG_PROC_DEVICETREE */ 875 - 876 - return 0; 877 - } 878 - 879 - /* 880 - * Remove a property from a node. Note that we don't actually 881 - * remove it, since we have given out who-knows-how-many pointers 882 - * to the data using get-property. Instead we just move the property 883 - * to the "dead properties" list, so it won't be found any more. 884 - */ 885 - int prom_remove_property(struct device_node *np, struct property *prop) 886 - { 887 - struct property **next; 888 - unsigned long flags; 889 - int found = 0; 890 - 891 - write_lock_irqsave(&devtree_lock, flags); 892 - next = &np->properties; 893 - while (*next) { 894 - if (*next == prop) { 895 - /* found the node */ 896 - *next = prop->next; 897 - prop->next = np->deadprops; 898 - np->deadprops = prop; 899 - found = 1; 900 - break; 901 - } 902 - next = &(*next)->next; 903 - } 904 - write_unlock_irqrestore(&devtree_lock, flags); 905 - 906 - if (!found) 907 - return -ENODEV; 908 - 909 - #ifdef CONFIG_PROC_DEVICETREE 910 - /* try to remove the proc node as well */ 911 - if (np->pde) 912 - proc_device_tree_remove_prop(np->pde, prop); 913 - #endif /* CONFIG_PROC_DEVICETREE */ 914 - 915 - return 0; 916 - } 917 - 918 - /* 919 - * Update a property in a node. 
Note that we don't actually 920 - * remove it, since we have given out who-knows-how-many pointers 921 - * to the data using get-property. Instead we just move the property 922 - * to the "dead properties" list, and add the new property to the 923 - * property list 924 - */ 925 - int prom_update_property(struct device_node *np, 926 - struct property *newprop, 927 - struct property *oldprop) 928 - { 929 - struct property **next; 930 - unsigned long flags; 931 - int found = 0; 932 - 933 - write_lock_irqsave(&devtree_lock, flags); 934 - next = &np->properties; 935 - while (*next) { 936 - if (*next == oldprop) { 937 - /* found the node */ 938 - newprop->next = oldprop->next; 939 - *next = newprop; 940 - oldprop->next = np->deadprops; 941 - np->deadprops = oldprop; 942 - found = 1; 943 - break; 944 - } 945 - next = &(*next)->next; 946 - } 947 - write_unlock_irqrestore(&devtree_lock, flags); 948 - 949 - if (!found) 950 - return -ENODEV; 951 - 952 - #ifdef CONFIG_PROC_DEVICETREE 953 - /* try to add to proc as well if it was initialized */ 954 - if (np->pde) 955 - proc_device_tree_update_prop(np->pde, newprop, oldprop); 956 - #endif /* CONFIG_PROC_DEVICETREE */ 957 - 958 - return 0; 959 - } 960 - 961 1522 962 1523 /* Find the device node for a given logical cpu number, also returns the cpu 963 1524 * local thread number (index in ibm,interrupt-server#s) if relevant and
+2 -1
arch/powerpc/platforms/85xx/mpc85xx_mds.c
··· 341 341 } 342 342 343 343 mpic = mpic_alloc(np, r.start, 344 - MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, 344 + MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | 345 + MPIC_BROKEN_FRR_NIRQS, 345 346 0, 256, " OpenPIC "); 346 347 BUG_ON(mpic == NULL); 347 348 of_node_put(np);
+2 -2
arch/powerpc/platforms/85xx/xes_mpc85xx.c
··· 80 80 printk(KERN_INFO "xes_mpc85xx: Enabling L2 as cache\n"); 81 81 82 82 ctl = MPC85xx_L2CTL_L2E | MPC85xx_L2CTL_L2I; 83 - if (machine_is_compatible("MPC8540") || 84 - machine_is_compatible("MPC8560")) 83 + if (of_machine_is_compatible("MPC8540") || 84 + of_machine_is_compatible("MPC8560")) 85 85 /* 86 86 * Assume L2 SRAM is used fully for cache, so set 87 87 * L2BLKSZ (bits 4:5) to match L2SIZ (bits 2:3).
+1 -1
arch/powerpc/platforms/cell/cbe_powerbutton.c
··· 48 48 int ret = 0; 49 49 struct input_dev *dev; 50 50 51 - if (!machine_is_compatible("IBM,CBPLUS-1.0")) { 51 + if (!of_machine_is_compatible("IBM,CBPLUS-1.0")) { 52 52 printk(KERN_ERR "%s: Not a cell blade.\n", __func__); 53 53 ret = -ENODEV; 54 54 goto out;
+1 -1
arch/powerpc/platforms/cell/ras.c
··· 255 255 { 256 256 struct cbe_pmd_regs __iomem *regs; 257 257 258 - sysreset_hack = machine_is_compatible("IBM,CBPLUS-1.0"); 258 + sysreset_hack = of_machine_is_compatible("IBM,CBPLUS-1.0"); 259 259 if (!sysreset_hack) 260 260 return 0; 261 261
+3 -3
arch/powerpc/platforms/cell/spu_manage.c
··· 457 457 continue; 458 458 vic_handles = of_get_property(spu_dn, "vicinity", &lenp); 459 459 for (i=0; i < (lenp / sizeof(phandle)); i++) { 460 - if (vic_handles[i] == target->linux_phandle) 460 + if (vic_handles[i] == target->phandle) 461 461 return spu; 462 462 } 463 463 } ··· 499 499 500 500 if (strcmp(name, "spe") == 0) { 501 501 spu = devnode_spu(cbe, vic_dn); 502 - avoid_ph = last_spu_dn->linux_phandle; 502 + avoid_ph = last_spu_dn->phandle; 503 503 } else { 504 504 /* 505 505 * "mic-tm" and "bif0" nodes do not have ··· 514 514 last_spu->has_mem_affinity = 1; 515 515 spu->has_mem_affinity = 1; 516 516 } 517 - avoid_ph = vic_dn->linux_phandle; 517 + avoid_ph = vic_dn->phandle; 518 518 } 519 519 520 520 list_add_tail(&spu->aff_list, &last_spu->aff_list);
+2 -2
arch/powerpc/platforms/pasemi/cpufreq.c
··· 304 304 305 305 static int __init pas_cpufreq_init(void) 306 306 { 307 - if (!machine_is_compatible("PA6T-1682M") && 308 - !machine_is_compatible("pasemi,pwrficient")) 307 + if (!of_machine_is_compatible("PA6T-1682M") && 308 + !of_machine_is_compatible("pasemi,pwrficient")) 309 309 return -ENODEV; 310 310 311 311 return cpufreq_register_driver(&pas_cpufreq_driver);
+7 -7
arch/powerpc/platforms/powermac/cpufreq_32.c
··· 657 657 cur_freq = (*value) / 1000; 658 658 659 659 /* Check for 7447A based MacRISC3 */ 660 - if (machine_is_compatible("MacRISC3") && 660 + if (of_machine_is_compatible("MacRISC3") && 661 661 of_get_property(cpunode, "dynamic-power-step", NULL) && 662 662 PVR_VER(mfspr(SPRN_PVR)) == 0x8003) { 663 663 pmac_cpufreq_init_7447A(cpunode); 664 664 /* Check for other MacRISC3 machines */ 665 - } else if (machine_is_compatible("PowerBook3,4") || 666 - machine_is_compatible("PowerBook3,5") || 667 - machine_is_compatible("MacRISC3")) { 665 + } else if (of_machine_is_compatible("PowerBook3,4") || 666 + of_machine_is_compatible("PowerBook3,5") || 667 + of_machine_is_compatible("MacRISC3")) { 668 668 pmac_cpufreq_init_MacRISC3(cpunode); 669 669 /* Else check for iBook2 500/600 */ 670 - } else if (machine_is_compatible("PowerBook4,1")) { 670 + } else if (of_machine_is_compatible("PowerBook4,1")) { 671 671 hi_freq = cur_freq; 672 672 low_freq = 400000; 673 673 set_speed_proc = pmu_set_cpu_speed; 674 674 is_pmu_based = 1; 675 675 } 676 676 /* Else check for TiPb 550 */ 677 - else if (machine_is_compatible("PowerBook3,3") && cur_freq == 550000) { 677 + else if (of_machine_is_compatible("PowerBook3,3") && cur_freq == 550000) { 678 678 hi_freq = cur_freq; 679 679 low_freq = 500000; 680 680 set_speed_proc = pmu_set_cpu_speed; 681 681 is_pmu_based = 1; 682 682 } 683 683 /* Else check for TiPb 400 & 500 */ 684 - else if (machine_is_compatible("PowerBook3,2")) { 684 + else if (of_machine_is_compatible("PowerBook3,2")) { 685 685 /* We only know about the 400 MHz and the 500Mhz model 686 686 * they both have 300 MHz as low frequency 687 687 */
+7 -7
arch/powerpc/platforms/powermac/cpufreq_64.c
··· 398 398 int rc = -ENODEV; 399 399 400 400 /* Check supported platforms */ 401 - if (machine_is_compatible("PowerMac8,1") || 402 - machine_is_compatible("PowerMac8,2") || 403 - machine_is_compatible("PowerMac9,1")) 401 + if (of_machine_is_compatible("PowerMac8,1") || 402 + of_machine_is_compatible("PowerMac8,2") || 403 + of_machine_is_compatible("PowerMac9,1")) 404 404 use_volts_smu = 1; 405 - else if (machine_is_compatible("PowerMac11,2")) 405 + else if (of_machine_is_compatible("PowerMac11,2")) 406 406 use_volts_vdnap = 1; 407 407 else 408 408 return -ENODEV; ··· 729 729 return -ENODEV; 730 730 } 731 731 732 - if (machine_is_compatible("PowerMac7,2") || 733 - machine_is_compatible("PowerMac7,3") || 734 - machine_is_compatible("RackMac3,1")) 732 + if (of_machine_is_compatible("PowerMac7,2") || 733 + of_machine_is_compatible("PowerMac7,3") || 734 + of_machine_is_compatible("RackMac3,1")) 735 735 rc = g5_pm72_cpufreq_init(cpus); 736 736 #ifdef CONFIG_PMAC_SMU 737 737 else
+1 -1
arch/powerpc/platforms/powermac/feature.c
··· 2426 2426 } 2427 2427 } 2428 2428 for(i=0; i<ARRAY_SIZE(pmac_mb_defs); i++) { 2429 - if (machine_is_compatible(pmac_mb_defs[i].model_string)) { 2429 + if (of_machine_is_compatible(pmac_mb_defs[i].model_string)) { 2430 2430 pmac_mb = pmac_mb_defs[i]; 2431 2431 goto found; 2432 2432 }
+1 -1
arch/powerpc/platforms/powermac/pfunc_core.c
··· 842 842 list_for_each_entry(func, &dev->functions, link) { 843 843 if (name && strcmp(name, func->name)) 844 844 continue; 845 - if (func->phandle && target->node != func->phandle) 845 + if (func->phandle && target->phandle != func->phandle) 846 846 continue; 847 847 if ((func->flags & flags) == 0) 848 848 continue;
+6 -6
arch/powerpc/platforms/powermac/smp.c
··· 693 693 #ifdef CONFIG_PPC64 694 694 695 695 /* i2c based HW sync on some G5s */ 696 - if (machine_is_compatible("PowerMac7,2") || 697 - machine_is_compatible("PowerMac7,3") || 698 - machine_is_compatible("RackMac3,1")) 696 + if (of_machine_is_compatible("PowerMac7,2") || 697 + of_machine_is_compatible("PowerMac7,3") || 698 + of_machine_is_compatible("RackMac3,1")) 699 699 smp_core99_setup_i2c_hwsync(ncpus); 700 700 701 701 /* pfunc based HW sync on recent G5s */ ··· 713 713 #else /* CONFIG_PPC64 */ 714 714 715 715 /* GPIO based HW sync on ppc32 Core99 */ 716 - if (pmac_tb_freeze == NULL && !machine_is_compatible("MacRISC4")) { 716 + if (pmac_tb_freeze == NULL && !of_machine_is_compatible("MacRISC4")) { 717 717 struct device_node *cpu; 718 718 const u32 *tbprop = NULL; 719 719 ··· 750 750 #endif 751 751 752 752 /* 32 bits SMP can't NAP */ 753 - if (!machine_is_compatible("MacRISC4")) 753 + if (!of_machine_is_compatible("MacRISC4")) 754 754 powersave_nap = 0; 755 755 } 756 756 ··· 852 852 /* If we didn't start the second CPU, we must take 853 853 * it off the bus 854 854 */ 855 - if (machine_is_compatible("MacRISC4") && 855 + if (of_machine_is_compatible("MacRISC4") && 856 856 num_online_cpus() < 2) 857 857 g5_phy_disable_cpu1(); 858 858 #endif /* CONFIG_PPC64 */
+4 -4
arch/powerpc/platforms/powermac/time.c
··· 317 317 * calibration. That's better since the VIA itself seems 318 318 * to be slightly off. --BenH 319 319 */ 320 - if (!machine_is_compatible("MacRISC2") && 321 - !machine_is_compatible("MacRISC3") && 322 - !machine_is_compatible("MacRISC4")) 320 + if (!of_machine_is_compatible("MacRISC2") && 321 + !of_machine_is_compatible("MacRISC3") && 322 + !of_machine_is_compatible("MacRISC4")) 323 323 if (via_calibrate_decr()) 324 324 return; 325 325 ··· 328 328 * probably implement calibration based on the KL timer on these 329 329 * machines anyway... -BenH 330 330 */ 331 - if (machine_is_compatible("PowerMac3,5")) 331 + if (of_machine_is_compatible("PowerMac3,5")) 332 332 if (via_calibrate_decr()) 333 333 return; 334 334 #endif
+3 -3
arch/powerpc/platforms/powermac/udbg_scc.c
··· 132 132 scc_inittab[1] = in_8(sccc); 133 133 out_8(sccc, 12); 134 134 scc_inittab[3] = in_8(sccc); 135 - } else if (machine_is_compatible("RackMac1,1") 136 - || machine_is_compatible("RackMac1,2") 137 - || machine_is_compatible("MacRISC4")) { 135 + } else if (of_machine_is_compatible("RackMac1,1") 136 + || of_machine_is_compatible("RackMac1,2") 137 + || of_machine_is_compatible("MacRISC4")) { 138 138 /* Xserves and G5s default to 57600 */ 139 139 scc_inittab[1] = 0; 140 140 scc_inittab[3] = 0;
+2 -2
arch/powerpc/sysdev/grackle.c
··· 56 56 void __init setup_grackle(struct pci_controller *hose) 57 57 { 58 58 setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0); 59 - if (machine_is_compatible("PowerMac1,1")) 59 + if (of_machine_is_compatible("PowerMac1,1")) 60 60 ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS); 61 - if (machine_is_compatible("AAPL,PowerBook1998")) 61 + if (of_machine_is_compatible("AAPL,PowerBook1998")) 62 62 grackle_set_loop_snoop(hose, 1); 63 63 #if 0 /* Disabled for now, HW problems ??? */ 64 64 grackle_set_stg(hose, 1);
+2 -2
arch/sparc/include/asm/stat.h
··· 53 53 ino_t st_ino; 54 54 mode_t st_mode; 55 55 short st_nlink; 56 - uid_t st_uid; 57 - gid_t st_gid; 56 + uid16_t st_uid; 57 + gid16_t st_gid; 58 58 unsigned short st_rdev; 59 59 off_t st_size; 60 60 time_t st_atime;
+1 -1
arch/sparc/kernel/devices.c
··· 59 59 60 60 cur_inst = 0; 61 61 for_each_node_by_type(dp, "cpu") { 62 - int err = check_cpu_node(dp->node, &cur_inst, 62 + int err = check_cpu_node(dp->phandle, &cur_inst, 63 63 compare, compare_arg, 64 64 prom_node, mid); 65 65 if (!err) {
+4
arch/sparc/kernel/kstack.h
··· 11 11 { 12 12 unsigned long base = (unsigned long) tp; 13 13 14 + /* Stack pointer must be 16-byte aligned. */ 15 + if (sp & (16UL - 1)) 16 + return false; 17 + 14 18 if (sp >= (base + sizeof(struct thread_info)) && 15 19 sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf))) 16 20 return true;
+2 -2
arch/sparc/kernel/of_device_32.c
··· 105 105 106 106 static int of_bus_ambapp_match(struct device_node *np) 107 107 { 108 - return !strcmp(np->name, "ambapp"); 108 + return !strcmp(np->type, "ambapp"); 109 109 } 110 110 111 111 static void of_bus_ambapp_count_cells(struct device_node *child, ··· 433 433 if (!parent) 434 434 dev_set_name(&op->dev, "root"); 435 435 else 436 - dev_set_name(&op->dev, "%08x", dp->node); 436 + dev_set_name(&op->dev, "%08x", dp->phandle); 437 437 438 438 if (of_device_register(op)) { 439 439 printk("%s: Could not register of device.\n",
+1 -1
arch/sparc/kernel/of_device_64.c
··· 676 676 if (!parent) 677 677 dev_set_name(&op->dev, "root"); 678 678 else 679 - dev_set_name(&op->dev, "%08x", dp->node); 679 + dev_set_name(&op->dev, "%08x", dp->phandle); 680 680 681 681 if (of_device_register(op)) { 682 682 printk("%s: Could not register of device.\n",
+7
arch/sparc/kernel/pci.c
··· 247 247 struct pci_bus *bus, int devfn) 248 248 { 249 249 struct dev_archdata *sd; 250 + struct pci_slot *slot; 250 251 struct of_device *op; 251 252 struct pci_dev *dev; 252 253 const char *type; ··· 287 286 dev->dev.bus = &pci_bus_type; 288 287 dev->devfn = devfn; 289 288 dev->multifunction = 0; /* maybe a lie? */ 289 + set_pcie_port_type(dev); 290 + 291 + list_for_each_entry(slot, &dev->bus->slots, list) 292 + if (PCI_SLOT(dev->devfn) == slot->number) 293 + dev->slot = slot; 290 294 291 295 dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff); 292 296 dev->device = of_getintprop_default(node, "device-id", 0xffff); ··· 328 322 329 323 dev->current_state = 4; /* unknown power state */ 330 324 dev->error_state = pci_channel_io_normal; 325 + dev->dma_mask = 0xffffffff; 331 326 332 327 if (!strcmp(node->name, "pci")) { 333 328 /* a PCI-PCI bridge */
-3
arch/sparc/kernel/prom.h
··· 4 4 #include <linux/spinlock.h> 5 5 #include <asm/prom.h> 6 6 7 - extern struct device_node *allnodes; /* temporary while merging */ 8 - extern rwlock_t devtree_lock; /* temporary while merging */ 9 - 10 7 extern void * prom_early_alloc(unsigned long size); 11 8 extern void irq_trans_init(struct device_node *dp); 12 9
+3 -15
arch/sparc/kernel/prom_common.c
··· 37 37 char *of_console_options; 38 38 EXPORT_SYMBOL(of_console_options); 39 39 40 - struct device_node *of_find_node_by_phandle(phandle handle) 41 - { 42 - struct device_node *np; 43 - 44 - for (np = allnodes; np; np = np->allnext) 45 - if (np->node == handle) 46 - break; 47 - 48 - return np; 49 - } 50 - EXPORT_SYMBOL(of_find_node_by_phandle); 51 - 52 40 int of_getintprop_default(struct device_node *np, const char *name, int def) 53 41 { 54 42 struct property *prop; ··· 77 89 void *old_val = prop->value; 78 90 int ret; 79 91 80 - ret = prom_setprop(dp->node, name, val, len); 92 + ret = prom_setprop(dp->phandle, name, val, len); 81 93 82 94 err = -EINVAL; 83 95 if (ret >= 0) { ··· 224 236 225 237 dp->name = get_one_property(node, "name"); 226 238 dp->type = get_one_property(node, "device_type"); 227 - dp->node = node; 239 + dp->phandle = node; 228 240 229 241 dp->properties = build_prop_list(node); 230 242 ··· 301 313 302 314 nextp = &allnodes->allnext; 303 315 allnodes->child = prom_build_tree(allnodes, 304 - prom_getchild(allnodes->node), 316 + prom_getchild(allnodes->phandle), 305 317 &nextp); 306 318 of_console_init(); 307 319
+1 -1
arch/sparc/kernel/smp_64.c
··· 370 370 } else { 371 371 struct device_node *dp = of_find_node_by_cpuid(cpu); 372 372 373 - prom_startcpu(dp->node, entry, cookie); 373 + prom_startcpu(dp->phandle, entry, cookie); 374 374 } 375 375 376 376 for (timeout = 0; timeout < 50000; timeout++) {
+4 -2
arch/sparc/kernel/tsb.S
··· 191 191 192 192 tsb_itlb_load: 193 193 /* Executable bit must be set. */ 194 - 661: andcc %g5, _PAGE_EXEC_4U, %g0 195 - .section .sun4v_1insn_patch, "ax" 194 + 661: sethi %hi(_PAGE_EXEC_4U), %g4 195 + andcc %g5, %g4, %g0 196 + .section .sun4v_2insn_patch, "ax" 196 197 .word 661b 197 198 andcc %g5, _PAGE_EXEC_4V, %g0 199 + nop 198 200 .previous 199 201 200 202 be,pn %xcc, tsb_do_fault
+2
arch/x86/include/asm/processor.h
··· 450 450 struct perf_event *ptrace_bps[HBP_NUM]; 451 451 /* Debug status used for traps, single steps, etc... */ 452 452 unsigned long debugreg6; 453 + /* Keep track of the exact dr7 value set by the user */ 454 + unsigned long ptrace_dr7; 453 455 /* Fault info: */ 454 456 unsigned long cr2; 455 457 unsigned long trap_no;
-8
arch/x86/kernel/acpi/boot.c
··· 1344 1344 }, 1345 1345 { 1346 1346 .callback = force_acpi_ht, 1347 - .ident = "ASUS P2B-DS", 1348 - .matches = { 1349 - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), 1350 - DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"), 1351 - }, 1352 - }, 1353 - { 1354 - .callback = force_acpi_ht, 1355 1347 .ident = "ASUS CUR-DLS", 1356 1348 .matches = { 1357 1349 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+7 -23
arch/x86/kernel/hw_breakpoint.c
··· 212 212 return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); 213 213 } 214 214 215 - /* 216 - * Store a breakpoint's encoded address, length, and type. 217 - */ 218 - static int arch_store_info(struct perf_event *bp) 219 - { 220 - struct arch_hw_breakpoint *info = counter_arch_bp(bp); 221 - /* 222 - * For kernel-addresses, either the address or symbol name can be 223 - * specified. 224 - */ 225 - if (info->name) 226 - info->address = (unsigned long) 227 - kallsyms_lookup_name(info->name); 228 - if (info->address) 229 - return 0; 230 - 231 - return -EINVAL; 232 - } 233 - 234 215 int arch_bp_generic_fields(int x86_len, int x86_type, 235 216 int *gen_len, int *gen_type) 236 217 { ··· 343 362 return ret; 344 363 } 345 364 346 - ret = arch_store_info(bp); 347 - 348 - if (ret < 0) 349 - return ret; 365 + /* 366 + * For kernel-addresses, either the address or symbol name can be 367 + * specified. 368 + */ 369 + if (info->name) 370 + info->address = (unsigned long) 371 + kallsyms_lookup_name(info->name); 350 372 /* 351 373 * Check that the low-order bits of the address are appropriate 352 374 * for the alignment implied by len.
+5 -2
arch/x86/kernel/ptrace.c
··· 702 702 } else if (n == 6) { 703 703 val = thread->debugreg6; 704 704 } else if (n == 7) { 705 - val = ptrace_get_dr7(thread->ptrace_bps); 705 + val = thread->ptrace_dr7; 706 706 } 707 707 return val; 708 708 } ··· 778 778 return rc; 779 779 } 780 780 /* All that's left is DR7 */ 781 - if (n == 7) 781 + if (n == 7) { 782 782 rc = ptrace_write_dr7(tsk, val); 783 + if (!rc) 784 + thread->ptrace_dr7 = val; 785 + } 783 786 784 787 ret_path: 785 788 return rc;
+2 -9
block/blk-core.c
··· 1147 1147 */ 1148 1148 static inline bool queue_should_plug(struct request_queue *q) 1149 1149 { 1150 - return !(blk_queue_nonrot(q) && blk_queue_queuing(q)); 1150 + return !(blk_queue_nonrot(q) && blk_queue_tagged(q)); 1151 1151 } 1152 1152 1153 1153 static int __make_request(struct request_queue *q, struct bio *bio) ··· 1859 1859 * and to it is freed is accounted as io that is in progress at 1860 1860 * the driver side. 1861 1861 */ 1862 - if (blk_account_rq(rq)) { 1862 + if (blk_account_rq(rq)) 1863 1863 q->in_flight[rq_is_sync(rq)]++; 1864 - /* 1865 - * Mark this device as supporting hardware queuing, if 1866 - * we have more IOs in flight than 4. 1867 - */ 1868 - if (!blk_queue_queuing(q) && queue_in_flight(q) > 4) 1869 - set_bit(QUEUE_FLAG_CQ, &q->queue_flags); 1870 - } 1871 1864 } 1872 1865 1873 1866 /**
+1
drivers/acpi/dock.c
··· 935 935 struct platform_device *dd; 936 936 937 937 id = dock_station_count; 938 + memset(&ds, 0, sizeof(ds)); 938 939 dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds)); 939 940 if (IS_ERR(dd)) 940 941 return PTR_ERR(dd);
+24 -12
drivers/acpi/processor_idle.c
··· 110 110 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 111 111 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, 112 112 (void *)2}, 113 + { set_max_cstate, "Pavilion zv5000", { 114 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 115 + DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")}, 116 + (void *)1}, 117 + { set_max_cstate, "Asus L8400B", { 118 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 119 + DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, 120 + (void *)1}, 113 121 {}, 114 122 }; 115 123 ··· 880 872 return(acpi_idle_enter_c1(dev, state)); 881 873 882 874 local_irq_disable(); 883 - current_thread_info()->status &= ~TS_POLLING; 884 - /* 885 - * TS_POLLING-cleared state must be visible before we test 886 - * NEED_RESCHED: 887 - */ 888 - smp_mb(); 875 + if (cx->entry_method != ACPI_CSTATE_FFH) { 876 + current_thread_info()->status &= ~TS_POLLING; 877 + /* 878 + * TS_POLLING-cleared state must be visible before we test 879 + * NEED_RESCHED: 880 + */ 881 + smp_mb(); 882 + } 889 883 890 884 if (unlikely(need_resched())) { 891 885 current_thread_info()->status |= TS_POLLING; ··· 967 957 } 968 958 969 959 local_irq_disable(); 970 - current_thread_info()->status &= ~TS_POLLING; 971 - /* 972 - * TS_POLLING-cleared state must be visible before we test 973 - * NEED_RESCHED: 974 - */ 975 - smp_mb(); 960 + if (cx->entry_method != ACPI_CSTATE_FFH) { 961 + current_thread_info()->status &= ~TS_POLLING; 962 + /* 963 + * TS_POLLING-cleared state must be visible before we test 964 + * NEED_RESCHED: 965 + */ 966 + smp_mb(); 967 + } 976 968 977 969 if (unlikely(need_resched())) { 978 970 current_thread_info()->status |= TS_POLLING;
+14
drivers/acpi/processor_pdc.c
··· 125 125 return status; 126 126 } 127 127 128 + static int early_pdc_done; 129 + 128 130 void acpi_processor_set_pdc(acpi_handle handle) 129 131 { 130 132 struct acpi_object_list *obj_list; 131 133 132 134 if (arch_has_acpi_pdc() == false) 135 + return; 136 + 137 + if (early_pdc_done) 133 138 return; 134 139 135 140 obj_list = acpi_processor_alloc_pdc(); ··· 155 150 early_pdc_optin = 1; 156 151 return 0; 157 152 } 153 + 154 + static int param_early_pdc_optin(char *s) 155 + { 156 + early_pdc_optin = 1; 157 + return 1; 158 + } 159 + __setup("acpi_early_pdc_eval", param_early_pdc_optin); 158 160 159 161 static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = { 160 162 { ··· 204 192 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 205 193 ACPI_UINT32_MAX, 206 194 early_init_pdc, NULL, NULL, NULL); 195 + 196 + early_pdc_done = 1; 207 197 }
+5 -1
drivers/acpi/processor_perflib.c
··· 413 413 if (result) 414 414 goto update_bios; 415 415 416 - return 0; 416 + /* We need to call _PPC once when cpufreq starts */ 417 + if (ignore_ppc != 1) 418 + result = acpi_processor_get_platform_limit(pr); 419 + 420 + return result; 417 421 418 422 /* 419 423 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
+22 -5
drivers/acpi/scan.c
··· 1336 1336 1337 1337 if (child) 1338 1338 *child = device; 1339 - return 0; 1339 + 1340 + if (device) 1341 + return 0; 1342 + else 1343 + return -ENODEV; 1340 1344 } 1345 + 1346 + /* 1347 + * acpi_bus_add and acpi_bus_start 1348 + * 1349 + * scan a given ACPI tree and (probably recently hot-plugged) 1350 + * create and add or starts found devices. 1351 + * 1352 + * If no devices were found -ENODEV is returned which does not 1353 + * mean that this is a real error, there just have been no suitable 1354 + * ACPI objects in the table trunk from which the kernel could create 1355 + * a device and add/start an appropriate driver. 1356 + */ 1341 1357 1342 1358 int 1343 1359 acpi_bus_add(struct acpi_device **child, ··· 1364 1348 memset(&ops, 0, sizeof(ops)); 1365 1349 ops.acpi_op_add = 1; 1366 1350 1367 - acpi_bus_scan(handle, &ops, child); 1368 - return 0; 1351 + return acpi_bus_scan(handle, &ops, child); 1369 1352 } 1370 1353 EXPORT_SYMBOL(acpi_bus_add); 1371 1354 ··· 1372 1357 { 1373 1358 struct acpi_bus_ops ops; 1374 1359 1360 + if (!device) 1361 + return -EINVAL; 1362 + 1375 1363 memset(&ops, 0, sizeof(ops)); 1376 1364 ops.acpi_op_start = 1; 1377 1365 1378 - acpi_bus_scan(device->handle, &ops, NULL); 1379 - return 0; 1366 + return acpi_bus_scan(device->handle, &ops, NULL); 1380 1367 } 1381 1368 EXPORT_SYMBOL(acpi_bus_start); 1382 1369
+2 -2
drivers/acpi/tables.c
··· 213 213 unsigned long table_end; 214 214 acpi_size tbl_size; 215 215 216 - if (acpi_disabled) 216 + if (acpi_disabled && !acpi_ht) 217 217 return -ENODEV; 218 218 219 219 if (!handler) ··· 280 280 struct acpi_table_header *table = NULL; 281 281 acpi_size tbl_size; 282 282 283 - if (acpi_disabled) 283 + if (acpi_disabled && !acpi_ht) 284 284 return -ENODEV; 285 285 286 286 if (!handler)
+10 -2
drivers/ata/ahci.c
··· 3082 3082 ahci_save_initial_config(pdev, hpriv); 3083 3083 3084 3084 /* prepare host */ 3085 - if (hpriv->cap & HOST_CAP_NCQ) 3086 - pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA; 3085 + if (hpriv->cap & HOST_CAP_NCQ) { 3086 + pi.flags |= ATA_FLAG_NCQ; 3087 + /* Auto-activate optimization is supposed to be supported on 3088 + all AHCI controllers indicating NCQ support, but it seems 3089 + to be broken at least on some NVIDIA MCP79 chipsets. 3090 + Until we get info on which NVIDIA chipsets don't have this 3091 + issue, if any, disable AA on all NVIDIA AHCIs. */ 3092 + if (pdev->vendor != PCI_VENDOR_ID_NVIDIA) 3093 + pi.flags |= ATA_FLAG_FPDMA_AA; 3094 + } 3087 3095 3088 3096 if (hpriv->cap & HOST_CAP_PMP) 3089 3097 pi.flags |= ATA_FLAG_PMP;
+2
drivers/base/class.c
··· 59 59 else 60 60 pr_debug("class '%s' does not have a release() function, " 61 61 "be careful\n", class->name); 62 + 63 + kfree(cp); 62 64 } 63 65 64 66 static struct sysfs_ops class_sysfs_ops = {
+46 -15
drivers/block/virtio_blk.c
··· 243 243 static int __devinit virtblk_probe(struct virtio_device *vdev) 244 244 { 245 245 struct virtio_blk *vblk; 246 + struct request_queue *q; 246 247 int err; 247 248 u64 cap; 248 - u32 v; 249 - u32 blk_size, sg_elems; 249 + u32 v, blk_size, sg_elems, opt_io_size; 250 + u16 min_io_size; 251 + u8 physical_block_exp, alignment_offset; 250 252 251 253 if (index_to_minor(index) >= 1 << MINORBITS) 252 254 return -ENOSPC; ··· 295 293 goto out_mempool; 296 294 } 297 295 298 - vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock); 299 - if (!vblk->disk->queue) { 296 + q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock); 297 + if (!q) { 300 298 err = -ENOMEM; 301 299 goto out_put_disk; 302 300 } 303 301 304 - vblk->disk->queue->queuedata = vblk; 302 + q->queuedata = vblk; 305 303 306 304 if (index < 26) { 307 305 sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26); ··· 325 323 326 324 /* If barriers are supported, tell block layer that queue is ordered */ 327 325 if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) 328 - blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_DRAIN_FLUSH, 326 + blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, 329 327 virtblk_prepare_flush); 330 328 else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) 331 - blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL); 329 + blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL); 332 330 333 331 /* If disk is read-only in the host, the guest should obey */ 334 332 if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) ··· 347 345 set_capacity(vblk->disk, cap); 348 346 349 347 /* We can handle whatever the host told us to handle. 
*/ 350 - blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2); 351 - blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2); 348 + blk_queue_max_phys_segments(q, vblk->sg_elems-2); 349 + blk_queue_max_hw_segments(q, vblk->sg_elems-2); 352 350 353 351 /* No need to bounce any requests */ 354 - blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY); 352 + blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); 355 353 356 354 /* No real sector limit. */ 357 - blk_queue_max_sectors(vblk->disk->queue, -1U); 355 + blk_queue_max_sectors(q, -1U); 358 356 359 357 /* Host can optionally specify maximum segment size and number of 360 358 * segments. */ ··· 362 360 offsetof(struct virtio_blk_config, size_max), 363 361 &v); 364 362 if (!err) 365 - blk_queue_max_segment_size(vblk->disk->queue, v); 363 + blk_queue_max_segment_size(q, v); 366 364 else 367 - blk_queue_max_segment_size(vblk->disk->queue, -1U); 365 + blk_queue_max_segment_size(q, -1U); 368 366 369 367 /* Host can optionally specify the block size of the device */ 370 368 err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE, 371 369 offsetof(struct virtio_blk_config, blk_size), 372 370 &blk_size); 373 371 if (!err) 374 - blk_queue_logical_block_size(vblk->disk->queue, blk_size); 372 + blk_queue_logical_block_size(q, blk_size); 373 + else 374 + blk_size = queue_logical_block_size(q); 375 + 376 + /* Use topology information if available */ 377 + err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, 378 + offsetof(struct virtio_blk_config, physical_block_exp), 379 + &physical_block_exp); 380 + if (!err && physical_block_exp) 381 + blk_queue_physical_block_size(q, 382 + blk_size * (1 << physical_block_exp)); 383 + 384 + err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, 385 + offsetof(struct virtio_blk_config, alignment_offset), 386 + &alignment_offset); 387 + if (!err && alignment_offset) 388 + blk_queue_alignment_offset(q, blk_size * alignment_offset); 389 + 390 + err = virtio_config_val(vdev, 
VIRTIO_BLK_F_TOPOLOGY, 391 + offsetof(struct virtio_blk_config, min_io_size), 392 + &min_io_size); 393 + if (!err && min_io_size) 394 + blk_queue_io_min(q, blk_size * min_io_size); 395 + 396 + err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, 397 + offsetof(struct virtio_blk_config, opt_io_size), 398 + &opt_io_size); 399 + if (!err && opt_io_size) 400 + blk_queue_io_opt(q, blk_size * opt_io_size); 401 + 375 402 376 403 add_disk(vblk->disk); 377 404 return 0; ··· 443 412 static unsigned int features[] = { 444 413 VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, 445 414 VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, 446 - VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH 415 + VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY 447 416 }; 448 417 449 418 /*
+8
drivers/char/Kconfig
··· 666 666 help 667 667 Virtio console for use with lguest and other hypervisors. 668 668 669 + Also serves as a general-purpose serial device for data 670 + transfer between the guest and host. Character devices at 671 + /dev/vportNpn will be created when corresponding ports are 672 + found, where N is the device number and n is the port number 673 + within that device. If specified by the host, a sysfs 674 + attribute called 'name' will be populated with a name for 675 + the port which can be used by udev scripts to create a 676 + symlink to the device. 669 677 670 678 config HVCS 671 679 tristate "IBM Hypervisor Virtual Console Server support"
+1 -1
drivers/char/hvc_beat.c
··· 99 99 100 100 static int __init hvc_beat_console_init(void) 101 101 { 102 - if (hvc_beat_useit && machine_is_compatible("Beat")) { 102 + if (hvc_beat_useit && of_machine_is_compatible("Beat")) { 103 103 hvc_instantiate(0, 0, &hvc_beat_get_put_ops); 104 104 } 105 105 return 0;
+1451 -165
drivers/char/virtio_console.c
··· 1 - /*D:300 2 - * The Guest console driver 3 - * 4 - * Writing console drivers is one of the few remaining Dark Arts in Linux. 5 - * Fortunately for us, the path of virtual consoles has been well-trodden by 6 - * the PowerPC folks, who wrote "hvc_console.c" to generically support any 7 - * virtual console. We use that infrastructure which only requires us to write 8 - * the basic put_chars and get_chars functions and call the right register 9 - * functions. 10 - :*/ 11 - 12 - /*M:002 The console can be flooded: while the Guest is processing input the 13 - * Host can send more. Buffering in the Host could alleviate this, but it is a 14 - * difficult problem in general. :*/ 15 - /* Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation 1 + /* 2 + * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation 3 + * Copyright (C) 2009, 2010 Red Hat, Inc. 16 4 * 17 5 * This program is free software; you can redistribute it and/or modify 18 6 * it under the terms of the GNU General Public License as published by ··· 16 28 * along with this program; if not, write to the Free Software 17 29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 30 */ 31 + #include <linux/cdev.h> 32 + #include <linux/debugfs.h> 33 + #include <linux/device.h> 19 34 #include <linux/err.h> 35 + #include <linux/fs.h> 20 36 #include <linux/init.h> 37 + #include <linux/list.h> 38 + #include <linux/poll.h> 39 + #include <linux/sched.h> 40 + #include <linux/spinlock.h> 21 41 #include <linux/virtio.h> 22 42 #include <linux/virtio_console.h> 43 + #include <linux/wait.h> 44 + #include <linux/workqueue.h> 23 45 #include "hvc_console.h" 24 46 25 - /*D:340 These represent our input and output console queues, and the virtio 26 - * operations for them. */ 27 - static struct virtqueue *in_vq, *out_vq; 28 - static struct virtio_device *vdev; 29 - 30 - /* This is our input buffer, and how much data is left in it. 
*/ 31 - static unsigned int in_len; 32 - static char *in, *inbuf; 33 - 34 - /* The operations for our console. */ 35 - static struct hv_ops virtio_cons; 36 - 37 - /* The hvc device */ 38 - static struct hvc_struct *hvc; 39 - 40 - /*D:310 The put_chars() callback is pretty straightforward. 47 + /* 48 + * This is a global struct for storing common data for all the devices 49 + * this driver handles. 41 50 * 42 - * We turn the characters into a scatter-gather list, add it to the output 43 - * queue and then kick the Host. Then we sit here waiting for it to finish: 44 - * inefficient in theory, but in practice implementations will do it 45 - * immediately (lguest's Launcher does). */ 46 - static int put_chars(u32 vtermno, const char *buf, int count) 51 + * Mainly, it has a linked list for all the consoles in one place so 52 + * that callbacks from hvc for get_chars(), put_chars() work properly 53 + * across multiple devices and multiple ports per device. 54 + */ 55 + struct ports_driver_data { 56 + /* Used for registering chardevs */ 57 + struct class *class; 58 + 59 + /* Used for exporting per-port information to debugfs */ 60 + struct dentry *debugfs_dir; 61 + 62 + /* Number of devices this driver is handling */ 63 + unsigned int index; 64 + 65 + /* 66 + * This is used to keep track of the number of hvc consoles 67 + * spawned by this driver. This number is given as the first 68 + * argument to hvc_alloc(). To correctly map an initial 69 + * console spawned via hvc_instantiate to the console being 70 + * hooked up via hvc_alloc, we need to pass the same vtermno. 71 + * 72 + * We also just assume the first console being initialised was 73 + * the first one that got used as the initial console. 
74 + */ 75 + unsigned int next_vtermno; 76 + 77 + /* All the console devices handled by this driver */ 78 + struct list_head consoles; 79 + }; 80 + static struct ports_driver_data pdrvdata; 81 + 82 + DEFINE_SPINLOCK(pdrvdata_lock); 83 + 84 + /* This struct holds information that's relevant only for console ports */ 85 + struct console { 86 + /* We'll place all consoles in a list in the pdrvdata struct */ 87 + struct list_head list; 88 + 89 + /* The hvc device associated with this console port */ 90 + struct hvc_struct *hvc; 91 + 92 + /* 93 + * This number identifies the number that we used to register 94 + * with hvc in hvc_instantiate() and hvc_alloc(); this is the 95 + * number passed on by the hvc callbacks to us to 96 + * differentiate between the other console ports handled by 97 + * this driver 98 + */ 99 + u32 vtermno; 100 + }; 101 + 102 + struct port_buffer { 103 + char *buf; 104 + 105 + /* size of the buffer in *buf above */ 106 + size_t size; 107 + 108 + /* used length of the buffer */ 109 + size_t len; 110 + /* offset in the buf from which to consume data */ 111 + size_t offset; 112 + }; 113 + 114 + /* 115 + * This is a per-device struct that stores data common to all the 116 + * ports for that device (vdev->priv). 
117 + */ 118 + struct ports_device { 119 + /* 120 + * Workqueue handlers where we process deferred work after 121 + * notification 122 + */ 123 + struct work_struct control_work; 124 + struct work_struct config_work; 125 + 126 + struct list_head ports; 127 + 128 + /* To protect the list of ports */ 129 + spinlock_t ports_lock; 130 + 131 + /* To protect the vq operations for the control channel */ 132 + spinlock_t cvq_lock; 133 + 134 + /* The current config space is stored here */ 135 + struct virtio_console_config config; 136 + 137 + /* The virtio device we're associated with */ 138 + struct virtio_device *vdev; 139 + 140 + /* 141 + * A couple of virtqueues for the control channel: one for 142 + * guest->host transfers, one for host->guest transfers 143 + */ 144 + struct virtqueue *c_ivq, *c_ovq; 145 + 146 + /* Array of per-port IO virtqueues */ 147 + struct virtqueue **in_vqs, **out_vqs; 148 + 149 + /* Used for numbering devices for sysfs and debugfs */ 150 + unsigned int drv_index; 151 + 152 + /* Major number for this device. Ports will be created as minors. */ 153 + int chr_major; 154 + }; 155 + 156 + /* This struct holds the per-port data */ 157 + struct port { 158 + /* Next port in the list, head is in the ports_device */ 159 + struct list_head list; 160 + 161 + /* Pointer to the parent virtio_console device */ 162 + struct ports_device *portdev; 163 + 164 + /* The current buffer from which data has to be fed to readers */ 165 + struct port_buffer *inbuf; 166 + 167 + /* 168 + * To protect the operations on the in_vq associated with this 169 + * port. Has to be a spinlock because it can be called from 170 + * interrupt context (get_char()). 
171 + */ 172 + spinlock_t inbuf_lock; 173 + 174 + /* The IO vqs for this port */ 175 + struct virtqueue *in_vq, *out_vq; 176 + 177 + /* File in the debugfs directory that exposes this port's information */ 178 + struct dentry *debugfs_file; 179 + 180 + /* 181 + * The entries in this struct will be valid if this port is 182 + * hooked up to an hvc console 183 + */ 184 + struct console cons; 185 + 186 + /* Each port associates with a separate char device */ 187 + struct cdev cdev; 188 + struct device *dev; 189 + 190 + /* A waitqueue for poll() or blocking read operations */ 191 + wait_queue_head_t waitqueue; 192 + 193 + /* The 'name' of the port that we expose via sysfs properties */ 194 + char *name; 195 + 196 + /* The 'id' to identify the port with the Host */ 197 + u32 id; 198 + 199 + /* Is the host device open */ 200 + bool host_connected; 201 + 202 + /* We should allow only one process to open a port */ 203 + bool guest_connected; 204 + }; 205 + 206 + /* This is the very early arch-specified put chars function. 
*/ 207 + static int (*early_put_chars)(u32, const char *, int); 208 + 209 + static struct port *find_port_by_vtermno(u32 vtermno) 47 210 { 48 - struct scatterlist sg[1]; 211 + struct port *port; 212 + struct console *cons; 213 + unsigned long flags; 214 + 215 + spin_lock_irqsave(&pdrvdata_lock, flags); 216 + list_for_each_entry(cons, &pdrvdata.consoles, list) { 217 + if (cons->vtermno == vtermno) { 218 + port = container_of(cons, struct port, cons); 219 + goto out; 220 + } 221 + } 222 + port = NULL; 223 + out: 224 + spin_unlock_irqrestore(&pdrvdata_lock, flags); 225 + return port; 226 + } 227 + 228 + static struct port *find_port_by_id(struct ports_device *portdev, u32 id) 229 + { 230 + struct port *port; 231 + unsigned long flags; 232 + 233 + spin_lock_irqsave(&portdev->ports_lock, flags); 234 + list_for_each_entry(port, &portdev->ports, list) 235 + if (port->id == id) 236 + goto out; 237 + port = NULL; 238 + out: 239 + spin_unlock_irqrestore(&portdev->ports_lock, flags); 240 + 241 + return port; 242 + } 243 + 244 + static struct port *find_port_by_vq(struct ports_device *portdev, 245 + struct virtqueue *vq) 246 + { 247 + struct port *port; 248 + unsigned long flags; 249 + 250 + spin_lock_irqsave(&portdev->ports_lock, flags); 251 + list_for_each_entry(port, &portdev->ports, list) 252 + if (port->in_vq == vq || port->out_vq == vq) 253 + goto out; 254 + port = NULL; 255 + out: 256 + spin_unlock_irqrestore(&portdev->ports_lock, flags); 257 + return port; 258 + } 259 + 260 + static bool is_console_port(struct port *port) 261 + { 262 + if (port->cons.hvc) 263 + return true; 264 + return false; 265 + } 266 + 267 + static inline bool use_multiport(struct ports_device *portdev) 268 + { 269 + /* 270 + * This condition can be true when put_chars is called from 271 + * early_init 272 + */ 273 + if (!portdev->vdev) 274 + return 0; 275 + return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT); 276 + } 277 + 278 + static void free_buf(struct port_buffer *buf) 279 
+ { 280 + kfree(buf->buf); 281 + kfree(buf); 282 + } 283 + 284 + static struct port_buffer *alloc_buf(size_t buf_size) 285 + { 286 + struct port_buffer *buf; 287 + 288 + buf = kmalloc(sizeof(*buf), GFP_KERNEL); 289 + if (!buf) 290 + goto fail; 291 + buf->buf = kzalloc(buf_size, GFP_KERNEL); 292 + if (!buf->buf) 293 + goto free_buf; 294 + buf->len = 0; 295 + buf->offset = 0; 296 + buf->size = buf_size; 297 + return buf; 298 + 299 + free_buf: 300 + kfree(buf); 301 + fail: 302 + return NULL; 303 + } 304 + 305 + /* Callers should take appropriate locks */ 306 + static void *get_inbuf(struct port *port) 307 + { 308 + struct port_buffer *buf; 309 + struct virtqueue *vq; 49 310 unsigned int len; 50 311 51 - /* This is a convenient routine to initialize a single-elem sg list */ 52 - sg_init_one(sg, buf, count); 53 - 54 - /* add_buf wants a token to identify this buffer: we hand it any 55 - * non-NULL pointer, since there's only ever one buffer. */ 56 - if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) >= 0) { 57 - /* Tell Host to go! */ 58 - out_vq->vq_ops->kick(out_vq); 59 - /* Chill out until it's done with the buffer. */ 60 - while (!out_vq->vq_ops->get_buf(out_vq, &len)) 61 - cpu_relax(); 312 + vq = port->in_vq; 313 + buf = vq->vq_ops->get_buf(vq, &len); 314 + if (buf) { 315 + buf->len = len; 316 + buf->offset = 0; 62 317 } 63 - 64 - /* We're expected to return the amount of data we wrote: all of it. */ 65 - return count; 318 + return buf; 66 319 } 67 320 68 - /* Create a scatter-gather list representing our input buffer and put it in the 69 - * queue. */ 70 - static void add_inbuf(void) 321 + /* 322 + * Create a scatter-gather list representing our input buffer and put 323 + * it in the queue. 324 + * 325 + * Callers should take appropriate locks. 
326 + */ 327 + static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf) 71 328 { 72 329 struct scatterlist sg[1]; 73 - sg_init_one(sg, inbuf, PAGE_SIZE); 330 + int ret; 74 331 75 - /* We should always be able to add one buffer to an empty queue. */ 76 - if (in_vq->vq_ops->add_buf(in_vq, sg, 0, 1, inbuf) < 0) 77 - BUG(); 78 - in_vq->vq_ops->kick(in_vq); 332 + sg_init_one(sg, buf->buf, buf->size); 333 + 334 + ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf); 335 + vq->vq_ops->kick(vq); 336 + return ret; 79 337 } 80 338 81 - /*D:350 get_chars() is the callback from the hvc_console infrastructure when 82 - * an interrupt is received. 339 + /* Discard any unread data this port has. Callers lockers. */ 340 + static void discard_port_data(struct port *port) 341 + { 342 + struct port_buffer *buf; 343 + struct virtqueue *vq; 344 + unsigned int len; 345 + int ret; 346 + 347 + vq = port->in_vq; 348 + if (port->inbuf) 349 + buf = port->inbuf; 350 + else 351 + buf = vq->vq_ops->get_buf(vq, &len); 352 + 353 + ret = 0; 354 + while (buf) { 355 + if (add_inbuf(vq, buf) < 0) { 356 + ret++; 357 + free_buf(buf); 358 + } 359 + buf = vq->vq_ops->get_buf(vq, &len); 360 + } 361 + port->inbuf = NULL; 362 + if (ret) 363 + dev_warn(port->dev, "Errors adding %d buffers back to vq\n", 364 + ret); 365 + } 366 + 367 + static bool port_has_data(struct port *port) 368 + { 369 + unsigned long flags; 370 + bool ret; 371 + 372 + spin_lock_irqsave(&port->inbuf_lock, flags); 373 + if (port->inbuf) { 374 + ret = true; 375 + goto out; 376 + } 377 + port->inbuf = get_inbuf(port); 378 + if (port->inbuf) { 379 + ret = true; 380 + goto out; 381 + } 382 + ret = false; 383 + out: 384 + spin_unlock_irqrestore(&port->inbuf_lock, flags); 385 + return ret; 386 + } 387 + 388 + static ssize_t send_control_msg(struct port *port, unsigned int event, 389 + unsigned int value) 390 + { 391 + struct scatterlist sg[1]; 392 + struct virtio_console_control cpkt; 393 + struct virtqueue *vq; 394 + int len; 395 + 396 + 
if (!use_multiport(port->portdev)) 397 + return 0; 398 + 399 + cpkt.id = port->id; 400 + cpkt.event = event; 401 + cpkt.value = value; 402 + 403 + vq = port->portdev->c_ovq; 404 + 405 + sg_init_one(sg, &cpkt, sizeof(cpkt)); 406 + if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) { 407 + vq->vq_ops->kick(vq); 408 + while (!vq->vq_ops->get_buf(vq, &len)) 409 + cpu_relax(); 410 + } 411 + return 0; 412 + } 413 + 414 + static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count) 415 + { 416 + struct scatterlist sg[1]; 417 + struct virtqueue *out_vq; 418 + ssize_t ret; 419 + unsigned int len; 420 + 421 + out_vq = port->out_vq; 422 + 423 + sg_init_one(sg, in_buf, in_count); 424 + ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf); 425 + 426 + /* Tell Host to go! */ 427 + out_vq->vq_ops->kick(out_vq); 428 + 429 + if (ret < 0) { 430 + len = 0; 431 + goto fail; 432 + } 433 + 434 + /* 435 + * Wait till the host acknowledges it pushed out the data we 436 + * sent. Also ensure we return to userspace the number of 437 + * bytes that were successfully consumed by the host. 438 + */ 439 + while (!out_vq->vq_ops->get_buf(out_vq, &len)) 440 + cpu_relax(); 441 + fail: 442 + /* We're expected to return the amount of data we wrote */ 443 + return len; 444 + } 445 + 446 + /* 447 + * Give out the data that's requested from the buffer that we have 448 + * queued up. 
449 + */ 450 + static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, 451 + bool to_user) 452 + { 453 + struct port_buffer *buf; 454 + unsigned long flags; 455 + 456 + if (!out_count || !port_has_data(port)) 457 + return 0; 458 + 459 + buf = port->inbuf; 460 + out_count = min(out_count, buf->len - buf->offset); 461 + 462 + if (to_user) { 463 + ssize_t ret; 464 + 465 + ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count); 466 + if (ret) 467 + return -EFAULT; 468 + } else { 469 + memcpy(out_buf, buf->buf + buf->offset, out_count); 470 + } 471 + 472 + buf->offset += out_count; 473 + 474 + if (buf->offset == buf->len) { 475 + /* 476 + * We're done using all the data in this buffer. 477 + * Re-queue so that the Host can send us more data. 478 + */ 479 + spin_lock_irqsave(&port->inbuf_lock, flags); 480 + port->inbuf = NULL; 481 + 482 + if (add_inbuf(port->in_vq, buf) < 0) 483 + dev_warn(port->dev, "failed add_buf\n"); 484 + 485 + spin_unlock_irqrestore(&port->inbuf_lock, flags); 486 + } 487 + /* Return the number of bytes actually copied */ 488 + return out_count; 489 + } 490 + 491 + /* The condition that must be true for polling to end */ 492 + static bool wait_is_over(struct port *port) 493 + { 494 + return port_has_data(port) || !port->host_connected; 495 + } 496 + 497 + static ssize_t port_fops_read(struct file *filp, char __user *ubuf, 498 + size_t count, loff_t *offp) 499 + { 500 + struct port *port; 501 + ssize_t ret; 502 + 503 + port = filp->private_data; 504 + 505 + if (!port_has_data(port)) { 506 + /* 507 + * If nothing's connected on the host just return 0 in 508 + * case of list_empty; this tells the userspace app 509 + * that there's no connection 510 + */ 511 + if (!port->host_connected) 512 + return 0; 513 + if (filp->f_flags & O_NONBLOCK) 514 + return -EAGAIN; 515 + 516 + ret = wait_event_interruptible(port->waitqueue, 517 + wait_is_over(port)); 518 + if (ret < 0) 519 + return ret; 520 + } 521 + /* 522 + * We could've 
received a disconnection message while we were 523 + * waiting for more data. 524 + * 525 + * This check is not clubbed in the if() statement above as we 526 + * might receive some data as well as the host could get 527 + * disconnected after we got woken up from our wait. So we 528 + * really want to give off whatever data we have and only then 529 + * check for host_connected. 530 + */ 531 + if (!port_has_data(port) && !port->host_connected) 532 + return 0; 533 + 534 + return fill_readbuf(port, ubuf, count, true); 535 + } 536 + 537 + static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, 538 + size_t count, loff_t *offp) 539 + { 540 + struct port *port; 541 + char *buf; 542 + ssize_t ret; 543 + 544 + port = filp->private_data; 545 + 546 + count = min((size_t)(32 * 1024), count); 547 + 548 + buf = kmalloc(count, GFP_KERNEL); 549 + if (!buf) 550 + return -ENOMEM; 551 + 552 + ret = copy_from_user(buf, ubuf, count); 553 + if (ret) { 554 + ret = -EFAULT; 555 + goto free_buf; 556 + } 557 + 558 + ret = send_buf(port, buf, count); 559 + free_buf: 560 + kfree(buf); 561 + return ret; 562 + } 563 + 564 + static unsigned int port_fops_poll(struct file *filp, poll_table *wait) 565 + { 566 + struct port *port; 567 + unsigned int ret; 568 + 569 + port = filp->private_data; 570 + poll_wait(filp, &port->waitqueue, wait); 571 + 572 + ret = 0; 573 + if (port->inbuf) 574 + ret |= POLLIN | POLLRDNORM; 575 + if (port->host_connected) 576 + ret |= POLLOUT; 577 + if (!port->host_connected) 578 + ret |= POLLHUP; 579 + 580 + return ret; 581 + } 582 + 583 + static int port_fops_release(struct inode *inode, struct file *filp) 584 + { 585 + struct port *port; 586 + 587 + port = filp->private_data; 588 + 589 + /* Notify host of port being closed */ 590 + send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); 591 + 592 + spin_lock_irq(&port->inbuf_lock); 593 + port->guest_connected = false; 594 + 595 + discard_port_data(port); 596 + 597 + spin_unlock_irq(&port->inbuf_lock); 
598 + 599 + return 0; 600 + } 601 + 602 + static int port_fops_open(struct inode *inode, struct file *filp) 603 + { 604 + struct cdev *cdev = inode->i_cdev; 605 + struct port *port; 606 + 607 + port = container_of(cdev, struct port, cdev); 608 + filp->private_data = port; 609 + 610 + /* 611 + * Don't allow opening of console port devices -- that's done 612 + * via /dev/hvc 613 + */ 614 + if (is_console_port(port)) 615 + return -ENXIO; 616 + 617 + /* Allow only one process to open a particular port at a time */ 618 + spin_lock_irq(&port->inbuf_lock); 619 + if (port->guest_connected) { 620 + spin_unlock_irq(&port->inbuf_lock); 621 + return -EMFILE; 622 + } 623 + 624 + port->guest_connected = true; 625 + spin_unlock_irq(&port->inbuf_lock); 626 + 627 + /* Notify host of port being opened */ 628 + send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); 629 + 630 + return 0; 631 + } 632 + 633 + /* 634 + * The file operations that we support: programs in the guest can open 635 + * a console device, read from it, write to it, poll for data and 636 + * close it. The devices are at 637 + * /dev/vport<device number>p<port number> 638 + */ 639 + static const struct file_operations port_fops = { 640 + .owner = THIS_MODULE, 641 + .open = port_fops_open, 642 + .read = port_fops_read, 643 + .write = port_fops_write, 644 + .poll = port_fops_poll, 645 + .release = port_fops_release, 646 + }; 647 + 648 + /* 649 + * The put_chars() callback is pretty straightforward. 83 650 * 84 - * Most of the code deals with the fact that the hvc_console() infrastructure 85 - * only asks us for 16 bytes at a time. We keep in_offset and in_used fields 86 - * for partially-filled buffers. */ 651 + * We turn the characters into a scatter-gather list, add it to the 652 + * output queue and then kick the Host. Then we sit here waiting for 653 + * it to finish: inefficient in theory, but in practice 654 + * implementations will do it immediately (lguest's Launcher does). 
655 + */ 656 + static int put_chars(u32 vtermno, const char *buf, int count) 657 + { 658 + struct port *port; 659 + 660 + port = find_port_by_vtermno(vtermno); 661 + if (!port) 662 + return 0; 663 + 664 + if (unlikely(early_put_chars)) 665 + return early_put_chars(vtermno, buf, count); 666 + 667 + return send_buf(port, (void *)buf, count); 668 + } 669 + 670 + /* 671 + * get_chars() is the callback from the hvc_console infrastructure 672 + * when an interrupt is received. 673 + * 674 + * We call out to fill_readbuf that gets us the required data from the 675 + * buffers that are queued up. 676 + */ 87 677 static int get_chars(u32 vtermno, char *buf, int count) 88 678 { 679 + struct port *port; 680 + 681 + port = find_port_by_vtermno(vtermno); 682 + if (!port) 683 + return 0; 684 + 89 685 /* If we don't have an input queue yet, we can't get input. */ 90 - BUG_ON(!in_vq); 686 + BUG_ON(!port->in_vq); 91 687 92 - /* No buffer? Try to get one. */ 93 - if (!in_len) { 94 - in = in_vq->vq_ops->get_buf(in_vq, &in_len); 95 - if (!in) 96 - return 0; 97 - } 98 - 99 - /* You want more than we have to give? Well, try wanting less! */ 100 - if (in_len < count) 101 - count = in_len; 102 - 103 - /* Copy across to their buffer and increment offset. */ 104 - memcpy(buf, in, count); 105 - in += count; 106 - in_len -= count; 107 - 108 - /* Finished? Re-register buffer so Host will use it again. */ 109 - if (in_len == 0) 110 - add_inbuf(); 111 - 112 - return count; 113 - } 114 - /*:*/ 115 - 116 - /*D:320 Console drivers are initialized very early so boot messages can go out, 117 - * so we do things slightly differently from the generic virtio initialization 118 - * of the net and block drivers. 119 - * 120 - * At this stage, the console is output-only. It's too early to set up a 121 - * virtqueue, so we let the drivers do some boutique early-output thing. 
*/ 122 - int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) 123 - { 124 - virtio_cons.put_chars = put_chars; 125 - return hvc_instantiate(0, 0, &virtio_cons); 688 + return fill_readbuf(port, buf, count, false); 126 689 } 127 690 128 - /* 129 - * virtio console configuration. This supports: 130 - * - console resize 131 - */ 132 - static void virtcons_apply_config(struct virtio_device *dev) 691 + static void resize_console(struct port *port) 133 692 { 693 + struct virtio_device *vdev; 134 694 struct winsize ws; 135 695 136 - if (virtio_has_feature(dev, VIRTIO_CONSOLE_F_SIZE)) { 137 - dev->config->get(dev, 138 - offsetof(struct virtio_console_config, cols), 139 - &ws.ws_col, sizeof(u16)); 140 - dev->config->get(dev, 141 - offsetof(struct virtio_console_config, rows), 142 - &ws.ws_row, sizeof(u16)); 143 - hvc_resize(hvc, ws); 696 + vdev = port->portdev->vdev; 697 + if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) { 698 + vdev->config->get(vdev, 699 + offsetof(struct virtio_console_config, cols), 700 + &ws.ws_col, sizeof(u16)); 701 + vdev->config->get(vdev, 702 + offsetof(struct virtio_console_config, rows), 703 + &ws.ws_row, sizeof(u16)); 704 + hvc_resize(port->cons.hvc, ws); 144 705 } 145 706 } 146 707 147 - /* 148 - * we support only one console, the hvc struct is a global var 149 - * We set the configuration at this point, since we now have a tty 150 - */ 708 + /* We set the configuration at this point, since we now have a tty */ 151 709 static int notifier_add_vio(struct hvc_struct *hp, int data) 152 710 { 711 + struct port *port; 712 + 713 + port = find_port_by_vtermno(hp->vtermno); 714 + if (!port) 715 + return -EINVAL; 716 + 153 717 hp->irq_requested = 1; 154 - virtcons_apply_config(vdev); 718 + resize_console(port); 155 719 156 720 return 0; 157 721 } ··· 713 173 hp->irq_requested = 0; 714 174 } 715 175 716 - static void hvc_handle_input(struct virtqueue *vq) 176 + /* The operations for console ports. 
*/ 177 + static const struct hv_ops hv_ops = { 178 + .get_chars = get_chars, 179 + .put_chars = put_chars, 180 + .notifier_add = notifier_add_vio, 181 + .notifier_del = notifier_del_vio, 182 + .notifier_hangup = notifier_del_vio, 183 + }; 184 + 185 + /* 186 + * Console drivers are initialized very early so boot messages can go 187 + * out, so we do things slightly differently from the generic virtio 188 + * initialization of the net and block drivers. 189 + * 190 + * At this stage, the console is output-only. It's too early to set 191 + * up a virtqueue, so we let the drivers do some boutique early-output 192 + * thing. 193 + */ 194 + int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) 717 195 { 718 - if (hvc_poll(hvc)) 196 + early_put_chars = put_chars; 197 + return hvc_instantiate(0, 0, &hv_ops); 198 + } 199 + 200 + int init_port_console(struct port *port) 201 + { 202 + int ret; 203 + 204 + /* 205 + * The Host's telling us this port is a console port. Hook it 206 + * up with an hvc console. 207 + * 208 + * To set up and manage our virtual console, we call 209 + * hvc_alloc(). 210 + * 211 + * The first argument of hvc_alloc() is the virtual console 212 + * number. The second argument is the parameter for the 213 + * notification mechanism (like irq number). We currently 214 + * leave this as zero, virtqueues have implicit notifications. 215 + * 216 + * The third argument is a "struct hv_ops" containing the 217 + * put_chars() get_chars(), notifier_add() and notifier_del() 218 + * pointers. The final argument is the output buffer size: we 219 + * can do any size, so we put PAGE_SIZE here. 
220 + */ 221 + port->cons.vtermno = pdrvdata.next_vtermno; 222 + 223 + port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE); 224 + if (IS_ERR(port->cons.hvc)) { 225 + ret = PTR_ERR(port->cons.hvc); 226 + dev_err(port->dev, 227 + "error %d allocating hvc for port\n", ret); 228 + port->cons.hvc = NULL; 229 + return ret; 230 + } 231 + spin_lock_irq(&pdrvdata_lock); 232 + pdrvdata.next_vtermno++; 233 + list_add_tail(&port->cons.list, &pdrvdata.consoles); 234 + spin_unlock_irq(&pdrvdata_lock); 235 + port->guest_connected = true; 236 + 237 + /* Notify host of port being opened */ 238 + send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); 239 + 240 + return 0; 241 + } 242 + 243 + static ssize_t show_port_name(struct device *dev, 244 + struct device_attribute *attr, char *buffer) 245 + { 246 + struct port *port; 247 + 248 + port = dev_get_drvdata(dev); 249 + 250 + return sprintf(buffer, "%s\n", port->name); 251 + } 252 + 253 + static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL); 254 + 255 + static struct attribute *port_sysfs_entries[] = { 256 + &dev_attr_name.attr, 257 + NULL 258 + }; 259 + 260 + static struct attribute_group port_attribute_group = { 261 + .name = NULL, /* put in device directory */ 262 + .attrs = port_sysfs_entries, 263 + }; 264 + 265 + static int debugfs_open(struct inode *inode, struct file *filp) 266 + { 267 + filp->private_data = inode->i_private; 268 + return 0; 269 + } 270 + 271 + static ssize_t debugfs_read(struct file *filp, char __user *ubuf, 272 + size_t count, loff_t *offp) 273 + { 274 + struct port *port; 275 + char *buf; 276 + ssize_t ret, out_offset, out_count; 277 + 278 + out_count = 1024; 279 + buf = kmalloc(out_count, GFP_KERNEL); 280 + if (!buf) 281 + return -ENOMEM; 282 + 283 + port = filp->private_data; 284 + out_offset = 0; 285 + out_offset += snprintf(buf + out_offset, out_count, 286 + "name: %s\n", port->name ? 
port->name : ""); 287 + out_offset += snprintf(buf + out_offset, out_count - out_offset, 288 + "guest_connected: %d\n", port->guest_connected); 289 + out_offset += snprintf(buf + out_offset, out_count - out_offset, 290 + "host_connected: %d\n", port->host_connected); 291 + out_offset += snprintf(buf + out_offset, out_count - out_offset, 292 + "is_console: %s\n", 293 + is_console_port(port) ? "yes" : "no"); 294 + out_offset += snprintf(buf + out_offset, out_count - out_offset, 295 + "console_vtermno: %u\n", port->cons.vtermno); 296 + 297 + ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset); 298 + kfree(buf); 299 + return ret; 300 + } 301 + 302 + static const struct file_operations port_debugfs_ops = { 303 + .owner = THIS_MODULE, 304 + .open = debugfs_open, 305 + .read = debugfs_read, 306 + }; 307 + 308 + /* Remove all port-specific data. */ 309 + static int remove_port(struct port *port) 310 + { 311 + struct port_buffer *buf; 312 + 313 + spin_lock_irq(&port->portdev->ports_lock); 314 + list_del(&port->list); 315 + spin_unlock_irq(&port->portdev->ports_lock); 316 + 317 + if (is_console_port(port)) { 318 + spin_lock_irq(&pdrvdata_lock); 319 + list_del(&port->cons.list); 320 + spin_unlock_irq(&pdrvdata_lock); 321 + hvc_remove(port->cons.hvc); 322 + } 323 + if (port->guest_connected) 324 + send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); 325 + 326 + sysfs_remove_group(&port->dev->kobj, &port_attribute_group); 327 + device_destroy(pdrvdata.class, port->dev->devt); 328 + cdev_del(&port->cdev); 329 + 330 + /* Remove unused data this port might have received. */ 331 + discard_port_data(port); 332 + 333 + /* Remove buffers we queued up for the Host to send us data in. 
*/ 334 + while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) 335 + free_buf(buf); 336 + 337 + kfree(port->name); 338 + 339 + debugfs_remove(port->debugfs_file); 340 + 341 + kfree(port); 342 + return 0; 343 + } 344 + 345 + /* Any private messages that the Host and Guest want to share */ 346 + static void handle_control_message(struct ports_device *portdev, 347 + struct port_buffer *buf) 348 + { 349 + struct virtio_console_control *cpkt; 350 + struct port *port; 351 + size_t name_size; 352 + int err; 353 + 354 + cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); 355 + 356 + port = find_port_by_id(portdev, cpkt->id); 357 + if (!port) { 358 + /* No valid header at start of buffer. Drop it. */ 359 + dev_dbg(&portdev->vdev->dev, 360 + "Invalid index %u in control packet\n", cpkt->id); 361 + return; 362 + } 363 + 364 + switch (cpkt->event) { 365 + case VIRTIO_CONSOLE_CONSOLE_PORT: 366 + if (!cpkt->value) 367 + break; 368 + if (is_console_port(port)) 369 + break; 370 + 371 + init_port_console(port); 372 + /* 373 + * Could remove the port here in case init fails - but 374 + * have to notify the host first. 
375 + */ 376 + break; 377 + case VIRTIO_CONSOLE_RESIZE: 378 + if (!is_console_port(port)) 379 + break; 380 + port->cons.hvc->irq_requested = 1; 381 + resize_console(port); 382 + break; 383 + case VIRTIO_CONSOLE_PORT_OPEN: 384 + port->host_connected = cpkt->value; 385 + wake_up_interruptible(&port->waitqueue); 386 + break; 387 + case VIRTIO_CONSOLE_PORT_NAME: 388 + /* 389 + * Skip the size of the header and the cpkt to get the size 390 + * of the name that was sent 391 + */ 392 + name_size = buf->len - buf->offset - sizeof(*cpkt) + 1; 393 + 394 + port->name = kmalloc(name_size, GFP_KERNEL); 395 + if (!port->name) { 396 + dev_err(port->dev, 397 + "Not enough space to store port name\n"); 398 + break; 399 + } 400 + strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt), 401 + name_size - 1); 402 + port->name[name_size - 1] = 0; 403 + 404 + /* 405 + * Since we only have one sysfs attribute, 'name', 406 + * create it only if we have a name for the port. 407 + */ 408 + err = sysfs_create_group(&port->dev->kobj, 409 + &port_attribute_group); 410 + if (err) 411 + dev_err(port->dev, 412 + "Error %d creating sysfs device attributes\n", 413 + err); 414 + 415 + break; 416 + case VIRTIO_CONSOLE_PORT_REMOVE: 417 + /* 418 + * Hot unplug the port. We don't decrement nr_ports 419 + * since we don't want to deal with extra complexities 420 + * of using the lowest-available port id: We can just 421 + * pick up the nr_ports number as the id and not have 422 + * userspace send it to us. This helps us in two 423 + * ways: 424 + * 425 + * - We don't need to have a 'port_id' field in the 426 + * config space when a port is hot-added. This is a 427 + * good thing as we might queue up multiple hotplug 428 + * requests issued in our workqueue. 429 + * 430 + * - Another way to deal with this would have been to 431 + * use a bitmap of the active ports and select the 432 + * lowest non-active port from that map. 
That 433 + * bloats the already tight config space and we 434 + * would end up artificially limiting the 435 + * max. number of ports to sizeof(bitmap). Right 436 + * now we can support 2^32 ports (as the port id is 437 + * stored in a u32 type). 438 + * 439 + */ 440 + remove_port(port); 441 + break; 442 + } 443 + } 444 + 445 + static void control_work_handler(struct work_struct *work) 446 + { 447 + struct ports_device *portdev; 448 + struct virtqueue *vq; 449 + struct port_buffer *buf; 450 + unsigned int len; 451 + 452 + portdev = container_of(work, struct ports_device, control_work); 453 + vq = portdev->c_ivq; 454 + 455 + spin_lock(&portdev->cvq_lock); 456 + while ((buf = vq->vq_ops->get_buf(vq, &len))) { 457 + spin_unlock(&portdev->cvq_lock); 458 + 459 + buf->len = len; 460 + buf->offset = 0; 461 + 462 + handle_control_message(portdev, buf); 463 + 464 + spin_lock(&portdev->cvq_lock); 465 + if (add_inbuf(portdev->c_ivq, buf) < 0) { 466 + dev_warn(&portdev->vdev->dev, 467 + "Error adding buffer to queue\n"); 468 + free_buf(buf); 469 + } 470 + } 471 + spin_unlock(&portdev->cvq_lock); 472 + } 473 + 474 + static void in_intr(struct virtqueue *vq) 475 + { 476 + struct port *port; 477 + unsigned long flags; 478 + 479 + port = find_port_by_vq(vq->vdev->priv, vq); 480 + if (!port) 481 + return; 482 + 483 + spin_lock_irqsave(&port->inbuf_lock, flags); 484 + if (!port->inbuf) 485 + port->inbuf = get_inbuf(port); 486 + 487 + /* 488 + * Don't queue up data when port is closed. This condition 489 + * can be reached when a console port is not yet connected (no 490 + * tty is spawned) and the host sends out data to console 491 + * ports. For generic serial ports, the host won't 492 + * (shouldn't) send data till the guest is connected. 
493 + */ 494 + if (!port->guest_connected) 495 + discard_port_data(port); 496 + 497 + spin_unlock_irqrestore(&port->inbuf_lock, flags); 498 + 499 + wake_up_interruptible(&port->waitqueue); 500 + 501 + if (is_console_port(port) && hvc_poll(port->cons.hvc)) 719 502 hvc_kick(); 720 503 } 721 504 722 - /*D:370 Once we're further in boot, we get probed like any other virtio device. 723 - * At this stage we set up the output virtqueue. 724 - * 725 - * To set up and manage our virtual console, we call hvc_alloc(). Since we 726 - * never remove the console device we never need this pointer again. 727 - * 728 - * Finally we put our input buffer in the input queue, ready to receive. */ 729 - static int __devinit virtcons_probe(struct virtio_device *dev) 505 + static void control_intr(struct virtqueue *vq) 730 506 { 731 - vq_callback_t *callbacks[] = { hvc_handle_input, NULL}; 732 - const char *names[] = { "input", "output" }; 733 - struct virtqueue *vqs[2]; 507 + struct ports_device *portdev; 508 + 509 + portdev = vq->vdev->priv; 510 + schedule_work(&portdev->control_work); 511 + } 512 + 513 + static void config_intr(struct virtio_device *vdev) 514 + { 515 + struct ports_device *portdev; 516 + 517 + portdev = vdev->priv; 518 + if (use_multiport(portdev)) { 519 + /* Handle port hot-add */ 520 + schedule_work(&portdev->config_work); 521 + } 522 + /* 523 + * We'll use this way of resizing only for legacy support. 
524 + * For newer userspace (VIRTIO_CONSOLE_F_MULTPORT+), use 525 + * control messages to indicate console size changes so that 526 + * it can be done per-port 527 + */ 528 + resize_console(find_port_by_id(portdev, 0)); 529 + } 530 + 531 + static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) 532 + { 533 + struct port_buffer *buf; 534 + unsigned int ret; 734 535 int err; 735 536 736 - vdev = dev; 537 + ret = 0; 538 + do { 539 + buf = alloc_buf(PAGE_SIZE); 540 + if (!buf) 541 + break; 737 542 738 - /* This is the scratch page we use to receive console input */ 739 - inbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); 740 - if (!inbuf) { 543 + spin_lock_irq(lock); 544 + err = add_inbuf(vq, buf); 545 + if (err < 0) { 546 + spin_unlock_irq(lock); 547 + free_buf(buf); 548 + break; 549 + } 550 + ret++; 551 + spin_unlock_irq(lock); 552 + } while (err > 0); 553 + 554 + return ret; 555 + } 556 + 557 + static int add_port(struct ports_device *portdev, u32 id) 558 + { 559 + char debugfs_name[16]; 560 + struct port *port; 561 + struct port_buffer *buf; 562 + dev_t devt; 563 + int err; 564 + 565 + port = kmalloc(sizeof(*port), GFP_KERNEL); 566 + if (!port) { 741 567 err = -ENOMEM; 742 568 goto fail; 743 569 } 744 570 745 - /* Find the queues. */ 746 - /* FIXME: This is why we want to wean off hvc: we do nothing 747 - * when input comes in. */ 748 - err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names); 749 - if (err) 750 - goto free; 571 + port->portdev = portdev; 572 + port->id = id; 751 573 752 - in_vq = vqs[0]; 753 - out_vq = vqs[1]; 574 + port->name = NULL; 575 + port->inbuf = NULL; 576 + port->cons.hvc = NULL; 754 577 755 - /* Start using the new console output. 
*/ 756 - virtio_cons.get_chars = get_chars; 757 - virtio_cons.put_chars = put_chars; 758 - virtio_cons.notifier_add = notifier_add_vio; 759 - virtio_cons.notifier_del = notifier_del_vio; 760 - virtio_cons.notifier_hangup = notifier_del_vio; 578 + port->host_connected = port->guest_connected = false; 761 579 762 - /* The first argument of hvc_alloc() is the virtual console number, so 763 - * we use zero. The second argument is the parameter for the 764 - * notification mechanism (like irq number). We currently leave this 765 - * as zero, virtqueues have implicit notifications. 766 - * 767 - * The third argument is a "struct hv_ops" containing the put_chars() 768 - * get_chars(), notifier_add() and notifier_del() pointers. 769 - * The final argument is the output buffer size: we can do any size, 770 - * so we put PAGE_SIZE here. */ 771 - hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE); 772 - if (IS_ERR(hvc)) { 773 - err = PTR_ERR(hvc); 774 - goto free_vqs; 580 + port->in_vq = portdev->in_vqs[port->id]; 581 + port->out_vq = portdev->out_vqs[port->id]; 582 + 583 + cdev_init(&port->cdev, &port_fops); 584 + 585 + devt = MKDEV(portdev->chr_major, id); 586 + err = cdev_add(&port->cdev, devt, 1); 587 + if (err < 0) { 588 + dev_err(&port->portdev->vdev->dev, 589 + "Error %d adding cdev for port %u\n", err, id); 590 + goto free_port; 591 + } 592 + port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, 593 + devt, port, "vport%up%u", 594 + port->portdev->drv_index, id); 595 + if (IS_ERR(port->dev)) { 596 + err = PTR_ERR(port->dev); 597 + dev_err(&port->portdev->vdev->dev, 598 + "Error %d creating device for port %u\n", 599 + err, id); 600 + goto free_cdev; 775 601 } 776 602 777 - /* Register the input buffer the first time. */ 778 - add_inbuf(); 603 + spin_lock_init(&port->inbuf_lock); 604 + init_waitqueue_head(&port->waitqueue); 605 + 606 + /* Fill the in_vq with buffers so the host can send us data. 
*/ 607 + err = fill_queue(port->in_vq, &port->inbuf_lock); 608 + if (!err) { 609 + dev_err(port->dev, "Error allocating inbufs\n"); 610 + err = -ENOMEM; 611 + goto free_device; 612 + } 613 + 614 + /* 615 + * If we're not using multiport support, this has to be a console port 616 + */ 617 + if (!use_multiport(port->portdev)) { 618 + err = init_port_console(port); 619 + if (err) 620 + goto free_inbufs; 621 + } 622 + 623 + spin_lock_irq(&portdev->ports_lock); 624 + list_add_tail(&port->list, &port->portdev->ports); 625 + spin_unlock_irq(&portdev->ports_lock); 626 + 627 + /* 628 + * Tell the Host we're set so that it can send us various 629 + * configuration parameters for this port (eg, port name, 630 + * caching, whether this is a console port, etc.) 631 + */ 632 + send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); 633 + 634 + if (pdrvdata.debugfs_dir) { 635 + /* 636 + * Finally, create the debugfs file that we can use to 637 + * inspect a port's state at any time 638 + */ 639 + sprintf(debugfs_name, "vport%up%u", 640 + port->portdev->drv_index, id); 641 + port->debugfs_file = debugfs_create_file(debugfs_name, 0444, 642 + pdrvdata.debugfs_dir, 643 + port, 644 + &port_debugfs_ops); 645 + } 646 + return 0; 647 + 648 + free_inbufs: 649 + while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq))) 650 + free_buf(buf); 651 + free_device: 652 + device_destroy(pdrvdata.class, port->dev->devt); 653 + free_cdev: 654 + cdev_del(&port->cdev); 655 + free_port: 656 + kfree(port); 657 + fail: 658 + return err; 659 + } 660 + 661 + /* 662 + * The workhandler for config-space updates. 663 + * 664 + * This is called when ports are hot-added. 
665 + */ 666 + static void config_work_handler(struct work_struct *work) 667 + { 668 + struct virtio_console_config virtconconf; 669 + struct ports_device *portdev; 670 + struct virtio_device *vdev; 671 + int err; 672 + 673 + portdev = container_of(work, struct ports_device, config_work); 674 + 675 + vdev = portdev->vdev; 676 + vdev->config->get(vdev, 677 + offsetof(struct virtio_console_config, nr_ports), 678 + &virtconconf.nr_ports, 679 + sizeof(virtconconf.nr_ports)); 680 + 681 + if (portdev->config.nr_ports == virtconconf.nr_ports) { 682 + /* 683 + * Port 0 got hot-added. Since we already did all the 684 + * other initialisation for it, just tell the Host 685 + * that the port is ready if we find the port. In 686 + * case the port was hot-removed earlier, we call 687 + * add_port to add the port. 688 + */ 689 + struct port *port; 690 + 691 + port = find_port_by_id(portdev, 0); 692 + if (!port) 693 + add_port(portdev, 0); 694 + else 695 + send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); 696 + return; 697 + } 698 + if (virtconconf.nr_ports > portdev->config.max_nr_ports) { 699 + dev_warn(&vdev->dev, 700 + "More ports specified (%u) than allowed (%u)", 701 + portdev->config.nr_ports + 1, 702 + portdev->config.max_nr_ports); 703 + return; 704 + } 705 + if (virtconconf.nr_ports < portdev->config.nr_ports) 706 + return; 707 + 708 + /* Hot-add ports */ 709 + while (virtconconf.nr_ports - portdev->config.nr_ports) { 710 + err = add_port(portdev, portdev->config.nr_ports); 711 + if (err) 712 + break; 713 + portdev->config.nr_ports++; 714 + } 715 + } 716 + 717 + static int init_vqs(struct ports_device *portdev) 718 + { 719 + vq_callback_t **io_callbacks; 720 + char **io_names; 721 + struct virtqueue **vqs; 722 + u32 i, j, nr_ports, nr_queues; 723 + int err; 724 + 725 + nr_ports = portdev->config.max_nr_ports; 726 + nr_queues = use_multiport(portdev) ? 
(nr_ports + 1) * 2 : 2; 727 + 728 + vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); 729 + if (!vqs) { 730 + err = -ENOMEM; 731 + goto fail; 732 + } 733 + io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); 734 + if (!io_callbacks) { 735 + err = -ENOMEM; 736 + goto free_vqs; 737 + } 738 + io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); 739 + if (!io_names) { 740 + err = -ENOMEM; 741 + goto free_callbacks; 742 + } 743 + portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), 744 + GFP_KERNEL); 745 + if (!portdev->in_vqs) { 746 + err = -ENOMEM; 747 + goto free_names; 748 + } 749 + portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), 750 + GFP_KERNEL); 751 + if (!portdev->out_vqs) { 752 + err = -ENOMEM; 753 + goto free_invqs; 754 + } 755 + 756 + /* 757 + * For backward compat (newer host but older guest), the host 758 + * spawns a console port first and also inits the vqs for port 759 + * 0 before others. 760 + */ 761 + j = 0; 762 + io_callbacks[j] = in_intr; 763 + io_callbacks[j + 1] = NULL; 764 + io_names[j] = "input"; 765 + io_names[j + 1] = "output"; 766 + j += 2; 767 + 768 + if (use_multiport(portdev)) { 769 + io_callbacks[j] = control_intr; 770 + io_callbacks[j + 1] = NULL; 771 + io_names[j] = "control-i"; 772 + io_names[j + 1] = "control-o"; 773 + 774 + for (i = 1; i < nr_ports; i++) { 775 + j += 2; 776 + io_callbacks[j] = in_intr; 777 + io_callbacks[j + 1] = NULL; 778 + io_names[j] = "input"; 779 + io_names[j + 1] = "output"; 780 + } 781 + } 782 + /* Find the queues. 
*/ 783 + err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs, 784 + io_callbacks, 785 + (const char **)io_names); 786 + if (err) 787 + goto free_outvqs; 788 + 789 + j = 0; 790 + portdev->in_vqs[0] = vqs[0]; 791 + portdev->out_vqs[0] = vqs[1]; 792 + j += 2; 793 + if (use_multiport(portdev)) { 794 + portdev->c_ivq = vqs[j]; 795 + portdev->c_ovq = vqs[j + 1]; 796 + 797 + for (i = 1; i < nr_ports; i++) { 798 + j += 2; 799 + portdev->in_vqs[i] = vqs[j]; 800 + portdev->out_vqs[i] = vqs[j + 1]; 801 + } 802 + } 803 + kfree(io_callbacks); 804 + kfree(io_names); 805 + kfree(vqs); 806 + 807 + return 0; 808 + 809 + free_names: 810 + kfree(io_names); 811 + free_callbacks: 812 + kfree(io_callbacks); 813 + free_outvqs: 814 + kfree(portdev->out_vqs); 815 + free_invqs: 816 + kfree(portdev->in_vqs); 817 + free_vqs: 818 + kfree(vqs); 819 + fail: 820 + return err; 821 + } 822 + 823 + static const struct file_operations portdev_fops = { 824 + .owner = THIS_MODULE, 825 + }; 826 + 827 + /* 828 + * Once we're further in boot, we get probed like any other virtio 829 + * device. 830 + * 831 + * If the host also supports multiple console ports, we check the 832 + * config space to see how many ports the host has spawned. We 833 + * initialize each port found. 834 + */ 835 + static int __devinit virtcons_probe(struct virtio_device *vdev) 836 + { 837 + struct ports_device *portdev; 838 + u32 i; 839 + int err; 840 + bool multiport; 841 + 842 + portdev = kmalloc(sizeof(*portdev), GFP_KERNEL); 843 + if (!portdev) { 844 + err = -ENOMEM; 845 + goto fail; 846 + } 847 + 848 + /* Attach this portdev to this virtio_device, and vice-versa. 
*/ 849 + portdev->vdev = vdev; 850 + vdev->priv = portdev; 851 + 852 + spin_lock_irq(&pdrvdata_lock); 853 + portdev->drv_index = pdrvdata.index++; 854 + spin_unlock_irq(&pdrvdata_lock); 855 + 856 + portdev->chr_major = register_chrdev(0, "virtio-portsdev", 857 + &portdev_fops); 858 + if (portdev->chr_major < 0) { 859 + dev_err(&vdev->dev, 860 + "Error %d registering chrdev for device %u\n", 861 + portdev->chr_major, portdev->drv_index); 862 + err = portdev->chr_major; 863 + goto free; 864 + } 865 + 866 + multiport = false; 867 + portdev->config.nr_ports = 1; 868 + portdev->config.max_nr_ports = 1; 869 + if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) { 870 + multiport = true; 871 + vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT; 872 + 873 + vdev->config->get(vdev, offsetof(struct virtio_console_config, 874 + nr_ports), 875 + &portdev->config.nr_ports, 876 + sizeof(portdev->config.nr_ports)); 877 + vdev->config->get(vdev, offsetof(struct virtio_console_config, 878 + max_nr_ports), 879 + &portdev->config.max_nr_ports, 880 + sizeof(portdev->config.max_nr_ports)); 881 + if (portdev->config.nr_ports > portdev->config.max_nr_ports) { 882 + dev_warn(&vdev->dev, 883 + "More ports (%u) specified than allowed (%u). 
Will init %u ports.", 884 + portdev->config.nr_ports, 885 + portdev->config.max_nr_ports, 886 + portdev->config.max_nr_ports); 887 + 888 + portdev->config.nr_ports = portdev->config.max_nr_ports; 889 + } 890 + } 891 + 892 + /* Let the Host know we support multiple ports.*/ 893 + vdev->config->finalize_features(vdev); 894 + 895 + err = init_vqs(portdev); 896 + if (err < 0) { 897 + dev_err(&vdev->dev, "Error %d initializing vqs\n", err); 898 + goto free_chrdev; 899 + } 900 + 901 + spin_lock_init(&portdev->ports_lock); 902 + INIT_LIST_HEAD(&portdev->ports); 903 + 904 + if (multiport) { 905 + spin_lock_init(&portdev->cvq_lock); 906 + INIT_WORK(&portdev->control_work, &control_work_handler); 907 + INIT_WORK(&portdev->config_work, &config_work_handler); 908 + 909 + err = fill_queue(portdev->c_ivq, &portdev->cvq_lock); 910 + if (!err) { 911 + dev_err(&vdev->dev, 912 + "Error allocating buffers for control queue\n"); 913 + err = -ENOMEM; 914 + goto free_vqs; 915 + } 916 + } 917 + 918 + for (i = 0; i < portdev->config.nr_ports; i++) 919 + add_port(portdev, i); 920 + 921 + /* Start using the new console output. 
*/ 922 + early_put_chars = NULL; 779 923 return 0; 780 924 781 925 free_vqs: 782 926 vdev->config->del_vqs(vdev); 927 + kfree(portdev->in_vqs); 928 + kfree(portdev->out_vqs); 929 + free_chrdev: 930 + unregister_chrdev(portdev->chr_major, "virtio-portsdev"); 783 931 free: 784 - kfree(inbuf); 932 + kfree(portdev); 785 933 fail: 786 934 return err; 935 + } 936 + 937 + static void virtcons_remove(struct virtio_device *vdev) 938 + { 939 + struct ports_device *portdev; 940 + struct port *port, *port2; 941 + struct port_buffer *buf; 942 + unsigned int len; 943 + 944 + portdev = vdev->priv; 945 + 946 + cancel_work_sync(&portdev->control_work); 947 + cancel_work_sync(&portdev->config_work); 948 + 949 + list_for_each_entry_safe(port, port2, &portdev->ports, list) 950 + remove_port(port); 951 + 952 + unregister_chrdev(portdev->chr_major, "virtio-portsdev"); 953 + 954 + while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len))) 955 + free_buf(buf); 956 + 957 + while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq))) 958 + free_buf(buf); 959 + 960 + vdev->config->del_vqs(vdev); 961 + kfree(portdev->in_vqs); 962 + kfree(portdev->out_vqs); 963 + 964 + kfree(portdev); 787 965 } 788 966 789 967 static struct virtio_device_id id_table[] = { ··· 1511 253 1512 254 static unsigned int features[] = { 1513 255 VIRTIO_CONSOLE_F_SIZE, 256 + VIRTIO_CONSOLE_F_MULTIPORT, 1514 257 }; 1515 258 1516 259 static struct virtio_driver virtio_console = { ··· 1521 262 .driver.owner = THIS_MODULE, 1522 263 .id_table = id_table, 1523 264 .probe = virtcons_probe, 1524 - .config_changed = virtcons_apply_config, 265 + .remove = virtcons_remove, 266 + .config_changed = config_intr, 1525 267 }; 1526 268 1527 269 static int __init init(void) 1528 270 { 271 + int err; 272 + 273 + pdrvdata.class = class_create(THIS_MODULE, "virtio-ports"); 274 + if (IS_ERR(pdrvdata.class)) { 275 + err = PTR_ERR(pdrvdata.class); 276 + pr_err("Error %d creating virtio-ports class\n", err); 277 + return 
err; 278 + } 279 + 280 + pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL); 281 + if (!pdrvdata.debugfs_dir) { 282 + pr_warning("Error %ld creating debugfs dir for virtio-ports\n", 283 + PTR_ERR(pdrvdata.debugfs_dir)); 284 + } 285 + INIT_LIST_HEAD(&pdrvdata.consoles); 286 + 1529 287 return register_virtio_driver(&virtio_console); 1530 288 } 289 + 290 + static void __exit fini(void) 291 + { 292 + unregister_virtio_driver(&virtio_console); 293 + 294 + class_destroy(pdrvdata.class); 295 + if (pdrvdata.debugfs_dir) 296 + debugfs_remove_recursive(pdrvdata.debugfs_dir); 297 + } 1531 298 module_init(init); 299 + module_exit(fini); 1532 300 1533 301 MODULE_DEVICE_TABLE(virtio, id_table); 1534 302 MODULE_DESCRIPTION("Virtio console driver");
+1 -1
drivers/clocksource/cs5535-clockevt.c
··· 21 21 22 22 #define DRV_NAME "cs5535-clockevt" 23 23 24 - static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ; 24 + static int timer_irq; 25 25 module_param_named(irq, timer_irq, int, 0644); 26 26 MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks."); 27 27
+45 -2
drivers/gpu/drm/drm_edid.c
··· 598 598 return mode; 599 599 } 600 600 601 + /* 602 + * EDID is delightfully ambiguous about how interlaced modes are to be 603 + * encoded. Our internal representation is of frame height, but some 604 + * HDTV detailed timings are encoded as field height. 605 + * 606 + * The format list here is from CEA, in frame size. Technically we 607 + * should be checking refresh rate too. Whatever. 608 + */ 609 + static void 610 + drm_mode_do_interlace_quirk(struct drm_display_mode *mode, 611 + struct detailed_pixel_timing *pt) 612 + { 613 + int i; 614 + static const struct { 615 + int w, h; 616 + } cea_interlaced[] = { 617 + { 1920, 1080 }, 618 + { 720, 480 }, 619 + { 1440, 480 }, 620 + { 2880, 480 }, 621 + { 720, 576 }, 622 + { 1440, 576 }, 623 + { 2880, 576 }, 624 + }; 625 + static const int n_sizes = 626 + sizeof(cea_interlaced)/sizeof(cea_interlaced[0]); 627 + 628 + if (!(pt->misc & DRM_EDID_PT_INTERLACED)) 629 + return; 630 + 631 + for (i = 0; i < n_sizes; i++) { 632 + if ((mode->hdisplay == cea_interlaced[i].w) && 633 + (mode->vdisplay == cea_interlaced[i].h / 2)) { 634 + mode->vdisplay *= 2; 635 + mode->vsync_start *= 2; 636 + mode->vsync_end *= 2; 637 + mode->vtotal *= 2; 638 + mode->vtotal |= 1; 639 + } 640 + } 641 + 642 + mode->flags |= DRM_MODE_FLAG_INTERLACE; 643 + } 644 + 601 645 /** 602 646 * drm_mode_detailed - create a new mode from an EDID detailed timing section 603 647 * @dev: DRM device (needed to create new mode) ··· 724 680 725 681 drm_mode_set_name(mode); 726 682 727 - if (pt->misc & DRM_EDID_PT_INTERLACED) 728 - mode->flags |= DRM_MODE_FLAG_INTERLACE; 683 + drm_mode_do_interlace_quirk(mode, pt); 729 684 730 685 if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) { 731 686 pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
+9 -21
drivers/gpu/drm/i915/i915_drv.c
··· 176 176 177 177 static int i915_drm_freeze(struct drm_device *dev) 178 178 { 179 + struct drm_i915_private *dev_priv = dev->dev_private; 180 + 179 181 pci_save_state(dev->pdev); 180 182 181 183 /* If KMS is active, we do the leavevt stuff here */ ··· 193 191 194 192 i915_save_state(dev); 195 193 196 - return 0; 197 - } 198 - 199 - static void i915_drm_suspend(struct drm_device *dev) 200 - { 201 - struct drm_i915_private *dev_priv = dev->dev_private; 202 - 203 194 intel_opregion_free(dev, 1); 204 195 205 196 /* Modeset on resume, not lid events */ 206 197 dev_priv->modeset_on_lid = 0; 198 + 199 + return 0; 207 200 } 208 201 209 202 static int i915_suspend(struct drm_device *dev, pm_message_t state) ··· 218 221 if (error) 219 222 return error; 220 223 221 - i915_drm_suspend(dev); 222 - 223 224 if (state.event == PM_EVENT_SUSPEND) { 224 225 /* Shut down the device */ 225 226 pci_disable_device(dev->pdev); ··· 231 236 { 232 237 struct drm_i915_private *dev_priv = dev->dev_private; 233 238 int error = 0; 239 + 240 + i915_restore_state(dev); 241 + 242 + intel_opregion_init(dev, 1); 234 243 235 244 /* KMS EnterVT equivalent */ 236 245 if (drm_core_check_feature(dev, DRIVER_MODESET)) { ··· 261 262 return -EIO; 262 263 263 264 pci_set_master(dev->pdev); 264 - 265 - i915_restore_state(dev); 266 - 267 - intel_opregion_init(dev, 1); 268 265 269 266 return i915_drm_thaw(dev); 270 267 } ··· 418 423 if (error) 419 424 return error; 420 425 421 - i915_drm_suspend(drm_dev); 422 - 423 426 pci_disable_device(pdev); 424 427 pci_set_power_state(pdev, PCI_D3hot); 425 428 ··· 457 464 { 458 465 struct pci_dev *pdev = to_pci_dev(dev); 459 466 struct drm_device *drm_dev = pci_get_drvdata(pdev); 460 - int error; 461 467 462 - error = i915_drm_freeze(drm_dev); 463 - if (!error) 464 - i915_drm_suspend(drm_dev); 465 - 466 - return error; 468 + return i915_drm_freeze(drm_dev); 467 469 } 468 470 469 471 const struct dev_pm_ops i915_pm_ops = {
+7
drivers/gpu/drm/i915/intel_lvds.c
··· 636 636 DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), 637 637 }, 638 638 }, 639 + { 640 + .ident = "Clevo M5x0N", 641 + .matches = { 642 + DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), 643 + DMI_MATCH(DMI_BOARD_NAME, "M5x0N"), 644 + }, 645 + }, 639 646 { } 640 647 }; 641 648
+3 -4
drivers/gpu/drm/nouveau/nouveau_bios.c
··· 5861 5861 struct drm_nouveau_private *dev_priv = dev->dev_private; 5862 5862 struct nvbios *bios = &dev_priv->VBIOS; 5863 5863 struct init_exec iexec = { true, false }; 5864 - unsigned long flags; 5865 5864 5866 - spin_lock_irqsave(&bios->lock, flags); 5865 + mutex_lock(&bios->lock); 5867 5866 bios->display.output = dcbent; 5868 5867 parse_init_table(bios, table, &iexec); 5869 5868 bios->display.output = NULL; 5870 - spin_unlock_irqrestore(&bios->lock, flags); 5869 + mutex_unlock(&bios->lock); 5871 5870 } 5872 5871 5873 5872 static bool NVInitVBIOS(struct drm_device *dev) ··· 5875 5876 struct nvbios *bios = &dev_priv->VBIOS; 5876 5877 5877 5878 memset(bios, 0, sizeof(struct nvbios)); 5878 - spin_lock_init(&bios->lock); 5879 + mutex_init(&bios->lock); 5879 5880 bios->dev = dev; 5880 5881 5881 5882 if (!NVShadowVBIOS(dev, bios->data))
+1 -1
drivers/gpu/drm/nouveau/nouveau_bios.h
··· 205 205 struct drm_device *dev; 206 206 struct nouveau_bios_info pub; 207 207 208 - spinlock_t lock; 208 + struct mutex lock; 209 209 210 210 uint8_t data[NV_PROM_SIZE]; 211 211 unsigned int length;
+1
drivers/gpu/drm/nouveau/nouveau_drv.h
··· 583 583 uint64_t vm_end; 584 584 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; 585 585 int vm_vram_pt_nr; 586 + uint64_t vram_sys_base; 586 587 587 588 /* the mtrr covering the FB */ 588 589 int fb_mtrr;
+71 -40
drivers/gpu/drm/nouveau/nouveau_mem.c
··· 285 285 uint32_t flags, uint64_t phys) 286 286 { 287 287 struct drm_nouveau_private *dev_priv = dev->dev_private; 288 - struct nouveau_gpuobj **pgt; 289 - unsigned psz, pfl, pages; 288 + struct nouveau_gpuobj *pgt; 289 + unsigned block; 290 + int i; 290 291 291 - if (virt >= dev_priv->vm_gart_base && 292 - (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) { 293 - psz = 12; 294 - pgt = &dev_priv->gart_info.sg_ctxdma; 295 - pfl = 0x21; 296 - virt -= dev_priv->vm_gart_base; 297 - } else 298 - if (virt >= dev_priv->vm_vram_base && 299 - (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) { 300 - psz = 16; 301 - pgt = dev_priv->vm_vram_pt; 302 - pfl = 0x01; 303 - virt -= dev_priv->vm_vram_base; 304 - } else { 305 - NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n", 306 - virt, virt + size - 1); 307 - return -EINVAL; 292 + virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1; 293 + size = (size >> 16) << 1; 294 + 295 + phys |= ((uint64_t)flags << 32); 296 + phys |= 1; 297 + if (dev_priv->vram_sys_base) { 298 + phys += dev_priv->vram_sys_base; 299 + phys |= 0x30; 308 300 } 309 301 310 - pages = size >> psz; 311 - 312 302 dev_priv->engine.instmem.prepare_access(dev, true); 313 - if (flags & 0x80000000) { 314 - while (pages--) { 315 - struct nouveau_gpuobj *pt = pgt[virt >> 29]; 316 - unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; 303 + while (size) { 304 + unsigned offset_h = upper_32_bits(phys); 305 + unsigned offset_l = lower_32_bits(phys); 306 + unsigned pte, end; 317 307 318 - nv_wo32(dev, pt, pte++, 0x00000000); 319 - nv_wo32(dev, pt, pte++, 0x00000000); 320 - 321 - virt += (1 << psz); 308 + for (i = 7; i >= 0; i--) { 309 + block = 1 << (i + 1); 310 + if (size >= block && !(virt & (block - 1))) 311 + break; 322 312 } 323 - } else { 324 - while (pages--) { 325 - struct nouveau_gpuobj *pt = pgt[virt >> 29]; 326 - unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; 327 - unsigned offset_h = upper_32_bits(phys) & 0xff; 328 - 
unsigned offset_l = lower_32_bits(phys); 313 + offset_l |= (i << 7); 329 314 330 - nv_wo32(dev, pt, pte++, offset_l | pfl); 331 - nv_wo32(dev, pt, pte++, offset_h | flags); 315 + phys += block << 15; 316 + size -= block; 332 317 333 - phys += (1 << psz); 334 - virt += (1 << psz); 318 + while (block) { 319 + pgt = dev_priv->vm_vram_pt[virt >> 14]; 320 + pte = virt & 0x3ffe; 321 + 322 + end = pte + block; 323 + if (end > 16384) 324 + end = 16384; 325 + block -= (end - pte); 326 + virt += (end - pte); 327 + 328 + while (pte < end) { 329 + nv_wo32(dev, pgt, pte++, offset_l); 330 + nv_wo32(dev, pgt, pte++, offset_h); 331 + } 335 332 } 336 333 } 337 334 dev_priv->engine.instmem.finish_access(dev); ··· 353 356 void 354 357 nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) 355 358 { 356 - nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0); 359 + struct drm_nouveau_private *dev_priv = dev->dev_private; 360 + struct nouveau_gpuobj *pgt; 361 + unsigned pages, pte, end; 362 + 363 + virt -= dev_priv->vm_vram_base; 364 + pages = (size >> 16) << 1; 365 + 366 + dev_priv->engine.instmem.prepare_access(dev, true); 367 + while (pages) { 368 + pgt = dev_priv->vm_vram_pt[virt >> 29]; 369 + pte = (virt & 0x1ffe0000ULL) >> 15; 370 + 371 + end = pte + pages; 372 + if (end > 16384) 373 + end = 16384; 374 + pages -= (end - pte); 375 + virt += (end - pte) << 15; 376 + 377 + while (pte < end) 378 + nv_wo32(dev, pgt, pte++, 0); 379 + } 380 + dev_priv->engine.instmem.finish_access(dev); 381 + 382 + nv_wr32(dev, 0x100c80, 0x00050001); 383 + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { 384 + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); 385 + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); 386 + return; 387 + } 388 + 389 + nv_wr32(dev, 0x100c80, 0x00000001); 390 + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { 391 + NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); 392 + NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); 393 + 
} 357 394 } 358 395 359 396 /*
+5 -1
drivers/gpu/drm/nouveau/nv04_dac.c
··· 119 119 struct drm_connector *connector) 120 120 { 121 121 struct drm_device *dev = encoder->dev; 122 - uint8_t saved_seq1, saved_pi, saved_rpc1; 122 + uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; 123 123 uint8_t saved_palette0[3], saved_palette_mask; 124 124 uint32_t saved_rtest_ctrl, saved_rgen_ctrl; 125 125 int i; ··· 134 134 if (nv_two_heads(dev)) 135 135 /* only implemented for head A for now */ 136 136 NVSetOwner(dev, 0); 137 + 138 + saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX); 139 + NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80); 137 140 138 141 saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX); 139 142 NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20); ··· 206 203 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); 207 204 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); 208 205 NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1); 206 + NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode); 209 207 210 208 if (blue == 0x18) { 211 209 NV_INFO(dev, "Load detected on head A\n");
+2
drivers/gpu/drm/nouveau/nv17_tv.c
··· 579 579 nouveau_encoder(encoder)->restore.output); 580 580 581 581 nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state); 582 + 583 + nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED; 582 584 } 583 585 584 586 static int nv17_tv_create_resources(struct drm_encoder *encoder,
+40 -18
drivers/gpu/drm/nouveau/nv50_instmem.c
··· 76 76 for (i = 0x1700; i <= 0x1710; i += 4) 77 77 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); 78 78 79 + if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) 80 + dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; 81 + else 82 + dev_priv->vram_sys_base = 0; 83 + 79 84 /* Reserve the last MiB of VRAM, we should probably try to avoid 80 85 * setting up the below tables over the top of the VBIOS image at 81 86 * some point. ··· 177 172 * We map the entire fake channel into the start of the PRAMIN BAR 178 173 */ 179 174 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, 180 - 0, &priv->pramin_pt); 175 + 0, &priv->pramin_pt); 181 176 if (ret) 182 177 return ret; 183 178 184 - for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) { 185 - if (v < (c_offset + c_size)) 186 - BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1); 187 - else 188 - BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009); 179 + v = c_offset | 1; 180 + if (dev_priv->vram_sys_base) { 181 + v += dev_priv->vram_sys_base; 182 + v |= 0x30; 183 + } 184 + 185 + i = 0; 186 + while (v < dev_priv->vram_sys_base + c_offset + c_size) { 187 + BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v); 189 188 BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); 189 + v += 0x1000; 190 + i += 8; 191 + } 192 + 193 + while (i < pt_size) { 194 + BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000); 195 + BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); 196 + i += 8; 190 197 } 191 198 192 199 BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); ··· 433 416 { 434 417 struct drm_nouveau_private *dev_priv = dev->dev_private; 435 418 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; 436 - uint32_t pte, pte_end, vram; 419 + struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj; 420 + uint32_t pte, pte_end; 421 + uint64_t vram; 437 422 438 423 if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) 439 424 return -EINVAL; ··· 443 424 NV_DEBUG(dev, 
"st=0x%0llx sz=0x%0llx\n", 444 425 gpuobj->im_pramin->start, gpuobj->im_pramin->size); 445 426 446 - pte = (gpuobj->im_pramin->start >> 12) << 3; 447 - pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; 427 + pte = (gpuobj->im_pramin->start >> 12) << 1; 428 + pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; 448 429 vram = gpuobj->im_backing_start; 449 430 450 431 NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n", 451 432 gpuobj->im_pramin->start, pte, pte_end); 452 433 NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); 453 434 435 + vram |= 1; 436 + if (dev_priv->vram_sys_base) { 437 + vram += dev_priv->vram_sys_base; 438 + vram |= 0x30; 439 + } 440 + 454 441 dev_priv->engine.instmem.prepare_access(dev, true); 455 442 while (pte < pte_end) { 456 - nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1); 457 - nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); 458 - 459 - pte += 8; 443 + nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram)); 444 + nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram)); 460 445 vram += NV50_INSTMEM_PAGE_SIZE; 461 446 } 462 447 dev_priv->engine.instmem.finish_access(dev); ··· 493 470 if (gpuobj->im_bound == 0) 494 471 return -EINVAL; 495 472 496 - pte = (gpuobj->im_pramin->start >> 12) << 3; 497 - pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; 473 + pte = (gpuobj->im_pramin->start >> 12) << 1; 474 + pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; 498 475 499 476 dev_priv->engine.instmem.prepare_access(dev, true); 500 477 while (pte < pte_end) { 501 - nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009); 502 - nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); 503 - pte += 8; 478 + nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); 479 + nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); 504 480 } 505 481 dev_priv->engine.instmem.finish_access(dev); 506 482
+1 -1
drivers/gpu/drm/radeon/atom.c
··· 643 643 uint8_t count = U8((*ptr)++); 644 644 SDEBUG(" count: %d\n", count); 645 645 if (arg == ATOM_UNIT_MICROSEC) 646 - schedule_timeout_uninterruptible(usecs_to_jiffies(count)); 646 + udelay(count); 647 647 else 648 648 schedule_timeout_uninterruptible(msecs_to_jiffies(count)); 649 649 }
-3
drivers/gpu/drm/radeon/r600_blit_kms.c
··· 543 543 void r600_vb_ib_put(struct radeon_device *rdev) 544 544 { 545 545 radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); 546 - mutex_lock(&rdev->ib_pool.mutex); 547 - list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs); 548 - mutex_unlock(&rdev->ib_pool.mutex); 549 546 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); 550 547 } 551 548
+6 -3
drivers/gpu/drm/radeon/r600_cp.c
··· 1428 1428 1429 1429 gb_tiling_config |= R600_BANK_SWAPS(1); 1430 1430 1431 - backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, 1432 - dev_priv->r600_max_backends, 1433 - (0xff << dev_priv->r600_max_backends) & 0xff); 1431 + if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740) 1432 + backend_map = 0x28; 1433 + else 1434 + backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, 1435 + dev_priv->r600_max_backends, 1436 + (0xff << dev_priv->r600_max_backends) & 0xff); 1434 1437 gb_tiling_config |= R600_BACKEND_MAP(backend_map); 1435 1438 1436 1439 cc_gc_shader_pipe_config =
+5 -4
drivers/gpu/drm/radeon/radeon.h
··· 96 96 * symbol; 97 97 */ 98 98 #define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 99 + /* RADEON_IB_POOL_SIZE must be a power of 2 */ 99 100 #define RADEON_IB_POOL_SIZE 16 100 101 #define RADEON_DEBUGFS_MAX_NUM_FILES 32 101 102 #define RADEONFB_CONN_LIMIT 4 ··· 364 363 */ 365 364 struct radeon_ib { 366 365 struct list_head list; 367 - unsigned long idx; 366 + unsigned idx; 368 367 uint64_t gpu_addr; 369 368 struct radeon_fence *fence; 370 - uint32_t *ptr; 369 + uint32_t *ptr; 371 370 uint32_t length_dw; 371 + bool free; 372 372 }; 373 373 374 374 /* ··· 379 377 struct radeon_ib_pool { 380 378 struct mutex mutex; 381 379 struct radeon_bo *robj; 382 - struct list_head scheduled_ibs; 383 380 struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; 384 381 bool ready; 385 - DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE); 382 + unsigned head_id; 386 383 }; 387 384 388 385 struct radeon_cp {
+9
drivers/gpu/drm/radeon/radeon_atombios.c
··· 206 206 *connector_type = DRM_MODE_CONNECTOR_DVID; 207 207 } 208 208 209 + /* Asrock RS600 board lists the DVI port as HDMI */ 210 + if ((dev->pdev->device == 0x7941) && 211 + (dev->pdev->subsystem_vendor == 0x1849) && 212 + (dev->pdev->subsystem_device == 0x7941)) { 213 + if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && 214 + (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) 215 + *connector_type = DRM_MODE_CONNECTOR_DVID; 216 + } 217 + 209 218 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ 210 219 if ((dev->pdev->device == 0x7941) && 211 220 (dev->pdev->subsystem_vendor == 0x147b) &&
+22 -22
drivers/gpu/drm/radeon/radeon_combios.c
··· 1279 1279 rdev->mode_info.connector_table = radeon_connector_table; 1280 1280 if (rdev->mode_info.connector_table == CT_NONE) { 1281 1281 #ifdef CONFIG_PPC_PMAC 1282 - if (machine_is_compatible("PowerBook3,3")) { 1282 + if (of_machine_is_compatible("PowerBook3,3")) { 1283 1283 /* powerbook with VGA */ 1284 1284 rdev->mode_info.connector_table = CT_POWERBOOK_VGA; 1285 - } else if (machine_is_compatible("PowerBook3,4") || 1286 - machine_is_compatible("PowerBook3,5")) { 1285 + } else if (of_machine_is_compatible("PowerBook3,4") || 1286 + of_machine_is_compatible("PowerBook3,5")) { 1287 1287 /* powerbook with internal tmds */ 1288 1288 rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL; 1289 - } else if (machine_is_compatible("PowerBook5,1") || 1290 - machine_is_compatible("PowerBook5,2") || 1291 - machine_is_compatible("PowerBook5,3") || 1292 - machine_is_compatible("PowerBook5,4") || 1293 - machine_is_compatible("PowerBook5,5")) { 1289 + } else if (of_machine_is_compatible("PowerBook5,1") || 1290 + of_machine_is_compatible("PowerBook5,2") || 1291 + of_machine_is_compatible("PowerBook5,3") || 1292 + of_machine_is_compatible("PowerBook5,4") || 1293 + of_machine_is_compatible("PowerBook5,5")) { 1294 1294 /* powerbook with external single link tmds (sil164) */ 1295 1295 rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; 1296 - } else if (machine_is_compatible("PowerBook5,6")) { 1296 + } else if (of_machine_is_compatible("PowerBook5,6")) { 1297 1297 /* powerbook with external dual or single link tmds */ 1298 1298 rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; 1299 - } else if (machine_is_compatible("PowerBook5,7") || 1300 - machine_is_compatible("PowerBook5,8") || 1301 - machine_is_compatible("PowerBook5,9")) { 1299 + } else if (of_machine_is_compatible("PowerBook5,7") || 1300 + of_machine_is_compatible("PowerBook5,8") || 1301 + of_machine_is_compatible("PowerBook5,9")) { 1302 1302 /* PowerBook6,2 ? 
*/ 1303 1303 /* powerbook with external dual link tmds (sil1178?) */ 1304 1304 rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL; 1305 - } else if (machine_is_compatible("PowerBook4,1") || 1306 - machine_is_compatible("PowerBook4,2") || 1307 - machine_is_compatible("PowerBook4,3") || 1308 - machine_is_compatible("PowerBook6,3") || 1309 - machine_is_compatible("PowerBook6,5") || 1310 - machine_is_compatible("PowerBook6,7")) { 1305 + } else if (of_machine_is_compatible("PowerBook4,1") || 1306 + of_machine_is_compatible("PowerBook4,2") || 1307 + of_machine_is_compatible("PowerBook4,3") || 1308 + of_machine_is_compatible("PowerBook6,3") || 1309 + of_machine_is_compatible("PowerBook6,5") || 1310 + of_machine_is_compatible("PowerBook6,7")) { 1311 1311 /* ibook */ 1312 1312 rdev->mode_info.connector_table = CT_IBOOK; 1313 - } else if (machine_is_compatible("PowerMac4,4")) { 1313 + } else if (of_machine_is_compatible("PowerMac4,4")) { 1314 1314 /* emac */ 1315 1315 rdev->mode_info.connector_table = CT_EMAC; 1316 - } else if (machine_is_compatible("PowerMac10,1")) { 1316 + } else if (of_machine_is_compatible("PowerMac10,1")) { 1317 1317 /* mini with internal tmds */ 1318 1318 rdev->mode_info.connector_table = CT_MINI_INTERNAL; 1319 - } else if (machine_is_compatible("PowerMac10,2")) { 1319 + } else if (of_machine_is_compatible("PowerMac10,2")) { 1320 1320 /* mini with external tmds */ 1321 1321 rdev->mode_info.connector_table = CT_MINI_EXTERNAL; 1322 - } else if (machine_is_compatible("PowerMac12,1")) { 1322 + } else if (of_machine_is_compatible("PowerMac12,1")) { 1323 1323 /* PowerMac8,1 ? */ 1324 1324 /* imac g5 isight */ 1325 1325 rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
+2 -3
drivers/gpu/drm/radeon/radeon_connectors.c
··· 780 780 * connected and the DVI port disconnected. If the edid doesn't 781 781 * say HDMI, vice versa. 782 782 */ 783 - if (radeon_connector->shared_ddc && connector_status_connected) { 783 + if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { 784 784 struct drm_device *dev = connector->dev; 785 785 struct drm_connector *list_connector; 786 786 struct radeon_connector *list_radeon_connector; ··· 1060 1060 return; 1061 1061 } 1062 1062 if (radeon_connector->ddc_bus && i2c_bus->valid) { 1063 - if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus, 1064 - sizeof(struct radeon_i2c_bus_rec)) == 0) { 1063 + if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) { 1065 1064 radeon_connector->shared_ddc = true; 1066 1065 shared_ddc = true; 1067 1066 }
+4 -6
drivers/gpu/drm/radeon/radeon_cs.c
··· 86 86 &p->validated); 87 87 } 88 88 } 89 - return radeon_bo_list_validate(&p->validated, p->ib->fence); 89 + return radeon_bo_list_validate(&p->validated); 90 90 } 91 91 92 92 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) ··· 189 189 { 190 190 unsigned i; 191 191 192 - if (error && parser->ib) { 193 - radeon_bo_list_unvalidate(&parser->validated, 194 - parser->ib->fence); 195 - } else { 196 - radeon_bo_list_unreserve(&parser->validated); 192 + if (!error && parser->ib) { 193 + radeon_bo_list_fence(&parser->validated, parser->ib->fence); 197 194 } 195 + radeon_bo_list_unreserve(&parser->validated); 198 196 for (i = 0; i < parser->nrelocs; i++) { 199 197 if (parser->relocs[i].gobj) { 200 198 mutex_lock(&parser->rdev->ddev->struct_mutex);
+2 -1
drivers/gpu/drm/radeon/radeon_drv.h
··· 106 106 * 1.29- R500 3D cmd buffer support 107 107 * 1.30- Add support for occlusion queries 108 108 * 1.31- Add support for num Z pipes from GET_PARAM 109 + * 1.32- fixes for rv740 setup 109 110 */ 110 111 #define DRIVER_MAJOR 1 111 - #define DRIVER_MINOR 31 112 + #define DRIVER_MINOR 32 112 113 #define DRIVER_PATCHLEVEL 0 113 114 114 115 enum radeon_cp_microcode_version {
+14 -20
drivers/gpu/drm/radeon/radeon_object.c
··· 306 306 } 307 307 } 308 308 309 - int radeon_bo_list_validate(struct list_head *head, void *fence) 309 + int radeon_bo_list_validate(struct list_head *head) 310 310 { 311 311 struct radeon_bo_list *lobj; 312 312 struct radeon_bo *bo; 313 - struct radeon_fence *old_fence = NULL; 314 313 int r; 315 314 316 315 r = radeon_bo_list_reserve(head); ··· 333 334 } 334 335 lobj->gpu_offset = radeon_bo_gpu_offset(bo); 335 336 lobj->tiling_flags = bo->tiling_flags; 336 - if (fence) { 337 - old_fence = (struct radeon_fence *)bo->tbo.sync_obj; 338 - bo->tbo.sync_obj = radeon_fence_ref(fence); 339 - bo->tbo.sync_obj_arg = NULL; 340 - } 341 - if (old_fence) { 342 - radeon_fence_unref(&old_fence); 343 - } 344 337 } 345 338 return 0; 346 339 } 347 340 348 - void radeon_bo_list_unvalidate(struct list_head *head, void *fence) 341 + void radeon_bo_list_fence(struct list_head *head, void *fence) 349 342 { 350 343 struct radeon_bo_list *lobj; 351 - struct radeon_fence *old_fence; 344 + struct radeon_bo *bo; 345 + struct radeon_fence *old_fence = NULL; 352 346 353 - if (fence) 354 - list_for_each_entry(lobj, head, list) { 355 - old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj); 356 - if (old_fence == fence) { 357 - lobj->bo->tbo.sync_obj = NULL; 358 - radeon_fence_unref(&old_fence); 359 - } 347 + list_for_each_entry(lobj, head, list) { 348 + bo = lobj->bo; 349 + spin_lock(&bo->tbo.lock); 350 + old_fence = (struct radeon_fence *)bo->tbo.sync_obj; 351 + bo->tbo.sync_obj = radeon_fence_ref(fence); 352 + bo->tbo.sync_obj_arg = NULL; 353 + spin_unlock(&bo->tbo.lock); 354 + if (old_fence) { 355 + radeon_fence_unref(&old_fence); 360 356 } 361 - radeon_bo_list_unreserve(head); 357 + } 362 358 } 363 359 364 360 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+2 -2
drivers/gpu/drm/radeon/radeon_object.h
··· 156 156 struct list_head *head); 157 157 extern int radeon_bo_list_reserve(struct list_head *head); 158 158 extern void radeon_bo_list_unreserve(struct list_head *head); 159 - extern int radeon_bo_list_validate(struct list_head *head, void *fence); 160 - extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence); 159 + extern int radeon_bo_list_validate(struct list_head *head); 160 + extern void radeon_bo_list_fence(struct list_head *head, void *fence); 161 161 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 162 162 struct vm_area_struct *vma); 163 163 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+42 -65
drivers/gpu/drm/radeon/radeon_ring.c
··· 41 41 { 42 42 struct radeon_fence *fence; 43 43 struct radeon_ib *nib; 44 - unsigned long i; 45 - int r = 0; 44 + int r = 0, i, c; 46 45 47 46 *ib = NULL; 48 47 r = radeon_fence_create(rdev, &fence); 49 48 if (r) { 50 - DRM_ERROR("failed to create fence for new IB\n"); 49 + dev_err(rdev->dev, "failed to create fence for new IB\n"); 51 50 return r; 52 51 } 53 52 mutex_lock(&rdev->ib_pool.mutex); 54 - i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); 55 - if (i < RADEON_IB_POOL_SIZE) { 56 - set_bit(i, rdev->ib_pool.alloc_bm); 57 - rdev->ib_pool.ibs[i].length_dw = 0; 58 - *ib = &rdev->ib_pool.ibs[i]; 59 - mutex_unlock(&rdev->ib_pool.mutex); 60 - goto out; 53 + for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) { 54 + i &= (RADEON_IB_POOL_SIZE - 1); 55 + if (rdev->ib_pool.ibs[i].free) { 56 + nib = &rdev->ib_pool.ibs[i]; 57 + break; 58 + } 61 59 } 62 - if (list_empty(&rdev->ib_pool.scheduled_ibs)) { 63 - /* we go do nothings here */ 60 + if (nib == NULL) { 61 + /* This should never happen, it means we allocated all 62 + * IB and haven't scheduled one yet, return EBUSY to 63 + * userspace hoping that on ioctl recall we get better 64 + * luck 65 + */ 66 + dev_err(rdev->dev, "no free indirect buffer !\n"); 64 67 mutex_unlock(&rdev->ib_pool.mutex); 65 - DRM_ERROR("all IB allocated none scheduled.\n"); 66 - r = -EINVAL; 67 - goto out; 68 + radeon_fence_unref(&fence); 69 + return -EBUSY; 68 70 } 69 - /* get the first ib on the scheduled list */ 70 - nib = list_entry(rdev->ib_pool.scheduled_ibs.next, 71 - struct radeon_ib, list); 72 - if (nib->fence == NULL) { 73 - /* we go do nothings here */ 71 + rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1); 72 + nib->free = false; 73 + if (nib->fence) { 74 74 mutex_unlock(&rdev->ib_pool.mutex); 75 - DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx); 76 - r = -EINVAL; 77 - goto out; 78 - } 79 - mutex_unlock(&rdev->ib_pool.mutex); 80 - 81 - r = 
radeon_fence_wait(nib->fence, false); 82 - if (r) { 83 - DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx, 84 - (unsigned long)nib->gpu_addr, nib->length_dw); 85 - DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n"); 86 - goto out; 75 + r = radeon_fence_wait(nib->fence, false); 76 + if (r) { 77 + dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n", 78 + nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw); 79 + mutex_lock(&rdev->ib_pool.mutex); 80 + nib->free = true; 81 + mutex_unlock(&rdev->ib_pool.mutex); 82 + radeon_fence_unref(&fence); 83 + return r; 84 + } 85 + mutex_lock(&rdev->ib_pool.mutex); 87 86 } 88 87 radeon_fence_unref(&nib->fence); 89 - 88 + nib->fence = fence; 90 89 nib->length_dw = 0; 91 - 92 - /* scheduled list is accessed here */ 93 - mutex_lock(&rdev->ib_pool.mutex); 94 - list_del(&nib->list); 95 - INIT_LIST_HEAD(&nib->list); 96 90 mutex_unlock(&rdev->ib_pool.mutex); 97 - 98 91 *ib = nib; 99 - out: 100 - if (r) { 101 - radeon_fence_unref(&fence); 102 - } else { 103 - (*ib)->fence = fence; 104 - } 105 - return r; 92 + return 0; 106 93 } 107 94 108 95 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) ··· 100 113 if (tmp == NULL) { 101 114 return; 102 115 } 103 - mutex_lock(&rdev->ib_pool.mutex); 104 - if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) { 105 - /* IB is scheduled & not signaled don't do anythings */ 106 - mutex_unlock(&rdev->ib_pool.mutex); 107 - return; 108 - } 109 - list_del(&tmp->list); 110 - INIT_LIST_HEAD(&tmp->list); 111 - if (tmp->fence) 116 + if (!tmp->fence->emited) 112 117 radeon_fence_unref(&tmp->fence); 113 - 114 - tmp->length_dw = 0; 115 - clear_bit(tmp->idx, rdev->ib_pool.alloc_bm); 118 + mutex_lock(&rdev->ib_pool.mutex); 119 + tmp->free = true; 116 120 mutex_unlock(&rdev->ib_pool.mutex); 117 121 } 118 122 ··· 113 135 114 136 if (!ib->length_dw || !rdev->cp.ready) { 115 137 /* TODO: Nothings in the ib we should report. 
*/ 116 - DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); 138 + DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx); 117 139 return -EINVAL; 118 140 } 119 141 ··· 126 148 radeon_ring_ib_execute(rdev, ib); 127 149 radeon_fence_emit(rdev, ib->fence); 128 150 mutex_lock(&rdev->ib_pool.mutex); 129 - list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs); 151 + /* once scheduled IB is considered free and protected by the fence */ 152 + ib->free = true; 130 153 mutex_unlock(&rdev->ib_pool.mutex); 131 154 radeon_ring_unlock_commit(rdev); 132 155 return 0; ··· 143 164 if (rdev->ib_pool.robj) 144 165 return 0; 145 166 /* Allocate 1M object buffer */ 146 - INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs); 147 167 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, 148 168 true, RADEON_GEM_DOMAIN_GTT, 149 169 &rdev->ib_pool.robj); ··· 173 195 rdev->ib_pool.ibs[i].ptr = ptr + offset; 174 196 rdev->ib_pool.ibs[i].idx = i; 175 197 rdev->ib_pool.ibs[i].length_dw = 0; 176 - INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list); 198 + rdev->ib_pool.ibs[i].free = true; 177 199 } 178 - bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); 200 + rdev->ib_pool.head_id = 0; 179 201 rdev->ib_pool.ready = true; 180 202 DRM_INFO("radeon: ib pool ready.\n"); 181 203 if (radeon_debugfs_ib_init(rdev)) { ··· 192 214 return; 193 215 } 194 216 mutex_lock(&rdev->ib_pool.mutex); 195 - bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); 196 217 if (rdev->ib_pool.robj) { 197 218 r = radeon_bo_reserve(rdev->ib_pool.robj, false); 198 219 if (likely(r == 0)) { ··· 340 363 if (ib == NULL) { 341 364 return 0; 342 365 } 343 - seq_printf(m, "IB %04lu\n", ib->idx); 366 + seq_printf(m, "IB %04u\n", ib->idx); 344 367 seq_printf(m, "IB fence %p\n", ib->fence); 345 368 seq_printf(m, "IB size %05u dwords\n", ib->length_dw); 346 369 for (i = 0; i < ib->length_dw; i++) {
+6 -3
drivers/gpu/drm/radeon/rv770.c
··· 549 549 550 550 gb_tiling_config |= BANK_SWAPS(1); 551 551 552 - backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes, 553 - rdev->config.rv770.max_backends, 554 - (0xff << rdev->config.rv770.max_backends) & 0xff); 552 + if (rdev->family == CHIP_RV740) 553 + backend_map = 0x28; 554 + else 555 + backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes, 556 + rdev->config.rv770.max_backends, 557 + (0xff << rdev->config.rv770.max_backends) & 0xff); 555 558 gb_tiling_config |= BACKEND_MAP(backend_map); 556 559 557 560 cc_gc_shader_pipe_config =
+11 -7
drivers/gpu/drm/ttm/ttm_tt.c
··· 196 196 197 197 #ifdef CONFIG_X86 198 198 static inline int ttm_tt_set_page_caching(struct page *p, 199 - enum ttm_caching_state c_state) 199 + enum ttm_caching_state c_old, 200 + enum ttm_caching_state c_new) 200 201 { 201 202 int ret = 0; 202 203 203 204 if (PageHighMem(p)) 204 205 return 0; 205 206 206 - if (get_page_memtype(p) != -1) { 207 + if (c_old != tt_cached) { 207 208 /* p isn't in the default caching state, set it to 208 209 * writeback first to free its current memtype. */ 209 210 ··· 213 212 return ret; 214 213 } 215 214 216 - if (c_state == tt_wc) 215 + if (c_new == tt_wc) 217 216 ret = set_memory_wc((unsigned long) page_address(p), 1); 218 - else if (c_state == tt_uncached) 217 + else if (c_new == tt_uncached) 219 218 ret = set_pages_uc(p, 1); 220 219 221 220 return ret; 222 221 } 223 222 #else /* CONFIG_X86 */ 224 223 static inline int ttm_tt_set_page_caching(struct page *p, 225 - enum ttm_caching_state c_state) 224 + enum ttm_caching_state c_old, 225 + enum ttm_caching_state c_new) 226 226 { 227 227 return 0; 228 228 } ··· 256 254 for (i = 0; i < ttm->num_pages; ++i) { 257 255 cur_page = ttm->pages[i]; 258 256 if (likely(cur_page != NULL)) { 259 - ret = ttm_tt_set_page_caching(cur_page, c_state); 257 + ret = ttm_tt_set_page_caching(cur_page, 258 + ttm->caching_state, 259 + c_state); 260 260 if (unlikely(ret != 0)) 261 261 goto out_err; 262 262 } ··· 272 268 for (j = 0; j < i; ++j) { 273 269 cur_page = ttm->pages[j]; 274 270 if (likely(cur_page != NULL)) { 275 - (void)ttm_tt_set_page_caching(cur_page, 271 + (void)ttm_tt_set_page_caching(cur_page, c_state, 276 272 ttm->caching_state); 277 273 } 278 274 }
+16 -33
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 348 348 */ 349 349 350 350 DRM_INFO("It appears like vesafb is loaded. " 351 - "Ignore above error if any. Entering stealth mode.\n"); 351 + "Ignore above error if any.\n"); 352 352 ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); 353 353 if (unlikely(ret != 0)) { 354 354 DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); 355 355 goto out_no_device; 356 356 } 357 - vmw_kms_init(dev_priv); 358 - vmw_overlay_init(dev_priv); 359 - } else { 360 - ret = vmw_request_device(dev_priv); 361 - if (unlikely(ret != 0)) 362 - goto out_no_device; 363 - vmw_kms_init(dev_priv); 364 - vmw_overlay_init(dev_priv); 365 - vmw_fb_init(dev_priv); 366 357 } 358 + ret = vmw_request_device(dev_priv); 359 + if (unlikely(ret != 0)) 360 + goto out_no_device; 361 + vmw_kms_init(dev_priv); 362 + vmw_overlay_init(dev_priv); 363 + vmw_fb_init(dev_priv); 367 364 368 365 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 369 366 register_pm_notifier(&dev_priv->pm_nb); ··· 403 406 404 407 unregister_pm_notifier(&dev_priv->pm_nb); 405 408 406 - if (!dev_priv->stealth) { 407 - vmw_fb_close(dev_priv); 408 - vmw_kms_close(dev_priv); 409 - vmw_overlay_close(dev_priv); 410 - vmw_release_device(dev_priv); 411 - pci_release_regions(dev->pdev); 412 - } else { 413 - vmw_kms_close(dev_priv); 414 - vmw_overlay_close(dev_priv); 409 + vmw_fb_close(dev_priv); 410 + vmw_kms_close(dev_priv); 411 + vmw_overlay_close(dev_priv); 412 + vmw_release_device(dev_priv); 413 + if (dev_priv->stealth) 415 414 pci_release_region(dev->pdev, 2); 416 - } 415 + else 416 + pci_release_regions(dev->pdev); 417 + 417 418 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 418 419 drm_irq_uninstall(dev_priv->dev); 419 420 if (dev->devname == vmw_devname) ··· 580 585 int ret = 0; 581 586 582 587 DRM_INFO("Master set.\n"); 583 - if (dev_priv->stealth) { 584 - ret = vmw_request_device(dev_priv); 585 - if (unlikely(ret != 0)) 586 - return ret; 587 - } 588 588 589 589 if (active) { 590 590 BUG_ON(active != 
&dev_priv->fbdev_master); ··· 639 649 640 650 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 641 651 642 - if (dev_priv->stealth) { 643 - ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); 644 - if (unlikely(ret != 0)) 645 - DRM_ERROR("Unable to clean VRAM on master drop.\n"); 646 - vmw_release_device(dev_priv); 647 - } 648 652 dev_priv->active_master = &dev_priv->fbdev_master; 649 653 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 650 654 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 651 655 652 - if (!dev_priv->stealth) 653 - vmw_fb_on(dev_priv); 656 + vmw_fb_on(dev_priv); 654 657 } 655 658 656 659
+92 -16
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 182 182 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); 183 183 } 184 184 185 - static int vmw_cmd_dma(struct vmw_private *dev_priv, 186 - struct vmw_sw_context *sw_context, 187 - SVGA3dCmdHeader *header) 185 + static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, 186 + struct vmw_sw_context *sw_context, 187 + SVGAGuestPtr *ptr, 188 + struct vmw_dma_buffer **vmw_bo_p) 188 189 { 189 - uint32_t handle; 190 190 struct vmw_dma_buffer *vmw_bo = NULL; 191 191 struct ttm_buffer_object *bo; 192 - struct vmw_surface *srf = NULL; 193 - struct vmw_dma_cmd { 194 - SVGA3dCmdHeader header; 195 - SVGA3dCmdSurfaceDMA dma; 196 - } *cmd; 192 + uint32_t handle = ptr->gmrId; 197 193 struct vmw_relocation *reloc; 198 - int ret; 199 194 uint32_t cur_validate_node; 200 195 struct ttm_validate_buffer *val_buf; 196 + int ret; 201 197 202 - cmd = container_of(header, struct vmw_dma_cmd, header); 203 - handle = cmd->dma.guest.ptr.gmrId; 204 198 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); 205 199 if (unlikely(ret != 0)) { 206 200 DRM_ERROR("Could not find or use GMR region.\n"); ··· 203 209 bo = &vmw_bo->base; 204 210 205 211 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { 206 - DRM_ERROR("Max number of DMA commands per submission" 212 + DRM_ERROR("Max number relocations per submission" 207 213 " exceeded\n"); 208 214 ret = -EINVAL; 209 215 goto out_no_reloc; 210 216 } 211 217 212 218 reloc = &sw_context->relocs[sw_context->cur_reloc++]; 213 - reloc->location = &cmd->dma.guest.ptr; 219 + reloc->location = ptr; 214 220 215 221 cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); 216 222 if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { ··· 228 234 list_add_tail(&val_buf->head, &sw_context->validate_nodes); 229 235 ++sw_context->cur_val_buf; 230 236 } 237 + *vmw_bo_p = vmw_bo; 238 + return 0; 231 239 240 + out_no_reloc: 241 + vmw_dmabuf_unreference(&vmw_bo); 242 + vmw_bo_p = NULL; 243 + return ret; 
244 + } 245 + 246 + static int vmw_cmd_end_query(struct vmw_private *dev_priv, 247 + struct vmw_sw_context *sw_context, 248 + SVGA3dCmdHeader *header) 249 + { 250 + struct vmw_dma_buffer *vmw_bo; 251 + struct vmw_query_cmd { 252 + SVGA3dCmdHeader header; 253 + SVGA3dCmdEndQuery q; 254 + } *cmd; 255 + int ret; 256 + 257 + cmd = container_of(header, struct vmw_query_cmd, header); 258 + ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 259 + if (unlikely(ret != 0)) 260 + return ret; 261 + 262 + ret = vmw_translate_guest_ptr(dev_priv, sw_context, 263 + &cmd->q.guestResult, 264 + &vmw_bo); 265 + if (unlikely(ret != 0)) 266 + return ret; 267 + 268 + vmw_dmabuf_unreference(&vmw_bo); 269 + return 0; 270 + } 271 + 272 + static int vmw_cmd_wait_query(struct vmw_private *dev_priv, 273 + struct vmw_sw_context *sw_context, 274 + SVGA3dCmdHeader *header) 275 + { 276 + struct vmw_dma_buffer *vmw_bo; 277 + struct vmw_query_cmd { 278 + SVGA3dCmdHeader header; 279 + SVGA3dCmdWaitForQuery q; 280 + } *cmd; 281 + int ret; 282 + 283 + cmd = container_of(header, struct vmw_query_cmd, header); 284 + ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 285 + if (unlikely(ret != 0)) 286 + return ret; 287 + 288 + ret = vmw_translate_guest_ptr(dev_priv, sw_context, 289 + &cmd->q.guestResult, 290 + &vmw_bo); 291 + if (unlikely(ret != 0)) 292 + return ret; 293 + 294 + vmw_dmabuf_unreference(&vmw_bo); 295 + return 0; 296 + } 297 + 298 + 299 + static int vmw_cmd_dma(struct vmw_private *dev_priv, 300 + struct vmw_sw_context *sw_context, 301 + SVGA3dCmdHeader *header) 302 + { 303 + struct vmw_dma_buffer *vmw_bo = NULL; 304 + struct ttm_buffer_object *bo; 305 + struct vmw_surface *srf = NULL; 306 + struct vmw_dma_cmd { 307 + SVGA3dCmdHeader header; 308 + SVGA3dCmdSurfaceDMA dma; 309 + } *cmd; 310 + int ret; 311 + 312 + cmd = container_of(header, struct vmw_dma_cmd, header); 313 + ret = vmw_translate_guest_ptr(dev_priv, sw_context, 314 + &cmd->dma.guest.ptr, 315 + &vmw_bo); 316 + if 
(unlikely(ret != 0)) 317 + return ret; 318 + 319 + bo = &vmw_bo->base; 232 320 ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, 233 321 cmd->dma.host.sid, &srf); 234 322 if (ret) { ··· 455 379 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), 456 380 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), 457 381 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), 458 - VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check), 459 - VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check), 382 + VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), 383 + VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), 460 384 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), 461 385 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, 462 386 &vmw_cmd_blt_surf_screen_check)
+3
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
··· 559 559 info->pixmap.scan_align = 1; 560 560 #endif 561 561 562 + info->aperture_base = vmw_priv->vram_start; 563 + info->aperture_size = vmw_priv->vram_size; 564 + 562 565 /* 563 566 * Dirty & Deferred IO 564 567 */
+1 -1
drivers/gpu/vga/vgaarb.c
··· 961 961 remaining -= 7; 962 962 pr_devel("client 0x%p called 'target'\n", priv); 963 963 /* if target is default */ 964 - if (!strncmp(kbuf, "default", 7)) 964 + if (!strncmp(curr_pos, "default", 7)) 965 965 pdev = pci_dev_get(vga_default_device()); 966 966 else { 967 967 if (!vga_pci_str_to_vars(curr_pos, remaining,
+50 -4
drivers/hid/Kconfig
··· 55 55 menu "Special HID drivers" 56 56 depends on HID 57 57 58 + config HID_3M_PCT 59 + tristate "3M PCT" 60 + depends on USB_HID 61 + ---help--- 62 + Support for 3M PCT touch screens. 63 + 58 64 config HID_A4TECH 59 65 tristate "A4 tech" if EMBEDDED 60 66 depends on USB_HID ··· 189 183 Say Y here if you want to enable force feedback support for Logitech 190 184 Rumblepad 2 devices. 191 185 186 + config LOGIG940_FF 187 + bool "Logitech Flight System G940 force feedback support" 188 + depends on HID_LOGITECH 189 + select INPUT_FF_MEMLESS 190 + help 191 + Say Y here if you want to enable force feedback support for Logitech 192 + Flight System G940 devices. 193 + 194 + config HID_MAGICMOUSE 195 + tristate "Apple MagicMouse multi-touch support" 196 + depends on BT_HIDP 197 + ---help--- 198 + Support for the Apple Magic Mouse multi-touch. 199 + 200 + Say Y here if you want support for the multi-touch features of the 201 + Apple Wireless "Magic" Mouse. 202 + 192 203 config HID_MICROSOFT 193 204 tristate "Microsoft" if EMBEDDED 194 205 depends on USB_HID 195 206 default !EMBEDDED 196 207 ---help--- 197 208 Support for Microsoft devices that are not fully compliant with HID standard. 209 + 210 + config HID_MOSART 211 + tristate "MosArt" 212 + depends on USB_HID 213 + ---help--- 214 + Support for MosArt dual-touch panels. 198 215 199 216 config HID_MONTEREY 200 217 tristate "Monterey" if EMBEDDED ··· 227 198 Support for Monterey Genius KB29E. 228 199 229 200 config HID_NTRIG 230 - tristate "NTrig" if EMBEDDED 201 + tristate "NTrig" 202 + depends on USB_HID 203 + ---help--- 204 + Support for N-Trig touch screen. 205 + 206 + config HID_ORTEK 207 + tristate "Ortek" if EMBEDDED 231 208 depends on USB_HID 232 209 default !EMBEDDED 233 210 ---help--- 234 - Support for N-Trig touch screen. 211 + Support for Ortek WKB-2000 wireless keyboard + mouse trackpad. 
235 212 236 213 config HID_PANTHERLORD 237 214 tristate "Pantherlord support" if EMBEDDED ··· 262 227 ---help--- 263 228 Support for Petalynx Maxter remote control. 264 229 230 + config HID_QUANTA 231 + tristate "Quanta Optical Touch" 232 + depends on USB_HID 233 + ---help--- 234 + Support for Quanta Optical Touch dual-touch panels. 235 + 265 236 config HID_SAMSUNG 266 237 tristate "Samsung" if EMBEDDED 267 238 depends on USB_HID ··· 281 240 default !EMBEDDED 282 241 ---help--- 283 242 Support for Sony PS3 controller. 243 + 244 + config HID_STANTUM 245 + tristate "Stantum" 246 + depends on USB_HID 247 + ---help--- 248 + Support for Stantum multitouch panel. 284 249 285 250 config HID_SUNPLUS 286 251 tristate "Sunplus" if EMBEDDED ··· 352 305 Rumble Force or Force Feedback Wheel. 353 306 354 307 config HID_WACOM 355 - tristate "Wacom Bluetooth devices support" if EMBEDDED 308 + tristate "Wacom Bluetooth devices support" 356 309 depends on BT_HIDP 357 - default !EMBEDDED 358 310 ---help--- 359 311 Support for Wacom Graphire Bluetooth tablet. 360 312
+9
drivers/hid/Makefile
··· 18 18 ifdef CONFIG_LOGIRUMBLEPAD2_FF 19 19 hid-logitech-objs += hid-lg2ff.o 20 20 endif 21 + ifdef CONFIG_LOGIG940_FF 22 + hid-logitech-objs += hid-lg3ff.o 23 + endif 21 24 25 + obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o 22 26 obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o 23 27 obj-$(CONFIG_HID_APPLE) += hid-apple.o 24 28 obj-$(CONFIG_HID_BELKIN) += hid-belkin.o ··· 35 31 obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o 36 32 obj-$(CONFIG_HID_KYE) += hid-kye.o 37 33 obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o 34 + obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o 38 35 obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o 39 36 obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o 37 + obj-$(CONFIG_HID_MOSART) += hid-mosart.o 40 38 obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o 39 + obj-$(CONFIG_HID_ORTEK) += hid-ortek.o 40 + obj-$(CONFIG_HID_QUANTA) += hid-quanta.o 41 41 obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o 42 42 obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o 43 43 obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o 44 44 obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o 45 45 obj-$(CONFIG_HID_SONY) += hid-sony.o 46 + obj-$(CONFIG_HID_STANTUM) += hid-stantum.o 46 47 obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o 47 48 obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o 48 49 obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
+290
drivers/hid/hid-3m-pct.c
··· 1 + /* 2 + * HID driver for 3M PCT multitouch panels 3 + * 4 + * Copyright (c) 2009 Stephane Chatty <chatty@enac.fr> 5 + * 6 + */ 7 + 8 + /* 9 + * This program is free software; you can redistribute it and/or modify it 10 + * under the terms of the GNU General Public License as published by the Free 11 + * Software Foundation; either version 2 of the License, or (at your option) 12 + * any later version. 13 + */ 14 + 15 + #include <linux/device.h> 16 + #include <linux/hid.h> 17 + #include <linux/module.h> 18 + #include <linux/usb.h> 19 + 20 + MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); 21 + MODULE_DESCRIPTION("3M PCT multitouch panels"); 22 + MODULE_LICENSE("GPL"); 23 + 24 + #include "hid-ids.h" 25 + 26 + struct mmm_finger { 27 + __s32 x, y; 28 + __u8 rank; 29 + bool touch, valid; 30 + }; 31 + 32 + struct mmm_data { 33 + struct mmm_finger f[10]; 34 + __u8 curid, num; 35 + bool touch, valid; 36 + }; 37 + 38 + static int mmm_input_mapping(struct hid_device *hdev, struct hid_input *hi, 39 + struct hid_field *field, struct hid_usage *usage, 40 + unsigned long **bit, int *max) 41 + { 42 + switch (usage->hid & HID_USAGE_PAGE) { 43 + 44 + case HID_UP_BUTTON: 45 + return -1; 46 + 47 + case HID_UP_GENDESK: 48 + switch (usage->hid) { 49 + case HID_GD_X: 50 + hid_map_usage(hi, usage, bit, max, 51 + EV_ABS, ABS_MT_POSITION_X); 52 + /* touchscreen emulation */ 53 + input_set_abs_params(hi->input, ABS_X, 54 + field->logical_minimum, 55 + field->logical_maximum, 0, 0); 56 + return 1; 57 + case HID_GD_Y: 58 + hid_map_usage(hi, usage, bit, max, 59 + EV_ABS, ABS_MT_POSITION_Y); 60 + /* touchscreen emulation */ 61 + input_set_abs_params(hi->input, ABS_Y, 62 + field->logical_minimum, 63 + field->logical_maximum, 0, 0); 64 + return 1; 65 + } 66 + return 0; 67 + 68 + case HID_UP_DIGITIZER: 69 + switch (usage->hid) { 70 + /* we do not want to map these: no input-oriented meaning */ 71 + case 0x14: 72 + case 0x23: 73 + case HID_DG_INPUTMODE: 74 + case HID_DG_DEVICEINDEX: 75 + 
case HID_DG_CONTACTCOUNT: 76 + case HID_DG_CONTACTMAX: 77 + case HID_DG_INRANGE: 78 + case HID_DG_CONFIDENCE: 79 + return -1; 80 + case HID_DG_TIPSWITCH: 81 + /* touchscreen emulation */ 82 + hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); 83 + return 1; 84 + case HID_DG_CONTACTID: 85 + hid_map_usage(hi, usage, bit, max, 86 + EV_ABS, ABS_MT_TRACKING_ID); 87 + return 1; 88 + } 89 + /* let hid-input decide for the others */ 90 + return 0; 91 + 92 + case 0xff000000: 93 + /* we do not want to map these: no input-oriented meaning */ 94 + return -1; 95 + } 96 + 97 + return 0; 98 + } 99 + 100 + static int mmm_input_mapped(struct hid_device *hdev, struct hid_input *hi, 101 + struct hid_field *field, struct hid_usage *usage, 102 + unsigned long **bit, int *max) 103 + { 104 + if (usage->type == EV_KEY || usage->type == EV_ABS) 105 + clear_bit(usage->code, *bit); 106 + 107 + return 0; 108 + } 109 + 110 + /* 111 + * this function is called when a whole packet has been received and processed, 112 + * so that it can decide what to send to the input layer. 113 + */ 114 + static void mmm_filter_event(struct mmm_data *md, struct input_dev *input) 115 + { 116 + struct mmm_finger *oldest = 0; 117 + bool pressed = false, released = false; 118 + int i; 119 + 120 + /* 121 + * we need to iterate on all fingers to decide if we have a press 122 + * or a release event in our touchscreen emulation. 
123 + */ 124 + for (i = 0; i < 10; ++i) { 125 + struct mmm_finger *f = &md->f[i]; 126 + if (!f->valid) { 127 + /* this finger is just placeholder data, ignore */ 128 + } else if (f->touch) { 129 + /* this finger is on the screen */ 130 + input_event(input, EV_ABS, ABS_MT_TRACKING_ID, i); 131 + input_event(input, EV_ABS, ABS_MT_POSITION_X, f->x); 132 + input_event(input, EV_ABS, ABS_MT_POSITION_Y, f->y); 133 + input_mt_sync(input); 134 + /* 135 + * touchscreen emulation: maintain the age rank 136 + * of this finger, decide if we have a press 137 + */ 138 + if (f->rank == 0) { 139 + f->rank = ++(md->num); 140 + if (f->rank == 1) 141 + pressed = true; 142 + } 143 + if (f->rank == 1) 144 + oldest = f; 145 + } else { 146 + /* this finger took off the screen */ 147 + /* touchscreen emulation: maintain age rank of others */ 148 + int j; 149 + 150 + for (j = 0; j < 10; ++j) { 151 + struct mmm_finger *g = &md->f[j]; 152 + if (g->rank > f->rank) { 153 + g->rank--; 154 + if (g->rank == 1) 155 + oldest = g; 156 + } 157 + } 158 + f->rank = 0; 159 + --(md->num); 160 + if (md->num == 0) 161 + released = true; 162 + } 163 + f->valid = 0; 164 + } 165 + 166 + /* touchscreen emulation */ 167 + if (oldest) { 168 + if (pressed) 169 + input_event(input, EV_KEY, BTN_TOUCH, 1); 170 + input_event(input, EV_ABS, ABS_X, oldest->x); 171 + input_event(input, EV_ABS, ABS_Y, oldest->y); 172 + } else if (released) { 173 + input_event(input, EV_KEY, BTN_TOUCH, 0); 174 + } 175 + } 176 + 177 + /* 178 + * this function is called upon all reports 179 + * so that we can accumulate contact point information, 180 + * and call input_mt_sync after each point. 181 + */ 182 + static int mmm_event(struct hid_device *hid, struct hid_field *field, 183 + struct hid_usage *usage, __s32 value) 184 + { 185 + struct mmm_data *md = hid_get_drvdata(hid); 186 + /* 187 + * strangely, this function can be called before 188 + * field->hidinput is initialized! 
189 + */ 190 + if (hid->claimed & HID_CLAIMED_INPUT) { 191 + struct input_dev *input = field->hidinput->input; 192 + switch (usage->hid) { 193 + case HID_DG_TIPSWITCH: 194 + md->touch = value; 195 + break; 196 + case HID_DG_CONFIDENCE: 197 + md->valid = value; 198 + break; 199 + case HID_DG_CONTACTID: 200 + if (md->valid) { 201 + md->curid = value; 202 + md->f[value].touch = md->touch; 203 + md->f[value].valid = 1; 204 + } 205 + break; 206 + case HID_GD_X: 207 + if (md->valid) 208 + md->f[md->curid].x = value; 209 + break; 210 + case HID_GD_Y: 211 + if (md->valid) 212 + md->f[md->curid].y = value; 213 + break; 214 + case HID_DG_CONTACTCOUNT: 215 + mmm_filter_event(md, input); 216 + break; 217 + } 218 + } 219 + 220 + /* we have handled the hidinput part, now remains hiddev */ 221 + if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) 222 + hid->hiddev_hid_event(hid, field, usage, value); 223 + 224 + return 1; 225 + } 226 + 227 + static int mmm_probe(struct hid_device *hdev, const struct hid_device_id *id) 228 + { 229 + int ret; 230 + struct mmm_data *md; 231 + 232 + md = kzalloc(sizeof(struct mmm_data), GFP_KERNEL); 233 + if (!md) { 234 + dev_err(&hdev->dev, "cannot allocate 3M data\n"); 235 + return -ENOMEM; 236 + } 237 + hid_set_drvdata(hdev, md); 238 + 239 + ret = hid_parse(hdev); 240 + if (!ret) 241 + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 242 + 243 + if (ret) 244 + kfree(md); 245 + return ret; 246 + } 247 + 248 + static void mmm_remove(struct hid_device *hdev) 249 + { 250 + hid_hw_stop(hdev); 251 + kfree(hid_get_drvdata(hdev)); 252 + hid_set_drvdata(hdev, NULL); 253 + } 254 + 255 + static const struct hid_device_id mmm_devices[] = { 256 + { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) }, 257 + { } 258 + }; 259 + MODULE_DEVICE_TABLE(hid, mmm_devices); 260 + 261 + static const struct hid_usage_id mmm_grabbed_usages[] = { 262 + { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, 263 + { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} 264 + }; 265 
+ 266 + static struct hid_driver mmm_driver = { 267 + .name = "3m-pct", 268 + .id_table = mmm_devices, 269 + .probe = mmm_probe, 270 + .remove = mmm_remove, 271 + .input_mapping = mmm_input_mapping, 272 + .input_mapped = mmm_input_mapped, 273 + .usage_table = mmm_grabbed_usages, 274 + .event = mmm_event, 275 + }; 276 + 277 + static int __init mmm_init(void) 278 + { 279 + return hid_register_driver(&mmm_driver); 280 + } 281 + 282 + static void __exit mmm_exit(void) 283 + { 284 + hid_unregister_driver(&mmm_driver); 285 + } 286 + 287 + module_init(mmm_init); 288 + module_exit(mmm_exit); 289 + MODULE_LICENSE("GPL"); 290 +
+12 -5
drivers/hid/hid-apple.c
··· 40 40 MODULE_PARM_DESC(fnmode, "Mode of fn key on Apple keyboards (0 = disabled, " 41 41 "[1] = fkeyslast, 2 = fkeysfirst)"); 42 42 43 + static unsigned int iso_layout = 1; 44 + module_param(iso_layout, uint, 0644); 45 + MODULE_PARM_DESC(iso_layout, "Enable/Disable hardcoded ISO-layout of the keyboard. " 46 + "(0 = disabled, [1] = enabled)"); 47 + 43 48 struct apple_sc { 44 49 unsigned long quirks; 45 50 unsigned int fn_on; ··· 204 199 } 205 200 } 206 201 207 - if (asc->quirks & APPLE_ISO_KEYBOARD) { 208 - trans = apple_find_translation(apple_iso_keyboard, usage->code); 209 - if (trans) { 210 - input_event(input, usage->type, trans->to, value); 211 - return 1; 202 + if (iso_layout) { 203 + if (asc->quirks & APPLE_ISO_KEYBOARD) { 204 + trans = apple_find_translation(apple_iso_keyboard, usage->code); 205 + if (trans) { 206 + input_event(input, usage->type, trans->to, value); 207 + return 1; 208 + } 212 209 } 213 210 } 214 211
+16 -7
drivers/hid/hid-core.c
··· 4 4 * Copyright (c) 1999 Andreas Gal 5 5 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> 6 6 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc 7 - * Copyright (c) 2006-2007 Jiri Kosina 7 + * Copyright (c) 2006-2010 Jiri Kosina 8 8 */ 9 9 10 10 /* ··· 51 51 * Register a new report for a device. 52 52 */ 53 53 54 - static struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id) 54 + struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id) 55 55 { 56 56 struct hid_report_enum *report_enum = device->report_enum + type; 57 57 struct hid_report *report; ··· 75 75 76 76 return report; 77 77 } 78 + EXPORT_SYMBOL_GPL(hid_register_report); 78 79 79 80 /* 80 81 * Register a new field for this report. ··· 388 387 __u32 data; 389 388 unsigned n; 390 389 391 - if (item->size == 0) { 390 + /* Local delimiter could have value 0, which allows size to be 0 */ 391 + if (item->size == 0 && item->tag != HID_LOCAL_ITEM_TAG_DELIMITER) { 392 392 dbg_hid("item data expected for local item\n"); 393 393 return -1; 394 394 } ··· 1250 1248 1251 1249 /* a list of devices for which there is a specialized driver on HID bus */ 1252 1250 static const struct hid_device_id hid_blacklist[] = { 1251 + { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) }, 1253 1252 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, 1254 1253 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, 1255 1254 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, 1256 1255 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, 1257 1256 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, 1257 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, 1258 1258 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, 1259 1259 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 
USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, 1260 1260 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, ··· 1328 1324 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) }, 1329 1325 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) }, 1330 1326 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) }, 1327 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) }, 1331 1328 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) }, 1332 1329 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) }, 1333 1330 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, ··· 1342 1337 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, 1343 1338 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, 1344 1339 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, 1340 + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, 1345 1341 { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, 1342 + { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) }, 1343 + { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) }, 1346 1344 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, 1347 1345 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, 1346 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, 1348 1347 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, 1348 + { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) }, 1349 1349 { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, 1350 1350 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, 1351 1351 { 
HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, ··· 1553 1543 { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) }, 1554 1544 { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, 1555 1545 { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, 1556 - { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM)}, 1557 - { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM2)}, 1546 + { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)}, 1547 + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)}, 1548 + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)}, 1558 1549 { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, 1559 1550 { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) }, 1560 1551 { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, ··· 1672 1661 { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) }, 1673 1662 { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, 1674 1663 { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, 1675 - { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY1) }, 1676 - { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY2) }, 1677 1664 { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) }, 1678 1665 { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) }, 1679 1666 { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
+3 -3
drivers/hid/hid-debug.c
··· 864 864 [EV_SND] = sounds, [EV_REP] = repeats, 865 865 }; 866 866 867 - void hid_resolv_event(__u8 type, __u16 code, struct seq_file *f) { 868 - 867 + static void hid_resolv_event(__u8 type, __u16 code, struct seq_file *f) 868 + { 869 869 seq_printf(f, "%s.%s", events[type] ? events[type] : "?", 870 870 names[type] ? (names[type][code] ? names[type][code] : "?") : "?"); 871 871 } 872 872 873 - void hid_dump_input_mapping(struct hid_device *hid, struct seq_file *f) 873 + static void hid_dump_input_mapping(struct hid_device *hid, struct seq_file *f) 874 874 { 875 875 int i, j, k; 876 876 struct hid_report *report;
+30 -7
drivers/hid/hid-ids.h
··· 18 18 #ifndef HID_IDS_H_FILE 19 19 #define HID_IDS_H_FILE 20 20 21 + #define USB_VENDOR_ID_3M 0x0596 22 + #define USB_DEVICE_ID_3M1968 0x0500 23 + 21 24 #define USB_VENDOR_ID_A4TECH 0x09da 22 25 #define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006 23 26 #define USB_DEVICE_ID_A4TECH_X5_005D 0x000a ··· 59 56 60 57 #define USB_VENDOR_ID_APPLE 0x05ac 61 58 #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 59 + #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d 62 60 #define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e 63 61 #define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f 64 62 #define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214 ··· 100 96 #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 101 97 #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 102 98 103 - #define USB_VENDOR_ID_ASUS 0x0b05 104 - #define USB_DEVICE_ID_ASUS_LCM 0x1726 105 - #define USB_DEVICE_ID_ASUS_LCM2 0x175b 99 + #define USB_VENDOR_ID_ASUS 0x0486 100 + #define USB_DEVICE_ID_ASUS_T91MT 0x0185 101 + 102 + #define USB_VENDOR_ID_ASUSTEK 0x0b05 103 + #define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 104 + #define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b 106 105 107 106 #define USB_VENDOR_ID_ATEN 0x0557 108 107 #define USB_DEVICE_ID_ATEN_UC100KM 0x2004 ··· 175 168 176 169 #define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f 177 170 #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100 171 + 172 + #define USB_VENDOR_ID_ETURBOTOUCH 0x22b9 173 + #define USB_DEVICE_ID_ETURBOTOUCH 0x0006 178 174 179 175 #define USB_VENDOR_ID_ETT 0x0664 180 176 #define USB_DEVICE_ID_TC5UH 0x0309 ··· 313 303 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219 314 304 #define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283 315 305 #define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO 0xc286 306 + #define USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940 0xc287 316 307 #define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294 317 308 #define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293 318 309 #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295 ··· 376 365 #define USB_VENDOR_ID_ONTRAK 0x0a07 377 366 #define 
USB_DEVICE_ID_ONTRAK_ADU100 0x0064 378 367 368 + #define USB_VENDOR_ID_ORTEK 0x05a4 369 + #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 370 + 379 371 #define USB_VENDOR_ID_PANJIT 0x134c 380 372 381 373 #define USB_VENDOR_ID_PANTHERLORD 0x0810 ··· 396 382 #define USB_VENDOR_ID_POWERCOM 0x0d9f 397 383 #define USB_DEVICE_ID_POWERCOM_UPS 0x0002 398 384 385 + #define USB_VENDOR_ID_PRODIGE 0x05af 386 + #define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062 387 + 399 388 #define USB_VENDOR_ID_SAITEK 0x06a3 400 389 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 390 + 391 + #define USB_VENDOR_ID_QUANTA 0x0408 392 + #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000 393 + #define USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN 0x3001 401 394 402 395 #define USB_VENDOR_ID_SAMSUNG 0x0419 403 396 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 ··· 417 396 #define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST 0x0034 418 397 #define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST 0x0046 419 398 399 + #define USB_VENDOR_ID_STANTUM 0x1f87 400 + #define USB_DEVICE_ID_MTP 0x0002 401 + 420 402 #define USB_VENDOR_ID_SUN 0x0430 421 403 #define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab 422 404 423 405 #define USB_VENDOR_ID_SUNPLUS 0x04fc 424 406 #define USB_DEVICE_ID_SUNPLUS_WDESKTOP 0x05d8 425 407 426 - #define USB_VENDOR_ID_TENX 0x1130 427 - #define USB_DEVICE_ID_TENX_IBUDDY1 0x0001 428 - #define USB_DEVICE_ID_TENX_IBUDDY2 0x0002 429 - 430 408 #define USB_VENDOR_ID_THRUSTMASTER 0x044f 409 + 410 + #define USB_VENDOR_ID_TOUCHPACK 0x1bfd 411 + #define USB_DEVICE_ID_TOUCHPACK_RTS 0x1688 431 412 432 413 #define USB_VENDOR_ID_TOPMAX 0x0663 433 414 #define USB_DEVICE_ID_TOPMAX_COBRAPAD 0x0103
+9 -3
drivers/hid/hid-input.c
··· 1 1 /* 2 2 * Copyright (c) 2000-2001 Vojtech Pavlik 3 - * Copyright (c) 2006-2007 Jiri Kosina 3 + * Copyright (c) 2006-2010 Jiri Kosina 4 4 * 5 5 * HID to Linux Input mapping 6 6 */ ··· 193 193 break; 194 194 195 195 case HID_UP_BUTTON: 196 - code = ((usage->hid - 1) & 0xf); 196 + code = ((usage->hid - 1) & HID_USAGE); 197 197 198 198 switch (field->application) { 199 199 case HID_GD_MOUSE: 200 200 case HID_GD_POINTER: code += 0x110; break; 201 - case HID_GD_JOYSTICK: code += 0x120; break; 201 + case HID_GD_JOYSTICK: 202 + if (code <= 0xf) 203 + code += BTN_JOYSTICK; 204 + else 205 + code += BTN_TRIGGER_HAPPY; 206 + break; 202 207 case HID_GD_GAMEPAD: code += 0x130; break; 203 208 default: 204 209 switch (field->physical) { ··· 405 400 case 0x192: map_key_clear(KEY_CALC); break; 406 401 case 0x194: map_key_clear(KEY_FILE); break; 407 402 case 0x196: map_key_clear(KEY_WWW); break; 403 + case 0x199: map_key_clear(KEY_CHAT); break; 408 404 case 0x19c: map_key_clear(KEY_LOGOFF); break; 409 405 case 0x19e: map_key_clear(KEY_COFFEE); break; 410 406 case 0x1a6: map_key_clear(KEY_HELP); break;
+6 -1
drivers/hid/hid-lg.c
··· 34 34 #define LG_FF 0x200 35 35 #define LG_FF2 0x400 36 36 #define LG_RDESC_REL_ABS 0x800 37 + #define LG_FF3 0x1000 37 38 38 39 /* 39 40 * Certain Logitech keyboards send in report #3 keys which are far ··· 267 266 goto err_free; 268 267 } 269 268 270 - if (quirks & (LG_FF | LG_FF2)) 269 + if (quirks & (LG_FF | LG_FF2 | LG_FF3)) 271 270 connect_mask &= ~HID_CONNECT_FF; 272 271 273 272 ret = hid_hw_start(hdev, connect_mask); ··· 280 279 lgff_init(hdev); 281 280 if (quirks & LG_FF2) 282 281 lg2ff_init(hdev); 282 + if (quirks & LG_FF3) 283 + lg3ff_init(hdev); 283 284 284 285 return 0; 285 286 err_free: ··· 334 331 .driver_data = LG_FF }, 335 332 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), 336 333 .driver_data = LG_FF2 }, 334 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940), 335 + .driver_data = LG_FF3 }, 337 336 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR), 338 337 .driver_data = LG_RDESC_REL_ABS }, 339 338 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER),
+6
drivers/hid/hid-lg.h
··· 13 13 static inline int lg2ff_init(struct hid_device *hdev) { return -1; } 14 14 #endif 15 15 16 + #ifdef CONFIG_LOGIG940_FF 17 + int lg3ff_init(struct hid_device *hdev); 18 + #else 19 + static inline int lg3ff_init(struct hid_device *hdev) { return -1; } 20 + #endif 21 + 16 22 #endif
+176
drivers/hid/hid-lg3ff.c
··· 1 + /* 2 + * Force feedback support for Logitech Flight System G940 3 + * 4 + * Copyright (c) 2009 Gary Stein <LordCnidarian@gmail.com> 5 + */ 6 + 7 + /* 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + * 18 + * You should have received a copy of the GNU General Public License 19 + * along with this program; if not, write to the Free Software 20 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 + */ 22 + 23 + 24 + #include <linux/input.h> 25 + #include <linux/usb.h> 26 + #include <linux/hid.h> 27 + 28 + #include "usbhid/usbhid.h" 29 + #include "hid-lg.h" 30 + 31 + /* 32 + * G940 Theory of Operation (from experimentation) 33 + * 34 + * There are 63 fields (only 3 of them currently used) 35 + * 0 - seems to be command field 36 + * 1 - 30 deal with the x axis 37 + * 31 -60 deal with the y axis 38 + * 39 + * Field 1 is x axis constant force 40 + * Field 31 is y axis constant force 41 + * 42 + * other interesting fields 1,2,3,4 on x axis 43 + * (same for 31,32,33,34 on y axis) 44 + * 45 + * 0 0 127 127 makes the joystick autocenter hard 46 + * 47 + * 127 0 127 127 makes the joystick loose on the right, 48 + * but stops all movemnt left 49 + * 50 + * -127 0 -127 -127 makes the joystick loose on the left, 51 + * but stops all movement right 52 + * 53 + * 0 0 -127 -127 makes the joystick rattle very hard 54 + * 55 + * I'm sure these are effects that I don't know enough about them 56 + */ 57 + 58 + struct lg3ff_device { 59 + struct hid_report *report; 60 + }; 
61 + 62 + static int hid_lg3ff_play(struct input_dev *dev, void *data, 63 + struct ff_effect *effect) 64 + { 65 + struct hid_device *hid = input_get_drvdata(dev); 66 + struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; 67 + struct hid_report *report = list_entry(report_list->next, struct hid_report, list); 68 + int x, y; 69 + 70 + /* 71 + * Maxusage should always be 63 (maximum fields) 72 + * likely a better way to ensure this data is clean 73 + */ 74 + memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage); 75 + 76 + switch (effect->type) { 77 + case FF_CONSTANT: 78 + /* 79 + * Already clamped in ff_memless 80 + * 0 is center (different then other logitech) 81 + */ 82 + x = effect->u.ramp.start_level; 83 + y = effect->u.ramp.end_level; 84 + 85 + /* send command byte */ 86 + report->field[0]->value[0] = 0x51; 87 + 88 + /* 89 + * Sign backwards from other Force3d pro 90 + * which get recast here in two's complement 8 bits 91 + */ 92 + report->field[0]->value[1] = (unsigned char)(-x); 93 + report->field[0]->value[31] = (unsigned char)(-y); 94 + 95 + usbhid_submit_report(hid, report, USB_DIR_OUT); 96 + break; 97 + } 98 + return 0; 99 + } 100 + static void hid_lg3ff_set_autocenter(struct input_dev *dev, u16 magnitude) 101 + { 102 + struct hid_device *hid = input_get_drvdata(dev); 103 + struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; 104 + struct hid_report *report = list_entry(report_list->next, struct hid_report, list); 105 + 106 + /* 107 + * Auto Centering probed from device 108 + * NOTE: deadman's switch on G940 must be covered 109 + * for effects to work 110 + */ 111 + report->field[0]->value[0] = 0x51; 112 + report->field[0]->value[1] = 0x00; 113 + report->field[0]->value[2] = 0x00; 114 + report->field[0]->value[3] = 0x7F; 115 + report->field[0]->value[4] = 0x7F; 116 + report->field[0]->value[31] = 0x00; 117 + report->field[0]->value[32] = 0x00; 118 + report->field[0]->value[33] 
= 0x7F; 119 + report->field[0]->value[34] = 0x7F; 120 + 121 + usbhid_submit_report(hid, report, USB_DIR_OUT); 122 + } 123 + 124 + 125 + static const signed short ff3_joystick_ac[] = { 126 + FF_CONSTANT, 127 + FF_AUTOCENTER, 128 + -1 129 + }; 130 + 131 + int lg3ff_init(struct hid_device *hid) 132 + { 133 + struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); 134 + struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; 135 + struct input_dev *dev = hidinput->input; 136 + struct hid_report *report; 137 + struct hid_field *field; 138 + const signed short *ff_bits = ff3_joystick_ac; 139 + int error; 140 + int i; 141 + 142 + /* Find the report to use */ 143 + if (list_empty(report_list)) { 144 + err_hid("No output report found"); 145 + return -1; 146 + } 147 + 148 + /* Check that the report looks ok */ 149 + report = list_entry(report_list->next, struct hid_report, list); 150 + if (!report) { 151 + err_hid("NULL output report"); 152 + return -1; 153 + } 154 + 155 + field = report->field[0]; 156 + if (!field) { 157 + err_hid("NULL field"); 158 + return -1; 159 + } 160 + 161 + /* Assume single fixed device G940 */ 162 + for (i = 0; ff_bits[i] >= 0; i++) 163 + set_bit(ff_bits[i], dev->ffbit); 164 + 165 + error = input_ff_create_memless(dev, NULL, hid_lg3ff_play); 166 + if (error) 167 + return error; 168 + 169 + if (test_bit(FF_AUTOCENTER, dev->ffbit)) 170 + dev->ff->set_autocenter = hid_lg3ff_set_autocenter; 171 + 172 + dev_info(&hid->dev, "Force feedback for Logitech Flight System G940 by " 173 + "Gary Stein <LordCnidarian@gmail.com>\n"); 174 + return 0; 175 + } 176 +
+1
drivers/hid/hid-lgff.c
··· 67 67 { 0x046d, 0xc219, ff_rumble }, 68 68 { 0x046d, 0xc283, ff_joystick }, 69 69 { 0x046d, 0xc286, ff_joystick_ac }, 70 + { 0x046d, 0xc287, ff_joystick_ac }, 70 71 { 0x046d, 0xc293, ff_joystick }, 71 72 { 0x046d, 0xc294, ff_wheel }, 72 73 { 0x046d, 0xc295, ff_joystick },
+449
drivers/hid/hid-magicmouse.c
··· 1 + /* 2 + * Apple "Magic" Wireless Mouse driver 3 + * 4 + * Copyright (c) 2010 Michael Poole <mdpoole@troilus.org> 5 + */ 6 + 7 + /* 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms of the GNU General Public License as published by the Free 10 + * Software Foundation; either version 2 of the License, or (at your option) 11 + * any later version. 12 + */ 13 + 14 + #include <linux/device.h> 15 + #include <linux/hid.h> 16 + #include <linux/module.h> 17 + #include <linux/usb.h> 18 + 19 + #include "hid-ids.h" 20 + 21 + static bool emulate_3button = true; 22 + module_param(emulate_3button, bool, 0644); 23 + MODULE_PARM_DESC(emulate_3button, "Emulate a middle button"); 24 + 25 + static int middle_button_start = -350; 26 + static int middle_button_stop = +350; 27 + 28 + static bool emulate_scroll_wheel = true; 29 + module_param(emulate_scroll_wheel, bool, 0644); 30 + MODULE_PARM_DESC(emulate_scroll_wheel, "Emulate a scroll wheel"); 31 + 32 + static bool report_touches = true; 33 + module_param(report_touches, bool, 0644); 34 + MODULE_PARM_DESC(report_touches, "Emit touch records (otherwise, only use them for emulation)"); 35 + 36 + static bool report_undeciphered; 37 + module_param(report_undeciphered, bool, 0644); 38 + MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event"); 39 + 40 + #define TOUCH_REPORT_ID 0x29 41 + /* These definitions are not precise, but they're close enough. (Bits 42 + * 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem 43 + * to be some kind of bit mask -- 0x20 may be a near-field reading, 44 + * and 0x40 is actual contact, and 0x10 may be a start/stop or change 45 + * indication.) 46 + */ 47 + #define TOUCH_STATE_MASK 0xf0 48 + #define TOUCH_STATE_NONE 0x00 49 + #define TOUCH_STATE_START 0x30 50 + #define TOUCH_STATE_DRAG 0x40 51 + 52 + /** 53 + * struct magicmouse_sc - Tracks Magic Mouse-specific data. 
54 + * @input: Input device through which we report events. 55 + * @quirks: Currently unused. 56 + * @last_timestamp: Timestamp from most recent (18-bit) touch report 57 + * (units of milliseconds over short windows, but seems to 58 + * increase faster when there are no touches). 59 + * @delta_time: 18-bit difference between the two most recent touch 60 + * reports from the mouse. 61 + * @ntouches: Number of touches in most recent touch report. 62 + * @scroll_accel: Number of consecutive scroll motions. 63 + * @scroll_jiffies: Time of last scroll motion. 64 + * @touches: Most recent data for a touch, indexed by tracking ID. 65 + * @tracking_ids: Mapping of current touch input data to @touches. 66 + */ 67 + struct magicmouse_sc { 68 + struct input_dev *input; 69 + unsigned long quirks; 70 + 71 + int last_timestamp; 72 + int delta_time; 73 + int ntouches; 74 + int scroll_accel; 75 + unsigned long scroll_jiffies; 76 + 77 + struct { 78 + short x; 79 + short y; 80 + short scroll_y; 81 + u8 size; 82 + } touches[16]; 83 + int tracking_ids[16]; 84 + }; 85 + 86 + static int magicmouse_firm_touch(struct magicmouse_sc *msc) 87 + { 88 + int touch = -1; 89 + int ii; 90 + 91 + /* If there is only one "firm" touch, set touch to its 92 + * tracking ID. 93 + */ 94 + for (ii = 0; ii < msc->ntouches; ii++) { 95 + int idx = msc->tracking_ids[ii]; 96 + if (msc->touches[idx].size < 8) { 97 + /* Ignore this touch. */ 98 + } else if (touch >= 0) { 99 + touch = -1; 100 + break; 101 + } else { 102 + touch = idx; 103 + } 104 + } 105 + 106 + return touch; 107 + } 108 + 109 + static void magicmouse_emit_buttons(struct magicmouse_sc *msc, int state) 110 + { 111 + int last_state = test_bit(BTN_LEFT, msc->input->key) << 0 | 112 + test_bit(BTN_RIGHT, msc->input->key) << 1 | 113 + test_bit(BTN_MIDDLE, msc->input->key) << 2; 114 + 115 + if (emulate_3button) { 116 + int id; 117 + 118 + /* If some button was pressed before, keep it held 119 + * down. 
Otherwise, if there's exactly one firm 120 + * touch, use that to override the mouse's guess. 121 + */ 122 + if (state == 0) { 123 + /* The button was released. */ 124 + } else if (last_state != 0) { 125 + state = last_state; 126 + } else if ((id = magicmouse_firm_touch(msc)) >= 0) { 127 + int x = msc->touches[id].x; 128 + if (x < middle_button_start) 129 + state = 1; 130 + else if (x > middle_button_stop) 131 + state = 2; 132 + else 133 + state = 4; 134 + } /* else: we keep the mouse's guess */ 135 + 136 + input_report_key(msc->input, BTN_MIDDLE, state & 4); 137 + } 138 + 139 + input_report_key(msc->input, BTN_LEFT, state & 1); 140 + input_report_key(msc->input, BTN_RIGHT, state & 2); 141 + 142 + if (state != last_state) 143 + msc->scroll_accel = 0; 144 + } 145 + 146 + static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tdata) 147 + { 148 + struct input_dev *input = msc->input; 149 + __s32 x_y = tdata[0] << 8 | tdata[1] << 16 | tdata[2] << 24; 150 + int misc = tdata[5] | tdata[6] << 8; 151 + int id = (misc >> 6) & 15; 152 + int x = x_y << 12 >> 20; 153 + int y = -(x_y >> 20); 154 + 155 + /* Store tracking ID and other fields. */ 156 + msc->tracking_ids[raw_id] = id; 157 + msc->touches[id].x = x; 158 + msc->touches[id].y = y; 159 + msc->touches[id].size = misc & 63; 160 + 161 + /* If requested, emulate a scroll wheel by detecting small 162 + * vertical touch motions along the middle of the mouse. 163 + */ 164 + if (emulate_scroll_wheel && 165 + middle_button_start < x && x < middle_button_stop) { 166 + static const int accel_profile[] = { 167 + 256, 228, 192, 160, 128, 96, 64, 32, 168 + }; 169 + unsigned long now = jiffies; 170 + int step = msc->touches[id].scroll_y - y; 171 + 172 + /* Reset acceleration after half a second. */ 173 + if (time_after(now, msc->scroll_jiffies + HZ / 2)) 174 + msc->scroll_accel = 0; 175 + 176 + /* Calculate and apply the scroll motion. 
*/ 177 + switch (tdata[7] & TOUCH_STATE_MASK) { 178 + case TOUCH_STATE_START: 179 + msc->touches[id].scroll_y = y; 180 + msc->scroll_accel = min_t(int, msc->scroll_accel + 1, 181 + ARRAY_SIZE(accel_profile) - 1); 182 + break; 183 + case TOUCH_STATE_DRAG: 184 + step = step / accel_profile[msc->scroll_accel]; 185 + if (step != 0) { 186 + msc->touches[id].scroll_y = y; 187 + msc->scroll_jiffies = now; 188 + input_report_rel(input, REL_WHEEL, step); 189 + } 190 + break; 191 + } 192 + } 193 + 194 + /* Generate the input events for this touch. */ 195 + if (report_touches) { 196 + int orientation = (misc >> 10) - 32; 197 + 198 + input_report_abs(input, ABS_MT_TRACKING_ID, id); 199 + input_report_abs(input, ABS_MT_TOUCH_MAJOR, tdata[3]); 200 + input_report_abs(input, ABS_MT_TOUCH_MINOR, tdata[4]); 201 + input_report_abs(input, ABS_MT_ORIENTATION, orientation); 202 + input_report_abs(input, ABS_MT_POSITION_X, x); 203 + input_report_abs(input, ABS_MT_POSITION_Y, y); 204 + 205 + if (report_undeciphered) 206 + input_event(input, EV_MSC, MSC_RAW, tdata[7]); 207 + 208 + input_mt_sync(input); 209 + } 210 + } 211 + 212 + static int magicmouse_raw_event(struct hid_device *hdev, 213 + struct hid_report *report, u8 *data, int size) 214 + { 215 + struct magicmouse_sc *msc = hid_get_drvdata(hdev); 216 + struct input_dev *input = msc->input; 217 + int x, y, ts, ii, clicks; 218 + 219 + switch (data[0]) { 220 + case 0x10: 221 + if (size != 6) 222 + return 0; 223 + x = (__s16)(data[2] | data[3] << 8); 224 + y = (__s16)(data[4] | data[5] << 8); 225 + clicks = data[1]; 226 + break; 227 + case TOUCH_REPORT_ID: 228 + /* Expect six bytes of prefix, and N*8 bytes of touch data. 
*/ 229 + if (size < 6 || ((size - 6) % 8) != 0) 230 + return 0; 231 + ts = data[3] >> 6 | data[4] << 2 | data[5] << 10; 232 + msc->delta_time = (ts - msc->last_timestamp) & 0x3ffff; 233 + msc->last_timestamp = ts; 234 + msc->ntouches = (size - 6) / 8; 235 + for (ii = 0; ii < msc->ntouches; ii++) 236 + magicmouse_emit_touch(msc, ii, data + ii * 8 + 6); 237 + /* When emulating three-button mode, it is important 238 + * to have the current touch information before 239 + * generating a click event. 240 + */ 241 + x = (signed char)data[1]; 242 + y = (signed char)data[2]; 243 + clicks = data[3]; 244 + break; 245 + case 0x20: /* Theoretically battery status (0-100), but I have 246 + * never seen it -- maybe it is only upon request. 247 + */ 248 + case 0x60: /* Unknown, maybe laser on/off. */ 249 + case 0x61: /* Laser reflection status change. 250 + * data[1]: 0 = spotted, 1 = lost 251 + */ 252 + default: 253 + return 0; 254 + } 255 + 256 + magicmouse_emit_buttons(msc, clicks & 3); 257 + input_report_rel(input, REL_X, x); 258 + input_report_rel(input, REL_Y, y); 259 + input_sync(input); 260 + return 1; 261 + } 262 + 263 + static int magicmouse_input_open(struct input_dev *dev) 264 + { 265 + struct hid_device *hid = input_get_drvdata(dev); 266 + 267 + return hid->ll_driver->open(hid); 268 + } 269 + 270 + static void magicmouse_input_close(struct input_dev *dev) 271 + { 272 + struct hid_device *hid = input_get_drvdata(dev); 273 + 274 + hid->ll_driver->close(hid); 275 + } 276 + 277 + static void magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev) 278 + { 279 + input_set_drvdata(input, hdev); 280 + input->event = hdev->ll_driver->hidinput_input_event; 281 + input->open = magicmouse_input_open; 282 + input->close = magicmouse_input_close; 283 + 284 + input->name = hdev->name; 285 + input->phys = hdev->phys; 286 + input->uniq = hdev->uniq; 287 + input->id.bustype = hdev->bus; 288 + input->id.vendor = hdev->vendor; 289 + input->id.product = hdev->product; 290 
+ input->id.version = hdev->version; 291 + input->dev.parent = hdev->dev.parent; 292 + 293 + __set_bit(EV_KEY, input->evbit); 294 + __set_bit(BTN_LEFT, input->keybit); 295 + __set_bit(BTN_RIGHT, input->keybit); 296 + if (emulate_3button) 297 + __set_bit(BTN_MIDDLE, input->keybit); 298 + __set_bit(BTN_TOOL_FINGER, input->keybit); 299 + 300 + __set_bit(EV_REL, input->evbit); 301 + __set_bit(REL_X, input->relbit); 302 + __set_bit(REL_Y, input->relbit); 303 + if (emulate_scroll_wheel) 304 + __set_bit(REL_WHEEL, input->relbit); 305 + 306 + if (report_touches) { 307 + __set_bit(EV_ABS, input->evbit); 308 + 309 + input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0); 310 + input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0); 311 + input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0); 312 + input_set_abs_params(input, ABS_MT_ORIENTATION, -32, 31, 1, 0); 313 + input_set_abs_params(input, ABS_MT_POSITION_X, -1100, 1358, 314 + 4, 0); 315 + /* Note: Touch Y position from the device is inverted relative 316 + * to how pointer motion is reported (and relative to how USB 317 + * HID recommends the coordinates work). This driver keeps 318 + * the origin at the same position, and just uses the additive 319 + * inverse of the reported Y. 
320 + */ 321 + input_set_abs_params(input, ABS_MT_POSITION_Y, -1589, 2047, 322 + 4, 0); 323 + } 324 + 325 + if (report_undeciphered) { 326 + __set_bit(EV_MSC, input->evbit); 327 + __set_bit(MSC_RAW, input->mscbit); 328 + } 329 + } 330 + 331 + static int magicmouse_probe(struct hid_device *hdev, 332 + const struct hid_device_id *id) 333 + { 334 + __u8 feature_1[] = { 0xd7, 0x01 }; 335 + __u8 feature_2[] = { 0xf8, 0x01, 0x32 }; 336 + struct input_dev *input; 337 + struct magicmouse_sc *msc; 338 + struct hid_report *report; 339 + int ret; 340 + 341 + msc = kzalloc(sizeof(*msc), GFP_KERNEL); 342 + if (msc == NULL) { 343 + dev_err(&hdev->dev, "can't alloc magicmouse descriptor\n"); 344 + return -ENOMEM; 345 + } 346 + 347 + msc->quirks = id->driver_data; 348 + hid_set_drvdata(hdev, msc); 349 + 350 + ret = hid_parse(hdev); 351 + if (ret) { 352 + dev_err(&hdev->dev, "magicmouse hid parse failed\n"); 353 + goto err_free; 354 + } 355 + 356 + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 357 + if (ret) { 358 + dev_err(&hdev->dev, "magicmouse hw start failed\n"); 359 + goto err_free; 360 + } 361 + 362 + report = hid_register_report(hdev, HID_INPUT_REPORT, TOUCH_REPORT_ID); 363 + if (!report) { 364 + dev_err(&hdev->dev, "unable to register touch report\n"); 365 + ret = -ENOMEM; 366 + goto err_stop_hw; 367 + } 368 + report->size = 6; 369 + 370 + ret = hdev->hid_output_raw_report(hdev, feature_1, sizeof(feature_1), 371 + HID_FEATURE_REPORT); 372 + if (ret != sizeof(feature_1)) { 373 + dev_err(&hdev->dev, "unable to request touch data (1:%d)\n", 374 + ret); 375 + goto err_stop_hw; 376 + } 377 + ret = hdev->hid_output_raw_report(hdev, feature_2, 378 + sizeof(feature_2), HID_FEATURE_REPORT); 379 + if (ret != sizeof(feature_2)) { 380 + dev_err(&hdev->dev, "unable to request touch data (2:%d)\n", 381 + ret); 382 + goto err_stop_hw; 383 + } 384 + 385 + input = input_allocate_device(); 386 + if (!input) { 387 + dev_err(&hdev->dev, "can't alloc input device\n"); 388 + ret = -ENOMEM; 389 
+ goto err_stop_hw; 390 + } 391 + magicmouse_setup_input(input, hdev); 392 + 393 + ret = input_register_device(input); 394 + if (ret) { 395 + dev_err(&hdev->dev, "input device registration failed\n"); 396 + goto err_input; 397 + } 398 + msc->input = input; 399 + 400 + return 0; 401 + err_input: 402 + input_free_device(input); 403 + err_stop_hw: 404 + hid_hw_stop(hdev); 405 + err_free: 406 + kfree(msc); 407 + return ret; 408 + } 409 + 410 + static void magicmouse_remove(struct hid_device *hdev) 411 + { 412 + hid_hw_stop(hdev); 413 + kfree(hid_get_drvdata(hdev)); 414 + } 415 + 416 + static const struct hid_device_id magic_mice[] = { 417 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE), 418 + .driver_data = 0 }, 419 + { } 420 + }; 421 + MODULE_DEVICE_TABLE(hid, magic_mice); 422 + 423 + static struct hid_driver magicmouse_driver = { 424 + .name = "magicmouse", 425 + .id_table = magic_mice, 426 + .probe = magicmouse_probe, 427 + .remove = magicmouse_remove, 428 + .raw_event = magicmouse_raw_event, 429 + }; 430 + 431 + static int __init magicmouse_init(void) 432 + { 433 + int ret; 434 + 435 + ret = hid_register_driver(&magicmouse_driver); 436 + if (ret) 437 + printk(KERN_ERR "can't register magicmouse driver\n"); 438 + 439 + return ret; 440 + } 441 + 442 + static void __exit magicmouse_exit(void) 443 + { 444 + hid_unregister_driver(&magicmouse_driver); 445 + } 446 + 447 + module_init(magicmouse_init); 448 + module_exit(magicmouse_exit); 449 + MODULE_LICENSE("GPL");
+273
drivers/hid/hid-mosart.c
··· 1 + /* 2 + * HID driver for the multitouch panel on the ASUS EeePC T91MT 3 + * 4 + * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr> 5 + * Copyright (c) 2010 Teemu Tuominen <teemu.tuominen@cybercom.com> 6 + * 7 + */ 8 + 9 + /* 10 + * This program is free software; you can redistribute it and/or modify it 11 + * under the terms of the GNU General Public License as published by the Free 12 + * Software Foundation; either version 2 of the License, or (at your option) 13 + * any later version. 14 + */ 15 + 16 + #include <linux/device.h> 17 + #include <linux/hid.h> 18 + #include <linux/module.h> 19 + #include <linux/usb.h> 20 + #include "usbhid/usbhid.h" 21 + 22 + MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); 23 + MODULE_DESCRIPTION("MosArt dual-touch panel"); 24 + MODULE_LICENSE("GPL"); 25 + 26 + #include "hid-ids.h" 27 + 28 + struct mosart_data { 29 + __u16 x, y; 30 + __u8 id; 31 + bool valid; /* valid finger data, or just placeholder? */ 32 + bool first; /* is this the first finger in this frame? */ 33 + bool activity_now; /* at least one active finger in this frame? */ 34 + bool activity; /* at least one active finger previously? 
*/ 35 + }; 36 + 37 + static int mosart_input_mapping(struct hid_device *hdev, struct hid_input *hi, 38 + struct hid_field *field, struct hid_usage *usage, 39 + unsigned long **bit, int *max) 40 + { 41 + switch (usage->hid & HID_USAGE_PAGE) { 42 + 43 + case HID_UP_GENDESK: 44 + switch (usage->hid) { 45 + case HID_GD_X: 46 + hid_map_usage(hi, usage, bit, max, 47 + EV_ABS, ABS_MT_POSITION_X); 48 + /* touchscreen emulation */ 49 + input_set_abs_params(hi->input, ABS_X, 50 + field->logical_minimum, 51 + field->logical_maximum, 0, 0); 52 + return 1; 53 + case HID_GD_Y: 54 + hid_map_usage(hi, usage, bit, max, 55 + EV_ABS, ABS_MT_POSITION_Y); 56 + /* touchscreen emulation */ 57 + input_set_abs_params(hi->input, ABS_Y, 58 + field->logical_minimum, 59 + field->logical_maximum, 0, 0); 60 + return 1; 61 + } 62 + return 0; 63 + 64 + case HID_UP_DIGITIZER: 65 + switch (usage->hid) { 66 + case HID_DG_CONFIDENCE: 67 + case HID_DG_TIPSWITCH: 68 + case HID_DG_INPUTMODE: 69 + case HID_DG_DEVICEINDEX: 70 + case HID_DG_CONTACTCOUNT: 71 + case HID_DG_CONTACTMAX: 72 + case HID_DG_TIPPRESSURE: 73 + case HID_DG_WIDTH: 74 + case HID_DG_HEIGHT: 75 + return -1; 76 + case HID_DG_INRANGE: 77 + /* touchscreen emulation */ 78 + hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); 79 + return 1; 80 + 81 + case HID_DG_CONTACTID: 82 + hid_map_usage(hi, usage, bit, max, 83 + EV_ABS, ABS_MT_TRACKING_ID); 84 + return 1; 85 + 86 + } 87 + return 0; 88 + 89 + case 0xff000000: 90 + /* ignore HID features */ 91 + return -1; 92 + } 93 + 94 + return 0; 95 + } 96 + 97 + static int mosart_input_mapped(struct hid_device *hdev, struct hid_input *hi, 98 + struct hid_field *field, struct hid_usage *usage, 99 + unsigned long **bit, int *max) 100 + { 101 + if (usage->type == EV_KEY || usage->type == EV_ABS) 102 + clear_bit(usage->code, *bit); 103 + 104 + return 0; 105 + } 106 + 107 + /* 108 + * this function is called when a whole finger has been parsed, 109 + * so that it can decide what to send to the input 
layer. 110 + */ 111 + static void mosart_filter_event(struct mosart_data *td, struct input_dev *input) 112 + { 113 + td->first = !td->first; /* touchscreen emulation */ 114 + 115 + if (!td->valid) { 116 + /* 117 + * touchscreen emulation: if no finger in this frame is valid 118 + * and there previously was finger activity, this is a release 119 + */ 120 + if (!td->first && !td->activity_now && td->activity) { 121 + input_event(input, EV_KEY, BTN_TOUCH, 0); 122 + td->activity = false; 123 + } 124 + return; 125 + } 126 + 127 + input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id); 128 + input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x); 129 + input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y); 130 + 131 + input_mt_sync(input); 132 + td->valid = false; 133 + 134 + /* touchscreen emulation: if first active finger in this frame... */ 135 + if (!td->activity_now) { 136 + /* if there was no previous activity, emit touch event */ 137 + if (!td->activity) { 138 + input_event(input, EV_KEY, BTN_TOUCH, 1); 139 + td->activity = true; 140 + } 141 + td->activity_now = true; 142 + /* and in any case this is our preferred finger */ 143 + input_event(input, EV_ABS, ABS_X, td->x); 144 + input_event(input, EV_ABS, ABS_Y, td->y); 145 + } 146 + } 147 + 148 + 149 + static int mosart_event(struct hid_device *hid, struct hid_field *field, 150 + struct hid_usage *usage, __s32 value) 151 + { 152 + struct mosart_data *td = hid_get_drvdata(hid); 153 + 154 + if (hid->claimed & HID_CLAIMED_INPUT) { 155 + struct input_dev *input = field->hidinput->input; 156 + switch (usage->hid) { 157 + case HID_DG_INRANGE: 158 + td->valid = !!value; 159 + break; 160 + case HID_GD_X: 161 + td->x = value; 162 + break; 163 + case HID_GD_Y: 164 + td->y = value; 165 + mosart_filter_event(td, input); 166 + break; 167 + case HID_DG_CONTACTID: 168 + td->id = value; 169 + break; 170 + case HID_DG_CONTACTCOUNT: 171 + /* touch emulation: this is the last field in a frame */ 172 + td->first = false; 173 + 
td->activity_now = false; 174 + break; 175 + case HID_DG_CONFIDENCE: 176 + case HID_DG_TIPSWITCH: 177 + /* avoid interference from generic hidinput handling */ 178 + break; 179 + 180 + default: 181 + /* fallback to the generic hidinput handling */ 182 + return 0; 183 + } 184 + } 185 + 186 + /* we have handled the hidinput part, now remains hiddev */ 187 + if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) 188 + hid->hiddev_hid_event(hid, field, usage, value); 189 + 190 + return 1; 191 + } 192 + 193 + static int mosart_probe(struct hid_device *hdev, const struct hid_device_id *id) 194 + { 195 + int ret; 196 + struct mosart_data *td; 197 + 198 + 199 + td = kmalloc(sizeof(struct mosart_data), GFP_KERNEL); 200 + if (!td) { 201 + dev_err(&hdev->dev, "cannot allocate MosArt data\n"); 202 + return -ENOMEM; 203 + } 204 + td->valid = false; 205 + td->activity = false; 206 + td->activity_now = false; 207 + td->first = false; 208 + hid_set_drvdata(hdev, td); 209 + 210 + /* currently, it's better to have one evdev device only */ 211 + #if 0 212 + hdev->quirks |= HID_QUIRK_MULTI_INPUT; 213 + #endif 214 + 215 + ret = hid_parse(hdev); 216 + if (ret == 0) 217 + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 218 + 219 + if (ret == 0) { 220 + struct hid_report_enum *re = hdev->report_enum 221 + + HID_FEATURE_REPORT; 222 + struct hid_report *r = re->report_id_hash[7]; 223 + 224 + r->field[0]->value[0] = 0x02; 225 + usbhid_submit_report(hdev, r, USB_DIR_OUT); 226 + } else 227 + kfree(td); 228 + 229 + return ret; 230 + } 231 + 232 + static void mosart_remove(struct hid_device *hdev) 233 + { 234 + hid_hw_stop(hdev); 235 + kfree(hid_get_drvdata(hdev)); 236 + hid_set_drvdata(hdev, NULL); 237 + } 238 + 239 + static const struct hid_device_id mosart_devices[] = { 240 + { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) }, 241 + { } 242 + }; 243 + MODULE_DEVICE_TABLE(hid, mosart_devices); 244 + 245 + static const struct hid_usage_id mosart_grabbed_usages[] = { 246 + 
{ HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, 247 + { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} 248 + }; 249 + 250 + static struct hid_driver mosart_driver = { 251 + .name = "mosart", 252 + .id_table = mosart_devices, 253 + .probe = mosart_probe, 254 + .remove = mosart_remove, 255 + .input_mapping = mosart_input_mapping, 256 + .input_mapped = mosart_input_mapped, 257 + .usage_table = mosart_grabbed_usages, 258 + .event = mosart_event, 259 + }; 260 + 261 + static int __init mosart_init(void) 262 + { 263 + return hid_register_driver(&mosart_driver); 264 + } 265 + 266 + static void __exit mosart_exit(void) 267 + { 268 + hid_unregister_driver(&mosart_driver); 269 + } 270 + 271 + module_init(mosart_init); 272 + module_exit(mosart_exit); 273 +
+147 -65
drivers/hid/hid-ntrig.c
··· 25 25 EV_KEY, (c)) 26 26 27 27 struct ntrig_data { 28 - __s32 x, y, id, w, h; 29 - char reading_a_point, found_contact_id; 30 - char pen_active; 31 - char finger_active; 32 - char inverted; 28 + /* Incoming raw values for a single contact */ 29 + __u16 x, y, w, h; 30 + __u16 id; 31 + __u8 confidence; 32 + 33 + bool reading_mt; 34 + __u8 first_contact_confidence; 35 + 36 + __u8 mt_footer[4]; 37 + __u8 mt_foot_count; 33 38 }; 34 39 35 40 /* ··· 47 42 struct hid_field *field, struct hid_usage *usage, 48 43 unsigned long **bit, int *max) 49 44 { 50 - switch (usage->hid & HID_USAGE_PAGE) { 45 + /* No special mappings needed for the pen and single touch */ 46 + if (field->physical) 47 + return 0; 51 48 49 + switch (usage->hid & HID_USAGE_PAGE) { 52 50 case HID_UP_GENDESK: 53 51 switch (usage->hid) { 54 52 case HID_GD_X: ··· 74 66 case HID_UP_DIGITIZER: 75 67 switch (usage->hid) { 76 68 /* we do not want to map these for now */ 77 - case HID_DG_CONTACTID: /* value is useless */ 69 + case HID_DG_CONTACTID: /* Not trustworthy, squelch for now */ 78 70 case HID_DG_INPUTMODE: 79 71 case HID_DG_DEVICEINDEX: 80 - case HID_DG_CONTACTCOUNT: 81 72 case HID_DG_CONTACTMAX: 82 73 return -1; 83 - 84 - /* original mapping by Rafi Rubin */ 85 - case HID_DG_CONFIDENCE: 86 - nt_map_key_clear(BTN_TOOL_DOUBLETAP); 87 - return 1; 88 74 89 75 /* width/height mapped on TouchMajor/TouchMinor/Orientation */ 90 76 case HID_DG_WIDTH: ··· 106 104 struct hid_field *field, struct hid_usage *usage, 107 105 unsigned long **bit, int *max) 108 106 { 107 + /* No special mappings needed for the pen and single touch */ 108 + if (field->physical) 109 + return 0; 110 + 109 111 if (usage->type == EV_KEY || usage->type == EV_REL 110 112 || usage->type == EV_ABS) 111 113 clear_bit(usage->code, *bit); ··· 129 123 struct input_dev *input = field->hidinput->input; 130 124 struct ntrig_data *nd = hid_get_drvdata(hid); 131 125 126 + /* No special handling needed for the pen */ 127 + if (field->application == 
HID_DG_PEN) 128 + return 0; 129 + 132 130 if (hid->claimed & HID_CLAIMED_INPUT) { 133 131 switch (usage->hid) { 134 - 135 - case HID_DG_INRANGE: 136 - if (field->application & 0x3) 137 - nd->pen_active = (value != 0); 138 - else 139 - nd->finger_active = (value != 0); 140 - return 0; 141 - 142 - case HID_DG_INVERT: 143 - nd->inverted = value; 144 - return 0; 145 - 132 + case 0xff000001: 133 + /* Tag indicating the start of a multitouch group */ 134 + nd->reading_mt = 1; 135 + nd->first_contact_confidence = 0; 136 + break; 137 + case HID_DG_CONFIDENCE: 138 + nd->confidence = value; 139 + break; 146 140 case HID_GD_X: 147 141 nd->x = value; 148 - nd->reading_a_point = 1; 142 + /* Clear the contact footer */ 143 + nd->mt_foot_count = 0; 149 144 break; 150 145 case HID_GD_Y: 151 146 nd->y = value; 152 147 break; 153 148 case HID_DG_CONTACTID: 154 149 nd->id = value; 155 - /* we receive this only when in multitouch mode */ 156 - nd->found_contact_id = 1; 157 150 break; 158 151 case HID_DG_WIDTH: 159 152 nd->w = value; ··· 164 159 * report received in a finger event. We want 165 160 * to emit a normal (X, Y) position 166 161 */ 167 - if (!nd->found_contact_id) { 168 - if (nd->pen_active && nd->finger_active) { 169 - input_report_key(input, BTN_TOOL_DOUBLETAP, 0); 170 - input_report_key(input, BTN_TOOL_DOUBLETAP, 1); 171 - } 162 + if (!nd->reading_mt) { 163 + input_report_key(input, BTN_TOOL_DOUBLETAP, 164 + (nd->confidence != 0)); 172 165 input_event(input, EV_ABS, ABS_X, nd->x); 173 166 input_event(input, EV_ABS, ABS_Y, nd->y); 174 - } 175 - break; 176 - case HID_DG_TIPPRESSURE: 177 - /* 178 - * when in single touch mode, this is the last 179 - * report received in a pen event. We want 180 - * to emit a normal (X, Y) position 181 - */ 182 - if (! nd->found_contact_id) { 183 - if (nd->pen_active && nd->finger_active) { 184 - input_report_key(input, 185 - nd->inverted ? BTN_TOOL_RUBBER : BTN_TOOL_PEN 186 - , 0); 187 - input_report_key(input, 188 - nd->inverted ? 
BTN_TOOL_RUBBER : BTN_TOOL_PEN 189 - , 1); 190 - } 191 - input_event(input, EV_ABS, ABS_X, nd->x); 192 - input_event(input, EV_ABS, ABS_Y, nd->y); 193 - input_event(input, EV_ABS, ABS_PRESSURE, value); 194 167 } 195 168 break; 196 169 case 0xff000002: ··· 178 195 * this usage tells if the contact point is real 179 196 * or a placeholder 180 197 */ 181 - if (!nd->reading_a_point || value != 1) 198 + 199 + /* Shouldn't get more than 4 footer packets, so skip */ 200 + if (nd->mt_foot_count >= 4) 182 201 break; 202 + 203 + nd->mt_footer[nd->mt_foot_count++] = value; 204 + 205 + /* if the footer isn't complete break */ 206 + if (nd->mt_foot_count != 4) 207 + break; 208 + 209 + /* Pen activity signal, trigger end of touch. */ 210 + if (nd->mt_footer[2]) { 211 + nd->confidence = 0; 212 + break; 213 + } 214 + 215 + /* If the contact was invalid */ 216 + if (!(nd->confidence && nd->mt_footer[0]) 217 + || nd->w <= 250 218 + || nd->h <= 190) { 219 + nd->confidence = 0; 220 + break; 221 + } 222 + 183 223 /* emit a normal (X, Y) for the first point only */ 184 224 if (nd->id == 0) { 225 + nd->first_contact_confidence = nd->confidence; 185 226 input_event(input, EV_ABS, ABS_X, nd->x); 186 227 input_event(input, EV_ABS, ABS_Y, nd->y); 187 228 } ··· 227 220 ABS_MT_TOUCH_MINOR, nd->w); 228 221 } 229 222 input_mt_sync(field->hidinput->input); 230 - nd->reading_a_point = 0; 231 - nd->found_contact_id = 0; 223 + break; 224 + 225 + case HID_DG_CONTACTCOUNT: /* End of a multitouch group */ 226 + if (!nd->reading_mt) 227 + break; 228 + 229 + nd->reading_mt = 0; 230 + 231 + if (nd->first_contact_confidence) { 232 + switch (value) { 233 + case 0: /* for single touch devices */ 234 + case 1: 235 + input_report_key(input, 236 + BTN_TOOL_DOUBLETAP, 1); 237 + break; 238 + case 2: 239 + input_report_key(input, 240 + BTN_TOOL_TRIPLETAP, 1); 241 + break; 242 + case 3: 243 + default: 244 + input_report_key(input, 245 + BTN_TOOL_QUADTAP, 1); 246 + } 247 + input_report_key(input, BTN_TOUCH, 1); 248 
+ } else { 249 + input_report_key(input, 250 + BTN_TOOL_DOUBLETAP, 0); 251 + input_report_key(input, 252 + BTN_TOOL_TRIPLETAP, 0); 253 + input_report_key(input, 254 + BTN_TOOL_QUADTAP, 0); 255 + } 232 256 break; 233 257 234 258 default: ··· 269 231 } 270 232 271 233 /* we have handled the hidinput part, now remains hiddev */ 272 - if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) 273 - hid->hiddev_hid_event(hid, field, usage, value); 234 + if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_hid_event) 235 + hid->hiddev_hid_event(hid, field, usage, value); 274 236 275 237 return 1; 276 238 } ··· 279 241 { 280 242 int ret; 281 243 struct ntrig_data *nd; 244 + struct hid_input *hidinput; 245 + struct input_dev *input; 246 + 247 + if (id->driver_data) 248 + hdev->quirks |= HID_QUIRK_MULTI_INPUT; 282 249 283 250 nd = kmalloc(sizeof(struct ntrig_data), GFP_KERNEL); 284 251 if (!nd) { 285 252 dev_err(&hdev->dev, "cannot allocate N-Trig data\n"); 286 253 return -ENOMEM; 287 254 } 288 - nd->reading_a_point = 0; 289 - nd->found_contact_id = 0; 255 + 256 + nd->reading_mt = 0; 290 257 hid_set_drvdata(hdev, nd); 291 258 292 259 ret = hid_parse(hdev); 293 - if (!ret) 294 - ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 260 + if (ret) { 261 + dev_err(&hdev->dev, "parse failed\n"); 262 + goto err_free; 263 + } 295 264 296 - if (ret) 297 - kfree (nd); 265 + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); 266 + if (ret) { 267 + dev_err(&hdev->dev, "hw start failed\n"); 268 + goto err_free; 269 + } 298 270 271 + 272 + list_for_each_entry(hidinput, &hdev->inputs, list) { 273 + input = hidinput->input; 274 + switch (hidinput->report->field[0]->application) { 275 + case HID_DG_PEN: 276 + input->name = "N-Trig Pen"; 277 + break; 278 + case HID_DG_TOUCHSCREEN: 279 + __clear_bit(BTN_TOOL_PEN, input->keybit); 280 + /* 281 + * A little something special to enable 282 + * two and three finger taps. 
283 + */ 284 + __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); 285 + __set_bit(BTN_TOOL_TRIPLETAP, input->keybit); 286 + __set_bit(BTN_TOOL_QUADTAP, input->keybit); 287 + /* 288 + * The physical touchscreen (single touch) 289 + * input has a value for physical, whereas 290 + * the multitouch only has logical input 291 + * fields. 292 + */ 293 + input->name = 294 + (hidinput->report->field[0] 295 + ->physical) ? 296 + "N-Trig Touchscreen" : 297 + "N-Trig MultiTouch"; 298 + break; 299 + } 300 + } 301 + 302 + return 0; 303 + err_free: 304 + kfree(nd); 299 305 return ret; 300 306 } 301 307 ··· 358 276 359 277 static const struct hid_usage_id ntrig_grabbed_usages[] = { 360 278 { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, 361 - { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} 279 + { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1 } 362 280 }; 363 281 364 282 static struct hid_driver ntrig_driver = {
+56
drivers/hid/hid-ortek.c
··· 1 + /* 2 + * HID driver for Ortek WKB-2000 (wireless keyboard + mouse trackpad). 3 + * Fixes LogicalMaximum error in USB report description, see 4 + * http://bugzilla.kernel.org/show_bug.cgi?id=14787 5 + * 6 + * Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com> 7 + */ 8 + 9 + /* 10 + * This program is free software; you can redistribute it and/or modify it 11 + * under the terms of the GNU General Public License as published by the Free 12 + * Software Foundation; either version 2 of the License, or (at your option) 13 + * any later version. 14 + */ 15 + 16 + #include <linux/device.h> 17 + #include <linux/hid.h> 18 + #include <linux/module.h> 19 + 20 + #include "hid-ids.h" 21 + 22 + static void ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, 23 + unsigned int rsize) 24 + { 25 + if (rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) { 26 + dev_info(&hdev->dev, "Fixing up Ortek WKB-2000 " 27 + "report descriptor.\n"); 28 + rdesc[55] = 0x92; 29 + } 30 + } 31 + 32 + static const struct hid_device_id ortek_devices[] = { 33 + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, 34 + { } 35 + }; 36 + MODULE_DEVICE_TABLE(hid, ortek_devices); 37 + 38 + static struct hid_driver ortek_driver = { 39 + .name = "ortek", 40 + .id_table = ortek_devices, 41 + .report_fixup = ortek_report_fixup 42 + }; 43 + 44 + static int __init ortek_init(void) 45 + { 46 + return hid_register_driver(&ortek_driver); 47 + } 48 + 49 + static void __exit ortek_exit(void) 50 + { 51 + hid_unregister_driver(&ortek_driver); 52 + } 53 + 54 + module_init(ortek_init); 55 + module_exit(ortek_exit); 56 + MODULE_LICENSE("GPL");
+260
drivers/hid/hid-quanta.c
··· 1 + /* 2 + * HID driver for Quanta Optical Touch dual-touch panels 3 + * 4 + * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr> 5 + * 6 + */ 7 + 8 + /* 9 + * This program is free software; you can redistribute it and/or modify it 10 + * under the terms of the GNU General Public License as published by the Free 11 + * Software Foundation; either version 2 of the License, or (at your option) 12 + * any later version. 13 + */ 14 + 15 + #include <linux/device.h> 16 + #include <linux/hid.h> 17 + #include <linux/module.h> 18 + 19 + MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); 20 + MODULE_DESCRIPTION("Quanta dual-touch panel"); 21 + MODULE_LICENSE("GPL"); 22 + 23 + #include "hid-ids.h" 24 + 25 + struct quanta_data { 26 + __u16 x, y; 27 + __u8 id; 28 + bool valid; /* valid finger data, or just placeholder? */ 29 + bool first; /* is this the first finger in this frame? */ 30 + bool activity_now; /* at least one active finger in this frame? */ 31 + bool activity; /* at least one active finger previously? 
*/ 32 + }; 33 + 34 + static int quanta_input_mapping(struct hid_device *hdev, struct hid_input *hi, 35 + struct hid_field *field, struct hid_usage *usage, 36 + unsigned long **bit, int *max) 37 + { 38 + switch (usage->hid & HID_USAGE_PAGE) { 39 + 40 + case HID_UP_GENDESK: 41 + switch (usage->hid) { 42 + case HID_GD_X: 43 + hid_map_usage(hi, usage, bit, max, 44 + EV_ABS, ABS_MT_POSITION_X); 45 + /* touchscreen emulation */ 46 + input_set_abs_params(hi->input, ABS_X, 47 + field->logical_minimum, 48 + field->logical_maximum, 0, 0); 49 + return 1; 50 + case HID_GD_Y: 51 + hid_map_usage(hi, usage, bit, max, 52 + EV_ABS, ABS_MT_POSITION_Y); 53 + /* touchscreen emulation */ 54 + input_set_abs_params(hi->input, ABS_Y, 55 + field->logical_minimum, 56 + field->logical_maximum, 0, 0); 57 + return 1; 58 + } 59 + return 0; 60 + 61 + case HID_UP_DIGITIZER: 62 + switch (usage->hid) { 63 + case HID_DG_CONFIDENCE: 64 + case HID_DG_TIPSWITCH: 65 + case HID_DG_INPUTMODE: 66 + case HID_DG_DEVICEINDEX: 67 + case HID_DG_CONTACTCOUNT: 68 + case HID_DG_CONTACTMAX: 69 + case HID_DG_TIPPRESSURE: 70 + case HID_DG_WIDTH: 71 + case HID_DG_HEIGHT: 72 + return -1; 73 + case HID_DG_INRANGE: 74 + /* touchscreen emulation */ 75 + hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); 76 + return 1; 77 + case HID_DG_CONTACTID: 78 + hid_map_usage(hi, usage, bit, max, 79 + EV_ABS, ABS_MT_TRACKING_ID); 80 + return 1; 81 + } 82 + return 0; 83 + 84 + case 0xff000000: 85 + /* ignore vendor-specific features */ 86 + return -1; 87 + } 88 + 89 + return 0; 90 + } 91 + 92 + static int quanta_input_mapped(struct hid_device *hdev, struct hid_input *hi, 93 + struct hid_field *field, struct hid_usage *usage, 94 + unsigned long **bit, int *max) 95 + { 96 + if (usage->type == EV_KEY || usage->type == EV_ABS) 97 + clear_bit(usage->code, *bit); 98 + 99 + return 0; 100 + } 101 + 102 + /* 103 + * this function is called when a whole finger has been parsed, 104 + * so that it can decide what to send to the input layer. 
105 + */ 106 + static void quanta_filter_event(struct quanta_data *td, struct input_dev *input) 107 + { 108 + 109 + td->first = !td->first; /* touchscreen emulation */ 110 + 111 + if (!td->valid) { 112 + /* 113 + * touchscreen emulation: if no finger in this frame is valid 114 + * and there previously was finger activity, this is a release 115 + */ 116 + if (!td->first && !td->activity_now && td->activity) { 117 + input_event(input, EV_KEY, BTN_TOUCH, 0); 118 + td->activity = false; 119 + } 120 + return; 121 + } 122 + 123 + input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id); 124 + input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x); 125 + input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y); 126 + 127 + input_mt_sync(input); 128 + td->valid = false; 129 + 130 + /* touchscreen emulation: if first active finger in this frame... */ 131 + if (!td->activity_now) { 132 + /* if there was no previous activity, emit touch event */ 133 + if (!td->activity) { 134 + input_event(input, EV_KEY, BTN_TOUCH, 1); 135 + td->activity = true; 136 + } 137 + td->activity_now = true; 138 + /* and in any case this is our preferred finger */ 139 + input_event(input, EV_ABS, ABS_X, td->x); 140 + input_event(input, EV_ABS, ABS_Y, td->y); 141 + } 142 + } 143 + 144 + 145 + static int quanta_event(struct hid_device *hid, struct hid_field *field, 146 + struct hid_usage *usage, __s32 value) 147 + { 148 + struct quanta_data *td = hid_get_drvdata(hid); 149 + 150 + if (hid->claimed & HID_CLAIMED_INPUT) { 151 + struct input_dev *input = field->hidinput->input; 152 + 153 + switch (usage->hid) { 154 + case HID_DG_INRANGE: 155 + td->valid = !!value; 156 + break; 157 + case HID_GD_X: 158 + td->x = value; 159 + break; 160 + case HID_GD_Y: 161 + td->y = value; 162 + quanta_filter_event(td, input); 163 + break; 164 + case HID_DG_CONTACTID: 165 + td->id = value; 166 + break; 167 + case HID_DG_CONTACTCOUNT: 168 + /* touch emulation: this is the last field in a frame */ 169 + td->first = false; 170 + 
td->activity_now = false; 171 + break; 172 + case HID_DG_CONFIDENCE: 173 + case HID_DG_TIPSWITCH: 174 + /* avoid interference from generic hidinput handling */ 175 + break; 176 + 177 + default: 178 + /* fallback to the generic hidinput handling */ 179 + return 0; 180 + } 181 + } 182 + 183 + /* we have handled the hidinput part, now remains hiddev */ 184 + if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) 185 + hid->hiddev_hid_event(hid, field, usage, value); 186 + 187 + return 1; 188 + } 189 + 190 + static int quanta_probe(struct hid_device *hdev, const struct hid_device_id *id) 191 + { 192 + int ret; 193 + struct quanta_data *td; 194 + 195 + td = kmalloc(sizeof(struct quanta_data), GFP_KERNEL); 196 + if (!td) { 197 + dev_err(&hdev->dev, "cannot allocate Quanta Touch data\n"); 198 + return -ENOMEM; 199 + } 200 + td->valid = false; 201 + td->activity = false; 202 + td->activity_now = false; 203 + td->first = false; 204 + hid_set_drvdata(hdev, td); 205 + 206 + ret = hid_parse(hdev); 207 + if (!ret) 208 + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 209 + 210 + if (ret) 211 + kfree(td); 212 + 213 + return ret; 214 + } 215 + 216 + static void quanta_remove(struct hid_device *hdev) 217 + { 218 + hid_hw_stop(hdev); 219 + kfree(hid_get_drvdata(hdev)); 220 + hid_set_drvdata(hdev, NULL); 221 + } 222 + 223 + static const struct hid_device_id quanta_devices[] = { 224 + { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, 225 + USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) }, 226 + { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, 227 + USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) }, 228 + { } 229 + }; 230 + MODULE_DEVICE_TABLE(hid, quanta_devices); 231 + 232 + static const struct hid_usage_id quanta_grabbed_usages[] = { 233 + { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, 234 + { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} 235 + }; 236 + 237 + static struct hid_driver quanta_driver = { 238 + .name = "quanta-touch", 239 + .id_table = quanta_devices, 240 + .probe = quanta_probe, 241 + 
.remove = quanta_remove, 242 + .input_mapping = quanta_input_mapping, 243 + .input_mapped = quanta_input_mapped, 244 + .usage_table = quanta_grabbed_usages, 245 + .event = quanta_event, 246 + }; 247 + 248 + static int __init quanta_init(void) 249 + { 250 + return hid_register_driver(&quanta_driver); 251 + } 252 + 253 + static void __exit quanta_exit(void) 254 + { 255 + hid_unregister_driver(&quanta_driver); 256 + } 257 + 258 + module_init(quanta_init); 259 + module_exit(quanta_exit); 260 +
+20 -3
drivers/hid/hid-sony.c
··· 48 48 * to "operational". Without this, the ps3 controller will not report any 49 49 * events. 50 50 */ 51 - static int sony_set_operational(struct hid_device *hdev) 51 + static int sony_set_operational_usb(struct hid_device *hdev) 52 52 { 53 53 struct usb_interface *intf = to_usb_interface(hdev->dev.parent); 54 54 struct usb_device *dev = interface_to_usbdev(intf); ··· 73 73 return ret; 74 74 } 75 75 76 + static int sony_set_operational_bt(struct hid_device *hdev) 77 + { 78 + unsigned char buf[] = { 0x53, 0xf4, 0x42, 0x03, 0x00, 0x00 }; 79 + return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT); 80 + } 81 + 76 82 static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) 77 83 { 78 84 int ret; ··· 87 81 88 82 sc = kzalloc(sizeof(*sc), GFP_KERNEL); 89 83 if (sc == NULL) { 90 - dev_err(&hdev->dev, "can't alloc apple descriptor\n"); 84 + dev_err(&hdev->dev, "can't alloc sony descriptor\n"); 91 85 return -ENOMEM; 92 86 } 93 87 ··· 107 101 goto err_free; 108 102 } 109 103 110 - ret = sony_set_operational(hdev); 104 + switch (hdev->bus) { 105 + case BUS_USB: 106 + ret = sony_set_operational_usb(hdev); 107 + break; 108 + case BUS_BLUETOOTH: 109 + ret = sony_set_operational_bt(hdev); 110 + break; 111 + default: 112 + ret = 0; 113 + } 114 + 111 115 if (ret < 0) 112 116 goto err_stop; 113 117 ··· 137 121 138 122 static const struct hid_device_id sony_devices[] = { 139 123 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, 124 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, 140 125 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE), 141 126 .driver_data = VAIO_RDESC_CONSTANT }, 142 127 { }
+283
drivers/hid/hid-stantum.c
··· 1 + /* 2 + * HID driver for Stantum multitouch panels 3 + * 4 + * Copyright (c) 2009 Stephane Chatty <chatty@enac.fr> 5 + * 6 + */ 7 + 8 + /* 9 + * This program is free software; you can redistribute it and/or modify it 10 + * under the terms of the GNU General Public License as published by the Free 11 + * Software Foundation; either version 2 of the License, or (at your option) 12 + * any later version. 13 + */ 14 + 15 + #include <linux/device.h> 16 + #include <linux/hid.h> 17 + #include <linux/module.h> 18 + 19 + MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); 20 + MODULE_DESCRIPTION("Stantum HID multitouch panels"); 21 + MODULE_LICENSE("GPL"); 22 + 23 + #include "hid-ids.h" 24 + 25 + struct stantum_data { 26 + __s32 x, y, z, w, h; /* x, y, pressure, width, height */ 27 + __u16 id; /* touch id */ 28 + bool valid; /* valid finger data, or just placeholder? */ 29 + bool first; /* first finger in the HID packet? */ 30 + bool activity; /* at least one active finger so far? */ 31 + }; 32 + 33 + static int stantum_input_mapping(struct hid_device *hdev, struct hid_input *hi, 34 + struct hid_field *field, struct hid_usage *usage, 35 + unsigned long **bit, int *max) 36 + { 37 + switch (usage->hid & HID_USAGE_PAGE) { 38 + 39 + case HID_UP_GENDESK: 40 + switch (usage->hid) { 41 + case HID_GD_X: 42 + hid_map_usage(hi, usage, bit, max, 43 + EV_ABS, ABS_MT_POSITION_X); 44 + /* touchscreen emulation */ 45 + input_set_abs_params(hi->input, ABS_X, 46 + field->logical_minimum, 47 + field->logical_maximum, 0, 0); 48 + return 1; 49 + case HID_GD_Y: 50 + hid_map_usage(hi, usage, bit, max, 51 + EV_ABS, ABS_MT_POSITION_Y); 52 + /* touchscreen emulation */ 53 + input_set_abs_params(hi->input, ABS_Y, 54 + field->logical_minimum, 55 + field->logical_maximum, 0, 0); 56 + return 1; 57 + } 58 + return 0; 59 + 60 + case HID_UP_DIGITIZER: 61 + switch (usage->hid) { 62 + case HID_DG_INRANGE: 63 + case HID_DG_CONFIDENCE: 64 + case HID_DG_INPUTMODE: 65 + case HID_DG_DEVICEINDEX: 66 + case 
HID_DG_CONTACTCOUNT: 67 + case HID_DG_CONTACTMAX: 68 + return -1; 69 + 70 + case HID_DG_TIPSWITCH: 71 + /* touchscreen emulation */ 72 + hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); 73 + return 1; 74 + 75 + case HID_DG_WIDTH: 76 + hid_map_usage(hi, usage, bit, max, 77 + EV_ABS, ABS_MT_TOUCH_MAJOR); 78 + return 1; 79 + case HID_DG_HEIGHT: 80 + hid_map_usage(hi, usage, bit, max, 81 + EV_ABS, ABS_MT_TOUCH_MINOR); 82 + input_set_abs_params(hi->input, ABS_MT_ORIENTATION, 83 + 1, 1, 0, 0); 84 + return 1; 85 + case HID_DG_TIPPRESSURE: 86 + hid_map_usage(hi, usage, bit, max, 87 + EV_ABS, ABS_MT_PRESSURE); 88 + return 1; 89 + 90 + case HID_DG_CONTACTID: 91 + hid_map_usage(hi, usage, bit, max, 92 + EV_ABS, ABS_MT_TRACKING_ID); 93 + return 1; 94 + 95 + } 96 + return 0; 97 + 98 + case 0xff000000: 99 + /* no input-oriented meaning */ 100 + return -1; 101 + } 102 + 103 + return 0; 104 + } 105 + 106 + static int stantum_input_mapped(struct hid_device *hdev, struct hid_input *hi, 107 + struct hid_field *field, struct hid_usage *usage, 108 + unsigned long **bit, int *max) 109 + { 110 + if (usage->type == EV_KEY || usage->type == EV_ABS) 111 + clear_bit(usage->code, *bit); 112 + 113 + return 0; 114 + } 115 + 116 + /* 117 + * this function is called when a whole finger has been parsed, 118 + * so that it can decide what to send to the input layer. 
119 + */ 120 + static void stantum_filter_event(struct stantum_data *sd, 121 + struct input_dev *input) 122 + { 123 + bool wide; 124 + 125 + if (!sd->valid) { 126 + /* 127 + * touchscreen emulation: if the first finger is not valid and 128 + * there previously was finger activity, this is a release 129 + */ 130 + if (sd->first && sd->activity) { 131 + input_event(input, EV_KEY, BTN_TOUCH, 0); 132 + sd->activity = false; 133 + } 134 + return; 135 + } 136 + 137 + input_event(input, EV_ABS, ABS_MT_TRACKING_ID, sd->id); 138 + input_event(input, EV_ABS, ABS_MT_POSITION_X, sd->x); 139 + input_event(input, EV_ABS, ABS_MT_POSITION_Y, sd->y); 140 + 141 + wide = (sd->w > sd->h); 142 + input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide); 143 + input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, wide ? sd->w : sd->h); 144 + input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, wide ? sd->h : sd->w); 145 + 146 + input_event(input, EV_ABS, ABS_MT_PRESSURE, sd->z); 147 + 148 + input_mt_sync(input); 149 + sd->valid = false; 150 + 151 + /* touchscreen emulation */ 152 + if (sd->first) { 153 + if (!sd->activity) { 154 + input_event(input, EV_KEY, BTN_TOUCH, 1); 155 + sd->activity = true; 156 + } 157 + input_event(input, EV_ABS, ABS_X, sd->x); 158 + input_event(input, EV_ABS, ABS_Y, sd->y); 159 + } 160 + sd->first = false; 161 + } 162 + 163 + 164 + static int stantum_event(struct hid_device *hid, struct hid_field *field, 165 + struct hid_usage *usage, __s32 value) 166 + { 167 + struct stantum_data *sd = hid_get_drvdata(hid); 168 + 169 + if (hid->claimed & HID_CLAIMED_INPUT) { 170 + struct input_dev *input = field->hidinput->input; 171 + 172 + switch (usage->hid) { 173 + case HID_DG_INRANGE: 174 + /* this is the last field in a finger */ 175 + stantum_filter_event(sd, input); 176 + break; 177 + case HID_DG_WIDTH: 178 + sd->w = value; 179 + break; 180 + case HID_DG_HEIGHT: 181 + sd->h = value; 182 + break; 183 + case HID_GD_X: 184 + sd->x = value; 185 + break; 186 + case HID_GD_Y: 187 + sd->y = 
value; 188 + break; 189 + case HID_DG_TIPPRESSURE: 190 + sd->z = value; 191 + break; 192 + case HID_DG_CONTACTID: 193 + sd->id = value; 194 + break; 195 + case HID_DG_CONFIDENCE: 196 + sd->valid = !!value; 197 + break; 198 + case 0xff000002: 199 + /* this comes only before the first finger */ 200 + sd->first = true; 201 + break; 202 + 203 + default: 204 + /* ignore the others */ 205 + return 1; 206 + } 207 + } 208 + 209 + /* we have handled the hidinput part, now remains hiddev */ 210 + if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) 211 + hid->hiddev_hid_event(hid, field, usage, value); 212 + 213 + return 1; 214 + } 215 + 216 + static int stantum_probe(struct hid_device *hdev, 217 + const struct hid_device_id *id) 218 + { 219 + int ret; 220 + struct stantum_data *sd; 221 + 222 + sd = kmalloc(sizeof(struct stantum_data), GFP_KERNEL); 223 + if (!sd) { 224 + dev_err(&hdev->dev, "cannot allocate Stantum data\n"); 225 + return -ENOMEM; 226 + } 227 + sd->valid = false; 228 + sd->first = false; 229 + sd->activity = false; 230 + hid_set_drvdata(hdev, sd); 231 + 232 + ret = hid_parse(hdev); 233 + if (!ret) 234 + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 235 + 236 + if (ret) 237 + kfree(sd); 238 + 239 + return ret; 240 + } 241 + 242 + static void stantum_remove(struct hid_device *hdev) 243 + { 244 + hid_hw_stop(hdev); 245 + kfree(hid_get_drvdata(hdev)); 246 + hid_set_drvdata(hdev, NULL); 247 + } 248 + 249 + static const struct hid_device_id stantum_devices[] = { 250 + { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) }, 251 + { } 252 + }; 253 + MODULE_DEVICE_TABLE(hid, stantum_devices); 254 + 255 + static const struct hid_usage_id stantum_grabbed_usages[] = { 256 + { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, 257 + { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} 258 + }; 259 + 260 + static struct hid_driver stantum_driver = { 261 + .name = "stantum", 262 + .id_table = stantum_devices, 263 + .probe = stantum_probe, 264 + .remove = stantum_remove, 
265 + .input_mapping = stantum_input_mapping, 266 + .input_mapped = stantum_input_mapped, 267 + .usage_table = stantum_grabbed_usages, 268 + .event = stantum_event, 269 + }; 270 + 271 + static int __init stantum_init(void) 272 + { 273 + return hid_register_driver(&stantum_driver); 274 + } 275 + 276 + static void __exit stantum_exit(void) 277 + { 278 + hid_unregister_driver(&stantum_driver); 279 + } 280 + 281 + module_init(stantum_init); 282 + module_exit(stantum_exit); 283 +
+28
drivers/hid/hid-wacom.c
··· 156 156 struct hid_input *hidinput; 157 157 struct input_dev *input; 158 158 struct wacom_data *wdata; 159 + char rep_data[2]; 159 160 int ret; 161 + int limit; 160 162 161 163 wdata = kzalloc(sizeof(*wdata), GFP_KERNEL); 162 164 if (wdata == NULL) { ··· 168 166 169 167 hid_set_drvdata(hdev, wdata); 170 168 169 + /* Parse the HID report now */ 171 170 ret = hid_parse(hdev); 172 171 if (ret) { 173 172 dev_err(&hdev->dev, "parse failed\n"); ··· 180 177 dev_err(&hdev->dev, "hw start failed\n"); 181 178 goto err_free; 182 179 } 180 + 181 + /* 182 + * Note that if the raw queries fail, it's not a hard failure and it 183 + * is safe to continue 184 + */ 185 + 186 + /* Set Wacom mode2 */ 187 + rep_data[0] = 0x03; rep_data[1] = 0x00; 188 + limit = 3; 189 + do { 190 + ret = hdev->hid_output_raw_report(hdev, rep_data, 2, 191 + HID_FEATURE_REPORT); 192 + } while (ret < 0 && limit-- > 0); 193 + if (ret < 0) 194 + dev_warn(&hdev->dev, "failed to poke device #1, %d\n", ret); 195 + 196 + /* 0x06 - high reporting speed, 0x05 - low speed */ 197 + rep_data[0] = 0x06; rep_data[1] = 0x00; 198 + limit = 3; 199 + do { 200 + ret = hdev->hid_output_raw_report(hdev, rep_data, 2, 201 + HID_FEATURE_REPORT); 202 + } while (ret < 0 && limit-- > 0); 203 + if (ret < 0) 204 + dev_warn(&hdev->dev, "failed to poke device #2, %d\n", ret); 183 205 184 206 hidinput = list_entry(hdev->inputs.next, struct hid_input, list); 185 207 input = hidinput->input;
+1 -1
drivers/hid/hidraw.c
··· 134 134 goto out; 135 135 } 136 136 137 - ret = dev->hid_output_raw_report(dev, buf, count); 137 + ret = dev->hid_output_raw_report(dev, buf, count, HID_OUTPUT_REPORT); 138 138 out: 139 139 kfree(buf); 140 140 return ret;
+33 -9
drivers/hid/usbhid/hid-core.c
··· 5 5 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> 6 6 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc 7 7 * Copyright (c) 2007-2008 Oliver Neukum 8 - * Copyright (c) 2006-2009 Jiri Kosina 8 + * Copyright (c) 2006-2010 Jiri Kosina 9 9 */ 10 10 11 11 /* ··· 316 316 err_hid("usb_submit_urb(out) failed"); 317 317 return -1; 318 318 } 319 + usbhid->last_out = jiffies; 319 320 } else { 320 321 /* 321 322 * queue work to wake up the device. ··· 378 377 err_hid("usb_submit_urb(ctrl) failed"); 379 378 return -1; 380 379 } 380 + usbhid->last_ctrl = jiffies; 381 381 } else { 382 382 /* 383 383 * queue work to wake up the device. ··· 514 512 usbhid->out[usbhid->outhead].report = report; 515 513 usbhid->outhead = head; 516 514 517 - if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) 515 + if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) { 518 516 if (hid_submit_out(hid)) 519 517 clear_bit(HID_OUT_RUNNING, &usbhid->iofl); 518 + } else { 519 + /* 520 + * the queue is known to run 521 + * but an earlier request may be stuck 522 + * we may need to time out 523 + * no race because this is called under 524 + * spinlock 525 + */ 526 + if (time_after(jiffies, usbhid->last_out + HZ * 5)) 527 + usb_unlink_urb(usbhid->urbout); 528 + } 520 529 return; 521 530 } 522 531 ··· 548 535 usbhid->ctrl[usbhid->ctrlhead].dir = dir; 549 536 usbhid->ctrlhead = head; 550 537 551 - if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) 538 + if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) { 552 539 if (hid_submit_ctrl(hid)) 553 540 clear_bit(HID_CTRL_RUNNING, &usbhid->iofl); 541 + } else { 542 + /* 543 + * the queue is known to run 544 + * but an earlier request may be stuck 545 + * we may need to time out 546 + * no race because this is called under 547 + * spinlock 548 + */ 549 + if (time_after(jiffies, usbhid->last_ctrl + HZ * 5)) 550 + usb_unlink_urb(usbhid->urbctrl); 551 + } 554 552 } 555 553 556 554 void usbhid_submit_report(struct 
hid_device *hid, struct hid_report *report, unsigned char dir) ··· 798 774 return 0; 799 775 } 800 776 801 - static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count) 777 + static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count, 778 + unsigned char report_type) 802 779 { 803 780 struct usbhid_device *usbhid = hid->driver_data; 804 781 struct usb_device *dev = hid_to_usb_dev(hid); ··· 810 785 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 811 786 HID_REQ_SET_REPORT, 812 787 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 813 - ((HID_OUTPUT_REPORT + 1) << 8) | *buf, 788 + ((report_type + 1) << 8) | *buf, 814 789 interface->desc.bInterfaceNumber, buf + 1, count - 1, 815 790 USB_CTRL_SET_TIMEOUT); 816 791 ··· 1006 981 1007 982 spin_lock_init(&usbhid->lock); 1008 983 1009 - usbhid->intf = intf; 1010 - usbhid->ifnum = interface->desc.bInterfaceNumber; 1011 - 1012 984 usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL); 1013 985 if (!usbhid->urbctrl) { 1014 986 ret = -ENOMEM; ··· 1176 1154 1177 1155 hid->driver_data = usbhid; 1178 1156 usbhid->hid = hid; 1157 + usbhid->intf = intf; 1158 + usbhid->ifnum = interface->desc.bInterfaceNumber; 1179 1159 1180 1160 ret = hid_add_device(hid); 1181 1161 if (ret) { ··· 1366 1342 1367 1343 #endif /* CONFIG_PM */ 1368 1344 1369 - static struct usb_device_id hid_usb_ids [] = { 1345 + static const struct usb_device_id hid_usb_ids[] = { 1370 1346 { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, 1371 1347 .bInterfaceClass = USB_INTERFACE_CLASS_HID }, 1372 1348 { } /* Terminating entry */
+3
drivers/hid/usbhid/hid-quirks.c
··· 43 43 44 44 { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL }, 45 45 46 + { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, 46 47 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, 47 48 { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, 49 + { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT }, 48 50 49 51 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, 50 52 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, ··· 59 57 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, 60 58 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 61 59 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 60 + { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET }, 62 61 { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, 63 62 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, 64 63 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
+2
drivers/hid/usbhid/usbhid.h
··· 80 80 unsigned char ctrlhead, ctrltail; /* Control fifo head & tail */ 81 81 char *ctrlbuf; /* Control buffer */ 82 82 dma_addr_t ctrlbuf_dma; /* Control buffer dma */ 83 + unsigned long last_ctrl; /* record of last output for timeouts */ 83 84 84 85 struct urb *urbout; /* Output URB */ 85 86 struct hid_output_fifo out[HID_CONTROL_FIFO_SIZE]; /* Output pipe fifo */ 86 87 unsigned char outhead, outtail; /* Output pipe fifo head & tail */ 87 88 char *outbuf; /* Output buffer */ 88 89 dma_addr_t outbuf_dma; /* Output buffer dma */ 90 + unsigned long last_out; /* record of last output for timeouts */ 89 91 90 92 spinlock_t lock; /* fifo spinlock */ 91 93 unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
+6
drivers/input/input-polldev.c
··· 100 100 struct input_polled_dev *dev = input_get_drvdata(input); 101 101 102 102 cancel_delayed_work_sync(&dev->work); 103 + /* 104 + * Clean up work struct to remove references to the workqueue. 105 + * It may be destroyed by the next call. This causes problems 106 + * at next device open-close in case of poll_interval == 0. 107 + */ 108 + INIT_DELAYED_WORK(&dev->work, dev->work.work.func); 103 109 input_polldev_stop_workqueue(); 104 110 105 111 if (dev->close)
+8
drivers/input/serio/i8042.c
··· 1161 1161 return 0; 1162 1162 } 1163 1163 1164 + static int i8042_pm_thaw(struct device *dev) 1165 + { 1166 + i8042_interrupt(0, NULL); 1167 + 1168 + return 0; 1169 + } 1170 + 1164 1171 static const struct dev_pm_ops i8042_pm_ops = { 1165 1172 .suspend = i8042_pm_reset, 1166 1173 .resume = i8042_pm_restore, 1174 + .thaw = i8042_pm_thaw, 1167 1175 .poweroff = i8042_pm_reset, 1168 1176 .restore = i8042_pm_restore, 1169 1177 };
+4 -4
drivers/input/touchscreen/usbtouchscreen.c
··· 618 618 #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH 619 619 static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) 620 620 { 621 - dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1] ; 622 - dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3] ; 621 + dev->x = (pkt[2] << 8) | pkt[1]; 622 + dev->y = (pkt[4] << 8) | pkt[3]; 623 623 dev->press = pkt[5] & 0xff; 624 624 dev->touch = pkt[0] & 0x01; 625 625 ··· 809 809 #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH 810 810 [DEVTYPE_GENERAL_TOUCH] = { 811 811 .min_xc = 0x0, 812 - .max_xc = 0x0500, 812 + .max_xc = 0x7fff, 813 813 .min_yc = 0x0, 814 - .max_yc = 0x0500, 814 + .max_yc = 0x7fff, 815 815 .rept_size = 7, 816 816 .read_data = general_touch_read_data, 817 817 },
+2 -2
drivers/macintosh/adb.c
··· 322 322 adb_controller = NULL; 323 323 } else { 324 324 #ifdef CONFIG_PPC 325 - if (machine_is_compatible("AAPL,PowerBook1998") || 326 - machine_is_compatible("PowerBook1,1")) 325 + if (of_machine_is_compatible("AAPL,PowerBook1998") || 326 + of_machine_is_compatible("PowerBook1,1")) 327 327 sleepy_trackpad = 1; 328 328 #endif /* CONFIG_PPC */ 329 329
+4 -4
drivers/macintosh/therm_pm72.c
··· 1899 1899 */ 1900 1900 if (rackmac) 1901 1901 cpu_pid_type = CPU_PID_TYPE_RACKMAC; 1902 - else if (machine_is_compatible("PowerMac7,3") 1902 + else if (of_machine_is_compatible("PowerMac7,3") 1903 1903 && (cpu_count > 1) 1904 1904 && fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID 1905 1905 && fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID) { ··· 2234 2234 { 2235 2235 struct device_node *np; 2236 2236 2237 - rackmac = machine_is_compatible("RackMac3,1"); 2237 + rackmac = of_machine_is_compatible("RackMac3,1"); 2238 2238 2239 - if (!machine_is_compatible("PowerMac7,2") && 2240 - !machine_is_compatible("PowerMac7,3") && 2239 + if (!of_machine_is_compatible("PowerMac7,2") && 2240 + !of_machine_is_compatible("PowerMac7,3") && 2241 2241 !rackmac) 2242 2242 return -ENODEV; 2243 2243
+1 -1
drivers/macintosh/therm_windtunnel.c
··· 490 490 info = of_get_property(np, "thermal-info", NULL); 491 491 of_node_put(np); 492 492 493 - if( !info || !machine_is_compatible("PowerMac3,6") ) 493 + if( !info || !of_machine_is_compatible("PowerMac3,6") ) 494 494 return -ENODEV; 495 495 496 496 if( info->id != 3 ) {
+4 -4
drivers/macintosh/via-pmu-backlight.c
··· 150 150 151 151 /* Special case for the old PowerBook since I can't test on it */ 152 152 autosave = 153 - machine_is_compatible("AAPL,3400/2400") || 154 - machine_is_compatible("AAPL,3500"); 153 + of_machine_is_compatible("AAPL,3400/2400") || 154 + of_machine_is_compatible("AAPL,3500"); 155 155 156 156 if (!autosave && 157 157 !pmac_has_backlight_type("pmu") && 158 - !machine_is_compatible("AAPL,PowerBook1998") && 159 - !machine_is_compatible("PowerBook1,1")) 158 + !of_machine_is_compatible("AAPL,PowerBook1998") && 159 + !of_machine_is_compatible("PowerBook1,1")) 160 160 return; 161 161 162 162 snprintf(name, sizeof(name), "pmubl");
+4 -4
drivers/macintosh/via-pmu.c
··· 463 463 #endif 464 464 465 465 #ifdef CONFIG_PPC32 466 - if (machine_is_compatible("AAPL,3400/2400") || 467 - machine_is_compatible("AAPL,3500")) { 466 + if (of_machine_is_compatible("AAPL,3400/2400") || 467 + of_machine_is_compatible("AAPL,3500")) { 468 468 int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO, 469 469 NULL, PMAC_MB_INFO_MODEL, 0); 470 470 pmu_battery_count = 1; ··· 472 472 pmu_batteries[0].flags |= PMU_BATT_TYPE_COMET; 473 473 else 474 474 pmu_batteries[0].flags |= PMU_BATT_TYPE_HOOPER; 475 - } else if (machine_is_compatible("AAPL,PowerBook1998") || 476 - machine_is_compatible("PowerBook1,1")) { 475 + } else if (of_machine_is_compatible("AAPL,PowerBook1998") || 476 + of_machine_is_compatible("PowerBook1,1")) { 477 477 pmu_battery_count = 2; 478 478 pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART; 479 479 pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART;
+3 -3
drivers/macintosh/windfarm_core.c
··· 468 468 DBG("wf: core loaded\n"); 469 469 470 470 /* Don't register on old machines that use therm_pm72 for now */ 471 - if (machine_is_compatible("PowerMac7,2") || 472 - machine_is_compatible("PowerMac7,3") || 473 - machine_is_compatible("RackMac3,1")) 471 + if (of_machine_is_compatible("PowerMac7,2") || 472 + of_machine_is_compatible("PowerMac7,3") || 473 + of_machine_is_compatible("RackMac3,1")) 474 474 return -ENODEV; 475 475 platform_device_register(&wf_platform_device); 476 476 return 0;
+3 -3
drivers/macintosh/windfarm_cpufreq_clamp.c
··· 76 76 struct wf_control *clamp; 77 77 78 78 /* Don't register on old machines that use therm_pm72 for now */ 79 - if (machine_is_compatible("PowerMac7,2") || 80 - machine_is_compatible("PowerMac7,3") || 81 - machine_is_compatible("RackMac3,1")) 79 + if (of_machine_is_compatible("PowerMac7,2") || 80 + of_machine_is_compatible("PowerMac7,3") || 81 + of_machine_is_compatible("RackMac3,1")) 82 82 return -ENODEV; 83 83 84 84 clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL);
+3 -3
drivers/macintosh/windfarm_lm75_sensor.c
··· 239 239 static int __init wf_lm75_sensor_init(void) 240 240 { 241 241 /* Don't register on old machines that use therm_pm72 for now */ 242 - if (machine_is_compatible("PowerMac7,2") || 243 - machine_is_compatible("PowerMac7,3") || 244 - machine_is_compatible("RackMac3,1")) 242 + if (of_machine_is_compatible("PowerMac7,2") || 243 + of_machine_is_compatible("PowerMac7,3") || 244 + of_machine_is_compatible("RackMac3,1")) 245 245 return -ENODEV; 246 246 return i2c_add_driver(&wf_lm75_driver); 247 247 }
+3 -3
drivers/macintosh/windfarm_max6690_sensor.c
··· 188 188 static int __init wf_max6690_sensor_init(void) 189 189 { 190 190 /* Don't register on old machines that use therm_pm72 for now */ 191 - if (machine_is_compatible("PowerMac7,2") || 192 - machine_is_compatible("PowerMac7,3") || 193 - machine_is_compatible("RackMac3,1")) 191 + if (of_machine_is_compatible("PowerMac7,2") || 192 + of_machine_is_compatible("PowerMac7,3") || 193 + of_machine_is_compatible("RackMac3,1")) 194 194 return -ENODEV; 195 195 return i2c_add_driver(&wf_max6690_driver); 196 196 }
+1 -1
drivers/macintosh/windfarm_pm112.c
··· 676 676 { 677 677 struct device_node *cpu; 678 678 679 - if (!machine_is_compatible("PowerMac11,2")) 679 + if (!of_machine_is_compatible("PowerMac11,2")) 680 680 return -ENODEV; 681 681 682 682 /* Count the number of CPU cores */
+1 -1
drivers/macintosh/windfarm_pm121.c
··· 1008 1008 { 1009 1009 int rc = -ENODEV; 1010 1010 1011 - if (machine_is_compatible("PowerMac12,1")) 1011 + if (of_machine_is_compatible("PowerMac12,1")) 1012 1012 rc = pm121_init_pm(); 1013 1013 1014 1014 if (rc == 0) {
+2 -2
drivers/macintosh/windfarm_pm81.c
··· 779 779 { 780 780 int rc = -ENODEV; 781 781 782 - if (machine_is_compatible("PowerMac8,1") || 783 - machine_is_compatible("PowerMac8,2")) 782 + if (of_machine_is_compatible("PowerMac8,1") || 783 + of_machine_is_compatible("PowerMac8,2")) 784 784 rc = wf_init_pm(); 785 785 786 786 if (rc == 0) {
+1 -1
drivers/macintosh/windfarm_pm91.c
··· 711 711 { 712 712 int rc = -ENODEV; 713 713 714 - if (machine_is_compatible("PowerMac9,1")) 714 + if (of_machine_is_compatible("PowerMac9,1")) 715 715 rc = wf_init_pm(); 716 716 717 717 if (rc == 0) {
+3 -3
drivers/macintosh/windfarm_smu_sensors.c
··· 363 363 * I yet have to figure out what's up with 8,2 and will have to 364 364 * adjust for later, unless we can 100% trust the SDB partition... 365 365 */ 366 - if ((machine_is_compatible("PowerMac8,1") || 367 - machine_is_compatible("PowerMac8,2") || 368 - machine_is_compatible("PowerMac9,1")) && 366 + if ((of_machine_is_compatible("PowerMac8,1") || 367 + of_machine_is_compatible("PowerMac8,2") || 368 + of_machine_is_compatible("PowerMac9,1")) && 369 369 cpuvcp_version >= 2) { 370 370 pow->quadratic = 1; 371 371 DBG("windfarm: CPU Power using quadratic transform\n");
+3 -1
drivers/media/dvb/dvb-usb/Kconfig
··· 112 112 select DVB_MT352 if !DVB_FE_CUSTOMISE 113 113 select DVB_ZL10353 if !DVB_FE_CUSTOMISE 114 114 select DVB_DIB7000P if !DVB_FE_CUSTOMISE 115 - select DVB_LGS8GL5 if !DVB_FE_CUSTOMISE 116 115 select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE 116 + select DVB_ATBM8830 if !DVB_FE_CUSTOMISE 117 + select DVB_LGS8GXX if !DVB_FE_CUSTOMISE 117 118 select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE 118 119 select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE 119 120 select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE 121 + select MEDIA_TUNER_MAX2165 if !MEDIA_TUNER_CUSTOMISE 120 122 help 121 123 Say Y here to support the Conexant USB2.0 hybrid reference design. 122 124 Currently, only DVB and ATSC modes are supported, analog mode
+2 -2
drivers/media/dvb/frontends/l64781.c
··· 192 192 spi_bias *= qam_tab[p->constellation]; 193 193 spi_bias /= p->code_rate_HP + 1; 194 194 spi_bias /= (guard_tab[p->guard_interval] + 32); 195 - spi_bias *= 1000ULL; 196 - spi_bias /= 1000ULL + ppm/1000; 195 + spi_bias *= 1000; 196 + spi_bias /= 1000 + ppm/1000; 197 197 spi_bias *= p->code_rate_HP; 198 198 199 199 val0x04 = (p->transmission_mode << 2) | p->guard_interval;
+1
drivers/media/video/bt8xx/bttv-driver.c
··· 4461 4461 request_modules(btv); 4462 4462 } 4463 4463 4464 + init_bttv_i2c_ir(btv); 4464 4465 bttv_input_init(btv); 4465 4466 4466 4467 /* everything is fine */
+6 -2
drivers/media/video/bt8xx/bttv-i2c.c
··· 388 388 if (0 == btv->i2c_rc && i2c_scan) 389 389 do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client); 390 390 391 - /* Instantiate the IR receiver device, if present */ 391 + return btv->i2c_rc; 392 + } 393 + 394 + /* Instantiate the I2C IR receiver device, if present */ 395 + void __devinit init_bttv_i2c_ir(struct bttv *btv) 396 + { 392 397 if (0 == btv->i2c_rc) { 393 398 struct i2c_board_info info; 394 399 /* The external IR receiver is at i2c address 0x34 (0x35 for ··· 413 408 strlcpy(info.type, "ir_video", I2C_NAME_SIZE); 414 409 i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list); 415 410 } 416 - return btv->i2c_rc; 417 411 } 418 412 419 413 int __devexit fini_bttv_i2c(struct bttv *btv)
+1
drivers/media/video/bt8xx/bttvp.h
··· 279 279 extern unsigned int bttv_gpio; 280 280 extern void bttv_gpio_tracking(struct bttv *btv, char *comment); 281 281 extern int init_bttv_i2c(struct bttv *btv); 282 + extern void init_bttv_i2c_ir(struct bttv *btv); 282 283 extern int fini_bttv_i2c(struct bttv *btv); 283 284 284 285 #define bttv_printk if (bttv_verbose) printk
+1 -1
drivers/media/video/mt9t112.c
··· 514 514 /* poll to verify out of standby. Must Poll this bit */ 515 515 for (i = 0; i < 100; i++) { 516 516 mt9t112_reg_read(data, client, 0x0018); 517 - if (0x4000 & data) 517 + if (!(0x4000 & data)) 518 518 break; 519 519 520 520 mdelay(10);
+1 -1
drivers/media/video/pwc/pwc-ctrl.c
··· 753 753 buf[0] = 0xff; /* fixed */ 754 754 755 755 ret = send_control_msg(pdev, 756 - SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, sizeof(buf)); 756 + SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, 1); 757 757 758 758 if (!mode && ret >= 0) { 759 759 if (value < 0)
+1
drivers/net/benet/be_cmds.c
··· 296 296 req_hdr->opcode = opcode; 297 297 req_hdr->subsystem = subsystem; 298 298 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); 299 + req_hdr->version = 0; 299 300 } 300 301 301 302 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
+18 -1
drivers/net/e1000/e1000_main.c
··· 4006 4006 } 4007 4007 } 4008 4008 4009 - if (!buffer_info->dma) 4009 + if (!buffer_info->dma) { 4010 4010 buffer_info->dma = pci_map_page(pdev, 4011 4011 buffer_info->page, 0, 4012 4012 buffer_info->length, 4013 4013 PCI_DMA_FROMDEVICE); 4014 + if (pci_dma_mapping_error(pdev, buffer_info->dma)) { 4015 + put_page(buffer_info->page); 4016 + dev_kfree_skb(skb); 4017 + buffer_info->page = NULL; 4018 + buffer_info->skb = NULL; 4019 + buffer_info->dma = 0; 4020 + adapter->alloc_rx_buff_failed++; 4021 + break; /* while !buffer_info->skb */ 4022 + } 4023 + } 4014 4024 4015 4025 rx_desc = E1000_RX_DESC(*rx_ring, i); 4016 4026 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); ··· 4111 4101 skb->data, 4112 4102 buffer_info->length, 4113 4103 PCI_DMA_FROMDEVICE); 4104 + if (pci_dma_mapping_error(pdev, buffer_info->dma)) { 4105 + dev_kfree_skb(skb); 4106 + buffer_info->skb = NULL; 4107 + buffer_info->dma = 0; 4108 + adapter->alloc_rx_buff_failed++; 4109 + break; /* while !buffer_info->skb */ 4110 + } 4114 4111 4115 4112 /* 4116 4113 * XXX if it was allocated cleanly it will never map to a
+22
drivers/net/ixgbe/ixgbe_82598.c
··· 357 357 u32 fctrl_reg; 358 358 u32 rmcs_reg; 359 359 u32 reg; 360 + u32 link_speed = 0; 361 + bool link_up; 360 362 361 363 #ifdef CONFIG_DCB 362 364 if (hw->fc.requested_mode == ixgbe_fc_pfc) 363 365 goto out; 364 366 365 367 #endif /* CONFIG_DCB */ 368 + /* 369 + * On 82598 having Rx FC on causes resets while doing 1G 370 + * so if it's on turn it off once we know link_speed. For 371 + * more details see 82598 Specification update. 372 + */ 373 + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 374 + if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { 375 + switch (hw->fc.requested_mode) { 376 + case ixgbe_fc_full: 377 + hw->fc.requested_mode = ixgbe_fc_tx_pause; 378 + break; 379 + case ixgbe_fc_rx_pause: 380 + hw->fc.requested_mode = ixgbe_fc_none; 381 + break; 382 + default: 383 + /* no change */ 384 + break; 385 + } 386 + } 387 + 366 388 /* Negotiate the fc mode to use */ 367 389 ret_val = ixgbe_fc_autoneg(hw); 368 390 if (ret_val)
+4
drivers/net/ixgbe/ixgbe_main.c
··· 5763 5763 if (err) 5764 5764 goto err_sw_init; 5765 5765 5766 + /* Make it possible the adapter to be woken up via WOL */ 5767 + if (adapter->hw.mac.type == ixgbe_mac_82599EB) 5768 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 5769 + 5766 5770 /* 5767 5771 * If there is a fan on this device and it has failed log the 5768 5772 * failure.
+1 -1
drivers/net/mace.c
··· 206 206 mp->port_aaui = port_aaui; 207 207 else { 208 208 /* Apple Network Server uses the AAUI port */ 209 - if (machine_is_compatible("AAPL,ShinerESB")) 209 + if (of_machine_is_compatible("AAPL,ShinerESB")) 210 210 mp->port_aaui = 1; 211 211 else { 212 212 #ifdef CONFIG_MACE_AAUI_PORT
+1
drivers/net/sfc/efx.c
··· 2284 2284 fail2: 2285 2285 efx_fini_struct(efx); 2286 2286 fail1: 2287 + WARN_ON(rc > 0); 2287 2288 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); 2288 2289 free_netdev(net_dev); 2289 2290 return rc;
+27 -18
drivers/net/sfc/falcon_boards.c
··· 29 29 #define FALCON_BOARD_SFN4111T 0x51 30 30 #define FALCON_BOARD_SFN4112F 0x52 31 31 32 + /* Board temperature is about 15°C above ambient when air flow is 33 + * limited. */ 34 + #define FALCON_BOARD_TEMP_BIAS 15 35 + 36 + /* SFC4000 datasheet says: 'The maximum permitted junction temperature 37 + * is 125°C; the thermal design of the environment for the SFC4000 38 + * should aim to keep this well below 100°C.' */ 39 + #define FALCON_JUNC_TEMP_MAX 90 40 + 32 41 /***************************************************************************** 33 42 * Support for LM87 sensor chip used on several boards 34 43 */ ··· 557 548 static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */ 558 549 559 550 static const u8 sfe4002_lm87_regs[] = { 560 - LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */ 561 - LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */ 562 - LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */ 563 - LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */ 564 - LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */ 565 - LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */ 566 - LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */ 567 - LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */ 568 - LM87_TEMP_INT_LIMITS(10, 60), /* board */ 569 - LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */ 551 + LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */ 552 + LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ 553 + LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ 554 + LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */ 555 + LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ 556 + LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ 557 + LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */ 558 + LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */ 559 + LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS), 560 + LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX), 570 561 0 571 562 }; 572 563 ··· 628 619 static u8 
sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */ 629 620 630 621 static const u8 sfn4112f_lm87_regs[] = { 631 - LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */ 632 - LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */ 633 - LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */ 634 - LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */ 635 - LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */ 636 - LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */ 637 - LM87_TEMP_INT_LIMITS(10, 60), /* board */ 638 - LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */ 622 + LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */ 623 + LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ 624 + LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ 625 + LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ 626 + LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ 627 + LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */ 628 + LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS), 629 + LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX), 639 630 0 640 631 }; 641 632
+1 -1
drivers/net/sfc/mcdi.c
··· 127 127 efx_dword_t reg; 128 128 129 129 /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ 130 - rc = efx_mcdi_poll_reboot(efx); 130 + rc = -efx_mcdi_poll_reboot(efx); 131 131 if (rc) 132 132 goto out; 133 133
+1 -1
drivers/net/sfc/qt202x_phy.c
··· 320 320 321 321 falcon_board(efx)->type->init_phy(efx); 322 322 323 - return rc; 323 + return 0; 324 324 325 325 fail: 326 326 EFX_ERR(efx, "PHY reset timed out\n");
-1
drivers/net/tc35815.c
··· 1437 1437 /* Transmit complete. */ 1438 1438 lp->lstats.tx_ints++; 1439 1439 tc35815_txdone(dev); 1440 - netif_wake_queue(dev); 1441 1440 if (ret < 0) 1442 1441 ret = 0; 1443 1442 }
+5
drivers/net/usb/cdc_ether.c
··· 584 584 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 585 585 .driver_info = (unsigned long) &mbm_info, 586 586 }, { 587 + /* Ericsson C3607w ver 2 */ 588 + USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM, 589 + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 590 + .driver_info = (unsigned long) &mbm_info, 591 + }, { 587 592 /* Toshiba F3507g */ 588 593 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM, 589 594 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+20 -21
drivers/net/via-velocity.c
··· 1877 1877 /** 1878 1878 * tx_srv - transmit interrupt service 1879 1879 * @vptr; Velocity 1880 - * @status: 1881 1880 * 1882 1881 * Scan the queues looking for transmitted packets that 1883 1882 * we can complete and clean up. Update any statistics as 1884 1883 * necessary/ 1885 1884 */ 1886 - static int velocity_tx_srv(struct velocity_info *vptr, u32 status) 1885 + static int velocity_tx_srv(struct velocity_info *vptr) 1887 1886 { 1888 1887 struct tx_desc *td; 1889 1888 int qnum; ··· 2089 2090 /** 2090 2091 * velocity_rx_srv - service RX interrupt 2091 2092 * @vptr: velocity 2092 - * @status: adapter status (unused) 2093 2093 * 2094 2094 * Walk the receive ring of the velocity adapter and remove 2095 2095 * any received packets from the receive queue. Hand the ring 2096 2096 * slots back to the adapter for reuse. 2097 2097 */ 2098 - static int velocity_rx_srv(struct velocity_info *vptr, int status, 2099 - int budget_left) 2098 + static int velocity_rx_srv(struct velocity_info *vptr, int budget_left) 2100 2099 { 2101 2100 struct net_device_stats *stats = &vptr->dev->stats; 2102 2101 int rd_curr = vptr->rx.curr; ··· 2148 2151 struct velocity_info *vptr = container_of(napi, 2149 2152 struct velocity_info, napi); 2150 2153 unsigned int rx_done; 2151 - u32 isr_status; 2154 + unsigned long flags; 2152 2155 2153 - spin_lock(&vptr->lock); 2154 - isr_status = mac_read_isr(vptr->mac_regs); 2155 - 2156 - /* Ack the interrupt */ 2157 - mac_write_isr(vptr->mac_regs, isr_status); 2158 - if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) 2159 - velocity_error(vptr, isr_status); 2160 - 2156 + spin_lock_irqsave(&vptr->lock, flags); 2161 2157 /* 2162 2158 * Do rx and tx twice for performance (taken from the VIA 2163 2159 * out-of-tree driver). 
2164 2160 */ 2165 - rx_done = velocity_rx_srv(vptr, isr_status, budget / 2); 2166 - velocity_tx_srv(vptr, isr_status); 2167 - rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done); 2168 - velocity_tx_srv(vptr, isr_status); 2169 - 2170 - spin_unlock(&vptr->lock); 2161 + rx_done = velocity_rx_srv(vptr, budget / 2); 2162 + velocity_tx_srv(vptr); 2163 + rx_done += velocity_rx_srv(vptr, budget - rx_done); 2164 + velocity_tx_srv(vptr); 2171 2165 2172 2166 /* If budget not fully consumed, exit the polling mode */ 2173 2167 if (rx_done < budget) { 2174 2168 napi_complete(napi); 2175 2169 mac_enable_int(vptr->mac_regs); 2176 2170 } 2171 + spin_unlock_irqrestore(&vptr->lock, flags); 2177 2172 2178 2173 return rx_done; 2179 2174 } ··· 2195 2206 return IRQ_NONE; 2196 2207 } 2197 2208 2209 + /* Ack the interrupt */ 2210 + mac_write_isr(vptr->mac_regs, isr_status); 2211 + 2198 2212 if (likely(napi_schedule_prep(&vptr->napi))) { 2199 2213 mac_disable_int(vptr->mac_regs); 2200 2214 __napi_schedule(&vptr->napi); 2201 2215 } 2216 + 2217 + if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) 2218 + velocity_error(vptr, isr_status); 2219 + 2202 2220 spin_unlock(&vptr->lock); 2203 2221 2204 2222 return IRQ_HANDLED; ··· 3096 3100 velocity_init_registers(vptr, VELOCITY_INIT_WOL); 3097 3101 mac_disable_int(vptr->mac_regs); 3098 3102 3099 - velocity_tx_srv(vptr, 0); 3103 + velocity_tx_srv(vptr); 3100 3104 3101 3105 for (i = 0; i < vptr->tx.numq; i++) { 3102 3106 if (vptr->tx.used[i]) ··· 3340 3344 { 3341 3345 struct velocity_info *vptr = netdev_priv(dev); 3342 3346 int max_us = 0x3f * 64; 3347 + unsigned long flags; 3343 3348 3344 3349 /* 6 bits of */ 3345 3350 if (ecmd->tx_coalesce_usecs > max_us) ··· 3362 3365 ecmd->tx_coalesce_usecs); 3363 3366 3364 3367 /* Setup the interrupt suppression and queue timers */ 3368 + spin_lock_irqsave(&vptr->lock, flags); 3365 3369 mac_disable_int(vptr->mac_regs); 3366 3370 setup_adaptive_interrupts(vptr); 3367 3371 
setup_queue_timers(vptr); ··· 3370 3372 mac_write_int_mask(vptr->int_mask, vptr->mac_regs); 3371 3373 mac_clear_isr(vptr->mac_regs); 3372 3374 mac_enable_int(vptr->mac_regs); 3375 + spin_unlock_irqrestore(&vptr->lock, flags); 3373 3376 3374 3377 return 0; 3375 3378 }
+2 -2
drivers/net/wireless/ath/ath9k/xmit.c
··· 1615 1615 bf->bf_frmlen -= padsize; 1616 1616 } 1617 1617 1618 - if (conf_is_ht(&hw->conf) && !is_pae(skb)) 1618 + if (conf_is_ht(&hw->conf)) 1619 1619 bf->bf_state.bf_type |= BUF_HT; 1620 1620 1621 1621 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); ··· 1701 1701 goto tx_done; 1702 1702 } 1703 1703 1704 - if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 1704 + if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) { 1705 1705 /* 1706 1706 * Try aggregation if it's a unicast data frame 1707 1707 * and the destination is HT capable.
+1
drivers/net/wireless/b43/b43.h
··· 115 115 #define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ 116 116 #define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ 117 117 #define B43_MMIO_RNG 0x65A 118 + #define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */ 118 119 #define B43_MMIO_IFSCTL 0x688 /* Interframe space control */ 119 120 #define B43_MMIO_IFSCTL_USE_EDCF 0x0004 120 121 #define B43_MMIO_POWERUP_DELAY 0x6A8
+10 -3
drivers/net/wireless/b43/main.c
··· 628 628 static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) 629 629 { 630 630 /* slot_time is in usec. */ 631 - if (dev->phy.type != B43_PHYTYPE_G) 631 + /* This test used to exit for all but a G PHY. */ 632 + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 632 633 return; 633 - b43_write16(dev, 0x684, 510 + slot_time); 634 - b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); 634 + b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time); 635 + /* Shared memory location 0x0010 is the slot time and should be 636 + * set to slot_time; however, this register is initially 0 and changing 637 + * the value adversely affects the transmit rate for BCM4311 638 + * devices. Until this behavior is unterstood, delete this step 639 + * 640 + * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); 641 + */ 635 642 } 636 643 637 644 static void b43_short_slot_timing_enable(struct b43_wldev *dev)
+1 -1
drivers/net/wireless/iwlwifi/iwl-4965.c
··· 2008 2008 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " 2009 2009 "%d index %d\n", scd_ssn , index); 2010 2010 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2011 - priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 2011 + iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 2012 2012 2013 2013 if (priv->mac80211_registered && 2014 2014 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
+3 -5
drivers/net/wireless/iwlwifi/iwl-5000.c
··· 1125 1125 scd_ssn , index, txq_id, txq->swq_id); 1126 1126 1127 1127 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 1128 - priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 1128 + iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 1129 1129 1130 1130 if (priv->mac80211_registered && 1131 1131 (iwl_queue_space(&txq->q) > txq->q.low_mark) && ··· 1153 1153 tx_resp->failure_frame); 1154 1154 1155 1155 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 1156 - if (ieee80211_is_data_qos(tx_resp->frame_ctrl)) 1157 - priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 1156 + iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 1158 1157 1159 1158 if (priv->mac80211_registered && 1160 1159 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 1161 1160 iwl_wake_queue(priv, txq_id); 1162 1161 } 1163 1162 1164 - if (ieee80211_is_data_qos(tx_resp->frame_ctrl)) 1165 - iwl_txq_check_empty(priv, sta_id, tid, txq_id); 1163 + iwl_txq_check_empty(priv, sta_id, tid, txq_id); 1166 1164 1167 1165 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 1168 1166 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
+1
drivers/net/wireless/iwlwifi/iwl-core.c
··· 2745 2745 priv->staging_rxon.flags = 0; 2746 2746 2747 2747 iwl_set_rxon_channel(priv, conf->channel); 2748 + iwl_set_rxon_ht(priv, ht_conf); 2748 2749 2749 2750 iwl_set_flags_for_band(priv, conf->channel->band); 2750 2751 spin_unlock_irqrestore(&priv->lock, flags);
+2
drivers/net/wireless/iwlwifi/iwl-core.h
··· 446 446 int iwl_hw_tx_queue_init(struct iwl_priv *priv, 447 447 struct iwl_tx_queue *txq); 448 448 int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); 449 + void iwl_free_tfds_in_queue(struct iwl_priv *priv, 450 + int sta_id, int tid, int freed); 449 451 int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 450 452 int slots_num, u32 txq_id); 451 453 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
+4 -1
drivers/net/wireless/iwlwifi/iwl-rx.c
··· 928 928 if (ieee80211_is_mgmt(fc) || 929 929 ieee80211_has_protected(fc) || 930 930 ieee80211_has_morefrags(fc) || 931 - le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) 931 + le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG || 932 + (ieee80211_is_data_qos(fc) && 933 + *ieee80211_get_qos_ctl(hdr) & 934 + IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)) 932 935 ret = skb_linearize(skb); 933 936 else 934 937 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
+20 -2
drivers/net/wireless/iwlwifi/iwl-tx.c
··· 120 120 EXPORT_SYMBOL(iwl_txq_update_write_ptr); 121 121 122 122 123 + void iwl_free_tfds_in_queue(struct iwl_priv *priv, 124 + int sta_id, int tid, int freed) 125 + { 126 + if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) 127 + priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 128 + else { 129 + IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n", 130 + priv->stations[sta_id].tid[tid].tfds_in_queue, 131 + freed); 132 + priv->stations[sta_id].tid[tid].tfds_in_queue = 0; 133 + } 134 + } 135 + EXPORT_SYMBOL(iwl_free_tfds_in_queue); 136 + 123 137 /** 124 138 * iwl_tx_queue_free - Deallocate DMA queue. 125 139 * @txq: Transmit queue to deallocate. ··· 1145 1131 struct iwl_queue *q = &txq->q; 1146 1132 struct iwl_tx_info *tx_info; 1147 1133 int nfreed = 0; 1134 + struct ieee80211_hdr *hdr; 1148 1135 1149 1136 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { 1150 1137 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " ··· 1160 1145 1161 1146 tx_info = &txq->txb[txq->q.read_ptr]; 1162 1147 iwl_tx_status(priv, tx_info->skb[0]); 1148 + 1149 + hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data; 1150 + if (hdr && ieee80211_is_data_qos(hdr->frame_control)) 1151 + nfreed++; 1163 1152 tx_info->skb[0] = NULL; 1164 1153 1165 1154 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) 1166 1155 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); 1167 1156 1168 1157 priv->cfg->ops->lib->txq_free_tfd(priv, txq); 1169 - nfreed++; 1170 1158 } 1171 1159 return nfreed; 1172 1160 } ··· 1577 1559 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { 1578 1560 /* calculate mac80211 ampdu sw queue to wake */ 1579 1561 int freed = iwl_tx_queue_reclaim(priv, scd_flow, index); 1580 - priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 1562 + iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 1581 1563 1582 1564 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && 1583 1565 priv->mac80211_registered &&
+1 -1
drivers/net/wireless/iwmc3200wifi/rx.c
··· 794 794 } 795 795 796 796 bss->bss = kzalloc(bss_len, GFP_KERNEL); 797 - if (!bss) { 797 + if (!bss->bss) { 798 798 kfree(bss); 799 799 IWM_ERR(iwm, "Couldn't allocate bss\n"); 800 800 return -ENOMEM;
+1
drivers/net/wireless/rtl818x/rtl8187_dev.c
··· 65 65 /* Sitecom */ 66 66 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, 67 67 {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B}, 68 + {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B}, 68 69 /* Sphairon Access Systems GmbH */ 69 70 {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187}, 70 71 /* Dick Smith Electronics */
+8
drivers/of/Kconfig
··· 1 + config OF_FLATTREE 2 + bool 3 + depends on OF 4 + 5 + config OF_DYNAMIC 6 + def_bool y 7 + depends on OF && PPC_OF 8 + 1 9 config OF_DEVICE 2 10 def_bool y 3 11 depends on OF && (SPARC || PPC_OF || MICROBLAZE)
+1
drivers/of/Makefile
··· 1 1 obj-y = base.o 2 + obj-$(CONFIG_OF_FLATTREE) += fdt.o 2 3 obj-$(CONFIG_OF_DEVICE) += device.o platform.o 3 4 obj-$(CONFIG_OF_GPIO) += gpio.o 4 5 obj-$(CONFIG_OF_I2C) += of_i2c.o
+312 -6
drivers/of/base.c
··· 20 20 #include <linux/module.h> 21 21 #include <linux/of.h> 22 22 #include <linux/spinlock.h> 23 + #include <linux/proc_fs.h> 23 24 24 25 struct device_node *allnodes; 26 + struct device_node *of_chosen; 25 27 26 28 /* use when traversing tree through the allnext, child, sibling, 27 29 * or parent members of struct device_node. ··· 39 37 np = np->parent; 40 38 ip = of_get_property(np, "#address-cells", NULL); 41 39 if (ip) 42 - return *ip; 40 + return be32_to_cpup(ip); 43 41 } while (np->parent); 44 42 /* No #address-cells property for the root node */ 45 43 return OF_ROOT_NODE_ADDR_CELLS_DEFAULT; ··· 55 53 np = np->parent; 56 54 ip = of_get_property(np, "#size-cells", NULL); 57 55 if (ip) 58 - return *ip; 56 + return be32_to_cpup(ip); 59 57 } while (np->parent); 60 58 /* No #size-cells property for the root node */ 61 59 return OF_ROOT_NODE_SIZE_CELLS_DEFAULT; 62 60 } 63 61 EXPORT_SYMBOL(of_n_size_cells); 62 + 63 + #if !defined(CONFIG_SPARC) /* SPARC doesn't do ref counting (yet) */ 64 + /** 65 + * of_node_get - Increment refcount of a node 66 + * @node: Node to inc refcount, NULL is supported to 67 + * simplify writing of callers 68 + * 69 + * Returns node. 70 + */ 71 + struct device_node *of_node_get(struct device_node *node) 72 + { 73 + if (node) 74 + kref_get(&node->kref); 75 + return node; 76 + } 77 + EXPORT_SYMBOL(of_node_get); 78 + 79 + static inline struct device_node *kref_to_device_node(struct kref *kref) 80 + { 81 + return container_of(kref, struct device_node, kref); 82 + } 83 + 84 + /** 85 + * of_node_release - release a dynamically allocated node 86 + * @kref: kref element of the node to be released 87 + * 88 + * In of_node_put() this function is passed to kref_put() 89 + * as the destructor. 90 + */ 91 + static void of_node_release(struct kref *kref) 92 + { 93 + struct device_node *node = kref_to_device_node(kref); 94 + struct property *prop = node->properties; 95 + 96 + /* We should never be releasing nodes that haven't been detached. 
*/ 97 + if (!of_node_check_flag(node, OF_DETACHED)) { 98 + pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name); 99 + dump_stack(); 100 + kref_init(&node->kref); 101 + return; 102 + } 103 + 104 + if (!of_node_check_flag(node, OF_DYNAMIC)) 105 + return; 106 + 107 + while (prop) { 108 + struct property *next = prop->next; 109 + kfree(prop->name); 110 + kfree(prop->value); 111 + kfree(prop); 112 + prop = next; 113 + 114 + if (!prop) { 115 + prop = node->deadprops; 116 + node->deadprops = NULL; 117 + } 118 + } 119 + kfree(node->full_name); 120 + kfree(node->data); 121 + kfree(node); 122 + } 123 + 124 + /** 125 + * of_node_put - Decrement refcount of a node 126 + * @node: Node to dec refcount, NULL is supported to 127 + * simplify writing of callers 128 + * 129 + */ 130 + void of_node_put(struct device_node *node) 131 + { 132 + if (node) 133 + kref_put(&node->kref, of_node_release); 134 + } 135 + EXPORT_SYMBOL(of_node_put); 136 + #endif /* !CONFIG_SPARC */ 64 137 65 138 struct property *of_find_property(const struct device_node *np, 66 139 const char *name, ··· 219 142 return 0; 220 143 } 221 144 EXPORT_SYMBOL(of_device_is_compatible); 145 + 146 + /** 147 + * of_machine_is_compatible - Test root of device tree for a given compatible value 148 + * @compat: compatible string to look for in root node's compatible property. 149 + * 150 + * Returns true if the root node has the given value in its 151 + * compatible property. 
152 + */ 153 + int of_machine_is_compatible(const char *compat) 154 + { 155 + struct device_node *root; 156 + int rc = 0; 157 + 158 + root = of_find_node_by_path("/"); 159 + if (root) { 160 + rc = of_device_is_compatible(root, compat); 161 + of_node_put(root); 162 + } 163 + return rc; 164 + } 165 + EXPORT_SYMBOL(of_machine_is_compatible); 222 166 223 167 /** 224 168 * of_device_is_available - check if a device is available for use ··· 617 519 EXPORT_SYMBOL_GPL(of_modalias_node); 618 520 619 521 /** 522 + * of_find_node_by_phandle - Find a node given a phandle 523 + * @handle: phandle of the node to find 524 + * 525 + * Returns a node pointer with refcount incremented, use 526 + * of_node_put() on it when done. 527 + */ 528 + struct device_node *of_find_node_by_phandle(phandle handle) 529 + { 530 + struct device_node *np; 531 + 532 + read_lock(&devtree_lock); 533 + for (np = allnodes; np; np = np->allnext) 534 + if (np->phandle == handle) 535 + break; 536 + of_node_get(np); 537 + read_unlock(&devtree_lock); 538 + return np; 539 + } 540 + EXPORT_SYMBOL(of_find_node_by_phandle); 541 + 542 + /** 620 543 * of_parse_phandle - Resolve a phandle property to a device_node pointer 621 544 * @np: Pointer to device node holding phandle property 622 545 * @phandle_name: Name of property holding a phandle value ··· 697 578 const void **out_args) 698 579 { 699 580 int ret = -EINVAL; 700 - const u32 *list; 701 - const u32 *list_end; 581 + const __be32 *list; 582 + const __be32 *list_end; 702 583 int size; 703 584 int cur_index = 0; 704 585 struct device_node *node = NULL; ··· 712 593 list_end = list + size / sizeof(*list); 713 594 714 595 while (list < list_end) { 715 - const u32 *cells; 596 + const __be32 *cells; 716 597 const phandle *phandle; 717 598 718 599 phandle = list++; ··· 736 617 goto err1; 737 618 } 738 619 739 - list += *cells; 620 + list += be32_to_cpup(cells); 740 621 if (list > list_end) { 741 622 pr_debug("%s: insufficient arguments length\n", 742 623 
np->full_name); ··· 777 658 return ret; 778 659 } 779 660 EXPORT_SYMBOL(of_parse_phandles_with_args); 661 + 662 + /** 663 + * prom_add_property - Add a property to a node 664 + */ 665 + int prom_add_property(struct device_node *np, struct property *prop) 666 + { 667 + struct property **next; 668 + unsigned long flags; 669 + 670 + prop->next = NULL; 671 + write_lock_irqsave(&devtree_lock, flags); 672 + next = &np->properties; 673 + while (*next) { 674 + if (strcmp(prop->name, (*next)->name) == 0) { 675 + /* duplicate ! don't insert it */ 676 + write_unlock_irqrestore(&devtree_lock, flags); 677 + return -1; 678 + } 679 + next = &(*next)->next; 680 + } 681 + *next = prop; 682 + write_unlock_irqrestore(&devtree_lock, flags); 683 + 684 + #ifdef CONFIG_PROC_DEVICETREE 685 + /* try to add to proc as well if it was initialized */ 686 + if (np->pde) 687 + proc_device_tree_add_prop(np->pde, prop); 688 + #endif /* CONFIG_PROC_DEVICETREE */ 689 + 690 + return 0; 691 + } 692 + 693 + /** 694 + * prom_remove_property - Remove a property from a node. 695 + * 696 + * Note that we don't actually remove it, since we have given out 697 + * who-knows-how-many pointers to the data using get-property. 698 + * Instead we just move the property to the "dead properties" 699 + * list, so it won't be found any more. 
700 + */ 701 + int prom_remove_property(struct device_node *np, struct property *prop) 702 + { 703 + struct property **next; 704 + unsigned long flags; 705 + int found = 0; 706 + 707 + write_lock_irqsave(&devtree_lock, flags); 708 + next = &np->properties; 709 + while (*next) { 710 + if (*next == prop) { 711 + /* found the node */ 712 + *next = prop->next; 713 + prop->next = np->deadprops; 714 + np->deadprops = prop; 715 + found = 1; 716 + break; 717 + } 718 + next = &(*next)->next; 719 + } 720 + write_unlock_irqrestore(&devtree_lock, flags); 721 + 722 + if (!found) 723 + return -ENODEV; 724 + 725 + #ifdef CONFIG_PROC_DEVICETREE 726 + /* try to remove the proc node as well */ 727 + if (np->pde) 728 + proc_device_tree_remove_prop(np->pde, prop); 729 + #endif /* CONFIG_PROC_DEVICETREE */ 730 + 731 + return 0; 732 + } 733 + 734 + /* 735 + * prom_update_property - Update a property in a node. 736 + * 737 + * Note that we don't actually remove it, since we have given out 738 + * who-knows-how-many pointers to the data using get-property. 
739 + * Instead we just move the property to the "dead properties" list, 740 + * and add the new property to the property list 741 + */ 742 + int prom_update_property(struct device_node *np, 743 + struct property *newprop, 744 + struct property *oldprop) 745 + { 746 + struct property **next; 747 + unsigned long flags; 748 + int found = 0; 749 + 750 + write_lock_irqsave(&devtree_lock, flags); 751 + next = &np->properties; 752 + while (*next) { 753 + if (*next == oldprop) { 754 + /* found the node */ 755 + newprop->next = oldprop->next; 756 + *next = newprop; 757 + oldprop->next = np->deadprops; 758 + np->deadprops = oldprop; 759 + found = 1; 760 + break; 761 + } 762 + next = &(*next)->next; 763 + } 764 + write_unlock_irqrestore(&devtree_lock, flags); 765 + 766 + if (!found) 767 + return -ENODEV; 768 + 769 + #ifdef CONFIG_PROC_DEVICETREE 770 + /* try to add to proc as well if it was initialized */ 771 + if (np->pde) 772 + proc_device_tree_update_prop(np->pde, newprop, oldprop); 773 + #endif /* CONFIG_PROC_DEVICETREE */ 774 + 775 + return 0; 776 + } 777 + 778 + #if defined(CONFIG_OF_DYNAMIC) 779 + /* 780 + * Support for dynamic device trees. 781 + * 782 + * On some platforms, the device tree can be manipulated at runtime. 783 + * The routines in this section support adding, removing and changing 784 + * device tree nodes. 785 + */ 786 + 787 + /** 788 + * of_attach_node - Plug a device node into the tree and global list. 789 + */ 790 + void of_attach_node(struct device_node *np) 791 + { 792 + unsigned long flags; 793 + 794 + write_lock_irqsave(&devtree_lock, flags); 795 + np->sibling = np->parent->child; 796 + np->allnext = allnodes; 797 + np->parent->child = np; 798 + allnodes = np; 799 + write_unlock_irqrestore(&devtree_lock, flags); 800 + } 801 + 802 + /** 803 + * of_detach_node - "Unplug" a node from the device tree. 804 + * 805 + * The caller must hold a reference to the node. 
The memory associated with 806 + * the node is not freed until its refcount goes to zero. 807 + */ 808 + void of_detach_node(struct device_node *np) 809 + { 810 + struct device_node *parent; 811 + unsigned long flags; 812 + 813 + write_lock_irqsave(&devtree_lock, flags); 814 + 815 + parent = np->parent; 816 + if (!parent) 817 + goto out_unlock; 818 + 819 + if (allnodes == np) 820 + allnodes = np->allnext; 821 + else { 822 + struct device_node *prev; 823 + for (prev = allnodes; 824 + prev->allnext != np; 825 + prev = prev->allnext) 826 + ; 827 + prev->allnext = np->allnext; 828 + } 829 + 830 + if (parent->child == np) 831 + parent->child = np->sibling; 832 + else { 833 + struct device_node *prevsib; 834 + for (prevsib = np->parent->child; 835 + prevsib->sibling != np; 836 + prevsib = prevsib->sibling) 837 + ; 838 + prevsib->sibling = np->sibling; 839 + } 840 + 841 + of_node_set_flag(np, OF_DETACHED); 842 + 843 + out_unlock: 844 + write_unlock_irqrestore(&devtree_lock, flags); 845 + } 846 + #endif /* defined(CONFIG_OF_DYNAMIC) */ 847 +
+590
drivers/of/fdt.c
··· 1 + /* 2 + * Functions for working with the Flattened Device Tree data format 3 + * 4 + * Copyright 2009 Benjamin Herrenschmidt, IBM Corp 5 + * benh@kernel.crashing.org 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of the GNU General Public License 9 + * version 2 as published by the Free Software Foundation. 10 + */ 11 + 12 + #include <linux/kernel.h> 13 + #include <linux/initrd.h> 14 + #include <linux/of.h> 15 + #include <linux/of_fdt.h> 16 + #include <linux/string.h> 17 + #include <linux/errno.h> 18 + 19 + #ifdef CONFIG_PPC 20 + #include <asm/machdep.h> 21 + #endif /* CONFIG_PPC */ 22 + 23 + #include <asm/page.h> 24 + 25 + int __initdata dt_root_addr_cells; 26 + int __initdata dt_root_size_cells; 27 + 28 + struct boot_param_header *initial_boot_params; 29 + 30 + char *find_flat_dt_string(u32 offset) 31 + { 32 + return ((char *)initial_boot_params) + 33 + be32_to_cpu(initial_boot_params->off_dt_strings) + offset; 34 + } 35 + 36 + /** 37 + * of_scan_flat_dt - scan flattened tree blob and call callback on each. 
38 + * @it: callback function 39 + * @data: context data pointer 40 + * 41 + * This function is used to scan the flattened device-tree, it is 42 + * used to extract the memory information at boot before we can 43 + * unflatten the tree 44 + */ 45 + int __init of_scan_flat_dt(int (*it)(unsigned long node, 46 + const char *uname, int depth, 47 + void *data), 48 + void *data) 49 + { 50 + unsigned long p = ((unsigned long)initial_boot_params) + 51 + be32_to_cpu(initial_boot_params->off_dt_struct); 52 + int rc = 0; 53 + int depth = -1; 54 + 55 + do { 56 + u32 tag = be32_to_cpup((__be32 *)p); 57 + char *pathp; 58 + 59 + p += 4; 60 + if (tag == OF_DT_END_NODE) { 61 + depth--; 62 + continue; 63 + } 64 + if (tag == OF_DT_NOP) 65 + continue; 66 + if (tag == OF_DT_END) 67 + break; 68 + if (tag == OF_DT_PROP) { 69 + u32 sz = be32_to_cpup((__be32 *)p); 70 + p += 8; 71 + if (be32_to_cpu(initial_boot_params->version) < 0x10) 72 + p = _ALIGN(p, sz >= 8 ? 8 : 4); 73 + p += sz; 74 + p = _ALIGN(p, 4); 75 + continue; 76 + } 77 + if (tag != OF_DT_BEGIN_NODE) { 78 + pr_err("Invalid tag %x in flat device tree!\n", tag); 79 + return -EINVAL; 80 + } 81 + depth++; 82 + pathp = (char *)p; 83 + p = _ALIGN(p + strlen(pathp) + 1, 4); 84 + if ((*pathp) == '/') { 85 + char *lp, *np; 86 + for (lp = NULL, np = pathp; *np; np++) 87 + if ((*np) == '/') 88 + lp = np+1; 89 + if (lp != NULL) 90 + pathp = lp; 91 + } 92 + rc = it(p, pathp, depth, data); 93 + if (rc != 0) 94 + break; 95 + } while (1); 96 + 97 + return rc; 98 + } 99 + 100 + /** 101 + * of_get_flat_dt_root - find the root node in the flat blob 102 + */ 103 + unsigned long __init of_get_flat_dt_root(void) 104 + { 105 + unsigned long p = ((unsigned long)initial_boot_params) + 106 + be32_to_cpu(initial_boot_params->off_dt_struct); 107 + 108 + while (be32_to_cpup((__be32 *)p) == OF_DT_NOP) 109 + p += 4; 110 + BUG_ON(be32_to_cpup((__be32 *)p) != OF_DT_BEGIN_NODE); 111 + p += 4; 112 + return _ALIGN(p + strlen((char *)p) + 1, 4); 113 + } 114 + 115 
+ /** 116 + * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr 117 + * 118 + * This function can be used within scan_flattened_dt callback to get 119 + * access to properties 120 + */ 121 + void *__init of_get_flat_dt_prop(unsigned long node, const char *name, 122 + unsigned long *size) 123 + { 124 + unsigned long p = node; 125 + 126 + do { 127 + u32 tag = be32_to_cpup((__be32 *)p); 128 + u32 sz, noff; 129 + const char *nstr; 130 + 131 + p += 4; 132 + if (tag == OF_DT_NOP) 133 + continue; 134 + if (tag != OF_DT_PROP) 135 + return NULL; 136 + 137 + sz = be32_to_cpup((__be32 *)p); 138 + noff = be32_to_cpup((__be32 *)(p + 4)); 139 + p += 8; 140 + if (be32_to_cpu(initial_boot_params->version) < 0x10) 141 + p = _ALIGN(p, sz >= 8 ? 8 : 4); 142 + 143 + nstr = find_flat_dt_string(noff); 144 + if (nstr == NULL) { 145 + pr_warning("Can't find property index name !\n"); 146 + return NULL; 147 + } 148 + if (strcmp(name, nstr) == 0) { 149 + if (size) 150 + *size = sz; 151 + return (void *)p; 152 + } 153 + p += sz; 154 + p = _ALIGN(p, 4); 155 + } while (1); 156 + } 157 + 158 + /** 159 + * of_flat_dt_is_compatible - Return true if given node has compat in compatible list 160 + * @node: node to test 161 + * @compat: compatible string to compare with compatible list. 
162 + */ 163 + int __init of_flat_dt_is_compatible(unsigned long node, const char *compat) 164 + { 165 + const char *cp; 166 + unsigned long cplen, l; 167 + 168 + cp = of_get_flat_dt_prop(node, "compatible", &cplen); 169 + if (cp == NULL) 170 + return 0; 171 + while (cplen > 0) { 172 + if (strncasecmp(cp, compat, strlen(compat)) == 0) 173 + return 1; 174 + l = strlen(cp) + 1; 175 + cp += l; 176 + cplen -= l; 177 + } 178 + 179 + return 0; 180 + } 181 + 182 + static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, 183 + unsigned long align) 184 + { 185 + void *res; 186 + 187 + *mem = _ALIGN(*mem, align); 188 + res = (void *)*mem; 189 + *mem += size; 190 + 191 + return res; 192 + } 193 + 194 + /** 195 + * unflatten_dt_node - Alloc and populate a device_node from the flat tree 196 + * @p: pointer to node in flat tree 197 + * @dad: Parent struct device_node 198 + * @allnextpp: pointer to ->allnext from last allocated device_node 199 + * @fpsize: Size of the node path up at the current depth. 200 + */ 201 + unsigned long __init unflatten_dt_node(unsigned long mem, 202 + unsigned long *p, 203 + struct device_node *dad, 204 + struct device_node ***allnextpp, 205 + unsigned long fpsize) 206 + { 207 + struct device_node *np; 208 + struct property *pp, **prev_pp = NULL; 209 + char *pathp; 210 + u32 tag; 211 + unsigned int l, allocl; 212 + int has_name = 0; 213 + int new_format = 0; 214 + 215 + tag = be32_to_cpup((__be32 *)(*p)); 216 + if (tag != OF_DT_BEGIN_NODE) { 217 + pr_err("Weird tag at start of node: %x\n", tag); 218 + return mem; 219 + } 220 + *p += 4; 221 + pathp = (char *)*p; 222 + l = allocl = strlen(pathp) + 1; 223 + *p = _ALIGN(*p + l, 4); 224 + 225 + /* version 0x10 has a more compact unit name here instead of the full 226 + * path. we accumulate the full path size using "fpsize", we'll rebuild 227 + * it later. We detect this because the first character of the name is 228 + * not '/'. 
229 + */ 230 + if ((*pathp) != '/') { 231 + new_format = 1; 232 + if (fpsize == 0) { 233 + /* root node: special case. fpsize accounts for path 234 + * plus terminating zero. root node only has '/', so 235 + * fpsize should be 2, but we want to avoid the first 236 + * level nodes to have two '/' so we use fpsize 1 here 237 + */ 238 + fpsize = 1; 239 + allocl = 2; 240 + } else { 241 + /* account for '/' and path size minus terminal 0 242 + * already in 'l' 243 + */ 244 + fpsize += l; 245 + allocl = fpsize; 246 + } 247 + } 248 + 249 + np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl, 250 + __alignof__(struct device_node)); 251 + if (allnextpp) { 252 + memset(np, 0, sizeof(*np)); 253 + np->full_name = ((char *)np) + sizeof(struct device_node); 254 + if (new_format) { 255 + char *fn = np->full_name; 256 + /* rebuild full path for new format */ 257 + if (dad && dad->parent) { 258 + strcpy(fn, dad->full_name); 259 + #ifdef DEBUG 260 + if ((strlen(fn) + l + 1) != allocl) { 261 + pr_debug("%s: p: %d, l: %d, a: %d\n", 262 + pathp, (int)strlen(fn), 263 + l, allocl); 264 + } 265 + #endif 266 + fn += strlen(fn); 267 + } 268 + *(fn++) = '/'; 269 + memcpy(fn, pathp, l); 270 + } else 271 + memcpy(np->full_name, pathp, l); 272 + prev_pp = &np->properties; 273 + **allnextpp = np; 274 + *allnextpp = &np->allnext; 275 + if (dad != NULL) { 276 + np->parent = dad; 277 + /* we temporarily use the next field as `last_child'*/ 278 + if (dad->next == NULL) 279 + dad->child = np; 280 + else 281 + dad->next->sibling = np; 282 + dad->next = np; 283 + } 284 + kref_init(&np->kref); 285 + } 286 + while (1) { 287 + u32 sz, noff; 288 + char *pname; 289 + 290 + tag = be32_to_cpup((__be32 *)(*p)); 291 + if (tag == OF_DT_NOP) { 292 + *p += 4; 293 + continue; 294 + } 295 + if (tag != OF_DT_PROP) 296 + break; 297 + *p += 4; 298 + sz = be32_to_cpup((__be32 *)(*p)); 299 + noff = be32_to_cpup((__be32 *)((*p) + 4)); 300 + *p += 8; 301 + if (be32_to_cpu(initial_boot_params->version) < 0x10) 
302 + *p = _ALIGN(*p, sz >= 8 ? 8 : 4); 303 + 304 + pname = find_flat_dt_string(noff); 305 + if (pname == NULL) { 306 + pr_info("Can't find property name in list !\n"); 307 + break; 308 + } 309 + if (strcmp(pname, "name") == 0) 310 + has_name = 1; 311 + l = strlen(pname) + 1; 312 + pp = unflatten_dt_alloc(&mem, sizeof(struct property), 313 + __alignof__(struct property)); 314 + if (allnextpp) { 315 + /* We accept flattened tree phandles either in 316 + * ePAPR-style "phandle" properties, or the 317 + * legacy "linux,phandle" properties. If both 318 + * appear and have different values, things 319 + * will get weird. Don't do that. */ 320 + if ((strcmp(pname, "phandle") == 0) || 321 + (strcmp(pname, "linux,phandle") == 0)) { 322 + if (np->phandle == 0) 323 + np->phandle = *((u32 *)*p); 324 + } 325 + /* And we process the "ibm,phandle" property 326 + * used in pSeries dynamic device tree 327 + * stuff */ 328 + if (strcmp(pname, "ibm,phandle") == 0) 329 + np->phandle = *((u32 *)*p); 330 + pp->name = pname; 331 + pp->length = sz; 332 + pp->value = (void *)*p; 333 + *prev_pp = pp; 334 + prev_pp = &pp->next; 335 + } 336 + *p = _ALIGN((*p) + sz, 4); 337 + } 338 + /* with version 0x10 we may not have the name property, recreate 339 + * it here from the unit name if absent 340 + */ 341 + if (!has_name) { 342 + char *p1 = pathp, *ps = pathp, *pa = NULL; 343 + int sz; 344 + 345 + while (*p1) { 346 + if ((*p1) == '@') 347 + pa = p1; 348 + if ((*p1) == '/') 349 + ps = p1 + 1; 350 + p1++; 351 + } 352 + if (pa < ps) 353 + pa = p1; 354 + sz = (pa - ps) + 1; 355 + pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz, 356 + __alignof__(struct property)); 357 + if (allnextpp) { 358 + pp->name = "name"; 359 + pp->length = sz; 360 + pp->value = pp + 1; 361 + *prev_pp = pp; 362 + prev_pp = &pp->next; 363 + memcpy(pp->value, ps, sz - 1); 364 + ((char *)pp->value)[sz - 1] = 0; 365 + pr_debug("fixed up name for %s -> %s\n", pathp, 366 + (char *)pp->value); 367 + } 368 + } 369 + if 
(allnextpp) { 370 + *prev_pp = NULL; 371 + np->name = of_get_property(np, "name", NULL); 372 + np->type = of_get_property(np, "device_type", NULL); 373 + 374 + if (!np->name) 375 + np->name = "<NULL>"; 376 + if (!np->type) 377 + np->type = "<NULL>"; 378 + } 379 + while (tag == OF_DT_BEGIN_NODE) { 380 + mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize); 381 + tag = be32_to_cpup((__be32 *)(*p)); 382 + } 383 + if (tag != OF_DT_END_NODE) { 384 + pr_err("Weird tag at end of node: %x\n", tag); 385 + return mem; 386 + } 387 + *p += 4; 388 + return mem; 389 + } 390 + 391 + #ifdef CONFIG_BLK_DEV_INITRD 392 + /** 393 + * early_init_dt_check_for_initrd - Decode initrd location from flat tree 394 + * @node: reference to node containing initrd location ('chosen') 395 + */ 396 + void __init early_init_dt_check_for_initrd(unsigned long node) 397 + { 398 + unsigned long start, end, len; 399 + __be32 *prop; 400 + 401 + pr_debug("Looking for initrd properties... "); 402 + 403 + prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len); 404 + if (!prop) 405 + return; 406 + start = of_read_ulong(prop, len/4); 407 + 408 + prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len); 409 + if (!prop) 410 + return; 411 + end = of_read_ulong(prop, len/4); 412 + 413 + early_init_dt_setup_initrd_arch(start, end); 414 + pr_debug("initrd_start=0x%lx initrd_end=0x%lx\n", start, end); 415 + } 416 + #else 417 + inline void early_init_dt_check_for_initrd(unsigned long node) 418 + { 419 + } 420 + #endif /* CONFIG_BLK_DEV_INITRD */ 421 + 422 + /** 423 + * early_init_dt_scan_root - fetch the top level address and size cells 424 + */ 425 + int __init early_init_dt_scan_root(unsigned long node, const char *uname, 426 + int depth, void *data) 427 + { 428 + __be32 *prop; 429 + 430 + if (depth != 0) 431 + return 0; 432 + 433 + dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT; 434 + dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT; 435 + 436 + prop = of_get_flat_dt_prop(node, "#size-cells", 
NULL); 437 + if (prop) 438 + dt_root_size_cells = be32_to_cpup(prop); 439 + pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells); 440 + 441 + prop = of_get_flat_dt_prop(node, "#address-cells", NULL); 442 + if (prop) 443 + dt_root_addr_cells = be32_to_cpup(prop); 444 + pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells); 445 + 446 + /* break now */ 447 + return 1; 448 + } 449 + 450 + u64 __init dt_mem_next_cell(int s, __be32 **cellp) 451 + { 452 + __be32 *p = *cellp; 453 + 454 + *cellp = p + s; 455 + return of_read_number(p, s); 456 + } 457 + 458 + /** 459 + * early_init_dt_scan_memory - Look for an parse memory nodes 460 + */ 461 + int __init early_init_dt_scan_memory(unsigned long node, const char *uname, 462 + int depth, void *data) 463 + { 464 + char *type = of_get_flat_dt_prop(node, "device_type", NULL); 465 + __be32 *reg, *endp; 466 + unsigned long l; 467 + 468 + /* We are scanning "memory" nodes only */ 469 + if (type == NULL) { 470 + /* 471 + * The longtrail doesn't have a device_type on the 472 + * /memory node, so look for the node called /memory@0. 
473 + */ 474 + if (depth != 1 || strcmp(uname, "memory@0") != 0) 475 + return 0; 476 + } else if (strcmp(type, "memory") != 0) 477 + return 0; 478 + 479 + reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l); 480 + if (reg == NULL) 481 + reg = of_get_flat_dt_prop(node, "reg", &l); 482 + if (reg == NULL) 483 + return 0; 484 + 485 + endp = reg + (l / sizeof(__be32)); 486 + 487 + pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n", 488 + uname, l, reg[0], reg[1], reg[2], reg[3]); 489 + 490 + while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { 491 + u64 base, size; 492 + 493 + base = dt_mem_next_cell(dt_root_addr_cells, &reg); 494 + size = dt_mem_next_cell(dt_root_size_cells, &reg); 495 + 496 + if (size == 0) 497 + continue; 498 + pr_debug(" - %llx , %llx\n", (unsigned long long)base, 499 + (unsigned long long)size); 500 + 501 + early_init_dt_add_memory_arch(base, size); 502 + } 503 + 504 + return 0; 505 + } 506 + 507 + int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, 508 + int depth, void *data) 509 + { 510 + unsigned long l; 511 + char *p; 512 + 513 + pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); 514 + 515 + if (depth != 1 || 516 + (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) 517 + return 0; 518 + 519 + early_init_dt_check_for_initrd(node); 520 + 521 + /* Retreive command line */ 522 + p = of_get_flat_dt_prop(node, "bootargs", &l); 523 + if (p != NULL && l > 0) 524 + strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); 525 + 526 + #ifdef CONFIG_CMDLINE 527 + #ifndef CONFIG_CMDLINE_FORCE 528 + if (p == NULL || l == 0 || (l == 1 && (*p) == 0)) 529 + #endif 530 + strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); 531 + #endif /* CONFIG_CMDLINE */ 532 + 533 + early_init_dt_scan_chosen_arch(node); 534 + 535 + pr_debug("Command line is: %s\n", cmd_line); 536 + 537 + /* break now */ 538 + return 1; 539 + } 540 + 541 + /** 542 + * unflatten_device_tree - create 
tree of device_nodes from flat blob 543 + * 544 + * unflattens the device-tree passed by the firmware, creating the 545 + * tree of struct device_node. It also fills the "name" and "type" 546 + * pointers of the nodes so the normal device-tree walking functions 547 + * can be used. 548 + */ 549 + void __init unflatten_device_tree(void) 550 + { 551 + unsigned long start, mem, size; 552 + struct device_node **allnextp = &allnodes; 553 + 554 + pr_debug(" -> unflatten_device_tree()\n"); 555 + 556 + /* First pass, scan for size */ 557 + start = ((unsigned long)initial_boot_params) + 558 + be32_to_cpu(initial_boot_params->off_dt_struct); 559 + size = unflatten_dt_node(0, &start, NULL, NULL, 0); 560 + size = (size | 3) + 1; 561 + 562 + pr_debug(" size is %lx, allocating...\n", size); 563 + 564 + /* Allocate memory for the expanded device tree */ 565 + mem = early_init_dt_alloc_memory_arch(size + 4, 566 + __alignof__(struct device_node)); 567 + mem = (unsigned long) __va(mem); 568 + 569 + ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); 570 + 571 + pr_debug(" unflattening %lx...\n", mem); 572 + 573 + /* Second pass, do actual unflattening */ 574 + start = ((unsigned long)initial_boot_params) + 575 + be32_to_cpu(initial_boot_params->off_dt_struct); 576 + unflatten_dt_node(mem, &start, NULL, &allnextp, 0); 577 + if (be32_to_cpup((__be32 *)start) != OF_DT_END) 578 + pr_warning("Weird tag at end of tree: %08x\n", *((u32 *)start)); 579 + if (be32_to_cpu(((__be32 *)mem)[size / 4]) != 0xdeadbeef) 580 + pr_warning("End of tree marker overwritten: %08x\n", 581 + be32_to_cpu(((__be32 *)mem)[size / 4])); 582 + *allnextp = NULL; 583 + 584 + /* Get pointer to OF "/chosen" node for use everywhere */ 585 + of_chosen = of_find_node_by_path("/chosen"); 586 + if (of_chosen == NULL) 587 + of_chosen = of_find_node_by_path("/chosen@0"); 588 + 589 + pr_debug(" <- unflatten_device_tree()\n"); 590 + }
+7 -6
drivers/of/gpio.c
··· 36 36 struct of_gpio_chip *of_gc = NULL; 37 37 int size; 38 38 const void *gpio_spec; 39 - const u32 *gpio_cells; 39 + const __be32 *gpio_cells; 40 40 41 41 ret = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index, 42 42 &gc, &gpio_spec); ··· 55 55 56 56 gpio_cells = of_get_property(gc, "#gpio-cells", &size); 57 57 if (!gpio_cells || size != sizeof(*gpio_cells) || 58 - *gpio_cells != of_gc->gpio_cells) { 58 + be32_to_cpup(gpio_cells) != of_gc->gpio_cells) { 59 59 pr_debug("%s: wrong #gpio-cells for %s\n", 60 60 np->full_name, gc->full_name); 61 61 ret = -EINVAL; ··· 127 127 int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np, 128 128 const void *gpio_spec, enum of_gpio_flags *flags) 129 129 { 130 - const u32 *gpio = gpio_spec; 130 + const __be32 *gpio = gpio_spec; 131 + const u32 n = be32_to_cpup(gpio); 131 132 132 133 /* 133 134 * We're discouraging gpio_cells < 2, since that way you'll have to ··· 141 140 return -EINVAL; 142 141 } 143 142 144 - if (*gpio > of_gc->gc.ngpio) 143 + if (n > of_gc->gc.ngpio) 145 144 return -EINVAL; 146 145 147 146 if (flags) 148 - *flags = gpio[1]; 147 + *flags = be32_to_cpu(gpio[1]); 149 148 150 - return *gpio; 149 + return n; 151 150 } 152 151 EXPORT_SYMBOL(of_gpio_simple_xlate); 153 152
+2 -2
drivers/of/of_i2c.c
··· 25 25 for_each_child_of_node(adap_node, node) { 26 26 struct i2c_board_info info = {}; 27 27 struct dev_archdata dev_ad = {}; 28 - const u32 *addr; 28 + const __be32 *addr; 29 29 int len; 30 30 31 31 if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) ··· 40 40 41 41 info.irq = irq_of_parse_and_map(node, 0); 42 42 43 - info.addr = *addr; 43 + info.addr = be32_to_cpup(addr); 44 44 45 45 dev_archdata_set_node(&dev_ad, node); 46 46 info.archdata = &dev_ad;
+4 -4
drivers/of/of_mdio.c
··· 51 51 52 52 /* Loop over the child nodes and register a phy_device for each one */ 53 53 for_each_child_of_node(np, child) { 54 - const u32 *addr; 54 + const __be32 *addr; 55 55 int len; 56 56 57 57 /* A PHY must have a reg property in the range [0-31] */ ··· 68 68 mdio->irq[*addr] = PHY_POLL; 69 69 } 70 70 71 - phy = get_phy_device(mdio, *addr); 71 + phy = get_phy_device(mdio, be32_to_cpup(addr)); 72 72 if (!phy) { 73 73 dev_err(&mdio->dev, "error probing PHY at address %i\n", 74 74 *addr); ··· 160 160 struct device_node *net_np; 161 161 char bus_id[MII_BUS_ID_SIZE + 3]; 162 162 struct phy_device *phy; 163 - const u32 *phy_id; 163 + const __be32 *phy_id; 164 164 int sz; 165 165 166 166 if (!dev->dev.parent) ··· 174 174 if (!phy_id || sz < sizeof(*phy_id)) 175 175 return NULL; 176 176 177 - sprintf(bus_id, PHY_ID_FMT, "0", phy_id[0]); 177 + sprintf(bus_id, PHY_ID_FMT, "0", be32_to_cpu(phy_id[0])); 178 178 179 179 phy = phy_connect(dev, bus_id, hndlr, 0, iface); 180 180 return IS_ERR(phy) ? NULL : phy;
+3 -3
drivers/of/of_spi.c
··· 23 23 { 24 24 struct spi_device *spi; 25 25 struct device_node *nc; 26 - const u32 *prop; 26 + const __be32 *prop; 27 27 int rc; 28 28 int len; 29 29 ··· 54 54 spi_dev_put(spi); 55 55 continue; 56 56 } 57 - spi->chip_select = *prop; 57 + spi->chip_select = be32_to_cpup(prop); 58 58 59 59 /* Mode (clock phase/polarity/etc.) */ 60 60 if (of_find_property(nc, "spi-cpha", NULL)) ··· 72 72 spi_dev_put(spi); 73 73 continue; 74 74 } 75 - spi->max_speed_hz = *prop; 75 + spi->max_speed_hz = be32_to_cpup(prop); 76 76 77 77 /* IRQ */ 78 78 spi->irq = irq_of_parse_and_map(nc, 0);
-6
drivers/pci/hotplug/acpiphp_glue.c
··· 720 720 -ret_val); 721 721 goto acpiphp_bus_add_out; 722 722 } 723 - /* 724 - * try to start anyway. We could have failed to add 725 - * simply because this bus had previously been added 726 - * on another add. Don't bother with the return value 727 - * we just keep going. 728 - */ 729 723 ret_val = acpi_bus_start(device); 730 724 731 725 acpiphp_bus_add_out:
+1 -1
drivers/platform/x86/acer-wmi.c
··· 934 934 acer_backlight_device = bd; 935 935 936 936 bd->props.power = FB_BLANK_UNBLANK; 937 - bd->props.brightness = max_brightness; 937 + bd->props.brightness = read_brightness(bd); 938 938 bd->props.max_brightness = max_brightness; 939 939 backlight_update_status(bd); 940 940 return 0;
+1 -1
drivers/platform/x86/thinkpad_acpi.c
··· 5771 5771 case TPACPI_THERMAL_ACPI_TMP07: 5772 5772 case TPACPI_THERMAL_ACPI_UPDT: 5773 5773 sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, 5774 - &thermal_temp_input16_group); 5774 + &thermal_temp_input8_group); 5775 5775 break; 5776 5776 case TPACPI_THERMAL_NONE: 5777 5777 default:
+5 -5
drivers/sbus/char/openprom.c
··· 233 233 234 234 ph = 0; 235 235 if (dp) 236 - ph = dp->node; 236 + ph = dp->phandle; 237 237 238 238 data->current_node = dp; 239 239 *((int *) op->oprom_array) = ph; ··· 256 256 257 257 dp = pci_device_to_OF_node(pdev); 258 258 data->current_node = dp; 259 - *((int *)op->oprom_array) = dp->node; 259 + *((int *)op->oprom_array) = dp->phandle; 260 260 op->oprom_size = sizeof(int); 261 261 err = copyout(argp, op, bufsize + sizeof(int)); 262 262 ··· 273 273 274 274 dp = of_find_node_by_path(op->oprom_array); 275 275 if (dp) 276 - ph = dp->node; 276 + ph = dp->phandle; 277 277 data->current_node = dp; 278 278 *((int *)op->oprom_array) = ph; 279 279 op->oprom_size = sizeof(int); ··· 540 540 } 541 541 } 542 542 if (dp) 543 - nd = dp->node; 543 + nd = dp->phandle; 544 544 if (copy_to_user(argp, &nd, sizeof(phandle))) 545 545 return -EFAULT; 546 546 ··· 570 570 case OPIOCGETOPTNODE: 571 571 BUILD_BUG_ON(sizeof(phandle) != sizeof(int)); 572 572 573 - if (copy_to_user(argp, &options_node->node, sizeof(phandle))) 573 + if (copy_to_user(argp, &options_node->phandle, sizeof(phandle))) 574 574 return -EFAULT; 575 575 576 576 return 0;
+1 -1
drivers/scsi/arm/fas216.c
··· 2516 2516 if (info->scsi.phase == PHASE_IDLE) 2517 2517 fas216_kick(info); 2518 2518 2519 - mod_timer(&info->eh_timer, 30 * HZ); 2519 + mod_timer(&info->eh_timer, jiffies + 30 * HZ); 2520 2520 spin_unlock_irqrestore(&info->host_lock, flags); 2521 2521 2522 2522 /*
+16 -2
drivers/scsi/fcoe/fcoe.c
··· 2009 2009 fcoe_interface_cleanup(fcoe); 2010 2010 rtnl_unlock(); 2011 2011 fcoe_if_destroy(fcoe->ctlr.lp); 2012 + module_put(THIS_MODULE); 2013 + 2012 2014 out_putdev: 2013 2015 dev_put(netdev); 2014 2016 out_nodev: ··· 2061 2059 } 2062 2060 #endif 2063 2061 2062 + if (!try_module_get(THIS_MODULE)) { 2063 + rc = -EINVAL; 2064 + goto out_nomod; 2065 + } 2066 + 2064 2067 rtnl_lock(); 2065 2068 netdev = fcoe_if_to_netdev(buffer); 2066 2069 if (!netdev) { ··· 2106 2099 if (!fcoe_link_ok(lport)) 2107 2100 fcoe_ctlr_link_up(&fcoe->ctlr); 2108 2101 2109 - rc = 0; 2110 - out_free: 2111 2102 /* 2112 2103 * Release from init in fcoe_interface_create(), on success lport 2113 2104 * should be holding a reference taken in fcoe_if_create(). 2114 2105 */ 2115 2106 fcoe_interface_put(fcoe); 2107 + dev_put(netdev); 2108 + rtnl_unlock(); 2109 + mutex_unlock(&fcoe_config_mutex); 2110 + 2111 + return 0; 2112 + out_free: 2113 + fcoe_interface_put(fcoe); 2116 2114 out_putdev: 2117 2115 dev_put(netdev); 2118 2116 out_nodev: 2119 2117 rtnl_unlock(); 2118 + module_put(THIS_MODULE); 2119 + out_nomod: 2120 2120 mutex_unlock(&fcoe_config_mutex); 2121 2121 return rc; 2122 2122 }
+1 -1
drivers/scsi/fcoe/libfcoe.c
··· 1187 1187 next_timer = fip->ctlr_ka_time; 1188 1188 1189 1189 if (time_after_eq(jiffies, fip->port_ka_time)) { 1190 - fip->port_ka_time += jiffies + 1190 + fip->port_ka_time = jiffies + 1191 1191 msecs_to_jiffies(FIP_VN_KA_PERIOD); 1192 1192 fip->send_port_ka = 1; 1193 1193 }
+1 -1
drivers/scsi/libfc/fc_exch.c
··· 1890 1890 fc_exch_setup_hdr(ep, fp, ep->f_ctl); 1891 1891 sp->cnt++; 1892 1892 1893 - if (ep->xid <= lport->lro_xid) 1893 + if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) 1894 1894 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); 1895 1895 1896 1896 if (unlikely(lport->tt.frame_send(lport, fp)))
-3
drivers/scsi/libfc/fc_fcp.c
··· 298 298 { 299 299 struct fc_lport *lport; 300 300 301 - if (!fsp) 302 - return; 303 - 304 301 lport = fsp->lp; 305 302 if ((fsp->req_flags & FC_SRB_READ) && 306 303 (lport->lro_enabled) && (lport->tt.ddp_setup)) {
+2 -1
drivers/scsi/libfc/fc_lport.c
··· 1800 1800 u32 did; 1801 1801 1802 1802 job->reply->reply_payload_rcv_len = 0; 1803 - rsp->resid_len = job->reply_payload.payload_len; 1803 + if (rsp) 1804 + rsp->resid_len = job->reply_payload.payload_len; 1804 1805 1805 1806 mutex_lock(&lport->lp_mutex); 1806 1807
+1 -1
drivers/scsi/libfc/fc_rport.c
··· 623 623 624 624 tov = ntohl(plp->fl_csp.sp_e_d_tov); 625 625 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR) 626 - tov /= 1000; 626 + tov /= 1000000; 627 627 if (tov > rdata->e_d_tov) 628 628 rdata->e_d_tov = tov; 629 629 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
+3 -5
drivers/scsi/libiscsi_tcp.c
··· 992 992 if (r2t == NULL) { 993 993 if (kfifo_out(&tcp_task->r2tqueue, 994 994 (void *)&tcp_task->r2t, sizeof(void *)) != 995 - sizeof(void *)) { 996 - WARN_ONCE(1, "unexpected fifo state"); 995 + sizeof(void *)) 997 996 r2t = NULL; 998 - } 999 - 1000 - r2t = tcp_task->r2t; 997 + else 998 + r2t = tcp_task->r2t; 1001 999 } 1002 1000 spin_unlock_bh(&session->lock); 1003 1001 }
+16 -2
drivers/scsi/megaraid/megaraid_sas.c
··· 3781 3781 compat_alloc_user_space(sizeof(struct megasas_iocpacket)); 3782 3782 int i; 3783 3783 int error = 0; 3784 + compat_uptr_t ptr; 3784 3785 3785 3786 if (clear_user(ioc, sizeof(*ioc))) 3786 3787 return -EFAULT; ··· 3794 3793 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) 3795 3794 return -EFAULT; 3796 3795 3797 - for (i = 0; i < MAX_IOCTL_SGE; i++) { 3798 - compat_uptr_t ptr; 3796 + /* 3797 + * The sense_ptr is used in megasas_mgmt_fw_ioctl only when 3798 + * sense_len is not null, so prepare the 64bit value under 3799 + * the same condition. 3800 + */ 3801 + if (ioc->sense_len) { 3802 + void __user **sense_ioc_ptr = 3803 + (void __user **)(ioc->frame.raw + ioc->sense_off); 3804 + compat_uptr_t *sense_cioc_ptr = 3805 + (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off); 3806 + if (get_user(ptr, sense_cioc_ptr) || 3807 + put_user(compat_ptr(ptr), sense_ioc_ptr)) 3808 + return -EFAULT; 3809 + } 3799 3810 3811 + for (i = 0; i < MAX_IOCTL_SGE; i++) { 3800 3812 if (get_user(ptr, &cioc->sgl[i].iov_base) || 3801 3813 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || 3802 3814 copy_in_user(&ioc->sgl[i].iov_len,
+4 -3
drivers/serial/8250.c
··· 83 83 84 84 #define PASS_LIMIT 256 85 85 86 + #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) 87 + 88 + 86 89 /* 87 90 * We default to IRQ0 for the "no irq" hack. Some 88 91 * machine types want others as well - they're free ··· 1795 1792 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; 1796 1793 spin_unlock_irqrestore(&up->port.lock, flags); 1797 1794 1798 - return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0; 1795 + return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0; 1799 1796 } 1800 1797 1801 1798 static unsigned int serial8250_get_mctrl(struct uart_port *port) ··· 1852 1849 serial_out(up, UART_LCR, up->lcr); 1853 1850 spin_unlock_irqrestore(&up->port.lock, flags); 1854 1851 } 1855 - 1856 - #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) 1857 1852 1858 1853 /* 1859 1854 * Wait for transmitter & holding register to empty
+3 -3
drivers/serial/pmac_zilog.c
··· 2031 2031 /* 2032 2032 * XServe's default to 57600 bps 2033 2033 */ 2034 - if (machine_is_compatible("RackMac1,1") 2035 - || machine_is_compatible("RackMac1,2") 2036 - || machine_is_compatible("MacRISC4")) 2034 + if (of_machine_is_compatible("RackMac1,1") 2035 + || of_machine_is_compatible("RackMac1,2") 2036 + || of_machine_is_compatible("MacRISC4")) 2037 2037 baud = 57600; 2038 2038 2039 2039 /*
+22 -1
drivers/spi/Kconfig
··· 100 100 inexpensive battery powered microcontroller evaluation board. 101 101 This same cable can be used to flash new firmware. 102 102 103 + config SPI_COLDFIRE_QSPI 104 + tristate "Freescale Coldfire QSPI controller" 105 + depends on (M520x || M523x || M5249 || M527x || M528x || M532x) 106 + help 107 + This enables support for the Coldfire QSPI controller in master 108 + mode. 109 + 110 + This driver can also be built as a module. If so, the module 111 + will be called coldfire_qspi. 112 + 113 + config SPI_DAVINCI 114 + tristate "SPI controller driver for DaVinci/DA8xx SoC's" 115 + depends on SPI_MASTER && ARCH_DAVINCI 116 + select SPI_BITBANG 117 + help 118 + SPI master controller for DaVinci and DA8xx SPI modules. 119 + 103 120 config SPI_GPIO 104 121 tristate "GPIO-based bitbanging SPI Master" 105 122 depends on GENERIC_GPIO ··· 325 308 # 326 309 327 310 config SPI_DESIGNWARE 328 - bool "DesignWare SPI controller core support" 311 + tristate "DesignWare SPI controller core support" 329 312 depends on SPI_MASTER 330 313 help 331 314 general driver for SPI controller core from DesignWare ··· 333 316 config SPI_DW_PCI 334 317 tristate "PCI interface driver for DW SPI core" 335 318 depends on SPI_DESIGNWARE && PCI 319 + 320 + config SPI_DW_MMIO 321 + tristate "Memory-mapped io interface driver for DW SPI core" 322 + depends on SPI_DESIGNWARE && HAVE_CLK 336 323 337 324 # 338 325 # There are lots of SPI device types, with sensors and memory
+3
drivers/spi/Makefile
··· 16 16 obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o 17 17 obj-$(CONFIG_SPI_AU1550) += au1550_spi.o 18 18 obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o 19 + obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o 20 + obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o 19 21 obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o 20 22 obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o 23 + obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o 21 24 obj-$(CONFIG_SPI_GPIO) += spi_gpio.o 22 25 obj-$(CONFIG_SPI_IMX) += spi_imx.o 23 26 obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
+640
drivers/spi/coldfire_qspi.c
··· 1 + /* 2 + * Freescale/Motorola Coldfire Queued SPI driver 3 + * 4 + * Copyright 2010 Steven King <sfking@fdwdc.com> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program; if not, write to the Free Software 18 + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA 19 + * 20 + */ 21 + 22 + #include <linux/kernel.h> 23 + #include <linux/module.h> 24 + #include <linux/interrupt.h> 25 + #include <linux/errno.h> 26 + #include <linux/platform_device.h> 27 + #include <linux/workqueue.h> 28 + #include <linux/delay.h> 29 + #include <linux/io.h> 30 + #include <linux/clk.h> 31 + #include <linux/err.h> 32 + #include <linux/spi/spi.h> 33 + 34 + #include <asm/coldfire.h> 35 + #include <asm/mcfqspi.h> 36 + 37 + #define DRIVER_NAME "mcfqspi" 38 + 39 + #define MCFQSPI_BUSCLK (MCF_BUSCLK / 2) 40 + 41 + #define MCFQSPI_QMR 0x00 42 + #define MCFQSPI_QMR_MSTR 0x8000 43 + #define MCFQSPI_QMR_CPOL 0x0200 44 + #define MCFQSPI_QMR_CPHA 0x0100 45 + #define MCFQSPI_QDLYR 0x04 46 + #define MCFQSPI_QDLYR_SPE 0x8000 47 + #define MCFQSPI_QWR 0x08 48 + #define MCFQSPI_QWR_HALT 0x8000 49 + #define MCFQSPI_QWR_WREN 0x4000 50 + #define MCFQSPI_QWR_CSIV 0x1000 51 + #define MCFQSPI_QIR 0x0C 52 + #define MCFQSPI_QIR_WCEFB 0x8000 53 + #define MCFQSPI_QIR_ABRTB 0x4000 54 + #define MCFQSPI_QIR_ABRTL 0x1000 55 + #define MCFQSPI_QIR_WCEFE 0x0800 56 + #define MCFQSPI_QIR_ABRTE 0x0400 57 + #define 
MCFQSPI_QIR_SPIFE 0x0100 58 + #define MCFQSPI_QIR_WCEF 0x0008 59 + #define MCFQSPI_QIR_ABRT 0x0004 60 + #define MCFQSPI_QIR_SPIF 0x0001 61 + #define MCFQSPI_QAR 0x010 62 + #define MCFQSPI_QAR_TXBUF 0x00 63 + #define MCFQSPI_QAR_RXBUF 0x10 64 + #define MCFQSPI_QAR_CMDBUF 0x20 65 + #define MCFQSPI_QDR 0x014 66 + #define MCFQSPI_QCR 0x014 67 + #define MCFQSPI_QCR_CONT 0x8000 68 + #define MCFQSPI_QCR_BITSE 0x4000 69 + #define MCFQSPI_QCR_DT 0x2000 70 + 71 + struct mcfqspi { 72 + void __iomem *iobase; 73 + int irq; 74 + struct clk *clk; 75 + struct mcfqspi_cs_control *cs_control; 76 + 77 + wait_queue_head_t waitq; 78 + 79 + struct work_struct work; 80 + struct workqueue_struct *workq; 81 + spinlock_t lock; 82 + struct list_head msgq; 83 + }; 84 + 85 + static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val) 86 + { 87 + writew(val, mcfqspi->iobase + MCFQSPI_QMR); 88 + } 89 + 90 + static void mcfqspi_wr_qdlyr(struct mcfqspi *mcfqspi, u16 val) 91 + { 92 + writew(val, mcfqspi->iobase + MCFQSPI_QDLYR); 93 + } 94 + 95 + static u16 mcfqspi_rd_qdlyr(struct mcfqspi *mcfqspi) 96 + { 97 + return readw(mcfqspi->iobase + MCFQSPI_QDLYR); 98 + } 99 + 100 + static void mcfqspi_wr_qwr(struct mcfqspi *mcfqspi, u16 val) 101 + { 102 + writew(val, mcfqspi->iobase + MCFQSPI_QWR); 103 + } 104 + 105 + static void mcfqspi_wr_qir(struct mcfqspi *mcfqspi, u16 val) 106 + { 107 + writew(val, mcfqspi->iobase + MCFQSPI_QIR); 108 + } 109 + 110 + static void mcfqspi_wr_qar(struct mcfqspi *mcfqspi, u16 val) 111 + { 112 + writew(val, mcfqspi->iobase + MCFQSPI_QAR); 113 + } 114 + 115 + static void mcfqspi_wr_qdr(struct mcfqspi *mcfqspi, u16 val) 116 + { 117 + writew(val, mcfqspi->iobase + MCFQSPI_QDR); 118 + } 119 + 120 + static u16 mcfqspi_rd_qdr(struct mcfqspi *mcfqspi) 121 + { 122 + return readw(mcfqspi->iobase + MCFQSPI_QDR); 123 + } 124 + 125 + static void mcfqspi_cs_select(struct mcfqspi *mcfqspi, u8 chip_select, 126 + bool cs_high) 127 + { 128 + mcfqspi->cs_control->select(mcfqspi->cs_control, 
chip_select, cs_high); 129 + } 130 + 131 + static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select, 132 + bool cs_high) 133 + { 134 + mcfqspi->cs_control->deselect(mcfqspi->cs_control, chip_select, cs_high); 135 + } 136 + 137 + static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi) 138 + { 139 + return (mcfqspi->cs_control && mcfqspi->cs_control->setup) ? 140 + mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0; 141 + } 142 + 143 + static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi) 144 + { 145 + if (mcfqspi->cs_control && mcfqspi->cs_control->teardown) 146 + mcfqspi->cs_control->teardown(mcfqspi->cs_control); 147 + } 148 + 149 + static u8 mcfqspi_qmr_baud(u32 speed_hz) 150 + { 151 + return clamp((MCFQSPI_BUSCLK + speed_hz - 1) / speed_hz, 2u, 255u); 152 + } 153 + 154 + static bool mcfqspi_qdlyr_spe(struct mcfqspi *mcfqspi) 155 + { 156 + return mcfqspi_rd_qdlyr(mcfqspi) & MCFQSPI_QDLYR_SPE; 157 + } 158 + 159 + static irqreturn_t mcfqspi_irq_handler(int this_irq, void *dev_id) 160 + { 161 + struct mcfqspi *mcfqspi = dev_id; 162 + 163 + /* clear interrupt */ 164 + mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE | MCFQSPI_QIR_SPIF); 165 + wake_up(&mcfqspi->waitq); 166 + 167 + return IRQ_HANDLED; 168 + } 169 + 170 + static void mcfqspi_transfer_msg8(struct mcfqspi *mcfqspi, unsigned count, 171 + const u8 *txbuf, u8 *rxbuf) 172 + { 173 + unsigned i, n, offset = 0; 174 + 175 + n = min(count, 16u); 176 + 177 + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); 178 + for (i = 0; i < n; ++i) 179 + mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); 180 + 181 + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); 182 + if (txbuf) 183 + for (i = 0; i < n; ++i) 184 + mcfqspi_wr_qdr(mcfqspi, *txbuf++); 185 + else 186 + for (i = 0; i < count; ++i) 187 + mcfqspi_wr_qdr(mcfqspi, 0); 188 + 189 + count -= n; 190 + if (count) { 191 + u16 qwr = 0xf08; 192 + mcfqspi_wr_qwr(mcfqspi, 0x700); 193 + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); 194 + 195 + do { 196 + wait_event(mcfqspi->waitq, 
!mcfqspi_qdlyr_spe(mcfqspi)); 197 + mcfqspi_wr_qwr(mcfqspi, qwr); 198 + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); 199 + if (rxbuf) { 200 + mcfqspi_wr_qar(mcfqspi, 201 + MCFQSPI_QAR_RXBUF + offset); 202 + for (i = 0; i < 8; ++i) 203 + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); 204 + } 205 + n = min(count, 8u); 206 + if (txbuf) { 207 + mcfqspi_wr_qar(mcfqspi, 208 + MCFQSPI_QAR_TXBUF + offset); 209 + for (i = 0; i < n; ++i) 210 + mcfqspi_wr_qdr(mcfqspi, *txbuf++); 211 + } 212 + qwr = (offset ? 0x808 : 0) + ((n - 1) << 8); 213 + offset ^= 8; 214 + count -= n; 215 + } while (count); 216 + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); 217 + mcfqspi_wr_qwr(mcfqspi, qwr); 218 + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); 219 + if (rxbuf) { 220 + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); 221 + for (i = 0; i < 8; ++i) 222 + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); 223 + offset ^= 8; 224 + } 225 + } else { 226 + mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); 227 + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); 228 + } 229 + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); 230 + if (rxbuf) { 231 + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); 232 + for (i = 0; i < n; ++i) 233 + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); 234 + } 235 + } 236 + 237 + static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count, 238 + const u16 *txbuf, u16 *rxbuf) 239 + { 240 + unsigned i, n, offset = 0; 241 + 242 + n = min(count, 16u); 243 + 244 + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); 245 + for (i = 0; i < n; ++i) 246 + mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); 247 + 248 + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); 249 + if (txbuf) 250 + for (i = 0; i < n; ++i) 251 + mcfqspi_wr_qdr(mcfqspi, *txbuf++); 252 + else 253 + for (i = 0; i < count; ++i) 254 + mcfqspi_wr_qdr(mcfqspi, 0); 255 + 256 + count -= n; 257 + if (count) { 258 + u16 qwr = 0xf08; 259 + mcfqspi_wr_qwr(mcfqspi, 0x700); 260 + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); 261 + 262 + do { 263 + 
wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); 264 + mcfqspi_wr_qwr(mcfqspi, qwr); 265 + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); 266 + if (rxbuf) { 267 + mcfqspi_wr_qar(mcfqspi, 268 + MCFQSPI_QAR_RXBUF + offset); 269 + for (i = 0; i < 8; ++i) 270 + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); 271 + } 272 + n = min(count, 8u); 273 + if (txbuf) { 274 + mcfqspi_wr_qar(mcfqspi, 275 + MCFQSPI_QAR_TXBUF + offset); 276 + for (i = 0; i < n; ++i) 277 + mcfqspi_wr_qdr(mcfqspi, *txbuf++); 278 + } 279 + qwr = (offset ? 0x808 : 0x000) + ((n - 1) << 8); 280 + offset ^= 8; 281 + count -= n; 282 + } while (count); 283 + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); 284 + mcfqspi_wr_qwr(mcfqspi, qwr); 285 + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); 286 + if (rxbuf) { 287 + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); 288 + for (i = 0; i < 8; ++i) 289 + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); 290 + offset ^= 8; 291 + } 292 + } else { 293 + mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); 294 + mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); 295 + } 296 + wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); 297 + if (rxbuf) { 298 + mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); 299 + for (i = 0; i < n; ++i) 300 + *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); 301 + } 302 + } 303 + 304 + static void mcfqspi_work(struct work_struct *work) 305 + { 306 + struct mcfqspi *mcfqspi = container_of(work, struct mcfqspi, work); 307 + unsigned long flags; 308 + 309 + spin_lock_irqsave(&mcfqspi->lock, flags); 310 + while (!list_empty(&mcfqspi->msgq)) { 311 + struct spi_message *msg; 312 + struct spi_device *spi; 313 + struct spi_transfer *xfer; 314 + int status = 0; 315 + 316 + msg = container_of(mcfqspi->msgq.next, struct spi_message, 317 + queue); 318 + 319 + list_del_init(&mcfqspi->msgq); 320 + spin_unlock_irqrestore(&mcfqspi->lock, flags); 321 + 322 + spi = msg->spi; 323 + 324 + list_for_each_entry(xfer, &msg->transfers, transfer_list) { 325 + bool cs_high = spi->mode & 
SPI_CS_HIGH; 326 + u16 qmr = MCFQSPI_QMR_MSTR; 327 + 328 + if (xfer->bits_per_word) 329 + qmr |= xfer->bits_per_word << 10; 330 + else 331 + qmr |= spi->bits_per_word << 10; 332 + if (spi->mode & SPI_CPHA) 333 + qmr |= MCFQSPI_QMR_CPHA; 334 + if (spi->mode & SPI_CPOL) 335 + qmr |= MCFQSPI_QMR_CPOL; 336 + if (xfer->speed_hz) 337 + qmr |= mcfqspi_qmr_baud(xfer->speed_hz); 338 + else 339 + qmr |= mcfqspi_qmr_baud(spi->max_speed_hz); 340 + mcfqspi_wr_qmr(mcfqspi, qmr); 341 + 342 + mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high); 343 + 344 + mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE); 345 + if ((xfer->bits_per_word ? xfer->bits_per_word : 346 + spi->bits_per_word) == 8) 347 + mcfqspi_transfer_msg8(mcfqspi, xfer->len, 348 + xfer->tx_buf, 349 + xfer->rx_buf); 350 + else 351 + mcfqspi_transfer_msg16(mcfqspi, xfer->len / 2, 352 + xfer->tx_buf, 353 + xfer->rx_buf); 354 + mcfqspi_wr_qir(mcfqspi, 0); 355 + 356 + if (xfer->delay_usecs) 357 + udelay(xfer->delay_usecs); 358 + if (xfer->cs_change) { 359 + if (!list_is_last(&xfer->transfer_list, 360 + &msg->transfers)) 361 + mcfqspi_cs_deselect(mcfqspi, 362 + spi->chip_select, 363 + cs_high); 364 + } else { 365 + if (list_is_last(&xfer->transfer_list, 366 + &msg->transfers)) 367 + mcfqspi_cs_deselect(mcfqspi, 368 + spi->chip_select, 369 + cs_high); 370 + } 371 + msg->actual_length += xfer->len; 372 + } 373 + msg->status = status; 374 + msg->complete(msg->context); 375 + 376 + spin_lock_irqsave(&mcfqspi->lock, flags); 377 + } 378 + spin_unlock_irqrestore(&mcfqspi->lock, flags); 379 + } 380 + 381 + static int mcfqspi_transfer(struct spi_device *spi, struct spi_message *msg) 382 + { 383 + struct mcfqspi *mcfqspi; 384 + struct spi_transfer *xfer; 385 + unsigned long flags; 386 + 387 + mcfqspi = spi_master_get_devdata(spi->master); 388 + 389 + list_for_each_entry(xfer, &msg->transfers, transfer_list) { 390 + if (xfer->bits_per_word && ((xfer->bits_per_word < 8) 391 + || (xfer->bits_per_word > 16))) { 392 + dev_dbg(&spi->dev, 393 + 
"%d bits per word is not supported\n", 394 + xfer->bits_per_word); 395 + goto fail; 396 + } 397 + if (xfer->speed_hz) { 398 + u32 real_speed = MCFQSPI_BUSCLK / 399 + mcfqspi_qmr_baud(xfer->speed_hz); 400 + if (real_speed != xfer->speed_hz) 401 + dev_dbg(&spi->dev, 402 + "using speed %d instead of %d\n", 403 + real_speed, xfer->speed_hz); 404 + } 405 + } 406 + msg->status = -EINPROGRESS; 407 + msg->actual_length = 0; 408 + 409 + spin_lock_irqsave(&mcfqspi->lock, flags); 410 + list_add_tail(&msg->queue, &mcfqspi->msgq); 411 + queue_work(mcfqspi->workq, &mcfqspi->work); 412 + spin_unlock_irqrestore(&mcfqspi->lock, flags); 413 + 414 + return 0; 415 + fail: 416 + msg->status = -EINVAL; 417 + return -EINVAL; 418 + } 419 + 420 + static int mcfqspi_setup(struct spi_device *spi) 421 + { 422 + if ((spi->bits_per_word < 8) || (spi->bits_per_word > 16)) { 423 + dev_dbg(&spi->dev, "%d bits per word is not supported\n", 424 + spi->bits_per_word); 425 + return -EINVAL; 426 + } 427 + if (spi->chip_select >= spi->master->num_chipselect) { 428 + dev_dbg(&spi->dev, "%d chip select is out of range\n", 429 + spi->chip_select); 430 + return -EINVAL; 431 + } 432 + 433 + mcfqspi_cs_deselect(spi_master_get_devdata(spi->master), 434 + spi->chip_select, spi->mode & SPI_CS_HIGH); 435 + 436 + dev_dbg(&spi->dev, 437 + "bits per word %d, chip select %d, speed %d KHz\n", 438 + spi->bits_per_word, spi->chip_select, 439 + (MCFQSPI_BUSCLK / mcfqspi_qmr_baud(spi->max_speed_hz)) 440 + / 1000); 441 + 442 + return 0; 443 + } 444 + 445 + static int __devinit mcfqspi_probe(struct platform_device *pdev) 446 + { 447 + struct spi_master *master; 448 + struct mcfqspi *mcfqspi; 449 + struct resource *res; 450 + struct mcfqspi_platform_data *pdata; 451 + int status; 452 + 453 + master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi)); 454 + if (master == NULL) { 455 + dev_dbg(&pdev->dev, "spi_alloc_master failed\n"); 456 + return -ENOMEM; 457 + } 458 + 459 + mcfqspi = spi_master_get_devdata(master); 460 + 461 + 
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 462 + if (!res) { 463 + dev_dbg(&pdev->dev, "platform_get_resource failed\n"); 464 + status = -ENXIO; 465 + goto fail0; 466 + } 467 + 468 + if (!request_mem_region(res->start, resource_size(res), pdev->name)) { 469 + dev_dbg(&pdev->dev, "request_mem_region failed\n"); 470 + status = -EBUSY; 471 + goto fail0; 472 + } 473 + 474 + mcfqspi->iobase = ioremap(res->start, resource_size(res)); 475 + if (!mcfqspi->iobase) { 476 + dev_dbg(&pdev->dev, "ioremap failed\n"); 477 + status = -ENOMEM; 478 + goto fail1; 479 + } 480 + 481 + mcfqspi->irq = platform_get_irq(pdev, 0); 482 + if (mcfqspi->irq < 0) { 483 + dev_dbg(&pdev->dev, "platform_get_irq failed\n"); 484 + status = -ENXIO; 485 + goto fail2; 486 + } 487 + 488 + status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, IRQF_DISABLED, 489 + pdev->name, mcfqspi); 490 + if (status) { 491 + dev_dbg(&pdev->dev, "request_irq failed\n"); 492 + goto fail2; 493 + } 494 + 495 + mcfqspi->clk = clk_get(&pdev->dev, "qspi_clk"); 496 + if (IS_ERR(mcfqspi->clk)) { 497 + dev_dbg(&pdev->dev, "clk_get failed\n"); 498 + status = PTR_ERR(mcfqspi->clk); 499 + goto fail3; 500 + } 501 + clk_enable(mcfqspi->clk); 502 + 503 + mcfqspi->workq = create_singlethread_workqueue(dev_name(master->dev.parent)); 504 + if (!mcfqspi->workq) { 505 + dev_dbg(&pdev->dev, "create_workqueue failed\n"); 506 + status = -ENOMEM; 507 + goto fail4; 508 + } 509 + INIT_WORK(&mcfqspi->work, mcfqspi_work); 510 + spin_lock_init(&mcfqspi->lock); 511 + INIT_LIST_HEAD(&mcfqspi->msgq); 512 + init_waitqueue_head(&mcfqspi->waitq); 513 + 514 + pdata = pdev->dev.platform_data; 515 + if (!pdata) { 516 + dev_dbg(&pdev->dev, "platform data is missing\n"); 517 + goto fail5; 518 + } 519 + master->bus_num = pdata->bus_num; 520 + master->num_chipselect = pdata->num_chipselect; 521 + 522 + mcfqspi->cs_control = pdata->cs_control; 523 + status = mcfqspi_cs_setup(mcfqspi); 524 + if (status) { 525 + dev_dbg(&pdev->dev, "error initializing 
cs_control\n"); 526 + goto fail5; 527 + } 528 + 529 + master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA; 530 + master->setup = mcfqspi_setup; 531 + master->transfer = mcfqspi_transfer; 532 + 533 + platform_set_drvdata(pdev, master); 534 + 535 + status = spi_register_master(master); 536 + if (status) { 537 + dev_dbg(&pdev->dev, "spi_register_master failed\n"); 538 + goto fail6; 539 + } 540 + dev_info(&pdev->dev, "Coldfire QSPI bus driver\n"); 541 + 542 + return 0; 543 + 544 + fail6: 545 + mcfqspi_cs_teardown(mcfqspi); 546 + fail5: 547 + destroy_workqueue(mcfqspi->workq); 548 + fail4: 549 + clk_disable(mcfqspi->clk); 550 + clk_put(mcfqspi->clk); 551 + fail3: 552 + free_irq(mcfqspi->irq, mcfqspi); 553 + fail2: 554 + iounmap(mcfqspi->iobase); 555 + fail1: 556 + release_mem_region(res->start, resource_size(res)); 557 + fail0: 558 + spi_master_put(master); 559 + 560 + dev_dbg(&pdev->dev, "Coldfire QSPI probe failed\n"); 561 + 562 + return status; 563 + } 564 + 565 + static int __devexit mcfqspi_remove(struct platform_device *pdev) 566 + { 567 + struct spi_master *master = platform_get_drvdata(pdev); 568 + struct mcfqspi *mcfqspi = spi_master_get_devdata(master); 569 + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 570 + 571 + /* disable the hardware (set the baud rate to 0) */ 572 + mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR); 573 + 574 + platform_set_drvdata(pdev, NULL); 575 + mcfqspi_cs_teardown(mcfqspi); 576 + destroy_workqueue(mcfqspi->workq); 577 + clk_disable(mcfqspi->clk); 578 + clk_put(mcfqspi->clk); 579 + free_irq(mcfqspi->irq, mcfqspi); 580 + iounmap(mcfqspi->iobase); 581 + release_mem_region(res->start, resource_size(res)); 582 + spi_unregister_master(master); 583 + spi_master_put(master); 584 + 585 + return 0; 586 + } 587 + 588 + #ifdef CONFIG_PM 589 + 590 + static int mcfqspi_suspend(struct device *dev) 591 + { 592 + struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); 593 + 594 + clk_disable(mcfqspi->clk); 595 + 
596 + return 0; 597 + } 598 + 599 + static int mcfqspi_resume(struct device *dev) 600 + { 601 + struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); 602 + 603 + clk_enable(mcfqspi->clk); 604 + 605 + return 0; 606 + } 607 + 608 + static struct dev_pm_ops mcfqspi_dev_pm_ops = { 609 + .suspend = mcfqspi_suspend, 610 + .resume = mcfqspi_resume, 611 + }; 612 + 613 + #define MCFQSPI_DEV_PM_OPS (&mcfqspi_dev_pm_ops) 614 + #else 615 + #define MCFQSPI_DEV_PM_OPS NULL 616 + #endif 617 + 618 + static struct platform_driver mcfqspi_driver = { 619 + .driver.name = DRIVER_NAME, 620 + .driver.owner = THIS_MODULE, 621 + .driver.pm = MCFQSPI_DEV_PM_OPS, 622 + .remove = __devexit_p(mcfqspi_remove), 623 + }; 624 + 625 + static int __init mcfqspi_init(void) 626 + { 627 + return platform_driver_probe(&mcfqspi_driver, mcfqspi_probe); 628 + } 629 + module_init(mcfqspi_init); 630 + 631 + static void __exit mcfqspi_exit(void) 632 + { 633 + platform_driver_unregister(&mcfqspi_driver); 634 + } 635 + module_exit(mcfqspi_exit); 636 + 637 + MODULE_AUTHOR("Steven King <sfking@fdwdc.com>"); 638 + MODULE_DESCRIPTION("Coldfire QSPI Controller Driver"); 639 + MODULE_LICENSE("GPL"); 640 + MODULE_ALIAS("platform:" DRIVER_NAME);
+1255
drivers/spi/davinci_spi.c
··· 1 + /* 2 + * Copyright (C) 2009 Texas Instruments. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write to the Free Software 16 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 + */ 18 + 19 + #include <linux/interrupt.h> 20 + #include <linux/io.h> 21 + #include <linux/gpio.h> 22 + #include <linux/module.h> 23 + #include <linux/delay.h> 24 + #include <linux/platform_device.h> 25 + #include <linux/err.h> 26 + #include <linux/clk.h> 27 + #include <linux/dma-mapping.h> 28 + #include <linux/spi/spi.h> 29 + #include <linux/spi/spi_bitbang.h> 30 + 31 + #include <mach/spi.h> 32 + #include <mach/edma.h> 33 + 34 + #define SPI_NO_RESOURCE ((resource_size_t)-1) 35 + 36 + #define SPI_MAX_CHIPSELECT 2 37 + 38 + #define CS_DEFAULT 0xFF 39 + 40 + #define SPI_BUFSIZ (SMP_CACHE_BYTES + 1) 41 + #define DAVINCI_DMA_DATA_TYPE_S8 0x01 42 + #define DAVINCI_DMA_DATA_TYPE_S16 0x02 43 + #define DAVINCI_DMA_DATA_TYPE_S32 0x04 44 + 45 + #define SPIFMT_PHASE_MASK BIT(16) 46 + #define SPIFMT_POLARITY_MASK BIT(17) 47 + #define SPIFMT_DISTIMER_MASK BIT(18) 48 + #define SPIFMT_SHIFTDIR_MASK BIT(20) 49 + #define SPIFMT_WAITENA_MASK BIT(21) 50 + #define SPIFMT_PARITYENA_MASK BIT(22) 51 + #define SPIFMT_ODD_PARITY_MASK BIT(23) 52 + #define SPIFMT_WDELAY_MASK 0x3f000000u 53 + #define SPIFMT_WDELAY_SHIFT 24 54 + #define SPIFMT_CHARLEN_MASK 0x0000001Fu 55 + 56 + /* SPIGCR1 
*/ 57 + #define SPIGCR1_SPIENA_MASK 0x01000000u 58 + 59 + /* SPIPC0 */ 60 + #define SPIPC0_DIFUN_MASK BIT(11) /* MISO */ 61 + #define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */ 62 + #define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */ 63 + #define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */ 64 + #define SPIPC0_EN1FUN_MASK BIT(1) 65 + #define SPIPC0_EN0FUN_MASK BIT(0) 66 + 67 + #define SPIINT_MASKALL 0x0101035F 68 + #define SPI_INTLVL_1 0x000001FFu 69 + #define SPI_INTLVL_0 0x00000000u 70 + 71 + /* SPIDAT1 */ 72 + #define SPIDAT1_CSHOLD_SHIFT 28 73 + #define SPIDAT1_CSNR_SHIFT 16 74 + #define SPIGCR1_CLKMOD_MASK BIT(1) 75 + #define SPIGCR1_MASTER_MASK BIT(0) 76 + #define SPIGCR1_LOOPBACK_MASK BIT(16) 77 + 78 + /* SPIBUF */ 79 + #define SPIBUF_TXFULL_MASK BIT(29) 80 + #define SPIBUF_RXEMPTY_MASK BIT(31) 81 + 82 + /* Error Masks */ 83 + #define SPIFLG_DLEN_ERR_MASK BIT(0) 84 + #define SPIFLG_TIMEOUT_MASK BIT(1) 85 + #define SPIFLG_PARERR_MASK BIT(2) 86 + #define SPIFLG_DESYNC_MASK BIT(3) 87 + #define SPIFLG_BITERR_MASK BIT(4) 88 + #define SPIFLG_OVRRUN_MASK BIT(6) 89 + #define SPIFLG_RX_INTR_MASK BIT(8) 90 + #define SPIFLG_TX_INTR_MASK BIT(9) 91 + #define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24) 92 + #define SPIFLG_MASK (SPIFLG_DLEN_ERR_MASK \ 93 + | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \ 94 + | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \ 95 + | SPIFLG_OVRRUN_MASK | SPIFLG_RX_INTR_MASK \ 96 + | SPIFLG_TX_INTR_MASK \ 97 + | SPIFLG_BUF_INIT_ACTIVE_MASK) 98 + 99 + #define SPIINT_DLEN_ERR_INTR BIT(0) 100 + #define SPIINT_TIMEOUT_INTR BIT(1) 101 + #define SPIINT_PARERR_INTR BIT(2) 102 + #define SPIINT_DESYNC_INTR BIT(3) 103 + #define SPIINT_BITERR_INTR BIT(4) 104 + #define SPIINT_OVRRUN_INTR BIT(6) 105 + #define SPIINT_RX_INTR BIT(8) 106 + #define SPIINT_TX_INTR BIT(9) 107 + #define SPIINT_DMA_REQ_EN BIT(16) 108 + #define SPIINT_ENABLE_HIGHZ BIT(24) 109 + 110 + #define SPI_T2CDELAY_SHIFT 16 111 + #define SPI_C2TDELAY_SHIFT 24 112 + 113 + /* SPI Controller registers */ 114 + #define SPIGCR0 0x00 
115 + #define SPIGCR1 0x04 116 + #define SPIINT 0x08 117 + #define SPILVL 0x0c 118 + #define SPIFLG 0x10 119 + #define SPIPC0 0x14 120 + #define SPIPC1 0x18 121 + #define SPIPC2 0x1c 122 + #define SPIPC3 0x20 123 + #define SPIPC4 0x24 124 + #define SPIPC5 0x28 125 + #define SPIPC6 0x2c 126 + #define SPIPC7 0x30 127 + #define SPIPC8 0x34 128 + #define SPIDAT0 0x38 129 + #define SPIDAT1 0x3c 130 + #define SPIBUF 0x40 131 + #define SPIEMU 0x44 132 + #define SPIDELAY 0x48 133 + #define SPIDEF 0x4c 134 + #define SPIFMT0 0x50 135 + #define SPIFMT1 0x54 136 + #define SPIFMT2 0x58 137 + #define SPIFMT3 0x5c 138 + #define TGINTVEC0 0x60 139 + #define TGINTVEC1 0x64 140 + 141 + struct davinci_spi_slave { 142 + u32 cmd_to_write; 143 + u32 clk_ctrl_to_write; 144 + u32 bytes_per_word; 145 + u8 active_cs; 146 + }; 147 + 148 + /* We have 2 DMA channels per CS, one for RX and one for TX */ 149 + struct davinci_spi_dma { 150 + int dma_tx_channel; 151 + int dma_rx_channel; 152 + int dma_tx_sync_dev; 153 + int dma_rx_sync_dev; 154 + enum dma_event_q eventq; 155 + 156 + struct completion dma_tx_completion; 157 + struct completion dma_rx_completion; 158 + }; 159 + 160 + /* SPI Controller driver's private data. 
*/ 161 + struct davinci_spi { 162 + struct spi_bitbang bitbang; 163 + struct clk *clk; 164 + 165 + u8 version; 166 + resource_size_t pbase; 167 + void __iomem *base; 168 + size_t region_size; 169 + u32 irq; 170 + struct completion done; 171 + 172 + const void *tx; 173 + void *rx; 174 + u8 *tmp_buf; 175 + int count; 176 + struct davinci_spi_dma *dma_channels; 177 + struct davinci_spi_platform_data *pdata; 178 + 179 + void (*get_rx)(u32 rx_data, struct davinci_spi *); 180 + u32 (*get_tx)(struct davinci_spi *); 181 + 182 + struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT]; 183 + }; 184 + 185 + static unsigned use_dma; 186 + 187 + static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi) 188 + { 189 + u8 *rx = davinci_spi->rx; 190 + 191 + *rx++ = (u8)data; 192 + davinci_spi->rx = rx; 193 + } 194 + 195 + static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi) 196 + { 197 + u16 *rx = davinci_spi->rx; 198 + 199 + *rx++ = (u16)data; 200 + davinci_spi->rx = rx; 201 + } 202 + 203 + static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi) 204 + { 205 + u32 data; 206 + const u8 *tx = davinci_spi->tx; 207 + 208 + data = *tx++; 209 + davinci_spi->tx = tx; 210 + return data; 211 + } 212 + 213 + static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi) 214 + { 215 + u32 data; 216 + const u16 *tx = davinci_spi->tx; 217 + 218 + data = *tx++; 219 + davinci_spi->tx = tx; 220 + return data; 221 + } 222 + 223 + static inline void set_io_bits(void __iomem *addr, u32 bits) 224 + { 225 + u32 v = ioread32(addr); 226 + 227 + v |= bits; 228 + iowrite32(v, addr); 229 + } 230 + 231 + static inline void clear_io_bits(void __iomem *addr, u32 bits) 232 + { 233 + u32 v = ioread32(addr); 234 + 235 + v &= ~bits; 236 + iowrite32(v, addr); 237 + } 238 + 239 + static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num) 240 + { 241 + set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); 242 + } 243 + 244 + static inline void 
clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num) 245 + { 246 + clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); 247 + } 248 + 249 + static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable) 250 + { 251 + struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); 252 + 253 + if (enable) 254 + set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); 255 + else 256 + clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); 257 + } 258 + 259 + /* 260 + * Interface to control the chip select signal 261 + */ 262 + static void davinci_spi_chipselect(struct spi_device *spi, int value) 263 + { 264 + struct davinci_spi *davinci_spi; 265 + struct davinci_spi_platform_data *pdata; 266 + u32 data1_reg_val = 0; 267 + 268 + davinci_spi = spi_master_get_devdata(spi->master); 269 + pdata = davinci_spi->pdata; 270 + 271 + /* 272 + * Board specific chip select logic decides the polarity and cs 273 + * line for the controller 274 + */ 275 + if (value == BITBANG_CS_INACTIVE) { 276 + set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT); 277 + 278 + data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT; 279 + iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); 280 + 281 + while ((ioread32(davinci_spi->base + SPIBUF) 282 + & SPIBUF_RXEMPTY_MASK) == 0) 283 + cpu_relax(); 284 + } 285 + } 286 + 287 + /** 288 + * davinci_spi_setup_transfer - This functions will determine transfer method 289 + * @spi: spi device on which data transfer to be done 290 + * @t: spi transfer in which transfer info is filled 291 + * 292 + * This function determines data transfer method (8/16/32 bit transfer). 293 + * It will also set the SPI Clock Control register according to 294 + * SPI slave device freq. 
295 + */ 296 + static int davinci_spi_setup_transfer(struct spi_device *spi, 297 + struct spi_transfer *t) 298 + { 299 + 300 + struct davinci_spi *davinci_spi; 301 + struct davinci_spi_platform_data *pdata; 302 + u8 bits_per_word = 0; 303 + u32 hz = 0, prescale; 304 + 305 + davinci_spi = spi_master_get_devdata(spi->master); 306 + pdata = davinci_spi->pdata; 307 + 308 + if (t) { 309 + bits_per_word = t->bits_per_word; 310 + hz = t->speed_hz; 311 + } 312 + 313 + /* if bits_per_word is not set then set it default */ 314 + if (!bits_per_word) 315 + bits_per_word = spi->bits_per_word; 316 + 317 + /* 318 + * Assign function pointer to appropriate transfer method 319 + * 8bit, 16bit or 32bit transfer 320 + */ 321 + if (bits_per_word <= 8 && bits_per_word >= 2) { 322 + davinci_spi->get_rx = davinci_spi_rx_buf_u8; 323 + davinci_spi->get_tx = davinci_spi_tx_buf_u8; 324 + davinci_spi->slave[spi->chip_select].bytes_per_word = 1; 325 + } else if (bits_per_word <= 16 && bits_per_word >= 2) { 326 + davinci_spi->get_rx = davinci_spi_rx_buf_u16; 327 + davinci_spi->get_tx = davinci_spi_tx_buf_u16; 328 + davinci_spi->slave[spi->chip_select].bytes_per_word = 2; 329 + } else 330 + return -EINVAL; 331 + 332 + if (!hz) 333 + hz = spi->max_speed_hz; 334 + 335 + clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK, 336 + spi->chip_select); 337 + set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f, 338 + spi->chip_select); 339 + 340 + prescale = ((clk_get_rate(davinci_spi->clk) / hz) - 1) & 0xff; 341 + 342 + clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select); 343 + set_fmt_bits(davinci_spi->base, prescale << 8, spi->chip_select); 344 + 345 + return 0; 346 + } 347 + 348 + static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data) 349 + { 350 + struct spi_device *spi = (struct spi_device *)data; 351 + struct davinci_spi *davinci_spi; 352 + struct davinci_spi_dma *davinci_spi_dma; 353 + struct davinci_spi_platform_data *pdata; 354 + 355 + davinci_spi = 
spi_master_get_devdata(spi->master); 356 + davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); 357 + pdata = davinci_spi->pdata; 358 + 359 + if (ch_status == DMA_COMPLETE) 360 + edma_stop(davinci_spi_dma->dma_rx_channel); 361 + else 362 + edma_clean_channel(davinci_spi_dma->dma_rx_channel); 363 + 364 + complete(&davinci_spi_dma->dma_rx_completion); 365 + /* We must disable the DMA RX request */ 366 + davinci_spi_set_dma_req(spi, 0); 367 + } 368 + 369 + static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data) 370 + { 371 + struct spi_device *spi = (struct spi_device *)data; 372 + struct davinci_spi *davinci_spi; 373 + struct davinci_spi_dma *davinci_spi_dma; 374 + struct davinci_spi_platform_data *pdata; 375 + 376 + davinci_spi = spi_master_get_devdata(spi->master); 377 + davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); 378 + pdata = davinci_spi->pdata; 379 + 380 + if (ch_status == DMA_COMPLETE) 381 + edma_stop(davinci_spi_dma->dma_tx_channel); 382 + else 383 + edma_clean_channel(davinci_spi_dma->dma_tx_channel); 384 + 385 + complete(&davinci_spi_dma->dma_tx_completion); 386 + /* We must disable the DMA TX request */ 387 + davinci_spi_set_dma_req(spi, 0); 388 + } 389 + 390 + static int davinci_spi_request_dma(struct spi_device *spi) 391 + { 392 + struct davinci_spi *davinci_spi; 393 + struct davinci_spi_dma *davinci_spi_dma; 394 + struct davinci_spi_platform_data *pdata; 395 + struct device *sdev; 396 + int r; 397 + 398 + davinci_spi = spi_master_get_devdata(spi->master); 399 + davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; 400 + pdata = davinci_spi->pdata; 401 + sdev = davinci_spi->bitbang.master->dev.parent; 402 + 403 + r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev, 404 + davinci_spi_dma_rx_callback, spi, 405 + davinci_spi_dma->eventq); 406 + if (r < 0) { 407 + dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n"); 408 + return -EAGAIN; 409 + } 410 + 
davinci_spi_dma->dma_rx_channel = r; 411 + r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev, 412 + davinci_spi_dma_tx_callback, spi, 413 + davinci_spi_dma->eventq); 414 + if (r < 0) { 415 + edma_free_channel(davinci_spi_dma->dma_rx_channel); 416 + davinci_spi_dma->dma_rx_channel = -1; 417 + dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n"); 418 + return -EAGAIN; 419 + } 420 + davinci_spi_dma->dma_tx_channel = r; 421 + 422 + return 0; 423 + } 424 + 425 + /** 426 + * davinci_spi_setup - This functions will set default transfer method 427 + * @spi: spi device on which data transfer to be done 428 + * 429 + * This functions sets the default transfer method. 430 + */ 431 + 432 + static int davinci_spi_setup(struct spi_device *spi) 433 + { 434 + int retval; 435 + struct davinci_spi *davinci_spi; 436 + struct davinci_spi_dma *davinci_spi_dma; 437 + struct device *sdev; 438 + 439 + davinci_spi = spi_master_get_devdata(spi->master); 440 + sdev = davinci_spi->bitbang.master->dev.parent; 441 + 442 + /* if bits per word length is zero then set it default 8 */ 443 + if (!spi->bits_per_word) 444 + spi->bits_per_word = 8; 445 + 446 + davinci_spi->slave[spi->chip_select].cmd_to_write = 0; 447 + 448 + if (use_dma && davinci_spi->dma_channels) { 449 + davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; 450 + 451 + if ((davinci_spi_dma->dma_rx_channel == -1) 452 + || (davinci_spi_dma->dma_tx_channel == -1)) { 453 + retval = davinci_spi_request_dma(spi); 454 + if (retval < 0) 455 + return retval; 456 + } 457 + } 458 + 459 + /* 460 + * SPI in DaVinci and DA8xx operate between 461 + * 600 KHz and 50 MHz 462 + */ 463 + if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) { 464 + dev_dbg(sdev, "Operating frequency is not in acceptable " 465 + "range\n"); 466 + return -EINVAL; 467 + } 468 + 469 + /* 470 + * Set up SPIFMTn register, unique to this chipselect. 471 + * 472 + * NOTE: we could do all of these with one write. 
Also, some 473 + * of the "version 2" features are found in chips that don't 474 + * support all of them... 475 + */ 476 + if (spi->mode & SPI_LSB_FIRST) 477 + set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, 478 + spi->chip_select); 479 + else 480 + clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, 481 + spi->chip_select); 482 + 483 + if (spi->mode & SPI_CPOL) 484 + set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, 485 + spi->chip_select); 486 + else 487 + clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, 488 + spi->chip_select); 489 + 490 + if (!(spi->mode & SPI_CPHA)) 491 + set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, 492 + spi->chip_select); 493 + else 494 + clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, 495 + spi->chip_select); 496 + 497 + /* 498 + * Version 1 hardware supports two basic SPI modes: 499 + * - Standard SPI mode uses 4 pins, with chipselect 500 + * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS) 501 + * (distinct from SPI_3WIRE, with just one data wire; 502 + * or similar variants without MOSI or without MISO) 503 + * 504 + * Version 2 hardware supports an optional handshaking signal, 505 + * so it can support two more modes: 506 + * - 5 pin SPI variant is standard SPI plus SPI_READY 507 + * - 4 pin with enable is (SPI_READY | SPI_NO_CS) 508 + */ 509 + 510 + if (davinci_spi->version == SPI_VERSION_2) { 511 + clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK, 512 + spi->chip_select); 513 + set_fmt_bits(davinci_spi->base, 514 + (davinci_spi->pdata->wdelay 515 + << SPIFMT_WDELAY_SHIFT) 516 + & SPIFMT_WDELAY_MASK, 517 + spi->chip_select); 518 + 519 + if (davinci_spi->pdata->odd_parity) 520 + set_fmt_bits(davinci_spi->base, 521 + SPIFMT_ODD_PARITY_MASK, 522 + spi->chip_select); 523 + else 524 + clear_fmt_bits(davinci_spi->base, 525 + SPIFMT_ODD_PARITY_MASK, 526 + spi->chip_select); 527 + 528 + if (davinci_spi->pdata->parity_enable) 529 + set_fmt_bits(davinci_spi->base, 530 + SPIFMT_PARITYENA_MASK, 531 + 
spi->chip_select); 532 + else 533 + clear_fmt_bits(davinci_spi->base, 534 + SPIFMT_PARITYENA_MASK, 535 + spi->chip_select); 536 + 537 + if (davinci_spi->pdata->wait_enable) 538 + set_fmt_bits(davinci_spi->base, 539 + SPIFMT_WAITENA_MASK, 540 + spi->chip_select); 541 + else 542 + clear_fmt_bits(davinci_spi->base, 543 + SPIFMT_WAITENA_MASK, 544 + spi->chip_select); 545 + 546 + if (davinci_spi->pdata->timer_disable) 547 + set_fmt_bits(davinci_spi->base, 548 + SPIFMT_DISTIMER_MASK, 549 + spi->chip_select); 550 + else 551 + clear_fmt_bits(davinci_spi->base, 552 + SPIFMT_DISTIMER_MASK, 553 + spi->chip_select); 554 + } 555 + 556 + retval = davinci_spi_setup_transfer(spi, NULL); 557 + 558 + return retval; 559 + } 560 + 561 + static void davinci_spi_cleanup(struct spi_device *spi) 562 + { 563 + struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); 564 + struct davinci_spi_dma *davinci_spi_dma; 565 + 566 + davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; 567 + 568 + if (use_dma && davinci_spi->dma_channels) { 569 + davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; 570 + 571 + if ((davinci_spi_dma->dma_rx_channel != -1) 572 + && (davinci_spi_dma->dma_tx_channel != -1)) { 573 + edma_free_channel(davinci_spi_dma->dma_tx_channel); 574 + edma_free_channel(davinci_spi_dma->dma_rx_channel); 575 + } 576 + } 577 + } 578 + 579 + static int davinci_spi_bufs_prep(struct spi_device *spi, 580 + struct davinci_spi *davinci_spi) 581 + { 582 + int op_mode = 0; 583 + 584 + /* 585 + * REVISIT unless devices disagree about SPI_LOOP or 586 + * SPI_READY (SPI_NO_CS only allows one device!), this 587 + * should not need to be done before each message... 588 + * optimize for both flags staying cleared. 
589 + */ 590 + 591 + op_mode = SPIPC0_DIFUN_MASK 592 + | SPIPC0_DOFUN_MASK 593 + | SPIPC0_CLKFUN_MASK; 594 + if (!(spi->mode & SPI_NO_CS)) 595 + op_mode |= 1 << spi->chip_select; 596 + if (spi->mode & SPI_READY) 597 + op_mode |= SPIPC0_SPIENA_MASK; 598 + 599 + iowrite32(op_mode, davinci_spi->base + SPIPC0); 600 + 601 + if (spi->mode & SPI_LOOP) 602 + set_io_bits(davinci_spi->base + SPIGCR1, 603 + SPIGCR1_LOOPBACK_MASK); 604 + else 605 + clear_io_bits(davinci_spi->base + SPIGCR1, 606 + SPIGCR1_LOOPBACK_MASK); 607 + 608 + return 0; 609 + } 610 + 611 + static int davinci_spi_check_error(struct davinci_spi *davinci_spi, 612 + int int_status) 613 + { 614 + struct device *sdev = davinci_spi->bitbang.master->dev.parent; 615 + 616 + if (int_status & SPIFLG_TIMEOUT_MASK) { 617 + dev_dbg(sdev, "SPI Time-out Error\n"); 618 + return -ETIMEDOUT; 619 + } 620 + if (int_status & SPIFLG_DESYNC_MASK) { 621 + dev_dbg(sdev, "SPI Desynchronization Error\n"); 622 + return -EIO; 623 + } 624 + if (int_status & SPIFLG_BITERR_MASK) { 625 + dev_dbg(sdev, "SPI Bit error\n"); 626 + return -EIO; 627 + } 628 + 629 + if (davinci_spi->version == SPI_VERSION_2) { 630 + if (int_status & SPIFLG_DLEN_ERR_MASK) { 631 + dev_dbg(sdev, "SPI Data Length Error\n"); 632 + return -EIO; 633 + } 634 + if (int_status & SPIFLG_PARERR_MASK) { 635 + dev_dbg(sdev, "SPI Parity Error\n"); 636 + return -EIO; 637 + } 638 + if (int_status & SPIFLG_OVRRUN_MASK) { 639 + dev_dbg(sdev, "SPI Data Overrun error\n"); 640 + return -EIO; 641 + } 642 + if (int_status & SPIFLG_TX_INTR_MASK) { 643 + dev_dbg(sdev, "SPI TX intr bit set\n"); 644 + return -EIO; 645 + } 646 + if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) { 647 + dev_dbg(sdev, "SPI Buffer Init Active\n"); 648 + return -EBUSY; 649 + } 650 + } 651 + 652 + return 0; 653 + } 654 + 655 + /** 656 + * davinci_spi_bufs - functions which will handle transfer data 657 + * @spi: spi device on which data transfer to be done 658 + * @t: spi transfer in which transfer info is filled 
659 + * 660 + * This function will put data to be transferred into data register 661 + * of SPI controller and then wait until the completion will be marked 662 + * by the IRQ Handler. 663 + */ 664 + static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t) 665 + { 666 + struct davinci_spi *davinci_spi; 667 + int int_status, count, ret; 668 + u8 conv, tmp; 669 + u32 tx_data, data1_reg_val; 670 + u32 buf_val, flg_val; 671 + struct davinci_spi_platform_data *pdata; 672 + 673 + davinci_spi = spi_master_get_devdata(spi->master); 674 + pdata = davinci_spi->pdata; 675 + 676 + davinci_spi->tx = t->tx_buf; 677 + davinci_spi->rx = t->rx_buf; 678 + 679 + /* convert len to words based on bits_per_word */ 680 + conv = davinci_spi->slave[spi->chip_select].bytes_per_word; 681 + davinci_spi->count = t->len / conv; 682 + 683 + INIT_COMPLETION(davinci_spi->done); 684 + 685 + ret = davinci_spi_bufs_prep(spi, davinci_spi); 686 + if (ret) 687 + return ret; 688 + 689 + /* Enable SPI */ 690 + set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); 691 + 692 + iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | 693 + (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), 694 + davinci_spi->base + SPIDELAY); 695 + 696 + count = davinci_spi->count; 697 + data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; 698 + tmp = ~(0x1 << spi->chip_select); 699 + 700 + clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); 701 + 702 + data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; 703 + 704 + while ((ioread32(davinci_spi->base + SPIBUF) 705 + & SPIBUF_RXEMPTY_MASK) == 0) 706 + cpu_relax(); 707 + 708 + /* Determine the command to execute READ or WRITE */ 709 + if (t->tx_buf) { 710 + clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); 711 + 712 + while (1) { 713 + tx_data = davinci_spi->get_tx(davinci_spi); 714 + 715 + data1_reg_val &= ~(0xFFFF); 716 + data1_reg_val |= (0xFFFF & tx_data); 717 + 718 + buf_val = ioread32(davinci_spi->base + SPIBUF); 719 + if ((buf_val & 
SPIBUF_TXFULL_MASK) == 0) { 720 + iowrite32(data1_reg_val, 721 + davinci_spi->base + SPIDAT1); 722 + 723 + count--; 724 + } 725 + while (ioread32(davinci_spi->base + SPIBUF) 726 + & SPIBUF_RXEMPTY_MASK) 727 + cpu_relax(); 728 + 729 + /* getting the returned byte */ 730 + if (t->rx_buf) { 731 + buf_val = ioread32(davinci_spi->base + SPIBUF); 732 + davinci_spi->get_rx(buf_val, davinci_spi); 733 + } 734 + if (count <= 0) 735 + break; 736 + } 737 + } else { 738 + if (pdata->poll_mode) { 739 + while (1) { 740 + /* keeps the serial clock going */ 741 + if ((ioread32(davinci_spi->base + SPIBUF) 742 + & SPIBUF_TXFULL_MASK) == 0) 743 + iowrite32(data1_reg_val, 744 + davinci_spi->base + SPIDAT1); 745 + 746 + while (ioread32(davinci_spi->base + SPIBUF) & 747 + SPIBUF_RXEMPTY_MASK) 748 + cpu_relax(); 749 + 750 + flg_val = ioread32(davinci_spi->base + SPIFLG); 751 + buf_val = ioread32(davinci_spi->base + SPIBUF); 752 + 753 + davinci_spi->get_rx(buf_val, davinci_spi); 754 + 755 + count--; 756 + if (count <= 0) 757 + break; 758 + } 759 + } else { /* Receive in Interrupt mode */ 760 + int i; 761 + 762 + for (i = 0; i < davinci_spi->count; i++) { 763 + set_io_bits(davinci_spi->base + SPIINT, 764 + SPIINT_BITERR_INTR 765 + | SPIINT_OVRRUN_INTR 766 + | SPIINT_RX_INTR); 767 + 768 + iowrite32(data1_reg_val, 769 + davinci_spi->base + SPIDAT1); 770 + 771 + while (ioread32(davinci_spi->base + SPIINT) & 772 + SPIINT_RX_INTR) 773 + cpu_relax(); 774 + } 775 + iowrite32((data1_reg_val & 0x0ffcffff), 776 + davinci_spi->base + SPIDAT1); 777 + } 778 + } 779 + 780 + /* 781 + * Check for bit error, desync error,parity error,timeout error and 782 + * receive overflow errors 783 + */ 784 + int_status = ioread32(davinci_spi->base + SPIFLG); 785 + 786 + ret = davinci_spi_check_error(davinci_spi, int_status); 787 + if (ret != 0) 788 + return ret; 789 + 790 + /* SPI Framework maintains the count only in bytes so convert back */ 791 + davinci_spi->count *= conv; 792 + 793 + return t->len; 794 + } 795 + 
796 + #define DAVINCI_DMA_DATA_TYPE_S8 0x01 797 + #define DAVINCI_DMA_DATA_TYPE_S16 0x02 798 + #define DAVINCI_DMA_DATA_TYPE_S32 0x04 799 + 800 + static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t) 801 + { 802 + struct davinci_spi *davinci_spi; 803 + int int_status = 0; 804 + int count, temp_count; 805 + u8 conv = 1; 806 + u8 tmp; 807 + u32 data1_reg_val; 808 + struct davinci_spi_dma *davinci_spi_dma; 809 + int word_len, data_type, ret; 810 + unsigned long tx_reg, rx_reg; 811 + struct davinci_spi_platform_data *pdata; 812 + struct device *sdev; 813 + 814 + davinci_spi = spi_master_get_devdata(spi->master); 815 + pdata = davinci_spi->pdata; 816 + sdev = davinci_spi->bitbang.master->dev.parent; 817 + 818 + davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; 819 + 820 + tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1; 821 + rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF; 822 + 823 + davinci_spi->tx = t->tx_buf; 824 + davinci_spi->rx = t->rx_buf; 825 + 826 + /* convert len to words based on bits_per_word */ 827 + conv = davinci_spi->slave[spi->chip_select].bytes_per_word; 828 + davinci_spi->count = t->len / conv; 829 + 830 + INIT_COMPLETION(davinci_spi->done); 831 + 832 + init_completion(&davinci_spi_dma->dma_rx_completion); 833 + init_completion(&davinci_spi_dma->dma_tx_completion); 834 + 835 + word_len = conv * 8; 836 + 837 + if (word_len <= 8) 838 + data_type = DAVINCI_DMA_DATA_TYPE_S8; 839 + else if (word_len <= 16) 840 + data_type = DAVINCI_DMA_DATA_TYPE_S16; 841 + else if (word_len <= 32) 842 + data_type = DAVINCI_DMA_DATA_TYPE_S32; 843 + else 844 + return -EINVAL; 845 + 846 + ret = davinci_spi_bufs_prep(spi, davinci_spi); 847 + if (ret) 848 + return ret; 849 + 850 + /* Put delay val if required */ 851 + iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | 852 + (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), 853 + davinci_spi->base + SPIDELAY); 854 + 855 + count = davinci_spi->count; /* the number of elements */ 856 
+ data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; 857 + 858 + /* CS default = 0xFF */ 859 + tmp = ~(0x1 << spi->chip_select); 860 + 861 + clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); 862 + 863 + data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; 864 + 865 + /* disable all interrupts for dma transfers */ 866 + clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); 867 + /* Disable SPI to write configuration bits in SPIDAT */ 868 + clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); 869 + iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); 870 + /* Enable SPI */ 871 + set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); 872 + 873 + while ((ioread32(davinci_spi->base + SPIBUF) 874 + & SPIBUF_RXEMPTY_MASK) == 0) 875 + cpu_relax(); 876 + 877 + 878 + if (t->tx_buf) { 879 + t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count, 880 + DMA_TO_DEVICE); 881 + if (dma_mapping_error(&spi->dev, t->tx_dma)) { 882 + dev_dbg(sdev, "Unable to DMA map a %d bytes" 883 + " TX buffer\n", count); 884 + return -ENOMEM; 885 + } 886 + temp_count = count; 887 + } else { 888 + /* We need TX clocking for RX transaction */ 889 + t->tx_dma = dma_map_single(&spi->dev, 890 + (void *)davinci_spi->tmp_buf, count + 1, 891 + DMA_TO_DEVICE); 892 + if (dma_mapping_error(&spi->dev, t->tx_dma)) { 893 + dev_dbg(sdev, "Unable to DMA map a %d bytes" 894 + " TX tmp buffer\n", count); 895 + return -ENOMEM; 896 + } 897 + temp_count = count + 1; 898 + } 899 + 900 + edma_set_transfer_params(davinci_spi_dma->dma_tx_channel, 901 + data_type, temp_count, 1, 0, ASYNC); 902 + edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT); 903 + edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT); 904 + edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0); 905 + edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); 906 + 907 + if (t->rx_buf) { 908 + /* initiate transaction */ 909 + iowrite32(data1_reg_val, davinci_spi->base + 
SPIDAT1); 910 + 911 + t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count, 912 + DMA_FROM_DEVICE); 913 + if (dma_mapping_error(&spi->dev, t->rx_dma)) { 914 + dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", 915 + count); 916 + if (t->tx_buf != NULL) 917 + dma_unmap_single(NULL, t->tx_dma, 918 + count, DMA_TO_DEVICE); 919 + return -ENOMEM; 920 + } 921 + edma_set_transfer_params(davinci_spi_dma->dma_rx_channel, 922 + data_type, count, 1, 0, ASYNC); 923 + edma_set_src(davinci_spi_dma->dma_rx_channel, 924 + rx_reg, INCR, W8BIT); 925 + edma_set_dest(davinci_spi_dma->dma_rx_channel, 926 + t->rx_dma, INCR, W8BIT); 927 + edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0); 928 + edma_set_dest_index(davinci_spi_dma->dma_rx_channel, 929 + data_type, 0); 930 + } 931 + 932 + if ((t->tx_buf) || (t->rx_buf)) 933 + edma_start(davinci_spi_dma->dma_tx_channel); 934 + 935 + if (t->rx_buf) 936 + edma_start(davinci_spi_dma->dma_rx_channel); 937 + 938 + if ((t->rx_buf) || (t->tx_buf)) 939 + davinci_spi_set_dma_req(spi, 1); 940 + 941 + if (t->tx_buf) 942 + wait_for_completion_interruptible( 943 + &davinci_spi_dma->dma_tx_completion); 944 + 945 + if (t->rx_buf) 946 + wait_for_completion_interruptible( 947 + &davinci_spi_dma->dma_rx_completion); 948 + 949 + dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE); 950 + 951 + if (t->rx_buf) 952 + dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE); 953 + 954 + /* 955 + * Check for bit error, desync error,parity error,timeout error and 956 + * receive overflow errors 957 + */ 958 + int_status = ioread32(davinci_spi->base + SPIFLG); 959 + 960 + ret = davinci_spi_check_error(davinci_spi, int_status); 961 + if (ret != 0) 962 + return ret; 963 + 964 + /* SPI Framework maintains the count only in bytes so convert back */ 965 + davinci_spi->count *= conv; 966 + 967 + return t->len; 968 + } 969 + 970 + /** 971 + * davinci_spi_irq - IRQ handler for DaVinci SPI 972 + * @irq: IRQ number for this SPI Master 973 + * 
@context_data: structure for SPI Master controller davinci_spi 974 + */ 975 + static irqreturn_t davinci_spi_irq(s32 irq, void *context_data) 976 + { 977 + struct davinci_spi *davinci_spi = context_data; 978 + u32 int_status, rx_data = 0; 979 + irqreturn_t ret = IRQ_NONE; 980 + 981 + int_status = ioread32(davinci_spi->base + SPIFLG); 982 + 983 + while ((int_status & SPIFLG_RX_INTR_MASK)) { 984 + if (likely(int_status & SPIFLG_RX_INTR_MASK)) { 985 + ret = IRQ_HANDLED; 986 + 987 + rx_data = ioread32(davinci_spi->base + SPIBUF); 988 + davinci_spi->get_rx(rx_data, davinci_spi); 989 + 990 + /* Disable Receive Interrupt */ 991 + iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR), 992 + davinci_spi->base + SPIINT); 993 + } else 994 + (void)davinci_spi_check_error(davinci_spi, int_status); 995 + 996 + int_status = ioread32(davinci_spi->base + SPIFLG); 997 + } 998 + 999 + return ret; 1000 + } 1001 + 1002 + /** 1003 + * davinci_spi_probe - probe function for SPI Master Controller 1004 + * @pdev: platform_device structure which contains plateform specific data 1005 + */ 1006 + static int davinci_spi_probe(struct platform_device *pdev) 1007 + { 1008 + struct spi_master *master; 1009 + struct davinci_spi *davinci_spi; 1010 + struct davinci_spi_platform_data *pdata; 1011 + struct resource *r, *mem; 1012 + resource_size_t dma_rx_chan = SPI_NO_RESOURCE; 1013 + resource_size_t dma_tx_chan = SPI_NO_RESOURCE; 1014 + resource_size_t dma_eventq = SPI_NO_RESOURCE; 1015 + int i = 0, ret = 0; 1016 + 1017 + pdata = pdev->dev.platform_data; 1018 + if (pdata == NULL) { 1019 + ret = -ENODEV; 1020 + goto err; 1021 + } 1022 + 1023 + master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi)); 1024 + if (master == NULL) { 1025 + ret = -ENOMEM; 1026 + goto err; 1027 + } 1028 + 1029 + dev_set_drvdata(&pdev->dev, master); 1030 + 1031 + davinci_spi = spi_master_get_devdata(master); 1032 + if (davinci_spi == NULL) { 1033 + ret = -ENOENT; 1034 + goto free_master; 1035 + } 1036 + 1037 + r = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); 1038 + if (r == NULL) { 1039 + ret = -ENOENT; 1040 + goto free_master; 1041 + } 1042 + 1043 + davinci_spi->pbase = r->start; 1044 + davinci_spi->region_size = resource_size(r); 1045 + davinci_spi->pdata = pdata; 1046 + 1047 + mem = request_mem_region(r->start, davinci_spi->region_size, 1048 + pdev->name); 1049 + if (mem == NULL) { 1050 + ret = -EBUSY; 1051 + goto free_master; 1052 + } 1053 + 1054 + davinci_spi->base = (struct davinci_spi_reg __iomem *) 1055 + ioremap(r->start, davinci_spi->region_size); 1056 + if (davinci_spi->base == NULL) { 1057 + ret = -ENOMEM; 1058 + goto release_region; 1059 + } 1060 + 1061 + davinci_spi->irq = platform_get_irq(pdev, 0); 1062 + if (davinci_spi->irq <= 0) { 1063 + ret = -EINVAL; 1064 + goto unmap_io; 1065 + } 1066 + 1067 + ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED, 1068 + dev_name(&pdev->dev), davinci_spi); 1069 + if (ret) 1070 + goto unmap_io; 1071 + 1072 + /* Allocate tmp_buf for tx_buf */ 1073 + davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL); 1074 + if (davinci_spi->tmp_buf == NULL) { 1075 + ret = -ENOMEM; 1076 + goto irq_free; 1077 + } 1078 + 1079 + davinci_spi->bitbang.master = spi_master_get(master); 1080 + if (davinci_spi->bitbang.master == NULL) { 1081 + ret = -ENODEV; 1082 + goto free_tmp_buf; 1083 + } 1084 + 1085 + davinci_spi->clk = clk_get(&pdev->dev, NULL); 1086 + if (IS_ERR(davinci_spi->clk)) { 1087 + ret = -ENODEV; 1088 + goto put_master; 1089 + } 1090 + clk_enable(davinci_spi->clk); 1091 + 1092 + 1093 + master->bus_num = pdev->id; 1094 + master->num_chipselect = pdata->num_chipselect; 1095 + master->setup = davinci_spi_setup; 1096 + master->cleanup = davinci_spi_cleanup; 1097 + 1098 + davinci_spi->bitbang.chipselect = davinci_spi_chipselect; 1099 + davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer; 1100 + 1101 + davinci_spi->version = pdata->version; 1102 + use_dma = pdata->use_dma; 1103 + 1104 + 
davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP; 1105 + if (davinci_spi->version == SPI_VERSION_2) 1106 + davinci_spi->bitbang.flags |= SPI_READY; 1107 + 1108 + if (use_dma) { 1109 + r = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1110 + if (r) 1111 + dma_rx_chan = r->start; 1112 + r = platform_get_resource(pdev, IORESOURCE_DMA, 1); 1113 + if (r) 1114 + dma_tx_chan = r->start; 1115 + r = platform_get_resource(pdev, IORESOURCE_DMA, 2); 1116 + if (r) 1117 + dma_eventq = r->start; 1118 + } 1119 + 1120 + if (!use_dma || 1121 + dma_rx_chan == SPI_NO_RESOURCE || 1122 + dma_tx_chan == SPI_NO_RESOURCE || 1123 + dma_eventq == SPI_NO_RESOURCE) { 1124 + davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio; 1125 + use_dma = 0; 1126 + } else { 1127 + davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma; 1128 + davinci_spi->dma_channels = kzalloc(master->num_chipselect 1129 + * sizeof(struct davinci_spi_dma), GFP_KERNEL); 1130 + if (davinci_spi->dma_channels == NULL) { 1131 + ret = -ENOMEM; 1132 + goto free_clk; 1133 + } 1134 + 1135 + for (i = 0; i < master->num_chipselect; i++) { 1136 + davinci_spi->dma_channels[i].dma_rx_channel = -1; 1137 + davinci_spi->dma_channels[i].dma_rx_sync_dev = 1138 + dma_rx_chan; 1139 + davinci_spi->dma_channels[i].dma_tx_channel = -1; 1140 + davinci_spi->dma_channels[i].dma_tx_sync_dev = 1141 + dma_tx_chan; 1142 + davinci_spi->dma_channels[i].eventq = dma_eventq; 1143 + } 1144 + dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n" 1145 + "Using RX channel = %d , TX channel = %d and " 1146 + "event queue = %d", dma_rx_chan, dma_tx_chan, 1147 + dma_eventq); 1148 + } 1149 + 1150 + davinci_spi->get_rx = davinci_spi_rx_buf_u8; 1151 + davinci_spi->get_tx = davinci_spi_tx_buf_u8; 1152 + 1153 + init_completion(&davinci_spi->done); 1154 + 1155 + /* Reset In/OUT SPI module */ 1156 + iowrite32(0, davinci_spi->base + SPIGCR0); 1157 + udelay(100); 1158 + iowrite32(1, davinci_spi->base + SPIGCR0); 1159 + 1160 + /* Clock internal */ 
1161 + if (davinci_spi->pdata->clk_internal) 1162 + set_io_bits(davinci_spi->base + SPIGCR1, 1163 + SPIGCR1_CLKMOD_MASK); 1164 + else 1165 + clear_io_bits(davinci_spi->base + SPIGCR1, 1166 + SPIGCR1_CLKMOD_MASK); 1167 + 1168 + /* master mode default */ 1169 + set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK); 1170 + 1171 + if (davinci_spi->pdata->intr_level) 1172 + iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL); 1173 + else 1174 + iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL); 1175 + 1176 + ret = spi_bitbang_start(&davinci_spi->bitbang); 1177 + if (ret) 1178 + goto free_clk; 1179 + 1180 + dev_info(&pdev->dev, "Controller at 0x%p \n", davinci_spi->base); 1181 + 1182 + if (!pdata->poll_mode) 1183 + dev_info(&pdev->dev, "Operating in interrupt mode" 1184 + " using IRQ %d\n", davinci_spi->irq); 1185 + 1186 + return ret; 1187 + 1188 + free_clk: 1189 + clk_disable(davinci_spi->clk); 1190 + clk_put(davinci_spi->clk); 1191 + put_master: 1192 + spi_master_put(master); 1193 + free_tmp_buf: 1194 + kfree(davinci_spi->tmp_buf); 1195 + irq_free: 1196 + free_irq(davinci_spi->irq, davinci_spi); 1197 + unmap_io: 1198 + iounmap(davinci_spi->base); 1199 + release_region: 1200 + release_mem_region(davinci_spi->pbase, davinci_spi->region_size); 1201 + free_master: 1202 + kfree(master); 1203 + err: 1204 + return ret; 1205 + } 1206 + 1207 + /** 1208 + * davinci_spi_remove - remove function for SPI Master Controller 1209 + * @pdev: platform_device structure which contains plateform specific data 1210 + * 1211 + * This function will do the reverse action of davinci_spi_probe function 1212 + * It will free the IRQ and SPI controller's memory region. 1213 + * It will also call spi_bitbang_stop to destroy the work queue which was 1214 + * created by spi_bitbang_start. 
1215 + */ 1216 + static int __exit davinci_spi_remove(struct platform_device *pdev) 1217 + { 1218 + struct davinci_spi *davinci_spi; 1219 + struct spi_master *master; 1220 + 1221 + master = dev_get_drvdata(&pdev->dev); 1222 + davinci_spi = spi_master_get_devdata(master); 1223 + 1224 + spi_bitbang_stop(&davinci_spi->bitbang); 1225 + 1226 + clk_disable(davinci_spi->clk); 1227 + clk_put(davinci_spi->clk); 1228 + spi_master_put(master); 1229 + kfree(davinci_spi->tmp_buf); 1230 + free_irq(davinci_spi->irq, davinci_spi); 1231 + iounmap(davinci_spi->base); 1232 + release_mem_region(davinci_spi->pbase, davinci_spi->region_size); 1233 + 1234 + return 0; 1235 + } 1236 + 1237 + static struct platform_driver davinci_spi_driver = { 1238 + .driver.name = "spi_davinci", 1239 + .remove = __exit_p(davinci_spi_remove), 1240 + }; 1241 + 1242 + static int __init davinci_spi_init(void) 1243 + { 1244 + return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe); 1245 + } 1246 + module_init(davinci_spi_init); 1247 + 1248 + static void __exit davinci_spi_exit(void) 1249 + { 1250 + platform_driver_unregister(&davinci_spi_driver); 1251 + } 1252 + module_exit(davinci_spi_exit); 1253 + 1254 + MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver"); 1255 + MODULE_LICENSE("GPL");
+81 -36
drivers/spi/dw_spi.c
··· 152 152 #else 153 153 static inline int mrst_spi_debugfs_init(struct dw_spi *dws) 154 154 { 155 + return 0; 155 156 } 156 157 157 158 static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) ··· 162 161 163 162 static void wait_till_not_busy(struct dw_spi *dws) 164 163 { 165 - unsigned long end = jiffies + usecs_to_jiffies(1000); 164 + unsigned long end = jiffies + 1 + usecs_to_jiffies(1000); 166 165 167 166 while (time_before(jiffies, end)) { 168 167 if (!(dw_readw(dws, sr) & SR_BUSY)) 169 168 return; 170 169 } 171 170 dev_err(&dws->master->dev, 172 - "DW SPI: Stutus keeps busy for 1000us after a read/write!\n"); 171 + "DW SPI: Status keeps busy for 1000us after a read/write!\n"); 173 172 } 174 173 175 174 static void flush(struct dw_spi *dws) ··· 359 358 static irqreturn_t interrupt_transfer(struct dw_spi *dws) 360 359 { 361 360 u16 irq_status, irq_mask = 0x3f; 361 + u32 int_level = dws->fifo_len / 2; 362 + u32 left; 362 363 363 364 irq_status = dw_readw(dws, isr) & irq_mask; 364 365 /* Error handling */ ··· 372 369 return IRQ_HANDLED; 373 370 } 374 371 375 - /* INT comes from tx */ 376 - if (dws->tx && (irq_status & SPI_INT_TXEI)) { 377 - while (dws->tx < dws->tx_end) 372 + if (irq_status & SPI_INT_TXEI) { 373 + spi_mask_intr(dws, SPI_INT_TXEI); 374 + 375 + left = (dws->tx_end - dws->tx) / dws->n_bytes; 376 + left = (left > int_level) ? 
int_level : left; 377 + 378 + while (left--) 378 379 dws->write(dws); 380 + dws->read(dws); 379 381 380 - if (dws->tx == dws->tx_end) { 381 - spi_mask_intr(dws, SPI_INT_TXEI); 382 - transfer_complete(dws); 383 - } 384 - } 385 - 386 - /* INT comes from rx */ 387 - if (dws->rx && (irq_status & SPI_INT_RXFI)) { 388 - if (dws->read(dws)) 382 + /* Re-enable the IRQ if there is still data left to tx */ 383 + if (dws->tx_end > dws->tx) 384 + spi_umask_intr(dws, SPI_INT_TXEI); 385 + else 389 386 transfer_complete(dws); 390 387 } 388 + 391 389 return IRQ_HANDLED; 392 390 } 393 391 ··· 408 404 /* Must be called inside pump_transfers() */ 409 405 static void poll_transfer(struct dw_spi *dws) 410 406 { 411 - if (dws->tx) { 412 - while (dws->write(dws)) 413 - dws->read(dws); 414 - } 407 + while (dws->write(dws)) 408 + dws->read(dws); 415 409 416 - dws->read(dws); 417 410 transfer_complete(dws); 418 411 } 419 412 ··· 429 428 u8 bits = 0; 430 429 u8 imask = 0; 431 430 u8 cs_change = 0; 431 + u16 txint_level = 0; 432 432 u16 clk_div = 0; 433 433 u32 speed = 0; 434 434 u32 cr0 = 0; ··· 439 437 transfer = dws->cur_transfer; 440 438 chip = dws->cur_chip; 441 439 spi = message->spi; 440 + 441 + if (unlikely(!chip->clk_div)) 442 + chip->clk_div = dws->max_freq / chip->speed_hz; 442 443 443 444 if (message->state == ERROR_STATE) { 444 445 message->status = -EIO; ··· 497 492 498 493 /* clk_div doesn't support odd number */ 499 494 clk_div = dws->max_freq / speed; 500 - clk_div = (clk_div >> 1) << 1; 495 + clk_div = (clk_div + 1) & 0xfffe; 501 496 502 497 chip->speed_hz = speed; 503 498 chip->clk_div = clk_div; ··· 537 532 } 538 533 message->state = RUNNING_STATE; 539 534 535 + /* 536 + * Adjust transfer mode if necessary. Requires platform dependent 537 + * chipselect mechanism. 
538 + */ 539 + if (dws->cs_control) { 540 + if (dws->rx && dws->tx) 541 + chip->tmode = 0x00; 542 + else if (dws->rx) 543 + chip->tmode = 0x02; 544 + else 545 + chip->tmode = 0x01; 546 + 547 + cr0 &= ~(0x3 << SPI_MODE_OFFSET); 548 + cr0 |= (chip->tmode << SPI_TMOD_OFFSET); 549 + } 550 + 540 551 /* Check if current transfer is a DMA transaction */ 541 552 dws->dma_mapped = map_dma_buffers(dws); 542 553 554 + /* 555 + * Interrupt mode 556 + * we only need set the TXEI IRQ, as TX/RX always happen syncronizely 557 + */ 543 558 if (!dws->dma_mapped && !chip->poll_mode) { 544 - if (dws->rx) 545 - imask |= SPI_INT_RXFI; 546 - if (dws->tx) 547 - imask |= SPI_INT_TXEI; 559 + int templen = dws->len / dws->n_bytes; 560 + txint_level = dws->fifo_len / 2; 561 + txint_level = (templen > txint_level) ? txint_level : templen; 562 + 563 + imask |= SPI_INT_TXEI; 548 564 dws->transfer_handler = interrupt_transfer; 549 565 } 550 566 ··· 575 549 * 2. clk_div is changed 576 550 * 3. control value changes 577 551 */ 578 - if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div) { 552 + if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) { 579 553 spi_enable_chip(dws, 0); 580 554 581 555 if (dw_readw(dws, ctrl0) != cr0) 582 556 dw_writew(dws, ctrl0, cr0); 583 557 584 - /* Set the interrupt mask, for poll mode just diable all int */ 585 - spi_mask_intr(dws, 0xff); 586 - if (!chip->poll_mode) 587 - spi_umask_intr(dws, imask); 588 - 589 558 spi_set_clk(dws, clk_div ? 
clk_div : chip->clk_div); 590 559 spi_chip_sel(dws, spi->chip_select); 591 - spi_enable_chip(dws, 1); 592 560 561 + /* Set the interrupt mask, for poll mode just diable all int */ 562 + spi_mask_intr(dws, 0xff); 563 + if (imask) 564 + spi_umask_intr(dws, imask); 565 + if (txint_level) 566 + dw_writew(dws, txfltr, txint_level); 567 + 568 + spi_enable_chip(dws, 1); 593 569 if (cs_change) 594 570 dws->prev_chip = chip; 595 571 } ··· 740 712 } 741 713 chip->bits_per_word = spi->bits_per_word; 742 714 715 + if (!spi->max_speed_hz) { 716 + dev_err(&spi->dev, "No max speed HZ parameter\n"); 717 + return -EINVAL; 718 + } 743 719 chip->speed_hz = spi->max_speed_hz; 744 - if (chip->speed_hz) 745 - chip->clk_div = 25000000 / chip->speed_hz; 746 - else 747 - chip->clk_div = 8; /* default value */ 748 720 749 721 chip->tmode = 0; /* Tx & Rx */ 750 722 /* Default SPI mode is SCPOL = 0, SCPH = 0 */ ··· 763 735 kfree(chip); 764 736 } 765 737 766 - static int __init init_queue(struct dw_spi *dws) 738 + static int __devinit init_queue(struct dw_spi *dws) 767 739 { 768 740 INIT_LIST_HEAD(&dws->queue); 769 741 spin_lock_init(&dws->lock); ··· 845 817 spi_mask_intr(dws, 0xff); 846 818 spi_enable_chip(dws, 1); 847 819 flush(dws); 820 + 821 + /* 822 + * Try to detect the FIFO depth if not set by interface driver, 823 + * the depth could be from 2 to 256 from HW spec 824 + */ 825 + if (!dws->fifo_len) { 826 + u32 fifo; 827 + for (fifo = 2; fifo <= 257; fifo++) { 828 + dw_writew(dws, txfltr, fifo); 829 + if (fifo != dw_readw(dws, txfltr)) 830 + break; 831 + } 832 + 833 + dws->fifo_len = (fifo == 257) ? 0 : fifo; 834 + dw_writew(dws, txfltr, 0); 835 + } 848 836 } 849 837 850 838 int __devinit dw_spi_add_host(struct dw_spi *dws) ··· 957 913 /* Disconnect from the SPI framework */ 958 914 spi_unregister_master(dws->master); 959 915 } 916 + EXPORT_SYMBOL(dw_spi_remove_host); 960 917 961 918 int dw_spi_suspend_host(struct dw_spi *dws) 962 919 {
+147
drivers/spi/dw_spi_mmio.c
··· 1 + /* 2 + * dw_spi_mmio.c - Memory-mapped interface driver for DW SPI Core 3 + * 4 + * Copyright (c) 2010, Octasic semiconductor. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + */ 10 + 11 + #include <linux/clk.h> 12 + #include <linux/interrupt.h> 13 + #include <linux/platform_device.h> 14 + #include <linux/spi/dw_spi.h> 15 + #include <linux/spi/spi.h> 16 + 17 + #define DRIVER_NAME "dw_spi_mmio" 18 + 19 + struct dw_spi_mmio { 20 + struct dw_spi dws; 21 + struct clk *clk; 22 + }; 23 + 24 + static int __devinit dw_spi_mmio_probe(struct platform_device *pdev) 25 + { 26 + struct dw_spi_mmio *dwsmmio; 27 + struct dw_spi *dws; 28 + struct resource *mem, *ioarea; 29 + int ret; 30 + 31 + dwsmmio = kzalloc(sizeof(struct dw_spi_mmio), GFP_KERNEL); 32 + if (!dwsmmio) { 33 + ret = -ENOMEM; 34 + goto err_end; 35 + } 36 + 37 + dws = &dwsmmio->dws; 38 + 39 + /* Get basic io resource and map it */ 40 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 41 + if (!mem) { 42 + dev_err(&pdev->dev, "no mem resource?\n"); 43 + ret = -EINVAL; 44 + goto err_kfree; 45 + } 46 + 47 + ioarea = request_mem_region(mem->start, resource_size(mem), 48 + pdev->name); 49 + if (!ioarea) { 50 + dev_err(&pdev->dev, "SPI region already claimed\n"); 51 + ret = -EBUSY; 52 + goto err_kfree; 53 + } 54 + 55 + dws->regs = ioremap_nocache(mem->start, resource_size(mem)); 56 + if (!dws->regs) { 57 + dev_err(&pdev->dev, "SPI region already mapped\n"); 58 + ret = -ENOMEM; 59 + goto err_release_reg; 60 + } 61 + 62 + dws->irq = platform_get_irq(pdev, 0); 63 + if (dws->irq < 0) { 64 + dev_err(&pdev->dev, "no irq resource?\n"); 65 + ret = dws->irq; /* -ENXIO */ 66 + goto err_unmap; 67 + } 68 + 69 + dwsmmio->clk = clk_get(&pdev->dev, NULL); 70 + if (!dwsmmio->clk) { 71 + ret = -ENODEV; 72 + goto err_irq; 73 + } 74 + 
clk_enable(dwsmmio->clk); 75 + 76 + dws->parent_dev = &pdev->dev; 77 + dws->bus_num = 0; 78 + dws->num_cs = 4; 79 + dws->max_freq = clk_get_rate(dwsmmio->clk); 80 + 81 + ret = dw_spi_add_host(dws); 82 + if (ret) 83 + goto err_clk; 84 + 85 + platform_set_drvdata(pdev, dwsmmio); 86 + return 0; 87 + 88 + err_clk: 89 + clk_disable(dwsmmio->clk); 90 + clk_put(dwsmmio->clk); 91 + dwsmmio->clk = NULL; 92 + err_irq: 93 + free_irq(dws->irq, dws); 94 + err_unmap: 95 + iounmap(dws->regs); 96 + err_release_reg: 97 + release_mem_region(mem->start, resource_size(mem)); 98 + err_kfree: 99 + kfree(dwsmmio); 100 + err_end: 101 + return ret; 102 + } 103 + 104 + static int __devexit dw_spi_mmio_remove(struct platform_device *pdev) 105 + { 106 + struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev); 107 + struct resource *mem; 108 + 109 + platform_set_drvdata(pdev, NULL); 110 + 111 + clk_disable(dwsmmio->clk); 112 + clk_put(dwsmmio->clk); 113 + dwsmmio->clk = NULL; 114 + 115 + free_irq(dwsmmio->dws.irq, &dwsmmio->dws); 116 + dw_spi_remove_host(&dwsmmio->dws); 117 + iounmap(dwsmmio->dws.regs); 118 + kfree(dwsmmio); 119 + 120 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 121 + release_mem_region(mem->start, resource_size(mem)); 122 + return 0; 123 + } 124 + 125 + static struct platform_driver dw_spi_mmio_driver = { 126 + .remove = __devexit_p(dw_spi_mmio_remove), 127 + .driver = { 128 + .name = DRIVER_NAME, 129 + .owner = THIS_MODULE, 130 + }, 131 + }; 132 + 133 + static int __init dw_spi_mmio_init(void) 134 + { 135 + return platform_driver_probe(&dw_spi_mmio_driver, dw_spi_mmio_probe); 136 + } 137 + module_init(dw_spi_mmio_init); 138 + 139 + static void __exit dw_spi_mmio_exit(void) 140 + { 141 + platform_driver_unregister(&dw_spi_mmio_driver); 142 + } 143 + module_exit(dw_spi_mmio_exit); 144 + 145 + MODULE_AUTHOR("Jean-Hugues Deschenes <jean-hugues.deschenes@octasic.com>"); 146 + MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core"); 147 + 
MODULE_LICENSE("GPL v2");
+2
drivers/spi/dw_spi_pci.c
··· 73 73 dws->num_cs = 4; 74 74 dws->max_freq = 25000000; /* for Moorestwon */ 75 75 dws->irq = pdev->irq; 76 + dws->fifo_len = 40; /* FIFO has 40 words buffer */ 76 77 77 78 ret = dw_spi_add_host(dws); 78 79 if (ret) ··· 99 98 struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); 100 99 101 100 pci_set_drvdata(pdev, NULL); 101 + dw_spi_remove_host(&dwpci->dws); 102 102 iounmap(dwpci->dws.regs); 103 103 pci_release_region(pdev, 0); 104 104 kfree(dwpci);
+1 -1
drivers/spi/mpc52xx_psc_spi.c
··· 503 503 return mpc52xx_psc_spi_do_remove(&op->dev); 504 504 } 505 505 506 - static struct of_device_id mpc52xx_psc_spi_of_match[] = { 506 + static const struct of_device_id mpc52xx_psc_spi_of_match[] = { 507 507 { .compatible = "fsl,mpc5200-psc-spi", }, 508 508 { .compatible = "mpc5200-psc-spi", }, /* old */ 509 509 {}
+1 -1
drivers/spi/mpc52xx_spi.c
··· 550 550 return 0; 551 551 } 552 552 553 - static struct of_device_id mpc52xx_spi_match[] __devinitdata = { 553 + static const struct of_device_id mpc52xx_spi_match[] __devinitconst = { 554 554 { .compatible = "fsl,mpc5200-spi", }, 555 555 {} 556 556 };
+1 -1
drivers/spi/spi_imx.c
··· 469 469 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); 470 470 int gpio = spi_imx->chipselect[spi->chip_select]; 471 471 472 - pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__, 472 + dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__, 473 473 spi->mode, spi->bits_per_word, spi->max_speed_hz); 474 474 475 475 if (gpio >= 0)
+4 -4
drivers/spi/spi_mpc8xxx.c
··· 365 365 366 366 if ((mpc8xxx_spi->spibrg / hz) > 64) { 367 367 cs->hw_mode |= SPMODE_DIV16; 368 - pm = mpc8xxx_spi->spibrg / (hz * 64); 368 + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1; 369 369 370 370 WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. " 371 371 "Will use %d Hz instead.\n", dev_name(&spi->dev), ··· 373 373 if (pm > 16) 374 374 pm = 16; 375 375 } else 376 - pm = mpc8xxx_spi->spibrg / (hz * 4); 376 + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; 377 377 if (pm) 378 378 pm--; 379 379 ··· 1328 1328 static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) 1329 1329 { 1330 1330 struct resource *mem; 1331 - unsigned int irq; 1331 + int irq; 1332 1332 struct spi_master *master; 1333 1333 1334 1334 if (!pdev->dev.platform_data) ··· 1339 1339 return -EINVAL; 1340 1340 1341 1341 irq = platform_get_irq(pdev, 0); 1342 - if (!irq) 1342 + if (irq <= 0) 1343 1343 return -EINVAL; 1344 1344 1345 1345 master = mpc8xxx_spi_probe(&pdev->dev, mem, irq);
+1 -1
drivers/spi/spi_ppc4xx.c
··· 578 578 return 0; 579 579 } 580 580 581 - static struct of_device_id spi_ppc4xx_of_match[] = { 581 + static const struct of_device_id spi_ppc4xx_of_match[] = { 582 582 { .compatible = "ibm,ppc4xx-spi", }, 583 583 {}, 584 584 };
+38 -51
drivers/spi/spi_s3c64xx.c
··· 28 28 #include <linux/spi/spi.h> 29 29 30 30 #include <mach/dma.h> 31 - #include <plat/spi.h> 31 + #include <plat/s3c64xx-spi.h> 32 32 33 33 /* Registers and bit-fields */ 34 34 ··· 137 137 /** 138 138 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver. 139 139 * @clk: Pointer to the spi clock. 140 + * @src_clk: Pointer to the clock used to generate SPI signals. 140 141 * @master: Pointer to the SPI Protocol master. 141 142 * @workqueue: Work queue for the SPI xfer requests. 142 143 * @cntrlr_info: Platform specific data for the controller this driver manages. ··· 158 157 struct s3c64xx_spi_driver_data { 159 158 void __iomem *regs; 160 159 struct clk *clk; 160 + struct clk *src_clk; 161 161 struct platform_device *pdev; 162 162 struct spi_master *master; 163 163 struct workqueue_struct *workqueue; 164 - struct s3c64xx_spi_cntrlr_info *cntrlr_info; 164 + struct s3c64xx_spi_info *cntrlr_info; 165 165 struct spi_device *tgl_spi; 166 166 struct work_struct work; 167 167 struct list_head queue; ··· 182 180 183 181 static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) 184 182 { 185 - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; 183 + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; 186 184 void __iomem *regs = sdd->regs; 187 185 unsigned long loops; 188 186 u32 val; ··· 227 225 struct spi_device *spi, 228 226 struct spi_transfer *xfer, int dma_mode) 229 227 { 230 - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; 228 + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; 231 229 void __iomem *regs = sdd->regs; 232 230 u32 modecfg, chcfg; 233 231 ··· 300 298 if (sdd->tgl_spi != spi) { /* if last mssg on diff device */ 301 299 /* Deselect the last toggled device */ 302 300 cs = sdd->tgl_spi->controller_data; 303 - cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1); 301 + cs->set_level(cs->line, 302 + spi->mode & SPI_CS_HIGH ? 
0 : 1); 304 303 } 305 304 sdd->tgl_spi = NULL; 306 305 } 307 306 308 307 cs = spi->controller_data; 309 - cs->set_level(spi->mode & SPI_CS_HIGH ? 1 : 0); 308 + cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0); 310 309 } 311 310 312 311 static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, 313 312 struct spi_transfer *xfer, int dma_mode) 314 313 { 315 - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; 314 + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; 316 315 void __iomem *regs = sdd->regs; 317 316 unsigned long val; 318 317 int ms; ··· 387 384 if (sdd->tgl_spi == spi) 388 385 sdd->tgl_spi = NULL; 389 386 390 - cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1); 387 + cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1); 391 388 } 392 389 393 390 static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) 394 391 { 395 - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; 396 392 void __iomem *regs = sdd->regs; 397 393 u32 val; 398 394 ··· 437 435 /* Configure Clock */ 438 436 val = readl(regs + S3C64XX_SPI_CLK_CFG); 439 437 val &= ~S3C64XX_SPI_PSR_MASK; 440 - val |= ((clk_get_rate(sci->src_clk) / sdd->cur_speed / 2 - 1) 438 + val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) 441 439 & S3C64XX_SPI_PSR_MASK); 442 440 writel(val, regs + S3C64XX_SPI_CLK_CFG); 443 441 ··· 560 558 static void handle_msg(struct s3c64xx_spi_driver_data *sdd, 561 559 struct spi_message *msg) 562 560 { 563 - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; 561 + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; 564 562 struct spi_device *spi = msg->spi; 565 563 struct s3c64xx_spi_csinfo *cs = spi->controller_data; 566 564 struct spi_transfer *xfer; ··· 634 632 S3C64XX_SPI_DEACT(sdd); 635 633 636 634 if (status) { 637 - dev_err(&spi->dev, "I/O Error: \ 638 - rx-%d tx-%d res:rx-%c tx-%c len-%d\n", 635 + dev_err(&spi->dev, "I/O Error: " 636 + "rx-%d tx-%d res:rx-%c tx-%c len-%d\n", 639 637 xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 
1 : 0, 640 638 (sdd->state & RXBUSY) ? 'f' : 'p', 641 639 (sdd->state & TXBUSY) ? 'f' : 'p', ··· 788 786 { 789 787 struct s3c64xx_spi_csinfo *cs = spi->controller_data; 790 788 struct s3c64xx_spi_driver_data *sdd; 791 - struct s3c64xx_spi_cntrlr_info *sci; 789 + struct s3c64xx_spi_info *sci; 792 790 struct spi_message *msg; 793 791 u32 psr, speed; 794 792 unsigned long flags; ··· 833 831 } 834 832 835 833 /* Check if we can provide the requested rate */ 836 - speed = clk_get_rate(sci->src_clk) / 2 / (0 + 1); /* Max possible */ 834 + speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); /* Max possible */ 837 835 838 836 if (spi->max_speed_hz > speed) 839 837 spi->max_speed_hz = speed; 840 838 841 - psr = clk_get_rate(sci->src_clk) / 2 / spi->max_speed_hz - 1; 839 + psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; 842 840 psr &= S3C64XX_SPI_PSR_MASK; 843 841 if (psr == S3C64XX_SPI_PSR_MASK) 844 842 psr--; 845 843 846 - speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1); 844 + speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); 847 845 if (spi->max_speed_hz < speed) { 848 846 if (psr+1 < S3C64XX_SPI_PSR_MASK) { 849 847 psr++; ··· 853 851 } 854 852 } 855 853 856 - speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1); 854 + speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); 857 855 if (spi->max_speed_hz >= speed) 858 856 spi->max_speed_hz = speed; 859 857 else ··· 869 867 870 868 static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) 871 869 { 872 - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; 870 + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; 873 871 void __iomem *regs = sdd->regs; 874 872 unsigned int val; 875 873 ··· 904 902 { 905 903 struct resource *mem_res, *dmatx_res, *dmarx_res; 906 904 struct s3c64xx_spi_driver_data *sdd; 907 - struct s3c64xx_spi_cntrlr_info *sci; 905 + struct s3c64xx_spi_info *sci; 908 906 struct spi_master *master; 909 907 int ret; 910 908 ··· 1002 1000 goto err4; 1003 1001 } 1004 1002 1005 - 
if (sci->src_clk_nr == S3C64XX_SPI_SRCCLK_PCLK) 1006 - sci->src_clk = sdd->clk; 1007 - else 1008 - sci->src_clk = clk_get(&pdev->dev, sci->src_clk_name); 1009 - if (IS_ERR(sci->src_clk)) { 1003 + sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name); 1004 + if (IS_ERR(sdd->src_clk)) { 1010 1005 dev_err(&pdev->dev, 1011 1006 "Unable to acquire clock '%s'\n", sci->src_clk_name); 1012 - ret = PTR_ERR(sci->src_clk); 1007 + ret = PTR_ERR(sdd->src_clk); 1013 1008 goto err5; 1014 1009 } 1015 1010 1016 - if (sci->src_clk != sdd->clk && clk_enable(sci->src_clk)) { 1011 + if (clk_enable(sdd->src_clk)) { 1017 1012 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", 1018 1013 sci->src_clk_name); 1019 1014 ret = -EBUSY; ··· 1039 1040 goto err8; 1040 1041 } 1041 1042 1042 - dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d \ 1043 - with %d Slaves attached\n", 1043 + dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d " 1044 + "with %d Slaves attached\n", 1044 1045 pdev->id, master->num_chipselect); 1045 - dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\ 1046 - \tDMA=[Rx-%d, Tx-%d]\n", 1046 + dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n", 1047 1047 mem_res->end, mem_res->start, 1048 1048 sdd->rx_dmach, sdd->tx_dmach); 1049 1049 ··· 1051 1053 err8: 1052 1054 destroy_workqueue(sdd->workqueue); 1053 1055 err7: 1054 - if (sci->src_clk != sdd->clk) 1055 - clk_disable(sci->src_clk); 1056 + clk_disable(sdd->src_clk); 1056 1057 err6: 1057 - if (sci->src_clk != sdd->clk) 1058 - clk_put(sci->src_clk); 1058 + clk_put(sdd->src_clk); 1059 1059 err5: 1060 1060 clk_disable(sdd->clk); 1061 1061 err4: ··· 1074 1078 { 1075 1079 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); 1076 1080 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1077 - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; 1078 1081 struct resource *mem_res; 1079 1082 unsigned long flags; 1080 1083 ··· 1088 1093 1089 1094 
destroy_workqueue(sdd->workqueue); 1090 1095 1091 - if (sci->src_clk != sdd->clk) 1092 - clk_disable(sci->src_clk); 1093 - 1094 - if (sci->src_clk != sdd->clk) 1095 - clk_put(sci->src_clk); 1096 + clk_disable(sdd->src_clk); 1097 + clk_put(sdd->src_clk); 1096 1098 1097 1099 clk_disable(sdd->clk); 1098 1100 clk_put(sdd->clk); ··· 1097 1105 iounmap((void *) sdd->regs); 1098 1106 1099 1107 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1100 - release_mem_region(mem_res->start, resource_size(mem_res)); 1108 + if (mem_res != NULL) 1109 + release_mem_region(mem_res->start, resource_size(mem_res)); 1101 1110 1102 1111 platform_set_drvdata(pdev, NULL); 1103 1112 spi_master_put(master); ··· 1111 1118 { 1112 1119 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); 1113 1120 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1114 - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; 1115 - struct s3c64xx_spi_csinfo *cs; 1116 1121 unsigned long flags; 1117 1122 1118 1123 spin_lock_irqsave(&sdd->lock, flags); ··· 1121 1130 msleep(10); 1122 1131 1123 1132 /* Disable the clock */ 1124 - if (sci->src_clk != sdd->clk) 1125 - clk_disable(sci->src_clk); 1126 - 1133 + clk_disable(sdd->src_clk); 1127 1134 clk_disable(sdd->clk); 1128 1135 1129 1136 sdd->cur_speed = 0; /* Output Clock is stopped */ ··· 1133 1144 { 1134 1145 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); 1135 1146 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1136 - struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; 1147 + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; 1137 1148 unsigned long flags; 1138 1149 1139 1150 sci->cfg_gpio(pdev); 1140 1151 1141 1152 /* Enable the clock */ 1142 - if (sci->src_clk != sdd->clk) 1143 - clk_enable(sci->src_clk); 1144 - 1153 + clk_enable(sdd->src_clk); 1145 1154 clk_enable(sdd->clk); 1146 1155 1147 1156 s3c64xx_spi_hwinit(sdd, pdev->id);
+1 -1
drivers/spi/spi_sh_msiof.c
··· 20 20 #include <linux/bitmap.h> 21 21 #include <linux/clk.h> 22 22 #include <linux/io.h> 23 + #include <linux/err.h> 23 24 24 25 #include <linux/spi/spi.h> 25 26 #include <linux/spi/spi_bitbang.h> 26 27 #include <linux/spi/sh_msiof.h> 27 28 28 - #include <asm/spi.h> 29 29 #include <asm/unaligned.h> 30 30 31 31 struct sh_msiof_spi_priv {
+1 -1
drivers/spi/spi_stmp.c
··· 76 76 break; \ 77 77 } \ 78 78 cpu_relax(); \ 79 - } while (time_before(end_jiffies, jiffies)); \ 79 + } while (time_before(jiffies, end_jiffies)); \ 80 80 succeeded; \ 81 81 }) 82 82
+24 -4
drivers/spi/xilinx_spi.c
··· 93 93 void (*rx_fn) (struct xilinx_spi *); 94 94 }; 95 95 96 + static void xspi_write32(u32 val, void __iomem *addr) 97 + { 98 + iowrite32(val, addr); 99 + } 100 + 101 + static unsigned int xspi_read32(void __iomem *addr) 102 + { 103 + return ioread32(addr); 104 + } 105 + 106 + static void xspi_write32_be(u32 val, void __iomem *addr) 107 + { 108 + iowrite32be(val, addr); 109 + } 110 + 111 + static unsigned int xspi_read32_be(void __iomem *addr) 112 + { 113 + return ioread32be(addr); 114 + } 115 + 96 116 static void xspi_tx8(struct xilinx_spi *xspi) 97 117 { 98 118 xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET); ··· 394 374 xspi->mem = *mem; 395 375 xspi->irq = irq; 396 376 if (pdata->little_endian) { 397 - xspi->read_fn = ioread32; 398 - xspi->write_fn = iowrite32; 377 + xspi->read_fn = xspi_read32; 378 + xspi->write_fn = xspi_write32; 399 379 } else { 400 - xspi->read_fn = ioread32be; 401 - xspi->write_fn = iowrite32be; 380 + xspi->read_fn = xspi_read32_be; 381 + xspi->write_fn = xspi_write32_be; 402 382 } 403 383 xspi->bits_per_word = pdata->bits_per_word; 404 384 if (xspi->bits_per_word == 8) {
+1 -1
drivers/spi/xilinx_spi_of.c
··· 99 99 return xilinx_spi_remove(op); 100 100 } 101 101 102 - static struct of_device_id xilinx_spi_of_match[] = { 102 + static const struct of_device_id xilinx_spi_of_match[] = { 103 103 { .compatible = "xlnx,xps-spi-2.00.a", }, 104 104 { .compatible = "xlnx,xps-spi-2.00.b", }, 105 105 {}
+1 -2
drivers/ssb/main.c
··· 494 494 #endif 495 495 break; 496 496 case SSB_BUSTYPE_SDIO: 497 - #ifdef CONFIG_SSB_SDIO 498 - sdev->irq = bus->host_sdio->dev.irq; 497 + #ifdef CONFIG_SSB_SDIOHOST 499 498 dev->parent = &bus->host_sdio->dev; 500 499 #endif 501 500 break;
+30 -18
drivers/usb/core/devio.c
··· 1312 1312 void __user *addr = as->userurb; 1313 1313 unsigned int i; 1314 1314 1315 - if (as->userbuffer) 1315 + if (as->userbuffer && urb->actual_length) 1316 1316 if (copy_to_user(as->userbuffer, urb->transfer_buffer, 1317 - urb->transfer_buffer_length)) 1317 + urb->actual_length)) 1318 1318 goto err_out; 1319 1319 if (put_user(as->status, &userurb->status)) 1320 1320 goto err_out; ··· 1334 1334 } 1335 1335 } 1336 1336 1337 - free_async(as); 1338 - 1339 1337 if (put_user(addr, (void __user * __user *)arg)) 1340 1338 return -EFAULT; 1341 1339 return 0; 1342 1340 1343 1341 err_out: 1344 - free_async(as); 1345 1342 return -EFAULT; 1346 1343 } 1347 1344 ··· 1368 1371 static int proc_reapurb(struct dev_state *ps, void __user *arg) 1369 1372 { 1370 1373 struct async *as = reap_as(ps); 1371 - if (as) 1372 - return processcompl(as, (void __user * __user *)arg); 1374 + if (as) { 1375 + int retval = processcompl(as, (void __user * __user *)arg); 1376 + free_async(as); 1377 + return retval; 1378 + } 1373 1379 if (signal_pending(current)) 1374 1380 return -EINTR; 1375 1381 return -EIO; ··· 1380 1380 1381 1381 static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) 1382 1382 { 1383 + int retval; 1383 1384 struct async *as; 1384 1385 1385 - if (!(as = async_getcompleted(ps))) 1386 - return -EAGAIN; 1387 - return processcompl(as, (void __user * __user *)arg); 1386 + as = async_getcompleted(ps); 1387 + retval = -EAGAIN; 1388 + if (as) { 1389 + retval = processcompl(as, (void __user * __user *)arg); 1390 + free_async(as); 1391 + } 1392 + return retval; 1388 1393 } 1389 1394 1390 1395 #ifdef CONFIG_COMPAT ··· 1480 1475 void __user *addr = as->userurb; 1481 1476 unsigned int i; 1482 1477 1483 - if (as->userbuffer) 1478 + if (as->userbuffer && urb->actual_length) 1484 1479 if (copy_to_user(as->userbuffer, urb->transfer_buffer, 1485 - urb->transfer_buffer_length)) 1480 + urb->actual_length)) 1486 1481 return -EFAULT; 1487 1482 if (put_user(as->status, 
&userurb->status)) 1488 1483 return -EFAULT; ··· 1502 1497 } 1503 1498 } 1504 1499 1505 - free_async(as); 1506 1500 if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) 1507 1501 return -EFAULT; 1508 1502 return 0; ··· 1510 1506 static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) 1511 1507 { 1512 1508 struct async *as = reap_as(ps); 1513 - if (as) 1514 - return processcompl_compat(as, (void __user * __user *)arg); 1509 + if (as) { 1510 + int retval = processcompl_compat(as, (void __user * __user *)arg); 1511 + free_async(as); 1512 + return retval; 1513 + } 1515 1514 if (signal_pending(current)) 1516 1515 return -EINTR; 1517 1516 return -EIO; ··· 1522 1515 1523 1516 static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) 1524 1517 { 1518 + int retval; 1525 1519 struct async *as; 1526 1520 1527 - if (!(as = async_getcompleted(ps))) 1528 - return -EAGAIN; 1529 - return processcompl_compat(as, (void __user * __user *)arg); 1521 + retval = -EAGAIN; 1522 + as = async_getcompleted(ps); 1523 + if (as) { 1524 + retval = processcompl_compat(as, (void __user * __user *)arg); 1525 + free_async(as); 1526 + } 1527 + return retval; 1530 1528 } 1531 1529 1532 1530
+1 -2
drivers/usb/gadget/f_eem.c
··· 358 358 * b15: bmType (0 == data) 359 359 */ 360 360 len = skb->len; 361 - put_unaligned_le16((len & 0x3FFF) | BIT(14), skb_push(skb, 2)); 361 + put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2)); 362 362 363 363 /* add a zero-length EEM packet, if needed */ 364 364 if (padlen) ··· 464 464 } 465 465 466 466 /* validate CRC */ 467 - crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN); 468 467 if (header & BIT(14)) { 469 468 crc = get_unaligned_le32(skb->data + len 470 469 - ETH_FCS_LEN);
+1 -1
drivers/usb/gadget/multi.c
··· 29 29 #if defined USB_ETH_RNDIS 30 30 # undef USB_ETH_RNDIS 31 31 #endif 32 - #ifdef CONFIG_USB_ETH_RNDIS 32 + #ifdef CONFIG_USB_G_MULTI_RNDIS 33 33 # define USB_ETH_RNDIS y 34 34 #endif 35 35
+1
drivers/usb/gadget/r8a66597-udc.c
··· 26 26 #include <linux/io.h> 27 27 #include <linux/platform_device.h> 28 28 #include <linux/clk.h> 29 + #include <linux/err.h> 29 30 30 31 #include <linux/usb/ch9.h> 31 32 #include <linux/usb/gadget.h>
+1
drivers/usb/gadget/s3c-hsotg.c
··· 2582 2582 hsotg->gadget.dev.driver = NULL; 2583 2583 return ret; 2584 2584 } 2585 + EXPORT_SYMBOL(usb_gadget_register_driver); 2585 2586 2586 2587 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 2587 2588 {
+8 -5
drivers/usb/host/ehci-hub.c
··· 196 196 if (hostpc_reg) { 197 197 u32 t3; 198 198 199 + spin_unlock_irq(&ehci->lock); 199 200 msleep(5);/* 5ms for HCD enter low pwr mode */ 201 + spin_lock_irq(&ehci->lock); 200 202 t3 = ehci_readl(ehci, hostpc_reg); 201 203 ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); 202 204 t3 = ehci_readl(ehci, hostpc_reg); ··· 906 904 if ((temp & PORT_PE) == 0 907 905 || (temp & PORT_RESET) != 0) 908 906 goto error; 909 - ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); 907 + 910 908 /* After above check the port must be connected. 911 909 * Set appropriate bit thus could put phy into low power 912 910 * mode if we have hostpc feature 913 911 */ 912 + temp &= ~PORT_WKCONN_E; 913 + temp |= PORT_WKDISC_E | PORT_WKOC_E; 914 + ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); 914 915 if (hostpc_reg) { 915 - temp &= ~PORT_WKCONN_E; 916 - temp |= (PORT_WKDISC_E | PORT_WKOC_E); 917 - ehci_writel(ehci, temp | PORT_SUSPEND, 918 - status_reg); 916 + spin_unlock_irqrestore(&ehci->lock, flags); 919 917 msleep(5);/* 5ms for HCD enter low pwr mode */ 918 + spin_lock_irqsave(&ehci->lock, flags); 920 919 temp1 = ehci_readl(ehci, hostpc_reg); 921 920 ehci_writel(ehci, temp1 | HOSTPC_PHCD, 922 921 hostpc_reg);
+3 -3
drivers/usb/host/fhci-tds.c
··· 105 105 if (ep->td_base) 106 106 cpm_muram_free(cpm_muram_offset(ep->td_base)); 107 107 108 - if (ep->conf_frame_Q) { 108 + if (kfifo_initialized(&ep->conf_frame_Q)) { 109 109 size = cq_howmany(&ep->conf_frame_Q); 110 110 for (; size; size--) { 111 111 struct packet *pkt = cq_get(&ep->conf_frame_Q); ··· 115 115 cq_delete(&ep->conf_frame_Q); 116 116 } 117 117 118 - if (ep->empty_frame_Q) { 118 + if (kfifo_initialized(&ep->empty_frame_Q)) { 119 119 size = cq_howmany(&ep->empty_frame_Q); 120 120 for (; size; size--) { 121 121 struct packet *pkt = cq_get(&ep->empty_frame_Q); ··· 125 125 cq_delete(&ep->empty_frame_Q); 126 126 } 127 127 128 - if (ep->dummy_packets_Q) { 128 + if (kfifo_initialized(&ep->dummy_packets_Q)) { 129 129 size = cq_howmany(&ep->dummy_packets_Q); 130 130 for (; size; size--) { 131 131 u8 *buff = cq_get(&ep->dummy_packets_Q);
+1
drivers/usb/misc/sisusbvga/sisusb.c
··· 3245 3245 { USB_DEVICE(0x0711, 0x0902) }, 3246 3246 { USB_DEVICE(0x0711, 0x0903) }, 3247 3247 { USB_DEVICE(0x0711, 0x0918) }, 3248 + { USB_DEVICE(0x0711, 0x0920) }, 3248 3249 { USB_DEVICE(0x182d, 0x021c) }, 3249 3250 { USB_DEVICE(0x182d, 0x0269) }, 3250 3251 { }
+1
drivers/usb/otg/Kconfig
··· 44 44 config USB_ULPI 45 45 bool "Generic ULPI Transceiver Driver" 46 46 depends on ARM 47 + select USB_OTG_UTILS 47 48 help 48 49 Enable this to support ULPI connected USB OTG transceivers which 49 50 are likely found on embedded boards.
+22 -3
drivers/usb/serial/ftdi_sio.c
··· 50 50 * Version Information 51 51 */ 52 52 #define DRIVER_VERSION "v1.5.0" 53 - #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>" 53 + #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr" 54 54 #define DRIVER_DESC "USB FTDI Serial Converters Driver" 55 55 56 56 static int debug; ··· 145 145 146 146 147 147 148 + /* 149 + * Device ID not listed? Test via module params product/vendor or 150 + * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! 151 + */ 148 152 static struct usb_device_id id_table_combined [] = { 149 153 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, 150 154 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, 151 155 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, 156 + { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, 152 157 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, 153 158 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, 154 159 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, ··· 557 552 { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) }, 558 553 { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) }, 559 554 /* 560 - * Due to many user requests for multiple ELV devices we enable 561 - * them by default. 
555 + * ELV devices: 562 556 */ 557 + { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, 558 + { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, 559 + { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, 560 + { USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) }, 561 + { USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) }, 562 + { USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) }, 563 + { USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) }, 564 + { USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) }, 563 565 { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) }, 564 566 { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) }, 565 567 { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) }, ··· 583 571 { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) }, 584 572 { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) }, 585 573 { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, 574 + { USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) }, 586 575 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, 576 + { USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) }, 587 577 { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, 588 578 { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, 589 579 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, 590 580 { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) }, 581 + { USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) }, 582 + { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, 583 + { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, 584 + { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, 591 585 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, 592 586 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, 593 587 { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, ··· 715 697 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, 716 698 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, 717 699 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, 700 + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) }, 718 701 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, 719 702 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, 720 703 { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
+18
drivers/usb/serial/ftdi_sio_ids.h
··· 38 38 /* www.candapter.com Ewert Energy Systems CANdapter device */ 39 39 #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ 40 40 41 + #define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */ 42 + 41 43 /* OOCDlink by Joern Kaipf <joernk@web.de> 42 44 * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ 43 45 #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ ··· 163 161 /* 164 162 * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). 165 163 * All of these devices use FTDI's vendor ID (0x0403). 164 + * Further IDs taken from ELV Windows .inf file. 166 165 * 167 166 * The previously included PID for the UO 100 module was incorrect. 168 167 * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58). 169 168 * 170 169 * Armin Laeuger originally sent the PID for the UM 100 module. 171 170 */ 171 + #define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */ 172 + #define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */ 173 + #define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */ 174 + #define FTDI_ELV_WS550_PID 0xE004 /* WS 550 */ 175 + #define FTDI_ELV_EC3000_PID 0xE006 /* ENERGY CONTROL 3000 USB */ 176 + #define FTDI_ELV_WS888_PID 0xE008 /* WS 888 */ 177 + #define FTDI_ELV_TWS550_PID 0xE009 /* Technoline WS 550 */ 178 + #define FTDI_ELV_FEM_PID 0xE00A /* Funk Energie Monitor */ 172 179 #define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ 173 180 #define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ 174 181 #define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */ 182 + #define FTDI_ELV_UMS100_PID 0xE0EB /* ELV USB Master-Slave Schaltsteckdose UMS 100 */ 183 + #define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */ 184 + #define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */ 185 + #define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */ 175 186 #define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */ 176 187 
#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */ 177 188 #define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */ 178 189 #define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */ 179 190 #define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */ 180 191 #define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */ 192 + #define FTDI_ELV_UTP8_PID 0xE0F5 /* ELV UTP 8 */ 181 193 #define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ 194 + #define FTDI_ELV_WS444PC_PID 0xE0F7 /* Conrad WS 444 PC */ 182 195 #define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */ 183 196 #define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */ 184 197 #define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */ ··· 985 968 #define PAPOUCH_VID 0x5050 /* Vendor ID */ 986 969 #define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ 987 970 #define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */ 971 + #define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */ 988 972 989 973 /* 990 974 * Marvell SheevaPlug
+1
drivers/usb/serial/sierra.c
··· 298 298 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ 299 299 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 300 300 }, 301 + { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ 301 302 302 303 { } 303 304 };
+1 -1
drivers/usb/storage/unusual_devs.h
··· 941 941 UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133, 942 942 "Microtech", 943 943 "USB-SCSI-DB25", 944 - US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init, 944 + US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init, 945 945 US_FL_SCM_MULT_TARG ), 946 946 947 947 UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100,
+7 -7
drivers/video/aty/aty128fb.c
··· 1931 1931 * PowerMac2,2 summer 2000 iMacs 1932 1932 * PowerMac4,1 january 2001 iMacs "flower power" 1933 1933 */ 1934 - if (machine_is_compatible("PowerMac2,1") || 1935 - machine_is_compatible("PowerMac2,2") || 1936 - machine_is_compatible("PowerMac4,1")) 1934 + if (of_machine_is_compatible("PowerMac2,1") || 1935 + of_machine_is_compatible("PowerMac2,2") || 1936 + of_machine_is_compatible("PowerMac4,1")) 1937 1937 default_vmode = VMODE_1024_768_75; 1938 1938 1939 1939 /* iBook SE */ 1940 - if (machine_is_compatible("PowerBook2,2")) 1940 + if (of_machine_is_compatible("PowerBook2,2")) 1941 1941 default_vmode = VMODE_800_600_60; 1942 1942 1943 1943 /* PowerBook Firewire (Pismo), iBook Dual USB */ 1944 - if (machine_is_compatible("PowerBook3,1") || 1945 - machine_is_compatible("PowerBook4,1")) 1944 + if (of_machine_is_compatible("PowerBook3,1") || 1945 + of_machine_is_compatible("PowerBook4,1")) 1946 1946 default_vmode = VMODE_1024_768_60; 1947 1947 1948 1948 /* PowerBook Titanium */ 1949 - if (machine_is_compatible("PowerBook3,2")) 1949 + if (of_machine_is_compatible("PowerBook3,2")) 1950 1950 default_vmode = VMODE_1152_768_60; 1951 1951 1952 1952 if (default_cmode > 16)
+5 -5
drivers/video/aty/atyfb_base.c
··· 2439 2439 * The Apple iBook1 uses non-standard memory frequencies. 2440 2440 * We detect it and set the frequency manually. 2441 2441 */ 2442 - if (machine_is_compatible("PowerBook2,1")) { 2442 + if (of_machine_is_compatible("PowerBook2,1")) { 2443 2443 par->pll_limits.mclk = 70; 2444 2444 par->pll_limits.xclk = 53; 2445 2445 } ··· 2659 2659 FBINFO_HWACCEL_YPAN; 2660 2660 2661 2661 #ifdef CONFIG_PMAC_BACKLIGHT 2662 - if (M64_HAS(G3_PB_1_1) && machine_is_compatible("PowerBook1,1")) { 2662 + if (M64_HAS(G3_PB_1_1) && of_machine_is_compatible("PowerBook1,1")) { 2663 2663 /* 2664 2664 * these bits let the 101 powerbook 2665 2665 * wake up from sleep -- paulus ··· 2690 2690 if (M64_HAS(G3_PB_1024x768)) 2691 2691 /* G3 PowerBook with 1024x768 LCD */ 2692 2692 default_vmode = VMODE_1024_768_60; 2693 - else if (machine_is_compatible("iMac")) 2693 + else if (of_machine_is_compatible("iMac")) 2694 2694 default_vmode = VMODE_1024_768_75; 2695 - else if (machine_is_compatible("PowerBook2,1")) 2695 + else if (of_machine_is_compatible("PowerBook2,1")) 2696 2696 /* iBook with 800x600 LCD */ 2697 2697 default_vmode = VMODE_800_600_60; 2698 2698 else ··· 3104 3104 } 3105 3105 3106 3106 dp = pci_device_to_OF_node(pdev); 3107 - if (node == dp->node) { 3107 + if (node == dp->phandle) { 3108 3108 struct fb_var_screeninfo *var = &default_var; 3109 3109 unsigned int N, P, Q, M, T, R; 3110 3110 u32 v_total, h_total;
+3 -3
drivers/video/aty/radeon_backlight.c
··· 175 175 176 176 #ifdef CONFIG_PMAC_BACKLIGHT 177 177 pdata->negative = pdata->negative || 178 - machine_is_compatible("PowerBook4,3") || 179 - machine_is_compatible("PowerBook6,3") || 180 - machine_is_compatible("PowerBook6,5"); 178 + of_machine_is_compatible("PowerBook4,3") || 179 + of_machine_is_compatible("PowerBook6,3") || 180 + of_machine_is_compatible("PowerBook6,5"); 181 181 #endif 182 182 183 183 rinfo->info->bl_dev = bd;
+10 -1
drivers/video/efifb.c
··· 161 161 return 0; 162 162 } 163 163 164 + static void efifb_destroy(struct fb_info *info) 165 + { 166 + if (info->screen_base) 167 + iounmap(info->screen_base); 168 + release_mem_region(info->aperture_base, info->aperture_size); 169 + framebuffer_release(info); 170 + } 171 + 164 172 static struct fb_ops efifb_ops = { 165 173 .owner = THIS_MODULE, 174 + .fb_destroy = efifb_destroy, 166 175 .fb_setcolreg = efifb_setcolreg, 167 176 .fb_fillrect = cfb_fillrect, 168 177 .fb_copyarea = cfb_copyarea, ··· 290 281 info->par = NULL; 291 282 292 283 info->aperture_base = efifb_fix.smem_start; 293 - info->aperture_size = size_total; 284 + info->aperture_size = size_remap; 294 285 295 286 info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); 296 287 if (!info->screen_base) {
+101 -8
drivers/virtio/virtio_balloon.c
··· 28 28 struct virtio_balloon 29 29 { 30 30 struct virtio_device *vdev; 31 - struct virtqueue *inflate_vq, *deflate_vq; 31 + struct virtqueue *inflate_vq, *deflate_vq, *stats_vq; 32 32 33 33 /* Where the ballooning thread waits for config to change. */ 34 34 wait_queue_head_t config_change; ··· 49 49 /* The array of pfns we tell the Host about. */ 50 50 unsigned int num_pfns; 51 51 u32 pfns[256]; 52 + 53 + /* Memory statistics */ 54 + int need_stats_update; 55 + struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR]; 52 56 }; 53 57 54 58 static struct virtio_device_id id_table[] = { ··· 158 154 } 159 155 } 160 156 157 + static inline void update_stat(struct virtio_balloon *vb, int idx, 158 + u16 tag, u64 val) 159 + { 160 + BUG_ON(idx >= VIRTIO_BALLOON_S_NR); 161 + vb->stats[idx].tag = tag; 162 + vb->stats[idx].val = val; 163 + } 164 + 165 + #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT) 166 + 167 + static void update_balloon_stats(struct virtio_balloon *vb) 168 + { 169 + unsigned long events[NR_VM_EVENT_ITEMS]; 170 + struct sysinfo i; 171 + int idx = 0; 172 + 173 + all_vm_events(events); 174 + si_meminfo(&i); 175 + 176 + update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN, 177 + pages_to_bytes(events[PSWPIN])); 178 + update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT, 179 + pages_to_bytes(events[PSWPOUT])); 180 + update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]); 181 + update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]); 182 + update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE, 183 + pages_to_bytes(i.freeram)); 184 + update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT, 185 + pages_to_bytes(i.totalram)); 186 + } 187 + 188 + /* 189 + * While most virtqueues communicate guest-initiated requests to the hypervisor, 190 + * the stats queue operates in reverse. The driver initializes the virtqueue 191 + * with a single buffer. 
From that point forward, all conversations consist of 192 + * a hypervisor request (a call to this function) which directs us to refill 193 + * the virtqueue with a fresh stats buffer. Since stats collection can sleep, 194 + * we notify our kthread which does the actual work via stats_handle_request(). 195 + */ 196 + static void stats_request(struct virtqueue *vq) 197 + { 198 + struct virtio_balloon *vb; 199 + unsigned int len; 200 + 201 + vb = vq->vq_ops->get_buf(vq, &len); 202 + if (!vb) 203 + return; 204 + vb->need_stats_update = 1; 205 + wake_up(&vb->config_change); 206 + } 207 + 208 + static void stats_handle_request(struct virtio_balloon *vb) 209 + { 210 + struct virtqueue *vq; 211 + struct scatterlist sg; 212 + 213 + vb->need_stats_update = 0; 214 + update_balloon_stats(vb); 215 + 216 + vq = vb->stats_vq; 217 + sg_init_one(&sg, vb->stats, sizeof(vb->stats)); 218 + if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0) 219 + BUG(); 220 + vq->vq_ops->kick(vq); 221 + } 222 + 161 223 static void virtballoon_changed(struct virtio_device *vdev) 162 224 { 163 225 struct virtio_balloon *vb = vdev->priv; ··· 260 190 try_to_freeze(); 261 191 wait_event_interruptible(vb->config_change, 262 192 (diff = towards_target(vb)) != 0 193 + || vb->need_stats_update 263 194 || kthread_should_stop() 264 195 || freezing(current)); 196 + if (vb->need_stats_update) 197 + stats_handle_request(vb); 265 198 if (diff > 0) 266 199 fill_balloon(vb, diff); 267 200 else if (diff < 0) ··· 277 204 static int virtballoon_probe(struct virtio_device *vdev) 278 205 { 279 206 struct virtio_balloon *vb; 280 - struct virtqueue *vqs[2]; 281 - vq_callback_t *callbacks[] = { balloon_ack, balloon_ack }; 282 - const char *names[] = { "inflate", "deflate" }; 283 - int err; 207 + struct virtqueue *vqs[3]; 208 + vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request }; 209 + const char *names[] = { "inflate", "deflate", "stats" }; 210 + int err, nvqs; 284 211 285 212 vdev->priv = vb = 
kmalloc(sizeof(*vb), GFP_KERNEL); 286 213 if (!vb) { ··· 292 219 vb->num_pages = 0; 293 220 init_waitqueue_head(&vb->config_change); 294 221 vb->vdev = vdev; 222 + vb->need_stats_update = 0; 295 223 296 - /* We expect two virtqueues. */ 297 - err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names); 224 + /* We expect two virtqueues: inflate and deflate, 225 + * and optionally stat. */ 226 + nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2; 227 + err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names); 298 228 if (err) 299 229 goto out_free_vb; 300 230 301 231 vb->inflate_vq = vqs[0]; 302 232 vb->deflate_vq = vqs[1]; 233 + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { 234 + struct scatterlist sg; 235 + vb->stats_vq = vqs[2]; 236 + 237 + /* 238 + * Prime this virtqueue with one buffer so the hypervisor can 239 + * use it to signal us later. 240 + */ 241 + sg_init_one(&sg, vb->stats, sizeof vb->stats); 242 + if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq, 243 + &sg, 1, 0, vb) < 0) 244 + BUG(); 245 + vb->stats_vq->vq_ops->kick(vb->stats_vq); 246 + } 303 247 304 248 vb->thread = kthread_run(balloon, vb, "vballoon"); 305 249 if (IS_ERR(vb->thread)) { ··· 354 264 kfree(vb); 355 265 } 356 266 357 - static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST }; 267 + static unsigned int features[] = { 268 + VIRTIO_BALLOON_F_MUST_TELL_HOST, 269 + VIRTIO_BALLOON_F_STATS_VQ, 270 + }; 358 271 359 272 static struct virtio_driver virtio_balloon_driver = { 360 273 .feature_table = features,
+1 -1
drivers/virtio/virtio_pci.c
··· 702 702 .name = "virtio-pci", 703 703 .id_table = virtio_pci_id_table, 704 704 .probe = virtio_pci_probe, 705 - .remove = virtio_pci_remove, 705 + .remove = __devexit_p(virtio_pci_remove), 706 706 #ifdef CONFIG_PM 707 707 .suspend = virtio_pci_suspend, 708 708 .resume = virtio_pci_resume,
+52 -7
drivers/virtio/virtio_ring.c
··· 21 21 #include <linux/virtio_config.h> 22 22 #include <linux/device.h> 23 23 24 + /* virtio guest is communicating with a virtual "device" that actually runs on 25 + * a host processor. Memory barriers are used to control SMP effects. */ 26 + #ifdef CONFIG_SMP 27 + /* Where possible, use SMP barriers which are more lightweight than mandatory 28 + * barriers, because mandatory barriers control MMIO effects on accesses 29 + * through relaxed memory I/O windows (which virtio does not use). */ 30 + #define virtio_mb() smp_mb() 31 + #define virtio_rmb() smp_rmb() 32 + #define virtio_wmb() smp_wmb() 33 + #else 34 + /* We must force memory ordering even if guest is UP since host could be 35 + * running on another CPU, but SMP barriers are defined to barrier() in that 36 + * configuration. So fall back to mandatory barriers instead. */ 37 + #define virtio_mb() mb() 38 + #define virtio_rmb() rmb() 39 + #define virtio_wmb() wmb() 40 + #endif 41 + 24 42 #ifdef DEBUG 25 43 /* For development, we want to crash whenever the ring is screwed. */ 26 44 #define BAD_RING(_vq, fmt, args...) \ ··· 54 36 panic("%s:in_use = %i\n", \ 55 37 (_vq)->vq.name, (_vq)->in_use); \ 56 38 (_vq)->in_use = __LINE__; \ 57 - mb(); \ 58 39 } while (0) 59 40 #define END_USE(_vq) \ 60 - do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0) 41 + do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0) 61 42 #else 62 43 #define BAD_RING(_vq, fmt, args...) \ 63 44 do { \ ··· 238 221 START_USE(vq); 239 222 /* Descriptors and available array need to be set before we expose the 240 223 * new available array entries. */ 241 - wmb(); 224 + virtio_wmb(); 242 225 243 226 vq->vring.avail->idx += vq->num_added; 244 227 vq->num_added = 0; 245 228 246 229 /* Need to update avail index before checking if we should notify */ 247 - mb(); 230 + virtio_mb(); 248 231 249 232 if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY)) 250 233 /* Prod other side to tell it about changes. 
*/ ··· 303 286 } 304 287 305 288 /* Only get used array entries after they have been exposed by host. */ 306 - rmb(); 289 + virtio_rmb(); 307 290 308 291 i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id; 309 292 *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len; ··· 341 324 /* We optimistically turn back on interrupts, then check if there was 342 325 * more to do. */ 343 326 vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; 344 - mb(); 327 + virtio_mb(); 345 328 if (unlikely(more_used(vq))) { 346 329 END_USE(vq); 347 330 return false; ··· 349 332 350 333 END_USE(vq); 351 334 return true; 335 + } 336 + 337 + static void *vring_detach_unused_buf(struct virtqueue *_vq) 338 + { 339 + struct vring_virtqueue *vq = to_vvq(_vq); 340 + unsigned int i; 341 + void *buf; 342 + 343 + START_USE(vq); 344 + 345 + for (i = 0; i < vq->vring.num; i++) { 346 + if (!vq->data[i]) 347 + continue; 348 + /* detach_buf clears data, so grab it now. */ 349 + buf = vq->data[i]; 350 + detach_buf(vq, i); 351 + END_USE(vq); 352 + return buf; 353 + } 354 + /* That should have freed everything. */ 355 + BUG_ON(vq->num_free != vq->vring.num); 356 + 357 + END_USE(vq); 358 + return NULL; 352 359 } 353 360 354 361 irqreturn_t vring_interrupt(int irq, void *_vq) ··· 401 360 .kick = vring_kick, 402 361 .disable_cb = vring_disable_cb, 403 362 .enable_cb = vring_enable_cb, 363 + .detach_unused_buf = vring_detach_unused_buf, 404 364 }; 405 365 406 366 struct virtqueue *vring_new_virtqueue(unsigned int num, ··· 448 406 /* Put everything in free lists. */ 449 407 vq->num_free = num; 450 408 vq->free_head = 0; 451 - for (i = 0; i < num-1; i++) 409 + for (i = 0; i < num-1; i++) { 452 410 vq->vring.desc[i].next = i+1; 411 + vq->data[i] = NULL; 412 + } 413 + vq->data[i] = NULL; 453 414 454 415 return &vq->vq; 455 416 }
+7 -6
drivers/watchdog/bfin_wdt.c
··· 1 1 /* 2 2 * Blackfin On-Chip Watchdog Driver 3 - * Supports BF53[123]/BF53[467]/BF54[2489]/BF561 4 3 * 5 4 * Originally based on softdog.c 6 - * Copyright 2006-2007 Analog Devices Inc. 5 + * Copyright 2006-2010 Analog Devices Inc. 7 6 * Copyright 2006-2007 Michele d'Amico 8 7 * Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk> 9 8 * ··· 136 137 */ 137 138 static int bfin_wdt_set_timeout(unsigned long t) 138 139 { 139 - u32 cnt; 140 + u32 cnt, max_t, sclk; 140 141 unsigned long flags; 141 142 142 - stampit(); 143 + sclk = get_sclk(); 144 + max_t = -1 / sclk; 145 + cnt = t * sclk; 146 + stamp("maxtimeout=%us newtimeout=%lus (cnt=%#x)", max_t, t, cnt); 143 147 144 - cnt = t * get_sclk(); 145 - if (cnt < get_sclk()) { 148 + if (t > max_t) { 146 149 printk(KERN_WARNING PFX "timeout value is too large\n"); 147 150 return -EINVAL; 148 151 }
+11 -1
fs/cachefiles/namei.c
··· 348 348 dir = dget_parent(object->dentry); 349 349 350 350 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); 351 - ret = cachefiles_bury_object(cache, dir, object->dentry); 351 + 352 + /* we need to check that our parent is _still_ our parent - it may have 353 + * been renamed */ 354 + if (dir == object->dentry->d_parent) { 355 + ret = cachefiles_bury_object(cache, dir, object->dentry); 356 + } else { 357 + /* it got moved, presumably by cachefilesd culling it, so it's 358 + * no longer in the key path and we can ignore it */ 359 + mutex_unlock(&dir->d_inode->i_mutex); 360 + ret = 0; 361 + } 352 362 353 363 dput(dir); 354 364 _leave(" = %d", ret);
-1
fs/exec.c
··· 637 637 * will align it up. 638 638 */ 639 639 rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK; 640 - rlim_stack = min(rlim_stack, stack_size); 641 640 #ifdef CONFIG_STACK_GROWSUP 642 641 if (stack_size + stack_expand > rlim_stack) 643 642 stack_base = vma->vm_start + rlim_stack;
+12 -2
fs/namei.c
··· 823 823 } 824 824 825 825 /* 826 + * This is a temporary kludge to deal with "automount" symlinks; proper 827 + * solution is to trigger them on follow_mount(), so that do_lookup() 828 + * would DTRT. To be killed before 2.6.34-final. 829 + */ 830 + static inline int follow_on_final(struct inode *inode, unsigned lookup_flags) 831 + { 832 + return inode && unlikely(inode->i_op->follow_link) && 833 + ((lookup_flags & LOOKUP_FOLLOW) || S_ISDIR(inode->i_mode)); 834 + } 835 + 836 + /* 826 837 * Name resolution. 827 838 * This is the basic name resolution function, turning a pathname into 828 839 * the final dentry. We expect 'base' to be positive and a directory. ··· 953 942 if (err) 954 943 break; 955 944 inode = next.dentry->d_inode; 956 - if ((lookup_flags & LOOKUP_FOLLOW) 957 - && inode && inode->i_op->follow_link) { 945 + if (follow_on_final(inode, lookup_flags)) { 958 946 err = do_follow_link(&next, nd); 959 947 if (err) 960 948 goto return_err;
+2 -1
fs/nfsd/vfs.c
··· 752 752 flags, current_cred()); 753 753 if (IS_ERR(*filp)) 754 754 host_err = PTR_ERR(*filp); 755 - host_err = ima_file_check(*filp, access); 755 + else 756 + host_err = ima_file_check(*filp, access); 756 757 out_nfserr: 757 758 err = nfserrno(host_err); 758 759 out:
+19 -5
fs/proc/base.c
··· 2369 2369 { 2370 2370 struct pid_namespace *ns = dentry->d_sb->s_fs_info; 2371 2371 pid_t tgid = task_tgid_nr_ns(current, ns); 2372 - char tmp[PROC_NUMBUF]; 2373 - if (!tgid) 2374 - return ERR_PTR(-ENOENT); 2375 - sprintf(tmp, "%d", task_tgid_nr_ns(current, ns)); 2376 - return ERR_PTR(vfs_follow_link(nd,tmp)); 2372 + char *name = ERR_PTR(-ENOENT); 2373 + if (tgid) { 2374 + name = __getname(); 2375 + if (!name) 2376 + name = ERR_PTR(-ENOMEM); 2377 + else 2378 + sprintf(name, "%d", tgid); 2379 + } 2380 + nd_set_link(nd, name); 2381 + return NULL; 2382 + } 2383 + 2384 + static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd, 2385 + void *cookie) 2386 + { 2387 + char *s = nd_get_link(nd); 2388 + if (!IS_ERR(s)) 2389 + __putname(s); 2377 2390 } 2378 2391 2379 2392 static const struct inode_operations proc_self_inode_operations = { 2380 2393 .readlink = proc_self_readlink, 2381 2394 .follow_link = proc_self_follow_link, 2395 + .put_link = proc_self_put_link, 2382 2396 }; 2383 2397 2384 2398 /*
+5 -2
fs/proc/proc_devtree.c
··· 10 10 #include <linux/seq_file.h> 11 11 #include <linux/stat.h> 12 12 #include <linux/string.h> 13 + #include <linux/of.h> 14 + #include <linux/module.h> 13 15 #include <asm/prom.h> 14 16 #include <asm/uaccess.h> 15 17 #include "internal.h" 16 18 17 - #ifndef HAVE_ARCH_DEVTREE_FIXUPS 18 19 static inline void set_node_proc_entry(struct device_node *np, 19 20 struct proc_dir_entry *de) 20 21 { 21 - } 22 + #ifdef HAVE_ARCH_DEVTREE_FIXUPS 23 + np->pde = de; 22 24 #endif 25 + } 23 26 24 27 static struct proc_dir_entry *proc_device_tree; 25 28
+16 -17
fs/sysfs/inode.c
··· 81 81 if (!sd_attrs) 82 82 return -ENOMEM; 83 83 sd->s_iattr = sd_attrs; 84 - } else { 85 - /* attributes were changed at least once in past */ 86 - iattrs = &sd_attrs->ia_iattr; 84 + } 85 + /* attributes were changed at least once in past */ 86 + iattrs = &sd_attrs->ia_iattr; 87 87 88 - if (ia_valid & ATTR_UID) 89 - iattrs->ia_uid = iattr->ia_uid; 90 - if (ia_valid & ATTR_GID) 91 - iattrs->ia_gid = iattr->ia_gid; 92 - if (ia_valid & ATTR_ATIME) 93 - iattrs->ia_atime = iattr->ia_atime; 94 - if (ia_valid & ATTR_MTIME) 95 - iattrs->ia_mtime = iattr->ia_mtime; 96 - if (ia_valid & ATTR_CTIME) 97 - iattrs->ia_ctime = iattr->ia_ctime; 98 - if (ia_valid & ATTR_MODE) { 99 - umode_t mode = iattr->ia_mode; 100 - iattrs->ia_mode = sd->s_mode = mode; 101 - } 88 + if (ia_valid & ATTR_UID) 89 + iattrs->ia_uid = iattr->ia_uid; 90 + if (ia_valid & ATTR_GID) 91 + iattrs->ia_gid = iattr->ia_gid; 92 + if (ia_valid & ATTR_ATIME) 93 + iattrs->ia_atime = iattr->ia_atime; 94 + if (ia_valid & ATTR_MTIME) 95 + iattrs->ia_mtime = iattr->ia_mtime; 96 + if (ia_valid & ATTR_CTIME) 97 + iattrs->ia_ctime = iattr->ia_ctime; 98 + if (ia_valid & ATTR_MODE) { 99 + umode_t mode = iattr->ia_mode; 100 + iattrs->ia_mode = sd->s_mode = mode; 102 101 } 103 102 return 0; 104 103 }
+5 -1
include/linux/amba/bus.h
··· 1 1 /* 2 - * linux/include/asm-arm/hardware/amba.h 2 + * linux/include/amba/bus.h 3 + * 4 + * This device type deals with ARM PrimeCells and anything else that 5 + * presents a proper CID (0xB105F00D) at the end of the I/O register 6 + * region or that is derived from a PrimeCell. 3 7 * 4 8 * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. 5 9 *
+1 -3
include/linux/blkdev.h
··· 461 461 #define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ 462 462 #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ 463 463 #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ 464 - #define QUEUE_FLAG_CQ 16 /* hardware does queuing */ 465 - #define QUEUE_FLAG_DISCARD 17 /* supports DISCARD */ 464 + #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ 466 465 467 466 #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 468 467 (1 << QUEUE_FLAG_CLUSTER) | \ ··· 585 586 586 587 #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) 587 588 #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) 588 - #define blk_queue_queuing(q) test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags) 589 589 #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) 590 590 #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) 591 591 #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+1 -1
include/linux/fs.h
··· 729 729 uid_t i_uid; 730 730 gid_t i_gid; 731 731 dev_t i_rdev; 732 + unsigned int i_blkbits; 732 733 u64 i_version; 733 734 loff_t i_size; 734 735 #ifdef __NEED_I_SIZE_ORDERED ··· 739 738 struct timespec i_mtime; 740 739 struct timespec i_ctime; 741 740 blkcnt_t i_blocks; 742 - unsigned int i_blkbits; 743 741 unsigned short i_bytes; 744 742 umode_t i_mode; 745 743 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
+3 -2
include/linux/hid.h
··· 501 501 void (*hiddev_report_event) (struct hid_device *, struct hid_report *); 502 502 503 503 /* handler for raw output data, used by hidraw */ 504 - int (*hid_output_raw_report) (struct hid_device *, __u8 *, size_t); 504 + int (*hid_output_raw_report) (struct hid_device *, __u8 *, size_t, unsigned char); 505 505 506 506 /* debugging support via debugfs */ 507 507 unsigned short debug; ··· 663 663 664 664 /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ 665 665 /* We ignore a few input applications that are not widely used */ 666 - #define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002)) 666 + #define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || ((a >= 0x000d0002) && (a <= 0x000d0006))) 667 667 668 668 /* HID core API */ 669 669 ··· 690 690 int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field); 691 691 void hid_output_report(struct hid_report *report, __u8 *data); 692 692 struct hid_device *hid_allocate_device(void); 693 + struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id); 693 694 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); 694 695 int hid_check_keys_pressed(struct hid_device *hid); 695 696 int hid_connect(struct hid_device *hid, unsigned int connect_mask);
+43
include/linux/input.h
··· 376 376 #define KEY_DISPLAY_OFF 245 /* display device to off state */ 377 377 378 378 #define KEY_WIMAX 246 379 + #define KEY_RFKILL 247 /* Key that controls all radios */ 379 380 380 381 /* Range 248 - 255 is reserved for special needs of AT keyboard driver */ 381 382 ··· 597 596 #define KEY_NUMERIC_POUND 0x20b 598 597 599 598 #define KEY_CAMERA_FOCUS 0x210 599 + 600 + #define BTN_TRIGGER_HAPPY 0x2c0 601 + #define BTN_TRIGGER_HAPPY1 0x2c0 602 + #define BTN_TRIGGER_HAPPY2 0x2c1 603 + #define BTN_TRIGGER_HAPPY3 0x2c2 604 + #define BTN_TRIGGER_HAPPY4 0x2c3 605 + #define BTN_TRIGGER_HAPPY5 0x2c4 606 + #define BTN_TRIGGER_HAPPY6 0x2c5 607 + #define BTN_TRIGGER_HAPPY7 0x2c6 608 + #define BTN_TRIGGER_HAPPY8 0x2c7 609 + #define BTN_TRIGGER_HAPPY9 0x2c8 610 + #define BTN_TRIGGER_HAPPY10 0x2c9 611 + #define BTN_TRIGGER_HAPPY11 0x2ca 612 + #define BTN_TRIGGER_HAPPY12 0x2cb 613 + #define BTN_TRIGGER_HAPPY13 0x2cc 614 + #define BTN_TRIGGER_HAPPY14 0x2cd 615 + #define BTN_TRIGGER_HAPPY15 0x2ce 616 + #define BTN_TRIGGER_HAPPY16 0x2cf 617 + #define BTN_TRIGGER_HAPPY17 0x2d0 618 + #define BTN_TRIGGER_HAPPY18 0x2d1 619 + #define BTN_TRIGGER_HAPPY19 0x2d2 620 + #define BTN_TRIGGER_HAPPY20 0x2d3 621 + #define BTN_TRIGGER_HAPPY21 0x2d4 622 + #define BTN_TRIGGER_HAPPY22 0x2d5 623 + #define BTN_TRIGGER_HAPPY23 0x2d6 624 + #define BTN_TRIGGER_HAPPY24 0x2d7 625 + #define BTN_TRIGGER_HAPPY25 0x2d8 626 + #define BTN_TRIGGER_HAPPY26 0x2d9 627 + #define BTN_TRIGGER_HAPPY27 0x2da 628 + #define BTN_TRIGGER_HAPPY28 0x2db 629 + #define BTN_TRIGGER_HAPPY29 0x2dc 630 + #define BTN_TRIGGER_HAPPY30 0x2dd 631 + #define BTN_TRIGGER_HAPPY31 0x2de 632 + #define BTN_TRIGGER_HAPPY32 0x2df 633 + #define BTN_TRIGGER_HAPPY33 0x2e0 634 + #define BTN_TRIGGER_HAPPY34 0x2e1 635 + #define BTN_TRIGGER_HAPPY35 0x2e2 636 + #define BTN_TRIGGER_HAPPY36 0x2e3 637 + #define BTN_TRIGGER_HAPPY37 0x2e4 638 + #define BTN_TRIGGER_HAPPY38 0x2e5 639 + #define BTN_TRIGGER_HAPPY39 0x2e6 640 + #define BTN_TRIGGER_HAPPY40 0x2e7
600 641 601 642 /* We avoid low common keys in module aliases so they don't get huge. */ 602 643 #define KEY_MIN_INTERESTING KEY_MUTE
+1 -1
include/linux/kfifo.h
··· 124 124 */ 125 125 static inline bool kfifo_initialized(struct kfifo *fifo) 126 126 { 127 - return fifo->buffer != 0; 127 + return fifo->buffer != NULL; 128 128 } 129 129 130 130 /**
+44 -18
include/linux/of.h
··· 19 19 #include <linux/bitops.h> 20 20 #include <linux/kref.h> 21 21 #include <linux/mod_devicetable.h> 22 + #include <linux/spinlock.h> 23 + 24 + #include <asm/byteorder.h> 25 + 26 + #ifdef CONFIG_OF 22 27 23 28 typedef u32 phandle; 24 29 typedef u32 ihandle; ··· 44 39 struct device_node { 45 40 const char *name; 46 41 const char *type; 47 - phandle node; 48 - #if !defined(CONFIG_SPARC) 49 - phandle linux_phandle; 50 - #endif 42 + phandle phandle; 51 43 char *full_name; 52 44 53 45 struct property *properties; ··· 65 63 #endif 66 64 }; 67 65 66 + /* Pointer for first entry in chain of all nodes. */ 67 + extern struct device_node *allnodes; 68 + extern struct device_node *of_chosen; 69 + extern rwlock_t devtree_lock; 70 + 68 71 static inline int of_node_check_flag(struct device_node *n, unsigned long flag) 69 72 { 70 73 return test_bit(flag, &n->_flags); ··· 78 71 static inline void of_node_set_flag(struct device_node *n, unsigned long flag) 79 72 { 80 73 set_bit(flag, &n->_flags); 81 - } 82 - 83 - static inline void 84 - set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de) 85 - { 86 - dn->pde = de; 87 74 } 88 75 89 76 extern struct device_node *of_find_all_nodes(struct device_node *prev); ··· 102 101 */ 103 102 104 103 /* Helper to read a big number; size is in cells (not bytes) */ 105 - static inline u64 of_read_number(const u32 *cell, int size) 104 + static inline u64 of_read_number(const __be32 *cell, int size) 106 105 { 107 106 u64 r = 0; 108 107 while (size--) 109 - r = (r << 32) | *(cell++); 108 + r = (r << 32) | be32_to_cpu(*(cell++)); 110 109 return r; 111 110 } 112 111 113 112 /* Like of_read_number, but we want an unsigned long result */ 114 - #ifdef CONFIG_PPC32 115 - static inline unsigned long of_read_ulong(const u32 *cell, int size) 113 + static inline unsigned long of_read_ulong(const __be32 *cell, int size) 116 114 { 117 - return cell[size-1]; 115 + /* toss away upper bits if unsigned long is smaller than u64 */ 116 + return 
of_read_number(cell, size); 118 117 } 119 - #else 120 - #define of_read_ulong(cell, size) of_read_number(cell, size) 121 - #endif 122 118 123 119 #include <asm/prom.h> 120 + 121 + /* Default #address and #size cells. Allow arch asm/prom.h to override */ 122 + #if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT) 123 + #define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1 124 + #define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1 125 + #endif 126 + 127 + /* Default string compare functions, Allow arch asm/prom.h to override */ 128 + #if !defined(of_compat_cmp) 129 + #define of_compat_cmp(s1, s2, l) strncasecmp((s1), (s2), (l)) 130 + #define of_prop_cmp(s1, s2) strcmp((s1), (s2)) 131 + #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) 132 + #endif 124 133 125 134 /* flag descriptions */ 126 135 #define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */ ··· 198 187 const char *list_name, const char *cells_name, int index, 199 188 struct device_node **out_node, const void **out_args); 200 189 190 + extern int of_machine_is_compatible(const char *compat); 191 + 192 + extern int prom_add_property(struct device_node* np, struct property* prop); 193 + extern int prom_remove_property(struct device_node *np, struct property *prop); 194 + extern int prom_update_property(struct device_node *np, 195 + struct property *newprop, 196 + struct property *oldprop); 197 + 198 + #if defined(CONFIG_OF_DYNAMIC) 199 + /* For updating the device tree at runtime */ 200 + extern void of_attach_node(struct device_node *); 201 + extern void of_detach_node(struct device_node *); 202 + #endif 203 + 204 + #endif /* CONFIG_OF */ 201 205 #endif /* _LINUX_OF_H */
+46 -29
include/linux/of_fdt.h
··· 42 42 * ends when size is 0 43 43 */ 44 44 struct boot_param_header { 45 - u32 magic; /* magic word OF_DT_HEADER */ 46 - u32 totalsize; /* total size of DT block */ 47 - u32 off_dt_struct; /* offset to structure */ 48 - u32 off_dt_strings; /* offset to strings */ 49 - u32 off_mem_rsvmap; /* offset to memory reserve map */ 50 - u32 version; /* format version */ 51 - u32 last_comp_version; /* last compatible version */ 45 + __be32 magic; /* magic word OF_DT_HEADER */ 46 + __be32 totalsize; /* total size of DT block */ 47 + __be32 off_dt_struct; /* offset to structure */ 48 + __be32 off_dt_strings; /* offset to strings */ 49 + __be32 off_mem_rsvmap; /* offset to memory reserve map */ 50 + __be32 version; /* format version */ 51 + __be32 last_comp_version; /* last compatible version */ 52 52 /* version 2 fields below */ 53 - u32 boot_cpuid_phys; /* Physical CPU id we're booting on */ 53 + __be32 boot_cpuid_phys; /* Physical CPU id we're booting on */ 54 54 /* version 3 fields below */ 55 - u32 dt_strings_size; /* size of the DT strings block */ 55 + __be32 dt_strings_size; /* size of the DT strings block */ 56 56 /* version 17 fields below */ 57 - u32 dt_struct_size; /* size of the DT structure block */ 57 + __be32 dt_struct_size; /* size of the DT structure block */ 58 58 }; 59 59 60 + /* TBD: Temporary export of fdt globals - remove when code fully merged */ 61 + extern int __initdata dt_root_addr_cells; 62 + extern int __initdata dt_root_size_cells; 63 + extern struct boot_param_header *initial_boot_params; 64 + 60 65 /* For scanning the flat device-tree at boot time */ 61 - extern int __init of_scan_flat_dt(int (*it)(unsigned long node, 62 - const char *uname, int depth, 63 - void *data), 64 - void *data); 65 - extern void __init *of_get_flat_dt_prop(unsigned long node, const char *name, 66 - unsigned long *size); 67 - extern int __init of_flat_dt_is_compatible(unsigned long node, 68 - const char *name); 69 - extern unsigned long __init 
of_get_flat_dt_root(void); 66 + extern char *find_flat_dt_string(u32 offset); 67 + extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, 68 + int depth, void *data), 69 + void *data); 70 + extern void *of_get_flat_dt_prop(unsigned long node, const char *name, 71 + unsigned long *size); 72 + extern int of_flat_dt_is_compatible(unsigned long node, const char *name); 73 + extern unsigned long of_get_flat_dt_root(void); 74 + extern void early_init_dt_scan_chosen_arch(unsigned long node); 75 + extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, 76 + int depth, void *data); 77 + extern void early_init_dt_check_for_initrd(unsigned long node); 78 + extern int early_init_dt_scan_memory(unsigned long node, const char *uname, 79 + int depth, void *data); 80 + extern void early_init_dt_add_memory_arch(u64 base, u64 size); 81 + extern u64 early_init_dt_alloc_memory_arch(u64 size, u64 align); 82 + extern u64 dt_mem_next_cell(int s, __be32 **cellp); 83 + 84 + /* 85 + * If BLK_DEV_INITRD, the fdt early init code will call this function, 86 + * to be provided by the arch code. start and end are specified as 87 + * physical addresses. 
88 + */ 89 + #ifdef CONFIG_BLK_DEV_INITRD 90 + extern void early_init_dt_setup_initrd_arch(unsigned long start, 91 + unsigned long end); 92 + #endif 93 + 94 + /* Early flat tree scan hooks */ 95 + extern int early_init_dt_scan_root(unsigned long node, const char *uname, 96 + int depth, void *data); 70 97 71 98 /* Other Prototypes */ 72 - extern void finish_device_tree(void); 73 99 extern void unflatten_device_tree(void); 74 100 extern void early_init_devtree(void *); 75 - extern int machine_is_compatible(const char *compat); 76 - extern void print_properties(struct device_node *node); 77 - extern int prom_n_intr_cells(struct device_node* np); 78 - extern void prom_get_irq_senses(unsigned char *senses, int off, int max); 79 - extern int prom_add_property(struct device_node* np, struct property* prop); 80 - extern int prom_remove_property(struct device_node *np, struct property *prop); 81 - extern int prom_update_property(struct device_node *np, 82 - struct property *newprop, 83 - struct property *oldprop); 84 101 85 102 #endif /* __ASSEMBLY__ */ 86 103 #endif /* _LINUX_OF_FDT_H */
+5
include/linux/spi/dw_spi.h
··· 90 90 unsigned long paddr; 91 91 u32 iolen; 92 92 int irq; 93 + u32 fifo_len; /* depth of the FIFO buffer */ 93 94 u32 max_freq; /* max bus freq supported */ 94 95 95 96 u16 bus_num; ··· 172 171 { 173 172 if (cs > dws->num_cs) 174 173 return; 174 + 175 + if (dws->cs_control) 176 + dws->cs_control(1); 177 + 175 178 dw_writel(dws, ser, 1 << cs); 176 179 } 177 180
+4
include/linux/virtio.h
··· 51 51 * This re-enables callbacks; it returns "false" if there are pending 52 52 * buffers in the queue, to detect a possible race between the driver 53 53 * checking for more work, and enabling callbacks. 54 + * @detach_unused_buf: detach first unused buffer 55 + * vq: the struct virtqueue we're talking about. 56 + * Returns NULL or the "data" token handed to add_buf 54 57 * 55 58 * Locking rules are straightforward: the driver is responsible for 56 59 * locking. No two operations may be invoked simultaneously, with the exception ··· 74 71 75 72 void (*disable_cb)(struct virtqueue *vq); 76 73 bool (*enable_cb)(struct virtqueue *vq); 74 + void *(*detach_unused_buf)(struct virtqueue *vq); 77 75 }; 78 76 79 77 /**
+15
include/linux/virtio_balloon.h
··· 7 7 8 8 /* The feature bitmap for virtio balloon */ 9 9 #define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */ 10 + #define VIRTIO_BALLOON_F_STATS_VQ 1 /* Memory Stats virtqueue */ 10 11 11 12 /* Size of a PFN in the balloon interface. */ 12 13 #define VIRTIO_BALLOON_PFN_SHIFT 12 ··· 19 18 /* Number of pages we've actually got in balloon. */ 20 19 __le32 actual; 21 20 }; 21 + 22 + #define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */ 23 + #define VIRTIO_BALLOON_S_SWAP_OUT 1 /* Amount of memory swapped out */ 24 + #define VIRTIO_BALLOON_S_MAJFLT 2 /* Number of major faults */ 25 + #define VIRTIO_BALLOON_S_MINFLT 3 /* Number of minor faults */ 26 + #define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */ 27 + #define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */ 28 + #define VIRTIO_BALLOON_S_NR 6 29 + 30 + struct virtio_balloon_stat { 31 + u16 tag; 32 + u64 val; 33 + } __attribute__((packed)); 34 + 22 35 #endif /* _LINUX_VIRTIO_BALLOON_H */
+13
include/linux/virtio_blk.h
··· 15 15 #define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ 16 16 #define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */ 17 17 #define VIRTIO_BLK_F_FLUSH 9 /* Cache flush command support */ 18 + #define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */ 18 19 19 20 struct virtio_blk_config { 20 21 /* The capacity (in 512-byte sectors). */ ··· 30 29 __u8 heads; 31 30 __u8 sectors; 32 31 } geometry; 32 + 33 33 /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ 34 34 __u32 blk_size; 35 + 36 + /* the next 4 entries are guarded by VIRTIO_BLK_F_TOPOLOGY */ 37 + /* exponent for physical block per logical block. */ 38 + __u8 physical_block_exp; 39 + /* alignment offset in logical blocks. */ 40 + __u8 alignment_offset; 41 + /* minimum I/O size without performance penalty in logical blocks. */ 42 + __u16 min_io_size; 43 + /* optimal sustained I/O size in logical blocks. */ 44 + __u32 opt_io_size; 45 + 35 46 } __attribute__((packed)); 36 47 37 48 /*
+28 -2
include/linux/virtio_console.h
··· 3 3 #include <linux/types.h> 4 4 #include <linux/virtio_ids.h> 5 5 #include <linux/virtio_config.h> 6 - /* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so 7 - * anyone can use the definitions to implement compatible drivers/servers. */ 6 + /* 7 + * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so 8 + * anyone can use the definitions to implement compatible drivers/servers. 9 + * 10 + * Copyright (C) Red Hat, Inc., 2009, 2010 11 + */ 8 12 9 13 /* Feature bits */ 10 14 #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ 15 + #define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ 11 16 12 17 struct virtio_console_config { 13 18 /* colums of the screens */ 14 19 __u16 cols; 15 20 /* rows of the screens */ 16 21 __u16 rows; 22 + /* max. number of ports this device can hold */ 23 + __u32 max_nr_ports; 24 + /* number of ports added so far */ 25 + __u32 nr_ports; 17 26 } __attribute__((packed)); 18 27 28 + /* 29 + * A message that's passed between the Host and the Guest for a 30 + * particular port. 31 + */ 32 + struct virtio_console_control { 33 + __u32 id; /* Port number */ 34 + __u16 event; /* The kind of control event (see below) */ 35 + __u16 value; /* Extra information for the key */ 36 + }; 37 + 38 + /* Some events for control messages */ 39 + #define VIRTIO_CONSOLE_PORT_READY 0 40 + #define VIRTIO_CONSOLE_CONSOLE_PORT 1 41 + #define VIRTIO_CONSOLE_RESIZE 2 42 + #define VIRTIO_CONSOLE_PORT_OPEN 3 43 + #define VIRTIO_CONSOLE_PORT_NAME 4 44 + #define VIRTIO_CONSOLE_PORT_REMOVE 5 19 45 20 46 #ifdef __KERNEL__ 21 47 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
+2 -1
kernel/kfifo.c
··· 80 80 81 81 buffer = kmalloc(size, gfp_mask); 82 82 if (!buffer) { 83 - _kfifo_init(fifo, 0, 0); 83 + _kfifo_init(fifo, NULL, 0); 84 84 return -ENOMEM; 85 85 } 86 86 ··· 97 97 void kfifo_free(struct kfifo *fifo) 98 98 { 99 99 kfree(fifo->buffer); 100 + _kfifo_init(fifo, NULL, 0); 100 101 } 101 102 EXPORT_SYMBOL(kfifo_free); 102 103
+5 -6
kernel/perf_event.c
··· 3259 3259 task_event->event_id.tid = perf_event_tid(event, task); 3260 3260 task_event->event_id.ptid = perf_event_tid(event, current); 3261 3261 3262 - task_event->event_id.time = perf_clock(); 3263 - 3264 3262 perf_output_put(&handle, task_event->event_id); 3265 3263 3266 3264 perf_output_end(&handle); ··· 3266 3268 3267 3269 static int perf_event_task_match(struct perf_event *event) 3268 3270 { 3269 - if (event->state != PERF_EVENT_STATE_ACTIVE) 3271 + if (event->state < PERF_EVENT_STATE_INACTIVE) 3270 3272 return 0; 3271 3273 3272 3274 if (event->cpu != -1 && event->cpu != smp_processor_id()) ··· 3298 3300 cpuctx = &get_cpu_var(perf_cpu_context); 3299 3301 perf_event_task_ctx(&cpuctx->ctx, task_event); 3300 3302 if (!ctx) 3301 - ctx = rcu_dereference(task_event->task->perf_event_ctxp); 3303 + ctx = rcu_dereference(current->perf_event_ctxp); 3302 3304 if (ctx) 3303 3305 perf_event_task_ctx(ctx, task_event); 3304 3306 put_cpu_var(perf_cpu_context); ··· 3329 3331 /* .ppid */ 3330 3332 /* .tid */ 3331 3333 /* .ptid */ 3334 + .time = perf_clock(), 3332 3335 }, 3333 3336 }; 3334 3337 ··· 3379 3380 3380 3381 static int perf_event_comm_match(struct perf_event *event) 3381 3382 { 3382 - if (event->state != PERF_EVENT_STATE_ACTIVE) 3383 + if (event->state < PERF_EVENT_STATE_INACTIVE) 3383 3384 return 0; 3384 3385 3385 3386 if (event->cpu != -1 && event->cpu != smp_processor_id()) ··· 3499 3500 static int perf_event_mmap_match(struct perf_event *event, 3500 3501 struct perf_mmap_event *mmap_event) 3501 3502 { 3502 - if (event->state != PERF_EVENT_STATE_ACTIVE) 3503 + if (event->state < PERF_EVENT_STATE_INACTIVE) 3503 3504 return 0; 3504 3505 3505 3506 if (event->cpu != -1 && event->cpu != smp_processor_id())
+2
kernel/sys.c
··· 222 222 if (which > PRIO_USER || which < PRIO_PROCESS) 223 223 return -EINVAL; 224 224 225 + rcu_read_lock(); 225 226 read_lock(&tasklist_lock); 226 227 switch (which) { 227 228 case PRIO_PROCESS: ··· 268 267 } 269 268 out_unlock: 270 269 read_unlock(&tasklist_lock); 270 + rcu_read_unlock(); 271 271 272 272 return retval; 273 273 }
+3 -1
lib/idr.c
··· 156 156 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; 157 157 158 158 /* if already at the top layer, we need to grow */ 159 - if (!(p = pa[l])) { 159 + if (id >= 1 << (idp->layers * IDR_BITS)) { 160 160 *starting_id = id; 161 161 return IDR_NEED_TO_GROW; 162 162 } 163 + p = pa[l]; 164 + BUG_ON(!p); 163 165 164 166 /* If we need to go up one layer, continue the 165 167 * loop; otherwise, restart from the top.
+15 -21
mm/migrate.c
··· 1002 1002 #define DO_PAGES_STAT_CHUNK_NR 16 1003 1003 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; 1004 1004 int chunk_status[DO_PAGES_STAT_CHUNK_NR]; 1005 - unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR; 1006 - int err; 1007 1005 1008 - for (i = 0; i < nr_pages; i += chunk_nr) { 1009 - if (chunk_nr > nr_pages - i) 1010 - chunk_nr = nr_pages - i; 1006 + while (nr_pages) { 1007 + unsigned long chunk_nr; 1011 1008 1012 - err = copy_from_user(chunk_pages, &pages[i], 1013 - chunk_nr * sizeof(*chunk_pages)); 1014 - if (err) { 1015 - err = -EFAULT; 1016 - goto out; 1017 - } 1009 + chunk_nr = nr_pages; 1010 + if (chunk_nr > DO_PAGES_STAT_CHUNK_NR) 1011 + chunk_nr = DO_PAGES_STAT_CHUNK_NR; 1012 + 1013 + if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages))) 1014 + break; 1018 1015 1019 1016 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); 1020 1017 1021 - err = copy_to_user(&status[i], chunk_status, 1022 - chunk_nr * sizeof(*chunk_status)); 1023 - if (err) { 1024 - err = -EFAULT; 1025 - goto out; 1026 - } 1027 - } 1028 - err = 0; 1018 + if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status))) 1019 + break; 1029 1020 1030 - out: 1031 - return err; 1021 + pages += chunk_nr; 1022 + status += chunk_nr; 1023 + nr_pages -= chunk_nr; 1024 + } 1025 + return nr_pages ? -EFAULT : 0; 1032 1026 } 1033 1027 1034 1028 /*
+2
mm/oom_kill.c
··· 459 459 list_for_each_entry(c, &p->children, sibling) { 460 460 if (c->mm == p->mm) 461 461 continue; 462 + if (mem && !task_in_mem_cgroup(c, mem)) 463 + continue; 462 464 if (!oom_kill_task(c)) 463 465 return 0; 464 466 }
+14 -3
net/bluetooth/hidp/core.c
··· 313 313 return hidp_queue_report(session, buf, rsize); 314 314 } 315 315 316 - static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count) 316 + static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count, 317 + unsigned char report_type) 317 318 { 318 - if (hidp_send_ctrl_message(hid->driver_data, 319 - HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE, 319 + switch (report_type) { 320 + case HID_FEATURE_REPORT: 321 + report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE; 322 + break; 323 + case HID_OUTPUT_REPORT: 324 + report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT; 325 + break; 326 + default: 327 + return -EINVAL; 328 + } 329 + 330 + if (hidp_send_ctrl_message(hid->driver_data, report_type, 320 331 data, count)) 321 332 return -ENOMEM; 322 333 return count;
+1 -1
net/core/dev.c
··· 2761 2761 switch (ret) { 2762 2762 case GRO_NORMAL: 2763 2763 case GRO_HELD: 2764 - skb->protocol = eth_type_trans(skb, napi->dev); 2764 + skb->protocol = eth_type_trans(skb, skb->dev); 2765 2765 2766 2766 if (ret == GRO_HELD) 2767 2767 skb_gro_pull(skb, -ETH_HLEN);
+1
net/core/ethtool.c
··· 927 927 case ETHTOOL_GPERMADDR: 928 928 case ETHTOOL_GUFO: 929 929 case ETHTOOL_GGSO: 930 + case ETHTOOL_GGRO: 930 931 case ETHTOOL_GFLAGS: 931 932 case ETHTOOL_GPFLAGS: 932 933 case ETHTOOL_GRXFH:
+2 -1
net/core/net-sysfs.c
··· 410 410 const struct iw_statistics *iw; 411 411 ssize_t ret = -EINVAL; 412 412 413 - rtnl_lock(); 413 + if (!rtnl_trylock()) 414 + return restart_syscall(); 414 415 if (dev_isalive(dev)) { 415 416 iw = get_wireless_stats(dev); 416 417 if (iw)
+6 -1
net/ipv4/devinet.c
··· 1317 1317 { 1318 1318 int *valp = ctl->data; 1319 1319 int val = *valp; 1320 + loff_t pos = *ppos; 1320 1321 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 1321 1322 1322 1323 if (write && *valp != val) { 1323 1324 struct net *net = ctl->extra2; 1324 1325 1325 1326 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) { 1326 - if (!rtnl_trylock()) 1327 + if (!rtnl_trylock()) { 1328 + /* Restore the original values before restarting */ 1329 + *valp = val; 1330 + *ppos = pos; 1327 1331 return restart_syscall(); 1332 + } 1328 1333 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) { 1329 1334 inet_forward_change(net); 1330 1335 } else if (*valp) {
+1 -1
net/ipv4/igmp.c
··· 946 946 break; 947 947 case IGMP_HOST_MEMBERSHIP_REPORT: 948 948 case IGMPV2_HOST_MEMBERSHIP_REPORT: 949 - case IGMPV3_HOST_MEMBERSHIP_REPORT: 950 949 /* Is it our report looped back? */ 951 950 if (skb_rtable(skb)->fl.iif == 0) 952 951 break; ··· 959 960 in_dev_put(in_dev); 960 961 return pim_rcv_v1(skb); 961 962 #endif 963 + case IGMPV3_HOST_MEMBERSHIP_REPORT: 962 964 case IGMP_DVMRP: 963 965 case IGMP_TRACE: 964 966 case IGMP_HOST_LEAVE_MESSAGE:
+1 -5
net/ipv4/ipcomp.c
··· 124 124 if (x->props.mode == XFRM_MODE_TUNNEL) { 125 125 err = ipcomp_tunnel_attach(x); 126 126 if (err) 127 - goto error_tunnel; 127 + goto out; 128 128 } 129 129 130 130 err = 0; 131 131 out: 132 132 return err; 133 - 134 - error_tunnel: 135 - ipcomp_destroy(x); 136 - goto out; 137 133 } 138 134 139 135 static const struct xfrm_type ipcomp_type = {
+2 -4
net/ipv4/tcp_input.c
··· 5783 5783 5784 5784 /* tcp_ack considers this ACK as duplicate 5785 5785 * and does not calculate rtt. 5786 - * Fix it at least with timestamps. 5786 + * Force it here. 5787 5787 */ 5788 - if (tp->rx_opt.saw_tstamp && 5789 - tp->rx_opt.rcv_tsecr && !tp->srtt) 5790 - tcp_ack_saw_tstamp(sk, 0); 5788 + tcp_ack_update_rtt(sk, 0, 0); 5791 5789 5792 5790 if (tp->rx_opt.tstamp_ok) 5793 5791 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
+14 -2
net/ipv6/addrconf.c
··· 502 502 if (p == &net->ipv6.devconf_dflt->forwarding) 503 503 return 0; 504 504 505 - if (!rtnl_trylock()) 505 + if (!rtnl_trylock()) { 506 + /* Restore the original values before restarting */ 507 + *p = old; 506 508 return restart_syscall(); 509 + } 507 510 508 511 if (p == &net->ipv6.devconf_all->forwarding) { 509 512 __s32 newf = net->ipv6.devconf_all->forwarding; ··· 4031 4028 { 4032 4029 int *valp = ctl->data; 4033 4030 int val = *valp; 4031 + loff_t pos = *ppos; 4034 4032 int ret; 4035 4033 4036 4034 ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 4037 4035 4038 4036 if (write) 4039 4037 ret = addrconf_fixup_forwarding(ctl, valp, val); 4038 + if (ret) 4039 + *ppos = pos; 4040 4040 return ret; 4041 4041 } 4042 4042 ··· 4081 4075 if (p == &net->ipv6.devconf_dflt->disable_ipv6) 4082 4076 return 0; 4083 4077 4084 - if (!rtnl_trylock()) 4078 + if (!rtnl_trylock()) { 4079 + /* Restore the original values before restarting */ 4080 + *p = old; 4085 4081 return restart_syscall(); 4082 + } 4086 4083 4087 4084 if (p == &net->ipv6.devconf_all->disable_ipv6) { 4088 4085 __s32 newf = net->ipv6.devconf_all->disable_ipv6; ··· 4104 4095 { 4105 4096 int *valp = ctl->data; 4106 4097 int val = *valp; 4098 + loff_t pos = *ppos; 4107 4099 int ret; 4108 4100 4109 4101 ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 4110 4102 4111 4103 if (write) 4112 4104 ret = addrconf_disable_ipv6(ctl, valp, val); 4105 + if (ret) 4106 + *ppos = pos; 4113 4107 return ret; 4114 4108 } 4115 4109
+1 -5
net/ipv6/ipcomp6.c
··· 154 154 if (x->props.mode == XFRM_MODE_TUNNEL) { 155 155 err = ipcomp6_tunnel_attach(x); 156 156 if (err) 157 - goto error_tunnel; 157 + goto out; 158 158 } 159 159 160 160 err = 0; 161 161 out: 162 162 return err; 163 - error_tunnel: 164 - ipcomp_destroy(x); 165 - 166 - goto out; 167 163 } 168 164 169 165 static const struct xfrm_type ipcomp6_type =
+1 -1
net/mac80211/ibss.c
··· 647 647 } 648 648 if (pos[1] != 0 && 649 649 (pos[1] != ifibss->ssid_len || 650 - !memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { 650 + memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { 651 651 /* Ignore ProbeReq for foreign SSID */ 652 652 return; 653 653 }
+3
net/mac80211/rate.c
··· 245 245 info->control.rates[i].count = 1; 246 246 } 247 247 248 + if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) 249 + return; 250 + 248 251 if (sta && sdata->force_unicast_rateidx > -1) { 249 252 info->control.rates[0].idx = sdata->force_unicast_rateidx; 250 253 } else {
+10 -8
net/mac80211/scan.c
··· 439 439 if (local->scan_req) 440 440 return -EBUSY; 441 441 442 + if (req != local->int_scan_req && 443 + sdata->vif.type == NL80211_IFTYPE_STATION && 444 + !list_empty(&ifmgd->work_list)) { 445 + /* actually wait for the work it's doing to finish/time out */ 446 + set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request); 447 + local->scan_req = req; 448 + local->scan_sdata = sdata; 449 + return 0; 450 + } 451 + 442 452 if (local->ops->hw_scan) { 443 453 u8 *ies; 444 454 ··· 472 462 473 463 local->scan_req = req; 474 464 local->scan_sdata = sdata; 475 - 476 - if (req != local->int_scan_req && 477 - sdata->vif.type == NL80211_IFTYPE_STATION && 478 - !list_empty(&ifmgd->work_list)) { 479 - /* actually wait for the work it's doing to finish/time out */ 480 - set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request); 481 - return 0; 482 - } 483 465 484 466 if (local->ops->hw_scan) 485 467 __set_bit(SCAN_HW_SCANNING, &local->scanning);
+3 -9
net/xfrm/xfrm_state.c
··· 1102 1102 int err = -ENOMEM; 1103 1103 struct xfrm_state *x = xfrm_state_alloc(net); 1104 1104 if (!x) 1105 - goto error; 1105 + goto out; 1106 1106 1107 1107 memcpy(&x->id, &orig->id, sizeof(x->id)); 1108 1108 memcpy(&x->sel, &orig->sel, sizeof(x->sel)); ··· 1160 1160 return x; 1161 1161 1162 1162 error: 1163 + xfrm_state_put(x); 1164 + out: 1163 1165 if (errp) 1164 1166 *errp = err; 1165 - if (x) { 1166 - kfree(x->aalg); 1167 - kfree(x->ealg); 1168 - kfree(x->calg); 1169 - kfree(x->encap); 1170 - kfree(x->coaddr); 1171 - } 1172 - kfree(x); 1173 1167 return NULL; 1174 1168 } 1175 1169
-1
scripts/.gitignore
··· 6 6 pnmtologo 7 7 bin2c 8 8 unifdef 9 - binoffset 10 9 ihex2fw
-163
scripts/binoffset.c
··· 1 - /*************************************************************************** 2 - * binoffset.c 3 - * (C) 2002 Randy Dunlap <rdunlap@xenotime.net> 4 - 5 - # This program is free software; you can redistribute it and/or modify 6 - # it under the terms of the GNU General Public License as published by 7 - # the Free Software Foundation; either version 2 of the License, or 8 - # (at your option) any later version. 9 - # 10 - # This program is distributed in the hope that it will be useful, 11 - # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 - # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 - # GNU General Public License for more details. 14 - # 15 - # You should have received a copy of the GNU General Public License 16 - # along with this program; if not, write to the Free Software 17 - # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 - 19 - # binoffset.c: 20 - # - searches a (binary) file for a specified (binary) pattern 21 - # - returns the offset of the located pattern or ~0 if not found 22 - # - exits with exit status 0 normally or non-0 if pattern is not found 23 - # or any other error occurs. 24 - 25 - ****************************************************************/ 26 - 27 - #include <stdio.h> 28 - #include <stdlib.h> 29 - #include <string.h> 30 - #include <errno.h> 31 - #include <unistd.h> 32 - #include <fcntl.h> 33 - #include <sys/types.h> 34 - #include <sys/stat.h> 35 - #include <sys/mman.h> 36 - 37 - #define VERSION "0.1" 38 - #define BUF_SIZE (16 * 1024) 39 - #define PAT_SIZE 100 40 - 41 - char *progname; 42 - char *inputname; 43 - int inputfd; 44 - unsigned int bix; /* buf index */ 45 - unsigned char patterns [PAT_SIZE] = {0}; /* byte-sized pattern array */ 46 - int pat_len; /* actual number of pattern bytes */ 47 - unsigned char *madr; /* mmap address */ 48 - size_t filesize; 49 - int num_matches = 0; 50 - off_t firstloc = 0; 51 - 52 - void usage (void) 53 - { 54 - fprintf (stderr, "%s ver. 
%s\n", progname, VERSION); 55 - fprintf (stderr, "usage: %s filename pattern_bytes\n", 56 - progname); 57 - fprintf (stderr, " [prints location of pattern_bytes in file]\n"); 58 - exit (1); 59 - } 60 - 61 - void get_pattern (int pat_count, char *pats []) 62 - { 63 - int ix, err, tmp; 64 - 65 - #ifdef DEBUG 66 - fprintf (stderr,"get_pattern: count = %d\n", pat_count); 67 - for (ix = 0; ix < pat_count; ix++) 68 - fprintf (stderr, " pat # %d: [%s]\n", ix, pats[ix]); 69 - #endif 70 - 71 - for (ix = 0; ix < pat_count; ix++) { 72 - tmp = 0; 73 - err = sscanf (pats[ix], "%5i", &tmp); 74 - if (err != 1 || tmp > 0xff) { 75 - fprintf (stderr, "pattern or value error in pattern # %d [%s]\n", 76 - ix, pats[ix]); 77 - usage (); 78 - } 79 - patterns [ix] = tmp; 80 - } 81 - pat_len = pat_count; 82 - } 83 - 84 - void search_pattern (void) 85 - { 86 - for (bix = 0; bix < filesize; bix++) { 87 - if (madr[bix] == patterns[0]) { 88 - if (memcmp (&madr[bix], patterns, pat_len) == 0) { 89 - if (num_matches == 0) 90 - firstloc = bix; 91 - num_matches++; 92 - } 93 - } 94 - } 95 - } 96 - 97 - #ifdef NOTDEF 98 - size_t get_filesize (int fd) 99 - { 100 - off_t end_off = lseek (fd, 0, SEEK_END); 101 - lseek (fd, 0, SEEK_SET); 102 - return (size_t) end_off; 103 - } 104 - #endif 105 - 106 - size_t get_filesize (int fd) 107 - { 108 - int err; 109 - struct stat stat; 110 - 111 - err = fstat (fd, &stat); 112 - fprintf (stderr, "filesize: %ld\n", err < 0 ? 
(long)err : stat.st_size); 113 - if (err < 0) 114 - return err; 115 - return (size_t) stat.st_size; 116 - } 117 - 118 - int main (int argc, char *argv []) 119 - { 120 - progname = argv[0]; 121 - 122 - if (argc < 3) 123 - usage (); 124 - 125 - get_pattern (argc - 2, argv + 2); 126 - 127 - inputname = argv[1]; 128 - 129 - inputfd = open (inputname, O_RDONLY); 130 - if (inputfd == -1) { 131 - fprintf (stderr, "%s: cannot open '%s'\n", 132 - progname, inputname); 133 - exit (3); 134 - } 135 - 136 - filesize = get_filesize (inputfd); 137 - 138 - madr = mmap (0, filesize, PROT_READ, MAP_PRIVATE, inputfd, 0); 139 - if (madr == MAP_FAILED) { 140 - fprintf (stderr, "mmap error = %d\n", errno); 141 - close (inputfd); 142 - exit (4); 143 - } 144 - 145 - search_pattern (); 146 - 147 - if (munmap (madr, filesize)) 148 - fprintf (stderr, "munmap error = %d\n", errno); 149 - 150 - if (close (inputfd)) 151 - fprintf (stderr, "%s: error %d closing '%s'\n", 152 - progname, errno, inputname); 153 - 154 - fprintf (stderr, "number of pattern matches = %d\n", num_matches); 155 - if (num_matches == 0) 156 - firstloc = ~0; 157 - printf ("%ld\n", firstloc); 158 - fprintf (stderr, "%ld\n", firstloc); 159 - 160 - exit (num_matches ? 0 : 2); 161 - } 162 - 163 - /* end binoffset.c */
+40 -79
scripts/extract-ikconfig
··· 1 1 #!/bin/sh 2 - # extracts .config info from a [b]zImage file 3 - # uses: binoffset (new), dd, zcat, strings, grep 4 - # $arg1 is [b]zImage filename 2 + # ---------------------------------------------------------------------- 3 + # extract-ikconfig - Extract the .config file from a kernel image 4 + # 5 + # This will only work when the kernel was compiled with CONFIG_IKCONFIG. 6 + # 7 + # The obscure use of the "tr" filter is to work around older versions of 8 + # "grep" that report the byte offset of the line instead of the pattern. 9 + # 10 + # (c) 2009, Dick Streefland <dick@streefland.net> 11 + # Licensed under the terms of the GNU General Public License. 12 + # ---------------------------------------------------------------------- 5 13 6 - binoffset="./scripts/binoffset" 7 - test -e $binoffset || cc -o $binoffset ./scripts/binoffset.c || exit 1 14 + gz1='\037\213\010' 15 + gz2='01' 16 + cf1='IKCFG_ST\037\213\010' 17 + cf2='0123456789' 8 18 9 - IKCFG_ST="0x49 0x4b 0x43 0x46 0x47 0x5f 0x53 0x54" 10 - IKCFG_ED="0x49 0x4b 0x43 0x46 0x47 0x5f 0x45 0x44" 11 - dump_config() { 12 - file="$1" 13 - 14 - start=`$binoffset $file $IKCFG_ST 2>/dev/null` 15 - [ "$?" != "0" ] && start="-1" 16 - if [ "$start" -eq "-1" ]; then 17 - return 18 - fi 19 - end=`$binoffset $file $IKCFG_ED 2>/dev/null` 20 - [ "$?" 
!= "0" ] && end="-1" 21 - if [ "$end" -eq "-1" ]; then 22 - return 23 - fi 24 - 25 - start=`expr $start + 8` 26 - size=`expr $end - $start` 27 - 28 - dd if="$file" ibs=1 skip="$start" count="$size" 2>/dev/null | zcat 29 - 30 - clean_up 31 - exit 0 32 - } 33 - 34 - 35 - usage() 19 + dump_config() 36 20 { 37 - echo " usage: extract-ikconfig [b]zImage_filename" 38 - } 39 - 40 - clean_up() 41 - { 42 - if [ "$TMPFILE" != "" ]; then 43 - rm -f $TMPFILE 21 + if pos=`tr "$cf1\n$cf2" "\n$cf2=" < "$1" | grep -abo "^$cf2"` 22 + then 23 + pos=${pos%%:*} 24 + tail -c+$(($pos+8)) "$1" | zcat -q 25 + exit 0 44 26 fi 45 27 } 46 28 47 - if [ $# -lt 1 ] 29 + # Check invocation: 30 + me=${0##*/} 31 + img=$1 32 + if [ $# -ne 1 -o ! -s "$img" ] 48 33 then 49 - usage 50 - exit 1 34 + echo "Usage: $me <kernel-image>" >&2 35 + exit 2 51 36 fi 52 37 53 - TMPFILE=`mktemp -t ikconfig-XXXXXX` || exit 1 54 - image="$1" 38 + # Initial attempt for uncompressed images or objects: 39 + dump_config "$img" 55 40 56 - # vmlinux: Attempt to dump the configuration from the file directly 57 - dump_config "$image" 41 + # That didn't work, so decompress and try again: 42 + tmp=/tmp/ikconfig$$ 43 + trap "rm -f $tmp" 0 44 + for pos in `tr "$gz1\n$gz2" "\n$gz2=" < "$img" | grep -abo "^$gz2"` 45 + do 46 + pos=${pos%%:*} 47 + tail -c+$pos "$img" | zcat 2> /dev/null > $tmp 48 + dump_config $tmp 49 + done 58 50 59 - GZHDR1="0x1f 0x8b 0x08 0x00" 60 - GZHDR2="0x1f 0x8b 0x08 0x08" 61 - 62 - ELFHDR="0x7f 0x45 0x4c 0x46" 63 - 64 - # vmlinux.gz: Check for a compressed images 65 - off=`$binoffset "$image" $GZHDR1 2>/dev/null` 66 - [ "$?" != "0" ] && off="-1" 67 - if [ "$off" -eq "-1" ]; then 68 - off=`$binoffset "$image" $GZHDR2 2>/dev/null` 69 - [ "$?" 
!= "0" ] && off="-1" 70 - fi 71 - if [ "$off" -eq "0" ]; then 72 - zcat <"$image" >"$TMPFILE" 73 - dump_config "$TMPFILE" 74 - elif [ "$off" -ne "-1" ]; then 75 - (dd ibs="$off" skip=1 count=0 && dd bs=512k) <"$image" 2>/dev/null | \ 76 - zcat >"$TMPFILE" 77 - dump_config "$TMPFILE" 78 - 79 - # check if this is simply an ELF file 80 - else 81 - off=`$binoffset "$image" $ELFHDR 2>/dev/null` 82 - [ "$?" != "0" ] && off="-1" 83 - if [ "$off" -eq "0" ]; then 84 - dump_config "$image" 85 - fi 86 - fi 87 - 88 - echo "ERROR: Unable to extract kernel configuration information." 89 - echo " This kernel image may not have the config info." 90 - 91 - clean_up 51 + # Bail out: 52 + echo "$me: Cannot find kernel config." >&2 92 53 exit 1
+12 -2
scripts/kconfig/Makefile
··· 30 30 $(Q)mkdir -p include/generated 31 31 $< -s $(Kconfig) 32 32 33 + # if no path is given, then use src directory to find file 34 + ifdef LSMOD 35 + LSMOD_F := $(LSMOD) 36 + ifeq ($(findstring /,$(LSMOD)),) 37 + LSMOD_F := $(objtree)/$(LSMOD) 38 + endif 39 + endif 40 + 33 41 localmodconfig: $(obj)/streamline_config.pl $(obj)/conf 34 - $(Q)perl $< $(srctree) $(Kconfig) > .tmp.config 42 + $(Q)mkdir -p include/generated 43 + $(Q)perl $< $(srctree) $(Kconfig) $(LSMOD_F) > .tmp.config 35 44 $(Q)if [ -f .config ]; then \ 36 45 cmp -s .tmp.config .config || \ 37 46 (mv -f .config .config.old.1; \ ··· 54 45 $(Q)rm -f .tmp.config 55 46 56 47 localyesconfig: $(obj)/streamline_config.pl $(obj)/conf 57 - $(Q)perl $< $(srctree) $(Kconfig) > .tmp.config 48 + $(Q)mkdir -p include/generated 49 + $(Q)perl $< $(srctree) $(Kconfig) $(LSMOD_F) > .tmp.config 58 50 $(Q)sed -i s/=m/=y/ .tmp.config 59 51 $(Q)if [ -f .config ]; then \ 60 52 cmp -s .tmp.config .config || \
+57 -2
scripts/kconfig/streamline_config.pl
··· 113 113 # Get the build source and top level Kconfig file (passed in) 114 114 my $ksource = $ARGV[0]; 115 115 my $kconfig = $ARGV[1]; 116 + my $lsmod_file = $ARGV[2]; 116 117 117 118 my @makefiles = `find $ksource -name Makefile`; 118 119 my %depends; ··· 122 121 my %objects; 123 122 my $var; 124 123 my $cont = 0; 124 + my $iflevel = 0; 125 + my @ifdeps; 125 126 126 127 # prevent recursion 127 128 my %read_kconfigs; ··· 149 146 $state = "NEW"; 150 147 $config = $1; 151 148 149 + for (my $i = 0; $i < $iflevel; $i++) { 150 + if ($i) { 151 + $depends{$config} .= " " . $ifdeps[$i]; 152 + } else { 153 + $depends{$config} = $ifdeps[$i]; 154 + } 155 + $state = "DEP"; 156 + } 157 + 152 158 # collect the depends for the config 153 159 } elsif ($state eq "NEW" && /^\s*depends\s+on\s+(.*)$/) { 154 160 $state = "DEP"; ··· 177 165 } elsif ($state ne "NONE" && /^\s*tristate\s\S/) { 178 166 # note if the config has a prompt 179 167 $prompt{$config} = 1; 168 + 169 + # Check for if statements 170 + } elsif (/^if\s+(.*\S)\s*$/) { 171 + my $deps = $1; 172 + # remove beginning and ending non text 173 + $deps =~ s/^[^a-zA-Z0-9_]*//; 174 + $deps =~ s/[^a-zA-Z0-9_]*$//; 175 + 176 + my @deps = split /[^a-zA-Z0-9_]+/, $deps; 177 + 178 + $ifdeps[$iflevel++] = join ':', @deps; 179 + 180 + } elsif (/^endif/) { 181 + 182 + $iflevel-- if ($iflevel); 180 183 181 184 # stop on "help" 182 185 } elsif (/^\s*help\s*$/) { ··· 264 237 265 238 my %modules; 266 239 267 - # see what modules are loaded on this system 268 - open(LIN,"/sbin/lsmod|") || die "Cant lsmod"; 240 + if (defined($lsmod_file)) { 241 + if ( ! 
-f $lsmod_file) { 242 + die "$lsmod_file not found"; 243 + } 244 + if ( -x $lsmod_file) { 245 + # the file is executable, run it 246 + open(LIN, "$lsmod_file|"); 247 + } else { 248 + # Just read the contents 249 + open(LIN, "$lsmod_file"); 250 + } 251 + } else { 252 + 253 + # see what modules are loaded on this system 254 + my $lsmod; 255 + 256 + foreach $dir ( ("/sbin", "/bin", "/usr/sbin", "/usr/bin") ) { 257 + if ( -x "$dir/lsmod" ) { 258 + $lsmod = "$dir/lsmod"; 259 + last; 260 + } 261 + } 262 + if (!defined($lsmod)) { 263 + # try just the path 264 + $lsmod = "lsmod"; 265 + } 266 + 267 + open(LIN,"$lsmod|") || die "Can not call lsmod with $lsmod"; 268 + } 269 + 269 270 while (<LIN>) { 270 271 next if (/^Module/); # Skip the first line. 271 272 if (/^(\S+)/) {
+1 -2
security/integrity/ima/ima_iint.c
··· 63 63 spin_lock(&ima_iint_lock); 64 64 rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint); 65 65 spin_unlock(&ima_iint_lock); 66 + radix_tree_preload_end(); 66 67 out: 67 68 if (rc < 0) 68 69 kmem_cache_free(iint_cache, iint); 69 - 70 - radix_tree_preload_end(); 71 70 72 71 return rc; 73 72 }
+1 -1
security/selinux/ss/ebitmap.c
··· 128 128 cmap_idx = delta / NETLBL_CATMAP_MAPSIZE; 129 129 cmap_sft = delta % NETLBL_CATMAP_MAPSIZE; 130 130 c_iter->bitmap[cmap_idx] 131 - |= e_iter->maps[cmap_idx] << cmap_sft; 131 + |= e_iter->maps[i] << cmap_sft; 132 132 } 133 133 e_iter = e_iter->next; 134 134 }
+1 -1
sound/aoa/fabrics/layout.c
··· 768 768 "required property %s not present\n", propname); 769 769 return -ENODEV; 770 770 } 771 - if (*ref != codec->node->linux_phandle) { 771 + if (*ref != codec->node->phandle) { 772 772 printk(KERN_INFO "snd-aoa-fabric-layout: " 773 773 "%s doesn't match!\n", propname); 774 774 return -ENODEV;
+12 -12
sound/ppc/awacs.c
··· 751 751 752 752 static void snd_pmac_awacs_resume(struct snd_pmac *chip) 753 753 { 754 - if (machine_is_compatible("PowerBook3,1") 755 - || machine_is_compatible("PowerBook3,2")) { 754 + if (of_machine_is_compatible("PowerBook3,1") 755 + || of_machine_is_compatible("PowerBook3,2")) { 756 756 msleep(100); 757 757 snd_pmac_awacs_write_reg(chip, 1, 758 758 chip->awacs_reg[1] & ~MASK_PAROUT); ··· 780 780 } 781 781 #endif /* CONFIG_PM */ 782 782 783 - #define IS_PM7500 (machine_is_compatible("AAPL,7500") \ 784 - || machine_is_compatible("AAPL,8500") \ 785 - || machine_is_compatible("AAPL,9500")) 786 - #define IS_PM5500 (machine_is_compatible("AAPL,e411")) 787 - #define IS_BEIGE (machine_is_compatible("AAPL,Gossamer")) 788 - #define IS_IMAC1 (machine_is_compatible("PowerMac2,1")) 789 - #define IS_IMAC2 (machine_is_compatible("PowerMac2,2") \ 790 - || machine_is_compatible("PowerMac4,1")) 791 - #define IS_G4AGP (machine_is_compatible("PowerMac3,1")) 792 - #define IS_LOMBARD (machine_is_compatible("PowerBook1,1")) 783 + #define IS_PM7500 (of_machine_is_compatible("AAPL,7500") \ 784 + || of_machine_is_compatible("AAPL,8500") \ 785 + || of_machine_is_compatible("AAPL,9500")) 786 + #define IS_PM5500 (of_machine_is_compatible("AAPL,e411")) 787 + #define IS_BEIGE (of_machine_is_compatible("AAPL,Gossamer")) 788 + #define IS_IMAC1 (of_machine_is_compatible("PowerMac2,1")) 789 + #define IS_IMAC2 (of_machine_is_compatible("PowerMac2,2") \ 790 + || of_machine_is_compatible("PowerMac4,1")) 791 + #define IS_G4AGP (of_machine_is_compatible("PowerMac3,1")) 792 + #define IS_LOMBARD (of_machine_is_compatible("PowerBook1,1")) 793 793 794 794 static int imac1, imac2; 795 795
+2 -2
sound/ppc/burgundy.c
··· 582 582 static void snd_pmac_burgundy_update_automute(struct snd_pmac *chip, int do_notify) 583 583 { 584 584 if (chip->auto_mute) { 585 - int imac = machine_is_compatible("iMac"); 585 + int imac = of_machine_is_compatible("iMac"); 586 586 int reg, oreg; 587 587 reg = oreg = snd_pmac_burgundy_rcb(chip, 588 588 MASK_ADDR_BURGUNDY_MORE_OUTPUTENABLES); ··· 620 620 */ 621 621 int __devinit snd_pmac_burgundy_init(struct snd_pmac *chip) 622 622 { 623 - int imac = machine_is_compatible("iMac"); 623 + int imac = of_machine_is_compatible("iMac"); 624 624 int i, err; 625 625 626 626 /* Checks to see the chip is alive and kicking */
+9 -9
sound/ppc/pmac.c
··· 922 922 } 923 923 924 924 /* it seems the Pismo & iBook can't byte-swap in hardware. */ 925 - if (machine_is_compatible("PowerBook3,1") || 926 - machine_is_compatible("PowerBook2,1")) 925 + if (of_machine_is_compatible("PowerBook3,1") || 926 + of_machine_is_compatible("PowerBook2,1")) 927 927 chip->can_byte_swap = 0 ; 928 928 929 - if (machine_is_compatible("PowerBook2,1")) 929 + if (of_machine_is_compatible("PowerBook2,1")) 930 930 chip->can_duplex = 0; 931 931 } 932 932 ··· 959 959 chip->control_mask = MASK_IEPC | MASK_IEE | 0x11; /* default */ 960 960 961 961 /* check machine type */ 962 - if (machine_is_compatible("AAPL,3400/2400") 963 - || machine_is_compatible("AAPL,3500")) 962 + if (of_machine_is_compatible("AAPL,3400/2400") 963 + || of_machine_is_compatible("AAPL,3500")) 964 964 chip->is_pbook_3400 = 1; 965 - else if (machine_is_compatible("PowerBook1,1") 966 - || machine_is_compatible("AAPL,PowerBook1998")) 965 + else if (of_machine_is_compatible("PowerBook1,1") 966 + || of_machine_is_compatible("AAPL,PowerBook1998")) 967 967 chip->is_pbook_G3 = 1; 968 968 chip->node = of_find_node_by_name(NULL, "awacs"); 969 969 sound = of_node_get(chip->node); ··· 1033 1033 } 1034 1034 if (of_device_is_compatible(sound, "tumbler")) { 1035 1035 chip->model = PMAC_TUMBLER; 1036 - chip->can_capture = machine_is_compatible("PowerMac4,2") 1037 - || machine_is_compatible("PowerBook4,1"); 1036 + chip->can_capture = of_machine_is_compatible("PowerMac4,2") 1037 + || of_machine_is_compatible("PowerBook4,1"); 1038 1038 chip->can_duplex = 0; 1039 1039 // chip->can_byte_swap = 0; /* FIXME: check this */ 1040 1040 chip->num_freqs = ARRAY_SIZE(tumbler_freqs);
+1 -1
sound/soc/fsl/efika-audio-fabric.c
··· 55 55 struct platform_device *pdev; 56 56 int rc; 57 57 58 - if (!machine_is_compatible("bplan,efika")) 58 + if (!of_machine_is_compatible("bplan,efika")) 59 59 return -ENODEV; 60 60 61 61 card.platform = &mpc5200_audio_dma_platform;
+1 -1
sound/soc/fsl/pcm030-audio-fabric.c
··· 55 55 struct platform_device *pdev; 56 56 int rc; 57 57 58 - if (!machine_is_compatible("phytec,pcm030")) 58 + if (!of_machine_is_compatible("phytec,pcm030")) 59 59 return -ENODEV; 60 60 61 61 card.platform = &mpc5200_audio_dma_platform;
+3
tools/perf/util/probe-event.c
··· 272 272 int ret; 273 273 274 274 pp->probes[0] = buf = zalloc(MAX_CMDLEN); 275 + pp->found = 1; 275 276 if (!buf) 276 277 die("Failed to allocate memory by zalloc."); 277 278 if (pp->offset) { ··· 295 294 error: 296 295 free(pp->probes[0]); 297 296 pp->probes[0] = NULL; 297 + pp->found = 0; 298 298 } 299 299 return ret; 300 300 } ··· 457 455 struct strlist *rawlist; 458 456 struct str_node *ent; 459 457 458 + memset(&pp, 0, sizeof(pp)); 460 459 fd = open_kprobe_events(O_RDONLY, 0); 461 460 rawlist = get_trace_kprobe_event_rawlist(fd); 462 461 close(fd);