Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6:
[SPARC64]: Fill holes in hypervisor APIs and fix KTSB registry.
[SPARC64]: Fix two bugs wrt. kernel 4MB TSB.
[SPARC]: Mark as emulating cmpxchg, add appropriate depends for DRM.
[SPARC]: Emulate cmpxchg like parisc
[SPARC64]: Fix _PAGE_EXEC_4U check in sun4u I-TLB miss handler.
[SPARC]: Linux always started with 9600 8N1
[SPARC64]: arch/sparc64/time.c doesn't compile on Ultra 1 (no PCI)
[SPARC64]: Eliminate NR_CPUS limitations.
[SPARC64]: Use machine description and OBP properly for cpu probing.
[SPARC64]: Negotiate hypervisor API for PCI services.
[SPARC64]: Report proper system soft state to the hypervisor.
[SPARC64]: Fix typo in sun4v_hvapi_register error handling.
[SCSI] ESP: Kill SCSI_ESP_CORE and link directly just like jazz_esp
[SCSI] jazz_esp: Converted to use esp_core.
[SPARC64]: PCI device scan is way too verbose by default.
[SERIAL] sunzilog: section mismatch fix
[SPARC32]: Remove section mismatch warnings in sparc time.c file
[SPARC64]: Don't be picky about virtual-dma values on sun4v.
[SPARC64]: Kill unused DIE_PAGE_FAULT enum value.
[SCSI] pluto: Use wait_for_completion_timeout.

+2762 -887
+7
arch/sparc/Kconfig
··· 178 178 bool 179 179 default n 180 180 181 + config EMULATED_CMPXCHG 182 + bool 183 + default y 184 + help 185 + Sparc32 does not have a CAS instruction like sparc64. cmpxchg() 186 + is emulated, and therefore it is not completely atomic. 187 + 181 188 config SUN_PM 182 189 bool 183 190 default y
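
Why the help text hedges on atomicity: the emulation (see __cmpxchg_u32 in atomic32.c below) serializes competing cmpxchg() callers through a spinlock, but a plain store from another CPU takes no lock and can land between the emulated read and write. A minimal sketch of the window, with hypothetical names:

    /* CPU A: emulated cmpxchg; read and write bracketed by a hash lock */
    old = cmpxchg(&word, 0, 1);

    /* CPU B: raw store, no lock taken -- may slip between CPU A's read
     * of word and its write, yet CPU A's cmpxchg still reports success.
     */
    word = 2;

This is why DRM, which assumes a fully atomic cmpxchg(), gets a Kconfig dependency keyed off EMULATED_CMPXCHG in the companion patch.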
+2 -2
arch/sparc/kernel/time.c
··· 148 148 } 149 149 150 150 /* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */ 151 - static void __init kick_start_clock(void) 151 + static void __devinit kick_start_clock(void) 152 152 { 153 153 struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs; 154 154 unsigned char sec; ··· 223 223 return (data1 == data2); /* Was the write blocked? */ 224 224 } 225 225 226 - static void __init mostek_set_system_time(void) 226 + static void __devinit mostek_set_system_time(void) 227 227 { 228 228 unsigned int year, mon, day, hour, min, sec; 229 229 struct mostek48t02 *mregs;
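
A note on the annotation change: these two routines are reached from clock-device probing, which can run after early boot, so they cannot live in .init.text, which is discarded once init completes. __devinit expands to nothing when the kernel may probe devices later and collapses back to __init otherwise; roughly (from linux/init.h of this era, quoted from memory):

    #ifdef CONFIG_HOTPLUG
    #define __devinit                /* kept in core text */
    #else
    #define __devinit __init         /* discardable after boot */
    #endif

which is what makes this a section-mismatch fix rather than a behavior change.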
+15
arch/sparc/lib/atomic32.c
··· 2 2 * atomic32.c: 32-bit atomic_t implementation 3 3 * 4 4 * Copyright (C) 2004 Keith M Wesolowski 5 + * Copyright (C) 2007 Kyle McMartin 5 6 * 6 7 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf 7 8 */ ··· 118 117 return old & mask; 119 118 } 120 119 EXPORT_SYMBOL(___change_bit); 120 + 121 + unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new) 122 + { 123 + unsigned long flags; 124 + u32 prev; 125 + 126 + spin_lock_irqsave(ATOMIC_HASH(ptr), flags); 127 + if ((prev = *ptr) == old) 128 + *ptr = new; 129 + spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); 130 + 131 + return (unsigned long)prev; 132 + } 133 + EXPORT_SYMBOL(__cmpxchg_u32);
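
For context, ATOMIC_HASH() picks one of a small array of spinlocks based on the address, so unrelated words rarely contend on the same lock. A sketch of the scheme this function reuses, assuming the SMP definitions earlier in atomic32.c:

    #define ATOMIC_HASH_SIZE 4
    #define ATOMIC_HASH(a) \
            (&__atomic_hash[(((unsigned long)a) >> 8) & (ATOMIC_HASH_SIZE - 1)])

    extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];

On UP builds the lock degenerates and the irqsave/irqrestore pair alone provides the exclusion.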
+3 -3
arch/sparc64/Kconfig
··· 147 147 If you don't know what to do here, say N. 148 148 149 149 config NR_CPUS 150 - int "Maximum number of CPUs (2-64)" 151 - range 2 64 150 + int "Maximum number of CPUs (2-1024)" 151 + range 2 1024 152 152 depends on SMP 153 - default "32" 153 + default "64" 154 154 155 155 source "drivers/cpufreq/Kconfig" 156 156
+2 -2
arch/sparc64/kernel/Makefile
··· 8 8 extra-y := head.o init_task.o vmlinux.lds 9 9 10 10 obj-y := process.o setup.o cpu.o idprom.o \ 11 - traps.o devices.o auxio.o una_asm.o \ 11 + traps.o auxio.o una_asm.o \ 12 12 irq.o ptrace.o time.o sys_sparc.o signal.o \ 13 13 unaligned.o central.o pci.o starfire.o semaphore.o \ 14 14 power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \ 15 - visemul.o prom.o of_device.o hvapi.o 15 + visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o 16 16 17 17 obj-$(CONFIG_STACKTRACE) += stacktrace.o 18 18 obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \
-196
arch/sparc64/kernel/devices.c
··· 1 - /* devices.c: Initial scan of the prom device tree for important 2 - * Sparc device nodes which we need to find. 3 - * 4 - * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 5 - */ 6 - 7 - #include <linux/kernel.h> 8 - #include <linux/threads.h> 9 - #include <linux/init.h> 10 - #include <linux/ioport.h> 11 - #include <linux/string.h> 12 - #include <linux/spinlock.h> 13 - #include <linux/errno.h> 14 - #include <linux/bootmem.h> 15 - 16 - #include <asm/page.h> 17 - #include <asm/oplib.h> 18 - #include <asm/system.h> 19 - #include <asm/smp.h> 20 - #include <asm/spitfire.h> 21 - #include <asm/timer.h> 22 - #include <asm/cpudata.h> 23 - 24 - /* Used to synchronize accesses to NatSemi SUPER I/O chip configure 25 - * operations in asm/ns87303.h 26 - */ 27 - DEFINE_SPINLOCK(ns87303_lock); 28 - 29 - extern void cpu_probe(void); 30 - extern void central_probe(void); 31 - 32 - static const char *cpu_mid_prop(void) 33 - { 34 - if (tlb_type == spitfire) 35 - return "upa-portid"; 36 - return "portid"; 37 - } 38 - 39 - static int get_cpu_mid(struct device_node *dp) 40 - { 41 - struct property *prop; 42 - 43 - if (tlb_type == hypervisor) { 44 - struct linux_prom64_registers *reg; 45 - int len; 46 - 47 - prop = of_find_property(dp, "cpuid", &len); 48 - if (prop && len == 4) 49 - return *(int *) prop->value; 50 - 51 - prop = of_find_property(dp, "reg", NULL); 52 - reg = prop->value; 53 - return (reg[0].phys_addr >> 32) & 0x0fffffffUL; 54 - } else { 55 - const char *prop_name = cpu_mid_prop(); 56 - 57 - prop = of_find_property(dp, prop_name, NULL); 58 - if (prop) 59 - return *(int *) prop->value; 60 - return 0; 61 - } 62 - } 63 - 64 - static int check_cpu_node(struct device_node *dp, int *cur_inst, 65 - int (*compare)(struct device_node *, int, void *), 66 - void *compare_arg, 67 - struct device_node **dev_node, int *mid) 68 - { 69 - if (!compare(dp, *cur_inst, compare_arg)) { 70 - if (dev_node) 71 - *dev_node = dp; 72 - if (mid) 73 - *mid = get_cpu_mid(dp); 74 - return 0; 75 - } 76 - 77 - (*cur_inst)++; 78 - 79 - return -ENODEV; 80 - } 81 - 82 - static int __cpu_find_by(int (*compare)(struct device_node *, int, void *), 83 - void *compare_arg, 84 - struct device_node **dev_node, int *mid) 85 - { 86 - struct device_node *dp; 87 - int cur_inst; 88 - 89 - cur_inst = 0; 90 - for_each_node_by_type(dp, "cpu") { 91 - int err = check_cpu_node(dp, &cur_inst, 92 - compare, compare_arg, 93 - dev_node, mid); 94 - if (err == 0) 95 - return 0; 96 - } 97 - 98 - return -ENODEV; 99 - } 100 - 101 - static int cpu_instance_compare(struct device_node *dp, int instance, void *_arg) 102 - { 103 - int desired_instance = (int) (long) _arg; 104 - 105 - if (instance == desired_instance) 106 - return 0; 107 - return -ENODEV; 108 - } 109 - 110 - int cpu_find_by_instance(int instance, struct device_node **dev_node, int *mid) 111 - { 112 - return __cpu_find_by(cpu_instance_compare, (void *)(long)instance, 113 - dev_node, mid); 114 - } 115 - 116 - static int cpu_mid_compare(struct device_node *dp, int instance, void *_arg) 117 - { 118 - int desired_mid = (int) (long) _arg; 119 - int this_mid; 120 - 121 - this_mid = get_cpu_mid(dp); 122 - if (this_mid == desired_mid) 123 - return 0; 124 - return -ENODEV; 125 - } 126 - 127 - int cpu_find_by_mid(int mid, struct device_node **dev_node) 128 - { 129 - return __cpu_find_by(cpu_mid_compare, (void *)(long)mid, 130 - dev_node, NULL); 131 - } 132 - 133 - void __init device_scan(void) 134 - { 135 - /* FIX ME FAST... 
-DaveM */ 136 - ioport_resource.end = 0xffffffffffffffffUL; 137 - 138 - prom_printf("Booting Linux...\n"); 139 - 140 - #ifndef CONFIG_SMP 141 - { 142 - struct device_node *dp; 143 - int err, def; 144 - 145 - err = cpu_find_by_instance(0, &dp, NULL); 146 - if (err) { 147 - prom_printf("No cpu nodes, cannot continue\n"); 148 - prom_halt(); 149 - } 150 - cpu_data(0).clock_tick = 151 - of_getintprop_default(dp, "clock-frequency", 0); 152 - 153 - def = ((tlb_type == hypervisor) ? 154 - (8 * 1024) : 155 - (16 * 1024)); 156 - cpu_data(0).dcache_size = of_getintprop_default(dp, 157 - "dcache-size", 158 - def); 159 - 160 - def = 32; 161 - cpu_data(0).dcache_line_size = 162 - of_getintprop_default(dp, "dcache-line-size", def); 163 - 164 - def = 16 * 1024; 165 - cpu_data(0).icache_size = of_getintprop_default(dp, 166 - "icache-size", 167 - def); 168 - 169 - def = 32; 170 - cpu_data(0).icache_line_size = 171 - of_getintprop_default(dp, "icache-line-size", def); 172 - 173 - def = ((tlb_type == hypervisor) ? 174 - (3 * 1024 * 1024) : 175 - (4 * 1024 * 1024)); 176 - cpu_data(0).ecache_size = of_getintprop_default(dp, 177 - "ecache-size", 178 - def); 179 - 180 - def = 64; 181 - cpu_data(0).ecache_line_size = 182 - of_getintprop_default(dp, "ecache-line-size", def); 183 - printk("CPU[0]: Caches " 184 - "D[sz(%d):line_sz(%d)] " 185 - "I[sz(%d):line_sz(%d)] " 186 - "E[sz(%d):line_sz(%d)]\n", 187 - cpu_data(0).dcache_size, cpu_data(0).dcache_line_size, 188 - cpu_data(0).icache_size, cpu_data(0).icache_line_size, 189 - cpu_data(0).ecache_size, cpu_data(0).ecache_line_size); 190 - } 191 - #endif 192 - 193 - central_probe(); 194 - 195 - cpu_probe(); 196 - }
+570 -9
arch/sparc64/kernel/entry.S
··· 1725 1725 * returns %o0: sysino 1726 1726 */ 1727 1727 .globl sun4v_devino_to_sysino 1728 + .type sun4v_devino_to_sysino,#function 1728 1729 sun4v_devino_to_sysino: 1729 1730 mov HV_FAST_INTR_DEVINO2SYSINO, %o5 1730 1731 ta HV_FAST_TRAP 1731 1732 retl 1732 1733 mov %o1, %o0 1734 + .size sun4v_devino_to_sysino, .-sun4v_devino_to_sysino 1733 1735 1734 1736 /* %o0: sysino 1735 1737 * 1736 1738 * returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED}) 1737 1739 */ 1738 1740 .globl sun4v_intr_getenabled 1741 + .type sun4v_intr_getenabled,#function 1739 1742 sun4v_intr_getenabled: 1740 1743 mov HV_FAST_INTR_GETENABLED, %o5 1741 1744 ta HV_FAST_TRAP 1742 1745 retl 1743 1746 mov %o1, %o0 1747 + .size sun4v_intr_getenabled, .-sun4v_intr_getenabled 1744 1748 1745 1749 /* %o0: sysino 1746 1750 * %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED}) 1747 1751 */ 1748 1752 .globl sun4v_intr_setenabled 1753 + .type sun4v_intr_setenabled,#function 1749 1754 sun4v_intr_setenabled: 1750 1755 mov HV_FAST_INTR_SETENABLED, %o5 1751 1756 ta HV_FAST_TRAP 1752 1757 retl 1753 1758 nop 1759 + .size sun4v_intr_setenabled, .-sun4v_intr_setenabled 1754 1760 1755 1761 /* %o0: sysino 1756 1762 * 1757 1763 * returns %o0: intr_state (HV_INTR_STATE_*) 1758 1764 */ 1759 1765 .globl sun4v_intr_getstate 1766 + .type sun4v_intr_getstate,#function 1760 1767 sun4v_intr_getstate: 1761 1768 mov HV_FAST_INTR_GETSTATE, %o5 1762 1769 ta HV_FAST_TRAP 1763 1770 retl 1764 1771 mov %o1, %o0 1772 + .size sun4v_intr_getstate, .-sun4v_intr_getstate 1765 1773 1766 1774 /* %o0: sysino 1767 1775 * %o1: intr_state (HV_INTR_STATE_*) 1768 1776 */ 1769 1777 .globl sun4v_intr_setstate 1778 + .type sun4v_intr_setstate,#function 1770 1779 sun4v_intr_setstate: 1771 1780 mov HV_FAST_INTR_SETSTATE, %o5 1772 1781 ta HV_FAST_TRAP 1773 1782 retl 1774 1783 nop 1784 + .size sun4v_intr_setstate, .-sun4v_intr_setstate 1775 1785 1776 1786 /* %o0: sysino 1777 1787 * 1778 1788 * returns %o0: cpuid 1779 1789 */ 1780 1790 .globl sun4v_intr_gettarget 1791 + .type sun4v_intr_gettarget,#function 1781 1792 sun4v_intr_gettarget: 1782 1793 mov HV_FAST_INTR_GETTARGET, %o5 1783 1794 ta HV_FAST_TRAP 1784 1795 retl 1785 1796 mov %o1, %o0 1797 + .size sun4v_intr_gettarget, .-sun4v_intr_gettarget 1786 1798 1787 1799 /* %o0: sysino 1788 1800 * %o1: cpuid 1789 1801 */ 1790 1802 .globl sun4v_intr_settarget 1803 + .type sun4v_intr_settarget,#function 1791 1804 sun4v_intr_settarget: 1792 1805 mov HV_FAST_INTR_SETTARGET, %o5 1793 1806 ta HV_FAST_TRAP 1794 1807 retl 1795 1808 nop 1809 + .size sun4v_intr_settarget, .-sun4v_intr_settarget 1810 + 1811 + /* %o0: cpuid 1812 + * %o1: pc 1813 + * %o2: rtba 1814 + * %o3: arg0 1815 + * 1816 + * returns %o0: status 1817 + */ 1818 + .globl sun4v_cpu_start 1819 + .type sun4v_cpu_start,#function 1820 + sun4v_cpu_start: 1821 + mov HV_FAST_CPU_START, %o5 1822 + ta HV_FAST_TRAP 1823 + retl 1824 + nop 1825 + .size sun4v_cpu_start, .-sun4v_cpu_start 1826 + 1827 + /* %o0: cpuid 1828 + * 1829 + * returns %o0: status 1830 + */ 1831 + .globl sun4v_cpu_stop 1832 + .type sun4v_cpu_stop,#function 1833 + sun4v_cpu_stop: 1834 + mov HV_FAST_CPU_STOP, %o5 1835 + ta HV_FAST_TRAP 1836 + retl 1837 + nop 1838 + .size sun4v_cpu_stop, .-sun4v_cpu_stop 1839 + 1840 + /* returns %o0: status */ 1841 + .globl sun4v_cpu_yield 1842 + .type sun4v_cpu_yield, #function 1843 + sun4v_cpu_yield: 1844 + mov HV_FAST_CPU_YIELD, %o5 1845 + ta HV_FAST_TRAP 1846 + retl 1847 + nop 1848 + .size sun4v_cpu_yield, .-sun4v_cpu_yield 1796 1849 1797 1850 /* %o0: type 1798 1851 * %o1: queue paddr 
··· 1854 1801 * returns %o0: status 1855 1802 */ 1856 1803 .globl sun4v_cpu_qconf 1804 + .type sun4v_cpu_qconf,#function 1857 1805 sun4v_cpu_qconf: 1858 1806 mov HV_FAST_CPU_QCONF, %o5 1859 1807 ta HV_FAST_TRAP 1860 1808 retl 1861 1809 nop 1862 - 1863 - /* returns %o0: status 1864 - */ 1865 - .globl sun4v_cpu_yield 1866 - sun4v_cpu_yield: 1867 - mov HV_FAST_CPU_YIELD, %o5 1868 - ta HV_FAST_TRAP 1869 - retl 1870 - nop 1810 + .size sun4v_cpu_qconf, .-sun4v_cpu_qconf 1871 1811 1872 1812 /* %o0: num cpus in cpu list 1873 1813 * %o1: cpu list paddr ··· 1869 1823 * returns %o0: status 1870 1824 */ 1871 1825 .globl sun4v_cpu_mondo_send 1826 + .type sun4v_cpu_mondo_send,#function 1872 1827 sun4v_cpu_mondo_send: 1873 1828 mov HV_FAST_CPU_MONDO_SEND, %o5 1874 1829 ta HV_FAST_TRAP 1875 1830 retl 1876 1831 nop 1832 + .size sun4v_cpu_mondo_send, .-sun4v_cpu_mondo_send 1877 1833 1878 1834 /* %o0: CPU ID 1879 1835 * ··· 1883 1835 * %o0: cpu state as HV_CPU_STATE_* 1884 1836 */ 1885 1837 .globl sun4v_cpu_state 1838 + .type sun4v_cpu_state,#function 1886 1839 sun4v_cpu_state: 1887 1840 mov HV_FAST_CPU_STATE, %o5 1888 1841 ta HV_FAST_TRAP ··· 1892 1843 mov %o1, %o0 1893 1844 1: retl 1894 1845 nop 1846 + .size sun4v_cpu_state, .-sun4v_cpu_state 1847 + 1848 + /* %o0: virtual address 1849 + * %o1: must be zero 1850 + * %o2: TTE 1851 + * %o3: HV_MMU_* flags 1852 + * 1853 + * returns %o0: status 1854 + */ 1855 + .globl sun4v_mmu_map_perm_addr 1856 + .type sun4v_mmu_map_perm_addr,#function 1857 + sun4v_mmu_map_perm_addr: 1858 + mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 1859 + ta HV_FAST_TRAP 1860 + retl 1861 + nop 1862 + .size sun4v_mmu_map_perm_addr, .-sun4v_mmu_map_perm_addr 1863 + 1864 + /* %o0: number of TSB descriptions 1865 + * %o1: TSB descriptions real address 1866 + * 1867 + * returns %o0: status 1868 + */ 1869 + .globl sun4v_mmu_tsb_ctx0 1870 + .type sun4v_mmu_tsb_ctx0,#function 1871 + sun4v_mmu_tsb_ctx0: 1872 + mov HV_FAST_MMU_TSB_CTX0, %o5 1873 + ta HV_FAST_TRAP 1874 + retl 1875 + nop 1876 + .size sun4v_mmu_tsb_ctx0, .-sun4v_mmu_tsb_ctx0 1895 1877 1896 1878 /* %o0: API group number 1897 1879 * %o1: pointer to unsigned long major number storage ··· 1931 1851 * returns %o0: status 1932 1852 */ 1933 1853 .globl sun4v_get_version 1854 + .type sun4v_get_version,#function 1934 1855 sun4v_get_version: 1935 1856 mov HV_CORE_GET_VER, %o5 1936 1857 mov %o1, %o3 ··· 1940 1859 stx %o1, [%o3] 1941 1860 retl 1942 1861 stx %o2, [%o4] 1862 + .size sun4v_get_version, .-sun4v_get_version 1943 1863 1944 1864 /* %o0: API group number 1945 1865 * %o1: desired major number ··· 1950 1868 * returns %o0: status 1951 1869 */ 1952 1870 .globl sun4v_set_version 1871 + .type sun4v_set_version,#function 1953 1872 sun4v_set_version: 1954 1873 mov HV_CORE_SET_VER, %o5 1955 1874 mov %o3, %o4 1956 1875 ta HV_CORE_TRAP 1957 1876 retl 1958 1877 stx %o1, [%o4] 1878 + .size sun4v_set_version, .-sun4v_set_version 1879 + 1880 + /* %o0: pointer to unsigned long time 1881 + * 1882 + * returns %o0: status 1883 + */ 1884 + .globl sun4v_tod_get 1885 + .type sun4v_tod_get,#function 1886 + sun4v_tod_get: 1887 + mov %o0, %o4 1888 + mov HV_FAST_TOD_GET, %o5 1889 + ta HV_FAST_TRAP 1890 + stx %o1, [%o4] 1891 + retl 1892 + nop 1893 + .size sun4v_tod_get, .-sun4v_tod_get 1894 + 1895 + /* %o0: time 1896 + * 1897 + * returns %o0: status 1898 + */ 1899 + .globl sun4v_tod_set 1900 + .type sun4v_tod_set,#function 1901 + sun4v_tod_set: 1902 + mov HV_FAST_TOD_SET, %o5 1903 + ta HV_FAST_TRAP 1904 + retl 1905 + nop 1906 + .size sun4v_tod_set, .-sun4v_tod_set 1959 1907 
1960 1908 /* %o0: pointer to unsigned long status 1961 1909 * 1962 1910 * returns %o0: signed character 1963 1911 */ 1964 1912 .globl sun4v_con_getchar 1913 + .type sun4v_con_getchar,#function 1965 1914 sun4v_con_getchar: 1966 1915 mov %o0, %o4 1967 1916 mov HV_FAST_CONS_GETCHAR, %o5 ··· 2002 1889 stx %o0, [%o4] 2003 1890 retl 2004 1891 sra %o1, 0, %o0 1892 + .size sun4v_con_getchar, .-sun4v_con_getchar 2005 1893 2006 1894 /* %o0: signed long character 2007 1895 * 2008 1896 * returns %o0: status 2009 1897 */ 2010 1898 .globl sun4v_con_putchar 1899 + .type sun4v_con_putchar,#function 2011 1900 sun4v_con_putchar: 2012 1901 mov HV_FAST_CONS_PUTCHAR, %o5 2013 1902 ta HV_FAST_TRAP 2014 1903 retl 2015 1904 sra %o0, 0, %o0 1905 + .size sun4v_con_putchar, .-sun4v_con_putchar 2016 1906 2017 1907 /* %o0: buffer real address 2018 1908 * %o1: buffer size ··· 2024 1908 * returns %o0: status 2025 1909 */ 2026 1910 .globl sun4v_con_read 1911 + .type sun4v_con_read,#function 2027 1912 sun4v_con_read: 2028 1913 mov %o2, %o4 2029 1914 mov HV_FAST_CONS_READ, %o5 ··· 2039 1922 stx %o1, [%o4] 2040 1923 1: retl 2041 1924 nop 1925 + .size sun4v_con_read, .-sun4v_con_read 2042 1926 2043 1927 /* %o0: buffer real address 2044 1928 * %o1: buffer size ··· 2048 1930 * returns %o0: status 2049 1931 */ 2050 1932 .globl sun4v_con_write 1933 + .type sun4v_con_write,#function 2051 1934 sun4v_con_write: 2052 1935 mov %o2, %o4 2053 1936 mov HV_FAST_CONS_WRITE, %o5 ··· 2056 1937 stx %o1, [%o4] 2057 1938 retl 2058 1939 nop 1940 + .size sun4v_con_write, .-sun4v_con_write 1941 + 1942 + /* %o0: soft state 1943 + * %o1: address of description string 1944 + * 1945 + * returns %o0: status 1946 + */ 1947 + .globl sun4v_mach_set_soft_state 1948 + .type sun4v_mach_set_soft_state,#function 1949 + sun4v_mach_set_soft_state: 1950 + mov HV_FAST_MACH_SET_SOFT_STATE, %o5 1951 + ta HV_FAST_TRAP 1952 + retl 1953 + nop 1954 + .size sun4v_mach_set_soft_state, .-sun4v_mach_set_soft_state 1955 + 1956 + /* %o0: exit code 1957 + * 1958 + * Does not return. 1959 + */ 1960 + .globl sun4v_mach_exit 1961 + .type sun4v_mach_exit,#function 1962 + sun4v_mach_exit: 1963 + mov HV_FAST_MACH_EXIT, %o5 1964 + ta HV_FAST_TRAP 1965 + retl 1966 + nop 1967 + .size sun4v_mach_exit, .-sun4v_mach_exit 1968 + 1969 + /* %o0: buffer real address 1970 + * %o1: buffer length 1971 + * %o2: pointer to unsigned long real_buf_len 1972 + * 1973 + * returns %o0: status 1974 + */ 1975 + .globl sun4v_mach_desc 1976 + .type sun4v_mach_desc,#function 1977 + sun4v_mach_desc: 1978 + mov %o2, %o4 1979 + mov HV_FAST_MACH_DESC, %o5 1980 + ta HV_FAST_TRAP 1981 + stx %o1, [%o4] 1982 + retl 1983 + nop 1984 + .size sun4v_mach_desc, .-sun4v_mach_desc 1985 + 1986 + /* %o0: new timeout in milliseconds 1987 + * %o1: pointer to unsigned long orig_timeout 1988 + * 1989 + * returns %o0: status 1990 + */ 1991 + .globl sun4v_mach_set_watchdog 1992 + .type sun4v_mach_set_watchdog,#function 1993 + sun4v_mach_set_watchdog: 1994 + mov %o1, %o4 1995 + mov HV_FAST_MACH_SET_WATCHDOG, %o5 1996 + ta HV_FAST_TRAP 1997 + stx %o1, [%o4] 1998 + retl 1999 + nop 2000 + .size sun4v_mach_set_watchdog, .-sun4v_mach_set_watchdog 2001 + 2002 + /* No inputs and does not return. 
*/ 2003 + .globl sun4v_mach_sir 2004 + .type sun4v_mach_sir,#function 2005 + sun4v_mach_sir: 2006 + mov %o1, %o4 2007 + mov HV_FAST_MACH_SIR, %o5 2008 + ta HV_FAST_TRAP 2009 + stx %o1, [%o4] 2010 + retl 2011 + nop 2012 + .size sun4v_mach_sir, .-sun4v_mach_sir 2013 + 2014 + /* %o0: channel 2015 + * %o1: ra 2016 + * %o2: num_entries 2017 + * 2018 + * returns %o0: status 2019 + */ 2020 + .globl sun4v_ldc_tx_qconf 2021 + .type sun4v_ldc_tx_qconf,#function 2022 + sun4v_ldc_tx_qconf: 2023 + mov HV_FAST_LDC_TX_QCONF, %o5 2024 + ta HV_FAST_TRAP 2025 + retl 2026 + nop 2027 + .size sun4v_ldc_tx_qconf, .-sun4v_ldc_tx_qconf 2028 + 2029 + /* %o0: channel 2030 + * %o1: pointer to unsigned long ra 2031 + * %o2: pointer to unsigned long num_entries 2032 + * 2033 + * returns %o0: status 2034 + */ 2035 + .globl sun4v_ldc_tx_qinfo 2036 + .type sun4v_ldc_tx_qinfo,#function 2037 + sun4v_ldc_tx_qinfo: 2038 + mov %o1, %g1 2039 + mov %o2, %g2 2040 + mov HV_FAST_LDC_TX_QINFO, %o5 2041 + ta HV_FAST_TRAP 2042 + stx %o1, [%g1] 2043 + stx %o2, [%g2] 2044 + retl 2045 + nop 2046 + .size sun4v_ldc_tx_qinfo, .-sun4v_ldc_tx_qinfo 2047 + 2048 + /* %o0: channel 2049 + * %o1: pointer to unsigned long head_off 2050 + * %o2: pointer to unsigned long tail_off 2051 + * %o3: pointer to unsigned long chan_state 2052 + * 2053 + * returns %o0: status 2054 + */ 2055 + .globl sun4v_ldc_tx_get_state 2056 + .type sun4v_ldc_tx_get_state,#function 2057 + sun4v_ldc_tx_get_state: 2058 + mov %o1, %g1 2059 + mov %o2, %g2 2060 + mov %o3, %g3 2061 + mov HV_FAST_LDC_TX_GET_STATE, %o5 2062 + ta HV_FAST_TRAP 2063 + stx %o1, [%g1] 2064 + stx %o2, [%g2] 2065 + stx %o3, [%g3] 2066 + retl 2067 + nop 2068 + .size sun4v_ldc_tx_get_state, .-sun4v_ldc_tx_get_state 2069 + 2070 + /* %o0: channel 2071 + * %o1: tail_off 2072 + * 2073 + * returns %o0: status 2074 + */ 2075 + .globl sun4v_ldc_tx_set_qtail 2076 + .type sun4v_ldc_tx_set_qtail,#function 2077 + sun4v_ldc_tx_set_qtail: 2078 + mov HV_FAST_LDC_TX_SET_QTAIL, %o5 2079 + ta HV_FAST_TRAP 2080 + retl 2081 + nop 2082 + .size sun4v_ldc_tx_set_qtail, .-sun4v_ldc_tx_set_qtail 2083 + 2084 + /* %o0: channel 2085 + * %o1: ra 2086 + * %o2: num_entries 2087 + * 2088 + * returns %o0: status 2089 + */ 2090 + .globl sun4v_ldc_rx_qconf 2091 + .type sun4v_ldc_rx_qconf,#function 2092 + sun4v_ldc_rx_qconf: 2093 + mov HV_FAST_LDC_RX_QCONF, %o5 2094 + ta HV_FAST_TRAP 2095 + retl 2096 + nop 2097 + .size sun4v_ldc_rx_qconf, .-sun4v_ldc_rx_qconf 2098 + 2099 + /* %o0: channel 2100 + * %o1: pointer to unsigned long ra 2101 + * %o2: pointer to unsigned long num_entries 2102 + * 2103 + * returns %o0: status 2104 + */ 2105 + .globl sun4v_ldc_rx_qinfo 2106 + .type sun4v_ldc_rx_qinfo,#function 2107 + sun4v_ldc_rx_qinfo: 2108 + mov %o1, %g1 2109 + mov %o2, %g2 2110 + mov HV_FAST_LDC_RX_QINFO, %o5 2111 + ta HV_FAST_TRAP 2112 + stx %o1, [%g1] 2113 + stx %o2, [%g2] 2114 + retl 2115 + nop 2116 + .size sun4v_ldc_rx_qinfo, .-sun4v_ldc_rx_qinfo 2117 + 2118 + /* %o0: channel 2119 + * %o1: pointer to unsigned long head_off 2120 + * %o2: pointer to unsigned long tail_off 2121 + * %o3: pointer to unsigned long chan_state 2122 + * 2123 + * returns %o0: status 2124 + */ 2125 + .globl sun4v_ldc_rx_get_state 2126 + .type sun4v_ldc_rx_get_state,#function 2127 + sun4v_ldc_rx_get_state: 2128 + mov %o1, %g1 2129 + mov %o2, %g2 2130 + mov %o3, %g3 2131 + mov HV_FAST_LDC_RX_GET_STATE, %o5 2132 + ta HV_FAST_TRAP 2133 + stx %o1, [%g1] 2134 + stx %o2, [%g2] 2135 + stx %o3, [%g3] 2136 + retl 2137 + nop 2138 + .size sun4v_ldc_rx_get_state, 
.-sun4v_ldc_rx_get_state 2139 + 2140 + /* %o0: channel 2141 + * %o1: head_off 2142 + * 2143 + * returns %o0: status 2144 + */ 2145 + .globl sun4v_ldc_rx_set_qhead 2146 + .type sun4v_ldc_rx_set_qhead,#function 2147 + sun4v_ldc_rx_set_qhead: 2148 + mov HV_FAST_LDC_RX_SET_QHEAD, %o5 2149 + ta HV_FAST_TRAP 2150 + retl 2151 + nop 2152 + .size sun4v_ldc_rx_set_qhead, .-sun4v_ldc_rx_set_qhead 2153 + 2154 + /* %o0: channel 2155 + * %o1: ra 2156 + * %o2: num_entries 2157 + * 2158 + * returns %o0: status 2159 + */ 2160 + .globl sun4v_ldc_set_map_table 2161 + .type sun4v_ldc_set_map_table,#function 2162 + sun4v_ldc_set_map_table: 2163 + mov HV_FAST_LDC_SET_MAP_TABLE, %o5 2164 + ta HV_FAST_TRAP 2165 + retl 2166 + nop 2167 + .size sun4v_ldc_set_map_table, .-sun4v_ldc_set_map_table 2168 + 2169 + /* %o0: channel 2170 + * %o1: pointer to unsigned long ra 2171 + * %o2: pointer to unsigned long num_entries 2172 + * 2173 + * returns %o0: status 2174 + */ 2175 + .globl sun4v_ldc_get_map_table 2176 + .type sun4v_ldc_get_map_table,#function 2177 + sun4v_ldc_get_map_table: 2178 + mov %o1, %g1 2179 + mov %o2, %g2 2180 + mov HV_FAST_LDC_GET_MAP_TABLE, %o5 2181 + ta HV_FAST_TRAP 2182 + stx %o1, [%g1] 2183 + stx %o2, [%g2] 2184 + retl 2185 + nop 2186 + .size sun4v_ldc_get_map_table, .-sun4v_ldc_get_map_table 2187 + 2188 + /* %o0: channel 2189 + * %o1: dir_code 2190 + * %o2: tgt_raddr 2191 + * %o3: lcl_raddr 2192 + * %o4: len 2193 + * %o5: pointer to unsigned long actual_len 2194 + * 2195 + * returns %o0: status 2196 + */ 2197 + .globl sun4v_ldc_copy 2198 + .type sun4v_ldc_copy,#function 2199 + sun4v_ldc_copy: 2200 + mov %o5, %g1 2201 + mov HV_FAST_LDC_COPY, %o5 2202 + ta HV_FAST_TRAP 2203 + stx %o1, [%g1] 2204 + retl 2205 + nop 2206 + .size sun4v_ldc_copy, .-sun4v_ldc_copy 2207 + 2208 + /* %o0: channel 2209 + * %o1: cookie 2210 + * %o2: pointer to unsigned long ra 2211 + * %o3: pointer to unsigned long perm 2212 + * 2213 + * returns %o0: status 2214 + */ 2215 + .globl sun4v_ldc_mapin 2216 + .type sun4v_ldc_mapin,#function 2217 + sun4v_ldc_mapin: 2218 + mov %o2, %g1 2219 + mov %o3, %g2 2220 + mov HV_FAST_LDC_MAPIN, %o5 2221 + ta HV_FAST_TRAP 2222 + stx %o1, [%g1] 2223 + stx %o2, [%g2] 2224 + retl 2225 + nop 2226 + .size sun4v_ldc_mapin, .-sun4v_ldc_mapin 2227 + 2228 + /* %o0: ra 2229 + * 2230 + * returns %o0: status 2231 + */ 2232 + .globl sun4v_ldc_unmap 2233 + .type sun4v_ldc_unmap,#function 2234 + sun4v_ldc_unmap: 2235 + mov HV_FAST_LDC_UNMAP, %o5 2236 + ta HV_FAST_TRAP 2237 + retl 2238 + nop 2239 + .size sun4v_ldc_unmap, .-sun4v_ldc_unmap 2240 + 2241 + /* %o0: cookie 2242 + * %o1: mte_cookie 2243 + * 2244 + * returns %o0: status 2245 + */ 2246 + .globl sun4v_ldc_revoke 2247 + .type sun4v_ldc_revoke,#function 2248 + sun4v_ldc_revoke: 2249 + mov HV_FAST_LDC_REVOKE, %o5 2250 + ta HV_FAST_TRAP 2251 + retl 2252 + nop 2253 + .size sun4v_ldc_revoke, .-sun4v_ldc_revoke 2254 + 2255 + /* %o0: device handle 2256 + * %o1: device INO 2257 + * %o2: pointer to unsigned long cookie 2258 + * 2259 + * returns %o0: status 2260 + */ 2261 + .globl sun4v_vintr_get_cookie 2262 + .type sun4v_vintr_get_cookie,#function 2263 + sun4v_vintr_get_cookie: 2264 + mov %o2, %g1 2265 + mov HV_FAST_VINTR_GET_COOKIE, %o5 2266 + ta HV_FAST_TRAP 2267 + stx %o1, [%g1] 2268 + retl 2269 + nop 2270 + .size sun4v_vintr_get_cookie, .-sun4v_vintr_get_cookie 2271 + 2272 + /* %o0: device handle 2273 + * %o1: device INO 2274 + * %o2: cookie 2275 + * 2276 + * returns %o0: status 2277 + */ 2278 + .globl sun4v_vintr_set_cookie 2279 + .type 
sun4v_vintr_set_cookie,#function 2280 + sun4v_vintr_set_cookie: 2281 + mov HV_FAST_VINTR_SET_COOKIE, %o5 2282 + ta HV_FAST_TRAP 2283 + retl 2284 + nop 2285 + .size sun4v_vintr_set_cookie, .-sun4v_vintr_set_cookie 2286 + 2287 + /* %o0: device handle 2288 + * %o1: device INO 2289 + * %o2: pointer to unsigned long valid_state 2290 + * 2291 + * returns %o0: status 2292 + */ 2293 + .globl sun4v_vintr_get_valid 2294 + .type sun4v_vintr_get_valid,#function 2295 + sun4v_vintr_get_valid: 2296 + mov %o2, %g1 2297 + mov HV_FAST_VINTR_GET_VALID, %o5 2298 + ta HV_FAST_TRAP 2299 + stx %o1, [%g1] 2300 + retl 2301 + nop 2302 + .size sun4v_vintr_get_valid, .-sun4v_vintr_get_valid 2303 + 2304 + /* %o0: device handle 2305 + * %o1: device INO 2306 + * %o2: valid_state 2307 + * 2308 + * returns %o0: status 2309 + */ 2310 + .globl sun4v_vintr_set_valid 2311 + .type sun4v_vintr_set_valid,#function 2312 + sun4v_vintr_set_valid: 2313 + mov HV_FAST_VINTR_SET_VALID, %o5 2314 + ta HV_FAST_TRAP 2315 + retl 2316 + nop 2317 + .size sun4v_vintr_set_valid, .-sun4v_vintr_set_valid 2318 + 2319 + /* %o0: device handle 2320 + * %o1: device INO 2321 + * %o2: pointer to unsigned long state 2322 + * 2323 + * returns %o0: status 2324 + */ 2325 + .globl sun4v_vintr_get_state 2326 + .type sun4v_vintr_get_state,#function 2327 + sun4v_vintr_get_state: 2328 + mov %o2, %g1 2329 + mov HV_FAST_VINTR_GET_STATE, %o5 2330 + ta HV_FAST_TRAP 2331 + stx %o1, [%g1] 2332 + retl 2333 + nop 2334 + .size sun4v_vintr_get_state, .-sun4v_vintr_get_state 2335 + 2336 + /* %o0: device handle 2337 + * %o1: device INO 2338 + * %o2: state 2339 + * 2340 + * returns %o0: status 2341 + */ 2342 + .globl sun4v_vintr_set_state 2343 + .type sun4v_vintr_set_state,#function 2344 + sun4v_vintr_set_state: 2345 + mov HV_FAST_VINTR_SET_STATE, %o5 2346 + ta HV_FAST_TRAP 2347 + retl 2348 + nop 2349 + .size sun4v_vintr_set_state, .-sun4v_vintr_set_state 2350 + 2351 + /* %o0: device handle 2352 + * %o1: device INO 2353 + * %o2: pointer to unsigned long cpuid 2354 + * 2355 + * returns %o0: status 2356 + */ 2357 + .globl sun4v_vintr_get_target 2358 + .type sun4v_vintr_get_target,#function 2359 + sun4v_vintr_get_target: 2360 + mov %o2, %g1 2361 + mov HV_FAST_VINTR_GET_TARGET, %o5 2362 + ta HV_FAST_TRAP 2363 + stx %o1, [%g1] 2364 + retl 2365 + nop 2366 + .size sun4v_vintr_get_target, .-sun4v_vintr_get_target 2367 + 2368 + /* %o0: device handle 2369 + * %o1: device INO 2370 + * %o2: cpuid 2371 + * 2372 + * returns %o0: status 2373 + */ 2374 + .globl sun4v_vintr_set_target 2375 + .type sun4v_vintr_set_target,#function 2376 + sun4v_vintr_set_target: 2377 + mov HV_FAST_VINTR_SET_TARGET, %o5 2378 + ta HV_FAST_TRAP 2379 + retl 2380 + nop 2381 + .size sun4v_vintr_set_target, .-sun4v_vintr_set_target
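
All of these stubs share one calling convention: the C arguments arrive in %o0-%o5 per the SPARC ABI, the fast-trap function number is loaded into %o5, `ta HV_FAST_TRAP` enters the hypervisor, and the status comes back in %o0, with by-reference outputs stored through saved pointer registers. That makes each stub directly callable from C. A hedged sketch of how the new TOD pair might be declared and used (prototypes assumed here; the real declarations belong in the hypervisor header):

    extern unsigned long sun4v_tod_get(unsigned long *time);
    extern unsigned long sun4v_tod_set(unsigned long time);

    static int hypervisor_read_tod(unsigned long *secs)
    {
            unsigned long status = sun4v_tod_get(secs);

            return (status == HV_EOK) ? 0 : -EIO;   /* HV_EOK is 0 */
    }

The new .type/.size directives, for their part, give the stubs proper ELF symbol metadata so kallsyms, profilers, and debuggers can size and classify them correctly.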
+26 -5
arch/sparc64/kernel/head.S
··· 523 523 #else 524 524 mov 0, %o0 525 525 #endif 526 - stb %o0, [%g6 + TI_CPU] 526 + sth %o0, [%g6 + TI_CPU] 527 527 528 528 /* Off we go.... */ 529 529 call start_kernel ··· 653 653 restore 654 654 sparc64_boot_end: 655 655 656 - #include "ktlb.S" 657 - #include "tsb.S" 658 656 #include "etrap.S" 659 657 #include "rtrap.S" 660 658 #include "winfixup.S" 661 659 #include "entry.S" 662 660 #include "sun4v_tlb_miss.S" 663 661 #include "sun4v_ivec.S" 662 + #include "ktlb.S" 663 + #include "tsb.S" 664 664 665 665 /* 666 666 * The following skip makes sure the trap table in ttable.S is aligned 667 667 * on a 32K boundary as required by the v9 specs for TBA register. 668 668 * 669 669 * We align to a 32K boundary, then we have the 32K kernel TSB, 670 - * then the 32K aligned trap table. 670 + * the 64K kernel 4MB TSB, and then the 32K aligned trap table. 671 671 */ 672 672 1: 673 673 .skip 0x4000 + _start - 1b 674 + 675 + ! 0x0000000000408000 674 676 675 677 .globl swapper_tsb 676 678 swapper_tsb: 677 679 .skip (32 * 1024) 678 680 679 - ! 0x0000000000408000 681 + .globl swapper_4m_tsb 682 + swapper_4m_tsb: 683 + .skip (64 * 1024) 680 684 685 + ! 0x0000000000420000 686 + 687 + /* Some care needs to be exercised if you try to move the 688 + * location of the trap table relative to other things. For 689 + * one thing there are br* instructions in some of the 690 + * trap table entries which branch back to code in ktlb.S 691 + * Those instructions can only handle a signed 16-bit 692 + * displacement. 693 + * 694 + * There is a binutils bug (bugzilla #4558) which causes 695 + * the relocation overflow checks for such instructions to 696 + * not be done correctly. So binutils will not notice the 697 + * error and will instead write junk into the relocation and 698 + * you'll have an unbootable kernel. 699 + */ 681 700 #include "ttable.S" 701 + 702 + ! 0x0000000000428000 682 703 683 704 #include "systbls.S" 684 705
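
The address comments are just the running sum of the skips, starting from the 32K-aligned point at 0x408000:

    0x408000  swapper_tsb      (32K)  -> ends 0x410000
    0x410000  swapper_4m_tsb   (64K)  -> ends 0x420000
    0x420000  trap table       (32K, the alignment the TBA register requires)
    0x428000  systbls.S

So inserting the 64K 4MB TSB keeps the trap table on a 32K boundary, and the new comment block warns against reshuffling this layout because the br* reach back into ktlb.S is only a signed 16-bit displacement.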
+4 -1
arch/sparc64/kernel/hvapi.c
··· 9 9 10 10 #include <asm/hypervisor.h> 11 11 #include <asm/oplib.h> 12 + #include <asm/sstate.h> 12 13 13 14 /* If the hypervisor indicates that the API setting 14 15 * calls are unsupported, by returning HV_EBADTRAP or ··· 108 107 p->minor = actual_minor; 109 108 ret = 0; 110 109 } else if (hv_ret == HV_EBADTRAP || 111 - HV_ENOTSUPPORTED) { 110 + hv_ret == HV_ENOTSUPPORTED) { 112 111 if (p->flags & FLAG_PRE_API) { 113 112 if (major == 1) { 114 113 p->major = 1; ··· 179 178 minor = 1; 180 179 if (sun4v_hvapi_register(group, major, &minor)) 181 180 goto bad; 181 + 182 + sun4v_sstate_init(); 182 183 183 184 return; 184 185
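
The one-line condition fix matters more than it looks: HV_ENOTSUPPORTED is a nonzero constant, so the old expression parsed as (hv_ret == HV_EBADTRAP) || HV_ENOTSUPPORTED and was always true, sending every registration result down the pre-API fallback path. The general shape of the bug, with hypothetical names:

    if (ret == ERR_A || ERR_B)          /* always true when ERR_B != 0 */
    if (ret == ERR_A || ret == ERR_B)   /* the intended comparison */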
+53 -30
arch/sparc64/kernel/irq.c
··· 171 171 return 0; 172 172 } 173 173 174 - extern unsigned long real_hard_smp_processor_id(void); 175 - 176 174 static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) 177 175 { 178 176 unsigned int tid; ··· 692 694 trap_block[cpu].irq_worklist = 0; 693 695 } 694 696 695 - static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type) 697 + /* Please be very careful with register_one_mondo() and 698 + * sun4v_register_mondo_queues(). 699 + * 700 + * On SMP this gets invoked from the CPU trampoline before 701 + * the cpu has fully taken over the trap table from OBP, 702 + * and its kernel stack + %g6 thread register state is 703 + * not fully cooked yet. 704 + * 705 + * Therefore you cannot make any OBP calls, not even prom_printf, 706 + * from these two routines. 707 + */ 708 + static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) 696 709 { 697 - unsigned long num_entries = 128; 710 + unsigned long num_entries = (qmask + 1) / 64; 698 711 unsigned long status; 699 712 700 713 status = sun4v_cpu_qconf(type, paddr, num_entries); ··· 720 711 { 721 712 struct trap_per_cpu *tb = &trap_block[this_cpu]; 722 713 723 - register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO); 724 - register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO); 725 - register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR); 726 - register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR); 714 + register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO, 715 + tb->cpu_mondo_qmask); 716 + register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO, 717 + tb->dev_mondo_qmask); 718 + register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR, 719 + tb->resum_qmask); 720 + register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR, 721 + tb->nonresum_qmask); 727 722 } 728 723 729 - static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem) 724 + static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem) 730 725 { 731 - void *page; 726 + unsigned long size = PAGE_ALIGN(qmask + 1); 727 + unsigned long order = get_order(size); 728 + void *p = NULL; 732 729 733 - if (use_bootmem) 734 - page = alloc_bootmem_low_pages(PAGE_SIZE); 735 - else 736 - page = (void *) get_zeroed_page(GFP_ATOMIC); 730 + if (use_bootmem) { 731 + p = __alloc_bootmem_low(size, size, 0); 732 + } else { 733 + struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order); 734 + if (page) 735 + p = page_address(page); 736 + } 737 737 738 - if (!page) { 738 + if (!p) { 739 739 prom_printf("SUN4V: Error, cannot allocate mondo queue.\n"); 740 740 prom_halt(); 741 741 } 742 742 743 - *pa_ptr = __pa(page); 743 + *pa_ptr = __pa(p); 744 744 } 745 745 746 - static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem) 746 + static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem) 747 747 { 748 - void *page; 748 + unsigned long size = PAGE_ALIGN(qmask + 1); 749 + unsigned long order = get_order(size); 750 + void *p = NULL; 749 751 750 - if (use_bootmem) 751 - page = alloc_bootmem_low_pages(PAGE_SIZE); 752 - else 753 - page = (void *) get_zeroed_page(GFP_ATOMIC); 752 + if (use_bootmem) { 753 + p = __alloc_bootmem_low(size, size, 0); 754 + } else { 755 + struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order); 756 + if (page) 757 + p = page_address(page); 758 + } 754 759 755 - if (!page) { 760 + if (!p) { 756 761 
prom_printf("SUN4V: Error, cannot allocate kbuf page.\n"); 757 762 prom_halt(); 758 763 } 759 764 760 - *pa_ptr = __pa(page); 765 + *pa_ptr = __pa(p); 761 766 } 762 767 763 768 static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem) ··· 802 779 struct trap_per_cpu *tb = &trap_block[cpu]; 803 780 804 781 if (alloc) { 805 - alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem); 806 - alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem); 807 - alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem); 808 - alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem); 809 - alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem); 810 - alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem); 782 + alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask, use_bootmem); 783 + alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask, use_bootmem); 784 + alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask, use_bootmem); 785 + alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask, use_bootmem); 786 + alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask, use_bootmem); 787 + alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask, use_bootmem); 811 788 812 789 init_cpu_send_mondo_info(tb, use_bootmem); 813 790 }
+2 -2
arch/sparc64/kernel/itlb_miss.S
··· 11 11 /* ITLB ** ICACHE line 2: TSB compare and TLB load */ 12 12 bne,pn %xcc, tsb_miss_itlb ! Miss 13 13 mov FAULT_CODE_ITLB, %g3 14 - andcc %g5, _PAGE_EXEC_4U, %g0 ! Executable? 14 + sethi %hi(_PAGE_EXEC_4U), %g4 15 + andcc %g5, %g4, %g0 ! Executable? 15 16 be,pn %xcc, tsb_do_fault 16 17 nop ! Delay slot, fill me 17 18 stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB 18 19 retry ! Trap done 19 - nop 20 20 21 21 /* ITLB ** ICACHE line 3: */ 22 22 nop
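
The reason for the extra instruction: andcc with an immediate can only encode a 13-bit sign-extended constant, i.e. -4096..4095. Assuming the post-TSB-rewrite PTE layout where _PAGE_EXEC_4U is software bit 12:

    simm13 range:   -4096 .. 4095
    _PAGE_EXEC_4U:  0x1000 = 4096   -> one past the range, so the constant
                                       must be built in %g4 via sethi %hi(...)

The trailing nop after retry is dropped so the handler still fits within its icache line.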
+619
arch/sparc64/kernel/mdesc.c
··· 1 + /* mdesc.c: Sun4V machine description handling. 2 + * 3 + * Copyright (C) 2007 David S. Miller <davem@davemloft.net> 4 + */ 5 + #include <linux/kernel.h> 6 + #include <linux/types.h> 7 + #include <linux/bootmem.h> 8 + #include <linux/log2.h> 9 + 10 + #include <asm/hypervisor.h> 11 + #include <asm/mdesc.h> 12 + #include <asm/prom.h> 13 + #include <asm/oplib.h> 14 + #include <asm/smp.h> 15 + 16 + /* Unlike the OBP device tree, the machine description is a full-on 17 + * DAG. An arbitrary number of ARCs are possible from one 18 + * node to other nodes and thus we can't use the OBP device_node 19 + * data structure to represent these nodes inside of the kernel. 20 + * 21 + * Actually, it isn't even a DAG, because there are back pointers 22 + * which create cycles in the graph. 23 + * 24 + * mdesc_hdr and mdesc_elem describe the layout of the data structure 25 + * we get from the Hypervisor. 26 + */ 27 + struct mdesc_hdr { 28 + u32 version; /* Transport version */ 29 + u32 node_sz; /* node block size */ 30 + u32 name_sz; /* name block size */ 31 + u32 data_sz; /* data block size */ 32 + }; 33 + 34 + struct mdesc_elem { 35 + u8 tag; 36 + #define MD_LIST_END 0x00 37 + #define MD_NODE 0x4e 38 + #define MD_NODE_END 0x45 39 + #define MD_NOOP 0x20 40 + #define MD_PROP_ARC 0x61 41 + #define MD_PROP_VAL 0x76 42 + #define MD_PROP_STR 0x73 43 + #define MD_PROP_DATA 0x64 44 + u8 name_len; 45 + u16 resv; 46 + u32 name_offset; 47 + union { 48 + struct { 49 + u32 data_len; 50 + u32 data_offset; 51 + } data; 52 + u64 val; 53 + } d; 54 + }; 55 + 56 + static struct mdesc_hdr *main_mdesc; 57 + static struct mdesc_node *allnodes; 58 + 59 + static struct mdesc_node *allnodes_tail; 60 + static unsigned int unique_id; 61 + 62 + static struct mdesc_node **mdesc_hash; 63 + static unsigned int mdesc_hash_size; 64 + 65 + static inline unsigned int node_hashfn(u64 node) 66 + { 67 + return ((unsigned int) (node ^ (node >> 8) ^ (node >> 16))) 68 + & (mdesc_hash_size - 1); 69 + } 70 + 71 + static inline void hash_node(struct mdesc_node *mp) 72 + { 73 + struct mdesc_node **head = &mdesc_hash[node_hashfn(mp->node)]; 74 + 75 + mp->hash_next = *head; 76 + *head = mp; 77 + 78 + if (allnodes_tail) { 79 + allnodes_tail->allnodes_next = mp; 80 + allnodes_tail = mp; 81 + } else { 82 + allnodes = allnodes_tail = mp; 83 + } 84 + } 85 + 86 + static struct mdesc_node *find_node(u64 node) 87 + { 88 + struct mdesc_node *mp = mdesc_hash[node_hashfn(node)]; 89 + 90 + while (mp) { 91 + if (mp->node == node) 92 + return mp; 93 + 94 + mp = mp->hash_next; 95 + } 96 + return NULL; 97 + } 98 + 99 + struct property *md_find_property(const struct mdesc_node *mp, 100 + const char *name, 101 + int *lenp) 102 + { 103 + struct property *pp; 104 + 105 + for (pp = mp->properties; pp != 0; pp = pp->next) { 106 + if (strcasecmp(pp->name, name) == 0) { 107 + if (lenp) 108 + *lenp = pp->length; 109 + break; 110 + } 111 + } 112 + return pp; 113 + } 114 + EXPORT_SYMBOL(md_find_property); 115 + 116 + /* 117 + * Find a property with a given name for a given node 118 + * and return the value. 119 + */ 120 + const void *md_get_property(const struct mdesc_node *mp, const char *name, 121 + int *lenp) 122 + { 123 + struct property *pp = md_find_property(mp, name, lenp); 124 + return pp ? pp->value : NULL; 125 + } 126 + EXPORT_SYMBOL(md_get_property); 127 + 128 + struct mdesc_node *md_find_node_by_name(struct mdesc_node *from, 129 + const char *name) 130 + { 131 + struct mdesc_node *mp; 132 + 133 + mp = from ? 
from->allnodes_next : allnodes; 134 + for (; mp != NULL; mp = mp->allnodes_next) { 135 + if (strcmp(mp->name, name) == 0) 136 + break; 137 + } 138 + return mp; 139 + } 140 + EXPORT_SYMBOL(md_find_node_by_name); 141 + 142 + static unsigned int mdesc_early_allocated; 143 + 144 + static void * __init mdesc_early_alloc(unsigned long size) 145 + { 146 + void *ret; 147 + 148 + ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL); 149 + if (ret == NULL) { 150 + prom_printf("MDESC: alloc of %lu bytes failed.\n", size); 151 + prom_halt(); 152 + } 153 + 154 + memset(ret, 0, size); 155 + 156 + mdesc_early_allocated += size; 157 + 158 + return ret; 159 + } 160 + 161 + static unsigned int __init count_arcs(struct mdesc_elem *ep) 162 + { 163 + unsigned int ret = 0; 164 + 165 + ep++; 166 + while (ep->tag != MD_NODE_END) { 167 + if (ep->tag == MD_PROP_ARC) 168 + ret++; 169 + ep++; 170 + } 171 + return ret; 172 + } 173 + 174 + static void __init mdesc_node_alloc(u64 node, struct mdesc_elem *ep, const char *names) 175 + { 176 + unsigned int num_arcs = count_arcs(ep); 177 + struct mdesc_node *mp; 178 + 179 + mp = mdesc_early_alloc(sizeof(*mp) + 180 + (num_arcs * sizeof(struct mdesc_arc))); 181 + mp->name = names + ep->name_offset; 182 + mp->node = node; 183 + mp->unique_id = unique_id++; 184 + mp->num_arcs = num_arcs; 185 + 186 + hash_node(mp); 187 + } 188 + 189 + static inline struct mdesc_elem *node_block(struct mdesc_hdr *mdesc) 190 + { 191 + return (struct mdesc_elem *) (mdesc + 1); 192 + } 193 + 194 + static inline void *name_block(struct mdesc_hdr *mdesc) 195 + { 196 + return ((void *) node_block(mdesc)) + mdesc->node_sz; 197 + } 198 + 199 + static inline void *data_block(struct mdesc_hdr *mdesc) 200 + { 201 + return ((void *) name_block(mdesc)) + mdesc->name_sz; 202 + } 203 + 204 + /* In order to avoid recursion (the graph can be very deep) we use a 205 + * two pass algorithm. First we allocate all the nodes and hash them. 206 + * Then we iterate over each node, filling in the arcs and properties. 
207 + */ 208 + static void __init build_all_nodes(struct mdesc_hdr *mdesc) 209 + { 210 + struct mdesc_elem *start, *ep; 211 + struct mdesc_node *mp; 212 + const char *names; 213 + void *data; 214 + u64 last_node; 215 + 216 + start = ep = node_block(mdesc); 217 + last_node = mdesc->node_sz / 16; 218 + 219 + names = name_block(mdesc); 220 + 221 + while (1) { 222 + u64 node = ep - start; 223 + 224 + if (ep->tag == MD_LIST_END) 225 + break; 226 + 227 + if (ep->tag != MD_NODE) { 228 + prom_printf("MDESC: Inconsistent element list.\n"); 229 + prom_halt(); 230 + } 231 + 232 + mdesc_node_alloc(node, ep, names); 233 + 234 + if (ep->d.val >= last_node) { 235 + printk("MDESC: Warning, early break out of node scan.\n"); 236 + printk("MDESC: Next node [%lu] last_node [%lu].\n", 237 + node, last_node); 238 + break; 239 + } 240 + 241 + ep = start + ep->d.val; 242 + } 243 + 244 + data = data_block(mdesc); 245 + for (mp = allnodes; mp; mp = mp->allnodes_next) { 246 + struct mdesc_elem *ep = start + mp->node; 247 + struct property **link = &mp->properties; 248 + unsigned int this_arc = 0; 249 + 250 + ep++; 251 + while (ep->tag != MD_NODE_END) { 252 + switch (ep->tag) { 253 + case MD_PROP_ARC: { 254 + struct mdesc_node *target; 255 + 256 + if (this_arc >= mp->num_arcs) { 257 + prom_printf("MDESC: ARC overrun [%u:%u]\n", 258 + this_arc, mp->num_arcs); 259 + prom_halt(); 260 + } 261 + target = find_node(ep->d.val); 262 + if (!target) { 263 + printk("MDESC: Warning, arc points to " 264 + "missing node, ignoring.\n"); 265 + break; 266 + } 267 + mp->arcs[this_arc].name = 268 + (names + ep->name_offset); 269 + mp->arcs[this_arc].arc = target; 270 + this_arc++; 271 + break; 272 + } 273 + 274 + case MD_PROP_VAL: 275 + case MD_PROP_STR: 276 + case MD_PROP_DATA: { 277 + struct property *p = mdesc_early_alloc(sizeof(*p)); 278 + 279 + p->unique_id = unique_id++; 280 + p->name = (char *) names + ep->name_offset; 281 + if (ep->tag == MD_PROP_VAL) { 282 + p->value = &ep->d.val; 283 + p->length = 8; 284 + } else { 285 + p->value = data + ep->d.data.data_offset; 286 + p->length = ep->d.data.data_len; 287 + } 288 + *link = p; 289 + link = &p->next; 290 + break; 291 + } 292 + 293 + case MD_NOOP: 294 + break; 295 + 296 + default: 297 + printk("MDESC: Warning, ignoring unknown tag type %02x\n", 298 + ep->tag); 299 + } 300 + ep++; 301 + } 302 + } 303 + } 304 + 305 + static unsigned int __init count_nodes(struct mdesc_hdr *mdesc) 306 + { 307 + struct mdesc_elem *ep = node_block(mdesc); 308 + struct mdesc_elem *end; 309 + unsigned int cnt = 0; 310 + 311 + end = ((void *)ep) + mdesc->node_sz; 312 + while (ep < end) { 313 + if (ep->tag == MD_NODE) 314 + cnt++; 315 + ep++; 316 + } 317 + return cnt; 318 + } 319 + 320 + static void __init report_platform_properties(void) 321 + { 322 + struct mdesc_node *pn = md_find_node_by_name(NULL, "platform"); 323 + const char *s; 324 + const u64 *v; 325 + 326 + if (!pn) { 327 + prom_printf("No platform node in machine-description.\n"); 328 + prom_halt(); 329 + } 330 + 331 + s = md_get_property(pn, "banner-name", NULL); 332 + printk("PLATFORM: banner-name [%s]\n", s); 333 + s = md_get_property(pn, "name", NULL); 334 + printk("PLATFORM: name [%s]\n", s); 335 + 336 + v = md_get_property(pn, "hostid", NULL); 337 + if (v) 338 + printk("PLATFORM: hostid [%08lx]\n", *v); 339 + v = md_get_property(pn, "serial#", NULL); 340 + if (v) 341 + printk("PLATFORM: serial# [%08lx]\n", *v); 342 + v = md_get_property(pn, "stick-frequency", NULL); 343 + printk("PLATFORM: stick-frequency [%08lx]\n", *v); 344 + v = 
md_get_property(pn, "mac-address", NULL); 345 + if (v) 346 + printk("PLATFORM: mac-address [%lx]\n", *v); 347 + v = md_get_property(pn, "watchdog-resolution", NULL); 348 + if (v) 349 + printk("PLATFORM: watchdog-resolution [%lu ms]\n", *v); 350 + v = md_get_property(pn, "watchdog-max-timeout", NULL); 351 + if (v) 352 + printk("PLATFORM: watchdog-max-timeout [%lu ms]\n", *v); 353 + v = md_get_property(pn, "max-cpus", NULL); 354 + if (v) 355 + printk("PLATFORM: max-cpus [%lu]\n", *v); 356 + } 357 + 358 + static int inline find_in_proplist(const char *list, const char *match, int len) 359 + { 360 + while (len > 0) { 361 + int l; 362 + 363 + if (!strcmp(list, match)) 364 + return 1; 365 + l = strlen(list) + 1; 366 + list += l; 367 + len -= l; 368 + } 369 + return 0; 370 + } 371 + 372 + static void __init fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_node *mp) 373 + { 374 + const u64 *level = md_get_property(mp, "level", NULL); 375 + const u64 *size = md_get_property(mp, "size", NULL); 376 + const u64 *line_size = md_get_property(mp, "line-size", NULL); 377 + const char *type; 378 + int type_len; 379 + 380 + type = md_get_property(mp, "type", &type_len); 381 + 382 + switch (*level) { 383 + case 1: 384 + if (find_in_proplist(type, "instn", type_len)) { 385 + c->icache_size = *size; 386 + c->icache_line_size = *line_size; 387 + } else if (find_in_proplist(type, "data", type_len)) { 388 + c->dcache_size = *size; 389 + c->dcache_line_size = *line_size; 390 + } 391 + break; 392 + 393 + case 2: 394 + c->ecache_size = *size; 395 + c->ecache_line_size = *line_size; 396 + break; 397 + 398 + default: 399 + break; 400 + } 401 + 402 + if (*level == 1) { 403 + unsigned int i; 404 + 405 + for (i = 0; i < mp->num_arcs; i++) { 406 + struct mdesc_node *t = mp->arcs[i].arc; 407 + 408 + if (strcmp(mp->arcs[i].name, "fwd")) 409 + continue; 410 + 411 + if (!strcmp(t->name, "cache")) 412 + fill_in_one_cache(c, t); 413 + } 414 + } 415 + } 416 + 417 + static void __init mark_core_ids(struct mdesc_node *mp, int core_id) 418 + { 419 + unsigned int i; 420 + 421 + for (i = 0; i < mp->num_arcs; i++) { 422 + struct mdesc_node *t = mp->arcs[i].arc; 423 + const u64 *id; 424 + 425 + if (strcmp(mp->arcs[i].name, "back")) 426 + continue; 427 + 428 + if (!strcmp(t->name, "cpu")) { 429 + id = md_get_property(t, "id", NULL); 430 + if (*id < NR_CPUS) 431 + cpu_data(*id).core_id = core_id; 432 + } else { 433 + unsigned int j; 434 + 435 + for (j = 0; j < t->num_arcs; j++) { 436 + struct mdesc_node *n = t->arcs[j].arc; 437 + 438 + if (strcmp(t->arcs[j].name, "back")) 439 + continue; 440 + 441 + if (strcmp(n->name, "cpu")) 442 + continue; 443 + 444 + id = md_get_property(n, "id", NULL); 445 + if (*id < NR_CPUS) 446 + cpu_data(*id).core_id = core_id; 447 + } 448 + } 449 + } 450 + } 451 + 452 + static void __init set_core_ids(void) 453 + { 454 + struct mdesc_node *mp; 455 + int idx; 456 + 457 + idx = 1; 458 + md_for_each_node_by_name(mp, "cache") { 459 + const u64 *level = md_get_property(mp, "level", NULL); 460 + const char *type; 461 + int len; 462 + 463 + if (*level != 1) 464 + continue; 465 + 466 + type = md_get_property(mp, "type", &len); 467 + if (!find_in_proplist(type, "instn", len)) 468 + continue; 469 + 470 + mark_core_ids(mp, idx); 471 + 472 + idx++; 473 + } 474 + } 475 + 476 + static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def) 477 + { 478 + u64 val; 479 + 480 + if (!p) 481 + goto use_default; 482 + val = *p; 483 + 484 + if (!val || val >= 64) 485 + goto use_default; 486 + 487 + *mask 
= ((1U << val) * 64U) - 1U; 488 + return; 489 + 490 + use_default: 491 + *mask = ((1U << def) * 64U) - 1U; 492 + } 493 + 494 + static void __init get_mondo_data(struct mdesc_node *mp, struct trap_per_cpu *tb) 495 + { 496 + const u64 *val; 497 + 498 + val = md_get_property(mp, "q-cpu-mondo-#bits", NULL); 499 + get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7); 500 + 501 + val = md_get_property(mp, "q-dev-mondo-#bits", NULL); 502 + get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7); 503 + 504 + val = md_get_property(mp, "q-resumable-#bits", NULL); 505 + get_one_mondo_bits(val, &tb->resum_qmask, 6); 506 + 507 + val = md_get_property(mp, "q-nonresumable-#bits", NULL); 508 + get_one_mondo_bits(val, &tb->nonresum_qmask, 2); 509 + } 510 + 511 + static void __init mdesc_fill_in_cpu_data(void) 512 + { 513 + struct mdesc_node *mp; 514 + 515 + ncpus_probed = 0; 516 + md_for_each_node_by_name(mp, "cpu") { 517 + const u64 *id = md_get_property(mp, "id", NULL); 518 + const u64 *cfreq = md_get_property(mp, "clock-frequency", NULL); 519 + struct trap_per_cpu *tb; 520 + cpuinfo_sparc *c; 521 + unsigned int i; 522 + int cpuid; 523 + 524 + ncpus_probed++; 525 + 526 + cpuid = *id; 527 + 528 + #ifdef CONFIG_SMP 529 + if (cpuid >= NR_CPUS) 530 + continue; 531 + #else 532 + /* On uniprocessor we only want the values for the 533 + * real physical cpu the kernel booted onto, however 534 + * cpu_data() only has one entry at index 0. 535 + */ 536 + if (cpuid != real_hard_smp_processor_id()) 537 + continue; 538 + cpuid = 0; 539 + #endif 540 + 541 + c = &cpu_data(cpuid); 542 + c->clock_tick = *cfreq; 543 + 544 + tb = &trap_block[cpuid]; 545 + get_mondo_data(mp, tb); 546 + 547 + for (i = 0; i < mp->num_arcs; i++) { 548 + struct mdesc_node *t = mp->arcs[i].arc; 549 + unsigned int j; 550 + 551 + if (strcmp(mp->arcs[i].name, "fwd")) 552 + continue; 553 + 554 + if (!strcmp(t->name, "cache")) { 555 + fill_in_one_cache(c, t); 556 + continue; 557 + } 558 + 559 + for (j = 0; j < t->num_arcs; j++) { 560 + struct mdesc_node *n; 561 + 562 + n = t->arcs[j].arc; 563 + if (strcmp(t->arcs[j].name, "fwd")) 564 + continue; 565 + 566 + if (!strcmp(n->name, "cache")) 567 + fill_in_one_cache(c, n); 568 + } 569 + } 570 + 571 + #ifdef CONFIG_SMP 572 + cpu_set(cpuid, cpu_present_map); 573 + cpu_set(cpuid, phys_cpu_present_map); 574 + #endif 575 + 576 + c->core_id = 0; 577 + } 578 + 579 + set_core_ids(); 580 + 581 + smp_fill_in_sib_core_maps(); 582 + } 583 + 584 + void __init sun4v_mdesc_init(void) 585 + { 586 + unsigned long len, real_len, status; 587 + 588 + (void) sun4v_mach_desc(0UL, 0UL, &len); 589 + 590 + printk("MDESC: Size is %lu bytes.\n", len); 591 + 592 + main_mdesc = mdesc_early_alloc(len); 593 + 594 + status = sun4v_mach_desc(__pa(main_mdesc), len, &real_len); 595 + if (status != HV_EOK || real_len > len) { 596 + prom_printf("sun4v_mach_desc fails, err(%lu), " 597 + "len(%lu), real_len(%lu)\n", 598 + status, len, real_len); 599 + prom_halt(); 600 + } 601 + 602 + len = count_nodes(main_mdesc); 603 + printk("MDESC: %lu nodes.\n", len); 604 + 605 + len = roundup_pow_of_two(len); 606 + 607 + mdesc_hash = mdesc_early_alloc(len * sizeof(struct mdesc_node *)); 608 + mdesc_hash_size = len; 609 + 610 + printk("MDESC: Hash size %lu entries.\n", len); 611 + 612 + build_all_nodes(main_mdesc); 613 + 614 + printk("MDESC: Built graph with %u bytes of memory.\n", 615 + mdesc_early_allocated); 616 + 617 + report_platform_properties(); 618 + mdesc_fill_in_cpu_data(); 619 + }
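
Consumers walk the graph through the flat node list plus each node's arc array. A hedged sketch of the traversal idiom the file itself uses (md_for_each_node_by_name is assumed to be the obvious iterator wrapper over md_find_node_by_name):

    struct mdesc_node *mp;

    md_for_each_node_by_name(mp, "cpu") {
            const u64 *id = md_get_property(mp, "id", NULL);
            unsigned int i;

            for (i = 0; i < mp->num_arcs; i++) {
                    if (strcmp(mp->arcs[i].name, "fwd"))
                            continue;
                    /* mp->arcs[i].arc is a neighboring node, e.g. a
                     * "cache" node describing one of this cpu's caches.
                     */
            }
    }

The two-pass build in build_all_nodes() exists precisely so a walk like this never lands on an unallocated node, even though the back arcs make the graph cyclic.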
+40 -14
arch/sparc64/kernel/pci.c
··· 306 306 pci_controller_scan(pci_controller_init); 307 307 } 308 308 309 + static int ofpci_verbose; 310 + 311 + static int __init ofpci_debug(char *str) 312 + { 313 + int val = 0; 314 + 315 + get_option(&str, &val); 316 + if (val) 317 + ofpci_verbose = 1; 318 + return 1; 319 + } 320 + 321 + __setup("ofpci_debug=", ofpci_debug); 322 + 309 323 static unsigned long pci_parse_of_flags(u32 addr0) 310 324 { 311 325 unsigned long flags = 0; ··· 351 337 addrs = of_get_property(node, "assigned-addresses", &proplen); 352 338 if (!addrs) 353 339 return; 354 - printk(" parse addresses (%d bytes) @ %p\n", proplen, addrs); 340 + if (ofpci_verbose) 341 + printk(" parse addresses (%d bytes) @ %p\n", 342 + proplen, addrs); 355 343 op_res = &op->resource[0]; 356 344 for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) { 357 345 struct resource *res; ··· 364 348 if (!flags) 365 349 continue; 366 350 i = addrs[0] & 0xff; 367 - printk(" start: %lx, end: %lx, i: %x\n", 368 - op_res->start, op_res->end, i); 351 + if (ofpci_verbose) 352 + printk(" start: %lx, end: %lx, i: %x\n", 353 + op_res->start, op_res->end, i); 369 354 370 355 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { 371 356 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; ··· 410 393 if (type == NULL) 411 394 type = ""; 412 395 413 - printk(" create device, devfn: %x, type: %s hostcontroller(%d)\n", 414 - devfn, type, host_controller); 396 + if (ofpci_verbose) 397 + printk(" create device, devfn: %x, type: %s\n", 398 + devfn, type); 415 399 416 400 dev->bus = bus; 417 401 dev->sysdata = node; ··· 452 434 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus), 453 435 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); 454 436 } 455 - printk(" class: 0x%x device name: %s\n", 456 - dev->class, pci_name(dev)); 437 + if (ofpci_verbose) 438 + printk(" class: 0x%x device name: %s\n", 439 + dev->class, pci_name(dev)); 457 440 458 441 /* I have seen IDE devices which will not respond to 459 442 * the bmdma simplex check reads if bus mastering is ··· 488 469 } 489 470 pci_parse_of_addrs(sd->op, node, dev); 490 471 491 - printk(" adding to system ...\n"); 472 + if (ofpci_verbose) 473 + printk(" adding to system ...\n"); 492 474 493 475 pci_device_add(dev, bus); 494 476 ··· 567 547 unsigned int flags; 568 548 u64 size; 569 549 570 - printk("of_scan_pci_bridge(%s)\n", node->full_name); 550 + if (ofpci_verbose) 551 + printk("of_scan_pci_bridge(%s)\n", node->full_name); 571 552 572 553 /* parse bus-range property */ 573 554 busrange = of_get_property(node, "bus-range", &len); ··· 653 632 simba_cont: 654 633 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), 655 634 bus->number); 656 - printk(" bus name: %s\n", bus->name); 635 + if (ofpci_verbose) 636 + printk(" bus name: %s\n", bus->name); 657 637 658 638 pci_of_scan_bus(pbm, node, bus); 659 639 } ··· 668 646 int reglen, devfn; 669 647 struct pci_dev *dev; 670 648 671 - printk("PCI: scan_bus[%s] bus no %d\n", 672 - node->full_name, bus->number); 649 + if (ofpci_verbose) 650 + printk("PCI: scan_bus[%s] bus no %d\n", 651 + node->full_name, bus->number); 673 652 674 653 child = NULL; 675 654 while ((child = of_get_next_child(node, child)) != NULL) { 676 - printk(" * %s\n", child->full_name); 655 + if (ofpci_verbose) 656 + printk(" * %s\n", child->full_name); 677 657 reg = of_get_property(child, "reg", &reglen); 678 658 if (reg == NULL || reglen < 20) 679 659 continue; ··· 685 661 dev = of_create_pci_dev(pbm, child, bus, devfn, 0); 686 662 if (!dev) 687 663 continue; 688 - 
printk("PCI: dev header type: %x\n", dev->hdr_type); 664 + if (ofpci_verbose) 665 + printk("PCI: dev header type: %x\n", 666 + dev->hdr_type); 689 667 690 668 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 691 669 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+4 -3
arch/sparc64/kernel/pci_sabre.c
··· 762 762 /* Of course, Sun has to encode things a thousand 763 763 * different ways, inconsistently. 764 764 */ 765 - cpu_find_by_instance(0, &dp, NULL); 766 - if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe")) 767 - hummingbird_p = 1; 765 + for_each_node_by_type(dp, "cpu") { 766 + if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe")) 767 + hummingbird_p = 1; 768 + } 768 769 } 769 770 } 770 771
+28 -26
arch/sparc64/kernel/pci_sun4v.c
··· 12 12 #include <linux/percpu.h> 13 13 #include <linux/irq.h> 14 14 #include <linux/msi.h> 15 + #include <linux/log2.h> 15 16 16 17 #include <asm/iommu.h> 17 18 #include <asm/irq.h> ··· 26 25 #include "iommu_common.h" 27 26 28 27 #include "pci_sun4v.h" 28 + 29 + static unsigned long vpci_major = 1; 30 + static unsigned long vpci_minor = 1; 29 31 30 32 #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) 31 33 ··· 642 638 { 643 639 struct iommu *iommu = pbm->iommu; 644 640 struct property *prop; 645 - unsigned long num_tsb_entries, sz; 641 + unsigned long num_tsb_entries, sz, tsbsize; 646 642 u32 vdma[2], dma_mask, dma_offset; 647 - int tsbsize; 648 643 649 644 prop = of_find_property(pbm->prom_node, "virtual-dma", NULL); 650 645 if (prop) { ··· 657 654 vdma[1] = 0x80000000; 658 655 } 659 656 660 - dma_mask = vdma[0]; 661 - switch (vdma[1]) { 662 - case 0x20000000: 663 - dma_mask |= 0x1fffffff; 664 - tsbsize = 64; 665 - break; 666 - 667 - case 0x40000000: 668 - dma_mask |= 0x3fffffff; 669 - tsbsize = 128; 670 - break; 671 - 672 - case 0x80000000: 673 - dma_mask |= 0x7fffffff; 674 - tsbsize = 256; 675 - break; 676 - 677 - default: 678 - prom_printf("PCI-SUN4V: strange virtual-dma size.\n"); 679 - prom_halt(); 657 + if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) { 658 + prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n", 659 + vdma[0], vdma[1]); 660 + prom_halt(); 680 661 }; 681 662 682 - tsbsize *= (8 * 1024); 683 - 684 - num_tsb_entries = tsbsize / sizeof(iopte_t); 663 + dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL); 664 + num_tsb_entries = vdma[1] / IO_PAGE_SIZE; 665 + tsbsize = num_tsb_entries * sizeof(iopte_t); 685 666 686 667 dma_offset = vdma[0]; 687 668 ··· 676 689 iommu->dma_addr_mask = dma_mask; 677 690 678 691 /* Allocate and initialize the free area map. */ 679 - sz = num_tsb_entries / 8; 692 + sz = (num_tsb_entries + 7) / 8; 680 693 sz = (sz + 7UL) & ~7UL; 681 694 iommu->arena.map = kzalloc(sz, GFP_KERNEL); 682 695 if (!iommu->arena.map) { ··· 1165 1178 1166 1179 void sun4v_pci_init(struct device_node *dp, char *model_name) 1167 1180 { 1181 + static int hvapi_negotiated = 0; 1168 1182 struct pci_controller_info *p; 1169 1183 struct pci_pbm_info *pbm; 1170 1184 struct iommu *iommu; ··· 1173 1185 struct linux_prom64_registers *regs; 1174 1186 u32 devhandle; 1175 1187 int i; 1188 + 1189 + if (!hvapi_negotiated++) { 1190 + int err = sun4v_hvapi_register(HV_GRP_PCI, 1191 + vpci_major, 1192 + &vpci_minor); 1193 + 1194 + if (err) { 1195 + prom_printf("SUN4V_PCI: Could not register hvapi, " 1196 + "err=%d\n", err); 1197 + prom_halt(); 1198 + } 1199 + printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n", 1200 + vpci_major, vpci_minor); 1201 + } 1176 1202 1177 1203 prop = of_find_property(dp, "reg", NULL); 1178 1204 regs = prop->value;
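The rewritten virtual-dma parsing above no longer whitelists three sizes; any IO_PAGE-aligned range is accepted, and the DMA mask and TSB sizing fall out arithmetically. A stand-alone check of that arithmetic, assuming the sparc64 values IO_PAGE_SIZE == 8K and sizeof(iopte_t) == 8; for the three sizes the deleted switch statement accepted, the computed entry counts (64K, 128K, 256K) match what the old tsbsize table produced:

    #include <stdio.h>

    #define IO_PAGE_SIZE (8 * 1024UL)

    static unsigned long roundup_pow_of_two(unsigned long n)
    {
            unsigned long v = 1;

            while (v < n)
                    v <<= 1;
            return v;
    }

    int main(void)
    {
            unsigned long sizes[] = { 0x20000000UL, 0x40000000UL, 0x80000000UL };

            for (int i = 0; i < 3; i++) {
                    unsigned long sz = sizes[i];
                    unsigned long dma_mask = roundup_pow_of_two(sz) - 1UL;
                    unsigned long entries = sz / IO_PAGE_SIZE;

                    printf("size %#lx -> mask %#lx, %lu TSB entries (%lu bytes)\n",
                           sz, dma_mask, entries, entries * 8UL);
            }
            return 0;
    }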
+2
arch/sparc64/kernel/power.c
··· 19 19 #include <asm/prom.h> 20 20 #include <asm/of_device.h> 21 21 #include <asm/io.h> 22 + #include <asm/sstate.h> 22 23 23 24 #include <linux/unistd.h> 24 25 ··· 54 53 55 54 void machine_power_off(void) 56 55 { 56 + sstate_poweroff(); 57 57 if (!serial_console || scons_pwroff) { 58 58 #ifdef CONFIG_PCI 59 59 if (power_reg) {
+4
arch/sparc64/kernel/process.c
··· 45 45 #include <asm/mmu_context.h> 46 46 #include <asm/unistd.h> 47 47 #include <asm/hypervisor.h> 48 + #include <asm/sstate.h> 48 49 49 50 /* #define VERBOSE_SHOWREGS */ 50 51 ··· 107 106 108 107 void machine_halt(void) 109 108 { 109 + sstate_halt(); 110 110 if (!serial_console && prom_palette) 111 111 prom_palette (1); 112 112 if (prom_keyboard) ··· 118 116 119 117 void machine_alt_power_off(void) 120 118 { 119 + sstate_poweroff(); 121 120 if (!serial_console && prom_palette) 122 121 prom_palette(1); 123 122 if (prom_keyboard) ··· 131 128 { 132 129 char *p; 133 130 131 + sstate_reboot(); 134 132 p = strchr (reboot_command, '\n'); 135 133 if (p) *p = 0; 136 134 if (!serial_console && prom_palette)
+148
arch/sparc64/kernel/prom.c
··· 28 28 #include <asm/irq.h> 29 29 #include <asm/asi.h> 30 30 #include <asm/upa.h> 31 + #include <asm/smp.h> 31 32 32 33 static struct device_node *allnodes; 33 34 ··· 1666 1665 return ret; 1667 1666 } 1668 1667 1668 + static const char *get_mid_prop(void) 1669 + { 1670 + return (tlb_type == spitfire ? "upa-portid" : "portid"); 1671 + } 1672 + 1673 + struct device_node *of_find_node_by_cpuid(int cpuid) 1674 + { 1675 + struct device_node *dp; 1676 + const char *mid_prop = get_mid_prop(); 1677 + 1678 + for_each_node_by_type(dp, "cpu") { 1679 + int id = of_getintprop_default(dp, mid_prop, -1); 1680 + const char *this_mid_prop = mid_prop; 1681 + 1682 + if (id < 0) { 1683 + this_mid_prop = "cpuid"; 1684 + id = of_getintprop_default(dp, this_mid_prop, -1); 1685 + } 1686 + 1687 + if (id < 0) { 1688 + prom_printf("OF: Serious problem, cpu lacks " 1689 + "%s property", this_mid_prop); 1690 + prom_halt(); 1691 + } 1692 + if (cpuid == id) 1693 + return dp; 1694 + } 1695 + return NULL; 1696 + } 1697 + 1698 + static void __init of_fill_in_cpu_data(void) 1699 + { 1700 + struct device_node *dp; 1701 + const char *mid_prop = get_mid_prop(); 1702 + 1703 + ncpus_probed = 0; 1704 + for_each_node_by_type(dp, "cpu") { 1705 + int cpuid = of_getintprop_default(dp, mid_prop, -1); 1706 + const char *this_mid_prop = mid_prop; 1707 + struct device_node *portid_parent; 1708 + int portid = -1; 1709 + 1710 + portid_parent = NULL; 1711 + if (cpuid < 0) { 1712 + this_mid_prop = "cpuid"; 1713 + cpuid = of_getintprop_default(dp, this_mid_prop, -1); 1714 + if (cpuid >= 0) { 1715 + int limit = 2; 1716 + 1717 + portid_parent = dp; 1718 + while (limit--) { 1719 + portid_parent = portid_parent->parent; 1720 + if (!portid_parent) 1721 + break; 1722 + portid = of_getintprop_default(portid_parent, 1723 + "portid", -1); 1724 + if (portid >= 0) 1725 + break; 1726 + } 1727 + } 1728 + } 1729 + 1730 + if (cpuid < 0) { 1731 + prom_printf("OF: Serious problem, cpu lacks " 1732 + "%s property", this_mid_prop); 1733 + prom_halt(); 1734 + } 1735 + 1736 + ncpus_probed++; 1737 + 1738 + #ifdef CONFIG_SMP 1739 + if (cpuid >= NR_CPUS) 1740 + continue; 1741 + #else 1742 + /* On uniprocessor we only want the values for the 1743 + * real physical cpu the kernel booted onto, however 1744 + * cpu_data() only has one entry at index 0. 
1745 + */ 1746 + if (cpuid != real_hard_smp_processor_id()) 1747 + continue; 1748 + cpuid = 0; 1749 + #endif 1750 + 1751 + cpu_data(cpuid).clock_tick = 1752 + of_getintprop_default(dp, "clock-frequency", 0); 1753 + 1754 + if (portid_parent) { 1755 + cpu_data(cpuid).dcache_size = 1756 + of_getintprop_default(dp, "l1-dcache-size", 1757 + 16 * 1024); 1758 + cpu_data(cpuid).dcache_line_size = 1759 + of_getintprop_default(dp, "l1-dcache-line-size", 1760 + 32); 1761 + cpu_data(cpuid).icache_size = 1762 + of_getintprop_default(dp, "l1-icache-size", 1763 + 8 * 1024); 1764 + cpu_data(cpuid).icache_line_size = 1765 + of_getintprop_default(dp, "l1-icache-line-size", 1766 + 32); 1767 + cpu_data(cpuid).ecache_size = 1768 + of_getintprop_default(dp, "l2-cache-size", 0); 1769 + cpu_data(cpuid).ecache_line_size = 1770 + of_getintprop_default(dp, "l2-cache-line-size", 0); 1771 + if (!cpu_data(cpuid).ecache_size || 1772 + !cpu_data(cpuid).ecache_line_size) { 1773 + cpu_data(cpuid).ecache_size = 1774 + of_getintprop_default(portid_parent, 1775 + "l2-cache-size", 1776 + (4 * 1024 * 1024)); 1777 + cpu_data(cpuid).ecache_line_size = 1778 + of_getintprop_default(portid_parent, 1779 + "l2-cache-line-size", 64); 1780 + } 1781 + 1782 + cpu_data(cpuid).core_id = portid + 1; 1783 + } else { 1784 + cpu_data(cpuid).dcache_size = 1785 + of_getintprop_default(dp, "dcache-size", 16 * 1024); 1786 + cpu_data(cpuid).dcache_line_size = 1787 + of_getintprop_default(dp, "dcache-line-size", 32); 1788 + 1789 + cpu_data(cpuid).icache_size = 1790 + of_getintprop_default(dp, "icache-size", 16 * 1024); 1791 + cpu_data(cpuid).icache_line_size = 1792 + of_getintprop_default(dp, "icache-line-size", 32); 1793 + 1794 + cpu_data(cpuid).ecache_size = 1795 + of_getintprop_default(dp, "ecache-size", 1796 + (4 * 1024 * 1024)); 1797 + cpu_data(cpuid).ecache_line_size = 1798 + of_getintprop_default(dp, "ecache-line-size", 64); 1799 + 1800 + cpu_data(cpuid).core_id = 0; 1801 + } 1802 + 1803 + #ifdef CONFIG_SMP 1804 + cpu_set(cpuid, cpu_present_map); 1805 + cpu_set(cpuid, phys_cpu_present_map); 1806 + #endif 1807 + } 1808 + 1809 + smp_fill_in_sib_core_maps(); 1810 + } 1811 + 1669 1812 void __init prom_build_devicetree(void) 1670 1813 { 1671 1814 struct device_node **nextp; ··· 1824 1679 &nextp); 1825 1680 printk("PROM: Built device tree with %u bytes of memory.\n", 1826 1681 prom_early_allocated); 1682 + 1683 + if (tlb_type != hypervisor) 1684 + of_fill_in_cpu_data(); 1827 1685 }
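of_fill_in_cpu_data() above has to cope with three property layouts: spitfire cpus carry "upa-portid", later sun4u cpus carry "portid", and cores whose cpu nodes only carry "cpuid" keep their "portid" on a parent node at most two levels up (that portid + 1 then becomes core_id). A self-contained model of the bounded parent walk; the node structure here is hypothetical, only the walking logic mirrors the code above:

    #include <stdio.h>

    struct node {
            struct node *parent;
            int portid;                     /* -1 == property absent */
    };

    static int find_portid(struct node *cpu)
    {
            struct node *np = cpu;
            int limit = 2;                  /* look at most two levels up */

            while (limit--) {
                    np = np->parent;
                    if (!np)
                            break;
                    if (np->portid >= 0)
                            return np->portid;
            }
            return -1;
    }

    int main(void)
    {
            struct node root = { NULL, -1 };
            struct node core = { &root, 5 };   /* core node carries portid 5 */
            struct node cpu  = { &core, -1 };  /* cpu node has only "cpuid" */

            printf("portid = %d\n", find_portid(&cpu));   /* prints 5 */
            return 0;
    }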
+7 -11
arch/sparc64/kernel/setup.c
··· 46 46 #include <asm/sections.h> 47 47 #include <asm/setup.h> 48 48 #include <asm/mmu.h> 49 + #include <asm/ns87303.h> 49 50 50 51 #ifdef CONFIG_IP_PNP 51 52 #include <net/ipconfig.h> 52 53 #endif 54 + 55 + /* Used to synchronize accesses to NatSemi SUPER I/O chip configure 56 + * operations in asm/ns87303.h 57 + */ 58 + DEFINE_SPINLOCK(ns87303_lock); 53 59 54 60 struct screen_info screen_info = { 55 61 0, 0, /* orig-x, orig-y */ ··· 376 370 init_cur_cpu_trap(current_thread_info()); 377 371 378 372 paging_init(); 379 - 380 - smp_setup_cpu_possible_map(); 381 373 } 382 374 383 375 static int __init set_preferred_console(void) ··· 428 424 unsigned int dcache_parity_tl1_occurred; 429 425 unsigned int icache_parity_tl1_occurred; 430 426 431 - static int ncpus_probed; 427 + int ncpus_probed; 432 428 433 429 static int show_cpuinfo(struct seq_file *m, void *__unused) 434 430 { ··· 519 515 int i, err; 520 516 521 517 err = -ENOMEM; 522 - 523 - /* Count the number of physically present processors in 524 - * the machine, even on uniprocessor, so that /proc/cpuinfo 525 - * output is consistent with 2.4.x 526 - */ 527 - ncpus_probed = 0; 528 - while (!cpu_find_by_instance(ncpus_probed, NULL, NULL)) 529 - ncpus_probed++; 530 518 531 519 for_each_possible_cpu(i) { 532 520 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
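ns87303_lock exists because the SUPER I/O configuration accessors in asm/ns87303.h are multi-step sequences (typically an index write followed by a data access) that must not interleave between CPUs. A sketch of the locking pattern only; the accessor name and register layout here are illustrative, not the real ns87303.h code:

    static u8 superio_read_cfg(unsigned long port, u8 index)
    {
            unsigned long flags;
            u8 val;

            spin_lock_irqsave(&ns87303_lock, flags);
            outb(index, port);              /* step 1: select the register */
            val = inb(port + 1);            /* step 2: read it */
            spin_unlock_irqrestore(&ns87303_lock, flags);

            return val;
    }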
+52 -107
arch/sparc64/kernel/smp.c
··· 40 40 #include <asm/tlb.h> 41 41 #include <asm/sections.h> 42 42 #include <asm/prom.h> 43 + #include <asm/mdesc.h> 43 44 44 45 extern void calibrate_delay(void); 45 46 ··· 76 75 i, cpu_data(i).clock_tick); 77 76 } 78 77 79 - void __init smp_store_cpu_info(int id) 80 - { 81 - struct device_node *dp; 82 - int def; 83 - 84 - cpu_data(id).udelay_val = loops_per_jiffy; 85 - 86 - cpu_find_by_mid(id, &dp); 87 - cpu_data(id).clock_tick = 88 - of_getintprop_default(dp, "clock-frequency", 0); 89 - 90 - def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024)); 91 - cpu_data(id).dcache_size = 92 - of_getintprop_default(dp, "dcache-size", def); 93 - 94 - def = 32; 95 - cpu_data(id).dcache_line_size = 96 - of_getintprop_default(dp, "dcache-line-size", def); 97 - 98 - def = 16 * 1024; 99 - cpu_data(id).icache_size = 100 - of_getintprop_default(dp, "icache-size", def); 101 - 102 - def = 32; 103 - cpu_data(id).icache_line_size = 104 - of_getintprop_default(dp, "icache-line-size", def); 105 - 106 - def = ((tlb_type == hypervisor) ? 107 - (3 * 1024 * 1024) : 108 - (4 * 1024 * 1024)); 109 - cpu_data(id).ecache_size = 110 - of_getintprop_default(dp, "ecache-size", def); 111 - 112 - def = 64; 113 - cpu_data(id).ecache_line_size = 114 - of_getintprop_default(dp, "ecache-line-size", def); 115 - 116 - printk("CPU[%d]: Caches " 117 - "D[sz(%d):line_sz(%d)] " 118 - "I[sz(%d):line_sz(%d)] " 119 - "E[sz(%d):line_sz(%d)]\n", 120 - id, 121 - cpu_data(id).dcache_size, cpu_data(id).dcache_line_size, 122 - cpu_data(id).icache_size, cpu_data(id).icache_line_size, 123 - cpu_data(id).ecache_size, cpu_data(id).ecache_line_size); 124 - } 125 - 126 78 extern void setup_sparc64_timer(void); 127 79 128 80 static volatile unsigned long callin_flag = 0; ··· 99 145 local_irq_enable(); 100 146 101 147 calibrate_delay(); 102 - smp_store_cpu_info(cpuid); 148 + cpu_data(cpuid).udelay_val = loops_per_jiffy; 103 149 callin_flag = 1; 104 150 __asm__ __volatile__("membar #Sync\n\t" 105 151 "flush %%g6" : : : "memory"); ··· 294 340 295 341 prom_startcpu_cpuid(cpu, entry, cookie); 296 342 } else { 297 - struct device_node *dp; 343 + struct device_node *dp = of_find_node_by_cpuid(cpu); 298 344 299 - cpu_find_by_mid(cpu, &dp); 300 345 prom_startcpu(dp->node, entry, cookie); 301 346 } 302 347 ··· 400 447 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) 401 448 { 402 449 u64 pstate, ver; 403 - int nack_busy_id, is_jbus; 450 + int nack_busy_id, is_jbus, need_more; 404 451 405 452 if (cpus_empty(mask)) 406 453 return; ··· 416 463 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); 417 464 418 465 retry: 466 + need_more = 0; 419 467 __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t" 420 468 : : "r" (pstate), "i" (PSTATE_IE)); 421 469 ··· 445 491 : /* no outputs */ 446 492 : "r" (target), "i" (ASI_INTR_W)); 447 493 nack_busy_id++; 494 + if (nack_busy_id == 32) { 495 + need_more = 1; 496 + break; 497 + } 448 498 } 449 499 } 450 500 ··· 465 507 if (dispatch_stat == 0UL) { 466 508 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" 467 509 : : "r" (pstate)); 510 + if (unlikely(need_more)) { 511 + int i, cnt = 0; 512 + for_each_cpu_mask(i, mask) { 513 + cpu_clear(i, mask); 514 + cnt++; 515 + if (cnt == 32) 516 + break; 517 + } 518 + goto retry; 519 + } 468 520 return; 469 521 } 470 522 if (!--stuck) ··· 512 544 if ((dispatch_stat & check_mask) == 0) 513 545 cpu_clear(i, mask); 514 546 this_busy_nack += 2; 547 + if (this_busy_nack == 64) 548 + break; 515 549 } 516 550 517 551 goto retry; ··· 1161 1191 1162 1192 
static void __init smp_tune_scheduling(void) 1163 1193 { 1164 - struct device_node *dp; 1165 - int instance; 1166 - unsigned int def, smallest = ~0U; 1194 + unsigned int smallest = ~0U; 1195 + int i; 1167 1196 1168 - def = ((tlb_type == hypervisor) ? 1169 - (3 * 1024 * 1024) : 1170 - (4 * 1024 * 1024)); 1197 + for (i = 0; i < NR_CPUS; i++) { 1198 + unsigned int val = cpu_data(i).ecache_size; 1171 1199 1172 - instance = 0; 1173 - while (!cpu_find_by_instance(instance, &dp, NULL)) { 1174 - unsigned int val; 1175 - 1176 - val = of_getintprop_default(dp, "ecache-size", def); 1177 - if (val < smallest) 1200 + if (val && val < smallest) 1178 1201 smallest = val; 1179 - 1180 - instance++; 1181 1202 } 1182 1203 1183 1204 /* Any value less than 256K is nonsense. */ ··· 1191 1230 int i; 1192 1231 1193 1232 if (num_possible_cpus() > max_cpus) { 1194 - int instance, mid; 1195 - 1196 - instance = 0; 1197 - while (!cpu_find_by_instance(instance, NULL, &mid)) { 1198 - if (mid != boot_cpu_id) { 1199 - cpu_clear(mid, phys_cpu_present_map); 1200 - cpu_clear(mid, cpu_present_map); 1233 + for_each_possible_cpu(i) { 1234 + if (i != boot_cpu_id) { 1235 + cpu_clear(i, phys_cpu_present_map); 1236 + cpu_clear(i, cpu_present_map); 1201 1237 if (num_possible_cpus() <= max_cpus) 1202 1238 break; 1203 1239 } 1204 - instance++; 1205 1240 } 1206 1241 } 1207 1242 1208 - for_each_possible_cpu(i) { 1209 - if (tlb_type == hypervisor) { 1210 - int j; 1211 - 1212 - /* XXX get this mapping from machine description */ 1213 - for_each_possible_cpu(j) { 1214 - if ((j >> 2) == (i >> 2)) 1215 - cpu_set(j, cpu_sibling_map[i]); 1216 - } 1217 - } else { 1218 - cpu_set(i, cpu_sibling_map[i]); 1219 - } 1220 - } 1221 - 1222 - smp_store_cpu_info(boot_cpu_id); 1243 + cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy; 1223 1244 smp_tune_scheduling(); 1224 - } 1225 - 1226 - /* Set this up early so that things like the scheduler can init 1227 - * properly. We use the same cpu mask for both the present and 1228 - * possible cpu map. 1229 - */ 1230 - void __init smp_setup_cpu_possible_map(void) 1231 - { 1232 - int instance, mid; 1233 - 1234 - instance = 0; 1235 - while (!cpu_find_by_instance(instance, NULL, &mid)) { 1236 - if (mid < NR_CPUS) { 1237 - cpu_set(mid, phys_cpu_present_map); 1238 - cpu_set(mid, cpu_present_map); 1239 - } 1240 - instance++; 1241 - } 1242 1245 } 1243 1246 1244 1247 void __devinit smp_prepare_boot_cpu(void) 1245 1248 { 1249 + } 1250 + 1251 + void __devinit smp_fill_in_sib_core_maps(void) 1252 + { 1253 + unsigned int i; 1254 + 1255 + for_each_possible_cpu(i) { 1256 + unsigned int j; 1257 + 1258 + if (cpu_data(i).core_id == 0) { 1259 + cpu_set(i, cpu_sibling_map[i]); 1260 + continue; 1261 + } 1262 + 1263 + for_each_possible_cpu(j) { 1264 + if (cpu_data(i).core_id == 1265 + cpu_data(j).core_id) 1266 + cpu_set(j, cpu_sibling_map[i]); 1267 + } 1268 + } 1246 1269 } 1247 1270 1248 1271 int __cpuinit __cpu_up(unsigned int cpu) ··· 1282 1337 EXPORT_SYMBOL(__per_cpu_base); 1283 1338 EXPORT_SYMBOL(__per_cpu_shift); 1284 1339 1285 - void __init setup_per_cpu_areas(void) 1340 + void __init real_setup_per_cpu_areas(void) 1286 1341 { 1287 1342 unsigned long goal, size, i; 1288 1343 char *ptr;
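smp_fill_in_sib_core_maps() derives the scheduler's sibling masks purely from the core_id values that of_fill_in_cpu_data() filled in: cpus sharing a nonzero core_id become siblings, and core_id 0 (no topology information) leaves a cpu sibling only to itself. A user-space model of the same loop, with masks as plain bitmaps:

    #include <stdio.h>
    #include <stdint.h>

    #define NCPUS 8

    int main(void)
    {
            int core_id[NCPUS] = { 1, 1, 1, 1, 2, 2, 0, 0 };
            uint32_t sibling_map[NCPUS] = { 0 };

            for (int i = 0; i < NCPUS; i++) {
                    if (core_id[i] == 0) {
                            sibling_map[i] = 1u << i;   /* only itself */
                            continue;
                    }
                    for (int j = 0; j < NCPUS; j++)
                            if (core_id[i] == core_id[j])
                                    sibling_map[i] |= 1u << j;
            }

            for (int i = 0; i < NCPUS; i++)
                    printf("cpu%d siblings: %#x\n", i, sibling_map[i]);
            return 0;
    }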
+104
arch/sparc64/kernel/sstate.c
··· 1 + /* sstate.c: System soft state support. 2 + * 3 + * Copyright (C) 2007 David S. Miller <davem@davemloft.net> 4 + */ 5 + 6 + #include <linux/kernel.h> 7 + #include <linux/notifier.h> 8 + #include <linux/init.h> 9 + 10 + #include <asm/hypervisor.h> 11 + #include <asm/sstate.h> 12 + #include <asm/oplib.h> 13 + #include <asm/head.h> 14 + #include <asm/io.h> 15 + 16 + static int hv_supports_soft_state; 17 + 18 + static unsigned long kimage_addr_to_ra(const char *p) 19 + { 20 + unsigned long val = (unsigned long) p; 21 + 22 + return kern_base + (val - KERNBASE); 23 + } 24 + 25 + static void do_set_sstate(unsigned long state, const char *msg) 26 + { 27 + unsigned long err; 28 + 29 + if (!hv_supports_soft_state) 30 + return; 31 + 32 + err = sun4v_mach_set_soft_state(state, kimage_addr_to_ra(msg)); 33 + if (err) { 34 + printk(KERN_WARNING "SSTATE: Failed to set soft-state to " 35 + "state[%lx] msg[%s], err=%lu\n", 36 + state, msg, err); 37 + } 38 + } 39 + 40 + static const char booting_msg[32] __attribute__((aligned(32))) = 41 + "Linux booting"; 42 + static const char running_msg[32] __attribute__((aligned(32))) = 43 + "Linux running"; 44 + static const char halting_msg[32] __attribute__((aligned(32))) = 45 + "Linux halting"; 46 + static const char poweroff_msg[32] __attribute__((aligned(32))) = 47 + "Linux powering off"; 48 + static const char rebooting_msg[32] __attribute__((aligned(32))) = 49 + "Linux rebooting"; 50 + static const char panicing_msg[32] __attribute__((aligned(32))) = 51 + "Linux panicing"; 52 + 53 + void sstate_booting(void) 54 + { 55 + do_set_sstate(HV_SOFT_STATE_TRANSITION, booting_msg); 56 + } 57 + 58 + void sstate_running(void) 59 + { 60 + do_set_sstate(HV_SOFT_STATE_NORMAL, running_msg); 61 + } 62 + 63 + void sstate_halt(void) 64 + { 65 + do_set_sstate(HV_SOFT_STATE_TRANSITION, halting_msg); 66 + } 67 + 68 + void sstate_poweroff(void) 69 + { 70 + do_set_sstate(HV_SOFT_STATE_TRANSITION, poweroff_msg); 71 + } 72 + 73 + void sstate_reboot(void) 74 + { 75 + do_set_sstate(HV_SOFT_STATE_TRANSITION, rebooting_msg); 76 + } 77 + 78 + static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr) 79 + { 80 + do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg); 81 + 82 + return NOTIFY_DONE; 83 + } 84 + 85 + static struct notifier_block sstate_panic_block = { 86 + .notifier_call = sstate_panic_event, 87 + .priority = INT_MAX, 88 + }; 89 + 90 + void __init sun4v_sstate_init(void) 91 + { 92 + unsigned long major, minor; 93 + 94 + major = 1; 95 + minor = 0; 96 + if (sun4v_hvapi_register(HV_GRP_SOFT_STATE, major, &minor)) 97 + return; 98 + 99 + hv_supports_soft_state = 1; 100 + 101 + prom_sun4v_guest_soft_state(); 102 + atomic_notifier_chain_register(&panic_notifier_list, 103 + &sstate_panic_block); 104 + }
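The state messages above are fixed 32-byte, 32-byte-aligned, NUL-terminated buffers because that is the format the soft-state hypervisor call consumes, and they are handed over as real addresses: kimage_addr_to_ra() relies on the kernel image being contiguous from KERNBASE, so the conversion is one subtraction and one addition. A stand-alone check of that arithmetic; both constants below are made-up sample values, not the real layout:

    #include <stdio.h>

    #define KERNBASE   0x400000UL      /* image VA base; sample value */
    #define KERN_BASE  0x80400000UL    /* physical load address; sample value */

    static unsigned long kimage_addr_to_ra(unsigned long va)
    {
            return KERN_BASE + (va - KERNBASE);
    }

    int main(void)
    {
            unsigned long msg_va = KERNBASE + 0x1234;   /* some kernel symbol */

            printf("va %#lx -> ra %#lx\n", msg_va, kimage_addr_to_ra(msg_va));
            return 0;
    }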
+14 -16
arch/sparc64/kernel/sun4v_ivec.S
··· 22 22 be,pn %xcc, sun4v_cpu_mondo_queue_empty 23 23 nop 24 24 25 - /* Get &trap_block[smp_processor_id()] into %g3. */ 26 - ldxa [%g0] ASI_SCRATCHPAD, %g3 27 - sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 25 + /* Get &trap_block[smp_processor_id()] into %g4. */ 26 + ldxa [%g0] ASI_SCRATCHPAD, %g4 27 + sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4 28 28 29 29 /* Get CPU mondo queue base phys address into %g7. */ 30 - ldx [%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 30 + ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 31 31 32 32 /* Now get the cross-call arguments and handler PC, same 33 33 * layout as sun4u: ··· 47 47 add %g2, 0x40 - 0x8 - 0x8, %g2 48 48 49 49 /* Update queue head pointer. */ 50 - sethi %hi(8192 - 1), %g4 51 - or %g4, %lo(8192 - 1), %g4 50 + lduw [%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4 52 51 and %g2, %g4, %g2 53 52 54 53 mov INTRQ_CPU_MONDO_HEAD, %g4 ··· 70 71 be,pn %xcc, sun4v_dev_mondo_queue_empty 71 72 nop 72 73 73 - /* Get &trap_block[smp_processor_id()] into %g3. */ 74 - ldxa [%g0] ASI_SCRATCHPAD, %g3 75 - sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 74 + /* Get &trap_block[smp_processor_id()] into %g4. */ 75 + ldxa [%g0] ASI_SCRATCHPAD, %g4 76 + sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4 76 77 77 78 /* Get DEV mondo queue base phys address into %g5. */ 78 - ldx [%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5 79 + ldx [%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5 79 80 80 81 /* Load IVEC into %g3. */ 81 82 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 ··· 89 90 */ 90 91 91 92 /* Update queue head pointer, this frees up some registers. */ 92 - sethi %hi(8192 - 1), %g4 93 - or %g4, %lo(8192 - 1), %g4 93 + lduw [%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4 94 94 and %g2, %g4, %g2 95 95 96 96 mov INTRQ_DEVICE_MONDO_HEAD, %g4 ··· 141 143 brnz,pn %g1, sun4v_res_mondo_queue_full 142 144 nop 143 145 146 + lduw [%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4 147 + 144 148 /* Remember this entry's offset in %g1. */ 145 149 mov %g2, %g1 146 150 ··· 173 173 add %g2, 0x08, %g2 174 174 175 175 /* Update queue head pointer. */ 176 - sethi %hi(8192 - 1), %g4 177 - or %g4, %lo(8192 - 1), %g4 178 176 and %g2, %g4, %g2 179 177 180 178 mov INTRQ_RESUM_MONDO_HEAD, %g4 ··· 252 254 brnz,pn %g1, sun4v_nonres_mondo_queue_full 253 255 nop 254 256 257 + lduw [%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4 258 + 255 259 /* Remember this entry's offset in %g1. */ 256 260 mov %g2, %g1 257 261 ··· 284 284 add %g2, 0x08, %g2 285 285 286 286 /* Update queue head pointer. */ 287 - sethi %hi(8192 - 1), %g4 288 - or %g4, %lo(8192 - 1), %g4 289 287 and %g2, %g4, %g2 290 288 291 289 mov INTRQ_NONRESUM_MONDO_HEAD, %g4
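The sethi/or pairs that materialized the constant 8192 - 1 are replaced by an lduw of a per-queue mask from the trap block, so each mondo queue's size can vary per platform instead of being fixed at 8K. The head-pointer update itself is unchanged and still assumes power-of-two queue sizes; in C it is just:

    #include <stdio.h>

    int main(void)
    {
            unsigned int qmask = 8192 - 1;    /* one possible per-queue mask */
            unsigned int head  = 8192 - 64;   /* last entry of the queue */

            for (int i = 0; i < 3; i++) {
                    head = (head + 64) & qmask;   /* 64-byte cpu-mondo entries */
                    printf("head = %u\n", head);  /* wraps to 0, then 64, 128 */
            }
            return 0;
    }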
+18 -29
arch/sparc64/kernel/time.c
··· 680 680 681 681 static u32 hypervisor_get_time(void) 682 682 { 683 - register unsigned long func asm("%o5"); 684 - register unsigned long arg0 asm("%o0"); 685 - register unsigned long arg1 asm("%o1"); 683 + unsigned long ret, time; 686 684 int retries = 10000; 687 685 688 686 retry: 689 - func = HV_FAST_TOD_GET; 690 - arg0 = 0; 691 - arg1 = 0; 692 - __asm__ __volatile__("ta %6" 693 - : "=&r" (func), "=&r" (arg0), "=&r" (arg1) 694 - : "0" (func), "1" (arg0), "2" (arg1), 695 - "i" (HV_FAST_TRAP)); 696 - if (arg0 == HV_EOK) 697 - return arg1; 698 - if (arg0 == HV_EWOULDBLOCK) { 687 + ret = sun4v_tod_get(&time); 688 + if (ret == HV_EOK) 689 + return time; 690 + if (ret == HV_EWOULDBLOCK) { 699 691 if (--retries > 0) { 700 692 udelay(100); 701 693 goto retry; ··· 701 709 702 710 static int hypervisor_set_time(u32 secs) 703 711 { 704 - register unsigned long func asm("%o5"); 705 - register unsigned long arg0 asm("%o0"); 712 + unsigned long ret; 706 713 int retries = 10000; 707 714 708 715 retry: 709 - func = HV_FAST_TOD_SET; 710 - arg0 = secs; 711 - __asm__ __volatile__("ta %4" 712 - : "=&r" (func), "=&r" (arg0) 713 - : "0" (func), "1" (arg0), 714 - "i" (HV_FAST_TRAP)); 715 - if (arg0 == HV_EOK) 716 + ret = sun4v_tod_set(secs); 717 + if (ret == HV_EOK) 716 718 return 0; 717 - if (arg0 == HV_EWOULDBLOCK) { 719 + if (ret == HV_EWOULDBLOCK) { 718 720 if (--retries > 0) { 719 721 udelay(100); 720 722 goto retry; ··· 848 862 static unsigned long sparc64_init_timers(void) 849 863 { 850 864 struct device_node *dp; 851 - struct property *prop; 852 865 unsigned long clock; 853 866 #ifdef CONFIG_SMP 854 867 extern void smp_tick_init(void); ··· 864 879 if (manuf == 0x17 && impl == 0x13) { 865 880 /* Hummingbird, aka Ultra-IIe */ 866 881 tick_ops = &hbtick_operations; 867 - prop = of_find_property(dp, "stick-frequency", NULL); 882 + clock = of_getintprop_default(dp, "stick-frequency", 0); 868 883 } else { 869 884 tick_ops = &tick_operations; 870 - cpu_find_by_instance(0, &dp, NULL); 871 - prop = of_find_property(dp, "clock-frequency", NULL); 885 + clock = local_cpu_data().clock_tick; 872 886 } 873 887 } else { 874 888 tick_ops = &stick_operations; 875 - prop = of_find_property(dp, "stick-frequency", NULL); 889 + clock = of_getintprop_default(dp, "stick-frequency", 0); 876 890 } 877 - clock = *(unsigned int *) prop->value; 878 891 879 892 #ifdef CONFIG_SMP 880 893 smp_tick_init(); ··· 1348 1365 return hypervisor_set_time(seconds); 1349 1366 } 1350 1367 1368 + #ifdef CONFIG_PCI 1351 1369 static void bq4802_get_rtc_time(struct rtc_time *time) 1352 1370 { 1353 1371 unsigned char val = readb(bq4802_regs + 0x0e); ··· 1420 1436 1421 1437 return 0; 1422 1438 } 1439 + #endif /* CONFIG_PCI */ 1423 1440 1424 1441 struct mini_rtc_ops { 1425 1442 void (*get_rtc_time)(struct rtc_time *); ··· 1437 1452 .set_rtc_time = hypervisor_set_rtc_time, 1438 1453 }; 1439 1454 1455 + #ifdef CONFIG_PCI 1440 1456 static struct mini_rtc_ops bq4802_rtc_ops = { 1441 1457 .get_rtc_time = bq4802_get_rtc_time, 1442 1458 .set_rtc_time = bq4802_set_rtc_time, 1443 1459 }; 1460 + #endif /* CONFIG_PCI */ 1444 1461 1445 1462 static struct mini_rtc_ops *mini_rtc_ops; 1446 1463 ··· 1566 1579 mini_rtc_ops = &hypervisor_rtc_ops; 1567 1580 else if (this_is_starfire) 1568 1581 mini_rtc_ops = &starfire_rtc_ops; 1582 + #ifdef CONFIG_PCI 1569 1583 else if (bq4802_regs) 1570 1584 mini_rtc_ops = &bq4802_rtc_ops; 1585 + #endif /* CONFIG_PCI */ 1571 1586 else 1572 1587 return -ENODEV; 1573 1588
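hypervisor_get_time() and hypervisor_set_time() lose their hand-rolled trap inlines in favor of the C-callable sun4v_tod_get()/sun4v_tod_set() wrappers, but keep the bounded retry on HV_EWOULDBLOCK. The control flow, modeled stand-alone with a stubbed busy service; the error values below are local stand-ins, not the real sun4v numbers:

    #include <stdio.h>
    #include <unistd.h>

    #define HV_EOK         0
    #define HV_EWOULDBLOCK 1    /* illustrative value only */

    static int attempts;

    static unsigned long hv_tod_get(unsigned long *time)
    {
            if (++attempts < 3)             /* pretend to be busy twice */
                    return HV_EWOULDBLOCK;
            *time = 1179532800UL;
            return HV_EOK;
    }

    int main(void)
    {
            unsigned long time, ret;
            int retries = 10000;

    retry:
            ret = hv_tod_get(&time);
            if (ret == HV_EOK) {
                    printf("tod = %lu after %d attempts\n", time, attempts);
                    return 0;
            }
            if (ret == HV_EWOULDBLOCK && --retries > 0) {
                    usleep(100);            /* the kernel udelay()s 100us */
                    goto retry;
            }
            fprintf(stderr, "TOD service unavailable\n");
            return 1;
    }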
+18 -9
arch/sparc64/kernel/traps.c
··· 795 795 void __init cheetah_ecache_flush_init(void) 796 796 { 797 797 unsigned long largest_size, smallest_linesize, order, ver; 798 - struct device_node *dp; 799 - int i, instance, sz; 798 + int i, sz; 800 799 801 800 /* Scan all cpu device tree nodes, note two values: 802 801 * 1) largest E-cache size ··· 804 805 largest_size = 0UL; 805 806 smallest_linesize = ~0UL; 806 807 807 - instance = 0; 808 - while (!cpu_find_by_instance(instance, &dp, NULL)) { 808 + for (i = 0; i < NR_CPUS; i++) { 809 809 unsigned long val; 810 810 811 - val = of_getintprop_default(dp, "ecache-size", 812 - (2 * 1024 * 1024)); 811 + val = cpu_data(i).ecache_size; 812 + if (!val) 813 + continue; 814 + 813 815 if (val > largest_size) 814 816 largest_size = val; 815 - val = of_getintprop_default(dp, "ecache-line-size", 64); 817 + 818 + val = cpu_data(i).ecache_line_size; 816 819 if (val < smallest_linesize) 817 820 smallest_linesize = val; 818 - instance++; 821 + 819 822 } 820 823 821 824 if (largest_size == 0UL || smallest_linesize == ~0UL) { ··· 2565 2564 (TRAP_PER_CPU_TSB_HUGE_TEMP != 2566 2565 offsetof(struct trap_per_cpu, tsb_huge_temp)) || 2567 2566 (TRAP_PER_CPU_IRQ_WORKLIST != 2568 - offsetof(struct trap_per_cpu, irq_worklist))) 2567 + offsetof(struct trap_per_cpu, irq_worklist)) || 2568 + (TRAP_PER_CPU_CPU_MONDO_QMASK != 2569 + offsetof(struct trap_per_cpu, cpu_mondo_qmask)) || 2570 + (TRAP_PER_CPU_DEV_MONDO_QMASK != 2571 + offsetof(struct trap_per_cpu, dev_mondo_qmask)) || 2572 + (TRAP_PER_CPU_RESUM_QMASK != 2573 + offsetof(struct trap_per_cpu, resum_qmask)) || 2574 + (TRAP_PER_CPU_NONRESUM_QMASK != 2575 + offsetof(struct trap_per_cpu, nonresum_qmask))) 2569 2576 trap_per_cpu_offsets_are_bolixed_dave(); 2570 2577 2571 2578 if ((TSB_CONFIG_TSB !=
+48 -40
arch/sparc64/mm/init.c
··· 23 23 #include <linux/kprobes.h> 24 24 #include <linux/cache.h> 25 25 #include <linux/sort.h> 26 + #include <linux/percpu.h> 26 27 27 28 #include <asm/head.h> 28 29 #include <asm/system.h> ··· 44 43 #include <asm/tsb.h> 45 44 #include <asm/hypervisor.h> 46 45 #include <asm/prom.h> 47 - 48 - extern void device_scan(void); 46 + #include <asm/sstate.h> 47 + #include <asm/mdesc.h> 49 48 50 49 #define MAX_PHYS_ADDRESS (1UL << 42UL) 51 50 #define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) ··· 61 60 unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; 62 61 63 62 #ifndef CONFIG_DEBUG_PAGEALLOC 64 - /* A special kernel TSB for 4MB and 256MB linear mappings. */ 65 - struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; 63 + /* A special kernel TSB for 4MB and 256MB linear mappings. 64 + * Space is allocated for this right after the trap table 65 + * in arch/sparc64/kernel/head.S 66 + */ 67 + extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; 66 68 #endif 67 69 68 70 #define MAX_BANKS 32 ··· 194 190 } 195 191 196 192 #define PG_dcache_dirty PG_arch_1 197 - #define PG_dcache_cpu_shift 24UL 198 - #define PG_dcache_cpu_mask (256UL - 1UL) 199 - 200 - #if NR_CPUS > 256 201 - #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus 202 - #endif 193 + #define PG_dcache_cpu_shift 32UL 194 + #define PG_dcache_cpu_mask \ 195 + ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL) 203 196 204 197 #define dcache_dirty_cpu(page) \ 205 198 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) ··· 558 557 unsigned long pte, 559 558 unsigned long mmu) 560 559 { 561 - register unsigned long func asm("%o5"); 562 - register unsigned long arg0 asm("%o0"); 563 - register unsigned long arg1 asm("%o1"); 564 - register unsigned long arg2 asm("%o2"); 565 - register unsigned long arg3 asm("%o3"); 560 + unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu); 566 561 567 - func = HV_FAST_MMU_MAP_PERM_ADDR; 568 - arg0 = vaddr; 569 - arg1 = 0; 570 - arg2 = pte; 571 - arg3 = mmu; 572 - __asm__ __volatile__("ta 0x80" 573 - : "=&r" (func), "=&r" (arg0), 574 - "=&r" (arg1), "=&r" (arg2), 575 - "=&r" (arg3) 576 - : "0" (func), "1" (arg0), "2" (arg1), 577 - "3" (arg2), "4" (arg3)); 578 - if (arg0 != 0) { 562 + if (ret != 0) { 579 563 prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: " 580 - "errors with %lx\n", vaddr, 0, pte, mmu, arg0); 564 + "errors with %lx\n", vaddr, 0, pte, mmu, ret); 581 565 prom_halt(); 582 566 } 583 567 } ··· 1299 1313 1300 1314 void __cpuinit sun4v_ktsb_register(void) 1301 1315 { 1302 - register unsigned long func asm("%o5"); 1303 - register unsigned long arg0 asm("%o0"); 1304 - register unsigned long arg1 asm("%o1"); 1305 - unsigned long pa; 1316 + unsigned long pa, ret; 1306 1317 1307 1318 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); 1308 1319 1309 - func = HV_FAST_MMU_TSB_CTX0; 1310 - arg0 = NUM_KTSB_DESCR; 1311 - arg1 = pa; 1312 - __asm__ __volatile__("ta %6" 1313 - : "=&r" (func), "=&r" (arg0), "=&r" (arg1) 1314 - : "0" (func), "1" (arg0), "2" (arg1), 1315 - "i" (HV_FAST_TRAP)); 1320 + ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa); 1321 + if (ret != 0) { 1322 + prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: " 1323 + "errors with %lx\n", pa, ret); 1324 + prom_halt(); 1325 + } 1316 1326 } 1317 1327 1318 1328 /* paging_init() sets up the page tables */ 1319 1329 1320 1330 extern void cheetah_ecache_flush_init(void); 1321 1331 extern void sun4v_patch_tlb_handlers(void); 1332 + 1333 + extern void cpu_probe(void); 1334 + extern void 
central_probe(void);
 1322 1335 
 1323 1336 static unsigned long last_valid_pfn;
 1324 1337 pgd_t swapper_pg_dir[2048];
··· 1330 1345 unsigned long end_pfn, pages_avail, shift, phys_base;
 1331 1346 unsigned long real_end, i;
 1332 1347 
 1348 + /* These build time checks make sure that the dcache_dirty_cpu()
 1349 + * page->flags usage will work.
 1350 + *
 1351 + * When a page gets marked as dcache-dirty, we store the
 1352 + * cpu number starting at bit 32 in the page->flags. Also,
 1353 + * functions like clear_dcache_dirty_cpu use the cpu mask
 1354 + * in 13-bit signed-immediate instruction fields.
 1355 + */
 1356 + BUILD_BUG_ON(FLAGS_RESERVED != 32);
 1357 + BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
 1358 + ilog2(roundup_pow_of_two(NR_CPUS)) > FLAGS_RESERVED);
 1359 + BUILD_BUG_ON(NR_CPUS > 4096);
 1360 +
 1333 1361 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
 1334 1362 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 1363 +
 1364 + sstate_booting();
 1335 1365 
 1336 1366 /* Invalidate both kernel TSBs. */
 1337 1367 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
··· 1416 1416 
 1417 1417 kernel_physical_mapping_init();
 1418 1418 
 1419 + real_setup_per_cpu_areas();
 1420 +
 1419 1421 prom_build_devicetree();
 1422 +
 1423 + if (tlb_type == hypervisor)
 1424 + sun4v_mdesc_init();
 1420 1425 
 1421 1426 {
 1422 1427 unsigned long zones_size[MAX_NR_ZONES];
··· 1439 1434 zholes_size);
 1440 1435 }
 1441 1436 
 1442 - device_scan();
 1437 + prom_printf("Booting Linux...\n");
 1438 +
 1439 + central_probe();
 1440 + cpu_probe();
 1443 1441 }
 1444 1442 
 1445 1443 static void __init taint_real_pages(void)
+19
arch/sparc64/prom/misc.c
··· 15 15 #include <asm/oplib.h> 16 16 #include <asm/system.h> 17 17 18 + int prom_service_exists(const char *service_name) 19 + { 20 + int err = p1275_cmd("test", P1275_ARG(0, P1275_ARG_IN_STRING) | 21 + P1275_INOUT(1, 1), service_name); 22 + 23 + if (err) 24 + return 0; 25 + return 1; 26 + } 27 + 28 + void prom_sun4v_guest_soft_state(void) 29 + { 30 + const char *svc = "SUNW,soft-state-supported"; 31 + 32 + if (!prom_service_exists(svc)) 33 + return; 34 + p1275_cmd(svc, P1275_INOUT(0, 0)); 35 + } 36 + 18 37 /* Reset and reboot the machine with the command 'bcommand'. */ 19 38 void prom_reboot(const char *bcommand) 20 39 {
+1 -1
drivers/char/drm/Kconfig
··· 6 6 # 7 7 config DRM 8 8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" 9 - depends on (AGP || AGP=n) && PCI 9 + depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG 10 10 help 11 11 Kernel-level support for the Direct Rendering Infrastructure (DRI) 12 12 introduced in XFree86 4.0. If you say Y here, you need to select
-14
drivers/scsi/Kconfig
··· 1753 1753 The ESP was an on-board SCSI controller used on Sun 3/80 1754 1754 machines. Say Y here to compile in support for it. 1755 1755 1756 - config SCSI_ESP_CORE 1757 - tristate "ESP Scsi Driver Core" 1758 - depends on SCSI 1759 - select SCSI_SPI_ATTRS 1760 - help 1761 - This is a core driver for NCR53c9x based scsi chipsets, 1762 - also known as "ESP" for Emulex Scsi Processor or 1763 - Enhanced Scsi Processor. This driver does not exist by 1764 - itself, there are front-end drivers which, when enabled, 1765 - select and enable this driver. One example is SCSI_SUNESP. 1766 - These front-end drivers provide probing, DMA, and register 1767 - access support for the core driver. 1768 - 1769 1756 config SCSI_SUNESP 1770 1757 tristate "Sparc ESP Scsi Driver" 1771 1758 depends on SBUS && SCSI 1772 - select SCSI_ESP_CORE 1773 1759 help 1774 1760 This is the driver for the Sun ESP SCSI host adapter. The ESP 1775 1761 chipset is present in most SPARC SBUS-based computers.
+2 -3
drivers/scsi/Makefile
··· 106 106 obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ 107 107 obj-$(CONFIG_MEGARAID_SAS) += megaraid/ 108 108 obj-$(CONFIG_SCSI_ACARD) += atp870u.o 109 - obj-$(CONFIG_SCSI_ESP_CORE) += esp_scsi.o 110 - obj-$(CONFIG_SCSI_SUNESP) += sun_esp.o 109 + obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o 111 110 obj-$(CONFIG_SCSI_GDTH) += gdth.o 112 111 obj-$(CONFIG_SCSI_INITIO) += initio.o 113 112 obj-$(CONFIG_SCSI_INIA100) += a100u2w.o ··· 120 121 obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o 121 122 obj-$(CONFIG_SCSI_PPA) += ppa.o 122 123 obj-$(CONFIG_SCSI_IMM) += imm.o 123 - obj-$(CONFIG_JAZZ_ESP) += NCR53C9x.o jazz_esp.o 124 + obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o 124 125 obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o 125 126 obj-$(CONFIG_SCSI_FCAL) += fcal.o 126 127 obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
+212 -275
drivers/scsi/jazz_esp.c
··· 1 - /*
 2 - * jazz_esp.c: Driver for SCSI chip on Mips Magnum Boards (JAZZ architecture)
 1 + /* jazz_esp.c: ESP front-end for MIPS JAZZ systems.
 3 2 *
 4 - * Copyright (C) 1997 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 5 - *
 6 - * jazz_esp is based on David S. Miller's ESP driver and cyber_esp
 3 + * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.franken.de)
 7 4 */
 8 5 
 9 - #include <linux/init.h>
 10 6 #include <linux/kernel.h>
 11 - #include <linux/delay.h>
 12 7 #include <linux/types.h>
 13 - #include <linux/string.h>
 14 - #include <linux/slab.h>
 15 - #include <linux/blkdev.h>
 16 - #include <linux/proc_fs.h>
 17 - #include <linux/stat.h>
 18 - 
 19 - #include "scsi.h"
 20 - #include <scsi/scsi_host.h>
 21 - #include "NCR53C9x.h"
 8 + #include <linux/module.h>
 9 + #include <linux/init.h>
 10 + #include <linux/interrupt.h>
 11 + #include <linux/platform_device.h>
 12 + #include <linux/dma-mapping.h>
 22 13 
 23 14 #include <asm/irq.h>
 24 - #include <asm/jazz.h>
 25 - #include <asm/jazzdma.h>
 15 + #include <asm/io.h>
 26 16 #include <asm/dma.h>
 27 17 
 28 - #include <asm/pgtable.h>
 18 + #include <asm/jazz.h>
 19 + #include <asm/jazzdma.h>
 29 20 
 30 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
 31 - static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp);
 32 - static void dma_dump_state(struct NCR_ESP *esp);
 33 - static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length);
 34 - static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length);
 35 - static void dma_ints_off(struct NCR_ESP *esp);
 36 - static void dma_ints_on(struct NCR_ESP *esp);
 37 - static int dma_irq_p(struct NCR_ESP *esp);
 38 - static int dma_ports_p(struct NCR_ESP *esp);
 39 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
 40 - static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp);
 41 - static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp);
 42 - static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp);
 43 - static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp);
 44 - static void dma_advance_sg (struct scsi_cmnd *sp);
 45 - static void dma_led_off(struct NCR_ESP *);
 46 - static void dma_led_on(struct NCR_ESP *);
 21 + #include <scsi/scsi_host.h>
 47 22 
 23 + #include "esp_scsi.h"
 48 24 
 49 - static volatile unsigned char cmd_buffer[16];
 50 - /* This is where all commands are put
 51 - * before they are transferred to the ESP chip
 52 - * via PIO. 
53 - */ 25 + #define DRV_MODULE_NAME "jazz_esp" 26 + #define PFX DRV_MODULE_NAME ": " 27 + #define DRV_VERSION "1.000" 28 + #define DRV_MODULE_RELDATE "May 19, 2007" 54 29 55 - static int jazz_esp_release(struct Scsi_Host *shost) 30 + static void jazz_esp_write8(struct esp *esp, u8 val, unsigned long reg) 56 31 { 57 - if (shost->irq) 58 - free_irq(shost->irq, NULL); 59 - if (shost->dma_channel != 0xff) 60 - free_dma(shost->dma_channel); 61 - if (shost->io_port && shost->n_io_port) 62 - release_region(shost->io_port, shost->n_io_port); 63 - scsi_unregister(shost); 32 + *(volatile u8 *)(esp->regs + reg) = val; 33 + } 34 + 35 + static u8 jazz_esp_read8(struct esp *esp, unsigned long reg) 36 + { 37 + return *(volatile u8 *)(esp->regs + reg); 38 + } 39 + 40 + static dma_addr_t jazz_esp_map_single(struct esp *esp, void *buf, 41 + size_t sz, int dir) 42 + { 43 + return dma_map_single(esp->dev, buf, sz, dir); 44 + } 45 + 46 + static int jazz_esp_map_sg(struct esp *esp, struct scatterlist *sg, 47 + int num_sg, int dir) 48 + { 49 + return dma_map_sg(esp->dev, sg, num_sg, dir); 50 + } 51 + 52 + static void jazz_esp_unmap_single(struct esp *esp, dma_addr_t addr, 53 + size_t sz, int dir) 54 + { 55 + dma_unmap_single(esp->dev, addr, sz, dir); 56 + } 57 + 58 + static void jazz_esp_unmap_sg(struct esp *esp, struct scatterlist *sg, 59 + int num_sg, int dir) 60 + { 61 + dma_unmap_sg(esp->dev, sg, num_sg, dir); 62 + } 63 + 64 + static int jazz_esp_irq_pending(struct esp *esp) 65 + { 66 + if (jazz_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) 67 + return 1; 64 68 return 0; 65 69 } 66 70 67 - /***************************************************************** Detection */ 68 - static int jazz_esp_detect(struct scsi_host_template *tpnt) 71 + static void jazz_esp_reset_dma(struct esp *esp) 69 72 { 70 - struct NCR_ESP *esp; 71 - struct ConfigDev *esp_dev; 73 + vdma_disable ((int)esp->dma_regs); 74 + } 72 75 73 - /* 74 - * first assumption it is there:-) 75 - */ 76 - if (1) { 77 - esp_dev = NULL; 78 - esp = esp_allocate(tpnt, esp_dev, 0); 79 - 80 - /* Do command transfer with programmed I/O */ 81 - esp->do_pio_cmds = 1; 82 - 83 - /* Required functions */ 84 - esp->dma_bytes_sent = &dma_bytes_sent; 85 - esp->dma_can_transfer = &dma_can_transfer; 86 - esp->dma_dump_state = &dma_dump_state; 87 - esp->dma_init_read = &dma_init_read; 88 - esp->dma_init_write = &dma_init_write; 89 - esp->dma_ints_off = &dma_ints_off; 90 - esp->dma_ints_on = &dma_ints_on; 91 - esp->dma_irq_p = &dma_irq_p; 92 - esp->dma_ports_p = &dma_ports_p; 93 - esp->dma_setup = &dma_setup; 76 + static void jazz_esp_dma_drain(struct esp *esp) 77 + { 78 + /* nothing to do */ 79 + } 94 80 95 - /* Optional functions */ 96 - esp->dma_barrier = NULL; 97 - esp->dma_drain = NULL; 98 - esp->dma_invalidate = NULL; 99 - esp->dma_irq_entry = NULL; 100 - esp->dma_irq_exit = NULL; 101 - esp->dma_poll = NULL; 102 - esp->dma_reset = NULL; 103 - esp->dma_led_off = &dma_led_off; 104 - esp->dma_led_on = &dma_led_on; 105 - 106 - /* virtual DMA functions */ 107 - esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one; 108 - esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl; 109 - esp->dma_mmu_release_scsi_one = &dma_mmu_release_scsi_one; 110 - esp->dma_mmu_release_scsi_sgl = &dma_mmu_release_scsi_sgl; 111 - esp->dma_advance_sg = &dma_advance_sg; 81 + static void jazz_esp_dma_invalidate(struct esp *esp) 82 + { 83 + vdma_disable ((int)esp->dma_regs); 84 + } 112 85 86 + static void jazz_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, 87 + u32 dma_count, int write, u8 cmd) 
88 + { 89 + BUG_ON(!(cmd & ESP_CMD_DMA)); 113 90 114 - /* SCSI chip speed */ 91 + jazz_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); 92 + jazz_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); 93 + vdma_disable ((int)esp->dma_regs); 94 + if (write) 95 + vdma_set_mode ((int)esp->dma_regs, DMA_MODE_READ); 96 + else 97 + vdma_set_mode ((int)esp->dma_regs, DMA_MODE_WRITE); 98 + 99 + vdma_set_addr ((int)esp->dma_regs, addr); 100 + vdma_set_count ((int)esp->dma_regs, dma_count); 101 + vdma_enable ((int)esp->dma_regs); 102 + 103 + scsi_esp_cmd(esp, cmd); 104 + } 105 + 106 + static int jazz_esp_dma_error(struct esp *esp) 107 + { 108 + u32 enable = vdma_get_enable((int)esp->dma_regs); 109 + 110 + if (enable & (R4030_MEM_INTR|R4030_ADDR_INTR)) 111 + return 1; 112 + 113 + return 0; 114 + } 115 + 116 + static const struct esp_driver_ops jazz_esp_ops = { 117 + .esp_write8 = jazz_esp_write8, 118 + .esp_read8 = jazz_esp_read8, 119 + .map_single = jazz_esp_map_single, 120 + .map_sg = jazz_esp_map_sg, 121 + .unmap_single = jazz_esp_unmap_single, 122 + .unmap_sg = jazz_esp_unmap_sg, 123 + .irq_pending = jazz_esp_irq_pending, 124 + .reset_dma = jazz_esp_reset_dma, 125 + .dma_drain = jazz_esp_dma_drain, 126 + .dma_invalidate = jazz_esp_dma_invalidate, 127 + .send_dma_cmd = jazz_esp_send_dma_cmd, 128 + .dma_error = jazz_esp_dma_error, 129 + }; 130 + 131 + static int __devinit esp_jazz_probe(struct platform_device *dev) 132 + { 133 + struct scsi_host_template *tpnt = &scsi_esp_template; 134 + struct Scsi_Host *host; 135 + struct esp *esp; 136 + struct resource *res; 137 + int err; 138 + 139 + host = scsi_host_alloc(tpnt, sizeof(struct esp)); 140 + 141 + err = -ENOMEM; 142 + if (!host) 143 + goto fail; 144 + 145 + host->max_id = 8; 146 + esp = host_to_esp(host); 147 + 148 + esp->host = host; 149 + esp->dev = dev; 150 + esp->ops = &jazz_esp_ops; 151 + 152 + res = platform_get_resource(dev, IORESOURCE_MEM, 0); 153 + if (!res) 154 + goto fail_unlink; 155 + 156 + esp->regs = (void __iomem *)res->start; 157 + if (!esp->regs) 158 + goto fail_unlink; 159 + 160 + res = platform_get_resource(dev, IORESOURCE_MEM, 1); 161 + if (!res) 162 + goto fail_unlink; 163 + 164 + esp->dma_regs = (void __iomem *)res->start; 165 + 166 + esp->command_block = dma_alloc_coherent(esp->dev, 16, 167 + &esp->command_block_dma, 168 + GFP_KERNEL); 169 + if (!esp->command_block) 170 + goto fail_unmap_regs; 171 + 172 + host->irq = platform_get_irq(dev, 0); 173 + err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp); 174 + if (err < 0) 175 + goto fail_unmap_command_block; 176 + 177 + esp->scsi_id = 7; 178 + esp->host->this_id = esp->scsi_id; 179 + esp->scsi_id_mask = (1 << esp->scsi_id); 115 180 esp->cfreq = 40000000; 116 181 117 - /* 118 - * we don't give the address of DMA channel, but the number 119 - * of DMA channel, so we can use the jazz DMA functions 120 - * 121 - */ 122 - esp->dregs = (void *) JAZZ_SCSI_DMA; 123 - 124 - /* ESP register base */ 125 - esp->eregs = (struct ESP_regs *)(JAZZ_SCSI_BASE); 126 - 127 - /* Set the command buffer */ 128 - esp->esp_command = (volatile unsigned char *)cmd_buffer; 129 - 130 - /* get virtual dma address for command buffer */ 131 - esp->esp_command_dvma = vdma_alloc(CPHYSADDR(cmd_buffer), sizeof (cmd_buffer)); 132 - 133 - esp->irq = JAZZ_SCSI_IRQ; 134 - request_irq(JAZZ_SCSI_IRQ, esp_intr, IRQF_DISABLED, "JAZZ SCSI", 135 - esp->ehost); 182 + dev_set_drvdata(&dev->dev, esp); 136 183 137 - /* 138 - * FIXME, look if the scsi id is available from NVRAM 139 - */ 140 - esp->scsi_id = 
7; 141 - 142 - /* Check for differential SCSI-bus */ 143 - /* What is this stuff? */ 144 - esp->diff = 0; 184 + err = scsi_esp_register(esp, &dev->dev); 185 + if (err) 186 + goto fail_free_irq; 145 187 146 - esp_initialize(esp); 147 - 148 - printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps,esps_in_use); 149 - esps_running = esps_in_use; 150 - return esps_in_use; 151 - } 152 - return 0; 188 + return 0; 189 + 190 + fail_free_irq: 191 + free_irq(host->irq, esp); 192 + fail_unmap_command_block: 193 + dma_free_coherent(esp->dev, 16, 194 + esp->command_block, 195 + esp->command_block_dma); 196 + fail_unmap_regs: 197 + fail_unlink: 198 + scsi_host_put(host); 199 + fail: 200 + return err; 153 201 } 154 202 155 - /************************************************************* DMA Functions */ 156 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count) 203 + static int __devexit esp_jazz_remove(struct platform_device *dev) 157 204 { 158 - return fifo_count; 205 + struct esp *esp = dev_get_drvdata(&dev->dev); 206 + unsigned int irq = esp->host->irq; 207 + 208 + scsi_esp_unregister(esp); 209 + 210 + free_irq(irq, esp); 211 + dma_free_coherent(esp->dev, 16, 212 + esp->command_block, 213 + esp->command_block_dma); 214 + 215 + scsi_host_put(esp->host); 216 + 217 + return 0; 159 218 } 160 219 161 - static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp) 162 - { 163 - /* 164 - * maximum DMA size is 1MB 165 - */ 166 - unsigned long sz = sp->SCp.this_residual; 167 - if(sz > 0x100000) 168 - sz = 0x100000; 169 - return sz; 170 - } 171 - 172 - static void dma_dump_state(struct NCR_ESP *esp) 173 - { 174 - 175 - ESPLOG(("esp%d: dma -- enable <%08x> residue <%08x\n", 176 - esp->esp_id, vdma_get_enable((int)esp->dregs), vdma_get_residue((int)esp->dregs))); 177 - } 178 - 179 - static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length) 180 - { 181 - dma_cache_wback_inv ((unsigned long)phys_to_virt(vdma_log2phys(vaddress)), length); 182 - vdma_disable ((int)esp->dregs); 183 - vdma_set_mode ((int)esp->dregs, DMA_MODE_READ); 184 - vdma_set_addr ((int)esp->dregs, vaddress); 185 - vdma_set_count ((int)esp->dregs, length); 186 - vdma_enable ((int)esp->dregs); 187 - } 188 - 189 - static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length) 190 - { 191 - dma_cache_wback_inv ((unsigned long)phys_to_virt(vdma_log2phys(vaddress)), length); 192 - vdma_disable ((int)esp->dregs); 193 - vdma_set_mode ((int)esp->dregs, DMA_MODE_WRITE); 194 - vdma_set_addr ((int)esp->dregs, vaddress); 195 - vdma_set_count ((int)esp->dregs, length); 196 - vdma_enable ((int)esp->dregs); 197 - } 198 - 199 - static void dma_ints_off(struct NCR_ESP *esp) 200 - { 201 - disable_irq(esp->irq); 202 - } 203 - 204 - static void dma_ints_on(struct NCR_ESP *esp) 205 - { 206 - enable_irq(esp->irq); 207 - } 208 - 209 - static int dma_irq_p(struct NCR_ESP *esp) 210 - { 211 - return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR); 212 - } 213 - 214 - static int dma_ports_p(struct NCR_ESP *esp) 215 - { 216 - int enable = vdma_get_enable((int)esp->dregs); 217 - 218 - return (enable & R4030_CHNL_ENABLE); 219 - } 220 - 221 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write) 222 - { 223 - /* 224 - * On the Sparc, DMA_ST_WRITE means "move data from device to memory" 225 - * so when (write) is true, it actually means READ! 
226 - */ 227 - if(write){ 228 - dma_init_read(esp, addr, count); 229 - } else { 230 - dma_init_write(esp, addr, count); 231 - } 232 - } 233 - 234 - static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp) 235 - { 236 - sp->SCp.have_data_in = vdma_alloc(CPHYSADDR(sp->SCp.buffer), sp->SCp.this_residual); 237 - sp->SCp.ptr = (char *)((unsigned long)sp->SCp.have_data_in); 238 - } 239 - 240 - static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp) 241 - { 242 - int sz = sp->SCp.buffers_residual; 243 - struct scatterlist *sg = (struct scatterlist *) sp->SCp.buffer; 244 - 245 - while (sz >= 0) { 246 - sg[sz].dma_address = vdma_alloc(CPHYSADDR(page_address(sg[sz].page) + sg[sz].offset), sg[sz].length); 247 - sz--; 248 - } 249 - sp->SCp.ptr=(char *)(sp->SCp.buffer->dma_address); 250 - } 251 - 252 - static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp) 253 - { 254 - vdma_free(sp->SCp.have_data_in); 255 - } 256 - 257 - static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp) 258 - { 259 - int sz = sp->use_sg - 1; 260 - struct scatterlist *sg = (struct scatterlist *)sp->request_buffer; 261 - 262 - while(sz >= 0) { 263 - vdma_free(sg[sz].dma_address); 264 - sz--; 265 - } 266 - } 267 - 268 - static void dma_advance_sg (struct scsi_cmnd *sp) 269 - { 270 - sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address); 271 - } 272 - 273 - #define JAZZ_HDC_LED 0xe000d100 /* FIXME, find correct address */ 274 - 275 - static void dma_led_off(struct NCR_ESP *esp) 276 - { 277 - #if 0 278 - *(unsigned char *)JAZZ_HDC_LED = 0; 279 - #endif 280 - } 281 - 282 - static void dma_led_on(struct NCR_ESP *esp) 283 - { 284 - #if 0 285 - *(unsigned char *)JAZZ_HDC_LED = 1; 286 - #endif 287 - } 288 - 289 - static struct scsi_host_template driver_template = { 290 - .proc_name = "jazz_esp", 291 - .proc_info = esp_proc_info, 292 - .name = "ESP 100/100a/200", 293 - .detect = jazz_esp_detect, 294 - .slave_alloc = esp_slave_alloc, 295 - .slave_destroy = esp_slave_destroy, 296 - .release = jazz_esp_release, 297 - .info = esp_info, 298 - .queuecommand = esp_queue, 299 - .eh_abort_handler = esp_abort, 300 - .eh_bus_reset_handler = esp_reset, 301 - .can_queue = 7, 302 - .this_id = 7, 303 - .sg_tablesize = SG_ALL, 304 - .cmd_per_lun = 1, 305 - .use_clustering = DISABLE_CLUSTERING, 220 + static struct platform_driver esp_jazz_driver = { 221 + .probe = esp_jazz_probe, 222 + .remove = __devexit_p(esp_jazz_remove), 223 + .driver = { 224 + .name = "jazz_esp", 225 + }, 306 226 }; 307 - #include "scsi_module.c" 227 + 228 + static int __init jazz_esp_init(void) 229 + { 230 + return platform_driver_register(&esp_jazz_driver); 231 + } 232 + 233 + static void __exit jazz_esp_exit(void) 234 + { 235 + platform_driver_unregister(&esp_jazz_driver); 236 + } 237 + 238 + MODULE_DESCRIPTION("JAZZ ESP SCSI driver"); 239 + MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)"); 240 + MODULE_LICENSE("GPL"); 241 + MODULE_VERSION(DRV_VERSION); 242 + 243 + module_init(jazz_esp_init); 244 + module_exit(jazz_esp_exit);
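The rewrite reduces jazz_esp to a front-end: register access, VDMA plumbing, and an esp_driver_ops table, while the shared esp_scsi core owns the chip state machine (the same split the sun_esp front-end uses per the Makefile change above). The idiom in miniature, with purely illustrative types and register numbers:

    #include <stdio.h>
    #include <stdint.h>

    struct esp;

    struct esp_ops {                        /* what the core needs from a bus */
            void    (*write8)(struct esp *esp, uint8_t val, unsigned long reg);
            uint8_t (*read8)(struct esp *esp, unsigned long reg);
    };

    struct esp {
            const struct esp_ops *ops;
            uint8_t regs[16];               /* stand-in register file */
    };

    /* front-end: plain memory-mapped byte access, as on JAZZ */
    static void mmio_write8(struct esp *esp, uint8_t val, unsigned long reg)
    {
            esp->regs[reg] = val;
    }

    static uint8_t mmio_read8(struct esp *esp, unsigned long reg)
    {
            return esp->regs[reg];
    }

    static const struct esp_ops mmio_ops = { mmio_write8, mmio_read8 };

    /* core: knows the chip, never the bus */
    static void core_reset_chip(struct esp *esp)
    {
            esp->ops->write8(esp, 0x02, 1);   /* made-up reset command */
            printf("cmd reg now %#x\n", esp->ops->read8(esp, 1));
    }

    int main(void)
    {
            struct esp chip = { .ops = &mmio_ops };

            core_reset_chip(&chip);
            return 0;
    }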
+4 -14
drivers/scsi/pluto.c
··· 4 4 * 5 5 */ 6 6 7 + #include <linux/completion.h> 7 8 #include <linux/kernel.h> 8 9 #include <linux/delay.h> 9 10 #include <linux/types.h> ··· 51 50 } *fcs __initdata; 52 51 static int fcscount __initdata = 0; 53 52 static atomic_t fcss __initdata = ATOMIC_INIT(0); 54 - DECLARE_MUTEX_LOCKED(fc_sem); 53 + static DECLARE_COMPLETION(fc_detect_complete); 55 54 56 55 static int pluto_encode_addr(Scsi_Cmnd *SCpnt, u16 *addr, fc_channel *fc, fcp_cmnd *fcmd); 57 - 58 - static void __init pluto_detect_timeout(unsigned long data) 59 - { 60 - PLND(("Timeout\n")) 61 - up(&fc_sem); 62 - } 63 56 64 57 static void __init pluto_detect_done(Scsi_Cmnd *SCpnt) 65 58 { ··· 64 69 { 65 70 PLND(("Detect done %08lx\n", (long)SCpnt)) 66 71 if (atomic_dec_and_test (&fcss)) 67 - up(&fc_sem); 72 + complete(&fc_detect_complete); 68 73 } 69 74 70 75 int pluto_slave_configure(struct scsi_device *device) ··· 91 96 int i, retry, nplutos; 92 97 fc_channel *fc; 93 98 struct scsi_device dev; 94 - DEFINE_TIMER(fc_timer, pluto_detect_timeout, 0, 0); 95 99 96 100 tpnt->proc_name = "pluto"; 97 101 fcscount = 0; ··· 181 187 } 182 188 } 183 189 184 - fc_timer.expires = jiffies + 10 * HZ; 185 - add_timer(&fc_timer); 186 - 187 - down(&fc_sem); 190 + wait_for_completion_timeout(&fc_detect_complete, 10 * HZ); 188 191 PLND(("Woken up\n")) 189 192 if (!atomic_read(&fcss)) 190 193 break; /* All fc channels have answered us */ 191 194 } 192 - del_timer_sync(&fc_timer); 193 195 194 196 PLND(("Finished search\n")) 195 197 for (i = 0, nplutos = 0; i < fcscount; i++) {
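The conversion replaces a locked semaphore plus a hand-rolled 10-second timer with one primitive: the detect-done callback complete()s when the last channel has answered, and wait_for_completion_timeout() supplies both the sleep and the timeout. The shape of the pattern, with generic stand-in names rather than pluto's:

    static DECLARE_COMPLETION(detect_done);
    static atomic_t outstanding = ATOMIC_INIT(2);   /* two channels, say */

    static void probe_done_callback(void)   /* completion (irq) context */
    {
            if (atomic_dec_and_test(&outstanding))
                    complete(&detect_done);
    }

    static void prober(void)                /* process context */
    {
            /* returns 0 on timeout, else the jiffies left */
            if (!wait_for_completion_timeout(&detect_done, 10 * HZ))
                    printk(KERN_WARNING "probe timed out\n");
    }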
+3 -3
drivers/serial/suncore.c
··· 30 30 sunserial_console_termios(struct console *con) 31 31 { 32 32 char mode[16], buf[16], *s; 33 - char *mode_prop = "ttyX-mode"; 34 - char *cd_prop = "ttyX-ignore-cd"; 35 - char *dtr_prop = "ttyX-rts-dtr-off"; 33 + char mode_prop[] = "ttyX-mode"; 34 + char cd_prop[] = "ttyX-ignore-cd"; 35 + char dtr_prop[] = "ttyX-rts-dtr-off"; 36 36 char *ssp_console_modes_prop = "ssp-console-modes"; 37 37 int baud, bits, stop, cflag; 38 38 char parity;
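The one-character change from char *p = "..." to char p[] = "..." matters because sunserial_console_termios() writes into these strings, patching the 'X' placeholder with the actual port designator before looking the property up. A pointer initialization aims at the string literal itself, which the compiler may place in read-only storage, so the store is undefined behavior; the array form gives each call its own writable copy:

    #include <stdio.h>

    int main(void)
    {
            char mode_prop[] = "ttyX-mode";   /* writable copy on the stack */

            mode_prop[3] = 'a';               /* patch the 'X' placeholder */
            printf("%s\n", mode_prop);        /* prints "ttya-mode" */

            /*
             * char *bad = "ttyX-mode";
             * bad[3] = 'a';      undefined behavior; typically faults
             */
            return 0;
    }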
+2 -2
drivers/serial/sunzilog.c
··· 1239 1239 #define SUNZILOG_CONSOLE() (NULL) 1240 1240 #endif 1241 1241 1242 - static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channel) 1242 + static void __devinit sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channel) 1243 1243 { 1244 1244 int baud, brg; 1245 1245 ··· 1259 1259 } 1260 1260 1261 1261 #ifdef CONFIG_SERIO 1262 - static void __init sunzilog_register_serio(struct uart_sunzilog_port *up) 1262 + static void __devinit sunzilog_register_serio(struct uart_sunzilog_port *up) 1263 1263 { 1264 1264 struct serio *serio = &up->serio; 1265 1265
+38
include/asm-sparc/atomic.h
··· 2 2 * 3 3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 4 4 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au) 5 + * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org) 5 6 * 6 7 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based 7 8 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>. ··· 11 10 #ifndef __ARCH_SPARC_ATOMIC__ 12 11 #define __ARCH_SPARC_ATOMIC__ 13 12 13 + #include <linux/types.h> 14 14 15 15 typedef struct { volatile int counter; } atomic_t; 16 16 17 17 #ifdef __KERNEL__ 18 + 19 + /* Emulate cmpxchg() the same way we emulate atomics, 20 + * by hashing the object address and indexing into an array 21 + * of spinlocks to get a bit of performance... 22 + * 23 + * See arch/sparc/lib/atomic32.c for implementation. 24 + * 25 + * Cribbed from <asm-parisc/atomic.h> 26 + */ 27 + #define __HAVE_ARCH_CMPXCHG 1 28 + 29 + /* bug catcher for when unsupported size is used - won't link */ 30 + extern void __cmpxchg_called_with_bad_pointer(void); 31 + /* we only need to support cmpxchg of a u32 on sparc */ 32 + extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_); 33 + 34 + /* don't worry...optimizer will get rid of most of this */ 35 + static __inline__ unsigned long 36 + __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) 37 + { 38 + switch(size) { 39 + case 4: 40 + return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_); 41 + default: 42 + __cmpxchg_called_with_bad_pointer(); 43 + break; 44 + } 45 + return old; 46 + } 47 + 48 + #define cmpxchg(ptr,o,n) ({ \ 49 + __typeof__(*(ptr)) _o_ = (o); \ 50 + __typeof__(*(ptr)) _n_ = (n); \ 51 + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ 52 + (unsigned long)_n_, sizeof(*(ptr))); \ 53 + }) 18 54 19 55 #define ATOMIC_INIT(i) { (i) } 20 56
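Since sparc32 has no compare-and-swap instruction, __cmpxchg_u32() takes the same address-hashed spinlock that already serializes the atomic_t helpers in arch/sparc/lib/atomic32.c and performs the compare and store under it. That is atomic only against other code that honors the locks, which is why this merge also makes DRM depend on !EMULATED_CMPXCHG (drivers/char/drm/Kconfig above). A user-space model of the hash-locked emulation using pthread mutexes; the hash shift and table size are arbitrary here:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdint.h>

    #define HASH_SIZE 4
    #define HASH_LOCK(p) \
            (&hash_locks[(((uintptr_t)(p)) >> 4) & (HASH_SIZE - 1)])

    static pthread_mutex_t hash_locks[HASH_SIZE] = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    static uint32_t cmpxchg_u32(volatile uint32_t *ptr, uint32_t old,
                                uint32_t new_)
    {
            pthread_mutex_t *lock = HASH_LOCK(ptr);
            uint32_t prev;

            pthread_mutex_lock(lock);
            if ((prev = *ptr) == old)
                    *ptr = new_;
            pthread_mutex_unlock(lock);

            return prev;                    /* success iff prev == old */
    }

    int main(void)
    {
            uint32_t v = 5;
            uint32_t prev;

            prev = cmpxchg_u32(&v, 5, 7);
            printf("prev=%u v=%u\n", prev, v);   /* prev=5 v=7 */
            prev = cmpxchg_u32(&v, 5, 9);
            printf("prev=%u v=%u\n", prev, v);   /* prev=7 v=7 */
            return 0;
    }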
+4 -4
include/asm-sparc64/bugs.h
··· 1 - /* $Id: bugs.h,v 1.1 1996/12/26 13:25:20 davem Exp $ 2 - * include/asm-sparc64/bugs.h: Sparc probes for various bugs. 1 + /* bugs.h: Sparc64 probes for various bugs. 3 2 * 4 - * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 3 + * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net) 5 4 */ 6 - 5 + #include <asm/sstate.h> 7 6 8 7 extern unsigned long loops_per_jiffy; 9 8 ··· 11 12 #ifndef CONFIG_SMP 12 13 cpu_data(0).udelay_val = loops_per_jiffy; 13 14 #endif 15 + sstate_running(); 14 16 }
+17 -7
include/asm-sparc64/cpudata.h
··· 17 17 typedef struct { 18 18 /* Dcache line 1 */ 19 19 unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ 20 - unsigned int __pad0_1; 21 - unsigned int __pad0_2; 22 - unsigned int __pad1; 20 + unsigned int __pad0; 23 21 unsigned long clock_tick; /* %tick's per second */ 24 22 unsigned long udelay_val; 23 + unsigned int __pad1; 24 + unsigned int __pad2; 25 25 26 26 /* Dcache line 2, rarely used */ 27 27 unsigned int dcache_size; ··· 30 30 unsigned int icache_line_size; 31 31 unsigned int ecache_size; 32 32 unsigned int ecache_line_size; 33 + int core_id; 33 34 unsigned int __pad3; 34 - unsigned int __pad4; 35 35 } cpuinfo_sparc; 36 36 37 37 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); ··· 76 76 77 77 /* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */ 78 78 unsigned int irq_worklist; 79 - unsigned int __pad1; 80 - unsigned long __pad2[3]; 79 + unsigned int cpu_mondo_qmask; 80 + unsigned int dev_mondo_qmask; 81 + unsigned int resum_qmask; 82 + unsigned int nonresum_qmask; 83 + unsigned int __pad2[3]; 81 84 } __attribute__((aligned(64))); 82 85 extern struct trap_per_cpu trap_block[NR_CPUS]; 83 86 extern void init_cur_cpu_trap(struct thread_info *); 84 87 extern void setup_tba(void); 88 + extern int ncpus_probed; 89 + 90 + extern unsigned long real_hard_smp_processor_id(void); 85 91 86 92 struct cpuid_patch_entry { 87 93 unsigned int addr; ··· 128 122 #define TRAP_PER_CPU_TSB_HUGE 0xd0 129 123 #define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8 130 124 #define TRAP_PER_CPU_IRQ_WORKLIST 0xe0 125 + #define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe4 126 + #define TRAP_PER_CPU_DEV_MONDO_QMASK 0xe8 127 + #define TRAP_PER_CPU_RESUM_QMASK 0xec 128 + #define TRAP_PER_CPU_NONRESUM_QMASK 0xf0 131 129 132 130 #define TRAP_BLOCK_SZ_SHIFT 8 133 131 ··· 202 192 * the calculations done by the macro mid-stream. 203 193 */ 204 194 #define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \ 205 - ldub [THR + TI_CPU], REG1; \ 195 + lduh [THR + TI_CPU], REG1; \ 206 196 sethi %hi(__per_cpu_shift), REG3; \ 207 197 sethi %hi(__per_cpu_base), REG2; \ 208 198 ldx [REG3 + %lo(__per_cpu_shift)], REG3; \
+602 -45
include/asm-sparc64/hypervisor.h
··· 73 73 #define HV_ENOTSUPPORTED 13 /* Function not supported */ 74 74 #define HV_ENOMAP 14 /* No mapping found */ 75 75 #define HV_ETOOMANY 15 /* Too many items specified */ 76 + #define HV_ECHANNEL 16 /* Invalid LDC channel */ 77 + #define HV_EBUSY 17 /* Resource busy */ 76 78 77 79 /* mach_exit() 78 80 * TRAP: HV_FAST_TRAP ··· 96 94 * 97 95 */ 98 96 #define HV_FAST_MACH_EXIT 0x00 97 + 98 + #ifndef __ASSEMBLY__ 99 + extern void sun4v_mach_exit(unsigned long exit_core); 100 + #endif 99 101 100 102 /* Domain services. */ 101 103 ··· 126 120 */ 127 121 #define HV_FAST_MACH_DESC 0x01 128 122 129 - /* mach_exit() 123 + #ifndef __ASSEMBLY__ 124 + extern unsigned long sun4v_mach_desc(unsigned long buffer_pa, 125 + unsigned long buf_len, 126 + unsigned long *real_buf_len); 127 + #endif 128 + 129 + /* mach_sir() 130 130 * TRAP: HV_FAST_TRAP 131 131 * FUNCTION: HV_FAST_MACH_SIR 132 132 * ERRORS: This service does not return. ··· 147 135 */ 148 136 #define HV_FAST_MACH_SIR 0x02 149 137 150 - /* mach_set_soft_state() 151 - * TRAP: HV_FAST_TRAP 152 - * FUNCTION: HV_FAST_MACH_SET_SOFT_STATE 153 - * ARG0: software state 154 - * ARG1: software state description pointer 155 - * RET0: status 156 - * ERRORS: EINVAL software state not valid or software state 157 - * description is not NULL terminated 158 - * ENORADDR software state description pointer is not a 159 - * valid real address 160 - * EBADALIGNED software state description is not correctly 161 - * aligned 162 - * 163 - * This allows the guest to report it's soft state to the hypervisor. There 164 - * are two primary components to this state. The first part states whether 165 - * the guest software is running or not. The second containts optional 166 - * details specific to the software. 167 - * 168 - * The software state argument is defined below in HV_SOFT_STATE_*, and 169 - * indicates whether the guest is operating normally or in a transitional 170 - * state. 171 - * 172 - * The software state description argument is a real address of a data buffer 173 - * of size 32-bytes aligned on a 32-byte boundary. It is treated as a NULL 174 - * terminated 7-bit ASCII string of up to 31 characters not including the 175 - * NULL termination. 176 - */ 177 - #define HV_FAST_MACH_SET_SOFT_STATE 0x03 178 - #define HV_SOFT_STATE_NORMAL 0x01 179 - #define HV_SOFT_STATE_TRANSITION 0x02 138 + #ifndef __ASSEMBLY__ 139 + extern void sun4v_mach_sir(void); 140 + #endif 180 141 181 - /* mach_get_soft_state() 142 + /* mach_set_watchdog() 182 143 * TRAP: HV_FAST_TRAP 183 - * FUNCTION: HV_FAST_MACH_GET_SOFT_STATE 184 - * ARG0: software state description pointer 144 + * FUNCTION: HV_FAST_MACH_SET_WATCHDOG 145 + * ARG0: timeout in milliseconds 185 146 * RET0: status 186 - * RET1: software state 187 - * ERRORS: ENORADDR software state description pointer is not a 188 - * valid real address 189 - * EBADALIGNED software state description is not correctly 190 - * aligned 147 + * RET1: time remaining in milliseconds 191 148 * 192 - * Retrieve the current value of the guest's software state. The rules 193 - * for the software state pointer are the same as for mach_set_soft_state() 194 - * above. 149 + * A guest uses this API to set a watchdog timer. Once the guest has set 150 + * the timer, it must call the timer service again either to disable or 151 + * postpone the expiration. If the timer expires before being reset or 152 + * disabled, then the hypervisor takes a platform-specific action leading 153 + * to guest termination within a bounded time period. 
The platform action 154 + * may include recovery actions such as reporting the expiration to a 155 + * Service Processor, and/or automatically restarting the guest. 156 + * 157 + * The 'timeout' parameter is specified in milliseconds, however the 158 + * implemented granularity is given by the 'watchdog-resolution' 159 + * property in the 'platform' node of the guest's machine description. 160 + * The largest allowed timeout value is specified by the 161 + * 'watchdog-max-timeout' property of the 'platform' node. 162 + * 163 + * If the 'timeout' argument is not zero, the watchdog timer is set to 164 + * expire after a minimum of 'timeout' milliseconds. 165 + * 166 + * If the 'timeout' argument is zero, the watchdog timer is disabled. 167 + * 168 + * If the 'timeout' value exceeds the value of the 'watchdog-max-timeout' 169 + * property, the hypervisor leaves the watchdog timer state unchanged, 170 + * and returns a status of EINVAL. 171 + * 172 + * The 'time remaining' return value is valid regardless of whether the 173 + * return status is EOK or EINVAL. A non-zero return value indicates the 174 + * number of milliseconds that were remaining until the timer was to expire. 175 + * If less than one millisecond remains, the return value is '1'. If the 176 + * watchdog timer was disabled at the time of the call, the return value is 177 + * zero. 178 + * 179 + * If the hypervisor cannot support the exact timeout value requested, but 180 + * can support a larger timeout value, the hypervisor may round the actual 181 + * timeout to a value larger than the requested timeout, consequently the 182 + * 'time remaining' return value may be larger than the previously requested 183 + * timeout value. 184 + * 185 + * Any guest OS debugger should be aware that the watchdog service may be in 186 + * use. Consequently, it is recommended that the watchdog service be 187 + * disabled upon debugger entry (e.g. reaching a breakpoint), and then 188 + * re-enabled upon returning to normal execution. The API has been designed 189 + * with this in mind, and the 'time remaining' result of the disable call may 190 + * be used directly as the timeout argument of the re-enable call. 195 191 */ 196 192 #define HV_FAST_MACH_SET_WATCHDOG 0x05 193 + 194 + #ifndef __ASSEMBLY__ 195 + extern unsigned long sun4v_mach_set_watchdog(unsigned long timeout, 196 + unsigned long *orig_timeout); 197 + #endif 197 198 198 199 /* CPU services. 199 200 * ··· 231 206 * FUNCTION: HV_FAST_CPU_START 232 207 * ARG0: CPU ID 233 208 * ARG1: PC 234 - * ARG1: RTBA 235 - * ARG1: target ARG0 209 + * ARG2: RTBA 210 + * ARG3: target ARG0 236 211 * RET0: status 237 212 * ERRORS: ENOCPU Invalid CPU ID 238 213 * EINVAL Target CPU ID is not in the stopped state ··· 248 223 * and RTBA in %tba. 249 224 */ 250 225 #define HV_FAST_CPU_START 0x10 226 + 227 + #ifndef __ASSEMBLY__ 228 + extern unsigned long sun4v_cpu_start(unsigned long cpuid, 229 + unsigned long pc, 230 + unsigned long rtba, 231 + unsigned long arg0); 232 + #endif 251 233 252 234 /* cpu_stop() 253 235 * TRAP: HV_FAST_TRAP ··· 276 244 * and exit a running domain, a guest must use the mach_exit() service. 
277 245 */ 278 246 #define HV_FAST_CPU_STOP 0x11 247 + 248 + #ifndef __ASSEMBLY__ 249 + extern unsigned long sun4v_cpu_stop(unsigned long cpuid); 250 + #endif 279 251 280 252 /* cpu_yield() 281 253 * TRAP: HV_FAST_TRAP ··· 624 588 */ 625 589 #define HV_FAST_MMU_TSB_CTX0 0x20 626 590 591 + #ifndef __ASSEMBLY__ 592 + extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions, 593 + unsigned long tsb_desc_ra); 594 + #endif 595 + 627 596 /* mmu_tsb_ctxnon0() 628 597 * TRAP: HV_FAST_TRAP 629 598 * FUNCTION: HV_FAST_MMU_TSB_CTXNON0 ··· 734 693 * this mechanism can be used to map kernel nucleus code and data. 735 694 */ 736 695 #define HV_FAST_MMU_MAP_PERM_ADDR 0x25 696 + 697 + #ifndef __ASSEMBLY__ 698 + extern unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr, 699 + unsigned long set_to_zero, 700 + unsigned long tte, 701 + unsigned long flags); 702 + #endif 737 703 738 704 /* mmu_fault_area_conf() 739 705 * TRAP: HV_FAST_TRAP ··· 940 892 */ 941 893 #define HV_FAST_TOD_GET 0x50 942 894 895 + #ifndef __ASSEMBLY__ 896 + extern unsigned long sun4v_tod_get(unsigned long *time); 897 + #endif 898 + 943 899 /* tod_set() 944 900 * TRAP: HV_FAST_TRAP 945 901 * FUNCTION: HV_FAST_TOD_SET ··· 956 904 * block if TOD access is temporarily not possible. 957 905 */ 958 906 #define HV_FAST_TOD_SET 0x51 907 + 908 + #ifndef __ASSEMBLY__ 909 + extern unsigned long sun4v_tod_set(unsigned long time); 910 + #endif 959 911 960 912 /* Console services */ 961 913 ··· 1043 987 unsigned long size, 1044 988 unsigned long *bytes_written); 1045 989 #endif 990 + 991 + /* mach_set_soft_state() 992 + * TRAP: HV_FAST_TRAP 993 + * FUNCTION: HV_FAST_MACH_SET_SOFT_STATE 994 + * ARG0: software state 995 + * ARG1: software state description pointer 996 + * RET0: status 997 + * ERRORS: EINVAL software state not valid or software state 998 + * description is not NULL terminated 999 + * ENORADDR software state description pointer is not a 1000 + * valid real address 1001 + * EBADALIGNED software state description is not correctly 1002 + * aligned 1003 + * 1004 + * This allows the guest to report its soft state to the hypervisor. There 1005 + * are two primary components to this state. The first part states whether 1006 + * the guest software is running or not. The second contains optional 1007 + * details specific to the software. 1008 + * 1009 + * The software state argument is defined below in HV_SOFT_STATE_*, and 1010 + * indicates whether the guest is operating normally or in a transitional 1011 + * state. 1012 + * 1013 + * The software state description argument is a real address of a data buffer 1014 + * of size 32-bytes aligned on a 32-byte boundary. It is treated as a NULL 1015 + * terminated 7-bit ASCII string of up to 31 characters not including the 1016 + * NULL termination. 
1017 + */ 1018 + #define HV_FAST_MACH_SET_SOFT_STATE 0x70 1019 + #define HV_SOFT_STATE_NORMAL 0x01 1020 + #define HV_SOFT_STATE_TRANSITION 0x02 1021 + 1022 + #ifndef __ASSEMBLY__ 1023 + extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state, 1024 + unsigned long msg_string_ra); 1025 + #endif 1026 + 1027 + /* mach_get_soft_state() 1028 + * TRAP: HV_FAST_TRAP 1029 + * FUNCTION: HV_FAST_MACH_GET_SOFT_STATE 1030 + * ARG0: software state description pointer 1031 + * RET0: status 1032 + * RET1: software state 1033 + * ERRORS: ENORADDR software state description pointer is not a 1034 + * valid real address 1035 + * EBADALIGNED software state description is not correctly 1036 + * aligned 1037 + * 1038 + * Retrieve the current value of the guest's software state. The rules 1039 + * for the software state pointer are the same as for mach_set_soft_state() 1040 + * above. 1041 + */ 1042 + #define HV_FAST_MACH_GET_SOFT_STATE 0x71 1046 1043 1047 1044 /* Trap trace services. 1048 1045 * ··· 1486 1377 1487 1378 #ifndef __ASSEMBLY__ 1488 1379 extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid); 1380 + #endif 1381 + 1382 + /* vintr_get_cookie() 1383 + * TRAP: HV_FAST_TRAP 1384 + * FUNCTION: HV_FAST_VINTR_GET_COOKIE 1385 + * ARG0: device handle 1386 + * ARG1: device ino 1387 + * RET0: status 1388 + * RET1: cookie 1389 + */ 1390 + #define HV_FAST_VINTR_GET_COOKIE 0xa7 1391 + 1392 + /* vintr_set_cookie() 1393 + * TRAP: HV_FAST_TRAP 1394 + * FUNCTION: HV_FAST_VINTR_SET_COOKIE 1395 + * ARG0: device handle 1396 + * ARG1: device ino 1397 + * ARG2: cookie 1398 + * RET0: status 1399 + */ 1400 + #define HV_FAST_VINTR_SET_COOKIE 0xa8 1401 + 1402 + /* vintr_get_valid() 1403 + * TRAP: HV_FAST_TRAP 1404 + * FUNCTION: HV_FAST_VINTR_GET_VALID 1405 + * ARG0: device handle 1406 + * ARG1: device ino 1407 + * RET0: status 1408 + * RET1: valid state 1409 + */ 1410 + #define HV_FAST_VINTR_GET_VALID 0xa9 1411 + 1412 + /* vintr_set_valid() 1413 + * TRAP: HV_FAST_TRAP 1414 + * FUNCTION: HV_FAST_VINTR_SET_VALID 1415 + * ARG0: device handle 1416 + * ARG1: device ino 1417 + * ARG2: valid state 1418 + * RET0: status 1419 + */ 1420 + #define HV_FAST_VINTR_SET_VALID 0xaa 1421 + 1422 + /* vintr_get_state() 1423 + * TRAP: HV_FAST_TRAP 1424 + * FUNCTION: HV_FAST_VINTR_GET_STATE 1425 + * ARG0: device handle 1426 + * ARG1: device ino 1427 + * RET0: status 1428 + * RET1: state 1429 + */ 1430 + #define HV_FAST_VINTR_GET_STATE 0xab 1431 + 1432 + /* vintr_set_state() 1433 + * TRAP: HV_FAST_TRAP 1434 + * FUNCTION: HV_FAST_VINTR_SET_STATE 1435 + * ARG0: device handle 1436 + * ARG1: device ino 1437 + * ARG2: state 1438 + * RET0: status 1439 + */ 1440 + #define HV_FAST_VINTR_SET_STATE 0xac 1441 + 1442 + /* vintr_get_target() 1443 + * TRAP: HV_FAST_TRAP 1444 + * FUNCTION: HV_FAST_VINTR_GET_TARGET 1445 + * ARG0: device handle 1446 + * ARG1: device ino 1447 + * RET0: status 1448 + * RET1: cpuid 1449 + */ 1450 + #define HV_FAST_VINTR_GET_TARGET 0xad 1451 + 1452 + /* vintr_set_target() 1453 + * TRAP: HV_FAST_TRAP 1454 + * FUNCTION: HV_FAST_VINTR_SET_TARGET 1455 + * ARG0: device handle 1456 + * ARG1: device ino 1457 + * ARG2: cpuid 1458 + * RET0: status 1459 + */ 1460 + #define HV_FAST_VINTR_SET_TARGET 0xae 1461 + 1462 + #ifndef __ASSEMBLY__ 1463 + extern unsigned long sun4v_vintr_get_cookie(unsigned long dev_handle, 1464 + unsigned long dev_ino, 1465 + unsigned long *cookie); 1466 + extern unsigned long sun4v_vintr_set_cookie(unsigned long dev_handle, 1467 + unsigned long dev_ino, 1468 + unsigned long 
cookie); 1469 + extern unsigned long sun4v_vintr_get_valid(unsigned long dev_handle, 1470 + unsigned long dev_ino, 1471 + unsigned long *valid); 1472 + extern unsigned long sun4v_vintr_set_valid(unsigned long dev_handle, 1473 + unsigned long dev_ino, 1474 + unsigned long valid); 1475 + extern unsigned long sun4v_vintr_get_state(unsigned long dev_handle, 1476 + unsigned long dev_ino, 1477 + unsigned long *state); 1478 + extern unsigned long sun4v_vintr_set_state(unsigned long dev_handle, 1479 + unsigned long dev_ino, 1480 + unsigned long state); 1481 + extern unsigned long sun4v_vintr_get_target(unsigned long dev_handle, 1482 + unsigned long dev_ino, 1483 + unsigned long *cpuid); 1484 + extern unsigned long sun4v_vintr_set_target(unsigned long dev_handle, 1485 + unsigned long dev_ino, 1486 + unsigned long cpuid); 1489 1487 #endif 1490 1488 1491 1489 /* PCI IO services. ··· 2253 2037 */ 2254 2038 #define HV_FAST_PCI_MSG_SETVALID 0xd3 2255 2039 2040 + /* Logical Domain Channel services. */ 2041 + 2042 + #define LDC_CHANNEL_DOWN 0 2043 + #define LDC_CHANNEL_UP 1 2044 + #define LDC_CHANNEL_RESETTING 2 2045 + 2046 + /* ldc_tx_qconf() 2047 + * TRAP: HV_FAST_TRAP 2048 + * FUNCTION: HV_FAST_LDC_TX_QCONF 2049 + * ARG0: channel ID 2050 + * ARG1: real address base of queue 2051 + * ARG2: num entries in queue 2052 + * RET0: status 2053 + * 2054 + * Configure transmit queue for the LDC endpoint specified by the 2055 + * given channel ID, to be placed at the given real address, and 2056 + * be of the given num entries. Num entries must be a power of two. 2057 + * The real address base of the queue must be aligned on the queue 2058 + * size. Each queue entry is 64-bytes, so for example, a 32 entry 2059 + * queue must be aligned on a 2048 byte real address boundary. 2060 + * 2061 + * Upon configuration of a valid transmit queue the head and tail 2062 + * pointers are set to a hypervisor specific identical value indicating 2063 + * that the queue initially is empty. 2064 + * 2065 + * The endpoint's transmit queue is un-configured if num entries is zero. 2066 + * 2067 + * The maximum number of entries for each queue for a specific cpu may be 2068 + * determined from the machine description. A transmit queue may be 2069 + * specified even in the event that the LDC is down (peer endpoint has no 2070 + * receive queue specified). Transmission will begin as soon as the peer 2071 + * endpoint defines a receive queue. 2072 + * 2073 + * It is recommended that a guest wait for a transmit queue to empty prior 2074 + * to reconfiguring it, or un-configuring it. Re or un-configuring of a 2075 + * non-empty transmit queue behaves exactly as defined above, however it 2076 + * is undefined as to how many of the pending entries in the original queue 2077 + * will be delivered prior to the re-configuration taking effect. 2078 + * Furthermore, as the queue configuration causes a reset of the head and 2079 + * tail pointers there is no way for a guest to determine how many entries 2080 + * have been sent after the configuration operation. 2081 + */ 2082 + #define HV_FAST_LDC_TX_QCONF 0xe0 2083 + 2084 + /* ldc_tx_qinfo() 2085 + * TRAP: HV_FAST_TRAP 2086 + * FUNCTION: HV_FAST_LDC_TX_QINFO 2087 + * ARG0: channel ID 2088 + * RET0: status 2089 + * RET1: real address base of queue 2090 + * RET2: num entries in queue 2091 + * 2092 + * Return the configuration info for the transmit queue of LDC endpoint 2093 + * defined by the given channel ID. 
The real address is the currently 2094 + * defined real address base of the defined queue, and num entries is the 2095 + * size of the queue in terms of number of entries. 2096 + * 2097 + * If the specified channel ID is a valid endpoint number, but no transmit 2098 + * queue has been defined, this service will return success, but with num 2099 + * entries set to zero and the real address will have an undefined value. 2100 + */ 2101 + #define HV_FAST_LDC_TX_QINFO 0xe1 2102 + 2103 + /* ldc_tx_get_state() 2104 + * TRAP: HV_FAST_TRAP 2105 + * FUNCTION: HV_FAST_LDC_TX_GET_STATE 2106 + * ARG0: channel ID 2107 + * RET0: status 2108 + * RET1: head offset 2109 + * RET2: tail offset 2110 + * RET3: channel state 2111 + * 2112 + * Return the transmit state, and the head and tail queue pointers, for 2113 + * the transmit queue of the LDC endpoint defined by the given channel ID. 2114 + * The head and tail values are the byte offset of the head and tail 2115 + * positions of the transmit queue for the specified endpoint. 2116 + */ 2117 + #define HV_FAST_LDC_TX_GET_STATE 0xe2 2118 + 2119 + /* ldc_tx_set_qtail() 2120 + * TRAP: HV_FAST_TRAP 2121 + * FUNCTION: HV_FAST_LDC_TX_SET_QTAIL 2122 + * ARG0: channel ID 2123 + * ARG1: tail offset 2124 + * RET0: status 2125 + * 2126 + * Update the tail pointer for the transmit queue associated with the LDC 2127 + * endpoint defined by the given channel ID. The tail offset specified 2128 + * must be aligned on a 64 byte boundary, and calculated so as to increase 2129 + * the number of pending entries on the transmit queue. Any attempt to 2130 + * decrease the number of pending transmit queue entries is considered 2131 + * an invalid tail offset and will result in an EINVAL error. 2132 + * 2133 + * Since the tail of the transmit queue may not be moved backwards, the 2134 + * transmit queue may be flushed by configuring a new transmit queue, 2135 + * whereupon the hypervisor will configure the initial transmit head and 2136 + * tail pointers to be equal. 2137 + */ 2138 + #define HV_FAST_LDC_TX_SET_QTAIL 0xe3 2139 + 2140 + /* ldc_rx_qconf() 2141 + * TRAP: HV_FAST_TRAP 2142 + * FUNCTION: HV_FAST_LDC_RX_QCONF 2143 + * ARG0: channel ID 2144 + * ARG1: real address base of queue 2145 + * ARG2: num entries in queue 2146 + * RET0: status 2147 + * 2148 + * Configure receive queue for the LDC endpoint specified by the 2149 + * given channel ID, to be placed at the given real address, and 2150 + * be of the given num entries. Num entries must be a power of two. 2151 + * The real address base of the queue must be aligned on the queue 2152 + * size. Each queue entry is 64-bytes, so for example, a 32 entry 2153 + * queue must be aligned on a 2048 byte real address boundary. 2154 + * 2155 + * The endpoint's receive queue is un-configured if num entries is zero. 2156 + * 2157 + * If a valid receive queue is specified for a local endpoint the LDC is 2158 + * in the up state for the purpose of transmission to this endpoint. 2159 + * 2160 + * The maximum number of entries for each queue for a specific cpu may be 2161 + * determined from the machine description. 2162 + * 2163 + * As receive queue configuration causes a reset of the queue's head and 2164 + * tail pointers there is no way for a guest to determine how many entries 2165 + * have been received between a preceding ldc_get_rx_state() API call 2166 + * and the completion of the configuration operation. 
It should be noted 2167 + * that datagram delivery is not guaranteed via domain channels anyway, 2168 + * and therefore any higher protocol should be resilient to datagram 2169 + * loss if necessary. However, to overcome this specific race potential 2170 + * it is recommended, for example, that a higher level protocol be employed 2171 + * to ensure either retransmission, or ensure that no datagrams are pending 2172 + * on the peer endpoint's transmit queue prior to the configuration process. 2173 + */ 2174 + #define HV_FAST_LDC_RX_QCONF 0xe4 2175 + 2176 + /* ldc_rx_qinfo() 2177 + * TRAP: HV_FAST_TRAP 2178 + * FUNCTION: HV_FAST_LDC_RX_QINFO 2179 + * ARG0: channel ID 2180 + * RET0: status 2181 + * RET1: real address base of queue 2182 + * RET2: num entries in queue 2183 + * 2184 + * Return the configuration info for the receive queue of LDC endpoint 2185 + * defined by the given channel ID. The real address is the currently 2186 + * defined real address base of the defined queue, and num entries is the 2187 + * size of the queue in terms of number of entries. 2188 + * 2189 + * If the specified channel ID is a valid endpoint number, but no receive 2190 + * queue has been defined, this service will return success, but with num 2191 + * entries set to zero and the real address will have an undefined value. 2192 + */ 2193 + #define HV_FAST_LDC_RX_QINFO 0xe5 2194 + 2195 + /* ldc_rx_get_state() 2196 + * TRAP: HV_FAST_TRAP 2197 + * FUNCTION: HV_FAST_LDC_RX_GET_STATE 2198 + * ARG0: channel ID 2199 + * RET0: status 2200 + * RET1: head offset 2201 + * RET2: tail offset 2202 + * RET3: channel state 2203 + * 2204 + * Return the receive state, and the head and tail queue pointers, for 2205 + * the receive queue of the LDC endpoint defined by the given channel ID. 2206 + * The head and tail values are the byte offset of the head and tail 2207 + * positions of the receive queue for the specified endpoint. 2208 + */ 2209 + #define HV_FAST_LDC_RX_GET_STATE 0xe6 2210 + 2211 + /* ldc_rx_set_qhead() 2212 + * TRAP: HV_FAST_TRAP 2213 + * FUNCTION: HV_FAST_LDC_RX_SET_QHEAD 2214 + * ARG0: channel ID 2215 + * ARG1: head offset 2216 + * RET0: status 2217 + * 2218 + * Update the head pointer for the receive queue associated with the LDC 2219 + * endpoint defined by the given channel ID. The head offset specified 2220 + * must be aligned on a 64 byte boundary, and calculated so as to decrease 2221 + * the number of pending entries on the receive queue. Any attempt to 2222 + * increase the number of pending receive queue entries is considered 2223 + * an invalid head offset and will result in an EINVAL error. 2224 + * 2225 + * The receive queue may be flushed by setting the head offset equal 2226 + * to the current tail offset. 2227 + */ 2228 + #define HV_FAST_LDC_RX_SET_QHEAD 0xe7 2229 + 2230 + /* LDC Map Table Entry. Each slot is defined by a translation table 2231 + * entry, as specified by the LDC_MTE_* bits below, and a 64-bit 2232 + * hypervisor invalidation cookie. 
2233 + */ 2234 + #define LDC_MTE_PADDR 0x0fffffffffffe000 /* pa[55:13] */ 2235 + #define LDC_MTE_COPY_W 0x0000000000000400 /* copy write access */ 2236 + #define LDC_MTE_COPY_R 0x0000000000000200 /* copy read access */ 2237 + #define LDC_MTE_IOMMU_W 0x0000000000000100 /* IOMMU write access */ 2238 + #define LDC_MTE_IOMMU_R 0x0000000000000080 /* IOMMU read access */ 2239 + #define LDC_MTE_EXEC 0x0000000000000040 /* execute */ 2240 + #define LDC_MTE_WRITE 0x0000000000000020 /* write */ 2241 + #define LDC_MTE_READ 0x0000000000000010 /* read */ 2242 + #define LDC_MTE_SZALL 0x000000000000000f /* page size bits */ 2243 + #define LDC_MTE_SZ16GB 0x0000000000000007 /* 16GB page */ 2244 + #define LDC_MTE_SZ2GB 0x0000000000000006 /* 2GB page */ 2245 + #define LDC_MTE_SZ256MB 0x0000000000000005 /* 256MB page */ 2246 + #define LDC_MTE_SZ32MB 0x0000000000000004 /* 32MB page */ 2247 + #define LDC_MTE_SZ4MB 0x0000000000000003 /* 4MB page */ 2248 + #define LDC_MTE_SZ512K 0x0000000000000002 /* 512K page */ 2249 + #define LDC_MTE_SZ64K 0x0000000000000001 /* 64K page */ 2250 + #define LDC_MTE_SZ8K 0x0000000000000000 /* 8K page */ 2251 + 2252 + #ifndef __ASSEMBLY__ 2253 + struct ldc_mtable_entry { 2254 + unsigned long mte; 2255 + unsigned long cookie; 2256 + }; 2257 + #endif 2258 + 2259 + /* ldc_set_map_table() 2260 + * TRAP: HV_FAST_TRAP 2261 + * FUNCTION: HV_FAST_LDC_SET_MAP_TABLE 2262 + * ARG0: channel ID 2263 + * ARG1: table real address 2264 + * ARG2: num entries 2265 + * RET0: status 2266 + * 2267 + * Register the MTE table at the given table real address, with the 2268 + * specified num entries, for the LDC indicated by the given channel 2269 + * ID. 2270 + */ 2271 + #define HV_FAST_LDC_SET_MAP_TABLE 0xea 2272 + 2273 + /* ldc_get_map_table() 2274 + * TRAP: HV_FAST_TRAP 2275 + * FUNCTION: HV_FAST_LDC_GET_MAP_TABLE 2276 + * ARG0: channel ID 2277 + * RET0: status 2278 + * RET1: table real address 2279 + * RET2: num entries 2280 + * 2281 + * Return the configuration of the current mapping table registered 2282 + * for the given channel ID. 
2283 + */ 2284 + #define HV_FAST_LDC_GET_MAP_TABLE 0xeb 2285 + 2286 + #define LDC_COPY_IN 0 2287 + #define LDC_COPY_OUT 1 2288 + 2289 + /* ldc_copy() 2290 + * TRAP: HV_FAST_TRAP 2291 + * FUNCTION: HV_FAST_LDC_COPY 2292 + * ARG0: channel ID 2293 + * ARG1: LDC_COPY_* direction code 2294 + * ARG2: target real address 2295 + * ARG3: local real address 2296 + * ARG4: length in bytes 2297 + * RET0: status 2298 + * RET1: actual length in bytes 2299 + */ 2300 + #define HV_FAST_LDC_COPY 0xec 2301 + 2302 + #define LDC_MEM_READ 1 2303 + #define LDC_MEM_WRITE 2 2304 + #define LDC_MEM_EXEC 4 2305 + 2306 + /* ldc_mapin() 2307 + * TRAP: HV_FAST_TRAP 2308 + * FUNCTION: HV_FAST_LDC_MAPIN 2309 + * ARG0: channel ID 2310 + * ARG1: cookie 2311 + * RET0: status 2312 + * RET1: real address 2313 + * RET2: LDC_MEM_* permissions 2314 + */ 2315 + #define HV_FAST_LDC_MAPIN 0xed 2316 + 2317 + /* ldc_unmap() 2318 + * TRAP: HV_FAST_TRAP 2319 + * FUNCTION: HV_FAST_LDC_UNMAP 2320 + * ARG0: real address 2321 + * RET0: status 2322 + */ 2323 + #define HV_FAST_LDC_UNMAP 0xee 2324 + 2325 + /* ldc_revoke() 2326 + * TRAP: HV_FAST_TRAP 2327 + * FUNCTION: HV_FAST_LDC_REVOKE 2328 + * ARG0: cookie 2329 + * ARG1: ldc_mtable_entry cookie 2330 + * RET0: status 2331 + */ 2332 + #define HV_FAST_LDC_REVOKE 0xef 2333 + 2334 + #ifndef __ASSEMBLY__ 2335 + extern unsigned long sun4v_ldc_tx_qconf(unsigned long channel, 2336 + unsigned long ra, 2337 + unsigned long num_entries); 2338 + extern unsigned long sun4v_ldc_tx_qinfo(unsigned long channel, 2339 + unsigned long *ra, 2340 + unsigned long *num_entries); 2341 + extern unsigned long sun4v_ldc_tx_get_state(unsigned long channel, 2342 + unsigned long *head_off, 2343 + unsigned long *tail_off, 2344 + unsigned long *chan_state); 2345 + extern unsigned long sun4v_ldc_tx_set_qtail(unsigned long channel, 2346 + unsigned long tail_off); 2347 + extern unsigned long sun4v_ldc_rx_qconf(unsigned long channel, 2348 + unsigned long ra, 2349 + unsigned long num_entries); 2350 + extern unsigned long sun4v_ldc_rx_qinfo(unsigned long channel, 2351 + unsigned long *ra, 2352 + unsigned long *num_entries); 2353 + extern unsigned long sun4v_ldc_rx_get_state(unsigned long channel, 2354 + unsigned long *head_off, 2355 + unsigned long *tail_off, 2356 + unsigned long *chan_state); 2357 + extern unsigned long sun4v_ldc_rx_set_qhead(unsigned long channel, 2358 + unsigned long head_off); 2359 + extern unsigned long sun4v_ldc_set_map_table(unsigned long channel, 2360 + unsigned long ra, 2361 + unsigned long num_entries); 2362 + extern unsigned long sun4v_ldc_get_map_table(unsigned long channel, 2363 + unsigned long *ra, 2364 + unsigned long *num_entries); 2365 + extern unsigned long sun4v_ldc_copy(unsigned long channel, 2366 + unsigned long dir_code, 2367 + unsigned long tgt_raddr, 2368 + unsigned long lcl_raddr, 2369 + unsigned long len, 2370 + unsigned long *actual_len); 2371 + extern unsigned long sun4v_ldc_mapin(unsigned long channel, 2372 + unsigned long cookie, 2373 + unsigned long *ra, 2374 + unsigned long *perm); 2375 + extern unsigned long sun4v_ldc_unmap(unsigned long ra); 2376 + extern unsigned long sun4v_ldc_revoke(unsigned long cookie, 2377 + unsigned long mte_cookie); 2378 + #endif 2379 + 2256 2380 /* Performance counter services. 
*/ 2257 2381 2258 2382 #define HV_PERF_JBUS_PERF_CTRL_REG 0x00 ··· 2760 2204 extern int sun4v_hvapi_get(unsigned long group, 2761 2205 unsigned long *major, 2762 2206 unsigned long *minor); 2207 + extern void sun4v_hvapi_init(void); 2763 2208 #endif 2764 2209 2765 2210 #endif /* !(_SPARC64_HYPERVISOR_H) */
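The mach_set_watchdog() comment above spells out a debugger idiom: a timeout of zero disables the timer and hands back the time that was remaining, which can be fed straight back when re-arming. A sketch of that idiom, assuming only the sun4v_mach_set_watchdog() declaration added by this patch (the debugger_enter/debugger_exit hooks are hypothetical names):

#include <asm/hypervisor.h>

static unsigned long saved_timeout;

static void debugger_enter(void)
{
	/* A timeout of zero disables the timer; the previous remaining
	 * time comes back through the second argument.
	 */
	sun4v_mach_set_watchdog(0, &saved_timeout);
}

static void debugger_exit(void)
{
	unsigned long dummy;

	/* Re-arm with exactly the time that was left, if any. */
	if (saved_timeout)
		sun4v_mach_set_watchdog(saved_timeout, &dummy);
}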
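The LDC queue rules documented above are easy to get wrong: entry counts must be a power of two, each entry is 64 bytes, and the queue's real address must be aligned on the queue size. A sketch of a conforming transmit-queue bringup, assuming the sun4v_ldc_tx_qconf() declaration added here; the 128-entry size and the page-allocation strategy are this example's choices, not part of the patch:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/hypervisor.h>

#define LDC_NUM_ENTRIES	128			/* must be a power of two */
#define LDC_QUEUE_BYTES	(LDC_NUM_ENTRIES * 64)	/* 64 bytes per entry */

static int ldc_tx_bringup(unsigned long channel)
{
	unsigned long hv_err, ra;
	void *q;

	/* The queue's real address must be aligned on the queue size;
	 * a page-order allocation of at least that size satisfies this.
	 */
	q = (void *) __get_free_pages(GFP_KERNEL, get_order(LDC_QUEUE_BYTES));
	if (!q)
		return -ENOMEM;

	ra = __pa(q);
	hv_err = sun4v_ldc_tx_qconf(channel, ra, LDC_NUM_ENTRIES);
	if (hv_err != HV_EOK) {
		free_pages((unsigned long) q, get_order(LDC_QUEUE_BYTES));
		return -ENODEV;
	}
	return 0;
}

Tearing the queue down is the same call with zero entries, per the ldc_tx_qconf() comment.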
-1
include/asm-sparc64/kdebug.h
··· 32 32 DIE_TRAP, 33 33 DIE_TRAP_TL1, 34 34 DIE_CALL, 35 - DIE_PAGE_FAULT, 36 35 }; 37 36 38 37 #endif
+39
include/asm-sparc64/mdesc.h
··· 1 + #ifndef _SPARC64_MDESC_H 2 + #define _SPARC64_MDESC_H 3 + 4 + #include <linux/types.h> 5 + #include <asm/prom.h> 6 + 7 + struct mdesc_node; 8 + struct mdesc_arc { 9 + const char *name; 10 + struct mdesc_node *arc; 11 + }; 12 + 13 + struct mdesc_node { 14 + const char *name; 15 + u64 node; 16 + unsigned int unique_id; 17 + unsigned int num_arcs; 18 + struct property *properties; 19 + struct mdesc_node *hash_next; 20 + struct mdesc_node *allnodes_next; 21 + struct mdesc_arc arcs[0]; 22 + }; 23 + 24 + extern struct mdesc_node *md_find_node_by_name(struct mdesc_node *from, 25 + const char *name); 26 + #define md_for_each_node_by_name(__mn, __name) \ 27 + for (__mn = md_find_node_by_name(NULL, __name); __mn; \ 28 + __mn = md_find_node_by_name(__mn, __name)) 29 + 30 + extern struct property *md_find_property(const struct mdesc_node *mp, 31 + const char *name, 32 + int *lenp); 33 + extern const void *md_get_property(const struct mdesc_node *mp, 34 + const char *name, 35 + int *lenp); 36 + 37 + extern void sun4v_mdesc_init(void); 38 + 39 + #endif
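The mdesc accessors above deliberately mirror the of_find_node_by_name()/of_get_property() style of the OBP device-tree code. A sketch of walking every "cpu" node and reading a property, assuming such nodes carry an "id" property (the property name is this example's assumption; consult the machine description layout for the real names):

#include <linux/kernel.h>
#include <asm/mdesc.h>

static void list_mdesc_cpus(void)
{
	struct mdesc_node *mp;

	/* Iterates via md_find_node_by_name(), starting from NULL. */
	md_for_each_node_by_name(mp, "cpu") {
		const u64 *id = md_get_property(mp, "id", NULL);

		if (id)
			printk(KERN_INFO "mdesc cpu node, id[%llu]\n",
			       (unsigned long long) *id);
	}
}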
+2 -5
include/asm-sparc64/oplib.h
··· 316 316 317 317 extern int prom_pathtoinode(const char *path); 318 318 extern int prom_inst2pkg(int); 319 - 320 - /* CPU probing helpers. */ 321 - struct device_node; 322 - int cpu_find_by_instance(int instance, struct device_node **dev_node, int *mid); 323 - int cpu_find_by_mid(int mid, struct device_node **prom_node); 319 + extern int prom_service_exists(const char *service_name); 320 + extern void prom_sun4v_guest_soft_state(void); 324 321 325 322 /* Client interface level routines. */ 326 323 extern void prom_set_trap_table(unsigned long tba);
+3 -1
include/asm-sparc64/percpu.h
··· 5 5 6 6 #ifdef CONFIG_SMP 7 7 8 - extern void setup_per_cpu_areas(void); 8 + #define setup_per_cpu_areas() do { } while (0) 9 + extern void real_setup_per_cpu_areas(void); 9 10 10 11 extern unsigned long __per_cpu_base; 11 12 extern unsigned long __per_cpu_shift; ··· 35 34 } while (0) 36 35 #else /* ! SMP */ 37 36 37 + #define real_setup_per_cpu_areas() do { } while (0) 38 38 #define DEFINE_PER_CPU(type, name) \ 39 39 __typeof__(type) per_cpu__##name 40 40
+1
include/asm-sparc64/prom.h
··· 90 90 const char *type, const char *compat); 91 91 extern struct device_node *of_find_node_by_path(const char *path); 92 92 extern struct device_node *of_find_node_by_phandle(phandle handle); 93 + extern struct device_node *of_find_node_by_cpuid(int cpuid); 93 94 extern struct device_node *of_get_parent(const struct device_node *node); 94 95 extern struct device_node *of_get_next_child(const struct device_node *node, 95 96 struct device_node *prev);
+2 -2
include/asm-sparc64/smp.h
··· 41 41 extern int hard_smp_processor_id(void); 42 42 #define raw_smp_processor_id() (current_thread_info()->cpu) 43 43 44 - extern void smp_setup_cpu_possible_map(void); 44 + extern void smp_fill_in_sib_core_maps(void); 45 45 extern unsigned char boot_cpu_id; 46 46 47 47 #endif /* !(__ASSEMBLY__) */ ··· 49 49 #else 50 50 51 51 #define hard_smp_processor_id() 0 52 - #define smp_setup_cpu_possible_map() do { } while (0) 52 + #define smp_fill_in_sib_core_maps() do { } while (0) 53 53 #define boot_cpu_id (0) 54 54 55 55 #endif /* !(CONFIG_SMP) */
+13
include/asm-sparc64/sstate.h
··· 1 + #ifndef _SPARC64_SSTATE_H 2 + #define _SPARC64_SSTATE_H 3 + 4 + extern void sstate_booting(void); 5 + extern void sstate_running(void); 6 + extern void sstate_halt(void); 7 + extern void sstate_poweroff(void); 8 + extern void sstate_panic(void); 9 + extern void sstate_reboot(void); 10 + 11 + extern void sun4v_sstate_init(void); 12 + 13 + #endif /* _SPARC64_SSTATE_H */
+4 -4
include/asm-sparc64/thread_info.h
··· 38 38 /* D$ line 1 */ 39 39 struct task_struct *task; 40 40 unsigned long flags; 41 - __u8 cpu; 42 41 __u8 fpsaved[7]; 42 + __u8 pad; 43 43 unsigned long ksp; 44 44 45 45 /* D$ line 2 */ ··· 49 49 int preempt_count; /* 0 => preemptable, <0 => BUG */ 50 50 __u8 new_child; 51 51 __u8 syscall_noerror; 52 - __u16 __pad; 52 + __u16 cpu; 53 53 54 54 unsigned long *utraps; 55 55 ··· 83 83 #define TI_CURRENT_DS (TI_FLAGS + TI_FLAG_BYTE_CURRENT_DS) 84 84 #define TI_FPDEPTH (TI_FLAGS + TI_FLAG_BYTE_FPDEPTH) 85 85 #define TI_WSAVED (TI_FLAGS + TI_FLAG_BYTE_WSAVED) 86 - #define TI_CPU 0x00000010 87 - #define TI_FPSAVED 0x00000011 86 + #define TI_FPSAVED 0x00000010 88 87 #define TI_KSP 0x00000018 89 88 #define TI_FAULT_ADDR 0x00000020 90 89 #define TI_KREGS 0x00000028 ··· 91 92 #define TI_PRE_COUNT 0x00000038 92 93 #define TI_NEW_CHILD 0x0000003c 93 94 #define TI_SYS_NOERROR 0x0000003d 95 + #define TI_CPU 0x0000003e 94 96 #define TI_UTRAPS 0x00000040 95 97 #define TI_REG_WINDOW 0x00000048 96 98 #define TI_RWIN_SPTRS 0x000003c8
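With cpu now a __u16 at offset 0x3e, the hand-maintained TI_* constants above have to keep tracking the struct layout (the lduh in cpudata.h's LOAD_PER_CPU_BASE depends on TI_CPU being right). A sketch of a compile-time cross-check using the stock BUILD_BUG_ON(); wiring such a check in is this example's suggestion, not something the patch does:

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <asm/thread_info.h>

static inline void thread_info_offsets_ok(void)
{
	/* Fails the build if the TI_* constants drift from the C layout. */
	BUILD_BUG_ON(offsetof(struct thread_info, cpu) != TI_CPU);
	BUILD_BUG_ON(offsetof(struct thread_info, fpsaved) != TI_FPSAVED);
	BUILD_BUG_ON(offsetof(struct thread_info, ksp) != TI_KSP);
}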
+3
include/asm-sparc64/topology.h
··· 6 6 7 7 #include <asm-generic/topology.h> 8 8 9 + #define topology_core_id(cpu) (cpu_data(cpu).core_id) 10 + #define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) 11 + 9 12 #endif /* _ASM_SPARC64_TOPOLOGY_H */
+1 -1
include/asm-sparc64/tsb.h
··· 271 271 #define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ 272 272 sethi %hi(swapper_4m_tsb), REG1; \ 273 273 or REG1, %lo(swapper_4m_tsb), REG1; \ 274 - and TAG, (KERNEL_TSB_NENTRIES - 1), REG2; \ 274 + and TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \ 275 275 sllx REG2, 4, REG2; \ 276 276 add REG1, REG2, REG2; \ 277 277 KTSB_LOAD_QUAD(REG2, REG3); \