Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mips_6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS updates from Thomas Bogendoerfer:
"Just cleanups and fixes"

* tag 'mips_6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux:
MIPS: Alchemy: Fix an out-of-bound access in db1550_dev_setup()
MIPS: Alchemy: Fix an out-of-bound access in db1200_dev_setup()
MIPS: Fix typos
MIPS: Remove unused shadow GPR support from vector irq setup
MIPS: Allow vectored interrupt handler to reside everywhere for 64bit
mips: Set dump-stack arch description
mips: mm: add slab availability checking in ioremap_prot
mips: Optimize max_mapnr init procedure
mips: Fix max_mapnr being uninitialized on early stages
mips: Fix incorrect max_low_pfn adjustment
mips: dmi: Fix early remap on MIPS32
MIPS: compressed: Use correct instruction for 64 bit code
MIPS: SGI-IP27: hubio: fix nasid kernel-doc warning
MAINTAINERS: Add myself as maintainer of the Ralink architecture

+138 -166
+1
MAINTAINERS
··· 18196 18196 18197 18197 RALINK MIPS ARCHITECTURE 18198 18198 M: John Crispin <john@phrozen.org> 18199 + M: Sergio Paracuellos <sergio.paracuellos@gmail.com> 18199 18200 L: linux-mips@vger.kernel.org 18200 18201 S: Maintained 18201 18202 F: arch/mips/ralink
+1 -1
arch/mips/alchemy/devboards/db1200.c
··· 847 847 i2c_register_board_info(0, db1200_i2c_devs, 848 848 ARRAY_SIZE(db1200_i2c_devs)); 849 849 spi_register_board_info(db1200_spi_devs, 850 - ARRAY_SIZE(db1200_i2c_devs)); 850 + ARRAY_SIZE(db1200_spi_devs)); 851 851 852 852 /* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI) 853 853 * S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
+1 -1
arch/mips/alchemy/devboards/db1550.c
··· 589 589 i2c_register_board_info(0, db1550_i2c_devs, 590 590 ARRAY_SIZE(db1550_i2c_devs)); 591 591 spi_register_board_info(db1550_spi_devs, 592 - ARRAY_SIZE(db1550_i2c_devs)); 592 + ARRAY_SIZE(db1550_spi_devs)); 593 593 594 594 c = clk_get(NULL, "psc0_intclk"); 595 595 if (!IS_ERR(c)) {
+3 -3
arch/mips/bcm47xx/buttons.c
··· 147 147 bcm47xx_buttons_buffalo_whr_g125[] __initconst = { 148 148 BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON), 149 149 BCM47XX_GPIO_KEY(4, KEY_RESTART), 150 - BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode swtich */ 150 + BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode switch */ 151 151 }; 152 152 153 153 static const struct gpio_keys_button 154 154 bcm47xx_buttons_buffalo_whr_g54s[] __initconst = { 155 155 BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON), 156 156 BCM47XX_GPIO_KEY_H(4, KEY_RESTART), 157 - BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode swtich */ 157 + BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode switch */ 158 158 }; 159 159 160 160 static const struct gpio_keys_button 161 161 bcm47xx_buttons_buffalo_whr_hp_g54[] __initconst = { 162 162 BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON), 163 163 BCM47XX_GPIO_KEY(4, KEY_RESTART), 164 - BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode swtich */ 164 + BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode switch */ 165 165 }; 166 166 167 167 static const struct gpio_keys_button
+2 -2
arch/mips/bcm63xx/clk.c
··· 174 174 } 175 175 176 176 if (enable) { 177 - /* reset switch core afer clock change */ 177 + /* reset switch core after clock change */ 178 178 bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1); 179 179 msleep(10); 180 180 bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 0); ··· 304 304 bcm_hwclock_set(CKCTL_6368_SAR_EN, enable); 305 305 306 306 if (enable) { 307 - /* reset sar core afer clock change */ 307 + /* reset sar core after clock change */ 308 308 bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 1); 309 309 mdelay(1); 310 310 bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 0);
+1 -1
arch/mips/boot/compressed/dbg.c
··· 3 3 * MIPS-specific debug support for pre-boot environment 4 4 * 5 5 * NOTE: putc() is board specific, if your board have a 16550 compatible uart, 6 - * please select SYS_SUPPORTS_ZBOOT_UART16550 for your machine. othewise, you 6 + * please select SYS_SUPPORTS_ZBOOT_UART16550 for your machine. otherwise, you 7 7 * need to implement your own putc(). 8 8 */ 9 9 #include <linux/compiler.h>
+2 -2
arch/mips/boot/compressed/head.S
··· 25 25 /* Clear BSS */ 26 26 PTR_LA a0, _edata 27 27 PTR_LA a2, _end 28 - 1: sw zero, 0(a0) 29 - addiu a0, a0, 4 28 + 1: PTR_S zero, 0(a0) 29 + PTR_ADDIU a0, a0, PTRSIZE 30 30 bne a2, a0, 1b 31 31 32 32 PTR_LA a0, (.heap) /* heap address */
+1 -1
arch/mips/boot/elf2ecoff.c
··· 443 443 efh.f_symptr = 0; 444 444 efh.f_nsyms = 0; 445 445 efh.f_opthdr = sizeof eah; 446 - efh.f_flags = 0x100f; /* Stripped, not sharable. */ 446 + efh.f_flags = 0x100f; /* Stripped, not shareable. */ 447 447 448 448 memset(esecs, 0, sizeof esecs); 449 449 strcpy(esecs[0].s_name, ".text");
+1 -1
arch/mips/cavium-octeon/csrc-octeon.c
··· 113 113 114 114 unsigned long long notrace sched_clock(void) 115 115 { 116 - /* 64-bit arithmatic can overflow, so use 128-bit. */ 116 + /* 64-bit arithmetic can overflow, so use 128-bit. */ 117 117 u64 t1, t2, t3; 118 118 unsigned long long rv; 119 119 u64 mult = clocksource_mips.mult;
+1 -1
arch/mips/cavium-octeon/executive/cvmx-boot-vector.c
··· 143 143 uint64_t v = _cvmx_bootvector_data[i]; 144 144 145 145 if (OCTEON_IS_OCTEON1PLUS() && (i == 0 || i == 7)) 146 - v &= 0xffffffff00000000ull; /* KScratch not availble. */ 146 + v &= 0xffffffff00000000ull; /* KScratch not available */ 147 147 cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, i * 8); 148 148 cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, v); 149 149 }
+1 -1
arch/mips/cavium-octeon/executive/cvmx-bootmem.c
··· 264 264 * Convert !0 address_min and 0 address_max to special case of 265 265 * range that specifies an exact memory block to allocate. Do 266 266 * this before other checks and adjustments so that this 267 - * tranformation will be validated. 267 + * transformation will be validated. 268 268 */ 269 269 if (address_min && !address_max) 270 270 address_max = address_min + req_size;
+2 -2
arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
··· 192 192 } 193 193 194 194 /* 195 - * Shutdown a queue a free it's command buffers to the FPA. The 195 + * Shutdown a queue and free its command buffers to the FPA. The 196 196 * hardware connected to the queue must be stopped before this 197 197 * function is called. 198 198 * ··· 285 285 286 286 /* 287 287 * Return the command buffer to be written to. The purpose of this 288 - * function is to allow CVMX routine access t othe low level buffer 288 + * function is to allow CVMX routine access to the low level buffer 289 289 * for initial hardware setup. User applications should not call this 290 290 * function directly. 291 291 *
+1 -1
arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
··· 103 103 /** 104 104 * Shift long sequences of zeros into the QLM JTAG chain. It is 105 105 * common to need to shift more than 32 bits of zeros into the 106 - * chain. This function is a convience wrapper around 106 + * chain. This function is a convenience wrapper around 107 107 * cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of 108 108 * zeros at a time. 109 109 *
+1 -1
arch/mips/cavium-octeon/executive/cvmx-pko.c
··· 615 615 /* 616 616 * Each packet has a 12 bytes of interframe gap, an 8 byte 617 617 * preamble, and a 4 byte CRC. These are not included in the 618 - * per word count. Multiply by 8 to covert to bits and divide 618 + * per word count. Multiply by 8 to convert to bits and divide 619 619 * by 256 for limit granularity. 620 620 */ 621 621 pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256;
+1 -1
arch/mips/cavium-octeon/octeon-platform.c
··· 973 973 * zero. 974 974 */ 975 975 976 - /* Asume that CS1 immediately follows. */ 976 + /* Assume that CS1 immediately follows. */ 977 977 mio_boot_reg_cfg.u64 = 978 978 cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs + 1)); 979 979 region1_base = mio_boot_reg_cfg.s.base << 16;
+3 -3
arch/mips/fw/arc/promlib.c
··· 15 15 /* 16 16 * For 64bit kernels working with a 32bit ARC PROM pointer arguments 17 17 * for ARC calls need to reside in CKEG0/1. But as soon as the kernel 18 - * switches to it's first kernel thread stack is set to an address in 18 + * switches to its first kernel thread stack is set to an address in 19 19 * XKPHYS, so anything on stack can't be used anymore. This is solved 20 - * by using a * static declartion variables are put into BSS, which is 20 + * by using a * static declaration variables are put into BSS, which is 21 21 * linked to a CKSEG0 address. Since this is only used on UP platforms 22 - * there is not spinlock needed 22 + * there is no spinlock needed 23 23 */ 24 24 #define O32_STATIC static 25 25 #else
+1 -1
arch/mips/include/asm/debug.h
··· 10 10 11 11 /* 12 12 * mips_debugfs_dir corresponds to the "mips" directory at the top level 13 - * of the DebugFS hierarchy. MIPS-specific DebugFS entires should be 13 + * of the DebugFS hierarchy. MIPS-specific DebugFS entries should be 14 14 * placed beneath this directory. 15 15 */ 16 16 extern struct dentry *mips_debugfs_dir;
+1 -1
arch/mips/include/asm/dmi.h
··· 5 5 #include <linux/io.h> 6 6 #include <linux/memblock.h> 7 7 8 - #define dmi_early_remap(x, l) ioremap_cache(x, l) 8 + #define dmi_early_remap(x, l) ioremap(x, l) 9 9 #define dmi_early_unmap(x, l) iounmap(x) 10 10 #define dmi_remap(x, l) ioremap_cache(x, l) 11 11 #define dmi_unmap(x) iounmap(x)
+2 -2
arch/mips/include/asm/io.h
··· 159 159 * address is not guaranteed to be usable directly as a virtual 160 160 * address. 161 161 * 162 - * This version of ioremap ensures that the memory is marked cachable by 162 + * This version of ioremap ensures that the memory is marked cacheable by 163 163 * the CPU. Also enables full write-combining. Useful for some 164 164 * memory-like regions on I/O busses. 165 165 */ ··· 177 177 * address is not guaranteed to be usable directly as a virtual 178 178 * address. 179 179 * 180 - * This version of ioremap ensures that the memory is marked uncachable 180 + * This version of ioremap ensures that the memory is marked uncacheable 181 181 * but accelerated by means of write-combining feature. It is specifically 182 182 * useful for PCIe prefetchable windows, which may vastly improve a 183 183 * communications performance. If it was determined on boot stage, what
+1 -1
arch/mips/include/asm/mach-au1x00/au1000_dma.h
··· 259 259 if (!chan) 260 260 return; 261 261 /* 262 - * set_dma_mode is only allowed to change endianess, direction, 262 + * set_dma_mode is only allowed to change endianness, direction, 263 263 * transfer size, device FIFO width, and coherency settings. 264 264 * Make sure anything else is masked off. 265 265 */
+1 -1
arch/mips/include/asm/mach-au1x00/gpio-au1000.h
··· 435 435 /** 436 436 * alchemy_gpio2_enable - Activate GPIO2 block. 437 437 * 438 - * The GPIO2 block must be enabled excplicitly to work. On systems 438 + * The GPIO2 block must be enabled explicitly to work. On systems 439 439 * where this isn't done by the bootloader, this macro can be used. 440 440 */ 441 441 static inline void alchemy_gpio2_enable(void)
+1 -1
arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
··· 55 55 #define ltq_sys1_w32_mask(clear, set, reg) \ 56 56 ltq_sys1_w32((ltq_sys1_r32(reg) & ~(clear)) | (set), reg) 57 57 58 - /* allow the gpio and pinctrl drivers to talk to eachother */ 58 + /* allow the gpio and pinctrl drivers to talk to each other */ 59 59 extern int pinctrl_falcon_get_range_size(int id); 60 60 extern void pinctrl_falcon_add_gpio_range(struct pinctrl_gpio_range *range); 61 61
+1 -1
arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
··· 42 42 /* period between two check. (Unit: S) */ 43 43 u8 adjust_period; 44 44 45 - /* fan adjust usually depend on a temprature input */ 45 + /* fan adjust usually depend on a temperature input */ 46 46 get_temp_fun depend_temp; 47 47 48 48 /* up_step/down_step used when type is STEP_SPEED_POLICY */
+1 -1
arch/mips/include/asm/mach-loongson64/loongson_regs.h
··· 227 227 #define LOONGSON_CSR_NODECNT 0x408 228 228 #define LOONGSON_CSR_CPUTEMP 0x428 229 229 230 - /* PerCore CSR, only accessable by local cores */ 230 + /* PerCore CSR, only accessible by local cores */ 231 231 #define LOONGSON_CSR_IPI_STATUS 0x1000 232 232 #define LOONGSON_CSR_IPI_EN 0x1004 233 233 #define LOONGSON_CSR_IPI_SET 0x1008
+2 -2
arch/mips/include/asm/mach-malta/spaces.h
··· 23 23 * The kernel is still located in 0x80000000(kseg0). However, 24 24 * the physical mask has been shifted to 0x80000000 which exploits the alias 25 25 * on the Malta board. As a result of which, we override the __pa_symbol 26 - * to peform direct mapping from virtual to physical addresses. In other 26 + * to perform direct mapping from virtual to physical addresses. In other 27 27 * words, the 0x80000000 virtual address maps to 0x80000000 physical address 28 28 * which in turn aliases to 0x0. We do this in order to be able to use a flat 29 29 * 2GB of memory (0x80000000 - 0xffffffff) so we can avoid the I/O hole in 30 30 * 0x10000000 - 0x1fffffff. 31 31 * The last 64KB of physical memory are reserved for correct HIGHMEM 32 - * macros arithmetics. 32 + * macros arithmetic. 33 33 * 34 34 */ 35 35
+1 -1
arch/mips/include/asm/mips-boards/bonito64.h
··· 16 16 */ 17 17 18 18 /* Revision 1.48 autogenerated on 08/17/99 15:20:01 */ 19 - /* This bonito64 version editted from bonito.h Revision 1.48 on 11/09/00 */ 19 + /* This bonito64 version edited from bonito.h Revision 1.48 on 11/09/00 */ 20 20 21 21 #ifndef _ASM_MIPS_BOARDS_BONITO64_H 22 22 #define _ASM_MIPS_BOARDS_BONITO64_H
+1 -1
arch/mips/include/asm/mips-cpc.h
··· 22 22 * the CPC 23 23 * 24 24 * Returns the default physical base address of the Cluster Power Controller 25 - * memory mapped registers. This is platform dependant & must therefore be 25 + * memory mapped registers. This is platform dependent & must therefore be 26 26 * implemented per-platform. 27 27 */ 28 28 extern phys_addr_t mips_cpc_default_phys_base(void);
+2 -2
arch/mips/include/asm/mipsregs.h
··· 98 98 99 99 /* 100 100 * R4640/R4650 cp0 register names. These registers are listed 101 - * here only for completeness; without MMU these CPUs are not useable 101 + * here only for completeness; without MMU these CPUs are not usable 102 102 * by Linux. A future ELKS port might take make Linux run on them 103 103 * though ... 104 104 */ ··· 461 461 #define EXCCODE_THREAD 25 /* Thread exceptions (MT) */ 462 462 #define EXCCODE_DSPDIS 26 /* DSP disabled exception */ 463 463 #define EXCCODE_GE 27 /* Virtualized guest exception (VZ) */ 464 - #define EXCCODE_CACHEERR 30 /* Parity/ECC occured on a core */ 464 + #define EXCCODE_CACHEERR 30 /* Parity/ECC occurred on a core */ 465 465 466 466 /* Implementation specific trap codes used by MIPS cores */ 467 467 #define MIPS_EXCCODE_TLBPAR 16 /* TLB parity error exception */
+1 -1
arch/mips/include/asm/octeon/cvmx-bootinfo.h
··· 114 114 115 115 /* 116 116 * flags indicating various configuration options. These 117 - * flags supercede the 'flags' variable and should be used 117 + * flags supersede the 'flags' variable and should be used 118 118 * instead if available. 119 119 */ 120 120 uint32_t config_flags;
+3 -3
arch/mips/include/asm/octeon/cvmx-cmd-queue.h
··· 145 145 /** 146 146 * This structure contains the global state of all command queues. 147 147 * It is stored in a bootmem named block and shared by all 148 - * applications running on Octeon. Tickets are stored in a differnet 148 + * applications running on Octeon. Tickets are stored in a different 149 149 * cache line that queue information to reduce the contention on the 150 150 * ll/sc used to get a ticket. If this is not the case, the update 151 151 * of queue state causes the ll/sc to fail quite often. ··· 172 172 int pool_size); 173 173 174 174 /** 175 - * Shutdown a queue a free it's command buffers to the FPA. The 175 + * Shutdown a queue and free its command buffers to the FPA. The 176 176 * hardware connected to the queue must be stopped before this 177 177 * function is called. 178 178 * ··· 194 194 195 195 /** 196 196 * Return the command buffer to be written to. The purpose of this 197 - * function is to allow CVMX routine access t othe low level buffer 197 + * function is to allow CVMX routine access to the low level buffer 198 198 * for initial hardware setup. User applications should not call this 199 199 * function directly. 200 200 *
+1 -1
arch/mips/include/asm/octeon/cvmx-pko.h
··· 91 91 } cvmx_pko_status_t; 92 92 93 93 /** 94 - * This enumeration represents the differnet locking modes supported by PKO. 94 + * This enumeration represents the different locking modes supported by PKO. 95 95 */ 96 96 typedef enum { 97 97 /*
+2 -2
arch/mips/include/asm/octeon/cvmx-pow.h
··· 1342 1342 * This function does NOT wait for previous tag switches to complete, 1343 1343 * so the caller must ensure that there is not a pending tag switch. 1344 1344 * 1345 - * @wait: When set, call stalls until work becomes avaiable, or times out. 1345 + * @wait: When set, call stalls until work becomes available, or times out. 1346 1346 * If not set, returns immediately. 1347 1347 * 1348 1348 * Returns: the WQE pointer from POW. Returns NULL if no work ··· 1376 1376 * This function waits for any previous tag switch to complete before 1377 1377 * requesting the new work. 1378 1378 * 1379 - * @wait: When set, call stalls until work becomes avaiable, or times out. 1379 + * @wait: When set, call stalls until work becomes available, or times out. 1380 1380 * If not set, returns immediately. 1381 1381 * 1382 1382 * Returns: the WQE pointer from POW. Returns NULL if no work
+2 -2
arch/mips/include/asm/octeon/octeon-model.h
··· 54 54 #define OM_CHECK_SUBMODEL 0x02000000 55 55 /* Match all models previous than the one specified */ 56 56 #define OM_MATCH_PREVIOUS_MODELS 0x04000000 57 - /* Ignores the minor revison on newer parts */ 57 + /* Ignores the minor revision on newer parts */ 58 58 #define OM_IGNORE_MINOR_REVISION 0x08000000 59 59 #define OM_FLAG_MASK 0xff000000 60 60 ··· 226 226 #define OCTEON_CN52XX_PASS2 OCTEON_CN52XX_PASS2_X 227 227 228 228 /* 229 - * CN3XXX models with old revision enconding 229 + * CN3XXX models with old revision encoding 230 230 */ 231 231 #define OCTEON_CN38XX_PASS1 0x000d0000 232 232 #define OCTEON_CN38XX_PASS2 0x000d0001
+1 -1
arch/mips/include/asm/page.h
··· 173 173 if (IS_ENABLED(CONFIG_64BIT)) { 174 174 /* 175 175 * For MIPS64 the virtual address may either be in one of 176 - * the compatibility segements ckseg0 or ckseg1, or it may 176 + * the compatibility segments ckseg0 or ckseg1, or it may 177 177 * be in xkphys. 178 178 */ 179 179 return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
+1 -1
arch/mips/include/asm/pci.h
··· 23 23 #ifdef CONFIG_PCI_DRIVERS_LEGACY 24 24 25 25 /* 26 - * Each pci channel is a top-level PCI bus seem by CPU. A machine with 26 + * Each PCI channel is a top-level PCI bus seem by CPU. A machine with 27 27 * multiple PCI channels may have multiple PCI host controllers or a 28 28 * single controller supporting multiple channels. 29 29 */
+1 -1
arch/mips/include/asm/pgtable-bits.h
··· 201 201 * The final layouts of the PTE bits are: 202 202 * 203 203 * 64-bit, R1 or earlier: CCC D V G [S H] M A W R P 204 - * 32-bit, R1 or earler: CCC D V G M A W R P 204 + * 32-bit, R1 or earlier: CCC D V G M A W R P 205 205 * 64-bit, R2 or later: CCC D V G RI/R XI [S H] M A W P 206 206 * 32-bit, R2 or later: CCC D V G RI/R XI M A W P 207 207 */
+1 -1
arch/mips/include/asm/sgi/mc.h
··· 96 96 volatile u32 lbursttp; /* Time period for long bursts */ 97 97 98 98 /* MC chip can drive up to 4 bank 4 SIMMs each. All SIMMs in bank must 99 - * be the same size. The size encoding for supported SIMMs is bellow */ 99 + * be the same size. The size encoding for supported SIMMs is below */ 100 100 u32 _unused11[9]; 101 101 volatile u32 mconfig0; /* Memory config register zero */ 102 102 u32 _unused12;
+1 -1
arch/mips/include/asm/sn/klconfig.h
··· 851 851 /* 852 852 * TBD - Allocation issues. 853 853 * 854 - * Do we need to Mark off sepatate heaps for lboard_t, rboard_t, component, 854 + * Do we need to Mark off separate heaps for lboard_t, rboard_t, component, 855 855 * errinfo and allocate from them, or have a single heap and allocate all 856 856 * structures from it. Debug is easier in the former method since we can 857 857 * dump all similar structs in one command, but there will be lots of holes,
+1 -1
arch/mips/include/asm/sync.h
··· 19 19 * 20 20 * Ordering barriers can be more efficient than completion barriers, since: 21 21 * 22 - * a) Ordering barriers only require memory access instructions which preceed 22 + * a) Ordering barriers only require memory access instructions which precede 23 23 * them in program order (older instructions) to reach a point in the 24 24 * load/store datapath beyond which reordering is not possible before 25 25 * allowing memory access instructions which follow them (younger
+1 -1
arch/mips/include/asm/thread_info.h
··· 27 27 unsigned long flags; /* low level flags */ 28 28 unsigned long tp_value; /* thread pointer */ 29 29 __u32 cpu; /* current CPU */ 30 - int preempt_count; /* 0 => preemptable, <0 => BUG */ 30 + int preempt_count; /* 0 => preemptible, <0 => BUG */ 31 31 struct pt_regs *regs; 32 32 long syscall; /* syscall number */ 33 33 };
+1 -1
arch/mips/include/asm/timex.h
··· 46 46 * 47 47 * There is a suggested workaround and also the erratum can't strike if 48 48 * the compare interrupt isn't being used as the clock source device. 49 - * However for now the implementaton of this function doesn't get these 49 + * However for now the implementation of this function doesn't get these 50 50 * fine details right. 51 51 */ 52 52 static inline int can_use_mips_counter(unsigned int prid)
+1 -1
arch/mips/include/asm/vdso/vdso.h
··· 32 32 #else 33 33 /* 34 34 * Get the base load address of the VDSO. We have to avoid generating 35 - * relocations and references to the GOT because ld.so does not peform 35 + * relocations and references to the GOT because ld.so does not perform 36 36 * relocations on the VDSO. We use the current offset from the VDSO base 37 37 * and perform a PC-relative branch which gives the absolute address in 38 38 * ra, and take the difference. The assembler chokes on
+1 -1
arch/mips/include/uapi/asm/mman.h
··· 88 88 #define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ 89 89 #define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */ 90 90 91 - #define MADV_DONTDUMP 16 /* Explicity exclude from the core dump, 91 + #define MADV_DONTDUMP 16 /* Explicitly exclude from core dump, 92 92 overrides the coredump filter bits */ 93 93 #define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */ 94 94
+1 -1
arch/mips/include/uapi/asm/msgbuf.h
··· 62 62 unsigned long __unused5; 63 63 }; 64 64 #else 65 - #warning no endianess set 65 + #warning no endianness set 66 66 #endif 67 67 68 68 #endif /* _ASM_MSGBUF_H */
+1 -1
arch/mips/kernel/cpu-probe.c
··· 1138 1138 * This processor doesn't have an MMU, so it's not 1139 1139 * "real easy" to run Linux on it. It is left purely 1140 1140 * for documentation. Commented out because it shares 1141 - * it's c0_prid id number with the TX3900. 1141 + * its c0_prid id number with the TX3900. 1142 1142 */ 1143 1143 c->cputype = CPU_R4650; 1144 1144 __cpu_name[cpu] = "R4650";
+4 -4
arch/mips/kernel/genex.S
··· 272 272 .set push 273 273 .set noreorder 274 274 PTR_LA v1, except_vec_vi_handler 275 - FEXPORT(except_vec_vi_lui) 276 - lui v0, 0 /* Patched */ 277 275 jr v1 278 276 FEXPORT(except_vec_vi_ori) 279 - ori v0, 0 /* Patched */ 277 + ori v0, zero, 0 /* Offset in vi_handlers[] */ 280 278 .set pop 281 279 END(except_vec_vi) 282 280 EXPORT(except_vec_vi_end) 283 281 284 282 /* 285 283 * Common Vectored Interrupt code 286 - * Complete the register saves and invoke the handler which is passed in $v0 284 + * Complete the register saves and invoke the handler, $v0 holds 285 + * offset into vi_handlers[] 287 286 */ 288 287 NESTED(except_vec_vi_handler, 0, sp) 289 288 SAVE_TEMP ··· 330 331 /* Save task's sp on IRQ stack so that unwinding can follow it */ 331 332 LONG_S s1, 0(sp) 332 333 2: 334 + PTR_L v0, vi_handlers(v0) 333 335 jalr v0 334 336 335 337 /* Restore sp */
+1 -1
arch/mips/kernel/kprobes.c
··· 55 55 * one; putting breakpoint on top of atomic ll/sc pair is bad idea; 56 56 * so we need to prevent it and refuse kprobes insertion for such 57 57 * instructions; cannot do much about breakpoint in the middle of 58 - * ll/sc pair; it is upto user to avoid those places 58 + * ll/sc pair; it is up to user to avoid those places 59 59 */ 60 60 static int insn_has_ll_or_sc(union mips_instruction insn) 61 61 {
+2
arch/mips/kernel/prom.c
··· 28 28 29 29 strscpy(mips_machine_name, name, sizeof(mips_machine_name)); 30 30 pr_info("MIPS: machine is %s\n", mips_get_machine_name()); 31 + 32 + dump_stack_set_arch_desc(name); 31 33 } 32 34 33 35 char *mips_get_machine_name(void)
+1 -1
arch/mips/kernel/relocate.c
··· 380 380 } 381 381 #endif /* CONFIG_USE_OF */ 382 382 383 - /* Copy the kernel to it's new location */ 383 + /* Copy the kernel to its new location */ 384 384 memcpy(loc_new, &_text, kernel_length); 385 385 386 386 /* Perform relocations on the new kernel */
+1 -1
arch/mips/kernel/relocate_kernel.S
··· 70 70 done: 71 71 #ifdef CONFIG_SMP 72 72 /* kexec_flag reset is signal to other CPUs what kernel 73 - was moved to it's location. Note - we need relocated address 73 + was moved to its location. Note - we need relocated address 74 74 of kexec_flag. */ 75 75 76 76 bal 1f
+3 -3
arch/mips/kernel/setup.c
··· 147 147 /* 148 148 * Board specific code or command line parser should have 149 149 * already set up initrd_start and initrd_end. In these cases 150 - * perfom sanity checks and use them if all looks good. 150 + * perform sanity checks and use them if all looks good. 151 151 */ 152 152 if (!initrd_start || initrd_end <= initrd_start) 153 153 goto disable; ··· 322 322 panic("Incorrect memory mapping !!!"); 323 323 324 324 if (max_pfn > PFN_DOWN(HIGHMEM_START)) { 325 + max_low_pfn = PFN_DOWN(HIGHMEM_START); 325 326 #ifdef CONFIG_HIGHMEM 326 - highstart_pfn = PFN_DOWN(HIGHMEM_START); 327 + highstart_pfn = max_low_pfn; 327 328 highend_pfn = max_pfn; 328 329 #else 329 - max_low_pfn = PFN_DOWN(HIGHMEM_START); 330 330 max_pfn = max_low_pfn; 331 331 #endif 332 332 }
+1 -1
arch/mips/kernel/signal.c
··· 570 570 return (void __user __force *)(-1UL); 571 571 572 572 /* 573 - * FPU emulator may have it's own trampoline active just 573 + * FPU emulator may have its own trampoline active just 574 574 * above the user stack, 16-bytes before the next lowest 575 575 * 16 byte boundary. Try to avoid trashing it. 576 576 */
+31 -68
arch/mips/kernel/traps.c
··· 2055 2055 panic("Caught unexpected vectored interrupt."); 2056 2056 } 2057 2057 2058 - static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) 2058 + void *set_vi_handler(int n, vi_handler_t addr) 2059 2059 { 2060 + extern const u8 except_vec_vi[]; 2061 + extern const u8 except_vec_vi_ori[], except_vec_vi_end[]; 2062 + extern const u8 rollback_except_vec_vi[]; 2060 2063 unsigned long handler; 2061 2064 unsigned long old_handler = vi_handlers[n]; 2062 2065 int srssets = current_cpu_data.srsets; 2063 2066 u16 *h; 2064 2067 unsigned char *b; 2068 + const u8 *vec_start; 2069 + int ori_offset; 2070 + int handler_len; 2065 2071 2066 2072 BUG_ON(!cpu_has_veic && !cpu_has_vint); 2067 2073 2068 2074 if (addr == NULL) { 2069 2075 handler = (unsigned long) do_default_vi; 2070 - srs = 0; 2071 2076 } else 2072 2077 handler = (unsigned long) addr; 2073 2078 vi_handlers[n] = handler; 2074 2079 2075 2080 b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); 2076 2081 2077 - if (srs >= srssets) 2078 - panic("Shadow register set %d not supported", srs); 2079 - 2080 2082 if (cpu_has_veic) { 2081 2083 if (board_bind_eic_interrupt) 2082 - board_bind_eic_interrupt(n, srs); 2084 + board_bind_eic_interrupt(n, 0); 2083 2085 } else if (cpu_has_vint) { 2084 2086 /* SRSMap is only defined if shadow sets are implemented */ 2085 2087 if (srssets > 1) 2086 - change_c0_srsmap(0xf << n*4, srs << n*4); 2088 + change_c0_srsmap(0xf << n*4, 0 << n*4); 2087 2089 } 2088 - 2089 - if (srs == 0) { 2090 - /* 2091 - * If no shadow set is selected then use the default handler 2092 - * that does normal register saving and standard interrupt exit 2093 - */ 2094 - extern const u8 except_vec_vi[], except_vec_vi_lui[]; 2095 - extern const u8 except_vec_vi_ori[], except_vec_vi_end[]; 2096 - extern const u8 rollback_except_vec_vi[]; 2097 - const u8 *vec_start = using_rollback_handler() ? 2098 - rollback_except_vec_vi : except_vec_vi; 2091 + vec_start = using_rollback_handler() ? rollback_except_vec_vi : 2092 + except_vec_vi; 2099 2093 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) 2100 - const int lui_offset = except_vec_vi_lui - vec_start + 2; 2101 - const int ori_offset = except_vec_vi_ori - vec_start + 2; 2094 + ori_offset = except_vec_vi_ori - vec_start + 2; 2102 2095 #else 2103 - const int lui_offset = except_vec_vi_lui - vec_start; 2104 - const int ori_offset = except_vec_vi_ori - vec_start; 2096 + ori_offset = except_vec_vi_ori - vec_start; 2105 2097 #endif 2106 - const int handler_len = except_vec_vi_end - vec_start; 2098 + handler_len = except_vec_vi_end - vec_start; 2107 2099 2108 - if (handler_len > VECTORSPACING) { 2109 - /* 2110 - * Sigh... panicing won't help as the console 2111 - * is probably not configured :( 2112 - */ 2113 - panic("VECTORSPACING too small"); 2114 - } 2115 - 2116 - set_handler(((unsigned long)b - ebase), vec_start, 2117 - #ifdef CONFIG_CPU_MICROMIPS 2118 - (handler_len - 1)); 2119 - #else 2120 - handler_len); 2121 - #endif 2122 - h = (u16 *)(b + lui_offset); 2123 - *h = (handler >> 16) & 0xffff; 2124 - h = (u16 *)(b + ori_offset); 2125 - *h = (handler & 0xffff); 2126 - local_flush_icache_range((unsigned long)b, 2127 - (unsigned long)(b+handler_len)); 2128 - } 2129 - else { 2100 + if (handler_len > VECTORSPACING) { 2130 2101 /* 2131 - * In other cases jump directly to the interrupt handler. It 2132 - * is the handler's responsibility to save registers if required 2133 - * (eg hi/lo) and return from the exception using "eret". 2102 + * Sigh... panicing won't help as the console 2103 + * is probably not configured :( 2134 2104 */ 2135 - u32 insn; 2136 - 2137 - h = (u16 *)b; 2138 - /* j handler */ 2139 - #ifdef CONFIG_CPU_MICROMIPS 2140 - insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1); 2141 - #else 2142 - insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2); 2143 - #endif 2144 - h[0] = (insn >> 16) & 0xffff; 2145 - h[1] = insn & 0xffff; 2146 - h[2] = 0; 2147 - h[3] = 0; 2148 - local_flush_icache_range((unsigned long)b, 2149 - (unsigned long)(b+8)); 2105 + panic("VECTORSPACING too small"); 2150 2106 } 2107 + 2108 + set_handler(((unsigned long)b - ebase), vec_start, 2109 + #ifdef CONFIG_CPU_MICROMIPS 2110 + (handler_len - 1)); 2111 + #else 2112 + handler_len); 2113 + #endif 2114 + /* insert offset into vi_handlers[] */ 2115 + h = (u16 *)(b + ori_offset); 2116 + *h = n * sizeof(handler); 2117 + local_flush_icache_range((unsigned long)b, 2118 + (unsigned long)(b+handler_len)); 2151 2119 2152 2120 return (void *)old_handler; 2153 - } 2154 - 2155 - void *set_vi_handler(int n, vi_handler_t addr) 2156 - { 2157 - return set_vi_srs_handler(n, addr, 0); 2158 2121 } 2159 2122 2160 2123 /* ··· 2379 2416 set_except_vector(i, handle_reserved); 2380 2417 2381 2418 /* 2382 - * Copy the EJTAG debug exception vector handler code to it's final 2419 + * Copy the EJTAG debug exception vector handler code to its final 2383 2420 * destination. 2384 2421 */ 2385 2422 if (cpu_has_ejtag && board_ejtag_handler_setup)
+2 -2
arch/mips/kernel/vpe.c
··· 6 6 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. 7 7 * Copyright (C) 2013 Imagination Technologies Ltd. 8 8 * 9 - * VPE spport module for loading a MIPS SP program into VPE1. The SP 9 + * VPE support module for loading a MIPS SP program into VPE1. The SP 10 10 * environment is rather simple since there are no TLBs. It needs 11 - * to be relocatable (or partiall linked). Initialize your stack in 11 + * to be relocatable (or partially linked). Initialize your stack in 12 12 * the startup-code. The loader looks for the symbol __start and sets 13 13 * up the execution to resume from there. To load and run, simply do 14 14 * a cat SP 'binary' to the /dev/vpe1 device.
+1 -1
arch/mips/kvm/emulate.c
··· 531 531 * to be used for a period of time, but the exact ktime corresponding to the 532 532 * final Count that must be restored is not known. 533 533 * 534 - * It is gauranteed that a timer interrupt immediately after restore will be 534 + * It is guaranteed that a timer interrupt immediately after restore will be 535 535 * handled, but not if CP0_Compare is exactly at @count. That case should 536 536 * already be handled when the hardware timer state is saved. 537 537 *
+1 -1
arch/mips/loongson2ef/common/platform.c
··· 17 17 { 18 18 struct cpuinfo_mips *c = &current_cpu_data; 19 19 20 - /* Only 2F revision and it's successors support CPUFreq */ 20 + /* Only 2F revision and its successors support CPUFreq */ 21 21 if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON2F) 22 22 return platform_device_register(&loongson2_cpufreq_device); 23 23
+1 -1
arch/mips/loongson64/smp.c
··· 516 516 } 517 517 518 518 /* 519 - * Setup the PC, SP, and GP of a secondary processor and start it runing! 519 + * Setup the PC, SP, and GP of a secondary processor and start it running! 520 520 */ 521 521 static int loongson3_boot_secondary(int cpu, struct task_struct *idle) 522 522 {
+1 -1
arch/mips/mm/c-r4k.c
··· 1650 1650 1651 1651 /* 1652 1652 * c0_status.cu=0 specifies that updates by the sc instruction use 1653 - * the coherency mode specified by the TLB; 1 means cachable 1653 + * the coherency mode specified by the TLB; 1 means cacheable 1654 1654 * coherent update on write will be used. Not all processors have 1655 1655 * this bit and; some wire it to zero, others like Toshiba had the 1656 1656 * silly idea of putting something else there ...
+1 -1
arch/mips/mm/cex-gen.S
··· 25 25 * This is a very bad place to be. Our cache error 26 26 * detection has triggered. If we have write-back data 27 27 * in the cache, we may not be able to recover. As a 28 - * first-order desperate measure, turn off KSEG0 cacheing. 28 + * first-order desperate measure, turn off KSEG0 caching. 29 29 */ 30 30 mfc0 k0,CP0_CONFIG 31 31 li k1,~CONF_CM_CMASK
+9 -7
arch/mips/mm/init.c
··· 422 422 " %ldk highmem ignored\n", 423 423 (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10)); 424 424 max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn; 425 + 426 + max_mapnr = max_low_pfn; 427 + } else if (highend_pfn) { 428 + max_mapnr = highend_pfn; 429 + } else { 430 + max_mapnr = max_low_pfn; 425 431 } 432 + #else 433 + max_mapnr = max_low_pfn; 426 434 #endif 435 + high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 427 436 428 437 free_area_init(max_zone_pfns); 429 438 } ··· 467 458 * bits to hold a full 32b physical address on MIPS32 systems. 468 459 */ 469 460 BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT)); 470 - 471 - #ifdef CONFIG_HIGHMEM 472 - max_mapnr = highend_pfn ? highend_pfn : max_low_pfn; 473 - #else 474 - max_mapnr = max_low_pfn; 475 - #endif 476 - high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 477 461 478 462 maar_init(); 479 463 memblock_free_all();
+4
arch/mips/mm/ioremap.c
··· 72 72 flags == _CACHE_UNCACHED) 73 73 return (void __iomem *) CKSEG1ADDR(phys_addr); 74 74 75 + /* Early remaps should use the unmapped regions until VM is available */ 76 + if (WARN_ON_ONCE(!slab_is_available())) 77 + return NULL; 78 + 75 79 /* 76 80 * Don't allow anybody to remap RAM that may be allocated by the page 77 81 * allocator, since that could lead to races & data clobbering.
+1 -1
arch/mips/mm/tlb-r3k.c
··· 183 183 int idx, pid; 184 184 185 185 /* 186 - * Handle debugger faulting in for debugee. 186 + * Handle debugger faulting in for debuggee. 187 187 */ 188 188 if (current->active_mm != vma->vm_mm) 189 189 return;
+1 -1
arch/mips/mm/tlb-r4k.c
··· 301 301 int idx, pid; 302 302 303 303 /* 304 - * Handle debugger faulting in for debugee. 304 + * Handle debugger faulting in for debuggee. 305 305 */ 306 306 if (current->active_mm != vma->vm_mm) 307 307 return;
+2 -2
arch/mips/mm/tlbex.c
··· 789 789 790 790 if (check_for_high_segbits) { 791 791 /* 792 - * The kernel currently implicitely assumes that the 792 + * The kernel currently implicitly assumes that the 793 793 * MIPS SEGBITS parameter for the processor is 794 794 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never 795 795 * allocate virtual addresses outside the maximum ··· 1715 1715 /* 1716 1716 * Check if PTE is present, if not then jump to LABEL. PTR points to 1717 1717 * the page table where this PTE is located, PTE will be re-loaded 1718 - * with it's original value. 1718 + * with its original value. 1719 1719 */ 1720 1720 static void 1721 1721 build_pte_present(u32 **p, struct uasm_reloc **r,
+1 -1
arch/mips/net/bpf_jit_comp32.c
··· 95 95 /* 96 96 * Mapping of 64-bit eBPF registers to 32-bit native MIPS registers. 97 97 * 98 - * 1) Native register pairs are ordered according to CPU endiannes, following 98 + * 1) Native register pairs are ordered according to CPU endianness, following 99 99 * the MIPS convention for passing 64-bit arguments and return values. 100 100 * 2) The eBPF return value, arguments and callee-saved registers are mapped 101 101 * to their native MIPS equivalents.
+1 -1
arch/mips/pci/ops-loongson2.c
··· 49 49 */ 50 50 #ifdef CONFIG_CS5536 51 51 /* cs5536_pci_conf_read4/write4() will call _rdmsr/_wrmsr() to 52 - * access the regsters PCI_MSR_ADDR, PCI_MSR_DATA_LO, 52 + * access the registers PCI_MSR_ADDR, PCI_MSR_DATA_LO, 53 53 * PCI_MSR_DATA_HI, which is bigger than PCI_MSR_CTRL, so, it 54 54 * will not go this branch, but the others. so, no calling dead 55 55 * loop here.
+1 -1
arch/mips/pci/pci-alchemy.c
··· 453 453 454 454 /* we can't ioremap the entire pci config space because it's too large, 455 455 * nor can we dynamically ioremap it because some drivers use the 456 - * PCI config routines from within atomic contex and that becomes a 456 + * PCI config routines from within atomic context and that becomes a 457 457 * problem in get_vm_area(). Instead we use one wired TLB entry to 458 458 * handle all config accesses for all busses. 459 459 */
+1 -1
arch/mips/pci/pci-ar2315.c
··· 16 16 * the CFG_SEL bit in the PCI_MISC_CONFIG register. 17 17 * 18 18 * Devices on the bus can perform DMA requests via chip BAR1. PCI host 19 - * controller BARs are programmend as if an external device is programmed. 19 + * controller BARs are programmed as if an external device is programmed. 20 20 * Which means that during configuration, IDSEL pin of the chip should be 21 21 * asserted. 22 22 *
+1 -1
arch/mips/pci/pci-lantiq.c
··· 152 152 temp_buffer &= ~0xf0000; 153 153 /* enable internal arbiter */ 154 154 temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT); 155 - /* enable internal PCI master reqest */ 155 + /* enable internal PCI master request */ 156 156 temp_buffer &= (~(3 << PCI_MASTER0_REQ_MASK_2BITS)); 157 157 158 158 /* enable EBU request */
+1 -1
arch/mips/pci/pci-octeon.c
··· 376 376 ctl_status.s.timer = 1; 377 377 cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl_status.u64); 378 378 379 - /* Deassert PCI reset and advertize PCX Host Mode Device Capability 379 + /* Deassert PCI reset and advertise PCX Host Mode Device Capability 380 380 (64b) */ 381 381 cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4); 382 382 cvmx_read_csr(CVMX_CIU_SOFT_PRST);
+1 -1
arch/mips/pci/pci-xtalk-bridge.c
··· 114 114 * 115 115 * The function is complicated by the ultimate brokenness of the IOC3 chip 116 116 * which is used in SGI systems. The IOC3 can only handle 32-bit PCI 117 - * accesses and does only decode parts of it's address space. 117 + * accesses and does only decode parts of its address space. 118 118 */ 119 119 static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn, 120 120 int where, int size, u32 *value)
+1 -1
arch/mips/pci/pcie-octeon.c
··· 1037 1037 in_fif_p_count = dbg_data.s.data & 0xff; 1038 1038 } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff)); 1039 1039 1040 - /* Update in_fif_p_count for it's offset with respect to out_p_count */ 1040 + /* Update in_fif_p_count for its offset with respect to out_p_count */ 1041 1041 in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff; 1042 1042 1043 1043 /* Read the OUT_P_COUNT from the debug select */
+1 -1
arch/mips/ralink/mt7621.c
··· 175 175 * mips_cm_probe() wipes out bootloader 176 176 * config for CM regions and we have to configure them 177 177 * again. This SoC cannot talk to palmbus devices 178 - * witout proper iocu region set up. 178 + * without proper iocu region set up. 179 179 * 180 180 * FIXME: it would be better to do this with values 181 181 * from DT, but we need this very early because
+1 -1
arch/mips/sgi-ip27/ip27-hubio.c
··· 21 21 /** 22 22 * hub_pio_map - establish a HUB PIO mapping 23 23 * 24 - * @hub: hub to perform PIO mapping on 24 + * @nasid: nasid to perform PIO mapping on 25 25 * @widget: widget ID to perform PIO mapping for 26 26 * @xtalk_addr: xtalk_address that needs to be mapped 27 27 * @size: size of the PIO mapping
+1 -1
arch/mips/txx9/generic/pci.c
··· 348 348 unsigned char bist; 349 349 int ret; 350 350 351 - /* Do build-in self test */ 351 + /* Do built-in self test */ 352 352 ret = pci_read_config_byte(dev, PCI_BIST, &bist); 353 353 if ((ret != PCIBIOS_SUCCESSFUL) || !(bist & PCI_BIST_CAPABLE)) 354 354 return;