Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge remote-tracking branch 'pci/pci/gavin-window-alignment' into next

Merge Gavin's patches from the PCI tree, as subsequent powerpc
patches are going to depend on them.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

+1256 -1016
+12
Documentation/ABI/testing/sysfs-bus-pci
··· 210 210 firmware assigned instance number of the PCI 211 211 device that can help in understanding the firmware 212 212 intended order of the PCI device. 213 + 214 + What: /sys/bus/pci/devices/.../d3cold_allowed 215 + Date: July 2012 216 + Contact: Huang Ying <ying.huang@intel.com> 217 + Description: 218 + d3cold_allowed is bit to control whether the corresponding PCI 219 + device can be put into D3Cold state. If it is cleared, the 220 + device will never be put into D3Cold state. If it is set, the 221 + device may be put into D3Cold state if other requirements are 222 + satisfied too. Reading this attribute will show the current 223 + value of d3cold_allowed bit. Writing this attribute will set 224 + the value of d3cold_allowed bit.
+1 -1
Documentation/feature-removal-schedule.txt
··· 579 579 ---------------------------- 580 580 581 581 What: at91-mci driver ("CONFIG_MMC_AT91") 582 - When: 3.7 582 + When: 3.8 583 583 Why: There are two mci drivers: at91-mci and atmel-mci. The PDC support 584 584 was added to atmel-mci as a first step to support more chips. 585 585 Then at91-mci was kept only for old IP versions (on at91rm9200 and
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc4 4 + EXTRAVERSION = -rc5 5 5 NAME = Saber-toothed Squirrel 6 6 7 7 # *DOCUMENTATION*
+1 -1
arch/arm/Kconfig
··· 6 6 select HAVE_DMA_API_DEBUG 7 7 select HAVE_IDE if PCI || ISA || PCMCIA 8 8 select HAVE_DMA_ATTRS 9 - select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7) 9 + select HAVE_DMA_CONTIGUOUS if MMU 10 10 select HAVE_MEMBLOCK 11 11 select RTC_LIB 12 12 select SYS_SUPPORTS_APM_EMULATION
+1 -1
arch/arm/boot/dts/at91sam9g25ek.dts
··· 15 15 compatible = "atmel,at91sam9g25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9"; 16 16 17 17 chosen { 18 - bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs"; 18 + bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs"; 19 19 }; 20 20 21 21 ahb {
+1 -1
arch/arm/configs/armadillo800eva_defconfig
··· 33 33 CONFIG_FORCE_MAX_ZONEORDER=13 34 34 CONFIG_ZBOOT_ROM_TEXT=0x0 35 35 CONFIG_ZBOOT_ROM_BSS=0x0 36 - CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096" 36 + CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw" 37 37 CONFIG_CMDLINE_FORCE=y 38 38 CONFIG_KEXEC=y 39 39 CONFIG_VFP=y
+7
arch/arm/include/asm/dma-mapping.h
··· 203 203 } 204 204 205 205 /* 206 + * This can be called during early boot to increase the size of the atomic 207 + * coherent DMA pool above the default value of 256KiB. It must be called 208 + * before postcore_initcall. 209 + */ 210 + extern void __init init_dma_coherent_pool_size(unsigned long size); 211 + 212 + /* 206 213 * This can be called during boot to increase the size of the consistent 207 214 * DMA region above it's default value of 2MB. It must be called before the 208 215 * memory allocator is initialised, i.e. before any core_initcall.
+1 -1
arch/arm/mach-at91/at91rm9200_time.c
··· 197 197 at91_st_read(AT91_ST_SR); 198 198 199 199 /* Make IRQs happen for the system timer */ 200 - setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq); 200 + setup_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq); 201 201 202 202 /* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used 203 203 * directly for the clocksource and all clockevents, after adjusting
+5 -1
arch/arm/mach-at91/at91sam9260_devices.c
··· 726 726 .flags = IORESOURCE_MEM, 727 727 }, { 728 728 .flags = IORESOURCE_MEM, 729 + }, { 730 + .flags = IORESOURCE_IRQ, 729 731 }, 730 732 }; 731 733 ··· 746 744 * The second resource is needed: 747 745 * GPBR will serve as the storage for RTC time offset 748 746 */ 749 - at91sam9260_rtt_device.num_resources = 2; 747 + at91sam9260_rtt_device.num_resources = 3; 750 748 rtt_resources[1].start = AT91SAM9260_BASE_GPBR + 751 749 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; 752 750 rtt_resources[1].end = rtt_resources[1].start + 3; 751 + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; 752 + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; 753 753 } 754 754 #else 755 755 static void __init at91_add_device_rtt_rtc(void)
+5 -1
arch/arm/mach-at91/at91sam9261_devices.c
··· 609 609 .flags = IORESOURCE_MEM, 610 610 }, { 611 611 .flags = IORESOURCE_MEM, 612 + }, { 613 + .flags = IORESOURCE_IRQ, 612 614 } 613 615 }; 614 616 ··· 628 626 * The second resource is needed: 629 627 * GPBR will serve as the storage for RTC time offset 630 628 */ 631 - at91sam9261_rtt_device.num_resources = 2; 629 + at91sam9261_rtt_device.num_resources = 3; 632 630 rtt_resources[1].start = AT91SAM9261_BASE_GPBR + 633 631 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; 634 632 rtt_resources[1].end = rtt_resources[1].start + 3; 633 + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; 634 + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; 635 635 } 636 636 #else 637 637 static void __init at91_add_device_rtt_rtc(void)
+8 -2
arch/arm/mach-at91/at91sam9263_devices.c
··· 990 990 .flags = IORESOURCE_MEM, 991 991 }, { 992 992 .flags = IORESOURCE_MEM, 993 + }, { 994 + .flags = IORESOURCE_IRQ, 993 995 } 994 996 }; 995 997 ··· 1008 1006 .flags = IORESOURCE_MEM, 1009 1007 }, { 1010 1008 .flags = IORESOURCE_MEM, 1009 + }, { 1010 + .flags = IORESOURCE_IRQ, 1011 1011 } 1012 1012 }; 1013 1013 ··· 1031 1027 * The second resource is needed only for the chosen RTT: 1032 1028 * GPBR will serve as the storage for RTC time offset 1033 1029 */ 1034 - at91sam9263_rtt0_device.num_resources = 2; 1030 + at91sam9263_rtt0_device.num_resources = 3; 1035 1031 at91sam9263_rtt1_device.num_resources = 1; 1036 1032 pdev = &at91sam9263_rtt0_device; 1037 1033 r = rtt0_resources; 1038 1034 break; 1039 1035 case 1: 1040 1036 at91sam9263_rtt0_device.num_resources = 1; 1041 - at91sam9263_rtt1_device.num_resources = 2; 1037 + at91sam9263_rtt1_device.num_resources = 3; 1042 1038 pdev = &at91sam9263_rtt1_device; 1043 1039 r = rtt1_resources; 1044 1040 break; ··· 1051 1047 pdev->name = "rtc-at91sam9"; 1052 1048 r[1].start = AT91SAM9263_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; 1053 1049 r[1].end = r[1].start + 3; 1050 + r[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; 1051 + r[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; 1054 1052 } 1055 1053 #else 1056 1054 static void __init at91_add_device_rtt_rtc(void)
+5 -1
arch/arm/mach-at91/at91sam9g45_devices.c
··· 1293 1293 .flags = IORESOURCE_MEM, 1294 1294 }, { 1295 1295 .flags = IORESOURCE_MEM, 1296 + }, { 1297 + .flags = IORESOURCE_IRQ, 1296 1298 } 1297 1299 }; 1298 1300 ··· 1312 1310 * The second resource is needed: 1313 1311 * GPBR will serve as the storage for RTC time offset 1314 1312 */ 1315 - at91sam9g45_rtt_device.num_resources = 2; 1313 + at91sam9g45_rtt_device.num_resources = 3; 1316 1314 rtt_resources[1].start = AT91SAM9G45_BASE_GPBR + 1317 1315 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; 1318 1316 rtt_resources[1].end = rtt_resources[1].start + 3; 1317 + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; 1318 + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; 1319 1319 } 1320 1320 #else 1321 1321 static void __init at91_add_device_rtt_rtc(void)
+5 -1
arch/arm/mach-at91/at91sam9rl_devices.c
··· 688 688 .flags = IORESOURCE_MEM, 689 689 }, { 690 690 .flags = IORESOURCE_MEM, 691 + }, { 692 + .flags = IORESOURCE_IRQ, 691 693 } 692 694 }; 693 695 ··· 707 705 * The second resource is needed: 708 706 * GPBR will serve as the storage for RTC time offset 709 707 */ 710 - at91sam9rl_rtt_device.num_resources = 2; 708 + at91sam9rl_rtt_device.num_resources = 3; 711 709 rtt_resources[1].start = AT91SAM9RL_BASE_GPBR + 712 710 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; 713 711 rtt_resources[1].end = rtt_resources[1].start + 3; 712 + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; 713 + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; 714 714 } 715 715 #else 716 716 static void __init at91_add_device_rtt_rtc(void)
+12
arch/arm/mach-at91/clock.c
··· 63 63 64 64 #define cpu_has_300M_plla() (cpu_is_at91sam9g10()) 65 65 66 + #define cpu_has_240M_plla() (cpu_is_at91sam9261() \ 67 + || cpu_is_at91sam9263() \ 68 + || cpu_is_at91sam9rl()) 69 + 70 + #define cpu_has_210M_plla() (cpu_is_at91sam9260()) 71 + 66 72 #define cpu_has_pllb() (!(cpu_is_at91sam9rl() \ 67 73 || cpu_is_at91sam9g45() \ 68 74 || cpu_is_at91sam9x5() \ ··· 711 705 pll_overclock = true; 712 706 } else if (cpu_has_800M_plla()) { 713 707 if (plla.rate_hz > 800000000) 708 + pll_overclock = true; 709 + } else if (cpu_has_240M_plla()) { 710 + if (plla.rate_hz > 240000000) 711 + pll_overclock = true; 712 + } else if (cpu_has_210M_plla()) { 713 + if (plla.rate_hz > 210000000) 714 714 pll_overclock = true; 715 715 } else { 716 716 if (plla.rate_hz > 209000000)
+1
arch/arm/mach-gemini/irq.c
··· 17 17 #include <linux/sched.h> 18 18 #include <asm/irq.h> 19 19 #include <asm/mach/irq.h> 20 + #include <asm/system_misc.h> 20 21 #include <mach/hardware.h> 21 22 22 23 #define IRQ_SOURCE(base_addr) (base_addr + 0x00)
+7
arch/arm/mach-kirkwood/common.c
··· 517 517 void __init kirkwood_init_early(void) 518 518 { 519 519 orion_time_set_base(TIMER_VIRT_BASE); 520 + 521 + /* 522 + * Some Kirkwood devices allocate their coherent buffers from atomic 523 + * context. Increase size of atomic coherent pool to make sure such 524 + * the allocations won't fail. 525 + */ 526 + init_dma_coherent_pool_size(SZ_1M); 520 527 } 521 528 522 529 int kirkwood_tclk;
+1
arch/arm/mach-kirkwood/db88f6281-bp-setup.c
··· 10 10 11 11 #include <linux/kernel.h> 12 12 #include <linux/init.h> 13 + #include <linux/sizes.h> 13 14 #include <linux/platform_device.h> 14 15 #include <linux/mtd/partitions.h> 15 16 #include <linux/ata_platform.h>
+7 -6
arch/arm/mach-shmobile/board-armadillo800eva.c
··· 520 520 }; 521 521 522 522 /* GPIO KEY */ 523 - #define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 } 523 + #define GPIO_KEY(c, g, d, ...) \ 524 + { .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ } 524 525 525 526 static struct gpio_keys_button gpio_buttons[] = { 526 - GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW1"), 527 - GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW2"), 528 - GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW3"), 529 - GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW4"), 527 + GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW3", .wakeup = 1), 528 + GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW4"), 529 + GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW5"), 530 + GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW6"), 530 531 }; 531 532 532 533 static struct gpio_keys_platform_data gpio_key_info = { ··· 902 901 &camera_device, 903 902 &ceu0_device, 904 903 &fsi_device, 905 - &fsi_hdmi_device, 906 904 &fsi_wm8978_device, 905 + &fsi_hdmi_device, 907 906 }; 908 907 909 908 static void __init eva_clock_init(void)
+2 -1
arch/arm/mach-shmobile/board-mackerel.c
··· 695 695 * - J30 "open" 696 696 * - modify usbhs1_get_id() USBHS_HOST -> USBHS_GADGET 697 697 * - add .get_vbus = usbhs_get_vbus in usbhs1_private 698 + * - check usbhs0_device(pio)/usbhs1_device(irq) order in mackerel_devices. 698 699 */ 699 700 #define IRQ8 evt2irq(0x0300) 700 701 #define USB_PHY_MODE (1 << 4) ··· 1326 1325 &nor_flash_device, 1327 1326 &smc911x_device, 1328 1327 &lcdc_device, 1329 - &usbhs1_device, 1330 1328 &usbhs0_device, 1329 + &usbhs1_device, 1331 1330 &leds_device, 1332 1331 &fsi_device, 1333 1332 &fsi_ak4643_device,
+1 -1
arch/arm/mach-shmobile/board-marzen.c
··· 67 67 68 68 static struct platform_device eth_device = { 69 69 .name = "smsc911x", 70 - .id = 0, 70 + .id = -1, 71 71 .dev = { 72 72 .platform_data = &smsc911x_platdata, 73 73 },
+2 -2
arch/arm/mach-shmobile/intc-sh73a0.c
··· 259 259 return 0; /* always allow wakeup */ 260 260 } 261 261 262 - #define RELOC_BASE 0x1000 262 + #define RELOC_BASE 0x1200 263 263 264 - /* INTCA IRQ pins at INTCS + 0x1000 to make space for GIC+INTC handling */ 264 + /* INTCA IRQ pins at INTCS + RELOC_BASE to make space for GIC+INTC handling */ 265 265 #define INTCS_VECT_RELOC(n, vect) INTCS_VECT((n), (vect) + RELOC_BASE) 266 266 267 267 INTC_IRQ_PINS_32(intca_irq_pins, 0xe6900000,
+1 -11
arch/arm/mach-tegra/pcie.c
··· 367 367 /* Tegra PCIE requires relaxed ordering */ 368 368 static void __devinit tegra_pcie_relax_enable(struct pci_dev *dev) 369 369 { 370 - u16 val16; 371 - int pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 372 - 373 - if (pos <= 0) { 374 - dev_err(&dev->dev, "skipping relaxed ordering fixup\n"); 375 - return; 376 - } 377 - 378 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &val16); 379 - val16 |= PCI_EXP_DEVCTL_RELAX_EN; 380 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, val16); 370 + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); 381 371 } 382 372 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable); 383 373
+104 -10
arch/arm/mm/dma-mapping.c
··· 267 267 vunmap(cpu_addr); 268 268 } 269 269 270 + #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K 271 + 270 272 struct dma_pool { 271 273 size_t size; 272 274 spinlock_t lock; 273 275 unsigned long *bitmap; 274 276 unsigned long nr_pages; 275 277 void *vaddr; 276 - struct page *page; 278 + struct page **pages; 277 279 }; 278 280 279 281 static struct dma_pool atomic_pool = { 280 - .size = SZ_256K, 282 + .size = DEFAULT_DMA_COHERENT_POOL_SIZE, 281 283 }; 282 284 283 285 static int __init early_coherent_pool(char *p) ··· 288 286 return 0; 289 287 } 290 288 early_param("coherent_pool", early_coherent_pool); 289 + 290 + void __init init_dma_coherent_pool_size(unsigned long size) 291 + { 292 + /* 293 + * Catch any attempt to set the pool size too late. 294 + */ 295 + BUG_ON(atomic_pool.vaddr); 296 + 297 + /* 298 + * Set architecture specific coherent pool size only if 299 + * it has not been changed by kernel command line parameter. 300 + */ 301 + if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE) 302 + atomic_pool.size = size; 303 + } 291 304 292 305 /* 293 306 * Initialise the coherent pool for atomic allocations. 
··· 314 297 unsigned long nr_pages = pool->size >> PAGE_SHIFT; 315 298 unsigned long *bitmap; 316 299 struct page *page; 300 + struct page **pages; 317 301 void *ptr; 318 302 int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long); 319 303 ··· 322 304 if (!bitmap) 323 305 goto no_bitmap; 324 306 307 + pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); 308 + if (!pages) 309 + goto no_pages; 310 + 325 311 if (IS_ENABLED(CONFIG_CMA)) 326 312 ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page); 327 313 else 328 314 ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot, 329 315 &page, NULL); 330 316 if (ptr) { 317 + int i; 318 + 319 + for (i = 0; i < nr_pages; i++) 320 + pages[i] = page + i; 321 + 331 322 spin_lock_init(&pool->lock); 332 323 pool->vaddr = ptr; 333 - pool->page = page; 324 + pool->pages = pages; 334 325 pool->bitmap = bitmap; 335 326 pool->nr_pages = nr_pages; 336 327 pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n", 337 328 (unsigned)pool->size / 1024); 338 329 return 0; 339 330 } 331 + no_pages: 340 332 kfree(bitmap); 341 333 no_bitmap: 342 334 pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n", ··· 471 443 if (pageno < pool->nr_pages) { 472 444 bitmap_set(pool->bitmap, pageno, count); 473 445 ptr = pool->vaddr + PAGE_SIZE * pageno; 474 - *ret_page = pool->page + pageno; 446 + *ret_page = pool->pages[pageno]; 447 + } else { 448 + pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n" 449 + "Please increase it with coherent_pool= kernel parameter!\n", 450 + (unsigned)pool->size / 1024); 475 451 } 476 452 spin_unlock_irqrestore(&pool->lock, flags); 477 453 478 454 return ptr; 455 + } 456 + 457 + static bool __in_atomic_pool(void *start, size_t size) 458 + { 459 + struct dma_pool *pool = &atomic_pool; 460 + void *end = start + size; 461 + void *pool_start = pool->vaddr; 462 + void *pool_end = pool->vaddr + pool->size; 463 + 464 + if (start < pool_start || start 
> pool_end) 465 + return false; 466 + 467 + if (end <= pool_end) 468 + return true; 469 + 470 + WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n", 471 + start, end - 1, pool_start, pool_end - 1); 472 + 473 + return false; 479 474 } 480 475 481 476 static int __free_from_pool(void *start, size_t size) ··· 507 456 unsigned long pageno, count; 508 457 unsigned long flags; 509 458 510 - if (start < pool->vaddr || start > pool->vaddr + pool->size) 459 + if (!__in_atomic_pool(start, size)) 511 460 return 0; 512 - 513 - if (start + size > pool->vaddr + pool->size) { 514 - WARN(1, "freeing wrong coherent size from pool\n"); 515 - return 0; 516 - } 517 461 518 462 pageno = (start - pool->vaddr) >> PAGE_SHIFT; 519 463 count = size >> PAGE_SHIFT; ··· 1136 1090 return 0; 1137 1091 } 1138 1092 1093 + static struct page **__atomic_get_pages(void *addr) 1094 + { 1095 + struct dma_pool *pool = &atomic_pool; 1096 + struct page **pages = pool->pages; 1097 + int offs = (addr - pool->vaddr) >> PAGE_SHIFT; 1098 + 1099 + return pages + offs; 1100 + } 1101 + 1139 1102 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) 1140 1103 { 1141 1104 struct vm_struct *area; 1105 + 1106 + if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) 1107 + return __atomic_get_pages(cpu_addr); 1142 1108 1143 1109 if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) 1144 1110 return cpu_addr; ··· 1159 1101 if (area && (area->flags & VM_ARM_DMA_CONSISTENT)) 1160 1102 return area->pages; 1161 1103 return NULL; 1104 + } 1105 + 1106 + static void *__iommu_alloc_atomic(struct device *dev, size_t size, 1107 + dma_addr_t *handle) 1108 + { 1109 + struct page *page; 1110 + void *addr; 1111 + 1112 + addr = __alloc_from_pool(size, &page); 1113 + if (!addr) 1114 + return NULL; 1115 + 1116 + *handle = __iommu_create_mapping(dev, &page, size); 1117 + if (*handle == DMA_ERROR_CODE) 1118 + goto err_mapping; 1119 + 1120 + return addr; 1121 + 1122 + err_mapping: 1123 + __free_from_pool(addr, 
size); 1124 + return NULL; 1125 + } 1126 + 1127 + static void __iommu_free_atomic(struct device *dev, struct page **pages, 1128 + dma_addr_t handle, size_t size) 1129 + { 1130 + __iommu_remove_mapping(dev, handle, size); 1131 + __free_from_pool(page_address(pages[0]), size); 1162 1132 } 1163 1133 1164 1134 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, ··· 1198 1112 1199 1113 *handle = DMA_ERROR_CODE; 1200 1114 size = PAGE_ALIGN(size); 1115 + 1116 + if (gfp & GFP_ATOMIC) 1117 + return __iommu_alloc_atomic(dev, size, handle); 1201 1118 1202 1119 pages = __iommu_alloc_buffer(dev, size, gfp); 1203 1120 if (!pages) ··· 1265 1176 1266 1177 if (!pages) { 1267 1178 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); 1179 + return; 1180 + } 1181 + 1182 + if (__in_atomic_pool(cpu_addr, size)) { 1183 + __iommu_free_atomic(dev, pages, handle, size); 1268 1184 return; 1269 1185 } 1270 1186
+5 -10
arch/mips/pci/pci-octeon.c
··· 117 117 } 118 118 119 119 /* Enable the PCIe normal error reporting */ 120 - pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 121 - if (pos) { 122 - /* Update Device Control */ 123 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &config); 124 - config |= PCI_EXP_DEVCTL_CERE; /* Correctable Error Reporting */ 125 - config |= PCI_EXP_DEVCTL_NFERE; /* Non-Fatal Error Reporting */ 126 - config |= PCI_EXP_DEVCTL_FERE; /* Fatal Error Reporting */ 127 - config |= PCI_EXP_DEVCTL_URRE; /* Unsupported Request */ 128 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, config); 129 - } 120 + config = PCI_EXP_DEVCTL_CERE; /* Correctable Error Reporting */ 121 + config |= PCI_EXP_DEVCTL_NFERE; /* Non-Fatal Error Reporting */ 122 + config |= PCI_EXP_DEVCTL_FERE; /* Fatal Error Reporting */ 123 + config |= PCI_EXP_DEVCTL_URRE; /* Unsupported Request */ 124 + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, config); 130 125 131 126 /* Find the Advanced Error Reporting capability */ 132 127 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+3
arch/powerpc/include/asm/machdep.h
··· 215 215 /* Called after scan and before resource survey */ 216 216 void (*pcibios_fixup_phb)(struct pci_controller *hose); 217 217 218 + /* Called during PCI resource reassignment */ 219 + resource_size_t (*pcibios_window_alignment)(struct pci_bus *, unsigned long type); 220 + 218 221 /* Called to shutdown machine specific hardware not already controlled 219 222 * by other drivers. 220 223 */
+20
arch/powerpc/kernel/pci-common.c
··· 99 99 kfree(phb); 100 100 } 101 101 102 + /* 103 + * The function is used to return the minimal alignment 104 + * for memory or I/O windows of the associated P2P bridge. 105 + * By default, 4KiB alignment for I/O windows and 1MiB for 106 + * memory windows. 107 + */ 108 + resource_size_t pcibios_window_alignment(struct pci_bus *bus, 109 + unsigned long type) 110 + { 111 + if (ppc_md.pcibios_window_alignment) 112 + return ppc_md.pcibios_window_alignment(bus, type); 113 + 114 + /* 115 + * PCI core will figure out the default 116 + * alignment: 4KiB for I/O and 1MiB for 117 + * memory window. 118 + */ 119 + return 1; 120 + } 121 + 102 122 static resource_size_t pcibios_io_size(const struct pci_controller *hose) 103 123 { 104 124 #ifdef CONFIG_PPC64
+40 -1
arch/powerpc/platforms/powernv/pci-ioda.c
··· 854 854 if (pe == NULL) 855 855 continue; 856 856 /* Leaving the PCIe domain ... single PE# */ 857 - if (dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) 857 + if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) 858 858 pnv_ioda_setup_bus_PE(dev, pe); 859 859 else if (dev->subordinate) 860 860 pnv_ioda_setup_PEs(dev->subordinate); ··· 1138 1138 } 1139 1139 } 1140 1140 1141 + /* 1142 + * Returns the alignment for I/O or memory windows for P2P 1143 + * bridges. That actually depends on how PEs are segmented. 1144 + * For now, we return I/O or M32 segment size for PE sensitive 1145 + * P2P bridges. Otherwise, the default values (4KiB for I/O, 1146 + * 1MiB for memory) will be returned. 1147 + * 1148 + * The current PCI bus might be put into one PE, which was 1149 + * create against the parent PCI bridge. For that case, we 1150 + * needn't enlarge the alignment so that we can save some 1151 + * resources. 1152 + */ 1153 + static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus, 1154 + unsigned long type) 1155 + { 1156 + struct pci_dev *bridge; 1157 + struct pci_controller *hose = pci_bus_to_host(bus); 1158 + struct pnv_phb *phb = hose->private_data; 1159 + int num_pci_bridges = 0; 1160 + 1161 + bridge = bus->self; 1162 + while (bridge) { 1163 + if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) { 1164 + num_pci_bridges++; 1165 + if (num_pci_bridges >= 2) 1166 + return 1; 1167 + } 1168 + 1169 + bridge = bridge->bus->self; 1170 + } 1171 + 1172 + /* We need support prefetchable memory window later */ 1173 + if (type & IORESOURCE_MEM) 1174 + return phb->ioda.m32_segsize; 1175 + 1176 + return phb->ioda.io_segsize; 1177 + } 1178 + 1141 1179 /* Prevent enabling devices for which we couldn't properly 1142 1180 * assign a PE 1143 1181 */ ··· 1343 1305 */ 1344 1306 ppc_md.pcibios_fixup_phb = pnv_pci_ioda_fixup_phb; 1345 1307 ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook; 1308 + ppc_md.pcibios_window_alignment = pnv_pci_window_alignment; 1346 1309 
pci_add_flags(PCI_PROBE_ONLY | PCI_REASSIGN_ALL_RSRC); 1347 1310 1348 1311 /* Reset IODA tables to a clean state */
+6 -20
arch/tile/kernel/pci.c
··· 246 246 247 247 /* Scan for the smallest maximum payload size. */ 248 248 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 249 - int pcie_caps_offset; 250 249 u32 devcap; 251 250 int max_payload; 252 251 253 - pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP); 254 - if (pcie_caps_offset == 0) 252 + if (!pci_is_pcie(dev)) 255 253 continue; 256 254 257 - pci_read_config_dword(dev, pcie_caps_offset + PCI_EXP_DEVCAP, 258 - &devcap); 255 + pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &devcap); 259 256 max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD; 260 257 if (max_payload < smallest_max_payload) 261 258 smallest_max_payload = max_payload; ··· 260 263 261 264 /* Now, set the max_payload_size for all devices to that value. */ 262 265 new_values = (max_read_size << 12) | (smallest_max_payload << 5); 263 - while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 264 - int pcie_caps_offset; 265 - u16 devctl; 266 - 267 - pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP); 268 - if (pcie_caps_offset == 0) 269 - continue; 270 - 271 - pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, 272 - &devctl); 273 - devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ); 274 - devctl |= new_values; 275 - pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, 276 - devctl); 277 - } 266 + while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) 267 + pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, 268 + PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ, 269 + new_values); 278 270 } 279 271 280 272
+1 -1
arch/um/os-Linux/time.c
··· 114 114 skew += this_tick - last_tick; 115 115 116 116 while (skew >= one_tick) { 117 - alarm_handler(SIGVTALRM, NULL); 117 + alarm_handler(SIGVTALRM, NULL, NULL); 118 118 skew -= one_tick; 119 119 } 120 120
+1 -1
arch/x86/xen/mmu.c
··· 1283 1283 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); 1284 1284 1285 1285 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; 1286 - if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) { 1286 + if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) { 1287 1287 args->op.cmd = MMUEXT_INVLPG_MULTI; 1288 1288 args->op.arg1.linear_addr = start; 1289 1289 }
+1 -1
arch/x86/xen/p2m.c
··· 599 599 if (p2m_index(set_pfn)) 600 600 return false; 601 601 602 - for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) { 602 + for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) { 603 603 topidx = p2m_top_index(pfn); 604 604 605 605 if (!p2m_top[topidx])
+1 -1
drivers/base/dma-contiguous.c
··· 250 250 return -EINVAL; 251 251 252 252 /* Sanitise input arguments */ 253 - alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order); 253 + alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); 254 254 base = ALIGN(base, alignment); 255 255 size = ALIGN(size, alignment); 256 256 limit &= ~(alignment - 1);
+1 -1
drivers/gpio/Kconfig
··· 294 294 295 295 config GPIO_MC9S08DZ60 296 296 bool "MX35 3DS BOARD MC9S08DZ60 GPIO functions" 297 - depends on I2C && MACH_MX35_3DS 297 + depends on I2C=y && MACH_MX35_3DS 298 298 help 299 299 Select this to enable the MC9S08DZ60 GPIO driver 300 300
+2 -2
drivers/gpio/gpio-em.c
··· 247 247 248 248 p->irq_base = irq_alloc_descs(pdata->irq_base, 0, 249 249 pdata->number_of_pins, numa_node_id()); 250 - if (IS_ERR_VALUE(p->irq_base)) { 250 + if (p->irq_base < 0) { 251 251 dev_err(&pdev->dev, "cannot get irq_desc\n"); 252 - return -ENXIO; 252 + return p->irq_base; 253 253 } 254 254 pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n", 255 255 pdata->gpio_base, pdata->number_of_pins, p->irq_base);
+1
drivers/gpio/gpio-rdc321x.c
··· 170 170 rdc321x_gpio_dev->reg2_data_base = r->start + 0x4; 171 171 172 172 rdc321x_gpio_dev->chip.label = "rdc321x-gpio"; 173 + rdc321x_gpio_dev->chip.owner = THIS_MODULE; 173 174 rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input; 174 175 rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config; 175 176 rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
+1 -1
drivers/gpio/gpiolib-of.c
··· 82 82 gpiochip_find(&gg_data, of_gpiochip_find_and_xlate); 83 83 84 84 of_node_put(gg_data.gpiospec.np); 85 - pr_debug("%s exited with status %d\n", __func__, ret); 85 + pr_debug("%s exited with status %d\n", __func__, gg_data.out_gpio); 86 86 return gg_data.out_gpio; 87 87 } 88 88 EXPORT_SYMBOL(of_get_named_gpio_flags);
+3 -7
drivers/gpu/drm/radeon/evergreen.c
··· 77 77 void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) 78 78 { 79 79 u16 ctl, v; 80 - int cap, err; 80 + int err; 81 81 82 - cap = pci_pcie_cap(rdev->pdev); 83 - if (!cap) 84 - return; 85 - 86 - err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl); 82 + err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl); 87 83 if (err) 88 84 return; 89 85 ··· 91 95 if ((v == 0) || (v == 6) || (v == 7)) { 92 96 ctl &= ~PCI_EXP_DEVCTL_READRQ; 93 97 ctl |= (2 << 12); 94 - pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl); 98 + pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl); 95 99 } 96 100 } 97 101
+5 -2
drivers/hid/hid-core.c
··· 996 996 struct hid_driver *hdrv = hid->driver; 997 997 int ret; 998 998 999 - hid_dump_input(hid, usage, value); 999 + if (!list_empty(&hid->debug_list)) 1000 + hid_dump_input(hid, usage, value); 1000 1001 1001 1002 if (hdrv && hdrv->event && hid_match_usage(hid, usage)) { 1002 1003 ret = hdrv->event(hid, field, usage, value); ··· 1559 1558 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, 1560 1559 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, 1561 1560 { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, 1562 - { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, 1561 + #if IS_ENABLED(CONFIG_HID_LENOVO_TPKBD) 1562 + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, 1563 + #endif 1563 1564 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, 1564 1565 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, 1565 1566 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
+2 -2
drivers/hid/hid-logitech-dj.c
··· 439 439 struct dj_report *dj_report; 440 440 int retval; 441 441 442 - dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL); 442 + dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); 443 443 if (!dj_report) 444 444 return -ENOMEM; 445 445 dj_report->report_id = REPORT_ID_DJ_SHORT; ··· 456 456 struct dj_report *dj_report; 457 457 int retval; 458 458 459 - dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL); 459 + dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); 460 460 if (!dj_report) 461 461 return -ENOMEM; 462 462 dj_report->report_id = REPORT_ID_DJ_SHORT;
+1
drivers/hid/usbhid/hid-quirks.c
··· 70 70 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, 71 71 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 72 72 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 73 + { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 73 74 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, 74 75 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, 75 76 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
+4 -4
drivers/infiniband/hw/mthca/mthca_reset.c
··· 241 241 242 242 if (hca_pcie_cap) { 243 243 devctl = hca_header[(hca_pcie_cap + PCI_EXP_DEVCTL) / 4]; 244 - if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_DEVCTL, 245 - devctl)) { 244 + if (pcie_capability_write_word(mdev->pdev, PCI_EXP_DEVCTL, 245 + devctl)) { 246 246 err = -ENODEV; 247 247 mthca_err(mdev, "Couldn't restore HCA PCI Express " 248 248 "Device Control register, aborting.\n"); 249 249 goto out; 250 250 } 251 251 linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4]; 252 - if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_LNKCTL, 253 - linkctl)) { 252 + if (pcie_capability_write_word(mdev->pdev, PCI_EXP_LNKCTL, 253 + linkctl)) { 254 254 err = -ENODEV; 255 255 mthca_err(mdev, "Couldn't restore HCA PCI Express " 256 256 "Link control register, aborting.\n");
+15 -23
drivers/infiniband/hw/qib/qib_pcie.c
··· 273 273 struct qib_msix_entry *entry) 274 274 { 275 275 u16 linkstat, speed; 276 - int pos = 0, pose, ret = 1; 276 + int pos = 0, ret = 1; 277 277 278 - pose = pci_pcie_cap(dd->pcidev); 279 - if (!pose) { 278 + if (!pci_is_pcie(dd->pcidev)) { 280 279 qib_dev_err(dd, "Can't find PCI Express capability!\n"); 281 280 /* set up something... */ 282 281 dd->lbus_width = 1; ··· 297 298 if (!pos) 298 299 qib_enable_intx(dd->pcidev); 299 300 300 - pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat); 301 + pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat); 301 302 /* 302 303 * speed is bits 0-3, linkwidth is bits 4-8 303 304 * no defines for them in headers ··· 515 516 { 516 517 int r; 517 518 struct pci_dev *parent; 518 - int ppos; 519 519 u16 devid; 520 520 u32 mask, bits, val; 521 521 ··· 527 529 qib_devinfo(dd->pcidev, "Parent not root\n"); 528 530 return 1; 529 531 } 530 - ppos = pci_pcie_cap(parent); 531 - if (!ppos) 532 + if (!pci_is_pcie(parent)) 532 533 return 1; 533 534 if (parent->vendor != 0x8086) 534 535 return 1; ··· 584 587 { 585 588 int ret = 1; /* Assume the worst */ 586 589 struct pci_dev *parent; 587 - int ppos, epos; 588 590 u16 pcaps, pctl, ecaps, ectl; 589 591 int rc_sup, ep_sup; 590 592 int rc_cur, ep_cur; ··· 594 598 qib_devinfo(dd->pcidev, "Parent not root\n"); 595 599 goto bail; 596 600 } 597 - ppos = pci_pcie_cap(parent); 598 - if (ppos) { 599 - pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps); 600 - pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl); 601 - } else 601 + 602 + if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev)) 602 603 goto bail; 604 + pcie_capability_read_word(parent, PCI_EXP_DEVCAP, &pcaps); 605 + pcie_capability_read_word(parent, PCI_EXP_DEVCTL, &pctl); 603 606 /* Find out supported and configured values for endpoint (us) */ 604 - epos = pci_pcie_cap(dd->pcidev); 605 - if (epos) { 606 - pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps); 607 - 
pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl); 608 - } else 609 - goto bail; 607 + pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCAP, &ecaps); 608 + pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl); 609 + 610 610 ret = 0; 611 611 /* Find max payload supported by root, endpoint */ 612 612 rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD); ··· 621 629 rc_cur = rc_sup; 622 630 pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) | 623 631 val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD); 624 - pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl); 632 + pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl); 625 633 } 626 634 /* If less than (allowed, supported), bump endpoint payload */ 627 635 if (rc_sup > ep_cur) { 628 636 ep_cur = rc_sup; 629 637 ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) | 630 638 val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD); 631 - pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl); 639 + pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl); 632 640 } 633 641 634 642 /* ··· 646 654 rc_cur = rc_sup; 647 655 pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) | 648 656 val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ); 649 - pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl); 657 + pcie_capability_write_word(parent, PCI_EXP_DEVCTL, pctl); 650 658 } 651 659 if (rc_sup > ep_cur) { 652 660 ep_cur = rc_sup; 653 661 ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) | 654 662 val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ); 655 - pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl); 663 + pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl); 656 664 } 657 665 bail: 658 666 return ret;
+3
drivers/input/keyboard/imx_keypad.c
··· 358 358 /* Inhibit KDI and KRI interrupts. */ 359 359 reg_val = readw(keypad->mmio_base + KPSR); 360 360 reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE); 361 + reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD; 361 362 writew(reg_val, keypad->mmio_base + KPSR); 362 363 363 364 /* Colums as open drain and disable all rows */ ··· 516 515 input_set_drvdata(input_dev, keypad); 517 516 518 517 /* Ensure that the keypad will stay dormant until opened */ 518 + clk_enable(keypad->clk); 519 519 imx_keypad_inhibit(keypad); 520 + clk_disable(keypad->clk); 520 521 521 522 error = request_irq(irq, imx_keypad_irq_handler, 0, 522 523 pdev->name, keypad);
+14
drivers/input/serio/i8042-x86ia64io.h
··· 177 177 }, 178 178 }, 179 179 { 180 + /* Gigabyte T1005 - defines wrong chassis type ("Other") */ 181 + .matches = { 182 + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), 183 + DMI_MATCH(DMI_PRODUCT_NAME, "T1005"), 184 + }, 185 + }, 186 + { 187 + /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */ 188 + .matches = { 189 + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), 190 + DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"), 191 + }, 192 + }, 193 + { 180 194 .matches = { 181 195 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 182 196 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
+5 -1
drivers/input/tablet/wacom_wac.c
··· 1848 1848 { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, 1849 1849 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1850 1850 static const struct wacom_features wacom_features_0xF4 = 1851 - { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, 1851 + { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, 1852 + 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1853 + static const struct wacom_features wacom_features_0xF8 = 1854 + { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, 1852 1855 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1853 1856 static const struct wacom_features wacom_features_0x3F = 1854 1857 { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, ··· 2094 2091 { USB_DEVICE_WACOM(0xEF) }, 2095 2092 { USB_DEVICE_WACOM(0x47) }, 2096 2093 { USB_DEVICE_WACOM(0xF4) }, 2094 + { USB_DEVICE_WACOM(0xF8) }, 2097 2095 { USB_DEVICE_WACOM(0xFA) }, 2098 2096 { USB_DEVICE_LENOVO(0x6004) }, 2099 2097 { }
+1 -1
drivers/input/touchscreen/edt-ft5x06.c
··· 602 602 { 603 603 if (tsdata->debug_dir) 604 604 debugfs_remove_recursive(tsdata->debug_dir); 605 + kfree(tsdata->raw_buffer); 605 606 } 606 607 607 608 #else ··· 844 843 if (gpio_is_valid(pdata->reset_pin)) 845 844 gpio_free(pdata->reset_pin); 846 845 847 - kfree(tsdata->raw_buffer); 848 846 kfree(tsdata); 849 847 850 848 return 0;
+3 -3
drivers/iommu/intel-iommu.c
··· 2351 2351 return 0; 2352 2352 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) 2353 2353 return 0; 2354 - } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) 2354 + } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE) 2355 2355 return 0; 2356 2356 2357 2357 /* ··· 3546 3546 struct pci_dev *bridge = bus->self; 3547 3547 3548 3548 if (!bridge || !pci_is_pcie(bridge) || 3549 - bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) 3549 + pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) 3550 3550 return 0; 3551 3551 3552 - if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) { 3552 + if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) { 3553 3553 for (i = 0; i < atsru->devices_cnt; i++) 3554 3554 if (atsru->devices[i] == bridge) 3555 3555 return 1;
+25 -1
drivers/mmc/card/block.c
··· 1411 1411 /* complete ongoing async transfer before issuing discard */ 1412 1412 if (card->host->areq) 1413 1413 mmc_blk_issue_rw_rq(mq, NULL); 1414 - if (req->cmd_flags & REQ_SECURE) 1414 + if (req->cmd_flags & REQ_SECURE && 1415 + !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) 1415 1416 ret = mmc_blk_issue_secdiscard_rq(mq, req); 1416 1417 else 1417 1418 ret = mmc_blk_issue_discard_rq(mq, req); ··· 1717 1716 #define CID_MANFID_SANDISK 0x2 1718 1717 #define CID_MANFID_TOSHIBA 0x11 1719 1718 #define CID_MANFID_MICRON 0x13 1719 + #define CID_MANFID_SAMSUNG 0x15 1720 1720 1721 1721 static const struct mmc_fixup blk_fixups[] = 1722 1722 { ··· 1753 1751 */ 1754 1752 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, 1755 1753 MMC_QUIRK_LONG_READ_TIME), 1754 + 1755 + /* 1756 + * On these Samsung MoviNAND parts, performing secure erase or 1757 + * secure trim can result in unrecoverable corruption due to a 1758 + * firmware bug. 1759 + */ 1760 + MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 1761 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 1762 + MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 1763 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 1764 + MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 1765 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 1766 + MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 1767 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 1768 + MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 1769 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 1770 + MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 1771 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 1772 + MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 1773 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 1774 + MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, 1775 + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), 1756 1776 1757 1777 END_FIXUP 1758 1778 };
+5 -1
drivers/mmc/host/atmel-mci.c
··· 81 81 bool has_bad_data_ordering; 82 82 bool need_reset_after_xfer; 83 83 bool need_blksz_mul_4; 84 + bool need_notbusy_for_read_ops; 84 85 }; 85 86 86 87 struct atmel_mci_dma { ··· 1626 1625 __func__); 1627 1626 atmci_set_completed(host, EVENT_XFER_COMPLETE); 1628 1627 1629 - if (host->data->flags & MMC_DATA_WRITE) { 1628 + if (host->caps.need_notbusy_for_read_ops || 1629 + (host->data->flags & MMC_DATA_WRITE)) { 1630 1630 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 1631 1631 state = STATE_WAITING_NOTBUSY; 1632 1632 } else if (host->mrq->stop) { ··· 2220 2218 host->caps.has_bad_data_ordering = 1; 2221 2219 host->caps.need_reset_after_xfer = 1; 2222 2220 host->caps.need_blksz_mul_4 = 1; 2221 + host->caps.need_notbusy_for_read_ops = 0; 2223 2222 2224 2223 /* keep only major version number */ 2225 2224 switch (version & 0xf00) { ··· 2241 2238 case 0x200: 2242 2239 host->caps.has_rwproof = 1; 2243 2240 host->caps.need_blksz_mul_4 = 0; 2241 + host->caps.need_notbusy_for_read_ops = 1; 2244 2242 case 0x100: 2245 2243 host->caps.has_bad_data_ordering = 0; 2246 2244 host->caps.need_reset_after_xfer = 0;
-7
drivers/mmc/host/bfin_sdh.c
··· 49 49 #define bfin_write_SDH_CFG bfin_write_RSI_CFG 50 50 #endif 51 51 52 - struct dma_desc_array { 53 - unsigned long start_addr; 54 - unsigned short cfg; 55 - unsigned short x_count; 56 - short x_modify; 57 - } __packed; 58 - 59 52 struct sdh_host { 60 53 struct mmc_host *mmc; 61 54 spinlock_t lock;
+46 -39
drivers/mmc/host/dw_mmc.c
··· 627 627 { 628 628 struct dw_mci *host = slot->host; 629 629 u32 div; 630 + u32 clk_en_a; 630 631 631 632 if (slot->clock != host->current_speed) { 632 633 div = host->bus_hz / slot->clock; ··· 660 659 mci_send_cmd(slot, 661 660 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); 662 661 663 - /* enable clock */ 664 - mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE | 665 - SDMMC_CLKEN_LOW_PWR) << slot->id)); 662 + /* enable clock; only low power if no SDIO */ 663 + clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; 664 + if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id))) 665 + clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; 666 + mci_writel(host, CLKENA, clk_en_a); 666 667 667 668 /* inform CIU */ 668 669 mci_send_cmd(slot, ··· 865 862 return present; 866 863 } 867 864 865 + /* 866 + * Disable lower power mode. 867 + * 868 + * Low power mode will stop the card clock when idle. According to the 869 + * description of the CLKENA register we should disable low power mode 870 + * for SDIO cards if we need SDIO interrupts to work. 871 + * 872 + * This function is fast if low power mode is already disabled. 873 + */ 874 + static void dw_mci_disable_low_power(struct dw_mci_slot *slot) 875 + { 876 + struct dw_mci *host = slot->host; 877 + u32 clk_en_a; 878 + const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; 879 + 880 + clk_en_a = mci_readl(host, CLKENA); 881 + 882 + if (clk_en_a & clken_low_pwr) { 883 + mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr); 884 + mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | 885 + SDMMC_CMD_PRV_DAT_WAIT, 0); 886 + } 887 + } 888 + 868 889 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) 869 890 { 870 891 struct dw_mci_slot *slot = mmc_priv(mmc); ··· 898 871 /* Enable/disable Slot Specific SDIO interrupt */ 899 872 int_mask = mci_readl(host, INTMASK); 900 873 if (enb) { 874 + /* 875 + * Turn off low power mode if it was enabled. 
This is a bit of 876 + * a heavy operation and we disable / enable IRQs a lot, so 877 + * we'll leave low power mode disabled and it will get 878 + * re-enabled again in dw_mci_setup_bus(). 879 + */ 880 + dw_mci_disable_low_power(slot); 881 + 901 882 mci_writel(host, INTMASK, 902 883 (int_mask | SDMMC_INT_SDIO(slot->id))); 903 884 } else { ··· 1464 1429 nbytes += len; 1465 1430 remain -= len; 1466 1431 } while (remain); 1467 - sg_miter->consumed = offset; 1468 1432 1433 + sg_miter->consumed = offset; 1469 1434 status = mci_readl(host, MINTSTS); 1470 1435 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 1471 - if (status & DW_MCI_DATA_ERROR_FLAGS) { 1472 - host->data_status = status; 1473 - data->bytes_xfered += nbytes; 1474 - sg_miter_stop(sg_miter); 1475 - host->sg = NULL; 1476 - smp_wmb(); 1477 - 1478 - set_bit(EVENT_DATA_ERROR, &host->pending_events); 1479 - 1480 - tasklet_schedule(&host->tasklet); 1481 - return; 1482 - } 1483 1436 } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ 1484 1437 data->bytes_xfered += nbytes; 1485 1438 ··· 1520 1497 nbytes += len; 1521 1498 remain -= len; 1522 1499 } while (remain); 1523 - sg_miter->consumed = offset; 1524 1500 1501 + sg_miter->consumed = offset; 1525 1502 status = mci_readl(host, MINTSTS); 1526 1503 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 1527 - if (status & DW_MCI_DATA_ERROR_FLAGS) { 1528 - host->data_status = status; 1529 - data->bytes_xfered += nbytes; 1530 - sg_miter_stop(sg_miter); 1531 - host->sg = NULL; 1532 - 1533 - smp_wmb(); 1534 - 1535 - set_bit(EVENT_DATA_ERROR, &host->pending_events); 1536 - 1537 - tasklet_schedule(&host->tasklet); 1538 - return; 1539 - } 1540 1504 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ 1541 1505 data->bytes_xfered += nbytes; 1542 1506 ··· 1557 1547 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 1558 1548 { 1559 1549 struct dw_mci *host = dev_id; 1560 - u32 status, pending; 1550 + u32 pending; 1561 1551 unsigned int pass_count = 0; 1562 
1552 int i; 1563 1553 1564 1554 do { 1565 - status = mci_readl(host, RINTSTS); 1566 1555 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 1567 1556 1568 1557 /* ··· 1579 1570 1580 1571 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 1581 1572 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 1582 - host->cmd_status = status; 1573 + host->cmd_status = pending; 1583 1574 smp_wmb(); 1584 1575 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 1585 1576 } ··· 1587 1578 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 1588 1579 /* if there is an error report DATA_ERROR */ 1589 1580 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 1590 - host->data_status = status; 1581 + host->data_status = pending; 1591 1582 smp_wmb(); 1592 1583 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1593 - if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC | 1594 - SDMMC_INT_SBE | SDMMC_INT_EBE))) 1595 - tasklet_schedule(&host->tasklet); 1584 + tasklet_schedule(&host->tasklet); 1596 1585 } 1597 1586 1598 1587 if (pending & SDMMC_INT_DATA_OVER) { 1599 1588 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 1600 1589 if (!host->data_status) 1601 - host->data_status = status; 1590 + host->data_status = pending; 1602 1591 smp_wmb(); 1603 1592 if (host->dir_status == DW_MCI_RECV_STATUS) { 1604 1593 if (host->sg != NULL) ··· 1620 1613 1621 1614 if (pending & SDMMC_INT_CMD_DONE) { 1622 1615 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); 1623 - dw_mci_cmd_interrupt(host, status); 1616 + dw_mci_cmd_interrupt(host, pending); 1624 1617 } 1625 1618 1626 1619 if (pending & SDMMC_INT_CD) {
+7 -7
drivers/mmc/host/mxs-mmc.c
··· 285 285 writel(stat & MXS_MMC_IRQ_BITS, 286 286 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR); 287 287 288 + spin_unlock(&host->lock); 289 + 288 290 if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) 289 291 mmc_signal_sdio_irq(host->mmc); 290 - 291 - spin_unlock(&host->lock); 292 292 293 293 if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) 294 294 cmd->error = -ETIMEDOUT; ··· 644 644 host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); 645 645 writel(BM_SSP_CTRL1_SDIO_IRQ_EN, 646 646 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET); 647 - 648 - if (readl(host->base + HW_SSP_STATUS(host)) & 649 - BM_SSP_STATUS_SDIO_IRQ) 650 - mmc_signal_sdio_irq(host->mmc); 651 - 652 647 } else { 653 648 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, 654 649 host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); ··· 652 657 } 653 658 654 659 spin_unlock_irqrestore(&host->lock, flags); 660 + 661 + if (enable && readl(host->base + HW_SSP_STATUS(host)) & 662 + BM_SSP_STATUS_SDIO_IRQ) 663 + mmc_signal_sdio_irq(host->mmc); 664 + 655 665 } 656 666 657 667 static const struct mmc_host_ops mxs_mmc_ops = {
+11 -3
drivers/mmc/host/omap.c
··· 668 668 static void 669 669 mmc_omap_xfer_data(struct mmc_omap_host *host, int write) 670 670 { 671 - int n; 671 + int n, nwords; 672 672 673 673 if (host->buffer_bytes_left == 0) { 674 674 host->sg_idx++; ··· 678 678 n = 64; 679 679 if (n > host->buffer_bytes_left) 680 680 n = host->buffer_bytes_left; 681 + 682 + nwords = n / 2; 683 + nwords += n & 1; /* handle odd number of bytes to transfer */ 684 + 681 685 host->buffer_bytes_left -= n; 682 686 host->total_bytes_left -= n; 683 687 host->data->bytes_xfered += n; 684 688 685 689 if (write) { 686 - __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); 690 + __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), 691 + host->buffer, nwords); 687 692 } else { 688 - __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); 693 + __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), 694 + host->buffer, nwords); 689 695 } 696 + 697 + host->buffer += nwords; 690 698 } 691 699 692 700 static inline void mmc_omap_report_irq(u16 status)
+3 -3
drivers/mmc/host/sdhci-esdhc.h
··· 48 48 int div = 1; 49 49 u32 temp; 50 50 51 + if (clock == 0) 52 + goto out; 53 + 51 54 temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); 52 55 temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN 53 56 | ESDHC_CLOCK_MASK); 54 57 sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); 55 - 56 - if (clock == 0) 57 - goto out; 58 58 59 59 while (host->max_clk / pre_div / 16 > clock && pre_div < 256) 60 60 pre_div *= 2;
+2 -2
drivers/mtd/ubi/vtbl.c
··· 340 340 * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'. 341 341 */ 342 342 err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); 343 - kfree(new_aeb); 343 + kmem_cache_free(ai->aeb_slab_cache, new_aeb); 344 344 ubi_free_vid_hdr(ubi, vid_hdr); 345 345 return err; 346 346 ··· 353 353 list_add(&new_aeb->u.list, &ai->erase); 354 354 goto retry; 355 355 } 356 - kfree(new_aeb); 356 + kmem_cache_free(ai->aeb_slab_cache, new_aeb); 357 357 out_free: 358 358 ubi_free_vid_hdr(ubi, vid_hdr); 359 359 return err;
+1 -1
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
··· 149 149 data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); 150 150 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); 151 151 /* clear error status */ 152 - pci_write_config_word(pdev, pci_pcie_cap(pdev) + PCI_EXP_DEVSTA, 152 + pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, 153 153 PCI_EXP_DEVSTA_NFED | 154 154 PCI_EXP_DEVSTA_FED | 155 155 PCI_EXP_DEVSTA_CED |
+4 -14
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 1162 1162 1163 1163 static u8 bnx2x_is_pcie_pending(struct pci_dev *dev) 1164 1164 { 1165 - int pos; 1166 1165 u16 status; 1167 1166 1168 - pos = pci_pcie_cap(dev); 1169 - if (!pos) 1170 - return false; 1171 - 1172 - pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); 1167 + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); 1173 1168 return status & PCI_EXP_DEVSTA_TRPND; 1174 1169 } 1175 1170 ··· 6130 6135 u16 devctl; 6131 6136 int r_order, w_order; 6132 6137 6133 - pci_read_config_word(bp->pdev, 6134 - pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl); 6138 + pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl); 6135 6139 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); 6136 6140 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 6137 6141 if (bp->mrrs == -1) ··· 9368 9374 9369 9375 static int __devinit bnx2x_do_flr(struct bnx2x *bp) 9370 9376 { 9371 - int i, pos; 9377 + int i; 9372 9378 u16 status; 9373 9379 struct pci_dev *dev = bp->pdev; 9374 9380 ··· 9385 9391 return -EINVAL; 9386 9392 } 9387 9393 9388 - pos = pci_pcie_cap(dev); 9389 - if (!pos) 9390 - return -ENOTTY; 9391 - 9392 9394 /* Wait for Transaction Pending bit clean */ 9393 9395 for (i = 0; i < 4; i++) { 9394 9396 if (i) 9395 9397 msleep((1 << (i - 1)) * 100); 9396 9398 9397 - pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); 9399 + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); 9398 9400 if (!(status & PCI_EXP_DEVSTA_TRPND)) 9399 9401 goto clear; 9400 9402 }
+13 -37
drivers/net/ethernet/broadcom/tg3.c
··· 3653 3653 tg3_enable_register_access(tp); 3654 3654 3655 3655 /* Restore the CLKREQ setting. */ 3656 - if (tg3_flag(tp, CLKREQ_BUG)) { 3657 - u16 lnkctl; 3658 - 3659 - pci_read_config_word(tp->pdev, 3660 - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, 3661 - &lnkctl); 3662 - lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN; 3663 - pci_write_config_word(tp->pdev, 3664 - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, 3665 - lnkctl); 3666 - } 3656 + if (tg3_flag(tp, CLKREQ_BUG)) 3657 + pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 3658 + PCI_EXP_LNKCTL_CLKREQ_EN); 3667 3659 3668 3660 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 3669 3661 tw32(TG3PCI_MISC_HOST_CTRL, ··· 4426 4434 4427 4435 /* Prevent send BD corruption. */ 4428 4436 if (tg3_flag(tp, CLKREQ_BUG)) { 4429 - u16 oldlnkctl, newlnkctl; 4430 - 4431 - pci_read_config_word(tp->pdev, 4432 - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, 4433 - &oldlnkctl); 4434 4437 if (tp->link_config.active_speed == SPEED_100 || 4435 4438 tp->link_config.active_speed == SPEED_10) 4436 - newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN; 4439 + pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL, 4440 + PCI_EXP_LNKCTL_CLKREQ_EN); 4437 4441 else 4438 - newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN; 4439 - if (newlnkctl != oldlnkctl) 4440 - pci_write_config_word(tp->pdev, 4441 - pci_pcie_cap(tp->pdev) + 4442 - PCI_EXP_LNKCTL, newlnkctl); 4442 + pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, 4443 + PCI_EXP_LNKCTL_CLKREQ_EN); 4443 4444 } 4444 4445 4445 4446 if (current_link_up != netif_carrier_ok(tp->dev)) { ··· 8039 8054 8040 8055 udelay(120); 8041 8056 8042 - if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) { 8057 + if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) { 8043 8058 u16 val16; 8044 8059 8045 8060 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { ··· 8056 8071 } 8057 8072 8058 8073 /* Clear the "no snoop" and "relaxed ordering" bits. 
*/ 8059 - pci_read_config_word(tp->pdev, 8060 - pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, 8061 - &val16); 8062 - val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN | 8063 - PCI_EXP_DEVCTL_NOSNOOP_EN); 8074 + val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN; 8064 8075 /* 8065 8076 * Older PCIe devices only support the 128 byte 8066 8077 * MPS setting. Enforce the restriction. 8067 8078 */ 8068 8079 if (!tg3_flag(tp, CPMU_PRESENT)) 8069 - val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; 8070 - pci_write_config_word(tp->pdev, 8071 - pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, 8072 - val16); 8080 + val16 |= PCI_EXP_DEVCTL_PAYLOAD; 8081 + pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16); 8073 8082 8074 8083 /* Clear error status */ 8075 - pci_write_config_word(tp->pdev, 8076 - pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA, 8084 + pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA, 8077 8085 PCI_EXP_DEVSTA_CED | 8078 8086 PCI_EXP_DEVSTA_NFED | 8079 8087 PCI_EXP_DEVSTA_FED | ··· 14543 14565 14544 14566 tg3_flag_set(tp, PCI_EXPRESS); 14545 14567 14546 - pci_read_config_word(tp->pdev, 14547 - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, 14548 - &lnkctl); 14568 + pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl); 14549 14569 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { 14550 14570 if (GET_ASIC_REV(tp->pci_chip_rev_id) == 14551 14571 ASIC_REV_5906) {
+8 -14
drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
··· 3289 3289 unsigned int log2_width, pldsize; 3290 3290 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt; 3291 3291 3292 - pci_read_config_word(adap->pdev, 3293 - adap->pdev->pcie_cap + PCI_EXP_DEVCTL, 3294 - &val); 3292 + pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val); 3295 3293 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5; 3296 3294 3297 3295 pci_read_config_word(adap->pdev, 0x2, &devid); 3298 3296 if (devid == 0x37) { 3299 - pci_write_config_word(adap->pdev, 3300 - adap->pdev->pcie_cap + PCI_EXP_DEVCTL, 3301 - val & ~PCI_EXP_DEVCTL_READRQ & 3302 - ~PCI_EXP_DEVCTL_PAYLOAD); 3297 + pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL, 3298 + val & ~PCI_EXP_DEVCTL_READRQ & 3299 + ~PCI_EXP_DEVCTL_PAYLOAD); 3303 3300 pldsize = 0; 3304 3301 } 3305 3302 3306 - pci_read_config_word(adap->pdev, adap->pdev->pcie_cap + PCI_EXP_LNKCTL, 3307 - &val); 3303 + pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val); 3308 3304 3309 3305 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0)); 3310 3306 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx : ··· 3421 3425 static void get_pci_mode(struct adapter *adapter, struct pci_params *p) 3422 3426 { 3423 3427 static unsigned short speed_map[] = { 33, 66, 100, 133 }; 3424 - u32 pci_mode, pcie_cap; 3428 + u32 pci_mode; 3425 3429 3426 - pcie_cap = pci_pcie_cap(adapter->pdev); 3427 - if (pcie_cap) { 3430 + if (pci_is_pcie(adapter->pdev)) { 3428 3431 u16 val; 3429 3432 3430 3433 p->variant = PCI_VARIANT_PCIE; 3431 - pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA, 3432 - &val); 3434 + pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val); 3433 3435 p->width = (val >> 4) & 0x3f; 3434 3436 return; 3435 3437 }
+1 -9
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 3694 3694 3695 3695 static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev) 3696 3696 { 3697 - u16 v; 3698 - int pos; 3699 - 3700 - pos = pci_pcie_cap(dev); 3701 - if (pos > 0) { 3702 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v); 3703 - v |= PCI_EXP_DEVCTL_RELAX_EN; 3704 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v); 3705 - } 3697 + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); 3706 3698 } 3707 3699 3708 3700 /*
+2 -4
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
··· 2741 2741 struct pci_params *p) 2742 2742 { 2743 2743 u16 val; 2744 - u32 pcie_cap = pci_pcie_cap(adapter->pdev); 2745 2744 2746 - if (pcie_cap) { 2747 - pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA, 2748 - &val); 2745 + if (pci_is_pcie(adapter->pdev)) { 2746 + pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val); 2749 2747 p->speed = val & PCI_EXP_LNKSTA_CLS; 2750 2748 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; 2751 2749 }
+8 -19
drivers/net/ethernet/intel/e1000e/netdev.c
··· 5584 5584 */ 5585 5585 if (adapter->flags & FLAG_IS_QUAD_PORT) { 5586 5586 struct pci_dev *us_dev = pdev->bus->self; 5587 - int pos = pci_pcie_cap(us_dev); 5588 5587 u16 devctl; 5589 5588 5590 - pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl); 5591 - pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, 5592 - (devctl & ~PCI_EXP_DEVCTL_CERE)); 5589 + pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl); 5590 + pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, 5591 + (devctl & ~PCI_EXP_DEVCTL_CERE)); 5593 5592 5594 5593 e1000_power_off(pdev, sleep, wake); 5595 5594 5596 - pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); 5595 + pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); 5597 5596 } else { 5598 5597 e1000_power_off(pdev, sleep, wake); 5599 5598 } ··· 5606 5607 #else 5607 5608 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 5608 5609 { 5609 - int pos; 5610 - u16 reg16; 5611 - 5612 5610 /* 5613 5611 * Both device and parent should have the same ASPM setting. 5614 5612 * Disable ASPM in downstream component first and then upstream. 5615 5613 */ 5616 - pos = pci_pcie_cap(pdev); 5617 - pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); 5618 - reg16 &= ~state; 5619 - pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 5614 + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state); 5620 5615 5621 - if (!pdev->bus->self) 5622 - return; 5623 - 5624 - pos = pci_pcie_cap(pdev->bus->self); 5625 - pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16); 5626 - reg16 &= ~state; 5627 - pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); 5616 + if (pdev->bus->self) 5617 + pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL, 5618 + state); 5628 5619 } 5629 5620 #endif 5630 5621 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+2 -10
drivers/net/ethernet/intel/igb/igb_main.c
··· 6538 6538 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) 6539 6539 { 6540 6540 struct igb_adapter *adapter = hw->back; 6541 - u16 cap_offset; 6542 6541 6543 - cap_offset = adapter->pdev->pcie_cap; 6544 - if (!cap_offset) 6542 + if (pcie_capability_read_word(adapter->pdev, reg, value)) 6545 6543 return -E1000_ERR_CONFIG; 6546 - 6547 - pci_read_config_word(adapter->pdev, cap_offset + reg, value); 6548 6544 6549 6545 return 0; 6550 6546 } ··· 6548 6552 s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) 6549 6553 { 6550 6554 struct igb_adapter *adapter = hw->back; 6551 - u16 cap_offset; 6552 6555 6553 - cap_offset = adapter->pdev->pcie_cap; 6554 - if (!cap_offset) 6556 + if (pcie_capability_write_word(adapter->pdev, reg, *value)) 6555 6557 return -E1000_ERR_CONFIG; 6556 - 6557 - pci_write_config_word(adapter->pdev, cap_offset + reg, *value); 6558 6558 6559 6559 return 0; 6560 6560 }
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 7527 7527 goto skip_bad_vf_detection; 7528 7528 7529 7529 bdev = pdev->bus->self; 7530 - while (bdev && (bdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT)) 7530 + while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) 7531 7531 bdev = bdev->bus->self; 7532 7532 7533 7533 if (!bdev)
+4 -4
drivers/net/ethernet/mellanox/mlx4/reset.c
··· 141 141 /* Now restore the PCI headers */ 142 142 if (pcie_cap) { 143 143 devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4]; 144 - if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL, 145 - devctl)) { 144 + if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL, 145 + devctl)) { 146 146 err = -ENODEV; 147 147 mlx4_err(dev, "Couldn't restore HCA PCI Express " 148 148 "Device Control register, aborting.\n"); 149 149 goto out; 150 150 } 151 151 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; 152 - if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL, 153 - linkctl)) { 152 + if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL, 153 + linkctl)) { 154 154 err = -ENODEV; 155 155 mlx4_err(dev, "Couldn't restore HCA PCI Express " 156 156 "Link control register, aborting.\n");
+8 -23
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
··· 1078 1078 #ifdef CONFIG_MYRI10GE_DCA 1079 1079 static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on) 1080 1080 { 1081 - int ret, cap, err; 1081 + int ret; 1082 1082 u16 ctl; 1083 1083 1084 - cap = pci_pcie_cap(pdev); 1085 - if (!cap) 1086 - return 0; 1087 - 1088 - err = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl); 1089 - if (err) 1090 - return 0; 1084 + pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &ctl); 1091 1085 1092 1086 ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4; 1093 1087 if (ret != on) { 1094 1088 ctl &= ~PCI_EXP_DEVCTL_RELAX_EN; 1095 1089 ctl |= (on << 4); 1096 - pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl); 1090 + pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, ctl); 1097 1091 } 1098 1092 return ret; 1099 1093 } ··· 3186 3192 struct device *dev = &mgp->pdev->dev; 3187 3193 int cap; 3188 3194 unsigned err_cap; 3189 - u16 val; 3190 - u8 ext_type; 3191 3195 int ret; 3192 3196 3193 3197 if (!myri10ge_ecrc_enable || !bridge) 3194 3198 return; 3195 3199 3196 3200 /* check that the bridge is a root port */ 3197 - cap = pci_pcie_cap(bridge); 3198 - pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val); 3199 - ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; 3200 - if (ext_type != PCI_EXP_TYPE_ROOT_PORT) { 3201 + if (pci_pcie_type(bridge) != PCI_EXP_TYPE_ROOT_PORT) { 3201 3202 if (myri10ge_ecrc_enable > 1) { 3202 3203 struct pci_dev *prev_bridge, *old_bridge = bridge; 3203 3204 ··· 3207 3218 " to force ECRC\n"); 3208 3219 return; 3209 3220 } 3210 - cap = pci_pcie_cap(bridge); 3211 - pci_read_config_word(bridge, 3212 - cap + PCI_CAP_FLAGS, &val); 3213 - ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; 3214 - } while (ext_type != PCI_EXP_TYPE_ROOT_PORT); 3221 + } while (pci_pcie_type(bridge) != 3222 + PCI_EXP_TYPE_ROOT_PORT); 3215 3223 3216 3224 dev_info(dev, 3217 3225 "Forcing ECRC on non-root port %s" ··· 3321 3335 int overridden = 0; 3322 3336 3323 3337 if (myri10ge_force_firmware == 0) { 3324 - int link_width, exp_cap; 3338 + int 
link_width; 3325 3339 u16 lnk; 3326 3340 3327 - exp_cap = pci_pcie_cap(mgp->pdev); 3328 - pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk); 3341 + pcie_capability_read_word(mgp->pdev, PCI_EXP_LNKSTA, &lnk); 3329 3342 link_width = (lnk >> 4) & 0x3f; 3330 3343 3331 3344 /* Check to see if Link is less than 8 or if the
+2 -2
drivers/net/ethernet/neterion/vxge/vxge-config.c
··· 757 757 u16 lnk; 758 758 759 759 /* Get the negotiated link width and speed from PCI config space */ 760 - pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk); 760 + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk); 761 761 762 762 if ((lnk & PCI_EXP_LNKSTA_CLS) != 1) 763 763 return VXGE_HW_ERR_INVALID_PCI_INFO; ··· 1982 1982 struct pci_dev *dev = hldev->pdev; 1983 1983 u16 lnk; 1984 1984 1985 - pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk); 1985 + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk); 1986 1986 return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4; 1987 1987 } 1988 1988
+1 -1
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
··· 1382 1382 adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP) 1383 1383 return; 1384 1384 1385 - if (root->pcie_type != PCI_EXP_TYPE_ROOT_PORT) 1385 + if (pci_pcie_type(root) != PCI_EXP_TYPE_ROOT_PORT) 1386 1386 return; 1387 1387 1388 1388 aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR);
+9 -35
drivers/net/ethernet/realtek/r8169.c
··· 833 833 834 834 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) 835 835 { 836 - int cap = pci_pcie_cap(pdev); 837 - 838 - if (cap) { 839 - u16 ctl; 840 - 841 - pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl); 842 - ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force; 843 - pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl); 844 - } 836 + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, 837 + PCI_EXP_DEVCTL_READRQ, force); 845 838 } 846 839 847 840 struct rtl_cond { ··· 4732 4739 4733 4740 static void rtl_disable_clock_request(struct pci_dev *pdev) 4734 4741 { 4735 - int cap = pci_pcie_cap(pdev); 4736 - 4737 - if (cap) { 4738 - u16 ctl; 4739 - 4740 - pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl); 4741 - ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN; 4742 - pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl); 4743 - } 4742 + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, 4743 + PCI_EXP_LNKCTL_CLKREQ_EN); 4744 4744 } 4745 4745 4746 4746 static void rtl_enable_clock_request(struct pci_dev *pdev) 4747 4747 { 4748 - int cap = pci_pcie_cap(pdev); 4749 - 4750 - if (cap) { 4751 - u16 ctl; 4752 - 4753 - pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl); 4754 - ctl |= PCI_EXP_LNKCTL_CLKREQ_EN; 4755 - pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl); 4756 - } 4748 + pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, 4749 + PCI_EXP_LNKCTL_CLKREQ_EN); 4757 4750 } 4758 4751 4759 4752 #define R8168_CPCMD_QUIRK_MASK (\ ··· 5384 5405 tp->event_slow &= ~RxFIFOOver; 5385 5406 5386 5407 if (tp->mac_version == RTL_GIGA_MAC_VER_13 || 5387 - tp->mac_version == RTL_GIGA_MAC_VER_16) { 5388 - int cap = pci_pcie_cap(pdev); 5389 - 5390 - if (cap) { 5391 - pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, 5392 - PCI_EXP_DEVCTL_NOSNOOP_EN); 5393 - } 5394 - } 5408 + tp->mac_version == RTL_GIGA_MAC_VER_16) 5409 + pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, 5410 + PCI_EXP_DEVCTL_NOSNOOP_EN); 5395 5411 5396 5412 RTL_W8(Cfg9346, Cfg9346_Unlock); 5397 
5413
+7 -12
drivers/net/ethernet/sun/niu.c
··· 9762 9762 union niu_parent_id parent_id; 9763 9763 struct net_device *dev; 9764 9764 struct niu *np; 9765 - int err, pos; 9765 + int err; 9766 9766 u64 dma_mask; 9767 - u16 val16; 9768 9767 9769 9768 niu_driver_version(); 9770 9769 ··· 9786 9787 goto err_out_disable_pdev; 9787 9788 } 9788 9789 9789 - pos = pci_pcie_cap(pdev); 9790 - if (pos <= 0) { 9790 + if (!pci_is_pcie(pdev)) { 9791 9791 dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n"); 9792 9792 goto err_out_free_res; 9793 9793 } ··· 9811 9813 goto err_out_free_dev; 9812 9814 } 9813 9815 9814 - pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); 9815 - val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; 9816 - val16 |= (PCI_EXP_DEVCTL_CERE | 9817 - PCI_EXP_DEVCTL_NFERE | 9818 - PCI_EXP_DEVCTL_FERE | 9819 - PCI_EXP_DEVCTL_URRE | 9820 - PCI_EXP_DEVCTL_RELAX_EN); 9821 - pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); 9816 + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, 9817 + PCI_EXP_DEVCTL_NOSNOOP_EN, 9818 + PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | 9819 + PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE | 9820 + PCI_EXP_DEVCTL_RELAX_EN); 9822 9821 9823 9822 dma_mask = DMA_BIT_MASK(44); 9824 9823 err = pci_set_dma_mask(pdev, dma_mask);
+6 -15
drivers/net/wireless/ath/ath9k/pci.c
··· 113 113 struct ath_hw *ah = sc->sc_ah; 114 114 struct pci_dev *pdev = to_pci_dev(sc->dev); 115 115 struct pci_dev *parent; 116 - int pos; 117 - u8 aspm; 116 + u16 aspm; 118 117 119 118 if (!ah->is_pciexpress) 120 - return; 121 - 122 - pos = pci_pcie_cap(pdev); 123 - if (!pos) 124 119 return; 125 120 126 121 parent = pdev->bus->self; ··· 124 129 125 130 if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) { 126 131 /* Bluetooth coexistance requires disabling ASPM. */ 127 - pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm); 128 - aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); 129 - pci_write_config_byte(pdev, pos + PCI_EXP_LNKCTL, aspm); 132 + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, 133 + PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); 130 134 131 135 /* 132 136 * Both upstream and downstream PCIe components should 133 137 * have the same ASPM settings. 134 138 */ 135 - pos = pci_pcie_cap(parent); 136 - pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm); 137 - aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); 138 - pci_write_config_byte(parent, pos + PCI_EXP_LNKCTL, aspm); 139 + pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, 140 + PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); 139 141 140 142 ath_info(common, "Disabling ASPM since BTCOEX is enabled\n"); 141 143 return; 142 144 } 143 145 144 - pos = pci_pcie_cap(parent); 145 - pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm); 146 + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm); 146 147 if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) { 147 148 ah->aspm_enabled = true; 148 149 /* Initialize PCIe PM and SERDES registers. */
+1 -3
drivers/net/wireless/iwlegacy/common.h
··· 1832 1832 static inline u16 1833 1833 il_pcie_link_ctl(struct il_priv *il) 1834 1834 { 1835 - int pos; 1836 1835 u16 pci_lnk_ctl; 1837 - pos = pci_pcie_cap(il->pci_dev); 1838 - pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); 1836 + pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &pci_lnk_ctl); 1839 1837 return pci_lnk_ctl; 1840 1838 } 1841 1839
+2 -5
drivers/net/wireless/iwlwifi/pcie/trans.c
··· 675 675 static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans) 676 676 { 677 677 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 678 - int pos; 679 678 u16 pci_lnk_ctl; 680 679 681 - struct pci_dev *pci_dev = trans_pcie->pci_dev; 682 - 683 - pos = pci_pcie_cap(pci_dev); 684 - pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); 680 + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, 681 + &pci_lnk_ctl); 685 682 return pci_lnk_ctl; 686 683 } 687 684
+3 -5
drivers/net/wireless/rtlwifi/pci.c
··· 372 372 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); 373 373 374 374 u8 tmp; 375 - int pos; 376 - u8 linkctrl_reg; 375 + u16 linkctrl_reg; 377 376 378 377 /*Link Control Register */ 379 - pos = pci_pcie_cap(pdev); 380 - pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg); 381 - pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg; 378 + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &linkctrl_reg); 379 + pcipriv->ndis_adapter.linkctrl_reg = (u8)linkctrl_reg; 382 380 383 381 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n", 384 382 pcipriv->ndis_adapter.linkctrl_reg);
+202
drivers/pci/access.c
··· 469 469 raw_spin_unlock_irqrestore(&pci_lock, flags); 470 470 } 471 471 EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); 472 + 473 + static inline int pcie_cap_version(const struct pci_dev *dev) 474 + { 475 + return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS; 476 + } 477 + 478 + static inline bool pcie_cap_has_devctl(const struct pci_dev *dev) 479 + { 480 + return true; 481 + } 482 + 483 + static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev) 484 + { 485 + int type = pci_pcie_type(dev); 486 + 487 + return pcie_cap_version(dev) > 1 || 488 + type == PCI_EXP_TYPE_ROOT_PORT || 489 + type == PCI_EXP_TYPE_ENDPOINT || 490 + type == PCI_EXP_TYPE_LEG_END; 491 + } 492 + 493 + static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev) 494 + { 495 + int type = pci_pcie_type(dev); 496 + 497 + return pcie_cap_version(dev) > 1 || 498 + type == PCI_EXP_TYPE_ROOT_PORT || 499 + (type == PCI_EXP_TYPE_DOWNSTREAM && 500 + dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT); 501 + } 502 + 503 + static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev) 504 + { 505 + int type = pci_pcie_type(dev); 506 + 507 + return pcie_cap_version(dev) > 1 || 508 + type == PCI_EXP_TYPE_ROOT_PORT || 509 + type == PCI_EXP_TYPE_RC_EC; 510 + } 511 + 512 + static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) 513 + { 514 + if (!pci_is_pcie(dev)) 515 + return false; 516 + 517 + switch (pos) { 518 + case PCI_EXP_FLAGS_TYPE: 519 + return true; 520 + case PCI_EXP_DEVCAP: 521 + case PCI_EXP_DEVCTL: 522 + case PCI_EXP_DEVSTA: 523 + return pcie_cap_has_devctl(dev); 524 + case PCI_EXP_LNKCAP: 525 + case PCI_EXP_LNKCTL: 526 + case PCI_EXP_LNKSTA: 527 + return pcie_cap_has_lnkctl(dev); 528 + case PCI_EXP_SLTCAP: 529 + case PCI_EXP_SLTCTL: 530 + case PCI_EXP_SLTSTA: 531 + return pcie_cap_has_sltctl(dev); 532 + case PCI_EXP_RTCTL: 533 + case PCI_EXP_RTCAP: 534 + case PCI_EXP_RTSTA: 535 + return pcie_cap_has_rtctl(dev); 536 + case PCI_EXP_DEVCAP2: 537 + case PCI_EXP_DEVCTL2: 538 + case 
PCI_EXP_LNKCAP2: 539 + case PCI_EXP_LNKCTL2: 540 + case PCI_EXP_LNKSTA2: 541 + return pcie_cap_version(dev) > 1; 542 + default: 543 + return false; 544 + } 545 + } 546 + 547 + /* 548 + * Note that these accessor functions are only for the "PCI Express 549 + * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the 550 + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) 551 + */ 552 + int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) 553 + { 554 + int ret; 555 + 556 + *val = 0; 557 + if (pos & 1) 558 + return -EINVAL; 559 + 560 + if (pcie_capability_reg_implemented(dev, pos)) { 561 + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); 562 + /* 563 + * Reset *val to 0 if pci_read_config_word() fails, it may 564 + * have been written as 0xFFFF if hardware error happens 565 + * during pci_read_config_word(). 566 + */ 567 + if (ret) 568 + *val = 0; 569 + return ret; 570 + } 571 + 572 + /* 573 + * For Functions that do not implement the Slot Capabilities, 574 + * Slot Status, and Slot Control registers, these spaces must 575 + * be hardwired to 0b, with the exception of the Presence Detect 576 + * State bit in the Slot Status register of Downstream Ports, 577 + * which must be hardwired to 1b. 
(PCIe Base Spec 3.0, sec 7.8) 578 + */ 579 + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && 580 + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { 581 + *val = PCI_EXP_SLTSTA_PDS; 582 + } 583 + 584 + return 0; 585 + } 586 + EXPORT_SYMBOL(pcie_capability_read_word); 587 + 588 + int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) 589 + { 590 + int ret; 591 + 592 + *val = 0; 593 + if (pos & 3) 594 + return -EINVAL; 595 + 596 + if (pcie_capability_reg_implemented(dev, pos)) { 597 + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); 598 + /* 599 + * Reset *val to 0 if pci_read_config_dword() fails, it may 600 + * have been written as 0xFFFFFFFF if hardware error happens 601 + * during pci_read_config_dword(). 602 + */ 603 + if (ret) 604 + *val = 0; 605 + return ret; 606 + } 607 + 608 + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL && 609 + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { 610 + *val = PCI_EXP_SLTSTA_PDS; 611 + } 612 + 613 + return 0; 614 + } 615 + EXPORT_SYMBOL(pcie_capability_read_dword); 616 + 617 + int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) 618 + { 619 + if (pos & 1) 620 + return -EINVAL; 621 + 622 + if (!pcie_capability_reg_implemented(dev, pos)) 623 + return 0; 624 + 625 + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); 626 + } 627 + EXPORT_SYMBOL(pcie_capability_write_word); 628 + 629 + int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) 630 + { 631 + if (pos & 3) 632 + return -EINVAL; 633 + 634 + if (!pcie_capability_reg_implemented(dev, pos)) 635 + return 0; 636 + 637 + return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val); 638 + } 639 + EXPORT_SYMBOL(pcie_capability_write_dword); 640 + 641 + int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, 642 + u16 clear, u16 set) 643 + { 644 + int ret; 645 + u16 val; 646 + 647 + ret = pcie_capability_read_word(dev, pos, &val); 648 + if (!ret) { 649 + val &= ~clear; 650 + val |= set; 
651 + ret = pcie_capability_write_word(dev, pos, val); 652 + } 653 + 654 + return ret; 655 + } 656 + EXPORT_SYMBOL(pcie_capability_clear_and_set_word); 657 + 658 + int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos, 659 + u32 clear, u32 set) 660 + { 661 + int ret; 662 + u32 val; 663 + 664 + ret = pcie_capability_read_dword(dev, pos, &val); 665 + if (!ret) { 666 + val &= ~clear; 667 + val |= set; 668 + ret = pcie_capability_write_dword(dev, pos, val); 669 + } 670 + 671 + return ret; 672 + } 673 + EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
+1 -5
drivers/pci/hotplug/pciehp_acpi.c
··· 81 81 /* Dummy driver for dumplicate name detection */ 82 82 static int __init dummy_probe(struct pcie_device *dev) 83 83 { 84 - int pos; 85 84 u32 slot_cap; 86 85 acpi_handle handle; 87 86 struct dummy_slot *slot, *tmp; 88 87 struct pci_dev *pdev = dev->port; 89 88 90 - pos = pci_pcie_cap(pdev); 91 - if (!pos) 92 - return -ENODEV; 93 - pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap); 89 + pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap); 94 90 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 95 91 if (!slot) 96 92 return -ENOMEM;
+4 -8
drivers/pci/hotplug/pciehp_hpc.c
··· 44 44 static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) 45 45 { 46 46 struct pci_dev *dev = ctrl->pcie->port; 47 - return pci_read_config_word(dev, pci_pcie_cap(dev) + reg, value); 47 + return pcie_capability_read_word(dev, reg, value); 48 48 } 49 49 50 50 static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value) 51 51 { 52 52 struct pci_dev *dev = ctrl->pcie->port; 53 - return pci_read_config_dword(dev, pci_pcie_cap(dev) + reg, value); 53 + return pcie_capability_read_dword(dev, reg, value); 54 54 } 55 55 56 56 static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value) 57 57 { 58 58 struct pci_dev *dev = ctrl->pcie->port; 59 - return pci_write_config_word(dev, pci_pcie_cap(dev) + reg, value); 59 + return pcie_capability_write_word(dev, reg, value); 60 60 } 61 61 62 62 static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value) 63 63 { 64 64 struct pci_dev *dev = ctrl->pcie->port; 65 - return pci_write_config_dword(dev, pci_pcie_cap(dev) + reg, value); 65 + return pcie_capability_write_dword(dev, reg, value); 66 66 } 67 67 68 68 /* Power Control Command */ ··· 855 855 goto abort; 856 856 } 857 857 ctrl->pcie = dev; 858 - if (!pci_pcie_cap(pdev)) { 859 - ctrl_err(ctrl, "Cannot find PCI Express capability\n"); 860 - goto abort_ctrl; 861 - } 862 858 if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) { 863 859 ctrl_err(ctrl, "Cannot read SLOTCAP register\n"); 864 860 goto abort_ctrl;
+5 -15
drivers/pci/hotplug/pcihp_slot.c
··· 96 96 static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) 97 97 { 98 98 int pos; 99 - u16 reg16; 100 99 u32 reg32; 101 100 102 101 if (!hpp) 103 - return; 104 - 105 - /* Find PCI Express capability */ 106 - pos = pci_pcie_cap(dev); 107 - if (!pos) 108 102 return; 109 103 110 104 if (hpp->revision > 1) { ··· 108 114 } 109 115 110 116 /* Initialize Device Control Register */ 111 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16); 112 - reg16 = (reg16 & hpp->pci_exp_devctl_and) | hpp->pci_exp_devctl_or; 113 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16); 117 + pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, 118 + ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or); 114 119 115 120 /* Initialize Link Control Register */ 116 - if (dev->subordinate) { 117 - pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &reg16); 118 - reg16 = (reg16 & hpp->pci_exp_lnkctl_and) 119 - | hpp->pci_exp_lnkctl_or; 120 - pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, reg16); 121 - } 121 + if (dev->subordinate) 122 + pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, 123 + ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or); 122 124 123 125 /* Find Advanced Error Reporting Enhanced Capability */ 124 126 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+3 -3
drivers/pci/iov.c
··· 433 433 struct resource *res; 434 434 struct pci_dev *pdev; 435 435 436 - if (dev->pcie_type != PCI_EXP_TYPE_RC_END && 437 - dev->pcie_type != PCI_EXP_TYPE_ENDPOINT) 436 + if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END && 437 + pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT) 438 438 return -ENODEV; 439 439 440 440 pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl); ··· 503 503 iov->self = dev; 504 504 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); 505 505 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); 506 - if (dev->pcie_type == PCI_EXP_TYPE_RC_END) 506 + if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) 507 507 iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link); 508 508 509 509 if (pdev)
+6
drivers/pci/pci-driver.c
··· 280 280 { 281 281 struct drv_dev_and_id *ddi = _ddi; 282 282 struct device *dev = &ddi->dev->dev; 283 + struct device *parent = dev->parent; 283 284 int rc; 284 285 286 + /* The parent bridge must be in active state when probing */ 287 + if (parent) 288 + pm_runtime_get_sync(parent); 285 289 /* Unbound PCI devices are always set to disabled and suspended. 286 290 * During probe, the device is set to enabled and active and the 287 291 * usage count is incremented. If the driver supports runtime PM, ··· 302 298 pm_runtime_set_suspended(dev); 303 299 pm_runtime_put_noidle(dev); 304 300 } 301 + if (parent) 302 + pm_runtime_put(parent); 305 303 return rc; 306 304 } 307 305
+42
drivers/pci/pci-sysfs.c
··· 458 458 } 459 459 struct device_attribute vga_attr = __ATTR_RO(boot_vga); 460 460 461 + static void 462 + pci_config_pm_runtime_get(struct pci_dev *pdev) 463 + { 464 + struct device *dev = &pdev->dev; 465 + struct device *parent = dev->parent; 466 + 467 + if (parent) 468 + pm_runtime_get_sync(parent); 469 + pm_runtime_get_noresume(dev); 470 + /* 471 + * pdev->current_state is set to PCI_D3cold during suspending, 472 + * so wait until suspending completes 473 + */ 474 + pm_runtime_barrier(dev); 475 + /* 476 + * Only need to resume devices in D3cold, because config 477 + * registers are still accessible for devices suspended but 478 + * not in D3cold. 479 + */ 480 + if (pdev->current_state == PCI_D3cold) 481 + pm_runtime_resume(dev); 482 + } 483 + 484 + static void 485 + pci_config_pm_runtime_put(struct pci_dev *pdev) 486 + { 487 + struct device *dev = &pdev->dev; 488 + struct device *parent = dev->parent; 489 + 490 + pm_runtime_put(dev); 491 + if (parent) 492 + pm_runtime_put_sync(parent); 493 + } 494 + 461 495 static ssize_t 462 496 pci_read_config(struct file *filp, struct kobject *kobj, 463 497 struct bin_attribute *bin_attr, ··· 517 483 } else { 518 484 size = count; 519 485 } 486 + 487 + pci_config_pm_runtime_get(dev); 520 488 521 489 if ((off & 1) && size) { 522 490 u8 val; ··· 565 529 --size; 566 530 } 567 531 532 + pci_config_pm_runtime_put(dev); 533 + 568 534 return count; 569 535 } 570 536 ··· 587 549 count = size; 588 550 } 589 551 552 + pci_config_pm_runtime_get(dev); 553 + 590 554 if ((off & 1) && size) { 591 555 pci_user_write_config_byte(dev, off, data[off - init_off]); 592 556 off++; ··· 626 586 off++; 627 587 --size; 628 588 } 589 + 590 + pci_config_pm_runtime_put(dev); 629 591 630 592 return count; 631 593 }
+64 -253
drivers/pci/pci.c
··· 254 254 } 255 255 256 256 /** 257 - * pci_pcie_cap2 - query for devices' PCI_CAP_ID_EXP v2 capability structure 258 - * @dev: PCI device to check 259 - * 260 - * Like pci_pcie_cap() but also checks that the PCIe capability version is 261 - * >= 2. Note that v1 capability structures could be sparse in that not 262 - * all register fields were required. v2 requires the entire structure to 263 - * be present size wise, while still allowing for non-implemented registers 264 - * to exist but they must be hardwired to 0. 265 - * 266 - * Due to the differences in the versions of capability structures, one 267 - * must be careful not to try and access non-existant registers that may 268 - * exist in early versions - v1 - of Express devices. 269 - * 270 - * Returns the offset of the PCIe capability structure as long as the 271 - * capability version is >= 2; otherwise 0 is returned. 272 - */ 273 - static int pci_pcie_cap2(struct pci_dev *dev) 274 - { 275 - u16 flags; 276 - int pos; 277 - 278 - pos = pci_pcie_cap(dev); 279 - if (pos) { 280 - pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags); 281 - if ((flags & PCI_EXP_FLAGS_VERS) < 2) 282 - pos = 0; 283 - } 284 - 285 - return pos; 286 - } 287 - 288 - /** 289 257 * pci_find_ext_capability - Find an extended capability 290 258 * @dev: PCI device to query 291 259 * @cap: capability code ··· 822 854 823 855 #define PCI_EXP_SAVE_REGS 7 824 856 825 - #define pcie_cap_has_devctl(type, flags) 1 826 - #define pcie_cap_has_lnkctl(type, flags) \ 827 - ((flags & PCI_EXP_FLAGS_VERS) > 1 || \ 828 - (type == PCI_EXP_TYPE_ROOT_PORT || \ 829 - type == PCI_EXP_TYPE_ENDPOINT || \ 830 - type == PCI_EXP_TYPE_LEG_END)) 831 - #define pcie_cap_has_sltctl(type, flags) \ 832 - ((flags & PCI_EXP_FLAGS_VERS) > 1 || \ 833 - ((type == PCI_EXP_TYPE_ROOT_PORT) || \ 834 - (type == PCI_EXP_TYPE_DOWNSTREAM && \ 835 - (flags & PCI_EXP_FLAGS_SLOT)))) 836 - #define pcie_cap_has_rtctl(type, flags) \ 837 - ((flags & PCI_EXP_FLAGS_VERS) > 1 || \ 838 - 
(type == PCI_EXP_TYPE_ROOT_PORT || \ 839 - type == PCI_EXP_TYPE_RC_EC)) 840 857 841 858 static struct pci_cap_saved_state *pci_find_saved_cap( 842 859 struct pci_dev *pci_dev, char cap) ··· 838 885 839 886 static int pci_save_pcie_state(struct pci_dev *dev) 840 887 { 841 - int pos, i = 0; 888 + int i = 0; 842 889 struct pci_cap_saved_state *save_state; 843 890 u16 *cap; 844 - u16 flags; 845 891 846 - pos = pci_pcie_cap(dev); 847 - if (!pos) 892 + if (!pci_is_pcie(dev)) 848 893 return 0; 849 894 850 895 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); ··· 850 899 dev_err(&dev->dev, "buffer not found in %s\n", __func__); 851 900 return -ENOMEM; 852 901 } 902 + 853 903 cap = (u16 *)&save_state->cap.data[0]; 904 + pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]); 905 + pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]); 906 + pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]); 907 + pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]); 908 + pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]); 909 + pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]); 910 + pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]); 854 911 855 - pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags); 856 - 857 - if (pcie_cap_has_devctl(dev->pcie_type, flags)) 858 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]); 859 - if (pcie_cap_has_lnkctl(dev->pcie_type, flags)) 860 - pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]); 861 - if (pcie_cap_has_sltctl(dev->pcie_type, flags)) 862 - pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]); 863 - if (pcie_cap_has_rtctl(dev->pcie_type, flags)) 864 - pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]); 865 - 866 - pos = pci_pcie_cap2(dev); 867 - if (!pos) 868 - return 0; 869 - 870 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]); 871 - pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]); 872 - pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, 
&cap[i++]); 873 912 return 0; 874 913 } 875 914 876 915 static void pci_restore_pcie_state(struct pci_dev *dev) 877 916 { 878 - int i = 0, pos; 917 + int i = 0; 879 918 struct pci_cap_saved_state *save_state; 880 919 u16 *cap; 881 - u16 flags; 882 920 883 921 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 884 - pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 885 - if (!save_state || pos <= 0) 922 + if (!save_state) 886 923 return; 924 + 887 925 cap = (u16 *)&save_state->cap.data[0]; 888 - 889 - pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags); 890 - 891 - if (pcie_cap_has_devctl(dev->pcie_type, flags)) 892 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]); 893 - if (pcie_cap_has_lnkctl(dev->pcie_type, flags)) 894 - pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]); 895 - if (pcie_cap_has_sltctl(dev->pcie_type, flags)) 896 - pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]); 897 - if (pcie_cap_has_rtctl(dev->pcie_type, flags)) 898 - pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]); 899 - 900 - pos = pci_pcie_cap2(dev); 901 - if (!pos) 902 - return; 903 - 904 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]); 905 - pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]); 906 - pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]); 926 + pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]); 927 + pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]); 928 + pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]); 929 + pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]); 930 + pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]); 931 + pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]); 932 + pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]); 907 933 } 908 934 909 935 ··· 1869 1941 dev->pm_cap = pm; 1870 1942 dev->d3_delay = PCI_PM_D3_WAIT; 1871 1943 dev->d3cold_delay = PCI_PM_D3COLD_WAIT; 1944 + dev->d3cold_allowed = true; 1872 1945 1873 1946 dev->d1_support 
= false; 1874 1947 dev->d2_support = false; ··· 1995 2066 */ 1996 2067 void pci_enable_ari(struct pci_dev *dev) 1997 2068 { 1998 - int pos; 1999 2069 u32 cap; 2000 - u16 ctrl; 2001 2070 struct pci_dev *bridge; 2002 2071 2003 2072 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn) 2004 2073 return; 2005 2074 2006 - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); 2007 - if (!pos) 2075 + if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) 2008 2076 return; 2009 2077 2010 2078 bridge = dev->bus->self; 2011 2079 if (!bridge) 2012 2080 return; 2013 2081 2014 - /* ARI is a PCIe cap v2 feature */ 2015 - pos = pci_pcie_cap2(bridge); 2016 - if (!pos) 2017 - return; 2018 - 2019 - pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap); 2082 + pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap); 2020 2083 if (!(cap & PCI_EXP_DEVCAP2_ARI)) 2021 2084 return; 2022 2085 2023 - pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl); 2024 - ctrl |= PCI_EXP_DEVCTL2_ARI; 2025 - pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl); 2026 - 2086 + pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ARI); 2027 2087 bridge->ari_enabled = 1; 2028 2088 } 2029 2089 ··· 2027 2109 */ 2028 2110 void pci_enable_ido(struct pci_dev *dev, unsigned long type) 2029 2111 { 2030 - int pos; 2031 - u16 ctrl; 2112 + u16 ctrl = 0; 2032 2113 2033 - /* ID-based Ordering is a PCIe cap v2 feature */ 2034 - pos = pci_pcie_cap2(dev); 2035 - if (!pos) 2036 - return; 2037 - 2038 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); 2039 2114 if (type & PCI_EXP_IDO_REQUEST) 2040 2115 ctrl |= PCI_EXP_IDO_REQ_EN; 2041 2116 if (type & PCI_EXP_IDO_COMPLETION) 2042 2117 ctrl |= PCI_EXP_IDO_CMP_EN; 2043 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); 2118 + if (ctrl) 2119 + pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl); 2044 2120 } 2045 2121 EXPORT_SYMBOL(pci_enable_ido); 2046 2122 ··· 2045 2133 */ 2046 2134 void pci_disable_ido(struct pci_dev *dev, 
unsigned long type) 2047 2135 { 2048 - int pos; 2049 - u16 ctrl; 2136 + u16 ctrl = 0; 2050 2137 2051 - /* ID-based Ordering is a PCIe cap v2 feature */ 2052 - pos = pci_pcie_cap2(dev); 2053 - if (!pos) 2054 - return; 2055 - 2056 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); 2057 2138 if (type & PCI_EXP_IDO_REQUEST) 2058 - ctrl &= ~PCI_EXP_IDO_REQ_EN; 2139 + ctrl |= PCI_EXP_IDO_REQ_EN; 2059 2140 if (type & PCI_EXP_IDO_COMPLETION) 2060 - ctrl &= ~PCI_EXP_IDO_CMP_EN; 2061 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); 2141 + ctrl |= PCI_EXP_IDO_CMP_EN; 2142 + if (ctrl) 2143 + pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl); 2062 2144 } 2063 2145 EXPORT_SYMBOL(pci_disable_ido); 2064 2146 ··· 2077 2171 */ 2078 2172 int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type) 2079 2173 { 2080 - int pos; 2081 2174 u32 cap; 2082 2175 u16 ctrl; 2083 2176 int ret; 2084 2177 2085 - /* OBFF is a PCIe cap v2 feature */ 2086 - pos = pci_pcie_cap2(dev); 2087 - if (!pos) 2088 - return -ENOTSUPP; 2089 - 2090 - pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap); 2178 + pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); 2091 2179 if (!(cap & PCI_EXP_OBFF_MASK)) 2092 2180 return -ENOTSUPP; /* no OBFF support at all */ 2093 2181 ··· 2092 2192 return ret; 2093 2193 } 2094 2194 2095 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); 2195 + pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl); 2096 2196 if (cap & PCI_EXP_OBFF_WAKE) 2097 2197 ctrl |= PCI_EXP_OBFF_WAKE_EN; 2098 2198 else { ··· 2110 2210 return -ENOTSUPP; 2111 2211 } 2112 2212 } 2113 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); 2213 + pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl); 2114 2214 2115 2215 return 0; 2116 2216 } ··· 2124 2224 */ 2125 2225 void pci_disable_obff(struct pci_dev *dev) 2126 2226 { 2127 - int pos; 2128 - u16 ctrl; 2129 - 2130 - /* OBFF is a PCIe cap v2 feature */ 2131 - pos = pci_pcie_cap2(dev); 2132 - if (!pos) 2133 - 
return; 2134 - 2135 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); 2136 - ctrl &= ~PCI_EXP_OBFF_WAKE_EN; 2137 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); 2227 + pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN); 2138 2228 } 2139 2229 EXPORT_SYMBOL(pci_disable_obff); 2140 2230 ··· 2137 2247 */ 2138 2248 static bool pci_ltr_supported(struct pci_dev *dev) 2139 2249 { 2140 - int pos; 2141 2250 u32 cap; 2142 2251 2143 - /* LTR is a PCIe cap v2 feature */ 2144 - pos = pci_pcie_cap2(dev); 2145 - if (!pos) 2146 - return false; 2147 - 2148 - pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap); 2252 + pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); 2149 2253 2150 2254 return cap & PCI_EXP_DEVCAP2_LTR; 2151 2255 } ··· 2156 2272 */ 2157 2273 int pci_enable_ltr(struct pci_dev *dev) 2158 2274 { 2159 - int pos; 2160 - u16 ctrl; 2161 2275 int ret; 2162 - 2163 - if (!pci_ltr_supported(dev)) 2164 - return -ENOTSUPP; 2165 - 2166 - /* LTR is a PCIe cap v2 feature */ 2167 - pos = pci_pcie_cap2(dev); 2168 - if (!pos) 2169 - return -ENOTSUPP; 2170 2276 2171 2277 /* Only primary function can enable/disable LTR */ 2172 2278 if (PCI_FUNC(dev->devfn) != 0) 2173 2279 return -EINVAL; 2280 + 2281 + if (!pci_ltr_supported(dev)) 2282 + return -ENOTSUPP; 2174 2283 2175 2284 /* Enable upstream ports first */ 2176 2285 if (dev->bus->self) { ··· 2172 2295 return ret; 2173 2296 } 2174 2297 2175 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); 2176 - ctrl |= PCI_EXP_LTR_EN; 2177 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); 2178 - 2179 - return 0; 2298 + return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN); 2180 2299 } 2181 2300 EXPORT_SYMBOL(pci_enable_ltr); 2182 2301 ··· 2182 2309 */ 2183 2310 void pci_disable_ltr(struct pci_dev *dev) 2184 2311 { 2185 - int pos; 2186 - u16 ctrl; 2187 - 2188 - if (!pci_ltr_supported(dev)) 2189 - return; 2190 - 2191 - /* LTR is a PCIe cap v2 feature */ 2192 - pos = 
pci_pcie_cap2(dev); 2193 - if (!pos) 2194 - return; 2195 - 2196 2312 /* Only primary function can enable/disable LTR */ 2197 2313 if (PCI_FUNC(dev->devfn) != 0) 2198 2314 return; 2199 2315 2200 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl); 2201 - ctrl &= ~PCI_EXP_LTR_EN; 2202 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl); 2316 + if (!pci_ltr_supported(dev)) 2317 + return; 2318 + 2319 + pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN); 2203 2320 } 2204 2321 EXPORT_SYMBOL(pci_disable_ltr); 2205 2322 ··· 2272 2409 if (!pci_acs_enable) 2273 2410 return; 2274 2411 2275 - if (!pci_is_pcie(dev)) 2276 - return; 2277 - 2278 2412 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); 2279 2413 if (!pos) 2280 2414 return; ··· 2319 2459 acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | 2320 2460 PCI_ACS_EC | PCI_ACS_DT); 2321 2461 2322 - if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM || 2323 - pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT || 2462 + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM || 2463 + pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || 2324 2464 pdev->multifunction) { 2325 2465 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); 2326 2466 if (!pos) ··· 3036 3176 static int pcie_flr(struct pci_dev *dev, int probe) 3037 3177 { 3038 3178 int i; 3039 - int pos; 3040 3179 u32 cap; 3041 - u16 status, control; 3180 + u16 status; 3042 3181 3043 - pos = pci_pcie_cap(dev); 3044 - if (!pos) 3045 - return -ENOTTY; 3046 - 3047 - pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap); 3182 + pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); 3048 3183 if (!(cap & PCI_EXP_DEVCAP_FLR)) 3049 3184 return -ENOTTY; 3050 3185 ··· 3051 3196 if (i) 3052 3197 msleep((1 << (i - 1)) * 100); 3053 3198 3054 - pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status); 3199 + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); 3055 3200 if (!(status & PCI_EXP_DEVSTA_TRPND)) 3056 3201 goto clear; 3057 3202 } ··· 3060 3205 "proceeding with reset 
anyway\n"); 3061 3206 3062 3207 clear: 3063 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control); 3064 - control |= PCI_EXP_DEVCTL_BCR_FLR; 3065 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control); 3208 + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); 3066 3209 3067 3210 msleep(100); 3068 3211 ··· 3428 3575 */ 3429 3576 int pcie_get_readrq(struct pci_dev *dev) 3430 3577 { 3431 - int ret, cap; 3432 3578 u16 ctl; 3433 3579 3434 - cap = pci_pcie_cap(dev); 3435 - if (!cap) 3436 - return -EINVAL; 3580 + pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); 3437 3581 3438 - ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); 3439 - if (!ret) 3440 - ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); 3441 - 3442 - return ret; 3582 + return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); 3443 3583 } 3444 3584 EXPORT_SYMBOL(pcie_get_readrq); 3445 3585 ··· 3446 3600 */ 3447 3601 int pcie_set_readrq(struct pci_dev *dev, int rq) 3448 3602 { 3449 - int cap, err = -EINVAL; 3450 - u16 ctl, v; 3603 + u16 v; 3451 3604 3452 3605 if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) 3453 - goto out; 3606 + return -EINVAL; 3454 3607 3455 - cap = pci_pcie_cap(dev); 3456 - if (!cap) 3457 - goto out; 3458 - 3459 - err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); 3460 - if (err) 3461 - goto out; 3462 3608 /* 3463 3609 * If using the "performance" PCIe config, we clamp the 3464 3610 * read rq size to the max packet size to prevent the ··· 3468 3630 3469 3631 v = (ffs(rq) - 8) << 12; 3470 3632 3471 - if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) { 3472 - ctl &= ~PCI_EXP_DEVCTL_READRQ; 3473 - ctl |= v; 3474 - err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl); 3475 - } 3476 - 3477 - out: 3478 - return err; 3633 + return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, 3634 + PCI_EXP_DEVCTL_READRQ, v); 3479 3635 } 3480 3636 EXPORT_SYMBOL(pcie_set_readrq); 3481 3637 ··· 3482 3650 */ 3483 3651 int pcie_get_mps(struct pci_dev *dev) 
3484 3652 { 3485 - int ret, cap; 3486 3653 u16 ctl; 3487 3654 3488 - cap = pci_pcie_cap(dev); 3489 - if (!cap) 3490 - return -EINVAL; 3655 + pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); 3491 3656 3492 - ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); 3493 - if (!ret) 3494 - ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 3495 - 3496 - return ret; 3657 + return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 3497 3658 } 3498 3659 3499 3660 /** ··· 3499 3674 */ 3500 3675 int pcie_set_mps(struct pci_dev *dev, int mps) 3501 3676 { 3502 - int cap, err = -EINVAL; 3503 - u16 ctl, v; 3677 + u16 v; 3504 3678 3505 3679 if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) 3506 - goto out; 3680 + return -EINVAL; 3507 3681 3508 3682 v = ffs(mps) - 8; 3509 3683 if (v > dev->pcie_mpss) 3510 - goto out; 3684 + return -EINVAL; 3511 3685 v <<= 5; 3512 3686 3513 - cap = pci_pcie_cap(dev); 3514 - if (!cap) 3515 - goto out; 3516 - 3517 - err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); 3518 - if (err) 3519 - goto out; 3520 - 3521 - if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) { 3522 - ctl &= ~PCI_EXP_DEVCTL_PAYLOAD; 3523 - ctl |= v; 3524 - err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl); 3525 - } 3526 - out: 3527 - return err; 3687 + return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, 3688 + PCI_EXP_DEVCTL_PAYLOAD, v); 3528 3689 } 3529 3690 3530 3691 /**
+1 -1
drivers/pci/pcie/aer/aer_inject.c
··· 288 288 while (1) { 289 289 if (!pci_is_pcie(dev)) 290 290 break; 291 - if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) 291 + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) 292 292 return dev; 293 293 if (!dev->bus->self) 294 294 break;
+11 -13
drivers/pci/pcie/aer/aerdrv.c
··· 81 81 static int set_device_error_reporting(struct pci_dev *dev, void *data) 82 82 { 83 83 bool enable = *((bool *)data); 84 + int type = pci_pcie_type(dev); 84 85 85 - if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) || 86 - (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) || 87 - (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) { 86 + if ((type == PCI_EXP_TYPE_ROOT_PORT) || 87 + (type == PCI_EXP_TYPE_UPSTREAM) || 88 + (type == PCI_EXP_TYPE_DOWNSTREAM)) { 88 89 if (enable) 89 90 pci_enable_pcie_error_reporting(dev); 90 91 else ··· 122 121 static void aer_enable_rootport(struct aer_rpc *rpc) 123 122 { 124 123 struct pci_dev *pdev = rpc->rpd->port; 125 - int pos, aer_pos; 124 + int aer_pos; 126 125 u16 reg16; 127 126 u32 reg32; 128 127 129 - pos = pci_pcie_cap(pdev); 130 128 /* Clear PCIe Capability's Device Status */ 131 - pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16); 132 - pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16); 129 + pcie_capability_read_word(pdev, PCI_EXP_DEVSTA, &reg16); 130 + pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, reg16); 133 131 134 132 /* Disable system error generation in response to error messages */ 135 - pci_read_config_word(pdev, pos + PCI_EXP_RTCTL, &reg16); 136 - reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK); 137 - pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16); 133 + pcie_capability_clear_word(pdev, PCI_EXP_RTCTL, 134 + SYSTEM_ERROR_INTR_ON_MESG_MASK); 138 135 139 136 aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); 140 137 /* Clear error status */ ··· 394 395 u16 reg16; 395 396 396 397 /* Clean up Root device status */ 397 - pos = pci_pcie_cap(dev); 398 - pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &reg16); 399 - pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16); 398 + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &reg16); 399 + pcie_capability_write_word(dev, PCI_EXP_DEVSTA, reg16); 400 400 401 401 /* Clean AER Root Error Status */ 402 402 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+1 -1
drivers/pci/pcie/aer/aerdrv_acpi.c
··· 60 60 p = (struct acpi_hest_aer_common *)(hest_hdr + 1); 61 61 if (p->flags & ACPI_HEST_GLOBAL) { 62 62 if ((pci_is_pcie(info->pci_dev) && 63 - info->pci_dev->pcie_type == pcie_type) || bridge) 63 + pci_pcie_type(info->pci_dev) == pcie_type) || bridge) 64 64 ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); 65 65 } else 66 66 if (hest_match_pci(p, info->pci_dev))
+11 -42
drivers/pci/pcie/aer/aerdrv_core.c
··· 32 32 module_param(forceload, bool, 0); 33 33 module_param(nosourceid, bool, 0); 34 34 35 + #define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \ 36 + PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE) 37 + 35 38 int pci_enable_pcie_error_reporting(struct pci_dev *dev) 36 39 { 37 - u16 reg16 = 0; 38 - int pos; 39 - 40 40 if (pcie_aer_get_firmware_first(dev)) 41 41 return -EIO; 42 42 43 - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 44 - if (!pos) 43 + if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) 45 44 return -EIO; 46 45 47 - pos = pci_pcie_cap(dev); 48 - if (!pos) 49 - return -EIO; 50 - 51 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16); 52 - reg16 |= (PCI_EXP_DEVCTL_CERE | 53 - PCI_EXP_DEVCTL_NFERE | 54 - PCI_EXP_DEVCTL_FERE | 55 - PCI_EXP_DEVCTL_URRE); 56 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16); 57 - 58 - return 0; 46 + return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS); 59 47 } 60 48 EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); 61 49 62 50 int pci_disable_pcie_error_reporting(struct pci_dev *dev) 63 51 { 64 - u16 reg16 = 0; 65 - int pos; 66 - 67 52 if (pcie_aer_get_firmware_first(dev)) 68 53 return -EIO; 69 54 70 - pos = pci_pcie_cap(dev); 71 - if (!pos) 72 - return -EIO; 73 - 74 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16); 75 - reg16 &= ~(PCI_EXP_DEVCTL_CERE | 76 - PCI_EXP_DEVCTL_NFERE | 77 - PCI_EXP_DEVCTL_FERE | 78 - PCI_EXP_DEVCTL_URRE); 79 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16); 80 - 81 - return 0; 55 + return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, 56 + PCI_EXP_AER_FLAGS); 82 57 } 83 58 EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); 84 59 ··· 126 151 */ 127 152 if (atomic_read(&dev->enable_cnt) == 0) 128 153 return false; 129 - pos = pci_pcie_cap(dev); 130 - if (!pos) 131 - return false; 132 154 133 155 /* Check if AER is enabled */ 134 - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16); 135 - if (!(reg16 & ( 
136 - PCI_EXP_DEVCTL_CERE | 137 - PCI_EXP_DEVCTL_NFERE | 138 - PCI_EXP_DEVCTL_FERE | 139 - PCI_EXP_DEVCTL_URRE))) 156 + pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16); 157 + if (!(reg16 & PCI_EXP_AER_FLAGS)) 140 158 return false; 159 + 141 160 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 142 161 if (!pos) 143 162 return false; ··· 434 465 435 466 if (driver && driver->reset_link) { 436 467 status = driver->reset_link(udev); 437 - } else if (udev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) { 468 + } else if (pci_pcie_type(udev) == PCI_EXP_TYPE_DOWNSTREAM) { 438 469 status = default_downstream_reset_link(udev); 439 470 } else { 440 471 dev_printk(KERN_DEBUG, &dev->dev,
+47 -72
drivers/pci/pcie/aspm.c
··· 125 125 126 126 static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable) 127 127 { 128 - int pos; 129 - u16 reg16; 130 128 struct pci_dev *child; 131 129 struct pci_bus *linkbus = link->pdev->subordinate; 132 130 133 131 list_for_each_entry(child, &linkbus->devices, bus_list) { 134 - pos = pci_pcie_cap(child); 135 - if (!pos) 136 - return; 137 - pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16); 138 132 if (enable) 139 - reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN; 133 + pcie_capability_set_word(child, PCI_EXP_LNKCTL, 134 + PCI_EXP_LNKCTL_CLKREQ_EN); 140 135 else 141 - reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN; 142 - pci_write_config_word(child, pos + PCI_EXP_LNKCTL, reg16); 136 + pcie_capability_clear_word(child, PCI_EXP_LNKCTL, 137 + PCI_EXP_LNKCTL_CLKREQ_EN); 143 138 } 144 139 link->clkpm_enabled = !!enable; 145 140 } ··· 152 157 153 158 static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) 154 159 { 155 - int pos, capable = 1, enabled = 1; 160 + int capable = 1, enabled = 1; 156 161 u32 reg32; 157 162 u16 reg16; 158 163 struct pci_dev *child; ··· 160 165 161 166 /* All functions should have the same cap and state, take the worst */ 162 167 list_for_each_entry(child, &linkbus->devices, bus_list) { 163 - pos = pci_pcie_cap(child); 164 - if (!pos) 165 - return; 166 - pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32); 168 + pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32); 167 169 if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) { 168 170 capable = 0; 169 171 enabled = 0; 170 172 break; 171 173 } 172 - pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16); 174 + pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16); 173 175 if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN)) 174 176 enabled = 0; 175 177 } ··· 182 190 */ 183 191 static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) 184 192 { 185 - int ppos, cpos, same_clock = 1; 193 + int same_clock = 1; 186 194 u16 reg16, parent_reg, child_reg[8]; 187 195 
unsigned long start_jiffies; 188 196 struct pci_dev *child, *parent = link->pdev; ··· 195 203 BUG_ON(!pci_is_pcie(child)); 196 204 197 205 /* Check downstream component if bit Slot Clock Configuration is 1 */ 198 - cpos = pci_pcie_cap(child); 199 - pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16); 206 + pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16); 200 207 if (!(reg16 & PCI_EXP_LNKSTA_SLC)) 201 208 same_clock = 0; 202 209 203 210 /* Check upstream component if bit Slot Clock Configuration is 1 */ 204 - ppos = pci_pcie_cap(parent); 205 - pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16); 211 + pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16); 206 212 if (!(reg16 & PCI_EXP_LNKSTA_SLC)) 207 213 same_clock = 0; 208 214 209 215 /* Configure downstream component, all functions */ 210 216 list_for_each_entry(child, &linkbus->devices, bus_list) { 211 - cpos = pci_pcie_cap(child); 212 - pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16); 217 + pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16); 213 218 child_reg[PCI_FUNC(child->devfn)] = reg16; 214 219 if (same_clock) 215 220 reg16 |= PCI_EXP_LNKCTL_CCC; 216 221 else 217 222 reg16 &= ~PCI_EXP_LNKCTL_CCC; 218 - pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, reg16); 223 + pcie_capability_write_word(child, PCI_EXP_LNKCTL, reg16); 219 224 } 220 225 221 226 /* Configure upstream component */ 222 - pci_read_config_word(parent, ppos + PCI_EXP_LNKCTL, &reg16); 227 + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16); 223 228 parent_reg = reg16; 224 229 if (same_clock) 225 230 reg16 |= PCI_EXP_LNKCTL_CCC; 226 231 else 227 232 reg16 &= ~PCI_EXP_LNKCTL_CCC; 228 - pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16); 233 + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); 229 234 230 235 /* Retrain link */ 231 236 reg16 |= PCI_EXP_LNKCTL_RL; 232 - pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16); 237 + pcie_capability_write_word(parent, 
PCI_EXP_LNKCTL, reg16); 233 238 234 239 /* Wait for link training end. Break out after waiting for timeout */ 235 240 start_jiffies = jiffies; 236 241 for (;;) { 237 - pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16); 242 + pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16); 238 243 if (!(reg16 & PCI_EXP_LNKSTA_LT)) 239 244 break; 240 245 if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) ··· 244 255 /* Training failed. Restore common clock configurations */ 245 256 dev_printk(KERN_ERR, &parent->dev, 246 257 "ASPM: Could not configure common clock\n"); 247 - list_for_each_entry(child, &linkbus->devices, bus_list) { 248 - cpos = pci_pcie_cap(child); 249 - pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, 250 - child_reg[PCI_FUNC(child->devfn)]); 251 - } 252 - pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, parent_reg); 258 + list_for_each_entry(child, &linkbus->devices, bus_list) 259 + pcie_capability_write_word(child, PCI_EXP_LNKCTL, 260 + child_reg[PCI_FUNC(child->devfn)]); 261 + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg); 253 262 } 254 263 255 264 /* Convert L0s latency encoding to ns */ ··· 292 305 static void pcie_get_aspm_reg(struct pci_dev *pdev, 293 306 struct aspm_register_info *info) 294 307 { 295 - int pos; 296 308 u16 reg16; 297 309 u32 reg32; 298 310 299 - pos = pci_pcie_cap(pdev); 300 - pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32); 311 + pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &reg32); 301 312 info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; 302 313 info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; 303 314 info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15; 304 - pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); 315 + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &reg16); 305 316 info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC; 306 317 } 307 318 ··· 397 412 * do ASPM for now. 
398 413 */ 399 414 list_for_each_entry(child, &linkbus->devices, bus_list) { 400 - if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { 415 + if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) { 401 416 link->aspm_disable = ASPM_STATE_ALL; 402 417 break; 403 418 } ··· 405 420 406 421 /* Get and check endpoint acceptable latencies */ 407 422 list_for_each_entry(child, &linkbus->devices, bus_list) { 408 - int pos; 409 423 u32 reg32, encoding; 410 424 struct aspm_latency *acceptable = 411 425 &link->acceptable[PCI_FUNC(child->devfn)]; 412 426 413 - if (child->pcie_type != PCI_EXP_TYPE_ENDPOINT && 414 - child->pcie_type != PCI_EXP_TYPE_LEG_END) 427 + if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT && 428 + pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END) 415 429 continue; 416 430 417 - pos = pci_pcie_cap(child); 418 - pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32); 431 + pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32); 419 432 /* Calculate endpoint L0s acceptable latency */ 420 433 encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; 421 434 acceptable->l0s = calc_l0s_acceptable(encoding); ··· 427 444 428 445 static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) 429 446 { 430 - u16 reg16; 431 - int pos = pci_pcie_cap(pdev); 432 - 433 - pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); 434 - reg16 &= ~0x3; 435 - reg16 |= val; 436 - pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); 447 + pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL, 0x3, val); 437 448 } 438 449 439 450 static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state) ··· 482 505 static int pcie_aspm_sanity_check(struct pci_dev *pdev) 483 506 { 484 507 struct pci_dev *child; 485 - int pos; 486 508 u32 reg32; 487 509 488 510 /* ··· 489 513 * very strange. 
Disable ASPM for the whole slot 490 514 */ 491 515 list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { 492 - pos = pci_pcie_cap(child); 493 - if (!pos) 516 + if (!pci_is_pcie(child)) 494 517 return -EINVAL; 495 518 496 519 /* ··· 505 530 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use 506 531 * RBER bit to determine if a function is 1.1 version device 507 532 */ 508 - pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32); 533 + pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32); 509 534 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { 510 535 dev_printk(KERN_INFO, &child->dev, "disabling ASPM" 511 536 " on pre-1.1 PCIe device. You can enable it" ··· 527 552 INIT_LIST_HEAD(&link->children); 528 553 INIT_LIST_HEAD(&link->link); 529 554 link->pdev = pdev; 530 - if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) { 555 + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM) { 531 556 struct pcie_link_state *parent; 532 557 parent = pdev->bus->parent->self->link_state; 533 558 if (!parent) { ··· 560 585 561 586 if (!pci_is_pcie(pdev) || pdev->link_state) 562 587 return; 563 - if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && 564 - pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) 588 + if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT && 589 + pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) 565 590 return; 566 591 567 592 /* VIA has a strange chipset, root port is under a bridge */ 568 - if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT && 593 + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT && 569 594 pdev->bus->self) 570 595 return; 571 596 ··· 622 647 if (link->root != root) 623 648 continue; 624 649 list_for_each_entry(child, &linkbus->devices, bus_list) { 625 - if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT) && 626 - (child->pcie_type != PCI_EXP_TYPE_LEG_END)) 650 + if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) && 651 + (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)) 627 652 continue; 628 653 pcie_aspm_check_latency(child); 629 
654 } ··· 638 663 639 664 if (!pci_is_pcie(pdev) || !parent || !parent->link_state) 640 665 return; 641 - if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && 642 - (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) 666 + if ((pci_pcie_type(parent) != PCI_EXP_TYPE_ROOT_PORT) && 667 + (pci_pcie_type(parent) != PCI_EXP_TYPE_DOWNSTREAM)) 643 668 return; 644 669 645 670 down_read(&pci_bus_sem); ··· 679 704 680 705 if (aspm_disabled || !pci_is_pcie(pdev) || !link) 681 706 return; 682 - if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && 683 - (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) 707 + if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) && 708 + (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)) 684 709 return; 685 710 /* 686 711 * Devices changed PM state, we should recheck if latency ··· 704 729 if (aspm_policy != POLICY_POWERSAVE) 705 730 return; 706 731 707 - if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && 708 - (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) 732 + if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) && 733 + (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)) 709 734 return; 710 735 711 736 down_read(&pci_bus_sem); ··· 732 757 if (!pci_is_pcie(pdev)) 733 758 return; 734 759 735 - if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT || 736 - pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) 760 + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || 761 + pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM) 737 762 parent = pdev; 738 763 if (!parent || !parent->link_state) 739 764 return; ··· 908 933 struct pcie_link_state *link_state = pdev->link_state; 909 934 910 935 if (!pci_is_pcie(pdev) || 911 - (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && 912 - pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) 936 + (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT && 937 + pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) 913 938 return; 914 939 915 940 if (link_state->aspm_support) ··· 925 950 struct pcie_link_state *link_state = pdev->link_state; 926 
951 927 952 if (!pci_is_pcie(pdev) || 928 - (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && 929 - pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) 953 + (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT && 954 + pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) 930 955 return; 931 956 932 957 if (link_state->aspm_support)
+9 -20
drivers/pci/pcie/pme.c
··· 57 57 */ 58 58 void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable) 59 59 { 60 - int rtctl_pos; 61 - u16 rtctl; 62 - 63 - rtctl_pos = pci_pcie_cap(dev) + PCI_EXP_RTCTL; 64 - 65 - pci_read_config_word(dev, rtctl_pos, &rtctl); 66 60 if (enable) 67 - rtctl |= PCI_EXP_RTCTL_PMEIE; 61 + pcie_capability_set_word(dev, PCI_EXP_RTCTL, 62 + PCI_EXP_RTCTL_PMEIE); 68 63 else 69 - rtctl &= ~PCI_EXP_RTCTL_PMEIE; 70 - pci_write_config_word(dev, rtctl_pos, rtctl); 64 + pcie_capability_clear_word(dev, PCI_EXP_RTCTL, 65 + PCI_EXP_RTCTL_PMEIE); 71 66 } 72 67 73 68 /** ··· 115 120 if (!dev) 116 121 return false; 117 122 118 - if (pci_is_pcie(dev) && dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) { 123 + if (pci_is_pcie(dev) && pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) { 119 124 down_read(&pci_bus_sem); 120 125 if (pcie_pme_walk_bus(bus)) 121 126 found = true; ··· 221 226 struct pcie_pme_service_data *data = 222 227 container_of(work, struct pcie_pme_service_data, work); 223 228 struct pci_dev *port = data->srv->port; 224 - int rtsta_pos; 225 229 u32 rtsta; 226 - 227 - rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA; 228 230 229 231 spin_lock_irq(&data->lock); 230 232 ··· 229 237 if (data->noirq) 230 238 break; 231 239 232 - pci_read_config_dword(port, rtsta_pos, &rtsta); 240 + pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); 233 241 if (rtsta & PCI_EXP_RTSTA_PME) { 234 242 /* 235 243 * Clear PME status of the port. 
If there are other ··· 268 276 { 269 277 struct pci_dev *port; 270 278 struct pcie_pme_service_data *data; 271 - int rtsta_pos; 272 279 u32 rtsta; 273 280 unsigned long flags; 274 281 275 282 port = ((struct pcie_device *)context)->port; 276 283 data = get_service_data((struct pcie_device *)context); 277 284 278 - rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA; 279 - 280 285 spin_lock_irqsave(&data->lock, flags); 281 - pci_read_config_dword(port, rtsta_pos, &rtsta); 286 + pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); 282 287 283 288 if (!(rtsta & PCI_EXP_RTSTA_PME)) { 284 289 spin_unlock_irqrestore(&data->lock, flags); ··· 324 335 struct pci_dev *dev; 325 336 326 337 /* Check if this is a root port event collector. */ 327 - if (port->pcie_type != PCI_EXP_TYPE_RC_EC || !bus) 338 + if (pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC || !bus) 328 339 return; 329 340 330 341 down_read(&pci_bus_sem); 331 342 list_for_each_entry(dev, &bus->devices, bus_list) 332 343 if (pci_is_pcie(dev) 333 - && dev->pcie_type == PCI_EXP_TYPE_RC_END) 344 + && pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) 334 345 pcie_pme_set_native(dev, NULL); 335 346 up_read(&pci_bus_sem); 336 347 }
+1 -1
drivers/pci/pcie/portdrv_bus.c
··· 38 38 return 0; 39 39 40 40 if ((driver->port_type != PCIE_ANY_PORT) && 41 - (driver->port_type != pciedev->port->pcie_type)) 41 + (driver->port_type != pci_pcie_type(pciedev->port))) 42 42 return 0; 43 43 44 44 return 1;
+7 -12
drivers/pci/pcie/portdrv_core.c
··· 246 246 */ 247 247 static int get_port_device_capability(struct pci_dev *dev) 248 248 { 249 - int services = 0, pos; 250 - u16 reg16; 249 + int services = 0; 251 250 u32 reg32; 252 251 int cap_mask = 0; 253 252 int err; ··· 264 265 return 0; 265 266 } 266 267 267 - pos = pci_pcie_cap(dev); 268 - pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16); 269 268 /* Hot-Plug Capable */ 270 - if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) { 271 - pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32); 269 + if (cap_mask & PCIE_PORT_SERVICE_HP) { 270 + pcie_capability_read_dword(dev, PCI_EXP_SLTCAP, &reg32); 272 271 if (reg32 & PCI_EXP_SLTCAP_HPC) { 273 272 services |= PCIE_PORT_SERVICE_HP; 274 273 /* ··· 274 277 * enabled by the BIOS and the hot-plug service driver 275 278 * is not loaded. 276 279 */ 277 - pos += PCI_EXP_SLTCTL; 278 - pci_read_config_word(dev, pos, &reg16); 279 - reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); 280 - pci_write_config_word(dev, pos, reg16); 280 + pcie_capability_clear_word(dev, PCI_EXP_SLTCTL, 281 + PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); 281 282 } 282 283 } 283 284 /* AER capable */ ··· 293 298 services |= PCIE_PORT_SERVICE_VC; 294 299 /* Root ports are capable of generating PME too */ 295 300 if ((cap_mask & PCIE_PORT_SERVICE_PME) 296 - && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) { 301 + && pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) { 297 302 services |= PCIE_PORT_SERVICE_PME; 298 303 /* 299 304 * Disable PME interrupt on this port in case it's been enabled ··· 331 336 device->release = release_pcie_device; /* callback to free pcie dev */ 332 337 dev_set_name(device, "%s:pcie%02x", 333 338 pci_name(pdev), 334 - get_descriptor_id(pdev->pcie_type, service)); 339 + get_descriptor_id(pci_pcie_type(pdev), service)); 335 340 device->parent = &pdev->dev; 336 341 device_enable_async_suspend(device); 337 342
+19 -12
drivers/pci/pcie/portdrv_pci.c
··· 64 64 */ 65 65 void pcie_clear_root_pme_status(struct pci_dev *dev) 66 66 { 67 - int rtsta_pos; 68 - u32 rtsta; 69 - 70 - rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA; 71 - 72 - pci_read_config_dword(dev, rtsta_pos, &rtsta); 73 - rtsta |= PCI_EXP_RTSTA_PME; 74 - pci_write_config_dword(dev, rtsta_pos, rtsta); 67 + pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME); 75 68 } 76 69 77 70 static int pcie_portdrv_restore_config(struct pci_dev *dev) ··· 88 95 * which breaks ACPI-based runtime wakeup on PCI Express, so clear those 89 96 * bits now just in case (shouldn't hurt). 90 97 */ 91 - if(pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) 98 + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) 92 99 pcie_clear_root_pme_status(pdev); 93 100 return 0; 94 101 } ··· 133 140 { 134 141 return 0; 135 142 } 143 + 144 + static int pcie_port_runtime_idle(struct device *dev) 145 + { 146 + /* Delay for a short while to prevent too frequent suspend/resume */ 147 + pm_schedule_suspend(dev, 10); 148 + return -EBUSY; 149 + } 136 150 #else 137 151 #define pcie_port_runtime_suspend NULL 138 152 #define pcie_port_runtime_resume NULL 153 + #define pcie_port_runtime_idle NULL 139 154 #endif 140 155 141 156 static const struct dev_pm_ops pcie_portdrv_pm_ops = { ··· 156 155 .resume_noirq = pcie_port_resume_noirq, 157 156 .runtime_suspend = pcie_port_runtime_suspend, 158 157 .runtime_resume = pcie_port_runtime_resume, 158 + .runtime_idle = pcie_port_runtime_idle, 159 159 }; 160 160 161 161 #define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) ··· 188 186 int status; 189 187 190 188 if (!pci_is_pcie(dev) || 191 - ((dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && 192 - (dev->pcie_type != PCI_EXP_TYPE_UPSTREAM) && 193 - (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))) 189 + ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) && 190 + (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM) && 191 + (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM))) 194 192 return -ENODEV; 195 193 196 194 if (!dev->irq && 
dev->pin) { ··· 202 200 return status; 203 201 204 202 pci_save_state(dev); 203 + /* 204 + * D3cold may not work properly on some PCIe port, so disable 205 + * it by default. 206 + */ 207 + dev->d3cold_allowed = false; 205 208 if (!pci_match_id(port_runtime_pm_black_list, dev)) 206 209 pm_runtime_put_noidle(&dev->dev); 207 210
+27 -32
drivers/pci/probe.c
··· 144 144 case PCI_BASE_ADDRESS_MEM_TYPE_32: 145 145 break; 146 146 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 147 - dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n"); 147 + /* 1M mem BAR treated as 32-bit BAR */ 148 148 break; 149 149 case PCI_BASE_ADDRESS_MEM_TYPE_64: 150 150 flags |= IORESOURCE_MEM_64; 151 151 break; 152 152 default: 153 - dev_warn(&dev->dev, 154 - "mem unknown type %x treated as 32-bit BAR\n", 155 - mem_type); 153 + /* mem unknown type treated as 32-bit BAR */ 156 154 break; 157 155 } 158 156 return flags; ··· 171 173 u32 l, sz, mask; 172 174 u16 orig_cmd; 173 175 struct pci_bus_region region; 176 + bool bar_too_big = false, bar_disabled = false; 174 177 175 178 mask = type ? PCI_ROM_ADDRESS_MASK : ~0; 176 179 180 + /* No printks while decoding is disabled! */ 177 181 if (!dev->mmio_always_on) { 178 182 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd); 179 183 pci_write_config_word(dev, PCI_COMMAND, ··· 240 240 goto fail; 241 241 242 242 if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) { 243 - dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", 244 - pos); 243 + bar_too_big = true; 245 244 goto fail; 246 245 } 247 246 ··· 251 252 region.start = 0; 252 253 region.end = sz64; 253 254 pcibios_bus_to_resource(dev, res, &region); 255 + bar_disabled = true; 254 256 } else { 255 257 region.start = l64; 256 258 region.end = l64 + sz64; 257 259 pcibios_bus_to_resource(dev, res, &region); 258 - dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", 259 - pos, res); 260 260 } 261 261 } else { 262 262 sz = pci_size(l, sz, mask); ··· 266 268 region.start = l; 267 269 region.end = l + sz; 268 270 pcibios_bus_to_resource(dev, res, &region); 269 - 270 - dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); 271 271 } 272 272 273 - out: 273 + goto out; 274 + 275 + 276 + fail: 277 + res->flags = 0; 278 + out: 274 279 if (!dev->mmio_always_on) 275 280 pci_write_config_word(dev, PCI_COMMAND, orig_cmd); 276 281 282 + if (bar_too_big) 283 + 
dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos); 284 + if (res->flags && !bar_disabled) 285 + dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); 286 + 277 287 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0; 278 - fail: 279 - res->flags = 0; 280 - goto out; 281 288 } 282 289 283 290 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) ··· 606 603 u32 linkcap; 607 604 u16 linksta; 608 605 609 - pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap); 606 + pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap); 610 607 bus->max_bus_speed = pcie_link_speed[linkcap & 0xf]; 611 608 612 - pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta); 609 + pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta); 613 610 pcie_update_link_speed(bus, linksta); 614 611 } 615 612 } ··· 932 929 pdev->is_pcie = 1; 933 930 pdev->pcie_cap = pos; 934 931 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); 935 - pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; 932 + pdev->pcie_flags_reg = reg16; 936 933 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16); 937 934 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; 938 935 } 939 936 940 937 void set_pcie_hotplug_bridge(struct pci_dev *pdev) 941 938 { 942 - int pos; 943 - u16 reg16; 944 939 u32 reg32; 945 940 946 - pos = pci_pcie_cap(pdev); 947 - if (!pos) 948 - return; 949 - pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); 950 - if (!(reg16 & PCI_EXP_FLAGS_SLOT)) 951 - return; 952 - pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &reg32); 941 + pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32); 953 942 if (reg32 & PCI_EXP_SLTCAP_HPC) 954 943 pdev->is_hotplug_bridge = 1; 955 944 } ··· 1155 1160 if (class == PCI_CLASS_BRIDGE_HOST) 1156 1161 return pci_cfg_space_size_ext(dev); 1157 1162 1158 - pos = pci_pcie_cap(dev); 1159 - if (!pos) { 1163 + if (!pci_is_pcie(dev)) { 1160 1164 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); 1161 1165 if 
(!pos) 1162 1166 goto fail; ··· 1377 1383 1378 1384 if (!parent || !pci_is_pcie(parent)) 1379 1385 return 0; 1380 - if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT) 1386 + if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT) 1381 1387 return 1; 1382 - if (parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM && 1388 + if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM && 1383 1389 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS)) 1384 1390 return 1; 1385 1391 return 0; ··· 1456 1462 */ 1457 1463 if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) || 1458 1464 (dev->bus->self && 1459 - dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT))) 1465 + pci_pcie_type(dev->bus->self) != PCI_EXP_TYPE_ROOT_PORT))) 1460 1466 *smpss = 0; 1461 1467 1462 1468 if (*smpss > dev->pcie_mpss) ··· 1472 1478 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { 1473 1479 mps = 128 << dev->pcie_mpss; 1474 1480 1475 - if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && dev->bus->self) 1481 + if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT && 1482 + dev->bus->self) 1476 1483 /* For "Performance", the assumption is made that 1477 1484 * downstream communication will never be larger than 1478 1485 * the MRRS. So, the MPS only needs to be configured
+25 -6
drivers/pci/quirks.c
··· 3081 3081 3082 3082 static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe) 3083 3083 { 3084 - int pos; 3084 + int i; 3085 + u16 status; 3085 3086 3086 - pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 3087 - if (!pos) 3088 - return -ENOTTY; 3087 + /* 3088 + * http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf 3089 + * 3090 + * The 82599 supports FLR on VFs, but FLR support is reported only 3091 + * in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5). 3092 + * Therefore, we can't use pcie_flr(), which checks the VF DEVCAP. 3093 + */ 3089 3094 3090 3095 if (probe) 3091 3096 return 0; 3092 3097 3093 - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, 3094 - PCI_EXP_DEVCTL_BCR_FLR); 3098 + /* Wait for Transaction Pending bit clean */ 3099 + for (i = 0; i < 4; i++) { 3100 + if (i) 3101 + msleep((1 << (i - 1)) * 100); 3102 + 3103 + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); 3104 + if (!(status & PCI_EXP_DEVSTA_TRPND)) 3105 + goto clear; 3106 + } 3107 + 3108 + dev_err(&dev->dev, "transaction is not cleared; " 3109 + "proceeding with reset anyway\n"); 3110 + 3111 + clear: 3112 + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); 3113 + 3095 3114 msleep(100); 3096 3115 3097 3116 return 0;
+1 -1
drivers/pci/search.c
··· 41 41 continue; 42 42 } 43 43 /* PCI device should connect to a PCIe bridge */ 44 - if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) { 44 + if (pci_pcie_type(pdev) != PCI_EXP_TYPE_PCI_BRIDGE) { 45 45 /* Busted hardware? */ 46 46 WARN_ON_ONCE(1); 47 47 return NULL;
+60 -21
drivers/pci/setup-bus.c
··· 697 697 return size; 698 698 } 699 699 700 + resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus, 701 + unsigned long type) 702 + { 703 + return 1; 704 + } 705 + 706 + #define PCI_P2P_DEFAULT_MEM_ALIGN 0x100000 /* 1MiB */ 707 + #define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */ 708 + #define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */ 709 + 710 + static resource_size_t window_alignment(struct pci_bus *bus, 711 + unsigned long type) 712 + { 713 + resource_size_t align = 1, arch_align; 714 + 715 + if (type & IORESOURCE_MEM) 716 + align = PCI_P2P_DEFAULT_MEM_ALIGN; 717 + else if (type & IORESOURCE_IO) { 718 + /* 719 + * Per spec, I/O windows are 4K-aligned, but some 720 + * bridges have an extension to support 1K alignment. 721 + */ 722 + if (bus->self->io_window_1k) 723 + align = PCI_P2P_DEFAULT_IO_ALIGN_1K; 724 + else 725 + align = PCI_P2P_DEFAULT_IO_ALIGN; 726 + } 727 + 728 + arch_align = pcibios_window_alignment(bus, type); 729 + return max(align, arch_align); 730 + } 731 + 700 732 /** 701 733 * pbus_size_io() - size the io window of a given bus 702 734 * ··· 749 717 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); 750 718 unsigned long size = 0, size0 = 0, size1 = 0; 751 719 resource_size_t children_add_size = 0; 752 - resource_size_t min_align = 4096, align; 720 + resource_size_t min_align, io_align, align; 753 721 754 722 if (!b_res) 755 723 return; 756 724 757 - /* 758 - * Per spec, I/O windows are 4K-aligned, but some bridges have an 759 - * extension to support 1K alignment. 
760 - */ 761 - if (bus->self->io_window_1k) 762 - min_align = 1024; 725 + io_align = min_align = window_alignment(bus, IORESOURCE_IO); 763 726 list_for_each_entry(dev, &bus->devices, bus_list) { 764 727 int i; 765 728 ··· 781 754 } 782 755 } 783 756 784 - if (min_align > 4096) 785 - min_align = 4096; 757 + if (min_align > io_align) 758 + min_align = io_align; 786 759 787 760 size0 = calculate_iosize(size, min_size, size1, 788 761 resource_size(b_res), min_align); ··· 810 783 "%pR to %pR add_size %lx\n", b_res, 811 784 &bus->busn_res, size1-size0); 812 785 } 786 + } 787 + 788 + static inline resource_size_t calculate_mem_align(resource_size_t *aligns, 789 + int max_order) 790 + { 791 + resource_size_t align = 0; 792 + resource_size_t min_align = 0; 793 + int order; 794 + 795 + for (order = 0; order <= max_order; order++) { 796 + resource_size_t align1 = 1; 797 + 798 + align1 <<= (order + 20); 799 + 800 + if (!align) 801 + min_align = align1; 802 + else if (ALIGN(align + min_align, min_align) < align1) 803 + min_align = align1 >> 1; 804 + align += aligns[order]; 805 + } 806 + 807 + return min_align; 813 808 } 814 809 815 810 /** ··· 913 864 children_add_size += get_res_add_size(realloc_head, r); 914 865 } 915 866 } 916 - align = 0; 917 - min_align = 0; 918 - for (order = 0; order <= max_order; order++) { 919 - resource_size_t align1 = 1; 920 867 921 - align1 <<= (order + 20); 922 - 923 - if (!align) 924 - min_align = align1; 925 - else if (ALIGN(align + min_align, min_align) < align1) 926 - min_align = align1 >> 1; 927 - align += aligns[order]; 928 - } 868 + min_align = calculate_mem_align(aligns, max_order); 869 + min_align = max(min_align, window_alignment(bus, b_res->flags & mask)); 929 870 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); 930 871 if (children_add_size > add_size) 931 872 add_size = children_add_size;
+6 -12
drivers/rapidio/devices/tsi721.c
··· 2219 2219 const struct pci_device_id *id) 2220 2220 { 2221 2221 struct tsi721_device *priv; 2222 - int cap; 2223 2222 int err; 2224 - u32 regval; 2225 2223 2226 2224 priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL); 2227 2225 if (priv == NULL) { ··· 2328 2330 dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); 2329 2331 } 2330 2332 2331 - cap = pci_pcie_cap(pdev); 2332 - BUG_ON(cap == 0); 2333 + BUG_ON(!pci_is_pcie(pdev)); 2333 2334 2334 2335 /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */ 2335 - pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval); 2336 - regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | 2337 - PCI_EXP_DEVCTL_NOSNOOP_EN); 2338 - regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT; 2339 - pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval); 2336 + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, 2337 + PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | 2338 + PCI_EXP_DEVCTL_NOSNOOP_EN, 2339 + 0x2 << MAX_READ_REQUEST_SZ_SHIFT); 2340 2340 2341 2341 /* Adjust PCIe completion timeout. */ 2342 - pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval); 2343 - regval &= ~(0x0f); 2344 - pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2); 2342 + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2); 2345 2343 2346 2344 /* 2347 2345 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
+15 -7
drivers/rtc/rtc-at91sam9.c
··· 58 58 struct rtc_device *rtcdev; 59 59 u32 imr; 60 60 void __iomem *gpbr; 61 + int irq; 61 62 }; 62 63 63 64 #define rtt_readl(rtc, field) \ ··· 293 292 { 294 293 struct resource *r, *r_gpbr; 295 294 struct sam9_rtc *rtc; 296 - int ret; 295 + int ret, irq; 297 296 u32 mr; 298 297 299 298 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 303 302 return -ENODEV; 304 303 } 305 304 305 + irq = platform_get_irq(pdev, 0); 306 + if (irq < 0) { 307 + dev_err(&pdev->dev, "failed to get interrupt resource\n"); 308 + return irq; 309 + } 310 + 306 311 rtc = kzalloc(sizeof *rtc, GFP_KERNEL); 307 312 if (!rtc) 308 313 return -ENOMEM; 314 + 315 + rtc->irq = irq; 309 316 310 317 /* platform setup code should have handled this; sigh */ 311 318 if (!device_can_wakeup(&pdev->dev)) ··· 354 345 } 355 346 356 347 /* register irq handler after we know what name we'll use */ 357 - ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt, 358 - IRQF_SHARED, 348 + ret = request_irq(rtc->irq, at91_rtc_interrupt, IRQF_SHARED, 359 349 dev_name(&rtc->rtcdev->dev), rtc); 360 350 if (ret) { 361 - dev_dbg(&pdev->dev, "can't share IRQ %d?\n", AT91_ID_SYS); 351 + dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq); 362 352 rtc_device_unregister(rtc->rtcdev); 363 353 goto fail_register; 364 354 } ··· 394 386 395 387 /* disable all interrupts */ 396 388 rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); 397 - free_irq(AT91_ID_SYS, rtc); 389 + free_irq(rtc->irq, rtc); 398 390 399 391 rtc_device_unregister(rtc->rtcdev); 400 392 ··· 431 423 rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); 432 424 if (rtc->imr) { 433 425 if (device_may_wakeup(&pdev->dev) && (mr & AT91_RTT_ALMIEN)) { 434 - enable_irq_wake(AT91_ID_SYS); 426 + enable_irq_wake(rtc->irq); 435 427 /* don't let RTTINC cause wakeups */ 436 428 if (mr & AT91_RTT_RTTINCIEN) 437 429 rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); ··· 449 441 450 442 if (rtc->imr) { 451 443 if (device_may_wakeup(&pdev->dev)) 452 - 
disable_irq_wake(AT91_ID_SYS); 444 + disable_irq_wake(rtc->irq); 453 445 mr = rtt_readl(rtc, MR); 454 446 rtt_writel(rtc, MR, mr | rtc->imr); 455 447 }
+2 -6
drivers/scsi/qla2xxx/qla_nx.c
··· 1615 1615 char * 1616 1616 qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str) 1617 1617 { 1618 - int pcie_reg; 1619 1618 struct qla_hw_data *ha = vha->hw; 1620 1619 char lwstr[6]; 1621 1620 uint16_t lnk; 1622 1621 1623 - pcie_reg = pci_pcie_cap(ha->pdev); 1624 - pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk); 1622 + pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk); 1625 1623 ha->link_width = (lnk >> 4) & 0x3f; 1626 1624 1627 1625 strcpy(str, "PCIe ("); ··· 2495 2497 int 2496 2498 qla82xx_start_firmware(scsi_qla_host_t *vha) 2497 2499 { 2498 - int pcie_cap; 2499 2500 uint16_t lnk; 2500 2501 struct qla_hw_data *ha = vha->hw; 2501 2502 ··· 2525 2528 } 2526 2529 2527 2530 /* Negotiated Link width */ 2528 - pcie_cap = pci_pcie_cap(ha->pdev); 2529 - pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk); 2531 + pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk); 2530 2532 ha->link_width = (lnk >> 4) & 0x3f; 2531 2533 2532 2534 /* Synchronize with Receive peg */
+1 -3
drivers/scsi/qla4xxx/ql4_nx.c
··· 1566 1566 static int 1567 1567 qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start) 1568 1568 { 1569 - int pcie_cap; 1570 1569 uint16_t lnk; 1571 1570 1572 1571 /* scrub dma mask expansion register */ ··· 1589 1590 } 1590 1591 1591 1592 /* Negotiated Link width */ 1592 - pcie_cap = pci_pcie_cap(ha->pdev); 1593 - pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk); 1593 + pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk); 1594 1594 ha->link_width = (lnk >> 4) & 0x3f; 1595 1595 1596 1596 /* Synchronize with Receive peg */
+5 -14
drivers/staging/et131x/et131x.c
··· 3995 3995 static int et131x_pci_init(struct et131x_adapter *adapter, 3996 3996 struct pci_dev *pdev) 3997 3997 { 3998 - int cap = pci_pcie_cap(pdev); 3999 3998 u16 max_payload; 4000 - u16 ctl; 4001 3999 int i, rc; 4002 4000 4003 4001 rc = et131x_init_eeprom(adapter); 4004 4002 if (rc < 0) 4005 4003 goto out; 4006 4004 4007 - if (!cap) { 4005 + if (!pci_is_pcie(pdev)) { 4008 4006 dev_err(&pdev->dev, "Missing PCIe capabilities\n"); 4009 4007 goto err_out; 4010 4008 } ··· 4010 4012 /* Let's set up the PORT LOGIC Register. First we need to know what 4011 4013 * the max_payload_size is 4012 4014 */ 4013 - if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCAP, &max_payload)) { 4015 + if (pcie_capability_read_word(pdev, PCI_EXP_DEVCAP, &max_payload)) { 4014 4016 dev_err(&pdev->dev, 4015 4017 "Could not read PCI config space for Max Payload Size\n"); 4016 4018 goto err_out; ··· 4047 4049 } 4048 4050 4049 4051 /* Change the max read size to 2k */ 4050 - if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl)) { 4052 + if (pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, 4053 + PCI_EXP_DEVCTL_READRQ, 0x4 << 12)) { 4051 4054 dev_err(&pdev->dev, 4052 - "Could not read PCI config space for Max read size\n"); 4053 - goto err_out; 4054 - } 4055 - 4056 - ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | (0x04 << 12); 4057 - 4058 - if (pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl)) { 4059 - dev_err(&pdev->dev, 4060 - "Could not write PCI config space for Max read size\n"); 4055 + "Couldn't change PCI config space for Max read size\n"); 4061 4056 goto err_out; 4062 4057 } 4063 4058
+3 -5
drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
··· 31 31 struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); 32 32 33 33 u8 tmp; 34 - int pos; 35 - u8 LinkCtrlReg; 34 + u16 LinkCtrlReg; 36 35 37 - pos = pci_find_capability(priv->pdev, PCI_CAP_ID_EXP); 38 - pci_read_config_byte(priv->pdev, pos + PCI_EXP_LNKCTL, &LinkCtrlReg); 39 - priv->NdisAdapter.LinkCtrlReg = LinkCtrlReg; 36 + pcie_capability_read_word(priv->pdev, PCI_EXP_LNKCTL, &LinkCtrlReg); 37 + priv->NdisAdapter.LinkCtrlReg = (u8)LinkCtrlReg; 40 38 41 39 RT_TRACE(COMP_INIT, "Link Control Register =%x\n", 42 40 priv->NdisAdapter.LinkCtrlReg);
-2
drivers/video/auo_k190x.c
··· 987 987 fb_dealloc_cmap(&info->cmap); 988 988 err_cmap: 989 989 fb_deferred_io_cleanup(info); 990 - kfree(info->fbdefio); 991 990 err_defio: 992 991 vfree((void *)info->screen_base); 993 992 err_irq: ··· 1021 1022 fb_dealloc_cmap(&info->cmap); 1022 1023 1023 1024 fb_deferred_io_cleanup(info); 1024 - kfree(info->fbdefio); 1025 1025 1026 1026 vfree((void *)info->screen_base); 1027 1027
+1 -1
drivers/video/console/bitblit.c
··· 162 162 image.depth = 1; 163 163 164 164 if (attribute) { 165 - buf = kmalloc(cellsize, GFP_KERNEL); 165 + buf = kmalloc(cellsize, GFP_ATOMIC); 166 166 if (!buf) 167 167 return; 168 168 }
+1 -1
drivers/video/console/fbcon.c
··· 449 449 450 450 while ((options = strsep(&this_opt, ",")) != NULL) { 451 451 if (!strncmp(options, "font:", 5)) 452 - strcpy(fontname, options + 5); 452 + strlcpy(fontname, options + 5, sizeof(fontname)); 453 453 454 454 if (!strncmp(options, "scrollback:", 11)) { 455 455 options += 11;
+2
drivers/video/mb862xx/mb862xxfbdrv.c
··· 328 328 case MB862XX_L1_SET_CFG: 329 329 if (copy_from_user(l1_cfg, argp, sizeof(*l1_cfg))) 330 330 return -EFAULT; 331 + if (l1_cfg->dh == 0 || l1_cfg->dw == 0) 332 + return -EINVAL; 331 333 if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) { 332 334 /* downscaling */ 333 335 outreg(cap, GC_CAP_CSC,
+14
drivers/video/omap2/dss/sdi.c
··· 105 105 106 106 sdi_config_lcd_manager(dssdev); 107 107 108 + /* 109 + * LCLK and PCLK divisors are located in shadow registers, and we 110 + * normally write them to DISPC registers when enabling the output. 111 + * However, SDI uses pck-free as source clock for its PLL, and pck-free 112 + * is affected by the divisors. And as we need the PLL before enabling 113 + * the output, we need to write the divisors early. 114 + * 115 + * It seems just writing to the DISPC register is enough, and we don't 116 + * need to care about the shadow register mechanism for pck-free. The 117 + * exact reason for this is unknown. 118 + */ 119 + dispc_mgr_set_clock_div(dssdev->manager->id, 120 + &sdi.mgr_config.clock_info); 121 + 108 122 dss_sdi_init(dssdev->phy.sdi.datapairs); 109 123 r = dss_sdi_enable(); 110 124 if (r)
+1 -1
drivers/video/omap2/omapfb/omapfb-main.c
··· 1192 1192 break; 1193 1193 1194 1194 if (regno < 16) { 1195 - u16 pal; 1195 + u32 pal; 1196 1196 pal = ((red >> (16 - var->red.length)) << 1197 1197 var->red.offset) | 1198 1198 ((green >> (16 - var->green.length)) <<
+1 -1
drivers/xen/swiotlb-xen.c
··· 232 232 return ret; 233 233 234 234 if (hwdev && hwdev->coherent_dma_mask) 235 - dma_mask = hwdev->coherent_dma_mask; 235 + dma_mask = dma_alloc_coherent_mask(hwdev, flags); 236 236 237 237 phys = virt_to_phys(ret); 238 238 dev_addr = xen_phys_to_bus(phys);
+4 -4
drivers/xen/xen-pciback/pci_stub.c
··· 353 353 if (err) 354 354 goto config_release; 355 355 356 - dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n"); 357 - __pci_reset_function_locked(dev); 358 - 359 356 /* We need the device active to save the state. */ 360 357 dev_dbg(&dev->dev, "save state of device\n"); 361 358 pci_save_state(dev); 362 359 dev_data->pci_saved_state = pci_store_saved_state(dev); 363 360 if (!dev_data->pci_saved_state) 364 361 dev_err(&dev->dev, "Could not store PCI conf saved state!\n"); 365 - 362 + else { 363 + dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n"); 364 + __pci_reset_function_locked(dev); 365 + } 366 366 /* Now disable the device (this also ensures some private device 367 367 * data is setup before we export) 368 368 */
+10 -2
include/linux/kernel.h
··· 82 82 __x - (__x % (y)); \ 83 83 } \ 84 84 ) 85 + 86 + /* 87 + * Divide positive or negative dividend by positive divisor and round 88 + * to closest integer. Result is undefined for negative divisors. 89 + */ 85 90 #define DIV_ROUND_CLOSEST(x, divisor)( \ 86 91 { \ 87 - typeof(divisor) __divisor = divisor; \ 88 - (((x) + ((__divisor) / 2)) / (__divisor)); \ 92 + typeof(x) __x = x; \ 93 + typeof(divisor) __d = divisor; \ 94 + (((typeof(x))-1) >= 0 || (__x) >= 0) ? \ 95 + (((__x) + ((__d) / 2)) / (__d)) : \ 96 + (((__x) - ((__d) / 2)) / (__d)); \ 89 97 } \ 90 98 ) 91 99
+1
include/linux/mmc/card.h
··· 239 239 #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ 240 240 #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ 241 241 #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ 242 + #define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ 242 243 /* byte mode */ 243 244 unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ 244 245 #define MMC_NO_POWER_NOTIFICATION 0
+45 -1
include/linux/pci.h
··· 254 254 u8 revision; /* PCI revision, low byte of class word */ 255 255 u8 hdr_type; /* PCI header type (`multi' flag masked out) */ 256 256 u8 pcie_cap; /* PCI-E capability offset */ 257 - u8 pcie_type:4; /* PCI-E device/port type */ 258 257 u8 pcie_mpss:3; /* PCI-E Max Payload Size Supported */ 259 258 u8 rom_base_reg; /* which config register controls the ROM */ 260 259 u8 pin; /* which interrupt pin this device uses */ 260 + u16 pcie_flags_reg; /* cached PCI-E Capabilities Register */ 261 261 262 262 struct pci_driver *driver; /* which driver has allocated this device */ 263 263 u64 dma_mask; /* Mask of the bits of bus address this ··· 816 816 return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); 817 817 } 818 818 819 + int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); 820 + int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); 821 + int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); 822 + int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val); 823 + int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, 824 + u16 clear, u16 set); 825 + int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos, 826 + u32 clear, u32 set); 827 + 828 + static inline int pcie_capability_set_word(struct pci_dev *dev, int pos, 829 + u16 set) 830 + { 831 + return pcie_capability_clear_and_set_word(dev, pos, 0, set); 832 + } 833 + 834 + static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos, 835 + u32 set) 836 + { 837 + return pcie_capability_clear_and_set_dword(dev, pos, 0, set); 838 + } 839 + 840 + static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos, 841 + u16 clear) 842 + { 843 + return pcie_capability_clear_and_set_word(dev, pos, clear, 0); 844 + } 845 + 846 + static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos, 847 + u32 clear) 848 + { 849 + return pcie_capability_clear_and_set_dword(dev, pos, clear, 
0); 850 + } 851 + 819 852 /* user-space driven config access */ 820 853 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); 821 854 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); ··· 1064 1031 int pci_cfg_space_size(struct pci_dev *dev); 1065 1032 unsigned char pci_bus_max_busnr(struct pci_bus *bus); 1066 1033 void pci_setup_bridge(struct pci_bus *bus); 1034 + resource_size_t pcibios_window_alignment(struct pci_bus *bus, 1035 + unsigned long type); 1067 1036 1068 1037 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0) 1069 1038 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1) ··· 1683 1648 static inline bool pci_is_pcie(struct pci_dev *dev) 1684 1649 { 1685 1650 return !!pci_pcie_cap(dev); 1651 + } 1652 + 1653 + /** 1654 + * pci_pcie_type - get the PCIe device/port type 1655 + * @dev: PCI device 1656 + */ 1657 + static inline int pci_pcie_type(const struct pci_dev *dev) 1658 + { 1659 + return (dev->pcie_flags_reg & PCI_EXP_FLAGS_TYPE) >> 4; 1686 1660 } 1687 1661 1688 1662 void pci_request_acs(void);
+1
include/linux/pci_regs.h
··· 549 549 #define PCI_EXP_LNKCAP2_SLS_8_0GB 0x04 /* Current Link Speed 8.0GT/s */ 550 550 #define PCI_EXP_LNKCAP2_CROSSLINK 0x100 /* Crosslink supported */ 551 551 #define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ 552 + #define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ 552 553 #define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */ 553 554 554 555 /* Extended Capabilities (PCI-X 2.0 and Express) */
+1 -1
mm/mempolicy.c
··· 2562 2562 break; 2563 2563 2564 2564 default: 2565 - BUG(); 2565 + return -EINVAL; 2566 2566 } 2567 2567 2568 2568 l = strlen(policy_modes[mode]);
+2 -2
net/socket.c
··· 2604 2604 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); 2605 2605 set_fs(old_fs); 2606 2606 if (!err) 2607 - err = compat_put_timeval(up, &ktv); 2607 + err = compat_put_timeval(&ktv, up); 2608 2608 2609 2609 return err; 2610 2610 } ··· 2620 2620 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); 2621 2621 set_fs(old_fs); 2622 2622 if (!err) 2623 - err = compat_put_timespec(up, &kts); 2623 + err = compat_put_timespec(&kts, up); 2624 2624 2625 2625 return err; 2626 2626 }
+1 -1
scripts/Makefile.fwinst
··· 42 42 $(installed-fw-dirs): 43 43 $(call cmd,mkdir) 44 44 45 - $(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $(INSTALL_FW_PATH)/$$(dir %) 45 + $(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $$(dir $(INSTALL_FW_PATH)/%) 46 46 $(call cmd,install) 47 47 48 48 PHONY += __fw_install __fw_modinst FORCE
+8 -2
sound/pci/hda/hda_codec.c
··· 1209 1209 kfree(codec); 1210 1210 } 1211 1211 1212 + static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec, 1213 + hda_nid_t fg, unsigned int power_state); 1214 + 1212 1215 static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg, 1213 1216 unsigned int power_state); 1214 1217 ··· 1319 1316 snd_hda_codec_read(codec, nid, 0, 1320 1317 AC_VERB_GET_SUBSYSTEM_ID, 0); 1321 1318 } 1319 + 1320 + codec->epss = snd_hda_codec_get_supported_ps(codec, 1321 + codec->afg ? codec->afg : codec->mfg, 1322 + AC_PWRST_EPSS); 1322 1323 1323 1324 /* power-up all before initialization */ 1324 1325 hda_set_power_state(codec, ··· 3550 3543 /* this delay seems necessary to avoid click noise at power-down */ 3551 3544 if (power_state == AC_PWRST_D3) { 3552 3545 /* transition time less than 10ms for power down */ 3553 - bool epss = snd_hda_codec_get_supported_ps(codec, fg, AC_PWRST_EPSS); 3554 - msleep(epss ? 10 : 100); 3546 + msleep(codec->epss ? 10 : 100); 3555 3547 } 3556 3548 3557 3549 /* repeat power states setting at most 10 times*/
+1
sound/pci/hda/hda_codec.h
··· 862 862 unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */ 863 863 unsigned int no_jack_detect:1; /* Machine has no jack-detection */ 864 864 unsigned int pcm_format_first:1; /* PCM format must be set first */ 865 + unsigned int epss:1; /* supporting EPSS? */ 865 866 #ifdef CONFIG_SND_HDA_POWER_SAVE 866 867 unsigned int power_on :1; /* current (global) power-state */ 867 868 int power_transition; /* power-state in transition */
+4
sound/pci/hda/patch_sigmatel.c
··· 4543 4543 struct auto_pin_cfg *cfg = &spec->autocfg; 4544 4544 int i; 4545 4545 4546 + if (cfg->speaker_outs == 0) 4547 + return; 4548 + 4546 4549 for (i = 0; i < cfg->line_outs; i++) { 4547 4550 if (presence) 4548 4551 break; ··· 5534 5531 snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e); 5535 5532 } 5536 5533 5534 + codec->epss = 0; /* longer delay needed for D3 */ 5537 5535 codec->no_trigger_sense = 1; 5538 5536 codec->spec = spec; 5539 5537
+2 -2
sound/usb/card.c
··· 553 553 struct snd_usb_audio *chip) 554 554 { 555 555 struct snd_card *card; 556 - struct list_head *p; 556 + struct list_head *p, *n; 557 557 558 558 if (chip == (void *)-1L) 559 559 return; ··· 570 570 snd_usb_stream_disconnect(p); 571 571 } 572 572 /* release the endpoint resources */ 573 - list_for_each(p, &chip->ep_list) { 573 + list_for_each_safe(p, n, &chip->ep_list) { 574 574 snd_usb_endpoint_free(p); 575 575 } 576 576 /* release the midi resources */
+10 -14
sound/usb/endpoint.c
··· 141 141 * 142 142 * For implicit feedback, next_packet_size() is unused. 143 143 */ 144 - static int next_packet_size(struct snd_usb_endpoint *ep) 144 + int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep) 145 145 { 146 146 unsigned long flags; 147 147 int ret; ··· 175 175 176 176 if (ep->retire_data_urb) 177 177 ep->retire_data_urb(ep->data_subs, urb); 178 - } 179 - 180 - static void prepare_outbound_urb_sizes(struct snd_usb_endpoint *ep, 181 - struct snd_urb_ctx *ctx) 182 - { 183 - int i; 184 - 185 - for (i = 0; i < ctx->packets; ++i) 186 - ctx->packet_size[i] = next_packet_size(ep); 187 178 } 188 179 189 180 /* ··· 361 370 goto exit_clear; 362 371 } 363 372 364 - prepare_outbound_urb_sizes(ep, ctx); 365 373 prepare_outbound_urb(ep, ctx); 366 374 } else { 367 375 retire_inbound_urb(ep, ctx); ··· 789 799 /** 790 800 * snd_usb_endpoint_start: start an snd_usb_endpoint 791 801 * 792 - * @ep: the endpoint to start 802 + * @ep: the endpoint to start 803 + * @can_sleep: flag indicating whether the operation is executed in 804 + * non-atomic context 793 805 * 794 806 * A call to this function will increment the use count of the endpoint. 795 807 * In case it is not already running, the URBs for this endpoint will be ··· 801 809 * 802 810 * Returns an error if the URB submission failed, 0 in all other cases. 803 811 */ 804 - int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) 812 + int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep) 805 813 { 806 814 int err; 807 815 unsigned int i; ··· 812 820 /* already running? 
*/ 813 821 if (++ep->use_count != 1) 814 822 return 0; 823 + 824 + /* just to be sure */ 825 + deactivate_urbs(ep, 0, can_sleep); 826 + if (can_sleep) 827 + wait_clear_urbs(ep); 815 828 816 829 ep->active_mask = 0; 817 830 ep->unlink_mask = 0; ··· 847 850 goto __error; 848 851 849 852 if (usb_pipeout(ep->pipe)) { 850 - prepare_outbound_urb_sizes(ep, urb->context); 851 853 prepare_outbound_urb(ep, urb->context); 852 854 } else { 853 855 prepare_inbound_urb(ep, urb->context);
+2 -1
sound/usb/endpoint.h
··· 13 13 struct audioformat *fmt, 14 14 struct snd_usb_endpoint *sync_ep); 15 15 16 - int snd_usb_endpoint_start(struct snd_usb_endpoint *ep); 16 + int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep); 17 17 void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep, 18 18 int force, int can_sleep, int wait); 19 19 int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep); ··· 21 21 void snd_usb_endpoint_free(struct list_head *head); 22 22 23 23 int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep); 24 + int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep); 24 25 25 26 void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep, 26 27 struct snd_usb_endpoint *sender,
+52 -12
sound/usb/pcm.c
··· 212 212 } 213 213 } 214 214 215 - static int start_endpoints(struct snd_usb_substream *subs) 215 + static int start_endpoints(struct snd_usb_substream *subs, int can_sleep) 216 216 { 217 217 int err; 218 218 ··· 225 225 snd_printdd(KERN_DEBUG "Starting data EP @%p\n", ep); 226 226 227 227 ep->data_subs = subs; 228 - err = snd_usb_endpoint_start(ep); 228 + err = snd_usb_endpoint_start(ep, can_sleep); 229 229 if (err < 0) { 230 230 clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags); 231 231 return err; ··· 236 236 !test_and_set_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags)) { 237 237 struct snd_usb_endpoint *ep = subs->sync_endpoint; 238 238 239 + if (subs->data_endpoint->iface != subs->sync_endpoint->iface || 240 + subs->data_endpoint->alt_idx != subs->sync_endpoint->alt_idx) { 241 + err = usb_set_interface(subs->dev, 242 + subs->sync_endpoint->iface, 243 + subs->sync_endpoint->alt_idx); 244 + if (err < 0) { 245 + snd_printk(KERN_ERR 246 + "%d:%d:%d: cannot set interface (%d)\n", 247 + subs->dev->devnum, 248 + subs->sync_endpoint->iface, 249 + subs->sync_endpoint->alt_idx, err); 250 + return -EIO; 251 + } 252 + } 253 + 239 254 snd_printdd(KERN_DEBUG "Starting sync EP @%p\n", ep); 240 255 241 256 ep->sync_slave = subs->data_endpoint; 242 - err = snd_usb_endpoint_start(ep); 257 + err = snd_usb_endpoint_start(ep, can_sleep); 243 258 if (err < 0) { 244 259 clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags); 245 260 return err; ··· 559 544 subs->last_frame_number = 0; 560 545 runtime->delay = 0; 561 546 562 - /* clear the pending deactivation on the target EPs */ 563 - deactivate_endpoints(subs); 564 - 565 547 /* for playback, submit the URBs now; otherwise, the first hwptr_done 566 548 * updates for all URBs would happen at the same time when starting */ 567 549 if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) 568 - return start_endpoints(subs); 550 + return start_endpoints(subs, 1); 569 551 570 552 return 0; 571 553 } ··· 1044 1032 struct urb *urb) 
1045 1033 { 1046 1034 struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; 1035 + struct snd_usb_endpoint *ep = subs->data_endpoint; 1047 1036 struct snd_urb_ctx *ctx = urb->context; 1048 1037 unsigned int counts, frames, bytes; 1049 1038 int i, stride, period_elapsed = 0; ··· 1056 1043 urb->number_of_packets = 0; 1057 1044 spin_lock_irqsave(&subs->lock, flags); 1058 1045 for (i = 0; i < ctx->packets; i++) { 1059 - counts = ctx->packet_size[i]; 1046 + if (ctx->packet_size[i]) 1047 + counts = ctx->packet_size[i]; 1048 + else 1049 + counts = snd_usb_endpoint_next_packet_size(ep); 1050 + 1060 1051 /* set up descriptor */ 1061 1052 urb->iso_frame_desc[i].offset = frames * stride; 1062 1053 urb->iso_frame_desc[i].length = counts * stride; ··· 1111 1094 subs->hwptr_done += bytes; 1112 1095 if (subs->hwptr_done >= runtime->buffer_size * stride) 1113 1096 subs->hwptr_done -= runtime->buffer_size * stride; 1097 + 1098 + /* update delay with exact number of samples queued */ 1099 + runtime->delay = subs->last_delay; 1114 1100 runtime->delay += frames; 1101 + subs->last_delay = runtime->delay; 1102 + 1103 + /* realign last_frame_number */ 1104 + subs->last_frame_number = usb_get_current_frame_number(subs->dev); 1105 + subs->last_frame_number &= 0xFF; /* keep 8 LSBs */ 1106 + 1115 1107 spin_unlock_irqrestore(&subs->lock, flags); 1116 1108 urb->transfer_buffer_length = bytes; 1117 1109 if (period_elapsed) ··· 1138 1112 struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; 1139 1113 int stride = runtime->frame_bits >> 3; 1140 1114 int processed = urb->transfer_buffer_length / stride; 1115 + int est_delay; 1141 1116 1142 1117 spin_lock_irqsave(&subs->lock, flags); 1143 - if (processed > runtime->delay) 1144 - runtime->delay = 0; 1118 + est_delay = snd_usb_pcm_delay(subs, runtime->rate); 1119 + /* update delay with exact number of samples played */ 1120 + if (processed > subs->last_delay) 1121 + subs->last_delay = 0; 1145 1122 else 1146 - runtime->delay -= 
processed; 1123 + subs->last_delay -= processed; 1124 + runtime->delay = subs->last_delay; 1125 + 1126 + /* 1127 + * Report when delay estimate is off by more than 2ms. 1128 + * The error should be lower than 2ms since the estimate relies 1129 + * on two reads of a counter updated every ms. 1130 + */ 1131 + if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2) 1132 + snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n", 1133 + est_delay, subs->last_delay); 1134 + 1147 1135 spin_unlock_irqrestore(&subs->lock, flags); 1148 1136 } 1149 1137 ··· 1215 1175 1216 1176 switch (cmd) { 1217 1177 case SNDRV_PCM_TRIGGER_START: 1218 - err = start_endpoints(subs); 1178 + err = start_endpoints(subs, 0); 1219 1179 if (err < 0) 1220 1180 return err; 1221 1181