Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
[IA64] Support multiple CPUs going through OS_MCA
[IA64] silence GCC ia64 unused variable warnings
[IA64] prevent MCA when performing MMIO mmap to PCI config space
[IA64] add sn_register_pmi_handler oemcall
[IA64] Stop bit for brl instruction
[IA64] SN: Correct ROM resource length for BIOS copy
[IA64] Don't set psr.ic and psr.i simultaneously

+203 -77
+19 -7
Documentation/ia64/aliasing-test.c
··· 19 19 #include <sys/mman.h> 20 20 #include <sys/stat.h> 21 21 #include <unistd.h> 22 + #include <linux/pci.h> 22 23 23 24 int sum; 24 25 ··· 35 34 return -1; 36 35 } 37 36 37 + if (fnmatch("/proc/bus/pci/*", path, 0) == 0) { 38 + rc = ioctl(fd, PCIIOC_MMAP_IS_MEM); 39 + if (rc == -1) 40 + perror("PCIIOC_MMAP_IS_MEM ioctl"); 41 + } 42 + 38 43 addr = mmap(NULL, length, PROT_READ|PROT_WRITE, MAP_SHARED, fd, offset); 39 44 if (addr == MAP_FAILED) 40 45 return 1; 41 46 42 47 if (touch) { 43 48 c = (int *) addr; 44 - while (c < (int *) (offset + length)) 49 + while (c < (int *) (addr + length)) 45 50 sum += *c++; 46 51 } 47 52 ··· 61 54 return 0; 62 55 } 63 56 64 - int scan_sysfs(char *path, char *file, off_t offset, size_t length, int touch) 57 + int scan_tree(char *path, char *file, off_t offset, size_t length, int touch) 65 58 { 66 59 struct dirent **namelist; 67 60 char *name, *path2; ··· 100 93 } else { 101 94 r = lstat(path2, &buf); 102 95 if (r == 0 && S_ISDIR(buf.st_mode)) { 103 - rc = scan_sysfs(path2, file, offset, length, touch); 96 + rc = scan_tree(path2, file, offset, length, touch); 104 97 if (rc < 0) 105 98 return rc; 106 99 } ··· 245 238 else 246 239 fprintf(stderr, "FAIL: /dev/mem 0x0-0x100000 not accessible\n"); 247 240 248 - scan_sysfs("/sys/class/pci_bus", "legacy_mem", 0, 0xA0000, 1); 249 - scan_sysfs("/sys/class/pci_bus", "legacy_mem", 0xA0000, 0x20000, 0); 250 - scan_sysfs("/sys/class/pci_bus", "legacy_mem", 0xC0000, 0x40000, 1); 251 - scan_sysfs("/sys/class/pci_bus", "legacy_mem", 0, 1024*1024, 0); 241 + scan_tree("/sys/class/pci_bus", "legacy_mem", 0, 0xA0000, 1); 242 + scan_tree("/sys/class/pci_bus", "legacy_mem", 0xA0000, 0x20000, 0); 243 + scan_tree("/sys/class/pci_bus", "legacy_mem", 0xC0000, 0x40000, 1); 244 + scan_tree("/sys/class/pci_bus", "legacy_mem", 0, 1024*1024, 0); 252 245 253 246 scan_rom("/sys/devices", "rom"); 247 + 248 + scan_tree("/proc/bus/pci", "??.?", 0, 0xA0000, 1); 249 + scan_tree("/proc/bus/pci", "??.?", 0xA0000, 0x20000, 0); 250 + scan_tree("/proc/bus/pci", "??.?", 0xC0000, 0x40000, 1); 251 + scan_tree("/proc/bus/pci", "??.?", 0, 1024*1024, 0); 254 252 }
+12
Documentation/ia64/aliasing.txt
··· 112 112 113 113 The /dev/mem mmap constraints apply. 114 114 115 + mmap of /proc/bus/pci/.../??.? 116 + 117 + This is an MMIO mmap of PCI functions, which additionally may or 118 + may not be requested as using the WC attribute. 119 + 120 + If WC is requested, and the region in kern_memmap is either WC 121 + or UC, and the EFI memory map designates the region as WC, then 122 + the WC mapping is allowed. 123 + 124 + Otherwise, the user mapping must use the same attribute as the 125 + kernel mapping. 126 + 115 127 read/write of /dev/mem 116 128 117 129 This uses copy_from_user(), which implicitly uses a kernel
+1
arch/ia64/kernel/gate.S
··· 30 30 .previous 31 31 #define BRL_COND_FSYS_BUBBLE_DOWN(pr) \ 32 32 [1:](pr)brl.cond.sptk 0; \ 33 + ;; \ 33 34 .xdata4 ".data.patch.brl_fsys_bubble_down", 1b-. 34 35 35 36 GLOBAL_ENTRY(__kernel_syscall_via_break)
+53 -7
arch/ia64/kernel/mca.c
··· 57 57 * 58 58 * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> 59 59 * Add printing support for MCA/INIT. 60 + * 61 + * 2007-04-27 Russ Anderson <rja@sgi.com> 62 + * Support multiple cpus going through OS_MCA in the same event. 60 63 */ 61 64 #include <linux/types.h> 62 65 #include <linux/init.h> ··· 99 96 #endif 100 97 101 98 /* Used by mca_asm.S */ 102 - u32 ia64_mca_serialize; 103 99 DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ 104 100 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ 105 101 DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ ··· 965 963 goto no_mod; 966 964 } 967 965 966 + if (r13 != sos->prev_IA64_KR_CURRENT) { 967 + msg = "inconsistent previous current and r13"; 968 + goto no_mod; 969 + } 970 + 968 971 if (!mca_recover_range(ms->pmsa_iip)) { 969 - if (r13 != sos->prev_IA64_KR_CURRENT) { 970 - msg = "inconsistent previous current and r13"; 971 - goto no_mod; 972 - } 973 972 if ((r12 - r13) >= KERNEL_STACK_SIZE) { 974 973 msg = "inconsistent r12 and r13"; 975 974 goto no_mod; ··· 1190 1187 * further MCA logging is enabled by clearing logs. 1191 1188 * Monarch also has the duty of sending wakeup-IPIs to pull the 1192 1189 * slave processors out of rendezvous spinloop. 1190 + * 1191 + * If multiple processors call into OS_MCA, the first will become 1192 + * the monarch. Subsequent cpus will be recorded in the mca_cpu 1193 + * bitmask. After the first monarch has processed its MCA, it 1194 + * will wake up the next cpu in the mca_cpu bitmask and then go 1195 + * into the rendezvous loop. When all processors have serviced 1196 + * their MCA, the last monarch frees up the rest of the processors. 1193 1197 */ 1194 1198 void 1195 1199 ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, ··· 1206 1196 struct task_struct *previous_current; 1207 1197 struct ia64_mca_notify_die nd = 1208 1198 { .sos = sos, .monarch_cpu = &monarch_cpu }; 1199 + static atomic_t mca_count; 1200 + static cpumask_t mca_cpu; 1209 1201 1202 + if (atomic_add_return(1, &mca_count) == 1) { 1203 + monarch_cpu = cpu; 1204 + sos->monarch = 1; 1205 + } else { 1206 + cpu_set(cpu, mca_cpu); 1207 + sos->monarch = 0; 1208 + } 1210 1209 mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d " 1211 1210 "monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch); 1212 1211 1213 1212 previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA"); 1214 - monarch_cpu = cpu; 1213 + 1215 1214 if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0) 1216 1215 == NOTIFY_STOP) 1217 1216 ia64_mca_spin(__FUNCTION__); 1218 - ia64_wait_for_slaves(cpu, "MCA"); 1217 + if (sos->monarch) { 1218 + ia64_wait_for_slaves(cpu, "MCA"); 1219 + } else { 1220 + ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA; 1221 + while (cpu_isset(cpu, mca_cpu)) 1222 + cpu_relax(); /* spin until monarch wakes us */ 1223 + ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; 1224 + } 1219 1225 1220 1226 /* Wakeup all the processors which are spinning in the rendezvous loop. 1221 1227 * They will leave SAL, then spin in the OS with interrupts disabled ··· 1270 1244 == NOTIFY_STOP) 1271 1245 ia64_mca_spin(__FUNCTION__); 1272 1246 1247 + 1248 + if (atomic_dec_return(&mca_count) > 0) { 1249 + int i; 1250 + 1251 + /* wake up the next monarch cpu, 1252 + * and put this cpu in the rendez loop. 1253 + */ 1254 + ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA; 1255 + for_each_online_cpu(i) { 1256 + if (cpu_isset(i, mca_cpu)) { 1257 + monarch_cpu = i; 1258 + cpu_clear(i, mca_cpu); /* wake next cpu */ 1259 + while (monarch_cpu != -1) 1260 + cpu_relax(); /* spin until last cpu leaves */ 1261 + ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; 1262 + set_curr_task(cpu, previous_current); 1263 + return; 1264 + } 1265 + } 1266 + } 1273 1267 set_curr_task(cpu, previous_current); 1274 1268 monarch_cpu = -1; 1275 1269 }
-12
arch/ia64/kernel/mca_asm.S
··· 133 133 //StartMain//////////////////////////////////////////////////////////////////// 134 134 135 135 ia64_os_mca_dispatch: 136 - // Serialize all MCA processing 137 - mov r3=1;; 138 - LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);; 139 - ia64_os_mca_spin: 140 - xchg4 r4=[r2],r3;; 141 - cmp.ne p6,p0=r4,r0 142 - (p6) br ia64_os_mca_spin 143 - 144 136 mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack 145 137 LOAD_PHYSICAL(p0,r2,1f) // return address 146 138 mov r19=1 // All MCA events are treated as monarch (for now) ··· 282 290 1: 283 291 284 292 mov b0=r12 // SAL_CHECK return address 285 - 286 - // release lock 287 - LOAD_PHYSICAL(p0,r3,ia64_mca_serialize);; 288 - st4.rel [r3]=r0 289 293 290 294 br b0 291 295
+5 -1
arch/ia64/kernel/mca_drv_asm.S
··· 40 40 mov b6=loc1 41 41 ;; 42 42 mov loc1=rp 43 - ssm psr.i | psr.ic 43 + ssm psr.ic 44 + ;; 45 + srlz.i 46 + ;; 47 + ssm psr.i 44 48 br.call.sptk.many rp=b6 // does not return ... 45 49 ;; 46 50 mov ar.pfs=loc0
+2 -1
arch/ia64/kernel/process.c
··· 513 513 static void 514 514 do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg) 515 515 { 516 - unsigned long mask, sp, nat_bits = 0, ip, ar_rnat, urbs_end, cfm; 516 + unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm; 517 + unsigned long uninitialized_var(ip); /* GCC be quiet */ 517 518 elf_greg_t *dst = arg; 518 519 struct pt_regs *pt; 519 520 char nat;
+1 -1
arch/ia64/mm/tlb.c
··· 175 175 void __devinit 176 176 ia64_tlb_init (void) 177 177 { 178 - ia64_ptce_info_t ptce_info; 178 + ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */ 179 179 unsigned long tr_pgbits; 180 180 long status; 181 181
+17 -5
arch/ia64/pci/pci.c
··· 591 591 pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, 592 592 enum pci_mmap_state mmap_state, int write_combine) 593 593 { 594 + unsigned long size = vma->vm_end - vma->vm_start; 595 + pgprot_t prot; 596 + 594 597 /* 595 598 * I/O space cannot be accessed via normal processor loads and 596 599 * stores on this platform. ··· 607 604 */ 608 605 return -EINVAL; 609 606 607 + if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) 608 + return -EINVAL; 609 + 610 + prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size, 611 + vma->vm_page_prot); 612 + 610 613 /* 611 - * Leave vm_pgoff as-is, the PCI space address is the physical 612 - * address on this platform. 614 + * If the user requested WC, the kernel uses UC or WC for this region, 615 + * and the chipset supports WC, we can use WC. Otherwise, we have to 616 + * use the same attribute the kernel uses. 613 617 */ 614 - if (write_combine && efi_range_is_wc(vma->vm_start, 615 - vma->vm_end - vma->vm_start)) 618 + if (write_combine && 619 + ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC || 620 + (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) && 621 + efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) 616 622 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 617 623 else 618 - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 624 + vma->vm_page_prot = prot; 619 625 620 626 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 621 627 vma->vm_end - vma->vm_start, vma->vm_page_prot))
+8 -9
arch/ia64/sn/kernel/io_acpi_init.c
··· 418 418 void __iomem *addr; 419 419 struct pcidev_info *pcidev_info = NULL; 420 420 struct sn_irq_info *sn_irq_info = NULL; 421 - size_t size; 421 + size_t image_size, size; 422 422 423 423 if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) { 424 424 panic("%s: Failure obtaining pcidev_info for %s\n", ··· 428 428 if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) { 429 429 /* 430 430 * A valid ROM image exists and has been shadowed by the 431 - * PROM. Setup the pci_dev ROM resource to point to 432 - * the shadowed copy. 431 + * PROM. Setup the pci_dev ROM resource with the address 432 + * of the shadowed copy, and the actual length of the ROM image. 433 433 */ 434 - size = dev->resource[PCI_ROM_RESOURCE].end - 435 - dev->resource[PCI_ROM_RESOURCE].start; 436 - addr = 437 - ioremap(pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE], 438 - size); 434 + size = pci_resource_len(dev, PCI_ROM_RESOURCE); 435 + addr = ioremap(pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE], 436 + size); 437 + image_size = pci_get_rom_size(addr, size); 439 438 dev->resource[PCI_ROM_RESOURCE].start = (unsigned long) addr; 440 439 dev->resource[PCI_ROM_RESOURCE].end = 441 - (unsigned long) addr + size; 440 + (unsigned long) addr + image_size - 1; 442 441 dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY; 443 442 } 444 443 sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
+17 -3
arch/ia64/sn/kernel/io_init.c
··· 259 259 insert_resource(&ioport_resource, &dev->resource[idx]); 260 260 else 261 261 insert_resource(&iomem_resource, &dev->resource[idx]); 262 - /* If ROM, mark as shadowed in PROM */ 263 - if (idx == PCI_ROM_RESOURCE) 264 - dev->resource[idx].flags |= IORESOURCE_ROM_BIOS_COPY; 262 + /* 263 + * If ROM, set the actual ROM image size, and mark as 264 + * shadowed in PROM. 265 + */ 266 + if (idx == PCI_ROM_RESOURCE) { 267 + size_t image_size; 268 + void __iomem *rom; 269 + 270 + rom = ioremap(pci_resource_start(dev, PCI_ROM_RESOURCE), 271 + size + 1); 272 + image_size = pci_get_rom_size(rom, size + 1); 273 + dev->resource[PCI_ROM_RESOURCE].end = 274 + dev->resource[PCI_ROM_RESOURCE].start + 275 + image_size - 1; 276 + dev->resource[PCI_ROM_RESOURCE].flags |= 277 + IORESOURCE_ROM_BIOS_COPY; 278 + } 265 279 } 266 280 /* Create a pci_window in the pci_controller struct for 267 281 * each device resource.
+1 -1
arch/ia64/sn/kernel/tiocx.c
··· 369 369 370 370 static int is_fpga_tio(int nasid, int *bt) 371 371 { 372 - u16 ioboard_type; 372 + u16 uninitialized_var(ioboard_type); /* GCC be quiet */ 373 373 s64 rc; 374 374 375 375 rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard_type);
+1 -1
arch/ia64/sn/pci/pcibr/pcibr_provider.c
··· 80 80 u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus) 81 81 { 82 82 s64 rc; 83 - u16 ioboard; 83 + u16 uninitialized_var(ioboard); /* GCC be quiet */ 84 84 nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base); 85 85 86 86 rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard);
+44 -29
drivers/pci/rom.c
··· 54 54 } 55 55 56 56 /** 57 + * pci_get_rom_size - obtain the actual size of the ROM image 58 + * @rom: kernel virtual pointer to image of ROM 59 + * @size: size of PCI window 60 + * return: size of actual ROM image 61 + * 62 + * Determine the actual length of the ROM image. 63 + * The PCI window size could be much larger than the 64 + * actual image size. 65 + */ 66 + size_t pci_get_rom_size(void __iomem *rom, size_t size) 67 + { 68 + void __iomem *image; 69 + int last_image; 70 + 71 + image = rom; 72 + do { 73 + void __iomem *pds; 74 + /* Standard PCI ROMs start out with these bytes 55 AA */ 75 + if (readb(image) != 0x55) 76 + break; 77 + if (readb(image + 1) != 0xAA) 78 + break; 79 + /* get the PCI data structure and check its signature */ 80 + pds = image + readw(image + 24); 81 + if (readb(pds) != 'P') 82 + break; 83 + if (readb(pds + 1) != 'C') 84 + break; 85 + if (readb(pds + 2) != 'I') 86 + break; 87 + if (readb(pds + 3) != 'R') 88 + break; 89 + last_image = readb(pds + 21) & 0x80; 90 + /* this length is reliable */ 91 + image += readw(pds + 16) * 512; 92 + } while (!last_image); 93 + 94 + /* never return a size larger than the PCI resource window */ 95 + /* there are known ROMs that get the size wrong */ 96 + return min((size_t)(image - rom), size); 97 + } 98 + 99 + /** 57 100 * pci_map_rom - map a PCI ROM to kernel space 58 101 * @pdev: pointer to pci device struct 59 102 * @size: pointer to receive size of pci window over ROM ··· 111 68 struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; 112 69 loff_t start; 113 70 void __iomem *rom; 114 - void __iomem *image; 115 - int last_image; 116 71 117 72 /* 118 73 * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy ··· 158 117 * size is much larger than the actual size of the ROM. 159 118 * True size is important if the ROM is going to be copied. 
160 119 */ 161 - image = rom; 162 - do { 163 - void __iomem *pds; 164 - /* Standard PCI ROMs start out with these bytes 55 AA */ 165 - if (readb(image) != 0x55) 166 - break; 167 - if (readb(image + 1) != 0xAA) 168 - break; 169 - /* get the PCI data structure and check its signature */ 170 - pds = image + readw(image + 24); 171 - if (readb(pds) != 'P') 172 - break; 173 - if (readb(pds + 1) != 'C') 174 - break; 175 - if (readb(pds + 2) != 'I') 176 - break; 177 - if (readb(pds + 3) != 'R') 178 - break; 179 - last_image = readb(pds + 21) & 0x80; 180 - /* this length is reliable */ 181 - image += readw(pds + 16) * 512; 182 - } while (!last_image); 183 - 184 - /* never return a size larger than the PCI resource window */ 185 - /* there are known ROMs that get the size wrong */ 186 - *size = min((size_t)(image - rom), *size); 187 - 120 + *size = pci_get_rom_size(rom, *size); 188 121 return rom; 189 122 } 190 123
+1
include/asm-ia64/mca.h
··· 48 48 IA64_MCA_RENDEZ_CHECKIN_NOTDONE = 0x0, 49 49 IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1, 50 50 IA64_MCA_RENDEZ_CHECKIN_INIT = 0x2, 51 + IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA = 0x3, 51 52 }; 52 53 53 54 /* Information maintained by the MC infrastructure */
+20
include/asm-ia64/sn/sn_sal.h
··· 32 32 #define SN_SAL_NO_FAULT_ZONE_VIRTUAL 0x02000010 33 33 #define SN_SAL_NO_FAULT_ZONE_PHYSICAL 0x02000011 34 34 #define SN_SAL_PRINT_ERROR 0x02000012 35 + #define SN_SAL_REGISTER_PMI_HANDLER 0x02000014 35 36 #define SN_SAL_SET_ERROR_HANDLING_FEATURES 0x0200001a // reentrant 36 37 #define SN_SAL_GET_FIT_COMPT 0x0200001b // reentrant 37 38 #define SN_SAL_GET_SAPIC_INFO 0x0200001d ··· 677 676 } 678 677 ia64_sal_oemcall(&ret_stuff, call, start_addr, end_addr, return_addr, 679 678 (u64)1, 0, 0, 0); 679 + return ret_stuff.status; 680 + } 681 + 682 + /* 683 + * Register or unregister a function to handle a PMI received by a CPU. 684 + * Before calling the registered handler, SAL sets r1 to the value that 685 + * was passed in as the global_pointer. 686 + * 687 + * If the handler pointer is NULL, then the currently registered handler 688 + * will be unregistered. 689 + * 690 + * Returns 0 on success, or a negative value if an error occurred. 691 + */ 692 + static inline int 693 + sn_register_pmi_handler(u64 handler, u64 global_pointer) 694 + { 695 + struct ia64_sal_retval ret_stuff; 696 + ia64_sal_oemcall(&ret_stuff, SN_SAL_REGISTER_PMI_HANDLER, handler, 697 + global_pointer, 0, 0, 0, 0, 0); 680 698 return ret_stuff.status; 681 699 } 682 700
+1
include/linux/pci.h
··· 566 566 void __iomem __must_check *pci_map_rom_copy(struct pci_dev *pdev, size_t *size); 567 567 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); 568 568 void pci_remove_rom(struct pci_dev *pdev); 569 + size_t pci_get_rom_size(void __iomem *rom, size_t size); 569 570 570 571 /* Power management related routines */ 571 572 int pci_save_state(struct pci_dev *dev);