[IA64] Make efi.c mostly fit in 80 columns

This patch is purely whitespace changes to make the code fit in 80
columns, plus fix some inconsistent indentation. The efi_guidcmp()
tests remain wider than 80 columns since that seems to be the most
clear.

Signed-off-by: Aron Griffis <aron@hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>

authored by Aron Griffis and committed by Tony Luck 7d9aed26 cdef24c9

+263 -220
+263 -220
arch/ia64/kernel/efi.c
··· 1 /* 2 * Extensible Firmware Interface 3 * 4 - * Based on Extensible Firmware Interface Specification version 0.9 April 30, 1999 5 * 6 * Copyright (C) 1999 VA Linux Systems 7 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> ··· 49 50 #define efi_call_virt(f, args...) (*(f))(args) 51 52 - #define STUB_GET_TIME(prefix, adjust_arg) \ 53 - static efi_status_t \ 54 - prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \ 55 - { \ 56 - struct ia64_fpreg fr[6]; \ 57 - efi_time_cap_t *atc = NULL; \ 58 - efi_status_t ret; \ 59 - \ 60 - if (tc) \ 61 - atc = adjust_arg(tc); \ 62 - ia64_save_scratch_fpregs(fr); \ 63 - ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \ 64 - ia64_load_scratch_fpregs(fr); \ 65 - return ret; \ 66 } 67 68 - #define STUB_SET_TIME(prefix, adjust_arg) \ 69 - static efi_status_t \ 70 - prefix##_set_time (efi_time_t *tm) \ 71 - { \ 72 - struct ia64_fpreg fr[6]; \ 73 - efi_status_t ret; \ 74 - \ 75 - ia64_save_scratch_fpregs(fr); \ 76 - ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm)); \ 77 - ia64_load_scratch_fpregs(fr); \ 78 - return ret; \ 79 } 80 81 - #define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \ 82 - static efi_status_t \ 83 - prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) \ 84 - { \ 85 - struct ia64_fpreg fr[6]; \ 86 - efi_status_t ret; \ 87 - \ 88 - ia64_save_scratch_fpregs(fr); \ 89 - ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \ 90 - adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \ 91 - ia64_load_scratch_fpregs(fr); \ 92 - return ret; \ 93 } 94 95 - #define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \ 96 - static efi_status_t \ 97 - prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \ 98 - { \ 99 - struct ia64_fpreg fr[6]; \ 100 - efi_time_t *atm = NULL; \ 101 - efi_status_t ret; \ 102 - \ 103 - if (tm) \ 104 - atm = adjust_arg(tm); \ 105 - 
ia64_save_scratch_fpregs(fr); \ 106 - ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \ 107 - enabled, atm); \ 108 - ia64_load_scratch_fpregs(fr); \ 109 - return ret; \ 110 } 111 112 - #define STUB_GET_VARIABLE(prefix, adjust_arg) \ 113 - static efi_status_t \ 114 - prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \ 115 - unsigned long *data_size, void *data) \ 116 - { \ 117 - struct ia64_fpreg fr[6]; \ 118 - u32 *aattr = NULL; \ 119 - efi_status_t ret; \ 120 - \ 121 - if (attr) \ 122 - aattr = adjust_arg(attr); \ 123 - ia64_save_scratch_fpregs(fr); \ 124 - ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \ 125 - adjust_arg(name), adjust_arg(vendor), aattr, \ 126 - adjust_arg(data_size), adjust_arg(data)); \ 127 - ia64_load_scratch_fpregs(fr); \ 128 - return ret; \ 129 } 130 131 - #define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \ 132 - static efi_status_t \ 133 - prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) \ 134 - { \ 135 - struct ia64_fpreg fr[6]; \ 136 - efi_status_t ret; \ 137 - \ 138 - ia64_save_scratch_fpregs(fr); \ 139 - ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable), \ 140 - adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \ 141 - ia64_load_scratch_fpregs(fr); \ 142 - return ret; \ 143 } 144 145 - #define STUB_SET_VARIABLE(prefix, adjust_arg) \ 146 - static efi_status_t \ 147 - prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned long attr, \ 148 - unsigned long data_size, void *data) \ 149 - { \ 150 - struct ia64_fpreg fr[6]; \ 151 - efi_status_t ret; \ 152 - \ 153 - ia64_save_scratch_fpregs(fr); \ 154 - ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable), \ 155 - adjust_arg(name), adjust_arg(vendor), attr, data_size, \ 156 - adjust_arg(data)); \ 157 - ia64_load_scratch_fpregs(fr); \ 158 - return ret; \ 159 } 160 161 - #define 
STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \ 162 - static efi_status_t \ 163 - prefix##_get_next_high_mono_count (u32 *count) \ 164 - { \ 165 - struct ia64_fpreg fr[6]; \ 166 - efi_status_t ret; \ 167 - \ 168 - ia64_save_scratch_fpregs(fr); \ 169 - ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \ 170 - __va(runtime->get_next_high_mono_count), adjust_arg(count)); \ 171 - ia64_load_scratch_fpregs(fr); \ 172 - return ret; \ 173 } 174 175 - #define STUB_RESET_SYSTEM(prefix, adjust_arg) \ 176 - static void \ 177 - prefix##_reset_system (int reset_type, efi_status_t status, \ 178 - unsigned long data_size, efi_char16_t *data) \ 179 - { \ 180 - struct ia64_fpreg fr[6]; \ 181 - efi_char16_t *adata = NULL; \ 182 - \ 183 - if (data) \ 184 - adata = adjust_arg(data); \ 185 - \ 186 - ia64_save_scratch_fpregs(fr); \ 187 - efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \ 188 - reset_type, status, data_size, adata); \ 189 - /* should not return, but just in case... */ \ 190 - ia64_load_scratch_fpregs(fr); \ 191 } 192 193 #define phys_ptr(arg) ((__typeof__(arg)) ia64_tpa(arg)) ··· 236 return; 237 } 238 239 - ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second); 240 ts->tv_nsec = tm.nanosecond; 241 } 242 ··· 311 } 312 313 /* 314 - * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that 315 - * has memory that is available for OS use. 316 */ 317 void 318 efi_memmap_walk (efi_freemem_callback_t callback, void *arg) ··· 321 } 322 323 /* 324 - * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that 325 - * has memory that is available for uncached allocator. 326 */ 327 void 328 efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg) ··· 335 * ITR to enable safe PAL calls in virtual mode. 
See IA-64 Processor 336 * Abstraction Layer chapter 11 in ADAG 337 */ 338 - 339 void * 340 efi_get_pal_addr (void) 341 { ··· 354 continue; 355 356 if (++pal_code_count > 1) { 357 - printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n", 358 - md->phys_addr); 359 continue; 360 } 361 /* 362 - * The only ITLB entry in region 7 that is used is the one installed by 363 - * __start(). That entry covers a 64MB range. 364 */ 365 mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1); 366 vaddr = PAGE_OFFSET + md->phys_addr; 367 368 /* 369 - * We must check that the PAL mapping won't overlap with the kernel 370 - * mapping. 371 * 372 - * PAL code is guaranteed to be aligned on a power of 2 between 4k and 373 - * 256KB and that only one ITR is needed to map it. This implies that the 374 - * PAL code is always aligned on its size, i.e., the closest matching page 375 - * size supported by the TLB. Therefore PAL code is guaranteed never to 376 - * cross a 64MB unless it is bigger than 64MB (very unlikely!). So for 377 - * now the following test is enough to determine whether or not we need a 378 - * dedicated ITR for the PAL code. 
379 */ 380 if ((vaddr & mask) == (KERNEL_START & mask)) { 381 - printk(KERN_INFO "%s: no need to install ITR for PAL code\n", 382 - __FUNCTION__); 383 continue; 384 } 385 ··· 390 #if EFI_DEBUG 391 mask = ~((1 << IA64_GRANULE_SHIFT) - 1); 392 393 - printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n", 394 - smp_processor_id(), md->phys_addr, 395 - md->phys_addr + efi_md_size(md), 396 - vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE); 397 #endif 398 return __va(md->phys_addr); 399 } ··· 416 * Cannot write to CRx with PSR.ic=1 417 */ 418 psr = ia64_clear_ic(); 419 - ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr), 420 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)), 421 IA64_GRANULE_SHIFT); 422 ia64_set_psr(psr); /* restore psr */ ··· 434 char *cp, vendor[100] = "unknown"; 435 int i; 436 437 - /* it's too early to be able to use the standard kernel command line support... */ 438 for (cp = boot_command_line; *cp; ) { 439 if (memcmp(cp, "mem=", 4) == 0) { 440 mem_limit = memparse(cp + 4, &cp); ··· 453 } 454 } 455 if (min_addr != 0UL) 456 - printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20); 457 if (max_addr != ~0UL) 458 - printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20); 459 460 efi.systab = __va(ia64_boot_param->efi_systab); 461 ··· 485 } 486 487 printk(KERN_INFO "EFI v%u.%.02u by %s:", 488 - efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor); 489 490 efi.mps = EFI_INVALID_TABLE_ADDR; 491 efi.acpi = EFI_INVALID_TABLE_ADDR; ··· 541 efi_memory_desc_t *md; 542 void *p; 543 544 - for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) { 545 md = p; 546 - printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n", 547 i, md->type, md->attribute, md->phys_addr, 548 md->phys_addr + efi_md_size(md), 549 md->num_pages >> (20 - EFI_PAGE_SHIFT)); ··· 574 md = p; 575 if (md->attribute & EFI_MEMORY_RUNTIME) { 576 /* 577 - * 
Some descriptors have multiple bits set, so the order of 578 - * the tests is relevant. 579 */ 580 if (md->attribute & EFI_MEMORY_WB) { 581 md->virt_addr = (u64) __va(md->phys_addr); ··· 583 md->virt_addr = (u64) ioremap(md->phys_addr, 0); 584 } else if (md->attribute & EFI_MEMORY_WC) { 585 #if 0 586 - md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P 587 - | _PAGE_D 588 - | _PAGE_MA_WC 589 - | _PAGE_PL_0 590 - | _PAGE_AR_RW)); 591 #else 592 printk(KERN_INFO "EFI_MEMORY_WC mapping\n"); 593 md->virt_addr = (u64) ioremap(md->phys_addr, 0); 594 #endif 595 } else if (md->attribute & EFI_MEMORY_WT) { 596 #if 0 597 - md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P 598 - | _PAGE_D | _PAGE_MA_WT 599 - | _PAGE_PL_0 600 - | _PAGE_AR_RW)); 601 #else 602 printk(KERN_INFO "EFI_MEMORY_WT mapping\n"); 603 md->virt_addr = (u64) ioremap(md->phys_addr, 0); ··· 613 614 status = efi_call_phys(__va(runtime->set_virtual_address_map), 615 ia64_boot_param->efi_memmap_size, 616 - efi_desc_size, ia64_boot_param->efi_memdesc_version, 617 ia64_boot_param->efi_memmap); 618 if (status != EFI_SUCCESS) { 619 - printk(KERN_WARNING "warning: unable to switch EFI into virtual mode " 620 - "(status=%lu)\n", status); 621 return; 622 } 623 624 /* 625 - * Now that EFI is in virtual mode, we call the EFI functions more efficiently: 626 */ 627 efi.get_time = virt_get_time; 628 efi.set_time = virt_set_time; ··· 638 } 639 640 /* 641 - * Walk the EFI memory map looking for the I/O port range. There can only be one entry of 642 - * this type, other I/O port ranges should be described via ACPI. 
643 */ 644 u64 645 efi_get_iobase (void) ··· 710 711 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 712 md = p; 713 - 714 if (md->phys_addr < end && efi_md_end(md) > phys_addr) 715 return 1; 716 } ··· 914 return 1; 915 uart = 0; 916 } 917 - hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length); 918 } 919 printk(KERN_ERR "Malformed %s value\n", name); 920 return 0; ··· 952 if (!efi_wb(md)) { 953 continue; 954 } 955 - if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) { 956 contig_low = GRANULEROUNDUP(md->phys_addr); 957 contig_high = efi_md_end(md); 958 - for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) { 959 check_md = q; 960 if (!efi_wb(check_md)) 961 break; ··· 1021 for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) { 1022 md = p; 1023 if (!efi_wb(md)) { 1024 - if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY || 1025 - md->type == EFI_BOOT_SERVICES_DATA)) { 1026 k->attribute = EFI_MEMORY_UC; 1027 k->start = md->phys_addr; 1028 k->num_pages = md->num_pages; ··· 1031 } 1032 continue; 1033 } 1034 - if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) { 1035 contig_low = GRANULEROUNDUP(md->phys_addr); 1036 contig_high = efi_md_end(md); 1037 - for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) { 1038 check_md = q; 1039 if (!efi_wb(check_md)) 1040 break; ··· 1061 if (md->phys_addr < contig_low) { 1062 lim = min(efi_md_end(md), contig_low); 1063 if (efi_uc(md)) { 1064 - if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC && 1065 kmd_end(k-1) == md->phys_addr) { 1066 - (k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT; 1067 } else { 1068 k->attribute = EFI_MEMORY_UC; 1069 k->start = md->phys_addr; 1070 - k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT; 1071 k++; 1072 } 1073 } ··· 1089 } else { 1090 k->attribute = EFI_MEMORY_UC; 1091 k->start = lim; 1092 - k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT; 1093 k++; 1094 } 1095 } 
··· 1192 break; 1193 } 1194 1195 - if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) { 1196 - printk(KERN_ERR "failed to alocate resource for iomem\n"); 1197 return; 1198 } 1199 ··· 1230 rsvd_regions are sorted 1231 */ 1232 unsigned long __init 1233 - kdump_find_rsvd_region (unsigned long size, 1234 - struct rsvd_region *r, int n) 1235 { 1236 - int i; 1237 - u64 start, end; 1238 - u64 alignment = 1UL << _PAGE_SIZE_64M; 1239 - void *efi_map_start, *efi_map_end, *p; 1240 - efi_memory_desc_t *md; 1241 - u64 efi_desc_size; 1242 1243 - efi_map_start = __va(ia64_boot_param->efi_memmap); 1244 - efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 1245 - efi_desc_size = ia64_boot_param->efi_memdesc_size; 1246 1247 - for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 1248 - md = p; 1249 - if (!efi_wb(md)) 1250 - continue; 1251 - start = ALIGN(md->phys_addr, alignment); 1252 - end = efi_md_end(md); 1253 - for (i = 0; i < n; i++) { 1254 - if (__pa(r[i].start) >= start && __pa(r[i].end) < end) { 1255 - if (__pa(r[i].start) > start + size) 1256 - return start; 1257 - start = ALIGN(__pa(r[i].end), alignment); 1258 - if (i < n-1 && __pa(r[i+1].start) < start + size) 1259 - continue; 1260 - else 1261 - break; 1262 } 1263 - } 1264 - if (end > start + size) 1265 - return start; 1266 - } 1267 1268 - printk(KERN_WARNING "Cannot reserve 0x%lx byte of memory for crashdump\n", 1269 - size); 1270 - return ~0UL; 1271 } 1272 #endif 1273
··· 1 /* 2 * Extensible Firmware Interface 3 * 4 + * Based on Extensible Firmware Interface Specification version 0.9 5 + * April 30, 1999 6 * 7 * Copyright (C) 1999 VA Linux Systems 8 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> ··· 48 49 #define efi_call_virt(f, args...) (*(f))(args) 50 51 + #define STUB_GET_TIME(prefix, adjust_arg) \ 52 + static efi_status_t \ 53 + prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \ 54 + { \ 55 + struct ia64_fpreg fr[6]; \ 56 + efi_time_cap_t *atc = NULL; \ 57 + efi_status_t ret; \ 58 + \ 59 + if (tc) \ 60 + atc = adjust_arg(tc); \ 61 + ia64_save_scratch_fpregs(fr); \ 62 + ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), \ 63 + adjust_arg(tm), atc); \ 64 + ia64_load_scratch_fpregs(fr); \ 65 + return ret; \ 66 } 67 68 + #define STUB_SET_TIME(prefix, adjust_arg) \ 69 + static efi_status_t \ 70 + prefix##_set_time (efi_time_t *tm) \ 71 + { \ 72 + struct ia64_fpreg fr[6]; \ 73 + efi_status_t ret; \ 74 + \ 75 + ia64_save_scratch_fpregs(fr); \ 76 + ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), \ 77 + adjust_arg(tm)); \ 78 + ia64_load_scratch_fpregs(fr); \ 79 + return ret; \ 80 } 81 82 + #define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \ 83 + static efi_status_t \ 84 + prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, \ 85 + efi_time_t *tm) \ 86 + { \ 87 + struct ia64_fpreg fr[6]; \ 88 + efi_status_t ret; \ 89 + \ 90 + ia64_save_scratch_fpregs(fr); \ 91 + ret = efi_call_##prefix( \ 92 + (efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \ 93 + adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \ 94 + ia64_load_scratch_fpregs(fr); \ 95 + return ret; \ 96 } 97 98 + #define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \ 99 + static efi_status_t \ 100 + prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \ 101 + { \ 102 + struct ia64_fpreg fr[6]; \ 103 + efi_time_t *atm = NULL; \ 104 + efi_status_t ret; \ 105 + \ 106 + if (tm) \ 107 + atm = 
adjust_arg(tm); \ 108 + ia64_save_scratch_fpregs(fr); \ 109 + ret = efi_call_##prefix( \ 110 + (efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \ 111 + enabled, atm); \ 112 + ia64_load_scratch_fpregs(fr); \ 113 + return ret; \ 114 } 115 116 + #define STUB_GET_VARIABLE(prefix, adjust_arg) \ 117 + static efi_status_t \ 118 + prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \ 119 + unsigned long *data_size, void *data) \ 120 + { \ 121 + struct ia64_fpreg fr[6]; \ 122 + u32 *aattr = NULL; \ 123 + efi_status_t ret; \ 124 + \ 125 + if (attr) \ 126 + aattr = adjust_arg(attr); \ 127 + ia64_save_scratch_fpregs(fr); \ 128 + ret = efi_call_##prefix( \ 129 + (efi_get_variable_t *) __va(runtime->get_variable), \ 130 + adjust_arg(name), adjust_arg(vendor), aattr, \ 131 + adjust_arg(data_size), adjust_arg(data)); \ 132 + ia64_load_scratch_fpregs(fr); \ 133 + return ret; \ 134 } 135 136 + #define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \ 137 + static efi_status_t \ 138 + prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \ 139 + efi_guid_t *vendor) \ 140 + { \ 141 + struct ia64_fpreg fr[6]; \ 142 + efi_status_t ret; \ 143 + \ 144 + ia64_save_scratch_fpregs(fr); \ 145 + ret = efi_call_##prefix( \ 146 + (efi_get_next_variable_t *) __va(runtime->get_next_variable), \ 147 + adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \ 148 + ia64_load_scratch_fpregs(fr); \ 149 + return ret; \ 150 } 151 152 + #define STUB_SET_VARIABLE(prefix, adjust_arg) \ 153 + static efi_status_t \ 154 + prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \ 155 + unsigned long attr, unsigned long data_size, \ 156 + void *data) \ 157 + { \ 158 + struct ia64_fpreg fr[6]; \ 159 + efi_status_t ret; \ 160 + \ 161 + ia64_save_scratch_fpregs(fr); \ 162 + ret = efi_call_##prefix( \ 163 + (efi_set_variable_t *) __va(runtime->set_variable), \ 164 + adjust_arg(name), adjust_arg(vendor), attr, data_size, \ 165 + adjust_arg(data)); \ 166 + 
ia64_load_scratch_fpregs(fr); \ 167 + return ret; \ 168 } 169 170 + #define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \ 171 + static efi_status_t \ 172 + prefix##_get_next_high_mono_count (u32 *count) \ 173 + { \ 174 + struct ia64_fpreg fr[6]; \ 175 + efi_status_t ret; \ 176 + \ 177 + ia64_save_scratch_fpregs(fr); \ 178 + ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \ 179 + __va(runtime->get_next_high_mono_count), \ 180 + adjust_arg(count)); \ 181 + ia64_load_scratch_fpregs(fr); \ 182 + return ret; \ 183 } 184 185 + #define STUB_RESET_SYSTEM(prefix, adjust_arg) \ 186 + static void \ 187 + prefix##_reset_system (int reset_type, efi_status_t status, \ 188 + unsigned long data_size, efi_char16_t *data) \ 189 + { \ 190 + struct ia64_fpreg fr[6]; \ 191 + efi_char16_t *adata = NULL; \ 192 + \ 193 + if (data) \ 194 + adata = adjust_arg(data); \ 195 + \ 196 + ia64_save_scratch_fpregs(fr); \ 197 + efi_call_##prefix( \ 198 + (efi_reset_system_t *) __va(runtime->reset_system), \ 199 + reset_type, status, data_size, adata); \ 200 + /* should not return, but just in case... */ \ 201 + ia64_load_scratch_fpregs(fr); \ 202 } 203 204 #define phys_ptr(arg) ((__typeof__(arg)) ia64_tpa(arg)) ··· 223 return; 224 } 225 226 + ts->tv_sec = mktime(tm.year, tm.month, tm.day, 227 + tm.hour, tm.minute, tm.second); 228 ts->tv_nsec = tm.nanosecond; 229 } 230 ··· 297 } 298 299 /* 300 + * Walks the EFI memory map and calls CALLBACK once for each EFI memory 301 + * descriptor that has memory that is available for OS use. 302 */ 303 void 304 efi_memmap_walk (efi_freemem_callback_t callback, void *arg) ··· 307 } 308 309 /* 310 + * Walks the EFI memory map and calls CALLBACK once for each EFI memory 311 + * descriptor that has memory that is available for uncached allocator. 312 */ 313 void 314 efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg) ··· 321 * ITR to enable safe PAL calls in virtual mode. 
See IA-64 Processor 322 * Abstraction Layer chapter 11 in ADAG 323 */ 324 void * 325 efi_get_pal_addr (void) 326 { ··· 341 continue; 342 343 if (++pal_code_count > 1) { 344 + printk(KERN_ERR "Too many EFI Pal Code memory ranges, " 345 + "dropped @ %lx\n", md->phys_addr); 346 continue; 347 } 348 /* 349 + * The only ITLB entry in region 7 that is used is the one 350 + * installed by __start(). That entry covers a 64MB range. 351 */ 352 mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1); 353 vaddr = PAGE_OFFSET + md->phys_addr; 354 355 /* 356 + * We must check that the PAL mapping won't overlap with the 357 + * kernel mapping. 358 * 359 + * PAL code is guaranteed to be aligned on a power of 2 between 360 + * 4k and 256KB and that only one ITR is needed to map it. This 361 + * implies that the PAL code is always aligned on its size, 362 + * i.e., the closest matching page size supported by the TLB. 363 + * Therefore PAL code is guaranteed never to cross a 64MB unless 364 + * it is bigger than 64MB (very unlikely!). So for now the 365 + * following test is enough to determine whether or not we need 366 + * a dedicated ITR for the PAL code. 
367 */ 368 if ((vaddr & mask) == (KERNEL_START & mask)) { 369 + printk(KERN_INFO "%s: no need to install ITR for " 370 + "PAL code\n", __FUNCTION__); 371 continue; 372 } 373 ··· 376 #if EFI_DEBUG 377 mask = ~((1 << IA64_GRANULE_SHIFT) - 1); 378 379 + printk(KERN_INFO "CPU %d: mapping PAL code " 380 + "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n", 381 + smp_processor_id(), md->phys_addr, 382 + md->phys_addr + efi_md_size(md), 383 + vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE); 384 #endif 385 return __va(md->phys_addr); 386 } ··· 401 * Cannot write to CRx with PSR.ic=1 402 */ 403 psr = ia64_clear_ic(); 404 + ia64_itr(0x1, IA64_TR_PALCODE, 405 + GRANULEROUNDDOWN((unsigned long) pal_vaddr), 406 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)), 407 IA64_GRANULE_SHIFT); 408 ia64_set_psr(psr); /* restore psr */ ··· 418 char *cp, vendor[100] = "unknown"; 419 int i; 420 421 + /* 422 + * it's too early to be able to use the standard kernel command line 423 + * support... 424 + */ 425 for (cp = boot_command_line; *cp; ) { 426 if (memcmp(cp, "mem=", 4) == 0) { 427 mem_limit = memparse(cp + 4, &cp); ··· 434 } 435 } 436 if (min_addr != 0UL) 437 + printk(KERN_INFO "Ignoring memory below %luMB\n", 438 + min_addr >> 20); 439 if (max_addr != ~0UL) 440 + printk(KERN_INFO "Ignoring memory above %luMB\n", 441 + max_addr >> 20); 442 443 efi.systab = __va(ia64_boot_param->efi_systab); 444 ··· 464 } 465 466 printk(KERN_INFO "EFI v%u.%.02u by %s:", 467 + efi.systab->hdr.revision >> 16, 468 + efi.systab->hdr.revision & 0xffff, vendor); 469 470 efi.mps = EFI_INVALID_TABLE_ADDR; 471 efi.acpi = EFI_INVALID_TABLE_ADDR; ··· 519 efi_memory_desc_t *md; 520 void *p; 521 522 + for (i = 0, p = efi_map_start; p < efi_map_end; 523 + ++i, p += efi_desc_size) 524 + { 525 md = p; 526 + printk("mem%02u: type=%u, attr=0x%lx, " 527 + "range=[0x%016lx-0x%016lx) (%luMB)\n", 528 i, md->type, md->attribute, md->phys_addr, 529 md->phys_addr + efi_md_size(md), 530 md->num_pages >> (20 - EFI_PAGE_SHIFT)); 
··· 549 md = p; 550 if (md->attribute & EFI_MEMORY_RUNTIME) { 551 /* 552 + * Some descriptors have multiple bits set, so the 553 + * order of the tests is relevant. 554 */ 555 if (md->attribute & EFI_MEMORY_WB) { 556 md->virt_addr = (u64) __va(md->phys_addr); ··· 558 md->virt_addr = (u64) ioremap(md->phys_addr, 0); 559 } else if (md->attribute & EFI_MEMORY_WC) { 560 #if 0 561 + md->virt_addr = ia64_remap(md->phys_addr, 562 + (_PAGE_A | 563 + _PAGE_P | 564 + _PAGE_D | 565 + _PAGE_MA_WC | 566 + _PAGE_PL_0 | 567 + _PAGE_AR_RW)); 568 #else 569 printk(KERN_INFO "EFI_MEMORY_WC mapping\n"); 570 md->virt_addr = (u64) ioremap(md->phys_addr, 0); 571 #endif 572 } else if (md->attribute & EFI_MEMORY_WT) { 573 #if 0 574 + md->virt_addr = ia64_remap(md->phys_addr, 575 + (_PAGE_A | 576 + _PAGE_P | 577 + _PAGE_D | 578 + _PAGE_MA_WT | 579 + _PAGE_PL_0 | 580 + _PAGE_AR_RW)); 581 #else 582 printk(KERN_INFO "EFI_MEMORY_WT mapping\n"); 583 md->virt_addr = (u64) ioremap(md->phys_addr, 0); ··· 583 584 status = efi_call_phys(__va(runtime->set_virtual_address_map), 585 ia64_boot_param->efi_memmap_size, 586 + efi_desc_size, 587 + ia64_boot_param->efi_memdesc_version, 588 ia64_boot_param->efi_memmap); 589 if (status != EFI_SUCCESS) { 590 + printk(KERN_WARNING "warning: unable to switch EFI into " 591 + "virtual mode (status=%lu)\n", status); 592 return; 593 } 594 595 /* 596 + * Now that EFI is in virtual mode, we call the EFI functions more 597 + * efficiently: 598 */ 599 efi.get_time = virt_get_time; 600 efi.set_time = virt_set_time; ··· 606 } 607 608 /* 609 + * Walk the EFI memory map looking for the I/O port range. There can only be 610 + * one entry of this type, other I/O port ranges should be described via ACPI. 
611 */ 612 u64 613 efi_get_iobase (void) ··· 678 679 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 680 md = p; 681 if (md->phys_addr < end && efi_md_end(md) > phys_addr) 682 return 1; 683 } ··· 883 return 1; 884 uart = 0; 885 } 886 + hdr = (struct efi_generic_dev_path *)((u8 *) hdr + hdr->length); 887 } 888 printk(KERN_ERR "Malformed %s value\n", name); 889 return 0; ··· 921 if (!efi_wb(md)) { 922 continue; 923 } 924 + if (pmd == NULL || !efi_wb(pmd) || 925 + efi_md_end(pmd) != md->phys_addr) { 926 contig_low = GRANULEROUNDUP(md->phys_addr); 927 contig_high = efi_md_end(md); 928 + for (q = p + efi_desc_size; q < efi_map_end; 929 + q += efi_desc_size) { 930 check_md = q; 931 if (!efi_wb(check_md)) 932 break; ··· 988 for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) { 989 md = p; 990 if (!efi_wb(md)) { 991 + if (efi_uc(md) && 992 + (md->type == EFI_CONVENTIONAL_MEMORY || 993 + md->type == EFI_BOOT_SERVICES_DATA)) { 994 k->attribute = EFI_MEMORY_UC; 995 k->start = md->phys_addr; 996 k->num_pages = md->num_pages; ··· 997 } 998 continue; 999 } 1000 + if (pmd == NULL || !efi_wb(pmd) || 1001 + efi_md_end(pmd) != md->phys_addr) { 1002 contig_low = GRANULEROUNDUP(md->phys_addr); 1003 contig_high = efi_md_end(md); 1004 + for (q = p + efi_desc_size; q < efi_map_end; 1005 + q += efi_desc_size) { 1006 check_md = q; 1007 if (!efi_wb(check_md)) 1008 break; ··· 1025 if (md->phys_addr < contig_low) { 1026 lim = min(efi_md_end(md), contig_low); 1027 if (efi_uc(md)) { 1028 + if (k > kern_memmap && 1029 + (k-1)->attribute == EFI_MEMORY_UC && 1030 kmd_end(k-1) == md->phys_addr) { 1031 + (k-1)->num_pages += 1032 + (lim - md->phys_addr) 1033 + >> EFI_PAGE_SHIFT; 1034 } else { 1035 k->attribute = EFI_MEMORY_UC; 1036 k->start = md->phys_addr; 1037 + k->num_pages = (lim - md->phys_addr) 1038 + >> EFI_PAGE_SHIFT; 1039 k++; 1040 } 1041 } ··· 1049 } else { 1050 k->attribute = EFI_MEMORY_UC; 1051 k->start = lim; 1052 + k->num_pages = (efi_md_end(md) - lim) 
1053 + >> EFI_PAGE_SHIFT; 1054 k++; 1055 } 1056 } ··· 1151 break; 1152 } 1153 1154 + if ((res = kzalloc(sizeof(struct resource), 1155 + GFP_KERNEL)) == NULL) { 1156 + printk(KERN_ERR 1157 + "failed to alocate resource for iomem\n"); 1158 return; 1159 } 1160 ··· 1187 rsvd_regions are sorted 1188 */ 1189 unsigned long __init 1190 + kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n) 1191 { 1192 + int i; 1193 + u64 start, end; 1194 + u64 alignment = 1UL << _PAGE_SIZE_64M; 1195 + void *efi_map_start, *efi_map_end, *p; 1196 + efi_memory_desc_t *md; 1197 + u64 efi_desc_size; 1198 1199 + efi_map_start = __va(ia64_boot_param->efi_memmap); 1200 + efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 1201 + efi_desc_size = ia64_boot_param->efi_memdesc_size; 1202 1203 + for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 1204 + md = p; 1205 + if (!efi_wb(md)) 1206 + continue; 1207 + start = ALIGN(md->phys_addr, alignment); 1208 + end = efi_md_end(md); 1209 + for (i = 0; i < n; i++) { 1210 + if (__pa(r[i].start) >= start && __pa(r[i].end) < end) { 1211 + if (__pa(r[i].start) > start + size) 1212 + return start; 1213 + start = ALIGN(__pa(r[i].end), alignment); 1214 + if (i < n-1 && 1215 + __pa(r[i+1].start) < start + size) 1216 + continue; 1217 + else 1218 + break; 1219 + } 1220 } 1221 + if (end > start + size) 1222 + return start; 1223 + } 1224 1225 + printk(KERN_WARNING 1226 + "Cannot reserve 0x%lx byte of memory for crashdump\n", size); 1227 + return ~0UL; 1228 } 1229 #endif 1230