Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/amode31: change type of __samode31, __eamode31, etc

For consistency reasons change the type of __samode31, __eamode31,
__stext_amode31, and __etext_amode31 to a char pointer so they
(nearly) match the type of all other sections.

This allows for code simplifications with follow-on patches.

Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>

+16 -16
+2 -2
arch/s390/include/asm/sections.h
··· 23 23 */ 24 24 #define __bootdata_preserved(var) __section(".boot.preserved.data." #var) var 25 25 26 - extern unsigned long __samode31, __eamode31; 27 - extern unsigned long __stext_amode31, __etext_amode31; 26 + extern char *__samode31, *__eamode31; 27 + extern char *__stext_amode31, *__etext_amode31; 28 28 29 29 #endif
+2 -2
arch/s390/kernel/machine_kexec.c
··· 216 216 VMCOREINFO_SYMBOL(lowcore_ptr); 217 217 VMCOREINFO_SYMBOL(high_memory); 218 218 VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); 219 - vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31); 220 - vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31); 219 + vmcoreinfo_append_str("SAMODE31=%lx\n", (unsigned long)__samode31); 220 + vmcoreinfo_append_str("EAMODE31=%lx\n", (unsigned long)__eamode31); 221 221 vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); 222 222 abs_lc = get_abs_lowcore(); 223 223 abs_lc->vmcore_info = paddr_vmcoreinfo_note();
+8 -8
arch/s390/kernel/setup.c
··· 97 97 * relocated above 2 GB, because it has to use 31 bit addresses. 98 98 * Such code and data is part of the .amode31 section. 99 99 */ 100 - unsigned long __amode31_ref __samode31 = (unsigned long)&_samode31; 101 - unsigned long __amode31_ref __eamode31 = (unsigned long)&_eamode31; 102 - unsigned long __amode31_ref __stext_amode31 = (unsigned long)&_stext_amode31; 103 - unsigned long __amode31_ref __etext_amode31 = (unsigned long)&_etext_amode31; 100 + char __amode31_ref *__samode31 = _samode31; 101 + char __amode31_ref *__eamode31 = _eamode31; 102 + char __amode31_ref *__stext_amode31 = _stext_amode31; 103 + char __amode31_ref *__etext_amode31 = _etext_amode31; 104 104 struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table; 105 105 struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table; 106 106 ··· 770 770 static void __init relocate_amode31_section(void) 771 771 { 772 772 unsigned long amode31_size = __eamode31 - __samode31; 773 - long amode31_offset = physmem_info.reserved[RR_AMODE31].start - __samode31; 774 - long *ptr; 773 + long amode31_offset, *ptr; 775 774 775 + amode31_offset = physmem_info.reserved[RR_AMODE31].start - (unsigned long)__samode31; 776 776 pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size); 777 777 778 778 /* Move original AMODE31 section to the new one */ 779 - memmove((void *)physmem_info.reserved[RR_AMODE31].start, (void *)__samode31, amode31_size); 779 + memmove((void *)physmem_info.reserved[RR_AMODE31].start, __samode31, amode31_size); 780 780 /* Zero out the old AMODE31 section to catch invalid accesses within it */ 781 - memset((void *)__samode31, 0, amode31_size); 781 + memset(__samode31, 0, amode31_size); 782 782 783 783 /* Update all AMODE31 region references */ 784 784 for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
+2 -2
arch/s390/mm/dump_pagetables.c
··· 290 290 max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2; 291 291 max_addr = 1UL << (max_addr * 11 + 31); 292 292 address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size; 293 - address_markers[AMODE31_START_NR].start_address = __samode31; 294 - address_markers[AMODE31_END_NR].start_address = __eamode31; 293 + address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31; 294 + address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31; 295 295 address_markers[MODULES_NR].start_address = MODULES_VADDR; 296 296 address_markers[MODULES_END_NR].start_address = MODULES_END; 297 297 address_markers[ABS_LOWCORE_NR].start_address = __abs_lowcore;
+2 -2
arch/s390/mm/vmem.c
··· 657 657 (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT); 658 658 set_memory_rox((unsigned long)_sinittext, 659 659 (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT); 660 - set_memory_rox(__stext_amode31, 661 - (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT); 660 + set_memory_rox((unsigned long)__stext_amode31, 661 + (unsigned long)(__etext_amode31 - __stext_amode31) >> PAGE_SHIFT); 662 662 663 663 /* lowcore must be executable for LPSWE */ 664 664 if (!static_key_enabled(&cpu_has_bear))