Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Rename .data.patch.XXX to .data..patch.XXX.

Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
Signed-off-by: Michal Marek <mmarek@suse.cz>

Authored by Denys Vlasenko and committed by Michal Marek.
dafb9320 9d1578a3

+25 -25
+6 -6
arch/ia64/include/asm/asmmacro.h
··· 70 70 * path (ivt.S - TLB miss processing) or in places where it might not be 71 71 * safe to use a "tpa" instruction (mca_asm.S - error recovery). 72 72 */ 73 - .section ".data.patch.vtop", "a" // declare section & section attributes 73 + .section ".data..patch.vtop", "a" // declare section & section attributes 74 74 .previous 75 75 76 76 #define LOAD_PHYSICAL(pr, reg, obj) \ 77 77 [1:](pr)movl reg = obj; \ 78 - .xdata4 ".data.patch.vtop", 1b-. 78 + .xdata4 ".data..patch.vtop", 1b-. 79 79 80 80 /* 81 81 * For now, we always put in the McKinley E9 workaround. On CPUs that don't need it, ··· 84 84 #define DO_MCKINLEY_E9_WORKAROUND 85 85 86 86 #ifdef DO_MCKINLEY_E9_WORKAROUND 87 - .section ".data.patch.mckinley_e9", "a" 87 + .section ".data..patch.mckinley_e9", "a" 88 88 .previous 89 89 /* workaround for Itanium 2 Errata 9: */ 90 90 # define FSYS_RETURN \ 91 - .xdata4 ".data.patch.mckinley_e9", 1f-.; \ 91 + .xdata4 ".data..patch.mckinley_e9", 1f-.; \ 92 92 1:{ .mib; \ 93 93 nop.m 0; \ 94 94 mov r16=ar.pfs; \ ··· 107 107 * If physical stack register size is different from DEF_NUM_STACK_REG, 108 108 * dynamically patch the kernel for correct size. 109 109 */ 110 - .section ".data.patch.phys_stack_reg", "a" 110 + .section ".data..patch.phys_stack_reg", "a" 111 111 .previous 112 112 #define LOAD_PHYS_STACK_REG_SIZE(reg) \ 113 113 [1:] adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0; \ 114 - .xdata4 ".data.patch.phys_stack_reg", 1b-. 114 + .xdata4 ".data..patch.phys_stack_reg", 1b-. 115 115 116 116 /* 117 117 * Up until early 2004, use of .align within a function caused bad unwind info.
+4 -4
arch/ia64/kernel/gate.S
··· 21 21 * to targets outside the shared object) and to avoid multi-phase kernel builds, we 22 22 * simply create minimalistic "patch lists" in special ELF sections. 23 23 */ 24 - .section ".data.patch.fsyscall_table", "a" 24 + .section ".data..patch.fsyscall_table", "a" 25 25 .previous 26 26 #define LOAD_FSYSCALL_TABLE(reg) \ 27 27 [1:] movl reg=0; \ 28 - .xdata4 ".data.patch.fsyscall_table", 1b-. 28 + .xdata4 ".data..patch.fsyscall_table", 1b-. 29 29 30 - .section ".data.patch.brl_fsys_bubble_down", "a" 30 + .section ".data..patch.brl_fsys_bubble_down", "a" 31 31 .previous 32 32 #define BRL_COND_FSYS_BUBBLE_DOWN(pr) \ 33 33 [1:](pr)brl.cond.sptk 0; \ 34 34 ;; \ 35 - .xdata4 ".data.patch.brl_fsys_bubble_down", 1b-. 35 + .xdata4 ".data..patch.brl_fsys_bubble_down", 1b-. 36 36 37 37 GLOBAL_ENTRY(__kernel_syscall_via_break) 38 38 .prologue
+5 -5
arch/ia64/kernel/gate.lds.S
··· 33 33 */ 34 34 . = GATE_ADDR + 0x600; 35 35 36 - .data.patch : { 36 + .data..patch : { 37 37 __paravirt_start_gate_mckinley_e9_patchlist = .; 38 - *(.data.patch.mckinley_e9) 38 + *(.data..patch.mckinley_e9) 39 39 __paravirt_end_gate_mckinley_e9_patchlist = .; 40 40 41 41 __paravirt_start_gate_vtop_patchlist = .; 42 - *(.data.patch.vtop) 42 + *(.data..patch.vtop) 43 43 __paravirt_end_gate_vtop_patchlist = .; 44 44 45 45 __paravirt_start_gate_fsyscall_patchlist = .; 46 - *(.data.patch.fsyscall_table) 46 + *(.data..patch.fsyscall_table) 47 47 __paravirt_end_gate_fsyscall_patchlist = .; 48 48 49 49 __paravirt_start_gate_brl_fsys_bubble_down_patchlist = .; 50 - *(.data.patch.brl_fsys_bubble_down) 50 + *(.data..patch.brl_fsys_bubble_down) 51 51 __paravirt_end_gate_brl_fsys_bubble_down_patchlist = .; 52 52 } :readable 53 53
+2 -2
arch/ia64/kernel/minstate.h
··· 16 16 #define ACCOUNT_SYS_ENTER 17 17 #endif 18 18 19 - .section ".data.patch.rse", "a" 19 + .section ".data..patch.rse", "a" 20 20 .previous 21 21 22 22 /* ··· 215 215 (pUStk) extr.u r17=r18,3,6; \ 216 216 (pUStk) sub r16=r18,r22; \ 217 217 [1:](pKStk) br.cond.sptk.many 1f; \ 218 - .xdata4 ".data.patch.rse",1b-. \ 218 + .xdata4 ".data..patch.rse",1b-. \ 219 219 ;; \ 220 220 cmp.ge p6,p7 = 33,r17; \ 221 221 ;; \
+8 -8
arch/ia64/kernel/vmlinux.lds.S
··· 75 75 __stop___mca_table = .; 76 76 } 77 77 78 - .data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET) 78 + .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) 79 79 { 80 80 __start___phys_stack_reg_patchlist = .; 81 - *(.data.patch.phys_stack_reg) 81 + *(.data..patch.phys_stack_reg) 82 82 __end___phys_stack_reg_patchlist = .; 83 83 } 84 84 ··· 110 110 INIT_TEXT_SECTION(PAGE_SIZE) 111 111 INIT_DATA_SECTION(16) 112 112 113 - .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET) 113 + .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) 114 114 { 115 115 __start___vtop_patchlist = .; 116 - *(.data.patch.vtop) 116 + *(.data..patch.vtop) 117 117 __end___vtop_patchlist = .; 118 118 } 119 119 120 - .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET) 120 + .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) 121 121 { 122 122 __start___rse_patchlist = .; 123 - *(.data.patch.rse) 123 + *(.data..patch.rse) 124 124 __end___rse_patchlist = .; 125 125 } 126 126 127 - .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET) 127 + .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) 128 128 { 129 129 __start___mckinley_e9_bundles = .; 130 - *(.data.patch.mckinley_e9) 130 + *(.data..patch.mckinley_e9) 131 131 __end___mckinley_e9_bundles = .; 132 132 } 133 133