Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: 7013/1: P2V: Remove ARM_PATCH_PHYS_VIRT_16BIT

This code can be removed now that MSM targets no longer need the 16-bit
offsets for P2V.

Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored by Nicolas Pitre; committed by Russell King.
daece596 9e775ad1

+14 -68
+1 -9
arch/arm/Kconfig
··· 205 205 kernel in system memory. 206 206 207 207 This can only be used with non-XIP MMU kernels where the base 208 - of physical memory is at a 16MB boundary, or theoretically 64K 209 - for the MSM machine class. 208 + of physical memory is at a 16MB boundary. 210 209 211 210 Only disable this option if you know that you do not require 212 211 this feature (eg, building a kernel for a single machine) and 213 212 you need to shrink the kernel to the minimal size. 214 213 215 - config ARM_PATCH_PHYS_VIRT_16BIT 216 - def_bool y 217 - depends on ARM_PATCH_PHYS_VIRT && ARCH_MSM 218 - help 219 - This option extends the physical to virtual translation patching 220 - to allow physical memory down to a theoretical minimum of 64K 221 - boundaries. 222 214 223 215 source "init/Kconfig" 224 216
-7
arch/arm/include/asm/memory.h
··· 160 160 * so that all we need to do is modify the 8-bit constant field. 161 161 */ 162 162 #define __PV_BITS_31_24 0x81000000 163 - #define __PV_BITS_23_16 0x00810000 164 163 165 164 extern unsigned long __pv_phys_offset; 166 165 #define PHYS_OFFSET __pv_phys_offset ··· 177 178 { 178 179 unsigned long t; 179 180 __pv_stub(x, t, "add", __PV_BITS_31_24); 180 - #ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT 181 - __pv_stub(t, t, "add", __PV_BITS_23_16); 182 - #endif 183 181 return t; 184 182 } 185 183 ··· 184 188 { 185 189 unsigned long t; 186 190 __pv_stub(x, t, "sub", __PV_BITS_31_24); 187 - #ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT 188 - __pv_stub(t, t, "sub", __PV_BITS_23_16); 189 - #endif 190 191 return t; 191 192 } 192 193 #else
-4
arch/arm/include/asm/module.h
··· 31 31 32 32 /* Add __virt_to_phys patching state as well */ 33 33 #ifdef CONFIG_ARM_PATCH_PHYS_VIRT 34 - #ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT 35 - #define MODULE_ARCH_VERMAGIC_P2V "p2v16 " 36 - #else 37 34 #define MODULE_ARCH_VERMAGIC_P2V "p2v8 " 38 - #endif 39 35 #else 40 36 #define MODULE_ARCH_VERMAGIC_P2V "" 41 37 #endif
+13 -48
arch/arm/kernel/head.S
··· 488 488 add r5, r5, r3 @ adjust table end address 489 489 add r7, r7, r3 @ adjust __pv_phys_offset address 490 490 str r8, [r7] @ save computed PHYS_OFFSET to __pv_phys_offset 491 - #ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT 492 491 mov r6, r3, lsr #24 @ constant for add/sub instructions 493 492 teq r3, r6, lsl #24 @ must be 16MiB aligned 494 - #else 495 - mov r6, r3, lsr #16 @ constant for add/sub instructions 496 - teq r3, r6, lsl #16 @ must be 64kiB aligned 497 - #endif 498 493 THUMB( it ne @ cross section branch ) 499 494 bne __error 500 495 str r6, [r7, #4] @ save to __pv_offset ··· 505 510 .text 506 511 __fixup_a_pv_table: 507 512 #ifdef CONFIG_THUMB2_KERNEL 508 - #ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT 509 - lsls r0, r6, #24 510 - lsr r6, #8 511 - beq 1f 512 - clz r7, r0 513 - lsr r0, #24 514 - lsl r0, r7 515 - bic r0, 0x0080 516 - lsrs r7, #1 517 - orrcs r0, #0x0080 518 - orr r0, r0, r7, lsl #12 519 - #endif 520 - 1: lsls r6, #24 521 - beq 4f 513 + lsls r6, #24 514 + beq 2f 522 515 clz r7, r6 523 516 lsr r6, #24 524 517 lsl r6, r7 ··· 515 532 orrcs r6, #0x0080 516 533 orr r6, r6, r7, lsl #12 517 534 orr r6, #0x4000 518 - b 4f 519 - 2: @ at this point the C flag is always clear 520 - add r7, r3 521 - #ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT 522 - ldrh ip, [r7] 523 - tst ip, 0x0400 @ the i bit tells us LS or MS byte 524 - beq 3f 525 - cmp r0, #0 @ set C flag, and ... 
526 - biceq ip, 0x0400 @ immediate zero value has a special encoding 527 - streqh ip, [r7] @ that requires the i bit cleared 528 - #endif 529 - 3: ldrh ip, [r7, #2] 535 + b 2f 536 + 1: add r7, r3 537 + ldrh ip, [r7, #2] 530 538 and ip, 0x8f00 531 - orrcc ip, r6 @ mask in offset bits 31-24 532 - orrcs ip, r0 @ mask in offset bits 23-16 539 + orr ip, r6 @ mask in offset bits 31-24 533 540 strh ip, [r7, #2] 534 - 4: cmp r4, r5 541 + 2: cmp r4, r5 535 542 ldrcc r7, [r4], #4 @ use branch for delay slot 536 - bcc 2b 543 + bcc 1b 537 544 bx lr 538 545 #else 539 - #ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT 540 - and r0, r6, #255 @ offset bits 23-16 541 - mov r6, r6, lsr #8 @ offset bits 31-24 542 - #else 543 - mov r0, #0 @ just in case... 544 - #endif 545 - b 3f 546 - 2: ldr ip, [r7, r3] 546 + b 2f 547 + 1: ldr ip, [r7, r3] 547 548 bic ip, ip, #0x000000ff 548 - tst ip, #0x400 @ rotate shift tells us LS or MS byte 549 - orrne ip, ip, r6 @ mask in offset bits 31-24 550 - orreq ip, ip, r0 @ mask in offset bits 23-16 549 + orr ip, ip, r6 @ mask in offset bits 31-24 551 550 str ip, [r7, r3] 552 - 3: cmp r4, r5 551 + 2: cmp r4, r5 553 552 ldrcc r7, [r4], #4 @ use branch for delay slot 554 - bcc 2b 553 + bcc 1b 555 554 mov pc, lr 556 555 #endif 557 556 ENDPROC(__fixup_a_pv_table)