Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/asm: Replace "MOVQ $imm, %reg" with MOVL

There is no reason to use MOVQ to load a non-negative immediate
constant value into a 64-bit register. MOVL does the same, since
the upper 32 bits are zero-extended by the CPU.

This makes the code a bit smaller, while leaving functionality
unchanged.

Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Alexei Starovoitov <ast@plumgrid.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Will Drewry <wad@chromium.org>
Link: http://lkml.kernel.org/r/1427821211-25099-8-git-send-email-dvlasenk@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Denys Vlasenko, committed by Ingo Molnar — commit a734b4a2 (parent 36acef25)

+7 -7
+1 -1
arch/x86/crypto/crc32c-pcl-intel-asm_64.S
···
178 178   ## 2a) PROCESS FULL BLOCKS:
179 179   ################################################################
180 180   full_block:
181     - 	movq $128,%rax
    181 + 	movl $128,%eax
182 182   	lea 128*8*2(block_0), block_1
183 183   	lea 128*8*3(block_0), block_2
184 184   	add $128*8*1, block_0
+2 -2
arch/x86/crypto/twofish-x86_64-asm_64.S
···
264 264   	movq R1, 8(%rsi)
265 265   
266 266   	popq R1
267     - 	movq $1,%rax
    267 + 	movl $1,%eax
268 268   	ret
269 269   ENDPROC(twofish_enc_blk)
270 270   
···
316 316   	movq R1, 8(%rsi)
317 317   
318 318   	popq R1
319     - 	movq $1,%rax
    319 + 	movl $1,%eax
320 320   	ret
321 321   ENDPROC(twofish_dec_blk)
+4 -4
arch/x86/kernel/relocate_kernel_64.S
···
123 123    * Set cr4 to a known state:
124 124    * - physical address extension enabled
125 125    */
126     - 	movq $X86_CR4_PAE, %rax
    126 + 	movl $X86_CR4_PAE, %eax
127 127   	movq %rax, %cr4
128 128   
129 129   	jmp 1f
···
246 246   	movq %rsi, %rax
247 247   
248 248   	movq %r10, %rdi
249     - 	movq $512, %rcx
    249 + 	movl $512, %ecx
250 250   	rep ; movsq
251 251   
252 252   	movq %rax, %rdi
253 253   	movq %rdx, %rsi
254     - 	movq $512, %rcx
    254 + 	movl $512, %ecx
255 255   	rep ; movsq
256 256   
257 257   	movq %rdx, %rdi
258 258   	movq %r10, %rsi
259     - 	movq $512, %rcx
    259 + 	movl $512, %ecx
260 260   	rep ; movsq
261 261   
262 262   	lea PAGE_SIZE(%rax), %rsi