Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: Clean up 'sizeof x' => 'sizeof(x)'

"sizeof(x)" is the canonical coding style used in arch/x86 most of the time.
Fix the few places that didn't follow the convention.

(Also do some whitespace cleanups in a few places while at it.)

[ mingo: Rewrote the changelog. ]

Signed-off-by: Jordan Borgner <mail@jordan-borgner.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20181028125828.7rgammkgzep2wpam@JordanDesktop
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Jordan Borgner and committed by Ingo Molnar.
0e96f31e 4b783dd6

+63 -63
+1 -1
arch/x86/boot/cpucheck.c
··· 113 113 { 114 114 int err; 115 115 116 - memset(&cpu.flags, 0, sizeof cpu.flags); 116 + memset(&cpu.flags, 0, sizeof(cpu.flags)); 117 117 cpu.level = 3; 118 118 119 119 if (has_eflag(X86_EFLAGS_AC))
+2 -2
arch/x86/boot/early_serial_console.c
··· 50 50 int pos = 0; 51 51 int port = 0; 52 52 53 - if (cmdline_find_option("earlyprintk", arg, sizeof arg) > 0) { 53 + if (cmdline_find_option("earlyprintk", arg, sizeof(arg)) > 0) { 54 54 char *e; 55 55 56 56 if (!strncmp(arg, "serial", 6)) { ··· 124 124 * console=uart8250,io,0x3f8,115200n8 125 125 * need to make sure it is last one console ! 126 126 */ 127 - if (cmdline_find_option("console", optstr, sizeof optstr) <= 0) 127 + if (cmdline_find_option("console", optstr, sizeof(optstr)) <= 0) 128 128 return; 129 129 130 130 options = optstr;
+3 -3
arch/x86/boot/edd.c
··· 76 76 { 77 77 struct biosregs ireg, oreg; 78 78 79 - memset(ei, 0, sizeof *ei); 79 + memset(ei, 0, sizeof(*ei)); 80 80 81 81 /* Check Extensions Present */ 82 82 ··· 133 133 struct edd_info ei, *edp; 134 134 u32 *mbrptr; 135 135 136 - if (cmdline_find_option("edd", eddarg, sizeof eddarg) > 0) { 136 + if (cmdline_find_option("edd", eddarg, sizeof(eddarg)) > 0) { 137 137 if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip")) { 138 138 do_edd = 1; 139 139 do_mbr = 0; ··· 166 166 */ 167 167 if (!get_edd_info(devno, &ei) 168 168 && boot_params.eddbuf_entries < EDDMAXNR) { 169 - memcpy(edp, &ei, sizeof ei); 169 + memcpy(edp, &ei, sizeof(ei)); 170 170 edp++; 171 171 boot_params.eddbuf_entries++; 172 172 }
+2 -2
arch/x86/boot/main.c
··· 36 36 const struct old_cmdline * const oldcmd = 37 37 (const struct old_cmdline *)OLD_CL_ADDRESS; 38 38 39 - BUILD_BUG_ON(sizeof boot_params != 4096); 40 - memcpy(&boot_params.hdr, &hdr, sizeof hdr); 39 + BUILD_BUG_ON(sizeof(boot_params) != 4096); 40 + memcpy(&boot_params.hdr, &hdr, sizeof(hdr)); 41 41 42 42 if (!boot_params.hdr.cmd_line_ptr && 43 43 oldcmd->cl_magic == OLD_CL_MAGIC) {
+1 -1
arch/x86/boot/memory.c
··· 26 26 27 27 initregs(&ireg); 28 28 ireg.ax = 0xe820; 29 - ireg.cx = sizeof buf; 29 + ireg.cx = sizeof(buf); 30 30 ireg.edx = SMAP; 31 31 ireg.di = (size_t)&buf; 32 32
+1 -1
arch/x86/boot/regs.c
··· 21 21 22 22 void initregs(struct biosregs *reg) 23 23 { 24 - memset(reg, 0, sizeof *reg); 24 + memset(reg, 0, sizeof(*reg)); 25 25 reg->eflags |= X86_EFLAGS_CF; 26 26 reg->ds = ds(); 27 27 reg->es = ds();
+3 -3
arch/x86/boot/video-vesa.c
··· 62 62 if (mode & ~0x1ff) 63 63 continue; 64 64 65 - memset(&vminfo, 0, sizeof vminfo); /* Just in case... */ 65 + memset(&vminfo, 0, sizeof(vminfo)); /* Just in case... */ 66 66 67 67 ireg.ax = 0x4f01; 68 68 ireg.cx = mode; ··· 109 109 int is_graphic; 110 110 u16 vesa_mode = mode->mode - VIDEO_FIRST_VESA; 111 111 112 - memset(&vminfo, 0, sizeof vminfo); /* Just in case... */ 112 + memset(&vminfo, 0, sizeof(vminfo)); /* Just in case... */ 113 113 114 114 initregs(&ireg); 115 115 ireg.ax = 0x4f01; ··· 241 241 struct biosregs ireg, oreg; 242 242 243 243 /* Apparently used as a nonsense token... */ 244 - memset(&boot_params.edid_info, 0x13, sizeof boot_params.edid_info); 244 + memset(&boot_params.edid_info, 0x13, sizeof(boot_params.edid_info)); 245 245 246 246 if (vginfo.version < 0x0200) 247 247 return; /* EDID requires VBE 2.0+ */
+1 -1
arch/x86/boot/video.c
··· 115 115 } else if ((key >= '0' && key <= '9') || 116 116 (key >= 'A' && key <= 'Z') || 117 117 (key >= 'a' && key <= 'z')) { 118 - if (len < sizeof entry_buf) { 118 + if (len < sizeof(entry_buf)) { 119 119 entry_buf[len++] = key; 120 120 putchar(key); 121 121 }
+1 -1
arch/x86/events/intel/core.c
··· 4535 4535 } 4536 4536 } 4537 4537 4538 - snprintf(pmu_name_str, sizeof pmu_name_str, "%s", name); 4538 + snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name); 4539 4539 4540 4540 if (version >= 2 && extra_attr) { 4541 4541 x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
+2 -2
arch/x86/kernel/cpu/common.c
··· 1074 1074 #endif 1075 1075 c->x86_cache_alignment = c->x86_clflush_size; 1076 1076 1077 - memset(&c->x86_capability, 0, sizeof c->x86_capability); 1077 + memset(&c->x86_capability, 0, sizeof(c->x86_capability)); 1078 1078 c->extended_cpuid_level = 0; 1079 1079 1080 1080 if (!have_cpuid_p()) ··· 1317 1317 c->x86_virt_bits = 32; 1318 1318 #endif 1319 1319 c->x86_cache_alignment = c->x86_clflush_size; 1320 - memset(&c->x86_capability, 0, sizeof c->x86_capability); 1320 + memset(&c->x86_capability, 0, sizeof(c->x86_capability)); 1321 1321 1322 1322 generic_identify(c); 1323 1323
+1 -1
arch/x86/kernel/cpu/mcheck/mce.c
··· 2215 2215 if (dev) 2216 2216 return 0; 2217 2217 2218 - dev = kzalloc(sizeof *dev, GFP_KERNEL); 2218 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2219 2219 if (!dev) 2220 2220 return -ENOMEM; 2221 2221 dev->id = cpu;
+1 -1
arch/x86/kernel/cpu/mtrr/generic.c
··· 798 798 local_irq_restore(flags); 799 799 800 800 /* Use the atomic bitops to update the global mask */ 801 - for (count = 0; count < sizeof mask * 8; ++count) { 801 + for (count = 0; count < sizeof(mask) * 8; ++count) { 802 802 if (mask & 0x01) 803 803 set_bit(count, &smp_changes_mask); 804 804 mask >>= 1;
+3 -3
arch/x86/kernel/cpu/mtrr/if.c
··· 174 174 case MTRRIOC_SET_PAGE_ENTRY: 175 175 case MTRRIOC_DEL_PAGE_ENTRY: 176 176 case MTRRIOC_KILL_PAGE_ENTRY: 177 - if (copy_from_user(&sentry, arg, sizeof sentry)) 177 + if (copy_from_user(&sentry, arg, sizeof(sentry))) 178 178 return -EFAULT; 179 179 break; 180 180 case MTRRIOC_GET_ENTRY: 181 181 case MTRRIOC_GET_PAGE_ENTRY: 182 - if (copy_from_user(&gentry, arg, sizeof gentry)) 182 + if (copy_from_user(&gentry, arg, sizeof(gentry))) 183 183 return -EFAULT; 184 184 break; 185 185 #ifdef CONFIG_COMPAT ··· 332 332 switch (cmd) { 333 333 case MTRRIOC_GET_ENTRY: 334 334 case MTRRIOC_GET_PAGE_ENTRY: 335 - if (copy_to_user(arg, &gentry, sizeof gentry)) 335 + if (copy_to_user(arg, &gentry, sizeof(gentry))) 336 336 err = -EFAULT; 337 337 break; 338 338 #ifdef CONFIG_COMPAT
+1 -1
arch/x86/kernel/head64.c
··· 385 385 */ 386 386 sme_map_bootdata(real_mode_data); 387 387 388 - memcpy(&boot_params, real_mode_data, sizeof boot_params); 388 + memcpy(&boot_params, real_mode_data, sizeof(boot_params)); 389 389 sanitize_boot_params(&boot_params); 390 390 cmd_line_ptr = get_cmd_line_ptr(); 391 391 if (cmd_line_ptr) {
+4 -4
arch/x86/kernel/msr.c
··· 115 115 err = -EBADF; 116 116 break; 117 117 } 118 - if (copy_from_user(&regs, uregs, sizeof regs)) { 118 + if (copy_from_user(&regs, uregs, sizeof(regs))) { 119 119 err = -EFAULT; 120 120 break; 121 121 } 122 122 err = rdmsr_safe_regs_on_cpu(cpu, regs); 123 123 if (err) 124 124 break; 125 - if (copy_to_user(uregs, &regs, sizeof regs)) 125 + if (copy_to_user(uregs, &regs, sizeof(regs))) 126 126 err = -EFAULT; 127 127 break; 128 128 ··· 131 131 err = -EBADF; 132 132 break; 133 133 } 134 - if (copy_from_user(&regs, uregs, sizeof regs)) { 134 + if (copy_from_user(&regs, uregs, sizeof(regs))) { 135 135 err = -EFAULT; 136 136 break; 137 137 } 138 138 err = wrmsr_safe_regs_on_cpu(cpu, regs); 139 139 if (err) 140 140 break; 141 - if (copy_to_user(uregs, &regs, sizeof regs)) 141 + if (copy_to_user(uregs, &regs, sizeof(regs))) 142 142 err = -EFAULT; 143 143 break; 144 144
+11 -11
arch/x86/kvm/emulate.c
··· 1509 1509 return emulate_gp(ctxt, index << 3 | 0x2); 1510 1510 1511 1511 addr = dt.address + index * 8; 1512 - return linear_read_system(ctxt, addr, desc, sizeof *desc); 1512 + return linear_read_system(ctxt, addr, desc, sizeof(*desc)); 1513 1513 } 1514 1514 1515 1515 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, ··· 1522 1522 struct desc_struct desc; 1523 1523 u16 sel; 1524 1524 1525 - memset (dt, 0, sizeof *dt); 1525 + memset(dt, 0, sizeof(*dt)); 1526 1526 if (!ops->get_segment(ctxt, &sel, &desc, &base3, 1527 1527 VCPU_SREG_LDTR)) 1528 1528 return; ··· 1586 1586 if (rc != X86EMUL_CONTINUE) 1587 1587 return rc; 1588 1588 1589 - return linear_write_system(ctxt, addr, desc, sizeof *desc); 1589 + return linear_write_system(ctxt, addr, desc, sizeof(*desc)); 1590 1590 } 1591 1591 1592 1592 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, ··· 1604 1604 u16 dummy; 1605 1605 u32 base3 = 0; 1606 1606 1607 - memset(&seg_desc, 0, sizeof seg_desc); 1607 + memset(&seg_desc, 0, sizeof(seg_desc)); 1608 1608 1609 1609 if (ctxt->mode == X86EMUL_MODE_REAL) { 1610 1610 /* set real mode segment descriptor (keep limit etc. 
for ··· 3075 3075 int ret; 3076 3076 u32 new_tss_base = get_desc_base(new_desc); 3077 3077 3078 - ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); 3078 + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); 3079 3079 if (ret != X86EMUL_CONTINUE) 3080 3080 return ret; 3081 3081 3082 3082 save_state_to_tss16(ctxt, &tss_seg); 3083 3083 3084 - ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); 3084 + ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); 3085 3085 if (ret != X86EMUL_CONTINUE) 3086 3086 return ret; 3087 3087 3088 - ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg); 3088 + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); 3089 3089 if (ret != X86EMUL_CONTINUE) 3090 3090 return ret; 3091 3091 ··· 3094 3094 3095 3095 ret = linear_write_system(ctxt, new_tss_base, 3096 3096 &tss_seg.prev_task_link, 3097 - sizeof tss_seg.prev_task_link); 3097 + sizeof(tss_seg.prev_task_link)); 3098 3098 if (ret != X86EMUL_CONTINUE) 3099 3099 return ret; 3100 3100 } ··· 3216 3216 u32 eip_offset = offsetof(struct tss_segment_32, eip); 3217 3217 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); 3218 3218 3219 - ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); 3219 + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); 3220 3220 if (ret != X86EMUL_CONTINUE) 3221 3221 return ret; 3222 3222 ··· 3228 3228 if (ret != X86EMUL_CONTINUE) 3229 3229 return ret; 3230 3230 3231 - ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg); 3231 + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); 3232 3232 if (ret != X86EMUL_CONTINUE) 3233 3233 return ret; 3234 3234 ··· 3237 3237 3238 3238 ret = linear_write_system(ctxt, new_tss_base, 3239 3239 &tss_seg.prev_task_link, 3240 - sizeof tss_seg.prev_task_link); 3240 + sizeof(tss_seg.prev_task_link)); 3241 3241 if (ret != 
X86EMUL_CONTINUE) 3242 3242 return ret; 3243 3243 }
+1 -1
arch/x86/kvm/lapic.c
··· 2409 2409 r = kvm_apic_state_fixup(vcpu, s, true); 2410 2410 if (r) 2411 2411 return r; 2412 - memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); 2412 + memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s)); 2413 2413 2414 2414 recalculate_apic_map(vcpu->kvm); 2415 2415 kvm_apic_set_version(vcpu);
+21 -21
arch/x86/kvm/x86.c
··· 2924 2924 unsigned size; 2925 2925 2926 2926 r = -EFAULT; 2927 - if (copy_from_user(&msrs, user_msrs, sizeof msrs)) 2927 + if (copy_from_user(&msrs, user_msrs, sizeof(msrs))) 2928 2928 goto out; 2929 2929 2930 2930 r = -E2BIG; ··· 3091 3091 unsigned n; 3092 3092 3093 3093 r = -EFAULT; 3094 - if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list)) 3094 + if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) 3095 3095 goto out; 3096 3096 n = msr_list.nmsrs; 3097 3097 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs; 3098 - if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) 3098 + if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) 3099 3099 goto out; 3100 3100 r = -E2BIG; 3101 3101 if (n < msr_list.nmsrs) ··· 3117 3117 struct kvm_cpuid2 cpuid; 3118 3118 3119 3119 r = -EFAULT; 3120 - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3120 + if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 3121 3121 goto out; 3122 3122 3123 3123 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, ··· 3126 3126 goto out; 3127 3127 3128 3128 r = -EFAULT; 3129 - if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) 3129 + if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 3130 3130 goto out; 3131 3131 r = 0; 3132 3132 break; ··· 3894 3894 struct kvm_interrupt irq; 3895 3895 3896 3896 r = -EFAULT; 3897 - if (copy_from_user(&irq, argp, sizeof irq)) 3897 + if (copy_from_user(&irq, argp, sizeof(irq))) 3898 3898 goto out; 3899 3899 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 3900 3900 break; ··· 3912 3912 struct kvm_cpuid cpuid; 3913 3913 3914 3914 r = -EFAULT; 3915 - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3915 + if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 3916 3916 goto out; 3917 3917 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); 3918 3918 break; ··· 3922 3922 struct kvm_cpuid2 cpuid; 3923 3923 3924 3924 r = -EFAULT; 3925 - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3925 + if 
(copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 3926 3926 goto out; 3927 3927 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, 3928 3928 cpuid_arg->entries); ··· 3933 3933 struct kvm_cpuid2 cpuid; 3934 3934 3935 3935 r = -EFAULT; 3936 - if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) 3936 + if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) 3937 3937 goto out; 3938 3938 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, 3939 3939 cpuid_arg->entries); 3940 3940 if (r) 3941 3941 goto out; 3942 3942 r = -EFAULT; 3943 - if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) 3943 + if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) 3944 3944 goto out; 3945 3945 r = 0; 3946 3946 break; ··· 3961 3961 struct kvm_tpr_access_ctl tac; 3962 3962 3963 3963 r = -EFAULT; 3964 - if (copy_from_user(&tac, argp, sizeof tac)) 3964 + if (copy_from_user(&tac, argp, sizeof(tac))) 3965 3965 goto out; 3966 3966 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); 3967 3967 if (r) 3968 3968 goto out; 3969 3969 r = -EFAULT; 3970 - if (copy_to_user(argp, &tac, sizeof tac)) 3970 + if (copy_to_user(argp, &tac, sizeof(tac))) 3971 3971 goto out; 3972 3972 r = 0; 3973 3973 break; ··· 3980 3980 if (!lapic_in_kernel(vcpu)) 3981 3981 goto out; 3982 3982 r = -EFAULT; 3983 - if (copy_from_user(&va, argp, sizeof va)) 3983 + if (copy_from_user(&va, argp, sizeof(va))) 3984 3984 goto out; 3985 3985 idx = srcu_read_lock(&vcpu->kvm->srcu); 3986 3986 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); ··· 3991 3991 u64 mcg_cap; 3992 3992 3993 3993 r = -EFAULT; 3994 - if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap)) 3994 + if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap))) 3995 3995 goto out; 3996 3996 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); 3997 3997 break; ··· 4000 4000 struct kvm_x86_mce mce; 4001 4001 4002 4002 r = -EFAULT; 4003 - if (copy_from_user(&mce, argp, sizeof mce)) 4003 + if (copy_from_user(&mce, argp, sizeof(mce))) 4004 4004 goto out; 4005 4005 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); 
4006 4006 break; ··· 4536 4536 if (kvm->created_vcpus) 4537 4537 goto set_identity_unlock; 4538 4538 r = -EFAULT; 4539 - if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) 4539 + if (copy_from_user(&ident_addr, argp, sizeof(ident_addr))) 4540 4540 goto set_identity_unlock; 4541 4541 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); 4542 4542 set_identity_unlock: ··· 4620 4620 if (r) 4621 4621 goto get_irqchip_out; 4622 4622 r = -EFAULT; 4623 - if (copy_to_user(argp, chip, sizeof *chip)) 4623 + if (copy_to_user(argp, chip, sizeof(*chip))) 4624 4624 goto get_irqchip_out; 4625 4625 r = 0; 4626 4626 get_irqchip_out: ··· 4666 4666 } 4667 4667 case KVM_SET_PIT: { 4668 4668 r = -EFAULT; 4669 - if (copy_from_user(&u.ps, argp, sizeof u.ps)) 4669 + if (copy_from_user(&u.ps, argp, sizeof(u.ps))) 4670 4670 goto out; 4671 4671 r = -ENXIO; 4672 4672 if (!kvm->arch.vpit) ··· 8205 8205 sregs->efer = vcpu->arch.efer; 8206 8206 sregs->apic_base = kvm_get_apic_base(vcpu); 8207 8207 8208 - memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap); 8208 + memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap)); 8209 8209 8210 8210 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) 8211 8211 set_bit(vcpu->arch.interrupt.nr, ··· 8509 8509 fpu->last_opcode = fxsave->fop; 8510 8510 fpu->last_ip = fxsave->rip; 8511 8511 fpu->last_dp = fxsave->rdp; 8512 - memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); 8512 + memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); 8513 8513 8514 8514 vcpu_put(vcpu); 8515 8515 return 0; ··· 8530 8530 fxsave->fop = fpu->last_opcode; 8531 8531 fxsave->rip = fpu->last_ip; 8532 8532 fxsave->rdp = fpu->last_dp; 8533 - memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); 8533 + memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); 8534 8534 8535 8535 vcpu_put(vcpu); 8536 8536 return 0;
+2 -2
arch/x86/tools/relocs.c
··· 130 130 REG_EXTENDED|REG_NOSUB); 131 131 132 132 if (err) { 133 - regerror(err, &sym_regex_c[i], errbuf, sizeof errbuf); 133 + regerror(err, &sym_regex_c[i], errbuf, sizeof(errbuf)); 134 134 die("%s", errbuf); 135 135 } 136 136 } ··· 405 405 } 406 406 for (i = 0; i < ehdr.e_shnum; i++) { 407 407 struct section *sec = &secs[i]; 408 - if (fread(&shdr, sizeof shdr, 1, fp) != 1) 408 + if (fread(&shdr, sizeof(shdr), 1, fp) != 1) 409 409 die("Cannot read ELF section headers %d/%d: %s\n", 410 410 i, ehdr.e_shnum, strerror(errno)); 411 411 sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name);
+1 -1
arch/x86/um/asm/elf.h
··· 194 194 195 195 typedef unsigned long elf_greg_t; 196 196 197 - #define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) 197 + #define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) 198 198 typedef elf_greg_t elf_gregset_t[ELF_NGREG]; 199 199 200 200 typedef struct user_i387_struct elf_fpregset_t;