opuntiaOS - an operating system targeting x86 and ARMv7
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

[all][arm] Add support for NEON-VFPv4

+338 -46
+6 -2
build/kernel/BUILD.gn
··· 34 34 if (target_cpu == "aarch32") { 35 35 kernel_c_flags += [ 36 36 "-fno-builtin", 37 - "-march=armv7-a", 37 + "-march=armv7-a+neon-vfpv4", 38 + "-mfpu=neon-vfpv4", 39 + "-mfloat-abi=soft", 38 40 "-fno-pie", 39 41 "-no-pie", 40 42 ] 41 43 kernel_asm_flags += [ 42 - "-march=armv7-a", 44 + "-march=armv7-a+neon-vfpv4", 45 + "-mfpu=neon-vfpv4", 46 + "-mfloat-abi=softfp", 43 47 "-mcpu=cortex-a15", 44 48 ] 45 49 kernel_ld_flags += [
+4
build/kernel/aarch32/kernel_link.ld
··· 54 54 STACK_ABORT_BASE = .; 55 55 . += 0x1000; 56 56 STACK_ABORT_TOP = .; 57 + 58 + STACK_UNDEFINED_BASE = .; 59 + . += 0x1000; 60 + STACK_UNDEFINED_TOP = .; 57 61 } 58 62 59 63 __end = .;
+3 -1
build/libs/BUILD.gn
··· 40 40 if (target_cpu == "aarch32") { 41 41 lib_c_flags += [ 42 42 "-fno-builtin", 43 - "-march=armv7-a", 43 + "-march=armv7-a+neon-vfpv4", 44 + "-mfpu=neon-vfpv4", 45 + "-mfloat-abi=softfp", 44 46 "-fno-pie", 45 47 "-no-pie", 46 48 ]
+3 -1
build/userland/BUILD.gn
··· 41 41 if (target_cpu == "aarch32") { 42 42 uland_c_flags += [ 43 43 "-fno-builtin", 44 - "-march=armv7-a", 44 + "-march=armv7-a+neon-vfpv4", 45 + "-mfpu=neon-vfpv4", 46 + "-mfloat-abi=softfp", 45 47 "-fno-pie", 46 48 "-no-pie", 47 49 ]
+56
kernel/include/drivers/aarch32/fpuv4.h
··· 1 + /* 2 + * Copyright (C) 2020-2021 Nikita Melekhin. All rights reserved. 3 + * 4 + * Use of this source code is governed by a BSD-style license that can be 5 + * found in the LICENSE file. 6 + */ 7 + 8 + #ifndef _KERNEL_DRIVERS_AARCH32_FPUV4_H 9 + #define _KERNEL_DRIVERS_AARCH32_FPUV4_H 10 + 11 + #include <drivers/driver_manager.h> 12 + #include <libkern/mask.h> 13 + #include <libkern/types.h> 14 + #include <platform/aarch32/interrupts.h> 15 + #include <platform/aarch32/registers.h> 16 + #include <platform/aarch32/target/cortex-a15/device_settings.h> 17 + 18 + typedef struct { 19 + uint64_t d[32]; 20 + } fpu_state_t; 21 + 22 + void fpuv4_install(); 23 + void fpu_init_state(fpu_state_t* new_fpu_state); 24 + extern uint32_t read_fpexc(); 25 + extern void write_fpexc(uint32_t); 26 + extern void fpu_save(void*); 27 + extern void fpu_restore(void*); 28 + 29 + static inline void fpu_enable() 30 + { 31 + write_fpexc(read_fpexc() | (1 << 30)); 32 + } 33 + 34 + static inline void fpu_disable() 35 + { 36 + write_fpexc(read_fpexc() & (~(1 << 30))); 37 + } 38 + 39 + static inline int fpu_is_avail() 40 + { 41 + return (((read_cpacr() >> 20) & 0b1111) == 0b1111); 42 + } 43 + 44 + static inline void fpu_make_avail() 45 + { 46 + write_cpacr(read_cpacr() | ((0b1111) << 20)); 47 + } 48 + 49 + static inline void fpu_make_unavail() 50 + { 51 + // Simply turn it off to make it unavailble. 52 + uint32_t val = read_cpacr() & (~((0b1111) << 20)); 53 + write_cpacr(val | ((0b0101) << 20)); 54 + } 55 + 56 + #endif //_KERNEL_DRIVERS_AARCH32_FPUV4_H
+5
kernel/include/drivers/generic/fpu.h
··· 1 + #ifdef __i386__ 2 + #include <drivers/x86/fpu.h> 3 + #elif __arm__ 4 + #include <drivers/aarch32/fpuv4.h> 5 + #endif
+1 -1
kernel/include/drivers/x86/fpu.h
··· 16 16 17 17 void fpu_handler(); 18 18 void fpu_init(); 19 - void fpu_reset_state(fpu_state_t* new_fpu_state); 19 + void fpu_init_state(fpu_state_t* new_fpu_state); 20 20 21 21 static inline void fpu_save(fpu_state_t* fpu_state) 22 22 {
+3 -1
kernel/include/platform/aarch32/interrupts.h
··· 1 1 #ifndef _KERNEL_PLATFORM_AARCH32_INTERRUPTS_H 2 2 #define _KERNEL_PLATFORM_AARCH32_INTERRUPTS_H 3 3 4 - #include <libkern/types.h> 5 4 #include <libkern/mask.h> 5 + #include <libkern/types.h> 6 6 7 7 #define IRQ_HANDLERS_MAX 256 8 8 ··· 18 18 void interrupts_setup(); 19 19 20 20 extern char STACK_ABORT_TOP; 21 + extern char STACK_UNDEFINED_TOP; 21 22 extern char STACK_IRQ_TOP; 22 23 extern char STACK_SVC_TOP; 23 24 extern char STACK_TOP; ··· 26 27 extern void set_svc_stack(uint32_t stack); 27 28 extern void set_irq_stack(uint32_t stack); 28 29 extern void set_abort_stack(uint32_t stack); 30 + extern void set_undefined_stack(uint32_t stack); 29 31 30 32 extern void reset_handler(); 31 33 extern void undefined_handler();
+50
kernel/include/platform/aarch32/registers.h
··· 64 64 return spsr; 65 65 } 66 66 67 + static inline uint32_t read_cpacr() 68 + { 69 + uint32_t cpacr; 70 + asm volatile("mrc p15, 0, %0, c1, c0, 2" 71 + : "=r"(cpacr) 72 + :); 73 + return cpacr; 74 + } 75 + 76 + static inline void write_cpacr(uint32_t val) 77 + { 78 + asm volatile("mcr p15, 0, %0, c1, c0, 2" 79 + : 80 + : "r"(val) 81 + : "memory"); 82 + } 83 + 84 + static inline uint32_t read_nsacr() 85 + { 86 + uint32_t cpacr; 87 + asm volatile("mrc p15, 0, %0, c1, c1, 2" 88 + : "=r"(cpacr) 89 + :); 90 + return cpacr; 91 + } 92 + 93 + static inline void write_nsacr(uint32_t val) 94 + { 95 + asm volatile("mcr p15, 0, %0, c1, c1, 2" 96 + : 97 + : "r"(val) 98 + : "memory"); 99 + } 100 + 101 + static inline uint32_t read_hcptr() 102 + { 103 + uint32_t cpacr; 104 + asm volatile("mrc p15, 4, %0, c1, c1, 2" 105 + : "=r"(cpacr) 106 + :); 107 + return cpacr; 108 + } 109 + 110 + static inline void write_hcptr(uint32_t val) 111 + { 112 + asm volatile("mcr p15, 4, %0, c1, c1, 2" 113 + : 114 + : "r"(val) 115 + : "memory"); 116 + } 67 117 68 118 #endif /* _KERNEL_PLATFORM_AARCH32_REGISTERS_H */
+41
kernel/include/tasking/bits/cpu.h
··· 1 + /* 2 + * Copyright (C) 2020-2021 Nikita Melekhin. All rights reserved. 3 + * 4 + * Use of this source code is governed by a BSD-style license that can be 5 + * found in the LICENSE file. 6 + */ 7 + 8 + #ifndef _KERNEL_TASKING_BITS_CPU_H 9 + #define _KERNEL_TASKING_BITS_CPU_H 10 + 11 + #include <drivers/generic/fpu.h> 12 + #include <libkern/types.h> 13 + #include <platform/generic/tasking/context.h> 14 + 15 + #define CPU_CNT 1 16 + #define THIS_CPU (&cpus[0]) 17 + #define RUNNIG_THREAD (THIS_CPU->running_thread) 18 + #define FPU_ENABLED 19 + 20 + struct thread; 21 + typedef int cpu_state_t; 22 + enum CPU_STATE { 23 + CPU_IN_KERNEL, 24 + CPU_IN_USERLAND, 25 + }; 26 + 27 + typedef struct { 28 + char* kstack; 29 + context_t* scheduler; // context of sched's registers 30 + struct thread* running_thread; 31 + cpu_state_t current_state; 32 + #ifdef FPU_ENABLED 33 + // Information about current state of fpu. 34 + struct thread* fpu_for_thread; 35 + pid_t fpu_for_pid; 36 + #endif // FPU_ENABLED 37 + } cpu_t; 38 + 39 + extern cpu_t cpus[CPU_CNT]; 40 + 41 + #endif // _KERNEL_TASKING_BITS_CPU_H
+24
kernel/include/tasking/cpu.h
··· 1 + /* 2 + * Copyright (C) 2020-2021 Nikita Melekhin. All rights reserved. 3 + * 4 + * Use of this source code is governed by a BSD-style license that can be 5 + * found in the LICENSE file. 6 + */ 7 + 8 + #ifndef _KERNEL_TASKING_CPU_H 9 + #define _KERNEL_TASKING_CPU_H 10 + 11 + #include <drivers/generic/fpu.h> 12 + #include <tasking/bits/cpu.h> 13 + 14 + static inline void cpu_enter_kernel_space() 15 + { 16 + THIS_CPU->current_state = CPU_IN_KERNEL; 17 + } 18 + 19 + static inline void cpu_leave_kernel_space() 20 + { 21 + THIS_CPU->current_state = CPU_IN_USERLAND; 22 + } 23 + 24 + #endif // _KERNEL_TASKING_CPU_H
+2 -14
kernel/include/tasking/tasking.h
··· 8 8 #ifndef _KERNEL_TASKING_TASKING_H 9 9 #define _KERNEL_TASKING_TASKING_H 10 10 11 + #include <drivers/generic/fpu.h> 11 12 #include <fs/vfs.h> 12 13 #include <libkern/types.h> 13 14 #include <mem/vmm/vmm.h> 14 15 #include <mem/vmm/zoner.h> 15 16 #include <platform/generic/tasking/context.h> 16 17 #include <platform/generic/tasking/trapframe.h> 18 + #include <tasking/bits/cpu.h> 17 19 #include <tasking/proc.h> 18 20 #include <tasking/thread.h> 19 21 20 - #define CPU_CNT 1 21 - #define THIS_CPU (&cpus[0]) 22 - #define RUNNIG_THREAD cpus[0].running_thread 23 22 #define MAX_PROCESS_COUNT 1024 24 23 #define MAX_DYING_PROCESS_COUNT 8 25 24 #define MAX_OPENED_FILES 16 26 25 #define SIGNALS_CNT 32 27 26 28 - #ifdef __i386__ 29 - #define FPU_ENABLED 30 - #endif 31 - 32 - typedef struct { 33 - char* kstack; 34 - context_t* scheduler; // context of sched's registers 35 - thread_t* running_thread; 36 - } __attribute__((packed)) cpu_t; 37 - 38 - extern cpu_t cpus[CPU_CNT]; 39 27 extern proc_t proc[MAX_PROCESS_COUNT]; 40 28 extern uint32_t nxt_proc; 41 29 extern uint32_t ended_proc;
+1 -1
kernel/include/tasking/thread.h
··· 8 8 #ifndef _KERNEL_TASKING_THREAD_H 9 9 #define _KERNEL_TASKING_THREAD_H 10 10 11 - #include <drivers/x86/fpu.h> 11 + #include <drivers/generic/fpu.h> 12 12 #include <fs/vfs.h> 13 13 #include <libkern/types.h> 14 14 #include <platform/generic/tasking/context.h>
+17
kernel/kernel/drivers/aarch32/fpuv4.c
··· 1 + #include <drivers/aarch32/fpuv4.h> 2 + #include <libkern/libkern.h> 3 + #include <platform/aarch32/registers.h> 4 + 5 + void fpuv4_install() 6 + { 7 + write_cpacr(read_cpacr() | ((0b1111) << 20)); 8 + write_nsacr(read_nsacr() | ((0b11) << 10)); 9 + write_hcptr(read_hcptr() | ((0b11) << 10)); 10 + fpu_enable(); 11 + fpu_make_unavail(); 12 + } 13 + 14 + void fpu_init_state(fpu_state_t* new_fpu_state) 15 + { 16 + memset(new_fpu_state, 0, sizeof(fpu_state_t)); 17 + }
+21
kernel/kernel/drivers/aarch32/fpuv4_helper.s
··· 1 + .global fpu_save 2 + fpu_save: 3 + vstm r0!, {d0-d15} 4 + vstm r0!, {d16-d31} 5 + bx lr 6 + 7 + .global fpu_restore 8 + fpu_restore: 9 + vldm r0!, {d0-d15} 10 + vldm r0!, {d16-d31} 11 + bx lr 12 + 13 + .global read_fpexc 14 + read_fpexc: 15 + vmrs r0, fpexc 16 + bx lr 17 + 18 + .global write_fpexc 19 + write_fpexc: 20 + vmsr fpexc, r0 21 + bx lr
+2 -2
kernel/kernel/drivers/x86/fpu.c
··· 38 38 set_irq_handler(IRQ7, fpu_handler); 39 39 } 40 40 41 - void fpu_reset_state(fpu_state_t* new_fpu_state) 41 + void fpu_init_state(fpu_state_t* new_fpu_state) 42 42 { 43 - memcpy((uint8_t*)new_fpu_state, (uint8_t*)&fpu_state, sizeof(fpu_state_t)); 43 + memcpy(new_fpu_state, &fpu_state, sizeof(fpu_state_t)); 44 44 }
+2
kernel/kernel/platform/aarch32/init.c
··· 5 5 * found in the LICENSE file. 6 6 */ 7 7 8 + #include <drivers/aarch32/fpuv4.h> 8 9 #include <drivers/aarch32/pl031.h> 9 10 #include <drivers/aarch32/pl050.h> 10 11 #include <drivers/aarch32/pl111.h> ··· 17 18 void platform_setup() 18 19 { 19 20 interrupts_setup(); 21 + fpuv4_install(); 20 22 } 21 23 22 24 void platform_drivers_setup()
+31 -8
kernel/kernel/platform/aarch32/interrupts/interrupt_handlers.c
··· 6 6 #include <platform/aarch32/tasking/trapframe.h> 7 7 #include <platform/generic/registers.h> 8 8 #include <syscalls/handlers.h> 9 + #include <tasking/cpu.h> 10 + #include <tasking/tasking.h> 9 11 10 12 /* IRQ */ 11 13 static irq_handler_t _irq_handlers[IRQ_HANDLERS_MAX]; ··· 42 44 { 43 45 disable_interrupts(); 44 46 set_abort_stack((uint32_t)&STACK_ABORT_TOP); 47 + set_undefined_stack((uint32_t)&STACK_UNDEFINED_TOP); 45 48 set_svc_stack((uint32_t)&STACK_SVC_TOP); 46 49 set_irq_stack((uint32_t)&STACK_IRQ_TOP); 47 50 init_irq_handlers(); ··· 59 62 60 63 void undefined_handler() 61 64 { 62 - uint32_t val; 63 - asm volatile("mov %0, lr" 64 - : "=r"(val) 65 - :); 66 - log("undefined_handler address : %x", val); 67 - while (1) { } 65 + #ifdef FPU_ENABLED 66 + if (!RUNNIG_THREAD) { 67 + goto undefined_h; 68 + } 69 + 70 + if (fpu_is_avail()) { 71 + goto undefined_h; 72 + } 73 + 74 + if (THIS_CPU->fpu_for_thread && THIS_CPU->fpu_for_thread->tid == THIS_CPU->fpu_for_pid) { 75 + fpu_save(THIS_CPU->fpu_for_thread->fpu_state); 76 + } 77 + 78 + fpu_restore(RUNNIG_THREAD->fpu_state); 79 + THIS_CPU->fpu_for_thread = RUNNIG_THREAD; 80 + THIS_CPU->fpu_for_pid = RUNNIG_THREAD->tid; 81 + fpu_make_avail(); 82 + return; 83 + #endif // FPU_ENABLED 84 + 85 + undefined_h: 86 + log("undefined_handler address"); 87 + system_stop(); 68 88 } 69 89 70 90 void svc_handler(trapframe_t* tf) ··· 79 99 : "=r"(val) 80 100 :); 81 101 log("prefetch_abort_handler address : %x", val); 82 - while (1) { } 102 + system_stop(); 83 103 } 84 104 85 105 void data_abort_handler(trapframe_t* tf) 86 106 { 87 107 system_disable_interrupts(); 108 + cpu_enter_kernel_space(); 88 109 uint32_t fault_addr = read_far(); 89 110 uint32_t info = read_dfsr(); 90 111 uint32_t is_pl0 = read_spsr() & 0xf; // See CPSR M field values 91 112 info |= ((is_pl0 != 0) << 31); // Set the 31bit as type 92 - // log("data_abort_handler: %x %x %x", fault_addr, info, tf->user_ip); 93 113 vmm_page_fault_handler(info, fault_addr); 114 + 
cpu_leave_kernel_space(); 94 115 system_enable_interrupts_only_counter(); 95 116 } 96 117 ··· 118 139 void irq_handler(trapframe_t* tf) 119 140 { 120 141 system_disable_interrupts(); 142 + cpu_enter_kernel_space(); 121 143 /* Remove gicv2 call from here */ 122 144 uint32_t int_disc = gicv2_interrupt_descriptor(); 123 145 /* We end the interrupt before handle it, since we can 124 146 call sched() and not return here. */ 125 147 gicv2_end(int_disc); 126 148 _irq_redirect(int_disc & 0x1ff); 149 + cpu_leave_kernel_space(); 127 150 system_enable_interrupts_only_counter(); 128 151 } 129 152
+32 -3
kernel/kernel/platform/aarch32/interrupts/interrupts.s
··· 36 36 .global trap_return 37 37 vector_table: 38 38 b reset_handler 39 - b undefined_handler 39 + b undefined_handler_isp 40 40 b svc_isp 41 41 b prefetch_abort_handler 42 42 b data_abort_isp ··· 106 106 subs pc, lr, #0 107 107 nop 108 108 109 + undefined_handler_isp: 110 + subs lr, lr, #4 111 + stmfd sp!, {r0-r12,lr} 112 + mrs r0, spsr 113 + mrs r1, sp_usr 114 + mrs r2, lr_usr 115 + stmfd sp!, {r0-r2} 116 + 117 + mov r0, sp 118 + bl undefined_handler 119 + 120 + ldmfd sp!, {r0-r2} 121 + msr spsr, r0 122 + msr sp_usr, r1 123 + msr lr_usr, r2 124 + 125 + ldmfd sp!, {r0-r12,lr} 126 + subs pc, lr, #0 127 + nop 128 + 109 129 data_abort_isp: 110 130 subs lr, lr, #8 111 131 stmfd sp!, {r0-r12,lr} ··· 115 135 stmfd sp!, {r0-r2} 116 136 117 137 mov r0, sp 118 - bl data_abort_handler 138 + bl data_abort_handler 119 139 120 140 ldmfd sp!, {r0-r2} 121 141 msr spsr, r0 ··· 221 241 swi 0x0 222 242 pop {pc} 223 243 224 - 225 244 .global set_svc_stack 226 245 set_svc_stack: 227 246 mov r1, sp ··· 251 270 cps #0x1F /* set system mode */ 252 271 mov sp, r1 253 272 bx r2 273 + 274 + .global set_undefined_stack 275 + set_undefined_stack: 276 + mov r1, sp 277 + mov r2, lr 278 + cps #0x1B /* set abort mode */ 279 + mov sp, r0 280 + cps #0x1F /* set system mode */ 281 + mov sp, r1 282 + bx r2
+2 -3
kernel/kernel/platform/aarch32/tasking/switchvm.c
··· 1 + #include <drivers/generic/fpu.h> 1 2 #include <mem/vmm/vmm.h> 2 3 #include <platform/aarch32/interrupts.h> 3 4 #include <platform/generic/system.h> ··· 8 9 void switchuvm(thread_t* thread) 9 10 { 10 11 system_disable_interrupts(); 11 - // uint32_t esp0 = ((uint32_t)thread->tf + sizeof(trapframe_t)); 12 - // set_svc_stack(esp0); 13 - // set_irq_stack(esp0); 14 12 RUNNIG_THREAD = thread; 15 13 vmm_switch_pdir(thread->process->pdir); 14 + fpu_make_unavail(); 16 15 system_enable_interrupts(); 17 16 }
+1 -1
kernel/kernel/platform/aarch32/tasking/tasking_jumper.s
··· 4 4 5 5 // void _tasking_jumper() 6 6 _tasking_jumper: 7 - bl system_enable_interrupts 7 + bl system_enable_interrupts_only_counter 8 8 mov r0, sp // saving current sp to bring it to the new mode 9 9 cps #0x13 10 10 mov sp, r0
+10 -3
kernel/kernel/platform/x86/interrupts/irq_handler.c
··· 1 1 #include <platform/generic/system.h> 2 2 #include <platform/x86/irq_handler.h> 3 3 #include <tasking/tasking.h> 4 + #include <tasking/cpu.h> 4 5 5 - static inline void irq_redirect(uint8_t int_no) { 6 + static inline void irq_redirect(uint8_t int_no) 7 + { 6 8 void (*func)() = (void*)handlers[int_no]; 7 9 func(); 8 10 } 9 11 10 - void irq_handler(trapframe_t *tf) { 12 + void irq_handler(trapframe_t* tf) 13 + { 11 14 system_disable_interrupts(); 15 + cpu_enter_kernel_space(); 16 + 12 17 if (tf->int_no >= IRQ_SLAVE_OFFSET) { 13 18 port_byte_out(0xA0, 0x20); 14 19 } ··· 23 28 irq_redirect(tf->int_no); 24 29 /* We are leaving interrupt, and later interrupts will be on, 25 30 when flags are restored */ 31 + cpu_leave_kernel_space(); 26 32 system_enable_interrupts_only_counter(); 27 33 } 28 34 29 - void irq_empty_handler() { 35 + void irq_empty_handler() 36 + { 30 37 return; 31 38 }
+3
kernel/kernel/platform/x86/interrupts/isr_handler.c
··· 4 4 #include <platform/generic/registers.h> 5 5 #include <platform/generic/system.h> 6 6 #include <platform/x86/isr_handler.h> 7 + #include <tasking/cpu.h> 7 8 #include <tasking/dump.h> 8 9 #include <tasking/sched.h> 9 10 #include <tasking/tasking.h> ··· 49 50 void isr_handler(trapframe_t* tf) 50 51 { 51 52 system_disable_interrupts(); 53 + cpu_enter_kernel_space(); 52 54 53 55 proc_t* p = NULL; 54 56 if (likely(RUNNIG_THREAD)) { ··· 84 86 85 87 /* We are leaving interrupt, and later interrupts will be on, 86 88 when flags are restored */ 89 + cpu_leave_kernel_space(); 87 90 system_enable_interrupts_only_counter(); 88 91 }
+4
kernel/kernel/syscalls/handler.c
··· 12 12 #include <platform/generic/syscalls/params.h> 13 13 #include <platform/generic/system.h> 14 14 #include <syscalls/handlers.h> 15 + #include <tasking/cpu.h> 15 16 16 17 /* From Linux 4.14.0 headers. */ 17 18 /* https://chromium.googlesource.com/chromiumos/docs/+/master/constants/syscalls.md#x86-32_bit */ ··· 74 75 /* This hack has to be here, when a context switching happens 75 76 during a syscall (e.g. when block occurs). The hack will start 76 77 interrupts again after it has become a running thread. */ 78 + cpu_enter_kernel_space(); 77 79 system_enable_interrupts(); 78 80 return param1; 79 81 } ··· 100 102 void sys_handler(trapframe_t* tf) 101 103 { 102 104 system_disable_interrupts(); 105 + cpu_enter_kernel_space(); 103 106 void (*callee)(trapframe_t*) = (void*)syscalls[sys_id]; 104 107 callee(tf); 108 + cpu_leave_kernel_space(); 105 109 system_enable_interrupts_only_counter(); 106 110 } 107 111
+1 -1
kernel/kernel/tasking/proc.c
··· 253 253 dentry_put(p->proc_file); 254 254 } 255 255 #ifdef FPU_ENABLED 256 - fpu_reset_state(p->main_thread->fpu_state); 256 + fpu_init_state(p->main_thread->fpu_state); 257 257 #endif 258 258 vmm_free_pdir(old_pdir, &old_zones); 259 259 dynamic_array_clear(&old_zones);
+9 -2
kernel/kernel/tasking/sched.c
··· 10 10 #include <libkern/log.h> 11 11 #include <mem/kmalloc.h> 12 12 #include <platform/generic/registers.h> 13 + #include <platform/generic/tasking/context.h> 13 14 #include <platform/generic/tasking/trapframe.h> 15 + #include <tasking/cpu.h> 14 16 #include <tasking/sched.h> 15 17 #include <tasking/tasking.h> 16 18 ··· 38 40 39 41 static void _init_cpus(cpu_t* cpu) 40 42 { 43 + cpu->current_state = CPU_IN_KERNEL; 41 44 cpu->kstack = kmalloc(VMM_PAGE_SIZE); 42 45 char* sp = cpu->kstack + VMM_PAGE_SIZE; 43 46 sp -= sizeof(*cpu->scheduler); 44 47 cpu->scheduler = (context_t*)sp; 45 48 memset((void*)cpu->scheduler, 0, sizeof(*cpu->scheduler)); 46 49 context_set_instruction_pointer(cpu->scheduler, (uint32_t)sched); 47 - cpu->running_thread = 0; 50 + cpu->running_thread = NULL; 51 + #ifdef FPU_ENABLED 52 + cpu->fpu_for_thread = NULL; 53 + cpu->fpu_for_pid = 0; 54 + #endif // FPU_ENABLED 48 55 } 49 56 50 57 static inline void _sched_swap_buffers() ··· 197 204 #endif 198 205 ASSERT(thread->status == THREAD_RUNNING); 199 206 switchuvm(thread); 200 - switch_contexts(&THIS_CPU->scheduler, thread->context); 207 + switch_contexts(&(THIS_CPU->scheduler), thread->context); 201 208 } 202 209 } 203 210
+3 -1
kernel/kernel/tasking/tasking.c
··· 14 14 #include <libkern/log.h> 15 15 #include <mem/kmalloc.h> 16 16 #include <platform/generic/system.h> 17 + #include <tasking/cpu.h> 17 18 #include <tasking/dump.h> 18 19 #include <tasking/sched.h> 19 20 #include <tasking/tasking.h> ··· 32 33 #ifdef __i386__ 33 34 void _tasking_jumper() 34 35 { 35 - system_enable_interrupts(); 36 + cpu_leave_kernel_space(); 37 + system_enable_interrupts_only_counter(); 36 38 return; 37 39 } 38 40 #endif
+1 -1
kernel/kernel/tasking/thread.c
··· 42 42 #ifdef FPU_ENABLED 43 43 /* setting fpu */ 44 44 thread->fpu_state = kmalloc_aligned(sizeof(fpu_state_t), 16); 45 - fpu_reset_state(thread->fpu_state); 45 + fpu_init_state(thread->fpu_state); 46 46 #endif 47 47 return 0; 48 48 }