+11 arch/x86/kernel/devicetree.c
···
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/of_pci.h>
+#include <linux/initrd.h>
 
 #include <asm/hpet.h>
 #include <asm/irq_controller.h>
···
 {
 	return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
 }
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+					    unsigned long end)
+{
+	initrd_start = (unsigned long)__va(start);
+	initrd_end = (unsigned long)__va(end);
+	initrd_below_start_ok = 1;
+}
+#endif
 
 void __init add_dtb(u64 data)
 {
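For context: early_init_dt_setup_initrd_arch() is the x86 implementation of the hook the generic flattened-device-tree code uses to hand over an initrd described in the device tree. In kernels of this era, early_init_dt_check_for_initrd() in drivers/of/fdt.c reads the linux,initrd-start and linux,initrd-end properties from the /chosen node and passes the physical range to the architecture through this hook. A rough sketch of that caller, paraphrased from memory rather than taken from this patch (helper names and exact signatures are assumptions to be checked against the tree):

#ifdef CONFIG_BLK_DEV_INITRD
/* Generic FDT code (sketch): find the initrd range in /chosen and hand
 * the physical addresses to the arch hook added by this patch.
 */
void __init early_init_dt_check_for_initrd(unsigned long node)
{
	unsigned long start, end, len;
	__be32 *prop;

	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
	if (!prop)
		return;
	start = of_read_ulong(prop, len / 4);

	prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
	if (!prop)
		return;
	end = of_read_ulong(prop, len / 4);

	/* Arch hook from the hunk above: converts the range with __va()
	 * and records it for the normal initrd code.
	 */
	early_init_dt_setup_initrd_arch(start, end);
}
#endif

The x86 hook itself does nothing more than that translation plus setting initrd_below_start_ok, so the generic initrd code accepts the range as-is.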
+13 arch/x86/kernel/smpboot.c
···
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	x86_platform.nmi_init();
 
+	/*
+	 * Wait until the cpu which brought this one up marked it
+	 * online before enabling interrupts. If we don't do that then
+	 * we can end up waking up the softirq thread before this cpu
+	 * reached the active state, which makes the scheduler unhappy
+	 * and schedule the softirq thread on the wrong cpu. This is
+	 * only observable with forced threaded interrupts, but in
+	 * theory it could also happen w/o them. It's just way harder
+	 * to achieve.
+	 */
+	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+		cpu_relax();
+
 	/* enable local interrupts */
 	local_irq_enable();
 
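The race this loop closes: a secondary CPU marks itself online early in start_secondary(), but it only becomes active later, once the CPU that brought it up finishes the hotplug sequence (the exact point varies by kernel version). If the secondary enables interrupts in that window, a forced-threaded interrupt can wake its softirq thread while the CPU is online but not yet active, and the scheduler may place that thread on another CPU. A minimal userspace model of the ordering the loop enforces; the two flags and all names below are illustrative assumptions, not kernel APIs:

/* Userspace model of the online/active handshake (illustration only). */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int online_flag;	/* models this CPU's bit in cpu_online_mask */
static atomic_int active_flag;	/* models this CPU's bit in cpu_active_mask */

static void *secondary(void *arg)
{
	/* start_secondary(): mark ourselves online. */
	atomic_store(&online_flag, 1);

	/* The new loop: do not "enable interrupts" until the boot CPU
	 * has also marked us active.
	 */
	while (!atomic_load(&active_flag))
		sched_yield();		/* stand-in for cpu_relax() */

	printf("secondary: active, enabling interrupts\n");
	return NULL;
}

int main(void)
{
	pthread_t ap;

	pthread_create(&ap, NULL, secondary, NULL);

	/* Boot CPU: wait for the secondary to come online ... */
	while (!atomic_load(&online_flag))
		sched_yield();

	/* ... then mark it active, as the hotplug code does after bring-up. */
	atomic_store(&active_flag, 1);

	pthread_join(ap, NULL);
	return 0;
}

Built with cc -pthread, the "secondary" thread never passes its wait loop until main has set the active flag, mirroring the cpumask_test_cpu()/cpu_relax() loop added above.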
+3 kernel/irq/manage.c