Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

metag: Boot

Add boot code for metag. Due to the multi-threaded nature of Meta, it is
not uncommon for an RTOS or bare metal application to be started on
other hardware threads by the bootloader. Since there is a single MMU
switch which affects all threads, the MMU is traditionally configured by
the bootloader prior to starting Linux. The bootloader passes a
structure to Linux which among other things contains information about
memory regions which have been mapped. Linux then assumes control of the
local heap memory region.

A kernel arguments string pointer or a flattened device tree pointer can
be provided in the third argument.

Signed-off-by: James Hogan <james.hogan@imgtec.com>

+715
+4
arch/metag/boot/.gitignore
··· 1 + vmlinux* 2 + uImage* 3 + ramdisk.* 4 + *.dtb
+82
arch/metag/include/asm/mach/arch.h
··· 1 + /* 2 + * arch/metag/include/asm/mach/arch.h 3 + * 4 + * Copyright (C) 2012 Imagination Technologies Ltd. 5 + * 6 + * based on the ARM version: 7 + * Copyright (C) 2000 Russell King 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + */ 13 + 14 + #ifndef _METAG_MACH_ARCH_H_ 15 + #define _METAG_MACH_ARCH_H_ 16 + 17 + #include <linux/stddef.h> 18 + 19 + /** 20 + * struct machine_desc - Describes a board controlled by a Meta. 21 + * @name: Board/SoC name. 22 + * @dt_compat: Array of device tree 'compatible' strings. 23 + * 24 + * @nr_irqs: Maximum number of IRQs. 25 + * If 0, defaults to NR_IRQS in asm-generic/irq.h. 26 + * 27 + * @init_early: Early init callback. 28 + * @init_irq: IRQ init callback for setting up IRQ controllers. 29 + * @init_machine: Arch init callback for setting up devices. 30 + * @init_late: Late init callback. 31 + * 32 + * This structure is provided by each board which can be controlled by a Meta. 33 + * It is chosen by matching the compatible strings in the device tree provided 34 + * by the bootloader with the strings in @dt_compat, and sets up any aspects of 35 + * the machine that aren't configured with device tree (yet). 36 + */ 37 + struct machine_desc { 38 + const char *name; 39 + const char **dt_compat; 40 + 41 + unsigned int nr_irqs; 42 + 43 + void (*init_early)(void); 44 + void (*init_irq)(void); 45 + void (*init_machine)(void); 46 + void (*init_late)(void); 47 + }; 48 + 49 + /* 50 + * Current machine - only accessible during boot. 
51 + */ 52 + extern struct machine_desc *machine_desc; 53 + 54 + /* 55 + * Machine type table - also only accessible during boot 56 + */ 57 + extern struct machine_desc __arch_info_begin[], __arch_info_end[]; 58 + #define for_each_machine_desc(p) \ 59 + for (p = __arch_info_begin; p < __arch_info_end; p++) 60 + 61 + static inline struct machine_desc *default_machine_desc(void) 62 + { 63 + /* the default machine is the last one linked in */ 64 + if (__arch_info_end - 1 < __arch_info_begin) 65 + return NULL; 66 + return __arch_info_end - 1; 67 + } 68 + 69 + /* 70 + * Set of macros to define architecture features. This is built into 71 + * a table by the linker. 72 + */ 73 + #define MACHINE_START(_type, _name) \ 74 + static const struct machine_desc __mach_desc_##_type \ 75 + __used \ 76 + __attribute__((__section__(".arch.info.init"))) = { \ 77 + .name = _name, 78 + 79 + #define MACHINE_END \ 80 + }; 81 + 82 + #endif /* _METAG_MACH_ARCH_H_ */
+8
arch/metag/include/asm/setup.h
#ifndef _ASM_METAG_SETUP_H
#define _ASM_METAG_SETUP_H

#include <uapi/asm/setup.h>

/*
 * Per-CPU trap initialisation.
 * NOTE(review): the meaning of the unsigned long argument is not visible
 * in this header - confirm at the definition.
 */
void per_cpu_trap_init(unsigned long);
/*
 * Print every machine description built into the kernel and halt.
 * Called when the bootloader's device tree matches no machine_desc.
 */
extern void __init dump_machine_table(void);
#endif /* _ASM_METAG_SETUP_H */
+45
arch/metag/kernel/head.S
! Copyright 2005,2006,2007,2009 Imagination Technologies

#include <linux/init.h>
#include <generated/asm-offsets.h>
#undef __exit

	__HEAD
! Setup the stack and get going into _metag_start_kernel
	.global	__start
	.type	__start,function
__start:
	! Register arguments on entry (from bootloader/TBX):
	! D1Ar1 contains pTBI (ISTAT)
	! D0Ar2 contains pTBI
	! D1Ar3 contains __pTBISegs
	! D0Ar4 contains kernel arglist pointer

	! Save the two TBI pointers (D0Ar2,D1Ar1) into ___pTBIs
	MOVT	D0Re0,#HI(___pTBIs)
	ADD	D0Re0,D0Re0,#LO(___pTBIs)
	SETL	[D0Re0],D0Ar2,D1Ar1
	! Save the segment count into ___pTBISegs
	MOVT	D0Re0,#HI(___pTBISegs)
	ADD	D0Re0,D0Re0,#LO(___pTBISegs)
	SETD	[D0Re0],D1Ar3
	! Zero the frame pointer and scratch data registers
	MOV	A0FrP,#0
	MOV	D0Re0,#0
	MOV	D1Re0,#0
	MOV	D1Ar3,#0
	MOV	D1Ar1,D0Ar4		!Store kernel boot params
	MOV	D1Ar5,#0
	MOV	D0Ar6,#0
#ifdef CONFIG_METAG_DSP
	MOV	D0.8,#0
#endif
	! Point the stack at init_thread_union, past the thread_info area
	MOVT	A0StP,#HI(_init_thread_union)
	ADD	A0StP,A0StP,#LO(_init_thread_union)
	ADD	A0StP,A0StP,#THREAD_INFO_SIZE
	! Enter C: metag_start_kernel(args) with boot params in D1Ar1
	MOVT	D1RtP,#HI(_metag_start_kernel)
	CALL	D1RtP,#LO(_metag_start_kernel)
	.size	__start,.-__start

	!! Needed by TBX
	.global	__exit
	.type	__exit,function
__exit:
	! XOR x,x produces zero, so this writes 0 to TXENABLE
	! NOTE(review): presumably this stops the hardware thread - confirm
	! against the Meta TRM.
	XOR	TXENABLE,D0Re0,D0Re0
	.size	__exit,.-__exit
+20
arch/metag/kernel/machines.c
/*
 * arch/metag/kernel/machines.c
 *
 * Copyright (C) 2012 Imagination Technologies Ltd.
 *
 * Generic Meta Boards.
 */

#include <linux/init.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>

/* Device tree 'compatible' strings matched by the generic machine. */
static const char *meta_boards_compat[] __initdata = {
	"img,meta",
	NULL,
};

/*
 * Fallback machine description for any board that is merely
 * "img,meta" compatible; placed in the .arch.info.init table.
 */
MACHINE_START(META, "Generic Meta")
	.dt_compat = meta_boards_compat,
MACHINE_END
+556
arch/metag/kernel/setup.c
/*
 * Copyright (C) 2005-2012 Imagination Technologies Ltd.
 *
 * This file contains the architecture-dependent parts of system setup.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/console.h>
#include <linux/genhd.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/pfn.h>
#include <linux/start_kernel.h>
#include <linux/cpu.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

#include <asm/cachepart.h>
#include <asm/clock.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/mmu.h>
#include <asm/cpu.h>
#include <asm/hwthread.h>
#include <asm/mmzone.h>
#include <asm/l2cache.h>
#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/core_reg.h>
#include <asm/highmem.h>

/* PRIV protect as many registers as possible. */
#define DEFAULT_PRIV 0xff0f7f00

/* Enable unaligned access checking. */
#define UNALIGNED_PRIV 0x00000010

#ifdef CONFIG_METAG_UNALIGNED
#define PRIV_BITS (DEFAULT_PRIV | UNALIGNED_PRIV)
#else
#define PRIV_BITS DEFAULT_PRIV
#endif

/* Start of the kernel heap; provided by the linker script. */
extern char _heap_start[];

#ifdef CONFIG_METAG_BUILTIN_DTB
/* Device tree blob linked into the kernel image. */
extern u32 __dtb_start[];
#endif

/* Board description selected in setup_arch(); boot-time (__initdata) only. */
struct machine_desc *machine_desc __initdata;

/*
 * Map a Linux CPU number to a hardware thread ID
 * In SMP this will be setup with the correct mapping at startup; in UP this
 * will map to the HW thread on which we are running.
 */
u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BAD_HWTHREAD_ID
};

/*
 * Map a hardware thread ID to a Linux CPU number
 * In SMP this will be fleshed out with the correct CPU ID for a particular
 * hardware thread. In UP this will be initialised with the boot CPU ID.
 */
u8 hwthread_id_2_cpu[4] __read_mostly = {
	[0 ... 3] = BAD_CPU_ID
};

/* The relative offset of the MMU mapped memory (from ldlk or bootloader)
 * to the real physical memory. This is needed as we have to use the
 * physical addresses in the MMU tables (pte entries), and not the virtual
 * addresses.
 * This variable is used in the __pa() and __va() macros, and should
 * probably only be used via them.
 */
unsigned int meta_memoffset;

/* Raw bootloader argument (cmdline string or FDT); saved by
 * metag_start_kernel() for decoding in setup_arch(). */
static char __initdata *original_cmd_line;

/* Per-CPU Thread Binary Interface (TBI) handle. */
DEFINE_PER_CPU(PTBI, pTBI);

/*
 * Mappings are specified as "CPU_ID:HWTHREAD_ID", e.g.
 *
 * "hwthread_map=0:1,1:2,2:3,3:0"
 *
 * Linux CPU ID    HWTHREAD_ID
 * ---------------------------
 *     0                 1
 *     1                 2
 *     2                 3
 *     3                 0
 */
static int __init parse_hwthread_map(char *p)
{
	int cpu;

	while (*p) {
		cpu = (*p++) - '0';
		/*
		 * NOTE(review): this accepts cpu indices up to 9 regardless
		 * of NR_CPUS, but cpu_2_hwthread_id[] only has NR_CPUS
		 * entries - a kernel built with NR_CPUS < 10 could be made
		 * to write out of bounds. Confirm against NR_CPUS config.
		 */
		if (cpu < 0 || cpu > 9)
			goto err_cpu;

		p++;		/* skip the ':' separator */
		cpu_2_hwthread_id[cpu] = (*p++) - '0';
		/* Only 4 hardware threads exist (see hwthread_id_2_cpu[4]) */
		if (cpu_2_hwthread_id[cpu] >= 4)
			goto err_thread;
		hwthread_id_2_cpu[cpu_2_hwthread_id[cpu]] = cpu;

		if (*p == ',')
			p++;		/* skip comma */
	}

	return 0;
err_cpu:
	pr_err("%s: hwthread_map cpu argument out of range\n", __func__);
	return -EINVAL;
err_thread:
	pr_err("%s: hwthread_map thread argument out of range\n", __func__);
	return -EINVAL;
}
early_param("hwthread_map", parse_hwthread_map);

/*
 * Print every machine description linked into the kernel, then halt.
 * Called when the bootloader's device tree matches no machine_desc.
 * Does not return.
 */
void __init dump_machine_table(void)
{
	struct machine_desc *p;
	const char **compat;

	pr_info("Available machine support:\n\tNAME\t\tCOMPATIBLE LIST\n");
	for_each_machine_desc(p) {
		pr_info("\t%s\t[", p->name);
		for (compat = p->dt_compat; compat && *compat; ++compat)
			printk(" '%s'", *compat);
		printk(" ]\n");
	}

	pr_info("\nPlease check your kernel config and/or bootloader.\n");

	hard_processor_halt(HALT_PANIC);
}

#ifdef CONFIG_METAG_HALT_ON_PANIC
/* Panic notifier: halt the hardware thread instead of spinning. */
static int metag_panic_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	hard_processor_halt(HALT_PANIC);
	return NOTIFY_DONE;
}

static struct notifier_block metag_panic_block = {
	metag_panic_event,	/* .notifier_call */
	NULL,			/* .next */
	0			/* .priority */
};
#endif

/*
 * setup_arch - architecture-specific boot-time setup.
 * @cmdline_p: filled in with the final kernel command line.
 *
 * Decodes the bootloader argument (device tree or command line), validates
 * the MMU/TBI environment set up by the bootloader, claims the local heap
 * segment for bootmem, and runs the board's early init callback.
 */
void __init setup_arch(char **cmdline_p)
{
	unsigned long start_pfn;
	unsigned long text_start = (unsigned long)(&_stext);
	unsigned long cpu = smp_processor_id();
	unsigned long heap_start, heap_end;
	unsigned long start_pte;
	PTBI _pTBI;
	PTBISEG p_heap;
	int heap_id, i;

	metag_cache_probe();

	/* try interpreting the argument as a device tree */
	machine_desc = setup_machine_fdt(original_cmd_line);
	/* if it doesn't look like a device tree it must be a command line */
	if (!machine_desc) {
#ifdef CONFIG_METAG_BUILTIN_DTB
		/* try the embedded device tree */
		machine_desc = setup_machine_fdt(__dtb_start);
		if (!machine_desc)
			panic("Invalid embedded device tree.");
#else
		/* use the default machine description */
		machine_desc = default_machine_desc();
#endif
#ifndef CONFIG_CMDLINE_FORCE
		/* append the bootloader cmdline to any builtin fdt cmdline */
		if (boot_command_line[0] && original_cmd_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, original_cmd_line,
			COMMAND_LINE_SIZE);
#endif
	}
	setup_meta_clocks(machine_desc->clocks);

	*cmdline_p = boot_command_line;
	parse_early_param();

	/*
	 * Make sure we don't alias in dcache or icache
	 */
	check_for_cache_aliasing(cpu);


#ifdef CONFIG_METAG_HALT_ON_PANIC
	atomic_notifier_chain_register(&panic_notifier_list,
				       &metag_panic_block);
#endif

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	/* The kernel cannot run without privilege on its thread. */
	if (!(__core_reg_get(TXSTATUS) & TXSTATUS_PSTAT_BIT))
		panic("Privilege must be enabled for this thread.");

	_pTBI = __TBI(TBID_ISTAT_BIT);

	per_cpu(pTBI, cpu) = _pTBI;

	if (!per_cpu(pTBI, cpu))
		panic("No TBI found!");

	/*
	 * Initialize all interrupt vectors to our copy of __TBIUnExpXXX,
	 * rather than the version from the bootloader. This makes call
	 * stacks easier to understand and may allow us to unmap the
	 * bootloader at some point.
	 *
	 * We need to keep the LWK handler that TBI installed in order to
	 * be able to do inter-thread comms.
	 */
	for (i = 0; i <= TBID_SIGNUM_MAX; i++)
		if (i != TBID_SIGNUM_LWK)
			_pTBI->fnSigs[i] = __TBIUnExpXXX;

	/* A Meta requirement is that the kernel is loaded (virtually)
	 * at the PAGE_OFFSET.
	 */
	if (PAGE_OFFSET != text_start)
		panic("Kernel not loaded at PAGE_OFFSET (%#x) but at %#lx.",
		      PAGE_OFFSET, text_start);

	start_pte = mmu_read_second_level_page(text_start);

	/*
	 * Kernel pages should have the PRIV bit set by the bootloader.
	 */
	if (!(start_pte & _PAGE_KERNEL))
		panic("kernel pte does not have PRIV set");

	/*
	 * See __pa and __va in include/asm/page.h.
	 * This value is negative when running in local space but the
	 * calculations work anyway.
	 */
	meta_memoffset = text_start - (start_pte & PAGE_MASK);

	/* Now lets look at the heap space */
	heap_id = (__TBIThreadId() & TBID_THREAD_BITS)
		+ TBID_SEG(0, TBID_SEGSCOPE_LOCAL, TBID_SEGTYPE_HEAP);

	p_heap = __TBIFindSeg(NULL, heap_id);

	if (!p_heap)
		panic("Could not find heap from TBI!");

	/* The heap begins at the first full page after the kernel data. */
	heap_start = (unsigned long) &_heap_start;

	/* The heap ends at the end of the heap segment specified with
	 * ldlk.
	 */
	if (is_global_space(text_start)) {
		pr_debug("WARNING: running in global space!\n");
		heap_end = (unsigned long)p_heap->pGAddr + p_heap->Bytes;
	} else {
		heap_end = (unsigned long)p_heap->pLAddr + p_heap->Bytes;
	}

	ROOT_DEV = Root_RAM0;

	/* init_mm is the mm struct used for the first task. It is then
	 * cloned for all other tasks spawned from that task.
	 *
	 * Note - we are using the virtual addresses here.
	 */
	init_mm.start_code = (unsigned long)(&_stext);
	init_mm.end_code = (unsigned long)(&_etext);
	init_mm.end_data = (unsigned long)(&_edata);
	init_mm.brk = (unsigned long)heap_start;

	min_low_pfn = PFN_UP(__pa(text_start));
	max_low_pfn = PFN_DOWN(__pa(heap_end));

	pfn_base = min_low_pfn;

	/* Round max_pfn up to a 4Mb boundary. The free_bootmem_node()
	 * call later makes sure to keep the rounded up pages marked reserved.
	 */
	max_pfn = max_low_pfn + ((1 << MAX_ORDER) - 1);
	max_pfn &= ~((1 << MAX_ORDER) - 1);

	/* NOTE(review): start_pfn is computed but never used below. */
	start_pfn = PFN_UP(__pa(heap_start));

	if (min_low_pfn & ((1 << MAX_ORDER) - 1)) {
		/* Theoretically, we could expand the space that the
		 * bootmem allocator covers - much as we do for the
		 * 'high' address, and then tell the bootmem system
		 * that the lowest chunk is 'not available'. Right
		 * now it is just much easier to constrain the
		 * user to always MAX_ORDER align their kernel space.
		 */

		panic("Kernel must be %d byte aligned, currently at %#lx.",
		      1 << (MAX_ORDER + PAGE_SHIFT),
		      min_low_pfn << PAGE_SHIFT);
	}

#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	high_memory = (void *) __va(PFN_PHYS(highstart_pfn));
#else
	high_memory = (void *)__va(PFN_PHYS(max_pfn));
#endif

	paging_init(heap_end);

	setup_txprivext();

	/* Setup the boot cpu's mapping. The rest will be setup below. */
	cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id();
	hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id();

	unflatten_device_tree();

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	if (machine_desc->init_early)
		machine_desc->init_early();
}

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);

/* Run the board's late init callback, if any. */
static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_PROC_FS
/*
 * Get CPU information for use by the procfs.
 */
static const char *get_cpu_capabilities(unsigned int txenable)
{
#ifdef CONFIG_METAG_META21
	/* See CORE_ID in META HTP.GP TRM - Architecture Overview 2.1.238 */
	int coreid = metag_in32(METAC_CORE_ID);
	unsigned int dsp_type = (coreid >> 3) & 7;
	unsigned int fpu_type = (coreid >> 7) & 3;

	/* Combine the DSP and FPU type fields into one lookup key. */
	switch (dsp_type | fpu_type << 3) {
	case (0x00): return "EDSP";
	case (0x01): return "DSP";
	case (0x08): return "EDSP+LFPU";
	case (0x09): return "DSP+LFPU";
	case (0x10): return "EDSP+FPU";
	case (0x11): return "DSP+FPU";
	}
	return "UNKNOWN";

#else
	/* Pre-2.1 cores: derive the capability from TXENABLE class bits. */
	if (!(txenable & TXENABLE_CLASS_BITS))
		return "DSP";
	else
		return "";
#endif
}

/* /proc/cpuinfo 'show' callback: one stanza per online CPU. */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	const char *cpu;
	unsigned int txenable, thread_id, major, minor;
	unsigned long clockfreq = get_coreclock();
#ifdef CONFIG_SMP
	int i;
	unsigned long lpj;
#endif

	cpu = "META";

	txenable = __core_reg_get(TXENABLE);
	major = (txenable & TXENABLE_MAJOR_REV_BITS) >> TXENABLE_MAJOR_REV_S;
	minor = (txenable & TXENABLE_MINOR_REV_BITS) >> TXENABLE_MINOR_REV_S;
	thread_id = (txenable >> 8) & 0x3;

#ifdef CONFIG_SMP
	for_each_online_cpu(i) {
		lpj = per_cpu(cpu_data, i).loops_per_jiffy;
		txenable = core_reg_read(TXUCT_ID, TXENABLE_REGNUM,
					 cpu_2_hwthread_id[i]);

		seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
			   "Clocking:\t%lu.%1luMHz\n"
			   "BogoMips:\t%lu.%02lu\n"
			   "Calibration:\t%lu loops\n"
			   "Capabilities:\t%s\n\n",
			   cpu, major, minor, i,
			   clockfreq / 1000000, (clockfreq / 100000) % 10,
			   lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100,
			   lpj,
			   get_cpu_capabilities(txenable));
	}
#else
	seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n"
		   "Clocking:\t%lu.%1luMHz\n"
		   "BogoMips:\t%lu.%02lu\n"
		   "Calibration:\t%lu loops\n"
		   "Capabilities:\t%s\n",
		   cpu, major, minor, thread_id,
		   clockfreq / 1000000, (clockfreq / 100000) % 10,
		   loops_per_jiffy / (500000 / HZ),
		   (loops_per_jiffy / (5000 / HZ)) % 100,
		   loops_per_jiffy,
		   get_cpu_capabilities(txenable));
#endif /* CONFIG_SMP */

#ifdef CONFIG_METAG_L2C
	if (meta_l2c_is_present()) {
		seq_printf(m, "L2 cache:\t%s\n"
			   "L2 cache size:\t%d KB\n",
			   meta_l2c_is_enabled() ? "enabled" : "disabled",
			   meta_l2c_size() >> 10);
	}
#endif
	return 0;
}

/* seq_file iterator: a single record, valid only at position 0. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return (void *)(*pos == 0);
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	return NULL;
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next  = c_next,
	.stop  = c_stop,
	.show  = show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */

/*
 * C entry point, called from head.S. 'args' is the bootloader-provided
 * arglist pointer (command line string or flattened device tree), which
 * is decoded later by setup_arch().
 */
void __init metag_start_kernel(char *args)
{
	/* Zero the timer register so timestamps are from the point at
	 * which the kernel started running.
	 */
	__core_reg_set(TXTIMER, 0);

	/* Clear the bss. */
	memset(__bss_start, 0,
	       (unsigned long)__bss_stop - (unsigned long)__bss_start);

	/* Remember where these are for use in setup_arch */
	original_cmd_line = args;

	current_thread_info()->cpu = hard_processor_id();

	start_kernel();
}

/*
 * Setup TXPRIVEXT register to be prevent userland from touching our
 * precious registers.
 */
void setup_txprivext(void)
{
	__core_reg_set(TXPRIVEXT, PRIV_BITS);
}

/* Return the TBI handle for the given Linux CPU. */
PTBI pTBI_get(unsigned int cpu)
{
	return per_cpu(pTBI, cpu);
}

/* Capability string exported via the sysctl below. (sic: "capabilites") */
#if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU)
char capabilites[] = "dsp fpu";
#elif defined(CONFIG_METAG_DSP)
char capabilites[] = "dsp";
#elif defined(CONFIG_METAG_FPU)
char capabilites[] = "fpu";
#else
char capabilites[] = "";
#endif

static struct ctl_table caps_kern_table[] = {
	{
		.procname	= "capabilities",
		.data		= capabilites,
		.maxlen		= sizeof(capabilites),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{}
};

static struct ctl_table caps_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= caps_kern_table,
	},
	{}
};

/* Register /proc/sys/kernel/capabilities (read-only). */
static int __init capabilities_register_sysctl(void)
{
	struct ctl_table_header *caps_table_header;

	caps_table_header = register_sysctl_table(caps_root_table);
	if (!caps_table_header) {
		pr_err("Unable to register CAPABILITIES sysctl\n");
		return -ENOMEM;
	}

	return 0;
}

core_initcall(capabilities_register_sysctl);