Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arch/tile: catch up with section naming convention in 2.6.35

The convention changed to, e.g., ".data..page_aligned". This commit
fixes the places in the tile architecture that were still using the
old convention. One tile-specific section (.init.page) was dropped
in favor of just using an "aligned" attribute.

Sam Ravnborg <sam@ravnborg.org> pointed out __PAGE_ALIGNED_BSS, etc.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>

+6 -10
+1 -1
arch/tile/include/asm/cache.h
@@ -40,7 +40,7 @@
 #define INTERNODE_CACHE_BYTES L2_CACHE_BYTES

 /* Group together read-mostly things to avoid cache false sharing */
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))

 /*
  * Attribute for data that is kept read/write coherent until the end of
+2 -2
arch/tile/kernel/head_32.S
@@ -133,7 +133,7 @@
 }
 ENDPROC(_start)

-	.section ".bss.page_aligned","w"
+	__PAGE_ALIGNED_BSS
 	.align PAGE_SIZE
 ENTRY(empty_zero_page)
 	.fill PAGE_SIZE,1,0
@@ -148,7 +148,7 @@
 	.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
 	.endm

-	.section ".data.page_aligned","wa"
+	__PAGE_ALIGNED_DATA
 	.align PAGE_SIZE
 ENTRY(swapper_pg_dir)
 	/*
+1 -4
arch/tile/kernel/vmlinux.lds.S
@@ -59,10 +59,7 @@

 	. = ALIGN(PAGE_SIZE);
 	VMLINUX_SYMBOL(_sinitdata) = .;
-	.init.page : AT (ADDR(.init.page) - LOAD_OFFSET) {
-		*(.init.page)
-	} :data =0
-	INIT_DATA_SECTION(16)
+	INIT_DATA_SECTION(16) :data =0
 	PERCPU(PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	VMLINUX_SYMBOL(_einitdata) = .;
+1 -2
arch/tile/lib/atomic_32.c
@@ -46,8 +46,7 @@
 #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

 /* This page is remapped on startup to be hash-for-home. */
-int atomic_locks[PAGE_SIZE / sizeof(int) /* Only ATOMIC_HASH_SIZE is used */]
-	__attribute__((aligned(PAGE_SIZE), section(".bss.page_aligned")));
+int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

 #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

+1 -1
arch/tile/mm/init.c
@@ -445,7 +445,7 @@

 /* Temporary page table we use for staging. */
 static pgd_t pgtables[PTRS_PER_PGD]
-	__attribute__((section(".init.page")));
+	__attribute__((aligned(HV_PAGE_TABLE_ALIGN)));

 /*
  * This maps the physical memory to kernel virtual address space, a total