include/asm-sh/cache.h at v2.6.19-rc2
/* $Id: cache.h,v 1.6 2004/03/11 18:08:05 lethal Exp $
 *
 * include/asm-sh/cache.h
 *
 * Copyright 1999 (C) Niibe Yutaka
 * Copyright 2002, 2003 (C) Paul Mundt
 */
#ifndef __ASM_SH_CACHE_H
#define __ASM_SH_CACHE_H
#ifdef __KERNEL__

#include <asm/cpu/cache.h>

#define SH_CACHE_VALID		1
#define SH_CACHE_UPDATED	2
#define SH_CACHE_COMBINED	4
#define SH_CACHE_ASSOC		8

#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
#define SMP_CACHE_BYTES		L1_CACHE_BYTES

#define L1_CACHE_ALIGN(x)	(((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))

struct cache_info {
	unsigned int ways;		/* Number of cache ways */
	unsigned int sets;		/* Number of cache sets */
	unsigned int linesz;		/* Cache line size (bytes) */

	unsigned int way_size;		/* sets * line size */

	/*
	 * way_incr is the address offset for accessing the next way
	 * in memory mapped cache array ops.
	 */
	unsigned int way_incr;
	unsigned int entry_shift;
	unsigned int entry_mask;

	/*
	 * Compute a mask which selects the address bits which overlap between
	 * 1. those used to select the cache set during indexing
	 * 2. those in the physical page number.
	 */
	unsigned int alias_mask;

	unsigned int n_aliases;		/* Number of aliases */

	unsigned long flags;
};

#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHE_H */
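The fields above are filled in at boot by the CPU probe code rather than in this header. To show how they fit together, below is a standalone sketch, not part of the kernel source: it plugs in assumed SH7750-style values (a 16 KB direct-mapped operand cache: 1 way, 512 sets, 32-byte lines, with 4 KB pages), derives way_size, alias_mask and n_aliases with the same arithmetic the SH cpu-init code of this era appears to use in its compute_alias() helper (that helper name is cited from memory and should be treated as an assumption), and evaluates L1_CACHE_ALIGN() on a sample size.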
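/*
 * Standalone illustration -- NOT part of include/asm-sh/cache.h.
 * All numeric values are assumptions modelled on an SH7750-class
 * configuration; the real kernel fills struct cache_info in from
 * the CPU probe at boot.
 */
#include <stdio.h>

#define L1_CACHE_SHIFT	5	/* assumed: 32-byte cache lines */
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))

#define PAGE_SHIFT	12	/* assumed: 4 KB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

struct cache_info {
	unsigned int ways, sets, linesz;
	unsigned int way_size, way_incr;
	unsigned int entry_shift, entry_mask;
	unsigned int alias_mask, n_aliases;
	unsigned long flags;
};

int main(void)
{
	/* Assumed SH7750-style dcache: 16 KB, direct-mapped. */
	struct cache_info c = {
		.ways		= 1,
		.sets		= 512,
		.linesz		= 32,
		.entry_shift	= 5,	/* log2(line size) */
	};

	c.way_size = c.sets * c.linesz;		/* 512 * 32 = 16384 */

	/*
	 * Overlap between the set-index bits and the physical page
	 * number bits -- the alias mask described in the header
	 * comment above.
	 */
	c.alias_mask = ((c.sets - 1) << c.entry_shift) & ~(PAGE_SIZE - 1);
	c.n_aliases  = (c.alias_mask >> PAGE_SHIFT) + 1;

	printf("way_size   = %u\n", c.way_size);	/* 16384  */
	printf("alias_mask = 0x%x\n", c.alias_mask);	/* 0x3000 */
	printf("n_aliases  = %u\n", c.n_aliases);	/* 4      */
	printf("L1_CACHE_ALIGN(100) = %d\n", L1_CACHE_ALIGN(100)); /* 128 */
	return 0;
}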
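With these assumed numbers, n_aliases comes out to 4: the same physical page can be cached at four distinct colours in the virtually indexed cache, which is why the SH cache-flush paths have to visit every alias of a page when aliasing is possible.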