Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: Octeon: Update L2 Cache code for CN63XX

The CN63XX has a different L2 cache architecture. Update the helper
functions to reflect this.

Some joining of split lines was also done to improve readability, as
well as reformatting of comments.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Patchwork: http://patchwork.linux-mips.org/patch/1663/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

Authored by David Daney; committed by Ralf Baechle.
b8db85b5 a70b13a9

+628 -416
+487 -321
arch/mips/cavium-octeon/executive/cvmx-l2c.c
··· 4 4 * Contact: support@caviumnetworks.com 5 5 * This file is part of the OCTEON SDK 6 6 * 7 - * Copyright (c) 2003-2008 Cavium Networks 7 + * Copyright (c) 2003-2010 Cavium Networks 8 8 * 9 9 * This file is free software; you can redistribute it and/or modify 10 10 * it under the terms of the GNU General Public License, Version 2, as ··· 26 26 ***********************license end**************************************/ 27 27 28 28 /* 29 - * Implementation of the Level 2 Cache (L2C) control, measurement, and 30 - * debugging facilities. 29 + * Implementation of the Level 2 Cache (L2C) control, 30 + * measurement, and debugging facilities. 31 31 */ 32 32 33 33 #include <asm/octeon/cvmx.h> ··· 42 42 * if multiple applications or operating systems are running, then it 43 43 * is up to the user program to coordinate between them. 44 44 */ 45 - static cvmx_spinlock_t cvmx_l2c_spinlock; 46 - 47 - static inline int l2_size_half(void) 48 - { 49 - uint64_t val = cvmx_read_csr(CVMX_L2D_FUS3); 50 - return !!(val & (1ull << 34)); 51 - } 45 + cvmx_spinlock_t cvmx_l2c_spinlock; 52 46 53 47 int cvmx_l2c_get_core_way_partition(uint32_t core) 54 48 { ··· 51 57 /* Validate the core number */ 52 58 if (core >= cvmx_octeon_num_cores()) 53 59 return -1; 60 + 61 + if (OCTEON_IS_MODEL(OCTEON_CN63XX)) 62 + return cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff; 54 63 55 64 /* 56 65 * Use the lower two bits of the coreNumber to determine the ··· 68 71 69 72 switch (core & 0xC) { 70 73 case 0x0: 71 - return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> 72 - field; 74 + return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field; 73 75 case 0x4: 74 - return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> 75 - field; 76 + return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field; 76 77 case 0x8: 77 - return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> 78 - field; 78 + return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field; 79 79 case 0xC: 80 - 
return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> 81 - field; 80 + return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field; 82 81 } 83 82 return 0; 84 83 } ··· 88 95 89 96 mask &= valid_mask; 90 97 91 - /* A UMSK setting which blocks all L2C Ways is an error. */ 92 - if (mask == valid_mask) 98 + /* A UMSK setting which blocks all L2C Ways is an error on some chips */ 99 + if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX)) 93 100 return -1; 94 101 95 102 /* Validate the core number */ 96 103 if (core >= cvmx_octeon_num_cores()) 97 104 return -1; 98 105 99 - /* Check to make sure current mask & new mask don't block all ways */ 100 - if (((mask | cvmx_l2c_get_core_way_partition(core)) & valid_mask) == 101 - valid_mask) 102 - return -1; 106 + if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { 107 + cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask); 108 + return 0; 109 + } 103 110 104 - /* Use the lower two bits of core to determine the bit offset of the 111 + /* 112 + * Use the lower two bits of core to determine the bit offset of the 105 113 * UMSK[] field in the L2C_SPAR register. 106 114 */ 107 115 field = (core & 0x3) * 8; 108 116 109 - /* Assign the new mask setting to the UMSK[] field in the appropriate 117 + /* 118 + * Assign the new mask setting to the UMSK[] field in the appropriate 110 119 * L2C_SPAR register based on the core_num. 
111 120 * 112 121 */ 113 122 switch (core & 0xC) { 114 123 case 0x0: 115 124 cvmx_write_csr(CVMX_L2C_SPAR0, 116 - (cvmx_read_csr(CVMX_L2C_SPAR0) & 117 - ~(0xFF << field)) | mask << field); 125 + (cvmx_read_csr(CVMX_L2C_SPAR0) & ~(0xFF << field)) | 126 + mask << field); 118 127 break; 119 128 case 0x4: 120 129 cvmx_write_csr(CVMX_L2C_SPAR1, 121 - (cvmx_read_csr(CVMX_L2C_SPAR1) & 122 - ~(0xFF << field)) | mask << field); 130 + (cvmx_read_csr(CVMX_L2C_SPAR1) & ~(0xFF << field)) | 131 + mask << field); 123 132 break; 124 133 case 0x8: 125 134 cvmx_write_csr(CVMX_L2C_SPAR2, 126 - (cvmx_read_csr(CVMX_L2C_SPAR2) & 127 - ~(0xFF << field)) | mask << field); 135 + (cvmx_read_csr(CVMX_L2C_SPAR2) & ~(0xFF << field)) | 136 + mask << field); 128 137 break; 129 138 case 0xC: 130 139 cvmx_write_csr(CVMX_L2C_SPAR3, 131 - (cvmx_read_csr(CVMX_L2C_SPAR3) & 132 - ~(0xFF << field)) | mask << field); 140 + (cvmx_read_csr(CVMX_L2C_SPAR3) & ~(0xFF << field)) | 141 + mask << field); 133 142 break; 134 143 } 135 144 return 0; ··· 141 146 { 142 147 uint32_t valid_mask; 143 148 144 - valid_mask = 0xff; 145 - 146 - if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN38XX)) { 147 - if (l2_size_half()) 148 - valid_mask = 0xf; 149 - } else if (l2_size_half()) 150 - valid_mask = 0x3; 151 - 149 + valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1; 152 150 mask &= valid_mask; 153 151 154 - /* A UMSK setting which blocks all L2C Ways is an error. 
*/ 155 - if (mask == valid_mask) 156 - return -1; 157 - /* Check to make sure current mask & new mask don't block all ways */ 158 - if (((mask | cvmx_l2c_get_hw_way_partition()) & valid_mask) == 159 - valid_mask) 152 + /* A UMSK setting which blocks all L2C Ways is an error on some chips */ 153 + if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX)) 160 154 return -1; 161 155 162 - cvmx_write_csr(CVMX_L2C_SPAR4, 163 - (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask); 156 + if (OCTEON_IS_MODEL(OCTEON_CN63XX)) 157 + cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask); 158 + else 159 + cvmx_write_csr(CVMX_L2C_SPAR4, 160 + (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask); 164 161 return 0; 165 162 } 166 163 167 164 int cvmx_l2c_get_hw_way_partition(void) 168 165 { 169 - return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF); 166 + if (OCTEON_IS_MODEL(OCTEON_CN63XX)) 167 + return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff; 168 + else 169 + return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF); 170 170 } 171 171 172 172 void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event, 173 173 uint32_t clear_on_read) 174 174 { 175 - union cvmx_l2c_pfctl pfctl; 175 + if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) { 176 + union cvmx_l2c_pfctl pfctl; 176 177 177 - pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL); 178 + pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL); 178 179 179 - switch (counter) { 180 - case 0: 181 - pfctl.s.cnt0sel = event; 182 - pfctl.s.cnt0ena = 1; 183 - if (!cvmx_octeon_is_pass1()) 180 + switch (counter) { 181 + case 0: 182 + pfctl.s.cnt0sel = event; 183 + pfctl.s.cnt0ena = 1; 184 184 pfctl.s.cnt0rdclr = clear_on_read; 185 - break; 186 - case 1: 187 - pfctl.s.cnt1sel = event; 188 - pfctl.s.cnt1ena = 1; 189 - if (!cvmx_octeon_is_pass1()) 185 + break; 186 + case 1: 187 + pfctl.s.cnt1sel = event; 188 + pfctl.s.cnt1ena = 1; 190 189 pfctl.s.cnt1rdclr = clear_on_read; 191 - break; 192 - case 2: 193 - pfctl.s.cnt2sel = event; 194 - pfctl.s.cnt2ena = 1; 195 - if 
(!cvmx_octeon_is_pass1()) 190 + break; 191 + case 2: 192 + pfctl.s.cnt2sel = event; 193 + pfctl.s.cnt2ena = 1; 196 194 pfctl.s.cnt2rdclr = clear_on_read; 197 - break; 198 - case 3: 199 - default: 200 - pfctl.s.cnt3sel = event; 201 - pfctl.s.cnt3ena = 1; 202 - if (!cvmx_octeon_is_pass1()) 195 + break; 196 + case 3: 197 + default: 198 + pfctl.s.cnt3sel = event; 199 + pfctl.s.cnt3ena = 1; 203 200 pfctl.s.cnt3rdclr = clear_on_read; 204 - break; 205 - } 201 + break; 202 + } 206 203 207 - cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64); 204 + cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64); 205 + } else { 206 + union cvmx_l2c_tadx_prf l2c_tadx_prf; 207 + int tad; 208 + 209 + cvmx_dprintf("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n"); 210 + if (clear_on_read) 211 + cvmx_dprintf("L2C counters don't support clear on read for this chip\n"); 212 + 213 + l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0)); 214 + 215 + switch (counter) { 216 + case 0: 217 + l2c_tadx_prf.s.cnt0sel = event; 218 + break; 219 + case 1: 220 + l2c_tadx_prf.s.cnt1sel = event; 221 + break; 222 + case 2: 223 + l2c_tadx_prf.s.cnt2sel = event; 224 + break; 225 + default: 226 + case 3: 227 + l2c_tadx_prf.s.cnt3sel = event; 228 + break; 229 + } 230 + for (tad = 0; tad < CVMX_L2C_TADS; tad++) 231 + cvmx_write_csr(CVMX_L2C_TADX_PRF(tad), 232 + l2c_tadx_prf.u64); 233 + } 208 234 } 209 235 210 236 uint64_t cvmx_l2c_read_perf(uint32_t counter) 211 237 { 212 238 switch (counter) { 213 239 case 0: 214 - return cvmx_read_csr(CVMX_L2C_PFC0); 240 + if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) 241 + return cvmx_read_csr(CVMX_L2C_PFC0); 242 + else { 243 + uint64_t counter = 0; 244 + int tad; 245 + for (tad = 0; tad < CVMX_L2C_TADS; tad++) 246 + counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad)); 247 + return counter; 248 + } 215 249 case 1: 216 - return cvmx_read_csr(CVMX_L2C_PFC1); 250 + if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || 
OCTEON_IS_MODEL(OCTEON_CN3XXX)) 251 + return cvmx_read_csr(CVMX_L2C_PFC1); 252 + else { 253 + uint64_t counter = 0; 254 + int tad; 255 + for (tad = 0; tad < CVMX_L2C_TADS; tad++) 256 + counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad)); 257 + return counter; 258 + } 217 259 case 2: 218 - return cvmx_read_csr(CVMX_L2C_PFC2); 260 + if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) 261 + return cvmx_read_csr(CVMX_L2C_PFC2); 262 + else { 263 + uint64_t counter = 0; 264 + int tad; 265 + for (tad = 0; tad < CVMX_L2C_TADS; tad++) 266 + counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad)); 267 + return counter; 268 + } 219 269 case 3: 220 270 default: 221 - return cvmx_read_csr(CVMX_L2C_PFC3); 271 + if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) 272 + return cvmx_read_csr(CVMX_L2C_PFC3); 273 + else { 274 + uint64_t counter = 0; 275 + int tad; 276 + for (tad = 0; tad < CVMX_L2C_TADS; tad++) 277 + counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad)); 278 + return counter; 279 + } 222 280 } 223 281 } 224 282 ··· 288 240 volatile char dummy; 289 241 /* 290 242 * Adjust addr and length so we get all cache lines even for 291 - * small ranges spanning two cache lines 243 + * small ranges spanning two cache lines. 
292 244 */ 293 245 len += addr & CVMX_CACHE_LINE_MASK; 294 246 addr &= ~CVMX_CACHE_LINE_MASK; ··· 307 259 308 260 int cvmx_l2c_lock_line(uint64_t addr) 309 261 { 310 - int retval = 0; 311 - union cvmx_l2c_dbg l2cdbg; 312 - union cvmx_l2c_lckbase lckbase; 313 - union cvmx_l2c_lckoff lckoff; 314 - union cvmx_l2t_err l2t_err; 315 - l2cdbg.u64 = 0; 316 - lckbase.u64 = 0; 317 - lckoff.u64 = 0; 262 + if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { 263 + int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT; 264 + uint64_t assoc = cvmx_l2c_get_num_assoc(); 265 + uint64_t tag = addr >> shift; 266 + uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT); 267 + uint64_t way; 268 + union cvmx_l2c_tadx_tag l2c_tadx_tag; 318 269 319 - cvmx_spinlock_lock(&cvmx_l2c_spinlock); 270 + CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0); 320 271 321 - /* Clear l2t error bits if set */ 322 - l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR); 323 - l2t_err.s.lckerr = 1; 324 - l2t_err.s.lckerr2 = 1; 325 - cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64); 272 + /* Make sure we were able to lock the line */ 273 + for (way = 0; way < assoc; way++) { 274 + CVMX_CACHE_LTGL2I(index | (way << shift), 0); 275 + /* make sure CVMX_L2C_TADX_TAG is updated */ 276 + CVMX_SYNC; 277 + l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0)); 278 + if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag) 279 + break; 280 + } 326 281 327 - addr &= ~CVMX_CACHE_LINE_MASK; 282 + /* Check if a valid line is found */ 283 + if (way >= assoc) { 284 + /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: line not found for locking at 0x%llx address\n", (unsigned long long)addr); */ 285 + return -1; 286 + } 328 287 329 - /* Set this core as debug core */ 330 - l2cdbg.s.ppnum = cvmx_get_core_num(); 331 - CVMX_SYNC; 332 - cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64); 333 - cvmx_read_csr(CVMX_L2C_DBG); 334 - 335 - lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */ 336 - 
cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64); 337 - cvmx_read_csr(CVMX_L2C_LCKOFF); 338 - 339 - if (((union cvmx_l2c_cfg) (cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) { 340 - int alias_shift = 341 - CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1; 342 - uint64_t addr_tmp = 343 - addr ^ (addr & ((1 << alias_shift) - 1)) >> 344 - CVMX_L2_SET_BITS; 345 - lckbase.s.lck_base = addr_tmp >> 7; 288 + /* Check if lock bit is not set */ 289 + if (!l2c_tadx_tag.s.lock) { 290 + /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: Not able to lock at 0x%llx address\n", (unsigned long long)addr); */ 291 + return -1; 292 + } 293 + return way; 346 294 } else { 347 - lckbase.s.lck_base = addr >> 7; 295 + int retval = 0; 296 + union cvmx_l2c_dbg l2cdbg; 297 + union cvmx_l2c_lckbase lckbase; 298 + union cvmx_l2c_lckoff lckoff; 299 + union cvmx_l2t_err l2t_err; 300 + 301 + cvmx_spinlock_lock(&cvmx_l2c_spinlock); 302 + 303 + l2cdbg.u64 = 0; 304 + lckbase.u64 = 0; 305 + lckoff.u64 = 0; 306 + 307 + /* Clear l2t error bits if set */ 308 + l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR); 309 + l2t_err.s.lckerr = 1; 310 + l2t_err.s.lckerr2 = 1; 311 + cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64); 312 + 313 + addr &= ~CVMX_CACHE_LINE_MASK; 314 + 315 + /* Set this core as debug core */ 316 + l2cdbg.s.ppnum = cvmx_get_core_num(); 317 + CVMX_SYNC; 318 + cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64); 319 + cvmx_read_csr(CVMX_L2C_DBG); 320 + 321 + lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */ 322 + cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64); 323 + cvmx_read_csr(CVMX_L2C_LCKOFF); 324 + 325 + if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) { 326 + int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1; 327 + uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS; 328 + lckbase.s.lck_base = addr_tmp >> 7; 329 + } else { 330 + lckbase.s.lck_base = addr >> 7; 331 + } 332 + 333 + lckbase.s.lck_ena = 1; 334 + cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64); 
335 + /* Make sure it gets there */ 336 + cvmx_read_csr(CVMX_L2C_LCKBASE); 337 + 338 + fault_in(addr, CVMX_CACHE_LINE_SIZE); 339 + 340 + lckbase.s.lck_ena = 0; 341 + cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64); 342 + /* Make sure it gets there */ 343 + cvmx_read_csr(CVMX_L2C_LCKBASE); 344 + 345 + /* Stop being debug core */ 346 + cvmx_write_csr(CVMX_L2C_DBG, 0); 347 + cvmx_read_csr(CVMX_L2C_DBG); 348 + 349 + l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR); 350 + if (l2t_err.s.lckerr || l2t_err.s.lckerr2) 351 + retval = 1; /* We were unable to lock the line */ 352 + 353 + cvmx_spinlock_unlock(&cvmx_l2c_spinlock); 354 + return retval; 348 355 } 349 - 350 - lckbase.s.lck_ena = 1; 351 - cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64); 352 - cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */ 353 - 354 - fault_in(addr, CVMX_CACHE_LINE_SIZE); 355 - 356 - lckbase.s.lck_ena = 0; 357 - cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64); 358 - cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */ 359 - 360 - /* Stop being debug core */ 361 - cvmx_write_csr(CVMX_L2C_DBG, 0); 362 - cvmx_read_csr(CVMX_L2C_DBG); 363 - 364 - l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR); 365 - if (l2t_err.s.lckerr || l2t_err.s.lckerr2) 366 - retval = 1; /* We were unable to lock the line */ 367 - 368 - cvmx_spinlock_unlock(&cvmx_l2c_spinlock); 369 - 370 - return retval; 371 356 } 372 357 373 358 int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len) ··· 417 336 start += CVMX_CACHE_LINE_SIZE; 418 337 len -= CVMX_CACHE_LINE_SIZE; 419 338 } 420 - 421 339 return retval; 422 340 } 423 341 ··· 424 344 { 425 345 uint64_t assoc, set; 426 346 uint64_t n_assoc, n_set; 427 - union cvmx_l2c_dbg l2cdbg; 428 347 429 - cvmx_spinlock_lock(&cvmx_l2c_spinlock); 348 + n_set = cvmx_l2c_get_num_sets(); 349 + n_assoc = cvmx_l2c_get_num_assoc(); 430 350 431 - l2cdbg.u64 = 0; 432 - if (!OCTEON_IS_MODEL(OCTEON_CN30XX)) 433 - l2cdbg.s.ppnum = cvmx_get_core_num(); 434 - l2cdbg.s.finv = 1; 435 - n_set = 
CVMX_L2_SETS; 436 - n_assoc = l2_size_half() ? (CVMX_L2_ASSOC / 2) : CVMX_L2_ASSOC; 437 - for (set = 0; set < n_set; set++) { 438 - for (assoc = 0; assoc < n_assoc; assoc++) { 439 - l2cdbg.s.set = assoc; 440 - /* Enter debug mode, and make sure all other 441 - ** writes complete before we enter debug 442 - ** mode */ 443 - CVMX_SYNCW; 444 - cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64); 445 - cvmx_read_csr(CVMX_L2C_DBG); 446 - 447 - CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG 448 - (CVMX_MIPS_SPACE_XKPHYS, 449 - set * CVMX_CACHE_LINE_SIZE), 0); 450 - CVMX_SYNCW; /* Push STF out to L2 */ 451 - /* Exit debug mode */ 452 - CVMX_SYNC; 453 - cvmx_write_csr(CVMX_L2C_DBG, 0); 454 - cvmx_read_csr(CVMX_L2C_DBG); 351 + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { 352 + uint64_t address; 353 + /* These may look like constants, but they aren't... */ 354 + int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT; 355 + int set_shift = CVMX_L2C_IDX_ADDR_SHIFT; 356 + for (set = 0; set < n_set; set++) { 357 + for (assoc = 0; assoc < n_assoc; assoc++) { 358 + address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 359 + (assoc << assoc_shift) | (set << set_shift)); 360 + CVMX_CACHE_WBIL2I(address, 0); 361 + } 455 362 } 363 + } else { 364 + for (set = 0; set < n_set; set++) 365 + for (assoc = 0; assoc < n_assoc; assoc++) 366 + cvmx_l2c_flush_line(assoc, set); 456 367 } 457 - 458 - cvmx_spinlock_unlock(&cvmx_l2c_spinlock); 459 368 } 369 + 460 370 461 371 int cvmx_l2c_unlock_line(uint64_t address) 462 372 { 463 - int assoc; 464 - union cvmx_l2c_tag tag; 465 - union cvmx_l2c_dbg l2cdbg; 466 - uint32_t tag_addr; 467 373 468 - uint32_t index = cvmx_l2c_address_to_index(address); 374 + if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { 375 + int assoc; 376 + union cvmx_l2c_tag tag; 377 + uint32_t tag_addr; 378 + uint32_t index = cvmx_l2c_address_to_index(address); 469 379 470 - cvmx_spinlock_lock(&cvmx_l2c_spinlock); 471 - /* Compute portion of address that is stored in tag */ 472 - tag_addr = 473 - ((address >> 
CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & 474 - ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1)); 475 - for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) { 476 - tag = cvmx_get_l2c_tag(assoc, index); 380 + tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1)); 477 381 478 - if (tag.s.V && (tag.s.addr == tag_addr)) { 479 - l2cdbg.u64 = 0; 480 - l2cdbg.s.ppnum = cvmx_get_core_num(); 481 - l2cdbg.s.set = assoc; 482 - l2cdbg.s.finv = 1; 382 + /* 383 + * For 63XX, we can flush a line by using the physical 384 + * address directly, so finding the cache line used by 385 + * the address is only required to provide the proper 386 + * return value for the function. 387 + */ 388 + for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) { 389 + tag = cvmx_l2c_get_tag(assoc, index); 483 390 484 - CVMX_SYNC; 485 - /* Enter debug mode */ 486 - cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64); 487 - cvmx_read_csr(CVMX_L2C_DBG); 391 + if (tag.s.V && (tag.s.addr == tag_addr)) { 392 + CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0); 393 + return tag.s.L; 394 + } 395 + } 396 + } else { 397 + int assoc; 398 + union cvmx_l2c_tag tag; 399 + uint32_t tag_addr; 488 400 489 - CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG 490 - (CVMX_MIPS_SPACE_XKPHYS, 491 - address), 0); 492 - CVMX_SYNC; 493 - /* Exit debug mode */ 494 - cvmx_write_csr(CVMX_L2C_DBG, 0); 495 - cvmx_read_csr(CVMX_L2C_DBG); 496 - cvmx_spinlock_unlock(&cvmx_l2c_spinlock); 497 - return tag.s.L; 401 + uint32_t index = cvmx_l2c_address_to_index(address); 402 + 403 + /* Compute portion of address that is stored in tag */ 404 + tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1)); 405 + for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) { 406 + tag = cvmx_l2c_get_tag(assoc, index); 407 + 408 + if (tag.s.V && (tag.s.addr == tag_addr)) { 409 + cvmx_l2c_flush_line(assoc, index); 410 + return tag.s.L; 411 + } 498 412 } 499 413 } 500 - 
cvmx_spinlock_unlock(&cvmx_l2c_spinlock); 501 414 return 0; 502 415 } 503 416 ··· 518 445 uint64_t u64; 519 446 struct cvmx_l2c_tag_cn50xx { 520 447 uint64_t reserved:40; 521 - uint64_t V:1; /* Line valid */ 522 - uint64_t D:1; /* Line dirty */ 523 - uint64_t L:1; /* Line locked */ 524 - uint64_t U:1; /* Use, LRU eviction */ 448 + uint64_t V:1; /* Line valid */ 449 + uint64_t D:1; /* Line dirty */ 450 + uint64_t L:1; /* Line locked */ 451 + uint64_t U:1; /* Use, LRU eviction */ 525 452 uint64_t addr:20; /* Phys mem addr (33..14) */ 526 453 } cn50xx; 527 454 struct cvmx_l2c_tag_cn30xx { 528 455 uint64_t reserved:41; 529 - uint64_t V:1; /* Line valid */ 530 - uint64_t D:1; /* Line dirty */ 531 - uint64_t L:1; /* Line locked */ 532 - uint64_t U:1; /* Use, LRU eviction */ 456 + uint64_t V:1; /* Line valid */ 457 + uint64_t D:1; /* Line dirty */ 458 + uint64_t L:1; /* Line locked */ 459 + uint64_t U:1; /* Use, LRU eviction */ 533 460 uint64_t addr:19; /* Phys mem addr (33..15) */ 534 461 } cn30xx; 535 462 struct cvmx_l2c_tag_cn31xx { 536 463 uint64_t reserved:42; 537 - uint64_t V:1; /* Line valid */ 538 - uint64_t D:1; /* Line dirty */ 539 - uint64_t L:1; /* Line locked */ 540 - uint64_t U:1; /* Use, LRU eviction */ 464 + uint64_t V:1; /* Line valid */ 465 + uint64_t D:1; /* Line dirty */ 466 + uint64_t L:1; /* Line locked */ 467 + uint64_t U:1; /* Use, LRU eviction */ 541 468 uint64_t addr:18; /* Phys mem addr (33..16) */ 542 469 } cn31xx; 543 470 struct cvmx_l2c_tag_cn38xx { 544 471 uint64_t reserved:43; 545 - uint64_t V:1; /* Line valid */ 546 - uint64_t D:1; /* Line dirty */ 547 - uint64_t L:1; /* Line locked */ 548 - uint64_t U:1; /* Use, LRU eviction */ 472 + uint64_t V:1; /* Line valid */ 473 + uint64_t D:1; /* Line dirty */ 474 + uint64_t L:1; /* Line locked */ 475 + uint64_t U:1; /* Use, LRU eviction */ 549 476 uint64_t addr:17; /* Phys mem addr (33..17) */ 550 477 } cn38xx; 551 478 struct cvmx_l2c_tag_cn58xx { 552 479 uint64_t reserved:44; 553 - uint64_t V:1; 
/* Line valid */ 554 - uint64_t D:1; /* Line dirty */ 555 - uint64_t L:1; /* Line locked */ 556 - uint64_t U:1; /* Use, LRU eviction */ 480 + uint64_t V:1; /* Line valid */ 481 + uint64_t D:1; /* Line dirty */ 482 + uint64_t L:1; /* Line locked */ 483 + uint64_t U:1; /* Use, LRU eviction */ 557 484 uint64_t addr:16; /* Phys mem addr (33..18) */ 558 485 } cn58xx; 559 486 struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */ 560 487 struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */ 561 488 }; 489 + 562 490 563 491 /** 564 492 * @INTERNAL ··· 577 503 static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index) 578 504 { 579 505 580 - uint64_t debug_tag_addr = (((1ULL << 63) | (index << 7)) + 96); 506 + uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96); 581 507 uint64_t core = cvmx_get_core_num(); 582 508 union __cvmx_l2c_tag tag_val; 583 509 uint64_t dbg_addr = CVMX_L2C_DBG; ··· 586 512 union cvmx_l2c_dbg debug_val; 587 513 debug_val.u64 = 0; 588 514 /* 589 - * For low core count parts, the core number is always small enough 590 - * to stay in the correct field and not set any reserved bits. 515 + * For low core count parts, the core number is always small 516 + * enough to stay in the correct field and not set any 517 + * reserved bits. 591 518 */ 592 519 debug_val.s.ppnum = core; 593 520 debug_val.s.l2t = 1; 594 521 debug_val.s.set = assoc; 522 + 523 + local_irq_save(flags); 595 524 /* 596 525 * Make sure core is quiet (no prefetches, etc.) before 597 526 * entering debug mode. ··· 603 526 /* Flush L1 to make sure debug load misses L1 */ 604 527 CVMX_DCACHE_INVALIDATE; 605 528 606 - local_irq_save(flags); 607 - 608 529 /* 609 530 * The following must be done in assembly as when in debug 610 531 * mode all data loads from L2 return special debug data, not 611 - * normal memory contents. 
Also, interrupts must be 612 - * disabled, since if an interrupt occurs while in debug mode 613 - * the ISR will get debug data from all its memory reads 614 - * instead of the contents of memory 532 + * normal memory contents. Also, interrupts must be disabled, 533 + * since if an interrupt occurs while in debug mode the ISR 534 + * will get debug data from all its memory * reads instead of 535 + * the contents of memory. 615 536 */ 616 537 617 - asm volatile (".set push \n" 618 - " .set mips64 \n" 619 - " .set noreorder \n" 620 - /* Enter debug mode, wait for store */ 621 - " sd %[dbg_val], 0(%[dbg_addr]) \n" 622 - " ld $0, 0(%[dbg_addr]) \n" 623 - /* Read L2C tag data */ 624 - " ld %[tag_val], 0(%[tag_addr]) \n" 625 - /* Exit debug mode, wait for store */ 626 - " sd $0, 0(%[dbg_addr]) \n" 627 - " ld $0, 0(%[dbg_addr]) \n" 628 - /* Invalidate dcache to discard debug data */ 629 - " cache 9, 0($0) \n" 630 - " .set pop" : 631 - [tag_val] "=r"(tag_val.u64) : [dbg_addr] "r"(dbg_addr), 632 - [dbg_val] "r"(debug_val.u64), 633 - [tag_addr] "r"(debug_tag_addr) : "memory"); 538 + asm volatile ( 539 + ".set push\n\t" 540 + ".set mips64\n\t" 541 + ".set noreorder\n\t" 542 + "sd %[dbg_val], 0(%[dbg_addr])\n\t" /* Enter debug mode, wait for store */ 543 + "ld $0, 0(%[dbg_addr])\n\t" 544 + "ld %[tag_val], 0(%[tag_addr])\n\t" /* Read L2C tag data */ 545 + "sd $0, 0(%[dbg_addr])\n\t" /* Exit debug mode, wait for store */ 546 + "ld $0, 0(%[dbg_addr])\n\t" 547 + "cache 9, 0($0)\n\t" /* Invalidate dcache to discard debug data */ 548 + ".set pop" 549 + : [tag_val] "=r" (tag_val) 550 + : [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr) 551 + : "memory"); 634 552 635 553 local_irq_restore(flags); 636 - return tag_val; 637 554 555 + return tag_val; 638 556 } 557 + 639 558 640 559 union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index) 641 560 { 642 - union __cvmx_l2c_tag tmp_tag; 643 561 union cvmx_l2c_tag tag; 644 562 tag.u64 = 0; 
645 563 646 564 if ((int)association >= cvmx_l2c_get_num_assoc()) { 647 - cvmx_dprintf 648 - ("ERROR: cvmx_get_l2c_tag association out of range\n"); 565 + cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n"); 649 566 return tag; 650 567 } 651 568 if ((int)index >= cvmx_l2c_get_num_sets()) { 652 - cvmx_dprintf("ERROR: cvmx_get_l2c_tag " 653 - "index out of range (arg: %d, max: %d\n", 654 - index, cvmx_l2c_get_num_sets()); 569 + cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n", 570 + (int)index, cvmx_l2c_get_num_sets()); 655 571 return tag; 656 572 } 657 - /* __read_l2_tag is intended for internal use only */ 658 - tmp_tag = __read_l2_tag(association, index); 573 + if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { 574 + union cvmx_l2c_tadx_tag l2c_tadx_tag; 575 + uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 576 + (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) | 577 + (index << CVMX_L2C_IDX_ADDR_SHIFT)); 578 + /* 579 + * Use L2 cache Index load tag cache instruction, as 580 + * hardware loads the virtual tag for the L2 cache 581 + * block with the contents of L2C_TAD0_TAG 582 + * register. 583 + */ 584 + CVMX_CACHE_LTGL2I(address, 0); 585 + CVMX_SYNC; /* make sure CVMX_L2C_TADX_TAG is updated */ 586 + l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0)); 659 587 660 - /* 661 - * Convert all tag structure types to generic version, as it 662 - * can represent all models. 
663 - */ 664 - if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) { 665 - tag.s.V = tmp_tag.cn58xx.V; 666 - tag.s.D = tmp_tag.cn58xx.D; 667 - tag.s.L = tmp_tag.cn58xx.L; 668 - tag.s.U = tmp_tag.cn58xx.U; 669 - tag.s.addr = tmp_tag.cn58xx.addr; 670 - } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) { 671 - tag.s.V = tmp_tag.cn38xx.V; 672 - tag.s.D = tmp_tag.cn38xx.D; 673 - tag.s.L = tmp_tag.cn38xx.L; 674 - tag.s.U = tmp_tag.cn38xx.U; 675 - tag.s.addr = tmp_tag.cn38xx.addr; 676 - } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) 677 - || OCTEON_IS_MODEL(OCTEON_CN52XX)) { 678 - tag.s.V = tmp_tag.cn31xx.V; 679 - tag.s.D = tmp_tag.cn31xx.D; 680 - tag.s.L = tmp_tag.cn31xx.L; 681 - tag.s.U = tmp_tag.cn31xx.U; 682 - tag.s.addr = tmp_tag.cn31xx.addr; 683 - } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) { 684 - tag.s.V = tmp_tag.cn30xx.V; 685 - tag.s.D = tmp_tag.cn30xx.D; 686 - tag.s.L = tmp_tag.cn30xx.L; 687 - tag.s.U = tmp_tag.cn30xx.U; 688 - tag.s.addr = tmp_tag.cn30xx.addr; 689 - } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) { 690 - tag.s.V = tmp_tag.cn50xx.V; 691 - tag.s.D = tmp_tag.cn50xx.D; 692 - tag.s.L = tmp_tag.cn50xx.L; 693 - tag.s.U = tmp_tag.cn50xx.U; 694 - tag.s.addr = tmp_tag.cn50xx.addr; 588 + tag.s.V = l2c_tadx_tag.s.valid; 589 + tag.s.D = l2c_tadx_tag.s.dirty; 590 + tag.s.L = l2c_tadx_tag.s.lock; 591 + tag.s.U = l2c_tadx_tag.s.use; 592 + tag.s.addr = l2c_tadx_tag.s.tag; 695 593 } else { 696 - cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__); 697 - } 594 + union __cvmx_l2c_tag tmp_tag; 595 + /* __read_l2_tag is intended for internal use only */ 596 + tmp_tag = __read_l2_tag(association, index); 698 597 598 + /* 599 + * Convert all tag structure types to generic version, 600 + * as it can represent all models. 
601 + */ 602 + if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) { 603 + tag.s.V = tmp_tag.cn58xx.V; 604 + tag.s.D = tmp_tag.cn58xx.D; 605 + tag.s.L = tmp_tag.cn58xx.L; 606 + tag.s.U = tmp_tag.cn58xx.U; 607 + tag.s.addr = tmp_tag.cn58xx.addr; 608 + } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) { 609 + tag.s.V = tmp_tag.cn38xx.V; 610 + tag.s.D = tmp_tag.cn38xx.D; 611 + tag.s.L = tmp_tag.cn38xx.L; 612 + tag.s.U = tmp_tag.cn38xx.U; 613 + tag.s.addr = tmp_tag.cn38xx.addr; 614 + } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) { 615 + tag.s.V = tmp_tag.cn31xx.V; 616 + tag.s.D = tmp_tag.cn31xx.D; 617 + tag.s.L = tmp_tag.cn31xx.L; 618 + tag.s.U = tmp_tag.cn31xx.U; 619 + tag.s.addr = tmp_tag.cn31xx.addr; 620 + } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) { 621 + tag.s.V = tmp_tag.cn30xx.V; 622 + tag.s.D = tmp_tag.cn30xx.D; 623 + tag.s.L = tmp_tag.cn30xx.L; 624 + tag.s.U = tmp_tag.cn30xx.U; 625 + tag.s.addr = tmp_tag.cn30xx.addr; 626 + } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) { 627 + tag.s.V = tmp_tag.cn50xx.V; 628 + tag.s.D = tmp_tag.cn50xx.D; 629 + tag.s.L = tmp_tag.cn50xx.L; 630 + tag.s.U = tmp_tag.cn50xx.U; 631 + tag.s.addr = tmp_tag.cn50xx.addr; 632 + } else { 633 + cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__); 634 + } 635 + } 699 636 return tag; 700 637 } 701 638 702 639 uint32_t cvmx_l2c_address_to_index(uint64_t addr) 703 640 { 704 641 uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT; 705 - union cvmx_l2c_cfg l2c_cfg; 706 - l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG); 642 + int indxalias = 0; 707 643 708 - if (l2c_cfg.s.idxalias) { 709 - idx ^= 710 - ((addr & CVMX_L2C_ALIAS_MASK) >> 711 - CVMX_L2C_TAG_ADDR_ALIAS_SHIFT); 644 + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { 645 + union cvmx_l2c_ctl l2c_ctl; 646 + l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL); 647 + indxalias = !l2c_ctl.s.disidxalias; 648 + } else { 649 + union cvmx_l2c_cfg l2c_cfg; 650 + l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG); 651 + indxalias = 
l2c_cfg.s.idxalias; 652 + } 653 + 654 + if (indxalias) { 655 + if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { 656 + uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7; 657 + idx ^= idx / cvmx_l2c_get_num_sets(); 658 + idx ^= a_14_12; 659 + } else { 660 + idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT); 661 + } 712 662 } 713 663 idx &= CVMX_L2C_IDX_MASK; 714 664 return idx; ··· 756 652 int l2_set_bits; 757 653 if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) 758 654 l2_set_bits = 11; /* 2048 sets */ 759 - else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) 655 + else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)) 760 656 l2_set_bits = 10; /* 1024 sets */ 761 - else if (OCTEON_IS_MODEL(OCTEON_CN31XX) 762 - || OCTEON_IS_MODEL(OCTEON_CN52XX)) 657 + else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) 763 658 l2_set_bits = 9; /* 512 sets */ 764 659 else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) 765 660 l2_set_bits = 8; /* 256 sets */ ··· 769 666 l2_set_bits = 11; /* 2048 sets */ 770 667 } 771 668 return l2_set_bits; 772 - 773 669 } 774 670 775 671 /* Return the number of sets in the L2 Cache */ ··· 784 682 if (OCTEON_IS_MODEL(OCTEON_CN56XX) || 785 683 OCTEON_IS_MODEL(OCTEON_CN52XX) || 786 684 OCTEON_IS_MODEL(OCTEON_CN58XX) || 787 - OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN38XX)) 685 + OCTEON_IS_MODEL(OCTEON_CN50XX) || 686 + OCTEON_IS_MODEL(OCTEON_CN38XX)) 788 687 l2_assoc = 8; 688 + else if (OCTEON_IS_MODEL(OCTEON_CN63XX)) 689 + l2_assoc = 16; 789 690 else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || 790 691 OCTEON_IS_MODEL(OCTEON_CN30XX)) 791 692 l2_assoc = 4; ··· 798 693 } 799 694 800 695 /* Check to see if part of the cache is disabled */ 801 - if (cvmx_fuse_read(265)) 802 - l2_assoc = l2_assoc >> 2; 803 - else if (cvmx_fuse_read(264)) 804 - l2_assoc = l2_assoc >> 1; 696 + if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { 697 + union cvmx_mio_fus_dat3 mio_fus_dat3; 
805 698 699 + mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3); 700 + /* 701 + * cvmx_mio_fus_dat3.s.l2c_crip fuses map as follows 702 + * <2> will be not used for 63xx 703 + * <1> disables 1/2 ways 704 + * <0> disables 1/4 ways 705 + * They are cumulative, so for 63xx: 706 + * <1> <0> 707 + * 0 0 16-way 2MB cache 708 + * 0 1 12-way 1.5MB cache 709 + * 1 0 8-way 1MB cache 710 + * 1 1 4-way 512KB cache 711 + */ 712 + 713 + if (mio_fus_dat3.s.l2c_crip == 3) 714 + l2_assoc = 4; 715 + else if (mio_fus_dat3.s.l2c_crip == 2) 716 + l2_assoc = 8; 717 + else if (mio_fus_dat3.s.l2c_crip == 1) 718 + l2_assoc = 12; 719 + } else { 720 + union cvmx_l2d_fus3 val; 721 + val.u64 = cvmx_read_csr(CVMX_L2D_FUS3); 722 + /* 723 + * Using shifts here, as bit position names are 724 + * different for each model but they all mean the 725 + * same. 726 + */ 727 + if ((val.u64 >> 35) & 0x1) 728 + l2_assoc = l2_assoc >> 2; 729 + else if ((val.u64 >> 34) & 0x1) 730 + l2_assoc = l2_assoc >> 1; 731 + } 806 732 return l2_assoc; 807 733 } 808 734 ··· 847 711 */ 848 712 void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index) 849 713 { 850 - union cvmx_l2c_dbg l2cdbg; 714 + /* Check the range of the index. */ 715 + if (index > (uint32_t)cvmx_l2c_get_num_sets()) { 716 + cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n"); 717 + return; 718 + } 851 719 852 - l2cdbg.u64 = 0; 853 - l2cdbg.s.ppnum = cvmx_get_core_num(); 854 - l2cdbg.s.finv = 1; 720 + /* Check the range of association. */ 721 + if (assoc > (uint32_t)cvmx_l2c_get_num_assoc()) { 722 + cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n"); 723 + return; 724 + } 855 725 856 - l2cdbg.s.set = assoc; 857 - /* 858 - * Enter debug mode, and make sure all other writes complete 859 - * before we enter debug mode. 
860 - */ 861 - asm volatile ("sync" : : : "memory"); 862 - cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64); 863 - cvmx_read_csr(CVMX_L2C_DBG); 726 + if (OCTEON_IS_MODEL(OCTEON_CN63XX)) { 727 + uint64_t address; 728 + /* Create the address based on index and association. 729 + * Bits<20:17> select the way of the cache block involved in 730 + * the operation 731 + * Bits<16:7> of the effect address select the index 732 + */ 733 + address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 734 + (assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) | 735 + (index << CVMX_L2C_IDX_ADDR_SHIFT)); 736 + CVMX_CACHE_WBIL2I(address, 0); 737 + } else { 738 + union cvmx_l2c_dbg l2cdbg; 864 739 865 - CVMX_PREPARE_FOR_STORE(((1ULL << 63) + (index) * 128), 0); 866 - /* Exit debug mode */ 867 - asm volatile ("sync" : : : "memory"); 868 - cvmx_write_csr(CVMX_L2C_DBG, 0); 869 - cvmx_read_csr(CVMX_L2C_DBG); 740 + l2cdbg.u64 = 0; 741 + if (!OCTEON_IS_MODEL(OCTEON_CN30XX)) 742 + l2cdbg.s.ppnum = cvmx_get_core_num(); 743 + l2cdbg.s.finv = 1; 744 + 745 + l2cdbg.s.set = assoc; 746 + cvmx_spinlock_lock(&cvmx_l2c_spinlock); 747 + /* 748 + * Enter debug mode, and make sure all other writes 749 + * complete before we enter debug mode 750 + */ 751 + CVMX_SYNC; 752 + cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64); 753 + cvmx_read_csr(CVMX_L2C_DBG); 754 + 755 + CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, 756 + index * CVMX_CACHE_LINE_SIZE), 757 + 0); 758 + /* Exit debug mode */ 759 + CVMX_SYNC; 760 + cvmx_write_csr(CVMX_L2C_DBG, 0); 761 + cvmx_read_csr(CVMX_L2C_DBG); 762 + cvmx_spinlock_unlock(&cvmx_l2c_spinlock); 763 + } 870 764 }
+11
arch/mips/include/asm/octeon/cvmx-asm.h
··· 114 114 #define CVMX_DCACHE_INVALIDATE \ 115 115 { CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); } 116 116 117 + #define CVMX_CACHE(op, address, offset) \ 118 + asm volatile ("cache " CVMX_TMP_STR(op) ", " CVMX_TMP_STR(offset) "(%[rbase])" \ 119 + : : [rbase] "d" (address) ) 120 + /* fetch and lock the state. */ 121 + #define CVMX_CACHE_LCKL2(address, offset) CVMX_CACHE(31, address, offset) 122 + /* unlock the state. */ 123 + #define CVMX_CACHE_WBIL2(address, offset) CVMX_CACHE(23, address, offset) 124 + /* invalidate the cache block and clear the USED bits for the block */ 125 + #define CVMX_CACHE_WBIL2I(address, offset) CVMX_CACHE(3, address, offset) 126 + /* load virtual tag and data for the L2 cache block into L2C_TAD0_TAG register */ 127 + #define CVMX_CACHE_LTGL2I(address, offset) CVMX_CACHE(7, address, offset) 117 128 118 129 #define CVMX_POP(result, input) \ 119 130 asm ("pop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+130 -95
arch/mips/include/asm/octeon/cvmx-l2c.h
··· 4 4 * Contact: support@caviumnetworks.com 5 5 * This file is part of the OCTEON SDK 6 6 * 7 - * Copyright (c) 2003-2008 Cavium Networks 7 + * Copyright (c) 2003-2010 Cavium Networks 8 8 * 9 9 * This file is free software; you can redistribute it and/or modify 10 10 * it under the terms of the GNU General Public License, Version 2, as ··· 26 26 ***********************license end**************************************/ 27 27 28 28 /* 29 - * 30 29 * Interface to the Level 2 Cache (L2C) control, measurement, and debugging 31 30 * facilities. 32 31 */ ··· 33 34 #ifndef __CVMX_L2C_H__ 34 35 #define __CVMX_L2C_H__ 35 36 36 - /* Deprecated macro, use function */ 37 - #define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() 37 + #define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro, use function */ 38 + #define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits() /* Deprecated macro, use function */ 39 + #define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro, use function */ 38 40 39 - /* Deprecated macro, use function */ 40 - #define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits() 41 - 42 - /* Deprecated macro, use function */ 43 - #define CVMX_L2_SETS cvmx_l2c_get_num_sets() 44 41 45 42 #define CVMX_L2C_IDX_ADDR_SHIFT 7 /* based on 128 byte cache line size */ 46 43 #define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1) 47 44 48 45 /* Defines for index aliasing computations */ 49 - #define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT \ 50 - (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits()) 46 + #define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits()) 47 + #define CVMX_L2C_ALIAS_MASK (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) 48 + #define CVMX_L2C_MEMBANK_SELECT_SIZE 4096 51 49 52 - #define CVMX_L2C_ALIAS_MASK \ 53 - (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) 50 + /* Defines for Virtualizations, valid only from Octeon II onwards. */ 51 + #define CVMX_L2C_VRT_MAX_VIRTID_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 
64 : 0) 52 + #define CVMX_L2C_VRT_MAX_MEMSZ_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 32 : 0) 54 53 55 54 union cvmx_l2c_tag { 56 55 uint64_t u64; 57 56 struct { 58 57 uint64_t reserved:28; 59 - uint64_t V:1; /* Line valid */ 60 - uint64_t D:1; /* Line dirty */ 61 - uint64_t L:1; /* Line locked */ 62 - uint64_t U:1; /* Use, LRU eviction */ 58 + uint64_t V:1; /* Line valid */ 59 + uint64_t D:1; /* Line dirty */ 60 + uint64_t L:1; /* Line locked */ 61 + uint64_t U:1; /* Use, LRU eviction */ 63 62 uint64_t addr:32; /* Phys mem (not all bits valid) */ 64 63 } s; 65 64 }; 66 65 66 + /* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */ 67 + #define CVMX_L2C_TADS 1 68 + 67 69 /* L2C Performance Counter events. */ 68 70 enum cvmx_l2c_event { 69 - CVMX_L2C_EVENT_CYCLES = 0, 70 - CVMX_L2C_EVENT_INSTRUCTION_MISS = 1, 71 - CVMX_L2C_EVENT_INSTRUCTION_HIT = 2, 72 - CVMX_L2C_EVENT_DATA_MISS = 3, 73 - CVMX_L2C_EVENT_DATA_HIT = 4, 74 - CVMX_L2C_EVENT_MISS = 5, 75 - CVMX_L2C_EVENT_HIT = 6, 76 - CVMX_L2C_EVENT_VICTIM_HIT = 7, 77 - CVMX_L2C_EVENT_INDEX_CONFLICT = 8, 78 - CVMX_L2C_EVENT_TAG_PROBE = 9, 79 - CVMX_L2C_EVENT_TAG_UPDATE = 10, 80 - CVMX_L2C_EVENT_TAG_COMPLETE = 11, 81 - CVMX_L2C_EVENT_TAG_DIRTY = 12, 82 - CVMX_L2C_EVENT_DATA_STORE_NOP = 13, 83 - CVMX_L2C_EVENT_DATA_STORE_READ = 14, 71 + CVMX_L2C_EVENT_CYCLES = 0, 72 + CVMX_L2C_EVENT_INSTRUCTION_MISS = 1, 73 + CVMX_L2C_EVENT_INSTRUCTION_HIT = 2, 74 + CVMX_L2C_EVENT_DATA_MISS = 3, 75 + CVMX_L2C_EVENT_DATA_HIT = 4, 76 + CVMX_L2C_EVENT_MISS = 5, 77 + CVMX_L2C_EVENT_HIT = 6, 78 + CVMX_L2C_EVENT_VICTIM_HIT = 7, 79 + CVMX_L2C_EVENT_INDEX_CONFLICT = 8, 80 + CVMX_L2C_EVENT_TAG_PROBE = 9, 81 + CVMX_L2C_EVENT_TAG_UPDATE = 10, 82 + CVMX_L2C_EVENT_TAG_COMPLETE = 11, 83 + CVMX_L2C_EVENT_TAG_DIRTY = 12, 84 + CVMX_L2C_EVENT_DATA_STORE_NOP = 13, 85 + CVMX_L2C_EVENT_DATA_STORE_READ = 14, 84 86 CVMX_L2C_EVENT_DATA_STORE_WRITE = 15, 85 - CVMX_L2C_EVENT_FILL_DATA_VALID = 16, 86 - CVMX_L2C_EVENT_WRITE_REQUEST = 17, 87 - 
CVMX_L2C_EVENT_READ_REQUEST = 18, 87 + CVMX_L2C_EVENT_FILL_DATA_VALID = 16, 88 + CVMX_L2C_EVENT_WRITE_REQUEST = 17, 89 + CVMX_L2C_EVENT_READ_REQUEST = 18, 88 90 CVMX_L2C_EVENT_WRITE_DATA_VALID = 19, 89 - CVMX_L2C_EVENT_XMC_NOP = 20, 90 - CVMX_L2C_EVENT_XMC_LDT = 21, 91 - CVMX_L2C_EVENT_XMC_LDI = 22, 92 - CVMX_L2C_EVENT_XMC_LDD = 23, 93 - CVMX_L2C_EVENT_XMC_STF = 24, 94 - CVMX_L2C_EVENT_XMC_STT = 25, 95 - CVMX_L2C_EVENT_XMC_STP = 26, 96 - CVMX_L2C_EVENT_XMC_STC = 27, 97 - CVMX_L2C_EVENT_XMC_DWB = 28, 98 - CVMX_L2C_EVENT_XMC_PL2 = 29, 99 - CVMX_L2C_EVENT_XMC_PSL1 = 30, 100 - CVMX_L2C_EVENT_XMC_IOBLD = 31, 101 - CVMX_L2C_EVENT_XMC_IOBST = 32, 102 - CVMX_L2C_EVENT_XMC_IOBDMA = 33, 103 - CVMX_L2C_EVENT_XMC_IOBRSP = 34, 104 - CVMX_L2C_EVENT_XMC_BUS_VALID = 35, 105 - CVMX_L2C_EVENT_XMC_MEM_DATA = 36, 106 - CVMX_L2C_EVENT_XMC_REFL_DATA = 37, 107 - CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38, 108 - CVMX_L2C_EVENT_RSC_NOP = 39, 109 - CVMX_L2C_EVENT_RSC_STDN = 40, 110 - CVMX_L2C_EVENT_RSC_FILL = 41, 111 - CVMX_L2C_EVENT_RSC_REFL = 42, 112 - CVMX_L2C_EVENT_RSC_STIN = 43, 113 - CVMX_L2C_EVENT_RSC_SCIN = 44, 114 - CVMX_L2C_EVENT_RSC_SCFL = 45, 115 - CVMX_L2C_EVENT_RSC_SCDN = 46, 116 - CVMX_L2C_EVENT_RSC_DATA_VALID = 47, 117 - CVMX_L2C_EVENT_RSC_VALID_FILL = 48, 118 - CVMX_L2C_EVENT_RSC_VALID_STRSP = 49, 119 - CVMX_L2C_EVENT_RSC_VALID_REFL = 50, 120 - CVMX_L2C_EVENT_LRF_REQ = 51, 121 - CVMX_L2C_EVENT_DT_RD_ALLOC = 52, 122 - CVMX_L2C_EVENT_DT_WR_INVAL = 53 91 + CVMX_L2C_EVENT_XMC_NOP = 20, 92 + CVMX_L2C_EVENT_XMC_LDT = 21, 93 + CVMX_L2C_EVENT_XMC_LDI = 22, 94 + CVMX_L2C_EVENT_XMC_LDD = 23, 95 + CVMX_L2C_EVENT_XMC_STF = 24, 96 + CVMX_L2C_EVENT_XMC_STT = 25, 97 + CVMX_L2C_EVENT_XMC_STP = 26, 98 + CVMX_L2C_EVENT_XMC_STC = 27, 99 + CVMX_L2C_EVENT_XMC_DWB = 28, 100 + CVMX_L2C_EVENT_XMC_PL2 = 29, 101 + CVMX_L2C_EVENT_XMC_PSL1 = 30, 102 + CVMX_L2C_EVENT_XMC_IOBLD = 31, 103 + CVMX_L2C_EVENT_XMC_IOBST = 32, 104 + CVMX_L2C_EVENT_XMC_IOBDMA = 33, 105 + CVMX_L2C_EVENT_XMC_IOBRSP = 34, 106 + 
CVMX_L2C_EVENT_XMC_BUS_VALID = 35, 107 + CVMX_L2C_EVENT_XMC_MEM_DATA = 36, 108 + CVMX_L2C_EVENT_XMC_REFL_DATA = 37, 109 + CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38, 110 + CVMX_L2C_EVENT_RSC_NOP = 39, 111 + CVMX_L2C_EVENT_RSC_STDN = 40, 112 + CVMX_L2C_EVENT_RSC_FILL = 41, 113 + CVMX_L2C_EVENT_RSC_REFL = 42, 114 + CVMX_L2C_EVENT_RSC_STIN = 43, 115 + CVMX_L2C_EVENT_RSC_SCIN = 44, 116 + CVMX_L2C_EVENT_RSC_SCFL = 45, 117 + CVMX_L2C_EVENT_RSC_SCDN = 46, 118 + CVMX_L2C_EVENT_RSC_DATA_VALID = 47, 119 + CVMX_L2C_EVENT_RSC_VALID_FILL = 48, 120 + CVMX_L2C_EVENT_RSC_VALID_STRSP = 49, 121 + CVMX_L2C_EVENT_RSC_VALID_REFL = 50, 122 + CVMX_L2C_EVENT_LRF_REQ = 51, 123 + CVMX_L2C_EVENT_DT_RD_ALLOC = 52, 124 + CVMX_L2C_EVENT_DT_WR_INVAL = 53, 125 + CVMX_L2C_EVENT_MAX 126 + }; 127 + 128 + /* L2C Performance Counter events for Octeon2. */ 129 + enum cvmx_l2c_tad_event { 130 + CVMX_L2C_TAD_EVENT_NONE = 0, 131 + CVMX_L2C_TAD_EVENT_TAG_HIT = 1, 132 + CVMX_L2C_TAD_EVENT_TAG_MISS = 2, 133 + CVMX_L2C_TAD_EVENT_TAG_NOALLOC = 3, 134 + CVMX_L2C_TAD_EVENT_TAG_VICTIM = 4, 135 + CVMX_L2C_TAD_EVENT_SC_FAIL = 5, 136 + CVMX_L2C_TAD_EVENT_SC_PASS = 6, 137 + CVMX_L2C_TAD_EVENT_LFB_VALID = 7, 138 + CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB = 8, 139 + CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB = 9, 140 + CVMX_L2C_TAD_EVENT_QUAD0_INDEX = 128, 141 + CVMX_L2C_TAD_EVENT_QUAD0_READ = 129, 142 + CVMX_L2C_TAD_EVENT_QUAD0_BANK = 130, 143 + CVMX_L2C_TAD_EVENT_QUAD0_WDAT = 131, 144 + CVMX_L2C_TAD_EVENT_QUAD1_INDEX = 144, 145 + CVMX_L2C_TAD_EVENT_QUAD1_READ = 145, 146 + CVMX_L2C_TAD_EVENT_QUAD1_BANK = 146, 147 + CVMX_L2C_TAD_EVENT_QUAD1_WDAT = 147, 148 + CVMX_L2C_TAD_EVENT_QUAD2_INDEX = 160, 149 + CVMX_L2C_TAD_EVENT_QUAD2_READ = 161, 150 + CVMX_L2C_TAD_EVENT_QUAD2_BANK = 162, 151 + CVMX_L2C_TAD_EVENT_QUAD2_WDAT = 163, 152 + CVMX_L2C_TAD_EVENT_QUAD3_INDEX = 176, 153 + CVMX_L2C_TAD_EVENT_QUAD3_READ = 177, 154 + CVMX_L2C_TAD_EVENT_QUAD3_BANK = 178, 155 + CVMX_L2C_TAD_EVENT_QUAD3_WDAT = 179, 156 + CVMX_L2C_TAD_EVENT_MAX 123 157 }; 124 158 125 
159 /** ··· 164 132 * @clear_on_read: When asserted, any read of the performance counter 165 133 * clears the counter. 166 134 * 167 - * The routine does not clear the counter. 135 + * @note The routine does not clear the counter. 168 136 */ 169 - void cvmx_l2c_config_perf(uint32_t counter, 170 - enum cvmx_l2c_event event, uint32_t clear_on_read); 137 + void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event, uint32_t clear_on_read); 138 + 171 139 /** 172 140 * Read the given L2 Cache performance counter. The counter must be configured 173 141 * before reading, but this routine does not enforce this requirement. ··· 192 160 /** 193 161 * Partitions the L2 cache for a core 194 162 * 195 - * @core: The core that the partitioning applies to. 163 + * @core: The core that the partitioning applies to. 164 + * @mask: The partitioning of the ways expressed as a binary 165 + * mask. A 0 bit allows the core to evict cache lines from 166 + * a way, while a 1 bit blocks the core from evicting any 167 + * lines from that way. There must be at least one allowed 168 + * way (0 bit) in the mask. 196 169 * 197 - * @mask: The partitioning of the ways expressed as a binary mask. A 0 198 - * bit allows the core to evict cache lines from a way, while a 199 - * 1 bit blocks the core from evicting any lines from that 200 - * way. There must be at least one allowed way (0 bit) in the 201 - * mask. 202 - * 203 - * If any ways are blocked for all cores and the HW blocks, then those 204 - * ways will never have any cache lines evicted from them. All cores 205 - * and the hardware blocks are free to read from all ways regardless 206 - * of the partitioning. 170 + 171 + * @note If any ways are blocked for all cores and the HW blocks, then 172 + * those ways will never have any cache lines evicted from them. 173 + * All cores and the hardware blocks are free to read from all 174 + * ways regardless of the partitioning. 
207 175 */ 208 176 int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask); 209 177 ··· 219 187 /** 220 188 * Partitions the L2 cache for the hardware blocks. 221 189 * 222 - * @mask: The partitioning of the ways expressed as a binary mask. A 0 223 - * bit allows the core to evict cache lines from a way, while a 224 - * 1 bit blocks the core from evicting any lines from that 225 - * way. There must be at least one allowed way (0 bit) in the 226 - * mask. 190 + * @mask: The partitioning of the ways expressed as a binary 191 + * mask. A 0 bit allows the core to evict cache lines from 192 + * a way, while a 1 bit blocks the core from evicting any 193 + * lines from that way. There must be at least one allowed 194 + * way (0 bit) in the mask. 227 195 * 228 - * If any ways are blocked for all cores and the HW blocks, then those 229 - * ways will never have any cache lines evicted from them. All cores 230 - * and the hardware blocks are free to read from all ways regardless 231 - * of the partitioning. 196 + 197 + * @note If any ways are blocked for all cores and the HW blocks, then 198 + * those ways will never have any cache lines evicted from them. 199 + * All cores and the hardware blocks are free to read from all 200 + * ways regardless of the partitioning. 
232 201 */ 233 202 int cvmx_l2c_set_hw_way_partition(uint32_t mask); 203 + 234 204 235 205 /** 236 206 * Locks a line in the L2 cache at the specified physical address ··· 297 263 */ 298 264 union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index); 299 265 300 - /* Wrapper around deprecated old function name */ 301 - static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, 302 - uint32_t index) 266 + /* Wrapper providing a deprecated old function name */ 267 + static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index) __attribute__((deprecated)); 268 + static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index) 303 269 { 304 270 return cvmx_l2c_get_tag(association, index); 305 271 } 272 + 306 273 307 274 /** 308 275 * Returns the cache index for a given physical address