[IA64] Export cpu cache info by sysfs

The patch exports 8 attributes of cpu cache info under
/sys/devices/system/cpu/cpuX/cache/indexX:
1) level
2) type
3) coherency_line_size
4) ways_of_associativity
5) size
6) shared_cpu_map
7) attributes
8) number_of_sets (= size / ways_of_associativity / coherency_line_size).
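
For example, a 256K cache with 8 ways and a 64-byte line reports
256*1024 / 8 / 64 = 512 sets.

As an illustration only (not part of the patch), a minimal user-space sketch
like the one below can read the exported files for cpu0's first cache leaf,
assuming the cpu is online and the leaf exists; absent attributes are simply
skipped:

	/* illustrative example, not part of the patch */
	#include <stdio.h>

	int main(void)
	{
		static const char *attrs[] = {
			"level", "type", "coherency_line_size",
			"ways_of_associativity", "size", "shared_cpu_map",
			"attributes", "number_of_sets"
		};
		char path[128], line[256];
		unsigned int i;
		FILE *f;

		for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
			/* path layout as described above */
			snprintf(path, sizeof(path),
				 "/sys/devices/system/cpu/cpu0/cache/index0/%s",
				 attrs[i]);
			f = fopen(path, "r");
			if (!f)
				continue;	/* attribute missing or cpu offline */
			if (fgets(line, sizeof(line), f))
				printf("%s: %s", attrs[i], line);
			fclose(f);
		}
		return 0;
	}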

Signed-off-by: Zhang Yanmin <yanmin.zhang@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>

Authored by Zhang, Yanmin and committed by Tony Luck (f1918005, d1127e40)

 arch/ia64/kernel/topology.c |  367 +++++++++++++++++++++++++++++++++++++++++
 include/asm-ia64/pal.h      |   28 ++
 2 files changed, 395 insertions(+)

arch/ia64/kernel/topology.c
···
  * 2002/08/07 Erich Focht <efocht@ess.nec.de>
  *	Populate cpu entries in sysfs for non-numa systems as well
  *	Intel Corporation - Ashok Raj
+ * 02/27/2006 Zhang, Yanmin
+ *	Populate cpu cache entries in sysfs for cpu cache info
  */
 
 #include <linux/config.h>
···
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/nodemask.h>
+#include <linux/notifier.h>
 #include <asm/mmzone.h>
 #include <asm/numa.h>
 #include <asm/cpu.h>
···
 }
 
 subsys_initcall(topology_init);
+
+
+/*
+ * Export cpu cache information through sysfs
+ */
+
+/*
+ * A bunch of string array to get pretty printing
+ */
+static const char *cache_types[] = {
+	"",			/* not used */
+	"Instruction",
+	"Data",
+	"Unified"	/* unified */
+};
+
+static const char *cache_mattrib[]={
+	"WriteThrough",
+	"WriteBack",
+	"",		/* reserved */
+	""		/* reserved */
+};
+
+struct cache_info {
+	pal_cache_config_info_t cci;
+	cpumask_t shared_cpu_map;
+	int level;
+	int type;
+	struct kobject kobj;
+};
+
+struct cpu_cache_info {
+	struct cache_info *cache_leaves;
+	int num_cache_leaves;
+	struct kobject kobj;
+};
+
+static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
+#define LEAF_KOBJECT_PTR(x,y)	(&all_cpu_cache_info[x].cache_leaves[y])
+
+#ifdef CONFIG_SMP
+static void cache_shared_cpu_map_setup( unsigned int cpu,
+		struct cache_info * this_leaf)
+{
+	pal_cache_shared_info_t csi;
+	int num_shared, i = 0;
+	unsigned int j;
+
+	if (cpu_data(cpu)->threads_per_core <= 1 &&
+		cpu_data(cpu)->cores_per_socket <= 1) {
+		cpu_set(cpu, this_leaf->shared_cpu_map);
+		return;
+	}
+
+	if (ia64_pal_cache_shared_info(this_leaf->level,
+					this_leaf->type,
+					0,
+					&csi) != PAL_STATUS_SUCCESS)
+		return;
+
+	num_shared = (int) csi.num_shared;
+	do {
+		for_each_cpu(j)
+			if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
+				&& cpu_data(j)->core_id == csi.log1_cid
+				&& cpu_data(j)->thread_id == csi.log1_tid)
+				cpu_set(j, this_leaf->shared_cpu_map);
+
+		i++;
+	} while (i < num_shared &&
+		ia64_pal_cache_shared_info(this_leaf->level,
+				this_leaf->type,
+				i,
+				&csi) == PAL_STATUS_SUCCESS);
+}
+#else
+static void cache_shared_cpu_map_setup(unsigned int cpu,
+		struct cache_info * this_leaf)
+{
+	cpu_set(cpu, this_leaf->shared_cpu_map);
+	return;
+}
+#endif
+
+static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
+					char *buf)
+{
+	return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
+}
+
+static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
+					char *buf)
+{
+	return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
+}
+
+static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
+{
+	return sprintf(buf,
+			"%s\n",
+			cache_mattrib[this_leaf->cci.pcci_cache_attr]);
+}
+
+static ssize_t show_size(struct cache_info *this_leaf, char *buf)
+{
+	return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
+}
+
+static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
+{
+	unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
+	number_of_sets /= this_leaf->cci.pcci_assoc;
+	number_of_sets /= 1 << this_leaf->cci.pcci_line_size;
+
+	return sprintf(buf, "%u\n", number_of_sets);
+}
+
+static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
+{
+	ssize_t len;
+	cpumask_t shared_cpu_map;
+
+	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
+	len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
+	len += sprintf(buf+len, "\n");
+	return len;
+}
+
+static ssize_t show_type(struct cache_info *this_leaf, char *buf)
+{
+	int type = this_leaf->type + this_leaf->cci.pcci_unified;
+	return sprintf(buf, "%s\n", cache_types[type]);
+}
+
+static ssize_t show_level(struct cache_info *this_leaf, char *buf)
+{
+	return sprintf(buf, "%u\n", this_leaf->level);
+}
+
+struct cache_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct cache_info *, char *);
+	ssize_t (*store)(struct cache_info *, const char *, size_t count);
+};
+
+#ifdef define_one_ro
+#undef define_one_ro
+#endif
+#define define_one_ro(_name) \
+	static struct cache_attr _name = \
+		__ATTR(_name, 0444, show_##_name, NULL)
+
+define_one_ro(level);
+define_one_ro(type);
+define_one_ro(coherency_line_size);
+define_one_ro(ways_of_associativity);
+define_one_ro(size);
+define_one_ro(number_of_sets);
+define_one_ro(shared_cpu_map);
+define_one_ro(attributes);
+
+static struct attribute * cache_default_attrs[] = {
+	&type.attr,
+	&level.attr,
+	&coherency_line_size.attr,
+	&ways_of_associativity.attr,
+	&attributes.attr,
+	&size.attr,
+	&number_of_sets.attr,
+	&shared_cpu_map.attr,
+	NULL
+};
+
+#define to_object(k) container_of(k, struct cache_info, kobj)
+#define to_attr(a) container_of(a, struct cache_attr, attr)
+
+static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char * buf)
+{
+	struct cache_attr *fattr = to_attr(attr);
+	struct cache_info *this_leaf = to_object(kobj);
+	ssize_t ret;
+
+	ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
+	return ret;
+}
+
+static struct sysfs_ops cache_sysfs_ops = {
+	.show = cache_show
+};
+
+static struct kobj_type cache_ktype = {
+	.sysfs_ops	= &cache_sysfs_ops,
+	.default_attrs	= cache_default_attrs,
+};
+
+static struct kobj_type cache_ktype_percpu_entry = {
+	.sysfs_ops	= &cache_sysfs_ops,
+};
+
+static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
+{
+	if (all_cpu_cache_info[cpu].cache_leaves) {
+		kfree(all_cpu_cache_info[cpu].cache_leaves);
+		all_cpu_cache_info[cpu].cache_leaves = NULL;
+	}
+	all_cpu_cache_info[cpu].num_cache_leaves = 0;
+	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
+
+	return;
+}
+
+static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
+{
+	u64 i, levels, unique_caches;
+	pal_cache_config_info_t cci;
+	int j;
+	s64 status;
+	struct cache_info *this_cache;
+	int num_cache_leaves = 0;
+
+	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
+		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
+		return -1;
+	}
+
+	this_cache=kzalloc(sizeof(struct cache_info)*unique_caches,
+			GFP_KERNEL);
+	if (this_cache == NULL)
+		return -ENOMEM;
+
+	for (i=0; i < levels; i++) {
+		for (j=2; j >0 ; j--) {
+			if ((status=ia64_pal_cache_config_info(i,j, &cci)) !=
+				PAL_STATUS_SUCCESS)
+				continue;
+
+			this_cache[num_cache_leaves].cci = cci;
+			this_cache[num_cache_leaves].level = i + 1;
+			this_cache[num_cache_leaves].type = j;
+
+			cache_shared_cpu_map_setup(cpu,
+					&this_cache[num_cache_leaves]);
+			num_cache_leaves ++;
+		}
+	}
+
+	all_cpu_cache_info[cpu].cache_leaves = this_cache;
+	all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;
+
+	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
+
+	return 0;
+}
+
+/* Add cache interface for CPU device */
+static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+{
+	unsigned int cpu = sys_dev->id;
+	unsigned long i, j;
+	struct cache_info *this_object;
+	int retval = 0;
+	cpumask_t oldmask;
+
+	if (all_cpu_cache_info[cpu].kobj.parent)
+		return 0;
+
+	oldmask = current->cpus_allowed;
+	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	if (unlikely(retval))
+		return retval;
+
+	retval = cpu_cache_sysfs_init(cpu);
+	set_cpus_allowed(current, oldmask);
+	if (unlikely(retval < 0))
+		return retval;
+
+	all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
+	kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
+	all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
+	retval = kobject_register(&all_cpu_cache_info[cpu].kobj);
+
+	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
+		this_object = LEAF_KOBJECT_PTR(cpu,i);
+		this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
+		kobject_set_name(&(this_object->kobj), "index%1lu", i);
+		this_object->kobj.ktype = &cache_ktype;
+		retval = kobject_register(&(this_object->kobj));
+		if (unlikely(retval)) {
+			for (j = 0; j < i; j++) {
+				kobject_unregister(
+					&(LEAF_KOBJECT_PTR(cpu,j)->kobj));
+			}
+			kobject_unregister(&all_cpu_cache_info[cpu].kobj);
+			cpu_cache_sysfs_exit(cpu);
+			break;
+		}
+	}
+	return retval;
+}
+
+/* Remove cache interface for CPU device */
+static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
+{
+	unsigned int cpu = sys_dev->id;
+	unsigned long i;
+
+	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
+		kobject_unregister(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));
+
+	if (all_cpu_cache_info[cpu].kobj.parent) {
+		kobject_unregister(&all_cpu_cache_info[cpu].kobj);
+		memset(&all_cpu_cache_info[cpu].kobj,
+				0,
+				sizeof(struct kobject));
+	}
+
+	cpu_cache_sysfs_exit(cpu);
+
+	return 0;
+}
+
+/*
+ * When a cpu is hot-plugged, do a check and initiate
+ * cache kobject if necessary
+ */
+static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
+		unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	struct sys_device *sys_dev;
+
+	sys_dev = get_cpu_sysdev(cpu);
+	switch (action) {
+	case CPU_ONLINE:
+		cache_add_dev(sys_dev);
+		break;
+	case CPU_DEAD:
+		cache_remove_dev(sys_dev);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cache_cpu_notifier =
+{
+	.notifier_call = cache_cpu_callback
+};
+
+static int __cpuinit cache_sysfs_init(void)
+{
+	int i;
+
+	for_each_online_cpu(i) {
+		cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
+				(void *)(long)i);
+	}
+
+	register_cpu_notifier(&cache_cpu_notifier);
+
+	return 0;
+}
+
+device_initcall(cache_sysfs_init);
+
include/asm-ia64/pal.h
···
 #define PAL_SHUTDOWN		40	/* enter processor shutdown state */
 #define PAL_PREFETCH_VISIBILITY	41	/* Make Processor Prefetches Visible */
 #define PAL_LOGICAL_TO_PHYSICAL 42	/* returns information on logical to physical processor mapping */
+#define PAL_CACHE_SHARED_INFO	43	/* returns information on caches shared by logical processor */
 
 #define PAL_COPY_PAL		256	/* relocate PAL procedures and PAL PMI */
 #define PAL_HALT_INFO		257	/* return the low power capabilities of processor */
···
 		mapping->overview.overview_data = iprv.v0;
 		mapping->ppli1.ppli1_data = iprv.v1;
 		mapping->ppli2.ppli2_data = iprv.v2;
+	}
+
+	return iprv.status;
+}
+
+typedef struct pal_cache_shared_info_s
+{
+	u64 num_shared;
+	pal_proc_n_log_info1_t ppli1;
+	pal_proc_n_log_info2_t ppli2;
+} pal_cache_shared_info_t;
+
+/* Get information on logical to physical processor mappings. */
+static inline s64
+ia64_pal_cache_shared_info(u64 level,
+		u64 type,
+		u64 proc_number,
+		pal_cache_shared_info_t *info)
+{
+	struct ia64_pal_retval iprv;
+
+	PAL_CALL(iprv, PAL_CACHE_SHARED_INFO, level, type, proc_number);
+
+	if (iprv.status == PAL_STATUS_SUCCESS) {
+		info->num_shared = iprv.v0;
+		info->ppli1.ppli1_data = iprv.v1;
+		info->ppli2.ppli2_data = iprv.v2;
 	}
 
 	return iprv.status;