/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS	0

/*
 * Read and write access to the ring buffer uses spin locking. Thus,
 * writing to the buffer by the NMI handler (x86) could also occur
 * during critical sections when reading the buffer. To avoid this,
 * there are 2 buffers for independent read and write access. Read
 * access is in process context only, write access only in the NMI
 * handler. If the read buffer runs empty, both buffers are swapped
 * atomically. There is potentially a small window during swapping
 * where the buffers are disabled and samples could be lost.
 *
 * Using 2 buffers adds a little overhead, but the solution is clear
 * and does not require changes to the ring buffer implementation. It
 * can be changed to a single-buffer solution once ring buffer access
 * is implemented as non-locking atomic code.
 */
static struct ring_buffer *op_ring_buffer_read;
static struct ring_buffer *op_ring_buffer_write;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

void free_cpu_buffers(void)
{
	if (op_ring_buffer_read)
		ring_buffer_free(op_ring_buffer_read);
	op_ring_buffer_read = NULL;
	if (op_ring_buffer_write)
		ring_buffer_free(op_ring_buffer_write);
	op_ring_buffer_write = NULL;
}

#define RB_EVENT_HDR_SIZE 4

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;
	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
						 RB_EVENT_HDR_SIZE);

	op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_read)
		goto fail;
	op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_write)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}
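/*
 * Illustration only (not part of the original file): a minimal sketch of
 * how the allocation, work-queue and free routines in this file are
 * intended to pair up.  The exact call sites in the oprofile core are an
 * assumption here.
 *
 *	if (alloc_cpu_buffers())
 *		return -ENOMEM;
 *	start_cpu_work();		(begin periodic per-CPU syncing)
 *	...
 *	end_cpu_work();			(cancel and flush the delayed work)
 *	free_cpu_buffers();
 */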
void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

	flush_scheduled_work();
}

/*
 * This function prepares the cpu buffer to write a sample.
 *
 * Struct op_entry is used during operations on the ring buffer while
 * struct op_sample contains the data that is stored in the ring
 * buffer. Struct op_entry can be uninitialized on entry. The function
 * reserves a data array of the given size. Use
 * op_cpu_buffer_write_commit() after preparing the sample. On error a
 * NULL pointer is returned, otherwise a pointer to the sample.
 */
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
	entry->event = ring_buffer_lock_reserve
		(op_ring_buffer_write, sizeof(struct op_sample) +
		 size * sizeof(entry->sample->data[0]));
	if (entry->event)
		entry->sample = ring_buffer_event_data(entry->event);
	else
		entry->sample = NULL;

	if (!entry->sample)
		return NULL;

	entry->size = size;
	entry->data = entry->sample->data;

	return entry->sample;
}

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
}
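/*
 * Illustration only (not part of the original file): a minimal sketch of
 * the reserve/fill/commit pattern described above.  "my_pc", "my_event"
 * and "my_data" are hypothetical values supplied by the caller.
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *
 *	sample = op_cpu_buffer_write_reserve(&entry, 1);
 *	if (!sample)
 *		return -ENOMEM;
 *	sample->eip = my_pc;
 *	sample->event = my_event;
 *	op_cpu_buffer_add_data(&entry, my_data);
 *	op_cpu_buffer_write_commit(&entry);
 *
 * op_add_code() and op_add_sample() below follow exactly this pattern.
 */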
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	struct ring_buffer_event *e;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		goto event;
	if (ring_buffer_swap_cpu(op_ring_buffer_read,
				 op_ring_buffer_write,
				 cpu))
		return NULL;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		goto event;
	return NULL;

event:
	entry->event = e;
	entry->sample = ring_buffer_event_data(e);
	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
		/ sizeof(entry->sample->data[0]);
	entry->data = entry->sample->data;
	return entry->sample;
}

unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
}

static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
	    int is_kernel, struct task_struct *task)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long flags;
	int size;

	flags = 0;

	if (backtrace)
		flags |= TRACE_BEGIN;

	/* notice a switch from user->kernel or vice versa */
	is_kernel = !!is_kernel;
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		flags |= KERNEL_CTX_SWITCH;
		if (is_kernel)
			flags |= IS_KERNEL;
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		flags |= USER_CTX_SWITCH;
	}

	if (!flags)
		/* nothing to do */
		return 0;

	if (flags & USER_CTX_SWITCH)
		size = 1;
	else
		size = 0;

	sample = op_cpu_buffer_write_reserve(&entry, size);
	if (!sample)
		return -ENOMEM;

	sample->eip = ESCAPE_CODE;
	sample->event = flags;

	if (size)
		op_cpu_buffer_add_data(&entry, (unsigned long)task);

	op_cpu_buffer_write_commit(&entry);

	return 0;
}

static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
	      unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 0);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;

	return op_cpu_buffer_write_commit(&entry);
}

/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
	   unsigned long backtrace, int is_kernel, unsigned long event)
{
	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (op_add_code(cpu_buf, backtrace, is_kernel, current))
		goto fail;

	if (op_add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}

static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 1;
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * if log_sample() fails we can't backtrace since we lost the
	 * source of this event
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
		/* failed */
		return;

	if (!backtrace)
		return;

	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample.
 */
void
oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
		       unsigned long pc, int code, int size)
{
	struct op_sample *sample;
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	cpu_buf->sample_received++;

	/* no backtraces for samples with data */
	if (op_add_code(cpu_buf, 0, is_kernel, current))
		goto fail;

	sample = op_cpu_buffer_write_reserve(entry, size + 2);
	if (!sample)
		goto fail;
	sample->eip = ESCAPE_CODE;
	sample->event = 0;		/* no flags */

	op_cpu_buffer_add_data(entry, code);
	op_cpu_buffer_add_data(entry, pc);

	return;

fail:
	entry->event = NULL;
	cpu_buf->sample_lost_overflow++;
}

int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->event)
		return 0;
	return op_cpu_buffer_add_data(entry, val);
}

int oprofile_add_data64(struct op_entry *entry, u64 val)
{
	if (!entry->event)
		return 0;
	if (op_cpu_buffer_get_size(entry) < 2)
		/*
		 * the function returns 0 to indicate a buffer that is
		 * too small, even if there is some space left
		 */
		return 0;
	if (!op_cpu_buffer_add_data(entry, (u32)val))
		return 0;
	return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
}

int oprofile_write_commit(struct op_entry *entry)
{
	if (!entry->event)
		return -EINVAL;
	return op_cpu_buffer_write_commit(entry);
}
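/*
 * Illustration only (not part of the original file): a minimal sketch of
 * the extended-sample interface above as an architecture driver might use
 * it.  "MY_CODE", "my_pc" and the data values are hypothetical; note that
 * oprofile_add_data64() consumes two data slots.
 *
 *	struct op_entry entry;
 *
 *	oprofile_write_reserve(&entry, regs, my_pc, MY_CODE, 3);
 *	oprofile_add_data(&entry, my_val);
 *	oprofile_add_data64(&entry, my_val64);
 *	oprofile_write_commit(&entry);
 *
 * If the reserve fails, entry->event is NULL and the subsequent calls
 * return without touching the buffer.
 */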
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
	log_sample(cpu_buf, pc, 0, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * a broken frame can give an eip with the same value as an
	 * escape code, abort the trace if we get it
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (op_add_sample(cpu_buf, pc, 0))
		goto fail;

	return;
fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu.
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);

		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}