/*
 * Floating proportions
 *
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This file contains the public data structure and API definitions.
 */

#ifndef _LINUX_PROPORTIONS_H
#define _LINUX_PROPORTIONS_H

#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

struct prop_global {
	/*
	 * The period over which we differentiate
	 *
	 * period = 2^shift
	 */
	int shift;
	/*
	 * The total event counter aka 'time'.
	 *
	 * Treated as an unsigned long; the lower 'shift - 1' bits are the
	 * counter bits, the remaining upper bits are the period counter.
	 */
	struct percpu_counter events;
};

/*
 * global proportion descriptor
 *
 * this is needed to consistently flip prop_global structures.
 */
struct prop_descriptor {
	int index;
	struct prop_global pg[2];
	struct mutex mutex;		/* serialize the prop_global switch */
};

int prop_descriptor_init(struct prop_descriptor *pd, int shift);
void prop_change_shift(struct prop_descriptor *pd, int new_shift);

/*
 * ----- PERCPU ------
 */

struct prop_local_percpu {
	/*
	 * the local events counter
	 */
	struct percpu_counter events;

	/*
	 * snapshot of the last seen global state
	 */
	int shift;
	unsigned long period;
	spinlock_t lock;		/* protect the snapshot state */
};

int prop_local_init_percpu(struct prop_local_percpu *pl);
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
		long *numerator, long *denominator);

static inline
void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_percpu(pd, pl);
	local_irq_restore(flags);
}

/*
 * ----- SINGLE ------
 */

struct prop_local_single {
	/*
	 * the local events counter
	 */
	unsigned long events;

	/*
	 * snapshot of the last seen global state
	 * and a lock protecting this state
	 */
	int shift;
	unsigned long period;
	spinlock_t lock;		/* protect the snapshot state */
};

#define INIT_PROP_LOCAL_SINGLE(name)	\
{	.lock = __SPIN_LOCK_UNLOCKED(name.lock),	\
}

int prop_local_init_single(struct prop_local_single *pl);
void prop_local_destroy_single(struct prop_local_single *pl);
void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
		long *numerator, long *denominator);

static inline
void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_single(pd, pl);
	local_irq_restore(flags);
}

#endif /* _LINUX_PROPORTIONS_H */
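
/*
 * Usage sketch for the PERCPU flavour.  Illustrative only: the 'foo' names,
 * the shift value and the calling context are assumptions for the example,
 * not kernel code.  A user keeps one global prop_descriptor and one
 * prop_local_percpu per object whose share of events it wants to track:
 *
 *	static struct prop_descriptor foo_prop;
 *	static struct prop_local_percpu foo_local;
 *	long num, den;
 *
 *	prop_descriptor_init(&foo_prop, 10);	(period of 2^10 events)
 *	prop_local_init_percpu(&foo_local);
 *
 *	prop_inc_percpu(&foo_prop, &foo_local);	(account one event, IRQ-safe)
 *
 *	prop_fraction_percpu(&foo_prop, &foo_local, &num, &den);
 *	(num/den approximates foo_local's share of recent global events)
 *
 *	prop_local_destroy_percpu(&foo_local);
 */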
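
/*
 * Usage sketch for the SINGLE flavour.  Again an illustrative sketch with
 * hypothetical 'bar' names; it mirrors the percpu API but also allows static
 * initialization of the local counter via INIT_PROP_LOCAL_SINGLE:
 *
 *	static struct prop_descriptor bar_prop;
 *	static struct prop_local_single bar_local =
 *			INIT_PROP_LOCAL_SINGLE(bar_local);
 *	long num, den;
 *
 *	prop_descriptor_init(&bar_prop, 8);
 *	prop_inc_single(&bar_prop, &bar_local);
 *	prop_fraction_single(&bar_prop, &bar_local, &num, &den);
 *	prop_local_destroy_single(&bar_local);
 */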