Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

flex_proportions: remove unused fprop_local_single

The single variant of flex_proportions is not used. Simply remove it.

Link: https://lkml.kernel.org/r/20240118201321.759174-1-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Kemeng Shi and committed by Andrew Morton
d6bbab8f 0e02ca29

-109
-32
include/linux/flex_proportions.h
··· 39 39 bool fprop_new_period(struct fprop_global *p, int periods); 40 40 41 41 /* 42 - * ---- SINGLE ---- 43 - */ 44 - struct fprop_local_single { 45 - /* the local events counter */ 46 - unsigned long events; 47 - /* Period in which we last updated events */ 48 - unsigned int period; 49 - raw_spinlock_t lock; /* Protect period and numerator */ 50 - }; 51 - 52 - #define INIT_FPROP_LOCAL_SINGLE(name) \ 53 - { .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ 54 - } 55 - 56 - int fprop_local_init_single(struct fprop_local_single *pl); 57 - void fprop_local_destroy_single(struct fprop_local_single *pl); 58 - void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl); 59 - void fprop_fraction_single(struct fprop_global *p, 60 - struct fprop_local_single *pl, unsigned long *numerator, 61 - unsigned long *denominator); 62 - 63 - static inline 64 - void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl) 65 - { 66 - unsigned long flags; 67 - 68 - local_irq_save(flags); 69 - __fprop_inc_single(p, pl); 70 - local_irq_restore(flags); 71 - } 72 - 73 - /* 74 42 * ---- PERCPU ---- 75 43 */ 76 44 struct fprop_local_percpu {
-77
lib/flex_proportions.c
··· 84 84 } 85 85 86 86 /* 87 - * ---- SINGLE ---- 88 - */ 89 - 90 - int fprop_local_init_single(struct fprop_local_single *pl) 91 - { 92 - pl->events = 0; 93 - pl->period = 0; 94 - raw_spin_lock_init(&pl->lock); 95 - return 0; 96 - } 97 - 98 - void fprop_local_destroy_single(struct fprop_local_single *pl) 99 - { 100 - } 101 - 102 - static void fprop_reflect_period_single(struct fprop_global *p, 103 - struct fprop_local_single *pl) 104 - { 105 - unsigned int period = p->period; 106 - unsigned long flags; 107 - 108 - /* Fast path - period didn't change */ 109 - if (pl->period == period) 110 - return; 111 - raw_spin_lock_irqsave(&pl->lock, flags); 112 - /* Someone updated pl->period while we were spinning? */ 113 - if (pl->period >= period) { 114 - raw_spin_unlock_irqrestore(&pl->lock, flags); 115 - return; 116 - } 117 - /* Aging zeroed our fraction? */ 118 - if (period - pl->period < BITS_PER_LONG) 119 - pl->events >>= period - pl->period; 120 - else 121 - pl->events = 0; 122 - pl->period = period; 123 - raw_spin_unlock_irqrestore(&pl->lock, flags); 124 - } 125 - 126 - /* Event of type pl happened */ 127 - void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl) 128 - { 129 - fprop_reflect_period_single(p, pl); 130 - pl->events++; 131 - percpu_counter_add(&p->events, 1); 132 - } 133 - 134 - /* Return fraction of events of type pl */ 135 - void fprop_fraction_single(struct fprop_global *p, 136 - struct fprop_local_single *pl, 137 - unsigned long *numerator, unsigned long *denominator) 138 - { 139 - unsigned int seq; 140 - s64 num, den; 141 - 142 - do { 143 - seq = read_seqcount_begin(&p->sequence); 144 - fprop_reflect_period_single(p, pl); 145 - num = pl->events; 146 - den = percpu_counter_read_positive(&p->events); 147 - } while (read_seqcount_retry(&p->sequence, seq)); 148 - 149 - /* 150 - * Make fraction <= 1 and denominator > 0 even in presence of percpu 151 - * counter errors 152 - */ 153 - if (den <= num) { 154 - if (num) 155 - den = num; 
156 - else 157 - den = 1; 158 - } 159 - *denominator = den; 160 - *numerator = num; 161 - } 162 - 163 - /* 164 87 * ---- PERCPU ---- 165 88 */ 166 89 #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))