Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'ucount-fixes-for-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace

Pull ucount cleanups from Eric Biederman:
"While working on the ucount fixes a for v5.15 a number of cleanups
suggested themselves.

Little things like not testing for NULL when a pointer cannot be NULL,
and wrapping atomic_add_negative with a more descriptive name, so that
people reading the code can more quickly understand what is going on"

* 'ucount-fixes-for-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace:
ucounts: Use atomic_long_sub_return for clarity
ucounts: Add get_ucounts_or_wrap for clarity
ucounts: Remove unnecessary test for NULL ucount in get_ucounts
ucounts: In set_cred_ucounts assume new->ucounts is non-NULL

+15 -10
+2 -3
kernel/cred.c
··· 676 676 * This optimization is needed because alloc_ucounts() uses locks 677 677 * for table lookups. 678 678 */ 679 - if (old_ucounts && old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid)) 679 + if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid)) 680 680 return 0; 681 681 682 682 if (!(new_ucounts = alloc_ucounts(new->user_ns, new->euid))) 683 683 return -EAGAIN; 684 684 685 685 new->ucounts = new_ucounts; 686 - if (old_ucounts) 687 - put_ucounts(old_ucounts); 686 + put_ucounts(old_ucounts); 688 687 689 688 return 0; 690 689 }
+13 -7
kernel/ucount.c
··· 150 150 spin_unlock_irq(&ucounts_lock); 151 151 } 152 152 153 + static inline bool get_ucounts_or_wrap(struct ucounts *ucounts) 154 + { 155 + /* Returns true on a successful get, false if the count wraps. */ 156 + return !atomic_add_negative(1, &ucounts->count); 157 + } 158 + 153 159 struct ucounts *get_ucounts(struct ucounts *ucounts) 154 160 { 155 - if (ucounts && atomic_add_negative(1, &ucounts->count)) { 161 + if (!get_ucounts_or_wrap(ucounts)) { 156 162 put_ucounts(ucounts); 157 163 ucounts = NULL; 158 164 } ··· 169 163 { 170 164 struct hlist_head *hashent = ucounts_hashentry(ns, uid); 171 165 struct ucounts *ucounts, *new; 172 - long overflow; 166 + bool wrapped; 173 167 174 168 spin_lock_irq(&ucounts_lock); 175 169 ucounts = find_ucounts(ns, uid, hashent); ··· 194 188 return new; 195 189 } 196 190 } 197 - overflow = atomic_add_negative(1, &ucounts->count); 191 + wrapped = !get_ucounts_or_wrap(ucounts); 198 192 spin_unlock_irq(&ucounts_lock); 199 - if (overflow) { 193 + if (wrapped) { 200 194 put_ucounts(ucounts); 201 195 return NULL; 202 196 } ··· 282 276 struct ucounts *iter; 283 277 long new = -1; /* Silence compiler warning */ 284 278 for (iter = ucounts; iter; iter = iter->ns->ucounts) { 285 - long dec = atomic_long_add_return(-v, &iter->ucount[type]); 279 + long dec = atomic_long_sub_return(v, &iter->ucount[type]); 286 280 WARN_ON_ONCE(dec < 0); 287 281 if (iter == ucounts) 288 282 new = dec; ··· 295 289 { 296 290 struct ucounts *iter, *next; 297 291 for (iter = ucounts; iter != last; iter = next) { 298 - long dec = atomic_long_add_return(-1, &iter->ucount[type]); 292 + long dec = atomic_long_sub_return(1, &iter->ucount[type]); 299 293 WARN_ON_ONCE(dec < 0); 300 294 next = iter->ns->ucounts; 301 295 if (dec == 0) ··· 332 326 } 333 327 return ret; 334 328 dec_unwind: 335 - dec = atomic_long_add_return(-1, &iter->ucount[type]); 329 + dec = atomic_long_sub_return(1, &iter->ucount[type]); 336 330 WARN_ON_ONCE(dec < 0); 337 331 unwind: 338 332 
do_dec_rlimit_put_ucounts(ucounts, iter, type);