Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mm-nonmm-stable-2024-07-21-15-07' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull non-MM updates from Andrew Morton:

- In the series "treewide: Refactor heap related implementation",
Kuan-Wei Chiu has significantly reworked the min_heap library code
and has taught bcachefs to use the new more generic implementation.

- Yury Norov's series "Cleanup cpumask.h inclusion in core headers"
reworks the cpumask and nodemask headers to make things generally
more rational.

- Kuan-Wei Chiu has sent along some maintenance work against our
sorting library code in the series "lib/sort: Optimizations and
cleanups".

- More library maintenance work from Christophe Jaillet in the series
"Remove usage of the deprecated ida_simple_xx() API".

- Ryusuke Konishi continues with the nilfs2 fixes and cleanups in the
series "nilfs2: eliminate the call to inode_attach_wb()".

- Kuan-Ying Lee has some fixes to the gdb scripts in the series "Fix
GDB command error".

- Plus the usual shower of singleton patches all over the place. Please
see the relevant changelogs for details.

* tag 'mm-nonmm-stable-2024-07-21-15-07' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (98 commits)
ia64: scrub ia64 from poison.h
watchdog/perf: properly initialize the turbo mode timestamp and rearm counter
tsacct: replace strncpy() with strscpy()
lib/bch.c: use swap() to improve code
test_bpf: convert comma to semicolon
init/modpost: conditionally check section mismatch to __meminit*
init: remove unused __MEMINIT* macros
nilfs2: Constify struct kobj_type
nilfs2: avoid undefined behavior in nilfs_cnt32_ge macro
math: rational: add missing MODULE_DESCRIPTION() macro
lib/zlib: add missing MODULE_DESCRIPTION() macro
fs: ufs: add MODULE_DESCRIPTION()
lib/rbtree.c: fix the example typo
ocfs2: add bounds checking to ocfs2_check_dir_entry()
fs: add kernel-doc comments to ocfs2_prepare_orphan_dir()
coredump: simplify zap_process()
selftests/fpu: add missing MODULE_DESCRIPTION() macro
compiler.h: simplify data_race() macro
build-id: require program headers to be right after ELF header
resource: add missing MODULE_DESCRIPTION()
...

+1417 -843
+2
MAINTAINERS
··· 3801 3801 F: include/linux/bitmap.h 3802 3802 F: include/linux/bits.h 3803 3803 F: include/linux/cpumask.h 3804 + F: include/linux/cpumask_types.h 3804 3805 F: include/linux/find.h 3805 3806 F: include/linux/nodemask.h 3807 + F: include/linux/nodemask_types.h 3806 3808 F: include/vdso/bits.h 3807 3809 F: lib/bitmap-str.c 3808 3810 F: lib/bitmap.c
-2
arch/powerpc/kernel/vmlinux.lds.S
··· 123 123 */ 124 124 *(.sfpr); 125 125 *(.text.asan.* .text.tsan.*) 126 - MEM_KEEP(init.text) 127 - MEM_KEEP(exit.text) 128 126 } :text 129 127 130 128 . = ALIGN(PAGE_SIZE);
+8 -9
drivers/fsi/fsi-occ.c
··· 656 656 rc = of_property_read_u32(dev->of_node, "reg", &reg); 657 657 if (!rc) { 658 658 /* make sure we don't have a duplicate from dts */ 659 - occ->idx = ida_simple_get(&occ_ida, reg, reg + 1, 660 - GFP_KERNEL); 659 + occ->idx = ida_alloc_range(&occ_ida, reg, reg, 660 + GFP_KERNEL); 661 661 if (occ->idx < 0) 662 - occ->idx = ida_simple_get(&occ_ida, 1, INT_MAX, 663 - GFP_KERNEL); 662 + occ->idx = ida_alloc_min(&occ_ida, 1, 663 + GFP_KERNEL); 664 664 } else { 665 - occ->idx = ida_simple_get(&occ_ida, 1, INT_MAX, 666 - GFP_KERNEL); 665 + occ->idx = ida_alloc_min(&occ_ida, 1, GFP_KERNEL); 667 666 } 668 667 } else { 669 - occ->idx = ida_simple_get(&occ_ida, 1, INT_MAX, GFP_KERNEL); 668 + occ->idx = ida_alloc_min(&occ_ida, 1, GFP_KERNEL); 670 669 } 671 670 672 671 platform_set_drvdata(pdev, occ); ··· 679 680 rc = misc_register(&occ->mdev); 680 681 if (rc) { 681 682 dev_err(dev, "failed to register miscdevice: %d\n", rc); 682 - ida_simple_remove(&occ_ida, occ->idx); 683 + ida_free(&occ_ida, occ->idx); 683 684 kvfree(occ->buffer); 684 685 return rc; 685 686 } ··· 718 719 else 719 720 device_for_each_child(&pdev->dev, NULL, occ_unregister_of_child); 720 721 721 - ida_simple_remove(&occ_ida, occ->idx); 722 + ida_free(&occ_ida, occ->idx); 722 723 723 724 return 0; 724 725 }
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
··· 340 340 * Without a 1:1 association between relocation handles and 341 341 * the execobject[] index, we instead create a hashtable. 342 342 * We size it dynamically based on available memory, starting 343 - * first with 1:1 assocative hash and scaling back until 343 + * first with 1:1 associative hash and scaling back until 344 344 * the allocation succeeds. 345 345 * 346 346 * Later on we use a positive lut_size to indicate we are
+47 -17
drivers/md/bcache/alloc.c
··· 164 164 * prio is worth 1/8th of what INITIAL_PRIO is worth. 165 165 */ 166 166 167 - #define bucket_prio(b) \ 168 - ({ \ 169 - unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \ 170 - \ 171 - (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \ 172 - }) 167 + static inline unsigned int new_bucket_prio(struct cache *ca, struct bucket *b) 168 + { 169 + unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; 173 170 174 - #define bucket_max_cmp(l, r) (bucket_prio(l) < bucket_prio(r)) 175 - #define bucket_min_cmp(l, r) (bucket_prio(l) > bucket_prio(r)) 171 + return (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); 172 + } 173 + 174 + static inline bool new_bucket_max_cmp(const void *l, const void *r, void *args) 175 + { 176 + struct bucket **lhs = (struct bucket **)l; 177 + struct bucket **rhs = (struct bucket **)r; 178 + struct cache *ca = args; 179 + 180 + return new_bucket_prio(ca, *lhs) > new_bucket_prio(ca, *rhs); 181 + } 182 + 183 + static inline bool new_bucket_min_cmp(const void *l, const void *r, void *args) 184 + { 185 + struct bucket **lhs = (struct bucket **)l; 186 + struct bucket **rhs = (struct bucket **)r; 187 + struct cache *ca = args; 188 + 189 + return new_bucket_prio(ca, *lhs) < new_bucket_prio(ca, *rhs); 190 + } 191 + 192 + static inline void new_bucket_swap(void *l, void *r, void __always_unused *args) 193 + { 194 + struct bucket **lhs = l, **rhs = r; 195 + 196 + swap(*lhs, *rhs); 197 + } 176 198 177 199 static void invalidate_buckets_lru(struct cache *ca) 178 200 { 179 201 struct bucket *b; 180 - ssize_t i; 202 + const struct min_heap_callbacks bucket_max_cmp_callback = { 203 + .less = new_bucket_max_cmp, 204 + .swp = new_bucket_swap, 205 + }; 206 + const struct min_heap_callbacks bucket_min_cmp_callback = { 207 + .less = new_bucket_min_cmp, 208 + .swp = new_bucket_swap, 209 + }; 181 210 182 - ca->heap.used = 0; 211 + ca->heap.nr = 0; 183 212 184 213 for_each_bucket(b, ca) { 185 214 if 
(!bch_can_invalidate_bucket(ca, b)) 186 215 continue; 187 216 188 - if (!heap_full(&ca->heap)) 189 - heap_add(&ca->heap, b, bucket_max_cmp); 190 - else if (bucket_max_cmp(b, heap_peek(&ca->heap))) { 217 + if (!min_heap_full(&ca->heap)) 218 + min_heap_push(&ca->heap, &b, &bucket_max_cmp_callback, ca); 219 + else if (!new_bucket_max_cmp(&b, min_heap_peek(&ca->heap), ca)) { 191 220 ca->heap.data[0] = b; 192 - heap_sift(&ca->heap, 0, bucket_max_cmp); 221 + min_heap_sift_down(&ca->heap, 0, &bucket_max_cmp_callback, ca); 193 222 } 194 223 } 195 224 196 - for (i = ca->heap.used / 2 - 1; i >= 0; --i) 197 - heap_sift(&ca->heap, i, bucket_min_cmp); 225 + min_heapify_all(&ca->heap, &bucket_min_cmp_callback, ca); 198 226 199 227 while (!fifo_full(&ca->free_inc)) { 200 - if (!heap_pop(&ca->heap, b, bucket_min_cmp)) { 228 + if (!ca->heap.nr) { 201 229 /* 202 230 * We don't want to be calling invalidate_buckets() 203 231 * multiple times when it can't do anything ··· 234 206 wake_up_gc(ca->set); 235 207 return; 236 208 } 209 + b = min_heap_peek(&ca->heap)[0]; 210 + min_heap_pop(&ca->heap, &bucket_min_cmp_callback, ca); 237 211 238 212 bch_invalidate_one_bucket(ca, b); 239 213 }
+1 -1
drivers/md/bcache/bcache.h
··· 458 458 /* Allocation stuff: */ 459 459 struct bucket *buckets; 460 460 461 - DECLARE_HEAP(struct bucket *, heap); 461 + DEFINE_MIN_HEAP(struct bucket *, cache_heap) heap; 462 462 463 463 /* 464 464 * If nonzero, we know we aren't going to find any buckets to invalidate
+79 -45
drivers/md/bcache/bset.c
··· 54 54 int __bch_count_data(struct btree_keys *b) 55 55 { 56 56 unsigned int ret = 0; 57 - struct btree_iter_stack iter; 57 + struct btree_iter iter; 58 58 struct bkey *k; 59 + 60 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 59 61 60 62 if (b->ops->is_extents) 61 63 for_each_key(b, k, &iter) ··· 69 67 { 70 68 va_list args; 71 69 struct bkey *k, *p = NULL; 72 - struct btree_iter_stack iter; 70 + struct btree_iter iter; 73 71 const char *err; 72 + 73 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 74 74 75 75 for_each_key(b, k, &iter) { 76 76 if (b->ops->is_extents) { ··· 114 110 115 111 static void bch_btree_iter_next_check(struct btree_iter *iter) 116 112 { 117 - struct bkey *k = iter->data->k, *next = bkey_next(k); 113 + struct bkey *k = iter->heap.data->k, *next = bkey_next(k); 118 114 119 - if (next < iter->data->end && 115 + if (next < iter->heap.data->end && 120 116 bkey_cmp(k, iter->b->ops->is_extents ? 121 117 &START_KEY(next) : next) > 0) { 122 118 bch_dump_bucket(iter->b); ··· 883 879 unsigned int status = BTREE_INSERT_STATUS_NO_INSERT; 884 880 struct bset *i = bset_tree_last(b)->data; 885 881 struct bkey *m, *prev = NULL; 886 - struct btree_iter_stack iter; 882 + struct btree_iter iter; 887 883 struct bkey preceding_key_on_stack = ZERO_KEY; 888 884 struct bkey *preceding_key_p = &preceding_key_on_stack; 889 885 890 886 BUG_ON(b->ops->is_extents && !KEY_SIZE(k)); 887 + 888 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 891 889 892 890 /* 893 891 * If k has preceding key, preceding_key_p will be set to address ··· 901 895 else 902 896 preceding_key(k, &preceding_key_p); 903 897 904 - m = bch_btree_iter_stack_init(b, &iter, preceding_key_p); 898 + m = bch_btree_iter_init(b, &iter, preceding_key_p); 905 899 906 - if (b->ops->insert_fixup(b, k, &iter.iter, replace_key)) 900 + if (b->ops->insert_fixup(b, k, &iter, replace_key)) 907 901 return status; 908 902 909 903 status = BTREE_INSERT_STATUS_INSERT; ··· 1083 1077 1084 1078 /* Btree iterator */ 1085 1079 
1086 - typedef bool (btree_iter_cmp_fn)(struct btree_iter_set, 1087 - struct btree_iter_set); 1080 + typedef bool (new_btree_iter_cmp_fn)(const void *, const void *, void *); 1088 1081 1089 - static inline bool btree_iter_cmp(struct btree_iter_set l, 1090 - struct btree_iter_set r) 1082 + static inline bool new_btree_iter_cmp(const void *l, const void *r, void __always_unused *args) 1091 1083 { 1092 - return bkey_cmp(l.k, r.k) > 0; 1084 + const struct btree_iter_set *_l = l; 1085 + const struct btree_iter_set *_r = r; 1086 + 1087 + return bkey_cmp(_l->k, _r->k) <= 0; 1088 + } 1089 + 1090 + static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args) 1091 + { 1092 + struct btree_iter_set *_iter1 = iter1; 1093 + struct btree_iter_set *_iter2 = iter2; 1094 + 1095 + swap(*_iter1, *_iter2); 1093 1096 } 1094 1097 1095 1098 static inline bool btree_iter_end(struct btree_iter *iter) 1096 1099 { 1097 - return !iter->used; 1100 + return !iter->heap.nr; 1098 1101 } 1099 1102 1100 1103 void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k, 1101 1104 struct bkey *end) 1102 1105 { 1106 + const struct min_heap_callbacks callbacks = { 1107 + .less = new_btree_iter_cmp, 1108 + .swp = new_btree_iter_swap, 1109 + }; 1110 + 1103 1111 if (k != end) 1104 - BUG_ON(!heap_add(iter, 1105 - ((struct btree_iter_set) { k, end }), 1106 - btree_iter_cmp)); 1112 + BUG_ON(!min_heap_push(&iter->heap, 1113 + &((struct btree_iter_set) { k, end }), 1114 + &callbacks, 1115 + NULL)); 1107 1116 } 1108 1117 1109 - static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b, 1110 - struct btree_iter_stack *iter, 1111 - struct bkey *search, 1112 - struct bset_tree *start) 1118 + static struct bkey *__bch_btree_iter_init(struct btree_keys *b, 1119 + struct btree_iter *iter, 1120 + struct bkey *search, 1121 + struct bset_tree *start) 1113 1122 { 1114 1123 struct bkey *ret = NULL; 1115 1124 1116 - iter->iter.size = ARRAY_SIZE(iter->stack_data); 1117 - 
iter->iter.used = 0; 1125 + iter->heap.size = ARRAY_SIZE(iter->heap.preallocated); 1126 + iter->heap.nr = 0; 1118 1127 1119 1128 #ifdef CONFIG_BCACHE_DEBUG 1120 - iter->iter.b = b; 1129 + iter->b = b; 1121 1130 #endif 1122 1131 1123 1132 for (; start <= bset_tree_last(b); start++) { 1124 1133 ret = bch_bset_search(b, start, search); 1125 - bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data)); 1134 + bch_btree_iter_push(iter, ret, bset_bkey_last(start->data)); 1126 1135 } 1127 1136 1128 1137 return ret; 1129 1138 } 1130 1139 1131 - struct bkey *bch_btree_iter_stack_init(struct btree_keys *b, 1132 - struct btree_iter_stack *iter, 1140 + struct bkey *bch_btree_iter_init(struct btree_keys *b, 1141 + struct btree_iter *iter, 1133 1142 struct bkey *search) 1134 1143 { 1135 - return __bch_btree_iter_stack_init(b, iter, search, b->set); 1144 + return __bch_btree_iter_init(b, iter, search, b->set); 1136 1145 } 1137 1146 1138 1147 static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter, 1139 - btree_iter_cmp_fn *cmp) 1148 + new_btree_iter_cmp_fn *cmp) 1140 1149 { 1141 1150 struct btree_iter_set b __maybe_unused; 1142 1151 struct bkey *ret = NULL; 1152 + const struct min_heap_callbacks callbacks = { 1153 + .less = cmp, 1154 + .swp = new_btree_iter_swap, 1155 + }; 1143 1156 1144 1157 if (!btree_iter_end(iter)) { 1145 1158 bch_btree_iter_next_check(iter); 1146 1159 1147 - ret = iter->data->k; 1148 - iter->data->k = bkey_next(iter->data->k); 1160 + ret = iter->heap.data->k; 1161 + iter->heap.data->k = bkey_next(iter->heap.data->k); 1149 1162 1150 - if (iter->data->k > iter->data->end) { 1163 + if (iter->heap.data->k > iter->heap.data->end) { 1151 1164 WARN_ONCE(1, "bset was corrupt!\n"); 1152 - iter->data->k = iter->data->end; 1165 + iter->heap.data->k = iter->heap.data->end; 1153 1166 } 1154 1167 1155 - if (iter->data->k == iter->data->end) 1156 - heap_pop(iter, b, cmp); 1168 + if (iter->heap.data->k == iter->heap.data->end) { 1169 + if 
(iter->heap.nr) { 1170 + b = min_heap_peek(&iter->heap)[0]; 1171 + min_heap_pop(&iter->heap, &callbacks, NULL); 1172 + } 1173 + } 1157 1174 else 1158 - heap_sift(iter, 0, cmp); 1175 + min_heap_sift_down(&iter->heap, 0, &callbacks, NULL); 1159 1176 } 1160 1177 1161 1178 return ret; ··· 1186 1157 1187 1158 struct bkey *bch_btree_iter_next(struct btree_iter *iter) 1188 1159 { 1189 - return __bch_btree_iter_next(iter, btree_iter_cmp); 1160 + return __bch_btree_iter_next(iter, new_btree_iter_cmp); 1190 1161 1191 1162 } 1192 1163 ··· 1224 1195 struct btree_iter *iter, 1225 1196 bool fixup, bool remove_stale) 1226 1197 { 1227 - int i; 1228 1198 struct bkey *k, *last = NULL; 1229 1199 BKEY_PADDED(k) tmp; 1230 1200 bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale 1231 1201 ? bch_ptr_bad 1232 1202 : bch_ptr_invalid; 1203 + const struct min_heap_callbacks callbacks = { 1204 + .less = b->ops->sort_cmp, 1205 + .swp = new_btree_iter_swap, 1206 + }; 1233 1207 1234 1208 /* Heapify the iterator, using our comparison function */ 1235 - for (i = iter->used / 2 - 1; i >= 0; --i) 1236 - heap_sift(iter, i, b->ops->sort_cmp); 1209 + min_heapify_all(&iter->heap, &callbacks, NULL); 1237 1210 1238 1211 while (!btree_iter_end(iter)) { 1239 1212 if (b->ops->sort_fixup && fixup) ··· 1324 1293 struct bset_sort_state *state) 1325 1294 { 1326 1295 size_t order = b->page_order, keys = 0; 1327 - struct btree_iter_stack iter; 1296 + struct btree_iter iter; 1328 1297 int oldsize = bch_count_data(b); 1329 1298 1330 - __bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]); 1299 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 1300 + __bch_btree_iter_init(b, &iter, NULL, &b->set[start]); 1331 1301 1332 1302 if (start) { 1333 1303 unsigned int i; ··· 1339 1307 order = get_order(__set_bytes(b->set->data, keys)); 1340 1308 } 1341 1309 1342 - __btree_sort(b, &iter.iter, start, order, false, state); 1310 + __btree_sort(b, &iter, start, order, false, state); 1343 1311 1344 1312 
EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize); 1345 1313 } ··· 1355 1323 struct bset_sort_state *state) 1356 1324 { 1357 1325 uint64_t start_time = local_clock(); 1358 - struct btree_iter_stack iter; 1326 + struct btree_iter iter; 1359 1327 1360 - bch_btree_iter_stack_init(b, &iter, NULL); 1328 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 1361 1329 1362 - btree_mergesort(b, new->set->data, &iter.iter, false, true); 1330 + bch_btree_iter_init(b, &iter, NULL); 1331 + 1332 + btree_mergesort(b, new->set->data, &iter, false, true); 1363 1333 1364 1334 bch_time_stats_update(&state->time, start_time); 1365 1335
+17 -23
drivers/md/bcache/bset.h
··· 187 187 }; 188 188 189 189 struct btree_keys_ops { 190 - bool (*sort_cmp)(struct btree_iter_set l, 191 - struct btree_iter_set r); 190 + bool (*sort_cmp)(const void *l, 191 + const void *r, 192 + void *args); 192 193 struct bkey *(*sort_fixup)(struct btree_iter *iter, 193 194 struct bkey *tmp); 194 195 bool (*insert_fixup)(struct btree_keys *b, ··· 313 312 BTREE_INSERT_STATUS_FRONT_MERGE, 314 313 }; 315 314 315 + struct btree_iter_set { 316 + struct bkey *k, *end; 317 + }; 318 + 316 319 /* Btree key iteration */ 317 320 318 321 struct btree_iter { 319 - size_t size, used; 320 322 #ifdef CONFIG_BCACHE_DEBUG 321 323 struct btree_keys *b; 322 324 #endif 323 - struct btree_iter_set { 324 - struct bkey *k, *end; 325 - } data[]; 326 - }; 327 - 328 - /* Fixed-size btree_iter that can be allocated on the stack */ 329 - 330 - struct btree_iter_stack { 331 - struct btree_iter iter; 332 - struct btree_iter_set stack_data[MAX_BSETS]; 325 + MIN_HEAP_PREALLOCATED(struct btree_iter_set, btree_iter_heap, MAX_BSETS) heap; 333 326 }; 334 327 335 328 typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k); ··· 335 340 336 341 void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k, 337 342 struct bkey *end); 338 - struct bkey *bch_btree_iter_stack_init(struct btree_keys *b, 339 - struct btree_iter_stack *iter, 340 - struct bkey *search); 343 + struct bkey *bch_btree_iter_init(struct btree_keys *b, 344 + struct btree_iter *iter, 345 + struct bkey *search); 341 346 342 347 struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t, 343 348 const struct bkey *search); ··· 352 357 return search ? 
__bch_bset_search(b, t, search) : t->data->start; 353 358 } 354 359 355 - #define for_each_key_filter(b, k, stack_iter, filter) \ 356 - for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \ 357 - ((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \ 358 - filter));) 360 + #define for_each_key_filter(b, k, iter, filter) \ 361 + for (bch_btree_iter_init((b), (iter), NULL); \ 362 + ((k) = bch_btree_iter_next_filter((iter), (b), filter));) 359 363 360 - #define for_each_key(b, k, stack_iter) \ 361 - for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \ 362 - ((k) = bch_btree_iter_next(&((stack_iter)->iter)));) 364 + #define for_each_key(b, k, iter) \ 365 + for (bch_btree_iter_init((b), (iter), NULL); \ 366 + ((k) = bch_btree_iter_next(iter));) 363 367 364 368 /* Sorting */ 365 369
+40 -29
drivers/md/bcache/btree.c
··· 149 149 { 150 150 const char *err = "bad btree header"; 151 151 struct bset *i = btree_bset_first(b); 152 - struct btree_iter *iter; 152 + struct btree_iter iter; 153 153 154 154 /* 155 155 * c->fill_iter can allocate an iterator with more memory space 156 156 * than static MAX_BSETS. 157 157 * See the comment arount cache_set->fill_iter. 158 158 */ 159 - iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO); 160 - iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size; 161 - iter->used = 0; 159 + iter.heap.data = mempool_alloc(&b->c->fill_iter, GFP_NOIO); 160 + iter.heap.size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size; 161 + iter.heap.nr = 0; 162 162 163 163 #ifdef CONFIG_BCACHE_DEBUG 164 - iter->b = &b->keys; 164 + iter.b = &b->keys; 165 165 #endif 166 166 167 167 if (!i->seq) ··· 199 199 if (i != b->keys.set[0].data && !i->keys) 200 200 goto err; 201 201 202 - bch_btree_iter_push(iter, i->start, bset_bkey_last(i)); 202 + bch_btree_iter_push(&iter, i->start, bset_bkey_last(i)); 203 203 204 204 b->written += set_blocks(i, block_bytes(b->c->cache)); 205 205 } ··· 211 211 if (i->seq == b->keys.set[0].data->seq) 212 212 goto err; 213 213 214 - bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); 214 + bch_btree_sort_and_fix_extents(&b->keys, &iter, &b->c->sort); 215 215 216 216 i = b->keys.set[0].data; 217 217 err = "short btree key"; ··· 223 223 bch_bset_init_next(&b->keys, write_block(b), 224 224 bset_magic(&b->c->cache->sb)); 225 225 out: 226 - mempool_free(iter, &b->c->fill_iter); 226 + mempool_free(iter.heap.data, &b->c->fill_iter); 227 227 return; 228 228 err: 229 229 set_btree_node_io_error(b); ··· 1309 1309 uint8_t stale = 0; 1310 1310 unsigned int keys = 0, good_keys = 0; 1311 1311 struct bkey *k; 1312 - struct btree_iter_stack iter; 1312 + struct btree_iter iter; 1313 1313 struct bset_tree *t; 1314 + 1315 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 1314 1316 1315 1317 gc->nodes++; 1316 1318 ··· 1572 1570 static 
unsigned int btree_gc_count_keys(struct btree *b) 1573 1571 { 1574 1572 struct bkey *k; 1575 - struct btree_iter_stack iter; 1573 + struct btree_iter iter; 1576 1574 unsigned int ret = 0; 1575 + 1576 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 1577 1577 1578 1578 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) 1579 1579 ret += bkey_u64s(k); ··· 1615 1611 int ret = 0; 1616 1612 bool should_rewrite; 1617 1613 struct bkey *k; 1618 - struct btree_iter_stack iter; 1614 + struct btree_iter iter; 1619 1615 struct gc_merge_info r[GC_MERGE_NODES]; 1620 1616 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1; 1621 1617 1622 - bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done); 1618 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 1619 + bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); 1623 1620 1624 1621 for (i = r; i < r + ARRAY_SIZE(r); i++) 1625 1622 i->b = ERR_PTR(-EINTR); 1626 1623 1627 1624 while (1) { 1628 - k = bch_btree_iter_next_filter(&iter.iter, &b->keys, 1629 - bch_ptr_bad); 1625 + k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); 1630 1626 if (k) { 1631 1627 r->b = bch_btree_node_get(b->c, op, k, b->level - 1, 1632 1628 true, b); ··· 1921 1917 { 1922 1918 int ret = 0; 1923 1919 struct bkey *k, *p = NULL; 1924 - struct btree_iter_stack iter; 1920 + struct btree_iter iter; 1921 + 1922 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 1925 1923 1926 1924 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) 1927 1925 bch_initial_mark_key(b->c, b->level, k); ··· 1931 1925 bch_initial_mark_key(b->c, b->level + 1, &b->key); 1932 1926 1933 1927 if (b->level) { 1934 - bch_btree_iter_stack_init(&b->keys, &iter, NULL); 1928 + bch_btree_iter_init(&b->keys, &iter, NULL); 1935 1929 1936 1930 do { 1937 - k = bch_btree_iter_next_filter(&iter.iter, &b->keys, 1931 + k = bch_btree_iter_next_filter(&iter, &b->keys, 1938 1932 bch_ptr_bad); 1939 1933 if (k) { 1940 1934 btree_node_prefetch(b, k); ··· 1962 1956 struct btree_check_info *info = arg; 1963 
1957 struct btree_check_state *check_state = info->state; 1964 1958 struct cache_set *c = check_state->c; 1965 - struct btree_iter_stack iter; 1959 + struct btree_iter iter; 1966 1960 struct bkey *k, *p; 1967 1961 int cur_idx, prev_idx, skip_nr; 1968 1962 ··· 1970 1964 cur_idx = prev_idx = 0; 1971 1965 ret = 0; 1972 1966 1967 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 1968 + 1973 1969 /* root node keys are checked before thread created */ 1974 - bch_btree_iter_stack_init(&c->root->keys, &iter, NULL); 1975 - k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad); 1970 + bch_btree_iter_init(&c->root->keys, &iter, NULL); 1971 + k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); 1976 1972 BUG_ON(!k); 1977 1973 1978 1974 p = k; ··· 1992 1984 skip_nr = cur_idx - prev_idx; 1993 1985 1994 1986 while (skip_nr) { 1995 - k = bch_btree_iter_next_filter(&iter.iter, 1987 + k = bch_btree_iter_next_filter(&iter, 1996 1988 &c->root->keys, 1997 1989 bch_ptr_bad); 1998 1990 if (k) ··· 2065 2057 int ret = 0; 2066 2058 int i; 2067 2059 struct bkey *k = NULL; 2068 - struct btree_iter_stack iter; 2060 + struct btree_iter iter; 2069 2061 struct btree_check_state check_state; 2062 + 2063 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 2070 2064 2071 2065 /* check and mark root node keys */ 2072 2066 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) ··· 2563 2553 2564 2554 if (b->level) { 2565 2555 struct bkey *k; 2566 - struct btree_iter_stack iter; 2556 + struct btree_iter iter; 2567 2557 2568 - bch_btree_iter_stack_init(&b->keys, &iter, from); 2558 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 2559 + bch_btree_iter_init(&b->keys, &iter, from); 2569 2560 2570 - while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys, 2561 + while ((k = bch_btree_iter_next_filter(&iter, &b->keys, 2571 2562 bch_ptr_bad))) { 2572 2563 ret = bcache_btree(map_nodes_recurse, k, b, 2573 2564 op, from, fn, flags); ··· 2597 2586 { 2598 2587 int ret = 
MAP_CONTINUE; 2599 2588 struct bkey *k; 2600 - struct btree_iter_stack iter; 2589 + struct btree_iter iter; 2601 2590 2602 - bch_btree_iter_stack_init(&b->keys, &iter, from); 2591 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 2592 + bch_btree_iter_init(&b->keys, &iter, from); 2603 2593 2604 - while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys, 2605 - bch_ptr_bad))) { 2594 + while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { 2606 2595 ret = !b->level 2607 2596 ? fn(op, b, k) 2608 2597 : bcache_btree(map_keys_recurse, k,
+34 -19
drivers/md/bcache/extents.c
··· 33 33 i->k = bkey_next(i->k); 34 34 35 35 if (i->k == i->end) 36 - *i = iter->data[--iter->used]; 36 + *i = iter->heap.data[--iter->heap.nr]; 37 37 } 38 38 39 - static bool bch_key_sort_cmp(struct btree_iter_set l, 40 - struct btree_iter_set r) 39 + static bool new_bch_key_sort_cmp(const void *l, const void *r, void *args) 41 40 { 42 - int64_t c = bkey_cmp(l.k, r.k); 41 + struct btree_iter_set *_l = (struct btree_iter_set *)l; 42 + struct btree_iter_set *_r = (struct btree_iter_set *)r; 43 + int64_t c = bkey_cmp(_l->k, _r->k); 43 44 44 - return c ? c > 0 : l.k < r.k; 45 + return !(c ? c > 0 : _l->k < _r->k); 45 46 } 46 47 47 48 static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) ··· 239 238 } 240 239 241 240 const struct btree_keys_ops bch_btree_keys_ops = { 242 - .sort_cmp = bch_key_sort_cmp, 241 + .sort_cmp = new_bch_key_sort_cmp, 243 242 .insert_fixup = bch_btree_ptr_insert_fixup, 244 243 .key_invalid = bch_btree_ptr_invalid, 245 244 .key_bad = bch_btree_ptr_bad, ··· 256 255 * Necessary for btree_sort_fixup() - if there are multiple keys that compare 257 256 * equal in different sets, we have to process them newest to oldest. 258 257 */ 259 - static bool bch_extent_sort_cmp(struct btree_iter_set l, 260 - struct btree_iter_set r) 261 - { 262 - int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k)); 263 258 264 - return c ? c > 0 : l.k < r.k; 259 + static bool new_bch_extent_sort_cmp(const void *l, const void *r, void __always_unused *args) 260 + { 261 + struct btree_iter_set *_l = (struct btree_iter_set *)l; 262 + struct btree_iter_set *_r = (struct btree_iter_set *)r; 263 + int64_t c = bkey_cmp(&START_KEY(_l->k), &START_KEY(_r->k)); 264 + 265 + return !(c ? 
c > 0 : _l->k < _r->k); 266 + } 267 + 268 + static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args) 269 + { 270 + struct btree_iter_set *_iter1 = iter1; 271 + struct btree_iter_set *_iter2 = iter2; 272 + 273 + swap(*_iter1, *_iter2); 265 274 } 266 275 267 276 static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter, 268 277 struct bkey *tmp) 269 278 { 270 - while (iter->used > 1) { 271 - struct btree_iter_set *top = iter->data, *i = top + 1; 279 + const struct min_heap_callbacks callbacks = { 280 + .less = new_bch_extent_sort_cmp, 281 + .swp = new_btree_iter_swap, 282 + }; 283 + while (iter->heap.nr > 1) { 284 + struct btree_iter_set *top = iter->heap.data, *i = top + 1; 272 285 273 - if (iter->used > 2 && 274 - bch_extent_sort_cmp(i[0], i[1])) 286 + if (iter->heap.nr > 2 && 287 + !new_bch_extent_sort_cmp(&i[0], &i[1], NULL)) 275 288 i++; 276 289 277 290 if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0) ··· 293 278 294 279 if (!KEY_SIZE(i->k)) { 295 280 sort_key_next(iter, i); 296 - heap_sift(iter, i - top, bch_extent_sort_cmp); 281 + min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL); 297 282 continue; 298 283 } 299 284 ··· 303 288 else 304 289 bch_cut_front(top->k, i->k); 305 290 306 - heap_sift(iter, i - top, bch_extent_sort_cmp); 291 + min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL); 307 292 } else { 308 293 /* can't happen because of comparison func */ 309 294 BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k))); ··· 313 298 314 299 bch_cut_back(&START_KEY(i->k), tmp); 315 300 bch_cut_front(i->k, top->k); 316 - heap_sift(iter, 0, bch_extent_sort_cmp); 301 + min_heap_sift_down(&iter->heap, 0, &callbacks, NULL); 317 302 318 303 return tmp; 319 304 } else { ··· 633 618 } 634 619 635 620 const struct btree_keys_ops bch_extent_keys_ops = { 636 - .sort_cmp = bch_extent_sort_cmp, 621 + .sort_cmp = new_bch_extent_sort_cmp, 637 622 .sort_fixup = bch_extent_sort_fixup, 638 623 .insert_fixup = 
bch_extent_insert_fixup, 639 624 .key_invalid = bch_extent_invalid,
+31 -10
drivers/md/bcache/movinggc.c
··· 182 182 closure_sync(&cl); 183 183 } 184 184 185 - static bool bucket_cmp(struct bucket *l, struct bucket *r) 185 + static bool new_bucket_cmp(const void *l, const void *r, void __always_unused *args) 186 186 { 187 - return GC_SECTORS_USED(l) < GC_SECTORS_USED(r); 187 + struct bucket **_l = (struct bucket **)l; 188 + struct bucket **_r = (struct bucket **)r; 189 + 190 + return GC_SECTORS_USED(*_l) >= GC_SECTORS_USED(*_r); 191 + } 192 + 193 + static void new_bucket_swap(void *l, void *r, void __always_unused *args) 194 + { 195 + struct bucket **_l = l; 196 + struct bucket **_r = r; 197 + 198 + swap(*_l, *_r); 188 199 } 189 200 190 201 static unsigned int bucket_heap_top(struct cache *ca) 191 202 { 192 203 struct bucket *b; 193 204 194 - return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0; 205 + return (b = min_heap_peek(&ca->heap)[0]) ? GC_SECTORS_USED(b) : 0; 195 206 } 196 207 197 208 void bch_moving_gc(struct cache_set *c) ··· 210 199 struct cache *ca = c->cache; 211 200 struct bucket *b; 212 201 unsigned long sectors_to_move, reserve_sectors; 202 + const struct min_heap_callbacks callbacks = { 203 + .less = new_bucket_cmp, 204 + .swp = new_bucket_swap, 205 + }; 213 206 214 207 if (!c->copy_gc_enabled) 215 208 return; ··· 224 209 reserve_sectors = ca->sb.bucket_size * 225 210 fifo_used(&ca->free[RESERVE_MOVINGGC]); 226 211 227 - ca->heap.used = 0; 212 + ca->heap.nr = 0; 228 213 229 214 for_each_bucket(b, ca) { 230 215 if (GC_MARK(b) == GC_MARK_METADATA || ··· 233 218 atomic_read(&b->pin)) 234 219 continue; 235 220 236 - if (!heap_full(&ca->heap)) { 221 + if (!min_heap_full(&ca->heap)) { 237 222 sectors_to_move += GC_SECTORS_USED(b); 238 - heap_add(&ca->heap, b, bucket_cmp); 239 - } else if (bucket_cmp(b, heap_peek(&ca->heap))) { 223 + min_heap_push(&ca->heap, &b, &callbacks, NULL); 224 + } else if (!new_bucket_cmp(&b, min_heap_peek(&ca->heap), ca)) { 240 225 sectors_to_move -= bucket_heap_top(ca); 241 226 sectors_to_move += GC_SECTORS_USED(b); 242 227 
243 228 ca->heap.data[0] = b; 244 - heap_sift(&ca->heap, 0, bucket_cmp); 229 + min_heap_sift_down(&ca->heap, 0, &callbacks, NULL); 245 230 } 246 231 } 247 232 248 233 while (sectors_to_move > reserve_sectors) { 249 - heap_pop(&ca->heap, b, bucket_cmp); 234 + if (ca->heap.nr) { 235 + b = min_heap_peek(&ca->heap)[0]; 236 + min_heap_pop(&ca->heap, &callbacks, NULL); 237 + } 250 238 sectors_to_move -= GC_SECTORS_USED(b); 251 239 } 252 240 253 - while (heap_pop(&ca->heap, b, bucket_cmp)) 241 + while (ca->heap.nr) { 242 + b = min_heap_peek(&ca->heap)[0]; 243 + min_heap_pop(&ca->heap, &callbacks, NULL); 254 244 SET_GC_MOVE(b, 1); 245 + } 255 246 256 247 mutex_unlock(&c->bucket_lock); 257 248
+1 -2
drivers/md/bcache/super.c
··· 1907 1907 INIT_LIST_HEAD(&c->btree_cache_freed); 1908 1908 INIT_LIST_HEAD(&c->data_buckets); 1909 1909 1910 - iter_size = sizeof(struct btree_iter) + 1911 - ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) * 1910 + iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) * 1912 1911 sizeof(struct btree_iter_set); 1913 1912 1914 1913 c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
+3 -1
drivers/md/bcache/sysfs.c
··· 660 660 unsigned int bytes = 0; 661 661 struct bkey *k; 662 662 struct btree *b; 663 - struct btree_iter_stack iter; 663 + struct btree_iter iter; 664 + 665 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 664 666 665 667 goto lock_root; 666 668
+1 -1
drivers/md/bcache/util.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * random utiility code, for bcache but in theory not specific to bcache 3 + * random utility code, for bcache but in theory not specific to bcache 4 4 * 5 5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> 6 6 * Copyright 2012 Google, Inc.
+2 -65
drivers/md/bcache/util.h
··· 9 9 #include <linux/kernel.h> 10 10 #include <linux/sched/clock.h> 11 11 #include <linux/llist.h> 12 + #include <linux/min_heap.h> 12 13 #include <linux/ratelimit.h> 13 14 #include <linux/vmalloc.h> 14 15 #include <linux/workqueue.h> ··· 31 30 32 31 #endif 33 32 34 - #define DECLARE_HEAP(type, name) \ 35 - struct { \ 36 - size_t size, used; \ 37 - type *data; \ 38 - } name 39 - 40 33 #define init_heap(heap, _size, gfp) \ 41 34 ({ \ 42 35 size_t _bytes; \ 43 - (heap)->used = 0; \ 36 + (heap)->nr = 0; \ 44 37 (heap)->size = (_size); \ 45 38 _bytes = (heap)->size * sizeof(*(heap)->data); \ 46 39 (heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \ ··· 46 51 kvfree((heap)->data); \ 47 52 (heap)->data = NULL; \ 48 53 } while (0) 49 - 50 - #define heap_swap(h, i, j) swap((h)->data[i], (h)->data[j]) 51 - 52 - #define heap_sift(h, i, cmp) \ 53 - do { \ 54 - size_t _r, _j = i; \ 55 - \ 56 - for (; _j * 2 + 1 < (h)->used; _j = _r) { \ 57 - _r = _j * 2 + 1; \ 58 - if (_r + 1 < (h)->used && \ 59 - cmp((h)->data[_r], (h)->data[_r + 1])) \ 60 - _r++; \ 61 - \ 62 - if (cmp((h)->data[_r], (h)->data[_j])) \ 63 - break; \ 64 - heap_swap(h, _r, _j); \ 65 - } \ 66 - } while (0) 67 - 68 - #define heap_sift_down(h, i, cmp) \ 69 - do { \ 70 - while (i) { \ 71 - size_t p = (i - 1) / 2; \ 72 - if (cmp((h)->data[i], (h)->data[p])) \ 73 - break; \ 74 - heap_swap(h, i, p); \ 75 - i = p; \ 76 - } \ 77 - } while (0) 78 - 79 - #define heap_add(h, d, cmp) \ 80 - ({ \ 81 - bool _r = !heap_full(h); \ 82 - if (_r) { \ 83 - size_t _i = (h)->used++; \ 84 - (h)->data[_i] = d; \ 85 - \ 86 - heap_sift_down(h, _i, cmp); \ 87 - heap_sift(h, _i, cmp); \ 88 - } \ 89 - _r; \ 90 - }) 91 - 92 - #define heap_pop(h, d, cmp) \ 93 - ({ \ 94 - bool _r = (h)->used; \ 95 - if (_r) { \ 96 - (d) = (h)->data[0]; \ 97 - (h)->used--; \ 98 - heap_swap(h, 0, (h)->used); \ 99 - heap_sift(h, 0, cmp); \ 100 - } \ 101 - _r; \ 102 - }) 103 - 104 - #define heap_peek(h) ((h)->used ? 
(h)->data[0] : NULL) 105 - 106 - #define heap_full(h) ((h)->used == (h)->size) 107 54 108 55 #define DECLARE_FIFO(type, name) \ 109 56 struct { \
+8 -5
drivers/md/bcache/writeback.c
··· 908 908 struct dirty_init_thrd_info *info = arg; 909 909 struct bch_dirty_init_state *state = info->state; 910 910 struct cache_set *c = state->c; 911 - struct btree_iter_stack iter; 911 + struct btree_iter iter; 912 912 struct bkey *k, *p; 913 913 int cur_idx, prev_idx, skip_nr; 914 914 915 915 k = p = NULL; 916 916 prev_idx = 0; 917 917 918 - bch_btree_iter_stack_init(&c->root->keys, &iter, NULL); 919 - k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad); 918 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 919 + bch_btree_iter_init(&c->root->keys, &iter, NULL); 920 + k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); 920 921 BUG_ON(!k); 921 922 922 923 p = k; ··· 931 930 skip_nr = cur_idx - prev_idx; 932 931 933 932 while (skip_nr) { 934 - k = bch_btree_iter_next_filter(&iter.iter, 933 + k = bch_btree_iter_next_filter(&iter, 935 934 &c->root->keys, 936 935 bch_ptr_bad); 937 936 if (k) ··· 980 979 int i; 981 980 struct btree *b = NULL; 982 981 struct bkey *k = NULL; 983 - struct btree_iter_stack iter; 982 + struct btree_iter iter; 984 983 struct sectors_dirty_init op; 985 984 struct cache_set *c = d->c; 986 985 struct bch_dirty_init_state state; 986 + 987 + min_heap_init(&iter.heap, NULL, MAX_BSETS); 987 988 988 989 retry_lock: 989 990 b = c->root;
+10 -9
drivers/md/dm-vdo/repair.c
··· 51 51 bool increment_applied; 52 52 }; 53 53 54 + DEFINE_MIN_HEAP(struct numbered_block_mapping, replay_heap); 55 + 54 56 struct repair_completion { 55 57 /* The completion header */ 56 58 struct vdo_completion completion; ··· 99 97 * order, then original journal order. This permits efficient iteration over the journal 100 98 * entries in order. 101 99 */ 102 - struct min_heap replay_heap; 100 + struct replay_heap replay_heap; 103 101 /* Fields tracking progress through the journal entries. */ 104 102 struct numbered_block_mapping *current_entry; 105 103 struct numbered_block_mapping *current_unfetched_entry; ··· 137 135 * to sort by slot while still ensuring we replay all entries with the same slot in the exact order 138 136 * as they appeared in the journal. 139 137 */ 140 - static bool mapping_is_less_than(const void *item1, const void *item2) 138 + static bool mapping_is_less_than(const void *item1, const void *item2, void __always_unused *args) 141 139 { 142 140 const struct numbered_block_mapping *mapping1 = 143 141 (const struct numbered_block_mapping *) item1; ··· 156 154 return 0; 157 155 } 158 156 159 - static void swap_mappings(void *item1, void *item2) 157 + static void swap_mappings(void *item1, void *item2, void __always_unused *args) 160 158 { 161 159 struct numbered_block_mapping *mapping1 = item1; 162 160 struct numbered_block_mapping *mapping2 = item2; ··· 165 163 } 166 164 167 165 static const struct min_heap_callbacks repair_min_heap = { 168 - .elem_size = sizeof(struct numbered_block_mapping), 169 166 .less = mapping_is_less_than, 170 167 .swp = swap_mappings, 171 168 }; 172 169 173 170 static struct numbered_block_mapping *sort_next_heap_element(struct repair_completion *repair) 174 171 { 175 - struct min_heap *heap = &repair->replay_heap; 172 + struct replay_heap *heap = &repair->replay_heap; 176 173 struct numbered_block_mapping *last; 177 174 178 175 if (heap->nr == 0) ··· 182 181 * restore the heap invariant, and return a pointer to 
the popped element. 183 182 */ 184 183 last = &repair->entries[--heap->nr]; 185 - swap_mappings(heap->data, last); 186 - min_heapify(heap, 0, &repair_min_heap); 184 + swap_mappings(heap->data, last, NULL); 185 + min_heap_sift_down(heap, 0, &repair_min_heap, NULL); 187 186 return last; 188 187 } 189 188 ··· 1117 1116 * Organize the journal entries into a binary heap so we can iterate over them in sorted 1118 1117 * order incrementally, avoiding an expensive sort call. 1119 1118 */ 1120 - repair->replay_heap = (struct min_heap) { 1119 + repair->replay_heap = (struct replay_heap) { 1121 1120 .data = repair->entries, 1122 1121 .nr = repair->block_map_entry_count, 1123 1122 .size = repair->block_map_entry_count, 1124 1123 }; 1125 - min_heapify_all(&repair->replay_heap, &repair_min_heap); 1124 + min_heapify_all(&repair->replay_heap, &repair_min_heap, NULL); 1126 1125 1127 1126 vdo_log_info("Replaying %zu recovery entries into block map", 1128 1127 repair->block_map_entry_count);
+7 -7
drivers/md/dm-vdo/slab-depot.c
··· 3288 3288 * Thus, the ordering is reversed from the usual sense since min_heap returns smaller elements 3289 3289 * before larger ones. 3290 3290 */ 3291 - static bool slab_status_is_less_than(const void *item1, const void *item2) 3291 + static bool slab_status_is_less_than(const void *item1, const void *item2, 3292 + void __always_unused *args) 3292 3293 { 3293 3294 const struct slab_status *info1 = item1; 3294 3295 const struct slab_status *info2 = item2; ··· 3301 3300 return info1->slab_number < info2->slab_number; 3302 3301 } 3303 3302 3304 - static void swap_slab_statuses(void *item1, void *item2) 3303 + static void swap_slab_statuses(void *item1, void *item2, void __always_unused *args) 3305 3304 { 3306 3305 struct slab_status *info1 = item1; 3307 3306 struct slab_status *info2 = item2; ··· 3310 3309 } 3311 3310 3312 3311 static const struct min_heap_callbacks slab_status_min_heap = { 3313 - .elem_size = sizeof(struct slab_status), 3314 3312 .less = slab_status_is_less_than, 3315 3313 .swp = swap_slab_statuses, 3316 3314 }; ··· 3509 3509 static int __must_check vdo_prepare_slabs_for_allocation(struct block_allocator *allocator) 3510 3510 { 3511 3511 struct slab_status current_slab_status; 3512 - struct min_heap heap; 3512 + DEFINE_MIN_HEAP(struct slab_status, heap) heap; 3513 3513 int result; 3514 3514 struct slab_status *slab_statuses; 3515 3515 struct slab_depot *depot = allocator->depot; ··· 3521 3521 return result; 3522 3522 3523 3523 /* Sort the slabs by cleanliness, then by emptiness hint. 
*/ 3524 - heap = (struct min_heap) { 3524 + heap = (struct heap) { 3525 3525 .data = slab_statuses, 3526 3526 .nr = allocator->slab_count, 3527 3527 .size = allocator->slab_count, 3528 3528 }; 3529 - min_heapify_all(&heap, &slab_status_min_heap); 3529 + min_heapify_all(&heap, &slab_status_min_heap, NULL); 3530 3530 3531 3531 while (heap.nr > 0) { 3532 3532 bool high_priority; ··· 3534 3534 struct slab_journal *journal; 3535 3535 3536 3536 current_slab_status = slab_statuses[0]; 3537 - min_heap_pop(&heap, &slab_status_min_heap); 3537 + min_heap_pop(&heap, &slab_status_min_heap, NULL); 3538 3538 slab = depot->slabs[current_slab_status.slab_number]; 3539 3539 3540 3540 if ((depot->load_type == VDO_SLAB_DEPOT_REBUILD_LOAD) ||
+5 -5
drivers/most/core.c
··· 1286 1286 !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) 1287 1287 return -EINVAL; 1288 1288 1289 - id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL); 1289 + id = ida_alloc(&mdev_id, GFP_KERNEL); 1290 1290 if (id < 0) { 1291 1291 dev_err(iface->dev, "Failed to allocate device ID\n"); 1292 1292 return id; ··· 1294 1294 1295 1295 iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL); 1296 1296 if (!iface->p) { 1297 - ida_simple_remove(&mdev_id, id); 1297 + ida_free(&mdev_id, id); 1298 1298 return -ENOMEM; 1299 1299 } 1300 1300 ··· 1308 1308 dev_err(iface->dev, "Failed to register interface device\n"); 1309 1309 kfree(iface->p); 1310 1310 put_device(iface->dev); 1311 - ida_simple_remove(&mdev_id, id); 1311 + ida_free(&mdev_id, id); 1312 1312 return -ENOMEM; 1313 1313 } 1314 1314 ··· 1366 1366 } 1367 1367 kfree(iface->p); 1368 1368 device_unregister(iface->dev); 1369 - ida_simple_remove(&mdev_id, id); 1369 + ida_free(&mdev_id, id); 1370 1370 return -ENOMEM; 1371 1371 } 1372 1372 EXPORT_SYMBOL_GPL(most_register_interface); ··· 1397 1397 device_unregister(&c->dev); 1398 1398 } 1399 1399 1400 - ida_simple_remove(&mdev_id, iface->p->dev_id); 1400 + ida_free(&mdev_id, iface->p->dev_id); 1401 1401 kfree(iface->p); 1402 1402 device_unregister(iface->dev); 1403 1403 }
+3 -3
drivers/most/most_cdev.c
··· 100 100 101 101 static void destroy_channel(struct comp_channel *c) 102 102 { 103 - ida_simple_remove(&comp.minor_id, MINOR(c->devno)); 103 + ida_free(&comp.minor_id, MINOR(c->devno)); 104 104 kfifo_free(&c->fifo); 105 105 kfree(c); 106 106 } ··· 425 425 if (c) 426 426 return -EEXIST; 427 427 428 - current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL); 428 + current_minor = ida_alloc(&comp.minor_id, GFP_KERNEL); 429 429 if (current_minor < 0) 430 430 return current_minor; 431 431 ··· 472 472 err_free_c: 473 473 kfree(c); 474 474 err_remove_ida: 475 - ida_simple_remove(&comp.minor_id, current_minor); 475 + ida_free(&comp.minor_id, current_minor); 476 476 return retval; 477 477 } 478 478
+1 -1
drivers/net/wireless/ti/wl1251/acx.h
··· 229 229 * === ========== 230 230 * 31:12 Reserved - Always equal to 0. 231 231 * 11 Association - When set, the WiLink receives all association 232 - * related frames (association request/response, reassocation 232 + * related frames (association request/response, reassociation 233 233 * request/response, and disassociation). When clear, these frames 234 234 * are discarded. 235 235 * 10 Auth/De auth - When set, the WiLink receives all authentication
+1 -1
drivers/scsi/qedf/qedf_main.c
··· 2286 2286 * on. 2287 2287 */ 2288 2288 if (!io_req) 2289 - /* If there is not io_req assocated with this CQE 2289 + /* If there is not io_req associated with this CQE 2290 2290 * just queue it on CPU 0 2291 2291 */ 2292 2292 cpu = 0;
+1 -1
drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
··· 979 979 left = pkt_len - (sizeof(struct ieee80211_hdr_3addr) + ie_offset); 980 980 pos = pframe + (sizeof(struct ieee80211_hdr_3addr) + ie_offset); 981 981 982 - /* check if this stat has been successfully authenticated/assocated */ 982 + /* check if this stat has been successfully authenticated/associated */ 983 983 if (!((pstat->state) & WIFI_FW_AUTH_SUCCESS)) { 984 984 if (!((pstat->state) & WIFI_FW_ASSOC_SUCCESS)) { 985 985 status = WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA;
+1 -1
drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
··· 452 452 if (hal_btcoex_IsBtControlLps(padapter)) 453 453 return; 454 454 455 - /* Skip lps enter request if number of assocated adapters is not 1 */ 455 + /* Skip lps enter request if number of associated adapters is not 1 */ 456 456 if (check_fwstate(&(dvobj->padapters->mlmepriv), WIFI_ASOC_STATE)) 457 457 n_assoc_iface++; 458 458 if (n_assoc_iface != 1)
+38 -12
fs/bcachefs/clock.c
··· 6 6 #include <linux/kthread.h> 7 7 #include <linux/preempt.h> 8 8 9 - static inline long io_timer_cmp(io_timer_heap *h, 10 - struct io_timer *l, 11 - struct io_timer *r) 9 + static inline bool io_timer_cmp(const void *l, const void *r, void __always_unused *args) 12 10 { 13 - return l->expire - r->expire; 11 + struct io_timer **_l = (struct io_timer **)l; 12 + struct io_timer **_r = (struct io_timer **)r; 13 + 14 + return (*_l)->expire < (*_r)->expire; 15 + } 16 + 17 + static inline void io_timer_swp(void *l, void *r, void __always_unused *args) 18 + { 19 + struct io_timer **_l = (struct io_timer **)l; 20 + struct io_timer **_r = (struct io_timer **)r; 21 + 22 + swap(*_l, *_r); 14 23 } 15 24 16 25 void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer) 17 26 { 27 + const struct min_heap_callbacks callbacks = { 28 + .less = io_timer_cmp, 29 + .swp = io_timer_swp, 30 + }; 31 + 18 32 spin_lock(&clock->timer_lock); 19 33 20 34 if (time_after_eq64((u64) atomic64_read(&clock->now), timer->expire)) { ··· 37 23 return; 38 24 } 39 25 40 - for (size_t i = 0; i < clock->timers.used; i++) 26 + for (size_t i = 0; i < clock->timers.nr; i++) 41 27 if (clock->timers.data[i] == timer) 42 28 goto out; 43 29 44 - BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp, NULL)); 30 + BUG_ON(!min_heap_push(&clock->timers, &timer, &callbacks, NULL)); 45 31 out: 46 32 spin_unlock(&clock->timer_lock); 47 33 } 48 34 49 35 void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer) 50 36 { 37 + const struct min_heap_callbacks callbacks = { 38 + .less = io_timer_cmp, 39 + .swp = io_timer_swp, 40 + }; 41 + 51 42 spin_lock(&clock->timer_lock); 52 43 53 - for (size_t i = 0; i < clock->timers.used; i++) 44 + for (size_t i = 0; i < clock->timers.nr; i++) 54 45 if (clock->timers.data[i] == timer) { 55 - heap_del(&clock->timers, i, io_timer_cmp, NULL); 46 + min_heap_del(&clock->timers, i, &callbacks, NULL); 56 47 break; 57 48 } 58 49 ··· 142 123 static struct io_timer 
*get_expired_timer(struct io_clock *clock, u64 now) 143 124 { 144 125 struct io_timer *ret = NULL; 126 + const struct min_heap_callbacks callbacks = { 127 + .less = io_timer_cmp, 128 + .swp = io_timer_swp, 129 + }; 145 130 146 - if (clock->timers.used && 147 - time_after_eq64(now, clock->timers.data[0]->expire)) 148 - heap_pop(&clock->timers, ret, io_timer_cmp, NULL); 131 + if (clock->timers.nr && 132 + time_after_eq64(now, clock->timers.data[0]->expire)) { 133 + ret = *min_heap_peek(&clock->timers); 134 + min_heap_pop(&clock->timers, &callbacks, NULL); 135 + } 136 + 149 137 return ret; 150 138 } 151 139 ··· 176 150 printbuf_tabstop_push(out, 40); 177 151 prt_printf(out, "current time:\t%llu\n", now); 178 152 179 - for (unsigned i = 0; i < clock->timers.used; i++) 153 + for (unsigned i = 0; i < clock->timers.nr; i++) 180 154 prt_printf(out, "%ps %ps:\t%llu\n", 181 155 clock->timers.data[i]->fn, 182 156 clock->timers.data[i]->fn2,
+1 -1
fs/bcachefs/clock_types.h
··· 24 24 /* Amount to buffer up on a percpu counter */ 25 25 #define IO_CLOCK_PCPU_SECTORS 128 26 26 27 - typedef HEAP(struct io_timer *) io_timer_heap; 27 + typedef DEFINE_MIN_HEAP(struct io_timer *, io_timer_heap) io_timer_heap; 28 28 29 29 struct io_clock { 30 30 atomic64_t now;
+52 -26
fs/bcachefs/ec.c
··· 901 901 902 902 mutex_lock(&c->ec_stripes_heap_lock); 903 903 if (n.size > h->size) { 904 - memcpy(n.data, h->data, h->used * sizeof(h->data[0])); 905 - n.used = h->used; 904 + memcpy(n.data, h->data, h->nr * sizeof(h->data[0])); 905 + n.nr = h->nr; 906 906 swap(*h, n); 907 907 } 908 908 mutex_unlock(&c->ec_stripes_heap_lock); ··· 993 993 994 994 lockdep_assert_held(&c->ec_stripes_heap_lock); 995 995 996 - if (h->used && 996 + if (h->nr && 997 997 h->data[0].blocks_nonempty == 0 && 998 998 !bch2_stripe_is_open(c, h->data[0].idx)) 999 999 return h->data[0].idx; 1000 1000 1001 1001 return 0; 1002 - } 1003 - 1004 - static inline int ec_stripes_heap_cmp(ec_stripes_heap *h, 1005 - struct ec_stripe_heap_entry l, 1006 - struct ec_stripe_heap_entry r) 1007 - { 1008 - return ((l.blocks_nonempty > r.blocks_nonempty) - 1009 - (l.blocks_nonempty < r.blocks_nonempty)); 1010 1002 } 1011 1003 1012 1004 static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h, ··· 1009 1017 genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i; 1010 1018 } 1011 1019 1020 + static inline bool ec_stripes_heap_cmp(const void *l, const void *r, void __always_unused *args) 1021 + { 1022 + struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l; 1023 + struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r; 1024 + 1025 + return ((_l->blocks_nonempty > _r->blocks_nonempty) < 1026 + (_l->blocks_nonempty < _r->blocks_nonempty)); 1027 + } 1028 + 1029 + static inline void ec_stripes_heap_swap(void *l, void *r, void *h) 1030 + { 1031 + struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l; 1032 + struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r; 1033 + ec_stripes_heap *_h = (ec_stripes_heap *)h; 1034 + size_t i = _l - _h->data; 1035 + size_t j = _r - _h->data; 1036 + 1037 + swap(*_l, *_r); 1038 + 1039 + ec_stripes_heap_set_backpointer(_h, i); 1040 + ec_stripes_heap_set_backpointer(_h, j); 1041 + } 1042 + 1012 1043 static void 
heap_verify_backpointer(struct bch_fs *c, size_t idx) 1013 1044 { 1014 1045 ec_stripes_heap *h = &c->ec_stripes_heap; 1015 1046 struct stripe *m = genradix_ptr(&c->stripes, idx); 1016 1047 1017 - BUG_ON(m->heap_idx >= h->used); 1048 + BUG_ON(m->heap_idx >= h->nr); 1018 1049 BUG_ON(h->data[m->heap_idx].idx != idx); 1019 1050 } 1020 1051 1021 1052 void bch2_stripes_heap_del(struct bch_fs *c, 1022 1053 struct stripe *m, size_t idx) 1023 1054 { 1055 + const struct min_heap_callbacks callbacks = { 1056 + .less = ec_stripes_heap_cmp, 1057 + .swp = ec_stripes_heap_swap, 1058 + }; 1059 + 1024 1060 mutex_lock(&c->ec_stripes_heap_lock); 1025 1061 heap_verify_backpointer(c, idx); 1026 1062 1027 - heap_del(&c->ec_stripes_heap, m->heap_idx, 1028 - ec_stripes_heap_cmp, 1029 - ec_stripes_heap_set_backpointer); 1063 + min_heap_del(&c->ec_stripes_heap, m->heap_idx, &callbacks, &c->ec_stripes_heap); 1030 1064 mutex_unlock(&c->ec_stripes_heap_lock); 1031 1065 } 1032 1066 1033 1067 void bch2_stripes_heap_insert(struct bch_fs *c, 1034 1068 struct stripe *m, size_t idx) 1035 1069 { 1036 - mutex_lock(&c->ec_stripes_heap_lock); 1037 - BUG_ON(heap_full(&c->ec_stripes_heap)); 1070 + const struct min_heap_callbacks callbacks = { 1071 + .less = ec_stripes_heap_cmp, 1072 + .swp = ec_stripes_heap_swap, 1073 + }; 1038 1074 1039 - heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) { 1075 + mutex_lock(&c->ec_stripes_heap_lock); 1076 + BUG_ON(min_heap_full(&c->ec_stripes_heap)); 1077 + 1078 + genradix_ptr(&c->stripes, idx)->heap_idx = c->ec_stripes_heap.nr; 1079 + min_heap_push(&c->ec_stripes_heap, &((struct ec_stripe_heap_entry) { 1040 1080 .idx = idx, 1041 1081 .blocks_nonempty = m->blocks_nonempty, 1042 1082 }), 1043 - ec_stripes_heap_cmp, 1044 - ec_stripes_heap_set_backpointer); 1083 + &callbacks, 1084 + &c->ec_stripes_heap); 1045 1085 1046 1086 heap_verify_backpointer(c, idx); 1047 1087 mutex_unlock(&c->ec_stripes_heap_lock); ··· 1082 1058 void bch2_stripes_heap_update(struct bch_fs 
*c, 1083 1059 struct stripe *m, size_t idx) 1084 1060 { 1061 + const struct min_heap_callbacks callbacks = { 1062 + .less = ec_stripes_heap_cmp, 1063 + .swp = ec_stripes_heap_swap, 1064 + }; 1085 1065 ec_stripes_heap *h = &c->ec_stripes_heap; 1086 1066 bool do_deletes; 1087 1067 size_t i; ··· 1096 1068 h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty; 1097 1069 1098 1070 i = m->heap_idx; 1099 - heap_sift_up(h, i, ec_stripes_heap_cmp, 1100 - ec_stripes_heap_set_backpointer); 1101 - heap_sift_down(h, i, ec_stripes_heap_cmp, 1102 - ec_stripes_heap_set_backpointer); 1071 + min_heap_sift_up(h, i, &callbacks, &c->ec_stripes_heap); 1072 + min_heap_sift_down(h, i, &callbacks, &c->ec_stripes_heap); 1103 1073 1104 1074 heap_verify_backpointer(c, idx); 1105 1075 ··· 1890 1864 return -1; 1891 1865 1892 1866 mutex_lock(&c->ec_stripes_heap_lock); 1893 - for (heap_idx = 0; heap_idx < h->used; heap_idx++) { 1867 + for (heap_idx = 0; heap_idx < h->nr; heap_idx++) { 1894 1868 /* No blocks worth reusing, stripe will just be deleted: */ 1895 1869 if (!h->data[heap_idx].blocks_nonempty) 1896 1870 continue; ··· 2221 2195 size_t i; 2222 2196 2223 2197 mutex_lock(&c->ec_stripes_heap_lock); 2224 - for (i = 0; i < min_t(size_t, h->used, 50); i++) { 2198 + for (i = 0; i < min_t(size_t, h->nr, 50); i++) { 2225 2199 m = genradix_ptr(&c->stripes, h->data[i].idx); 2226 2200 2227 2201 prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,
+1 -1
fs/bcachefs/ec_types.h
··· 36 36 unsigned blocks_nonempty; 37 37 }; 38 38 39 - typedef HEAP(struct ec_stripe_heap_entry) ec_stripes_heap; 39 + typedef DEFINE_MIN_HEAP(struct ec_stripe_heap_entry, ec_stripes_heap) ec_stripes_heap; 40 40 41 41 #endif /* _BCACHEFS_EC_TYPES_H */
+1 -1
fs/bcachefs/util.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * random utiility code, for bcache but in theory not specific to bcache 3 + * random utility code, for bcache but in theory not specific to bcache 4 4 * 5 5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> 6 6 * Copyright 2012 Google, Inc.
+2 -116
fs/bcachefs/util.h
··· 8 8 #include <linux/errno.h> 9 9 #include <linux/freezer.h> 10 10 #include <linux/kernel.h> 11 + #include <linux/min_heap.h> 11 12 #include <linux/sched/clock.h> 12 13 #include <linux/llist.h> 13 14 #include <linux/log2.h> ··· 55 54 PAGE_SIZE); 56 55 } 57 56 58 - #define HEAP(type) \ 59 - struct { \ 60 - size_t size, used; \ 61 - type *data; \ 62 - } 63 - 64 - #define DECLARE_HEAP(type, name) HEAP(type) name 65 - 66 57 #define init_heap(heap, _size, gfp) \ 67 58 ({ \ 68 - (heap)->used = 0; \ 59 + (heap)->nr = 0; \ 69 60 (heap)->size = (_size); \ 70 61 (heap)->data = kvmalloc((heap)->size * sizeof((heap)->data[0]),\ 71 62 (gfp)); \ ··· 67 74 do { \ 68 75 kvfree((heap)->data); \ 69 76 (heap)->data = NULL; \ 70 - } while (0) 71 - 72 - #define heap_set_backpointer(h, i, _fn) \ 73 - do { \ 74 - void (*fn)(typeof(h), size_t) = _fn; \ 75 - if (fn) \ 76 - fn(h, i); \ 77 - } while (0) 78 - 79 - #define heap_swap(h, i, j, set_backpointer) \ 80 - do { \ 81 - swap((h)->data[i], (h)->data[j]); \ 82 - heap_set_backpointer(h, i, set_backpointer); \ 83 - heap_set_backpointer(h, j, set_backpointer); \ 84 - } while (0) 85 - 86 - #define heap_peek(h) \ 87 - ({ \ 88 - EBUG_ON(!(h)->used); \ 89 - (h)->data[0]; \ 90 - }) 91 - 92 - #define heap_full(h) ((h)->used == (h)->size) 93 - 94 - #define heap_sift_down(h, i, cmp, set_backpointer) \ 95 - do { \ 96 - size_t _c, _j = i; \ 97 - \ 98 - for (; _j * 2 + 1 < (h)->used; _j = _c) { \ 99 - _c = _j * 2 + 1; \ 100 - if (_c + 1 < (h)->used && \ 101 - cmp(h, (h)->data[_c], (h)->data[_c + 1]) >= 0) \ 102 - _c++; \ 103 - \ 104 - if (cmp(h, (h)->data[_c], (h)->data[_j]) >= 0) \ 105 - break; \ 106 - heap_swap(h, _c, _j, set_backpointer); \ 107 - } \ 108 - } while (0) 109 - 110 - #define heap_sift_up(h, i, cmp, set_backpointer) \ 111 - do { \ 112 - while (i) { \ 113 - size_t p = (i - 1) / 2; \ 114 - if (cmp(h, (h)->data[i], (h)->data[p]) >= 0) \ 115 - break; \ 116 - heap_swap(h, i, p, set_backpointer); \ 117 - i = p; \ 118 - } \ 119 - } while (0) 
120 - 121 - #define __heap_add(h, d, cmp, set_backpointer) \ 122 - ({ \ 123 - size_t _i = (h)->used++; \ 124 - (h)->data[_i] = d; \ 125 - heap_set_backpointer(h, _i, set_backpointer); \ 126 - \ 127 - heap_sift_up(h, _i, cmp, set_backpointer); \ 128 - _i; \ 129 - }) 130 - 131 - #define heap_add(h, d, cmp, set_backpointer) \ 132 - ({ \ 133 - bool _r = !heap_full(h); \ 134 - if (_r) \ 135 - __heap_add(h, d, cmp, set_backpointer); \ 136 - _r; \ 137 - }) 138 - 139 - #define heap_add_or_replace(h, new, cmp, set_backpointer) \ 140 - do { \ 141 - if (!heap_add(h, new, cmp, set_backpointer) && \ 142 - cmp(h, new, heap_peek(h)) >= 0) { \ 143 - (h)->data[0] = new; \ 144 - heap_set_backpointer(h, 0, set_backpointer); \ 145 - heap_sift_down(h, 0, cmp, set_backpointer); \ 146 - } \ 147 - } while (0) 148 - 149 - #define heap_del(h, i, cmp, set_backpointer) \ 150 - do { \ 151 - size_t _i = (i); \ 152 - \ 153 - BUG_ON(_i >= (h)->used); \ 154 - (h)->used--; \ 155 - if ((_i) < (h)->used) { \ 156 - heap_swap(h, _i, (h)->used, set_backpointer); \ 157 - heap_sift_up(h, _i, cmp, set_backpointer); \ 158 - heap_sift_down(h, _i, cmp, set_backpointer); \ 159 - } \ 160 - } while (0) 161 - 162 - #define heap_pop(h, d, cmp, set_backpointer) \ 163 - ({ \ 164 - bool _r = (h)->used; \ 165 - if (_r) { \ 166 - (d) = (h)->data[0]; \ 167 - heap_del(h, 0, cmp, set_backpointer); \ 168 - } \ 169 - _r; \ 170 - }) 171 - 172 - #define heap_resort(heap, cmp, set_backpointer) \ 173 - do { \ 174 - ssize_t _i; \ 175 - for (_i = (ssize_t) (heap)->used / 2 - 1; _i >= 0; --_i) \ 176 - heap_sift_down(heap, _i, cmp, set_backpointer); \ 177 77 } while (0) 178 78 179 79 #define ANYSINT_MAX(t) \
+7 -7
fs/coredump.c
··· 361 361 return ispipe; 362 362 } 363 363 364 - static int zap_process(struct task_struct *start, int exit_code) 364 + static int zap_process(struct signal_struct *signal, int exit_code) 365 365 { 366 366 struct task_struct *t; 367 367 int nr = 0; 368 368 369 - /* Allow SIGKILL, see prepare_signal() */ 370 - start->signal->flags = SIGNAL_GROUP_EXIT; 371 - start->signal->group_exit_code = exit_code; 372 - start->signal->group_stop_count = 0; 369 + signal->flags = SIGNAL_GROUP_EXIT; 370 + signal->group_exit_code = exit_code; 371 + signal->group_stop_count = 0; 373 372 374 - for_each_thread(start, t) { 373 + __for_each_thread(signal, t) { 375 374 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); 376 375 if (t != current && !(t->flags & PF_POSTCOREDUMP)) { 377 376 sigaddset(&t->pending.signal, SIGKILL); ··· 390 391 391 392 spin_lock_irq(&tsk->sighand->siglock); 392 393 if (!(signal->flags & SIGNAL_GROUP_EXIT) && !signal->group_exec_task) { 394 + /* Allow SIGKILL, see prepare_signal() */ 393 395 signal->core_state = core_state; 394 - nr = zap_process(tsk, exit_code); 396 + nr = zap_process(signal, exit_code); 395 397 clear_tsk_thread_flag(tsk, TIF_SIGPENDING); 396 398 tsk->flags |= PF_DUMPCORE; 397 399 atomic_set(&core_state->nr_threads, nr);
+55 -36
fs/nilfs2/segment.c
··· 136 136 137 137 #define nilfs_cnt32_ge(a, b) \ 138 138 (typecheck(__u32, a) && typecheck(__u32, b) && \ 139 - ((__s32)(a) - (__s32)(b) >= 0)) 139 + ((__s32)((a) - (b)) >= 0)) 140 140 141 141 static int nilfs_prepare_segment_lock(struct super_block *sb, 142 142 struct nilfs_transaction_info *ti) ··· 1639 1639 folio_unlock(folio); 1640 1640 } 1641 1641 1642 - static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) 1642 + /** 1643 + * nilfs_prepare_write_logs - prepare to write logs 1644 + * @logs: logs to prepare for writing 1645 + * @seed: checksum seed value 1646 + * 1647 + * nilfs_prepare_write_logs() adds checksums and prepares the block 1648 + * buffers/folios for writing logs. In order to stabilize folios of 1649 + * memory-mapped file blocks by putting them in writeback state before 1650 + * calculating the checksums, first prepare to write payload blocks other 1651 + * than segment summary and super root blocks in which the checksums will 1652 + * be embedded. 1653 + */ 1654 + static void nilfs_prepare_write_logs(struct list_head *logs, u32 seed) 1643 1655 { 1644 1656 struct nilfs_segment_buffer *segbuf; 1645 1657 struct folio *bd_folio = NULL, *fs_folio = NULL; 1658 + struct buffer_head *bh; 1646 1659 1647 - list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { 1648 - struct buffer_head *bh; 1649 - 1650 - list_for_each_entry(bh, &segbuf->sb_segsum_buffers, 1651 - b_assoc_buffers) { 1652 - if (bh->b_folio != bd_folio) { 1653 - if (bd_folio) { 1654 - folio_lock(bd_folio); 1655 - folio_wait_writeback(bd_folio); 1656 - folio_clear_dirty_for_io(bd_folio); 1657 - folio_start_writeback(bd_folio); 1658 - folio_unlock(bd_folio); 1659 - } 1660 - bd_folio = bh->b_folio; 1661 - } 1662 - } 1663 - 1660 + /* Prepare to write payload blocks */ 1661 + list_for_each_entry(segbuf, logs, sb_list) { 1664 1662 list_for_each_entry(bh, &segbuf->sb_payload_buffers, 1665 1663 b_assoc_buffers) { 1666 - if (bh == segbuf->sb_super_root) { 1667 - if (bh->b_folio != 
bd_folio) { 1668 - folio_lock(bd_folio); 1669 - folio_wait_writeback(bd_folio); 1670 - folio_clear_dirty_for_io(bd_folio); 1671 - folio_start_writeback(bd_folio); 1672 - folio_unlock(bd_folio); 1673 - bd_folio = bh->b_folio; 1674 - } 1664 + if (bh == segbuf->sb_super_root) 1675 1665 break; 1676 - } 1677 1666 set_buffer_async_write(bh); 1678 1667 if (bh->b_folio != fs_folio) { 1679 1668 nilfs_begin_folio_io(fs_folio); ··· 1670 1681 } 1671 1682 } 1672 1683 } 1684 + nilfs_begin_folio_io(fs_folio); 1685 + 1686 + nilfs_add_checksums_on_logs(logs, seed); 1687 + 1688 + /* Prepare to write segment summary blocks */ 1689 + list_for_each_entry(segbuf, logs, sb_list) { 1690 + list_for_each_entry(bh, &segbuf->sb_segsum_buffers, 1691 + b_assoc_buffers) { 1692 + mark_buffer_dirty(bh); 1693 + if (bh->b_folio == bd_folio) 1694 + continue; 1695 + if (bd_folio) { 1696 + folio_lock(bd_folio); 1697 + folio_wait_writeback(bd_folio); 1698 + folio_clear_dirty_for_io(bd_folio); 1699 + folio_start_writeback(bd_folio); 1700 + folio_unlock(bd_folio); 1701 + } 1702 + bd_folio = bh->b_folio; 1703 + } 1704 + } 1705 + 1706 + /* Prepare to write super root block */ 1707 + bh = NILFS_LAST_SEGBUF(logs)->sb_super_root; 1708 + if (bh) { 1709 + mark_buffer_dirty(bh); 1710 + if (bh->b_folio != bd_folio) { 1711 + folio_lock(bd_folio); 1712 + folio_wait_writeback(bd_folio); 1713 + folio_clear_dirty_for_io(bd_folio); 1714 + folio_start_writeback(bd_folio); 1715 + folio_unlock(bd_folio); 1716 + bd_folio = bh->b_folio; 1717 + } 1718 + } 1719 + 1673 1720 if (bd_folio) { 1674 1721 folio_lock(bd_folio); 1675 1722 folio_wait_writeback(bd_folio); ··· 1713 1688 folio_start_writeback(bd_folio); 1714 1689 folio_unlock(bd_folio); 1715 1690 } 1716 - nilfs_begin_folio_io(fs_folio); 1717 1691 } 1718 1692 1719 1693 static int nilfs_segctor_write(struct nilfs_sc_info *sci, ··· 2094 2070 nilfs_segctor_update_segusage(sci, nilfs->ns_sufile); 2095 2071 2096 2072 /* Write partial segments */ 2097 - 
nilfs_segctor_prepare_write(sci); 2098 - 2099 - nilfs_add_checksums_on_logs(&sci->sc_segbufs, 2100 - nilfs->ns_crc_seed); 2073 + nilfs_prepare_write_logs(&sci->sc_segbufs, nilfs->ns_crc_seed); 2101 2074 2102 2075 err = nilfs_segctor_write(sci, nilfs); 2103 2076 if (unlikely(err)) ··· 2844 2823 nilfs->ns_writer = nilfs_segctor_new(sb, root); 2845 2824 if (!nilfs->ns_writer) 2846 2825 return -ENOMEM; 2847 - 2848 - inode_attach_wb(nilfs->ns_bdev->bd_mapping->host, NULL); 2849 2826 2850 2827 err = nilfs_segctor_start_thread(nilfs->ns_writer); 2851 2828 if (unlikely(err))
+3 -3
fs/nilfs2/sysfs.c
··· 56 56 sg_##name##_kobj); \ 57 57 complete(&subgroups->sg_##name##_kobj_unregister); \ 58 58 } \ 59 - static struct kobj_type nilfs_##name##_ktype = { \ 59 + static const struct kobj_type nilfs_##name##_ktype = { \ 60 60 .default_groups = nilfs_##name##_groups, \ 61 61 .sysfs_ops = &nilfs_##name##_attr_ops, \ 62 62 .release = nilfs_##name##_attr_release, \ ··· 166 166 .store = nilfs_snapshot_attr_store, 167 167 }; 168 168 169 - static struct kobj_type nilfs_snapshot_ktype = { 169 + static const struct kobj_type nilfs_snapshot_ktype = { 170 170 .default_groups = nilfs_snapshot_groups, 171 171 .sysfs_ops = &nilfs_snapshot_attr_ops, 172 172 .release = nilfs_snapshot_attr_release, ··· 967 967 .store = nilfs_dev_attr_store, 968 968 }; 969 969 970 - static struct kobj_type nilfs_dev_ktype = { 970 + static const struct kobj_type nilfs_dev_ktype = { 971 971 .default_groups = nilfs_dev_groups, 972 972 .sysfs_ops = &nilfs_dev_attr_ops, 973 973 .release = nilfs_dev_attr_release,
+29 -17
fs/ocfs2/dir.c
··· 294 294 * bh passed here can be an inode block or a dir data block, depending 295 295 * on the inode inline data flag. 296 296 */ 297 - static int ocfs2_check_dir_entry(struct inode * dir, 298 - struct ocfs2_dir_entry * de, 299 - struct buffer_head * bh, 297 + static int ocfs2_check_dir_entry(struct inode *dir, 298 + struct ocfs2_dir_entry *de, 299 + struct buffer_head *bh, 300 + char *buf, 301 + unsigned int size, 300 302 unsigned long offset) 301 303 { 302 304 const char *error_msg = NULL; 303 305 const int rlen = le16_to_cpu(de->rec_len); 306 + const unsigned long next_offset = ((char *) de - buf) + rlen; 304 307 305 308 if (unlikely(rlen < OCFS2_DIR_REC_LEN(1))) 306 309 error_msg = "rec_len is smaller than minimal"; ··· 311 308 error_msg = "rec_len % 4 != 0"; 312 309 else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len))) 313 310 error_msg = "rec_len is too small for name_len"; 314 - else if (unlikely( 315 - ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)) 316 - error_msg = "directory entry across blocks"; 311 + else if (unlikely(next_offset > size)) 312 + error_msg = "directory entry overrun"; 313 + else if (unlikely(next_offset > size - OCFS2_DIR_REC_LEN(1)) && 314 + next_offset != size) 315 + error_msg = "directory entry too close to end"; 317 316 318 317 if (unlikely(error_msg != NULL)) 319 318 mlog(ML_ERROR, "bad entry in directory #%llu: %s - " ··· 357 352 de_buf = first_de; 358 353 dlimit = de_buf + bytes; 359 354 360 - while (de_buf < dlimit) { 355 + while (de_buf < dlimit - OCFS2_DIR_MEMBER_LEN) { 361 356 /* this code is executed quadratically often */ 362 357 /* do minimal checking `by hand' */ 363 358 364 359 de = (struct ocfs2_dir_entry *) de_buf; 365 360 366 - if (de_buf + namelen <= dlimit && 361 + if (de->name + namelen <= dlimit && 367 362 ocfs2_match(namelen, name, de)) { 368 363 /* found a match - just to be sure, do a full check */ 369 - if (!ocfs2_check_dir_entry(dir, de, bh, offset)) { 364 + if 
(!ocfs2_check_dir_entry(dir, de, bh, first_de, 365 + bytes, offset)) { 370 366 ret = -1; 371 367 goto bail; 372 368 } ··· 1144 1138 pde = NULL; 1145 1139 de = (struct ocfs2_dir_entry *) first_de; 1146 1140 while (i < bytes) { 1147 - if (!ocfs2_check_dir_entry(dir, de, bh, i)) { 1141 + if (!ocfs2_check_dir_entry(dir, de, bh, first_de, bytes, i)) { 1148 1142 status = -EIO; 1149 1143 mlog_errno(status); 1150 1144 goto bail; ··· 1641 1635 /* These checks should've already been passed by the 1642 1636 * prepare function, but I guess we can leave them 1643 1637 * here anyway. */ 1644 - if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) { 1638 + if (!ocfs2_check_dir_entry(dir, de, insert_bh, data_start, 1639 + size, offset)) { 1645 1640 retval = -ENOENT; 1646 1641 goto bail; 1647 1642 } ··· 1781 1774 } 1782 1775 1783 1776 de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos); 1784 - if (!ocfs2_check_dir_entry(inode, de, di_bh, ctx->pos)) { 1777 + if (!ocfs2_check_dir_entry(inode, de, di_bh, (char *)data->id_data, 1778 + i_size_read(inode), ctx->pos)) { 1785 1779 /* On error, skip the f_pos to the end. */ 1786 1780 ctx->pos = i_size_read(inode); 1787 1781 break; ··· 1875 1867 while (ctx->pos < i_size_read(inode) 1876 1868 && offset < sb->s_blocksize) { 1877 1869 de = (struct ocfs2_dir_entry *) (bh->b_data + offset); 1878 - if (!ocfs2_check_dir_entry(inode, de, bh, offset)) { 1870 + if (!ocfs2_check_dir_entry(inode, de, bh, bh->b_data, 1871 + sb->s_blocksize, offset)) { 1879 1872 /* On error, skip the f_pos to the 1880 1873 next block. 
*/ 1881 1874 ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1; ··· 3348 3339 struct super_block *sb = dir->i_sb; 3349 3340 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; 3350 3341 struct ocfs2_dir_entry *de, *last_de = NULL; 3351 - char *de_buf, *limit; 3342 + char *first_de, *de_buf, *limit; 3352 3343 unsigned long offset = 0; 3353 3344 unsigned int rec_len, new_rec_len, free_space; 3354 3345 ··· 3361 3352 else 3362 3353 free_space = dir->i_sb->s_blocksize - i_size_read(dir); 3363 3354 3364 - de_buf = di->id2.i_data.id_data; 3355 + first_de = di->id2.i_data.id_data; 3356 + de_buf = first_de; 3365 3357 limit = de_buf + i_size_read(dir); 3366 3358 rec_len = OCFS2_DIR_REC_LEN(namelen); 3367 3359 3368 3360 while (de_buf < limit) { 3369 3361 de = (struct ocfs2_dir_entry *)de_buf; 3370 3362 3371 - if (!ocfs2_check_dir_entry(dir, de, di_bh, offset)) { 3363 + if (!ocfs2_check_dir_entry(dir, de, di_bh, first_de, 3364 + i_size_read(dir), offset)) { 3372 3365 ret = -ENOENT; 3373 3366 goto out; 3374 3367 } ··· 3452 3441 /* move to next block */ 3453 3442 de = (struct ocfs2_dir_entry *) bh->b_data; 3454 3443 } 3455 - if (!ocfs2_check_dir_entry(dir, de, bh, offset)) { 3444 + if (!ocfs2_check_dir_entry(dir, de, bh, bh->b_data, blocksize, 3445 + offset)) { 3456 3446 status = -ENOENT; 3457 3447 goto bail; 3458 3448 }
+14 -14
fs/ocfs2/dlmglue.c
··· 221 221 */ 222 222 #define LOCK_TYPE_USES_LVB 0x2 223 223 224 - static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = { 224 + static const struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = { 225 225 .get_osb = ocfs2_get_inode_osb, 226 226 .flags = 0, 227 227 }; 228 228 229 - static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = { 229 + static const struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = { 230 230 .get_osb = ocfs2_get_inode_osb, 231 231 .check_downconvert = ocfs2_check_meta_downconvert, 232 232 .set_lvb = ocfs2_set_meta_lvb, ··· 234 234 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB, 235 235 }; 236 236 237 - static struct ocfs2_lock_res_ops ocfs2_super_lops = { 237 + static const struct ocfs2_lock_res_ops ocfs2_super_lops = { 238 238 .flags = LOCK_TYPE_REQUIRES_REFRESH, 239 239 }; 240 240 241 - static struct ocfs2_lock_res_ops ocfs2_rename_lops = { 241 + static const struct ocfs2_lock_res_ops ocfs2_rename_lops = { 242 242 .flags = 0, 243 243 }; 244 244 245 - static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = { 245 + static const struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = { 246 246 .flags = 0, 247 247 }; 248 248 249 - static struct ocfs2_lock_res_ops ocfs2_trim_fs_lops = { 249 + static const struct ocfs2_lock_res_ops ocfs2_trim_fs_lops = { 250 250 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB, 251 251 }; 252 252 253 - static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = { 253 + static const struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = { 254 254 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB, 255 255 }; 256 256 257 - static struct ocfs2_lock_res_ops ocfs2_dentry_lops = { 257 + static const struct ocfs2_lock_res_ops ocfs2_dentry_lops = { 258 258 .get_osb = ocfs2_get_dentry_osb, 259 259 .post_unlock = ocfs2_dentry_post_unlock, 260 260 .downconvert_worker = ocfs2_dentry_convert_worker, 261 261 .flags = 0, 262 262 }; 263 263 264 - static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = { 264 + static const 
struct ocfs2_lock_res_ops ocfs2_inode_open_lops = { 265 265 .get_osb = ocfs2_get_inode_osb, 266 266 .flags = 0, 267 267 }; 268 268 269 - static struct ocfs2_lock_res_ops ocfs2_flock_lops = { 269 + static const struct ocfs2_lock_res_ops ocfs2_flock_lops = { 270 270 .get_osb = ocfs2_get_file_osb, 271 271 .flags = 0, 272 272 }; 273 273 274 - static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = { 274 + static const struct ocfs2_lock_res_ops ocfs2_qinfo_lops = { 275 275 .set_lvb = ocfs2_set_qinfo_lvb, 276 276 .get_osb = ocfs2_get_qinfo_osb, 277 277 .flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB, 278 278 }; 279 279 280 - static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = { 280 + static const struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = { 281 281 .check_downconvert = ocfs2_check_refcount_downconvert, 282 282 .downconvert_worker = ocfs2_refcount_convert_worker, 283 283 .flags = 0, ··· 510 510 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb, 511 511 struct ocfs2_lock_res *res, 512 512 enum ocfs2_lock_type type, 513 - struct ocfs2_lock_res_ops *ops, 513 + const struct ocfs2_lock_res_ops *ops, 514 514 void *priv) 515 515 { 516 516 res->l_type = type; ··· 553 553 unsigned int generation, 554 554 struct inode *inode) 555 555 { 556 - struct ocfs2_lock_res_ops *ops; 556 + const struct ocfs2_lock_res_ops *ops; 557 557 558 558 switch(type) { 559 559 case OCFS2_LOCK_TYPE_RW:
+2
fs/ocfs2/namei.c
··· 2189 2189 * @osb: ocfs2 file system 2190 2190 * @ret_orphan_dir: Orphan dir inode - returned locked! 2191 2191 * @blkno: Actual block number of the inode to be inserted into orphan dir. 2192 + * @name: Buffer to store the name of the orphan. 2192 2193 * @lookup: dir lookup result, to be passed back into functions like 2193 2194 * ocfs2_orphan_add 2195 + * @dio: Flag indicating if direct IO is being used or not. 2194 2196 * 2195 2197 * Returns zero on success and the ret_orphan_dir, name and lookup 2196 2198 * fields will be populated.
+1 -1
fs/ocfs2/ocfs2.h
··· 154 154 155 155 struct ocfs2_lock_res { 156 156 void *l_priv; 157 - struct ocfs2_lock_res_ops *l_ops; 157 + const struct ocfs2_lock_res_ops *l_ops; 158 158 159 159 160 160 struct list_head l_blocked_list;
+1 -1
fs/ocfs2/stack_o2cb.c
··· 404 404 return 0; 405 405 } 406 406 407 - static struct ocfs2_stack_operations o2cb_stack_ops = { 407 + static const struct ocfs2_stack_operations o2cb_stack_ops = { 408 408 .connect = o2cb_cluster_connect, 409 409 .disconnect = o2cb_cluster_disconnect, 410 410 .this_node = o2cb_cluster_this_node,
+1 -1
fs/ocfs2/stack_user.c
··· 1065 1065 return 0; 1066 1066 } 1067 1067 1068 - static struct ocfs2_stack_operations ocfs2_user_plugin_ops = { 1068 + static const struct ocfs2_stack_operations ocfs2_user_plugin_ops = { 1069 1069 .connect = user_cluster_connect, 1070 1070 .disconnect = user_cluster_disconnect, 1071 1071 .this_node = user_cluster_this_node,
+1 -1
fs/ocfs2/stackglue.h
··· 223 223 */ 224 224 struct ocfs2_stack_plugin { 225 225 char *sp_name; 226 - struct ocfs2_stack_operations *sp_ops; 226 + const struct ocfs2_stack_operations *sp_ops; 227 227 struct module *sp_owner; 228 228 229 229 /* These are managed by the stackglue code. */
+18 -9
fs/ocfs2/xattr.c
··· 1062 1062 return i_ret + b_ret; 1063 1063 } 1064 1064 1065 - static int ocfs2_xattr_find_entry(int name_index, 1065 + static int ocfs2_xattr_find_entry(struct inode *inode, int name_index, 1066 1066 const char *name, 1067 1067 struct ocfs2_xattr_search *xs) 1068 1068 { 1069 1069 struct ocfs2_xattr_entry *entry; 1070 1070 size_t name_len; 1071 - int i, cmp = 1; 1071 + int i, name_offset, cmp = 1; 1072 1072 1073 1073 if (name == NULL) 1074 1074 return -EINVAL; ··· 1076 1076 name_len = strlen(name); 1077 1077 entry = xs->here; 1078 1078 for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) { 1079 + if ((void *)entry >= xs->end) { 1080 + ocfs2_error(inode->i_sb, "corrupted xattr entries"); 1081 + return -EFSCORRUPTED; 1082 + } 1079 1083 cmp = name_index - ocfs2_xattr_get_type(entry); 1080 1084 if (!cmp) 1081 1085 cmp = name_len - entry->xe_name_len; 1082 - if (!cmp) 1083 - cmp = memcmp(name, (xs->base + 1084 - le16_to_cpu(entry->xe_name_offset)), 1085 - name_len); 1086 + if (!cmp) { 1087 + name_offset = le16_to_cpu(entry->xe_name_offset); 1088 + if ((xs->base + name_offset + name_len) > xs->end) { 1089 + ocfs2_error(inode->i_sb, 1090 + "corrupted xattr entries"); 1091 + return -EFSCORRUPTED; 1092 + } 1093 + cmp = memcmp(name, (xs->base + name_offset), name_len); 1094 + } 1086 1095 if (cmp == 0) 1087 1096 break; 1088 1097 entry += 1; ··· 1175 1166 xs->base = (void *)xs->header; 1176 1167 xs->here = xs->header->xh_entries; 1177 1168 1178 - ret = ocfs2_xattr_find_entry(name_index, name, xs); 1169 + ret = ocfs2_xattr_find_entry(inode, name_index, name, xs); 1179 1170 if (ret) 1180 1171 return ret; 1181 1172 size = le64_to_cpu(xs->here->xe_value_size); ··· 2707 2698 2708 2699 /* Find the named attribute. 
*/ 2709 2700 if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) { 2710 - ret = ocfs2_xattr_find_entry(name_index, name, xs); 2701 + ret = ocfs2_xattr_find_entry(inode, name_index, name, xs); 2711 2702 if (ret && ret != -ENODATA) 2712 2703 return ret; 2713 2704 xs->not_found = ret; ··· 2842 2833 xs->end = (void *)(blk_bh->b_data) + blk_bh->b_size; 2843 2834 xs->here = xs->header->xh_entries; 2844 2835 2845 - ret = ocfs2_xattr_find_entry(name_index, name, xs); 2836 + ret = ocfs2_xattr_find_entry(inode, name_index, name, xs); 2846 2837 } else 2847 2838 ret = ocfs2_xattr_index_block_find(inode, blk_bh, 2848 2839 name_index,
+1
fs/ufs/super.c
··· 1540 1540 1541 1541 module_init(init_ufs_fs) 1542 1542 module_exit(exit_ufs_fs) 1543 + MODULE_DESCRIPTION("UFS Filesystem"); 1543 1544 MODULE_LICENSE("GPL");
+2 -16
include/asm-generic/vmlinux.lds.h
··· 141 141 * often happens at runtime) 142 142 */ 143 143 144 - #if defined(CONFIG_MEMORY_HOTPLUG) 145 - #define MEM_KEEP(sec) *(.mem##sec) 146 - #define MEM_DISCARD(sec) 147 - #else 148 - #define MEM_KEEP(sec) 149 - #define MEM_DISCARD(sec) *(.mem##sec) 150 - #endif 151 - 152 144 #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE 153 145 #define KEEP_PATCHABLE KEEP(*(__patchable_function_entries)) 154 146 #define PATCHABLE_DISCARDS ··· 349 357 *(.data..decrypted) \ 350 358 *(.ref.data) \ 351 359 *(.data..shared_aligned) /* percpu related */ \ 352 - MEM_KEEP(init.data*) \ 353 360 *(.data.unlikely) \ 354 361 __start_once = .; \ 355 362 *(.data.once) \ ··· 533 542 /* __*init sections */ \ 534 543 __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \ 535 544 *(.ref.rodata) \ 536 - MEM_KEEP(init.rodata) \ 537 545 } \ 538 546 \ 539 547 /* Built-in module parameters. */ \ ··· 583 593 *(.text.unknown .text.unknown.*) \ 584 594 NOINSTR_TEXT \ 585 595 *(.ref.text) \ 586 - *(.text.asan.* .text.tsan.*) \ 587 - MEM_KEEP(init.text*) \ 596 + *(.text.asan.* .text.tsan.*) 588 597 589 598 590 599 /* sched.text is aling to function alignment to secure we have same ··· 690 701 #define INIT_DATA \ 691 702 KEEP(*(SORT(___kentry+*))) \ 692 703 *(.init.data .init.data.*) \ 693 - MEM_DISCARD(init.data*) \ 694 704 KERNEL_CTORS() \ 695 705 MCOUNT_REC() \ 696 706 *(.init.rodata .init.rodata.*) \ ··· 697 709 TRACE_SYSCALLS() \ 698 710 KPROBE_BLACKLIST() \ 699 711 ERROR_INJECT_WHITELIST() \ 700 - MEM_DISCARD(init.rodata) \ 701 712 CLK_OF_TABLES() \ 702 713 RESERVEDMEM_OF_TABLES() \ 703 714 TIMER_OF_TABLES() \ ··· 714 727 715 728 #define INIT_TEXT \ 716 729 *(.init.text .init.text.*) \ 717 - *(.text.startup) \ 718 - MEM_DISCARD(init.text*) 730 + *(.text.startup) 719 731 720 732 #define EXIT_DATA \ 721 733 *(.exit.data .exit.data.*) \
+1 -1
include/linux/cacheinfo.h
··· 4 4 5 5 #include <linux/bitops.h> 6 6 #include <linux/cpuhplock.h> 7 - #include <linux/cpumask.h> 7 + #include <linux/cpumask_types.h> 8 8 #include <linux/smp.h> 9 9 10 10 struct device_node;
-1
include/linux/cgroup.h
··· 10 10 */ 11 11 12 12 #include <linux/sched.h> 13 - #include <linux/cpumask.h> 14 13 #include <linux/nodemask.h> 15 14 #include <linux/rculist.h> 16 15 #include <linux/cgroupstats.h>
+1 -1
include/linux/clockchips.h
··· 12 12 #ifdef CONFIG_GENERIC_CLOCKEVENTS 13 13 14 14 # include <linux/clocksource.h> 15 - # include <linux/cpumask.h> 15 + # include <linux/cpumask_types.h> 16 16 # include <linux/ktime.h> 17 17 # include <linux/notifier.h> 18 18
+2 -4
include/linux/compiler.h
··· 208 208 */ 209 209 #define data_race(expr) \ 210 210 ({ \ 211 - __unqual_scalar_typeof(({ expr; })) __v = ({ \ 212 - __kcsan_disable_current(); \ 213 - expr; \ 214 - }); \ 211 + __kcsan_disable_current(); \ 212 + __auto_type __v = (expr); \ 215 213 __kcsan_enable_current(); \ 216 214 __v; \ 217 215 })
-1
include/linux/cpu.h
··· 16 16 17 17 #include <linux/node.h> 18 18 #include <linux/compiler.h> 19 - #include <linux/cpumask.h> 20 19 #include <linux/cpuhotplug.h> 21 20 #include <linux/cpuhplock.h> 22 21 #include <linux/cpu_smt.h>
-1
include/linux/cpu_cooling.h
··· 15 15 16 16 #include <linux/of.h> 17 17 #include <linux/thermal.h> 18 - #include <linux/cpumask.h> 19 18 20 19 struct cpufreq_policy; 21 20
+1 -1
include/linux/cpu_rmap.h
··· 7 7 * Copyright 2011 Solarflare Communications Inc. 8 8 */ 9 9 10 - #include <linux/cpumask.h> 10 + #include <linux/cpumask_types.h> 11 11 #include <linux/gfp.h> 12 12 #include <linux/slab.h> 13 13 #include <linux/kref.h>
+1 -55
include/linux/cpumask.h
··· 9 9 */ 10 10 #include <linux/cleanup.h> 11 11 #include <linux/kernel.h> 12 - #include <linux/threads.h> 13 12 #include <linux/bitmap.h> 13 + #include <linux/cpumask_types.h> 14 14 #include <linux/atomic.h> 15 15 #include <linux/bug.h> 16 16 #include <linux/gfp_types.h> 17 17 #include <linux/numa.h> 18 - 19 - /* Don't assign or return these: may not be this big! */ 20 - typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; 21 - 22 - /** 23 - * cpumask_bits - get the bits in a cpumask 24 - * @maskp: the struct cpumask * 25 - * 26 - * You should only assume nr_cpu_ids bits of this mask are valid. This is 27 - * a macro so it's const-correct. 28 - */ 29 - #define cpumask_bits(maskp) ((maskp)->bits) 30 18 31 19 /** 32 20 * cpumask_pr_args - printf args to output a cpumask ··· 913 925 return bitmap_size(large_cpumask_bits); 914 926 } 915 927 916 - /* 917 - * cpumask_var_t: struct cpumask for stack usage. 918 - * 919 - * Oh, the wicked games we play! In order to make kernel coding a 920 - * little more difficult, we typedef cpumask_var_t to an array or a 921 - * pointer: doing &mask on an array is a noop, so it still works. 922 - * 923 - * i.e. 924 - * cpumask_var_t tmpmask; 925 - * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) 926 - * return -ENOMEM; 927 - * 928 - * ... use 'tmpmask' like a normal struct cpumask * ... 929 - * 930 - * free_cpumask_var(tmpmask); 931 - * 932 - * 933 - * However, one notable exception is there. alloc_cpumask_var() allocates 934 - * only nr_cpumask_bits bits (in the other hand, real cpumask_t always has 935 - * NR_CPUS bits). Therefore you don't have to dereference cpumask_var_t. 936 - * 937 - * cpumask_var_t tmpmask; 938 - * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) 939 - * return -ENOMEM; 940 - * 941 - * var = *tmpmask; 942 - * 943 - * This code makes NR_CPUS length memcopy and brings to a memory corruption. 944 - * cpumask_copy() provide safe copy functionality. 
945 - * 946 - * Note that there is another evil here: If you define a cpumask_var_t 947 - * as a percpu variable then the way to obtain the address of the cpumask 948 - * structure differently influences what this_cpu_* operation needs to be 949 - * used. Please use this_cpu_cpumask_var_t in those cases. The direct use 950 - * of this_cpu_ptr() or this_cpu_read() will lead to failures when the 951 - * other type of cpumask_var_t implementation is configured. 952 - * 953 - * Please also note that __cpumask_var_read_mostly can be used to declare 954 - * a cpumask_var_t variable itself (not its content) as read mostly. 955 - */ 956 928 #ifdef CONFIG_CPUMASK_OFFSTACK 957 - typedef struct cpumask *cpumask_var_t; 958 929 959 930 #define this_cpu_cpumask_var_ptr(x) this_cpu_read(x) 960 931 #define __cpumask_var_read_mostly __read_mostly ··· 960 1013 } 961 1014 962 1015 #else 963 - typedef struct cpumask cpumask_var_t[1]; 964 1016 965 1017 #define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x) 966 1018 #define __cpumask_var_read_mostly
+66
include/linux/cpumask_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __LINUX_CPUMASK_TYPES_H 3 + #define __LINUX_CPUMASK_TYPES_H 4 + 5 + #include <linux/bitops.h> 6 + #include <linux/threads.h> 7 + 8 + /* Don't assign or return these: may not be this big! */ 9 + typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; 10 + 11 + /** 12 + * cpumask_bits - get the bits in a cpumask 13 + * @maskp: the struct cpumask * 14 + * 15 + * You should only assume nr_cpu_ids bits of this mask are valid. This is 16 + * a macro so it's const-correct. 17 + */ 18 + #define cpumask_bits(maskp) ((maskp)->bits) 19 + 20 + /* 21 + * cpumask_var_t: struct cpumask for stack usage. 22 + * 23 + * Oh, the wicked games we play! In order to make kernel coding a 24 + * little more difficult, we typedef cpumask_var_t to an array or a 25 + * pointer: doing &mask on an array is a noop, so it still works. 26 + * 27 + * i.e. 28 + * cpumask_var_t tmpmask; 29 + * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) 30 + * return -ENOMEM; 31 + * 32 + * ... use 'tmpmask' like a normal struct cpumask * ... 33 + * 34 + * free_cpumask_var(tmpmask); 35 + * 36 + * 37 + * However, one notable exception is there. alloc_cpumask_var() allocates 38 + * only nr_cpumask_bits bits (in the other hand, real cpumask_t always has 39 + * NR_CPUS bits). Therefore you don't have to dereference cpumask_var_t. 40 + * 41 + * cpumask_var_t tmpmask; 42 + * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) 43 + * return -ENOMEM; 44 + * 45 + * var = *tmpmask; 46 + * 47 + * This code makes NR_CPUS length memcopy and brings to a memory corruption. 48 + * cpumask_copy() provide safe copy functionality. 49 + * 50 + * Note that there is another evil here: If you define a cpumask_var_t 51 + * as a percpu variable then the way to obtain the address of the cpumask 52 + * structure differently influences what this_cpu_* operation needs to be 53 + * used. Please use this_cpu_cpumask_var_t in those cases. 
The direct use 54 + * of this_cpu_ptr() or this_cpu_read() will lead to failures when the 55 + * other type of cpumask_var_t implementation is configured. 56 + * 57 + * Please also note that __cpumask_var_read_mostly can be used to declare 58 + * a cpumask_var_t variable itself (not its content) as read mostly. 59 + */ 60 + #ifdef CONFIG_CPUMASK_OFFSTACK 61 + typedef struct cpumask *cpumask_var_t; 62 + #else 63 + typedef struct cpumask cpumask_var_t[1]; 64 + #endif /* CONFIG_CPUMASK_OFFSTACK */ 65 + 66 + #endif /* __LINUX_CPUMASK_TYPES_H */
+9 -9
include/linux/init.h
··· 84 84 85 85 #define __exit __section(".exit.text") __exitused __cold notrace 86 86 87 - /* Used for MEMORY_HOTPLUG */ 88 - #define __meminit __section(".meminit.text") __cold notrace \ 89 - __latent_entropy 90 - #define __meminitdata __section(".meminit.data") 91 - #define __meminitconst __section(".meminit.rodata") 87 + #ifdef CONFIG_MEMORY_HOTPLUG 88 + #define __meminit 89 + #define __meminitdata 90 + #define __meminitconst 91 + #else 92 + #define __meminit __init 93 + #define __meminitdata __initdata 94 + #define __meminitconst __initconst 95 + #endif 92 96 93 97 /* For assembly routines */ 94 98 #define __HEAD .section ".head.text","ax" ··· 102 98 #define __INITDATA .section ".init.data","aw",%progbits 103 99 #define __INITRODATA .section ".init.rodata","a",%progbits 104 100 #define __FINITDATA .previous 105 - 106 - #define __MEMINIT .section ".meminit.text", "ax" 107 - #define __MEMINITDATA .section ".meminit.data", "aw" 108 - #define __MEMINITRODATA .section ".meminit.rodata", "a" 109 101 110 102 /* silence warnings when references are OK */ 111 103 #define __REF .section ".ref.text", "ax"
+1 -1
include/linux/interrupt.h
··· 6 6 #include <linux/kernel.h> 7 7 #include <linux/bitops.h> 8 8 #include <linux/cleanup.h> 9 - #include <linux/cpumask.h> 10 9 #include <linux/irqreturn.h> 11 10 #include <linux/irqnr.h> 12 11 #include <linux/hardirq.h> 13 12 #include <linux/irqflags.h> 14 13 #include <linux/hrtimer.h> 15 14 #include <linux/kref.h> 15 + #include <linux/cpumask_types.h> 16 16 #include <linux/workqueue.h> 17 17 #include <linux/jump_label.h> 18 18
+1 -1
include/linux/irqchip/irq-partition-percpu.h
··· 8 8 #define __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H 9 9 10 10 #include <linux/fwnode.h> 11 - #include <linux/cpumask.h> 11 + #include <linux/cpumask_types.h> 12 12 #include <linux/irqdomain.h> 13 13 14 14 struct partition_affinity {
+3 -3
include/linux/jhash.h
··· 31 31 /* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */ 32 32 #define jhash_mask(n) (jhash_size(n)-1) 33 33 34 - /* __jhash_mix -- mix 3 32-bit values reversibly. */ 34 + /* __jhash_mix - mix 3 32-bit values reversibly. */ 35 35 #define __jhash_mix(a, b, c) \ 36 36 { \ 37 37 a -= c; a ^= rol32(c, 4); c += b; \ ··· 60 60 /* jhash - hash an arbitrary key 61 61 * @k: sequence of bytes as key 62 62 * @length: the length of the key 63 - * @initval: the previous hash, or an arbitray value 63 + * @initval: the previous hash, or an arbitrary value 64 64 * 65 65 * The generic version, hashes an arbitrary sequence of bytes. 66 66 * No alignment or length assumptions are made about the input key. ··· 110 110 /* jhash2 - hash an array of u32's 111 111 * @k: the key which must be an array of u32's 112 112 * @length: the number of u32's in the key 113 - * @initval: the previous hash, or an arbitray value 113 + * @initval: the previous hash, or an arbitrary value 114 114 * 115 115 * Returns the hash value of the key. 116 116 */
-1
include/linux/kernel_stat.h
··· 5 5 #include <linux/smp.h> 6 6 #include <linux/threads.h> 7 7 #include <linux/percpu.h> 8 - #include <linux/cpumask.h> 9 8 #include <linux/interrupt.h> 10 9 #include <linux/sched.h> 11 10 #include <linux/vtime.h>
+144 -44
include/linux/min_heap.h
··· 7 7 #include <linux/types.h> 8 8 9 9 /** 10 - * struct min_heap - Data structure to hold a min-heap. 11 - * @data: Start of array holding the heap elements. 10 + * Data structure to hold a min-heap. 12 11 * @nr: Number of elements currently in the heap. 13 12 * @size: Maximum number of elements that can be held in current storage. 13 + * @data: Pointer to the start of array holding the heap elements. 14 + * @preallocated: Start of the static preallocated array holding the heap elements. 14 15 */ 15 - struct min_heap { 16 - void *data; 17 - int nr; 18 - int size; 19 - }; 16 + #define MIN_HEAP_PREALLOCATED(_type, _name, _nr) \ 17 + struct _name { \ 18 + int nr; \ 19 + int size; \ 20 + _type *data; \ 21 + _type preallocated[_nr]; \ 22 + } 23 + 24 + #define DEFINE_MIN_HEAP(_type, _name) MIN_HEAP_PREALLOCATED(_type, _name, 0) 25 + 26 + typedef DEFINE_MIN_HEAP(char, min_heap_char) min_heap_char; 27 + 28 + #define __minheap_cast(_heap) (typeof((_heap)->data[0]) *) 29 + #define __minheap_obj_size(_heap) sizeof((_heap)->data[0]) 20 30 21 31 /** 22 32 * struct min_heap_callbacks - Data/functions to customise the min_heap. 23 - * @elem_size: The nr of each element in bytes. 24 33 * @less: Partial order function for this heap. 25 34 * @swp: Swap elements function. 26 35 */ 27 36 struct min_heap_callbacks { 28 - int elem_size; 29 - bool (*less)(const void *lhs, const void *rhs); 30 - void (*swp)(void *lhs, void *rhs); 37 + bool (*less)(const void *lhs, const void *rhs, void *args); 38 + void (*swp)(void *lhs, void *rhs, void *args); 31 39 }; 40 + 41 + /* Initialize a min-heap. 
*/ 42 + static __always_inline 43 + void __min_heap_init(min_heap_char *heap, void *data, int size) 44 + { 45 + heap->nr = 0; 46 + heap->size = size; 47 + if (data) 48 + heap->data = data; 49 + else 50 + heap->data = heap->preallocated; 51 + } 52 + 53 + #define min_heap_init(_heap, _data, _size) \ 54 + __min_heap_init((min_heap_char *)_heap, _data, _size) 55 + 56 + /* Get the minimum element from the heap. */ 57 + static __always_inline 58 + void *__min_heap_peek(struct min_heap_char *heap) 59 + { 60 + return heap->nr ? heap->data : NULL; 61 + } 62 + 63 + #define min_heap_peek(_heap) \ 64 + (__minheap_cast(_heap) __min_heap_peek((min_heap_char *)_heap)) 65 + 66 + /* Check if the heap is full. */ 67 + static __always_inline 68 + bool __min_heap_full(min_heap_char *heap) 69 + { 70 + return heap->nr == heap->size; 71 + } 72 + 73 + #define min_heap_full(_heap) \ 74 + __min_heap_full((min_heap_char *)_heap) 32 75 33 76 /* Sift the element at pos down the heap. */ 34 77 static __always_inline 35 - void min_heapify(struct min_heap *heap, int pos, 36 - const struct min_heap_callbacks *func) 78 + void __min_heap_sift_down(min_heap_char *heap, int pos, size_t elem_size, 79 + const struct min_heap_callbacks *func, void *args) 37 80 { 38 81 void *left, *right; 39 82 void *data = heap->data; 40 - void *root = data + pos * func->elem_size; 83 + void *root = data + pos * elem_size; 41 84 int i = pos, j; 42 85 43 86 /* Find the sift-down path all the way to the leaves. */ 44 87 for (;;) { 45 88 if (i * 2 + 2 >= heap->nr) 46 89 break; 47 - left = data + (i * 2 + 1) * func->elem_size; 48 - right = data + (i * 2 + 2) * func->elem_size; 49 - i = func->less(left, right) ? i * 2 + 1 : i * 2 + 2; 90 + left = data + (i * 2 + 1) * elem_size; 91 + right = data + (i * 2 + 2) * elem_size; 92 + i = func->less(left, right, args) ? i * 2 + 1 : i * 2 + 2; 50 93 } 51 94 52 95 /* Special case for the last leaf with no sibling. 
*/ ··· 97 54 i = i * 2 + 1; 98 55 99 56 /* Backtrack to the correct location. */ 100 - while (i != pos && func->less(root, data + i * func->elem_size)) 57 + while (i != pos && func->less(root, data + i * elem_size, args)) 101 58 i = (i - 1) / 2; 102 59 103 60 /* Shift the element into its correct place. */ 104 61 j = i; 105 62 while (i != pos) { 106 63 i = (i - 1) / 2; 107 - func->swp(data + i * func->elem_size, data + j * func->elem_size); 64 + func->swp(data + i * elem_size, data + j * elem_size, args); 108 65 } 109 66 } 110 67 68 + #define min_heap_sift_down(_heap, _pos, _func, _args) \ 69 + __min_heap_sift_down((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), _func, _args) 70 + 71 + /* Sift up ith element from the heap, O(log2(nr)). */ 72 + static __always_inline 73 + void __min_heap_sift_up(min_heap_char *heap, size_t elem_size, size_t idx, 74 + const struct min_heap_callbacks *func, void *args) 75 + { 76 + void *data = heap->data; 77 + size_t parent; 78 + 79 + while (idx) { 80 + parent = (idx - 1) / 2; 81 + if (func->less(data + parent * elem_size, data + idx * elem_size, args)) 82 + break; 83 + func->swp(data + parent * elem_size, data + idx * elem_size, args); 84 + idx = parent; 85 + } 86 + } 87 + 88 + #define min_heap_sift_up(_heap, _idx, _func, _args) \ 89 + __min_heap_sift_up((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args) 90 + 111 91 /* Floyd's approach to heapification that is O(nr). 
*/ 112 92 static __always_inline 113 - void min_heapify_all(struct min_heap *heap, 114 - const struct min_heap_callbacks *func) 93 + void __min_heapify_all(min_heap_char *heap, size_t elem_size, 94 + const struct min_heap_callbacks *func, void *args) 115 95 { 116 96 int i; 117 97 118 98 for (i = heap->nr / 2 - 1; i >= 0; i--) 119 - min_heapify(heap, i, func); 99 + __min_heap_sift_down(heap, i, elem_size, func, args); 120 100 } 101 + 102 + #define min_heapify_all(_heap, _func, _args) \ 103 + __min_heapify_all((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) 121 104 122 105 /* Remove minimum element from the heap, O(log2(nr)). */ 123 106 static __always_inline 124 - void min_heap_pop(struct min_heap *heap, 125 - const struct min_heap_callbacks *func) 107 + bool __min_heap_pop(min_heap_char *heap, size_t elem_size, 108 + const struct min_heap_callbacks *func, void *args) 126 109 { 127 110 void *data = heap->data; 128 111 129 112 if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap")) 130 - return; 113 + return false; 131 114 132 115 /* Place last element at the root (position 0) and then sift down. */ 133 116 heap->nr--; 134 - memcpy(data, data + (heap->nr * func->elem_size), func->elem_size); 135 - min_heapify(heap, 0, func); 117 + memcpy(data, data + (heap->nr * elem_size), elem_size); 118 + __min_heap_sift_down(heap, 0, elem_size, func, args); 119 + 120 + return true; 136 121 } 122 + 123 + #define min_heap_pop(_heap, _func, _args) \ 124 + __min_heap_pop((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) 137 125 138 126 /* 139 127 * Remove the minimum element and then push the given element. The ··· 172 98 * efficient than a pop followed by a push that does 2. 
173 99 */ 174 100 static __always_inline 175 - void min_heap_pop_push(struct min_heap *heap, 176 - const void *element, 177 - const struct min_heap_callbacks *func) 101 + void __min_heap_pop_push(min_heap_char *heap, 102 + const void *element, size_t elem_size, 103 + const struct min_heap_callbacks *func, 104 + void *args) 178 105 { 179 - memcpy(heap->data, element, func->elem_size); 180 - min_heapify(heap, 0, func); 106 + memcpy(heap->data, element, elem_size); 107 + __min_heap_sift_down(heap, 0, elem_size, func, args); 181 108 } 109 + 110 + #define min_heap_pop_push(_heap, _element, _func, _args) \ 111 + __min_heap_pop_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args) 182 112 183 113 /* Push an element on to the heap, O(log2(nr)). */ 184 114 static __always_inline 185 - void min_heap_push(struct min_heap *heap, const void *element, 186 - const struct min_heap_callbacks *func) 115 + bool __min_heap_push(min_heap_char *heap, const void *element, size_t elem_size, 116 + const struct min_heap_callbacks *func, void *args) 187 117 { 188 118 void *data = heap->data; 189 - void *child, *parent; 190 119 int pos; 191 120 192 121 if (WARN_ONCE(heap->nr >= heap->size, "Pushing on a full heap")) 193 - return; 122 + return false; 194 123 195 124 /* Place at the end of data. */ 196 125 pos = heap->nr; 197 - memcpy(data + (pos * func->elem_size), element, func->elem_size); 126 + memcpy(data + (pos * elem_size), element, elem_size); 198 127 heap->nr++; 199 128 200 129 /* Sift child at pos up. 
*/ 201 - for (; pos > 0; pos = (pos - 1) / 2) { 202 - child = data + (pos * func->elem_size); 203 - parent = data + ((pos - 1) / 2) * func->elem_size; 204 - if (func->less(parent, child)) 205 - break; 206 - func->swp(parent, child); 207 - } 130 + __min_heap_sift_up(heap, elem_size, pos, func, args); 131 + 132 + return true; 208 133 } 134 + 135 + #define min_heap_push(_heap, _element, _func, _args) \ 136 + __min_heap_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args) 137 + 138 + /* Remove ith element from the heap, O(log2(nr)). */ 139 + static __always_inline 140 + bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx, 141 + const struct min_heap_callbacks *func, void *args) 142 + { 143 + void *data = heap->data; 144 + 145 + if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap")) 146 + return false; 147 + 148 + /* Place last element at the root (position 0) and then sift down. */ 149 + heap->nr--; 150 + if (idx == heap->nr) 151 + return true; 152 + func->swp(data + (idx * elem_size), data + (heap->nr * elem_size), args); 153 + __min_heap_sift_up(heap, elem_size, idx, func, args); 154 + __min_heap_sift_down(heap, idx, elem_size, func, args); 155 + 156 + return true; 157 + } 158 + 159 + #define min_heap_del(_heap, _idx, _func, _args) \ 160 + __min_heap_del((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args) 209 161 210 162 #endif /* _LINUX_MIN_HEAP_H */
+1 -1
include/linux/msi.h
··· 19 19 */ 20 20 21 21 #include <linux/irqdomain_defs.h> 22 - #include <linux/cpumask.h> 22 + #include <linux/cpumask_types.h> 23 23 #include <linux/msi_api.h> 24 24 #include <linux/xarray.h> 25 25 #include <linux/mutex.h>
-1
include/linux/node.h
··· 16 16 #define _LINUX_NODE_H_ 17 17 18 18 #include <linux/device.h> 19 - #include <linux/cpumask.h> 20 19 #include <linux/list.h> 21 20 22 21 /**
+1 -1
include/linux/nvme-fc-driver.h
··· 620 620 * 621 621 * Structure used between LLDD and nvmet-fc layer to represent the exchange 622 622 * context for a FC-NVME FCP I/O operation (e.g. a nvme sqe, the sqe-related 623 - * memory transfers, and its assocated cqe transfer). 623 + * memory transfers, and its associated cqe transfer). 624 624 * 625 625 * The structure is allocated by the LLDD whenever a FCP CMD IU is received 626 626 * from the FC link. The address of the structure is passed to the nvmet-fc
+5 -3
include/linux/panic.h
··· 77 77 #define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1) 78 78 79 79 struct taint_flag { 80 - char c_true; /* character printed when tainted */ 81 - char c_false; /* character printed when not tainted */ 82 - bool module; /* also show as a per-module taint flag */ 80 + char c_true; /* character printed when tainted */ 81 + char c_false; /* character printed when not tainted */ 82 + bool module; /* also show as a per-module taint flag */ 83 + const char *desc; /* verbose description of the set taint flag */ 83 84 }; 84 85 85 86 extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT]; ··· 91 90 }; 92 91 93 92 extern const char *print_tainted(void); 93 + extern const char *print_tainted_verbose(void); 94 94 extern void add_taint(unsigned flag, enum lockdep_ok); 95 95 extern int test_taint(unsigned flag); 96 96 extern unsigned long get_taint(void);
-1
include/linux/percpu.h
··· 6 6 #include <linux/mmdebug.h> 7 7 #include <linux/preempt.h> 8 8 #include <linux/smp.h> 9 - #include <linux/cpumask.h> 10 9 #include <linux/pfn.h> 11 10 #include <linux/init.h> 12 11 #include <linux/cleanup.h>
+1 -1
include/linux/pm_domain.h
··· 16 16 #include <linux/of.h> 17 17 #include <linux/notifier.h> 18 18 #include <linux/spinlock.h> 19 - #include <linux/cpumask.h> 19 + #include <linux/cpumask_types.h> 20 20 #include <linux/time64.h> 21 21 22 22 /*
-6
include/linux/poison.h
··· 49 49 /********** arch/$ARCH/mm/init.c **********/ 50 50 #define POISON_FREE_INITMEM 0xcc 51 51 52 - /********** arch/ia64/hp/common/sba_iommu.c **********/ 53 - /* 54 - * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a 55 - * value of "SBAIOMMU POISON\0" for spill-over poisoning. 56 - */ 57 - 58 52 /********** fs/jbd/journal.c **********/ 59 53 #define JBD_POISON_FREE 0x5b 60 54 #define JBD2_POISON_FREE 0x5c
-1
include/linux/profile.h
··· 4 4 5 5 #include <linux/kernel.h> 6 6 #include <linux/init.h> 7 - #include <linux/cpumask.h> 8 7 #include <linux/cache.h> 9 8 10 9 #include <asm/errno.h>
-1
include/linux/rcupdate.h
··· 29 29 #include <linux/lockdep.h> 30 30 #include <linux/cleanup.h> 31 31 #include <asm/processor.h> 32 - #include <linux/cpumask.h> 33 32 #include <linux/context_tracking_irq.h> 34 33 35 34 #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
+4 -3
include/linux/sched.h
··· 13 13 #include <asm/processor.h> 14 14 #include <linux/thread_info.h> 15 15 #include <linux/preempt.h> 16 - #include <linux/cpumask.h> 16 + #include <linux/cpumask_types.h> 17 17 18 18 #include <linux/cache.h> 19 19 #include <linux/irqflags_types.h> ··· 1618 1618 { 1619 1619 static const char state_char[] = "RSDTtXZPI"; 1620 1620 1621 - BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); 1621 + BUILD_BUG_ON(TASK_REPORT_MAX * 2 != 1 << (sizeof(state_char) - 1)); 1622 1622 1623 1623 return state_char[state]; 1624 1624 } ··· 1792 1792 } 1793 1793 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 1794 1794 { 1795 - if (!cpumask_test_cpu(0, new_mask)) 1795 + /* Opencoded cpumask_test_cpu(0, new_mask) to avoid dependency on cpumask.h */ 1796 + if ((*cpumask_bits(new_mask) & 1) == 0) 1796 1797 return -EINVAL; 1797 1798 return 0; 1798 1799 }
-1
include/linux/seq_file.h
··· 7 7 #include <linux/string_helpers.h> 8 8 #include <linux/bug.h> 9 9 #include <linux/mutex.h> 10 - #include <linux/cpumask.h> 11 10 #include <linux/nodemask.h> 12 11 #include <linux/fs.h> 13 12 #include <linux/cred.h>
+2 -2
include/linux/soc/apple/rtkit.h
··· 69 69 * Initializes the internal state required to handle RTKit. This 70 70 * should usually be called within _probe. 71 71 * 72 - * @dev: Pointer to the device node this coprocessor is assocated with 72 + * @dev: Pointer to the device node this coprocessor is associated with 73 73 * @cookie: opaque cookie passed to all functions defined in rtkit_ops 74 74 * @mbox_name: mailbox name used to communicate with the co-processor 75 75 * @mbox_idx: mailbox index to be used if mbox_name is NULL ··· 83 83 * Non-devm version of devm_apple_rtkit_init. Must be freed with 84 84 * apple_rtkit_free. 85 85 * 86 - * @dev: Pointer to the device node this coprocessor is assocated with 86 + * @dev: Pointer to the device node this coprocessor is associated with 87 87 * @cookie: opaque cookie passed to all functions defined in rtkit_ops 88 88 * @mbox_name: mailbox name used to communicate with the co-processor 89 89 * @mbox_idx: mailbox index to be used if mbox_name is NULL
+1 -1
include/linux/stop_machine.h
··· 3 3 #define _LINUX_STOP_MACHINE 4 4 5 5 #include <linux/cpu.h> 6 - #include <linux/cpumask.h> 6 + #include <linux/cpumask_types.h> 7 7 #include <linux/smp.h> 8 8 #include <linux/list.h> 9 9
+1 -1
include/linux/torture.h
··· 14 14 #include <linux/cache.h> 15 15 #include <linux/spinlock.h> 16 16 #include <linux/threads.h> 17 - #include <linux/cpumask.h> 17 + #include <linux/cpumask_types.h> 18 18 #include <linux/seqlock.h> 19 19 #include <linux/lockdep.h> 20 20 #include <linux/completion.h>
-1
include/linux/tracepoint.h
··· 16 16 #include <linux/srcu.h> 17 17 #include <linux/errno.h> 18 18 #include <linux/types.h> 19 - #include <linux/cpumask.h> 20 19 #include <linux/rcupdate.h> 21 20 #include <linux/tracepoint-defs.h> 22 21 #include <linux/static_call.h>
+1 -1
include/linux/workqueue.h
··· 12 12 #include <linux/lockdep.h> 13 13 #include <linux/threads.h> 14 14 #include <linux/atomic.h> 15 - #include <linux/cpumask.h> 15 + #include <linux/cpumask_types.h> 16 16 #include <linux/rcupdate.h> 17 17 #include <linux/workqueue_types.h> 18 18
+1
kernel/backtracetest.c
··· 74 74 75 75 module_init(backtrace_regression_test); 76 76 module_exit(exitf); 77 + MODULE_DESCRIPTION("Simple stack backtrace regression test module"); 77 78 MODULE_LICENSE("GPL"); 78 79 MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
-1
kernel/crash_reserve.c
··· 13 13 #include <linux/memory.h> 14 14 #include <linux/cpuhotplug.h> 15 15 #include <linux/memblock.h> 16 - #include <linux/kexec.h> 17 16 #include <linux/kmemleak.h> 18 17 19 18 #include <asm/page.h>
+15 -14
kernel/events/core.c
··· 534 534 __this_cpu_write(running_sample_length, running_len); 535 535 536 536 /* 537 - * Note: this will be biased artifically low until we have 537 + * Note: this will be biased artificially low until we have 538 538 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us 539 539 * from having to maintain a count. 540 540 */ ··· 596 596 * 597 597 * Event groups make things a little more complicated, but not terribly so. The 598 598 * rules for a group are that if the group leader is OFF the entire group is 599 - * OFF, irrespecive of what the group member states are. This results in 599 + * OFF, irrespective of what the group member states are. This results in 600 600 * __perf_effective_state(). 601 601 * 602 - * A futher ramification is that when a group leader flips between OFF and 602 + * A further ramification is that when a group leader flips between OFF and 603 603 * !OFF, we need to update all group member times. 604 604 * 605 605 * ··· 891 891 int cpu, heap_size, ret = 0; 892 892 893 893 /* 894 - * Allow storage to have sufficent space for an iterator for each 894 + * Allow storage to have sufficient space for an iterator for each 895 895 * possibly nested cgroup plus an iterator for events with no cgroup. 
896 896 */ 897 897 for (heap_size = 1; css; css = css->parent) ··· 3671 3671 perf_cgroup_switch(next); 3672 3672 } 3673 3673 3674 - static bool perf_less_group_idx(const void *l, const void *r) 3674 + static bool perf_less_group_idx(const void *l, const void *r, void __always_unused *args) 3675 3675 { 3676 3676 const struct perf_event *le = *(const struct perf_event **)l; 3677 3677 const struct perf_event *re = *(const struct perf_event **)r; ··· 3679 3679 return le->group_index < re->group_index; 3680 3680 } 3681 3681 3682 - static void swap_ptr(void *l, void *r) 3682 + static void swap_ptr(void *l, void *r, void __always_unused *args) 3683 3683 { 3684 3684 void **lp = l, **rp = r; 3685 3685 3686 3686 swap(*lp, *rp); 3687 3687 } 3688 3688 3689 + DEFINE_MIN_HEAP(struct perf_event *, perf_event_min_heap); 3690 + 3689 3691 static const struct min_heap_callbacks perf_min_heap = { 3690 - .elem_size = sizeof(struct perf_event *), 3691 3692 .less = perf_less_group_idx, 3692 3693 .swp = swap_ptr, 3693 3694 }; 3694 3695 3695 - static void __heap_add(struct min_heap *heap, struct perf_event *event) 3696 + static void __heap_add(struct perf_event_min_heap *heap, struct perf_event *event) 3696 3697 { 3697 3698 struct perf_event **itrs = heap->data; 3698 3699 ··· 3727 3726 struct perf_cpu_context *cpuctx = NULL; 3728 3727 /* Space for per CPU and/or any CPU event iterators. 
*/ 3729 3728 struct perf_event *itrs[2]; 3730 - struct min_heap event_heap; 3729 + struct perf_event_min_heap event_heap; 3731 3730 struct perf_event **evt; 3732 3731 int ret; 3733 3732 ··· 3736 3735 3737 3736 if (!ctx->task) { 3738 3737 cpuctx = this_cpu_ptr(&perf_cpu_context); 3739 - event_heap = (struct min_heap){ 3738 + event_heap = (struct perf_event_min_heap){ 3740 3739 .data = cpuctx->heap, 3741 3740 .nr = 0, 3742 3741 .size = cpuctx->heap_size, ··· 3749 3748 css = &cpuctx->cgrp->css; 3750 3749 #endif 3751 3750 } else { 3752 - event_heap = (struct min_heap){ 3751 + event_heap = (struct perf_event_min_heap){ 3753 3752 .data = itrs, 3754 3753 .nr = 0, 3755 3754 .size = ARRAY_SIZE(itrs), ··· 3771 3770 perf_assert_pmu_disabled((*evt)->pmu_ctx->pmu); 3772 3771 } 3773 3772 3774 - min_heapify_all(&event_heap, &perf_min_heap); 3773 + min_heapify_all(&event_heap, &perf_min_heap, NULL); 3775 3774 3776 3775 while (event_heap.nr) { 3777 3776 ret = func(*evt, data); ··· 3780 3779 3781 3780 *evt = perf_event_groups_next(*evt, pmu); 3782 3781 if (*evt) 3783 - min_heapify(&event_heap, 0, &perf_min_heap); 3782 + min_heap_sift_down(&event_heap, 0, &perf_min_heap, NULL); 3784 3783 else 3785 - min_heap_pop(&event_heap, &perf_min_heap); 3784 + min_heap_pop(&event_heap, &perf_min_heap, NULL); 3786 3785 } 3787 3786 3788 3787 return 0;
+4 -3
kernel/fork.c
··· 208 208 unsigned int i; 209 209 210 210 for (i = 0; i < NR_CACHED_STACKS; i++) { 211 - if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) != NULL) 212 - continue; 213 - return true; 211 + struct vm_struct *tmp = NULL; 212 + 213 + if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm)) 214 + return true; 214 215 } 215 216 return false; 216 217 }
+1 -1
kernel/hung_task.c
··· 127 127 * Ok, the task did not get scheduled for more than 2 minutes, 128 128 * complain: 129 129 */ 130 - if (sysctl_hung_task_warnings) { 130 + if (sysctl_hung_task_warnings || hung_task_call_panic) { 131 131 if (sysctl_hung_task_warnings > 0) 132 132 sysctl_hung_task_warnings--; 133 133 pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
+79 -37
kernel/panic.c
··· 35 35 #include <linux/debugfs.h> 36 36 #include <linux/sysfs.h> 37 37 #include <linux/context_tracking.h> 38 + #include <linux/seq_buf.h> 38 39 #include <trace/events/error_report.h> 39 40 #include <asm/sections.h> 40 41 ··· 471 470 472 471 EXPORT_SYMBOL(panic); 473 472 473 + #define TAINT_FLAG(taint, _c_true, _c_false, _module) \ 474 + [ TAINT_##taint ] = { \ 475 + .c_true = _c_true, .c_false = _c_false, \ 476 + .module = _module, \ 477 + .desc = #taint, \ 478 + } 479 + 474 480 /* 475 481 * TAINT_FORCED_RMMOD could be a per-module flag but the module 476 482 * is being removed anyway. 477 483 */ 478 484 const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = { 479 - [ TAINT_PROPRIETARY_MODULE ] = { 'P', 'G', true }, 480 - [ TAINT_FORCED_MODULE ] = { 'F', ' ', true }, 481 - [ TAINT_CPU_OUT_OF_SPEC ] = { 'S', ' ', false }, 482 - [ TAINT_FORCED_RMMOD ] = { 'R', ' ', false }, 483 - [ TAINT_MACHINE_CHECK ] = { 'M', ' ', false }, 484 - [ TAINT_BAD_PAGE ] = { 'B', ' ', false }, 485 - [ TAINT_USER ] = { 'U', ' ', false }, 486 - [ TAINT_DIE ] = { 'D', ' ', false }, 487 - [ TAINT_OVERRIDDEN_ACPI_TABLE ] = { 'A', ' ', false }, 488 - [ TAINT_WARN ] = { 'W', ' ', false }, 489 - [ TAINT_CRAP ] = { 'C', ' ', true }, 490 - [ TAINT_FIRMWARE_WORKAROUND ] = { 'I', ' ', false }, 491 - [ TAINT_OOT_MODULE ] = { 'O', ' ', true }, 492 - [ TAINT_UNSIGNED_MODULE ] = { 'E', ' ', true }, 493 - [ TAINT_SOFTLOCKUP ] = { 'L', ' ', false }, 494 - [ TAINT_LIVEPATCH ] = { 'K', ' ', true }, 495 - [ TAINT_AUX ] = { 'X', ' ', true }, 496 - [ TAINT_RANDSTRUCT ] = { 'T', ' ', true }, 497 - [ TAINT_TEST ] = { 'N', ' ', true }, 485 + TAINT_FLAG(PROPRIETARY_MODULE, 'P', 'G', true), 486 + TAINT_FLAG(FORCED_MODULE, 'F', ' ', true), 487 + TAINT_FLAG(CPU_OUT_OF_SPEC, 'S', ' ', false), 488 + TAINT_FLAG(FORCED_RMMOD, 'R', ' ', false), 489 + TAINT_FLAG(MACHINE_CHECK, 'M', ' ', false), 490 + TAINT_FLAG(BAD_PAGE, 'B', ' ', false), 491 + TAINT_FLAG(USER, 'U', ' ', false), 492 + TAINT_FLAG(DIE, 'D', ' ', 
false), 493 + TAINT_FLAG(OVERRIDDEN_ACPI_TABLE, 'A', ' ', false), 494 + TAINT_FLAG(WARN, 'W', ' ', false), 495 + TAINT_FLAG(CRAP, 'C', ' ', true), 496 + TAINT_FLAG(FIRMWARE_WORKAROUND, 'I', ' ', false), 497 + TAINT_FLAG(OOT_MODULE, 'O', ' ', true), 498 + TAINT_FLAG(UNSIGNED_MODULE, 'E', ' ', true), 499 + TAINT_FLAG(SOFTLOCKUP, 'L', ' ', false), 500 + TAINT_FLAG(LIVEPATCH, 'K', ' ', true), 501 + TAINT_FLAG(AUX, 'X', ' ', true), 502 + TAINT_FLAG(RANDSTRUCT, 'T', ' ', true), 503 + TAINT_FLAG(TEST, 'N', ' ', true), 498 504 }; 505 + 506 + #undef TAINT_FLAG 507 + 508 + static void print_tainted_seq(struct seq_buf *s, bool verbose) 509 + { 510 + const char *sep = ""; 511 + int i; 512 + 513 + if (!tainted_mask) { 514 + seq_buf_puts(s, "Not tainted"); 515 + return; 516 + } 517 + 518 + seq_buf_printf(s, "Tainted: "); 519 + for (i = 0; i < TAINT_FLAGS_COUNT; i++) { 520 + const struct taint_flag *t = &taint_flags[i]; 521 + bool is_set = test_bit(i, &tainted_mask); 522 + char c = is_set ? t->c_true : t->c_false; 523 + 524 + if (verbose) { 525 + if (is_set) { 526 + seq_buf_printf(s, "%s[%c]=%s", sep, c, t->desc); 527 + sep = ", "; 528 + } 529 + } else { 530 + seq_buf_putc(s, c); 531 + } 532 + } 533 + } 534 + 535 + static const char *_print_tainted(bool verbose) 536 + { 537 + /* FIXME: what should the size be? */ 538 + static char buf[sizeof(taint_flags)]; 539 + struct seq_buf s; 540 + 541 + BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT); 542 + 543 + seq_buf_init(&s, buf, sizeof(buf)); 544 + 545 + print_tainted_seq(&s, verbose); 546 + 547 + return seq_buf_str(&s); 548 + } 499 549 500 550 /** 501 551 * print_tainted - return a string to represent the kernel taint state. 
··· 558 506 */ 559 507 const char *print_tainted(void) 560 508 { 561 - static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")]; 509 + return _print_tainted(false); 510 + } 562 511 563 - BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT); 564 - 565 - if (tainted_mask) { 566 - char *s; 567 - int i; 568 - 569 - s = buf + sprintf(buf, "Tainted: "); 570 - for (i = 0; i < TAINT_FLAGS_COUNT; i++) { 571 - const struct taint_flag *t = &taint_flags[i]; 572 - *s++ = test_bit(i, &tainted_mask) ? 573 - t->c_true : t->c_false; 574 - } 575 - *s = 0; 576 - } else 577 - snprintf(buf, sizeof(buf), "Not tainted"); 578 - 579 - return buf; 512 + /** 513 + * print_tainted_verbose - A more verbose version of print_tainted() 514 + */ 515 + const char *print_tainted_verbose(void) 516 + { 517 + return _print_tainted(true); 580 518 } 581 519 582 520 int test_taint(unsigned flag)
+1
kernel/resource_kunit.c
··· 149 149 }; 150 150 kunit_test_suite(resource_test_suite); 151 151 152 + MODULE_DESCRIPTION("I/O Port & Memory Resource manager unit tests"); 152 153 MODULE_LICENSE("GPL");
+1 -1
kernel/tsacct.c
··· 76 76 stats->ac_minflt = tsk->min_flt; 77 77 stats->ac_majflt = tsk->maj_flt; 78 78 79 - strncpy(stats->ac_comm, tsk->comm, sizeof(stats->ac_comm)); 79 + strscpy_pad(stats->ac_comm, tsk->comm); 80 80 } 81 81 82 82
+8 -3
kernel/watchdog_perf.c
··· 75 75 __this_cpu_write(last_timestamp, now); 76 76 return true; 77 77 } 78 - #else 79 - static inline bool watchdog_check_timestamp(void) 78 + 79 + static void watchdog_init_timestamp(void) 80 80 { 81 - return true; 81 + __this_cpu_write(nmi_rearmed, 0); 82 + __this_cpu_write(last_timestamp, ktime_get_mono_fast_ns()); 82 83 } 84 + #else 85 + static inline bool watchdog_check_timestamp(void) { return true; } 86 + static inline void watchdog_init_timestamp(void) { } 83 87 #endif 84 88 85 89 static struct perf_event_attr wd_hw_attr = { ··· 165 161 if (!atomic_fetch_inc(&watchdog_cpus)) 166 162 pr_info("Enabled. Permanently consumes one hw-PMU counter.\n"); 167 163 164 + watchdog_init_timestamp(); 168 165 perf_event_enable(this_cpu_read(watchdog_ev)); 169 166 } 170 167
+3 -1
lib/Kconfig.debug
··· 1043 1043 Set the timeout value (in seconds) until a reboot occurs when 1044 1044 the kernel panics. If n = 0, then we wait forever. A timeout 1045 1045 value n > 0 will wait n seconds before rebooting, while a timeout 1046 - value n < 0 will reboot immediately. 1046 + value n < 0 will reboot immediately. This setting can be overridden 1047 + with the kernel command line option panic=, and from userspace via 1048 + /proc/sys/kernel/panic. 1047 1049 1048 1050 config LOCKUP_DETECTOR 1049 1051 bool
+1
lib/asn1_encoder.c
··· 449 449 } 450 450 EXPORT_SYMBOL_GPL(asn1_encode_boolean); 451 451 452 + MODULE_DESCRIPTION("Simple encoder primitives for ASN.1 BER/DER/CER"); 452 453 MODULE_LICENSE("GPL");
+1
lib/atomic64_test.c
··· 273 273 module_init(test_atomics_init); 274 274 module_exit(test_atomics_exit); 275 275 276 + MODULE_DESCRIPTION("Testsuite for atomic64_t functions"); 276 277 MODULE_LICENSE("GPL");
+5 -15
lib/bch.c
··· 479 479 /* find suitable row for elimination */ 480 480 for (r = p; r < m; r++) { 481 481 if (rows[r] & mask) { 482 - if (r != p) { 483 - tmp = rows[r]; 484 - rows[r] = rows[p]; 485 - rows[p] = tmp; 486 - } 482 + if (r != p) 483 + swap(rows[r], rows[p]); 487 484 rem = r+1; 488 485 break; 489 486 } ··· 796 799 static struct gf_poly *gf_poly_gcd(struct bch_control *bch, struct gf_poly *a, 797 800 struct gf_poly *b) 798 801 { 799 - struct gf_poly *tmp; 800 - 801 802 dbg("gcd(%s,%s)=", gf_poly_str(a), gf_poly_str(b)); 802 803 803 - if (a->deg < b->deg) { 804 - tmp = b; 805 - b = a; 806 - a = tmp; 807 - } 804 + if (a->deg < b->deg) 805 + swap(a, b); 808 806 809 807 while (b->deg > 0) { 810 808 gf_poly_mod(bch, a, b, NULL); 811 - tmp = b; 812 - b = a; 813 - a = tmp; 809 + swap(a, b); 814 810 } 815 811 816 812 dbg("%s\n", gf_poly_str(a));
+1
lib/bitfield_kunit.c
··· 151 151 kunit_test_suites(&bitfields_test_suite); 152 152 153 153 MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); 154 + MODULE_DESCRIPTION("Test cases for bitfield helpers"); 154 155 MODULE_LICENSE("GPL");
+14
lib/buildid.c
··· 73 73 Elf32_Phdr *phdr; 74 74 int i; 75 75 76 + /* 77 + * FIXME 78 + * Neither ELF spec nor ELF loader require that program headers 79 + * start immediately after ELF header. 80 + */ 81 + if (ehdr->e_phoff != sizeof(Elf32_Ehdr)) 82 + return -EINVAL; 76 83 /* only supports phdr that fits in one page */ 77 84 if (ehdr->e_phnum > 78 85 (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr)) ··· 105 98 Elf64_Phdr *phdr; 106 99 int i; 107 100 101 + /* 102 + * FIXME 103 + * Neither ELF spec nor ELF loader require that program headers 104 + * start immediately after ELF header. 105 + */ 106 + if (ehdr->e_phoff != sizeof(Elf64_Ehdr)) 107 + return -EINVAL; 108 108 /* only supports phdr that fits in one page */ 109 109 if (ehdr->e_phnum > 110 110 (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
+1
lib/checksum_kunit.c
··· 639 639 kunit_test_suites(&checksum_test_suite); 640 640 641 641 MODULE_AUTHOR("Noah Goldstein <goldstein.w.n@gmail.com>"); 642 + MODULE_DESCRIPTION("Test cases csum_* APIs"); 642 643 MODULE_LICENSE("GPL");
+1
lib/cmdline_kunit.c
··· 153 153 }; 154 154 kunit_test_suite(cmdline_test_suite); 155 155 156 + MODULE_DESCRIPTION("Test cases for API provided by cmdline.c"); 156 157 MODULE_LICENSE("GPL");
+1
lib/dhry_run.c
··· 83 83 module_init(dhry_init); 84 84 85 85 MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>"); 86 + MODULE_DESCRIPTION("Dhrystone benchmark test module"); 86 87 MODULE_LICENSE("GPL");
+7 -2
lib/dump_stack.c
··· 54 54 */ 55 55 void dump_stack_print_info(const char *log_lvl) 56 56 { 57 - printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s" BUILD_ID_FMT "\n", 58 - log_lvl, raw_smp_processor_id(), current->pid, current->comm, 57 + printk("%sCPU: %d UID: %u PID: %d Comm: %.20s %s%s %s %.*s" BUILD_ID_FMT "\n", 58 + log_lvl, raw_smp_processor_id(), 59 + __kuid_val(current_real_cred()->euid), 60 + current->pid, current->comm, 59 61 kexec_crash_loaded() ? "Kdump: loaded " : "", 60 62 print_tainted(), 61 63 init_utsname()->release, 62 64 (int)strcspn(init_utsname()->version, " "), 63 65 init_utsname()->version, BUILD_ID_VAL); 66 + 67 + if (get_taint()) 68 + printk("%s%s\n", log_lvl, print_tainted_verbose()); 64 69 65 70 if (dump_stack_arch_desc_str[0] != '\0') 66 71 printk("%sHardware name: %s\n",
+1
lib/fortify_kunit.c
··· 1093 1093 1094 1094 kunit_test_suite(fortify_test_suite); 1095 1095 1096 + MODULE_DESCRIPTION("Runtime test cases for CONFIG_FORTIFY_SOURCE"); 1096 1097 MODULE_LICENSE("GPL");
+1
lib/hashtable_test.c
··· 314 314 315 315 kunit_test_suites(&hashtable_test_module); 316 316 317 + MODULE_DESCRIPTION("KUnit test for the Kernel Hashtable structures"); 317 318 MODULE_LICENSE("GPL");
+1
lib/is_signed_type_kunit.c
··· 46 46 47 47 kunit_test_suite(is_signed_type_test_suite); 48 48 49 + MODULE_DESCRIPTION("is_signed_type() KUnit test suite"); 49 50 MODULE_LICENSE("Dual MIT/GPL");
+1
lib/math/rational.c
··· 108 108 109 109 EXPORT_SYMBOL(rational_best_approximation); 110 110 111 + MODULE_DESCRIPTION("Rational fraction support library"); 111 112 MODULE_LICENSE("GPL v2");
+1
lib/memcpy_kunit.c
··· 510 510 511 511 kunit_test_suite(memcpy_test_suite); 512 512 513 + MODULE_DESCRIPTION("test cases for memcpy(), memmove(), and memset()"); 513 514 MODULE_LICENSE("GPL");
+1
lib/overflow_kunit.c
··· 1237 1237 1238 1238 kunit_test_suite(overflow_test_suite); 1239 1239 1240 + MODULE_DESCRIPTION("Test cases for arithmetic overflow checks"); 1240 1241 MODULE_LICENSE("Dual MIT/GPL");
+39 -5
lib/percpu_counter.c
··· 73 73 EXPORT_SYMBOL(percpu_counter_set); 74 74 75 75 /* 76 - * local_irq_save() is needed to make the function irq safe: 77 - * - The slow path would be ok as protected by an irq-safe spinlock. 78 - * - this_cpu_add would be ok as it is irq-safe by definition. 79 - * But: 80 - * The decision slow path/fast path and the actual update must be atomic, too. 76 + * Add to a counter while respecting batch size. 77 + * 78 + * There are 2 implementations, both dealing with the following problem: 79 + * 80 + * The decision slow path/fast path and the actual update must be atomic. 81 81 * Otherwise a call in process context could check the current values and 82 82 * decide that the fast path can be used. If now an interrupt occurs before 83 83 * the this_cpu_add(), and the interrupt updates this_cpu(*fbc->counters), 84 84 * then the this_cpu_add() that is executed after the interrupt has completed 85 85 * can produce values larger than "batch" or even overflows. 86 + */ 87 + #ifdef CONFIG_HAVE_CMPXCHG_LOCAL 88 + /* 89 + * Safety against interrupts is achieved in 2 ways: 90 + * 1. the fast path uses local cmpxchg (note: no lock prefix) 91 + * 2. the slow path operates with interrupts disabled 92 + */ 93 + void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) 94 + { 95 + s64 count; 96 + unsigned long flags; 97 + 98 + count = this_cpu_read(*fbc->counters); 99 + do { 100 + if (unlikely(abs(count + amount) >= batch)) { 101 + raw_spin_lock_irqsave(&fbc->lock, flags); 102 + /* 103 + * Note: by now we might have migrated to another CPU 104 + * or the value might have changed. 
105 + */ 106 + count = __this_cpu_read(*fbc->counters); 107 + fbc->count += count + amount; 108 + __this_cpu_sub(*fbc->counters, count); 109 + raw_spin_unlock_irqrestore(&fbc->lock, flags); 110 + return; 111 + } 112 + } while (!this_cpu_try_cmpxchg(*fbc->counters, &count, count + amount)); 113 + } 114 + #else 115 + /* 116 + * local_irq_save() is used to make the function irq safe: 117 + * - The slow path would be ok as protected by an irq-safe spinlock. 118 + * - this_cpu_add would be ok as it is irq-safe by definition. 86 119 */ 87 120 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) 88 121 { ··· 134 101 } 135 102 local_irq_restore(flags); 136 103 } 104 + #endif 137 105 EXPORT_SYMBOL(percpu_counter_add_batch); 138 106 139 107 /*
+39 -3
lib/plist.c
··· 47 47 48 48 plist_check_prev_next(top, prev, next); 49 49 while (next != top) { 50 - prev = next; 51 - next = prev->next; 50 + WRITE_ONCE(prev, next); 51 + WRITE_ONCE(next, prev->next); 52 52 plist_check_prev_next(top, prev, next); 53 53 } 54 54 } ··· 72 72 */ 73 73 void plist_add(struct plist_node *node, struct plist_head *head) 74 74 { 75 - struct plist_node *first, *iter, *prev = NULL; 75 + struct plist_node *first, *iter, *prev = NULL, *last, *reverse_iter; 76 76 struct list_head *node_next = &head->node_list; 77 77 78 78 plist_check_head(head); ··· 83 83 goto ins_node; 84 84 85 85 first = iter = plist_first(head); 86 + last = reverse_iter = list_entry(first->prio_list.prev, struct plist_node, prio_list); 86 87 87 88 do { 88 89 if (node->prio < iter->prio) { 89 90 node_next = &iter->node_list; 90 91 break; 92 + } else if (node->prio >= reverse_iter->prio) { 93 + prev = reverse_iter; 94 + iter = list_entry(reverse_iter->prio_list.next, 95 + struct plist_node, prio_list); 96 + if (likely(reverse_iter != last)) 97 + node_next = &iter->node_list; 98 + break; 91 99 } 92 100 93 101 prev = iter; 94 102 iter = list_entry(iter->prio_list.next, 103 + struct plist_node, prio_list); 104 + reverse_iter = list_entry(reverse_iter->prio_list.prev, 95 105 struct plist_node, prio_list); 96 106 } while (iter != first); 97 107 ··· 265 255 } 266 256 267 257 printk(KERN_DEBUG "end plist test\n"); 258 + 259 + /* Worst case test for plist_add() */ 260 + unsigned int test_data[241]; 261 + 262 + for (i = 0; i < ARRAY_SIZE(test_data); i++) 263 + test_data[i] = i; 264 + 265 + ktime_t start, end, time_elapsed = 0; 266 + 267 + plist_head_init(&test_head); 268 + 269 + for (i = 0; i < ARRAY_SIZE(test_node); i++) { 270 + plist_node_init(test_node + i, 0); 271 + test_node[i].prio = test_data[i]; 272 + } 273 + 274 + for (i = 0; i < ARRAY_SIZE(test_node); i++) { 275 + if (plist_node_empty(test_node + i)) { 276 + start = ktime_get(); 277 + plist_add(test_node + i, &test_head); 278 + end = 
ktime_get(); 279 + time_elapsed += (end - start); 280 + } 281 + } 282 + 283 + pr_debug("plist_add worst case test time elapsed %lld\n", time_elapsed); 268 284 return 0; 269 285 } 270 286
+4 -4
lib/rbtree.c
··· 297 297 * / \ / \ 298 298 * N S --> N sl 299 299 * / \ \ 300 - * sl Sr S 300 + * sl sr S 301 301 * \ 302 - * Sr 302 + * sr 303 303 * 304 304 * Note: p might be red, and then both 305 305 * p and sl are red after rotation(which ··· 312 312 * / \ / \ 313 313 * N sl --> P S 314 314 * \ / \ 315 - * S N Sr 315 + * S N sr 316 316 * \ 317 - * Sr 317 + * sr 318 318 */ 319 319 tmp1 = tmp2->rb_right; 320 320 WRITE_ONCE(sibling->rb_left, tmp1);
+1
lib/siphash_kunit.c
··· 194 194 kunit_test_suite(siphash_test_suite); 195 195 196 196 MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); 197 + MODULE_DESCRIPTION("Test cases for siphash.c"); 197 198 MODULE_LICENSE("Dual BSD/GPL");
+7 -7
lib/sort.c
··· 5 5 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average, 6 6 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case. 7 7 * 8 - * Glibc qsort() manages n*log2(n) - 1.26*n for random inputs (1.63*n 8 + * Quicksort manages n*log2(n) - 1.26*n for random inputs (1.63*n 9 9 * better) at the expense of stack usage and much larger code to avoid 10 10 * quicksort's O(n^2) worst case. 11 11 */ 12 - 13 - #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 12 15 13 #include <linux/types.h> 16 14 #include <linux/export.h> ··· 250 252 a = size << shift; 251 253 n -= size; 252 254 do_swap(base + a, base + n, size, swap_func, priv); 253 - } else if (n > size) { /* Sorting: Extract root */ 254 - n -= size; 255 - do_swap(base, base + n, size, swap_func, priv); 256 - } else { /* Sort complete */ 255 + } else { /* Sort complete */ 257 256 break; 258 257 } 259 258 ··· 280 285 do_swap(base + b, base + c, size, swap_func, priv); 281 286 } 282 287 } 288 + 289 + n -= size; 290 + do_swap(base, base + n, size, swap_func, priv); 291 + if (n == size * 2 && do_cmp(base, base + size, cmp_func, priv) > 0) 292 + do_swap(base, base + size, size, swap_func, priv); 283 293 } 284 294 EXPORT_SYMBOL(sort_r); 285 295
+1
lib/stackinit_kunit.c
··· 471 471 472 472 kunit_test_suites(&stackinit_test_suite); 473 473 474 + MODULE_DESCRIPTION("Test cases for compiler-based stack variable zeroing"); 474 475 MODULE_LICENSE("GPL");
+1
lib/test-kstrtox.c
··· 732 732 return -EINVAL; 733 733 } 734 734 module_init(test_kstrtox_init); 735 + MODULE_DESCRIPTION("Module test for kstrto*() APIs"); 735 736 MODULE_LICENSE("Dual BSD/GPL");
+1
lib/test_bits.c
··· 72 72 }; 73 73 kunit_test_suite(bits_test_suite); 74 74 75 + MODULE_DESCRIPTION("Test cases for functions and macros in bits.h"); 75 76 MODULE_LICENSE("GPL");
+1
lib/test_blackhole_dev.c
··· 96 96 module_exit(test_blackholedev_exit); 97 97 98 98 MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>"); 99 + MODULE_DESCRIPTION("module test of the blackhole_dev"); 99 100 MODULE_LICENSE("GPL");
+2 -2
lib/test_bpf.c
··· 1740 1740 /* Result unsuccessful */ 1741 1741 insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4); 1742 1742 insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4); 1743 - insns[i++] = BPF_ZEXT_REG(R0), /* Zext always inserted by verifier */ 1743 + insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */ 1744 1744 insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4); 1745 1745 1746 1746 insns[i++] = BPF_JMP32_REG(BPF_JEQ, R1, R3, 2); ··· 1754 1754 /* Result successful */ 1755 1755 i += __bpf_ld_imm64(&insns[i], R0, dst); 1756 1756 insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4); 1757 - insns[i++] = BPF_ZEXT_REG(R0), /* Zext always inserted by verifier */ 1757 + insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */ 1758 1758 insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4); 1759 1759 1760 1760 insns[i++] = BPF_JMP32_REG(BPF_JEQ, R2, R3, 2);
+1
lib/test_firmware.c
··· 1567 1567 module_exit(test_firmware_exit); 1568 1568 1569 1569 MODULE_AUTHOR("Kees Cook <keescook@chromium.org>"); 1570 + MODULE_DESCRIPTION("interface to trigger and test firmware loading"); 1570 1571 MODULE_LICENSE("GPL");
+1
lib/test_fpu_glue.c
··· 59 59 module_init(test_fpu_init); 60 60 module_exit(test_fpu_exit); 61 61 62 + MODULE_DESCRIPTION("Test cases for floating point operations"); 62 63 MODULE_LICENSE("GPL");
+1
lib/test_free_pages.c
··· 44 44 module_init(m_in); 45 45 module_exit(m_ex); 46 46 MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>"); 47 + MODULE_DESCRIPTION("Check that free_pages() doesn't leak memory"); 47 48 MODULE_LICENSE("GPL");
+1
lib/test_hash.c
··· 235 235 236 236 kunit_test_suite(hash_test_suite); 237 237 238 + MODULE_DESCRIPTION("Test cases for <linux/hash.h> and <linux/stringhash.h>"); 238 239 MODULE_LICENSE("GPL");
+1
lib/test_hexdump.c
··· 253 253 module_exit(test_hexdump_exit); 254 254 255 255 MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); 256 + MODULE_DESCRIPTION("Test cases for lib/hexdump.c module"); 256 257 MODULE_LICENSE("Dual BSD/GPL");
+1
lib/test_ida.c
··· 214 214 module_init(ida_checks); 215 215 module_exit(ida_exit); 216 216 MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>"); 217 + MODULE_DESCRIPTION("Test the IDA API"); 217 218 MODULE_LICENSE("GPL");
+1
lib/test_kmod.c
··· 1223 1223 module_exit(test_kmod_exit); 1224 1224 1225 1225 MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>"); 1226 + MODULE_DESCRIPTION("kmod stress test driver"); 1226 1227 MODULE_LICENSE("GPL");
+2 -1
lib/test_kprobes.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later 2 2 /* 3 - * test_kprobes.c - simple sanity test for *probes 3 + * test_kprobes.c - simple sanity test for k*probes 4 4 * 5 5 * Copyright IBM Corp. 2008 6 6 */ ··· 400 400 401 401 kunit_test_suites(&kprobes_test_suite); 402 402 403 + MODULE_DESCRIPTION("simple sanity test for k*probes"); 403 404 MODULE_LICENSE("GPL");
+1
lib/test_linear_ranges.c
··· 216 216 217 217 kunit_test_suites(&range_test_module); 218 218 219 + MODULE_DESCRIPTION("KUnit test for the linear_ranges helper"); 219 220 MODULE_LICENSE("GPL");
+1
lib/test_list_sort.c
··· 119 119 120 120 kunit_test_suites(&list_sort_suite); 121 121 122 + MODULE_DESCRIPTION("list_sort() KUnit test suite"); 122 123 MODULE_LICENSE("GPL");
+1
lib/test_memcat_p.c
··· 112 112 module_init(test_memcat_p_init); 113 113 module_exit(test_memcat_p_exit); 114 114 115 + MODULE_DESCRIPTION("Test cases for memcat_p() in lib/memcat_p.c"); 115 116 MODULE_LICENSE("GPL");
+1
lib/test_meminit.c
··· 436 436 } 437 437 module_init(test_meminit_init); 438 438 439 + MODULE_DESCRIPTION("Test cases for SL[AOU]B/page initialization at alloc/free time"); 439 440 MODULE_LICENSE("GPL");
+56 -20
lib/test_min_heap.c
··· 11 11 #include <linux/printk.h> 12 12 #include <linux/random.h> 13 13 14 - static __init bool less_than(const void *lhs, const void *rhs) 14 + DEFINE_MIN_HEAP(int, min_heap_test); 15 + 16 + static __init bool less_than(const void *lhs, const void *rhs, void __always_unused *args) 15 17 { 16 18 return *(int *)lhs < *(int *)rhs; 17 19 } 18 20 19 - static __init bool greater_than(const void *lhs, const void *rhs) 21 + static __init bool greater_than(const void *lhs, const void *rhs, void __always_unused *args) 20 22 { 21 23 return *(int *)lhs > *(int *)rhs; 22 24 } 23 25 24 - static __init void swap_ints(void *lhs, void *rhs) 26 + static __init void swap_ints(void *lhs, void *rhs, void __always_unused *args) 25 27 { 26 28 int temp = *(int *)lhs; 27 29 ··· 32 30 } 33 31 34 32 static __init int pop_verify_heap(bool min_heap, 35 - struct min_heap *heap, 33 + struct min_heap_test *heap, 36 34 const struct min_heap_callbacks *funcs) 37 35 { 38 36 int *values = heap->data; ··· 40 38 int last; 41 39 42 40 last = values[0]; 43 - min_heap_pop(heap, funcs); 41 + min_heap_pop(heap, funcs, NULL); 44 42 while (heap->nr > 0) { 45 43 if (min_heap) { 46 44 if (last > values[0]) { ··· 56 54 } 57 55 } 58 56 last = values[0]; 59 - min_heap_pop(heap, funcs); 57 + min_heap_pop(heap, funcs, NULL); 60 58 } 61 59 return err; 62 60 } ··· 65 63 { 66 64 int values[] = { 3, 1, 2, 4, 0x8000000, 0x7FFFFFF, 0, 67 65 -3, -1, -2, -4, 0x8000000, 0x7FFFFFF }; 68 - struct min_heap heap = { 66 + struct min_heap_test heap = { 69 67 .data = values, 70 68 .nr = ARRAY_SIZE(values), 71 69 .size = ARRAY_SIZE(values), 72 70 }; 73 71 struct min_heap_callbacks funcs = { 74 - .elem_size = sizeof(int), 75 72 .less = min_heap ? less_than : greater_than, 76 73 .swp = swap_ints, 77 74 }; 78 75 int i, err; 79 76 80 77 /* Test with known set of values. 
*/ 81 - min_heapify_all(&heap, &funcs); 78 + min_heapify_all(&heap, &funcs, NULL); 82 79 err = pop_verify_heap(min_heap, &heap, &funcs); 83 80 84 81 ··· 86 85 for (i = 0; i < heap.nr; i++) 87 86 values[i] = get_random_u32(); 88 87 89 - min_heapify_all(&heap, &funcs); 88 + min_heapify_all(&heap, &funcs, NULL); 90 89 err += pop_verify_heap(min_heap, &heap, &funcs); 91 90 92 91 return err; ··· 97 96 const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0, 98 97 -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF }; 99 98 int values[ARRAY_SIZE(data)]; 100 - struct min_heap heap = { 99 + struct min_heap_test heap = { 101 100 .data = values, 102 101 .nr = 0, 103 102 .size = ARRAY_SIZE(values), 104 103 }; 105 104 struct min_heap_callbacks funcs = { 106 - .elem_size = sizeof(int), 107 105 .less = min_heap ? less_than : greater_than, 108 106 .swp = swap_ints, 109 107 }; ··· 110 110 111 111 /* Test with known set of values copied from data. */ 112 112 for (i = 0; i < ARRAY_SIZE(data); i++) 113 - min_heap_push(&heap, &data[i], &funcs); 113 + min_heap_push(&heap, &data[i], &funcs, NULL); 114 114 115 115 err = pop_verify_heap(min_heap, &heap, &funcs); 116 116 117 117 /* Test with randomly generated values. */ 118 118 while (heap.nr < heap.size) { 119 119 temp = get_random_u32(); 120 - min_heap_push(&heap, &temp, &funcs); 120 + min_heap_push(&heap, &temp, &funcs, NULL); 121 121 } 122 122 err += pop_verify_heap(min_heap, &heap, &funcs); 123 123 ··· 129 129 const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0, 130 130 -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF }; 131 131 int values[ARRAY_SIZE(data)]; 132 - struct min_heap heap = { 132 + struct min_heap_test heap = { 133 133 .data = values, 134 134 .nr = 0, 135 135 .size = ARRAY_SIZE(values), 136 136 }; 137 137 struct min_heap_callbacks funcs = { 138 - .elem_size = sizeof(int), 139 138 .less = min_heap ? less_than : greater_than, 140 139 .swp = swap_ints, 141 140 }; ··· 143 144 /* Fill values with data to pop and replace. 
*/ 144 145 temp = min_heap ? 0x80000000 : 0x7FFFFFFF; 145 146 for (i = 0; i < ARRAY_SIZE(data); i++) 146 - min_heap_push(&heap, &temp, &funcs); 147 + min_heap_push(&heap, &temp, &funcs, NULL); 147 148 148 149 /* Test with known set of values copied from data. */ 149 150 for (i = 0; i < ARRAY_SIZE(data); i++) 150 - min_heap_pop_push(&heap, &data[i], &funcs); 151 + min_heap_pop_push(&heap, &data[i], &funcs, NULL); 151 152 152 153 err = pop_verify_heap(min_heap, &heap, &funcs); 153 154 154 155 heap.nr = 0; 155 156 for (i = 0; i < ARRAY_SIZE(data); i++) 156 - min_heap_push(&heap, &temp, &funcs); 157 + min_heap_push(&heap, &temp, &funcs, NULL); 157 158 158 159 /* Test with randomly generated values. */ 159 160 for (i = 0; i < ARRAY_SIZE(data); i++) { 160 161 temp = get_random_u32(); 161 - min_heap_pop_push(&heap, &temp, &funcs); 162 + min_heap_pop_push(&heap, &temp, &funcs, NULL); 162 163 } 164 + err += pop_verify_heap(min_heap, &heap, &funcs); 165 + 166 + return err; 167 + } 168 + 169 + static __init int test_heap_del(bool min_heap) 170 + { 171 + int values[] = { 3, 1, 2, 4, 0x8000000, 0x7FFFFFF, 0, 172 + -3, -1, -2, -4, 0x8000000, 0x7FFFFFF }; 173 + struct min_heap_test heap; 174 + 175 + min_heap_init(&heap, values, ARRAY_SIZE(values)); 176 + heap.nr = ARRAY_SIZE(values); 177 + struct min_heap_callbacks funcs = { 178 + .less = min_heap ? less_than : greater_than, 179 + .swp = swap_ints, 180 + }; 181 + int i, err; 182 + 183 + /* Test with known set of values. */ 184 + min_heapify_all(&heap, &funcs, NULL); 185 + for (i = 0; i < ARRAY_SIZE(values) / 2; i++) 186 + min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL); 187 + err = pop_verify_heap(min_heap, &heap, &funcs); 188 + 189 + 190 + /* Test with randomly generated values. 
*/ 191 + heap.nr = ARRAY_SIZE(values); 192 + for (i = 0; i < heap.nr; i++) 193 + values[i] = get_random_u32(); 194 + min_heapify_all(&heap, &funcs, NULL); 195 + 196 + for (i = 0; i < ARRAY_SIZE(values) / 2; i++) 197 + min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL); 163 198 err += pop_verify_heap(min_heap, &heap, &funcs); 164 199 165 200 return err; ··· 209 176 err += test_heap_push(false); 210 177 err += test_heap_pop_push(true); 211 178 err += test_heap_pop_push(false); 179 + err += test_heap_del(true); 180 + err += test_heap_del(false); 212 181 if (err) { 213 182 pr_err("test failed with %d errors\n", err); 214 183 return -EINVAL; ··· 226 191 } 227 192 module_exit(test_min_heap_exit); 228 193 194 + MODULE_DESCRIPTION("Test cases for the min max heap"); 229 195 MODULE_LICENSE("GPL");
+1
lib/test_module.c
··· 31 31 module_exit(test_module_exit); 32 32 33 33 MODULE_AUTHOR("Kees Cook <keescook@chromium.org>"); 34 + MODULE_DESCRIPTION("module loading subsystem test module"); 34 35 MODULE_LICENSE("GPL");
+2 -1
lib/test_ref_tracker.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Referrence tracker self test. 3 + * Reference tracker self test. 4 4 * 5 5 * Copyright (c) 2021 Eric Dumazet <edumazet@google.com> 6 6 */ ··· 112 112 module_init(test_ref_tracker_init); 113 113 module_exit(test_ref_tracker_exit); 114 114 115 + MODULE_DESCRIPTION("Reference tracker self test"); 115 116 MODULE_LICENSE("GPL v2");
+14 -1
lib/test_sort.c
··· 29 29 30 30 sort(a, TEST_LEN, sizeof(*a), cmpint, NULL); 31 31 32 - for (i = 0; i < TEST_LEN-1; i++) 32 + for (i = 0; i < TEST_LEN - 1; i++) 33 + KUNIT_ASSERT_LE(test, a[i], a[i + 1]); 34 + 35 + r = 48; 36 + 37 + for (i = 0; i < TEST_LEN - 1; i++) { 38 + r = (r * 725861) % 6599; 39 + a[i] = r; 40 + } 41 + 42 + sort(a, TEST_LEN - 1, sizeof(*a), cmpint, NULL); 43 + 44 + for (i = 0; i < TEST_LEN - 2; i++) 33 45 KUNIT_ASSERT_LE(test, a[i], a[i + 1]); 34 46 } 35 47 ··· 57 45 58 46 kunit_test_suites(&sort_test_suite); 59 47 48 + MODULE_DESCRIPTION("sort() KUnit test suite"); 60 49 MODULE_LICENSE("GPL");
+1
lib/test_static_key_base.c
··· 57 57 module_exit(test_static_key_base_exit); 58 58 59 59 MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>"); 60 + MODULE_DESCRIPTION("Kernel module to support testing static keys"); 60 61 MODULE_LICENSE("GPL");
+1
lib/test_static_keys.c
··· 236 236 module_exit(test_static_key_exit); 237 237 238 238 MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>"); 239 + MODULE_DESCRIPTION("Kernel module for testing static keys"); 239 240 MODULE_LICENSE("GPL");
+1
lib/test_sysctl.c
··· 280 280 module_exit(test_sysctl_exit); 281 281 282 282 MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>"); 283 + MODULE_DESCRIPTION("proc sysctl test driver"); 283 284 MODULE_LICENSE("GPL");
+1
lib/test_uuid.c
··· 130 130 module_exit(test_uuid_exit); 131 131 132 132 MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); 133 + MODULE_DESCRIPTION("Test cases for lib/uuid.c module"); 133 134 MODULE_LICENSE("Dual BSD/GPL");
+1
lib/ts_bm.c
··· 216 216 textsearch_unregister(&bm_ops); 217 217 } 218 218 219 + MODULE_DESCRIPTION("Boyer-Moore text search implementation"); 219 220 MODULE_LICENSE("GPL"); 220 221 221 222 module_init(init_bm);
+1
lib/ts_fsm.c
··· 331 331 textsearch_unregister(&fsm_ops); 332 332 } 333 333 334 + MODULE_DESCRIPTION("naive finite state machine text search"); 334 335 MODULE_LICENSE("GPL"); 335 336 336 337 module_init(init_fsm);
+1
lib/ts_kmp.c
··· 147 147 textsearch_unregister(&kmp_ops); 148 148 } 149 149 150 + MODULE_DESCRIPTION("Knuth-Morris-Pratt text search implementation"); 150 151 MODULE_LICENSE("GPL"); 151 152 152 153 module_init(init_kmp);
+1
lib/zlib_deflate/deflate_syms.c
··· 17 17 EXPORT_SYMBOL(zlib_deflateInit2); 18 18 EXPORT_SYMBOL(zlib_deflateEnd); 19 19 EXPORT_SYMBOL(zlib_deflateReset); 20 + MODULE_DESCRIPTION("Data compression using the deflation algorithm"); 20 21 MODULE_LICENSE("GPL");
+1 -1
net/netfilter/nf_conntrack_core.c
··· 1090 1090 * A conntrack entry can be inserted to the connection tracking table 1091 1091 * if there is no existing entry with an identical tuple. 1092 1092 * 1093 - * If there is one, @skb (and the assocated, unconfirmed conntrack) has 1093 + * If there is one, @skb (and the associated, unconfirmed conntrack) has 1094 1094 * to be dropped. In case @skb is retransmitted, next conntrack lookup 1095 1095 * will find the already-existing entry. 1096 1096 *
+1 -1
net/tipc/socket.c
··· 657 657 } 658 658 659 659 /** 660 - * __tipc_bind - associate or disassocate TIPC name(s) with a socket 660 + * __tipc_bind - associate or disassociate TIPC name(s) with a socket 661 661 * @sock: socket structure 662 662 * @skaddr: socket address describing name(s) and desired operation 663 663 * @alen: size of socket address data structure
+1
samples/kfifo/bytestream-example.c
··· 191 191 192 192 module_init(example_init); 193 193 module_exit(example_exit); 194 + MODULE_DESCRIPTION("Sample kfifo byte stream implementation"); 194 195 MODULE_LICENSE("GPL"); 195 196 MODULE_AUTHOR("Stefani Seibold <stefani@seibold.net>");
+1
samples/kfifo/dma-example.c
··· 138 138 139 139 module_init(example_init); 140 140 module_exit(example_exit); 141 + MODULE_DESCRIPTION("Sample fifo dma implementation"); 141 142 MODULE_LICENSE("GPL"); 142 143 MODULE_AUTHOR("Stefani Seibold <stefani@seibold.net>");
+1
samples/kfifo/inttype-example.c
··· 182 182 183 183 module_init(example_init); 184 184 module_exit(example_exit); 185 + MODULE_DESCRIPTION("Sample kfifo int type implementation"); 185 186 MODULE_LICENSE("GPL"); 186 187 MODULE_AUTHOR("Stefani Seibold <stefani@seibold.net>");
+1
samples/kfifo/record-example.c
··· 198 198 199 199 module_init(example_init); 200 200 module_exit(example_exit); 201 + MODULE_DESCRIPTION("Sample dynamic sized record fifo implementation"); 201 202 MODULE_LICENSE("GPL"); 202 203 MODULE_AUTHOR("Stefani Seibold <stefani@seibold.net>");
+25 -1
scripts/checkpatch.pl
··· 28 28 my %verbose_emitted = (); 29 29 my $tree = 1; 30 30 my $chk_signoff = 1; 31 + my $chk_fixes_tag = 1; 31 32 my $chk_patch = 1; 32 33 my $tst_only; 33 34 my $emacs = 0; ··· 89 88 -v, --verbose verbose mode 90 89 --no-tree run without a kernel tree 91 90 --no-signoff do not check for 'Signed-off-by' line 91 + --no-fixes-tag do not check for 'Fixes:' tag 92 92 --patch treat FILE as patchfile (default) 93 93 --emacs emacs compile window format 94 94 --terse one line per report ··· 297 295 'v|verbose!' => \$verbose, 298 296 'tree!' => \$tree, 299 297 'signoff!' => \$chk_signoff, 298 + 'fixes-tag!' => \$chk_fixes_tag, 300 299 'patch!' => \$chk_patch, 301 300 'emacs!' => \$emacs, 302 301 'terse!' => \$terse, ··· 1260 1257 } 1261 1258 1262 1259 $chk_signoff = 0 if ($file); 1260 + $chk_fixes_tag = 0 if ($file); 1263 1261 1264 1262 my @rawlines = (); 1265 1263 my @lines = (); ··· 2640 2636 2641 2637 our $clean = 1; 2642 2638 my $signoff = 0; 2639 + my $fixes_tag = 0; 2640 + my $is_revert = 0; 2641 + my $needs_fixes_tag = ""; 2643 2642 my $author = ''; 2644 2643 my $authorsignoff = 0; 2645 2644 my $author_sob = ''; ··· 3196 3189 } 3197 3190 } 3198 3191 3192 + # These indicate a bug fix 3193 + if (!$in_header_lines && !$is_patch && 3194 + $line =~ /^This reverts commit/) { 3195 + $is_revert = 1; 3196 + } 3197 + 3198 + if (!$in_header_lines && !$is_patch && 3199 + $line =~ /((?:(?:BUG: K.|UB)SAN: |Call Trace:|stable\@|syzkaller))/) { 3200 + $needs_fixes_tag = $1; 3201 + } 3199 3202 3200 3203 # Check Fixes: styles is correct 3201 3204 if (!$in_header_lines && ··· 3218 3201 my $id_length = 1; 3219 3202 my $id_case = 1; 3220 3203 my $title_has_quotes = 0; 3204 + $fixes_tag = 1; 3221 3205 3222 3206 if ($line =~ /(\s*fixes:?)\s+([0-9a-f]{5,})\s+($balanced_parens)/i) { 3223 3207 my $tag = $1; ··· 3876 3858 } 3877 3859 3878 3860 if ($msg_type ne "" && 3879 - (show_type("LONG_LINE") || show_type($msg_type))) { 3861 + show_type("LONG_LINE") && show_type($msg_type)) { 3880 3862 
my $msg_level = \&WARN; 3881 3863 $msg_level = \&CHK if ($file); 3882 3864 &{$msg_level}($msg_type, ··· 7714 7696 if (!$is_patch && $filename !~ /cover-letter\.patch$/) { 7715 7697 ERROR("NOT_UNIFIED_DIFF", 7716 7698 "Does not appear to be a unified-diff format patch\n"); 7699 + } 7700 + if ($is_patch && $has_commit_log && $chk_fixes_tag) { 7701 + if ($needs_fixes_tag ne "" && !$is_revert && !$fixes_tag) { 7702 + WARN("MISSING_FIXES_TAG", 7703 + "The commit message has '$needs_fixes_tag', perhaps it also needs a 'Fixes:' tag?\n"); 7704 + } 7717 7705 } 7718 7706 if ($is_patch && $has_commit_log && $chk_signoff) { 7719 7707 if ($signoff == 0) {
+5 -1
scripts/decode_stacktrace.sh
··· 30 30 31 31 READELF=${UTIL_PREFIX}readelf${UTIL_SUFFIX} 32 32 ADDR2LINE=${UTIL_PREFIX}addr2line${UTIL_SUFFIX} 33 + NM=${UTIL_PREFIX}nm${UTIL_SUFFIX} 33 34 34 35 if [[ $1 == "-r" ]] ; then 35 36 vmlinux="" ··· 159 158 if [[ $aarray_support == true && "${cache[$module,$name]+isset}" == "isset" ]]; then 160 159 local base_addr=${cache[$module,$name]} 161 160 else 162 - local base_addr=$(nm "$objfile" 2>/dev/null | awk '$3 == "'$name'" && ($2 == "t" || $2 == "T") {print $1; exit}') 161 + local base_addr=$(${NM} "$objfile" 2>/dev/null | awk '$3 == "'$name'" && ($2 == "t" || $2 == "T") {print $1; exit}') 163 162 if [[ $base_addr == "" ]] ; then 164 163 # address not found 165 164 return ··· 283 282 284 283 if [[ ${words[$last]} =~ \[([^]]+)\] ]]; then 285 284 module=${words[$last]} 285 + # some traces format is "(%pS)", which like "(foo+0x0/0x1 [bar])" 286 + # so $module may like "[bar])". Strip the right parenthesis firstly 287 + module=${module%\)} 286 288 module=${module#\[} 287 289 module=${module%\]} 288 290 modbuildid=${module#* }
+14 -9
scripts/gdb/linux/mm.py
··· 33 33 def __init__(self): 34 34 self.SUBSECTION_SHIFT = 21 35 35 self.SEBSECTION_SIZE = 1 << self.SUBSECTION_SHIFT 36 - self.MODULES_VSIZE = 128 * 1024 * 1024 36 + self.MODULES_VSIZE = 2 * 1024 * 1024 * 1024 37 37 38 38 if constants.LX_CONFIG_ARM64_64K_PAGES: 39 39 self.SECTION_SIZE_BITS = 29 ··· 47 47 48 48 self.VA_BITS = constants.LX_CONFIG_ARM64_VA_BITS 49 49 if self.VA_BITS > 48: 50 - self.VA_BITS_MIN = 48 51 - self.vabits_actual = gdb.parse_and_eval('vabits_actual') 50 + if constants.LX_CONFIG_ARM64_16K_PAGES: 51 + self.VA_BITS_MIN = 47 52 + else: 53 + self.VA_BITS_MIN = 48 54 + tcr_el1 = gdb.execute("info registers $TCR_EL1", to_string=True) 55 + tcr_el1 = int(tcr_el1.split()[1], 16) 56 + self.vabits_actual = 64 - ((tcr_el1 >> 16) & 63) 52 57 else: 53 58 self.VA_BITS_MIN = self.VA_BITS 54 59 self.vabits_actual = self.VA_BITS ··· 64 59 if str(constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER).isdigit(): 65 60 self.MAX_ORDER = constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER 66 61 else: 67 - self.MAX_ORDER = 11 62 + self.MAX_ORDER = 10 68 63 69 - self.MAX_ORDER_NR_PAGES = 1 << (self.MAX_ORDER - 1) 64 + self.MAX_ORDER_NR_PAGES = 1 << (self.MAX_ORDER) 70 65 self.PFN_SECTION_SHIFT = self.SECTION_SIZE_BITS - self.PAGE_SHIFT 71 66 self.NR_MEM_SECTIONS = 1 << self.SECTIONS_SHIFT 72 67 self.PAGES_PER_SECTION = 1 << self.PFN_SECTION_SHIFT ··· 94 89 self.MODULES_VADDR = self._PAGE_END(self.VA_BITS_MIN) 95 90 self.MODULES_END = self.MODULES_VADDR + self.MODULES_VSIZE 96 91 97 - self.VMEMMAP_SHIFT = (self.PAGE_SHIFT - self.STRUCT_PAGE_MAX_SHIFT) 98 - self.VMEMMAP_SIZE = ((self._PAGE_END(self.VA_BITS_MIN) - self.PAGE_OFFSET) >> self.VMEMMAP_SHIFT) 99 - self.VMEMMAP_START = (-(1 << (self.VA_BITS - self.VMEMMAP_SHIFT))) & 0xffffffffffffffff 100 - self.VMEMMAP_END = self.VMEMMAP_START + self.VMEMMAP_SIZE 92 + self.VMEMMAP_RANGE = self._PAGE_END(self.VA_BITS_MIN) - self.PAGE_OFFSET 93 + self.VMEMMAP_SIZE = (self.VMEMMAP_RANGE >> self.PAGE_SHIFT) * self.struct_page_size 94 + 
self.VMEMMAP_END = (-(1 * 1024 * 1024 * 1024)) & 0xffffffffffffffff 95 + self.VMEMMAP_START = self.VMEMMAP_END - self.VMEMMAP_SIZE 101 96 102 97 self.VMALLOC_START = self.MODULES_END 103 98 self.VMALLOC_END = self.VMEMMAP_START - 256 * 1024 * 1024
+6 -2
scripts/gdb/linux/stackdepot.py
··· 27 27 offset = parts['offset'] << DEPOT_STACK_ALIGN 28 28 pools_num = gdb.parse_and_eval('pools_num') 29 29 30 - if parts['pool_index'] > pools_num: 30 + if handle == 0: 31 + raise gdb.GdbError("handle is 0\n") 32 + 33 + pool_index = parts['pool_index_plus_1'] - 1 34 + if pool_index >= pools_num: 31 35 gdb.write("pool index %d out of bounds (%d) for stack id 0x%08x\n" % (parts['pool_index'], pools_num, handle)) 32 36 return gdb.Value(0), 0 33 37 34 38 stack_pools = gdb.parse_and_eval('stack_pools') 35 39 36 40 try: 37 - pool = stack_pools[parts['pool_index']] 41 + pool = stack_pools[pool_index] 38 42 stack = (pool + gdb.Value(offset).cast(utils.get_size_t_type())).cast(stack_record_type.get_type().pointer()) 39 43 size = int(stack['size'].cast(utils.get_ulong_type())) 40 44 return stack['entries'], size
+4 -15
scripts/mod/modpost.c
··· 776 776 777 777 778 778 #define ALL_INIT_DATA_SECTIONS \ 779 - ".init.setup", ".init.rodata", ".meminit.rodata", \ 780 - ".init.data", ".meminit.data" 779 + ".init.setup", ".init.rodata", ".init.data" 781 780 782 781 #define ALL_PCI_INIT_SECTIONS \ 783 782 ".pci_fixup_early", ".pci_fixup_header", ".pci_fixup_final", \ 784 783 ".pci_fixup_enable", ".pci_fixup_resume", \ 785 784 ".pci_fixup_resume_early", ".pci_fixup_suspend" 786 785 787 - #define ALL_XXXINIT_SECTIONS ".meminit.*" 788 - 789 - #define ALL_INIT_SECTIONS INIT_SECTIONS, ALL_XXXINIT_SECTIONS 786 + #define ALL_INIT_SECTIONS ".init.*" 790 787 #define ALL_EXIT_SECTIONS ".exit.*" 791 788 792 789 #define DATA_SECTIONS ".data", ".data.rel" ··· 794 797 ".fixup", ".entry.text", ".exception.text", \ 795 798 ".coldtext", ".softirqentry.text" 796 799 797 - #define INIT_SECTIONS ".init.*" 798 - 799 - #define ALL_TEXT_SECTIONS ".init.text", ".meminit.text", ".exit.text", \ 800 + #define ALL_TEXT_SECTIONS ".init.text", ".exit.text", \ 800 801 TEXT_SECTIONS, OTHER_TEXT_SECTIONS 801 802 802 803 enum mismatch { ··· 834 839 .bad_tosec = { ALL_INIT_SECTIONS, ALL_EXIT_SECTIONS, NULL }, 835 840 .mismatch = TEXTDATA_TO_ANY_INIT_EXIT, 836 841 }, 837 - /* Do not reference init code/data from meminit code/data */ 838 - { 839 - .fromsec = { ALL_XXXINIT_SECTIONS, NULL }, 840 - .bad_tosec = { INIT_SECTIONS, NULL }, 841 - .mismatch = XXXINIT_TO_SOME_INIT, 842 - }, 843 842 /* Do not use exit code/data from init code */ 844 843 { 845 844 .fromsec = { ALL_INIT_SECTIONS, NULL }, ··· 848 859 }, 849 860 { 850 861 .fromsec = { ALL_PCI_INIT_SECTIONS, NULL }, 851 - .bad_tosec = { INIT_SECTIONS, NULL }, 862 + .bad_tosec = { ALL_INIT_SECTIONS, NULL }, 852 863 .mismatch = ANY_INIT_TO_ANY_EXIT, 853 864 }, 854 865 {
+3
scripts/spelling.txt
··· 176 176 assigments||assignments 177 177 assistent||assistant 178 178 assocaited||associated 179 + assocated||associated 179 180 assocating||associating 180 181 assocation||association 182 + assocative||associative 181 183 associcated||associated 182 184 assotiated||associated 183 185 asssert||assert ··· 545 543 direectly||directly 546 544 diregard||disregard 547 545 disassocation||disassociation 546 + disassocative||disassociative 548 547 disapear||disappear 549 548 disapeared||disappeared 550 549 disappared||disappeared
-10
tools/lib/list_sort.c
··· 52 52 struct list_head *a, struct list_head *b) 53 53 { 54 54 struct list_head *tail = head; 55 - u8 count = 0; 56 55 57 56 for (;;) { 58 57 /* if equal, take 'a' -- important for sort stability */ ··· 77 78 /* Finish linking remainder of list b on to tail */ 78 79 tail->next = b; 79 80 do { 80 - /* 81 - * If the merge is highly unbalanced (e.g. the input is 82 - * already sorted), this loop may run many iterations. 83 - * Continue callbacks to the client even though no 84 - * element comparison is needed, so the client's cmp() 85 - * routine can invoke cond_resched() periodically. 86 - */ 87 - if (unlikely(!++count)) 88 - cmp(priv, b, b); 89 81 b->prev = tail; 90 82 tail = b; 91 83 b = b->next;
+1
tools/testing/radix-tree/idr-test.c
··· 424 424 #define module_init(x) 425 425 #define module_exit(x) 426 426 #define MODULE_AUTHOR(x) 427 + #define MODULE_DESCRIPTION(X) 427 428 #define MODULE_LICENSE(x) 428 429 #define dump_stack() assert(0) 429 430 void ida_dump(struct ida *);
+1
tools/testing/radix-tree/maple.c
··· 19 19 #define module_init(x) 20 20 #define module_exit(x) 21 21 #define MODULE_AUTHOR(x) 22 + #define MODULE_DESCRIPTION(X) 22 23 #define MODULE_LICENSE(x) 23 24 #define dump_stack() assert(0) 24 25
+1
tools/testing/radix-tree/xarray.c
··· 10 10 #define module_init(x) 11 11 #define module_exit(x) 12 12 #define MODULE_AUTHOR(x) 13 + #define MODULE_DESCRIPTION(X) 13 14 #define MODULE_LICENSE(x) 14 15 #define dump_stack() assert(0) 15 16
+131 -5
tools/testing/selftests/filesystems/eventfd/eventfd_test.c
··· 13 13 #include <sys/eventfd.h> 14 14 #include "../../kselftest_harness.h" 15 15 16 + #define EVENTFD_TEST_ITERATIONS 100000UL 17 + 16 18 struct error { 17 19 int code; 18 20 char msg[512]; ··· 42 40 return syscall(__NR_eventfd2, count, flags); 43 41 } 44 42 45 - TEST(eventfd01) 43 + TEST(eventfd_check_flag_rdwr) 46 44 { 47 45 int fd, flags; 48 46 ··· 56 54 close(fd); 57 55 } 58 56 59 - TEST(eventfd02) 57 + TEST(eventfd_check_flag_cloexec) 60 58 { 61 59 int fd, flags; 62 60 ··· 70 68 close(fd); 71 69 } 72 70 73 - TEST(eventfd03) 71 + TEST(eventfd_check_flag_nonblock) 74 72 { 75 73 int fd, flags; 76 74 ··· 85 83 close(fd); 86 84 } 87 85 88 - TEST(eventfd04) 86 + TEST(eventfd_chek_flag_cloexec_and_nonblock) 89 87 { 90 88 int fd, flags; 91 89 ··· 163 161 return 0; 164 162 } 165 163 166 - TEST(eventfd05) 164 + TEST(eventfd_check_flag_semaphore) 167 165 { 168 166 struct error err = {0}; 169 167 int fd, ret; ··· 181 179 ksft_print_msg("eventfd-semaphore check failed, msg: %s\n", 182 180 err.msg); 183 181 EXPECT_EQ(ret, 0); 182 + 183 + close(fd); 184 + } 185 + 186 + /* 187 + * A write(2) fails with the error EINVAL if the size of the supplied buffer 188 + * is less than 8 bytes, or if an attempt is made to write the value 189 + * 0xffffffffffffffff. 190 + */ 191 + TEST(eventfd_check_write) 192 + { 193 + uint64_t value = 1; 194 + ssize_t size; 195 + int fd; 196 + 197 + fd = sys_eventfd2(0, 0); 198 + ASSERT_GE(fd, 0); 199 + 200 + size = write(fd, &value, sizeof(int)); 201 + EXPECT_EQ(size, -1); 202 + EXPECT_EQ(errno, EINVAL); 203 + 204 + size = write(fd, &value, sizeof(value)); 205 + EXPECT_EQ(size, sizeof(value)); 206 + 207 + value = (uint64_t)-1; 208 + size = write(fd, &value, sizeof(value)); 209 + EXPECT_EQ(size, -1); 210 + EXPECT_EQ(errno, EINVAL); 211 + 212 + close(fd); 213 + } 214 + 215 + /* 216 + * A read(2) fails with the error EINVAL if the size of the supplied buffer is 217 + * less than 8 bytes. 
218 + */ 219 + TEST(eventfd_check_read) 220 + { 221 + uint64_t value; 222 + ssize_t size; 223 + int fd; 224 + 225 + fd = sys_eventfd2(1, 0); 226 + ASSERT_GE(fd, 0); 227 + 228 + size = read(fd, &value, sizeof(int)); 229 + EXPECT_EQ(size, -1); 230 + EXPECT_EQ(errno, EINVAL); 231 + 232 + size = read(fd, &value, sizeof(value)); 233 + EXPECT_EQ(size, sizeof(value)); 234 + EXPECT_EQ(value, 1); 235 + 236 + close(fd); 237 + } 238 + 239 + 240 + /* 241 + * If EFD_SEMAPHORE was not specified and the eventfd counter has a nonzero 242 + * value, then a read(2) returns 8 bytes containing that value, and the 243 + * counter's value is reset to zero. 244 + * If the eventfd counter is zero at the time of the call to read(2), then the 245 + * call fails with the error EAGAIN if the file descriptor has been made nonblocking. 246 + */ 247 + TEST(eventfd_check_read_with_nonsemaphore) 248 + { 249 + uint64_t value; 250 + ssize_t size; 251 + int fd; 252 + int i; 253 + 254 + fd = sys_eventfd2(0, EFD_NONBLOCK); 255 + ASSERT_GE(fd, 0); 256 + 257 + value = 1; 258 + for (i = 0; i < EVENTFD_TEST_ITERATIONS; i++) { 259 + size = write(fd, &value, sizeof(value)); 260 + EXPECT_EQ(size, sizeof(value)); 261 + } 262 + 263 + size = read(fd, &value, sizeof(value)); 264 + EXPECT_EQ(size, sizeof(uint64_t)); 265 + EXPECT_EQ(value, EVENTFD_TEST_ITERATIONS); 266 + 267 + size = read(fd, &value, sizeof(value)); 268 + EXPECT_EQ(size, -1); 269 + EXPECT_EQ(errno, EAGAIN); 270 + 271 + close(fd); 272 + } 273 + 274 + /* 275 + * If EFD_SEMAPHORE was specified and the eventfd counter has a nonzero value, 276 + * then a read(2) returns 8 bytes containing the value 1, and the counter's 277 + * value is decremented by 1. 278 + * If the eventfd counter is zero at the time of the call to read(2), then the 279 + * call fails with the error EAGAIN if the file descriptor has been made nonblocking. 
280 + */ 281 + TEST(eventfd_check_read_with_semaphore) 282 + { 283 + uint64_t value; 284 + ssize_t size; 285 + int fd; 286 + int i; 287 + 288 + fd = sys_eventfd2(0, EFD_SEMAPHORE|EFD_NONBLOCK); 289 + ASSERT_GE(fd, 0); 290 + 291 + value = 1; 292 + for (i = 0; i < EVENTFD_TEST_ITERATIONS; i++) { 293 + size = write(fd, &value, sizeof(value)); 294 + EXPECT_EQ(size, sizeof(value)); 295 + } 296 + 297 + for (i = 0; i < EVENTFD_TEST_ITERATIONS; i++) { 298 + size = read(fd, &value, sizeof(value)); 299 + EXPECT_EQ(size, sizeof(value)); 300 + EXPECT_EQ(value, 1); 301 + } 302 + 303 + size = read(fd, &value, sizeof(value)); 304 + EXPECT_EQ(size, -1); 305 + EXPECT_EQ(errno, EAGAIN); 184 306 185 307 close(fd); 186 308 }
+4 -2
tools/testing/selftests/mqueue/mq_perf_tests.c
··· 323 323 void *cont_thread(void *arg) 324 324 { 325 325 char buff[MSG_SIZE]; 326 - int i, priority; 326 + int i; 327 + unsigned int priority; 327 328 328 329 for (i = 0; i < num_cpus_to_pin; i++) 329 330 if (cpu_threads[i] == pthread_self()) ··· 426 425 void *perf_test_thread(void *arg) 427 426 { 428 427 char buff[MSG_SIZE]; 429 - int prio_out, prio_in; 428 + int prio_out; 429 + unsigned int prio_in; 430 430 int i; 431 431 clockid_t clock; 432 432 pthread_t *t;
+2
tools/testing/selftests/proc/.gitignore
··· 2 2 /fd-001-lookup 3 3 /fd-002-posix-eq 4 4 /fd-003-kthread 5 + /proc-2-is-kthread 5 6 /proc-fsconfig-hidepid 6 7 /proc-loadavg-001 7 8 /proc-multiple-procfs ··· 10 9 /proc-pid-vm 11 10 /proc-self-map-files-001 12 11 /proc-self-map-files-002 12 + /proc-self-isnt-kthread 13 13 /proc-self-syscall 14 14 /proc-self-wchan 15 15 /proc-subset-pid
+2
tools/testing/selftests/proc/Makefile
··· 7 7 TEST_GEN_PROGS += fd-001-lookup 8 8 TEST_GEN_PROGS += fd-002-posix-eq 9 9 TEST_GEN_PROGS += fd-003-kthread 10 + TEST_GEN_PROGS += proc-2-is-kthread 10 11 TEST_GEN_PROGS += proc-loadavg-001 11 12 TEST_GEN_PROGS += proc-empty-vm 12 13 TEST_GEN_PROGS += proc-pid-vm 13 14 TEST_GEN_PROGS += proc-self-map-files-001 14 15 TEST_GEN_PROGS += proc-self-map-files-002 16 + TEST_GEN_PROGS += proc-self-isnt-kthread 15 17 TEST_GEN_PROGS += proc-self-syscall 16 18 TEST_GEN_PROGS += proc-self-wchan 17 19 TEST_GEN_PROGS += proc-subset-pid
+53
tools/testing/selftests/proc/proc-2-is-kthread.c
··· 1 + /* 2 + * Copyright (c) 2024 Alexey Dobriyan <adobriyan@gmail.com> 3 + * 4 + * Permission to use, copy, modify, and distribute this software for any 5 + * purpose with or without fee is hereby granted, provided that the above 6 + * copyright notice and this permission notice appear in all copies. 7 + * 8 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 + */ 16 + /* Test that kernel thread is reported as such. */ 17 + #undef NDEBUG 18 + #include <assert.h> 19 + #include <errno.h> 20 + #include <fcntl.h> 21 + #include <string.h> 22 + #include <unistd.h> 23 + 24 + int main(void) 25 + { 26 + /* 27 + * The following solutions don't really work: 28 + * 29 + * 1) jit kernel module which creates kernel thread: 30 + * test becomes arch-specific, 31 + * problems with mandatory module signing, 32 + * problems with lockdown mode, 33 + * doesn't work with CONFIG_MODULES=n at all, 34 + * kthread creation API is formally unstable internal kernel API, 35 + * need a mechanism to report test kernel thread's PID back, 36 + * 37 + * 2) ksoftirqd/0 and kswapd0 look like stable enough kernel threads, 38 + * but their PIDs are unstable. 39 + * 40 + * Check against kthreadd which always seem to exist under pid 2. 41 + */ 42 + int fd = open("/proc/2/status", O_RDONLY); 43 + assert(fd >= 0); 44 + 45 + char buf[4096]; 46 + ssize_t rv = read(fd, buf, sizeof(buf)); 47 + assert(0 <= rv && rv < sizeof(buf)); 48 + buf[rv] = '\0'; 49 + 50 + assert(strstr(buf, "Kthread:\t1\n")); 51 + 52 + return 0; 53 + }
-3
tools/testing/selftests/proc/proc-empty-vm.c
··· 381 381 382 382 assert(rv >= 0); 383 383 assert(rv <= sizeof(buf)); 384 - if (0) { 385 - write(1, buf, rv); 386 - } 387 384 388 385 const char *p = buf; 389 386 const char *const end = p + rv;
+37
tools/testing/selftests/proc/proc-self-isnt-kthread.c
··· 1 + /* 2 + * Copyright (c) 2024 Alexey Dobriyan <adobriyan@gmail.com> 3 + * 4 + * Permission to use, copy, modify, and distribute this software for any 5 + * purpose with or without fee is hereby granted, provided that the above 6 + * copyright notice and this permission notice appear in all copies. 7 + * 8 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 + */ 16 + /* Test that userspace program is not kernel thread. */ 17 + #undef NDEBUG 18 + #include <assert.h> 19 + #include <fcntl.h> 20 + #include <string.h> 21 + #include <unistd.h> 22 + 23 + int main(void) 24 + { 25 + int fd = open("/proc/self/status", O_RDONLY); 26 + assert(fd >= 0); 27 + 28 + char buf[4096]; 29 + ssize_t rv = read(fd, buf, sizeof(buf)); 30 + assert(0 <= rv && rv < sizeof(buf)); 31 + buf[rv] = '\0'; 32 + 33 + /* This test is very much not kernel thread. */ 34 + assert(strstr(buf, "Kthread:\t0\n")); 35 + 36 + return 0; 37 + }