Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

maple_tree: Prefilled sheaf conversion and testing

Use prefilled sheaves instead of bulk allocations. This should speed up
the allocations and the return path of unused allocations.

Remove the push and pop of nodes from the maple state as this is now
handled by the slab layer with sheaves.

Tests that exercised the removed node push/pop allocation machinery have
been dropped, since that functionality no longer exists in the tree.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>

authored by

Liam R. Howlett and committed by
Vlastimil Babka
9b05890a fdbebab1

+88 -710
+4 -2
include/linux/maple_tree.h
··· 442 442 struct maple_enode *node; /* The node containing this entry */ 443 443 unsigned long min; /* The minimum index of this node - implied pivot min */ 444 444 unsigned long max; /* The maximum index of this node - implied pivot max */ 445 - struct maple_alloc *alloc; /* Allocated nodes for this operation */ 445 + struct slab_sheaf *sheaf; /* Allocated nodes for this operation */ 446 + unsigned long node_request; /* The number of nodes to allocate for this operation */ 446 447 enum maple_status status; /* The status of the state (active, start, none, etc) */ 447 448 unsigned char depth; /* depth of tree descent during write */ 448 449 unsigned char offset; ··· 491 490 .status = ma_start, \ 492 491 .min = 0, \ 493 492 .max = ULONG_MAX, \ 494 - .alloc = NULL, \ 493 + .node_request = 0, \ 494 + .sheaf = NULL, \ 495 495 .mas_flags = 0, \ 496 496 .store_type = wr_invalid, \ 497 497 }
+62 -264
lib/maple_tree.c
··· 182 182 kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes); 183 183 } 184 184 185 + static void mt_return_sheaf(struct slab_sheaf *sheaf) 186 + { 187 + kmem_cache_return_sheaf(maple_node_cache, GFP_NOWAIT, sheaf); 188 + } 189 + 190 + static struct slab_sheaf *mt_get_sheaf(gfp_t gfp, int count) 191 + { 192 + return kmem_cache_prefill_sheaf(maple_node_cache, gfp, count); 193 + } 194 + 195 + static int mt_refill_sheaf(gfp_t gfp, struct slab_sheaf **sheaf, 196 + unsigned int size) 197 + { 198 + return kmem_cache_refill_sheaf(maple_node_cache, gfp, sheaf, size); 199 + } 200 + 185 201 /* 186 202 * ma_free_rcu() - Use rcu callback to free a maple node 187 203 * @node: The node to free ··· 588 572 589 573 node = mte_to_node(enode); 590 574 return ma_dead_node(node); 591 - } 592 - 593 - /* 594 - * mas_allocated() - Get the number of nodes allocated in a maple state. 595 - * @mas: The maple state 596 - * 597 - * The ma_state alloc member is overloaded to hold a pointer to the first 598 - * allocated node or to the number of requested nodes to allocate. If bit 0 is 599 - * set, then the alloc contains the number of requested nodes. If there is an 600 - * allocated node, then the total allocated nodes is in that node. 601 - * 602 - * Return: The total number of nodes allocated 603 - */ 604 - static inline unsigned long mas_allocated(const struct ma_state *mas) 605 - { 606 - if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) 607 - return 0; 608 - 609 - return mas->alloc->total; 610 - } 611 - 612 - /* 613 - * mas_set_alloc_req() - Set the requested number of allocations. 614 - * @mas: the maple state 615 - * @count: the number of allocations. 616 - * 617 - * The requested number of allocations is either in the first allocated node, 618 - * located in @mas->alloc->request_count, or directly in @mas->alloc if there is 619 - * no allocated node. Set the request either in the node or do the necessary 620 - * encoding to store in @mas->alloc directly. 
621 - */ 622 - static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count) 623 - { 624 - if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) { 625 - if (!count) 626 - mas->alloc = NULL; 627 - else 628 - mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U); 629 - return; 630 - } 631 - 632 - mas->alloc->request_count = count; 633 - } 634 - 635 - /* 636 - * mas_alloc_req() - get the requested number of allocations. 637 - * @mas: The maple state 638 - * 639 - * The alloc count is either stored directly in @mas, or in 640 - * @mas->alloc->request_count if there is at least one node allocated. Decode 641 - * the request count if it's stored directly in @mas->alloc. 642 - * 643 - * Return: The allocation request count. 644 - */ 645 - static inline unsigned int mas_alloc_req(const struct ma_state *mas) 646 - { 647 - if ((unsigned long)mas->alloc & 0x1) 648 - return (unsigned long)(mas->alloc) >> 1; 649 - else if (mas->alloc) 650 - return mas->alloc->request_count; 651 - return 0; 652 575 } 653 576 654 577 /* ··· 1075 1120 */ 1076 1121 static inline struct maple_node *mas_pop_node(struct ma_state *mas) 1077 1122 { 1078 - struct maple_alloc *ret, *node = mas->alloc; 1079 - unsigned long total = mas_allocated(mas); 1080 - unsigned int req = mas_alloc_req(mas); 1123 + struct maple_node *ret; 1081 1124 1082 - /* nothing or a request pending. */ 1083 - if (WARN_ON(!total)) 1125 + if (WARN_ON_ONCE(!mas->sheaf)) 1084 1126 return NULL; 1085 1127 1086 - if (total == 1) { 1087 - /* single allocation in this ma_state */ 1088 - mas->alloc = NULL; 1089 - ret = node; 1090 - goto single_node; 1091 - } 1092 - 1093 - if (node->node_count == 1) { 1094 - /* Single allocation in this node. 
*/ 1095 - mas->alloc = node->slot[0]; 1096 - mas->alloc->total = node->total - 1; 1097 - ret = node; 1098 - goto new_head; 1099 - } 1100 - node->total--; 1101 - ret = node->slot[--node->node_count]; 1102 - node->slot[node->node_count] = NULL; 1103 - 1104 - single_node: 1105 - new_head: 1106 - if (req) { 1107 - req++; 1108 - mas_set_alloc_req(mas, req); 1109 - } 1110 - 1128 + ret = kmem_cache_alloc_from_sheaf(maple_node_cache, GFP_NOWAIT, mas->sheaf); 1111 1129 memset(ret, 0, sizeof(*ret)); 1112 - return (struct maple_node *)ret; 1113 - } 1114 1130 1115 - /* 1116 - * mas_push_node() - Push a node back on the maple state allocation. 1117 - * @mas: The maple state 1118 - * @used: The used maple node 1119 - * 1120 - * Stores the maple node back into @mas->alloc for reuse. Updates allocated and 1121 - * requested node count as necessary. 1122 - */ 1123 - static inline void mas_push_node(struct ma_state *mas, struct maple_node *used) 1124 - { 1125 - struct maple_alloc *reuse = (struct maple_alloc *)used; 1126 - struct maple_alloc *head = mas->alloc; 1127 - unsigned long count; 1128 - unsigned int requested = mas_alloc_req(mas); 1129 - 1130 - count = mas_allocated(mas); 1131 - 1132 - reuse->request_count = 0; 1133 - reuse->node_count = 0; 1134 - if (count) { 1135 - if (head->node_count < MAPLE_ALLOC_SLOTS) { 1136 - head->slot[head->node_count++] = reuse; 1137 - head->total++; 1138 - goto done; 1139 - } 1140 - reuse->slot[0] = head; 1141 - reuse->node_count = 1; 1142 - } 1143 - 1144 - reuse->total = count + 1; 1145 - mas->alloc = reuse; 1146 - done: 1147 - if (requested > 1) 1148 - mas_set_alloc_req(mas, requested - 1); 1131 + return ret; 1149 1132 } 1150 1133 1151 1134 /* ··· 1093 1200 */ 1094 1201 static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp) 1095 1202 { 1096 - struct maple_alloc *node; 1097 - unsigned long allocated = mas_allocated(mas); 1098 - unsigned int requested = mas_alloc_req(mas); 1099 - unsigned int count; 1100 - void **slots = NULL; 1101 
- unsigned int max_req = 0; 1203 + if (unlikely(mas->sheaf)) { 1204 + unsigned long refill = mas->node_request; 1102 1205 1103 - if (!requested) 1104 - return; 1105 - 1106 - mas_set_alloc_req(mas, 0); 1107 - if (mas->mas_flags & MA_STATE_PREALLOC) { 1108 - if (allocated) 1206 + if (kmem_cache_sheaf_size(mas->sheaf) >= refill) { 1207 + mas->node_request = 0; 1109 1208 return; 1110 - WARN_ON(!allocated); 1111 - } 1112 - 1113 - if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) { 1114 - node = (struct maple_alloc *)mt_alloc_one(gfp); 1115 - if (!node) 1116 - goto nomem_one; 1117 - 1118 - if (allocated) { 1119 - node->slot[0] = mas->alloc; 1120 - node->node_count = 1; 1121 - } else { 1122 - node->node_count = 0; 1123 1209 } 1124 1210 1125 - mas->alloc = node; 1126 - node->total = ++allocated; 1127 - node->request_count = 0; 1128 - requested--; 1211 + if (mt_refill_sheaf(gfp, &mas->sheaf, refill)) 1212 + goto error; 1213 + 1214 + mas->node_request = 0; 1215 + return; 1129 1216 } 1130 1217 1131 - node = mas->alloc; 1132 - while (requested) { 1133 - max_req = MAPLE_ALLOC_SLOTS - node->node_count; 1134 - slots = (void **)&node->slot[node->node_count]; 1135 - max_req = min(requested, max_req); 1136 - count = mt_alloc_bulk(gfp, max_req, slots); 1137 - if (!count) 1138 - goto nomem_bulk; 1139 - 1140 - if (node->node_count == 0) { 1141 - node->slot[0]->node_count = 0; 1142 - node->slot[0]->request_count = 0; 1143 - } 1144 - 1145 - node->node_count += count; 1146 - allocated += count; 1147 - /* find a non-full node*/ 1148 - do { 1149 - node = node->slot[0]; 1150 - } while (unlikely(node->node_count == MAPLE_ALLOC_SLOTS)); 1151 - requested -= count; 1218 + mas->sheaf = mt_get_sheaf(gfp, mas->node_request); 1219 + if (likely(mas->sheaf)) { 1220 + mas->node_request = 0; 1221 + return; 1152 1222 } 1153 - mas->alloc->total = allocated; 1154 - return; 1155 1223 1156 - nomem_bulk: 1157 - /* Clean up potential freed allocations on bulk failure */ 1158 - memset(slots, 0, 
max_req * sizeof(unsigned long)); 1159 - mas->alloc->total = allocated; 1160 - nomem_one: 1161 - mas_set_alloc_req(mas, requested); 1224 + error: 1162 1225 mas_set_err(mas, -ENOMEM); 1163 1226 } 1227 + 1164 1228 1165 1229 /* 1166 1230 * mas_free() - Free an encoded maple node ··· 1129 1279 */ 1130 1280 static inline void mas_free(struct ma_state *mas, struct maple_enode *used) 1131 1281 { 1132 - struct maple_node *tmp = mte_to_node(used); 1133 - 1134 - if (mt_in_rcu(mas->tree)) 1135 - ma_free_rcu(tmp); 1136 - else 1137 - mas_push_node(mas, tmp); 1138 - } 1139 - 1140 - /* 1141 - * mas_node_count_gfp() - Check if enough nodes are allocated and request more 1142 - * if there is not enough nodes. 1143 - * @mas: The maple state 1144 - * @count: The number of nodes needed 1145 - * @gfp: the gfp flags 1146 - */ 1147 - static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp) 1148 - { 1149 - unsigned long allocated = mas_allocated(mas); 1150 - 1151 - if (allocated < count) { 1152 - mas_set_alloc_req(mas, count - allocated); 1153 - mas_alloc_nodes(mas, gfp); 1154 - } 1155 - } 1156 - 1157 - /* 1158 - * mas_node_count() - Check if enough nodes are allocated and request more if 1159 - * there is not enough nodes. 1160 - * @mas: The maple state 1161 - * @count: The number of nodes needed 1162 - * 1163 - * Note: Uses GFP_NOWAIT for gfp flags. 1164 - */ 1165 - static void mas_node_count(struct ma_state *mas, int count) 1166 - { 1167 - return mas_node_count_gfp(mas, count, GFP_NOWAIT); 1282 + ma_free_rcu(mte_to_node(used)); 1168 1283 } 1169 1284 1170 1285 /* ··· 2266 2451 enode = tmp_mas->node; 2267 2452 tmp = mte_to_node(enode); 2268 2453 mte_set_node_dead(enode); 2269 - if (in_rcu) 2270 - ma_free_rcu(tmp); 2271 - else 2272 - mas_push_node(mas, tmp); 2454 + ma_free_rcu(tmp); 2273 2455 } 2274 2456 2275 2457 /* ··· 3792 3980 * 3793 3981 * Return: Number of nodes required for preallocation. 
3794 3982 */ 3795 - static inline int mas_prealloc_calc(struct ma_wr_state *wr_mas, void *entry) 3983 + static inline void mas_prealloc_calc(struct ma_wr_state *wr_mas, void *entry) 3796 3984 { 3797 3985 struct ma_state *mas = wr_mas->mas; 3798 3986 unsigned char height = mas_mt_height(mas); ··· 3838 4026 WARN_ON_ONCE(1); 3839 4027 } 3840 4028 3841 - return ret; 4029 + mas->node_request = ret; 3842 4030 } 3843 4031 3844 4032 /* ··· 3899 4087 */ 3900 4088 static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry) 3901 4089 { 3902 - int request; 4090 + struct ma_state *mas = wr_mas->mas; 3903 4091 3904 4092 mas_wr_prealloc_setup(wr_mas); 3905 - wr_mas->mas->store_type = mas_wr_store_type(wr_mas); 3906 - request = mas_prealloc_calc(wr_mas, entry); 3907 - if (!request) 4093 + mas->store_type = mas_wr_store_type(wr_mas); 4094 + mas_prealloc_calc(wr_mas, entry); 4095 + if (!mas->node_request) 3908 4096 return; 3909 4097 3910 - mas_node_count(wr_mas->mas, request); 4098 + mas_alloc_nodes(mas, GFP_NOWAIT); 3911 4099 } 3912 4100 3913 4101 /** ··· 5020 5208 */ 5021 5209 void *mas_store(struct ma_state *mas, void *entry) 5022 5210 { 5023 - int request; 5024 5211 MA_WR_STATE(wr_mas, mas, entry); 5025 5212 5026 5213 trace_ma_write(__func__, mas, 0, entry); ··· 5049 5238 return wr_mas.content; 5050 5239 } 5051 5240 5052 - request = mas_prealloc_calc(&wr_mas, entry); 5053 - if (!request) 5241 + mas_prealloc_calc(&wr_mas, entry); 5242 + if (!mas->node_request) 5054 5243 goto store; 5055 5244 5056 - mas_node_count(mas, request); 5245 + mas_alloc_nodes(mas, GFP_NOWAIT); 5057 5246 if (mas_is_err(mas)) 5058 5247 return NULL; 5059 5248 ··· 5141 5330 int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp) 5142 5331 { 5143 5332 MA_WR_STATE(wr_mas, mas, entry); 5144 - int ret = 0; 5145 - int request; 5146 5333 5147 5334 mas_wr_prealloc_setup(&wr_mas); 5148 5335 mas->store_type = mas_wr_store_type(&wr_mas); 5149 - request = mas_prealloc_calc(&wr_mas, entry); 
5150 - if (!request) 5336 + mas_prealloc_calc(&wr_mas, entry); 5337 + if (!mas->node_request) 5151 5338 goto set_flag; 5152 5339 5153 5340 mas->mas_flags &= ~MA_STATE_PREALLOC; 5154 - mas_node_count_gfp(mas, request, gfp); 5341 + mas_alloc_nodes(mas, gfp); 5155 5342 if (mas_is_err(mas)) { 5156 - mas_set_alloc_req(mas, 0); 5157 - ret = xa_err(mas->node); 5343 + int ret = xa_err(mas->node); 5344 + 5345 + mas->node_request = 0; 5158 5346 mas_destroy(mas); 5159 5347 mas_reset(mas); 5160 5348 return ret; ··· 5161 5351 5162 5352 set_flag: 5163 5353 mas->mas_flags |= MA_STATE_PREALLOC; 5164 - return ret; 5354 + return 0; 5165 5355 } 5166 5356 EXPORT_SYMBOL_GPL(mas_preallocate); 5167 5357 ··· 5175 5365 */ 5176 5366 void mas_destroy(struct ma_state *mas) 5177 5367 { 5178 - struct maple_alloc *node; 5179 - unsigned long total; 5180 - 5181 5368 mas->mas_flags &= ~MA_STATE_PREALLOC; 5182 5369 5183 - total = mas_allocated(mas); 5184 - while (total) { 5185 - node = mas->alloc; 5186 - mas->alloc = node->slot[0]; 5187 - if (node->node_count > 1) { 5188 - size_t count = node->node_count - 1; 5370 + mas->node_request = 0; 5371 + if (mas->sheaf) 5372 + mt_return_sheaf(mas->sheaf); 5189 5373 5190 - mt_free_bulk(count, (void __rcu **)&node->slot[1]); 5191 - total -= count; 5192 - } 5193 - kfree(ma_mnode_ptr(node)); 5194 - total--; 5195 - } 5196 - 5197 - mas->alloc = NULL; 5374 + mas->sheaf = NULL; 5198 5375 } 5199 5376 EXPORT_SYMBOL_GPL(mas_destroy); 5200 5377 ··· 5816 6019 mas_alloc_nodes(mas, gfp); 5817 6020 } 5818 6021 5819 - if (!mas_allocated(mas)) 6022 + if (!mas->sheaf) 5820 6023 return false; 5821 6024 5822 6025 mas->status = ma_start; ··· 7211 7414 7212 7415 pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end, 7213 7416 mas->index, mas->last); 7214 - pr_err(" min=%lx max=%lx alloc=" PTR_FMT ", depth=%u, flags=%x\n", 7215 - mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags); 7417 + pr_err(" min=%lx max=%lx sheaf=" PTR_FMT ", request %lu depth=%u, flags=%x\n", 
7418 + mas->min, mas->max, mas->sheaf, mas->node_request, mas->depth, 7419 + mas->mas_flags); 7216 7420 if (mas->index > mas->last) 7217 7421 pr_err("Check index & last\n"); 7218 7422 }
+18 -443
tools/testing/radix-tree/maple.c
··· 57 57 struct rcu_test_struct2 *test; 58 58 }; 59 59 60 - static int get_alloc_node_count(struct ma_state *mas) 61 - { 62 - int count = 1; 63 - struct maple_alloc *node = mas->alloc; 64 - 65 - if (!node || ((unsigned long)node & 0x1)) 66 - return 0; 67 - while (node->node_count) { 68 - count += node->node_count; 69 - node = node->slot[0]; 70 - } 71 - return count; 72 - } 73 - 74 - static void check_mas_alloc_node_count(struct ma_state *mas) 75 - { 76 - mas_node_count_gfp(mas, MAPLE_ALLOC_SLOTS + 1, GFP_KERNEL); 77 - mas_node_count_gfp(mas, MAPLE_ALLOC_SLOTS + 3, GFP_KERNEL); 78 - MT_BUG_ON(mas->tree, get_alloc_node_count(mas) != mas->alloc->total); 79 - mas_destroy(mas); 80 - } 81 - 82 - /* 83 - * check_new_node() - Check the creation of new nodes and error path 84 - * verification. 85 - */ 86 - static noinline void __init check_new_node(struct maple_tree *mt) 87 - { 88 - 89 - struct maple_node *mn, *mn2, *mn3; 90 - struct maple_alloc *smn; 91 - struct maple_node *nodes[100]; 92 - int i, j, total; 93 - 94 - MA_STATE(mas, mt, 0, 0); 95 - 96 - check_mas_alloc_node_count(&mas); 97 - 98 - /* Try allocating 3 nodes */ 99 - mtree_lock(mt); 100 - mt_set_non_kernel(0); 101 - /* request 3 nodes to be allocated. */ 102 - mas_node_count(&mas, 3); 103 - /* Allocation request of 3. */ 104 - MT_BUG_ON(mt, mas_alloc_req(&mas) != 3); 105 - /* Allocate failed. */ 106 - MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM)); 107 - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); 108 - 109 - MT_BUG_ON(mt, mas_allocated(&mas) != 3); 110 - mn = mas_pop_node(&mas); 111 - MT_BUG_ON(mt, not_empty(mn)); 112 - MT_BUG_ON(mt, mn == NULL); 113 - MT_BUG_ON(mt, mas.alloc == NULL); 114 - MT_BUG_ON(mt, mas.alloc->slot[0] == NULL); 115 - mas_push_node(&mas, mn); 116 - mas_reset(&mas); 117 - mas_destroy(&mas); 118 - mtree_unlock(mt); 119 - 120 - 121 - /* Try allocating 1 node, then 2 more */ 122 - mtree_lock(mt); 123 - /* Set allocation request to 1. 
*/ 124 - mas_set_alloc_req(&mas, 1); 125 - /* Check Allocation request of 1. */ 126 - MT_BUG_ON(mt, mas_alloc_req(&mas) != 1); 127 - mas_set_err(&mas, -ENOMEM); 128 - /* Validate allocation request. */ 129 - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); 130 - /* Eat the requested node. */ 131 - mn = mas_pop_node(&mas); 132 - MT_BUG_ON(mt, not_empty(mn)); 133 - MT_BUG_ON(mt, mn == NULL); 134 - MT_BUG_ON(mt, mn->slot[0] != NULL); 135 - MT_BUG_ON(mt, mn->slot[1] != NULL); 136 - MT_BUG_ON(mt, mas_allocated(&mas) != 0); 137 - 138 - mn->parent = ma_parent_ptr(mn); 139 - ma_free_rcu(mn); 140 - mas.status = ma_start; 141 - mas_destroy(&mas); 142 - /* Allocate 3 nodes, will fail. */ 143 - mas_node_count(&mas, 3); 144 - /* Drop the lock and allocate 3 nodes. */ 145 - mas_nomem(&mas, GFP_KERNEL); 146 - /* Ensure 3 are allocated. */ 147 - MT_BUG_ON(mt, mas_allocated(&mas) != 3); 148 - /* Allocation request of 0. */ 149 - MT_BUG_ON(mt, mas_alloc_req(&mas) != 0); 150 - 151 - MT_BUG_ON(mt, mas.alloc == NULL); 152 - MT_BUG_ON(mt, mas.alloc->slot[0] == NULL); 153 - MT_BUG_ON(mt, mas.alloc->slot[1] == NULL); 154 - /* Ensure we counted 3. */ 155 - MT_BUG_ON(mt, mas_allocated(&mas) != 3); 156 - /* Free. */ 157 - mas_reset(&mas); 158 - mas_destroy(&mas); 159 - 160 - /* Set allocation request to 1. */ 161 - mas_set_alloc_req(&mas, 1); 162 - MT_BUG_ON(mt, mas_alloc_req(&mas) != 1); 163 - mas_set_err(&mas, -ENOMEM); 164 - /* Validate allocation request. */ 165 - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); 166 - MT_BUG_ON(mt, mas_allocated(&mas) != 1); 167 - /* Check the node is only one node. 
*/ 168 - mn = mas_pop_node(&mas); 169 - MT_BUG_ON(mt, not_empty(mn)); 170 - MT_BUG_ON(mt, mas_allocated(&mas) != 0); 171 - MT_BUG_ON(mt, mn == NULL); 172 - MT_BUG_ON(mt, mn->slot[0] != NULL); 173 - MT_BUG_ON(mt, mn->slot[1] != NULL); 174 - MT_BUG_ON(mt, mas_allocated(&mas) != 0); 175 - mas_push_node(&mas, mn); 176 - MT_BUG_ON(mt, mas_allocated(&mas) != 1); 177 - MT_BUG_ON(mt, mas.alloc->node_count); 178 - 179 - mas_set_alloc_req(&mas, 2); /* request 2 more. */ 180 - MT_BUG_ON(mt, mas_alloc_req(&mas) != 2); 181 - mas_set_err(&mas, -ENOMEM); 182 - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); 183 - MT_BUG_ON(mt, mas_allocated(&mas) != 3); 184 - MT_BUG_ON(mt, mas.alloc == NULL); 185 - MT_BUG_ON(mt, mas.alloc->slot[0] == NULL); 186 - MT_BUG_ON(mt, mas.alloc->slot[1] == NULL); 187 - for (i = 2; i >= 0; i--) { 188 - mn = mas_pop_node(&mas); 189 - MT_BUG_ON(mt, mas_allocated(&mas) != i); 190 - MT_BUG_ON(mt, !mn); 191 - MT_BUG_ON(mt, not_empty(mn)); 192 - mn->parent = ma_parent_ptr(mn); 193 - ma_free_rcu(mn); 194 - } 195 - 196 - total = 64; 197 - mas_set_alloc_req(&mas, total); /* request 2 more. 
*/ 198 - MT_BUG_ON(mt, mas_alloc_req(&mas) != total); 199 - mas_set_err(&mas, -ENOMEM); 200 - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); 201 - for (i = total; i > 0; i--) { 202 - unsigned int e = 0; /* expected node_count */ 203 - 204 - if (!MAPLE_32BIT) { 205 - if (i >= 35) 206 - e = i - 34; 207 - else if (i >= 5) 208 - e = i - 4; 209 - else if (i >= 2) 210 - e = i - 1; 211 - } else { 212 - if (i >= 4) 213 - e = i - 3; 214 - else if (i >= 1) 215 - e = i - 1; 216 - else 217 - e = 0; 218 - } 219 - 220 - MT_BUG_ON(mt, mas.alloc->node_count != e); 221 - mn = mas_pop_node(&mas); 222 - MT_BUG_ON(mt, not_empty(mn)); 223 - MT_BUG_ON(mt, mas_allocated(&mas) != i - 1); 224 - MT_BUG_ON(mt, !mn); 225 - mn->parent = ma_parent_ptr(mn); 226 - ma_free_rcu(mn); 227 - } 228 - 229 - total = 100; 230 - for (i = 1; i < total; i++) { 231 - mas_set_alloc_req(&mas, i); 232 - mas_set_err(&mas, -ENOMEM); 233 - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); 234 - for (j = i; j > 0; j--) { 235 - mn = mas_pop_node(&mas); 236 - MT_BUG_ON(mt, mas_allocated(&mas) != j - 1); 237 - MT_BUG_ON(mt, !mn); 238 - MT_BUG_ON(mt, not_empty(mn)); 239 - mas_push_node(&mas, mn); 240 - MT_BUG_ON(mt, mas_allocated(&mas) != j); 241 - mn = mas_pop_node(&mas); 242 - MT_BUG_ON(mt, not_empty(mn)); 243 - MT_BUG_ON(mt, mas_allocated(&mas) != j - 1); 244 - mn->parent = ma_parent_ptr(mn); 245 - ma_free_rcu(mn); 246 - } 247 - MT_BUG_ON(mt, mas_allocated(&mas) != 0); 248 - 249 - mas_set_alloc_req(&mas, i); 250 - mas_set_err(&mas, -ENOMEM); 251 - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); 252 - for (j = 0; j <= i/2; j++) { 253 - MT_BUG_ON(mt, mas_allocated(&mas) != i - j); 254 - nodes[j] = mas_pop_node(&mas); 255 - MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1); 256 - } 257 - 258 - while (j) { 259 - j--; 260 - mas_push_node(&mas, nodes[j]); 261 - MT_BUG_ON(mt, mas_allocated(&mas) != i - j); 262 - } 263 - MT_BUG_ON(mt, mas_allocated(&mas) != i); 264 - for (j = 0; j <= i/2; j++) { 265 - MT_BUG_ON(mt, mas_allocated(&mas) 
!= i - j); 266 - mn = mas_pop_node(&mas); 267 - MT_BUG_ON(mt, not_empty(mn)); 268 - mn->parent = ma_parent_ptr(mn); 269 - ma_free_rcu(mn); 270 - MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1); 271 - } 272 - mas_reset(&mas); 273 - MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL)); 274 - mas_destroy(&mas); 275 - 276 - } 277 - 278 - /* Set allocation request. */ 279 - total = 500; 280 - mas_node_count(&mas, total); 281 - /* Drop the lock and allocate the nodes. */ 282 - mas_nomem(&mas, GFP_KERNEL); 283 - MT_BUG_ON(mt, !mas.alloc); 284 - i = 1; 285 - smn = mas.alloc; 286 - while (i < total) { 287 - for (j = 0; j < MAPLE_ALLOC_SLOTS; j++) { 288 - i++; 289 - MT_BUG_ON(mt, !smn->slot[j]); 290 - if (i == total) 291 - break; 292 - } 293 - smn = smn->slot[0]; /* next. */ 294 - } 295 - MT_BUG_ON(mt, mas_allocated(&mas) != total); 296 - mas_reset(&mas); 297 - mas_destroy(&mas); /* Free. */ 298 - 299 - MT_BUG_ON(mt, mas_allocated(&mas) != 0); 300 - for (i = 1; i < 128; i++) { 301 - mas_node_count(&mas, i); /* Request */ 302 - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ 303 - MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */ 304 - for (j = i; j > 0; j--) { /*Free the requests */ 305 - mn = mas_pop_node(&mas); /* get the next node. */ 306 - MT_BUG_ON(mt, mn == NULL); 307 - MT_BUG_ON(mt, not_empty(mn)); 308 - mn->parent = ma_parent_ptr(mn); 309 - ma_free_rcu(mn); 310 - } 311 - MT_BUG_ON(mt, mas_allocated(&mas) != 0); 312 - } 313 - 314 - for (i = 1; i < MAPLE_NODE_MASK + 1; i++) { 315 - MA_STATE(mas2, mt, 0, 0); 316 - mas_node_count(&mas, i); /* Request */ 317 - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ 318 - MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */ 319 - for (j = 1; j <= i; j++) { /* Move the allocations to mas2 */ 320 - mn = mas_pop_node(&mas); /* get the next node. 
*/ 321 - MT_BUG_ON(mt, mn == NULL); 322 - MT_BUG_ON(mt, not_empty(mn)); 323 - mas_push_node(&mas2, mn); 324 - MT_BUG_ON(mt, mas_allocated(&mas2) != j); 325 - } 326 - MT_BUG_ON(mt, mas_allocated(&mas) != 0); 327 - MT_BUG_ON(mt, mas_allocated(&mas2) != i); 328 - 329 - for (j = i; j > 0; j--) { /*Free the requests */ 330 - MT_BUG_ON(mt, mas_allocated(&mas2) != j); 331 - mn = mas_pop_node(&mas2); /* get the next node. */ 332 - MT_BUG_ON(mt, mn == NULL); 333 - MT_BUG_ON(mt, not_empty(mn)); 334 - mn->parent = ma_parent_ptr(mn); 335 - ma_free_rcu(mn); 336 - } 337 - MT_BUG_ON(mt, mas_allocated(&mas2) != 0); 338 - } 339 - 340 - 341 - MT_BUG_ON(mt, mas_allocated(&mas) != 0); 342 - mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 1); /* Request */ 343 - MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM)); 344 - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); 345 - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1); 346 - MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS); 347 - 348 - mn = mas_pop_node(&mas); /* get the next node. 
*/ 349 - MT_BUG_ON(mt, mn == NULL); 350 - MT_BUG_ON(mt, not_empty(mn)); 351 - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS); 352 - MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1); 353 - 354 - mas_push_node(&mas, mn); 355 - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1); 356 - MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS); 357 - 358 - /* Check the limit of pop/push/pop */ 359 - mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 2); /* Request */ 360 - MT_BUG_ON(mt, mas_alloc_req(&mas) != 1); 361 - MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM)); 362 - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); 363 - MT_BUG_ON(mt, mas_alloc_req(&mas)); 364 - MT_BUG_ON(mt, mas.alloc->node_count != 1); 365 - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2); 366 - mn = mas_pop_node(&mas); 367 - MT_BUG_ON(mt, not_empty(mn)); 368 - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1); 369 - MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS); 370 - mas_push_node(&mas, mn); 371 - MT_BUG_ON(mt, mas.alloc->node_count != 1); 372 - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2); 373 - mn = mas_pop_node(&mas); 374 - MT_BUG_ON(mt, not_empty(mn)); 375 - mn->parent = ma_parent_ptr(mn); 376 - ma_free_rcu(mn); 377 - for (i = 1; i <= MAPLE_ALLOC_SLOTS + 1; i++) { 378 - mn = mas_pop_node(&mas); 379 - MT_BUG_ON(mt, not_empty(mn)); 380 - mn->parent = ma_parent_ptr(mn); 381 - ma_free_rcu(mn); 382 - } 383 - MT_BUG_ON(mt, mas_allocated(&mas) != 0); 384 - 385 - 386 - for (i = 3; i < MAPLE_NODE_MASK * 3; i++) { 387 - mas.node = MA_ERROR(-ENOMEM); 388 - mas_node_count(&mas, i); /* Request */ 389 - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ 390 - mn = mas_pop_node(&mas); /* get the next node. 
*/ 391 - mas_push_node(&mas, mn); /* put it back */ 392 - mas_destroy(&mas); 393 - 394 - mas.node = MA_ERROR(-ENOMEM); 395 - mas_node_count(&mas, i); /* Request */ 396 - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ 397 - mn = mas_pop_node(&mas); /* get the next node. */ 398 - mn2 = mas_pop_node(&mas); /* get the next node. */ 399 - mas_push_node(&mas, mn); /* put them back */ 400 - mas_push_node(&mas, mn2); 401 - mas_destroy(&mas); 402 - 403 - mas.node = MA_ERROR(-ENOMEM); 404 - mas_node_count(&mas, i); /* Request */ 405 - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ 406 - mn = mas_pop_node(&mas); /* get the next node. */ 407 - mn2 = mas_pop_node(&mas); /* get the next node. */ 408 - mn3 = mas_pop_node(&mas); /* get the next node. */ 409 - mas_push_node(&mas, mn); /* put them back */ 410 - mas_push_node(&mas, mn2); 411 - mas_push_node(&mas, mn3); 412 - mas_destroy(&mas); 413 - 414 - mas.node = MA_ERROR(-ENOMEM); 415 - mas_node_count(&mas, i); /* Request */ 416 - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ 417 - mn = mas_pop_node(&mas); /* get the next node. */ 418 - mn->parent = ma_parent_ptr(mn); 419 - ma_free_rcu(mn); 420 - mas_destroy(&mas); 421 - 422 - mas.node = MA_ERROR(-ENOMEM); 423 - mas_node_count(&mas, i); /* Request */ 424 - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ 425 - mn = mas_pop_node(&mas); /* get the next node. */ 426 - mn->parent = ma_parent_ptr(mn); 427 - ma_free_rcu(mn); 428 - mn = mas_pop_node(&mas); /* get the next node. */ 429 - mn->parent = ma_parent_ptr(mn); 430 - ma_free_rcu(mn); 431 - mn = mas_pop_node(&mas); /* get the next node. 
*/ 432 - mn->parent = ma_parent_ptr(mn); 433 - ma_free_rcu(mn); 434 - mas_destroy(&mas); 435 - } 436 - 437 - mas.node = MA_ERROR(-ENOMEM); 438 - mas_node_count(&mas, 5); /* Request */ 439 - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ 440 - MT_BUG_ON(mt, mas_allocated(&mas) != 5); 441 - mas.node = MA_ERROR(-ENOMEM); 442 - mas_node_count(&mas, 10); /* Request */ 443 - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ 444 - mas.status = ma_start; 445 - MT_BUG_ON(mt, mas_allocated(&mas) != 10); 446 - mas_destroy(&mas); 447 - 448 - mas.node = MA_ERROR(-ENOMEM); 449 - mas_node_count(&mas, MAPLE_ALLOC_SLOTS - 1); /* Request */ 450 - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ 451 - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS - 1); 452 - mas.node = MA_ERROR(-ENOMEM); 453 - mas_node_count(&mas, 10 + MAPLE_ALLOC_SLOTS - 1); /* Request */ 454 - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ 455 - mas.status = ma_start; 456 - MT_BUG_ON(mt, mas_allocated(&mas) != 10 + MAPLE_ALLOC_SLOTS - 1); 457 - mas_destroy(&mas); 458 - 459 - mas.node = MA_ERROR(-ENOMEM); 460 - mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 1); /* Request */ 461 - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ 462 - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1); 463 - mas.node = MA_ERROR(-ENOMEM); 464 - mas_node_count(&mas, MAPLE_ALLOC_SLOTS * 2 + 2); /* Request */ 465 - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ 466 - mas.status = ma_start; 467 - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS * 2 + 2); 468 - mas_destroy(&mas); 469 - 470 - mas.node = MA_ERROR(-ENOMEM); 471 - mas_node_count(&mas, MAPLE_ALLOC_SLOTS * 2 + 1); /* Request */ 472 - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ 473 - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS * 2 + 1); 474 - mas.node = MA_ERROR(-ENOMEM); 475 - mas_node_count(&mas, MAPLE_ALLOC_SLOTS * 3 + 2); /* Request */ 476 - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ 477 - mas.status = ma_start; 478 - MT_BUG_ON(mt, 
mas_allocated(&mas) != MAPLE_ALLOC_SLOTS * 3 + 2); 479 - mas_destroy(&mas); 480 - 481 - mtree_unlock(mt); 482 - } 483 - 484 60 /* 485 61 * Check erasing including RCU. 486 62 */ ··· 35083 35507 return vacant_height; 35084 35508 } 35085 35509 35510 + static int mas_allocated(struct ma_state *mas) 35511 + { 35512 + if (mas->sheaf) 35513 + return kmem_cache_sheaf_size(mas->sheaf); 35514 + 35515 + return 0; 35516 + } 35086 35517 /* Preallocation testing */ 35087 35518 static noinline void __init check_prealloc(struct maple_tree *mt) 35088 35519 { ··· 35108 35525 35109 35526 /* Spanning store */ 35110 35527 mas_set_range(&mas, 470, 500); 35111 - MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); 35528 + 35529 + mas_wr_preallocate(&wr_mas, ptr); 35530 + MT_BUG_ON(mt, mas.store_type != wr_spanning_store); 35531 + MT_BUG_ON(mt, mas_is_err(&mas)); 35112 35532 allocated = mas_allocated(&mas); 35113 35533 height = mas_mt_height(&mas); 35114 35534 vacant_height = get_vacant_height(&wr_mas, ptr); ··· 35121 35535 allocated = mas_allocated(&mas); 35122 35536 MT_BUG_ON(mt, allocated != 0); 35123 35537 35538 + mas_wr_preallocate(&wr_mas, ptr); 35124 35539 MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); 35125 35540 allocated = mas_allocated(&mas); 35126 35541 height = mas_mt_height(&mas); ··· 35161 35574 MT_BUG_ON(mt, allocated != 0); 35162 35575 mn->parent = ma_parent_ptr(mn); 35163 35576 ma_free_rcu(mn); 35164 - 35165 - MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); 35166 - allocated = mas_allocated(&mas); 35167 - height = mas_mt_height(&mas); 35168 - vacant_height = get_vacant_height(&wr_mas, ptr); 35169 - MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 3); 35170 - mn = mas_pop_node(&mas); 35171 - MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1); 35172 - mas_push_node(&mas, mn); 35173 - MT_BUG_ON(mt, mas_allocated(&mas) != allocated); 35174 - MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); 35175 - mas_destroy(&mas); 35176 
- allocated = mas_allocated(&mas); 35177 - MT_BUG_ON(mt, allocated != 0); 35178 35577 35179 35578 MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); 35180 35579 allocated = mas_allocated(&mas); ··· 35962 36389 check_load(mt, 6, xa_mk_value(0xC)); 35963 36390 mtree_unlock(mt); 35964 36391 36392 + mt_set_non_kernel(0); 35965 36393 /* test for the same race but with mas_store_gfp() */ 35966 36394 mtree_store_range(mt, 0, 5, xa_mk_value(0xA), GFP_KERNEL); 35967 36395 mtree_store_range(mt, 6, 10, NULL, GFP_KERNEL); 35968 36396 35969 36397 mas_set_range(&mas, 0, 5); 36398 + 36399 + /* setup writer 2 that will trigger the race condition */ 36400 + mt_set_private(mt); 36401 + mt_set_callback(writer2); 36402 + 35970 36403 mtree_lock(mt); 35971 36404 mas_store_gfp(&mas, NULL, GFP_KERNEL); 35972 36405 ··· 36085 36506 /* RCU testing */ 36086 36507 mt_init_flags(&tree, 0); 36087 36508 check_erase_testset(&tree); 36088 - mtree_destroy(&tree); 36089 - 36090 - mt_init_flags(&tree, 0); 36091 - check_new_node(&tree); 36092 36509 mtree_destroy(&tree); 36093 36510 36094 36511 if (!MAPLE_32BIT) {
+4 -1
tools/testing/shared/linux.c
··· 64 64 65 65 if (!(gfp & __GFP_DIRECT_RECLAIM)) { 66 66 if (!cachep->non_kernel) { 67 - cachep->exec_callback = true; 67 + if (cachep->callback) 68 + cachep->exec_callback = true; 68 69 return NULL; 69 70 } 70 71 ··· 211 210 for (i = 0; i < size; i++) 212 211 __kmem_cache_free_locked(cachep, p[i]); 213 212 pthread_mutex_unlock(&cachep->lock); 213 + if (cachep->callback) 214 + cachep->exec_callback = true; 214 215 return 0; 215 216 } 216 217