Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.0, 281 lines, 6.9 kB
// SPDX-License-Identifier: GPL-2.0
/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Interval tree (augmented rbtree) used to store the PAT memory type
 * reservations.
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/rbtree_augmented.h>
#include <linux/sched.h>
#include <linux/gfp.h>

#include <asm/pgtable.h>
#include <asm/pat.h>

#include "pat_internal.h"

/*
 * The memtype tree keeps track of memory type for specific
 * physical memory areas. Without proper tracking, conflicting memory
 * types in different mappings can cause CPU cache corruption.
 *
 * The tree is an interval tree (augmented rbtree) with tree ordered
 * on starting address. Tree can contain multiple entries for
 * different regions which overlap. All the aliases have the same
 * cache attributes of course.
 *
 * memtype_lock protects the rbtree.
 */

static struct rb_root memtype_rbroot = RB_ROOT;

static int is_node_overlap(struct memtype *node, u64 start, u64 end)
{
        if (node->start >= end || node->end <= start)
                return 0;

        return 1;
}

static u64 get_subtree_max_end(struct rb_node *node)
{
        u64 ret = 0;
        if (node) {
                struct memtype *data = rb_entry(node, struct memtype, rb);
                ret = data->subtree_max_end;
        }
        return ret;
}

static u64 compute_subtree_max_end(struct memtype *data)
{
        u64 max_end = data->end, child_max_end;

        child_max_end = get_subtree_max_end(data->rb.rb_right);
        if (child_max_end > max_end)
                max_end = child_max_end;

        child_max_end = get_subtree_max_end(data->rb.rb_left);
        if (child_max_end > max_end)
                max_end = child_max_end;

        return max_end;
}

RB_DECLARE_CALLBACKS(static, memtype_rb_augment_cb, struct memtype, rb,
                     u64, subtree_max_end, compute_subtree_max_end)

/* Find the first (lowest start addr) overlapping range from rb tree */
static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
                                u64 start, u64 end)
{
        struct rb_node *node = root->rb_node;
        struct memtype *last_lower = NULL;

        while (node) {
                struct memtype *data = rb_entry(node, struct memtype, rb);

                if (get_subtree_max_end(node->rb_left) > start) {
                        /* Lowest overlap if any must be on left side */
                        node = node->rb_left;
                } else if (is_node_overlap(data, start, end)) {
                        last_lower = data;
                        break;
                } else if (start >= data->start) {
                        /* Lowest overlap if any must be on right side */
                        node = node->rb_right;
                } else {
                        break;
                }
        }
        return last_lower; /* Returns NULL if there is no overlap */
}

enum {
        MEMTYPE_EXACT_MATCH     = 0,
        MEMTYPE_END_MATCH       = 1
};

static struct memtype *memtype_rb_match(struct rb_root *root,
                                u64 start, u64 end, int match_type)
{
        struct memtype *match;

        match = memtype_rb_lowest_match(root, start, end);
        while (match != NULL && match->start < end) {
                struct rb_node *node;

                if ((match_type == MEMTYPE_EXACT_MATCH) &&
                    (match->start == start) && (match->end == end))
                        return match;

                if ((match_type == MEMTYPE_END_MATCH) &&
                    (match->start < start) && (match->end == end))
                        return match;

                node = rb_next(&match->rb);
                if (node)
                        match = rb_entry(node, struct memtype, rb);
                else
                        match = NULL;
        }

        return NULL; /* Returns NULL if there is no match */
}

static int memtype_rb_check_conflict(struct rb_root *root,
                                u64 start, u64 end,
                                enum page_cache_mode reqtype,
                                enum page_cache_mode *newtype)
{
        struct rb_node *node;
        struct memtype *match;
        enum page_cache_mode found_type = reqtype;

        match = memtype_rb_lowest_match(&memtype_rbroot, start, end);
        if (match == NULL)
                goto success;

        if (match->type != found_type && newtype == NULL)
                goto failure;

        dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end);
        found_type = match->type;

        node = rb_next(&match->rb);
        while (node) {
                match = rb_entry(node, struct memtype, rb);

                if (match->start >= end) /* Checked all possible matches */
                        goto success;

                if (is_node_overlap(match, start, end) &&
                    match->type != found_type) {
                        goto failure;
                }

                node = rb_next(&match->rb);
        }
success:
        if (newtype)
                *newtype = found_type;

        return 0;

failure:
        pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
                current->comm, current->pid, start, end,
                cattr_name(found_type), cattr_name(match->type));
        return -EBUSY;
}

static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
{
        struct rb_node **node = &(root->rb_node);
        struct rb_node *parent = NULL;

        while (*node) {
                struct memtype *data = rb_entry(*node, struct memtype, rb);

                parent = *node;
                if (data->subtree_max_end < newdata->end)
                        data->subtree_max_end = newdata->end;
                if (newdata->start <= data->start)
                        node = &((*node)->rb_left);
                else if (newdata->start > data->start)
                        node = &((*node)->rb_right);
        }

        newdata->subtree_max_end = newdata->end;
        rb_link_node(&newdata->rb, parent, node);
        rb_insert_augmented(&newdata->rb, root, &memtype_rb_augment_cb);
}

int rbt_memtype_check_insert(struct memtype *new,
                             enum page_cache_mode *ret_type)
{
        int err = 0;

        err = memtype_rb_check_conflict(&memtype_rbroot, new->start, new->end,
                                        new->type, ret_type);

        if (!err) {
                if (ret_type)
                        new->type = *ret_type;

                new->subtree_max_end = new->end;
                memtype_rb_insert(&memtype_rbroot, new);
        }
        return err;
}

struct memtype *rbt_memtype_erase(u64 start, u64 end)
{
        struct memtype *data;

        /*
         * Since the memtype_rbroot tree allows overlapping ranges,
         * rbt_memtype_erase() checks with EXACT_MATCH first, i.e. free
         * a whole node for the munmap case. If no such entry is found,
         * it then checks with END_MATCH, i.e. shrink the size of a node
         * from the end for the mremap case.
         */
        data = memtype_rb_match(&memtype_rbroot, start, end,
                                MEMTYPE_EXACT_MATCH);
        if (!data) {
                data = memtype_rb_match(&memtype_rbroot, start, end,
                                        MEMTYPE_END_MATCH);
                if (!data)
                        return ERR_PTR(-EINVAL);
        }

        if (data->start == start) {
                /* munmap: erase this node */
                rb_erase_augmented(&data->rb, &memtype_rbroot,
                                        &memtype_rb_augment_cb);
        } else {
                /* mremap: update the end value of this node */
                rb_erase_augmented(&data->rb, &memtype_rbroot,
                                        &memtype_rb_augment_cb);
                data->end = start;
                data->subtree_max_end = data->end;
                memtype_rb_insert(&memtype_rbroot, data);
                return NULL;
        }

        return data;
}

struct memtype *rbt_memtype_lookup(u64 addr)
{
        return memtype_rb_lowest_match(&memtype_rbroot, addr, addr + PAGE_SIZE);
}

#if defined(CONFIG_DEBUG_FS)
int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
{
        struct rb_node *node;
        int i = 1;

        node = rb_first(&memtype_rbroot);
        while (node && pos != i) {
                node = rb_next(node);
                i++;
        }

        if (node) { /* pos == i */
                struct memtype *this = rb_entry(node, struct memtype, rb);
                *out = *this;
                return 0;
        } else {
                return 1;
        }
}
#endif
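
For context, here is a minimal sketch of how a caller might drive the reservation side of this API. It is not part of this file: the real caller is reserve_memtype() in pat.c, and the memtype_lock spinlock named in the comment at the top of this file is defined there too, so the locking and error handling below are illustrative assumptions rather than the exact upstream code.

/*
 * Hypothetical caller sketch, not from this file. Assumes access to the
 * memtype_lock spinlock (defined in pat.c) and <linux/slab.h> for
 * kzalloc()/kfree().
 */
static int example_reserve(u64 start, u64 end, enum page_cache_mode req,
                           enum page_cache_mode *ret)
{
        struct memtype *new;
        int err;

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start = start;
        new->end   = end;
        new->type  = req;

        spin_lock(&memtype_lock);
        /* -EBUSY if an overlapping range was reserved with another type */
        err = rbt_memtype_check_insert(new, ret);
        spin_unlock(&memtype_lock);

        if (err)
                kfree(new);
        return err;
}

Passing a non-NULL ret lets memtype_rb_check_conflict() downgrade the request to the type of an existing overlapping alias instead of failing, which is why rbt_memtype_check_insert() writes the resolved type back into new->type.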
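The free path can be sketched the same way. The real caller is free_memtype() in pat.c; the point here is the three-way return contract of rbt_memtype_erase(): an ERR_PTR for a failed lookup, the removed node for the munmap case (which the caller frees), and NULL for the mremap shrink case, where the node was trimmed and re-inserted rather than removed.

/*
 * Hypothetical caller sketch, not from this file. Assumes the same
 * memtype_lock and <linux/err.h> for IS_ERR()/PTR_ERR().
 */
static int example_free(u64 start, u64 end)
{
        struct memtype *entry;

        spin_lock(&memtype_lock);
        entry = rbt_memtype_erase(start, end);
        spin_unlock(&memtype_lock);

        if (IS_ERR(entry))
                return PTR_ERR(entry);  /* no exact or end match: -EINVAL */

        /* NULL means the mremap shrink case; kfree(NULL) is a no-op */
        kfree(entry);
        return 0;
}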