Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (at v4.5, 346 lines, 11 kB)
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"

static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is useful,
 * e.g., when purgeable objects should be removed from caches preferentially.
 *
 * Note that there is no guarantee that the released amount is actually
 * available as free system memory - the pages might still be in use due to
 * other reasons (like cpu mmaps), or the mm core may have reused them before
 * we could grab them. Therefore code that needs to explicitly shrink buffer
 * object caches (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target, unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv->dev);

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equal to 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target && !list_empty(phase->list)) {
			struct drm_i915_gem_object *obj;
			struct i915_vma *vma, *v;

			obj = list_first_entry(phase->list,
					       typeof(*obj), global_list);
			list_move_tail(&obj->global_list, &still_in_list);

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->madv != I915_MADV_DONTNEED)
				continue;

			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
				continue;

			drm_gem_object_reference(&obj->base);

			/* For the unbound phase, this should be a no-op! */
			list_for_each_entry_safe(vma, v,
						 &obj->vma_list, vma_link)
				if (i915_vma_unbind(vma))
					break;

			if (i915_gem_object_put_pages(obj) == 0)
				count += obj->base.size >> PAGE_SHIFT;

			drm_gem_object_unreference(&obj->base);
		}
		list_splice(&still_in_list, phase->list);
	}

	i915_gem_retire_requests(dev_priv->dev);

	return count;
}
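
/*
 * Illustrative sketch (not in the original source; nr_pages is a
 * hypothetical variable): a caller that wants to drop purgeable buffers
 * first and only then touch everything else can make two passes, exactly
 * as i915_gem_shrinker_scan() below does:
 *
 *	freed = i915_gem_shrink(dev_priv, nr_pages,
 *				I915_SHRINK_BOUND |
 *				I915_SHRINK_UNBOUND |
 *				I915_SHRINK_PURGEABLE);
 *	if (freed < nr_pages)
 *		freed += i915_gem_shrink(dev_priv, nr_pages - freed,
 *					 I915_SHRINK_BOUND |
 *					 I915_SHRINK_UNBOUND);
 */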

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink
 * all caches completely. It also first waits for and retires all outstanding
 * requests so that the backing storage of active objects can be released as
 * well.
 *
 * This should only be used in code to intentionally quiesce the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	return i915_gem_shrink(dev_priv, -1UL,
			       I915_SHRINK_BOUND |
			       I915_SHRINK_UNBOUND |
			       I915_SHRINK_ACTIVE);
}
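
/*
 * i915_gem_shrinker_lock() below implements a small lock-stealing scheme:
 * the shrinker may be entered from an allocation made while this driver
 * already holds dev->struct_mutex on the current task. mutex_trylock()
 * then fails, but since the lock is already ours it is safe to proceed
 * without taking it again; *unlock is cleared so the caller does not drop
 * a mutex it never acquired. shrinker_no_lock_stealing lets code sections
 * that cannot tolerate reentrant shrinking opt out of this behaviour.
 */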
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return false;

		if (to_i915(dev)->mm.shrinker_no_lock_stealing)
			return false;

		*unlock = false;
	} else
		*unlock = true;

	return true;
}

static int num_vma_bound(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int count = 0;

	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (drm_mm_node_allocated(&vma->node))
			count++;
		if (vma->pin_count)
			count++;
	}

	return count;
}
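
/*
 * The two callbacks below plug into the core-mm shrinker API:
 * ->count_objects() reports how much could plausibly be reclaimed (here
 * counted in pages of unpinned backing storage), and ->scan_objects()
 * tries to release sc->nr_to_scan of them, preferring purgeable objects
 * in a first pass. Returning SHRINK_STOP tells the core that reclaim is
 * not possible right now (e.g. the lock cannot be taken).
 */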
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return 0;

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	unsigned long timeout = msecs_to_jiffies(5000) + 1;
	unsigned long pinned, bound, unbound, freed_pages;
	bool was_interruptible;
	bool unlock;

	while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return NOTIFY_DONE;
	}
	if (timeout == 0) {
		pr_err("Unable to purge GPU memory due to lock contention.\n");
		return NOTIFY_DONE;
	}

	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	freed_pages = i915_gem_shrink_all(dev_priv);

	dev_priv->mm.interruptible = was_interruptible;

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = pinned = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (!obj->base.filp) /* not backed by a freeable object */
			continue;

		if (obj->pages_pin_count)
			pinned += obj->base.size;
		else
			unbound += obj->base.size;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!obj->base.filp)
			continue;

		if (obj->pages_pin_count)
			pinned += obj->base.size;
		else
			bound += obj->base.size;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
			freed_pages << PAGE_SHIFT, pinned);
	if (unbound || bound)
		pr_err("%lu and %lu bytes still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.shrinker);

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	register_oom_notifier(&dev_priv->mm.oom_notifier);
}
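
/*
 * Note on the OOM path above: the kernel invokes the oom_notify_list
 * chain with a pointer to its running count of freed pages, which is why
 * i915_gem_shrinker_oom() adds freed_pages to *(unsigned long *)ptr and
 * always returns NOTIFY_DONE rather than a verdict of its own.
 */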