drm/i915: Selectively enable self-reclaim

Having missed the ENOMEM return via i915_gem_fault(), there are probably
other paths that I also missed. By not enabling NORETRY by default these
paths can run the shrinker and take memory from the system (but not from
our own inactive lists because our shrinker can not run whilst we hold
the struct mutex) and this may allow the system to survive a little longer
whilst our drivers consume all available memory.
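
In short, the pattern this patch introduces is: first try to populate the
object's pages with __GFP_NORETRY | __GFP_NOWARN so the allocator fails fast
and quietly instead of invoking the OOM killer, and only retry with the
default mask once we have evicted some of our own buffers. A condensed sketch
of i915_gem_object_get_pages_or_evict() as changed below (error paths
trimmed; see the i915_gem.c hunk for the real version):

	static int
	i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
	{
		int ret;

		/* First pass: fail quickly and quietly rather than OOM. */
		ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
		if (ret != -ENOMEM)
			return ret;

		/* Make room by throwing out some of our own old buffers... */
		ret = i915_gem_evict_something(obj->dev, obj->size);
		if (ret)
			return ret;

		/* ...then retry, this time allowing full direct reclaim. */
		return i915_gem_object_get_pages(obj, 0);
	}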

References:
OOM killer unexpectedly called with kernel 2.6.32
http://bugzilla.kernel.org/show_bug.cgi?id=14933

v2: Pass gfp into page mapping.
v3: Use new read_cache_page_gfp() instead of open-coding.
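
read_cache_page_gfp() takes the gfp mask per call, so the driver no longer
needs to rewrite the mapping-wide mask around each lookup; the page loop in
i915_gem_object_get_pages() reduces to:

	/* Per-call gfp: no save/restore of the mapping's mask needed. */
	page = read_cache_page_gfp(mapping, i,
				   mapping_gfp_mask(mapping) |
				   __GFP_COLD |
				   gfpmask);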

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Eric Anholt <eric@anholt.net>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Chris Wilson, committed by Linus Torvalds (4bdadb97, 0531b2aa)

4 files changed: +19 -52

+0 -13
drivers/gpu/drm/drm_gem.c
@@ -142,19 +142,6 @@
 	if (IS_ERR(obj->filp))
 		goto free;
 
-	/* Basically we want to disable the OOM killer and handle ENOMEM
-	 * ourselves by sacrificing pages from cached buffers.
-	 * XXX shmem_file_[gs]et_gfp_mask()
-	 */
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
-			     GFP_HIGHUSER |
-			     __GFP_COLD |
-			     __GFP_FS |
-			     __GFP_RECLAIMABLE |
-			     __GFP_NORETRY |
-			     __GFP_NOWARN |
-			     __GFP_NOMEMALLOC);
-
 	kref_init(&obj->refcount);
 	kref_init(&obj->handlecount);
 	obj->size = size;
+1 -1
drivers/gpu/drm/i915/i915_debugfs.c
@@ -290,7 +290,7 @@
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
 		obj = obj_priv->obj;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-			ret = i915_gem_object_get_pages(obj);
+			ret = i915_gem_object_get_pages(obj, 0);
 			if (ret) {
 				DRM_ERROR("Failed to get pages: %d\n", ret);
 				spin_unlock(&dev_priv->mm.active_list_lock);
+1 -1
drivers/gpu/drm/i915/i915_drv.h
@@ -872,7 +872,7 @@
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj);
+int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
+17 -37
drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
@@ -321,40 +321,24 @@
 	return ret;
 }
 
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
-	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
 static int
 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
 {
 	int ret;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 
 	/* If we've insufficient memory to map in the pages, attempt
 	 * to make some space by throwing out some old buffers.
 	 */
 	if (ret == -ENOMEM) {
 		struct drm_device *dev = obj->dev;
-		gfp_t gfp;
 
 		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret)
 			return ret;
 
-		gfp = i915_gem_object_get_page_gfp_mask(obj);
-		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
-		ret = i915_gem_object_get_pages(obj);
-		i915_gem_object_set_page_gfp_mask (obj, gfp);
+		ret = i915_gem_object_get_pages(obj, 0);
 	}
 
 	return ret;
@@ -790,6 +774,6 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
@@ -2230,7 +2214,8 @@
 }
 
 int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+			  gfp_t gfpmask)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int page_count, i;
@@ -2256,7 +2241,10 @@
 	inode = obj->filp->f_path.dentry->d_inode;
 	mapping = inode->i_mapping;
 	for (i = 0; i < page_count; i++) {
-		page = read_mapping_page(mapping, i, NULL);
+		page = read_cache_page_gfp(mapping, i,
+					   mapping_gfp_mask (mapping) |
+					   __GFP_COLD |
+					   gfpmask);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			i915_gem_object_put_pages(obj);
@@ -2579,7 +2567,7 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	struct drm_mm_node *free_space;
-	bool retry_alloc = false;
+	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
 	int ret;
 
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
@@ -2623,15 +2611,7 @@
 	DRM_INFO("Binding object of size %zd at 0x%08x\n",
 		 obj->size, obj_priv->gtt_offset);
 #endif
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-			i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
-	}
-	ret = i915_gem_object_get_pages(obj);
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-			i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
-	}
+	ret = i915_gem_object_get_pages(obj, gfpmask);
 	if (ret) {
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
@@ -2641,9 +2621,9 @@
 		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret) {
 			/* now try to shrink everyone else */
-			if (! retry_alloc) {
-				retry_alloc = true;
-				goto search_free;
+			if (gfpmask) {
+				gfpmask = 0;
+				goto search_free;
 			}
 
 			return ret;
@@ -4946,6 +4926,6 @@
 	if (!obj_priv->phys_obj)
 		return;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret)
 		goto out;
@@ -5004,7 +4984,7 @@
 	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
 	obj_priv->phys_obj->cur_obj = obj;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret) {
 		DRM_ERROR("failed to get page list\n");
 		goto out;
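
Note how the bind path reuses gfpmask as its retry flag: it starts as
__GFP_NORETRY | __GFP_NOWARN and is cleared to 0 for the second attempt, so a
non-zero mask means "we have not yet tried the full-strength allocation". A
condensed sketch of that control flow in i915_gem_object_bind_to_gtt() above
(other failure handling omitted):

	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;

 search_free:
	ret = i915_gem_object_get_pages(obj, gfpmask);
	if (ret == -ENOMEM) {
		ret = i915_gem_evict_something(dev, obj->size);
		if (ret == 0)
			goto search_free;	/* eviction made room; retry */

		/* now try to shrink everyone else */
		if (gfpmask) {
			gfpmask = 0;		/* allow full direct reclaim */
			goto search_free;
		}

		return ret;
	}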