drm/i915: Selectively enable self-reclaim

Since I missed the ENOMEM return via i915_gem_fault(), there are probably
other paths that I missed as well. With __GFP_NORETRY no longer set by
default, those paths can run the shrinker and take memory from the system
(though not from our own inactive lists, since our shrinker cannot run
whilst we hold the struct mutex), which may allow the system to survive a
little longer whilst our drivers consume all available memory.
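
In short, callers now state per allocation whether it may fail fast. The
following restates the i915_gem_object_get_pages_or_evict() hunk below with
explanatory comments added; it is not additional code on top of the patch:

	static int
	i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
	{
		int ret;

		/* Opportunistic attempt: fail fast with -ENOMEM instead of
		 * looping in the allocator or warning about the failure. */
		ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
		if (ret == -ENOMEM) {
			struct drm_device *dev = obj->dev;

			/* Make room from our own buffers first, then retry
			 * with the mapping's default mask (gfpmask == 0),
			 * which may block and reclaim. */
			ret = i915_gem_evict_something(dev, obj->size);
			if (ret)
				return ret;

			ret = i915_gem_object_get_pages(obj, 0);
		}

		return ret;
	}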

References:
OOM killer unexpectedly called with kernel 2.6.32
http://bugzilla.kernel.org/show_bug.cgi?id=14933

v2: Pass gfp into page mapping.
v3: Use new read_cache_page_gfp() instead of open-coding.
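
read_cache_page_gfp() takes the gfp mask per lookup, so the object's mapping
no longer has to have its mapping-wide mask rewritten around each call. The
per-page lookup ends up along the lines of the sketch below; the wrapper name
gem_read_object_page() is illustrative only and not part of the patch:

	#include <linux/pagemap.h>	/* read_cache_page_gfp(), mapping_gfp_mask() */

	/* Illustrative helper: fetch page 'index' of a shmem-backed GEM
	 * object, applying the caller's gfp bits to this lookup only. */
	static struct page *
	gem_read_object_page(struct address_space *mapping, pgoff_t index,
			     gfp_t gfpmask)
	{
		return read_cache_page_gfp(mapping, index,
					   mapping_gfp_mask(mapping) |
					   __GFP_COLD |
					   gfpmask);
	}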

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Eric Anholt <eric@anholt.net>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by Chris Wilson and committed by Linus Torvalds 4bdadb97 0531b2aa

+19 -52
+0 -13
drivers/gpu/drm/drm_gem.c
···
 	if (IS_ERR(obj->filp))
 		goto free;
 
-	/* Basically we want to disable the OOM killer and handle ENOMEM
-	 * ourselves by sacrificing pages from cached buffers.
-	 * XXX shmem_file_[gs]et_gfp_mask()
-	 */
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
-			     GFP_HIGHUSER |
-			     __GFP_COLD |
-			     __GFP_FS |
-			     __GFP_RECLAIMABLE |
-			     __GFP_NORETRY |
-			     __GFP_NOWARN |
-			     __GFP_NOMEMALLOC);
-
 	kref_init(&obj->refcount);
 	kref_init(&obj->handlecount);
 	obj->size = size;
+1 -1
drivers/gpu/drm/i915/i915_debugfs.c
···
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
 		obj = obj_priv->obj;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-			ret = i915_gem_object_get_pages(obj);
+			ret = i915_gem_object_get_pages(obj, 0);
 			if (ret) {
 				DRM_ERROR("Failed to get pages: %d\n", ret);
 				spin_unlock(&dev_priv->mm.active_list_lock);
+1 -1
drivers/gpu/drm/i915/i915_drv.h
···
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj);
+int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
+17 -37
drivers/gpu/drm/i915/i915_gem.c
···
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
···
 	return ret;
 }
 
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
-	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
 static int
 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
 {
 	int ret;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 
 	/* If we've insufficient memory to map in the pages, attempt
 	 * to make some space by throwing out some old buffers.
 	 */
 	if (ret == -ENOMEM) {
 		struct drm_device *dev = obj->dev;
-		gfp_t gfp;
 
 		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret)
 			return ret;
 
-		gfp = i915_gem_object_get_page_gfp_mask(obj);
-		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
-		ret = i915_gem_object_get_pages(obj);
-		i915_gem_object_set_page_gfp_mask (obj, gfp);
+		ret = i915_gem_object_get_pages(obj, 0);
 	}
 
 	return ret;
···
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
···
 }
 
 int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+			  gfp_t gfpmask)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int page_count, i;
···
 	inode = obj->filp->f_path.dentry->d_inode;
 	mapping = inode->i_mapping;
 	for (i = 0; i < page_count; i++) {
-		page = read_mapping_page(mapping, i, NULL);
+		page = read_cache_page_gfp(mapping, i,
+					   mapping_gfp_mask (mapping) |
+					   __GFP_COLD |
+					   gfpmask);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			i915_gem_object_put_pages(obj);
···
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	struct drm_mm_node *free_space;
-	bool retry_alloc = false;
+	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
 	int ret;
 
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
···
 	DRM_INFO("Binding object of size %zd at 0x%08x\n",
 		 obj->size, obj_priv->gtt_offset);
 #endif
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-						   i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
-	}
-	ret = i915_gem_object_get_pages(obj);
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-						   i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
-	}
+	ret = i915_gem_object_get_pages(obj, gfpmask);
 	if (ret) {
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
···
 			ret = i915_gem_evict_something(dev, obj->size);
 			if (ret) {
 				/* now try to shrink everyone else */
-				if (! retry_alloc) {
-					retry_alloc = true;
-					goto search_free;
+				if (gfpmask) {
+					gfpmask = 0;
+					goto search_free;
 				}
 
 				return ret;
···
 	if (!obj_priv->phys_obj)
 		return;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret)
 		goto out;
 
···
 	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
 	obj_priv->phys_obj->cur_obj = obj;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret) {
 		DRM_ERROR("failed to get page list\n");
 		goto out;