Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: writeback: cleanups in preparation for per-zone dirty limits

The next patch will introduce per-zone dirty limiting functions in
addition to the traditional global dirty limiting.

Rename determine_dirtyable_memory() to global_dirtyable_memory() before
adding the zone-specific version, and fix up its documentation.

Also, move the functions to determine the dirtyable memory and the
function to calculate the dirty limit based on that together so that their
relationship is more apparent and that they can be commented on as a
group.

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Mel Gorman <mel@suse.de>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Shaohua Li <shaohua.li@intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Chris Mason <chris.mason@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Johannes Weiner and committed by Linus Torvalds (commit ccafa287, parent ab8fabd4).

Diffstat: +47 -46 (47 lines added, 46 lines removed)
mm/page-writeback.c
··· 146 146 * We make sure that the background writeout level is below the adjusted 147 147 * clamping level. 148 148 */ 149 + 149 150 static unsigned long highmem_dirtyable_memory(unsigned long total) 150 151 { 151 152 #ifdef CONFIG_HIGHMEM ··· 173 172 } 174 173 175 174 /** 176 - * determine_dirtyable_memory - amount of memory that may be used 175 + * global_dirtyable_memory - number of globally dirtyable pages 177 176 * 178 - * Returns the numebr of pages that can currently be freed and used 179 - * by the kernel for direct mappings. 177 + * Returns the global number of pages potentially available for dirty 178 + * page cache. This is the base value for the global dirty limits. 180 179 */ 181 - static unsigned long determine_dirtyable_memory(void) 180 + unsigned long global_dirtyable_memory(void) 182 181 { 183 182 unsigned long x; 184 183 ··· 189 188 x -= highmem_dirtyable_memory(x); 190 189 191 190 return x + 1; /* Ensure that we never return 0 */ 191 + } 192 + 193 + /* 194 + * global_dirty_limits - background-writeback and dirty-throttling thresholds 195 + * 196 + * Calculate the dirty thresholds based on sysctl parameters 197 + * - vm.dirty_background_ratio or vm.dirty_background_bytes 198 + * - vm.dirty_ratio or vm.dirty_bytes 199 + * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and 200 + * real-time tasks. 
201 + */ 202 + void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) 203 + { 204 + unsigned long background; 205 + unsigned long dirty; 206 + unsigned long uninitialized_var(available_memory); 207 + struct task_struct *tsk; 208 + 209 + if (!vm_dirty_bytes || !dirty_background_bytes) 210 + available_memory = global_dirtyable_memory(); 211 + 212 + if (vm_dirty_bytes) 213 + dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE); 214 + else 215 + dirty = (vm_dirty_ratio * available_memory) / 100; 216 + 217 + if (dirty_background_bytes) 218 + background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE); 219 + else 220 + background = (dirty_background_ratio * available_memory) / 100; 221 + 222 + if (background >= dirty) 223 + background = dirty / 2; 224 + tsk = current; 225 + if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) { 226 + background += background / 4; 227 + dirty += dirty / 4; 228 + } 229 + *pbackground = background; 230 + *pdirty = dirty; 231 + trace_global_dirty_state(background, dirty); 192 232 } 193 233 194 234 /* ··· 244 202 if (vm_dirty_bytes) 245 203 dirty_total = vm_dirty_bytes / PAGE_SIZE; 246 204 else 247 - dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) / 205 + dirty_total = (vm_dirty_ratio * global_dirtyable_memory()) / 248 206 100; 249 207 return 2 + ilog2(dirty_total - 1); 250 208 } ··· 402 360 static unsigned long hard_dirty_limit(unsigned long thresh) 403 361 { 404 362 return max(thresh, global_dirty_limit); 405 - } 406 - 407 - /* 408 - * global_dirty_limits - background-writeback and dirty-throttling thresholds 409 - * 410 - * Calculate the dirty thresholds based on sysctl parameters 411 - * - vm.dirty_background_ratio or vm.dirty_background_bytes 412 - * - vm.dirty_ratio or vm.dirty_bytes 413 - * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and 414 - * real-time tasks. 
415 - */ 416 - void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) 417 - { 418 - unsigned long background; 419 - unsigned long dirty; 420 - unsigned long uninitialized_var(available_memory); 421 - struct task_struct *tsk; 422 - 423 - if (!vm_dirty_bytes || !dirty_background_bytes) 424 - available_memory = determine_dirtyable_memory(); 425 - 426 - if (vm_dirty_bytes) 427 - dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE); 428 - else 429 - dirty = (vm_dirty_ratio * available_memory) / 100; 430 - 431 - if (dirty_background_bytes) 432 - background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE); 433 - else 434 - background = (dirty_background_ratio * available_memory) / 100; 435 - 436 - if (background >= dirty) 437 - background = dirty / 2; 438 - tsk = current; 439 - if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) { 440 - background += background / 4; 441 - dirty += dirty / 4; 442 - } 443 - *pbackground = background; 444 - *pdirty = dirty; 445 - trace_global_dirty_state(background, dirty); 446 363 } 447 364 448 365 /**