include/linux/compaction.h at v3.11-rc2
#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		0
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	1
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		2
/* The full zone was compacted */
#define COMPACT_COMPLETE	3

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *mask,
			bool sync, bool *contended);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_limit compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
static inline void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}

/* Returns true if restarting compaction after many failures */
static inline bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended)
{
	return COMPACT_CONTINUE;
}

static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}

static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */
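
The deferral helpers above are meant to be driven by the direct-compaction path in the page allocator: a caller checks compaction_deferred() before attempting compaction and calls defer_compaction() when a compaction pass ran but the allocation still failed. The following is a minimal, illustrative sketch of that pattern, not the real call site (which lives in mm/page_alloc.c and carries much more state); compact_then_allocate() and retry_allocation() are hypothetical names introduced here for illustration.

/*
 * Illustrative only: a hypothetical direct-compaction wrapper showing how
 * the deferral helpers combine with try_to_compact_pages().
 */
#include <linux/compaction.h>
#include <linux/gfp.h>
#include <linux/mmzone.h>

/* Hypothetical helper: retries the page allocation after a compaction pass. */
extern struct page *retry_allocation(struct zonelist *zonelist, int order,
				     gfp_t gfp_mask, nodemask_t *nodemask);

static struct page *compact_then_allocate(struct zonelist *zonelist,
		struct zone *preferred_zone, int order, gfp_t gfp_mask,
		nodemask_t *nodemask)
{
	bool contended = false;
	struct page *page;

	/* Honour the exponential backoff: skip while this zone/order is deferred. */
	if (compaction_deferred(preferred_zone, order))
		return NULL;

	/* COMPACT_SKIPPED means compaction could not run; fall back to reclaim. */
	if (try_to_compact_pages(zonelist, order, gfp_mask, nodemask,
				 false, &contended) == COMPACT_SKIPPED)
		return NULL;

	page = retry_allocation(zonelist, order, gfp_mask, nodemask);
	if (page)
		return page;

	/*
	 * Compaction ran but the allocation still failed: back off further
	 * attempts at this order, doubling the skip window up to
	 * 1 << COMPACT_MAX_DEFER_SHIFT (64) considerations.
	 */
	defer_compaction(preferred_zone, order);
	return NULL;
}

The backoff state is per zone and per order: defer_compaction() resets compact_considered and widens the skip window, while compaction_deferred() counts each consideration against that window, so repeated failures at a given order quickly stop consuming compaction cycles until the window is exhausted or the state is reset.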