Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] make uncached allocator more node aware
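
The change this merge brings in replaces the single global array of gen_pool pointers (capped by a global granule count) with a per-node struct uncached_pool that bundles the pool with a mutex, a per-node chunk counter, and an IPI status word. The core idea is that converting a cached granule to uncached memory is serialized per node, and a thread that blocked on the mutex rechecks the counter so it can reuse a chunk another thread just added instead of converting one of its own. A minimal sketch of that recheck pattern follows; it uses simplified stand-in names (demo_pool, demo_add_chunk, and MAX_CHUNKS are hypothetical, not kernel identifiers) and elides the actual granule conversion.

/* Sketch only: snapshot the counter, take the mutex, recheck.
 * A waiter that lost the race returns 0 so the caller retries
 * gen_pool_alloc() against the chunk the winner just added. */
#include <linux/mutex.h>

#define MAX_CHUNKS	2			/* hypothetical per-node limit */

struct demo_pool {
	struct mutex add_chunk_mutex;		/* serializes conversions */
	int nchunks_added;			/* chunks converted so far */
};

static int demo_add_chunk(struct demo_pool *p)
{
	int seen = p->nchunks_added;		/* snapshot before blocking */

	if (mutex_lock_interruptible(&p->add_chunk_mutex) != 0)
		return -1;			/* interrupted by a signal */

	if (p->nchunks_added > seen) {
		/* someone added a chunk while we waited; reuse it */
		mutex_unlock(&p->add_chunk_mutex);
		return 0;
	}
	if (p->nchunks_added >= MAX_CHUNKS) {
		mutex_unlock(&p->add_chunk_mutex);
		return -1;			/* this node is full */
	}

	/* ... allocate and convert a granule, add it to the pool ... */

	p->nchunks_added++;
	mutex_unlock(&p->add_chunk_mutex);
	return 0;
}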

+57 -29
arch/ia64/kernel/uncached.c
···
 extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
 
-#define MAX_UNCACHED_GRANULES	5
-static int allocated_granules;
+struct uncached_pool {
+	struct gen_pool *pool;
+	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
+	int nchunks_added;		/* #of converted chunks added to pool */
+	atomic_t status;		/* smp called function's return status*/
+};
 
-struct gen_pool *uncached_pool[MAX_NUMNODES];
+#define MAX_CONVERTED_CHUNKS_PER_NODE	2
+
+struct uncached_pool uncached_pools[MAX_NUMNODES];
 
 
 static void uncached_ipi_visibility(void *data)
 {
 	int status;
+	struct uncached_pool *uc_pool = (struct uncached_pool *)data;
 
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	if ((status != PAL_VISIBILITY_OK) &&
 	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
-		printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
-		       "CPU %i\n", status, raw_smp_processor_id());
+		atomic_inc(&uc_pool->status);
 }
 
 
 static void uncached_ipi_mc_drain(void *data)
 {
 	int status;
+	struct uncached_pool *uc_pool = (struct uncached_pool *)data;
 
 	status = ia64_pal_mc_drain();
-	if (status)
-		printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
-		       "CPU %i\n", status, raw_smp_processor_id());
+	if (status != PAL_STATUS_SUCCESS)
+		atomic_inc(&uc_pool->status);
 }
 
···
  * This is accomplished by first allocating a granule of cached memory pages
  * and then converting them to uncached memory pages.
  */
-static int uncached_add_chunk(struct gen_pool *pool, int nid)
+static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 {
 	struct page *page;
-	int status, i;
+	int status, i, nchunks_added = uc_pool->nchunks_added;
 	unsigned long c_addr, uc_addr;
 
-	if (allocated_granules >= MAX_UNCACHED_GRANULES)
+	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
+		return -1;	/* interrupted by a signal */
+
+	if (uc_pool->nchunks_added > nchunks_added) {
+		/* someone added a new chunk while we were waiting */
+		mutex_unlock(&uc_pool->add_chunk_mutex);
+		return 0;
+	}
+
+	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
+		mutex_unlock(&uc_pool->add_chunk_mutex);
 		return -1;
+	}
 
 	/* attempt to allocate a granule's worth of cached memory pages */
 
 	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
-	if (!page)
+	if (!page) {
+		mutex_unlock(&uc_pool->add_chunk_mutex);
 		return -1;
+	}
 
 	/* convert the memory pages from cached to uncached */
 
···
 	flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE);
 
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
-	if (!status) {
-		status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
-		if (status)
+	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
+		atomic_set(&uc_pool->status, 0);
+		status = smp_call_function(uncached_ipi_visibility, uc_pool,
+					   0, 1);
+		if (status || atomic_read(&uc_pool->status))
 			goto failed;
-	}
+	} else if (status != PAL_VISIBILITY_OK)
+		goto failed;
 
 	preempt_disable();
 
···
 
 	preempt_enable();
 
-	ia64_pal_mc_drain();
-	status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
-	if (status)
+	status = ia64_pal_mc_drain();
+	if (status != PAL_STATUS_SUCCESS)
+		goto failed;
+	atomic_set(&uc_pool->status, 0);
+	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+	if (status || atomic_read(&uc_pool->status))
 		goto failed;
 
 	/*
 	 * The chunk of memory pages has been converted to uncached so now we
 	 * can add it to the pool.
 	 */
-	status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
 	if (status)
 		goto failed;
 
-	allocated_granules++;
+	uc_pool->nchunks_added++;
+	mutex_unlock(&uc_pool->add_chunk_mutex);
 	return 0;
 
 	/* failed to convert or add the chunk so give it back to the kernel */
···
 		ClearPageUncached(&page[i]);
 
 	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+	mutex_unlock(&uc_pool->add_chunk_mutex);
 	return -1;
 }
 
···
 unsigned long uncached_alloc_page(int starting_nid)
 {
 	unsigned long uc_addr;
-	struct gen_pool *pool;
+	struct uncached_pool *uc_pool;
 	int nid;
 
 	if (unlikely(starting_nid >= MAX_NUMNODES))
···
 	do {
 		if (!node_online(nid))
 			continue;
-		pool = uncached_pool[nid];
-		if (pool == NULL)
+		uc_pool = &uncached_pools[nid];
+		if (uc_pool->pool == NULL)
 			continue;
 		do {
-			uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
+			uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);
 			if (uc_addr != 0)
 				return uc_addr;
-		} while (uncached_add_chunk(pool, nid) == 0);
+		} while (uncached_add_chunk(uc_pool, nid) == 0);
 
 	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
 
···
 void uncached_free_page(unsigned long uc_addr)
 {
 	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
-	struct gen_pool *pool = uncached_pool[nid];
+	struct gen_pool *pool = uncached_pools[nid].pool;
 
 	if (unlikely(pool == NULL))
 		return;
···
 				 unsigned long uc_end, void *arg)
 {
 	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
-	struct gen_pool *pool = uncached_pool[nid];
+	struct gen_pool *pool = uncached_pools[nid].pool;
 	size_t size = uc_end - uc_start;
 
 	touch_softlockup_watchdog();
···
 	int nid;
 
 	for_each_online_node(nid) {
-		uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
+		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
+		mutex_init(&uncached_pools[nid].add_chunk_mutex);
 	}
 
 	efi_memmap_walk_uc(uncached_build_memmap, NULL);
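
For context, here is a minimal sketch of how a caller uses the allocator this file exports; uncached_alloc_page() and uncached_free_page() and their signatures come from the diff above, while the surrounding demo function and the assumption that a return value of 0 means failure are illustrative only.

/* Illustrative caller of the IA64 uncached allocator (not part of the patch).
 * uncached_alloc_page() starts at the preferred node and walks the other
 * online nodes, converting a new granule when a node's pool is empty. */
#include <linux/errno.h>

extern unsigned long uncached_alloc_page(int starting_nid);
extern void uncached_free_page(unsigned long uc_addr);

static int demo_use_uncached(int nid)
{
	unsigned long uc_addr;

	uc_addr = uncached_alloc_page(nid);	/* prefer memory on 'nid' */
	if (uc_addr == 0)
		return -ENOMEM;			/* no uncached memory on any node */

	/* ... use the uncached mapping ... */

	uncached_free_page(uc_addr);		/* return the page to its node's pool */
	return 0;
}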