···
 extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
 
-#define MAX_UNCACHED_GRANULES	5
-static int allocated_granules;
+struct uncached_pool {
+	struct gen_pool *pool;
+	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
+	int nchunks_added;		/* #of converted chunks added to pool */
+	atomic_t status;		/* smp called function's return status*/
+};
 
-struct gen_pool *uncached_pool[MAX_NUMNODES];
+#define MAX_CONVERTED_CHUNKS_PER_NODE	2
+
+struct uncached_pool uncached_pools[MAX_NUMNODES];
 
 
 static void uncached_ipi_visibility(void *data)
 {
 	int status;
+	struct uncached_pool *uc_pool = (struct uncached_pool *)data;
 
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	if ((status != PAL_VISIBILITY_OK) &&
 	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
-		printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
-		       "CPU %i\n", status, raw_smp_processor_id());
+		atomic_inc(&uc_pool->status);
 }
 
 
 static void uncached_ipi_mc_drain(void *data)
 {
 	int status;
+	struct uncached_pool *uc_pool = (struct uncached_pool *)data;
 
 	status = ia64_pal_mc_drain();
-	if (status)
-		printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
-		       "CPU %i\n", status, raw_smp_processor_id());
+	if (status != PAL_STATUS_SUCCESS)
+		atomic_inc(&uc_pool->status);
 }
 
···
  * This is accomplished by first allocating a granule of cached memory pages
  * and then converting them to uncached memory pages.
  */
-static int uncached_add_chunk(struct gen_pool *pool, int nid)
+static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 {
 	struct page *page;
-	int status, i;
+	int status, i, nchunks_added = uc_pool->nchunks_added;
 	unsigned long c_addr, uc_addr;
 
-	if (allocated_granules >= MAX_UNCACHED_GRANULES)
+	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
+		return -1;	/* interrupted by a signal */
+
+	if (uc_pool->nchunks_added > nchunks_added) {
+		/* someone added a new chunk while we were waiting */
+		mutex_unlock(&uc_pool->add_chunk_mutex);
+		return 0;
+	}
+
+	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
+		mutex_unlock(&uc_pool->add_chunk_mutex);
 		return -1;
+	}
 
 	/* attempt to allocate a granule's worth of cached memory pages */
 
 	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
-	if (!page)
+	if (!page) {
+		mutex_unlock(&uc_pool->add_chunk_mutex);
 		return -1;
+	}
 
 	/* convert the memory pages from cached to uncached */
 
···
 	flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE);
 
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
-	if (!status) {
-		status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
-		if (status)
+	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
+		atomic_set(&uc_pool->status, 0);
+		status = smp_call_function(uncached_ipi_visibility, uc_pool,
+					   0, 1);
+		if (status || atomic_read(&uc_pool->status))
 			goto failed;
-	}
+	} else if (status != PAL_VISIBILITY_OK)
+		goto failed;
 
 	preempt_disable();
 
···
 
 	preempt_enable();
 
-	ia64_pal_mc_drain();
-	status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
-	if (status)
+	status = ia64_pal_mc_drain();
+	if (status != PAL_STATUS_SUCCESS)
+		goto failed;
+	atomic_set(&uc_pool->status, 0);
+	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+	if (status || atomic_read(&uc_pool->status))
 		goto failed;
 
 	/*
 	 * The chunk of memory pages has been converted to uncached so now we
 	 * can add it to the pool.
 	 */
-	status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
 	if (status)
 		goto failed;
 
-	allocated_granules++;
+	uc_pool->nchunks_added++;
+	mutex_unlock(&uc_pool->add_chunk_mutex);
 	return 0;
 
 	/* failed to convert or add the chunk so give it back to the kernel */
···
 		ClearPageUncached(&page[i]);
 
 	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+	mutex_unlock(&uc_pool->add_chunk_mutex);
 	return -1;
 }
 
···
 unsigned long uncached_alloc_page(int starting_nid)
 {
 	unsigned long uc_addr;
-	struct gen_pool *pool;
+	struct uncached_pool *uc_pool;
 	int nid;
 
 	if (unlikely(starting_nid >= MAX_NUMNODES))
···
 	do {
 		if (!node_online(nid))
 			continue;
-		pool = uncached_pool[nid];
-		if (pool == NULL)
+		uc_pool = &uncached_pools[nid];
+		if (uc_pool->pool == NULL)
 			continue;
 		do {
-			uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
+			uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);
 			if (uc_addr != 0)
 				return uc_addr;
-		} while (uncached_add_chunk(pool, nid) == 0);
+		} while (uncached_add_chunk(uc_pool, nid) == 0);
 
 	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
 
···
 void uncached_free_page(unsigned long uc_addr)
 {
 	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
-	struct gen_pool *pool = uncached_pool[nid];
+	struct gen_pool *pool = uncached_pools[nid].pool;
 
 	if (unlikely(pool == NULL))
 		return;
···
 			 unsigned long uc_end, void *arg)
 {
 	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
-	struct gen_pool *pool = uncached_pool[nid];
+	struct gen_pool *pool = uncached_pools[nid].pool;
 	size_t size = uc_end - uc_start;
 
 	touch_softlockup_watchdog();
···
 	int nid;
 
 	for_each_online_node(nid) {
-		uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
+		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
+		mutex_init(&uncached_pools[nid].add_chunk_mutex);
 	}
 
 	efi_memmap_walk_uc(uncached_build_memmap, NULL);
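
The heart of the change is the double-check on nchunks_added: uncached_add_chunk() samples the counter before blocking on add_chunk_mutex, and once the mutex is held it returns 0 (meaning "retry the allocation") if another CPU converted a granule in the meantime, so concurrent callers never convert more chunks than a single allocation failure warrants. Below is a minimal user-space sketch of that pattern using POSIX threads; fake_pool, add_chunk() and worker() are hypothetical stand-ins for illustration only, not kernel interfaces, and the sketch leaves out the patch's other mechanism of aggregating cross-CPU PAL errors through the atomic status field.

#include <pthread.h>
#include <stdio.h>

#define MAX_CHUNKS_PER_POOL	2	/* mirrors MAX_CONVERTED_CHUNKS_PER_NODE */

struct fake_pool {
	pthread_mutex_t add_chunk_mutex;
	int nchunks_added;		/* generation counter for added chunks */
};

static struct fake_pool pool = {
	.add_chunk_mutex = PTHREAD_MUTEX_INITIALIZER,
	.nchunks_added = 0,
};

/* Return 0 if the caller should retry its allocation, -1 if the pool is full. */
static int add_chunk(struct fake_pool *p)
{
	int seen = p->nchunks_added;	/* unlocked snapshot, as in the patch */

	pthread_mutex_lock(&p->add_chunk_mutex);

	if (p->nchunks_added > seen) {
		/* someone added a chunk while we were waiting for the mutex */
		pthread_mutex_unlock(&p->add_chunk_mutex);
		return 0;
	}

	if (p->nchunks_added >= MAX_CHUNKS_PER_POOL) {
		pthread_mutex_unlock(&p->add_chunk_mutex);
		return -1;
	}

	/* ... allocate and convert the chunk here ... */
	p->nchunks_added++;

	pthread_mutex_unlock(&p->add_chunk_mutex);
	return 0;
}

static void *worker(void *unused)
{
	(void)unused;
	printf("add_chunk() returned %d\n", add_chunk(&pool));
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	printf("chunks added: %d (never more than %d)\n",
	       pool.nchunks_added, MAX_CHUNKS_PER_POOL);
	return 0;
}

Returning 0 from the "someone else added a chunk" path mirrors the kernel code above, where uncached_alloc_page() keeps looping on gen_pool_alloc() for as long as uncached_add_chunk() reports 0.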