···
 extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
 
-#define MAX_UNCACHED_GRANULES	5
-static int allocated_granules;
+struct uncached_pool {
+	struct gen_pool *pool;
+	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
+	int nchunks_added;		/* #of converted chunks added to pool */
+	atomic_t status;		/* smp called function's return status*/
+};
 
-struct gen_pool *uncached_pool[MAX_NUMNODES];
+#define MAX_CONVERTED_CHUNKS_PER_NODE	2
+
+struct uncached_pool uncached_pools[MAX_NUMNODES];
 
 
 static void uncached_ipi_visibility(void *data)
 {
 	int status;
+	struct uncached_pool *uc_pool = (struct uncached_pool *)data;
 
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	if ((status != PAL_VISIBILITY_OK) &&
 	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
-		printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
-		       "CPU %i\n", status, raw_smp_processor_id());
+		atomic_inc(&uc_pool->status);
 }
 
 
 static void uncached_ipi_mc_drain(void *data)
 {
 	int status;
+	struct uncached_pool *uc_pool = (struct uncached_pool *)data;
 
 	status = ia64_pal_mc_drain();
-	if (status)
-		printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
-		       "CPU %i\n", status, raw_smp_processor_id());
+	if (status != PAL_STATUS_SUCCESS)
+		atomic_inc(&uc_pool->status);
 }
···
  * This is accomplished by first allocating a granule of cached memory pages
  * and then converting them to uncached memory pages.
  */
-static int uncached_add_chunk(struct gen_pool *pool, int nid)
+static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 {
 	struct page *page;
-	int status, i;
+	int status, i, nchunks_added = uc_pool->nchunks_added;
 	unsigned long c_addr, uc_addr;
 
-	if (allocated_granules >= MAX_UNCACHED_GRANULES)
+	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
+		return -1;	/* interrupted by a signal */
+
+	if (uc_pool->nchunks_added > nchunks_added) {
+		/* someone added a new chunk while we were waiting */
+		mutex_unlock(&uc_pool->add_chunk_mutex);
+		return 0;
+	}
+
+	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
+		mutex_unlock(&uc_pool->add_chunk_mutex);
 		return -1;
+	}
 
 	/* attempt to allocate a granule's worth of cached memory pages */
 
 	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
-	if (!page)
+	if (!page) {
+		mutex_unlock(&uc_pool->add_chunk_mutex);
 		return -1;
+	}
 
 	/* convert the memory pages from cached to uncached */
···
 	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
 
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
-	if (!status) {
-		status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
-		if (status)
+	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
+		atomic_set(&uc_pool->status, 0);
+		status = smp_call_function(uncached_ipi_visibility, uc_pool,
+					   0, 1);
+		if (status || atomic_read(&uc_pool->status))
 			goto failed;
-	}
+	} else if (status != PAL_VISIBILITY_OK)
+		goto failed;
 
 	preempt_disable();
···
 	preempt_enable();
 
-	ia64_pal_mc_drain();
-	status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
-	if (status)
+	status = ia64_pal_mc_drain();
+	if (status != PAL_STATUS_SUCCESS)
+		goto failed;
+	atomic_set(&uc_pool->status, 0);
+	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+	if (status || atomic_read(&uc_pool->status))
 		goto failed;
 
 	/*
 	 * The chunk of memory pages has been converted to uncached so now we
 	 * can add it to the pool.
 	 */
-	status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
 	if (status)
 		goto failed;
 
-	allocated_granules++;
+	uc_pool->nchunks_added++;
+	mutex_unlock(&uc_pool->add_chunk_mutex);
 	return 0;
 
 	/* failed to convert or add the chunk so give it back to the kernel */
···
 		ClearPageUncached(&page[i]);
 
 	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+	mutex_unlock(&uc_pool->add_chunk_mutex);
 	return -1;
 }
···
 unsigned long uncached_alloc_page(int starting_nid)
 {
 	unsigned long uc_addr;
-	struct gen_pool *pool;
+	struct uncached_pool *uc_pool;
 	int nid;
 
 	if (unlikely(starting_nid >= MAX_NUMNODES))
···
 	do {
 		if (!node_online(nid))
 			continue;
-		pool = uncached_pool[nid];
-		if (pool == NULL)
+		uc_pool = &uncached_pools[nid];
+		if (uc_pool->pool == NULL)
 			continue;
 		do {
-			uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
+			uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);
 			if (uc_addr != 0)
 				return uc_addr;
-		} while (uncached_add_chunk(pool, nid) == 0);
+		} while (uncached_add_chunk(uc_pool, nid) == 0);
 
 	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
···
 void uncached_free_page(unsigned long uc_addr)
 {
 	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
-	struct gen_pool *pool = uncached_pool[nid];
+	struct gen_pool *pool = uncached_pools[nid].pool;
 
 	if (unlikely(pool == NULL))
 		return;
···
 			      unsigned long uc_end, void *arg)
 {
 	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
-	struct gen_pool *pool = uncached_pool[nid];
+	struct gen_pool *pool = uncached_pools[nid].pool;
 	size_t size = uc_end - uc_start;
 
 	touch_softlockup_watchdog();
···
 	int nid;
 
 	for_each_online_node(nid) {
-		uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
+		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
+		mutex_init(&uncached_pools[nid].add_chunk_mutex);
 	}
 
 	efi_memmap_walk_uc(uncached_build_memmap, NULL);
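
The heart of the change is the add_chunk_mutex protocol in uncached_add_chunk(): a caller snapshots nchunks_added before blocking on the mutex, and if the counter has advanced by the time the lock is acquired, another caller already converted a granule, so it returns 0 and the allocator simply retries gen_pool_alloc() instead of converting a second chunk. A minimal userspace sketch of that protocol, with a pthread mutex standing in for the kernel mutex (fake_pool, fake_convert_chunk, and MAX_CHUNKS_PER_POOL are illustrative stand-ins, not kernel API):

/*
 * Sketch of the snapshot-then-recheck pattern from uncached_add_chunk().
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define MAX_CHUNKS_PER_POOL	2	/* plays MAX_CONVERTED_CHUNKS_PER_NODE */

struct fake_pool {
	pthread_mutex_t add_chunk_mutex;
	int nchunks_added;		/* generation counter, as in the patch */
};

static int fake_convert_chunk(void)	/* pretend the conversion succeeds */
{
	return 0;
}

static int fake_add_chunk(struct fake_pool *p)
{
	/* unlocked snapshot before blocking (the kernel code does the same) */
	int nchunks_added = p->nchunks_added;

	pthread_mutex_lock(&p->add_chunk_mutex);

	if (p->nchunks_added > nchunks_added) {
		/* someone added a chunk while we waited: retry the alloc */
		pthread_mutex_unlock(&p->add_chunk_mutex);
		return 0;
	}
	if (p->nchunks_added >= MAX_CHUNKS_PER_POOL ||
	    fake_convert_chunk() != 0) {
		pthread_mutex_unlock(&p->add_chunk_mutex);
		return -1;	/* pool is full or the conversion failed */
	}

	p->nchunks_added++;
	pthread_mutex_unlock(&p->add_chunk_mutex);
	return 0;
}

int main(void)
{
	struct fake_pool p = { PTHREAD_MUTEX_INITIALIZER, 0 };

	while (fake_add_chunk(&p) == 0)		/* mirrors the alloc/add loop */
		printf("chunks added: %d\n", p.nchunks_added);
	return 0;
}

Returning 0 from the "lost the race" branch matters: uncached_alloc_page() treats 0 as "worth calling gen_pool_alloc() again", so the loser allocates out of the chunk the winner just added rather than burning another granule.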
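
The atomic_t status field exists because smp_call_function() handlers cannot return a value to the initiating CPU: the caller zeroes the counter, every CPU whose PAL call fails increments it, and the initiator then checks both smp_call_function()'s own return code and the counter. A rough userspace analogue using C11 atomics, with threads standing in for the remote CPUs (do_visibility() is a made-up stand-in for the PAL call, not a real interface):

/*
 * Fan-out-and-aggregate: workers report failure by bumping a shared
 * atomic counter; the initiator checks it once they are all done.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS	4

static atomic_int status;		/* plays uc_pool->status */

static int do_visibility(int cpu)	/* fake PAL call: "CPU" 2 fails */
{
	return cpu == 2 ? -1 : 0;
}

static void *ipi_handler(void *arg)
{
	int cpu = (int)(long)arg;

	if (do_visibility(cpu) != 0)
		atomic_fetch_add(&status, 1);	/* report failure */
	return NULL;
}

int main(void)
{
	pthread_t t[NR_CPUS];
	long cpu;

	atomic_store(&status, 0);	/* reset before each fan-out */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_create(&t[cpu], NULL, ipi_handler, (void *)cpu);
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_join(t[cpu], NULL);

	/* any non-zero count means some "CPU" failed, so bail out */
	printf(atomic_load(&status) != 0 ? "failed\n" : "ok\n");
	return 0;
}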