/*
 * pSeries_lpar.c
 * Copyright (C) 2001 Todd Inglett, IBM Corporation
 *
 * pSeries LPAR support.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>

#include "pseries.h"

/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST	0x4000000000000000UL
#define HBR_RESPONSE	0x8000000000000000UL
#define HBR_END		0xc000000000000000UL
#define HBR_AVPN	0x0200000000000000UL
#define HBR_ANDCOND	0x0100000000000000UL
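
/*
 * Per PAPR, each H_BULK_REMOVE parameter is a 64-bit word whose top two
 * bits (HBR_REQUEST/HBR_RESPONSE/HBR_END) mark it as a request, a
 * hypervisor response, or the end of the list. Requests travel as
 * (control | slot, AVPN) pairs, at most four per hcall.
 */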

/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);

void vpa_init(int cpu)
{
	int hwcpu = get_hard_smp_processor_id(cpu);
	unsigned long addr;
	long ret;
	struct paca_struct *pp;
	struct dtl_entry *dtl;

	/*
	 * The spec says it "may be problematic" if CPU x registers the VPA of
	 * CPU y. We should never do that, but wail if we ever do.
	 */
	WARN_ON(cpu != smp_processor_id());

	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		lppaca_of(cpu).vmxregs_in_use = 1;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lppaca_of(cpu).ebb_regs_in_use = 1;

	addr = __pa(&lppaca_of(cpu));
	ret = register_vpa(hwcpu, addr);

	if (ret) {
		pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
		       "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
		return;
	}

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * PAPR says this feature is SLB-Buffer but firmware never
	 * reports that. All SPLPARs support the SLB shadow buffer.
	 */
	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
		addr = __pa(paca[cpu].slb_shadow_ptr);
		ret = register_slb_shadow(hwcpu, addr);
		if (ret)
			pr_err("WARNING: SLB shadow buffer registration for "
			       "cpu %d (hw %d) of area %lx failed with %ld\n",
			       cpu, hwcpu, addr, ret);
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * Register dispatch trace log, if one has been allocated.
	 */
	pp = &paca[cpu];
	dtl = pp->dispatch_log;
	if (dtl) {
		pp->dtl_ridx = 0;
		pp->dtl_curr = dtl;
		lppaca_of(cpu).dtl_idx = 0;

		/* hypervisor reads buffer length from this field */
		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
		ret = register_dtl(hwcpu, __pa(dtl));
		if (ret)
			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
			       "failed with %ld\n", smp_processor_id(),
			       hwcpu, ret);
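		/* 2 == DTL_LOG_PREEMPT: log only preemption/dispatch events */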
		lppaca_of(cpu).dtl_enable_mask = 2;
	}
}

#ifdef CONFIG_PPC_STD_MMU_64

static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long vpn, unsigned long pa,
				     unsigned long rflags, unsigned long vflags,
				     int psize, int apsize, int ssize)
{
	unsigned long lpar_rc;
	unsigned long flags;
	unsigned long slot;
	unsigned long hpte_v, hpte_r;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
			 hpte_group, vpn, pa, rflags, vflags, psize);

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;

	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);

	/* Now fill in the actual HPTE */
	/* Set CEC cookie to 0 */
	/* Zero page = 0 */
	/* I-cache Invalidate = 0 */
	/* I-cache synchronize = 0 */
	/* Exact = 0 */
	flags = 0;

	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
		flags |= H_COALESCE_CAND;

	lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
	if (unlikely(lpar_rc == H_PTEG_FULL)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" full\n");
		return -1;
	}

	/*
	 * Since we try and ioremap PHBs we don't own, the pte insert
	 * will fail. However we must catch the failure in hash_page
	 * or we will loop forever, so return -2 in this case.
	 */
	if (unlikely(lpar_rc != H_SUCCESS)) {
		if (!(vflags & HPTE_V_BOLTED))
			pr_devel(" lpar err %ld\n", lpar_rc);
		return -2;
	}
	if (!(vflags & HPTE_V_BOLTED))
		pr_devel(" -> slot: %lu\n", slot & 7);

	/* Because of iSeries, we have to pass down the secondary
	 * bucket bit here as well
	 */
	return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

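/*
 * Serializes tlbie-class hcalls on platforms without
 * MMU_FTR_LOCKLESS_TLBIE, so concurrent CPUs don't bounce the
 * hypervisor's global tlbie lock (see pSeries_lpar_flush_hash_range()).
 */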
static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);

static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
	unsigned long slot_offset;
	unsigned long lpar_rc;
	int i;
	unsigned long dummy1, dummy2;

	/* pick a random slot to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		/*
		 * Don't remove a bolted entry: with H_ANDCOND the HPTE is
		 * only removed if (pte_v & cond) is zero, and (0x1UL << 4)
		 * is HPTE_V_BOLTED.
		 */
		lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
					   (0x1UL << 4), &dummy1, &dummy2);
		if (lpar_rc == H_SUCCESS)
			return i;

		/*
		 * The test for adjunct partition is performed before the
		 * ANDCOND test. H_RESOURCE may be returned, so we need to
		 * check for that as well.
		 */
		BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);

		slot_offset++;
		slot_offset &= 0x7;
	}

	return -1;
}

static void pSeries_lpar_hptab_clear(void)
{
	unsigned long size_bytes = 1UL << ppc64_pft_size;
	unsigned long hpte_count = size_bytes >> 4;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long lpar_rc;
	unsigned long i, j;

	/* Read in batches of 4,
	 * invalidate only valid entries not in the VRMA
	 * hpte_count will be a multiple of 4
	 */
	for (i = 0; i < hpte_count; i += 4) {
		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;
		for (j = 0; j < 4; j++) {
			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
				HPTE_V_VRMA_MASK)
				continue;
			if (ptes[j].pteh & HPTE_V_VALID)
				plpar_pte_remove_raw(0, i + j, 0,
					&(ptes[j].pteh), &(ptes[j].ptel));
		}
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * Reset exceptions to big endian.
	 *
	 * FIXME this is a hack for kexec, we need to reset the exception
	 * endian before starting the new kernel and this is a convenient place
	 * to do it.
	 *
	 * This is also called on boot when a fadump happens. In that case we
	 * must not change the exception endian mode.
	 */
	if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) {
		long rc;

		rc = pseries_big_endian_exceptions();
		/*
		 * At this point it is unlikely panic() will get anything
		 * out to the user, but at least this will stop us from
		 * continuing on further and creating an even more
		 * difficult to debug situation.
		 *
		 * There is a known problem when kdump'ing, if cpus are offline
		 * the above call will fail. Rather than panicking again, keep
		 * going and hope the kdump kernel is also little endian, which
		 * it usually is.
		 */
		if (rc && !kdump_in_progress())
			panic("Could not enable big endian exceptions");
	}
#endif
}

/*
 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
 * the low 3 bits of flags happen to line up. So no transform is needed.
 * We can probably optimize here and assume the high bits of newpp are
 * already zero. For now I am paranoid.
 */
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
				       unsigned long newpp,
				       unsigned long vpn,
				       int psize, int apsize,
				       int ssize, unsigned long inv_flags)
{
	unsigned long lpar_rc;
	unsigned long flags = (newpp & 7) | H_AVPN;
	unsigned long want_v;

	want_v = hpte_encode_avpn(vpn, psize, ssize);

	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
		 want_v, slot, flags, psize);

	lpar_rc = plpar_pte_protect(flags, slot, want_v);

	if (lpar_rc == H_NOT_FOUND) {
		pr_devel("not found !\n");
		return -1;
	}

	pr_devel("ok\n");

	BUG_ON(lpar_rc != H_SUCCESS);

	return 0;
}

static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
	long lpar_rc;
	unsigned long i, j;
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];

	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {

		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
		if (lpar_rc != H_SUCCESS)
			continue;

		for (j = 0; j < 4; j++) {
			if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
			    (ptes[j].pteh & HPTE_V_VALID))
				return i + j;
		}
	}

	return -1;
}

static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
	long slot;
	unsigned long hash;
	unsigned long want_v;
	unsigned long hpte_group;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted entries are always in the primary group */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
	if (slot < 0)
		return -1;
	return hpte_group + slot;
}

static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
					     unsigned long ea,
					     int psize, int ssize)
{
	unsigned long vpn;
	unsigned long lpar_rc, slot, vsid, flags;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	BUG_ON(slot == -1);

	flags = newpp & 7;
	lpar_rc = plpar_pte_protect(flags, slot, 0);

	BUG_ON(lpar_rc != H_SUCCESS);
}

static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
					 int psize, int apsize,
					 int ssize, int local)
{
	unsigned long want_v;
	unsigned long lpar_rc;
	unsigned long dummy1, dummy2;

	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
		 slot, vpn, psize, local);

	want_v = hpte_encode_avpn(vpn, psize, ssize);
	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
	if (lpar_rc == H_NOT_FOUND)
		return;

	BUG_ON(lpar_rc != H_SUCCESS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
 * to make sure that we avoid bouncing the hypervisor tlbie lock.
 */
#define PPC64_HUGE_HPTE_BATCH 12
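
/*
 * One H_BULK_REMOVE hcall carries at most 4 (control, AVPN) pairs in its
 * 8 parameters, so a batch of 12 HPTEs costs at most 3 hcalls per
 * acquisition of pSeries_lpar_tlbie_lock.
 */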

static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
					       unsigned long *vpn, int count,
					       int psize, int ssize)
{
	unsigned long param[8];
	int i = 0, pix = 0, rc;
	unsigned long flags = 0;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	for (i = 0; i < count; i++) {

		if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
			pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
						     ssize, 0);
		} else {
			param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
			param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
			pix += 2;
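			/* 8 params filled == 4 pairs: fire a full H_BULK_REMOVE */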
			if (pix == 8) {
				rc = plpar_hcall9(H_BULK_REMOVE, param,
						  param[0], param[1], param[2],
						  param[3], param[4], param[5],
						  param[6], param[7]);
				BUG_ON(rc != H_SUCCESS);
				pix = 0;
			}
		}
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	int i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

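		/* global slot = group base (hash * HPTES_PER_GROUP) + index in group */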
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}
#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
					     unsigned long addr,
					     unsigned char *hpte_slot_array,
					     int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

static int pSeries_lpar_hpte_removebolted(unsigned long ea,
					  int psize, int ssize)
{
	unsigned long vpn;
	unsigned long slot, vsid;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	/*
	 * lpar doesn't use the passed actual page size
	 */
	pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
	return 0;
}

/*
 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
 * lock.
 */
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long i, pix, rc;
	unsigned long flags = 0;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long param[9];
	unsigned long hash, index, shift, hidx, slot;
	real_pte_t pte;
	int psize, ssize;

	if (lock_tlbie)
		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);

	psize = batch->psize;
	ssize = batch->ssize;
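	/*
	 * A batch only ever contains one base page size and segment size;
	 * the generic hash-MMU batching code flushes whenever either
	 * would change.
	 */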
	pix = 0;
	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];
		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
				/*
				 * lpar doesn't use the passed actual page size
				 */
				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
							     0, ssize, local);
			} else {
				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
				param[pix+1] = hpte_encode_avpn(vpn, psize,
								ssize);
				pix += 2;
				if (pix == 8) {
					rc = plpar_hcall9(H_BULK_REMOVE, param,
						param[0], param[1], param[2],
						param[3], param[4], param[5],
						param[6], param[7]);
					BUG_ON(rc != H_SUCCESS);
					pix = 0;
				}
			}
		} pte_iterate_hashed_end();
	}
	if (pix) {
		param[pix] = HBR_END;
		rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
				  param[2], param[3], param[4], param[5],
				  param[6], param[7]);
		BUG_ON(rc != H_SUCCESS);
	}

	if (lock_tlbie)
		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}

static int __init disable_bulk_remove(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
	}
	return 1;
}

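/* Booting with "bulk_remove=off" falls back to one H_REMOVE per PTE. */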
__setup("bulk_remove=", disable_bulk_remove);

void __init hpte_init_lpar(void)
{
	ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate;
	ppc_md.hpte_updatepp = pSeries_lpar_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
	ppc_md.hpte_insert = pSeries_lpar_hpte_insert;
	ppc_md.hpte_remove = pSeries_lpar_hpte_remove;
	ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
	ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range;
	ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear;
	ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}

#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;

static int __init cmo_free_hint(char *str)
{
	char *parm;
	parm = strstrip(str);

	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
		printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n");
		cmo_free_hint_flag = 0;
		return 1;
	}

	cmo_free_hint_flag = 1;
	printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n");

	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
		return 1;

	return 0;
}

__setup("cmo_free_hint=", cmo_free_hint);

static void pSeries_set_page_state(struct page *page, int order,
				   unsigned long state)
{
	int i, j;
	unsigned long cmo_page_sz, addr;

	cmo_page_sz = cmo_get_page_size();
	addr = __pa((unsigned long)page_address(page));

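	/*
	 * The firmware (CMO) page size may be smaller than the kernel's
	 * PAGE_SIZE, so hint every firmware-sized subpage of each page.
	 */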
	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
	}
}

void arch_free_page(struct page *page, int order)
{
	if (radix_enabled())
		return;
	if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
		return;

	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);

#endif /* CONFIG_PPC_SMLPAR */
#endif /* CONFIG_PPC_STD_MMU_64 */

#ifdef CONFIG_TRACEPOINTS
#ifdef HAVE_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;

void hcall_tracepoint_regfunc(void)
{
	static_key_slow_inc(&hcall_tracepoint_key);
}

void hcall_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
 * We optimise our hcall path by placing hcall_tracepoint_refcount
 * directly in the TOC so we can check if the hcall tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;

void hcall_tracepoint_regfunc(void)
{
	hcall_tracepoint_refcount++;
}

void hcall_tracepoint_unregfunc(void)
{
	hcall_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute hcalls we need to guard against
 * recursion. One example of this are spinlocks calling H_YIELD on
 * shared processor partitions.
 */
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);


void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
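	/*
	 * Preemption stays disabled across the hcall itself and is only
	 * re-enabled in __trace_hcall_exit(), presumably so the entry and
	 * exit events for a given hcall are recorded on the same CPU.
	 */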
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_hcall_exit(long opcode, unsigned long retval,
			unsigned long *retbuf)
{
	unsigned long flags;
	unsigned int *depth;

	if (opcode == H_CEDE)
		return;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
#endif

/**
 * h_get_mpp
 * H_GET_MPP hcall returns info in 7 parms
 */
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_MPP, retbuf);

	mpp_data->entitled_mem = retbuf[0];
	mpp_data->mapped_mem = retbuf[1];

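	/*
	 * retbuf[2] and retbuf[3] pack several sub-fields; the shifts
	 * below are byte-granular (e.g. "2 * 8" skips two bytes).
	 */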
	mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	mpp_data->pool_num = retbuf[2] & 0xffff;

	mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
	mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
	mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;

	mpp_data->pool_size = retbuf[4];
	mpp_data->loan_request = retbuf[5];
	mpp_data->backing_mem = retbuf[6];

	return rc;
}
EXPORT_SYMBOL(h_get_mpp);

int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
	int rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };

	rc = plpar_hcall9(H_GET_MPP_X, retbuf);

	mpp_x_data->coalesced_bytes = retbuf[0];
	mpp_x_data->pool_coalesced_bytes = retbuf[1];
	mpp_x_data->pool_purr_cycles = retbuf[2];
	mpp_x_data->pool_spurr_cycles = retbuf[3];

	return rc;
}