// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>

#include "xive-internal.h"

static u32 xive_queue_shift;

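/*
 * One xive_irq_bitmap covers a range of hardware interrupt numbers
 * (LISNs) provided by the hypervisor in "ibm,xive-lisn-ranges" and
 * tracks which numbers in that range are currently allocated.
 */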
struct xive_irq_bitmap {
	unsigned long		*bitmap;
	unsigned int		base;
	unsigned int		count;
	spinlock_t		lock;
	struct list_head	list;
};

static LIST_HEAD(xive_irq_bitmaps);

static int __init xive_irq_bitmap_add(int base, int count)
{
	struct xive_irq_bitmap *xibm;

	xibm = kzalloc(sizeof(*xibm), GFP_KERNEL);
	if (!xibm)
		return -ENOMEM;

	spin_lock_init(&xibm->lock);
	xibm->base = base;
	xibm->count = count;
	xibm->bitmap = bitmap_zalloc(xibm->count, GFP_KERNEL);
	if (!xibm->bitmap) {
		kfree(xibm);
		return -ENOMEM;
	}
	list_add(&xibm->list, &xive_irq_bitmaps);

	pr_info("Using IRQ range [%x-%x]\n", xibm->base,
		xibm->base + xibm->count - 1);
	return 0;
}

static void xive_irq_bitmap_remove_all(void)
{
	struct xive_irq_bitmap *xibm, *tmp;

	list_for_each_entry_safe(xibm, tmp, &xive_irq_bitmaps, list) {
		list_del(&xibm->list);
		bitmap_free(xibm->bitmap);
		kfree(xibm);
	}
}

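/*
 * Find a free interrupt number in @xibm and mark it used. Returns the
 * absolute LISN, or -ENOMEM if the range is exhausted. The caller must
 * hold xibm->lock.
 */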
static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
{
	int irq;

	irq = find_first_zero_bit(xibm->bitmap, xibm->count);
	if (irq != xibm->count) {
		set_bit(irq, xibm->bitmap);
		irq += xibm->base;
	} else {
		irq = -ENOMEM;
	}

	return irq;
}

static int xive_irq_bitmap_alloc(void)
{
	struct xive_irq_bitmap *xibm;
	unsigned long flags;
	int irq = -ENOENT;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		spin_lock_irqsave(&xibm->lock, flags);
		irq = __xive_irq_bitmap_alloc(xibm);
		spin_unlock_irqrestore(&xibm->lock, flags);
		if (irq >= 0)
			break;
	}
	return irq;
}

static void xive_irq_bitmap_free(int irq)
{
	unsigned long flags;
	struct xive_irq_bitmap *xibm;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
			spin_lock_irqsave(&xibm->lock, flags);
			clear_bit(irq - xibm->base, xibm->bitmap);
			spin_unlock_irqrestore(&xibm->lock, flags);
			break;
		}
	}
}

/* Based on the similar routines in RTAS */
static unsigned int plpar_busy_delay_time(long rc)
{
	unsigned int ms = 0;

	if (H_IS_LONG_BUSY(rc)) {
		ms = get_longbusy_msecs(rc);
	} else if (rc == H_BUSY) {
		ms = 10; /* seems appropriate for XIVE hcalls */
	}

	return ms;
}

static unsigned int plpar_busy_delay(int rc)
{
	unsigned int ms;

	ms = plpar_busy_delay_time(rc);
	if (ms)
		mdelay(ms);

	return ms;
}

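/*
 * The hcall wrappers below all retry while the hypervisor returns a
 * busy status (H_BUSY or H_LONG_BUSY_*), waiting for the hinted
 * number of milliseconds between attempts.
 */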
/*
 * Note: this call has a partition wide scope and can take a while to
 * complete. If it returns H_LONG_BUSY_* it should be retried
 * periodically.
 */
static long plpar_int_reset(unsigned long flags)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_RESET, flags);
	} while (plpar_busy_delay(rc));

	if (rc)
		pr_err("H_INT_RESET failed %ld\n", rc);

	return rc;
}

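/*
 * H_INT_GET_SOURCE_INFO returns the characteristics of an interrupt
 * source: its flags, the real addresses of its EOI and trigger ESB
 * pages and the (log2) size of those pages.
 */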
static long plpar_int_get_source_info(unsigned long flags,
				      unsigned long lisn,
				      unsigned long *src_flags,
				      unsigned long *eoi_page,
				      unsigned long *trig_page,
				      unsigned long *esb_shift)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_INFO lisn=0x%lx failed %ld\n", lisn, rc);
		return rc;
	}

	*src_flags = retbuf[0];
	*eoi_page = retbuf[1];
	*trig_page = retbuf[2];
	*esb_shift = retbuf[3];

	pr_debug("H_INT_GET_SOURCE_INFO lisn=0x%lx flags=0x%lx eoi=0x%lx trig=0x%lx shift=0x%lx\n",
		 lisn, retbuf[0], retbuf[1], retbuf[2], retbuf[3]);

	return 0;
}

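/*
 * The PAPR flags below are defined with IBM (big-endian) bit
 * numbering, where bit 0 is the most significant bit, hence the
 * (63 - n) shifts.
 */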
#define XIVE_SRC_SET_EISN	(1ull << (63 - 62))
#define XIVE_SRC_MASK		(1ull << (63 - 63)) /* unused */

static long plpar_int_set_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long target,
					unsigned long prio,
					unsigned long sw_irq)
{
	long rc;

	pr_debug("H_INT_SET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx target=%ld prio=%ld sw_irq=%ld\n",
		 flags, lisn, target, prio, sw_irq);

	do {
		rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
					target, prio, sw_irq);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_SOURCE_CONFIG lisn=0x%lx target=%ld prio=%ld failed %ld\n",
		       lisn, target, prio, rc);
		return rc;
	}

	return 0;
}

static long plpar_int_get_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long *target,
					unsigned long *prio,
					unsigned long *sw_irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_debug("H_INT_GET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx\n", flags, lisn);

	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn,
				 target, prio, sw_irq);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_CONFIG lisn=0x%lx failed %ld\n",
		       lisn, rc);
		return rc;
	}

	*target = retbuf[0];
	*prio = retbuf[1];
	*sw_irq = retbuf[2];

	pr_debug("H_INT_GET_SOURCE_CONFIG target=%ld prio=%ld sw_irq=%ld\n",
		 retbuf[0], retbuf[1], retbuf[2]);

	return 0;
}

static long plpar_int_get_queue_info(unsigned long flags,
				     unsigned long target,
				     unsigned long priority,
				     unsigned long *esn_page,
				     unsigned long *esn_size)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target,
				 priority);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
		       target, priority, rc);
		return rc;
	}

	*esn_page = retbuf[0];
	*esn_size = retbuf[1];

	pr_debug("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld page=0x%lx size=0x%lx\n",
		 target, priority, retbuf[0], retbuf[1]);

	return 0;
}

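/*
 * The qsize argument of H_INT_SET_QUEUE_CONFIG, as used here, is the
 * event queue size expressed as log2 of the size in bytes; a qpage
 * and qsize of 0 un-configure the queue (see
 * xive_spapr_cleanup_queue()).
 */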
#define XIVE_EQ_ALWAYS_NOTIFY	(1ull << (63 - 63))

static long plpar_int_set_queue_config(unsigned long flags,
				       unsigned long target,
				       unsigned long priority,
				       unsigned long qpage,
				       unsigned long qsize)
{
	long rc;

	pr_debug("H_INT_SET_QUEUE_CONFIG flags=0x%lx target=%ld priority=0x%lx qpage=0x%lx qsize=0x%lx\n",
		 flags, target, priority, qpage, qsize);

	do {
		rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
					priority, qpage, qsize);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=0x%lx returned %ld\n",
		       target, priority, qpage, rc);
		return rc;
	}

	return 0;
}

static long plpar_int_sync(unsigned long flags, unsigned long lisn)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SYNC lisn=0x%lx returned %ld\n", lisn, rc);
		return rc;
	}

	return 0;
}

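/*
 * H_INT_ESB performs a load from (or, with XIVE_ESB_FLAG_STORE, a
 * store to) the ESB management page of an interrupt source on behalf
 * of the guest. It is used when the ESB pages cannot be mapped and
 * accessed directly.
 */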
#define XIVE_ESB_FLAG_STORE	(1ull << (63 - 63))

static long plpar_int_esb(unsigned long flags,
			  unsigned long lisn,
			  unsigned long offset,
			  unsigned long in_data,
			  unsigned long *out_data)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_debug("H_INT_ESB flags=0x%lx lisn=0x%lx offset=0x%lx in=0x%lx\n",
		 flags, lisn, offset, in_data);

	do {
		rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
				 in_data);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_ESB lisn=0x%lx offset=0x%lx returned %ld\n",
		       lisn, offset, rc);
		return rc;
	}

	*out_data = retbuf[0];

	return 0;
}

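/* Emulate an ESB page load/store using the H_INT_ESB hcall */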
static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
{
	unsigned long read_data;
	long rc;

	rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
			   lisn, offset, data, &read_data);
	if (rc)
		return -1;

	return write ? 0 : read_data;
}

#define XIVE_SRC_H_INT_ESB	(1ull << (63 - 60))
#define XIVE_SRC_LSI		(1ull << (63 - 61))
#define XIVE_SRC_TRIGGER	(1ull << (63 - 62))
#define XIVE_SRC_STORE_EOI	(1ull << (63 - 63))

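/*
 * Retrieve the characteristics of an interrupt source from the
 * hypervisor and map its ESB pages for the XIVE core to use.
 */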
static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	long rc;
	unsigned long flags;
	unsigned long eoi_page;
	unsigned long trig_page;
	unsigned long esb_shift;

	memset(data, 0, sizeof(*data));

	rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
				       &esb_shift);
	if (rc)
		return -EINVAL;

	if (flags & XIVE_SRC_H_INT_ESB)
		data->flags |= XIVE_IRQ_FLAG_H_INT_ESB;
	if (flags & XIVE_SRC_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (flags & XIVE_SRC_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = eoi_page;
	data->esb_shift = esb_shift;
	data->trig_page = trig_page;

	data->hw_irq = hw_irq;

	/*
	 * No chip-id for the sPAPR backend. This has an impact on how
	 * we pick a target. See xive_pick_irq_target().
	 */
	data->src_chip = XIVE_INVALID_CHIP_ID;

	/*
	 * When the H_INT_ESB flag is set, the H_INT_ESB hcall should
	 * be used for interrupt management. Skip the remapping of the
	 * ESB pages which are not available.
	 */
	if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
		return 0;

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	/* Full function page supports trigger */
	if (flags & XIVE_SRC_TRIGGER) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}

static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	long rc;

	rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
					 prio, sw_irq);

	return rc == 0 ? 0 : -ENXIO;
}

static int xive_spapr_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
				     u32 *sw_irq)
{
	long rc;
	unsigned long h_target;
	unsigned long h_prio;
	unsigned long h_sw_irq;

	rc = plpar_int_get_source_config(0, hw_irq, &h_target, &h_prio,
					 &h_sw_irq);

	*target = h_target;
	*prio = h_prio;
	*sw_irq = h_sw_irq;

	return rc == 0 ? 0 : -ENXIO;
}

/* This can be called multiple times to change a queue configuration */
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
				      __be32 *qpage, u32 order)
{
	s64 rc = 0;
	unsigned long esn_page;
	unsigned long esn_size;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else {
		qpage_phys = 0;
	}

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
	if (rc) {
		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
		goto fail;
	}

	/* TODO: add support for the notification page */
	q->eoi_phys = esn_page;

	/* Default is to always notify */
	flags = XIVE_EQ_ALWAYS_NOTIFY;

	/* Configure and enable the queue in HW */
	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
	if (rc) {
		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
	} else {
		q->qpage = qpage;
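		/*
		 * In a secure (SVM) guest, the queue page must be
		 * shared with the hypervisor through the ultravisor
		 * so that event notifications can be written to it.
		 */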
		if (is_secure_guest())
			uv_share_page(PHYS_PFN(qpage_phys),
				      1 << xive_alloc_order(order));
	}
fail:
	return rc;
}

static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
				  u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
					  q, prio, qpage, xive_queue_shift);
}

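/*
 * Undo xive_spapr_setup_queue(): reset the queue configuration in HW
 * (qpage and qsize of 0), un-share the queue page from the hypervisor
 * if this is a secure guest, and free the page.
 */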
static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
				     u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;
	long rc;
	int hw_cpu = get_hard_smp_processor_id(cpu);

	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
	if (rc)
		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
		       hw_cpu, prio);

	alloc_order = xive_alloc_order(xive_queue_shift);
	if (is_secure_guest())
		uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_spapr_match(struct device_node *node)
{
	/* Ignore cascaded controllers for the moment */
	return true;
}

#ifdef CONFIG_SMP
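/* IPIs are allocated out of the LISN ranges retrieved from the device tree */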
static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	int irq = xive_irq_bitmap_alloc();

	if (irq < 0) {
		pr_err("Failed to allocate IPI on CPU %d\n", cpu);
		return -ENXIO;
	}

	xc->hw_ipi = irq;
	return 0;
}

static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;

	xive_irq_bitmap_free(xc->hw_ipi);
	xc->hw_ipi = XIVE_BAD_IRQ;
}
#endif /* CONFIG_SMP */

static void xive_spapr_shutdown(void)
{
	plpar_int_reset(0);
}

/*
 * Perform an "ack" cycle on the current thread. Grab the pending
 * active priorities and update the CPPR to the most favored one.
 */
static void xive_spapr_update_pending(struct xive_cpu *xc)
{
	u8 nsr, cppr;
	u16 ack;

	/*
	 * Perform the "Acknowledge O/S to Register" cycle.
	 *
	 * Let's speed up the access to the TIMA using the raw I/O
	 * accessor as we don't need the synchronisation routine of
	 * the higher level ones
	 */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "NSR" field which indicates the source
	 * of the interrupt (if any)
	 */
	cppr = ack & 0xff;
	nsr = ack >> 8;

	if (nsr & TM_QW1_NSR_EO) {
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
	}
}

static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Only some debug on the TIMA settings */
	pr_debug("(HW value: %08x %08x %08x)\n",
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
}

static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Nothing to do */;
}

static void xive_spapr_sync_source(u32 hw_irq)
{
	/* Specs are unclear on what this is doing */
	plpar_int_sync(0, hw_irq);
}

static int xive_spapr_debug_show(struct seq_file *m, void *private)
{
	struct xive_irq_bitmap *xibm;
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		memset(buf, 0, PAGE_SIZE);
		bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);
		seq_printf(m, "bitmap #%d: %s", xibm->count, buf);
	}
	kfree(buf);

	return 0;
}

static const struct xive_ops xive_spapr_ops = {
	.populate_irq_data	= xive_spapr_populate_irq_data,
	.configure_irq		= xive_spapr_configure_irq,
	.get_irq_config		= xive_spapr_get_irq_config,
	.setup_queue		= xive_spapr_setup_queue,
	.cleanup_queue		= xive_spapr_cleanup_queue,
	.match			= xive_spapr_match,
	.shutdown		= xive_spapr_shutdown,
	.update_pending		= xive_spapr_update_pending,
	.setup_cpu		= xive_spapr_setup_cpu,
	.teardown_cpu		= xive_spapr_teardown_cpu,
	.sync_source		= xive_spapr_sync_source,
	.esb_rw			= xive_spapr_esb_rw,
#ifdef CONFIG_SMP
	.get_ipi		= xive_spapr_get_ipi,
	.put_ipi		= xive_spapr_put_ipi,
	.debug_show		= xive_spapr_debug_show,
#endif /* CONFIG_SMP */
	.name			= "spapr",
};

/*
 * Get the maximum usable priority from "/ibm,plat-res-int-priorities"
 */
static bool __init xive_get_max_prio(u8 *max_prio)
{
	struct device_node *rootdn;
	const __be32 *reg;
	u32 len;
	int prio, found;

	rootdn = of_find_node_by_path("/");
	if (!rootdn) {
		pr_err("root node not found!\n");
		return false;
	}

	reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
	of_node_put(rootdn);
	if (!reg) {
		pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	/* HW supports priorities in the range [0-7] and 0xFF is a
	 * wildcard priority used to mask. We scan the ranges reserved
	 * by the hypervisor to find the lowest priority we can use.
	 */
	found = 0xFF;
	for (prio = 0; prio < 8; prio++) {
		int reserved = 0;
		int i;

		for (i = 0; i < len / (2 * sizeof(u32)); i++) {
			int base = be32_to_cpu(reg[2 * i]);
			int range = be32_to_cpu(reg[2 * i + 1]);

			if (prio >= base && prio < base + range)
				reserved++;
		}

		if (!reserved)
			found = prio;
	}

	if (found == 0xFF) {
		pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
		return false;
	}

	*max_prio = found;
	return true;
}

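/*
 * Return a pointer to the given byte of the "ibm,architecture-vec-5"
 * property under /chosen in the flattened device tree, i.e. the
 * option vector 5 answer negotiated with the hypervisor at client
 * architecture support (CAS) time.
 */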
static const u8 *__init get_vec5_feature(unsigned int index)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND)
		return NULL;

	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5)
		return NULL;

	if (size <= index)
		return NULL;

	return vec5 + index;
}

static bool __init xive_spapr_disabled(void)
{
	const u8 *vec5_xive;

	vec5_xive = get_vec5_feature(OV5_INDX(OV5_XIVE_SUPPORT));
	if (vec5_xive) {
		u8 val;

		val = *vec5_xive & OV5_FEAT(OV5_XIVE_SUPPORT);
		switch (val) {
		case OV5_FEAT(OV5_XIVE_EITHER):
		case OV5_FEAT(OV5_XIVE_LEGACY):
			break;
		case OV5_FEAT(OV5_XIVE_EXPLOIT):
			/* Hypervisor only supports XIVE */
			if (xive_cmdline_disabled)
				pr_warn("WARNING: Ignoring cmdline option xive=off\n");
			return false;
		default:
			pr_warn("%s: Unknown xive support option: 0x%x\n",
				__func__, val);
			break;
		}
	}

	return xive_cmdline_disabled;
}

bool __init xive_spapr_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio;
	u32 val;
	u32 len;
	const __be32 *reg;
	int i, err;

	if (xive_spapr_disabled())
		return false;

	pr_devel("%s()\n", __func__);
	np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
	if (!np) {
		pr_devel("not found!\n");
		return false;
	}
	pr_devel("Found %s\n", np->full_name);

	/* Resource 1 is the OS ring TIMA */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		goto err_put;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		goto err_put;
	}

	if (!xive_get_max_prio(&max_prio))
		goto err_unmap;

	/* Feed the IRQ number allocator with the ranges given in the DT */
	reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
		goto err_unmap;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
		goto err_unmap;
	}

	for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2) {
		err = xive_irq_bitmap_add(be32_to_cpu(reg[0]),
					  be32_to_cpu(reg[1]));
		if (err < 0)
			goto err_mem_free;
	}

	/* Iterate the EQ sizes and pick one matching the page size, if any */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(np, &xive_spapr_ops, tima, TM_QW1_OS, max_prio))
		goto err_mem_free;

	of_node_put(np);
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;

err_mem_free:
	xive_irq_bitmap_remove_all();
err_unmap:
	iounmap(tima);
err_put:
	of_node_put(np);
	return false;
}

machine_arch_initcall(pseries, xive_core_debug_init);