Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

irqchip/irq-sifive-plic: Add syscore callbacks for hibernation

The priority and enable registers of the PLIC are reset during the
hibernation power cycle in poweroff mode. Add syscore callbacks to
save and restore those registers across hibernation.

Signed-off-by: Mason Huo <mason.huo@starfivetech.com>
Reviewed-by: Ley Foon Tan <leyfoon.tan@starfivetech.com>
Reviewed-by: Sia Jee Heng <jeeheng.sia@starfivetech.com>
Reported-by: Dan Carpenter <error27@gmail.com>
Link: https://lore.kernel.org/r/202302140709.CdkxgtPi-lkp@intel.com/
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230404032908.89638-1-mason.huo@starfivetech.com

Authored by Mason Huo and committed by Marc Zyngier
e80f0b6a 9dfc7791

+91 -2
drivers/irqchip/irq-sifive-plic.c
··· 17 17 #include <linux/of_irq.h> 18 18 #include <linux/platform_device.h> 19 19 #include <linux/spinlock.h> 20 + #include <linux/syscore_ops.h> 20 21 #include <asm/smp.h> 21 22 22 23 /* ··· 68 67 struct irq_domain *irqdomain; 69 68 void __iomem *regs; 70 69 unsigned long plic_quirks; 70 + unsigned int nr_irqs; 71 + unsigned long *prio_save; 71 72 }; 72 73 73 74 struct plic_handler { ··· 81 78 */ 82 79 raw_spinlock_t enable_lock; 83 80 void __iomem *enable_base; 81 + u32 *enable_save; 84 82 struct plic_priv *priv; 85 83 }; 86 84 static int plic_parent_irq __ro_after_init; ··· 233 229 return IRQ_SET_MASK_OK; 234 230 } 235 231 232 + static int plic_irq_suspend(void) 233 + { 234 + unsigned int i, cpu; 235 + u32 __iomem *reg; 236 + struct plic_priv *priv; 237 + 238 + priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; 239 + 240 + for (i = 0; i < priv->nr_irqs; i++) 241 + if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID)) 242 + __set_bit(i, priv->prio_save); 243 + else 244 + __clear_bit(i, priv->prio_save); 245 + 246 + for_each_cpu(cpu, cpu_present_mask) { 247 + struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); 248 + 249 + if (!handler->present) 250 + continue; 251 + 252 + raw_spin_lock(&handler->enable_lock); 253 + for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) { 254 + reg = handler->enable_base + i * sizeof(u32); 255 + handler->enable_save[i] = readl(reg); 256 + } 257 + raw_spin_unlock(&handler->enable_lock); 258 + } 259 + 260 + return 0; 261 + } 262 + 263 + static void plic_irq_resume(void) 264 + { 265 + unsigned int i, index, cpu; 266 + u32 __iomem *reg; 267 + struct plic_priv *priv; 268 + 269 + priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; 270 + 271 + for (i = 0; i < priv->nr_irqs; i++) { 272 + index = BIT_WORD(i); 273 + writel((priv->prio_save[index] & BIT_MASK(i)) ? 
1 : 0, 274 + priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID); 275 + } 276 + 277 + for_each_cpu(cpu, cpu_present_mask) { 278 + struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); 279 + 280 + if (!handler->present) 281 + continue; 282 + 283 + raw_spin_lock(&handler->enable_lock); 284 + for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) { 285 + reg = handler->enable_base + i * sizeof(u32); 286 + writel(handler->enable_save[i], reg); 287 + } 288 + raw_spin_unlock(&handler->enable_lock); 289 + } 290 + } 291 + 292 + static struct syscore_ops plic_irq_syscore_ops = { 293 + .suspend = plic_irq_suspend, 294 + .resume = plic_irq_resume, 295 + }; 296 + 236 297 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq, 237 298 irq_hw_number_t hwirq) 238 299 { ··· 414 345 u32 nr_irqs; 415 346 struct plic_priv *priv; 416 347 struct plic_handler *handler; 348 + unsigned int cpu; 417 349 418 350 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 419 351 if (!priv) ··· 433 363 if (WARN_ON(!nr_irqs)) 434 364 goto out_iounmap; 435 365 366 + priv->nr_irqs = nr_irqs; 367 + 368 + priv->prio_save = bitmap_alloc(nr_irqs, GFP_KERNEL); 369 + if (!priv->prio_save) 370 + goto out_free_priority_reg; 371 + 436 372 nr_contexts = of_irq_count(node); 437 373 if (WARN_ON(!nr_contexts)) 438 - goto out_iounmap; 374 + goto out_free_priority_reg; 439 375 440 376 error = -ENOMEM; 441 377 priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1, 442 378 &plic_irqdomain_ops, priv); 443 379 if (WARN_ON(!priv->irqdomain)) 444 - goto out_iounmap; 380 + goto out_free_priority_reg; 445 381 446 382 for (i = 0; i < nr_contexts; i++) { 447 383 struct of_phandle_args parent; ··· 517 441 handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE + 518 442 i * CONTEXT_ENABLE_SIZE; 519 443 handler->priv = priv; 444 + 445 + handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32), 446 + sizeof(*handler->enable_save), GFP_KERNEL); 447 + if (!handler->enable_save) 448 + goto out_free_enable_reg; 520 
449 done: 521 450 for (hwirq = 1; hwirq <= nr_irqs; hwirq++) { 522 451 plic_toggle(handler, hwirq, 0); ··· 542 461 plic_starting_cpu, plic_dying_cpu); 543 462 plic_cpuhp_setup_done = true; 544 463 } 464 + register_syscore_ops(&plic_irq_syscore_ops); 545 465 546 466 pr_info("%pOFP: mapped %d interrupts with %d handlers for" 547 467 " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts); 548 468 return 0; 549 469 470 + out_free_enable_reg: 471 + for_each_cpu(cpu, cpu_present_mask) { 472 + handler = per_cpu_ptr(&plic_handlers, cpu); 473 + kfree(handler->enable_save); 474 + } 475 + out_free_priority_reg: 476 + kfree(priv->prio_save); 550 477 out_iounmap: 551 478 iounmap(priv->regs); 552 479 out_free_priv: