Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mei: me: d0i3: enable d0i3 interrupts

D0i3 adds an additional interrupt reason bit; therefore we add a variable
intr_source to save the interrupt causes for further dispatching.
The interrupt cause is saved in the irq quick handler to achieve
unified behavior for both MSI enabled and shared interrupt platforms.

Signed-off-by: Alexander Usyskin <alexander.usyskin@intel.com>
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Alexander Usyskin and committed by
Greg Kroah-Hartman
1fa55b4e bb9f4d26

+33 -35
+4
drivers/misc/mei/hw-me-regs.h
··· 166 166 /* Host D0I3 Interrupt Status */ 167 167 #define H_D0I3C_IS 0x00000040 168 168 169 + /* H_CSR masks */ 170 + #define H_CSR_IE_MASK (H_IE | H_D0I3C_IE) 171 + #define H_CSR_IS_MASK (H_IS | H_D0I3C_IS) 172 + 169 173 /* register bits of ME_CSR_HA (ME Control Status Host Access register) */ 170 174 /* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only 171 175 access to ME_CBD */
+17 -18
drivers/misc/mei/hw-me.c
··· 134 134 */ 135 135 static inline void mei_hcsr_set(struct mei_device *dev, u32 reg) 136 136 { 137 - reg &= ~H_IS; 137 + reg &= ~H_CSR_IS_MASK; 138 138 mei_hcsr_write(dev, reg); 139 139 } 140 140 ··· 216 216 { 217 217 u32 hcsr = mei_hcsr_read(dev); 218 218 219 - if ((hcsr & H_IS) == H_IS) 219 + if (hcsr & H_CSR_IS_MASK) 220 220 mei_hcsr_write(dev, hcsr); 221 221 } 222 222 /** ··· 228 228 { 229 229 u32 hcsr = mei_hcsr_read(dev); 230 230 231 - hcsr |= H_IE; 231 + hcsr |= H_CSR_IE_MASK; 232 232 mei_hcsr_set(dev, hcsr); 233 233 } 234 234 ··· 241 241 { 242 242 u32 hcsr = mei_hcsr_read(dev); 243 243 244 - hcsr &= ~H_IE; 244 + hcsr &= ~H_CSR_IE_MASK; 245 245 mei_hcsr_set(dev, hcsr); 246 246 } 247 247 ··· 285 285 hcsr = mei_hcsr_read(dev); 286 286 } 287 287 288 - hcsr |= H_RST | H_IG | H_IS; 288 + hcsr |= H_RST | H_IG | H_CSR_IS_MASK; 289 289 290 290 if (intr_enable) 291 - hcsr |= H_IE; 291 + hcsr |= H_CSR_IE_MASK; 292 292 else 293 - hcsr &= ~H_IE; 293 + hcsr &= ~H_CSR_IE_MASK; 294 294 295 295 dev->recvd_hw_ready = false; 296 296 mei_hcsr_write(dev, hcsr); ··· 322 322 { 323 323 u32 hcsr = mei_hcsr_read(dev); 324 324 325 - hcsr |= H_IE | H_IG | H_RDY; 325 + hcsr |= H_CSR_IE_MASK | H_IG | H_RDY; 326 326 mei_hcsr_set(dev, hcsr); 327 327 } 328 328 ··· 767 767 * 768 768 * Return: irqreturn_t 769 769 */ 770 - 771 770 irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id) 772 771 { 773 - struct mei_device *dev = (struct mei_device *) dev_id; 774 - u32 hcsr = mei_hcsr_read(dev); 772 + struct mei_device *dev = (struct mei_device *)dev_id; 773 + struct mei_me_hw *hw = to_me_hw(dev); 774 + u32 hcsr; 775 775 776 - if ((hcsr & H_IS) != H_IS) 776 + hcsr = mei_hcsr_read(dev); 777 + if (!(hcsr & H_CSR_IS_MASK)) 777 778 return IRQ_NONE; 778 779 779 - /* clear H_IS bit in H_CSR */ 780 + hw->intr_source = hcsr & H_CSR_IS_MASK; 781 + dev_dbg(dev->dev, "interrupt source 0x%08X.\n", hw->intr_source); 782 + 783 + /* clear H_IS and H_D0I3C_IS bits in H_CSR to clear the interrupts */ 780 
784 mei_hcsr_write(dev, hcsr); 781 785 782 786 return IRQ_WAKE_THREAD; ··· 807 803 /* initialize our complete list */ 808 804 mutex_lock(&dev->device_lock); 809 805 mei_io_list_init(&complete_list); 810 - 811 - /* Ack the interrupt here 812 - * In case of MSI we don't go through the quick handler */ 813 - if (pci_dev_msi_enabled(to_pci_dev(dev->dev))) 814 - mei_clear_interrupts(dev); 815 806 816 807 /* check if ME wants a reset */ 817 808 if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
+2
drivers/misc/mei/hw-me.h
··· 51 51 * 52 52 * @cfg: per device generation config and ops 53 53 * @mem_addr: io memory address 54 + * @intr_source: interrupt source 54 55 * @pg_state: power gating state 55 56 * @d0i3_supported: di03 support 56 57 */ 57 58 struct mei_me_hw { 58 59 const struct mei_cfg *cfg; 59 60 void __iomem *mem_addr; 61 + u32 intr_source; 60 62 enum mei_pg_state pg_state; 61 63 bool d0i3_supported; 62 64 };
+10 -17
drivers/misc/mei/pci-me.c
··· 128 128 const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data); 129 129 struct mei_device *dev; 130 130 struct mei_me_hw *hw; 131 + unsigned int irqflags; 131 132 int err; 132 133 133 134 ··· 181 180 pci_enable_msi(pdev); 182 181 183 182 /* request and enable interrupt */ 184 - if (pci_dev_msi_enabled(pdev)) 185 - err = request_threaded_irq(pdev->irq, 186 - NULL, 187 - mei_me_irq_thread_handler, 188 - IRQF_ONESHOT, KBUILD_MODNAME, dev); 189 - else 190 - err = request_threaded_irq(pdev->irq, 183 + irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED; 184 + 185 + err = request_threaded_irq(pdev->irq, 191 186 mei_me_irq_quick_handler, 192 187 mei_me_irq_thread_handler, 193 - IRQF_SHARED, KBUILD_MODNAME, dev); 194 - 188 + irqflags, KBUILD_MODNAME, dev); 195 189 if (err) { 196 190 dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n", 197 191 pdev->irq); ··· 315 319 { 316 320 struct pci_dev *pdev = to_pci_dev(device); 317 321 struct mei_device *dev; 322 + unsigned int irqflags; 318 323 int err; 319 324 320 325 dev = pci_get_drvdata(pdev); ··· 324 327 325 328 pci_enable_msi(pdev); 326 329 330 + irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED; 331 + 327 332 /* request and enable interrupt */ 328 - if (pci_dev_msi_enabled(pdev)) 329 - err = request_threaded_irq(pdev->irq, 330 - NULL, 331 - mei_me_irq_thread_handler, 332 - IRQF_ONESHOT, KBUILD_MODNAME, dev); 333 - else 334 - err = request_threaded_irq(pdev->irq, 333 + err = request_threaded_irq(pdev->irq, 335 334 mei_me_irq_quick_handler, 336 335 mei_me_irq_thread_handler, 337 - IRQF_SHARED, KBUILD_MODNAME, dev); 336 + irqflags, KBUILD_MODNAME, dev); 338 337 339 338 if (err) { 340 339 dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",