Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ipmi: kcs_bmc: Allow clients to control KCS IRQ state

Add a mechanism for controlling whether the client associated with a
KCS device will receive Input Buffer Full (IBF) and Output Buffer Empty
(OBE) events. This enables an abstract implementation of poll() for KCS
devices.

A wart in the implementation is that the ASPEED KCS devices don't
support an OBE interrupt for the BMC. Instead we pretend it has one by
polling the status register waiting for the Output Buffer Full (OBF) bit
to clear, and generating an event when OBE is observed.

Cc: CS20 KWLiu <KWLIU@nuvoton.com>
Signed-off-by: Andrew Jeffery <andrew@aj.id.au>
Reviewed-by: Zev Weiss <zweiss@equinix.com>
Message-Id: <20210608104757.582199-10-andrew@aj.id.au>
Signed-off-by: Corey Minyard <cminyard@mvista.com>

Authored by Andrew Jeffery and committed by Corey Minyard
28651e6c 7cafff99

+132 -58
+6
drivers/char/ipmi/kcs_bmc.c
··· 182 182 } 183 183 EXPORT_SYMBOL(kcs_bmc_unregister_driver); 184 184 185 + void kcs_bmc_update_event_mask(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 events) 186 + { 187 + kcs_bmc->ops->irq_mask_update(kcs_bmc, mask, events); 188 + } 189 + EXPORT_SYMBOL(kcs_bmc_update_event_mask); 190 + 185 191 MODULE_LICENSE("GPL v2"); 186 192 MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>"); 187 193 MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+3
drivers/char/ipmi/kcs_bmc.h
··· 8 8 9 9 #include <linux/list.h> 10 10 11 + #define KCS_BMC_EVENT_TYPE_OBE BIT(0) 12 + #define KCS_BMC_EVENT_TYPE_IBF BIT(1) 13 + 11 14 #define KCS_BMC_STR_OBF BIT(0) 12 15 #define KCS_BMC_STR_IBF BIT(1) 13 16 #define KCS_BMC_STR_CMD_DAT BIT(3)
+98 -54
drivers/char/ipmi/kcs_bmc_aspeed.c
··· 60 60 #define LPC_ODR4 0x118 61 61 #define LPC_STR4 0x11C 62 62 63 + #define OBE_POLL_PERIOD (HZ / 2) 64 + 63 65 struct aspeed_kcs_bmc { 64 66 struct kcs_bmc_device kcs_bmc; 65 67 66 68 struct regmap *map; 69 + 70 + struct { 71 + spinlock_t lock; 72 + bool remove; 73 + struct timer_list timer; 74 + } obe; 67 75 }; 68 76 69 77 struct aspeed_kcs_of_ops { ··· 167 159 168 160 switch (kcs_bmc->channel) { 169 161 case 1: 170 - if (enable) { 171 - regmap_update_bits(priv->map, LPC_HICR2, 172 - LPC_HICR2_IBFIF1, LPC_HICR2_IBFIF1); 173 - regmap_update_bits(priv->map, LPC_HICR0, 174 - LPC_HICR0_LPC1E, LPC_HICR0_LPC1E); 175 - } else { 176 - regmap_update_bits(priv->map, LPC_HICR0, 177 - LPC_HICR0_LPC1E, 0); 178 - regmap_update_bits(priv->map, LPC_HICR2, 179 - LPC_HICR2_IBFIF1, 0); 180 - } 181 - break; 182 - 162 + regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC1E, enable * LPC_HICR0_LPC1E); 163 + return; 183 164 case 2: 184 - if (enable) { 185 - regmap_update_bits(priv->map, LPC_HICR2, 186 - LPC_HICR2_IBFIF2, LPC_HICR2_IBFIF2); 187 - regmap_update_bits(priv->map, LPC_HICR0, 188 - LPC_HICR0_LPC2E, LPC_HICR0_LPC2E); 189 - } else { 190 - regmap_update_bits(priv->map, LPC_HICR0, 191 - LPC_HICR0_LPC2E, 0); 192 - regmap_update_bits(priv->map, LPC_HICR2, 193 - LPC_HICR2_IBFIF2, 0); 194 - } 195 - break; 196 - 165 + regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC2E, enable * LPC_HICR0_LPC2E); 166 + return; 197 167 case 3: 198 - if (enable) { 199 - regmap_update_bits(priv->map, LPC_HICR2, 200 - LPC_HICR2_IBFIF3, LPC_HICR2_IBFIF3); 201 - regmap_update_bits(priv->map, LPC_HICR0, 202 - LPC_HICR0_LPC3E, LPC_HICR0_LPC3E); 203 - regmap_update_bits(priv->map, LPC_HICR4, 204 - LPC_HICR4_KCSENBL, LPC_HICR4_KCSENBL); 205 - } else { 206 - regmap_update_bits(priv->map, LPC_HICR0, 207 - LPC_HICR0_LPC3E, 0); 208 - regmap_update_bits(priv->map, LPC_HICR4, 209 - LPC_HICR4_KCSENBL, 0); 210 - regmap_update_bits(priv->map, LPC_HICR2, 211 - LPC_HICR2_IBFIF3, 0); 212 - } 213 - break; 214 - 
168 + regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC3E, enable * LPC_HICR0_LPC3E); 169 + regmap_update_bits(priv->map, LPC_HICR4, 170 + LPC_HICR4_KCSENBL, enable * LPC_HICR4_KCSENBL); 171 + return; 215 172 case 4: 216 - if (enable) 217 - regmap_update_bits(priv->map, LPC_HICRB, 218 - LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E, 219 - LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E); 220 - else 221 - regmap_update_bits(priv->map, LPC_HICRB, 222 - LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E, 223 - 0); 224 - break; 225 - 173 + regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_LPC4E, enable * LPC_HICRB_LPC4E); 174 + return; 226 175 default: 227 - break; 176 + pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel); 177 + return; 178 + } 179 + } 180 + 181 + static void aspeed_kcs_check_obe(struct timer_list *timer) 182 + { 183 + struct aspeed_kcs_bmc *priv = container_of(timer, struct aspeed_kcs_bmc, obe.timer); 184 + unsigned long flags; 185 + u8 str; 186 + 187 + spin_lock_irqsave(&priv->obe.lock, flags); 188 + if (priv->obe.remove) { 189 + spin_unlock_irqrestore(&priv->obe.lock, flags); 190 + return; 191 + } 192 + 193 + str = aspeed_kcs_inb(&priv->kcs_bmc, priv->kcs_bmc.ioreg.str); 194 + if (str & KCS_BMC_STR_OBF) { 195 + mod_timer(timer, jiffies + OBE_POLL_PERIOD); 196 + spin_unlock_irqrestore(&priv->obe.lock, flags); 197 + return; 198 + } 199 + spin_unlock_irqrestore(&priv->obe.lock, flags); 200 + 201 + kcs_bmc_handle_event(&priv->kcs_bmc); 202 + } 203 + 204 + static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state) 205 + { 206 + struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); 207 + 208 + /* We don't have an OBE IRQ, emulate it */ 209 + if (mask & KCS_BMC_EVENT_TYPE_OBE) { 210 + if (KCS_BMC_EVENT_TYPE_OBE & state) 211 + mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD); 212 + else 213 + del_timer(&priv->obe.timer); 214 + } 215 + 216 + if (mask & KCS_BMC_EVENT_TYPE_IBF) { 217 + const bool enable = !!(state & 
KCS_BMC_EVENT_TYPE_IBF); 218 + 219 + switch (kcs_bmc->channel) { 220 + case 1: 221 + regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIF1, 222 + enable * LPC_HICR2_IBFIF1); 223 + return; 224 + case 2: 225 + regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIF2, 226 + enable * LPC_HICR2_IBFIF2); 227 + return; 228 + case 3: 229 + regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIF3, 230 + enable * LPC_HICR2_IBFIF3); 231 + return; 232 + case 4: 233 + regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_IBFIF4, 234 + enable * LPC_HICRB_IBFIF4); 235 + return; 236 + default: 237 + pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel); 238 + return; 239 + } 228 240 } 229 241 } 230 242 231 243 static const struct kcs_bmc_device_ops aspeed_kcs_ops = { 244 + .irq_mask_update = aspeed_kcs_irq_mask_update, 232 245 .io_inputb = aspeed_kcs_inb, 233 246 .io_outputb = aspeed_kcs_outb, 234 247 .io_updateb = aspeed_kcs_updateb, ··· 404 375 return -ENODEV; 405 376 } 406 377 378 + spin_lock_init(&priv->obe.lock); 379 + priv->obe.remove = false; 380 + timer_setup(&priv->obe.timer, aspeed_kcs_check_obe, 0); 381 + 407 382 aspeed_kcs_set_address(kcs_bmc, addr); 408 383 409 384 rc = aspeed_kcs_config_irq(kcs_bmc, pdev); ··· 416 383 417 384 platform_set_drvdata(pdev, priv); 418 385 386 + aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 387 + KCS_BMC_EVENT_TYPE_IBF); 419 388 aspeed_kcs_enable_channel(kcs_bmc, true); 420 389 421 390 rc = kcs_bmc_add_device(&priv->kcs_bmc); ··· 437 402 struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc; 438 403 439 404 kcs_bmc_remove_device(kcs_bmc); 405 + 406 + aspeed_kcs_enable_channel(kcs_bmc, false); 407 + aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0); 408 + 409 + /* Make sure it's proper dead */ 410 + spin_lock_irq(&priv->obe.lock); 411 + priv->obe.remove = true; 412 + spin_unlock_irq(&priv->obe.lock); 413 + del_timer_sync(&priv->obe.timer); 440 414 441 
415 return 0; 442 416 }
+2
drivers/char/ipmi/kcs_bmc_client.h
··· 35 35 int kcs_bmc_enable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client); 36 36 void kcs_bmc_disable_device(struct kcs_bmc_device *kcs_bmc, struct kcs_bmc_client *client); 37 37 38 + void kcs_bmc_update_event_mask(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 events); 39 + 38 40 u8 kcs_bmc_read_data(struct kcs_bmc_device *kcs_bmc); 39 41 void kcs_bmc_write_data(struct kcs_bmc_device *kcs_bmc, u8 data); 40 42 u8 kcs_bmc_read_status(struct kcs_bmc_device *kcs_bmc);
+1
drivers/char/ipmi/kcs_bmc_device.h
··· 9 9 #include "kcs_bmc.h" 10 10 11 11 struct kcs_bmc_device_ops { 12 + void (*irq_mask_update)(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 enable); 12 13 u8 (*io_inputb)(struct kcs_bmc_device *kcs_bmc, u32 reg); 13 14 void (*io_outputb)(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 b); 14 15 void (*io_updateb)(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 b);
+22 -4
drivers/char/ipmi/kcs_bmc_npcm7xx.c
··· 38 38 #define KCS2CTL 0x2A 39 39 #define KCS3CTL 0x3C 40 40 #define KCS_CTL_IBFIE BIT(0) 41 + #define KCS_CTL_OBEIE BIT(1) 41 42 42 43 #define KCS1IE 0x1C 43 44 #define KCS2IE 0x2E ··· 118 117 { 119 118 struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc); 120 119 121 - regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_IBFIE, 122 - enable ? KCS_CTL_IBFIE : 0); 123 - 124 120 regmap_update_bits(priv->map, priv->reg->ie, KCS_IE_IRQE | KCS_IE_HIRQE, 125 121 enable ? KCS_IE_IRQE | KCS_IE_HIRQE : 0); 122 + } 123 + 124 + static void npcm7xx_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state) 125 + { 126 + struct npcm7xx_kcs_bmc *priv = to_npcm7xx_kcs_bmc(kcs_bmc); 127 + 128 + if (mask & KCS_BMC_EVENT_TYPE_OBE) 129 + regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_OBEIE, 130 + !!(state & KCS_BMC_EVENT_TYPE_OBE) * KCS_CTL_OBEIE); 131 + 132 + if (mask & KCS_BMC_EVENT_TYPE_IBF) 133 + regmap_update_bits(priv->map, priv->reg->ctl, KCS_CTL_IBFIE, 134 + !!(state & KCS_BMC_EVENT_TYPE_IBF) * KCS_CTL_IBFIE); 126 135 } 127 136 128 137 static irqreturn_t npcm7xx_kcs_irq(int irq, void *arg) ··· 157 146 } 158 147 159 148 static const struct kcs_bmc_device_ops npcm7xx_kcs_ops = { 149 + .irq_mask_update = npcm7xx_kcs_irq_mask_update, 160 150 .io_inputb = npcm7xx_kcs_inb, 161 151 .io_outputb = npcm7xx_kcs_outb, 162 152 .io_updateb = npcm7xx_kcs_updateb, ··· 198 186 199 187 platform_set_drvdata(pdev, priv); 200 188 201 - npcm7xx_kcs_enable_channel(kcs_bmc, true); 202 189 rc = npcm7xx_kcs_config_irq(kcs_bmc, pdev); 203 190 if (rc) 204 191 return rc; 192 + 193 + npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 194 + KCS_BMC_EVENT_TYPE_IBF); 195 + npcm7xx_kcs_enable_channel(kcs_bmc, true); 205 196 206 197 rc = kcs_bmc_add_device(kcs_bmc); 207 198 if (rc) { ··· 225 210 struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc; 226 211 227 212 kcs_bmc_remove_device(kcs_bmc); 213 + 214 + npcm7xx_kcs_enable_channel(kcs_bmc, 
false); 215 + npcm7xx_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0); 228 216 229 217 return 0; 230 218 }