Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2015-2018, Intel Corporation.
4 */
5
6#define pr_fmt(fmt) "aspeed-kcs-bmc: " fmt
7
8#include <linux/atomic.h>
9#include <linux/errno.h>
10#include <linux/interrupt.h>
11#include <linux/io.h>
12#include <linux/irq.h>
13#include <linux/mfd/syscon.h>
14#include <linux/module.h>
15#include <linux/of.h>
16#include <linux/of_address.h>
17#include <linux/of_device.h>
18#include <linux/platform_device.h>
19#include <linux/poll.h>
20#include <linux/regmap.h>
21#include <linux/sched.h>
22#include <linux/slab.h>
23#include <linux/timer.h>
24
25#include "kcs_bmc_device.h"
26
27
28#define DEVICE_NAME "ast-kcs-bmc"
29
30#define KCS_CHANNEL_MAX 4
31
32/*
33 * Field class descriptions
34 *
35 * LPCyE Enable LPC channel y
36 * IBFIEy Input Buffer Full IRQ Enable for LPC channel y
37 * IRQxEy Assert SerIRQ x for LPC channel y (Deprecated, use IDyIRQX, IRQXEy)
38 * IDyIRQX Use the specified 4-bit SerIRQ for LPC channel y
39 * SELyIRQX SerIRQ polarity for LPC channel y (low: 0, high: 1)
40 * IRQXEy Assert the SerIRQ specified in IDyIRQX for LPC channel y
41 */
42
43#define LPC_TYIRQX_LOW 0b00
44#define LPC_TYIRQX_HIGH 0b01
45#define LPC_TYIRQX_RSVD 0b10
46#define LPC_TYIRQX_RISING 0b11
47
48#define LPC_HICR0 0x000
49#define LPC_HICR0_LPC3E BIT(7)
50#define LPC_HICR0_LPC2E BIT(6)
51#define LPC_HICR0_LPC1E BIT(5)
52#define LPC_HICR2 0x008
53#define LPC_HICR2_IBFIE3 BIT(3)
54#define LPC_HICR2_IBFIE2 BIT(2)
55#define LPC_HICR2_IBFIE1 BIT(1)
56#define LPC_HICR4 0x010
57#define LPC_HICR4_LADR12AS BIT(7)
58#define LPC_HICR4_KCSENBL BIT(2)
59#define LPC_SIRQCR0 0x070
60/* IRQ{12,1}E1 are deprecated as of AST2600 A3 but necessary for prior chips */
61#define LPC_SIRQCR0_IRQ12E1 BIT(1)
62#define LPC_SIRQCR0_IRQ1E1 BIT(0)
63#define LPC_HICR5 0x080
64#define LPC_HICR5_ID3IRQX_MASK GENMASK(23, 20)
65#define LPC_HICR5_ID3IRQX_SHIFT 20
66#define LPC_HICR5_ID2IRQX_MASK GENMASK(19, 16)
67#define LPC_HICR5_ID2IRQX_SHIFT 16
68#define LPC_HICR5_SEL3IRQX BIT(15)
69#define LPC_HICR5_IRQXE3 BIT(14)
70#define LPC_HICR5_SEL2IRQX BIT(13)
71#define LPC_HICR5_IRQXE2 BIT(12)
72#define LPC_LADR3H 0x014
73#define LPC_LADR3L 0x018
74#define LPC_LADR12H 0x01C
75#define LPC_LADR12L 0x020
76#define LPC_IDR1 0x024
77#define LPC_IDR2 0x028
78#define LPC_IDR3 0x02C
79#define LPC_ODR1 0x030
80#define LPC_ODR2 0x034
81#define LPC_ODR3 0x038
82#define LPC_STR1 0x03C
83#define LPC_STR2 0x040
84#define LPC_STR3 0x044
85#define LPC_HICRB 0x100
86#define LPC_HICRB_EN16LADR2 BIT(5)
87#define LPC_HICRB_EN16LADR1 BIT(4)
88#define LPC_HICRB_IBFIE4 BIT(1)
89#define LPC_HICRB_LPC4E BIT(0)
90#define LPC_HICRC 0x104
91#define LPC_HICRC_ID4IRQX_MASK GENMASK(7, 4)
92#define LPC_HICRC_ID4IRQX_SHIFT 4
93#define LPC_HICRC_TY4IRQX_MASK GENMASK(3, 2)
94#define LPC_HICRC_TY4IRQX_SHIFT 2
95#define LPC_HICRC_OBF4_AUTO_CLR BIT(1)
96#define LPC_HICRC_IRQXE4 BIT(0)
97#define LPC_LADR4 0x110
98#define LPC_IDR4 0x114
99#define LPC_ODR4 0x118
100#define LPC_STR4 0x11C
101#define LPC_LSADR12 0x120
102#define LPC_LSADR12_LSADR2_MASK GENMASK(31, 16)
103#define LPC_LSADR12_LSADR2_SHIFT 16
104#define LPC_LSADR12_LSADR1_MASK GENMASK(15, 0)
105#define LPC_LSADR12_LSADR1_SHIFT 0
106
107#define OBE_POLL_PERIOD (HZ / 2)
108
/* How BMC-to-host (upstream) notifications are delivered for a channel. */
enum aspeed_kcs_irq_mode {
	aspeed_kcs_irq_none,	/* No upstream IRQ configured; host must poll */
	aspeed_kcs_irq_serirq,	/* Assert a SerIRQ over LPC on ODR writes */
};
113
/* Driver-private state wrapping the generic KCS BMC device. */
struct aspeed_kcs_bmc {
	/* Must be embedded so to_aspeed_kcs_bmc() can container_of() back */
	struct kcs_bmc_device kcs_bmc;

	/* Syscon regmap covering the LPC controller register space */
	struct regmap *map;

	/* BMC-to-host (upstream) interrupt configuration */
	struct {
		enum aspeed_kcs_irq_mode mode;
		int id;		/* SerIRQ line number when mode is serirq */
	} upstream_irq;

	/*
	 * The hardware has no "output buffer empty" IRQ, so OBE is emulated
	 * by polling STR from a timer (see aspeed_kcs_check_obe()).
	 */
	struct {
		spinlock_t lock;	/* serialises timer callback vs removal */
		bool remove;		/* set at teardown to stop re-arming */
		struct timer_list timer;
	} obe;
};
130
/* Recover the driver-private structure from the embedded generic device. */
static inline struct aspeed_kcs_bmc *to_aspeed_kcs_bmc(struct kcs_bmc_device *kcs_bmc)
{
	return container_of(kcs_bmc, struct aspeed_kcs_bmc, kcs_bmc);
}
135
136static u8 aspeed_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg)
137{
138 struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
139 u32 val = 0;
140 int rc;
141
142 rc = regmap_read(priv->map, reg, &val);
143 WARN(rc != 0, "regmap_read() failed: %d\n", rc);
144
145 return rc == 0 ? (u8) val : 0;
146}
147
148static void aspeed_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 data)
149{
150 struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
151 int rc;
152
153 rc = regmap_write(priv->map, reg, data);
154 WARN(rc != 0, "regmap_write() failed: %d\n", rc);
155
156 /* Trigger the upstream IRQ on ODR writes, if enabled */
157
158 switch (reg) {
159 case LPC_ODR1:
160 case LPC_ODR2:
161 case LPC_ODR3:
162 case LPC_ODR4:
163 break;
164 default:
165 return;
166 }
167
168 if (priv->upstream_irq.mode != aspeed_kcs_irq_serirq)
169 return;
170
171 switch (kcs_bmc->channel) {
172 case 1:
173 switch (priv->upstream_irq.id) {
174 case 12:
175 regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ12E1,
176 LPC_SIRQCR0_IRQ12E1);
177 break;
178 case 1:
179 regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ1E1,
180 LPC_SIRQCR0_IRQ1E1);
181 break;
182 default:
183 break;
184 }
185 break;
186 case 2:
187 regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE2, LPC_HICR5_IRQXE2);
188 break;
189 case 3:
190 regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE3, LPC_HICR5_IRQXE3);
191 break;
192 case 4:
193 regmap_update_bits(priv->map, LPC_HICRC, LPC_HICRC_IRQXE4, LPC_HICRC_IRQXE4);
194 break;
195 default:
196 break;
197 }
198}
199
200static void aspeed_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 val)
201{
202 struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
203 int rc;
204
205 rc = regmap_update_bits(priv->map, reg, mask, val);
206 WARN(rc != 0, "regmap_update_bits() failed: %d\n", rc);
207}
208
/*
 * We note D for Data, and C for Cmd/Status, default rules are
 *
 * 1. Only the D address is given:
 *   A. KCS1/KCS2 (D/C: X/X+4)
 *      D/C: CA0h/CA4h
 *      D/C: CA8h/CACh
 *   B. KCS3 (D/C: XX2/XX3h)
 *      D/C: CA2h/CA3h
 *   C. KCS4 (D/C: X/X+1)
 *      D/C: CA4h/CA5h
 *
 * 2. Both the D/C addresses are given:
 *   A. KCS1/KCS2/KCS4 (D/C: X/Y)
 *      D/C: CA0h/CA1h
 *      D/C: CA8h/CA9h
 *      D/C: CA4h/CA5h
 *   B. KCS3 (D/C: XX2/XX3h)
 *      D/C: CA2h/CA3h
 */
/*
 * Program the host I/O address decode for the channel.
 *
 * @addrs:    data address in addrs[0], optional status address in addrs[1]
 * @nr_addrs: 1 (status address inferred by hardware) or 2 (explicit)
 *
 * Returns 0 on success, -EINVAL for an unsupported channel/address combination.
 */
static int aspeed_kcs_set_address(struct kcs_bmc_device *kcs_bmc, u32 addrs[2], int nr_addrs)
{
	struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);

	if (WARN_ON(nr_addrs < 1 || nr_addrs > 2))
		return -EINVAL;

	switch (priv->kcs_bmc.channel) {
	case 1:
		/* LADR12AS clear selects channel 1 for the shared LADR12 regs */
		regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, 0);
		regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
		regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
		if (nr_addrs == 2) {
			/* Explicit status address; EN16LADR1 enables its decode */
			regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR1_MASK,
					   addrs[1] << LPC_LSADR12_LSADR1_SHIFT);

			regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR1,
					   LPC_HICRB_EN16LADR1);
		}
		break;

	case 2:
		/* LADR12AS set selects channel 2 for the shared LADR12 regs */
		regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, LPC_HICR4_LADR12AS);
		regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
		regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
		if (nr_addrs == 2) {
			regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR2_MASK,
					   addrs[1] << LPC_LSADR12_LSADR2_SHIFT);

			regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR2,
					   LPC_HICRB_EN16LADR2);
		}
		break;

	case 3:
		/* Hardware derives the status address (XX3h) from the data address */
		if (nr_addrs == 2) {
			dev_err(priv->kcs_bmc.dev,
				"Channel 3 only supports inferred status IO address\n");
			return -EINVAL;
		}

		regmap_write(priv->map, LPC_LADR3H, addrs[0] >> 8);
		regmap_write(priv->map, LPC_LADR3L, addrs[0] & 0xFF);
		break;

	case 4:
		/* LADR4 packs the status address in the high half-word */
		if (nr_addrs == 1)
			regmap_write(priv->map, LPC_LADR4, ((addrs[0] + 1) << 16) | addrs[0]);
		else
			regmap_write(priv->map, LPC_LADR4, (addrs[1] << 16) | addrs[0]);

		break;

	default:
		return -EINVAL;
	}

	return 0;
}
288
289static inline int aspeed_kcs_map_serirq_type(u32 dt_type)
290{
291 switch (dt_type) {
292 case IRQ_TYPE_EDGE_RISING:
293 return LPC_TYIRQX_RISING;
294 case IRQ_TYPE_LEVEL_HIGH:
295 return LPC_TYIRQX_HIGH;
296 case IRQ_TYPE_LEVEL_LOW:
297 return LPC_TYIRQX_LOW;
298 default:
299 return -EINVAL;
300 }
301}
302
/*
 * Configure the BMC-to-host SerIRQ for the channel.
 *
 * @id:      SerIRQ line (0-15)
 * @dt_type: devicetree IRQ trigger type (IRQ_TYPE_*)
 *
 * Records the SerIRQ mode/id in priv and programs the per-channel SerIRQ
 * selection registers. Returns 0 on success, -EINVAL for an out-of-range id,
 * an unsupported trigger type, or an unsupported channel.
 */
static int aspeed_kcs_config_upstream_irq(struct aspeed_kcs_bmc *priv, u32 id, u32 dt_type)
{
	unsigned int mask, val, hw_type;
	int ret;

	if (id > 15)
		return -EINVAL;

	ret = aspeed_kcs_map_serirq_type(dt_type);
	if (ret < 0)
		return ret;
	hw_type = ret;

	priv->upstream_irq.mode = aspeed_kcs_irq_serirq;
	priv->upstream_irq.id = id;

	switch (priv->kcs_bmc.channel) {
	case 1:
		/* Needs IRQxE1 rather than (ID1IRQX, SEL1IRQX, IRQXE1) before AST2600 A3 */
		break;
	case 2:
		/* SEL2IRQX is a single polarity bit: only level triggers fit */
		if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
			return -EINVAL;

		mask = LPC_HICR5_SEL2IRQX | LPC_HICR5_ID2IRQX_MASK;
		val = (id << LPC_HICR5_ID2IRQX_SHIFT);
		val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL2IRQX : 0;
		regmap_update_bits(priv->map, LPC_HICR5, mask, val);

		break;
	case 3:
		/* As for channel 2: polarity bit only, so level triggers only */
		if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
			return -EINVAL;

		mask = LPC_HICR5_SEL3IRQX | LPC_HICR5_ID3IRQX_MASK;
		val = (id << LPC_HICR5_ID3IRQX_SHIFT);
		val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL3IRQX : 0;
		regmap_update_bits(priv->map, LPC_HICR5, mask, val);

		break;
	case 4:
		/* Channel 4 has a full 2-bit type field, so any mapped type works */
		mask = LPC_HICRC_ID4IRQX_MASK | LPC_HICRC_TY4IRQX_MASK | LPC_HICRC_OBF4_AUTO_CLR;
		val = (id << LPC_HICRC_ID4IRQX_SHIFT) | (hw_type << LPC_HICRC_TY4IRQX_SHIFT);
		regmap_update_bits(priv->map, LPC_HICRC, mask, val);
		break;
	default:
		dev_warn(priv->kcs_bmc.dev,
			 "SerIRQ configuration not supported on KCS channel %d\n",
			 priv->kcs_bmc.channel);
		return -EINVAL;
	}

	return 0;
}
357
358static void aspeed_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable)
359{
360 struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
361
362 switch (kcs_bmc->channel) {
363 case 1:
364 regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC1E, enable * LPC_HICR0_LPC1E);
365 return;
366 case 2:
367 regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC2E, enable * LPC_HICR0_LPC2E);
368 return;
369 case 3:
370 regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC3E, enable * LPC_HICR0_LPC3E);
371 regmap_update_bits(priv->map, LPC_HICR4,
372 LPC_HICR4_KCSENBL, enable * LPC_HICR4_KCSENBL);
373 return;
374 case 4:
375 regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_LPC4E, enable * LPC_HICRB_LPC4E);
376 return;
377 default:
378 pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
379 return;
380 }
381}
382
383static void aspeed_kcs_check_obe(struct timer_list *timer)
384{
385 struct aspeed_kcs_bmc *priv = container_of(timer, struct aspeed_kcs_bmc, obe.timer);
386 unsigned long flags;
387 u8 str;
388
389 spin_lock_irqsave(&priv->obe.lock, flags);
390 if (priv->obe.remove) {
391 spin_unlock_irqrestore(&priv->obe.lock, flags);
392 return;
393 }
394
395 str = aspeed_kcs_inb(&priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
396 if (str & KCS_BMC_STR_OBF) {
397 mod_timer(timer, jiffies + OBE_POLL_PERIOD);
398 spin_unlock_irqrestore(&priv->obe.lock, flags);
399 return;
400 }
401 spin_unlock_irqrestore(&priv->obe.lock, flags);
402
403 kcs_bmc_handle_event(&priv->kcs_bmc);
404}
405
406static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
407{
408 struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
409 int rc;
410 u8 str;
411
412 /* We don't have an OBE IRQ, emulate it */
413 if (mask & KCS_BMC_EVENT_TYPE_OBE) {
414 if (KCS_BMC_EVENT_TYPE_OBE & state) {
415 /*
416 * Given we don't have an OBE IRQ, delay by polling briefly to see if we can
417 * observe such an event before returning to the caller. This is not
418 * incorrect because OBF may have already become clear before enabling the
419 * IRQ if we had one, under which circumstance no event will be propagated
420 * anyway.
421 *
422 * The onus is on the client to perform a race-free check that it hasn't
423 * missed the event.
424 */
425 rc = read_poll_timeout_atomic(aspeed_kcs_inb, str,
426 !(str & KCS_BMC_STR_OBF), 1, 100, false,
427 &priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
428 /* Time for the slow path? */
429 if (rc == -ETIMEDOUT)
430 mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
431 } else {
432 del_timer(&priv->obe.timer);
433 }
434 }
435
436 if (mask & KCS_BMC_EVENT_TYPE_IBF) {
437 const bool enable = !!(state & KCS_BMC_EVENT_TYPE_IBF);
438
439 switch (kcs_bmc->channel) {
440 case 1:
441 regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE1,
442 enable * LPC_HICR2_IBFIE1);
443 return;
444 case 2:
445 regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE2,
446 enable * LPC_HICR2_IBFIE2);
447 return;
448 case 3:
449 regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE3,
450 enable * LPC_HICR2_IBFIE3);
451 return;
452 case 4:
453 regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_IBFIE4,
454 enable * LPC_HICRB_IBFIE4);
455 return;
456 default:
457 pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
458 return;
459 }
460 }
461}
462
/* Device operations handed to the generic KCS BMC core. */
static const struct kcs_bmc_device_ops aspeed_kcs_ops = {
	.irq_mask_update = aspeed_kcs_irq_mask_update,
	.io_inputb = aspeed_kcs_inb,
	.io_outputb = aspeed_kcs_outb,
	.io_updateb = aspeed_kcs_updateb,
};
469
/* Host-to-BMC (IBF) interrupt handler: delegate to the generic KCS core. */
static irqreturn_t aspeed_kcs_irq(int irq, void *arg)
{
	struct kcs_bmc_device *kcs_bmc = arg;

	return kcs_bmc_handle_event(kcs_bmc);
}
476
477static int aspeed_kcs_config_downstream_irq(struct kcs_bmc_device *kcs_bmc,
478 struct platform_device *pdev)
479{
480 struct device *dev = &pdev->dev;
481 int irq;
482
483 irq = platform_get_irq(pdev, 0);
484 if (irq < 0)
485 return irq;
486
487 return devm_request_irq(dev, irq, aspeed_kcs_irq, IRQF_SHARED,
488 dev_name(dev), kcs_bmc);
489}
490
/* IDR/ODR/STR register offsets for channels 1-4, indexed by (channel - 1). */
static const struct kcs_ioreg ast_kcs_bmc_ioregs[KCS_CHANNEL_MAX] = {
	{ .idr = LPC_IDR1, .odr = LPC_ODR1, .str = LPC_STR1 },
	{ .idr = LPC_IDR2, .odr = LPC_ODR2, .str = LPC_STR2 },
	{ .idr = LPC_IDR3, .odr = LPC_ODR3, .str = LPC_STR3 },
	{ .idr = LPC_IDR4, .odr = LPC_ODR4, .str = LPC_STR4 },
};
497
498static int aspeed_kcs_of_get_channel(struct platform_device *pdev)
499{
500 struct device_node *np;
501 struct kcs_ioreg ioreg;
502 const __be32 *reg;
503 int i;
504
505 np = pdev->dev.of_node;
506
507 /* Don't translate addresses, we want offsets for the regmaps */
508 reg = of_get_address(np, 0, NULL, NULL);
509 if (!reg)
510 return -EINVAL;
511 ioreg.idr = be32_to_cpup(reg);
512
513 reg = of_get_address(np, 1, NULL, NULL);
514 if (!reg)
515 return -EINVAL;
516 ioreg.odr = be32_to_cpup(reg);
517
518 reg = of_get_address(np, 2, NULL, NULL);
519 if (!reg)
520 return -EINVAL;
521 ioreg.str = be32_to_cpup(reg);
522
523 for (i = 0; i < ARRAY_SIZE(ast_kcs_bmc_ioregs); i++) {
524 if (!memcmp(&ast_kcs_bmc_ioregs[i], &ioreg, sizeof(ioreg)))
525 return i + 1;
526 }
527 return -EINVAL;
528}
529
530static int
531aspeed_kcs_of_get_io_address(struct platform_device *pdev, u32 addrs[2])
532{
533 int rc;
534
535 rc = of_property_read_variable_u32_array(pdev->dev.of_node,
536 "aspeed,lpc-io-reg",
537 addrs, 1, 2);
538 if (rc < 0) {
539 dev_err(&pdev->dev, "No valid 'aspeed,lpc-io-reg' configured\n");
540 return rc;
541 }
542
543 if (addrs[0] > 0xffff) {
544 dev_err(&pdev->dev, "Invalid data address in 'aspeed,lpc-io-reg'\n");
545 return -EINVAL;
546 }
547
548 if (rc == 2 && addrs[1] > 0xffff) {
549 dev_err(&pdev->dev, "Invalid status address in 'aspeed,lpc-io-reg'\n");
550 return -EINVAL;
551 }
552
553 return rc;
554}
555
/*
 * Probe a KCS channel: validate the parent LPC binding, derive the channel
 * and host I/O addresses from the devicetree, wire up the downstream and
 * (optional) upstream IRQs, then register with the generic KCS BMC core.
 *
 * Note the ordering: IRQ masks are cleared and the channel enabled before
 * kcs_bmc_add_device() exposes it to clients.
 */
static int aspeed_kcs_probe(struct platform_device *pdev)
{
	struct kcs_bmc_device *kcs_bmc;
	struct aspeed_kcs_bmc *priv;
	struct device_node *np;
	bool have_upstream_irq;
	u32 upstream_irq[2];
	int rc, channel;
	int nr_addrs;
	u32 addrs[2];

	/* The parent must be one of the supported LPC v2 bindings */
	np = pdev->dev.of_node->parent;
	if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") &&
	    !of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") &&
	    !of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) {
		dev_err(&pdev->dev, "unsupported LPC device binding\n");
		return -ENODEV;
	}

	channel = aspeed_kcs_of_get_channel(pdev);
	if (channel < 0)
		return channel;

	nr_addrs = aspeed_kcs_of_get_io_address(pdev, addrs);
	if (nr_addrs < 0)
		return nr_addrs;

	/* "aspeed,lpc-interrupts" is optional: -EINVAL just means absent */
	np = pdev->dev.of_node;
	rc = of_property_read_u32_array(np, "aspeed,lpc-interrupts", upstream_irq, 2);
	if (rc && rc != -EINVAL)
		return -EINVAL;

	have_upstream_irq = !rc;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	kcs_bmc = &priv->kcs_bmc;
	kcs_bmc->dev = &pdev->dev;
	kcs_bmc->channel = channel;
	kcs_bmc->ioreg = ast_kcs_bmc_ioregs[channel - 1];
	kcs_bmc->ops = &aspeed_kcs_ops;

	/* The LPC registers live in the parent syscon */
	priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
	if (IS_ERR(priv->map)) {
		dev_err(&pdev->dev, "Couldn't get regmap\n");
		return -ENODEV;
	}

	spin_lock_init(&priv->obe.lock);
	priv->obe.remove = false;
	timer_setup(&priv->obe.timer, aspeed_kcs_check_obe, 0);

	rc = aspeed_kcs_set_address(kcs_bmc, addrs, nr_addrs);
	if (rc)
		return rc;

	/* Host to BMC IRQ */
	rc = aspeed_kcs_config_downstream_irq(kcs_bmc, pdev);
	if (rc)
		return rc;

	/* BMC to Host IRQ */
	if (have_upstream_irq) {
		rc = aspeed_kcs_config_upstream_irq(priv, upstream_irq[0], upstream_irq[1]);
		if (rc < 0)
			return rc;
	} else {
		priv->upstream_irq.mode = aspeed_kcs_irq_none;
	}

	platform_set_drvdata(pdev, priv);

	/* Start with all events masked; clients enable what they need */
	aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
	aspeed_kcs_enable_channel(kcs_bmc, true);

	rc = kcs_bmc_add_device(&priv->kcs_bmc);
	if (rc) {
		dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc);
		return rc;
	}

	dev_info(&pdev->dev, "Initialised channel %d at 0x%x\n",
		 kcs_bmc->channel, addrs[0]);

	return 0;
}
644
/*
 * Tear down a KCS channel: unregister from the core, disable the channel
 * and its IRQs, then stop the OBE-emulation timer. The remove flag is set
 * under the lock so a concurrent timer callback cannot re-arm the timer
 * before del_timer_sync() completes.
 */
static int aspeed_kcs_remove(struct platform_device *pdev)
{
	struct aspeed_kcs_bmc *priv = platform_get_drvdata(pdev);
	struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc;

	kcs_bmc_remove_device(kcs_bmc);

	aspeed_kcs_enable_channel(kcs_bmc, false);
	aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);

	/* Make sure it's proper dead */
	spin_lock_irq(&priv->obe.lock);
	priv->obe.remove = true;
	spin_unlock_irq(&priv->obe.lock);
	del_timer_sync(&priv->obe.timer);

	return 0;
}
663
/* Devicetree compatibles served by this driver. */
static const struct of_device_id ast_kcs_bmc_match[] = {
	{ .compatible = "aspeed,ast2400-kcs-bmc-v2" },
	{ .compatible = "aspeed,ast2500-kcs-bmc-v2" },
	{ .compatible = "aspeed,ast2600-kcs-bmc" },
	{ }
};
MODULE_DEVICE_TABLE(of, ast_kcs_bmc_match);
671
/* Platform driver glue and module metadata. */
static struct platform_driver ast_kcs_bmc_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.of_match_table = ast_kcs_bmc_match,
	},
	.probe = aspeed_kcs_probe,
	.remove = aspeed_kcs_remove,
};
module_platform_driver(ast_kcs_bmc_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
MODULE_DESCRIPTION("Aspeed device interface to the KCS BMC device");