Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * Blackfin On-Chip CAN Driver
3 *
4 * Copyright 2004-2009 Analog Devices Inc.
5 *
6 * Enter bugs at http://blackfin.uclinux.org/
7 *
8 * Licensed under the GPL-2 or later.
9 */
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/bitops.h>
14#include <linux/interrupt.h>
15#include <linux/errno.h>
16#include <linux/netdevice.h>
17#include <linux/skbuff.h>
18#include <linux/platform_device.h>
19
20#include <linux/can/dev.h>
21#include <linux/can/error.h>
22
23#include <asm/bfin_can.h>
24#include <asm/portmux.h>
25
#define DRV_NAME "bfin_can"
#define BFIN_CAN_TIMEOUT 100	/* max poll iterations (10us each) for mode changes */
#define TX_ECHO_SKB_MAX 1	/* driver uses a single TX mailbox -> one echo skb */
29
30/*
31 * bfin can private data
32 */
struct bfin_can_priv {
	struct can_priv can;	/* must be the first member */
	struct net_device *dev;		/* back-pointer to the netdev */
	void __iomem *membase;		/* mapped CAN controller register block */
	int rx_irq;			/* receive mailbox interrupt line */
	int tx_irq;			/* transmit mailbox interrupt line */
	int err_irq;			/* global status/error interrupt line */
	unsigned short *pin_list;	/* portmux pin list from platform data */
};
42
43/*
44 * bfin can timing parameters
45 */
/* Limits of the bit-timing fields programmed into the CLOCK/TIMING registers. */
static const struct can_bittiming_const bfin_can_bittiming_const = {
	.name = DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	/*
	 * Although the BRP field can be set to any value, it is recommended
	 * that the value be greater than or equal to 4, as restrictions
	 * apply to the bit timing configuration when BRP is less than 4.
	 */
	.brp_min = 4,
	.brp_max = 1024,
	.brp_inc = 1,
};
62
63static int bfin_can_set_bittiming(struct net_device *dev)
64{
65 struct bfin_can_priv *priv = netdev_priv(dev);
66 struct bfin_can_regs __iomem *reg = priv->membase;
67 struct can_bittiming *bt = &priv->can.bittiming;
68 u16 clk, timing;
69
70 clk = bt->brp - 1;
71 timing = ((bt->sjw - 1) << 8) | (bt->prop_seg + bt->phase_seg1 - 1) |
72 ((bt->phase_seg2 - 1) << 4);
73
74 /*
75 * If the SAM bit is set, the input signal is oversampled three times
76 * at the SCLK rate.
77 */
78 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
79 timing |= SAM;
80
81 bfin_write(®->clock, clk);
82 bfin_write(®->timing, timing);
83
84 netdev_info(dev, "setting CLOCK=0x%04x TIMING=0x%04x\n", clk, timing);
85
86 return 0;
87}
88
89static void bfin_can_set_reset_mode(struct net_device *dev)
90{
91 struct bfin_can_priv *priv = netdev_priv(dev);
92 struct bfin_can_regs __iomem *reg = priv->membase;
93 int timeout = BFIN_CAN_TIMEOUT;
94 int i;
95
96 /* disable interrupts */
97 bfin_write(®->mbim1, 0);
98 bfin_write(®->mbim2, 0);
99 bfin_write(®->gim, 0);
100
101 /* reset can and enter configuration mode */
102 bfin_write(®->control, SRS | CCR);
103 SSYNC();
104 bfin_write(®->control, CCR);
105 SSYNC();
106 while (!(bfin_read(®->control) & CCA)) {
107 udelay(10);
108 if (--timeout == 0) {
109 netdev_err(dev, "fail to enter configuration mode\n");
110 BUG();
111 }
112 }
113
114 /*
115 * All mailbox configurations are marked as inactive
116 * by writing to CAN Mailbox Configuration Registers 1 and 2
117 * For all bits: 0 - Mailbox disabled, 1 - Mailbox enabled
118 */
119 bfin_write(®->mc1, 0);
120 bfin_write(®->mc2, 0);
121
122 /* Set Mailbox Direction */
123 bfin_write(®->md1, 0xFFFF); /* mailbox 1-16 are RX */
124 bfin_write(®->md2, 0); /* mailbox 17-32 are TX */
125
126 /* RECEIVE_STD_CHL */
127 for (i = 0; i < 2; i++) {
128 bfin_write(®->chl[RECEIVE_STD_CHL + i].id0, 0);
129 bfin_write(®->chl[RECEIVE_STD_CHL + i].id1, AME);
130 bfin_write(®->chl[RECEIVE_STD_CHL + i].dlc, 0);
131 bfin_write(®->msk[RECEIVE_STD_CHL + i].amh, 0x1FFF);
132 bfin_write(®->msk[RECEIVE_STD_CHL + i].aml, 0xFFFF);
133 }
134
135 /* RECEIVE_EXT_CHL */
136 for (i = 0; i < 2; i++) {
137 bfin_write(®->chl[RECEIVE_EXT_CHL + i].id0, 0);
138 bfin_write(®->chl[RECEIVE_EXT_CHL + i].id1, AME | IDE);
139 bfin_write(®->chl[RECEIVE_EXT_CHL + i].dlc, 0);
140 bfin_write(®->msk[RECEIVE_EXT_CHL + i].amh, 0x1FFF);
141 bfin_write(®->msk[RECEIVE_EXT_CHL + i].aml, 0xFFFF);
142 }
143
144 bfin_write(®->mc2, BIT(TRANSMIT_CHL - 16));
145 bfin_write(®->mc1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
146 SSYNC();
147
148 priv->can.state = CAN_STATE_STOPPED;
149}
150
151static void bfin_can_set_normal_mode(struct net_device *dev)
152{
153 struct bfin_can_priv *priv = netdev_priv(dev);
154 struct bfin_can_regs __iomem *reg = priv->membase;
155 int timeout = BFIN_CAN_TIMEOUT;
156
157 /*
158 * leave configuration mode
159 */
160 bfin_write(®->control, bfin_read(®->control) & ~CCR);
161
162 while (bfin_read(®->status) & CCA) {
163 udelay(10);
164 if (--timeout == 0) {
165 netdev_err(dev, "fail to leave configuration mode\n");
166 BUG();
167 }
168 }
169
170 /*
171 * clear _All_ tx and rx interrupts
172 */
173 bfin_write(®->mbtif1, 0xFFFF);
174 bfin_write(®->mbtif2, 0xFFFF);
175 bfin_write(®->mbrif1, 0xFFFF);
176 bfin_write(®->mbrif2, 0xFFFF);
177
178 /*
179 * clear global interrupt status register
180 */
181 bfin_write(®->gis, 0x7FF); /* overwrites with '1' */
182
183 /*
184 * Initialize Interrupts
185 * - set bits in the mailbox interrupt mask register
186 * - global interrupt mask
187 */
188 bfin_write(®->mbim1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
189 bfin_write(®->mbim2, BIT(TRANSMIT_CHL - 16));
190
191 bfin_write(®->gim, EPIM | BOIM | RMLIM);
192 SSYNC();
193}
194
/*
 * Bring the controller into normal (active) mode, resetting it first
 * unless it is already stopped (i.e. already in reset mode).
 */
static void bfin_can_start(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);

	/* enter reset mode */
	if (priv->can.state != CAN_STATE_STOPPED)
		bfin_can_set_reset_mode(dev);

	/* leave reset mode */
	bfin_can_set_normal_mode(dev);
}
206
207static int bfin_can_set_mode(struct net_device *dev, enum can_mode mode)
208{
209 switch (mode) {
210 case CAN_MODE_START:
211 bfin_can_start(dev);
212 if (netif_queue_stopped(dev))
213 netif_wake_queue(dev);
214 break;
215
216 default:
217 return -EOPNOTSUPP;
218 }
219
220 return 0;
221}
222
223static int bfin_can_get_berr_counter(const struct net_device *dev,
224 struct can_berr_counter *bec)
225{
226 struct bfin_can_priv *priv = netdev_priv(dev);
227 struct bfin_can_regs __iomem *reg = priv->membase;
228
229 u16 cec = bfin_read(®->cec);
230
231 bec->txerr = cec >> 8;
232 bec->rxerr = cec;
233
234 return 0;
235}
236
237static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
238{
239 struct bfin_can_priv *priv = netdev_priv(dev);
240 struct bfin_can_regs __iomem *reg = priv->membase;
241 struct can_frame *cf = (struct can_frame *)skb->data;
242 u8 dlc = cf->can_dlc;
243 canid_t id = cf->can_id;
244 u8 *data = cf->data;
245 u16 val;
246 int i;
247
248 if (can_dropped_invalid_skb(dev, skb))
249 return NETDEV_TX_OK;
250
251 netif_stop_queue(dev);
252
253 /* fill id */
254 if (id & CAN_EFF_FLAG) {
255 bfin_write(®->chl[TRANSMIT_CHL].id0, id);
256 val = ((id & 0x1FFF0000) >> 16) | IDE;
257 } else
258 val = (id << 2);
259 if (id & CAN_RTR_FLAG)
260 val |= RTR;
261 bfin_write(®->chl[TRANSMIT_CHL].id1, val | AME);
262
263 /* fill payload */
264 for (i = 0; i < 8; i += 2) {
265 val = ((7 - i) < dlc ? (data[7 - i]) : 0) +
266 ((6 - i) < dlc ? (data[6 - i] << 8) : 0);
267 bfin_write(®->chl[TRANSMIT_CHL].data[i], val);
268 }
269
270 /* fill data length code */
271 bfin_write(®->chl[TRANSMIT_CHL].dlc, dlc);
272
273 can_put_echo_skb(skb, dev, 0);
274
275 /* set transmit request */
276 bfin_write(®->trs2, BIT(TRANSMIT_CHL - 16));
277
278 return 0;
279}
280
281static void bfin_can_rx(struct net_device *dev, u16 isrc)
282{
283 struct bfin_can_priv *priv = netdev_priv(dev);
284 struct net_device_stats *stats = &dev->stats;
285 struct bfin_can_regs __iomem *reg = priv->membase;
286 struct can_frame *cf;
287 struct sk_buff *skb;
288 int obj;
289 int i;
290 u16 val;
291
292 skb = alloc_can_skb(dev, &cf);
293 if (skb == NULL)
294 return;
295
296 /* get id */
297 if (isrc & BIT(RECEIVE_EXT_CHL)) {
298 /* extended frame format (EFF) */
299 cf->can_id = ((bfin_read(®->chl[RECEIVE_EXT_CHL].id1)
300 & 0x1FFF) << 16)
301 + bfin_read(®->chl[RECEIVE_EXT_CHL].id0);
302 cf->can_id |= CAN_EFF_FLAG;
303 obj = RECEIVE_EXT_CHL;
304 } else {
305 /* standard frame format (SFF) */
306 cf->can_id = (bfin_read(®->chl[RECEIVE_STD_CHL].id1)
307 & 0x1ffc) >> 2;
308 obj = RECEIVE_STD_CHL;
309 }
310 if (bfin_read(®->chl[obj].id1) & RTR)
311 cf->can_id |= CAN_RTR_FLAG;
312
313 /* get data length code */
314 cf->can_dlc = get_can_dlc(bfin_read(®->chl[obj].dlc) & 0xF);
315
316 /* get payload */
317 for (i = 0; i < 8; i += 2) {
318 val = bfin_read(®->chl[obj].data[i]);
319 cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0;
320 cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
321 }
322
323 netif_rx(skb);
324
325 stats->rx_packets++;
326 stats->rx_bytes += cf->can_dlc;
327}
328
329static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
330{
331 struct bfin_can_priv *priv = netdev_priv(dev);
332 struct bfin_can_regs __iomem *reg = priv->membase;
333 struct net_device_stats *stats = &dev->stats;
334 struct can_frame *cf;
335 struct sk_buff *skb;
336 enum can_state state = priv->can.state;
337
338 skb = alloc_can_err_skb(dev, &cf);
339 if (skb == NULL)
340 return -ENOMEM;
341
342 if (isrc & RMLIS) {
343 /* data overrun interrupt */
344 netdev_dbg(dev, "data overrun interrupt\n");
345 cf->can_id |= CAN_ERR_CRTL;
346 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
347 stats->rx_over_errors++;
348 stats->rx_errors++;
349 }
350
351 if (isrc & BOIS) {
352 netdev_dbg(dev, "bus-off mode interrupt\n");
353 state = CAN_STATE_BUS_OFF;
354 cf->can_id |= CAN_ERR_BUSOFF;
355 priv->can.can_stats.bus_off++;
356 can_bus_off(dev);
357 }
358
359 if (isrc & EPIS) {
360 /* error passive interrupt */
361 netdev_dbg(dev, "error passive interrupt\n");
362 state = CAN_STATE_ERROR_PASSIVE;
363 }
364
365 if ((isrc & EWTIS) || (isrc & EWRIS)) {
366 netdev_dbg(dev, "Error Warning Transmit/Receive Interrupt\n");
367 state = CAN_STATE_ERROR_WARNING;
368 }
369
370 if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
371 state == CAN_STATE_ERROR_PASSIVE)) {
372 u16 cec = bfin_read(®->cec);
373 u8 rxerr = cec;
374 u8 txerr = cec >> 8;
375
376 cf->can_id |= CAN_ERR_CRTL;
377 if (state == CAN_STATE_ERROR_WARNING) {
378 priv->can.can_stats.error_warning++;
379 cf->data[1] = (txerr > rxerr) ?
380 CAN_ERR_CRTL_TX_WARNING :
381 CAN_ERR_CRTL_RX_WARNING;
382 } else {
383 priv->can.can_stats.error_passive++;
384 cf->data[1] = (txerr > rxerr) ?
385 CAN_ERR_CRTL_TX_PASSIVE :
386 CAN_ERR_CRTL_RX_PASSIVE;
387 }
388 }
389
390 if (status) {
391 priv->can.can_stats.bus_error++;
392
393 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
394
395 if (status & BEF)
396 cf->data[2] |= CAN_ERR_PROT_BIT;
397 else if (status & FER)
398 cf->data[2] |= CAN_ERR_PROT_FORM;
399 else if (status & SER)
400 cf->data[2] |= CAN_ERR_PROT_STUFF;
401 else
402 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
403 }
404
405 priv->can.state = state;
406
407 netif_rx(skb);
408
409 stats->rx_packets++;
410 stats->rx_bytes += cf->can_dlc;
411
412 return 0;
413}
414
415static irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
416{
417 struct net_device *dev = dev_id;
418 struct bfin_can_priv *priv = netdev_priv(dev);
419 struct bfin_can_regs __iomem *reg = priv->membase;
420 struct net_device_stats *stats = &dev->stats;
421 u16 status, isrc;
422
423 if ((irq == priv->tx_irq) && bfin_read(®->mbtif2)) {
424 /* transmission complete interrupt */
425 bfin_write(®->mbtif2, 0xFFFF);
426 stats->tx_packets++;
427 stats->tx_bytes += bfin_read(®->chl[TRANSMIT_CHL].dlc);
428 can_get_echo_skb(dev, 0);
429 netif_wake_queue(dev);
430 } else if ((irq == priv->rx_irq) && bfin_read(®->mbrif1)) {
431 /* receive interrupt */
432 isrc = bfin_read(®->mbrif1);
433 bfin_write(®->mbrif1, 0xFFFF);
434 bfin_can_rx(dev, isrc);
435 } else if ((irq == priv->err_irq) && bfin_read(®->gis)) {
436 /* error interrupt */
437 isrc = bfin_read(®->gis);
438 status = bfin_read(®->esr);
439 bfin_write(®->gis, 0x7FF);
440 bfin_can_err(dev, isrc, status);
441 } else {
442 return IRQ_NONE;
443 }
444
445 return IRQ_HANDLED;
446}
447
448static int bfin_can_open(struct net_device *dev)
449{
450 struct bfin_can_priv *priv = netdev_priv(dev);
451 int err;
452
453 /* set chip into reset mode */
454 bfin_can_set_reset_mode(dev);
455
456 /* common open */
457 err = open_candev(dev);
458 if (err)
459 goto exit_open;
460
461 /* register interrupt handler */
462 err = request_irq(priv->rx_irq, &bfin_can_interrupt, 0,
463 "bfin-can-rx", dev);
464 if (err)
465 goto exit_rx_irq;
466 err = request_irq(priv->tx_irq, &bfin_can_interrupt, 0,
467 "bfin-can-tx", dev);
468 if (err)
469 goto exit_tx_irq;
470 err = request_irq(priv->err_irq, &bfin_can_interrupt, 0,
471 "bfin-can-err", dev);
472 if (err)
473 goto exit_err_irq;
474
475 bfin_can_start(dev);
476
477 netif_start_queue(dev);
478
479 return 0;
480
481exit_err_irq:
482 free_irq(priv->tx_irq, dev);
483exit_tx_irq:
484 free_irq(priv->rx_irq, dev);
485exit_rx_irq:
486 close_candev(dev);
487exit_open:
488 return err;
489}
490
491static int bfin_can_close(struct net_device *dev)
492{
493 struct bfin_can_priv *priv = netdev_priv(dev);
494
495 netif_stop_queue(dev);
496 bfin_can_set_reset_mode(dev);
497
498 close_candev(dev);
499
500 free_irq(priv->rx_irq, dev);
501 free_irq(priv->tx_irq, dev);
502 free_irq(priv->err_irq, dev);
503
504 return 0;
505}
506
507static struct net_device *alloc_bfin_candev(void)
508{
509 struct net_device *dev;
510 struct bfin_can_priv *priv;
511
512 dev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
513 if (!dev)
514 return NULL;
515
516 priv = netdev_priv(dev);
517
518 priv->dev = dev;
519 priv->can.bittiming_const = &bfin_can_bittiming_const;
520 priv->can.do_set_bittiming = bfin_can_set_bittiming;
521 priv->can.do_set_mode = bfin_can_set_mode;
522 priv->can.do_get_berr_counter = bfin_can_get_berr_counter;
523 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
524
525 return dev;
526}
527
/* netdev callbacks: open/stop/xmit above, plus can-dev MTU validation */
static const struct net_device_ops bfin_can_netdev_ops = {
	.ndo_open = bfin_can_open,
	.ndo_stop = bfin_can_close,
	.ndo_start_xmit = bfin_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
534
535static int bfin_can_probe(struct platform_device *pdev)
536{
537 int err;
538 struct net_device *dev;
539 struct bfin_can_priv *priv;
540 struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
541 unsigned short *pdata;
542
543 pdata = dev_get_platdata(&pdev->dev);
544 if (!pdata) {
545 dev_err(&pdev->dev, "No platform data provided!\n");
546 err = -EINVAL;
547 goto exit;
548 }
549
550 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
551 rx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
552 tx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
553 err_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
554 if (!res_mem || !rx_irq || !tx_irq || !err_irq) {
555 err = -EINVAL;
556 goto exit;
557 }
558
559 if (!request_mem_region(res_mem->start, resource_size(res_mem),
560 dev_name(&pdev->dev))) {
561 err = -EBUSY;
562 goto exit;
563 }
564
565 /* request peripheral pins */
566 err = peripheral_request_list(pdata, dev_name(&pdev->dev));
567 if (err)
568 goto exit_mem_release;
569
570 dev = alloc_bfin_candev();
571 if (!dev) {
572 err = -ENOMEM;
573 goto exit_peri_pin_free;
574 }
575
576 priv = netdev_priv(dev);
577 priv->membase = (void __iomem *)res_mem->start;
578 priv->rx_irq = rx_irq->start;
579 priv->tx_irq = tx_irq->start;
580 priv->err_irq = err_irq->start;
581 priv->pin_list = pdata;
582 priv->can.clock.freq = get_sclk();
583
584 platform_set_drvdata(pdev, dev);
585 SET_NETDEV_DEV(dev, &pdev->dev);
586
587 dev->flags |= IFF_ECHO; /* we support local echo */
588 dev->netdev_ops = &bfin_can_netdev_ops;
589
590 bfin_can_set_reset_mode(dev);
591
592 err = register_candev(dev);
593 if (err) {
594 dev_err(&pdev->dev, "registering failed (err=%d)\n", err);
595 goto exit_candev_free;
596 }
597
598 dev_info(&pdev->dev,
599 "%s device registered"
600 "(®_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n",
601 DRV_NAME, priv->membase, priv->rx_irq,
602 priv->tx_irq, priv->err_irq, priv->can.clock.freq);
603 return 0;
604
605exit_candev_free:
606 free_candev(dev);
607exit_peri_pin_free:
608 peripheral_free_list(pdata);
609exit_mem_release:
610 release_mem_region(res_mem->start, resource_size(res_mem));
611exit:
612 return err;
613}
614
615static int bfin_can_remove(struct platform_device *pdev)
616{
617 struct net_device *dev = platform_get_drvdata(pdev);
618 struct bfin_can_priv *priv = netdev_priv(dev);
619 struct resource *res;
620
621 bfin_can_set_reset_mode(dev);
622
623 unregister_candev(dev);
624
625 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
626 release_mem_region(res->start, resource_size(res));
627
628 peripheral_free_list(priv->pin_list);
629
630 free_candev(dev);
631 return 0;
632}
633
634#ifdef CONFIG_PM
635static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
636{
637 struct net_device *dev = platform_get_drvdata(pdev);
638 struct bfin_can_priv *priv = netdev_priv(dev);
639 struct bfin_can_regs __iomem *reg = priv->membase;
640 int timeout = BFIN_CAN_TIMEOUT;
641
642 if (netif_running(dev)) {
643 /* enter sleep mode */
644 bfin_write(®->control, bfin_read(®->control) | SMR);
645 SSYNC();
646 while (!(bfin_read(®->intr) & SMACK)) {
647 udelay(10);
648 if (--timeout == 0) {
649 netdev_err(dev, "fail to enter sleep mode\n");
650 BUG();
651 }
652 }
653 }
654
655 return 0;
656}
657
658static int bfin_can_resume(struct platform_device *pdev)
659{
660 struct net_device *dev = platform_get_drvdata(pdev);
661 struct bfin_can_priv *priv = netdev_priv(dev);
662 struct bfin_can_regs __iomem *reg = priv->membase;
663
664 if (netif_running(dev)) {
665 /* leave sleep mode */
666 bfin_write(®->intr, 0);
667 SSYNC();
668 }
669
670 return 0;
671}
672#else
673#define bfin_can_suspend NULL
674#define bfin_can_resume NULL
675#endif /* CONFIG_PM */
676
static struct platform_driver bfin_can_driver = {
	.probe = bfin_can_probe,
	.remove = bfin_can_remove,
	.suspend = bfin_can_suspend,	/* NULL when !CONFIG_PM */
	.resume = bfin_can_resume,	/* NULL when !CONFIG_PM */
	.driver = {
		.name = DRV_NAME,
	},
};
686
687module_platform_driver(bfin_can_driver);
688
689MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
690MODULE_LICENSE("GPL");
691MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver");
692MODULE_ALIAS("platform:" DRV_NAME);