Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drivers/net: support hdlc function for QE-UCC

The driver adds HDLC support for the Freescale QUICC Engine.
It supports both NMSI and TSA modes.

Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Zhao Qiang and committed by
David S. Miller
c19b6d24 35ef1c20

+1379 -2
+7
MAINTAINERS
··· 4870 4870 X: drivers/net/ethernet/freescale/gianfar_ptp.c 4871 4871 F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt 4872 4872 4873 + FREESCALE QUICC ENGINE UCC HDLC DRIVER 4874 + M: Zhao Qiang <qiang.zhao@nxp.com> 4875 + L: netdev@vger.kernel.org 4876 + L: linuxppc-dev@lists.ozlabs.org 4877 + S: Maintained 4878 + F: drivers/net/wan/fsl_ucc_hdlc* 4879 + 4873 4880 FREESCALE QUICC ENGINE UCC UART DRIVER 4874 4881 M: Timur Tabi <timur@tabi.org> 4875 4882 L: linuxppc-dev@lists.ozlabs.org
+11
drivers/net/wan/Kconfig
··· 280 280 To compile this driver as a module, choose M here: the 281 281 module will be called dscc4. 282 282 283 + config FSL_UCC_HDLC 284 + tristate "Freescale QUICC Engine HDLC support" 285 + depends on HDLC 286 + depends on QUICC_ENGINE 287 + help 288 + Driver for Freescale QUICC Engine HDLC controller. The driver 289 + supports HDLC in NMSI and TDM mode. 290 + 291 + To compile this driver as a module, choose M here: the 292 + module will be called fsl_ucc_hdlc. 293 + 283 294 config DSCC4_PCISYNC 284 295 bool "Etinc PCISYNC features" 285 296 depends on DSCC4
+1
drivers/net/wan/Makefile
··· 32 32 obj-$(CONFIG_PCI200SYN) += pci200syn.o 33 33 obj-$(CONFIG_PC300TOO) += pc300too.o 34 34 obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o 35 + obj-$(CONFIG_FSL_UCC_HDLC) += fsl_ucc_hdlc.o 35 36 36 37 clean-files := wanxlfw.inc 37 38 $(obj)/wanxl.o: $(obj)/wanxlfw.inc
+1192
drivers/net/wan/fsl_ucc_hdlc.c
··· 1 + /* Freescale QUICC Engine HDLC Device Driver 2 + * 3 + * Copyright 2016 Freescale Semiconductor Inc. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms of the GNU General Public License as published by the 7 + * Free Software Foundation; either version 2 of the License, or (at your 8 + * option) any later version. 9 + */ 10 + 11 + #include <linux/delay.h> 12 + #include <linux/dma-mapping.h> 13 + #include <linux/hdlc.h> 14 + #include <linux/init.h> 15 + #include <linux/interrupt.h> 16 + #include <linux/io.h> 17 + #include <linux/irq.h> 18 + #include <linux/kernel.h> 19 + #include <linux/module.h> 20 + #include <linux/netdevice.h> 21 + #include <linux/of_address.h> 22 + #include <linux/of_irq.h> 23 + #include <linux/of_platform.h> 24 + #include <linux/platform_device.h> 25 + #include <linux/sched.h> 26 + #include <linux/skbuff.h> 27 + #include <linux/slab.h> 28 + #include <linux/spinlock.h> 29 + #include <linux/stddef.h> 30 + #include <soc/fsl/qe/qe_tdm.h> 31 + #include <uapi/linux/if_arp.h> 32 + 33 + #include "fsl_ucc_hdlc.h" 34 + 35 + #define DRV_DESC "Freescale QE UCC HDLC Driver" 36 + #define DRV_NAME "ucc_hdlc" 37 + 38 + #define TDM_PPPOHT_SLIC_MAXIN 39 + #define BROKEN_FRAME_INFO 40 + 41 + static struct ucc_tdm_info utdm_primary_info = { 42 + .uf_info = { 43 + .tsa = 0, 44 + .cdp = 0, 45 + .cds = 1, 46 + .ctsp = 1, 47 + .ctss = 1, 48 + .revd = 0, 49 + .urfs = 256, 50 + .utfs = 256, 51 + .urfet = 128, 52 + .urfset = 192, 53 + .utfet = 128, 54 + .utftt = 0x40, 55 + .ufpt = 256, 56 + .mode = UCC_FAST_PROTOCOL_MODE_HDLC, 57 + .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, 58 + .tenc = UCC_FAST_TX_ENCODING_NRZ, 59 + .renc = UCC_FAST_RX_ENCODING_NRZ, 60 + .tcrc = UCC_FAST_16_BIT_CRC, 61 + .synl = UCC_FAST_SYNC_LEN_NOT_USED, 62 + }, 63 + 64 + .si_info = { 65 + #ifdef TDM_PPPOHT_SLIC_MAXIN 66 + .simr_rfsd = 1, 67 + .simr_tfsd = 2, 68 + #else 69 + .simr_rfsd = 0, 70 + .simr_tfsd = 0, 71 + #endif 72 + 
.simr_crt = 0, 73 + .simr_sl = 0, 74 + .simr_ce = 1, 75 + .simr_fe = 1, 76 + .simr_gm = 0, 77 + }, 78 + }; 79 + 80 + static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM]; 81 + 82 + static int uhdlc_init(struct ucc_hdlc_private *priv) 83 + { 84 + struct ucc_tdm_info *ut_info; 85 + struct ucc_fast_info *uf_info; 86 + u32 cecr_subblock; 87 + u16 bd_status; 88 + int ret, i; 89 + void *bd_buffer; 90 + dma_addr_t bd_dma_addr; 91 + u32 riptr; 92 + u32 tiptr; 93 + u32 gumr; 94 + 95 + ut_info = priv->ut_info; 96 + uf_info = &ut_info->uf_info; 97 + 98 + if (priv->tsa) { 99 + uf_info->tsa = 1; 100 + uf_info->ctsp = 1; 101 + } 102 + uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF | 103 + UCC_HDLC_UCCE_TXB) << 16); 104 + 105 + ret = ucc_fast_init(uf_info, &priv->uccf); 106 + if (ret) { 107 + dev_err(priv->dev, "Failed to init uccf."); 108 + return ret; 109 + } 110 + 111 + priv->uf_regs = priv->uccf->uf_regs; 112 + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 113 + 114 + /* Loopback mode */ 115 + if (priv->loopback) { 116 + dev_info(priv->dev, "Loopback Mode\n"); 117 + gumr = ioread32be(&priv->uf_regs->gumr); 118 + gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS | 119 + UCC_FAST_GUMR_TCI); 120 + gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN); 121 + iowrite32be(gumr, &priv->uf_regs->gumr); 122 + } 123 + 124 + /* Initialize SI */ 125 + if (priv->tsa) 126 + ucc_tdm_init(priv->utdm, priv->ut_info); 127 + 128 + /* Write to QE CECR, UCCx channel to Stop Transmission */ 129 + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); 130 + ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock, 131 + QE_CR_PROTOCOL_UNSPECIFIED, 0); 132 + 133 + /* Set UPSMR normal mode (need fixed)*/ 134 + iowrite32be(0, &priv->uf_regs->upsmr); 135 + 136 + priv->rx_ring_size = RX_BD_RING_LEN; 137 + priv->tx_ring_size = TX_BD_RING_LEN; 138 + /* Alloc Rx BD */ 139 + priv->rx_bd_base = dma_alloc_coherent(priv->dev, 140 + RX_BD_RING_LEN * sizeof(struct qe_bd *), 141 + 
&priv->dma_rx_bd, GFP_KERNEL); 142 + 143 + if (!priv->rx_bd_base) { 144 + dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n"); 145 + ret = -ENOMEM; 146 + goto rxbd_alloc_error; 147 + } 148 + 149 + /* Alloc Tx BD */ 150 + priv->tx_bd_base = dma_alloc_coherent(priv->dev, 151 + TX_BD_RING_LEN * sizeof(struct qe_bd *), 152 + &priv->dma_tx_bd, GFP_KERNEL); 153 + 154 + if (!priv->tx_bd_base) { 155 + dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n"); 156 + ret = -ENOMEM; 157 + goto txbd_alloc_error; 158 + } 159 + 160 + /* Alloc parameter ram for ucc hdlc */ 161 + priv->ucc_pram_offset = qe_muram_alloc(sizeof(priv->ucc_pram), 162 + ALIGNMENT_OF_UCC_HDLC_PRAM); 163 + 164 + if (priv->ucc_pram_offset < 0) { 165 + dev_err(priv->dev, "Can not allocate MURAM for hdlc prameter.\n"); 166 + ret = -ENOMEM; 167 + goto pram_alloc_error; 168 + } 169 + 170 + priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff), 171 + GFP_KERNEL); 172 + if (!priv->rx_skbuff) 173 + goto rx_skb_alloc_error; 174 + 175 + priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff), 176 + GFP_KERNEL); 177 + if (!priv->tx_skbuff) 178 + goto tx_skb_alloc_error; 179 + 180 + priv->skb_curtx = 0; 181 + priv->skb_dirtytx = 0; 182 + priv->curtx_bd = priv->tx_bd_base; 183 + priv->dirty_tx = priv->tx_bd_base; 184 + priv->currx_bd = priv->rx_bd_base; 185 + priv->currx_bdnum = 0; 186 + 187 + /* init parameter base */ 188 + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); 189 + ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock, 190 + QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset); 191 + 192 + priv->ucc_pram = (struct ucc_hdlc_param __iomem *) 193 + qe_muram_addr(priv->ucc_pram_offset); 194 + 195 + /* Zero out parameter ram */ 196 + memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param)); 197 + 198 + /* Alloc riptr, tiptr */ 199 + riptr = qe_muram_alloc(32, 32); 200 + if (riptr < 0) { 201 + dev_err(priv->dev, "Cannot allocate MURAM mem 
for Receive internal temp data pointer\n"); 202 + ret = -ENOMEM; 203 + goto riptr_alloc_error; 204 + } 205 + 206 + tiptr = qe_muram_alloc(32, 32); 207 + if (tiptr < 0) { 208 + dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n"); 209 + ret = -ENOMEM; 210 + goto tiptr_alloc_error; 211 + } 212 + 213 + /* Set RIPTR, TIPTR */ 214 + iowrite16be(riptr, &priv->ucc_pram->riptr); 215 + iowrite16be(tiptr, &priv->ucc_pram->tiptr); 216 + 217 + /* Set MRBLR */ 218 + iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr); 219 + 220 + /* Set RBASE, TBASE */ 221 + iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase); 222 + iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase); 223 + 224 + /* Set RSTATE, TSTATE */ 225 + iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate); 226 + iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate); 227 + 228 + /* Set C_MASK, C_PRES for 16bit CRC */ 229 + iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask); 230 + iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres); 231 + 232 + iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr); 233 + iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr); 234 + iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt); 235 + iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask); 236 + iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1); 237 + iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2); 238 + iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3); 239 + iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4); 240 + 241 + /* Get BD buffer */ 242 + bd_buffer = dma_alloc_coherent(priv->dev, 243 + (RX_BD_RING_LEN + TX_BD_RING_LEN) * 244 + MAX_RX_BUF_LENGTH, 245 + &bd_dma_addr, GFP_KERNEL); 246 + 247 + if (!bd_buffer) { 248 + dev_err(priv->dev, "Could not allocate buffer descriptors\n"); 249 + ret = -ENOMEM; 250 + goto bd_alloc_error; 251 + } 252 + 253 + memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN) 254 + * MAX_RX_BUF_LENGTH); 255 + 256 + 
priv->rx_buffer = bd_buffer; 257 + priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH; 258 + 259 + priv->dma_rx_addr = bd_dma_addr; 260 + priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH; 261 + 262 + for (i = 0; i < RX_BD_RING_LEN; i++) { 263 + if (i < (RX_BD_RING_LEN - 1)) 264 + bd_status = R_E_S | R_I_S; 265 + else 266 + bd_status = R_E_S | R_I_S | R_W_S; 267 + 268 + iowrite16be(bd_status, &priv->rx_bd_base[i].status); 269 + iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH, 270 + &priv->rx_bd_base[i].buf); 271 + } 272 + 273 + for (i = 0; i < TX_BD_RING_LEN; i++) { 274 + if (i < (TX_BD_RING_LEN - 1)) 275 + bd_status = T_I_S | T_TC_S; 276 + else 277 + bd_status = T_I_S | T_TC_S | T_W_S; 278 + 279 + iowrite16be(bd_status, &priv->tx_bd_base[i].status); 280 + iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH, 281 + &priv->tx_bd_base[i].buf); 282 + } 283 + 284 + return 0; 285 + 286 + bd_alloc_error: 287 + qe_muram_free(tiptr); 288 + tiptr_alloc_error: 289 + qe_muram_free(riptr); 290 + riptr_alloc_error: 291 + kfree(priv->tx_skbuff); 292 + tx_skb_alloc_error: 293 + kfree(priv->rx_skbuff); 294 + rx_skb_alloc_error: 295 + qe_muram_free(priv->ucc_pram_offset); 296 + pram_alloc_error: 297 + dma_free_coherent(priv->dev, 298 + TX_BD_RING_LEN * sizeof(struct qe_bd), 299 + priv->tx_bd_base, priv->dma_tx_bd); 300 + txbd_alloc_error: 301 + dma_free_coherent(priv->dev, 302 + RX_BD_RING_LEN * sizeof(struct qe_bd), 303 + priv->rx_bd_base, priv->dma_rx_bd); 304 + rxbd_alloc_error: 305 + ucc_fast_free(priv->uccf); 306 + 307 + return ret; 308 + } 309 + 310 + static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev) 311 + { 312 + hdlc_device *hdlc = dev_to_hdlc(dev); 313 + struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv; 314 + struct qe_bd __iomem *bd; 315 + u16 bd_status; 316 + unsigned long flags; 317 + u8 *send_buf; 318 + int i; 319 + u16 *proto_head; 320 + 321 + switch (dev->type) { 322 + case 
ARPHRD_RAWHDLC: 323 + if (skb_headroom(skb) < HDLC_HEAD_LEN) { 324 + dev->stats.tx_dropped++; 325 + dev_kfree_skb(skb); 326 + netdev_err(dev, "No enough space for hdlc head\n"); 327 + return -ENOMEM; 328 + } 329 + 330 + skb_push(skb, HDLC_HEAD_LEN); 331 + 332 + proto_head = (u16 *)skb->data; 333 + *proto_head = htons(DEFAULT_HDLC_HEAD); 334 + 335 + dev->stats.tx_bytes += skb->len; 336 + break; 337 + 338 + case ARPHRD_PPP: 339 + proto_head = (u16 *)skb->data; 340 + if (*proto_head != htons(DEFAULT_PPP_HEAD)) { 341 + dev->stats.tx_dropped++; 342 + dev_kfree_skb(skb); 343 + netdev_err(dev, "Wrong ppp header\n"); 344 + return -ENOMEM; 345 + } 346 + 347 + dev->stats.tx_bytes += skb->len; 348 + break; 349 + 350 + default: 351 + dev->stats.tx_dropped++; 352 + dev_kfree_skb(skb); 353 + return -ENOMEM; 354 + } 355 + 356 + pr_info("Tx data skb->len:%d ", skb->len); 357 + send_buf = (u8 *)skb->data; 358 + pr_info("\nTransmitted data:\n"); 359 + for (i = 0; i < 16; i++) { 360 + if (i == skb->len) 361 + pr_info("++++"); 362 + else 363 + pr_info("%02x\n", send_buf[i]); 364 + } 365 + spin_lock_irqsave(&priv->lock, flags); 366 + 367 + /* Start from the next BD that should be filled */ 368 + bd = priv->curtx_bd; 369 + bd_status = ioread16be(&bd->status); 370 + /* Save the skb pointer so we can free it later */ 371 + priv->tx_skbuff[priv->skb_curtx] = skb; 372 + 373 + /* Update the current skb pointer (wrapping if this was the last) */ 374 + priv->skb_curtx = 375 + (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN); 376 + 377 + /* copy skb data to tx buffer for sdma processing */ 378 + memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr), 379 + skb->data, skb->len); 380 + 381 + /* set bd status and length */ 382 + bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S; 383 + 384 + iowrite16be(bd_status, &bd->status); 385 + iowrite16be(skb->len, &bd->length); 386 + 387 + /* Move to next BD in the ring */ 388 + if (!(bd_status & T_W_S)) 389 + bd += 1; 
390 + else 391 + bd = priv->tx_bd_base; 392 + 393 + if (bd == priv->dirty_tx) { 394 + if (!netif_queue_stopped(dev)) 395 + netif_stop_queue(dev); 396 + } 397 + 398 + priv->curtx_bd = bd; 399 + 400 + spin_unlock_irqrestore(&priv->lock, flags); 401 + 402 + return NETDEV_TX_OK; 403 + } 404 + 405 + static int hdlc_tx_done(struct ucc_hdlc_private *priv) 406 + { 407 + /* Start from the next BD that should be filled */ 408 + struct net_device *dev = priv->ndev; 409 + struct qe_bd *bd; /* BD pointer */ 410 + u16 bd_status; 411 + 412 + bd = priv->dirty_tx; 413 + bd_status = ioread16be(&bd->status); 414 + 415 + /* Normal processing. */ 416 + while ((bd_status & T_R_S) == 0) { 417 + struct sk_buff *skb; 418 + 419 + /* BD contains already transmitted buffer. */ 420 + /* Handle the transmitted buffer and release */ 421 + /* the BD to be used with the current frame */ 422 + 423 + skb = priv->tx_skbuff[priv->skb_dirtytx]; 424 + if (!skb) 425 + break; 426 + pr_info("TxBD: %x\n", bd_status); 427 + dev->stats.tx_packets++; 428 + memset(priv->tx_buffer + 429 + (be32_to_cpu(bd->buf) - priv->dma_tx_addr), 430 + 0, skb->len); 431 + dev_kfree_skb_irq(skb); 432 + 433 + priv->tx_skbuff[priv->skb_dirtytx] = NULL; 434 + priv->skb_dirtytx = 435 + (priv->skb_dirtytx + 436 + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN); 437 + 438 + /* We freed a buffer, so now we can restart transmission */ 439 + if (netif_queue_stopped(dev)) 440 + netif_wake_queue(dev); 441 + 442 + /* Advance the confirmation BD pointer */ 443 + if (!(bd_status & T_W_S)) 444 + bd += 1; 445 + else 446 + bd = priv->tx_bd_base; 447 + bd_status = ioread16be(&bd->status); 448 + } 449 + priv->dirty_tx = bd; 450 + 451 + return 0; 452 + } 453 + 454 + static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit) 455 + { 456 + struct net_device *dev = priv->ndev; 457 + struct sk_buff *skb; 458 + hdlc_device *hdlc = dev_to_hdlc(dev); 459 + struct qe_bd *bd; 460 + u32 bd_status; 461 + u16 length, howmany = 0; 462 + u8 *bdbuffer; 463 
+ int i; 464 + static int entry; 465 + 466 + bd = priv->currx_bd; 467 + bd_status = ioread16be(&bd->status); 468 + 469 + /* while there are received buffers and BD is full (~R_E) */ 470 + while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) { 471 + if (bd_status & R_OV_S) 472 + dev->stats.rx_over_errors++; 473 + if (bd_status & R_CR_S) { 474 + #ifdef BROKEN_FRAME_INFO 475 + pr_info("Broken Frame with RxBD: %x\n", bd_status); 476 + #endif 477 + dev->stats.rx_crc_errors++; 478 + dev->stats.rx_dropped++; 479 + goto recycle; 480 + } 481 + bdbuffer = priv->rx_buffer + 482 + (priv->currx_bdnum * MAX_RX_BUF_LENGTH); 483 + length = ioread16be(&bd->length); 484 + 485 + pr_info("Received data length:%d", length); 486 + pr_info("while entry times:%d", entry++); 487 + 488 + pr_info("\nReceived data:\n"); 489 + for (i = 0; (i < 16); i++) { 490 + if (i == length) 491 + pr_info("++++"); 492 + else 493 + pr_info("%02x\n", bdbuffer[i]); 494 + } 495 + 496 + switch (dev->type) { 497 + case ARPHRD_RAWHDLC: 498 + bdbuffer += HDLC_HEAD_LEN; 499 + length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE); 500 + 501 + skb = dev_alloc_skb(length); 502 + if (!skb) { 503 + dev->stats.rx_dropped++; 504 + return -ENOMEM; 505 + } 506 + 507 + skb_put(skb, length); 508 + skb->len = length; 509 + skb->dev = dev; 510 + memcpy(skb->data, bdbuffer, length); 511 + break; 512 + 513 + case ARPHRD_PPP: 514 + length -= HDLC_CRC_SIZE; 515 + 516 + skb = dev_alloc_skb(length); 517 + if (!skb) { 518 + dev->stats.rx_dropped++; 519 + return -ENOMEM; 520 + } 521 + 522 + skb_put(skb, length); 523 + skb->len = length; 524 + skb->dev = dev; 525 + memcpy(skb->data, bdbuffer, length); 526 + break; 527 + } 528 + 529 + dev->stats.rx_packets++; 530 + dev->stats.rx_bytes += skb->len; 531 + howmany++; 532 + if (hdlc->proto) 533 + skb->protocol = hdlc_type_trans(skb, dev); 534 + pr_info("skb->protocol:%x\n", skb->protocol); 535 + netif_receive_skb(skb); 536 + 537 + recycle: 538 + iowrite16be(bd_status | R_E_S | R_I_S, &bd->status); 
539 + 540 + /* update to point at the next bd */ 541 + if (bd_status & R_W_S) { 542 + priv->currx_bdnum = 0; 543 + bd = priv->rx_bd_base; 544 + } else { 545 + if (priv->currx_bdnum < (RX_BD_RING_LEN - 1)) 546 + priv->currx_bdnum += 1; 547 + else 548 + priv->currx_bdnum = RX_BD_RING_LEN - 1; 549 + 550 + bd += 1; 551 + } 552 + 553 + bd_status = ioread16be(&bd->status); 554 + } 555 + 556 + priv->currx_bd = bd; 557 + return howmany; 558 + } 559 + 560 + static int ucc_hdlc_poll(struct napi_struct *napi, int budget) 561 + { 562 + struct ucc_hdlc_private *priv = container_of(napi, 563 + struct ucc_hdlc_private, 564 + napi); 565 + int howmany; 566 + 567 + /* Tx event processing */ 568 + spin_lock(&priv->lock); 569 + hdlc_tx_done(priv); 570 + spin_unlock(&priv->lock); 571 + 572 + howmany = 0; 573 + howmany += hdlc_rx_done(priv, budget - howmany); 574 + 575 + if (howmany < budget) { 576 + napi_complete(napi); 577 + qe_setbits32(priv->uccf->p_uccm, 578 + (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16); 579 + } 580 + 581 + return howmany; 582 + } 583 + 584 + static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id) 585 + { 586 + struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id; 587 + struct net_device *dev = priv->ndev; 588 + struct ucc_fast_private *uccf; 589 + struct ucc_tdm_info *ut_info; 590 + u32 ucce; 591 + u32 uccm; 592 + 593 + ut_info = priv->ut_info; 594 + uccf = priv->uccf; 595 + 596 + ucce = ioread32be(uccf->p_ucce); 597 + uccm = ioread32be(uccf->p_uccm); 598 + ucce &= uccm; 599 + iowrite32be(ucce, uccf->p_ucce); 600 + pr_info("irq ucce:%x\n", ucce); 601 + if (!ucce) 602 + return IRQ_NONE; 603 + 604 + if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) { 605 + if (napi_schedule_prep(&priv->napi)) { 606 + uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) 607 + << 16); 608 + iowrite32be(uccm, uccf->p_uccm); 609 + __napi_schedule(&priv->napi); 610 + } 611 + } 612 + 613 + /* Errors and other events */ 614 + if (ucce >> 16 & 
UCC_HDLC_UCCE_BSY) 615 + dev->stats.rx_errors++; 616 + if (ucce >> 16 & UCC_HDLC_UCCE_TXE) 617 + dev->stats.tx_errors++; 618 + 619 + return IRQ_HANDLED; 620 + } 621 + 622 + static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 623 + { 624 + const size_t size = sizeof(te1_settings); 625 + te1_settings line; 626 + struct ucc_hdlc_private *priv = netdev_priv(dev); 627 + 628 + if (cmd != SIOCWANDEV) 629 + return hdlc_ioctl(dev, ifr, cmd); 630 + 631 + switch (ifr->ifr_settings.type) { 632 + case IF_GET_IFACE: 633 + ifr->ifr_settings.type = IF_IFACE_E1; 634 + if (ifr->ifr_settings.size < size) { 635 + ifr->ifr_settings.size = size; /* data size wanted */ 636 + return -ENOBUFS; 637 + } 638 + line.clock_type = priv->clocking; 639 + line.clock_rate = 0; 640 + line.loopback = 0; 641 + 642 + if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size)) 643 + return -EFAULT; 644 + return 0; 645 + 646 + default: 647 + return hdlc_ioctl(dev, ifr, cmd); 648 + } 649 + } 650 + 651 + static int uhdlc_open(struct net_device *dev) 652 + { 653 + u32 cecr_subblock; 654 + hdlc_device *hdlc = dev_to_hdlc(dev); 655 + struct ucc_hdlc_private *priv = hdlc->priv; 656 + struct ucc_tdm *utdm = priv->utdm; 657 + 658 + if (priv->hdlc_busy != 1) { 659 + if (request_irq(priv->ut_info->uf_info.irq, 660 + ucc_hdlc_irq_handler, 0, "hdlc", priv)) 661 + return -ENODEV; 662 + 663 + cecr_subblock = ucc_fast_get_qe_cr_subblock( 664 + priv->ut_info->uf_info.ucc_num); 665 + 666 + qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock, 667 + QE_CR_PROTOCOL_UNSPECIFIED, 0); 668 + 669 + ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 670 + 671 + /* Enable the TDM port */ 672 + if (priv->tsa) 673 + utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port); 674 + 675 + priv->hdlc_busy = 1; 676 + netif_device_attach(priv->ndev); 677 + napi_enable(&priv->napi); 678 + netif_start_queue(dev); 679 + hdlc_open(dev); 680 + } 681 + 682 + return 0; 683 + } 684 + 685 + static void uhdlc_memclean(struct 
ucc_hdlc_private *priv) 686 + { 687 + qe_muram_free(priv->ucc_pram->riptr); 688 + qe_muram_free(priv->ucc_pram->tiptr); 689 + 690 + if (priv->rx_bd_base) { 691 + dma_free_coherent(priv->dev, 692 + RX_BD_RING_LEN * sizeof(struct qe_bd), 693 + priv->rx_bd_base, priv->dma_rx_bd); 694 + 695 + priv->rx_bd_base = NULL; 696 + priv->dma_rx_bd = 0; 697 + } 698 + 699 + if (priv->tx_bd_base) { 700 + dma_free_coherent(priv->dev, 701 + TX_BD_RING_LEN * sizeof(struct qe_bd), 702 + priv->tx_bd_base, priv->dma_tx_bd); 703 + 704 + priv->tx_bd_base = NULL; 705 + priv->dma_tx_bd = 0; 706 + } 707 + 708 + if (priv->ucc_pram) { 709 + qe_muram_free(priv->ucc_pram_offset); 710 + priv->ucc_pram = NULL; 711 + priv->ucc_pram_offset = 0; 712 + } 713 + 714 + kfree(priv->rx_skbuff); 715 + priv->rx_skbuff = NULL; 716 + 717 + kfree(priv->tx_skbuff); 718 + priv->tx_skbuff = NULL; 719 + 720 + if (priv->uf_regs) { 721 + iounmap(priv->uf_regs); 722 + priv->uf_regs = NULL; 723 + } 724 + 725 + if (priv->uccf) { 726 + ucc_fast_free(priv->uccf); 727 + priv->uccf = NULL; 728 + } 729 + 730 + if (priv->rx_buffer) { 731 + dma_free_coherent(priv->dev, 732 + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH, 733 + priv->rx_buffer, priv->dma_rx_addr); 734 + priv->rx_buffer = NULL; 735 + priv->dma_rx_addr = 0; 736 + } 737 + 738 + if (priv->tx_buffer) { 739 + dma_free_coherent(priv->dev, 740 + TX_BD_RING_LEN * MAX_RX_BUF_LENGTH, 741 + priv->tx_buffer, priv->dma_tx_addr); 742 + priv->tx_buffer = NULL; 743 + priv->dma_tx_addr = 0; 744 + } 745 + } 746 + 747 + static int uhdlc_close(struct net_device *dev) 748 + { 749 + struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv; 750 + struct ucc_tdm *utdm = priv->utdm; 751 + u32 cecr_subblock; 752 + 753 + napi_disable(&priv->napi); 754 + cecr_subblock = ucc_fast_get_qe_cr_subblock( 755 + priv->ut_info->uf_info.ucc_num); 756 + 757 + qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, 758 + (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); 759 + qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock, 760 + 
(u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); 761 + 762 + if (priv->tsa) 763 + utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port); 764 + 765 + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 766 + 767 + free_irq(priv->ut_info->uf_info.irq, priv); 768 + netif_stop_queue(dev); 769 + priv->hdlc_busy = 0; 770 + 771 + return 0; 772 + } 773 + 774 + static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding, 775 + unsigned short parity) 776 + { 777 + struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv; 778 + 779 + if (encoding != ENCODING_NRZ && 780 + encoding != ENCODING_NRZI) 781 + return -EINVAL; 782 + 783 + if (parity != PARITY_NONE && 784 + parity != PARITY_CRC32_PR1_CCITT && 785 + parity != PARITY_CRC16_PR1_CCITT) 786 + return -EINVAL; 787 + 788 + priv->encoding = encoding; 789 + priv->parity = parity; 790 + 791 + return 0; 792 + } 793 + 794 + #ifdef CONFIG_PM 795 + static void store_clk_config(struct ucc_hdlc_private *priv) 796 + { 797 + struct qe_mux *qe_mux_reg = &qe_immr->qmx; 798 + 799 + /* store si clk */ 800 + priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h); 801 + priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l); 802 + 803 + /* store si sync */ 804 + priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr); 805 + 806 + /* store ucc clk */ 807 + memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32)); 808 + } 809 + 810 + static void resume_clk_config(struct ucc_hdlc_private *priv) 811 + { 812 + struct qe_mux *qe_mux_reg = &qe_immr->qmx; 813 + 814 + memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32)); 815 + 816 + iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h); 817 + iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l); 818 + 819 + iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr); 820 + } 821 + 822 + static int uhdlc_suspend(struct device *dev) 823 + { 824 + struct ucc_hdlc_private *priv = dev_get_drvdata(dev); 825 + struct ucc_tdm_info *ut_info; 826 + struct ucc_fast __iomem *uf_regs; 827 + 828 + if 
(!priv) 829 + return -EINVAL; 830 + 831 + if (!netif_running(priv->ndev)) 832 + return 0; 833 + 834 + netif_device_detach(priv->ndev); 835 + napi_disable(&priv->napi); 836 + 837 + ut_info = priv->ut_info; 838 + uf_regs = priv->uf_regs; 839 + 840 + /* backup gumr guemr*/ 841 + priv->gumr = ioread32be(&uf_regs->gumr); 842 + priv->guemr = ioread8(&uf_regs->guemr); 843 + 844 + priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak), 845 + GFP_KERNEL); 846 + if (!priv->ucc_pram_bak) 847 + return -ENOMEM; 848 + 849 + /* backup HDLC parameter */ 850 + memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram, 851 + sizeof(struct ucc_hdlc_param)); 852 + 853 + /* store the clk configuration */ 854 + store_clk_config(priv); 855 + 856 + /* save power */ 857 + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 858 + 859 + dev_dbg(dev, "ucc hdlc suspend\n"); 860 + return 0; 861 + } 862 + 863 + static int uhdlc_resume(struct device *dev) 864 + { 865 + struct ucc_hdlc_private *priv = dev_get_drvdata(dev); 866 + struct ucc_tdm *utdm = priv->utdm; 867 + struct ucc_tdm_info *ut_info; 868 + struct ucc_fast __iomem *uf_regs; 869 + struct ucc_fast_private *uccf; 870 + struct ucc_fast_info *uf_info; 871 + int ret, i; 872 + u32 cecr_subblock; 873 + u16 bd_status; 874 + 875 + if (!priv) 876 + return -EINVAL; 877 + 878 + if (!netif_running(priv->ndev)) 879 + return 0; 880 + 881 + ut_info = priv->ut_info; 882 + uf_info = &ut_info->uf_info; 883 + uf_regs = priv->uf_regs; 884 + uccf = priv->uccf; 885 + 886 + /* restore gumr guemr */ 887 + iowrite8(priv->guemr, &uf_regs->guemr); 888 + iowrite32be(priv->gumr, &uf_regs->gumr); 889 + 890 + /* Set Virtual Fifo registers */ 891 + iowrite16be(uf_info->urfs, &uf_regs->urfs); 892 + iowrite16be(uf_info->urfet, &uf_regs->urfet); 893 + iowrite16be(uf_info->urfset, &uf_regs->urfset); 894 + iowrite16be(uf_info->utfs, &uf_regs->utfs); 895 + iowrite16be(uf_info->utfet, &uf_regs->utfet); 896 + iowrite16be(uf_info->utftt, &uf_regs->utftt); 897 + /* utfb, urfb 
are offsets from MURAM base */ 898 + iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb); 899 + iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb); 900 + 901 + /* Rx Tx and sync clock routing */ 902 + resume_clk_config(priv); 903 + 904 + iowrite32be(uf_info->uccm_mask, &uf_regs->uccm); 905 + iowrite32be(0xffffffff, &uf_regs->ucce); 906 + 907 + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 908 + 909 + /* rebuild SIRAM */ 910 + if (priv->tsa) 911 + ucc_tdm_init(priv->utdm, priv->ut_info); 912 + 913 + /* Write to QE CECR, UCCx channel to Stop Transmission */ 914 + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); 915 + ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock, 916 + (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); 917 + 918 + /* Set UPSMR normal mode */ 919 + iowrite32be(0, &uf_regs->upsmr); 920 + 921 + /* init parameter base */ 922 + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); 923 + ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock, 924 + QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset); 925 + 926 + priv->ucc_pram = (struct ucc_hdlc_param __iomem *) 927 + qe_muram_addr(priv->ucc_pram_offset); 928 + 929 + /* restore ucc parameter */ 930 + memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak, 931 + sizeof(struct ucc_hdlc_param)); 932 + kfree(priv->ucc_pram_bak); 933 + 934 + /* rebuild BD entry */ 935 + for (i = 0; i < RX_BD_RING_LEN; i++) { 936 + if (i < (RX_BD_RING_LEN - 1)) 937 + bd_status = R_E_S | R_I_S; 938 + else 939 + bd_status = R_E_S | R_I_S | R_W_S; 940 + 941 + iowrite16be(bd_status, &priv->rx_bd_base[i].status); 942 + iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH, 943 + &priv->rx_bd_base[i].buf); 944 + } 945 + 946 + for (i = 0; i < TX_BD_RING_LEN; i++) { 947 + if (i < (TX_BD_RING_LEN - 1)) 948 + bd_status = T_I_S | T_TC_S; 949 + else 950 + bd_status = T_I_S | T_TC_S | T_W_S; 951 + 952 + iowrite16be(bd_status, &priv->tx_bd_base[i].status); 953 + 
iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH, 954 + &priv->tx_bd_base[i].buf); 955 + } 956 + 957 + /* if hdlc is busy enable TX and RX */ 958 + if (priv->hdlc_busy == 1) { 959 + cecr_subblock = ucc_fast_get_qe_cr_subblock( 960 + priv->ut_info->uf_info.ucc_num); 961 + 962 + qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock, 963 + (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); 964 + 965 + ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 966 + 967 + /* Enable the TDM port */ 968 + if (priv->tsa) 969 + utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port); 970 + } 971 + 972 + napi_enable(&priv->napi); 973 + netif_device_attach(priv->ndev); 974 + 975 + return 0; 976 + } 977 + 978 + static const struct dev_pm_ops uhdlc_pm_ops = { 979 + .suspend = uhdlc_suspend, 980 + .resume = uhdlc_resume, 981 + .freeze = uhdlc_suspend, 982 + .thaw = uhdlc_resume, 983 + }; 984 + 985 + #define HDLC_PM_OPS (&uhdlc_pm_ops) 986 + 987 + #else 988 + 989 + #define HDLC_PM_OPS NULL 990 + 991 + #endif 992 + static const struct net_device_ops uhdlc_ops = { 993 + .ndo_open = uhdlc_open, 994 + .ndo_stop = uhdlc_close, 995 + .ndo_change_mtu = hdlc_change_mtu, 996 + .ndo_start_xmit = hdlc_start_xmit, 997 + .ndo_do_ioctl = uhdlc_ioctl, 998 + }; 999 + 1000 + static int ucc_hdlc_probe(struct platform_device *pdev) 1001 + { 1002 + struct device_node *np = pdev->dev.of_node; 1003 + struct ucc_hdlc_private *uhdlc_priv = NULL; 1004 + struct ucc_tdm_info *ut_info; 1005 + struct ucc_tdm *utdm; 1006 + struct resource res; 1007 + struct net_device *dev; 1008 + hdlc_device *hdlc; 1009 + int ucc_num; 1010 + const char *sprop; 1011 + int ret; 1012 + u32 val; 1013 + 1014 + ret = of_property_read_u32_index(np, "cell-index", 0, &val); 1015 + if (ret) { 1016 + dev_err(&pdev->dev, "Invalid ucc property\n"); 1017 + return -ENODEV; 1018 + } 1019 + 1020 + ucc_num = val - 1; 1021 + if ((ucc_num > 3) || (ucc_num < 0)) { 1022 + dev_err(&pdev->dev, ": Invalid UCC num\n"); 1023 + return -EINVAL; 1024 + } 1025 + 1026 + 
memcpy(&utdm_info[ucc_num], &utdm_primary_info, 1027 + sizeof(utdm_primary_info)); 1028 + 1029 + ut_info = &utdm_info[ucc_num]; 1030 + ut_info->uf_info.ucc_num = ucc_num; 1031 + 1032 + sprop = of_get_property(np, "rx-clock-name", NULL); 1033 + if (sprop) { 1034 + ut_info->uf_info.rx_clock = qe_clock_source(sprop); 1035 + if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) || 1036 + (ut_info->uf_info.rx_clock > QE_CLK24)) { 1037 + dev_err(&pdev->dev, "Invalid rx-clock-name property\n"); 1038 + return -EINVAL; 1039 + } 1040 + } else { 1041 + dev_err(&pdev->dev, "Invalid rx-clock-name property\n"); 1042 + return -EINVAL; 1043 + } 1044 + 1045 + sprop = of_get_property(np, "tx-clock-name", NULL); 1046 + if (sprop) { 1047 + ut_info->uf_info.tx_clock = qe_clock_source(sprop); 1048 + if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) || 1049 + (ut_info->uf_info.tx_clock > QE_CLK24)) { 1050 + dev_err(&pdev->dev, "Invalid tx-clock-name property\n"); 1051 + return -EINVAL; 1052 + } 1053 + } else { 1054 + dev_err(&pdev->dev, "Invalid tx-clock-name property\n"); 1055 + return -EINVAL; 1056 + } 1057 + 1058 + /* use the same clock when work in loopback */ 1059 + if (ut_info->uf_info.rx_clock == ut_info->uf_info.tx_clock) 1060 + qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1); 1061 + 1062 + ret = of_address_to_resource(np, 0, &res); 1063 + if (ret) 1064 + return -EINVAL; 1065 + 1066 + ut_info->uf_info.regs = res.start; 1067 + ut_info->uf_info.irq = irq_of_parse_and_map(np, 0); 1068 + 1069 + uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL); 1070 + if (!uhdlc_priv) { 1071 + ret = -ENOMEM; 1072 + dev_err(&pdev->dev, "No mem to alloc hdlc private data\n"); 1073 + goto err_alloc_priv; 1074 + } 1075 + 1076 + dev_set_drvdata(&pdev->dev, uhdlc_priv); 1077 + uhdlc_priv->dev = &pdev->dev; 1078 + uhdlc_priv->ut_info = ut_info; 1079 + 1080 + if (of_get_property(np, "fsl,tdm-interface", NULL)) 1081 + uhdlc_priv->tsa = 1; 1082 + 1083 + if (of_get_property(np, "fsl,ucc-internal-loopback", NULL)) 
1084 + uhdlc_priv->loopback = 1; 1085 + 1086 + if (uhdlc_priv->tsa == 1) { 1087 + utdm = kzalloc(sizeof(*utdm), GFP_KERNEL); 1088 + if (!utdm) { 1089 + ret = -ENOMEM; 1090 + dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n"); 1091 + goto err_alloc_utdm; 1092 + } 1093 + uhdlc_priv->utdm = utdm; 1094 + ret = ucc_of_parse_tdm(np, utdm, ut_info); 1095 + if (ret) 1096 + goto err_miss_tsa_property; 1097 + } 1098 + 1099 + ret = uhdlc_init(uhdlc_priv); 1100 + if (ret) { 1101 + dev_err(&pdev->dev, "Failed to init uhdlc\n"); 1102 + goto err_hdlc_init; 1103 + } 1104 + 1105 + dev = alloc_hdlcdev(uhdlc_priv); 1106 + if (!dev) { 1107 + ret = -ENOMEM; 1108 + pr_err("ucc_hdlc: unable to allocate memory\n"); 1109 + goto err_hdlc_init; 1110 + } 1111 + 1112 + uhdlc_priv->ndev = dev; 1113 + hdlc = dev_to_hdlc(dev); 1114 + dev->tx_queue_len = 16; 1115 + dev->netdev_ops = &uhdlc_ops; 1116 + hdlc->attach = ucc_hdlc_attach; 1117 + hdlc->xmit = ucc_hdlc_tx; 1118 + netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32); 1119 + if (register_hdlc_device(dev)) { 1120 + ret = -ENOBUFS; 1121 + pr_err("ucc_hdlc: unable to register hdlc device\n"); 1122 + free_netdev(dev); 1123 + goto err_hdlc_init; 1124 + } 1125 + 1126 + return 0; 1127 + 1128 + err_hdlc_init: 1129 + err_miss_tsa_property: 1130 + kfree(uhdlc_priv); 1131 + if (uhdlc_priv->tsa) 1132 + kfree(utdm); 1133 + err_alloc_utdm: 1134 + kfree(uhdlc_priv); 1135 + err_alloc_priv: 1136 + return ret; 1137 + } 1138 + 1139 + static int ucc_hdlc_remove(struct platform_device *pdev) 1140 + { 1141 + struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev); 1142 + 1143 + uhdlc_memclean(priv); 1144 + 1145 + if (priv->utdm->si_regs) { 1146 + iounmap(priv->utdm->si_regs); 1147 + priv->utdm->si_regs = NULL; 1148 + } 1149 + 1150 + if (priv->utdm->siram) { 1151 + iounmap(priv->utdm->siram); 1152 + priv->utdm->siram = NULL; 1153 + } 1154 + kfree(priv); 1155 + 1156 + dev_info(&pdev->dev, "UCC based hdlc module removed\n"); 1157 + 1158 + return 0; 1159 
+ } 1160 + 1161 + static const struct of_device_id fsl_ucc_hdlc_of_match[] = { 1162 + { 1163 + .compatible = "fsl,ucc-hdlc", 1164 + }, 1165 + {}, 1166 + }; 1167 + 1168 + MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match); 1169 + 1170 + static struct platform_driver ucc_hdlc_driver = { 1171 + .probe = ucc_hdlc_probe, 1172 + .remove = ucc_hdlc_remove, 1173 + .driver = { 1174 + .owner = THIS_MODULE, 1175 + .name = DRV_NAME, 1176 + .pm = HDLC_PM_OPS, 1177 + .of_match_table = fsl_ucc_hdlc_of_match, 1178 + }, 1179 + }; 1180 + 1181 + static int __init ucc_hdlc_init(void) 1182 + { 1183 + return platform_driver_register(&ucc_hdlc_driver); 1184 + } 1185 + 1186 + static void __exit ucc_hdlc_exit(void) 1187 + { 1188 + platform_driver_unregister(&ucc_hdlc_driver); 1189 + } 1190 + 1191 + module_init(ucc_hdlc_init); 1192 + module_exit(ucc_hdlc_exit);
+147
drivers/net/wan/fsl_ucc_hdlc.h
··· 1 + /* Freescale QUICC Engine HDLC Device Driver 2 + * 3 + * Copyright 2014 Freescale Semiconductor Inc. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms of the GNU General Public License as published by the 7 + * Free Software Foundation; either version 2 of the License, or (at your 8 + * option) any later version. 9 + */ 10 + 11 + #ifndef CONFIG_UCC_HDLC_H 12 + #define CONFIG_UCC_HDLC_H 13 + 14 + #include <linux/kernel.h> 15 + #include <linux/list.h> 16 + 17 + #include <soc/fsl/qe/immap_qe.h> 18 + #include <soc/fsl/qe/qe.h> 19 + 20 + #include <soc/fsl/qe/ucc.h> 21 + #include <soc/fsl/qe/ucc_fast.h> 22 + 23 + /* UCC HDLC event register */ 24 + #define UCCE_HDLC_RX_EVENTS \ 25 + (UCC_HDLC_UCCE_RXF | UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_BSY) 26 + #define UCCE_HDLC_TX_EVENTS (UCC_HDLC_UCCE_TXB | UCC_HDLC_UCCE_TXE) 27 + 28 + struct ucc_hdlc_param { 29 + __be16 riptr; 30 + __be16 tiptr; 31 + __be16 res0; 32 + __be16 mrblr; 33 + __be32 rstate; 34 + __be32 rbase; 35 + __be16 rbdstat; 36 + __be16 rbdlen; 37 + __be32 rdptr; 38 + __be32 tstate; 39 + __be32 tbase; 40 + __be16 tbdstat; 41 + __be16 tbdlen; 42 + __be32 tdptr; 43 + __be32 rbptr; 44 + __be32 tbptr; 45 + __be32 rcrc; 46 + __be32 res1; 47 + __be32 tcrc; 48 + __be32 res2; 49 + __be32 res3; 50 + __be32 c_mask; 51 + __be32 c_pres; 52 + __be16 disfc; 53 + __be16 crcec; 54 + __be16 abtsc; 55 + __be16 nmarc; 56 + __be32 max_cnt; 57 + __be16 mflr; 58 + __be16 rfthr; 59 + __be16 rfcnt; 60 + __be16 hmask; 61 + __be16 haddr1; 62 + __be16 haddr2; 63 + __be16 haddr3; 64 + __be16 haddr4; 65 + __be16 ts_tmp; 66 + __be16 tmp_mb; 67 + }; 68 + 69 + struct ucc_hdlc_private { 70 + struct ucc_tdm *utdm; 71 + struct ucc_tdm_info *ut_info; 72 + struct ucc_fast_private *uccf; 73 + struct device *dev; 74 + struct net_device *ndev; 75 + struct napi_struct napi; 76 + struct ucc_fast __iomem *uf_regs; /* UCC Fast registers */ 77 + struct ucc_hdlc_param __iomem *ucc_pram; 78 + u16 tsa; 
79 + bool hdlc_busy; 80 + bool loopback; 81 + 82 + u8 *tx_buffer; 83 + u8 *rx_buffer; 84 + dma_addr_t dma_tx_addr; 85 + dma_addr_t dma_rx_addr; 86 + 87 + struct qe_bd *tx_bd_base; 88 + struct qe_bd *rx_bd_base; 89 + dma_addr_t dma_tx_bd; 90 + dma_addr_t dma_rx_bd; 91 + struct qe_bd *curtx_bd; 92 + struct qe_bd *currx_bd; 93 + struct qe_bd *dirty_tx; 94 + u16 currx_bdnum; 95 + 96 + struct sk_buff **tx_skbuff; 97 + struct sk_buff **rx_skbuff; 98 + u16 skb_curtx; 99 + u16 skb_currx; 100 + unsigned short skb_dirtytx; 101 + 102 + unsigned short tx_ring_size; 103 + unsigned short rx_ring_size; 104 + u32 ucc_pram_offset; 105 + 106 + unsigned short encoding; 107 + unsigned short parity; 108 + u32 clocking; 109 + spinlock_t lock; /* lock for Tx BD and Tx buffer */ 110 + #ifdef CONFIG_PM 111 + struct ucc_hdlc_param *ucc_pram_bak; 112 + u32 gumr; 113 + u8 guemr; 114 + u32 cmxsi1cr_l, cmxsi1cr_h; 115 + u32 cmxsi1syr; 116 + u32 cmxucr[4]; 117 + #endif 118 + }; 119 + 120 + #define TX_BD_RING_LEN 0x10 121 + #define RX_BD_RING_LEN 0x20 122 + #define RX_CLEAN_MAX 0x10 123 + #define NUM_OF_BUF 4 124 + #define MAX_RX_BUF_LENGTH (48 * 0x20) 125 + #define MAX_FRAME_LENGTH (MAX_RX_BUF_LENGTH + 8) 126 + #define ALIGNMENT_OF_UCC_HDLC_PRAM 64 127 + #define SI_BANK_SIZE 128 128 + #define MAX_HDLC_NUM 4 129 + #define HDLC_HEAD_LEN 2 130 + #define HDLC_CRC_SIZE 2 131 + #define TX_RING_MOD_MASK(size) (size - 1) 132 + #define RX_RING_MOD_MASK(size) (size - 1) 133 + 134 + #define HDLC_HEAD_MASK 0x0000 135 + #define DEFAULT_HDLC_HEAD 0xff44 136 + #define DEFAULT_ADDR_MASK 0x00ff 137 + #define DEFAULT_HDLC_ADDR 0x00ff 138 + 139 + #define BMR_GBL 0x20000000 140 + #define BMR_BIG_ENDIAN 0x10000000 141 + #define CRC_16BIT_MASK 0x0000F0B8 142 + #define CRC_16BIT_PRES 0x0000FFFF 143 + #define DEFAULT_RFTHR 1 144 + 145 + #define DEFAULT_PPP_HEAD 0xff03 146 + 147 + #endif
+1
include/soc/fsl/qe/qe.h
#define UCC_SLOW_GUMR_L_MODE_QMC	0x00000002

/* General UCC FAST Mode Register */
#define UCC_FAST_GUMR_LOOPBACK	0x40000000
#define UCC_FAST_GUMR_TCI	0x20000000
#define UCC_FAST_GUMR_TRX	0x10000000
#define UCC_FAST_GUMR_TTX	0x08000000
+20 -2
include/soc/fsl/qe/ucc_fast.h
··· 21 21 22 22 #include <soc/fsl/qe/ucc.h> 23 23 24 - /* Receive BD's status */ 24 + /* Receive BD's status and length*/ 25 25 #define R_E 0x80000000 /* buffer empty */ 26 26 #define R_W 0x20000000 /* wrap bit */ 27 27 #define R_I 0x10000000 /* interrupt on reception */ 28 28 #define R_L 0x08000000 /* last */ 29 29 #define R_F 0x04000000 /* first */ 30 30 31 - /* transmit BD's status */ 31 + /* transmit BD's status and length*/ 32 32 #define T_R 0x80000000 /* ready bit */ 33 33 #define T_W 0x20000000 /* wrap bit */ 34 34 #define T_I 0x10000000 /* interrupt on completion */ 35 35 #define T_L 0x08000000 /* last */ 36 + 37 + /* Receive BD's status */ 38 + #define R_E_S 0x8000 /* buffer empty */ 39 + #define R_W_S 0x2000 /* wrap bit */ 40 + #define R_I_S 0x1000 /* interrupt on reception */ 41 + #define R_L_S 0x0800 /* last */ 42 + #define R_F_S 0x0400 /* first */ 43 + #define R_CM_S 0x0200 /* continuous mode */ 44 + #define R_CR_S 0x0004 /* crc */ 45 + #define R_OV_S 0x0002 /* crc */ 46 + 47 + /* transmit BD's status */ 48 + #define T_R_S 0x8000 /* ready bit */ 49 + #define T_W_S 0x2000 /* wrap bit */ 50 + #define T_I_S 0x1000 /* interrupt on completion */ 51 + #define T_L_S 0x0800 /* last */ 52 + #define T_TC_S 0x0400 /* crc */ 53 + #define T_TM_S 0x0200 /* continuous mode */ 36 54 37 55 /* Rx Data buffer must be 4 bytes aligned in most cases */ 38 56 #define UCC_FAST_RX_ALIGN 4