Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/net/wan/ixp4xx_hss.c at v2.6.31-rc8 (1332 lines, 35 kB)

/*
 * Intel IXP4xx HSS (synchronous serial port) driver for Linux
 *
 * Copyright (C) 2007-2008 Krzysztof Hałasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/fs.h>
#include <linux/hdlc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <mach/npe.h>
#include <mach/qmgr.h>

#define DEBUG_DESC		0
#define DEBUG_RX		0
#define DEBUG_TX		0
#define DEBUG_PKT_BYTES		0
#define DEBUG_CLOSE		0

#define DRV_NAME		"ixp4xx_hss"

#define PKT_EXTRA_FLAGS		0 /* orig 1 */
#define PKT_NUM_PIPES		1 /* 1, 2 or 4 */
#define PKT_PIPE_FIFO_SIZEW	4 /* total 4 dwords per HSS */

#define RX_DESCS		16 /* also length of all RX queues */
#define TX_DESCS		16 /* also length of all TX queues */

#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define RX_SIZE			(HDLC_MAX_MRU + 4) /* NPE needs more space */
#define MAX_CLOSE_WAIT		1000 /* microseconds */
#define HSS_COUNT		2
#define FRAME_SIZE		256 /* doesn't matter at this point */
#define FRAME_OFFSET		0
#define MAX_CHANNELS		(FRAME_SIZE / 8)

#define NAPI_WEIGHT		16

/* Queue IDs */
#define HSS0_CHL_RXTRIG_QUEUE	12	/* orig size = 32 dwords */
#define HSS0_PKT_RX_QUEUE	13	/* orig size = 32 dwords */
#define HSS0_PKT_TX0_QUEUE	14	/* orig size = 16 dwords */
#define HSS0_PKT_TX1_QUEUE	15
#define HSS0_PKT_TX2_QUEUE	16
#define HSS0_PKT_TX3_QUEUE	17
#define HSS0_PKT_RXFREE0_QUEUE	18	/* orig size = 16 dwords */
#define HSS0_PKT_RXFREE1_QUEUE	19
#define HSS0_PKT_RXFREE2_QUEUE	20
#define HSS0_PKT_RXFREE3_QUEUE	21
#define HSS0_PKT_TXDONE_QUEUE	22	/* orig size = 64 dwords */

#define HSS1_CHL_RXTRIG_QUEUE	10
#define HSS1_PKT_RX_QUEUE	0
#define HSS1_PKT_TX0_QUEUE	5
#define HSS1_PKT_TX1_QUEUE	6
#define HSS1_PKT_TX2_QUEUE	7
#define HSS1_PKT_TX3_QUEUE	8
#define HSS1_PKT_RXFREE0_QUEUE	1
#define HSS1_PKT_RXFREE1_QUEUE	2
#define HSS1_PKT_RXFREE2_QUEUE	3
#define HSS1_PKT_RXFREE3_QUEUE	4
#define HSS1_PKT_TXDONE_QUEUE	9

#define NPE_PKT_MODE_HDLC		0
#define NPE_PKT_MODE_RAW		1
#define NPE_PKT_MODE_56KMODE		2
#define NPE_PKT_MODE_56KENDIAN_MSB	4

/* PKT_PIPE_HDLC_CFG_WRITE flags */
#define PKT_HDLC_IDLE_ONES		0x1 /* default = flags */
#define PKT_HDLC_CRC_32			0x2 /* default = CRC-16 */
#define PKT_HDLC_MSB_ENDIAN		0x4 /* default = LE */


/* hss_config, PCRs */
/* Frame sync sampling, default = active low */
#define PCR_FRM_SYNC_ACTIVE_HIGH	0x40000000
#define PCR_FRM_SYNC_FALLINGEDGE	0x80000000
#define PCR_FRM_SYNC_RISINGEDGE		0xC0000000

/* Frame sync pin: input (default) or output generated off a given clk edge */
#define PCR_FRM_SYNC_OUTPUT_FALLING	0x20000000
#define PCR_FRM_SYNC_OUTPUT_RISING	0x30000000

/* Frame and data clock sampling on edge, default = falling */
#define PCR_FCLK_EDGE_RISING		0x08000000
#define PCR_DCLK_EDGE_RISING		0x04000000

/* Clock direction, default = input */
#define PCR_SYNC_CLK_DIR_OUTPUT		0x02000000

/* Generate/Receive frame pulses, default = enabled */
#define PCR_FRM_PULSE_DISABLED		0x01000000

/* Data rate is full (default) or half the configured clk speed */
#define PCR_HALF_CLK_RATE		0x00200000

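/* Note: hss_config() below builds the TX port configuration word from
 * PCR_FRM_SYNC_OUTPUT_RISING | PCR_MSB_ENDIAN | PCR_TX_DATA_ENABLE |
 * PCR_SOF_NO_FBIT (plus PCR_SYNC_CLK_DIR_OUTPUT for an internal clock) and
 * derives the RX word from it by toggling PCR_TX_DATA_ENABLE and
 * PCR_DCLK_EDGE_RISING. */
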
/* Invert data between NPE and HSS FIFOs? (default = no) */
#define PCR_DATA_POLARITY_INVERT	0x00100000

/* TX/RX endianness, default = LSB */
#define PCR_MSB_ENDIAN			0x00080000

/* Normal (default) / open drain mode (TX only) */
#define PCR_TX_PINS_OPEN_DRAIN		0x00040000

/* No framing bit transmitted and expected on RX? (default = framing bit) */
#define PCR_SOF_NO_FBIT			0x00020000

/* Drive data pins? */
#define PCR_TX_DATA_ENABLE		0x00010000

/* Voice 56k type: drive the data pins low (default), high, high Z */
#define PCR_TX_V56K_HIGH		0x00002000
#define PCR_TX_V56K_HIGH_IMP		0x00004000

/* Unassigned type: drive the data pins low (default), high, high Z */
#define PCR_TX_UNASS_HIGH		0x00000800
#define PCR_TX_UNASS_HIGH_IMP		0x00001000

/* T1 @ 1.544MHz only: Fbit dictated in FIFO (default) or high Z */
#define PCR_TX_FB_HIGH_IMP		0x00000400

/* 56k data endianness - which bit unused: high (default) or low */
#define PCR_TX_56KE_BIT_0_UNUSED	0x00000200

/* 56k data transmission type: 32/8 bit data (default) or 56K data */
#define PCR_TX_56KS_56K_DATA		0x00000100

/* hss_config, cCR */
/* Number of packetized clients, default = 1 */
#define CCR_NPE_HFIFO_2_HDLC		0x04000000
#define CCR_NPE_HFIFO_3_OR_4HDLC	0x08000000

/* default = no loopback */
#define CCR_LOOPBACK			0x02000000

/* HSS number, default = 0 (first) */
#define CCR_SECOND_HSS			0x01000000


/* hss_config, clkCR: main:10, num:10, denom:12 */
#define CLK42X_SPEED_EXP	((0x3FF << 22) | (  2 << 12) |   15) /*65 KHz*/

#define CLK42X_SPEED_512KHZ	(( 130 << 22) | (  2 << 12) |   15)
#define CLK42X_SPEED_1536KHZ	((  43 << 22) | ( 18 << 12) |   47)
#define CLK42X_SPEED_1544KHZ	((  43 << 22) | ( 33 << 12) |  192)
#define CLK42X_SPEED_2048KHZ	((  32 << 22) | ( 34 << 12) |   63)
#define CLK42X_SPEED_4096KHZ	((  16 << 22) | ( 34 << 12) |  127)
#define CLK42X_SPEED_8192KHZ	((   8 << 22) | ( 34 << 12) |  255)

#define CLK46X_SPEED_512KHZ	(( 130 << 22) | ( 24 << 12) |  127)
#define CLK46X_SPEED_1536KHZ	((  43 << 22) | (152 << 12) |  383)
#define CLK46X_SPEED_1544KHZ	((  43 << 22) | ( 66 << 12) |  385)
#define CLK46X_SPEED_2048KHZ	((  32 << 22) | (280 << 12) |  511)
#define CLK46X_SPEED_4096KHZ	((  16 << 22) | (280 << 12) | 1023)
#define CLK46X_SPEED_8192KHZ	((   8 << 22) | (280 << 12) | 2047)

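/* Worked example of the clkCR packing above (main:10, num:10, denom:12):
 * CLK42X_SPEED_2048KHZ = (32 << 22) | (34 << 12) | 63, i.e. main = 32,
 * num = 34 and denom = 63 packed into one 32-bit configuration word.
 * (Reading the three fields as a fractional clock divider is an assumption
 * based on their names; the source only gives the bit packing.) */
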

/* hss_config, LUT entries */
#define TDMMAP_UNASSIGNED	0
#define TDMMAP_HDLC		1	/* HDLC - packetized */
#define TDMMAP_VOICE56K		2	/* Voice56K - 7-bit channelized */
#define TDMMAP_VOICE64K		3	/* Voice64K - 8-bit channelized */

/* offsets into HSS config */
#define HSS_CONFIG_TX_PCR	0x00 /* port configuration registers */
#define HSS_CONFIG_RX_PCR	0x04
#define HSS_CONFIG_CORE_CR	0x08 /* loopback control, HSS# */
#define HSS_CONFIG_CLOCK_CR	0x0C /* clock generator control */
#define HSS_CONFIG_TX_FCR	0x10 /* frame configuration registers */
#define HSS_CONFIG_RX_FCR	0x14
#define HSS_CONFIG_TX_LUT	0x18 /* channel look-up tables */
#define HSS_CONFIG_RX_LUT	0x38


/* NPE command codes */
/* writes the ConfigWord value to the location specified by offset */
#define PORT_CONFIG_WRITE		0x40

/* triggers the NPE to load the contents of the configuration table */
#define PORT_CONFIG_LOAD		0x41

/* triggers the NPE to return an HssErrorReadResponse message */
#define PORT_ERROR_READ			0x42

/* triggers the NPE to reset internal status and enable the HssPacketized
   operation for the flow specified by pPipe */
#define PKT_PIPE_FLOW_ENABLE		0x50
#define PKT_PIPE_FLOW_DISABLE		0x51
#define PKT_NUM_PIPES_WRITE		0x52
#define PKT_PIPE_FIFO_SIZEW_WRITE	0x53
#define PKT_PIPE_HDLC_CFG_WRITE		0x54
#define PKT_PIPE_IDLE_PATTERN_WRITE	0x55
#define PKT_PIPE_RX_SIZE_WRITE		0x56
#define PKT_PIPE_MODE_WRITE		0x57

/* HDLC packet status values - desc->status */
#define ERR_SHUTDOWN		1 /* stop or shutdown occurrence */
#define ERR_HDLC_ALIGN		2 /* HDLC alignment error */
#define ERR_HDLC_FCS		3 /* HDLC Frame Check Sequence error */
#define ERR_RXFREE_Q_EMPTY	4 /* RX-free queue became empty while receiving
				     this packet (if buf_len < pkt_len) */
#define ERR_HDLC_TOO_LONG	5 /* HDLC frame size too long */
#define ERR_HDLC_ABORT		6 /* abort sequence received */
#define ERR_DISCONNECTING	7 /* disconnect is in progress */


#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_kfree_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif

struct port {
	struct device *dev;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct hss_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	u32 desc_tab_phys;
	unsigned int id;
	unsigned int clock_type, clock_rate, loopback;
	unsigned int initialized, carrier;
	u8 hdlc_cfg;
};

/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, unused, hss_port, index;
	union {
		struct { u8 data8a, data8b, data8c, data8d; };
		struct { u16 data16a, data16b; };
		struct { u32 data32; };
	};
#else
	u8 index, hss_port, unused, cmd;
	union {
		struct { u8 data8d, data8c, data8b, data8a; };
		struct { u16 data16b, data16a; };
		struct { u32 data32; };
	};
#endif
};

/* HDLC packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 status;
	u8 error_count;
	u16 __reserved;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 __reserved;
	u8 error_count;
	u8 status;
#endif
	u32 __reserved1[4];
};


#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])

/*****************************************************************************
 * global variables
 ****************************************************************************/

static int ports_open;
static struct dma_pool *dma_pool;
static spinlock_t npe_lock;

static const struct {
	int tx, txdone, rx, rxfree;
}queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
		  HSS0_PKT_RXFREE0_QUEUE},
		 {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
		  HSS1_PKT_RXFREE0_QUEUE},
};

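/* Layout note: the coherent pool allocated in init_hdlc_queues() holds
 * RX_DESCS RX descriptors followed by TX_DESCS TX descriptors
 * (POOL_ALLOC_SIZE), which is why tx_desc_phys()/tx_desc_ptr() above offset
 * by RX_DESCS. */
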
/*****************************************************************************
 * utility functions
 ****************************************************************************/

static inline struct port* dev_to_port(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}

#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;
	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
#endif

/*****************************************************************************
 * HSS access
 ****************************************************************************/

static void hss_npe_send(struct port *port, struct msg *msg, const char* what)
{
	u32 *val = (u32*)msg;
	if (npe_send_message(port->npe, msg, what)) {
		printk(KERN_CRIT "HSS-%i: unable to send command [%08X:%08X]"
		       " to %s\n", port->id, val[0], val[1],
		       npe_name(port->npe));
		BUG();
	}
}

static void hss_config_set_lut(struct port *port)
{
	struct msg msg;
	int ch;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;

	for (ch = 0; ch < MAX_CHANNELS; ch++) {
		msg.data32 >>= 2;
		msg.data32 |= TDMMAP_HDLC << 30;

		if (ch % 16 == 15) {
			msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3);
			hss_npe_send(port, &msg, "HSS_SET_TX_LUT");

			msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT;
			hss_npe_send(port, &msg, "HSS_SET_RX_LUT");
		}
	}
}

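/* hss_config_set_lut() above packs one 2-bit TDMMAP_* code per timeslot,
 * 16 timeslots per 32-bit word, and writes each completed word to both the
 * TX and RX look-up tables; every channel is mapped to TDMMAP_HDLC.
 * hss_config() below then pushes the whole port setup to the NPE as a
 * series of PORT_CONFIG_WRITE messages followed by one PORT_CONFIG_LOAD. */
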
static void hss_config(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_PCR;
	msg.data32 = PCR_FRM_SYNC_OUTPUT_RISING | PCR_MSB_ENDIAN |
		PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT;
	if (port->clock_type == CLOCK_INT)
		msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
	hss_npe_send(port, &msg, "HSS_SET_TX_PCR");

	msg.index = HSS_CONFIG_RX_PCR;
	msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING;
	hss_npe_send(port, &msg, "HSS_SET_RX_PCR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CORE_CR;
	msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) |
		(port->id ? CCR_SECOND_HSS : 0);
	hss_npe_send(port, &msg, "HSS_SET_CORE_CR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CLOCK_CR;
	msg.data32 = CLK42X_SPEED_2048KHZ /* FIXME */;
	hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_TX_FCR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_RX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_RX_FCR");

	hss_config_set_lut(port);

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_LOAD;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_LOAD_CONFIG");

	if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
	    /* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
	    msg.cmd != PORT_CONFIG_LOAD || msg.data32) {
		printk(KERN_CRIT "HSS-%i: HSS_LOAD_CONFIG failed\n",
		       port->id);
		BUG();
	}

	/* HDLC may stop working without this - check FIXME */
	npe_recv_message(port->npe, &msg, "FLUSH_IT");
}

static void hss_set_hdlc_cfg(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_HDLC_CFG_WRITE;
	msg.hss_port = port->id;
	msg.data8a = port->hdlc_cfg; /* rx_cfg */
	msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */
	hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG");
}

static u32 hss_get_status(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_ERROR_READ;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "PORT_ERROR_READ");
	if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) {
		printk(KERN_CRIT "HSS-%i: unable to read HSS status\n",
		       port->id);
		BUG();
	}

	return msg.data32;
}

static void hss_start_hdlc(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_FLOW_ENABLE;
	msg.hss_port = port->id;
	msg.data32 = 0;
	hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE");
}

static void hss_stop_hdlc(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_FLOW_DISABLE;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE");
	hss_get_status(port); /* make sure it's halted */
}

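/* hss_load_firmware() below loads the NPE microcode at most once per port
 * (guarded by port->initialized) and then sends the static HDLC pipe setup:
 * number of pipes, FIFO size, packet mode, RX frame size and idle pattern. */
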
static int hss_load_firmware(struct port *port)
{
	struct msg msg;
	int err;

	if (port->initialized)
		return 0;

	if (!npe_running(port->npe) &&
	    (err = npe_load_firmware(port->npe, npe_name(port->npe),
				     port->dev)))
		return err;

	/* HDLC mode configuration */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_NUM_PIPES_WRITE;
	msg.hss_port = port->id;
	msg.data8a = PKT_NUM_PIPES;
	hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES");

	msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE;
	msg.data8a = PKT_PIPE_FIFO_SIZEW;
	hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO");

	msg.cmd = PKT_PIPE_MODE_WRITE;
	msg.data8a = NPE_PKT_MODE_HDLC;
	/* msg.data8b = inv_mask */
	/* msg.data8c = or_mask */
	hss_npe_send(port, &msg, "HSS_SET_PKT_MODE");

	msg.cmd = PKT_PIPE_RX_SIZE_WRITE;
	msg.data16a = HDLC_MAX_MRU; /* including CRC */
	hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE");

	msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE;
	msg.data32 = 0x7F7F7F7F; /* ??? FIXME */
	hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");

	port->initialized = 1;
	return 0;
}

/*****************************************************************************
 * packetized (HDLC) operation
 ****************************************************************************/

static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	printk(KERN_DEBUG "%s: %s(%i)", dev->name, func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X", !(i % 4) ? " " : "", data[i]);
	}
	printk("\n");
#endif
}


static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %X %X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->status, desc->error_count);
#endif
}

static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	BUG_ON(phys & 0x1F);
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}

static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
}

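/* Descriptors travel between the CPU and the NPE as physical addresses via
 * the queue manager: queue_get_desc() above converts a dequeued physical
 * address back into a descriptor index, queue_put_desc() hands a descriptor
 * to the hardware. */
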

static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}


static void hss_hdlc_set_carrier(void *pdev, int carrier)
{
	struct net_device *netdev = pdev;
	struct port *port = dev_to_port(netdev);
	unsigned long flags;

	spin_lock_irqsave(&npe_lock, flags);
	port->carrier = carrier;
	if (!port->loopback) {
		if (carrier)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
	}
	spin_unlock_irqrestore(&npe_lock, flags);
}

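/* RX path: hss_hdlc_rx_irq() below only disables the RX queue interrupt and
 * schedules NAPI; hss_hdlc_poll() then drains the RX queue and re-enables
 * the interrupt once there is no more work. */
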
static void hss_hdlc_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(queue_ids[port->id].rx);
	napi_schedule(&port->napi);
}

static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll"
			       " napi_complete\n", dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    napi_reschedule(napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: hss_hdlc_poll"
				       " napi_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);
#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
		if (desc->error_count)
			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
#endif
		skb = NULL;
		switch (desc->status) {
		case 0:
#ifdef __ARMEB__
			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
				phys = dma_map_single(&dev->dev, skb->data,
						      RX_SIZE,
						      DMA_FROM_DEVICE);
				if (dma_mapping_error(&dev->dev, phys)) {
					dev_kfree_skb(skb);
					skb = NULL;
				}
			}
#else
			skb = netdev_alloc_skb(dev, desc->pkt_len);
#endif
			if (!skb)
				dev->stats.rx_dropped++;
			break;
		case ERR_HDLC_ALIGN:
		case ERR_HDLC_ABORT:
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_FCS:
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_TOO_LONG:
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			break;
		default:	/* FIXME - remove printk */
			printk(KERN_ERR "%s: hss_hdlc_poll: status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
			dev->stats.rx_errors++;
		}

		if (!skb) {
			/* put the desc back on RX-ready queue */
			desc->buf_len = RX_SIZE;
			desc->pkt_len = desc->status = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
				 RX_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data,
					RX_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
#endif
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys;
#endif
		desc->buf_len = RX_SIZE;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}
#if DEBUG_RX
	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
#endif
	return received;	/* not all work done */
}


static void hss_hdlc_txdone_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);
	int n_desc;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");
#endif
	while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
					port, 1)) >= 0) {
		struct desc *desc;
		int start;

		desc = tx_desc_ptr(port, n_desc);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->pkt_len;

		dma_unmap_tx(port, desc);
#if DEBUG_TX
		printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n",
		       dev->name, port->tx_buff_tab[n_desc]);
#endif
		free_buffer_irq(port->tx_buff_tab[n_desc]);
		port->tx_buff_tab[n_desc] = NULL;

		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, n_desc), desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
			       " ready\n", dev->name);
#endif
			netif_wake_queue(dev);
		}
	}
}

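/* TX path: hss_hdlc_xmit() below maps the frame (copying and word-swapping
 * it first on little-endian CPUs), takes a free descriptor from the TX-ready
 * queue and queues it to the NPE; hss_hdlc_txdone_irq() above reclaims
 * completed descriptors and puts them back on the TX-ready queue. */
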
static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > HDLC_MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
	dev_kfree_skb(skb);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
		dev_kfree_skb(skb);
#else
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	wmb();
	queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
	dev->trans_start = jiffies;

	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
		printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name);
#endif
	return NETDEV_TX_OK;
}


static int request_hdlc_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0,
				 "%s:TX-done", port->netdev->name);
	if (err)
		goto rel_txready;
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(queue_ids[port->id].tx);
rel_rx:
	qmgr_release_queue(queue_ids[port->id].rx);
rel_rxfree:
	qmgr_release_queue(queue_ids[port->id].rxfree);
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}

static void release_hdlc_queues(struct port *port)
{
	qmgr_release_queue(queue_ids[port->id].rxfree);
	qmgr_release_queue(queue_ids[port->id].rx);
	qmgr_release_queue(queue_ids[port->id].txdone);
	qmgr_release_queue(queue_ids[port->id].tx);
	qmgr_release_queue(port->plat->txreadyq);
}

static int init_hdlc_queues(struct port *port)
{
	int i;

	if (!ports_open)
		if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
						 POOL_ALLOC_SIZE, 32, 0)))
			return -ENOMEM;

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff;
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = RX_SIZE;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}

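/* Buffer strategy: on big-endian ARM (__ARMEB__) the RX/TX buffers are
 * sk_buffs handed directly to the NPE; on little-endian builds plain
 * kmalloc() buffers are used instead and data is copied with
 * memcpy_swab32() to fix the 32-bit word byte order. */
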
static void destroy_hdlc_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data, RX_SIZE,
						 DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}

static int hss_hdlc_open(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, err = 0;

	if ((err = hdlc_open(dev)))
		return err;

	if ((err = hss_load_firmware(port)))
		goto err_hdlc_close;

	if ((err = request_hdlc_queues(port)))
		goto err_hdlc_close;

	if ((err = init_hdlc_queues(port)))
		goto err_destroy_queues;

	spin_lock_irqsave(&npe_lock, flags);
	if (port->plat->open)
		if ((err = port->plat->open(port->id, dev,
					    hss_hdlc_set_carrier)))
			goto err_unlock;
	spin_unlock_irqrestore(&npe_lock, flags);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(queue_ids[port->id].rxfree,
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	napi_enable(&port->napi);
	netif_start_queue(dev);

	qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_rx_irq, dev);

	qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_txdone_irq, dev);
	qmgr_enable_irq(queue_ids[port->id].txdone);

	ports_open++;

	hss_set_hdlc_cfg(port);
	hss_config(port);

	hss_start_hdlc(port);

	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;

err_unlock:
	spin_unlock_irqrestore(&npe_lock, flags);
err_destroy_queues:
	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
err_hdlc_close:
	hdlc_close(dev);
	return err;
}

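/* hss_hdlc_close() below drains every hardware queue back into the driver
 * before tearing the port down; MAX_CLOSE_WAIT bounds how many times it
 * polls the TX-ready queue for outstanding buffers. */
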
static int hss_hdlc_close(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, buffs = RX_DESCS; /* allocated RX buffers */

	spin_lock_irqsave(&npe_lock, flags);
	ports_open--;
	qmgr_disable_irq(queue_ids[port->id].rx);
	netif_stop_queue(dev);
	napi_disable(&port->napi);

	hss_stop_hdlc(port);

	while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
		buffs--;
	while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
		buffs--;

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);

	buffs = TX_DESCS;
	while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif
	qmgr_disable_irq(queue_ids[port->id].txdone);

	if (port->plat->close)
		port->plat->close(port->id, dev);
	spin_unlock_irqrestore(&npe_lock, flags);

	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
	hdlc_close(dev);
	return 0;
}


static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct port *port = dev_to_port(dev);

	if (encoding != ENCODING_NRZ)
		return -EINVAL;

	switch(parity) {
	case PARITY_CRC16_PR1_CCITT:
		port->hdlc_cfg = 0;
		return 0;

	case PARITY_CRC32_PR1_CCITT:
		port->hdlc_cfg = PKT_HDLC_CRC_32;
		return 0;

	default:
		return -EINVAL;
	}
}


static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int clk;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_V35;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&new_line, 0, sizeof(new_line));
		new_line.clock_type = port->clock_type;
		new_line.clock_rate = 2048000; /* FIXME */
		new_line.loopback = port->loopback;
		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL:
	case IF_IFACE_V35:
		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		clk = new_line.clock_type;
		if (port->plat->set_clock)
			clk = port->plat->set_clock(port->id, clk);

		if (clk != CLOCK_EXT && clk != CLOCK_INT)
			return -EINVAL;	/* No such clock setting */

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		port->clock_type = clk; /* Update settings */
		/* FIXME port->clock_rate = new_line.clock_rate */;
		port->loopback = new_line.loopback;

		spin_lock_irqsave(&npe_lock, flags);

		if (dev->flags & IFF_UP)
			hss_config(port);

		if (port->loopback || port->carrier)
			netif_carrier_on(port->netdev);
		else
			netif_carrier_off(port->netdev);
		spin_unlock_irqrestore(&npe_lock, flags);

		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

/*****************************************************************************
 * initialization
 ****************************************************************************/

static const struct net_device_ops hss_hdlc_ops = {
	.ndo_open       = hss_hdlc_open,
	.ndo_stop       = hss_hdlc_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hss_hdlc_ioctl,
};

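/* The generic HDLC layer supplies hdlc_start_xmit() and hdlc_change_mtu()
 * for the netdev ops above; the driver's own hss_hdlc_xmit() is installed
 * through hdlc->xmit in hss_init_one() below. */
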
static int __devinit hss_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	hdlc_device *hdlc;
	int err;

	if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
		return -ENOMEM;

	if ((port->npe = npe_request(0)) == NULL) {
		err = -ENODEV;
		goto err_free;
	}

	if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {
		err = -ENOMEM;
		goto err_plat;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	hdlc = dev_to_hdlc(dev);
	hdlc->attach = hss_hdlc_attach;
	hdlc->xmit = hss_hdlc_xmit;
	dev->netdev_ops = &hss_hdlc_ops;
	dev->tx_queue_len = 100;
	port->clock_type = CLOCK_EXT;
	port->clock_rate = 2048000;
	port->id = pdev->id;
	port->dev = &pdev->dev;
	port->plat = pdev->dev.platform_data;
	netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);

	if ((err = register_hdlc_device(dev)))
		goto err_free_netdev;

	platform_set_drvdata(pdev, port);

	printk(KERN_INFO "%s: HSS-%i\n", dev->name, port->id);
	return 0;

err_free_netdev:
	free_netdev(dev);
err_plat:
	npe_release(port->npe);
err_free:
	kfree(port);
	return err;
}

static int __devexit hss_remove_one(struct platform_device *pdev)
{
	struct port *port = platform_get_drvdata(pdev);

	unregister_hdlc_device(port->netdev);
	free_netdev(port->netdev);
	npe_release(port->npe);
	platform_set_drvdata(pdev, NULL);
	kfree(port);
	return 0;
}

static struct platform_driver ixp4xx_hss_driver = {
	.driver.name	= DRV_NAME,
	.probe		= hss_init_one,
	.remove		= hss_remove_one,
};

static int __init hss_init_module(void)
{
	if ((ixp4xx_read_feature_bits() &
	     (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
	    (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
		return -ENODEV;

	spin_lock_init(&npe_lock);

	return platform_driver_register(&ixp4xx_hss_driver);
}

static void __exit hss_cleanup_module(void)
{
	platform_driver_unregister(&ixp4xx_hss_driver);
}

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_hss");
module_init(hss_init_module);
module_exit(hss_cleanup_module);
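
/* Platform integration, summarized from the code above (the details live in
 * the IXP4xx board support code): the driver binds to platform devices named
 * "ixp4xx_hss", and pdev->dev.platform_data is expected to be a struct
 * hss_plat_info (defined elsewhere) providing at least txreadyq plus the
 * optional open, close and set_clock callbacks used by this driver. */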