···
    device to be used as both a tty interface and as a synchronous
    controller is a project for Linux post the 2.4 release
  </para>
- <para>
-   The support code handles most common card configurations and
-   supports running both Cisco HDLC and Synchronous PPP. With extra
-   glue the frame relay and X.25 protocols can also be used with this
-   driver.
- </para>
  </chapter>

  <chapter id="Driver_Modes">
···
  <para>
    If you wish to use the network interface facilities of the driver,
    then you need to attach a network device to each channel that is
-   present and in use. In addition to use the SyncPPP and Cisco HDLC
+   present and in use. In addition, to use the generic HDLC layer
    you need to follow some additional plumbing rules. They may seem
    complex but a look at the example hostess_sv11 driver should
    reassure you.
  </para>
  <para>
    The network device used for each channel should be pointed to by
-   the netdevice field of each channel. The dev->priv field of the
+   the netdevice field of each channel. The hdlc->priv field of the
    network device points to your private data - you will need to be
-   able to find your ppp device from this. In addition to use the
-   sync ppp layer the private data must start with a void * pointer
-   to the syncppp structures.
+   able to find your private data from this.
  </para>
  <para>
    The way most drivers approach this particular problem is to
    create a structure holding the Z8530 device definition and
-   put that and the syncppp pointer into the private field of
-   the network device. The network device fields of the channels
-   then point back to the network devices. The ppp_device can also
-   be put in the private structure conveniently.
+   put that into the private field of the network device. The
+   network device fields of the channels then point back to the
+   network devices.
  </para>
  <para>
-   If you wish to use the synchronous ppp then you need to attach
-   the syncppp layer to the network device. You should do this before
-   you register the network device. The
-   <function>sppp_attach</function> requires that the first void *
-   pointer in your private data is pointing to an empty struct
-   ppp_device. The function fills in the initial data for the
-   ppp/hdlc layer.
+   If you wish to use the generic HDLC layer then you need to
+   register the HDLC device.
  </para>
  <para>
    Before you register your network device you will also need to
···
    buffer in sk_buff format and queues it for transmission. The
    caller must provide the entire packet with the exception of the
    bitstuffing and CRC. This is normally done by the caller via
-   the syncppp interface layer. It returns 0 if the buffer has been
-   queued and non zero values for queue full. If the function accepts
-   the buffer it becomes property of the Z8530 layer and the caller
-   should not free it.
+   the generic HDLC interface layer. It returns 0 if the buffer has
+   been queued and non-zero values for queue full. If the function
+   accepts the buffer it becomes property of the Z8530 layer and the
+   caller should not free it.
  </para>
  <para>
    The function <function>z8530_get_stats</function> returns a pointer
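The plumbing described above is easiest to see in code. What follows is a
minimal sketch, modelled loosely on the converted hostess_sv11 driver, of how
a driver might wire a Z8530 channel to a generic HDLC network device. The
names sv11_attach, sv11_queue_xmit and sv11_wire_up are illustrative only,
and the exact layout of struct z8530_dev should be checked against the kernel
headers actually in use.

	/*
	 * Sketch only, not a complete driver.  alloc_hdlcdev() remembers the
	 * pointer passed to it and dev_to_hdlc(dev)->priv hands it back.
	 */
	#include <linux/hdlc.h>
	#include "z85230.h"

	static int sv11_queue_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct z8530_dev *sv = dev_to_hdlc(dev)->priv;

		/* Hand the frame to the Z8530 layer; on success the skb
		   becomes the property of that layer (do not free it). */
		return z8530_queue_xmit(&sv->chanA, skb);
	}

	static int sv11_attach(struct net_device *dev, unsigned short encoding,
			       unsigned short parity)
	{
		/* Accept only the line coding the hardware supports */
		if (encoding == ENCODING_NRZ &&
		    parity == PARITY_CRC16_PR1_CCITT)
			return 0;
		return -EINVAL;
	}

	static int sv11_wire_up(struct z8530_dev *sv)
	{
		/* The HDLC layer allocates the net_device and keeps our
		   private pointer for us - no leading void * is needed. */
		struct net_device *netdev = alloc_hdlcdev(sv);

		if (!netdev)
			return -ENOMEM;

		dev_to_hdlc(netdev)->attach = sv11_attach;
		dev_to_hdlc(netdev)->xmit = sv11_queue_xmit;

		/* The channel points back at its network device */
		sv->chanA.netdevice = netdev;

		/* Registering the HDLC device replaces the old scheme's
		   sppp_attach() followed by register_netdev(). */
		if (register_hdlc_device(netdev)) {
			free_netdev(netdev);
			return -ENODEV;
		}
		return 0;
	}

Note how dev_to_hdlc(dev)->priv replaces the old requirement that the private
data start with a void * pointer to the syncppp structures: the HDLC layer
simply returns the pointer handed to alloc_hdlcdev() on every callback.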
+11
arch/sh/include/asm/sh_eth.h
···
+#ifndef __ASM_SH_ETH_H__
+#define __ASM_SH_ETH_H__
+
+enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN};
+
+struct sh_eth_plat_data {
+	int phy;
+	int edmac_endian;
+};
+
+#endif
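For illustration only, a board file could hand this structure to the Ethernet
driver through the platform bus. A hypothetical sketch follows; the device
name "sh-eth", the id and the omitted MMIO/IRQ resources are assumptions that
must match the actual driver and SoC:

	/* Hypothetical board setup: passing sh_eth_plat_data as platform data */
	#include <linux/platform_device.h>
	#include <asm/sh_eth.h>

	static struct sh_eth_plat_data sh_eth_pdata = {
		.phy		= 1,			/* PHY address on the MDIO bus */
		.edmac_endian	= EDMAC_LITTLE_ENDIAN,	/* E-DMAC descriptor endianness */
	};

	static struct platform_device sh_eth_device = {
		.name	= "sh-eth",	/* assumed to match the driver name */
		.id	= 0,
		.dev	= {
			.platform_data = &sh_eth_pdata,
		},
		/* .resource/.num_resources (MMIO window, IRQ) omitted here */
	};

	static int __init board_eth_setup(void)
	{
		return platform_device_register(&sh_eth_device);
	}
	device_initcall(board_eth_setup);

The driver then reads these values back from dev.platform_data in its probe
routine, much as the dm9000 hunk later in this series does with its own
platform data.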
-4
drivers/char/pcmcia/synclink_cs.c
···

 	/* SPPP/Cisco HDLC device parts */
 	int netcount;
-	int dosyncppp;
 	spinlock_t netlock;

#if SYNCLINK_GENERIC_HDLC
···

static int debug_level = 0;
static int maxframe[MAX_DEVICE_COUNT] = {0,};
-static int dosyncppp[MAX_DEVICE_COUNT] = {1,1,1,1};

module_param(break_on_load, bool, 0);
module_param(ttymajor, int, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);
-module_param_array(dosyncppp, int, NULL, 0);

MODULE_LICENSE("GPL");
···
 	if (info->line < MAX_DEVICE_COUNT) {
 		if (maxframe[info->line])
 			info->max_frame_size = maxframe[info->line];
-		info->dosyncppp = dosyncppp[info->line];
 	}

 	mgslpc_device_count++;
-4
drivers/char/synclink.c
···

 	/* generic HDLC device parts */
 	int netcount;
-	int dosyncppp;
 	spinlock_t netlock;

#if SYNCLINK_GENERIC_HDLC
···
static int dma[MAX_ISA_DEVICES];
static int debug_level;
static int maxframe[MAX_TOTAL_DEVICES];
-static int dosyncppp[MAX_TOTAL_DEVICES];
static int txdmabufs[MAX_TOTAL_DEVICES];
static int txholdbufs[MAX_TOTAL_DEVICES];
···
module_param_array(dma, int, NULL, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);
-module_param_array(dosyncppp, int, NULL, 0);
module_param_array(txdmabufs, int, NULL, 0);
module_param_array(txholdbufs, int, NULL, 0);
···
 	if (info->line < MAX_TOTAL_DEVICES) {
 		if (maxframe[info->line])
 			info->max_frame_size = maxframe[info->line];
-		info->dosyncppp = dosyncppp[info->line];

 		if (txdmabufs[info->line]) {
 			info->num_tx_dma_buffers = txdmabufs[info->line];
-5
drivers/char/synclink_gt.c
···
static int ttymajor;
static int debug_level;
static int maxframe[MAX_DEVICES];
-static int dosyncppp[MAX_DEVICES];

module_param(ttymajor, int, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);
-module_param_array(dosyncppp, int, NULL, 0);

MODULE_PARM_DESC(ttymajor, "TTY major device number override: 0=auto assigned");
MODULE_PARM_DESC(debug_level, "Debug syslog output: 0=disabled, 1 to 5=increasing detail");
MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)");
-MODULE_PARM_DESC(dosyncppp, "Enable synchronous net device, 0=disable 1=enable");

/*
 * tty support and callbacks
···
 	/* SPPP/Cisco HDLC device parts */

 	int netcount;
-	int dosyncppp;
 	spinlock_t netlock;
#if SYNCLINK_GENERIC_HDLC
 	struct net_device *netdev;
···
 	if (info->line < MAX_DEVICES) {
 		if (maxframe[info->line])
 			info->max_frame_size = maxframe[info->line];
-		info->dosyncppp = dosyncppp[info->line];
 	}

 	slgt_device_count++;
-4
drivers/char/synclinkmp.c
···

 	/* SPPP/Cisco HDLC device parts */
 	int netcount;
-	int dosyncppp;
 	spinlock_t netlock;

#if SYNCLINK_GENERIC_HDLC
···
 */
static int debug_level = 0;
static int maxframe[MAX_DEVICES] = {0,};
-static int dosyncppp[MAX_DEVICES] = {0,};

module_param(break_on_load, bool, 0);
module_param(ttymajor, int, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);
-module_param_array(dosyncppp, int, NULL, 0);

static char *driver_name = "SyncLink MultiPort driver";
static char *driver_version = "$Revision: 4.38 $";
···
 	if (info->line < MAX_DEVICES) {
 		if (maxframe[info->line])
 			info->max_frame_size = maxframe[info->line];
-		info->dosyncppp = dosyncppp[info->line];
 	}

 	synclinkmp_device_count++;
···
{
	struct pci_dev *pdev = adapter->pdev;

+	/*
+	 * The L1 hardware contains a bug that erroneously sets the
+	 * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a
+	 * fragmented IP packet is received, even though the packet
+	 * is perfectly valid and its checksum is correct. There's
+	 * no way to distinguish between one of these good packets
+	 * and a packet that actually contains a TCP/UDP checksum
+	 * error, so all we can do is allow it to be handed up to
+	 * the higher layers and let it be sorted out there.
+	 */
+
	skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
···
		return;
	}

-	/* IPv4, but hardware thinks its checksum is wrong */
-	if (netif_msg_rx_err(adapter))
-		dev_printk(KERN_DEBUG, &pdev->dev,
-			   "hw csum wrong, pkt_flag:%x, err_flag:%x\n",
-			   rrd->pkt_flg, rrd->err_flg);
-	skb->ip_summed = CHECKSUM_COMPLETE;
-	skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
-	adapter->hw_csum_err++;
	return;
}
+2-7
drivers/net/atp.c
···
	struct net_local *lp = netdev_priv(dev);
	long ioaddr = dev->base_addr;

-	if ( dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) {
-		/* We must make the kernel realise we had to move
-		 * into promisc mode or we start all out war on
-		 * the cable. - AC
-		 */
-		dev->flags|=IFF_PROMISC;
		lp->addr_mode = CMR2h_PROMISC;
-	} else
		lp->addr_mode = CMR2h_Normal;
	write_reg_high(ioaddr, CMR2, lp->addr_mode);
}
···
		aggregator = __get_first_agg(port);
		ad_agg_selection_logic(aggregator);
	}
+	bond_3ad_set_carrier(bond);
}

	// for each port run the state machines
+173-223
drivers/net/bonding/bond_main.c
···

/*-------------------------------- Monitoring -------------------------------*/

-/*
- * if !have_locks, return nonzero if a failover is necessary. if
- * have_locks, do whatever failover activities are needed.
- *
- * This is to separate the inspection and failover steps for locking
- * purposes; failover requires rtnl, but acquiring it for every
- * inspection is undesirable, so a wrapper first does inspection, and
- * the acquires the necessary locks and calls again to perform
- * failover if needed. Since all locks are dropped, a complete
- * restart is needed between calls.
- */
-static int __bond_mii_monitor(struct bonding *bond, int have_locks)
+
+static int bond_miimon_inspect(struct bonding *bond)
{
-	struct slave *slave, *oldcurrent;
-	int do_failover = 0;
-	int i;
-
-	if (bond->slave_cnt == 0)
-		goto out;
-
-	/* we will try to read the link status of each of our slaves, and
-	 * set their IFF_RUNNING flag appropriately. For each slave not
-	 * supporting MII status, we won't do anything so that a user-space
-	 * program could monitor the link itself if needed.
-	 */
-
-	read_lock(&bond->curr_slave_lock);
-	oldcurrent = bond->curr_active_slave;
-	read_unlock(&bond->curr_slave_lock);
+	struct slave *slave;
+	int i, link_state, commit = 0;

	bond_for_each_slave(bond, slave, i) {
-		struct net_device *slave_dev = slave->dev;
-		int link_state;
-		u16 old_speed = slave->speed;
-		u8 old_duplex = slave->duplex;
+		slave->new_link = BOND_LINK_NOCHANGE;

-		link_state = bond_check_dev_link(bond, slave_dev, 0);
+		link_state = bond_check_dev_link(bond, slave->dev, 0);

		switch (slave->link) {
-		case BOND_LINK_UP:	/* the link was up */
-			if (link_state == BMSR_LSTATUS) {
-				if (!oldcurrent) {
-					if (!have_locks)
-						return 1;
-					do_failover = 1;
-				}
-				break;
-			} else { /* link going down */
-				slave->link = BOND_LINK_FAIL;
-				slave->delay = bond->params.downdelay;
-
-				if (slave->link_failure_count < UINT_MAX) {
-					slave->link_failure_count++;
-				}
-
-				if (bond->params.downdelay) {
-					printk(KERN_INFO DRV_NAME
-					       ": %s: link status down for %s "
-					       "interface %s, disabling it in "
-					       "%d ms.\n",
-					       bond->dev->name,
-					       IS_UP(slave_dev)
-					       ? ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
-						  ? ((slave == oldcurrent)
-						     ? "active " : "backup ")
-						  : "")
-					       : "idle ",
-					       slave_dev->name,
-					       bond->params.downdelay * bond->params.miimon);
-				}
+		case BOND_LINK_UP:
+			if (link_state)
+				continue;
+
+			slave->link = BOND_LINK_FAIL;
+			slave->delay = bond->params.downdelay;
+			if (slave->delay) {
+				printk(KERN_INFO DRV_NAME
+				       ": %s: link status down for %s"
+				       "interface %s, disabling it in %d ms.\n",
+				       bond->dev->name,
+				       (bond->params.mode ==
+					BOND_MODE_ACTIVEBACKUP) ?
+				       ((slave->state == BOND_STATE_ACTIVE) ?
+					"active " : "backup ") : "",
+				       slave->dev->name,
+				       bond->params.downdelay * bond->params.miimon);
			}
-			/* no break ! fall through the BOND_LINK_FAIL test to
-			   ensure proper action to be taken
-			*/
-		case BOND_LINK_FAIL:	/* the link has just gone down */
-			if (link_state != BMSR_LSTATUS) {
-				/* link stays down */
-				if (slave->delay <= 0) {
-					if (!have_locks)
-						return 1;
-
-					/* link down for too long time */
-					slave->link = BOND_LINK_DOWN;
-
-					/* in active/backup mode, we must
-					 * completely disable this interface
-					 */
-					if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) ||
-					    (bond->params.mode == BOND_MODE_8023AD)) {
-						bond_set_slave_inactive_flags(slave);
-					}
-
-					printk(KERN_INFO DRV_NAME
-					       ": %s: link status definitely "
-					       "down for interface %s, "
-					       "disabling it\n",
-					       bond->dev->name,
-					       slave_dev->name);
-
-					/* notify ad that the link status has changed */
-					if (bond->params.mode == BOND_MODE_8023AD) {
-						bond_3ad_handle_link_change(slave, BOND_LINK_DOWN);
-					}
-
-					if ((bond->params.mode == BOND_MODE_TLB) ||
-					    (bond->params.mode == BOND_MODE_ALB)) {
-						bond_alb_handle_link_change(bond, slave, BOND_LINK_DOWN);
-					}
-
-					if (slave == oldcurrent) {
-						do_failover = 1;
-					}
-				} else {
-					slave->delay--;
-				}
-			} else {
-				/* link up again */
-				slave->link = BOND_LINK_UP;
+			/*FALLTHRU*/
+		case BOND_LINK_FAIL:
+			if (link_state) {
+				/*
+				 * recovered before downdelay expired
+				 */
+				slave->link = BOND_LINK_UP;
				slave->jiffies = jiffies;
				printk(KERN_INFO DRV_NAME
				       ": %s: link status up again after %d "
				       "ms for interface %s.\n",
				       bond->dev->name,
-				       (bond->params.downdelay - slave->delay) * bond->params.miimon,
-				       slave_dev->name);
+				       (bond->params.downdelay - slave->delay) *
+				       bond->params.miimon,
+				       slave->dev->name);
+				continue;
			}
+
+			if (slave->delay <= 0) {
+				slave->new_link = BOND_LINK_DOWN;
+				commit++;
+				continue;
+			}
+
+			slave->delay--;
			break;
-		case BOND_LINK_DOWN:	/* the link was down */
-			if (link_state != BMSR_LSTATUS) {
-				/* the link stays down, nothing more to do */
-				break;
-			} else {	/* link going up */
-				slave->link = BOND_LINK_BACK;
-				slave->delay = bond->params.updelay;

-				if (bond->params.updelay) {
-					/* if updelay == 0, no need to
-					   advertise about a 0 ms delay */
-					printk(KERN_INFO DRV_NAME
-					       ": %s: link status up for "
-					       "interface %s, enabling it "
-					       "in %d ms.\n",
-					       bond->dev->name,
-					       slave_dev->name,
-					       bond->params.updelay * bond->params.miimon);
-				}
+		case BOND_LINK_DOWN:
+			if (!link_state)
+				continue;
+
+			slave->link = BOND_LINK_BACK;
+			slave->delay = bond->params.updelay;
+
+			if (slave->delay) {
+				printk(KERN_INFO DRV_NAME
+				       ": %s: link status up for "
+				       "interface %s, enabling it in %d ms.\n",
+				       bond->dev->name, slave->dev->name,
+				       bond->params.updelay *
+				       bond->params.miimon);
			}
-			/* no break ! fall through the BOND_LINK_BACK state in
-			   case there's something to do.
-			*/
-		case BOND_LINK_BACK:	/* the link has just come back */
-			if (link_state != BMSR_LSTATUS) {
-				/* link down again */
-				slave->link = BOND_LINK_DOWN;
-
+			/*FALLTHRU*/
+		case BOND_LINK_BACK:
+			if (!link_state) {
+				slave->link = BOND_LINK_DOWN;
				printk(KERN_INFO DRV_NAME
				       ": %s: link status down again after %d "
				       "ms for interface %s.\n",
				       bond->dev->name,
-				       (bond->params.updelay - slave->delay) * bond->params.miimon,
-				       slave_dev->name);
-			} else {
-				/* link stays up */
-				if (slave->delay == 0) {
-					if (!have_locks)
-						return 1;
-
-					/* now the link has been up for long time enough */
-					slave->link = BOND_LINK_UP;
-					slave->jiffies = jiffies;
-
-					if (bond->params.mode == BOND_MODE_8023AD) {
-						/* prevent it from being the active one */
-						slave->state = BOND_STATE_BACKUP;
-					} else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
-						/* make it immediately active */
-						slave->state = BOND_STATE_ACTIVE;
-					} else if (slave != bond->primary_slave) {
-						/* prevent it from being the active one */
-						slave->state = BOND_STATE_BACKUP;
-					}
-
-					printk(KERN_INFO DRV_NAME
-					       ": %s: link status definitely "
-					       "up for interface %s.\n",
-					       bond->dev->name,
-					       slave_dev->name);
-
-					/* notify ad that the link status has changed */
-					if (bond->params.mode == BOND_MODE_8023AD) {
-						bond_3ad_handle_link_change(slave, BOND_LINK_UP);
-					}
-
-					if ((bond->params.mode == BOND_MODE_TLB) ||
-					    (bond->params.mode == BOND_MODE_ALB)) {
-						bond_alb_handle_link_change(bond, slave, BOND_LINK_UP);
-					}
-
-					if ((!oldcurrent) ||
-					    (slave == bond->primary_slave)) {
-						do_failover = 1;
-					}
-				} else {
-					slave->delay--;
-				}
+				       (bond->params.updelay - slave->delay) *
+				       bond->params.miimon,
+				       slave->dev->name);
+
+				continue;
			}
+
+			if (slave->delay <= 0) {
+				slave->new_link = BOND_LINK_UP;
+				commit++;
+				continue;
+			}
+
+			slave->delay--;
			break;
+		}
+	}
+
+	return commit;
+}
+
+static void bond_miimon_commit(struct bonding *bond)
+{
+	struct slave *slave;
+	int i;
+
+	bond_for_each_slave(bond, slave, i) {
+		switch (slave->new_link) {
+		case BOND_LINK_NOCHANGE:
+			continue;
+
+		case BOND_LINK_UP:
+			slave->link = BOND_LINK_UP;
+			slave->jiffies = jiffies;
+
+			if (bond->params.mode == BOND_MODE_8023AD) {
+				/* prevent it from being the active one */
+				slave->state = BOND_STATE_BACKUP;
+			} else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+				/* make it immediately active */
+				slave->state = BOND_STATE_ACTIVE;
+			} else if (slave != bond->primary_slave) {
+				/* prevent it from being the active one */
+				slave->state = BOND_STATE_BACKUP;
+			}
+
+			printk(KERN_INFO DRV_NAME
+			       ": %s: link status definitely "
+			       "up for interface %s.\n",
+			       bond->dev->name, slave->dev->name);
+
+			/* notify ad that the link status has changed */
+			if (bond->params.mode == BOND_MODE_8023AD)
+				bond_3ad_handle_link_change(slave, BOND_LINK_UP);
+
+			if ((bond->params.mode == BOND_MODE_TLB) ||
+			    (bond->params.mode == BOND_MODE_ALB))
+				bond_alb_handle_link_change(bond, slave,
+							    BOND_LINK_UP);
+
+			if (!bond->curr_active_slave ||
+			    (slave == bond->primary_slave))
+				goto do_failover;
+
+			continue;
+
+		case BOND_LINK_DOWN:
+			slave->link = BOND_LINK_DOWN;
+
+			if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
+			    bond->params.mode == BOND_MODE_8023AD)
+				bond_set_slave_inactive_flags(slave);
+
+			printk(KERN_INFO DRV_NAME
+			       ": %s: link status definitely down for "
+			       "interface %s, disabling it\n",
+			       bond->dev->name, slave->dev->name);
+
+			if (bond->params.mode == BOND_MODE_8023AD)
+				bond_3ad_handle_link_change(slave,
+							    BOND_LINK_DOWN);
+
+			if (bond->params.mode == BOND_MODE_TLB ||
+			    bond->params.mode == BOND_MODE_ALB)
+				bond_alb_handle_link_change(bond, slave,
+							    BOND_LINK_DOWN);
+
+			if (slave == bond->curr_active_slave)
+				goto do_failover;
+
+			continue;
+
		default:
-			/* Should not happen */
			printk(KERN_ERR DRV_NAME
-			       ": %s: Error: %s Illegal value (link=%d)\n",
-			       bond->dev->name,
-			       slave->dev->name,
-			       slave->link);
-			goto out;
-		} /* end of switch (slave->link) */
+			       ": %s: invalid new link %d on slave %s\n",
+			       bond->dev->name, slave->new_link,
+			       slave->dev->name);
+			slave->new_link = BOND_LINK_NOCHANGE;

-		bond_update_speed_duplex(slave);
-
-		if (bond->params.mode == BOND_MODE_8023AD) {
-			if (old_speed != slave->speed) {
-				bond_3ad_adapter_speed_changed(slave);
-			}
-
-			if (old_duplex != slave->duplex) {
-				bond_3ad_adapter_duplex_changed(slave);
-			}
+			continue;
		}

-	} /* end of for */
-
-	if (do_failover) {
+do_failover:
		ASSERT_RTNL();
-
		write_lock_bh(&bond->curr_slave_lock);
-
		bond_select_active_slave(bond);
-
		write_unlock_bh(&bond->curr_slave_lock);
+	}

-	} else
-		bond_set_carrier(bond);
-
-out:
-	return 0;
+	bond_set_carrier(bond);
}

/*
 * bond_mii_monitor
 *
 * Really a wrapper that splits the mii monitor into two phases: an
- * inspection, then (if inspection indicates something needs to be
- * done) an acquisition of appropriate locks followed by another pass
- * to implement whatever link state changes are indicated.
+ * inspection, then (if inspection indicates something needs to be done)
+ * an acquisition of appropriate locks followed by a commit phase to
+ * implement whatever link state changes are indicated.
 */
void bond_mii_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mii_work.work);
-	unsigned long delay;

	read_lock(&bond->lock);
-	if (bond->kill_timers) {
-		read_unlock(&bond->lock);
-		return;
-	}
+	if (bond->kill_timers)
+		goto out;
+
+	if (bond->slave_cnt == 0)
+		goto re_arm;

	if (bond->send_grat_arp) {
		read_lock(&bond->curr_slave_lock);
···
		read_unlock(&bond->curr_slave_lock);
	}

-	if (__bond_mii_monitor(bond, 0)) {
+	if (bond_miimon_inspect(bond)) {
		read_unlock(&bond->lock);
		rtnl_lock();
		read_lock(&bond->lock);
-		__bond_mii_monitor(bond, 1);
+
+		bond_miimon_commit(bond);
+
		read_unlock(&bond->lock);
		rtnl_unlock(); /* might sleep, hold no other locks */
		read_lock(&bond->lock);
	}

-	delay = msecs_to_jiffies(bond->params.miimon);
+re_arm:
+	if (bond->params.miimon)
+		queue_delayed_work(bond->wq, &bond->mii_work,
+				   msecs_to_jiffies(bond->params.miimon));
+out:
	read_unlock(&bond->lock);
-	queue_delayed_work(bond->wq, &bond->mii_work, delay);
}

static __be32 bond_glean_dev_ip(struct net_device *dev)
-3
drivers/net/bonding/bond_sysfs.c
···
	if (dev) {
		printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n",
		       bond->dev->name, dev->name);
-		if (bond->setup_by_slave)
-			res = bond_release_and_destroy(bond->dev, dev);
-		else
		res = bond_release(bond->dev, dev);
		if (res) {
			ret = res;
-7
drivers/net/de620.c
···
{
	if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
	{ /* Enable promiscuous mode */
-		/*
-		 * We must make the kernel realise we had to move
-		 * into promisc mode or we start all out war on
-		 * the cable. - AC
-		 */
-		dev->flags|=IFF_PROMISC;
-
		de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
	}
	else
···
	for (i = 0; i < 6; i += 2)
		dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);

+	if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
+		mac_src = "platform data";
+		memcpy(ndev->dev_addr, pdata->dev_addr, 6);
+	}
+
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		/* try reading from mac */
+14-17
drivers/net/e1000e/e1000.h
···

struct e1000_info;

-#define ndev_printk(level, netdev, format, arg...) \
-	printk(level "%s: " format, (netdev)->name, ## arg)
+#define e_printk(level, adapter, format, arg...) \
+	printk(level "%s: %s: " format, pci_name(adapter->pdev), \
+	       adapter->netdev->name, ## arg)

#ifdef DEBUG
-#define ndev_dbg(netdev, format, arg...) \
-	ndev_printk(KERN_DEBUG , netdev, format, ## arg)
+#define e_dbg(format, arg...) \
+	e_printk(KERN_DEBUG , adapter, format, ## arg)
#else
-#define ndev_dbg(netdev, format, arg...) do { (void)(netdev); } while (0)
+#define e_dbg(format, arg...) do { (void)(adapter); } while (0)
#endif

-#define ndev_err(netdev, format, arg...) \
-	ndev_printk(KERN_ERR , netdev, format, ## arg)
-#define ndev_info(netdev, format, arg...) \
-	ndev_printk(KERN_INFO , netdev, format, ## arg)
-#define ndev_warn(netdev, format, arg...) \
-	ndev_printk(KERN_WARNING , netdev, format, ## arg)
-#define ndev_notice(netdev, format, arg...) \
-	ndev_printk(KERN_NOTICE , netdev, format, ## arg)
+#define e_err(format, arg...) \
+	e_printk(KERN_ERR, adapter, format, ## arg)
+#define e_info(format, arg...) \
+	e_printk(KERN_INFO, adapter, format, ## arg)
+#define e_warn(format, arg...) \
+	e_printk(KERN_WARNING, adapter, format, ## arg)
+#define e_notice(format, arg...) \
+	e_printk(KERN_NOTICE, adapter, format, ## arg)

/* Tx/Rx descriptor defines */
···
	unsigned long led_status;

	unsigned int flags;
-
-	/* for ioport free */
-	int bars;
-	int need_ioport;
};

struct e1000_info {
+18-26
drivers/net/e1000e/ethtool.c
···
	/* Fiber NICs only allow 1000 gbps Full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
-		ndev_err(adapter->netdev, "Unsupported Speed/Duplex "
-			 "configuration\n");
+		e_err("Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
···
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
-		ndev_err(adapter->netdev, "Unsupported Speed/Duplex "
-			 "configuration\n");
+		e_err("Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
···
	 * cannot be changed
	 */
	if (e1000_check_reset_block(hw)) {
-		ndev_err(netdev, "Cannot change link "
-			 "characteristics when SoL/IDER is active.\n");
+		e_err("Cannot change link characteristics when SoL/IDER is "
+		      "active.\n");
		return -EINVAL;
	}
···
		netdev->features &= ~NETIF_F_TSO6;
	}

-	ndev_info(netdev, "TSO is %s\n",
-		  data ? "Enabled" : "Disabled");
+	e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
	adapter->flags |= FLAG_TSO_FORCE;
	return 0;
}
···
			    (test[pat] & write));
		val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
		if (val != (test[pat] & write & mask)) {
-			ndev_err(adapter->netdev, "pattern test reg %04X "
-				 "failed: got 0x%08X expected 0x%08X\n",
-				 reg + offset,
-				 val, (test[pat] & write & mask));
+			e_err("pattern test reg %04X failed: got 0x%08X "
+			      "expected 0x%08X\n", reg + offset, val,
+			      (test[pat] & write & mask));
			*data = reg;
			return 1;
		}
···
	__ew32(&adapter->hw, reg, write & mask);
	val = __er32(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
-		ndev_err(adapter->netdev, "set/check reg %04X test failed: "
-			 "got 0x%08X expected 0x%08X\n", reg, (val & mask),
-			 (write & mask));
+		e_err("set/check reg %04X test failed: got 0x%08X "
+		      "expected 0x%08X\n", reg, (val & mask), (write & mask));
		*data = reg;
		return 1;
	}
···
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &adapter->hw.mac;
-	struct net_device *netdev = adapter->netdev;
	u32 value;
	u32 before;
	u32 after;
···
	ew32(STATUS, toggle);
	after = er32(STATUS) & toggle;
	if (value != after) {
-		ndev_err(netdev, "failed STATUS register test got: "
-			 "0x%08X expected: 0x%08X\n", after, value);
+		e_err("failed STATUS register test got: 0x%08X expected: "
+		      "0x%08X\n", after, value);
		*data = 1;
		return 1;
	}
···
		*data = 1;
		return -1;
	}
-	ndev_info(netdev, "testing %s interrupt\n",
-		  (shared_int ? "shared" : "unshared"));
+	e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));

	/* Disable all the interrupts */
	ew32(IMC, 0xFFFFFFFF);
···
	 * sessions are active
	 */
	if (e1000_check_reset_block(&adapter->hw)) {
-		ndev_err(adapter->netdev, "Cannot do PHY loopback test "
-			 "when SoL/IDER is active.\n");
+		e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
		*data = 0;
		goto out;
	}
···
	forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
	autoneg = adapter->hw.mac.autoneg;

-	ndev_info(netdev, "offline testing starting\n");
+	e_info("offline testing starting\n");

	/*
	 * Link test performed before hardware reset so autoneg doesn't
···
		if (if_running)
			dev_open(netdev);
	} else {
-		ndev_info(netdev, "online testing starting\n");
+		e_info("online testing starting\n");
		/* Online tests */
		if (e1000_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;
···
			wol->supported &= ~WAKE_UCAST;

		if (adapter->wol & E1000_WUFC_EX)
-			ndev_err(netdev, "Interface does not support "
-				 "directed (unicast) frame wake-up packets\n");
+			e_err("Interface does not support directed (unicast) "
+			      "frame wake-up packets\n");
	}

	if (adapter->wol & E1000_WUFC_EX)
+110-136
drivers/net/e1000e/netdev.c
···484 * packet, also make sure the frame isn't just CRC only */485 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {486 /* All receives must fit into a single buffer */487- ndev_dbg(netdev, "%s: Receive packet consumed "488- "multiple buffers\n", netdev->name);489 /* recycle */490 buffer_info->skb = skb;491 goto next_desc;···576 unsigned int i = tx_ring->next_to_clean;577 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;578 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);579- struct net_device *netdev = adapter->netdev;580581 /* detected Tx unit hang */582- ndev_err(netdev,583- "Detected Tx Unit Hang:\n"584- " TDH <%x>\n"585- " TDT <%x>\n"586- " next_to_use <%x>\n"587- " next_to_clean <%x>\n"588- "buffer_info[next_to_clean]:\n"589- " time_stamp <%lx>\n"590- " next_to_watch <%x>\n"591- " jiffies <%lx>\n"592- " next_to_watch.status <%x>\n",593- readl(adapter->hw.hw_addr + tx_ring->head),594- readl(adapter->hw.hw_addr + tx_ring->tail),595- tx_ring->next_to_use,596- tx_ring->next_to_clean,597- tx_ring->buffer_info[eop].time_stamp,598- eop,599- jiffies,600- eop_desc->upper.fields.status);601}602603/**···745 buffer_info->dma = 0;746747 if (!(staterr & E1000_RXD_STAT_EOP)) {748- ndev_dbg(netdev, "%s: Packet Split buffers didn't pick "749- "up the full packet\n", netdev->name);750 dev_kfree_skb_irq(skb);751 goto next_desc;752 }···759 length = le16_to_cpu(rx_desc->wb.middle.length0);760761 if (!length) {762- ndev_dbg(netdev, "%s: Last part of the packet spanning"763- " multiple descriptors\n", netdev->name);764 dev_kfree_skb_irq(skb);765 goto next_desc;766 }···10091010 /* eth type trans needs skb->data to point to something */1011 if (!pskb_may_pull(skb, ETH_HLEN)) {1012- ndev_err(netdev, "pskb_may_pull failed.\n");1013 dev_kfree_skb(skb);1014 goto next_desc;1015 }···1249 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,1250 netdev);1251 if (err) {1252- ndev_err(netdev,1253- "Unable to allocate %s interrupt (return: %d)\n",1254- adapter->flags & FLAG_MSI_ENABLED ? 
"MSI":"INTx",1255- err);1256 if (adapter->flags & FLAG_MSI_ENABLED)1257 pci_disable_msi(adapter->pdev);1258 }···1391 return 0;1392err:1393 vfree(tx_ring->buffer_info);1394- ndev_err(adapter->netdev,1395- "Unable to allocate memory for the transmit descriptor ring\n");1396 return err;1397}1398···1445 }1446err:1447 vfree(rx_ring->buffer_info);1448- ndev_err(adapter->netdev,1449- "Unable to allocate memory for the transmit descriptor ring\n");1450 return err;1451}1452···2444 * For parts with AMT enabled, let the firmware know2445 * that the network interface is in control2446 */2447- if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw))2448 e1000_get_hw_control(adapter);24492450 ew32(WUC, 0);24512452 if (mac->ops.init_hw(hw))2453- ndev_err(adapter->netdev, "Hardware Error\n");24542455 e1000_update_mng_vlan(adapter);2456···2585 return 0;25862587err:2588- ndev_err(netdev, "Unable to allocate memory for queues\n");2589 kfree(adapter->rx_ring);2590 kfree(adapter->tx_ring);2591 return -ENOMEM;···2634 * If AMT is enabled, let the firmware know that the network2635 * interface is now open2636 */2637- if ((adapter->flags & FLAG_HAS_AMT) &&2638- e1000e_check_mng_mode(&adapter->hw))2639 e1000_get_hw_control(adapter);26402641 /*···2712 * If AMT is enabled, let the firmware know that the network2713 * interface is now closed2714 */2715- if ((adapter->flags & FLAG_HAS_AMT) &&2716- e1000e_check_mng_mode(&adapter->hw))2717 e1000_release_hw_control(adapter);27182719 return 0;···2909 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);2910 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);2911 if (ret_val)2912- ndev_warn(adapter->netdev,2913- "Error reading PHY register\n");2914 } else {2915 /*2916 * Do not read PHY registers if link is not up···2934static void e1000_print_link_info(struct e1000_adapter *adapter)2935{2936 struct e1000_hw *hw = &adapter->hw;2937- struct net_device *netdev = adapter->netdev;2938 u32 ctrl = er32(CTRL);29392940- ndev_info(netdev,2941- "Link is Up %d Mbps %s, Flow Control: %s\n",2942- adapter->link_speed,2943- (adapter->link_duplex == FULL_DUPLEX) ?2944- "Full Duplex" : "Half Duplex",2945- ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?2946- "RX/TX" :2947- ((ctrl & E1000_CTRL_RFCE) ? "RX" :2948- ((ctrl & E1000_CTRL_TFCE) ? 
"TX" : "None" )));2949}29502951static bool e1000_has_link(struct e1000_adapter *adapter)···2983 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&2984 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {2985 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */2986- ndev_info(adapter->netdev,2987- "Gigabit has been disabled, downgrading speed\n");2988 }29892990 return link_active;···3084 switch (adapter->link_speed) {3085 case SPEED_10:3086 case SPEED_100:3087- ndev_info(netdev,3088- "10/100 speed: disabling TSO\n");3089 netdev->features &= ~NETIF_F_TSO;3090 netdev->features &= ~NETIF_F_TSO6;3091 break;···3117 if (netif_carrier_ok(netdev)) {3118 adapter->link_speed = 0;3119 adapter->link_duplex = 0;3120- ndev_info(netdev, "Link is Down\n");3121 netif_carrier_off(netdev);3122 netif_tx_stop_all_queues(netdev);3123 if (!test_bit(__E1000_DOWN, &adapter->state))···35913592 pull_size = min((unsigned int)4, skb->data_len);3593 if (!__pskb_pull_tail(skb, pull_size)) {3594- ndev_err(netdev,3595- "__pskb_pull_tail failed.\n");3596 dev_kfree_skb_any(skb);3597 return NETDEV_TX_OK;3598 }···37233724 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||3725 (max_frame > MAX_JUMBO_FRAME_SIZE)) {3726- ndev_err(netdev, "Invalid MTU setting\n");3727 return -EINVAL;3728 }37293730 /* Jumbo frame size limits */3731 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {3732 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {3733- ndev_err(netdev, "Jumbo Frames not supported.\n");3734 return -EINVAL;3735 }3736 if (adapter->hw.phy.type == e1000_phy_ife) {3737- ndev_err(netdev, "Jumbo Frames not supported.\n");3738 return -EINVAL;3739 }3740 }37413742#define MAX_STD_JUMBO_FRAME_SIZE 92343743 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {3744- ndev_err(netdev, "MTU > 9216 not supported.\n");3745 return -EINVAL;3746 }3747···3778 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN3779 + ETH_FCS_LEN;37803781- ndev_info(netdev, "changing MTU from %d to %d\n",3782- netdev->mtu, new_mtu);3783 netdev->mtu = new_mtu;37843785 if (netif_running(netdev))···3991 pci_restore_state(pdev);3992 e1000e_disable_l1aspm(pdev);39933994- if (adapter->need_ioport)3995- err = pci_enable_device(pdev);3996- else3997- err = pci_enable_device_mem(pdev);3998 if (err) {3999 dev_err(&pdev->dev,4000 "Cannot enable PCI device from suspend\n");···4025 * is up. For all other cases, let the f/w know that the h/w is now4026 * under the control of the driver.4027 */4028- if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw))4029 e1000_get_hw_control(adapter);40304031 return 0;···4093 int err;40944095 e1000e_disable_l1aspm(pdev);4096- if (adapter->need_ioport)4097- err = pci_enable_device(pdev);4098- else4099- err = pci_enable_device_mem(pdev);4100 if (err) {4101 dev_err(&pdev->dev,4102 "Cannot re-enable PCI device after reset.\n");···4141 * is up. For all other cases, let the f/w know that the h/w is now4142 * under the control of the driver.4143 */4144- if (!(adapter->flags & FLAG_HAS_AMT) ||4145- !e1000e_check_mng_mode(&adapter->hw))4146 e1000_get_hw_control(adapter);41474148}···4153 u32 pba_num;41544155 /* print bus type/speed/width info */4156- ndev_info(netdev, "(PCI Express:2.5GB/s:%s) "4157- "%02x:%02x:%02x:%02x:%02x:%02x\n",4158- /* bus width */4159- ((hw->bus.width == e1000_bus_width_pcie_x4) ? 
"Width x4" :4160- "Width x1"),4161- /* MAC address */4162- netdev->dev_addr[0], netdev->dev_addr[1],4163- netdev->dev_addr[2], netdev->dev_addr[3],4164- netdev->dev_addr[4], netdev->dev_addr[5]);4165- ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n",4166- (hw->phy.type == e1000_phy_ife)4167- ? "10/100" : "1000");4168 e1000e_read_pba_num(hw, &pba_num);4169- ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",4170- hw->mac.type, hw->phy.type,4171- (pba_num >> 8), (pba_num & 0xff));4172}41734174-/**4175- * e1000e_is_need_ioport - determine if an adapter needs ioport resources or not4176- * @pdev: PCI device information struct4177- *4178- * Returns true if an adapters needs ioport resources4179- **/4180-static int e1000e_is_need_ioport(struct pci_dev *pdev)4181{4182- switch (pdev->device) {4183- /* Currently there are no adapters that need ioport resources */4184- default:4185- return false;00000000000004186 }4187}4188···4215 int i, err, pci_using_dac;4216 u16 eeprom_data = 0;4217 u16 eeprom_apme_mask = E1000_EEPROM_APME;4218- int bars, need_ioport;42194220 e1000e_disable_l1aspm(pdev);42214222- /* do not allocate ioport bars when not needed */4223- need_ioport = e1000e_is_need_ioport(pdev);4224- if (need_ioport) {4225- bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);4226- err = pci_enable_device(pdev);4227- } else {4228- bars = pci_select_bars(pdev, IORESOURCE_MEM);4229- err = pci_enable_device_mem(pdev);4230- }4231 if (err)4232 return err;4233···4241 }4242 }42434244- err = pci_request_selected_regions(pdev, bars, e1000e_driver_name);004245 if (err)4246 goto err_pci_reg;4247···4268 adapter->hw.adapter = adapter;4269 adapter->hw.mac.type = ei->mac;4270 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;4271- adapter->bars = bars;4272- adapter->need_ioport = need_ioport;42734274 mmio_start = pci_resource_start(pdev, 0);4275 mmio_len = pci_resource_len(pdev, 0);···4339 }43404341 if (e1000_check_reset_block(&adapter->hw))4342- ndev_info(netdev,4343- "PHY reset is blocked due to SOL/IDER session.\n");43444345 netdev->features = NETIF_F_SG |4346 NETIF_F_HW_CSUM |···4383 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)4384 break;4385 if (i == 2) {4386- ndev_err(netdev, "The NVM Checksum Is Not Valid\n");4387 err = -EIO;4388 goto err_eeprom;4389 }4390 }4391004392 /* copy the MAC address out of the NVM */4393 if (e1000e_read_mac_addr(&adapter->hw))4394- ndev_err(netdev, "NVM Read Error while reading MAC address\n");43954396 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);4397 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);43984399 if (!is_valid_ether_addr(netdev->perm_addr)) {4400- ndev_err(netdev, "Invalid MAC Address: "4401- "%02x:%02x:%02x:%02x:%02x:%02x\n",4402- netdev->perm_addr[0], netdev->perm_addr[1],4403- netdev->perm_addr[2], netdev->perm_addr[3],4404- netdev->perm_addr[4], netdev->perm_addr[5]);4405 err = -EIO;4406 goto err_eeprom;4407 }···4472 * is up. 
For all other cases, let the f/w know that the h/w is now4473 * under the control of the driver.4474 */4475- if (!(adapter->flags & FLAG_HAS_AMT) ||4476- !e1000e_check_mng_mode(&adapter->hw))4477 e1000_get_hw_control(adapter);44784479 /* tell the stack to leave us alone until e1000_open() is called */···4489 return 0;44904491err_register:4492-err_hw_init:4493- e1000_release_hw_control(adapter);4494err_eeprom:4495 if (!e1000_check_reset_block(&adapter->hw))4496 e1000_phy_hw_reset(&adapter->hw);044974498- if (adapter->hw.flash_address)4499- iounmap(adapter->hw.flash_address);4500-4501-err_flashmap:4502 kfree(adapter->tx_ring);4503 kfree(adapter->rx_ring);4504err_sw_init:0004505 iounmap(adapter->hw.hw_addr);4506err_ioremap:4507 free_netdev(netdev);4508err_alloc_etherdev:4509- pci_release_selected_regions(pdev, bars);04510err_pci_reg:4511err_dma:4512 pci_disable_device(pdev);···4555 iounmap(adapter->hw.hw_addr);4556 if (adapter->hw.flash_address)4557 iounmap(adapter->hw.flash_address);4558- pci_release_selected_regions(pdev, adapter->bars);045594560 free_netdev(netdev);4561
···484 * packet, also make sure the frame isn't just CRC only */485 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {486 /* All receives must fit into a single buffer */487+ e_dbg("%s: Receive packet consumed multiple buffers\n",488+ netdev->name);489 /* recycle */490 buffer_info->skb = skb;491 goto next_desc;···576 unsigned int i = tx_ring->next_to_clean;577 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;578 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);0579580 /* detected Tx unit hang */581+ e_err("Detected Tx Unit Hang:\n"582+ " TDH <%x>\n"583+ " TDT <%x>\n"584+ " next_to_use <%x>\n"585+ " next_to_clean <%x>\n"586+ "buffer_info[next_to_clean]:\n"587+ " time_stamp <%lx>\n"588+ " next_to_watch <%x>\n"589+ " jiffies <%lx>\n"590+ " next_to_watch.status <%x>\n",591+ readl(adapter->hw.hw_addr + tx_ring->head),592+ readl(adapter->hw.hw_addr + tx_ring->tail),593+ tx_ring->next_to_use,594+ tx_ring->next_to_clean,595+ tx_ring->buffer_info[eop].time_stamp,596+ eop,597+ jiffies,598+ eop_desc->upper.fields.status);0599}600601/**···747 buffer_info->dma = 0;748749 if (!(staterr & E1000_RXD_STAT_EOP)) {750+ e_dbg("%s: Packet Split buffers didn't pick up the "751+ "full packet\n", netdev->name);752 dev_kfree_skb_irq(skb);753 goto next_desc;754 }···761 length = le16_to_cpu(rx_desc->wb.middle.length0);762763 if (!length) {764+ e_dbg("%s: Last part of the packet spanning multiple "765+ "descriptors\n", netdev->name);766 dev_kfree_skb_irq(skb);767 goto next_desc;768 }···10111012 /* eth type trans needs skb->data to point to something */1013 if (!pskb_may_pull(skb, ETH_HLEN)) {1014+ e_err("pskb_may_pull failed.\n");1015 dev_kfree_skb(skb);1016 goto next_desc;1017 }···1251 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,1252 netdev);1253 if (err) {1254+ e_err("Unable to allocate %s interrupt (return: %d)\n",1255+ adapter->flags & FLAG_MSI_ENABLED ? 
"MSI":"INTx", err);001256 if (adapter->flags & FLAG_MSI_ENABLED)1257 pci_disable_msi(adapter->pdev);1258 }···1395 return 0;1396err:1397 vfree(tx_ring->buffer_info);1398+ e_err("Unable to allocate memory for the transmit descriptor ring\n");01399 return err;1400}1401···1450 }1451err:1452 vfree(rx_ring->buffer_info);1453+ e_err("Unable to allocate memory for the transmit descriptor ring\n");01454 return err;1455}1456···2450 * For parts with AMT enabled, let the firmware know2451 * that the network interface is in control2452 */2453+ if (adapter->flags & FLAG_HAS_AMT)2454 e1000_get_hw_control(adapter);24552456 ew32(WUC, 0);24572458 if (mac->ops.init_hw(hw))2459+ e_err("Hardware Error\n");24602461 e1000_update_mng_vlan(adapter);2462···2591 return 0;25922593err:2594+ e_err("Unable to allocate memory for queues\n");2595 kfree(adapter->rx_ring);2596 kfree(adapter->tx_ring);2597 return -ENOMEM;···2640 * If AMT is enabled, let the firmware know that the network2641 * interface is now open2642 */2643+ if (adapter->flags & FLAG_HAS_AMT)02644 e1000_get_hw_control(adapter);26452646 /*···2719 * If AMT is enabled, let the firmware know that the network2720 * interface is now closed2721 */2722+ if (adapter->flags & FLAG_HAS_AMT)02723 e1000_release_hw_control(adapter);27242725 return 0;···2917 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);2918 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);2919 if (ret_val)2920+ e_warn("Error reading PHY register\n");02921 } else {2922 /*2923 * Do not read PHY registers if link is not up···2943static void e1000_print_link_info(struct e1000_adapter *adapter)2944{2945 struct e1000_hw *hw = &adapter->hw;02946 u32 ctrl = er32(CTRL);29472948+ e_info("Link is Up %d Mbps %s, Flow Control: %s\n",2949+ adapter->link_speed,2950+ (adapter->link_duplex == FULL_DUPLEX) ?2951+ "Full Duplex" : "Half Duplex",2952+ ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?2953+ "RX/TX" :2954+ ((ctrl & E1000_CTRL_RFCE) ? "RX" :2955+ ((ctrl & E1000_CTRL_TFCE) ? 
"TX" : "None" )));02956}29572958static bool e1000_has_link(struct e1000_adapter *adapter)···2994 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&2995 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {2996 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */2997+ e_info("Gigabit has been disabled, downgrading speed\n");02998 }29993000 return link_active;···3096 switch (adapter->link_speed) {3097 case SPEED_10:3098 case SPEED_100:3099+ e_info("10/100 speed: disabling TSO\n");03100 netdev->features &= ~NETIF_F_TSO;3101 netdev->features &= ~NETIF_F_TSO6;3102 break;···3130 if (netif_carrier_ok(netdev)) {3131 adapter->link_speed = 0;3132 adapter->link_duplex = 0;3133+ e_info("Link is Down\n");3134 netif_carrier_off(netdev);3135 netif_tx_stop_all_queues(netdev);3136 if (!test_bit(__E1000_DOWN, &adapter->state))···36043605 pull_size = min((unsigned int)4, skb->data_len);3606 if (!__pskb_pull_tail(skb, pull_size)) {3607+ e_err("__pskb_pull_tail failed.\n");03608 dev_kfree_skb_any(skb);3609 return NETDEV_TX_OK;3610 }···37373738 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||3739 (max_frame > MAX_JUMBO_FRAME_SIZE)) {3740+ e_err("Invalid MTU setting\n");3741 return -EINVAL;3742 }37433744 /* Jumbo frame size limits */3745 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {3746 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {3747+ e_err("Jumbo Frames not supported.\n");3748 return -EINVAL;3749 }3750 if (adapter->hw.phy.type == e1000_phy_ife) {3751+ e_err("Jumbo Frames not supported.\n");3752 return -EINVAL;3753 }3754 }37553756#define MAX_STD_JUMBO_FRAME_SIZE 92343757 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {3758+ e_err("MTU > 9216 not supported.\n");3759 return -EINVAL;3760 }3761···3792 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN3793 + ETH_FCS_LEN;37943795+ e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);03796 netdev->mtu = new_mtu;37973798 if (netif_running(netdev))···4006 pci_restore_state(pdev);4007 e1000e_disable_l1aspm(pdev);40084009+ err = pci_enable_device_mem(pdev);0004010 if (err) {4011 dev_err(&pdev->dev,4012 "Cannot enable PCI device from suspend\n");···4043 * is up. For all other cases, let the f/w know that the h/w is now4044 * under the control of the driver.4045 */4046+ if (!(adapter->flags & FLAG_HAS_AMT))4047 e1000_get_hw_control(adapter);40484049 return 0;···4111 int err;41124113 e1000e_disable_l1aspm(pdev);4114+ err = pci_enable_device_mem(pdev);0004115 if (err) {4116 dev_err(&pdev->dev,4117 "Cannot re-enable PCI device after reset.\n");···4162 * is up. For all other cases, let the f/w know that the h/w is now4163 * under the control of the driver.4164 */4165+ if (!(adapter->flags & FLAG_HAS_AMT))04166 e1000_get_hw_control(adapter);41674168}···4175 u32 pba_num;41764177 /* print bus type/speed/width info */4178+ e_info("(PCI Express:2.5GB/s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",4179+ /* bus width */4180+ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :4181+ "Width x1"),4182+ /* MAC address */4183+ netdev->dev_addr[0], netdev->dev_addr[1],4184+ netdev->dev_addr[2], netdev->dev_addr[3],4185+ netdev->dev_addr[4], netdev->dev_addr[5]);4186+ e_info("Intel(R) PRO/%s Network Connection\n",4187+ (hw->phy.type == e1000_phy_ife) ? 
"10/100" : "1000");004188 e1000e_read_pba_num(hw, &pba_num);4189+ e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",4190+ hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));04191}41924193+static void e1000_eeprom_checks(struct e1000_adapter *adapter)0000004194{4195+ struct e1000_hw *hw = &adapter->hw;4196+ int ret_val;4197+ u16 buf = 0;4198+4199+ if (hw->mac.type != e1000_82573)4200+ return;4201+4202+ ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);4203+ if (!(le16_to_cpu(buf) & (1 << 0))) {4204+ /* Deep Smart Power Down (DSPD) */4205+ e_warn("Warning: detected DSPD enabled in EEPROM\n");4206+ }4207+4208+ ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);4209+ if (le16_to_cpu(buf) & (3 << 2)) {4210+ /* ASPM enable */4211+ e_warn("Warning: detected ASPM enabled in EEPROM\n");4212 }4213}4214···4233 int i, err, pci_using_dac;4234 u16 eeprom_data = 0;4235 u16 eeprom_apme_mask = E1000_EEPROM_APME;042364237 e1000e_disable_l1aspm(pdev);42384239+ err = pci_enable_device_mem(pdev);000000004240 if (err)4241 return err;4242···4268 }4269 }42704271+ err = pci_request_selected_regions(pdev,4272+ pci_select_bars(pdev, IORESOURCE_MEM),4273+ e1000e_driver_name);4274 if (err)4275 goto err_pci_reg;4276···4293 adapter->hw.adapter = adapter;4294 adapter->hw.mac.type = ei->mac;4295 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;0042964297 mmio_start = pci_resource_start(pdev, 0);4298 mmio_len = pci_resource_len(pdev, 0);···4366 }43674368 if (e1000_check_reset_block(&adapter->hw))4369+ e_info("PHY reset is blocked due to SOL/IDER session.\n");043704371 netdev->features = NETIF_F_SG |4372 NETIF_F_HW_CSUM |···4411 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)4412 break;4413 if (i == 2) {4414+ e_err("The NVM Checksum Is Not Valid\n");4415 err = -EIO;4416 goto err_eeprom;4417 }4418 }44194420+ e1000_eeprom_checks(adapter);4421+4422 /* copy the MAC address out of the NVM */4423 if (e1000e_read_mac_addr(&adapter->hw))4424+ e_err("NVM Read Error while reading MAC address\n");44254426 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);4427 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);44284429 if (!is_valid_ether_addr(netdev->perm_addr)) {4430+ e_err("Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n",4431+ netdev->perm_addr[0], netdev->perm_addr[1],4432+ netdev->perm_addr[2], netdev->perm_addr[3],4433+ netdev->perm_addr[4], netdev->perm_addr[5]);04434 err = -EIO;4435 goto err_eeprom;4436 }···4499 * is up. 
For all other cases, let the f/w know that the h/w is now4500 * under the control of the driver.4501 */4502+ if (!(adapter->flags & FLAG_HAS_AMT))04503 e1000_get_hw_control(adapter);45044505 /* tell the stack to leave us alone until e1000_open() is called */···4517 return 0;45184519err_register:4520+ if (!(adapter->flags & FLAG_HAS_AMT))4521+ e1000_release_hw_control(adapter);4522err_eeprom:4523 if (!e1000_check_reset_block(&adapter->hw))4524 e1000_phy_hw_reset(&adapter->hw);4525+err_hw_init:452600004527 kfree(adapter->tx_ring);4528 kfree(adapter->rx_ring);4529err_sw_init:4530+ if (adapter->hw.flash_address)4531+ iounmap(adapter->hw.flash_address);4532+err_flashmap:4533 iounmap(adapter->hw.hw_addr);4534err_ioremap:4535 free_netdev(netdev);4536err_alloc_etherdev:4537+ pci_release_selected_regions(pdev,4538+ pci_select_bars(pdev, IORESOURCE_MEM));4539err_pci_reg:4540err_dma:4541 pci_disable_device(pdev);···4582 iounmap(adapter->hw.hw_addr);4583 if (adapter->hw.flash_address)4584 iounmap(adapter->hw.flash_address);4585+ pci_release_selected_regions(pdev,4586+ pci_select_bars(pdev, IORESOURCE_MEM));45874588 free_netdev(netdev);4589
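
The conversions above swap the old ndev_* helpers for adapter-scoped e_err/e_info/e_warn/e_notice macros, here and in param.c below. Their real definitions live in e1000.h and are not part of this diff; a minimal sketch of what such macros look like, assuming they expand to printk() keyed off a local "adapter" variable (which is also why param.c can drop its netdev local and gains <linux/pci.h> for pci_name()):

	/* Hypothetical sketch -- each helper assumes "adapter" is in scope. */
	#define e_printk(level, adapter, format, arg...) \
		printk(level "%s: %s: " format, pci_name((adapter)->pdev), \
		       (adapter)->netdev->name, ## arg)

	#define e_err(format, arg...)    e_printk(KERN_ERR, adapter, format, ## arg)
	#define e_info(format, arg...)   e_printk(KERN_INFO, adapter, format, ## arg)
	#define e_warn(format, arg...)   e_printk(KERN_WARNING, adapter, format, ## arg)
	#define e_notice(format, arg...) e_printk(KERN_NOTICE, adapter, format, ## arg)
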
+12-19
drivers/net/e1000e/param.c
···27*******************************************************************************/2829#include <linux/netdevice.h>03031#include "e1000.h"32···163 case enable_option:164 switch (*value) {165 case OPTION_ENABLED:166- ndev_info(adapter->netdev, "%s Enabled\n", opt->name);167 return 0;168 case OPTION_DISABLED:169- ndev_info(adapter->netdev, "%s Disabled\n", opt->name);170 return 0;171 }172 break;173 case range_option:174 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {175- ndev_info(adapter->netdev,176- "%s set to %i\n", opt->name, *value);177 return 0;178 }179 break;···184 ent = &opt->arg.l.p[i];185 if (*value == ent->i) {186 if (ent->str[0] != '\0')187- ndev_info(adapter->netdev, "%s\n",188- ent->str);189 return 0;190 }191 }···194 BUG();195 }196197- ndev_info(adapter->netdev, "Invalid %s value specified (%i) %s\n",198- opt->name, *value, opt->err);199 *value = opt->def;200 return -1;201}···212void __devinit e1000e_check_options(struct e1000_adapter *adapter)213{214 struct e1000_hw *hw = &adapter->hw;215- struct net_device *netdev = adapter->netdev;216 int bd = adapter->bd_number;217218 if (bd >= E1000_MAX_NIC) {219- ndev_notice(netdev,220- "Warning: no configuration for board #%i\n", bd);221- ndev_notice(netdev, "Using defaults for all values\n");222 }223224 { /* Transmit Interrupt Delay */···310 adapter->itr = InterruptThrottleRate[bd];311 switch (adapter->itr) {312 case 0:313- ndev_info(netdev, "%s turned off\n",314- opt.name);315 break;316 case 1:317- ndev_info(netdev,318- "%s set to dynamic mode\n",319- opt.name);320 adapter->itr_setting = adapter->itr;321 adapter->itr = 20000;322 break;323 case 3:324- ndev_info(netdev,325- "%s set to dynamic conservative mode\n",326 opt.name);327 adapter->itr_setting = adapter->itr;328 adapter->itr = 20000;
···27*******************************************************************************/2829#include <linux/netdevice.h>30+#include <linux/pci.h>3132#include "e1000.h"33···162 case enable_option:163 switch (*value) {164 case OPTION_ENABLED:165+ e_info("%s Enabled\n", opt->name);166 return 0;167 case OPTION_DISABLED:168+ e_info("%s Disabled\n", opt->name);169 return 0;170 }171 break;172 case range_option:173 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {174+ e_info("%s set to %i\n", opt->name, *value);0175 return 0;176 }177 break;···184 ent = &opt->arg.l.p[i];185 if (*value == ent->i) {186 if (ent->str[0] != '\0')187+ e_info("%s\n", ent->str);0188 return 0;189 }190 }···195 BUG();196 }197198+ e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,199+ opt->err);200 *value = opt->def;201 return -1;202}···213void __devinit e1000e_check_options(struct e1000_adapter *adapter)214{215 struct e1000_hw *hw = &adapter->hw;0216 int bd = adapter->bd_number;217218 if (bd >= E1000_MAX_NIC) {219+ e_notice("Warning: no configuration for board #%i\n", bd);220+ e_notice("Using defaults for all values\n");0221 }222223 { /* Transmit Interrupt Delay */···313 adapter->itr = InterruptThrottleRate[bd];314 switch (adapter->itr) {315 case 0:316+ e_info("%s turned off\n", opt.name);0317 break;318 case 1:319+ e_info("%s set to dynamic mode\n", opt.name);00320 adapter->itr_setting = adapter->itr;321 adapter->itr = 20000;322 break;323 case 3:324+ e_info("%s set to dynamic conservative mode\n",0325 opt.name);326 adapter->itr_setting = adapter->itr;327 adapter->itr = 20000;
-8
drivers/net/eepro.c
···12831284 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)1285 {1286- /*1287- * We must make the kernel realise we had to move1288- * into promisc mode or we start all out war on1289- * the cable. If it was a promisc request the1290- * flag is already set. If not we assert it.1291- */1292- dev->flags|=IFF_PROMISC;1293-1294 eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */1295 mode = inb(ioaddr + REG2);1296 outb(mode | PRMSC_Mode, ioaddr + REG2);
···12831284 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)1285 {000000001286 eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */1287 mode = inb(ioaddr + REG2);1288 outb(mode | PRMSC_Mode, ioaddr + REG2);
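
The deleted block was second-guessing the core's own bookkeeping: ->set_multicast_list runs with dev->flags already reflecting the requested mode, so a driver only translates the flags into hardware state and must never write them back. A sketch of the shape these conversions converge on (names are illustrative, not from any one driver; HW_MC_LIMIT is a hypothetical filter capacity):

	#define HW_MC_LIMIT 63	/* hypothetical hardware multicast filter size */

	static void example_set_multicast_list(struct net_device *dev)
	{
		if (dev->flags & IFF_PROMISC) {
			/* program the NIC to accept every frame */
		} else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MC_LIMIT) {
			/* accept all multicast, or fall back to promiscuous
			 * mode if the hardware has no all-multi setting */
		} else {
			/* load the multicast filter from dev->mc_list */
		}
	}
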
-1
drivers/net/eth16i.c
···13561357 if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))1358 {1359- dev->flags|=IFF_PROMISC; /* Must do this */1360 outb(3, ioaddr + RECEIVE_MODE_REG);1361 } else {1362 outb(2, ioaddr + RECEIVE_MODE_REG);
···13561357 if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))1358 {01359 outb(3, ioaddr + RECEIVE_MODE_REG);1360 } else {1361 outb(2, ioaddr + RECEIVE_MODE_REG);
drivers/net/igb/e1000_82575.c
···850 for (; mc_addr_count > 0; mc_addr_count--) {851 hash_value = igb_hash_mc_addr(hw, mc_addr_list);852 hw_dbg("Hash value = 0x%03X\n", hash_value);853- hw->mac.ops.mta_set(hw, hash_value);854 mc_addr_list += ETH_ALEN;855 }856}···1136 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */1137 hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);1138 }0000001139 wr32(E1000_PCS_LCTL, reg);11401141 return 0;···12351236out:1237 return ret_val;1238-}1239-1240-/**1241- * igb_translate_register_82576 - Translate the proper register offset1242- * @reg: e1000 register to be read1243- *1244- * Registers in 82576 are located in different offsets than other adapters1245- * even though they function in the same manner. This function takes in1246- * the name of the register to read and returns the correct offset for1247- * 82576 silicon.1248- **/1249-u32 igb_translate_register_82576(u32 reg)1250-{1251- /*1252- * Some of the Kawela registers are located at different1253- * offsets than they are in older adapters.1254- * Despite the difference in location, the registers1255- * function in the same manner.1256- */1257- switch (reg) {1258- case E1000_TDBAL(0):1259- reg = 0x0E000;1260- break;1261- case E1000_TDBAH(0):1262- reg = 0x0E004;1263- break;1264- case E1000_TDLEN(0):1265- reg = 0x0E008;1266- break;1267- case E1000_TDH(0):1268- reg = 0x0E010;1269- break;1270- case E1000_TDT(0):1271- reg = 0x0E018;1272- break;1273- case E1000_TXDCTL(0):1274- reg = 0x0E028;1275- break;1276- case E1000_RDBAL(0):1277- reg = 0x0C000;1278- break;1279- case E1000_RDBAH(0):1280- reg = 0x0C004;1281- break;1282- case E1000_RDLEN(0):1283- reg = 0x0C008;1284- break;1285- case E1000_RDH(0):1286- reg = 0x0C010;1287- break;1288- case E1000_RDT(0):1289- reg = 0x0C018;1290- break;1291- case E1000_RXDCTL(0):1292- reg = 0x0C028;1293- break;1294- case E1000_SRRCTL(0):1295- reg = 0x0C00C;1296- break;1297- default:1298- break;1299- }1300-1301- return reg;1302}13031304/**
drivers/net/igb/e1000_mac.c
···144}145146/**147- * igb_init_rx_addrs - Initialize receive address's148- * @hw: pointer to the HW structure149- * @rar_count: receive address registers150- *151- * Setups the receive address registers by setting the base receive address152- * register to the devices MAC address and clearing all the other receive153- * address registers to 0.154- **/155-void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)156-{157- u32 i;158-159- /* Setup the receive address */160- hw_dbg("Programming MAC Address into RAR[0]\n");161-162- hw->mac.ops.rar_set(hw, hw->mac.addr, 0);163-164- /* Zero out the other (rar_entry_count - 1) receive addresses */165- hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);166- for (i = 1; i < rar_count; i++) {167- array_wr32(E1000_RA, (i << 1), 0);168- wrfl();169- array_wr32(E1000_RA, ((i << 1) + 1), 0);170- wrfl();171- }172-}173-174-/**175 * igb_check_alt_mac_addr - Check for alternate MAC addr176 * @hw: pointer to the HW structure177 *···243 * current value is read, the new bit is OR'd in and the new value is244 * written back into the register.245 **/246-static void igb_mta_set(struct e1000_hw *hw, u32 hash_value)247{248 u32 hash_bit, hash_reg, mta;249···266267 array_wr32(E1000_MTA, hash_reg, mta);268 wrfl();269-}270-271-/**272- * igb_update_mc_addr_list - Update Multicast addresses273- * @hw: pointer to the HW structure274- * @mc_addr_list: array of multicast addresses to program275- * @mc_addr_count: number of multicast addresses to program276- * @rar_used_count: the first RAR register free to program277- * @rar_count: total number of supported Receive Address Registers278- *279- * Updates the Receive Address Registers and Multicast Table Array.280- * The caller must have a packed mc_addr_list of multicast addresses.281- * The parameter rar_count will usually be hw->mac.rar_entry_count282- * unless there are workarounds that change this.283- **/284-void igb_update_mc_addr_list(struct e1000_hw *hw,285- u8 *mc_addr_list, u32 mc_addr_count,286- u32 rar_used_count, u32 rar_count)287-{288- u32 hash_value;289- u32 i;290-291- /*292- * Load the first set of multicast addresses into the exact293- * filters (RAR). If there are not enough to fill the RAR294- * array, clear the filters.295- */296- for (i = rar_used_count; i < rar_count; i++) {297- if (mc_addr_count) {298- hw->mac.ops.rar_set(hw, mc_addr_list, i);299- mc_addr_count--;300- mc_addr_list += ETH_ALEN;301- } else {302- array_wr32(E1000_RA, i << 1, 0);303- wrfl();304- array_wr32(E1000_RA, (i << 1) + 1, 0);305- wrfl();306- }307- }308-309- /* Clear the old settings from the MTA */310- hw_dbg("Clearing MTA\n");311- for (i = 0; i < hw->mac.mta_reg_count; i++) {312- array_wr32(E1000_MTA, i, 0);313- wrfl();314- }315-316- /* Load any remaining multicast addresses into the hash table. */317- for (; mc_addr_count > 0; mc_addr_count--) {318- hash_value = igb_hash_mc_addr(hw, mc_addr_list);319- hw_dbg("Hash value = 0x%03X\n", hash_value);320- igb_mta_set(hw, hash_value);321- mc_addr_list += ETH_ALEN;322- }323}324325/**
···144}145146/**0000000000000000000000000000147 * igb_check_alt_mac_addr - Check for alternate MAC addr148 * @hw: pointer to the HW structure149 *···271 * current value is read, the new bit is OR'd in and the new value is272 * written back into the register.273 **/274+void igb_mta_set(struct e1000_hw *hw, u32 hash_value)275{276 u32 hash_bit, hash_reg, mta;277···294295 array_wr32(E1000_MTA, hash_reg, mta);296 wrfl();000000000000000000000000000000000000000000000000000000297}298299/**
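
With igb_mta_set() made non-static so the 82575 code can call it directly, its body is the standard e1000-family hash-to-MTA mapping. A sketch of the arithmetic it performs on the hash returned by igb_hash_mc_addr(); the register helpers are the driver's own, the comments are mine:

	/* Upper hash bits select one 32-bit MTA register, the low five
	 * bits select a bit within it; read-modify-write that register.
	 */
	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
	hash_bit = hash_value & 0x1F;

	mta = array_rd32(E1000_MTA, hash_reg);
	mta |= (1 << hash_bit);
	array_wr32(E1000_MTA, hash_reg, mta);
	wrfl();
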
drivers/net/igb/igb_main.c
···311 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);312 break;313 case e1000_82576:314- /* Kawela uses a table-based method for assigning vectors.315 Each queue has a single entry in the table to which we write316 a vector number along with a "valid" bit. Sadly, the layout317 of the table is somewhat counterintuitive. */···720 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);721}722723-static void igb_init_manageability(struct igb_adapter *adapter)724-{725- struct e1000_hw *hw = &adapter->hw;726-727- if (adapter->en_mng_pt) {728- u32 manc2h = rd32(E1000_MANC2H);729- u32 manc = rd32(E1000_MANC);730-731- /* enable receiving management packets to the host */732- /* this will probably generate destination unreachable messages733- * from the host OS, but the packets will be handled on SMBUS */734- manc |= E1000_MANC_EN_MNG2HOST;735-#define E1000_MNG2HOST_PORT_623 (1 << 5)736-#define E1000_MNG2HOST_PORT_664 (1 << 6)737- manc2h |= E1000_MNG2HOST_PORT_623;738- manc2h |= E1000_MNG2HOST_PORT_664;739- wr32(E1000_MANC2H, manc2h);740-741- wr32(E1000_MANC, manc);742- }743-}744-745/**746 * igb_configure - configure the hardware for RX and TX747 * @adapter: private board structure···733 igb_set_multi(netdev);734735 igb_restore_vlan(adapter);736- igb_init_manageability(adapter);737738 igb_configure_tx(adapter);739 igb_setup_rctl(adapter);···13491350 unregister_netdev(netdev);13511352- if (!igb_check_reset_block(&adapter->hw))01353 adapter->hw.phy.ops.reset_phy(&adapter->hw);13541355 igb_remove_device(&adapter->hw);···4500{4501 struct net_device *netdev = pci_get_drvdata(pdev);4502 struct igb_adapter *adapter = netdev_priv(netdev);4503-4504- igb_init_manageability(adapter);45054506 if (netif_running(netdev)) {4507 if (igb_up(adapter)) {
···311 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);312 break;313 case e1000_82576:314+ /* The 82576 uses a table-based method for assigning vectors.315 Each queue has a single entry in the table to which we write316 a vector number along with a "valid" bit. Sadly, the layout317 of the table is somewhat counterintuitive. */···720 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);721}7220000000000000000000000723/**724 * igb_configure - configure the hardware for RX and TX725 * @adapter: private board structure···755 igb_set_multi(netdev);756757 igb_restore_vlan(adapter);0758759 igb_configure_tx(adapter);760 igb_setup_rctl(adapter);···13721373 unregister_netdev(netdev);13741375+ if (adapter->hw.phy.ops.reset_phy &&1376+ !igb_check_reset_block(&adapter->hw))1377 adapter->hw.phy.ops.reset_phy(&adapter->hw);13781379 igb_remove_device(&adapter->hw);···4522{4523 struct net_device *netdev = pci_get_drvdata(pdev);4524 struct igb_adapter *adapter = netdev_priv(netdev);0045254526 if (netif_running(netdev)) {4527 if (igb_up(adapter)) {
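
The reworded comment is the only change at that first hunk, but the table it describes deserves a sketch: each queue's entry in the 82576 IVAR table takes a vector number plus a "valid" bit. Roughly (the index/shift math below is illustrative only; the real code derives it from queue number and direction, which is exactly what the comment calls counterintuitive):

	ivar = array_rd32(E1000_IVAR0, index);
	ivar &= ~(0xFF << offset);	/* clear this queue's byte */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;	/* vector + valid */
	array_wr32(E1000_IVAR0, index, ivar);
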
drivers/net/myri10ge/myri10ge_mcp.h
···101#define MXGEFW_ETH_SEND_3 0x2c0000102#define MXGEFW_ETH_RECV_SMALL 0x300000103#define MXGEFW_ETH_RECV_BIG 0x34000000104105#define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000))106#define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4)···122 * MXGEFW_CMD_RESET is issued */123124 MXGEFW_CMD_SET_INTRQ_DMA,00000125 MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */126 MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */127···136 MXGEFW_CMD_GET_SEND_OFFSET,137 MXGEFW_CMD_GET_SMALL_RX_OFFSET,138 MXGEFW_CMD_GET_BIG_RX_OFFSET,00139 MXGEFW_CMD_GET_IRQ_ACK_OFFSET,140 MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,141···209 MXGEFW_CMD_SET_STATS_DMA_V2,210 /* data0, data1 = bus addr,211 * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows212- * adding new stuff to mcp_irq_data without changing the ABI */00000213214 MXGEFW_CMD_UNALIGNED_TEST,215 /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned···236 MXGEFW_CMD_GET_MAX_RSS_QUEUES,237 MXGEFW_CMD_ENABLE_RSS_QUEUES,238 /* data0 = number of slices n (0, 1, ..., n-1) to enable239- * data1 = interrupt mode.240- * 0=share one INTx/MSI, 1=use one MSI-X per queue.0241 * If all queues share one interrupt, the driver must have set242 * RSS_SHARED_INTERRUPT_DMA before enabling queues.000243 */244-#define MXGEFW_SLICE_INTR_MODE_SHARED 0245-#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 10246247 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET,248 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA,···269 * 2: TCP_IPV4 (required by RSS)270 * 3: IPV4 | TCP_IPV4 (required by RSS)271 * 4: source port0272 */273#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1274#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2275#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x400276277 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,278 /* Return data = the max. size of the entire headers of a IPv6 TSO packet.···351352 MXGEFW_CMD_GET_DCA_OFFSET,353 /* offset of dca control for WDMAs */00000000000000354};355356enum myri10ge_mcp_cmd_status {···416 u8 stats_updated;417 u8 valid;418};000000419420#endif /* __MYRI10GE_MCP_H__ */
···101#define MXGEFW_ETH_SEND_3 0x2c0000102#define MXGEFW_ETH_RECV_SMALL 0x300000103#define MXGEFW_ETH_RECV_BIG 0x340000104+#define MXGEFW_ETH_SEND_GO 0x380000105+#define MXGEFW_ETH_SEND_STOP 0x3C0000106107#define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000))108#define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4)···120 * MXGEFW_CMD_RESET is issued */121122 MXGEFW_CMD_SET_INTRQ_DMA,123+ /* data0 = LSW of the host address124+ * data1 = MSW of the host address125+ * data2 = slice number if multiple slices are used126+ */127+128 MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */129 MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */130···129 MXGEFW_CMD_GET_SEND_OFFSET,130 MXGEFW_CMD_GET_SMALL_RX_OFFSET,131 MXGEFW_CMD_GET_BIG_RX_OFFSET,132+ /* data0 = slice number if multiple slices are used */133+134 MXGEFW_CMD_GET_IRQ_ACK_OFFSET,135 MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,136···200 MXGEFW_CMD_SET_STATS_DMA_V2,201 /* data0, data1 = bus addr,202 * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows203+ * adding new stuff to mcp_irq_data without changing the ABI204+ *205+ * If multiple slices are used, data2 contains both the size of the206+ * structure (in the lower 16 bits) and the slice number207+ * (in the upper 16 bits).208+ */209210 MXGEFW_CMD_UNALIGNED_TEST,211 /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned···222 MXGEFW_CMD_GET_MAX_RSS_QUEUES,223 MXGEFW_CMD_ENABLE_RSS_QUEUES,224 /* data0 = number of slices n (0, 1, ..., n-1) to enable225+ * data1 = interrupt mode | use of multiple transmit queues.226+ * 0=share one INTx/MSI.227+ * 1=use one MSI-X per queue.228 * If all queues share one interrupt, the driver must have set229 * RSS_SHARED_INTERRUPT_DMA before enabling queues.230+ * 2=enable both receive and send queues.231+ * Without this bit set, only one send queue (slice 0's send queue)232+ * is enabled. The receive queues are always enabled.233 */234+#define MXGEFW_SLICE_INTR_MODE_SHARED 0x0235+#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 0x1236+#define MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES 0x2237238 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET,239 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA,···250 * 2: TCP_IPV4 (required by RSS)251 * 3: IPV4 | TCP_IPV4 (required by RSS)252 * 4: source port253+ * 5: source port + destination port254 */255#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1256#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2257#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4258+#define MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT 0x5259+#define MXGEFW_RSS_HASH_TYPE_MAX 0x5260261 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,262 /* Return data = the max. size of the entire headers of a IPv6 TSO packet.···329330 MXGEFW_CMD_GET_DCA_OFFSET,331 /* offset of dca control for WDMAs */332+333+ /* VMWare NetQueue commands */334+ MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE,335+ MXGEFW_CMD_NETQ_ADD_FILTER,336+ /* data0 = filter_id << 16 | queue << 8 | type */337+ /* data1 = MS4 of MAC Addr */338+ /* data2 = LS2_MAC << 16 | VLAN_tag */339+ MXGEFW_CMD_NETQ_DEL_FILTER,340+ /* data0 = filter_id */341+ MXGEFW_CMD_NETQ_QUERY1,342+ MXGEFW_CMD_NETQ_QUERY2,343+ MXGEFW_CMD_NETQ_QUERY3,344+ MXGEFW_CMD_NETQ_QUERY4,345+346};347348enum myri10ge_mcp_cmd_status {···380 u8 stats_updated;381 u8 valid;382};383+384+/* definitions for NETQ filter type */385+#define MXGEFW_NETQ_FILTERTYPE_NONE 0386+#define MXGEFW_NETQ_FILTERTYPE_MACADDR 1387+#define MXGEFW_NETQ_FILTERTYPE_VLAN 2388+#define MXGEFW_NETQ_FILTERTYPE_VLANMACADDR 3389390#endif /* __MYRI10GE_MCP_H__ */
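
Putting the new slice flags together: a sketch of how the driver would enable n slices with one MSI-X vector per slice and per-slice send queues, per the comment block above. myri10ge_send_cmd() and struct myri10ge_cmd are the driver's existing command plumbing; error handling is elided:

	struct myri10ge_cmd cmd;
	int status;

	cmd.data0 = num_slices;		/* slices 0 .. n-1 */
	cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE |
		    MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES, &cmd, 0);
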
+1-1
drivers/net/myri10ge/myri10ge_mcp_gen_header.h
···35 unsigned char mcp_index;36 unsigned char disable_rabbit;37 unsigned char unaligned_tlp;38- unsigned char pad1;39 unsigned counters_addr;40 unsigned copy_block_info; /* for small mcps loaded with "lload -d" */41 unsigned short handoff_id_major; /* must be equal */
···35 unsigned char mcp_index;36 unsigned char disable_rabbit;37 unsigned char unaligned_tlp;38+ unsigned char pcie_link_algo;39 unsigned counters_addr;40 unsigned copy_block_info; /* for small mcps loaded with "lload -d" */41 unsigned short handoff_id_major; /* must be equal */
+38-3
drivers/net/netxen/netxen_nic.h
···508 NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027,509 NETXEN_BRDTYPE_P3_XG_LOM = 0x0028,510 NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029,00511 NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031,512 NETXEN_BRDTYPE_P3_10G_XFP = 0x0032513···1172 nx_nic_intr_coalesce_data_t irq;1173} nx_nic_intr_coalesce_t;11740000000000000000000000000000001175typedef struct {1176 u64 qhdr;1177 u64 req_hdr;···1320 int (*disable_phy_interrupts) (struct netxen_adapter *);1321 int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t);1322 int (*set_mtu) (struct netxen_adapter *, int);1323- int (*set_promisc) (struct netxen_adapter *, netxen_niu_prom_mode_t);1324 int (*phy_read) (struct netxen_adapter *, long reg, u32 *);1325 int (*phy_write) (struct netxen_adapter *, long reg, u32 val);1326 int (*init_port) (struct netxen_adapter *, int);···1497u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);1498void netxen_p2_nic_set_multi(struct net_device *netdev);1499void netxen_p3_nic_set_multi(struct net_device *netdev);01500int netxen_config_intr_coalesce(struct netxen_adapter *adapter);15011502-u32 nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu);1503int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);15041505int netxen_nic_set_mac(struct net_device *netdev, void *p);···1535 {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"},1536 {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"},1537 {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"},1538- {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "Quad GB - March Madness"},001539 {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"},1540 {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"}1541};
···419#define netxen_get_niu_enable_ge(config_word) \420 _netxen_crb_get_bit(config_word, 1)421422-/* Promiscous mode options (GbE mode only) */423-typedef enum {424- NETXEN_NIU_PROMISC_MODE = 0,425- NETXEN_NIU_NON_PROMISC_MODE,426- NETXEN_NIU_ALLMULTI_MODE427-} netxen_niu_prom_mode_t;428429/*430 * NIU GB Drop CRC Register···468469/* Set promiscuous mode for a GbE interface */470int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,471- netxen_niu_prom_mode_t mode);472int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,473- netxen_niu_prom_mode_t mode);474475/* set the MAC address for a given MAC */476int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
···419#define netxen_get_niu_enable_ge(config_word) \420 _netxen_crb_get_bit(config_word, 1)421422+#define NETXEN_NIU_NON_PROMISC_MODE 0423+#define NETXEN_NIU_PROMISC_MODE 1424+#define NETXEN_NIU_ALLMULTI_MODE 2000425426/*427 * NIU GB Drop CRC Register···471472/* Set promiscuous mode for a GbE interface */473int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,474+ u32 mode);475int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,476+ u32 mode);477478/* set the MAC address for a given MAC */479int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
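
With the netxen_niu_prom_mode_t enum folded into plain defines, the promiscuous-mode hooks take a bare u32. A sketch of a caller, assuming the set_promisc op stays wired up as in the adapter struct above:

	if (netdev->flags & IFF_PROMISC)
		adapter->set_promisc(adapter, NETXEN_NIU_PROMISC_MODE);
	else if (netdev->flags & IFF_ALLMULTI)
		adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE);
	else
		adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE);
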
···621 if (num_addrs > len) {622 printk(KERN_ERR "%s: switching to promisc. mode\n",623 dev->name);624- dev->flags |= IFF_PROMISC;625 }626 }627 if (dev->flags & IFF_PROMISC)
···621 if (num_addrs > len) {622 printk(KERN_ERR "%s: switching to promisc. mode\n",623 dev->name);624+ writeb(0x01, &cfg_cmd->promisc);625 }626 }627 if (dev->flags & IFF_PROMISC)
+6-17
drivers/net/qla3xxx.c
···3839#define DRV_NAME "qla3xxx"40#define DRV_STRING "QLogic ISP3XXX Network Driver"41-#define DRV_VERSION "v2.03.00-k4"42#define PFX DRV_NAME " "4344static const char ql3xxx_driver_name[] = DRV_NAME;···3495 case ISP_CONTROL_FN0_NET:3496 qdev->mac_index = 0;3497 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;3498- qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;3499- qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;3500 qdev->mb_bit_mask = FN0_MA_BITS_MASK;3501 qdev->PHYAddr = PORT0_PHY_ADDRESS;3502 if (port_status & PORT_STATUS_SM0)···3506 case ISP_CONTROL_FN1_NET:3507 qdev->mac_index = 1;3508 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;3509- qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;3510- qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;3511 qdev->mb_bit_mask = FN1_MA_BITS_MASK;3512 qdev->PHYAddr = PORT1_PHY_ADDRESS;3513 if (port_status & PORT_STATUS_SM1)···3724{3725 struct ql3_adapter *qdev = netdev_priv(ndev);3726 return (ql_adapter_up(qdev));3727-}3728-3729-static void ql3xxx_set_multicast_list(struct net_device *ndev)3730-{3731- /*3732- * We are manually parsing the list in the net_device structure.3733- */3734- return;3735}37363737static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)···3995 ndev->open = ql3xxx_open;3996 ndev->hard_start_xmit = ql3xxx_send;3997 ndev->stop = ql3xxx_close;3998- ndev->set_multicast_list = ql3xxx_set_multicast_list;00003999 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);4000 ndev->set_mac_address = ql3xxx_set_mac_address;4001 ndev->tx_timeout = ql3xxx_tx_timeout;···4031 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);40324033 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;4034-4035- /* Turn off support for multicasting */4036- ndev->flags &= ~IFF_MULTICAST;40374038 /* Record PCI bus information. */4039 ql_get_board_info(qdev);
···3839#define DRV_NAME "qla3xxx"40#define DRV_STRING "QLogic ISP3XXX Network Driver"41+#define DRV_VERSION "v2.03.00-k5"42#define PFX DRV_NAME " "4344static const char ql3xxx_driver_name[] = DRV_NAME;···3495 case ISP_CONTROL_FN0_NET:3496 qdev->mac_index = 0;3497 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;003498 qdev->mb_bit_mask = FN0_MA_BITS_MASK;3499 qdev->PHYAddr = PORT0_PHY_ADDRESS;3500 if (port_status & PORT_STATUS_SM0)···3508 case ISP_CONTROL_FN1_NET:3509 qdev->mac_index = 1;3510 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;003511 qdev->mb_bit_mask = FN1_MA_BITS_MASK;3512 qdev->PHYAddr = PORT1_PHY_ADDRESS;3513 if (port_status & PORT_STATUS_SM1)···3728{3729 struct ql3_adapter *qdev = netdev_priv(ndev);3730 return (ql_adapter_up(qdev));000000003731}37323733static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)···4007 ndev->open = ql3xxx_open;4008 ndev->hard_start_xmit = ql3xxx_send;4009 ndev->stop = ql3xxx_close;4010+ /* ndev->set_multicast_list4011+ * This device is one side of a two-function adapter4012+ * (NIC and iSCSI). Promiscuous mode setting/clearing is4013+ * not allowed from the NIC side.4014+ */4015 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);4016 ndev->set_mac_address = ql3xxx_set_mac_address;4017 ndev->tx_timeout = ql3xxx_tx_timeout;···4039 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);40404041 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;00040424043 /* Record PCI bus information. */4044 ql_get_board_info(qdev);
-105
drivers/net/qla3xxx.h
···1415#define OPCODE_OB_MAC_IOCB_FN0 0x0116#define OPCODE_OB_MAC_IOCB_FN2 0x2117-#define OPCODE_OB_TCP_IOCB_FN0 0x0318-#define OPCODE_OB_TCP_IOCB_FN2 0x2319-#define OPCODE_UPDATE_NCB_IOCB_FN0 0x0020-#define OPCODE_UPDATE_NCB_IOCB_FN2 0x202122-#define OPCODE_UPDATE_NCB_IOCB 0xF023#define OPCODE_IB_MAC_IOCB 0xF924#define OPCODE_IB_3032_MAC_IOCB 0x0925#define OPCODE_IB_IP_IOCB 0xFA26#define OPCODE_IB_3032_IP_IOCB 0x0A27-#define OPCODE_IB_TCP_IOCB 0xFB28-#define OPCODE_DUMP_PROTO_IOCB 0xFE29-#define OPCODE_BUFFER_ALERT_IOCB 0xFB3031#define OPCODE_FUNC_ID_MASK 0x3032#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */33-#define OUTBOUND_TCP_IOCB 0x03 /* plus function bits */34-#define UPDATE_NCB_IOCB 0x00 /* plus function bits */3536#define FN0_MA_BITS_MASK 0x0037#define FN1_MA_BITS_MASK 0x80···149 __le32 reserved2;150};151152-struct ob_tcp_iocb_req {153- u8 opcode;154-155- u8 flags0;156-#define OB_TCP_IOCB_REQ_P 0x80157-#define OB_TCP_IOCB_REQ_CI 0x20158-#define OB_TCP_IOCB_REQ_H 0x10159-#define OB_TCP_IOCB_REQ_LN 0x08160-#define OB_TCP_IOCB_REQ_K 0x04161-#define OB_TCP_IOCB_REQ_D 0x02162-#define OB_TCP_IOCB_REQ_I 0x01163-164- u8 flags1;165-#define OB_TCP_IOCB_REQ_OSM 0x40166-#define OB_TCP_IOCB_REQ_URG 0x20167-#define OB_TCP_IOCB_REQ_ACK 0x10168-#define OB_TCP_IOCB_REQ_PSH 0x08169-#define OB_TCP_IOCB_REQ_RST 0x04170-#define OB_TCP_IOCB_REQ_SYN 0x02171-#define OB_TCP_IOCB_REQ_FIN 0x01172-173- u8 options_len;174-#define OB_TCP_IOCB_REQ_OMASK 0xF0175-#define OB_TCP_IOCB_REQ_SHIFT 4176-177- __le32 transaction_id;178- __le32 data_len;179- __le32 hncb_ptr_low;180- __le32 hncb_ptr_high;181- __le32 buf_addr0_low;182- __le32 buf_addr0_high;183- __le32 buf_0_len;184- __le32 buf_addr1_low;185- __le32 buf_addr1_high;186- __le32 buf_1_len;187- __le32 buf_addr2_low;188- __le32 buf_addr2_high;189- __le32 buf_2_len;190- __le32 time_stamp;191- __le32 reserved1;192-};193-194-struct ob_tcp_iocb_rsp {195- u8 opcode;196-197- u8 flags0;198-#define OB_TCP_IOCB_RSP_C 0x20199-#define OB_TCP_IOCB_RSP_H 0x10200-#define OB_TCP_IOCB_RSP_LN 0x08201-#define OB_TCP_IOCB_RSP_K 0x04202-#define OB_TCP_IOCB_RSP_D 0x02203-#define OB_TCP_IOCB_RSP_I 0x01204-205- u8 flags1;206-#define OB_TCP_IOCB_RSP_E 0x10207-#define OB_TCP_IOCB_RSP_W 0x08208-#define OB_TCP_IOCB_RSP_P 0x04209-#define OB_TCP_IOCB_RSP_T 0x02210-#define OB_TCP_IOCB_RSP_F 0x01211-212- u8 state;213-#define OB_TCP_IOCB_RSP_SMASK 0xF0214-#define OB_TCP_IOCB_RSP_SHIFT 4215-216- __le32 transaction_id;217- __le32 local_ncb_ptr;218- __le32 reserved0;219-};220-221struct ib_ip_iocb_rsp {222 u8 opcode;223#define IB_IP_IOCB_RSP_3032_V 0x80···173#define IB_IP_IOCB_RSP_3032_IPE 0x20174 __le16 reserved;175#define IB_IP_IOCB_RSP_R 0x01176- __le32 ial_low;177- __le32 ial_high;178-};179-180-struct ib_tcp_iocb_rsp {181- u8 opcode;182- u8 flags;183-#define IB_TCP_IOCB_RSP_P 0x80184-#define IB_TCP_IOCB_RSP_T 0x40185-#define IB_TCP_IOCB_RSP_D 0x20186-#define IB_TCP_IOCB_RSP_N 0x10187-#define IB_TCP_IOCB_RSP_IP 0x03188-#define IB_TCP_FLAG_MASK 0xf0189-#define IB_TCP_FLAG_IOCB_SYN 0x00190-191-#define TCP_IB_RSP_FLAGS(x) (x->flags & ~IB_TCP_FLAG_MASK)192-193- __le16 length;194- __le32 hncb_ref_num;195 __le32 ial_low;196 __le32 ial_high;197};···1168 u32 small_buf_release_cnt;1169 u32 small_buf_total_size;11701171- /* ISR related, saves status for DPC. 
*/1172- u32 control_status;1173-1174 struct eeprom_data nvram_data;1175- struct timer_list ioctl_timer;1176 u32 port_link_state;1177- u32 last_rsp_offset;11781179 /* 4022 specific */1180 u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */1181 u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */1182 u32 mac_ob_opcode; /* Opcode to use on mac transmission */1183- u32 tcp_ob_opcode; /* Opcode to use on tcp transmission */1184- u32 update_ob_opcode; /* Opcode to use for updating NCB */1185 u32 mb_bit_mask; /* MA Bits mask to use on transmission */1186 u32 numPorts;1187 struct workqueue_struct *workqueue;
···1415#define OPCODE_OB_MAC_IOCB_FN0 0x0116#define OPCODE_OB_MAC_IOCB_FN2 0x21000017018#define OPCODE_IB_MAC_IOCB 0xF919#define OPCODE_IB_3032_MAC_IOCB 0x0920#define OPCODE_IB_IP_IOCB 0xFA21#define OPCODE_IB_3032_IP_IOCB 0x0A0002223#define OPCODE_FUNC_ID_MASK 0x3024#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */002526#define FN0_MA_BITS_MASK 0x0027#define FN1_MA_BITS_MASK 0x80···159 __le32 reserved2;160};161000000000000000000000000000000000000000000000000000000000000000000000162struct ib_ip_iocb_rsp {163 u8 opcode;164#define IB_IP_IOCB_RSP_3032_V 0x80···252#define IB_IP_IOCB_RSP_3032_IPE 0x20253 __le16 reserved;254#define IB_IP_IOCB_RSP_R 0x010000000000000000000255 __le32 ial_low;256 __le32 ial_high;257};···1266 u32 small_buf_release_cnt;1267 u32 small_buf_total_size;12680001269 struct eeprom_data nvram_data;01270 u32 port_link_state;012711272 /* 4022 specific */1273 u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */1274 u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */1275 u32 mac_ob_opcode; /* Opcode to use on mac transmission */001276 u32 mb_bit_mask; /* MA Bits mask to use on transmission */1277 u32 numPorts;1278 struct workqueue_struct *workqueue;
+50-19
drivers/net/sh_eth.c
···3435#include "sh_eth.h"360000000000000000000000037/*38 * Program the hardware MAC address from dev->dev_addr.39 */···263 /* RX descriptor */264 rxdesc = &mdp->rx_ring[i];265 rxdesc->addr = (u32)skb->data & ~0x3UL;266- rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);267268 /* The size of the buffer is 16 byte boundary. */269 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;···285 mdp->dirty_rx = (u32) (i - RX_RING_SIZE);286287 /* Mark the last entry as wrapping the ring. */288- rxdesc->status |= cpu_to_le32(RD_RDEL);289290 memset(mdp->tx_ring, 0, tx_ringsize);291···293 for (i = 0; i < TX_RING_SIZE; i++) {294 mdp->tx_skbuff[i] = NULL;295 txdesc = &mdp->tx_ring[i];296- txdesc->status = cpu_to_le32(TD_TFP);297 txdesc->buffer_length = 0;298 if (i == 0) {299- /* Rx descriptor address set */300 ctrl_outl((u32)txdesc, ioaddr + TDLAR);301#if defined(CONFIG_CPU_SUBTYPE_SH7763)302 ctrl_outl((u32)txdesc, ioaddr + TDFAR);···304 }305 }306307- /* Rx descriptor address set */308#if defined(CONFIG_CPU_SUBTYPE_SH7763)309 ctrl_outl((u32)txdesc, ioaddr + TDFXR);310 ctrl_outl(0x1, ioaddr + TDFFR);311#endif312313- txdesc->status |= cpu_to_le32(TD_TDLE);314}315316/* Get skb and descriptor buffer */···478 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {479 entry = mdp->dirty_tx % TX_RING_SIZE;480 txdesc = &mdp->tx_ring[entry];481- if (txdesc->status & cpu_to_le32(TD_TACT))482 break;483 /* Free the original skb. */484 if (mdp->tx_skbuff[entry]) {···486 mdp->tx_skbuff[entry] = NULL;487 freeNum++;488 }489- txdesc->status = cpu_to_le32(TD_TFP);490 if (entry >= TX_RING_SIZE - 1)491- txdesc->status |= cpu_to_le32(TD_TDLE);492493 mdp->stats.tx_packets++;494 mdp->stats.tx_bytes += txdesc->buffer_length;···509 u32 desc_status, reserve = 0;510511 rxdesc = &mdp->rx_ring[entry];512- while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {513- desc_status = le32_to_cpu(rxdesc->status);514 pkt_len = rxdesc->frame_length;515516 if (--boguscnt < 0)···545 mdp->stats.rx_packets++;546 mdp->stats.rx_bytes += pkt_len;547 }548- rxdesc->status |= cpu_to_le32(RD_RACT);549 entry = (++mdp->cur_rx) % RX_RING_SIZE;550 }551···575 }576 if (entry >= RX_RING_SIZE - 1)577 rxdesc->status |=578- cpu_to_le32(RD_RACT | RD_RFP | RD_RDEL);579 else580 rxdesc->status |=581- cpu_to_le32(RD_RACT | RD_RFP);582 }583584 /* Restart Rx engine if stopped. */···954 txdesc->buffer_length = skb->len;955956 if (entry >= TX_RING_SIZE - 1)957- txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);958 else959- txdesc->status |= cpu_to_le32(TD_TACT);960961 mdp->cur_tx++;962···1182 struct resource *res;1183 struct net_device *ndev = NULL;1184 struct sh_eth_private *mdp;011851186 /* get base addr */1187 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);···1220 mdp = netdev_priv(ndev);1221 spin_lock_init(&mdp->lock);122201223 /* get PHY ID */1224- mdp->phy_id = (int)pdev->dev.platform_data;0012251226 /* set function */1227 ndev->open = sh_eth_open;···12441245 /* First device only init */1246 if (!devno) {01247 /* reset device */1248 ctrl_outl(ARSTR_ARSTR, ARSTR);1249 mdelay(1);0125001251 /* TSU init (Init only)*/1252 sh_eth_tsu_init(SH_TSU_ADDR);01253 }12541255 /* network device register */···1271 ndev->name, CARDNAME, (u32) ndev->base_addr);12721273 for (i = 0; i < 5; i++)1274- printk(KERN_INFO "%02X:", ndev->dev_addr[i]);1275- printk(KERN_INFO "%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);12761277 platform_set_drvdata(pdev, ndev);1278
···3435#include "sh_eth.h"3637+/* CPU <-> EDMAC endian convert */38+static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)39+{40+ switch (mdp->edmac_endian) {41+ case EDMAC_LITTLE_ENDIAN:42+ return cpu_to_le32(x);43+ case EDMAC_BIG_ENDIAN:44+ return cpu_to_be32(x);45+ }46+ return x;47+}48+49+static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)50+{51+ switch (mdp->edmac_endian) {52+ case EDMAC_LITTLE_ENDIAN:53+ return le32_to_cpu(x);54+ case EDMAC_BIG_ENDIAN:55+ return be32_to_cpu(x);56+ }57+ return x;58+}59+60/*61 * Program the hardware MAC address from dev->dev_addr.62 */···240 /* RX descriptor */241 rxdesc = &mdp->rx_ring[i];242 rxdesc->addr = (u32)skb->data & ~0x3UL;243+ rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);244245 /* The size of the buffer is 16 byte boundary. */246 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;···262 mdp->dirty_rx = (u32) (i - RX_RING_SIZE);263264 /* Mark the last entry as wrapping the ring. */265+ rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);266267 memset(mdp->tx_ring, 0, tx_ringsize);268···270 for (i = 0; i < TX_RING_SIZE; i++) {271 mdp->tx_skbuff[i] = NULL;272 txdesc = &mdp->tx_ring[i];273+ txdesc->status = cpu_to_edmac(mdp, TD_TFP);274 txdesc->buffer_length = 0;275 if (i == 0) {276+ /* Tx descriptor address set */277 ctrl_outl((u32)txdesc, ioaddr + TDLAR);278#if defined(CONFIG_CPU_SUBTYPE_SH7763)279 ctrl_outl((u32)txdesc, ioaddr + TDFAR);···281 }282 }283284+ /* Tx descriptor address set */285#if defined(CONFIG_CPU_SUBTYPE_SH7763)286 ctrl_outl((u32)txdesc, ioaddr + TDFXR);287 ctrl_outl(0x1, ioaddr + TDFFR);288#endif289290+ txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);291}292293/* Get skb and descriptor buffer */···455 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {456 entry = mdp->dirty_tx % TX_RING_SIZE;457 txdesc = &mdp->tx_ring[entry];458+ if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))459 break;460 /* Free the original skb. */461 if (mdp->tx_skbuff[entry]) {···463 mdp->tx_skbuff[entry] = NULL;464 freeNum++;465 }466+ txdesc->status = cpu_to_edmac(mdp, TD_TFP);467 if (entry >= TX_RING_SIZE - 1)468+ txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);469470 mdp->stats.tx_packets++;471 mdp->stats.tx_bytes += txdesc->buffer_length;···486 u32 desc_status, reserve = 0;487488 rxdesc = &mdp->rx_ring[entry];489+ while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {490+ desc_status = edmac_to_cpu(mdp, rxdesc->status);491 pkt_len = rxdesc->frame_length;492493 if (--boguscnt < 0)···522 mdp->stats.rx_packets++;523 mdp->stats.rx_bytes += pkt_len;524 }525+ rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);526 entry = (++mdp->cur_rx) % RX_RING_SIZE;527 }528···552 }553 if (entry >= RX_RING_SIZE - 1)554 rxdesc->status |=555+ cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);556 else557 rxdesc->status |=558+ cpu_to_edmac(mdp, RD_RACT | RD_RFP);559 }560561 /* Restart Rx engine if stopped. 
*/···931 txdesc->buffer_length = skb->len;932933 if (entry >= TX_RING_SIZE - 1)934+ txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);935 else936+ txdesc->status |= cpu_to_edmac(mdp, TD_TACT);937938 mdp->cur_tx++;939···1159 struct resource *res;1160 struct net_device *ndev = NULL;1161 struct sh_eth_private *mdp;1162+ struct sh_eth_plat_data *pd;11631164 /* get base addr */1165 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);···1196 mdp = netdev_priv(ndev);1197 spin_lock_init(&mdp->lock);11981199+ pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);1200 /* get PHY ID */1201+ mdp->phy_id = pd->phy;1202+ /* EDMAC endian */1203+ mdp->edmac_endian = pd->edmac_endian;12041205 /* set function */1206 ndev->open = sh_eth_open;···12171218 /* First device only init */1219 if (!devno) {1220+#if defined(ARSTR)1221 /* reset device */1222 ctrl_outl(ARSTR_ARSTR, ARSTR);1223 mdelay(1);1224+#endif12251226+#if defined(SH_TSU_ADDR)1227 /* TSU init (Init only)*/1228 sh_eth_tsu_init(SH_TSU_ADDR);1229+#endif1230 }12311232 /* network device register */···1240 ndev->name, CARDNAME, (u32) ndev->base_addr);12411242 for (i = 0; i < 5; i++)1243+ printk("%02X:", ndev->dev_addr[i]);1244+ printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);12451246 platform_set_drvdata(pdev, ndev);1247
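
The probe path now pulls both the PHY id and the EDMAC endianness out of proper platform data instead of abusing platform_data as a bare int. A sketch of the board-side registration this implies; the sh_eth_plat_data field names match the accesses above (pd->phy, pd->edmac_endian), while the PHY address value and the sh_eth_device name are placeholders:

	static struct sh_eth_plat_data sh_eth_pdata = {
		.phy          = 0x1f,			/* example PHY address */
		.edmac_endian = EDMAC_LITTLE_ENDIAN,	/* or EDMAC_BIG_ENDIAN */
	};

	/* ... then hang it off the platform device before registering it: */
	sh_eth_device.dev.platform_data = &sh_eth_pdata;
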
drivers/net/sky2.h
···2072#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */2073#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */2074#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */2075-#define SKY2_HW_CLK_POWER 0x00000100 /* clock power management */20762077- int pm_cap;2078 u8 chip_id;2079 u8 chip_rev;2080 u8 pmd_type;
···2072#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */2073#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */2074#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */0207502076 u8 chip_id;2077 u8 chip_rev;2078 u8 pmd_type;
drivers/net/usb/pegasus.c
···1285 }1286}12870000000000000001288static int pegasus_probe(struct usb_interface *intf,1289 const struct usb_device_id *id)1290{···1311 DECLARE_MAC_BUF(mac);13121313 usb_get_dev(dev);0000001314 net = alloc_etherdev(sizeof(struct pegasus));1315 if (!net) {1316 dev_err(&intf->dev, "can't allocate %s\n", "device");
···1285 }1286}12871288+static int pegasus_blacklisted(struct usb_device *udev)1289+{1290+ struct usb_device_descriptor *udd = &udev->descriptor;1291+1292+ /* Special quirk to keep the driver from handling the Belkin Bluetooth1293+ * dongle which happens to have the same ID.1294+ */1295+ if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) &&1296+ (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) &&1297+ (udd->bDeviceProtocol == 1))1298+ return 1;1299+1300+ return 0;1301+}1302+1303static int pegasus_probe(struct usb_interface *intf,1304 const struct usb_device_id *id)1305{···1296 DECLARE_MAC_BUF(mac);12971298 usb_get_dev(dev);1299+1300+ if (pegasus_blacklisted(dev)) {1301+ res = -ENODEV;1302+ goto out;1303+ }1304+1305 net = alloc_etherdev(sizeof(struct pegasus));1306 if (!net) {1307 dev_err(&intf->dev, "can't allocate %s\n", "device");
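
For context on the quirk: the collision is at the usb_device_id level, where the Ethernet adapter and the Bluetooth dongle present the same vendor/product pair, so table matching alone cannot separate them and probe has to inspect the device descriptor. An illustrative entry (the driver's real table is generated by PEGASUS_DEV() macros in pegasus.h, where VENDOR_BELKIN is assumed to be defined):

	static const struct usb_device_id belkin_example = {
		.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
		.idVendor    = VENDOR_BELKIN,
		.idProduct   = 0x0121,	/* shared by NIC and Bluetooth dongle */
	};
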
+173-132
drivers/net/via-velocity.c
···662 spin_unlock_irq(&vptr->lock);663}6640000665666/**667 * velocity_rx_reset - handle a receive reset···681 struct mac_regs __iomem * regs = vptr->mac_regs;682 int i;683684- vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0;685686 /*687 * Init state, all RD entries belong to the NIC688 */689 for (i = 0; i < vptr->options.numrx; ++i)690- vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC;691692 writew(vptr->options.numrx, ®s->RBRDU);693- writel(vptr->rd_pool_dma, ®s->RDBaseLo);694 writew(0, ®s->RDIdx);695 writew(vptr->options.numrx - 1, ®s->RDCSize);696}···783784 vptr->int_mask = INT_MASK_DEF;785786- writel(vptr->rd_pool_dma, ®s->RDBaseLo);787 writew(vptr->options.numrx - 1, ®s->RDCSize);788 mac_rx_queue_run(regs);789 mac_rx_queue_wake(regs);790791 writew(vptr->options.numtx - 1, ®s->TDCSize);792793- for (i = 0; i < vptr->num_txq; i++) {794- writel(vptr->td_pool_dma[i], ®s->TDBaseLo[i]);795 mac_tx_queue_run(regs, i);796 }797···10511052 vptr->pdev = pdev;1053 vptr->chip_id = info->chip_id;1054- vptr->num_txq = info->txqueue;1055 vptr->multicast_limit = MCAM_SIZE;1056 spin_lock_init(&vptr->lock);1057 INIT_LIST_HEAD(&vptr->list);···1097}10981099/**1100- * velocity_init_rings - set up DMA rings1101 * @vptr: Velocity to set up1102 *1103 * Allocate PCI mapped DMA rings for the receive and transmit layer1104 * to use.1105 */11061107-static int velocity_init_rings(struct velocity_info *vptr)1108{1109 struct velocity_opt *opt = &vptr->options;1110 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);···1120 * pci_alloc_consistent() fulfills the requirement for 64 bytes1121 * alignment1122 */1123- pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->num_txq +1124 rx_ring_size, &pool_dma);1125 if (!pool) {1126 dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",···1128 return -ENOMEM;1129 }11301131- vptr->rd_ring = pool;1132- vptr->rd_pool_dma = pool_dma;11331134 pool += rx_ring_size;1135 pool_dma += rx_ring_size;11361137- for (i = 0; i < vptr->num_txq; i++) {1138- vptr->td_rings[i] = pool;1139- vptr->td_pool_dma[i] = pool_dma;1140 pool += tx_ring_size;1141 pool_dma += tx_ring_size;1142 }···1145}11461147/**1148- * velocity_free_rings - free PCI ring pointers1149 * @vptr: Velocity to free from1150 *1151 * Clean up the PCI ring buffers allocated to this velocity.1152 */11531154-static void velocity_free_rings(struct velocity_info *vptr)1155{1156 const int size = vptr->options.numrx * sizeof(struct rx_desc) +1157- vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;11581159- pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);1160}11611162static void velocity_give_many_rx_descs(struct velocity_info *vptr)···1168 * RD number must be equal to 4X per hardware spec1169 * (programming guide rev 1.20, p.13)1170 */1171- if (vptr->rd_filled < 4)1172 return;11731174 wmb();11751176- unusable = vptr->rd_filled & 0x0003;1177- dirty = vptr->rd_dirty - unusable;1178- for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {1179 dirty = (dirty > 0) ? 
dirty - 1 : vptr->options.numrx - 1;1180- vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC;1181 }11821183- writew(vptr->rd_filled & 0xfffc, ®s->RBRDU);1184- vptr->rd_filled = unusable;1185}11861187static int velocity_rx_refill(struct velocity_info *vptr)1188{1189- int dirty = vptr->rd_dirty, done = 0;11901191 do {1192- struct rx_desc *rd = vptr->rd_ring + dirty;11931194 /* Fine for an all zero Rx desc at init time as well */1195 if (rd->rdesc0.len & OWNED_BY_NIC)1196 break;11971198- if (!vptr->rd_info[dirty].skb) {1199 if (velocity_alloc_rx_buf(vptr, dirty) < 0)1200 break;1201 }1202 done++;1203 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;1204- } while (dirty != vptr->rd_curr);12051206 if (done) {1207- vptr->rd_dirty = dirty;1208- vptr->rd_filled += done;1209 }12101211 return done;···12131214static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)1215{1216- vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;1217}12181219/**···1228{1229 int ret = -ENOMEM;12301231- vptr->rd_info = kcalloc(vptr->options.numrx,1232 sizeof(struct velocity_rd_info), GFP_KERNEL);1233- if (!vptr->rd_info)1234 goto out;12351236- vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0;12371238 if (velocity_rx_refill(vptr) != vptr->options.numrx) {1239 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR···1259{1260 int i;12611262- if (vptr->rd_info == NULL)1263 return;12641265 for (i = 0; i < vptr->options.numrx; i++) {1266- struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);1267- struct rx_desc *rd = vptr->rd_ring + i;12681269 memset(rd, 0, sizeof(*rd));12701271 if (!rd_info->skb)1272 continue;1273- pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,1274 PCI_DMA_FROMDEVICE);1275 rd_info->skb_dma = (dma_addr_t) NULL;1276···1278 rd_info->skb = NULL;1279 }12801281- kfree(vptr->rd_info);1282- vptr->rd_info = NULL;1283}12841285/**···1297 unsigned int j;12981299 /* Init the TD ring entries */1300- for (j = 0; j < vptr->num_txq; j++) {1301- curr = vptr->td_pool_dma[j];13021303- vptr->td_infos[j] = kcalloc(vptr->options.numtx,1304 sizeof(struct velocity_td_info),1305 GFP_KERNEL);1306- if (!vptr->td_infos[j]) {1307 while(--j >= 0)1308- kfree(vptr->td_infos[j]);1309 return -ENOMEM;1310 }13111312- vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;1313 }1314 return 0;1315}···1321static void velocity_free_td_ring_entry(struct velocity_info *vptr,1322 int q, int n)1323{1324- struct velocity_td_info * td_info = &(vptr->td_infos[q][n]);1325 int i;13261327 if (td_info == NULL)···1353{1354 int i, j;13551356- for (j = 0; j < vptr->num_txq; j++) {1357- if (vptr->td_infos[j] == NULL)1358 continue;1359 for (i = 0; i < vptr->options.numtx; i++) {1360 velocity_free_td_ring_entry(vptr, j, i);13611362 }1363- kfree(vptr->td_infos[j]);1364- vptr->td_infos[j] = NULL;1365 }1366}1367···1378static int velocity_rx_srv(struct velocity_info *vptr, int status)1379{1380 struct net_device_stats *stats = &vptr->stats;1381- int rd_curr = vptr->rd_curr;1382 int works = 0;13831384 do {1385- struct rx_desc *rd = vptr->rd_ring + rd_curr;13861387- if (!vptr->rd_info[rd_curr].skb)1388 break;13891390 if (rd->rdesc0.len & OWNED_BY_NIC)···1416 rd_curr = 0;1417 } while (++works <= 15);14181419- vptr->rd_curr = rd_curr;14201421 if ((works > 0) && (velocity_rx_refill(vptr) > 0))1422 velocity_give_many_rx_descs(vptr);···1514{1515 void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);1516 struct net_device_stats *stats = &vptr->stats;1517- struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);1518- struct 
rx_desc *rd = &(vptr->rd_ring[idx]);1519 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;1520 struct sk_buff *skb;1521···1531 skb = rd_info->skb;15321533 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,1534- vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);15351536 /*1537 * Drop frame not meeting IEEE 802.3···1554 rd_info->skb = NULL;1555 }15561557- pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,1558 PCI_DMA_FROMDEVICE);15591560 skb_put(skb, pkt_len - 4);···15841585static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)1586{1587- struct rx_desc *rd = &(vptr->rd_ring[idx]);1588- struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);15891590- rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64);1591 if (rd_info->skb == NULL)1592 return -ENOMEM;1593···1596 * 64byte alignment.1597 */1598 skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);1599- rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);016001601 /*1602 * Fill in the descriptor to match1603- */16041605 *((u32 *) & (rd->rdesc0)) = 0;1606- rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN;1607 rd->pa_low = cpu_to_le32(rd_info->skb_dma);1608 rd->pa_high = 0;1609 return 0;···1630 struct velocity_td_info *tdinfo;1631 struct net_device_stats *stats = &vptr->stats;16321633- for (qnum = 0; qnum < vptr->num_txq; qnum++) {1634- for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0;1635 idx = (idx + 1) % vptr->options.numtx) {16361637 /*1638 * Get Tx Descriptor1639 */1640- td = &(vptr->td_rings[qnum][idx]);1641- tdinfo = &(vptr->td_infos[qnum][idx]);16421643 if (td->tdesc0.len & OWNED_BY_NIC)1644 break;···1662 stats->tx_bytes += tdinfo->skb->len;1663 }1664 velocity_free_tx_buf(vptr, tdinfo);1665- vptr->td_used[qnum]--;1666 }1667- vptr->td_tail[qnum] = idx;16681669 if (AVAIL_TD(vptr, qnum) < 1) {1670 full = 1;···1851 tdinfo->skb = NULL;1852}185300000000000000000000000000000000001854/**1855 * velocity_open - interface activation callback1856 * @dev: network layer device to open···1901 struct velocity_info *vptr = netdev_priv(dev);1902 int ret;19031904- velocity_set_rxbufsize(vptr, dev->mtu);1905-1906- ret = velocity_init_rings(vptr);1907 if (ret < 0)1908 goto out;1909-1910- ret = velocity_init_rd_ring(vptr);1911- if (ret < 0)1912- goto err_free_desc_rings;1913-1914- ret = velocity_init_td_ring(vptr);1915- if (ret < 0)1916- goto err_free_rd_ring;19171918 /* Ensure chip is running */1919 pci_set_power_state(vptr->pdev, PCI_D0);···1917 if (ret < 0) {1918 /* Power down the chip */1919 pci_set_power_state(vptr->pdev, PCI_D3hot);1920- goto err_free_td_ring;01921 }19221923 mac_enable_int(vptr->mac_regs);···1926 vptr->flags |= VELOCITY_FLAGS_OPENED;1927out:1928 return ret;1929-1930-err_free_td_ring:1931- velocity_free_td_ring(vptr);1932-err_free_rd_ring:1933- velocity_free_rd_ring(vptr);1934-err_free_desc_rings:1935- velocity_free_rings(vptr);1936- goto out;1937}19381939/**···1941static int velocity_change_mtu(struct net_device *dev, int new_mtu)1942{1943 struct velocity_info *vptr = netdev_priv(dev);1944- unsigned long flags;1945- int oldmtu = dev->mtu;1946 int ret = 0;19471948 if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {1949 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",1950 vptr->dev->name);1951- return -EINVAL;01952 }19531954 if (!netif_running(dev)) {1955 dev->mtu = new_mtu;1956- return 0;1957 }19581959- if (new_mtu != oldmtu) {000000000000000000001960 spin_lock_irqsave(&vptr->lock, flags);19611962 
netif_stop_queue(dev);1963 velocity_shutdown(vptr);19641965- velocity_free_td_ring(vptr);1966- velocity_free_rd_ring(vptr);00000019671968 dev->mtu = new_mtu;19691970- velocity_set_rxbufsize(vptr, new_mtu);1971-1972- ret = velocity_init_rd_ring(vptr);1973- if (ret < 0)1974- goto out_unlock;1975-1976- ret = velocity_init_td_ring(vptr);1977- if (ret < 0)1978- goto out_unlock;19791980 velocity_init_registers(vptr, VELOCITY_INIT_COLD);19811982 mac_enable_int(vptr->mac_regs);1983 netif_start_queue(dev);1984-out_unlock:1985- spin_unlock_irqrestore(&vptr->lock, flags);1986- }1987000000001988 return ret;1989}1990···2052 /* Power down the chip */2053 pci_set_power_state(vptr->pdev, PCI_D3hot);20542055- /* Free the resources */2056- velocity_free_td_ring(vptr);2057- velocity_free_rd_ring(vptr);2058 velocity_free_rings(vptr);20592060 vptr->flags &= (~VELOCITY_FLAGS_OPENED);···20972098 spin_lock_irqsave(&vptr->lock, flags);20992100- index = vptr->td_curr[qnum];2101- td_ptr = &(vptr->td_rings[qnum][index]);2102- tdinfo = &(vptr->td_infos[qnum][index]);21032104 td_ptr->tdesc1.TCR = TCR0_TIC;2105 td_ptr->td_buf[0].size &= ~TD_QUEUE;···2112 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);2113 tdinfo->skb_dma[0] = tdinfo->buf_dma;2114 td_ptr->tdesc0.len = len;2115- td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);2116- td_ptr->td_buf[0].pa_high = 0;2117- td_ptr->td_buf[0].size = len; /* queue is 0 anyway */2118 tdinfo->nskb_dma = 1;2119 } else {2120 int i = 0;···2125 td_ptr->tdesc0.len = len;21262127 /* FIXME: support 48bit DMA later */2128- td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);2129- td_ptr->td_buf[i].pa_high = 0;2130- td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb));21312132 for (i = 0; i < nfrags; i++) {2133 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];···21352136 tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);21372138- td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);2139- td_ptr->td_buf[i + 1].pa_high = 0;2140- td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);2141 }2142 tdinfo->nskb_dma = i - 1;2143 }···2183 if (prev < 0)2184 prev = vptr->options.numtx - 1;2185 td_ptr->tdesc0.len |= OWNED_BY_NIC;2186- vptr->td_used[qnum]++;2187- vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;21882189 if (AVAIL_TD(vptr, qnum) < 1)2190 netif_stop_queue(dev);21912192- td_ptr = &(vptr->td_rings[qnum][prev]);2193 td_ptr->td_buf[0].size |= TD_QUEUE;2194 mac_tx_queue_wake(vptr->mac_regs, qnum);2195 }···34463447 velocity_tx_srv(vptr, 0);34483449- for (i = 0; i < vptr->num_txq; i++) {3450- if (vptr->td_used[i]) {3451 mac_tx_queue_wake(vptr->mac_regs, i);3452 }3453 }
···
 	spin_unlock_irq(&vptr->lock);
 }
 
+static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
+{
+	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
+}
 
 /**
  *	velocity_rx_reset	-	handle a receive reset
···
 	struct mac_regs __iomem * regs = vptr->mac_regs;
 	int i;
 
+	velocity_init_rx_ring_indexes(vptr);
 
 	/*
 	 *	Init state, all RD entries belong to the NIC
 	 */
 	for (i = 0; i < vptr->options.numrx; ++i)
+		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
 
 	writew(vptr->options.numrx, &regs->RBRDU);
+	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
 	writew(0, &regs->RDIdx);
 	writew(vptr->options.numrx - 1, &regs->RDCSize);
 }
···
 	vptr->int_mask = INT_MASK_DEF;
 
+	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
 	writew(vptr->options.numrx - 1, &regs->RDCSize);
 	mac_rx_queue_run(regs);
 	mac_rx_queue_wake(regs);
 
 	writew(vptr->options.numtx - 1, &regs->TDCSize);
 
+	for (i = 0; i < vptr->tx.numq; i++) {
+		writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
 		mac_tx_queue_run(regs, i);
 	}
···
 	vptr->pdev = pdev;
 	vptr->chip_id = info->chip_id;
+	vptr->tx.numq = info->txqueue;
 	vptr->multicast_limit = MCAM_SIZE;
 	spin_lock_init(&vptr->lock);
 	INIT_LIST_HEAD(&vptr->list);
···
 }
 
 /**
+ *	velocity_init_dma_rings	-	set up DMA rings
  *	@vptr: Velocity to set up
  *
  *	Allocate PCI mapped DMA rings for the receive and transmit layer
  *	to use.
  */
 
+static int velocity_init_dma_rings(struct velocity_info *vptr)
 {
 	struct velocity_opt *opt = &vptr->options;
 	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
···
 	 * pci_alloc_consistent() fulfills the requirement for 64 bytes
 	 * alignment
 	 */
+	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
 				    rx_ring_size, &pool_dma);
 	if (!pool) {
 		dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
···
 		return -ENOMEM;
 	}
 
+	vptr->rx.ring = pool;
+	vptr->rx.pool_dma = pool_dma;
 
 	pool += rx_ring_size;
 	pool_dma += rx_ring_size;
 
+	for (i = 0; i < vptr->tx.numq; i++) {
+		vptr->tx.rings[i] = pool;
+		vptr->tx.pool_dma[i] = pool_dma;
 		pool += tx_ring_size;
 		pool_dma += tx_ring_size;
 	}
···
 }
 
 /**
+ *	velocity_free_dma_rings	-	free PCI ring pointers
  *	@vptr: Velocity to free from
  *
  *	Clean up the PCI ring buffers allocated to this velocity.
  */
 
+static void velocity_free_dma_rings(struct velocity_info *vptr)
 {
 	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
+		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
 
+	pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
 }
 
 static void velocity_give_many_rx_descs(struct velocity_info *vptr)
···
 	 * RD number must be equal to 4X per hardware spec
 	 * (programming guide rev 1.20, p.13)
 	 */
+	if (vptr->rx.filled < 4)
 		return;
 
 	wmb();
 
+	unusable = vptr->rx.filled & 0x0003;
+	dirty = vptr->rx.dirty - unusable;
+	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
 		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
+		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
 	}
 
+	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
+	vptr->rx.filled = unusable;
 }
 
 static int velocity_rx_refill(struct velocity_info *vptr)
 {
+	int dirty = vptr->rx.dirty, done = 0;
 
 	do {
+		struct rx_desc *rd = vptr->rx.ring + dirty;
 
 		/* Fine for an all zero Rx desc at init time as well */
 		if (rd->rdesc0.len & OWNED_BY_NIC)
 			break;
 
+		if (!vptr->rx.info[dirty].skb) {
 			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
 				break;
 		}
 		done++;
 		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
+	} while (dirty != vptr->rx.curr);
 
 	if (done) {
+		vptr->rx.dirty = dirty;
+		vptr->rx.filled += done;
 	}
 
 	return done;
···
 
 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
 {
+	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
 }
 
 /**
···
 {
 	int ret = -ENOMEM;
 
+	vptr->rx.info = kcalloc(vptr->options.numrx,
 				sizeof(struct velocity_rd_info), GFP_KERNEL);
+	if (!vptr->rx.info)
 		goto out;
 
+	velocity_init_rx_ring_indexes(vptr);
 
 	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
 		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
···
 {
 	int i;
 
+	if (vptr->rx.info == NULL)
 		return;
 
 	for (i = 0; i < vptr->options.numrx; i++) {
+		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
+		struct rx_desc *rd = vptr->rx.ring + i;
 
 		memset(rd, 0, sizeof(*rd));
 
 		if (!rd_info->skb)
 			continue;
+		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
 				 PCI_DMA_FROMDEVICE);
 		rd_info->skb_dma = (dma_addr_t) NULL;
···
 		rd_info->skb = NULL;
 	}
 
+	kfree(vptr->rx.info);
+	vptr->rx.info = NULL;
 }
 
 /**
···
 	unsigned int j;
 
 	/* Init the TD ring entries */
+	for (j = 0; j < vptr->tx.numq; j++) {
+		curr = vptr->tx.pool_dma[j];
 
+		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
 					    sizeof(struct velocity_td_info),
 					    GFP_KERNEL);
+		if (!vptr->tx.infos[j]) {
 			while(--j >= 0)
+				kfree(vptr->tx.infos[j]);
 			return -ENOMEM;
 		}
 
+		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
 	}
 	return 0;
 }
···
 static void velocity_free_td_ring_entry(struct velocity_info *vptr,
 					int q, int n)
 {
+	struct velocity_td_info * td_info = &(vptr->tx.infos[q][n]);
 	int i;
 
 	if (td_info == NULL)
···
 {
 	int i, j;
 
+	for (j = 0; j < vptr->tx.numq; j++) {
+		if (vptr->tx.infos[j] == NULL)
 			continue;
 		for (i = 0; i < vptr->options.numtx; i++) {
 			velocity_free_td_ring_entry(vptr, j, i);
 
 		}
+		kfree(vptr->tx.infos[j]);
+		vptr->tx.infos[j] = NULL;
 	}
 }
···
 static int velocity_rx_srv(struct velocity_info *vptr, int status)
 {
 	struct net_device_stats *stats = &vptr->stats;
+	int rd_curr = vptr->rx.curr;
 	int works = 0;
 
 	do {
+		struct rx_desc *rd = vptr->rx.ring + rd_curr;
 
+		if (!vptr->rx.info[rd_curr].skb)
 			break;
 
 		if (rd->rdesc0.len & OWNED_BY_NIC)
···
 			rd_curr = 0;
 	} while (++works <= 15);
 
+	vptr->rx.curr = rd_curr;
 
 	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
 		velocity_give_many_rx_descs(vptr);
···
 {
 	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
 	struct net_device_stats *stats = &vptr->stats;
+	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
+	struct rx_desc *rd = &(vptr->rx.ring[idx]);
 	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
 	struct sk_buff *skb;
···
 	skb = rd_info->skb;
 
 	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
+				    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
 
 	/*
 	 *	Drop frame not meeting IEEE 802.3
···
 		rd_info->skb = NULL;
 	}
 
+	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
 		   PCI_DMA_FROMDEVICE);
 
 	skb_put(skb, pkt_len - 4);
···
 
 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 {
+	struct rx_desc *rd = &(vptr->rx.ring[idx]);
+	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
 
+	rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
 	if (rd_info->skb == NULL)
 		return -ENOMEM;
···
 	 *	64byte alignment.
 	 */
 	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
+	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
+					  vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
 
 	/*
 	 *	Fill in the descriptor to match
+	 */
 
 	*((u32 *) & (rd->rdesc0)) = 0;
+	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
 	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
 	rd->pa_high = 0;
 	return 0;
···
 	struct velocity_td_info *tdinfo;
 	struct net_device_stats *stats = &vptr->stats;
 
+	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
+		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
 			idx = (idx + 1) % vptr->options.numtx) {
 
 			/*
 			 *	Get Tx Descriptor
 			 */
+			td = &(vptr->tx.rings[qnum][idx]);
+			tdinfo = &(vptr->tx.infos[qnum][idx]);
 
 			if (td->tdesc0.len & OWNED_BY_NIC)
 				break;
···
 					stats->tx_bytes += tdinfo->skb->len;
 				}
 				velocity_free_tx_buf(vptr, tdinfo);
+				vptr->tx.used[qnum]--;
 			}
+			vptr->tx.tail[qnum] = idx;
 
 			if (AVAIL_TD(vptr, qnum) < 1) {
 				full = 1;
···
 	tdinfo->skb = NULL;
 }
 
+static int velocity_init_rings(struct velocity_info *vptr, int mtu)
+{
+	int ret;
+
+	velocity_set_rxbufsize(vptr, mtu);
+
+	ret = velocity_init_dma_rings(vptr);
+	if (ret < 0)
+		goto out;
+
+	ret = velocity_init_rd_ring(vptr);
+	if (ret < 0)
+		goto err_free_dma_rings_0;
+
+	ret = velocity_init_td_ring(vptr);
+	if (ret < 0)
+		goto err_free_rd_ring_1;
+out:
+	return ret;
+
+err_free_rd_ring_1:
+	velocity_free_rd_ring(vptr);
+err_free_dma_rings_0:
+	velocity_free_dma_rings(vptr);
+	goto out;
+}
+
+static void velocity_free_rings(struct velocity_info *vptr)
+{
+	velocity_free_td_ring(vptr);
+	velocity_free_rd_ring(vptr);
+	velocity_free_dma_rings(vptr);
+}
+
 /**
  *	velocity_open		-	interface activation callback
  *	@dev: network layer device to open
···
 	struct velocity_info *vptr = netdev_priv(dev);
 	int ret;
 
+	ret = velocity_init_rings(vptr, dev->mtu);
 	if (ret < 0)
 		goto out;
 
 	/* Ensure chip is running */
 	pci_set_power_state(vptr->pdev, PCI_D0);
···
 	if (ret < 0) {
 		/* Power down the chip */
 		pci_set_power_state(vptr->pdev, PCI_D3hot);
+		velocity_free_rings(vptr);
+		goto out;
 	}
 
 	mac_enable_int(vptr->mac_regs);
···
 	vptr->flags |= VELOCITY_FLAGS_OPENED;
 out:
 	return ret;
 }
 
 /**
···
 static int velocity_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct velocity_info *vptr = netdev_priv(dev);
 	int ret = 0;
 
 	if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
 		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
 				vptr->dev->name);
+		ret = -EINVAL;
+		goto out_0;
 	}
 
 	if (!netif_running(dev)) {
 		dev->mtu = new_mtu;
+		goto out_0;
 	}
 
+	if (dev->mtu != new_mtu) {
+		struct velocity_info *tmp_vptr;
+		unsigned long flags;
+		struct rx_info rx;
+		struct tx_info tx;
+
+		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
+		if (!tmp_vptr) {
+			ret = -ENOMEM;
+			goto out_0;
+		}
+
+		tmp_vptr->dev = dev;
+		tmp_vptr->pdev = vptr->pdev;
+		tmp_vptr->options = vptr->options;
+		tmp_vptr->tx.numq = vptr->tx.numq;
+
+		ret = velocity_init_rings(tmp_vptr, new_mtu);
+		if (ret < 0)
+			goto out_free_tmp_vptr_1;
+
 		spin_lock_irqsave(&vptr->lock, flags);
 
 		netif_stop_queue(dev);
 		velocity_shutdown(vptr);
 
+		rx = vptr->rx;
+		tx = vptr->tx;
+
+		vptr->rx = tmp_vptr->rx;
+		vptr->tx = tmp_vptr->tx;
+
+		tmp_vptr->rx = rx;
+		tmp_vptr->tx = tx;
 
 		dev->mtu = new_mtu;
 
+		velocity_give_many_rx_descs(vptr);
 
 		velocity_init_registers(vptr, VELOCITY_INIT_COLD);
 
 		mac_enable_int(vptr->mac_regs);
 		netif_start_queue(dev);
 
+		spin_unlock_irqrestore(&vptr->lock, flags);
+
+		velocity_free_rings(tmp_vptr);
+
+out_free_tmp_vptr_1:
+		kfree(tmp_vptr);
+	}
+out_0:
 	return ret;
 }
···
 	/* Power down the chip */
 	pci_set_power_state(vptr->pdev, PCI_D3hot);
 
 	velocity_free_rings(vptr);
 
 	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
···
 	spin_lock_irqsave(&vptr->lock, flags);
 
+	index = vptr->tx.curr[qnum];
+	td_ptr = &(vptr->tx.rings[qnum][index]);
+	tdinfo = &(vptr->tx.infos[qnum][index]);
 
 	td_ptr->tdesc1.TCR = TCR0_TIC;
 	td_ptr->td_buf[0].size &= ~TD_QUEUE;
···
 		skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
 		tdinfo->skb_dma[0] = tdinfo->buf_dma;
 		td_ptr->tdesc0.len = len;
+		td_ptr->tx.buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
+		td_ptr->tx.buf[0].pa_high = 0;
+		td_ptr->tx.buf[0].size = len;	/* queue is 0 anyway */
 		tdinfo->nskb_dma = 1;
 	} else {
 		int i = 0;
···
 		td_ptr->tdesc0.len = len;
 
 		/* FIXME: support 48bit DMA later */
+		td_ptr->tx.buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
+		td_ptr->tx.buf[i].pa_high = 0;
+		td_ptr->tx.buf[i].size = cpu_to_le16(skb_headlen(skb));
 
 		for (i = 0; i < nfrags; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
···
 			tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
 
+			td_ptr->tx.buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
+			td_ptr->tx.buf[i + 1].pa_high = 0;
+			td_ptr->tx.buf[i + 1].size = cpu_to_le16(frag->size);
 		}
 		tdinfo->nskb_dma = i - 1;
 	}
···
 	if (prev < 0)
 		prev = vptr->options.numtx - 1;
 	td_ptr->tdesc0.len |= OWNED_BY_NIC;
+	vptr->tx.used[qnum]++;
+	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
 
 	if (AVAIL_TD(vptr, qnum) < 1)
 		netif_stop_queue(dev);
 
+	td_ptr = &(vptr->tx.rings[qnum][prev]);
 	td_ptr->td_buf[0].size |= TD_QUEUE;
 	mac_tx_queue_wake(vptr->mac_regs, qnum);
 }
···
 	velocity_tx_srv(vptr, 0);
 
+	for (i = 0; i < vptr->tx.numq; i++) {
+		if (vptr->tx.used[i]) {
 			mac_tx_queue_wake(vptr->mac_regs, i);
 		}
 	}
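The new velocity_change_mtu() above is the payoff of the ring rework: ring allocation, which can sleep, happens on a scratch velocity_info before the lock is taken; the live rx/tx state is exchanged under the lock; the retired rings are freed after the lock drops. A condensed sketch of that flow follows (error reporting and the shutdown/re-init steps around the swap are trimmed; this is an illustration, not the driver's literal code):

	static int mtu_swap_sketch(struct net_device *dev, int new_mtu)
	{
		struct velocity_info *vptr = netdev_priv(dev);
		struct velocity_info *tmp_vptr;
		struct rx_info rx;
		struct tx_info tx;
		unsigned long flags;

		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
		if (!tmp_vptr)
			return -ENOMEM;

		tmp_vptr->pdev = vptr->pdev;		/* just enough state to allocate */
		tmp_vptr->options = vptr->options;
		tmp_vptr->tx.numq = vptr->tx.numq;

		if (velocity_init_rings(tmp_vptr, new_mtu) < 0)	/* may sleep: no lock held */
			goto out_free;

		spin_lock_irqsave(&vptr->lock, flags);
		rx = vptr->rx;				/* exchange ring state by struct copy */
		tx = vptr->tx;
		vptr->rx = tmp_vptr->rx;
		vptr->tx = tmp_vptr->tx;
		tmp_vptr->rx = rx;
		tmp_vptr->tx = tx;
		dev->mtu = new_mtu;
		spin_unlock_irqrestore(&vptr->lock, flags);

		velocity_free_rings(tmp_vptr);		/* old rings freed outside the lock */
	out_free:
		kfree(tmp_vptr);
		return 0;
	}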
+24 -20
drivers/net/via-velocity.h
···
 	u32 flags;
 };
 
 struct velocity_info {
 	struct list_head list;
 
 	struct pci_dev *pdev;
 	struct net_device *dev;
 	struct net_device_stats stats;
-
-	dma_addr_t rd_pool_dma;
-	dma_addr_t td_pool_dma[TX_QUEUE_NO];
 
 	struct vlan_group    *vlgrp;
 	u8 ip_addr[4];
···
 	unsigned long memaddr;
 	unsigned long ioaddr;
 
-	u8 rev_id;
 
-#define AVAIL_TD(p,q)   ((p)->options.numtx-((p)->td_used[(q)]))
 
-	int num_txq;
 
-	volatile int td_used[TX_QUEUE_NO];
-	int td_curr[TX_QUEUE_NO];
-	int td_tail[TX_QUEUE_NO];
-	struct tx_desc *td_rings[TX_QUEUE_NO];
-	struct velocity_td_info *td_infos[TX_QUEUE_NO];
 
-	int rd_curr;
-	int rd_dirty;
-	u32 rd_filled;
-	struct rx_desc *rd_ring;
-	struct velocity_rd_info *rd_info;	/* It's an array */
-
-#define GET_RD_BY_IDX(vptr, idx)   (vptr->rd_ring[idx])
 	u32 mib_counter[MAX_HW_MIB_COUNTER];
 	struct velocity_opt options;
···
 
 	u32 flags;
 
-	int rx_buf_sz;
 	u32 mii_status;
 	u32 phy_id;
 	int multicast_limit;
···
 	struct velocity_context context;
 
 	u32 ticks;
-	u32 rx_bytes;
 
 };
 
 /**
···
 	u32 flags;
 };
 
+#define AVAIL_TD(p,q)   ((p)->options.numtx-((p)->tx.used[(q)]))
+
+#define GET_RD_BY_IDX(vptr, idx)   (vptr->rd_ring[idx])
+
 struct velocity_info {
 	struct list_head list;
 
 	struct pci_dev *pdev;
 	struct net_device *dev;
 	struct net_device_stats stats;
 
 	struct vlan_group    *vlgrp;
 	u8 ip_addr[4];
···
 	unsigned long memaddr;
 	unsigned long ioaddr;
 
+	struct tx_info {
+		int numq;
 
+		/* FIXME: the locality of the data seems rather poor. */
+		int used[TX_QUEUE_NO];
+		int curr[TX_QUEUE_NO];
+		int tail[TX_QUEUE_NO];
+		struct tx_desc *rings[TX_QUEUE_NO];
+		struct velocity_td_info *infos[TX_QUEUE_NO];
+		dma_addr_t pool_dma[TX_QUEUE_NO];
+	} tx;
 
+	struct rx_info {
+		int buf_sz;
 
+		int dirty;
+		int curr;
+		u32 filled;
+		struct rx_desc *ring;
+		struct velocity_rd_info *info;	/* It's an array */
+		dma_addr_t pool_dma;
+	} rx;
 
 	u32 mib_counter[MAX_HW_MIB_COUNTER];
 	struct velocity_opt options;
···
 
 	u32 flags;
 
 	u32 mii_status;
 	u32 phy_id;
 	int multicast_limit;
···
 	struct velocity_context context;
 
 	u32 ticks;
 
+	u8 rev_id;
 };
 
 /**
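The header change above is what makes that swap cheap: the ring bookkeeping now lives in two named sub-structs, so a single C struct assignment moves the ring pointer, the DMA handles and every index at once. Illustratively (variable names assumed for the example):

	struct rx_info saved = vptr->rx;	/* ring, pool_dma, curr/dirty/filled, buf_sz */

	vptr->rx = tmp_vptr->rx;		/* adopt the freshly built set in one statement */
	tmp_vptr->rx = saved;			/* retire the old set via the scratch device */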
+6 -9
drivers/net/wan/Kconfig
···
 # There is no way to detect a comtrol sv11 - force it modular for now.
 config HOSTESS_SV11
 	tristate "Comtrol Hostess SV-11 support"
-	depends on ISA && m && ISA_DMA_API && INET
 	help
 	  Driver for Comtrol Hostess SV-11 network card which
 	  operates on low speed synchronous serial links at up to
···
 # The COSA/SRP driver has not been tested as non-modular yet.
 config COSA
 	tristate "COSA/SRP sync serial boards support"
-	depends on ISA && m && ISA_DMA_API
 	---help---
 	  Driver for COSA and SRP synchronous serial boards.
···
 #
 config LANMEDIA
 	tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards"
-	depends on PCI && VIRT_TO_BUS
 	---help---
 	  Driver for the following Lan Media family of serial boards:
···
 	  - LMC 5245 board connects directly to a T3 circuit saving the
 	  additional external hardware.
 
-	  To change settings such as syncPPP vs Cisco HDLC or clock source you
-	  will need lmcctl. It is available at <ftp://ftp.lanmedia.com/>
-	  (broken link).
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called lmc.
···
 # There is no way to detect a Sealevel board. Force it modular
 config SEALEVEL_4021
 	tristate "Sealevel Systems 4021 support"
-	depends on ISA && m && ISA_DMA_API && INET
 	help
 	  This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
···
 	depends on HDLC
 	help
 	  Generic HDLC driver supporting PPP over WAN connections.
-
-	  It will be replaced by new PPP implementation in Linux 2.6.26.
 
 	  If unsure, say N.
···
 # There is no way to detect a comtrol sv11 - force it modular for now.
 config HOSTESS_SV11
 	tristate "Comtrol Hostess SV-11 support"
+	depends on ISA && m && ISA_DMA_API && INET && HDLC
 	help
 	  Driver for Comtrol Hostess SV-11 network card which
 	  operates on low speed synchronous serial links at up to
···
 # The COSA/SRP driver has not been tested as non-modular yet.
 config COSA
 	tristate "COSA/SRP sync serial boards support"
+	depends on ISA && m && ISA_DMA_API && HDLC
 	---help---
 	  Driver for COSA and SRP synchronous serial boards.
···
 #
 config LANMEDIA
 	tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards"
+	depends on PCI && VIRT_TO_BUS && HDLC
 	---help---
 	  Driver for the following Lan Media family of serial boards:
···
 	  - LMC 5245 board connects directly to a T3 circuit saving the
 	  additional external hardware.
 
+	  To change settings such as clock source you will need lmcctl.
+	  It is available at <ftp://ftp.lanmedia.com/> (broken link).
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called lmc.
···
 # There is no way to detect a Sealevel board. Force it modular
 config SEALEVEL_4021
 	tristate "Sealevel Systems 4021 support"
+	depends on ISA && m && ISA_DMA_API && INET && HDLC
 	help
 	  This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
···
 	depends on HDLC
 	help
 	  Generic HDLC driver supporting PPP over WAN connections.
 
 	  If unsure, say N.
···
 /*      Default parameters for the link
  */
 #define FST_TX_QUEUE_LEN        100	/* At 8Mbps a longer queue length is
-					 * useful, the syncppp module forces
-					 * this down assuming a slower line I
-					 * guess.
-					 */
 #define FST_TXQ_DEPTH           16	/* This one is for the buffering
 					 * of frames on the way down to the card
 					 * so that we can keep the card busy
···
 /*      Default parameters for the link
  */
 #define FST_TX_QUEUE_LEN        100	/* At 8Mbps a longer queue length is
+					 * useful */
 #define FST_TXQ_DEPTH           16	/* This one is for the buffering
 					 * of frames on the way down to the card
 					 * so that we can keep the card busy
-6
drivers/net/wan/farsync.h
···
 
 /*      Ioctl call command values
- *
- *      The first three private ioctls are used by the sync-PPP module,
- *      allowing a little room for expansion we start our numbering at 10.
  */
 #define FSTWRITE        (SIOCDEVPRIVATE+10)
 #define FSTCPURESET     (SIOCDEVPRIVATE+11)
···
 #define J1              7
 
 /* "proto" */
-#define FST_HDLC        1		/* Cisco compatible HDLC */
-#define FST_PPP         2		/* Sync PPP */
-#define FST_MONITOR     3		/* Monitor only (raw packet reception) */
 #define FST_RAW         4		/* Two way raw packets */
 #define FST_GEN_HDLC    5		/* Using "Generic HDLC" module */
···
 
 /*      Ioctl call command values
  */
 #define FSTWRITE        (SIOCDEVPRIVATE+10)
 #define FSTCPURESET     (SIOCDEVPRIVATE+11)
···
 #define J1              7
 
 /* "proto" */
 #define FST_RAW         4		/* Two way raw packets */
 #define FST_GEN_HDLC    5		/* Using "Generic HDLC" module */
+15 -16
drivers/net/wan/hdlc.c
···
  *	- proto->start() and stop() are called with spin_lock_irq held.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
 #include <linux/errno.h>
-#include <linux/if_arp.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/pkt_sched.h>
-#include <linux/inetdevice.h>
-#include <linux/lapb.h>
-#include <linux/rtnetlink.h>
-#include <linux/notifier.h>
 #include <linux/hdlc.h>
 #include <net/net_namespace.h>
 
···
 	if (dev->get_stats != hdlc_get_stats)
 		return NOTIFY_DONE; /* not an HDLC device */
-
 	if (event != NETDEV_CHANGE)
 		return NOTIFY_DONE; /* Only interested in carrier changes */
···
 
 static struct notifier_block hdlc_notifier = {
-        .notifier_call = hdlc_device_event,
 };
 
···
 	printk(KERN_INFO "%s\n", version);
 	if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0)
-                return result;
-        dev_add_pack(&hdlc_packet_type);
 	return 0;
 }
···
  *	- proto->start() and stop() are called with spin_lock_irq held.
  */
 
 #include <linux/errno.h>
 #include <linux/hdlc.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/pkt_sched.h>
+#include <linux/poll.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
 #include <net/net_namespace.h>
 
···
 	if (dev->get_stats != hdlc_get_stats)
 		return NOTIFY_DONE; /* not an HDLC device */
+
 	if (event != NETDEV_CHANGE)
 		return NOTIFY_DONE; /* Only interested in carrier changes */
···
 
 static struct notifier_block hdlc_notifier = {
+	.notifier_call = hdlc_device_event,
 };
 
···
 	printk(KERN_INFO "%s\n", version);
 	if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0)
+		return result;
+	dev_add_pack(&hdlc_packet_type);
 	return 0;
 }
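The hdlc.c hunks are mostly include sorting and whitespace, but they pass through the core's netdevice notifier, which is how generic HDLC tracks carrier state without polling. A minimal sketch of the same pattern, with hypothetical names (my_event, my_notifier); note that in kernels of this vintage the notifier data pointer is the struct net_device itself:

	static int my_event(struct notifier_block *nb, unsigned long event, void *ptr)
	{
		struct net_device *dev = ptr;	/* pre-3.11 kernels: ptr is the net_device */

		if (event != NETDEV_CHANGE)
			return NOTIFY_DONE;	/* only carrier changes are interesting */

		if (netif_carrier_ok(dev)) {
			/* link came up: (re)start the protocol state machine */
		} else {
			/* link went down: stop keepalives, note the outage */
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block my_notifier = {
		.notifier_call = my_event,
	};

	/* registered once at module init, mirroring hdlc_init():
	 *	register_netdevice_notifier(&my_notifier);
	 */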
+17 -18
drivers/net/wan/hdlc_cisco.c
···
  * as published by the Free Software Foundation.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
 #include <linux/errno.h>
-#include <linux/if_arp.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/pkt_sched.h>
-#include <linux/inetdevice.h>
-#include <linux/lapb.h>
-#include <linux/rtnetlink.h>
 #include <linux/hdlc.h>
 
 #undef DEBUG_HARD_HEADER
···
 static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr);
 
 
-static inline struct cisco_state * state(hdlc_device *hdlc)
 {
-	return(struct cisco_state *)(hdlc->state);
 }
 
···
 	    data->address != CISCO_UNICAST)
 		goto rx_error;
 
-	switch(ntohs(data->protocol)) {
 	case CISCO_SYS_INFO:
 		/* Packet is not needed, drop it. */
 		dev_kfree_skb_any(skb);
···
 static const struct header_ops cisco_header_ops = {
 	.create = cisco_hard_header,
 };
-
 static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
 {
 	cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
···
 		return 0;
 
 	case IF_PROTO_CISCO:
-		if(!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		if(dev->flags & IFF_UP)
 			return -EBUSY;
 
 		if (copy_from_user(&new_settings, cisco_s, size))
···
 		    new_settings.timeout < 2)
 			return -EINVAL;
 
-		result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
 		if (result)
 			return result;
···
  * as published by the Free Software Foundation.
  */
 
 #include <linux/errno.h>
 #include <linux/hdlc.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pkt_sched.h>
+#include <linux/poll.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
 
 #undef DEBUG_HARD_HEADER
···
 static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr);
 
 
+static inline struct cisco_state* state(hdlc_device *hdlc)
 {
+	return (struct cisco_state *)hdlc->state;
 }
 
···
 	    data->address != CISCO_UNICAST)
 		goto rx_error;
 
+	switch (ntohs(data->protocol)) {
 	case CISCO_SYS_INFO:
 		/* Packet is not needed, drop it. */
 		dev_kfree_skb_any(skb);
···
 static const struct header_ops cisco_header_ops = {
 	.create = cisco_hard_header,
 };
+
 static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
 {
 	cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
···
 		return 0;
 
 	case IF_PROTO_CISCO:
+		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
+		if (dev->flags & IFF_UP)
 			return -EBUSY;
 
 		if (copy_from_user(&new_settings, cisco_s, size))
···
 		    new_settings.timeout < 2)
 			return -EINVAL;
 
+		result = hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
 		if (result)
 			return result;
···
 /*
  * IFTYPE defines
  */
-#define LMC_PPP         1		/* use sppp interface */
 #define LMC_NET         2		/* use direct net interface */
 #define LMC_RAW         3		/* use direct net interface */
···
 /*
  * IFTYPE defines
  */
+#define LMC_PPP         1		/* use generic HDLC interface */
 #define LMC_NET         2		/* use direct net interface */
 #define LMC_RAW         3		/* use direct net interface */
+292 -376
drivers/net/wan/lmc/lmc_main.c
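The lmc_main.c conversion below is the largest in the series, and it applies one recipe throughout: allocate the softc separately, wrap it in an HDLC net_device, and reach private state through the hdlc_device instead of dev->priv. alloc_hdlcdev(), dev_to_hdlc() and register_hdlc_device() are the generic HDLC core's real entry points; the dev_to_sc() accessor is assumed to be the usual one-liner the new code relies on:

	static inline lmc_softc_t *dev_to_sc(struct net_device *dev)
	{
		return (lmc_softc_t *)dev_to_hdlc(dev)->priv;	/* assumed helper */
	}

	/* Probe-time wiring, mirroring the new lmc_init_one() below:
	 *
	 *	sc  = kzalloc(sizeof(*sc), GFP_KERNEL);	// softc allocated first
	 *	dev = alloc_hdlcdev(sc);		// net_device + hdlc_device pair
	 *	dev_to_hdlc(dev)->xmit   = lmc_start_xmit;
	 *	dev_to_hdlc(dev)->attach = lmc_attach;	// accepts only NRZ + CRC16-PR1-CCITT
	 *	err = register_hdlc_device(dev);	// replaces register_netdev()
	 */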
···
 /*
  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
  * All rights reserved.  www.lanmedia.com
  *
  * This code is written by:
  * Andrew Stanley-Jones (asj@cban.com)
···
  *
  */
 
-/* $Id: lmc_main.c,v 1.36 2000/04/11 05:25:25 asj Exp $ */
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/string.h>
···
 #include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/in.h>
 #include <linux/if_arp.h>
···
 #include <linux/skbuff.h>
 #include <linux/inet.h>
 #include <linux/bitops.h>
-
-#include <net/syncppp.h>
-
 #include <asm/processor.h>	/* Processor type for cache alignment. */
 #include <asm/io.h>
 #include <asm/dma.h>
···
 #include "lmc_debug.h"
 #include "lmc_proto.h"
 
-static int lmc_first_load = 0;
-
 static int LMC_PKT_BUF_SZ = 1542;
 
 static struct pci_device_id lmc_pci_tbl[] = {
···
 };
 
 MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
-MODULE_LICENSE("GPL");
 
 
-static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static int lmc_rx (struct net_device *dev);
 static int lmc_open(struct net_device *dev);
···
  * linux reserves 16 device specific IOCTLs.  We call them
  * LMCIOC* to control various bits of our world.
  */
-int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 {
-    lmc_softc_t *sc;
     lmc_ctl_t ctl;
-    int ret;
-    u_int16_t regVal;
     unsigned long flags;
-
-    struct sppp *sp;
-
-    ret = -EOPNOTSUPP;
-
-    sc = dev->priv;
 
     lmc_trace(dev, "lmc_ioctl in");
···
         break;
 
     case LMCIOCSINFO: /*fold01*/
-        sp = &((struct ppp_device *) dev)->sppp;
         if (!capable(CAP_NET_ADMIN)) {
             ret = -EPERM;
             break;
···
             sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
         }
 
-        if (ctl.keepalive_onoff == LMC_CTL_OFF)
-            sp->pp_flags &= ~PP_KEEPALIVE;	/* Turn off */
-        else
-            sp->pp_flags |= PP_KEEPALIVE;	/* Turn on */
-
         ret = 0;
         break;
 
     case LMCIOCIFTYPE: /*fold01*/
         {
-            u_int16_t old_type = sc->if_type;
-            u_int16_t new_type;
 
             if (!capable(CAP_NET_ADMIN)) {
                 ret = -EPERM;
                 break;
             }
 
-            if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t))) {
                 ret = -EFAULT;
                 break;
             }
···
             }
 
             lmc_proto_close(sc);
-            lmc_proto_detach(sc);
 
             sc->if_type = new_type;
-//            lmc_proto_init(sc);
             lmc_proto_attach(sc);
-            lmc_proto_open(sc);
-
-            ret = 0 ;
-            break ;
         }
 
     case LMCIOCGETXINFO: /*fold01*/
···
 
         break;
 
-    case LMCIOCGETLMCSTATS: /*fold01*/
-        if (sc->lmc_cardtype == LMC_CARDTYPE_T1){
-            lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_LSB);
-            sc->stats.framingBitErrorCount +=
-                lmc_mii_readreg (sc, 0, 18) & 0xff;
-            lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_MSB);
-            sc->stats.framingBitErrorCount +=
-                (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8;
-            lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_LSB);
-            sc->stats.lineCodeViolationCount +=
-                lmc_mii_readreg (sc, 0, 18) & 0xff;
-            lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_MSB);
-            sc->stats.lineCodeViolationCount +=
-                (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8;
-            lmc_mii_writereg (sc, 0, 17, T1FRAMER_AERR);
-            regVal = lmc_mii_readreg (sc, 0, 18) & 0xff;
 
-            sc->stats.lossOfFrameCount +=
-                (regVal & T1FRAMER_LOF_MASK) >> 4;
-            sc->stats.changeOfFrameAlignmentCount +=
-                (regVal & T1FRAMER_COFA_MASK) >> 2;
-            sc->stats.severelyErroredFrameCount +=
-                regVal & T1FRAMER_SEF_MASK;
-        }
 
-        if (copy_to_user(ifr->ifr_data, &sc->stats,
-                         sizeof (struct lmc_statistics)))
-            ret = -EFAULT;
-        else
-            ret = 0;
-        break;
 
-    case LMCIOCCLEARLMCSTATS: /*fold01*/
-        if (!capable(CAP_NET_ADMIN)){
-            ret = -EPERM;
-            break;
-        }
-
-        memset (&sc->stats, 0, sizeof (struct lmc_statistics));
-        sc->stats.check = STATCHECK;
-        sc->stats.version_size = (DRIVER_VERSION << 16) +
-            sizeof (struct lmc_statistics);
-        sc->stats.lmc_cardtype = sc->lmc_cardtype;
-        ret = 0;
-        break;
 
     case LMCIOCSETCIRCUIT: /*fold01*/
         if (!capable(CAP_NET_ADMIN)){
···
             ret = -EFAULT;
             break;
         }
-        if (copy_to_user(ifr->ifr_data + sizeof (u32), lmcEventLogBuf, sizeof (lmcEventLogBuf)))
             ret = -EFAULT;
         else
             ret = 0;
···
 /* the watchdog process that cruises around */
 static void lmc_watchdog (unsigned long data) /*fold00*/
 {
-    struct net_device *dev = (struct net_device *) data;
-    lmc_softc_t *sc;
     int link_status;
-    u_int32_t ticks;
     unsigned long flags;
-
-    sc = dev->priv;
 
     lmc_trace(dev, "lmc_watchdog in");
···
      * check for a transmit interrupt timeout
      * Has the packet xmt vs xmt serviced threshold been exceeded */
     if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
-        sc->stats.tx_packets > sc->lasttx_packets &&
-        sc->tx_TimeoutInd == 0)
     {
 
         /* wait for the watchdog to come around again */
         sc->tx_TimeoutInd = 1;
     }
     else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
-             sc->stats.tx_packets > sc->lasttx_packets &&
-             sc->tx_TimeoutInd)
     {
 
         LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
 
         sc->tx_TimeoutDisplay = 1;
-        sc->stats.tx_TimeoutCnt++;
 
         /* DEC chip is stuck, hit it with a RESET!!!! */
         lmc_running_reset (dev);
···
     /* reset the transmit timeout detection flag */
     sc->tx_TimeoutInd = 0;
     sc->lastlmc_taint_tx = sc->lmc_taint_tx;
-    sc->lasttx_packets = sc->stats.tx_packets;
-    }
-    else
-    {
     sc->tx_TimeoutInd = 0;
     sc->lastlmc_taint_tx = sc->lmc_taint_tx;
-    sc->lasttx_packets = sc->stats.tx_packets;
     }
 
     /* --- end time out check ----------------------------------- */
···
 	sc->last_link_status = 1;
 	/* lmc_reset (sc); Again why reset??? */
 
-	/* Inform the world that link protocol is back up. */
 	netif_carrier_on(dev);
-
-	/* Now we have to tell the syncppp that we had an outage
-	 * and that it should deal.  Calling sppp_reopen here
-	 * should do the trick, but we may have to call sppp_close
-	 * when the link goes down, and call sppp_open here.
-	 * Subject to more testing.
-	 * --bbraun
-	 */
-
-	lmc_proto_reopen(sc);
-
     }
 
     /* Call media specific watchdog functions */
···
 
 }
 
-static void lmc_setup(struct net_device * const dev) /*fold00*/
 {
-    lmc_trace(dev, "lmc_setup in");
-
-    dev->type = ARPHRD_HDLC;
-    dev->hard_start_xmit = lmc_start_xmit;
-    dev->open = lmc_open;
-    dev->stop = lmc_close;
-    dev->get_stats = lmc_get_stats;
-    dev->do_ioctl = lmc_ioctl;
-    dev->tx_timeout = lmc_driver_timeout;
-    dev->watchdog_timeo = (HZ); /* 1 second */
-
-    lmc_trace(dev, "lmc_setup out");
 }
-
 
 static int __devinit lmc_init_one(struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
-    struct net_device *dev;
-    lmc_softc_t *sc;
-    u16 subdevice;
-    u_int16_t AdapModelNum;
-    int err = -ENOMEM;
-    static int cards_found;
-#ifndef GCOM
-    /* We name by type not by vendor */
-    static const char lmcname[] = "hdlc%d";
-#else
-    /*
-     * GCOM uses LMC vendor name so that clients can know which card
-     * to attach to.
-     */
-    static const char lmcname[] = "lmc%d";
-#endif
 
 
-    /*
-     * Allocate our own device structure
-     */
-    dev = alloc_netdev(sizeof(lmc_softc_t), lmcname, lmc_setup);
-    if (!dev) {
-        printk (KERN_ERR "lmc:alloc_netdev for device failed\n");
-        goto out1;
-    }
-
-    lmc_trace(dev, "lmc_init_one in");
 
-    err = pci_enable_device(pdev);
-    if (err) {
-        printk(KERN_ERR "lmc: pci enable failed:%d\n", err);
-        goto out2;
-    }
-
-    if (pci_request_regions(pdev, "lmc")) {
-        printk(KERN_ERR "lmc: pci_request_region failed\n");
-        err = -EIO;
-        goto out3;
-    }
 
-    pci_set_drvdata(pdev, dev);
 
-    if(lmc_first_load == 0){
-        printk(KERN_INFO "Lan Media Corporation WAN Driver Version %d.%d.%d\n",
-	       DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION, DRIVER_SUB_VERSION);
-        lmc_first_load = 1;
-    }
-
-    sc = dev->priv;
-    sc->lmc_device = dev;
-    sc->name = dev->name;
 
-    /* Initialize the sppp layer */
-    /* An ioctl can cause a subsequent detach for raw frame interface */
-    dev->ml_priv = sc;
-    sc->if_type = LMC_PPP;
-    sc->check = 0xBEAFCAFE;
-    dev->base_addr = pci_resource_start(pdev, 0);
-    dev->irq = pdev->irq;
 
-    SET_NETDEV_DEV(dev, &pdev->dev);
-
-    /*
-     * This will get the protocol layer ready and do any 1 time init's
-     * Must have a valid sc and dev structure
-     */
-    lmc_proto_init(sc);
-
-    lmc_proto_attach(sc);
-
-    /*
-     * Why were we changing this???
-     dev->tx_queue_len = 100;
-     */
-
-    /* Init the spin lock so can call it later */
-
-    spin_lock_init(&sc->lmc_lock);
-    pci_set_master(pdev);
-
-    printk ("%s: detected at %lx, irq %d\n", dev->name,
-	    dev->base_addr, dev->irq);
-
-    if (register_netdev (dev) != 0) {
-        printk (KERN_ERR "%s: register_netdev failed.\n", dev->name);
-        goto out4;
-    }
 
     sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
     sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
···
 
     switch (subdevice) {
     case PCI_DEVICE_ID_LMC_HSSI:
-	printk ("%s: LMC HSSI\n", dev->name);
 	sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
 	sc->lmc_media = &lmc_hssi_media;
 	break;
     case PCI_DEVICE_ID_LMC_DS3:
-	printk ("%s: LMC DS3\n", dev->name);
 	sc->lmc_cardtype = LMC_CARDTYPE_DS3;
 	sc->lmc_media = &lmc_ds3_media;
 	break;
     case PCI_DEVICE_ID_LMC_SSI:
-	printk ("%s: LMC SSI\n", dev->name);
 	sc->lmc_cardtype = LMC_CARDTYPE_SSI;
 	sc->lmc_media = &lmc_ssi_media;
 	break;
     case PCI_DEVICE_ID_LMC_T1:
-	printk ("%s: LMC T1\n", dev->name);
 	sc->lmc_cardtype = LMC_CARDTYPE_T1;
 	sc->lmc_media = &lmc_t1_media;
 	break;
     default:
-	printk (KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
 	break;
     }
···
      */
     AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
 
-    if ((AdapModelNum == LMC_ADAP_T1
-         && subdevice == PCI_DEVICE_ID_LMC_T1) ||	/* detect LMC1200 */
-        (AdapModelNum == LMC_ADAP_SSI
-         && subdevice == PCI_DEVICE_ID_LMC_SSI) ||	/* detect LMC1000 */
-        (AdapModelNum == LMC_ADAP_DS3
-         && subdevice == PCI_DEVICE_ID_LMC_DS3) ||	/* detect LMC5245 */
-        (AdapModelNum == LMC_ADAP_HSSI
-         && subdevice == PCI_DEVICE_ID_LMC_HSSI))
-    {				/* detect LMC5200 */
 
-    }
-    else {
-        printk ("%s: Model number (%d) miscompare for PCI Subsystem ID = 0x%04x\n",
-                dev->name, AdapModelNum, subdevice);
-//        return (NULL);
-    }
     /*
      * reset clock
      */
     LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
 
     sc->board_idx = cards_found++;
-    sc->stats.check = STATCHECK;
-    sc->stats.version_size = (DRIVER_VERSION << 16) +
-        sizeof (struct lmc_statistics);
-    sc->stats.lmc_cardtype = sc->lmc_cardtype;
 
     sc->lmc_ok = 0;
     sc->last_link_status = 0;
···
     lmc_trace(dev, "lmc_init_one out");
     return 0;
 
- out4:
-    lmc_proto_detach(sc);
- out3:
-    if (pdev) {
-        pci_release_regions(pdev);
-        pci_set_drvdata(pdev, NULL);
-    }
- out2:
-    free_netdev(dev);
- out1:
-    return err;
 }
 
 /*
  * Called from pci when removing module.
  */
-static void __devexit lmc_remove_one (struct pci_dev *pdev)
 {
-    struct net_device *dev = pci_get_drvdata(pdev);
-
-    if (dev) {
-        lmc_softc_t *sc = dev->priv;
-
-        printk("%s: removing...\n", dev->name);
-        lmc_proto_detach(sc);
-        unregister_netdev(dev);
-        free_netdev(dev);
-        pci_release_regions(pdev);
-        pci_disable_device(pdev);
-        pci_set_drvdata(pdev, NULL);
-    }
 }
 
 /* After this is called, packets can be sent.
  * Does not initialize the addresses
  */
-static int lmc_open (struct net_device *dev) /*fold00*/
 {
-    lmc_softc_t *sc = dev->priv;
 
     lmc_trace(dev, "lmc_open in");
 
     lmc_led_on(sc, LMC_DS3_LED0);
 
-    lmc_dec_reset (sc);
-    lmc_reset (sc);
 
-    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
-    LMC_EVENT_LOG(LMC_EVENT_RESET2,
-                  lmc_mii_readreg (sc, 0, 16),
-                  lmc_mii_readreg (sc, 0, 17));
-
 
     if (sc->lmc_ok){
         lmc_trace(dev, "lmc_open lmc_ok out");
···
 
     /* dev->flags |= IFF_UP; */
 
-    lmc_proto_open(sc);
 
     dev->do_ioctl = lmc_ioctl;
 
 
     netif_start_queue(dev);
-
-    sc->stats.tx_tbusy0++ ;
 
     /*
      * select what interrupts we want to get
···
 
 static void lmc_running_reset (struct net_device *dev) /*fold00*/
 {
-
-    lmc_softc_t *sc = (lmc_softc_t *) dev->priv;
 
     lmc_trace(dev, "lmc_running_reset in");
···
     netif_wake_queue(dev);
 
     sc->lmc_txfull = 0;
-    sc->stats.tx_tbusy0++ ;
 
     sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
     LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
···
  * This disables the timer for the watchdog and keepalives,
  * and disables the irq for dev.
  */
-static int lmc_close (struct net_device *dev) /*fold00*/
 {
     /* not calling release_region() as we should */
-    lmc_softc_t *sc;
 
     lmc_trace(dev, "lmc_close in");
-
-    sc = dev->priv;
     sc->lmc_ok = 0;
     sc->lmc_media->set_link_status (sc, 0);
     del_timer (&sc->timer);
···
     lmc_ifdown (dev);
 
     lmc_trace(dev, "lmc_close out");
-
     return 0;
 }
···
 /* When the interface goes down, this is called */
 static int lmc_ifdown (struct net_device *dev) /*fold00*/
 {
-    lmc_softc_t *sc = dev->priv;
     u32 csr6;
     int i;
 
     lmc_trace(dev, "lmc_ifdown in");
-
     /* Don't let anything else go on right now */
     // dev->start = 0;
     netif_stop_queue(dev);
-    sc->stats.tx_tbusy1++ ;
 
     /* stop interrupts */
     /* Clear the interrupt mask */
···
     csr6 &= ~LMC_DEC_SR;	/* Turn off the Receive bit */
     LMC_CSR_WRITE (sc, csr_command, csr6);
 
-    sc->stats.rx_missed_errors +=
-        LMC_CSR_READ (sc, csr_missed_frames) & 0xffff;
 
     /* release the interrupt */
     if(sc->got_irq == 1){
···
     lmc_led_off (sc, LMC_MII16_LED_ALL);
 
     netif_wake_queue(dev);
-    sc->stats.tx_tbusy0++ ;
 
     lmc_trace(dev, "lmc_ifdown out");
···
 static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
 {
     struct net_device *dev = (struct net_device *) dev_instance;
-    lmc_softc_t *sc;
     u32 csr;
     int i;
     s32 stat;
···
     lmc_trace(dev, "lmc_interrupt in");
 
-    sc = dev->priv;
-
     spin_lock(&sc->lmc_lock);
 
     /*
···
             int n_compl = 0 ;
             /* reset the transmit timeout detection flag -baz */
-            sc->stats.tx_NoCompleteCnt = 0;
 
             badtx = sc->lmc_taint_tx;
             i = badtx % LMC_TXDESCS;
···
                 if (sc->lmc_txq[i] == NULL)
                     continue;
 
-                /*
-                 * Check the total error summary to look for any errors
-                 */
-                if (stat & 0x8000) {
-                    sc->stats.tx_errors++;
-                    if (stat & 0x4104)
-                        sc->stats.tx_aborted_errors++;
-                    if (stat & 0x0C00)
-                        sc->stats.tx_carrier_errors++;
-                    if (stat & 0x0200)
-                        sc->stats.tx_window_errors++;
-                    if (stat & 0x0002)
-                        sc->stats.tx_fifo_errors++;
                 }
-                else {
-
-                    sc->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
-
-                    sc->stats.tx_packets++;
-                }
-
                 //                dev_kfree_skb(sc->lmc_txq[i]);
                 dev_kfree_skb_irq(sc->lmc_txq[i]);
                 sc->lmc_txq[i] = NULL;
···
                 LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
                 sc->lmc_txfull = 0;
                 netif_wake_queue(dev);
-                sc->stats.tx_tbusy0++ ;
 
 
 #ifdef DEBUG
-                sc->stats.dirtyTx = badtx;
-                sc->stats.lmc_next_tx = sc->lmc_next_tx;
-                sc->stats.lmc_txfull = sc->lmc_txfull;
 #endif
                 sc->lmc_taint_tx = badtx;
···
     return IRQ_RETVAL(handled);
 }
 
-static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00*/
 {
-    lmc_softc_t *sc;
     u32 flag;
     int entry;
     int ret = 0;
     unsigned long flags;
 
     lmc_trace(dev, "lmc_start_xmit in");
-
-    sc = dev->priv;
 
     spin_lock_irqsave(&sc->lmc_lock, flags);
···
     if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
     {				/* ring full, go busy */
         sc->lmc_txfull = 1;
-        netif_stop_queue(dev);
-        sc->stats.tx_tbusy1++ ;
         LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
     }
 #endif
···
      * the watchdog timer handler.  -baz
      */
 
-    sc->stats.tx_NoCompleteCnt++;
     sc->lmc_next_tx++;
 
     /* give ownership to the chip */
···
 }
 
 
-static int lmc_rx (struct net_device *dev) /*fold00*/
 {
-    lmc_softc_t *sc;
     int i;
     int rx_work_limit = LMC_RXDESCS;
     unsigned int next_rx;
···
     u16 len;
 
     lmc_trace(dev, "lmc_rx in");
-
-    sc = dev->priv;
 
     lmc_led_on(sc, LMC_DS3_LED3);
···
         rxIntLoopCnt++;		/* debug -baz */
         len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
         if ((stat & 0x0300) != 0x0300) {  /* Check first segment and last segment */
-            if ((stat & 0x0000ffff) != 0x7fff) {
-                /* Oversized frame */
-                sc->stats.rx_length_errors++;
-                goto skip_packet;
-            }
-        }
 
-        if(stat & 0x00000008){ /* Catch a dribbling bit error */
-            sc->stats.rx_errors++;
-            sc->stats.rx_frame_errors++;
-            goto skip_packet;
-        }
-
-
-        if(stat & 0x00000004){ /* Catch a CRC error by the Xilinx */
-            sc->stats.rx_errors++;
-            sc->stats.rx_crc_errors++;
-            goto skip_packet;
-        }
 
 
-        if (len > LMC_PKT_BUF_SZ){
-            sc->stats.rx_length_errors++;
-            localLengthErrCnt++;
-            goto skip_packet;
-        }
 
-        if (len < sc->lmc_crcSize + 2) {
-            sc->stats.rx_length_errors++;
-            sc->stats.rx_SmallPktCnt++;
-            localLengthErrCnt++;
-            goto skip_packet;
-        }
 
         if(stat & 0x00004000){
             printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
···
         }
 
         dev->last_rx = jiffies;
-        sc->stats.rx_packets++;
-        sc->stats.rx_bytes += len;
 
         LMC_CONSOLE_LOG("recv", skb->data, len);
···
 
         skb_put (skb, len);
         skb->protocol = lmc_proto_type(sc, skb);
-        skb->protocol = htons(ETH_P_WAN_PPP);
         skb_reset_mac_header(skb);
         /* skb_reset_network_header(skb); */
         skb->dev = dev;
···
          * in which case we'll try to allocate the buffer
          * again.  (once a second)
          */
-        sc->stats.rx_BuffAllocErr++;
         LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
         sc->failed_recv_alloc = 1;
         goto skip_out_of_mem;
···
      * descriptors with bogus packets
      *
     if (localLengthErrCnt > LMC_RXDESCS - 3) {
-        sc->stats.rx_BadPktSurgeCnt++;
-        LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE,
-                      localLengthErrCnt,
-                      sc->stats.rx_BadPktSurgeCnt);
     } */
 
     /* save max count of receive descriptors serviced */
-    if (rxIntLoopCnt > sc->stats.rxIntLoopCnt) {
-        sc->stats.rxIntLoopCnt = rxIntLoopCnt;	/* debug -baz */
-    }
 
 #ifdef DEBUG
     if (rxIntLoopCnt == 0)
···
     return 0;
 }
 
-static struct net_device_stats *lmc_get_stats (struct net_device *dev) /*fold00*/
 {
-    lmc_softc_t *sc = dev->priv;
     unsigned long flags;
 
     lmc_trace(dev, "lmc_get_stats in");
 
-
     spin_lock_irqsave(&sc->lmc_lock, flags);
 
-    sc->stats.rx_missed_errors += LMC_CSR_READ (sc, csr_missed_frames) & 0xffff;
 
     spin_unlock_irqrestore(&sc->lmc_lock, flags);
 
     lmc_trace(dev, "lmc_get_stats out");
 
-    return (struct net_device_stats *) &sc->stats;
 }
 
 static struct pci_driver lmc_driver = {
···
     {
         if (sc->lmc_txq[i] != NULL){		/* have buffer */
             dev_kfree_skb(sc->lmc_txq[i]);	/* free it */
-            sc->stats.tx_dropped++;	/* We just dropped a packet */
         }
         sc->lmc_txq[i] = NULL;
         sc->lmc_txring[i].status = 0x00000000;
···
     lmc_trace(sc->lmc_device, "lmc_softreset out");
 }
 
-void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
 {
     lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
     sc->lmc_gpio_io &= ~bits;
···
     lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
 }
 
-void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
 {
     lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
     sc->lmc_gpio_io |= bits;
···
     lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
 }
 
-void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/
 {
     lmc_trace(sc->lmc_device, "lmc_led_on in");
     if((~sc->lmc_miireg16) & led){ /* Already on! */
···
     lmc_trace(sc->lmc_device, "lmc_led_on out");
 }
 
-void lmc_led_off(lmc_softc_t * const sc, u_int32_t led) /*fold00*/
 {
     lmc_trace(sc->lmc_device, "lmc_led_off in");
     if(sc->lmc_miireg16 & led){ /* Already set don't do anything */
···
      */
     sc->lmc_media->init(sc);
 
-    sc->stats.resetCount++;
     lmc_trace(sc->lmc_device, "lmc_reset out");
 }
 
 static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
 {
-    u_int32_t val;
     lmc_trace(sc->lmc_device, "lmc_dec_reset in");
 
     /*
···
     lmc_trace(sc->lmc_device, "lmc_initcsrs out");
 }
 
-static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/
-    lmc_softc_t *sc;
     u32 csr6;
     unsigned long flags;
 
     lmc_trace(dev, "lmc_driver_timeout in");
 
-    sc = dev->priv;
-
     spin_lock_irqsave(&sc->lmc_lock, flags);
 
     printk("%s: Xmitter busy|\n", dev->name);
 
-    sc->stats.tx_tbusy_calls++ ;
-    if (jiffies - dev->trans_start < TX_TIMEOUT) {
-        goto bug_out;
-    }
 
     /*
      * Chip seems to have locked up
···
     LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
                   LMC_CSR_READ (sc, csr_status),
-                  sc->stats.tx_ProcTimeout);
 
     lmc_running_reset (dev);
···
     /* immediate transmit */
     LMC_CSR_WRITE (sc, csr_txpoll, 0);
 
-    sc->stats.tx_errors++;
-    sc->stats.tx_ProcTimeout++;	/* -baz */
 
     dev->trans_start = jiffies;
···
 /*
  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
  * All rights reserved.  www.lanmedia.com
+ * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
  *
  * This code is written by:
  * Andrew Stanley-Jones (asj@cban.com)
···
  *
  */
 
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/string.h>
···
 #include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
+#include <linux/hdlc.h>
 #include <linux/init.h>
 #include <linux/in.h>
 #include <linux/if_arp.h>
···
 #include <linux/skbuff.h>
 #include <linux/inet.h>
 #include <linux/bitops.h>
 #include <asm/processor.h>	/* Processor type for cache alignment. */
 #include <asm/io.h>
 #include <asm/dma.h>
···
 #include "lmc_debug.h"
 #include "lmc_proto.h"
 
 static int LMC_PKT_BUF_SZ = 1542;
 
 static struct pci_device_id lmc_pci_tbl[] = {
···
 };
 
 MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
+MODULE_LICENSE("GPL v2");
 
 
 static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static int lmc_rx (struct net_device *dev);
 static int lmc_open(struct net_device *dev);
···
  * linux reserves 16 device specific IOCTLs.  We call them
  * LMCIOC* to control various bits of our world.
  */
+int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
 {
+    lmc_softc_t *sc = dev_to_sc(dev);
     lmc_ctl_t ctl;
+    int ret = -EOPNOTSUPP;
+    u16 regVal;
     unsigned long flags;
 
     lmc_trace(dev, "lmc_ioctl in");
···
         break;
 
     case LMCIOCSINFO: /*fold01*/
         if (!capable(CAP_NET_ADMIN)) {
             ret = -EPERM;
             break;
···
             sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
         }
 
         ret = 0;
         break;
 
     case LMCIOCIFTYPE: /*fold01*/
         {
+            u16 old_type = sc->if_type;
+            u16 new_type;
 
             if (!capable(CAP_NET_ADMIN)) {
                 ret = -EPERM;
                 break;
             }
 
+            if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
                 ret = -EFAULT;
                 break;
             }
···
             }
 
             lmc_proto_close(sc);
 
             sc->if_type = new_type;
             lmc_proto_attach(sc);
+            ret = lmc_proto_open(sc);
+            break;
         }
 
     case LMCIOCGETXINFO: /*fold01*/
···
 
         break;
 
+    case LMCIOCGETLMCSTATS:
+        if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
+            lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
+            sc->extra_stats.framingBitErrorCount +=
+                lmc_mii_readreg(sc, 0, 18) & 0xff;
+            lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
+            sc->extra_stats.framingBitErrorCount +=
+                (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
+            lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
+            sc->extra_stats.lineCodeViolationCount +=
+                lmc_mii_readreg(sc, 0, 18) & 0xff;
+            lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
+            sc->extra_stats.lineCodeViolationCount +=
+                (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
+            lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
+            regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;
 
+            sc->extra_stats.lossOfFrameCount +=
+                (regVal & T1FRAMER_LOF_MASK) >> 4;
+            sc->extra_stats.changeOfFrameAlignmentCount +=
+                (regVal & T1FRAMER_COFA_MASK) >> 2;
+            sc->extra_stats.severelyErroredFrameCount +=
+                regVal & T1FRAMER_SEF_MASK;
+        }
+        if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
+                         sizeof(sc->lmc_device->stats)) ||
+            copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
+                         &sc->extra_stats, sizeof(sc->extra_stats)))
+            ret = -EFAULT;
+        else
+            ret = 0;
+        break;
 
+    case LMCIOCCLEARLMCSTATS:
+        if (!capable(CAP_NET_ADMIN)) {
+            ret = -EPERM;
+            break;
+        }
 
+        memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
+        memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
+        sc->extra_stats.check = STATCHECK;
+        sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
+            sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
+        sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
+        ret = 0;
+        break;
 
     case LMCIOCSETCIRCUIT: /*fold01*/
         if (!capable(CAP_NET_ADMIN)){
···
             ret = -EFAULT;
             break;
         }
+        if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
+                         sizeof(lmcEventLogBuf)))
             ret = -EFAULT;
         else
             ret = 0;
···
 /* the watchdog process that cruises around */
 static void lmc_watchdog (unsigned long data) /*fold00*/
 {
+    struct net_device *dev = (struct net_device *)data;
+    lmc_softc_t *sc = dev_to_sc(dev);
     int link_status;
+    u32 ticks;
     unsigned long flags;
 
     lmc_trace(dev, "lmc_watchdog in");
···
      * check for a transmit interrupt timeout
      * Has the packet xmt vs xmt serviced threshold been exceeded */
     if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
+        sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
+        sc->tx_TimeoutInd == 0)
     {
 
         /* wait for the watchdog to come around again */
         sc->tx_TimeoutInd = 1;
     }
     else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
+             sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
+             sc->tx_TimeoutInd)
     {
 
         LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
 
         sc->tx_TimeoutDisplay = 1;
+        sc->extra_stats.tx_TimeoutCnt++;
 
         /* DEC chip is stuck, hit it with a RESET!!!! */
         lmc_running_reset (dev);
···
     /* reset the transmit timeout detection flag */
     sc->tx_TimeoutInd = 0;
     sc->lastlmc_taint_tx = sc->lmc_taint_tx;
+    sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
+    } else {
     sc->tx_TimeoutInd = 0;
     sc->lastlmc_taint_tx = sc->lmc_taint_tx;
+    sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
     }
 
     /* --- end time out check ----------------------------------- */
···
 	sc->last_link_status = 1;
 	/* lmc_reset (sc); Again why reset??? */
 
 	netif_carrier_on(dev);
     }
 
     /* Call media specific watchdog functions */
···
 
 }
 
+static int lmc_attach(struct net_device *dev, unsigned short encoding,
+		      unsigned short parity)
 {
+	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
+		return 0;
+	return -EINVAL;
 }
 
 static int __devinit lmc_init_one(struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
+	lmc_softc_t *sc;
+	struct net_device *dev;
+	u16 subdevice;
+	u16 AdapModelNum;
+	int err;
+	static int cards_found;
+
+	/* lmc_trace(dev, "lmc_init_one in"); */
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
+		return err;
+	}
+
+	err = pci_request_regions(pdev, "lmc");
+	if (err) {
+		printk(KERN_ERR "lmc: pci_request_region failed\n");
+		goto err_req_io;
+	}
+
+	/*
+	 * Allocate our own device structure
+	 */
+	sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL);
+	if (!sc) {
+		err = -ENOMEM;
+		goto err_kzalloc;
+	}
+
+	dev = alloc_hdlcdev(sc);
+	if (!dev) {
+		printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
+		goto err_hdlcdev;
+	}
 
 
+	dev->type = ARPHRD_HDLC;
+	dev_to_hdlc(dev)->xmit = lmc_start_xmit;
+	dev_to_hdlc(dev)->attach = lmc_attach;
+	dev->open = lmc_open;
+	dev->stop = lmc_close;
+	dev->get_stats = lmc_get_stats;
+	dev->do_ioctl = lmc_ioctl;
+	dev->tx_timeout = lmc_driver_timeout;
+	dev->watchdog_timeo = HZ; /* 1 second */
+	dev->tx_queue_len = 100;
+	sc->lmc_device = dev;
+	sc->name = dev->name;
+	sc->if_type = LMC_PPP;
+	sc->check = 0xBEAFCAFE;
+	dev->base_addr = pci_resource_start(pdev, 0);
+	dev->irq = pdev->irq;
+	pci_set_drvdata(pdev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
 
+	/*
+	 * This will get the protocol layer ready and do any 1 time init's
+	 * Must have a valid sc and dev structure
+	 */
+	lmc_proto_attach(sc);
 
+	/* Init the spin lock so can call it later */
 
+	spin_lock_init(&sc->lmc_lock);
+	pci_set_master(pdev);
 
+	printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
+	       dev->base_addr, dev->irq);
 
+	err = register_hdlc_device(dev);
+	if (err) {
+		printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
+		free_netdev(dev);
+		goto err_hdlcdev;
+	}
 
     sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
     sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
···
 
     switch (subdevice) {
     case PCI_DEVICE_ID_LMC_HSSI:
+	printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
 	sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
 	sc->lmc_media = &lmc_hssi_media;
 	break;
     case PCI_DEVICE_ID_LMC_DS3:
+	printk(KERN_INFO "%s: LMC DS3\n", dev->name);
 	sc->lmc_cardtype = LMC_CARDTYPE_DS3;
 	sc->lmc_media = &lmc_ds3_media;
 	break;
     case PCI_DEVICE_ID_LMC_SSI:
+	printk(KERN_INFO "%s: LMC SSI\n", dev->name);
 	sc->lmc_cardtype = LMC_CARDTYPE_SSI;
 	sc->lmc_media = &lmc_ssi_media;
 	break;
     case PCI_DEVICE_ID_LMC_T1:
+	printk(KERN_INFO "%s: LMC T1\n", dev->name);
 	sc->lmc_cardtype = LMC_CARDTYPE_T1;
 	sc->lmc_media = &lmc_t1_media;
 	break;
     default:
+	printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
 	break;
     }
···
      */
     AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
 
+    if ((AdapModelNum != LMC_ADAP_T1 ||	/* detect LMC1200 */
+	 subdevice != PCI_DEVICE_ID_LMC_T1) &&
+	(AdapModelNum != LMC_ADAP_SSI ||	/* detect LMC1000 */
+	 subdevice != PCI_DEVICE_ID_LMC_SSI) &&
+	(AdapModelNum != LMC_ADAP_DS3 ||	/* detect LMC5245 */
+	 subdevice != PCI_DEVICE_ID_LMC_DS3) &&
+	(AdapModelNum != LMC_ADAP_HSSI ||	/* detect LMC5200 */
+	 subdevice != PCI_DEVICE_ID_LMC_HSSI))
+	printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
+	       " Subsystem ID = 0x%04x\n",
+	       dev->name, AdapModelNum, subdevice);
 
     /*
      * reset clock
      */
     LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
 
     sc->board_idx = cards_found++;
+    sc->extra_stats.check = STATCHECK;
+    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
+        sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
+    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
 
     sc->lmc_ok = 0;
     sc->last_link_status = 0;
···
     lmc_trace(dev, "lmc_init_one out");
     return 0;
 
+err_hdlcdev:
+	pci_set_drvdata(pdev, NULL);
+	kfree(sc);
+err_kzalloc:
+	pci_release_regions(pdev);
+err_req_io:
+	pci_disable_device(pdev);
+	return err;
 }
 
 /*
  * Called from pci when removing module.
  */
+static void __devexit lmc_remove_one(struct pci_dev *pdev)
 {
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	if (dev) {
+		printk(KERN_DEBUG "%s: removing...\n", dev->name);
+		unregister_hdlc_device(dev);
+		free_netdev(dev);
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		pci_set_drvdata(pdev, NULL);
+	}
 }
 
 /* After this is called, packets can be sent.
  * Does not initialize the addresses
  */
+static int lmc_open(struct net_device *dev)
 {
+    lmc_softc_t *sc = dev_to_sc(dev);
+    int err;
 
     lmc_trace(dev, "lmc_open in");
 
     lmc_led_on(sc, LMC_DS3_LED0);
 
+    lmc_dec_reset(sc);
+    lmc_reset(sc);
 
+    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
+    LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
+		  lmc_mii_readreg(sc, 0, 17));
 
     if (sc->lmc_ok){
         lmc_trace(dev, "lmc_open lmc_ok out");
···
 
     /* dev->flags |= IFF_UP; */
 
+    if ((err = lmc_proto_open(sc)) != 0)
+	    return err;
 
     dev->do_ioctl = lmc_ioctl;
 
 
     netif_start_queue(dev);
+    sc->extra_stats.tx_tbusy0++;
 
     /*
      * select what interrupts we want to get
···
 
 static void lmc_running_reset (struct net_device *dev) /*fold00*/
 {
+    lmc_softc_t *sc = dev_to_sc(dev);
 
     lmc_trace(dev, "lmc_running_reset in");
···
     netif_wake_queue(dev);
 
     sc->lmc_txfull = 0;
+    sc->extra_stats.tx_tbusy0++;
 
     sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
     LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
···
  * This disables the timer for the watchdog and keepalives,
  * and disables the irq for dev.
  */
+static int lmc_close(struct net_device *dev)
 {
     /* not calling release_region() as we should */
+    lmc_softc_t *sc = dev_to_sc(dev);
 
     lmc_trace(dev, "lmc_close in");
+
     sc->lmc_ok = 0;
     sc->lmc_media->set_link_status (sc, 0);
     del_timer (&sc->timer);
···
     lmc_ifdown (dev);
 
     lmc_trace(dev, "lmc_close out");
+
     return 0;
 }
···
 /* When the interface goes down, this is called */
 static int lmc_ifdown (struct net_device *dev) /*fold00*/
 {
+    lmc_softc_t *sc = dev_to_sc(dev);
     u32 csr6;
     int i;
 
     lmc_trace(dev, "lmc_ifdown in");
+
     /* Don't let anything else go on right now */
     // dev->start = 0;
     netif_stop_queue(dev);
+    sc->extra_stats.tx_tbusy1++;
 
     /* stop interrupts */
     /* Clear the interrupt mask */
···
     csr6 &= ~LMC_DEC_SR;	/* Turn off the Receive bit */
     LMC_CSR_WRITE (sc, csr_command, csr6);
 
+    sc->lmc_device->stats.rx_missed_errors +=
+        LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
 
     /* release the interrupt */
     if(sc->got_irq == 1){
···
     lmc_led_off (sc, LMC_MII16_LED_ALL);
 
     netif_wake_queue(dev);
+    sc->extra_stats.tx_tbusy0++;
 
     lmc_trace(dev, "lmc_ifdown out");
···
 static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
 {
     struct net_device *dev = (struct net_device *) dev_instance;
+    lmc_softc_t *sc = dev_to_sc(dev);
     u32 csr;
     int i;
     s32 stat;
···
     lmc_trace(dev, "lmc_interrupt in");
 
     spin_lock(&sc->lmc_lock);
 
     /*
···
             int n_compl = 0 ;
             /* reset the transmit timeout detection flag -baz */
+            sc->extra_stats.tx_NoCompleteCnt = 0;
 
             badtx = sc->lmc_taint_tx;
             i = badtx % LMC_TXDESCS;
···
                 if (sc->lmc_txq[i] == NULL)
                     continue;
 
+		/*
+		 * Check the total error summary to look for any errors
+		 */
+		if (stat & 0x8000) {
+			sc->lmc_device->stats.tx_errors++;
+			if (stat & 0x4104)
+				sc->lmc_device->stats.tx_aborted_errors++;
+			if (stat & 0x0C00)
+				sc->lmc_device->stats.tx_carrier_errors++;
+			if (stat & 0x0200)
+				sc->lmc_device->stats.tx_window_errors++;
+			if (stat & 0x0002)
+				sc->lmc_device->stats.tx_fifo_errors++;
+		} else {
+			sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
+
+			sc->lmc_device->stats.tx_packets++;
                 }
+
                 //                dev_kfree_skb(sc->lmc_txq[i]);
                 dev_kfree_skb_irq(sc->lmc_txq[i]);
                 sc->lmc_txq[i] = NULL;
···
                 LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
                 sc->lmc_txfull = 0;
                 netif_wake_queue(dev);
+                sc->extra_stats.tx_tbusy0++;
 
 
 #ifdef DEBUG
+                sc->extra_stats.dirtyTx = badtx;
+                sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
+                sc->extra_stats.lmc_txfull = sc->lmc_txfull;
 #endif
                 sc->lmc_taint_tx = badtx;
···
     return IRQ_RETVAL(handled);
 }
 
+static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+    lmc_softc_t *sc = dev_to_sc(dev);
     u32 flag;
     int entry;
     int ret = 0;
     unsigned long flags;
 
     lmc_trace(dev, "lmc_start_xmit in");
 
     spin_lock_irqsave(&sc->lmc_lock, flags);
···
     if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
     {				/* ring full, go busy */
         sc->lmc_txfull = 1;
+        netif_stop_queue(dev);
+        sc->extra_stats.tx_tbusy1++;
         LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
     }
 #endif
···
      * the watchdog timer handler.  -baz
      */
 
+    sc->extra_stats.tx_NoCompleteCnt++;
     sc->lmc_next_tx++;
 
     /* give ownership to the chip */
···
 }
 
 
+static int lmc_rx(struct net_device *dev)
 {
+    lmc_softc_t *sc = dev_to_sc(dev);
     int i;
     int rx_work_limit = LMC_RXDESCS;
     unsigned int next_rx;
···
     u16 len;
 
     lmc_trace(dev, "lmc_rx in");
 
     lmc_led_on(sc, LMC_DS3_LED3);
···
         rxIntLoopCnt++;		/* debug -baz */
         len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
         if ((stat & 0x0300) != 0x0300) {  /* Check first segment and last segment */
+		if ((stat & 0x0000ffff) != 0x7fff) {
+			/* Oversized frame */
+			sc->lmc_device->stats.rx_length_errors++;
+			goto skip_packet;
+		}
+	}
 
+	if (stat & 0x00000008) { /* Catch a dribbling bit error */
+		sc->lmc_device->stats.rx_errors++;
+		sc->lmc_device->stats.rx_frame_errors++;
+		goto skip_packet;
+	}
 
 
+	if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
+		sc->lmc_device->stats.rx_errors++;
+		sc->lmc_device->stats.rx_crc_errors++;
+		goto skip_packet;
+	}
 
+	if (len > LMC_PKT_BUF_SZ) {
+		sc->lmc_device->stats.rx_length_errors++;
+		localLengthErrCnt++;
+		goto skip_packet;
+	}
+
+	if (len < sc->lmc_crcSize + 2) {
+		sc->lmc_device->stats.rx_length_errors++;
+		sc->extra_stats.rx_SmallPktCnt++;
+		localLengthErrCnt++;
+		goto skip_packet;
+	}
 
         if(stat & 0x00004000){
             printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
···
         }
 
         dev->last_rx = jiffies;
+        sc->lmc_device->stats.rx_packets++;
+        sc->lmc_device->stats.rx_bytes += len;
 
         LMC_CONSOLE_LOG("recv", skb->data, len);
···
 
         skb_put (skb, len);
         skb->protocol = lmc_proto_type(sc, skb);
         skb_reset_mac_header(skb);
         /* skb_reset_network_header(skb); */
         skb->dev = dev;
···
          * in which case we'll try to allocate the buffer
          * again.
(once a second)1706 */1707+ sc->extra_stats.rx_BuffAllocErr++;1708 LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);1709 sc->failed_recv_alloc = 1;1710 goto skip_out_of_mem;···1739 * descriptors with bogus packets1740 *1741 if (localLengthErrCnt > LMC_RXDESCS - 3) {1742+ sc->extra_stats.rx_BadPktSurgeCnt++;1743+ LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,1744+ sc->extra_stats.rx_BadPktSurgeCnt);01745 } */17461747 /* save max count of receive descriptors serviced */1748+ if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)1749+ sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */017501751#ifdef DEBUG1752 if (rxIntLoopCnt == 0)···1775 return 0;1776}17771778+static struct net_device_stats *lmc_get_stats(struct net_device *dev)1779{1780+ lmc_softc_t *sc = dev_to_sc(dev);1781 unsigned long flags;17821783 lmc_trace(dev, "lmc_get_stats in");178401785 spin_lock_irqsave(&sc->lmc_lock, flags);17861787+ sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;17881789 spin_unlock_irqrestore(&sc->lmc_lock, flags);17901791 lmc_trace(dev, "lmc_get_stats out");17921793+ return &sc->lmc_device->stats;1794}17951796static struct pci_driver lmc_driver = {···1970 {1971 if (sc->lmc_txq[i] != NULL){ /* have buffer */1972 dev_kfree_skb(sc->lmc_txq[i]); /* free it */1973+ sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */1974 }1975 sc->lmc_txq[i] = NULL;1976 sc->lmc_txring[i].status = 0x00000000;···1982 lmc_trace(sc->lmc_device, "lmc_softreset out");1983}19841985+void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/1986{1987 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");1988 sc->lmc_gpio_io &= ~bits;···1990 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");1991}19921993+void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/1994{1995 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");1996 sc->lmc_gpio_io |= bits;···1998 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");1999}20002001+void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/2002{2003 lmc_trace(sc->lmc_device, "lmc_led_on in");2004 if((~sc->lmc_miireg16) & led){ /* Already on! */···2011 lmc_trace(sc->lmc_device, "lmc_led_on out");2012}20132014+void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/2015{2016 lmc_trace(sc->lmc_device, "lmc_led_off in");2017 if(sc->lmc_miireg16 & led){ /* Already set don't do anything */···2061 */2062 sc->lmc_media->init(sc);20632064+ sc->extra_stats.resetCount++;2065 lmc_trace(sc->lmc_device, "lmc_reset out");2066}20672068static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/2069{2070+ u32 val;2071 lmc_trace(sc->lmc_device, "lmc_dec_reset in");20722073 /*···2151 lmc_trace(sc->lmc_device, "lmc_initcsrs out");2152}21532154+static void lmc_driver_timeout(struct net_device *dev)2155+{2156+ lmc_softc_t *sc = dev_to_sc(dev);2157 u32 csr6;2158 unsigned long flags;21592160 lmc_trace(dev, "lmc_driver_timeout in");2161002162 spin_lock_irqsave(&sc->lmc_lock, flags);21632164 printk("%s: Xmitter busy|\n", dev->name);21652166+ sc->extra_stats.tx_tbusy_calls++;2167+ if (jiffies - dev->trans_start < TX_TIMEOUT)2168+ goto bug_out;021692170 /*2171 * Chip seems to have locked up···21782179 LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,2180 LMC_CSR_READ (sc, csr_status),2181+ sc->extra_stats.tx_ProcTimeout);21822183 lmc_running_reset (dev);2184···2195 /* immediate transmit */2196 LMC_CSR_WRITE (sc, csr_txpoll, 0);21972198+ sc->lmc_device->stats.tx_errors++;2199+ sc->extra_stats.tx_ProcTimeout++; /* -baz */22002201 dev->trans_start = jiffies;2202
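The lmc_main.c hunks above convert the driver from the private syncppp plumbing to the generic HDLC framework: the net_device is created with alloc_hdlcdev(sc), registered with register_hdlc_device(), and the softc is recovered everywhere through dev_to_sc(). That helper is not visible in these hunks; a minimal sketch of what it presumably looks like, given that alloc_hdlcdev() stores its argument as the HDLC private pointer (lmc_softc_t comes from the driver's own headers):

#include <linux/hdlc.h>

/*
 * Sketch: recover the driver state from a net_device created with
 * alloc_hdlcdev(sc). dev_to_hdlc() returns the hdlc_device that the
 * HDLC layer keeps behind the net_device; ->priv is the pointer that
 * was passed to alloc_hdlcdev().
 */
static inline lmc_softc_t *dev_to_sc(struct net_device *dev)
{
	return (lmc_softc_t *)dev_to_hdlc(dev)->priv;
}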
+27-39
drivers/net/wan/lmc/lmc_media.c
···16#include <linux/inet.h>17#include <linux/bitops.h>1819-#include <net/syncppp.h>20-21#include <asm/processor.h> /* Processor type for cache alignment. */22#include <asm/io.h>23#include <asm/dma.h>···93static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *);9495static inline void write_av9110_bit (lmc_softc_t *, int);96-static void write_av9110 (lmc_softc_t *, u_int32_t, u_int32_t, u_int32_t,97- u_int32_t, u_int32_t);9899lmc_media_t lmc_ds3_media = {100 lmc_ds3_init, /* special media init stuff */···424static int425lmc_ds3_get_link_status (lmc_softc_t * const sc)426{427- u_int16_t link_status, link_status_11;428 int ret = 1;429430 lmc_mii_writereg (sc, 0, 17, 7);···446 (link_status & LMC_FRAMER_REG0_OOFS)){447 ret = 0;448 if(sc->last_led_err[3] != 1){449- u16 r1;450 lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */451 r1 = lmc_mii_readreg (sc, 0, 18);452 r1 &= 0xfe;···459 else {460 lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */461 if(sc->last_led_err[3] == 1){462- u16 r1;463 lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */464 r1 = lmc_mii_readreg (sc, 0, 18);465 r1 |= 0x01;···537 * SSI methods538 */539540-static void541-lmc_ssi_init (lmc_softc_t * const sc)542{543- u_int16_t mii17;544- int cable;545546- sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000;547548- mii17 = lmc_mii_readreg (sc, 0, 17);549550- cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;551- sc->ictl.cable_type = cable;552553- lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK);554}555556static void···677static int678lmc_ssi_get_link_status (lmc_softc_t * const sc)679{680- u_int16_t link_status;681- u_int32_t ticks;682 int ret = 1;683 int hw_hdsk = 1;684-685 /*686 * missing CTS? Hmm. If we require CTS on, we may never get the687 * link to come up, so omit it in this test.···716 }717 else if (ticks == 0 ) { /* no clock found ? */718 ret = 0;719- if(sc->last_led_err[3] != 1){720- sc->stats.tx_lossOfClockCnt++;721- printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name);722 }723 sc->last_led_err[3] = 1;724 lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */···834 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);835}836837-static void838-write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v,839- u_int32_t x, u_int32_t r)840{841 int i;842···881 | LMC_GEP_SSI_GENERATOR));882}883884-static void885-lmc_ssi_watchdog (lmc_softc_t * const sc)886{887- u_int16_t mii17 = lmc_mii_readreg (sc, 0, 17);888- if (((mii17 >> 3) & 7) == 7)889- {890- lmc_led_off (sc, LMC_MII16_LED2);891- }892- else893- {894- lmc_led_on (sc, LMC_MII16_LED2);895- }896-897}898899/*···917static void918lmc_t1_init (lmc_softc_t * const sc)919{920- u_int16_t mii16;921 int i;922923 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200;···1016 */ static int1017lmc_t1_get_link_status (lmc_softc_t * const sc)1018{1019- u_int16_t link_status;1020 int ret = 1;10211022 /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
···16#include <linux/inet.h>17#include <linux/bitops.h>180019#include <asm/processor.h> /* Processor type for cache alignment. */20#include <asm/io.h>21#include <asm/dma.h>···95static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *);9697static inline void write_av9110_bit (lmc_softc_t *, int);98+static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);099100lmc_media_t lmc_ds3_media = {101 lmc_ds3_init, /* special media init stuff */···427static int428lmc_ds3_get_link_status (lmc_softc_t * const sc)429{430+ u16 link_status, link_status_11;431 int ret = 1;432433 lmc_mii_writereg (sc, 0, 17, 7);···449 (link_status & LMC_FRAMER_REG0_OOFS)){450 ret = 0;451 if(sc->last_led_err[3] != 1){452+ u16 r1;453 lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */454 r1 = lmc_mii_readreg (sc, 0, 18);455 r1 &= 0xfe;···462 else {463 lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */464 if(sc->last_led_err[3] == 1){465+ u16 r1;466 lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */467 r1 = lmc_mii_readreg (sc, 0, 18);468 r1 |= 0x01;···540 * SSI methods541 */542543+static void lmc_ssi_init(lmc_softc_t * const sc)0544{545+ u16 mii17;546+ int cable;547548+ sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000;549550+ mii17 = lmc_mii_readreg(sc, 0, 17);551552+ cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;553+ sc->ictl.cable_type = cable;554555+ lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK);556}557558static void···681static int682lmc_ssi_get_link_status (lmc_softc_t * const sc)683{684+ u16 link_status;685+ u32 ticks;686 int ret = 1;687 int hw_hdsk = 1;688+689 /*690 * missing CTS? Hmm. If we require CTS on, we may never get the691 * link to come up, so omit it in this test.···720 }721 else if (ticks == 0 ) { /* no clock found ? */722 ret = 0;723+ if (sc->last_led_err[3] != 1) {724+ sc->extra_stats.tx_lossOfClockCnt++;725+ printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name);726 }727 sc->last_led_err[3] = 1;728 lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */···838 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);839}840841+static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r)00842{843 int i;844···887 | LMC_GEP_SSI_GENERATOR));888}889890+static void lmc_ssi_watchdog(lmc_softc_t * const sc)0891{892+ u16 mii17 = lmc_mii_readreg(sc, 0, 17);893+ if (((mii17 >> 3) & 7) == 7)894+ lmc_led_off(sc, LMC_MII16_LED2);895+ else896+ lmc_led_on(sc, LMC_MII16_LED2);00000897}898899/*···929static void930lmc_t1_init (lmc_softc_t * const sc)931{932+ u16 mii16;933 int i;934935 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200;···1028 */ static int1029lmc_t1_get_link_status (lmc_softc_t * const sc)1030{1031+ u16 link_status;1032 int ret = 1;10331034 /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
+27-123
drivers/net/wan/lmc/lmc_proto.c
···36#include <linux/workqueue.h>37#include <linux/proc_fs.h>38#include <linux/bitops.h>39-40-#include <net/syncppp.h>41-42#include <asm/processor.h> /* Processor type for cache alignment. */43#include <asm/io.h>44#include <asm/dma.h>···47#include "lmc_ioctl.h"48#include "lmc_proto.h"4950-/*51- * The compile-time variable SPPPSTUP causes the module to be52- * compiled without referencing any of the sync ppp routines.53- */54-#ifdef SPPPSTUB55-#define SPPP_detach(d) (void)056-#define SPPP_open(d) 057-#define SPPP_reopen(d) (void)058-#define SPPP_close(d) (void)059-#define SPPP_attach(d) (void)060-#define SPPP_do_ioctl(d,i,c) -EOPNOTSUPP61-#else62-#define SPPP_attach(x) sppp_attach((x)->pd)63-#define SPPP_detach(x) sppp_detach((x)->pd->dev)64-#define SPPP_open(x) sppp_open((x)->pd->dev)65-#define SPPP_reopen(x) sppp_reopen((x)->pd->dev)66-#define SPPP_close(x) sppp_close((x)->pd->dev)67-#define SPPP_do_ioctl(x, y, z) sppp_do_ioctl((x)->pd->dev, (y), (z))68-#endif69-70-// init71-void lmc_proto_init(lmc_softc_t *sc) /*FOLD00*/72-{73- lmc_trace(sc->lmc_device, "lmc_proto_init in");74- switch(sc->if_type){75- case LMC_PPP:76- sc->pd = kmalloc(sizeof(struct ppp_device), GFP_KERNEL);77- if (!sc->pd) {78- printk("lmc_proto_init(): kmalloc failure!\n");79- return;80- }81- sc->pd->dev = sc->lmc_device;82- sc->if_ptr = sc->pd;83- break;84- case LMC_RAW:85- break;86- default:87- break;88- }89- lmc_trace(sc->lmc_device, "lmc_proto_init out");90-}91-92// attach93void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/94{···55 case LMC_PPP:56 {57 struct net_device *dev = sc->lmc_device;58- SPPP_attach(sc);59 dev->do_ioctl = lmc_ioctl;60 }61 break;···62 {63 struct net_device *dev = sc->lmc_device;64 /*65- * They set a few basics because they don't use sync_ppp66 */67 dev->flags |= IFF_POINTOPOINT;68···78 lmc_trace(sc->lmc_device, "lmc_proto_attach out");79}8081-// detach82-void lmc_proto_detach(lmc_softc_t *sc) /*FOLD00*/83{84- switch(sc->if_type){85- case LMC_PPP:86- SPPP_detach(sc);87- break;88- case LMC_RAW: /* Tell someone we're detaching? */89- break;90- default:91- break;92- }93-94}9596-// reopen97-void lmc_proto_reopen(lmc_softc_t *sc) /*FOLD00*/98{99- lmc_trace(sc->lmc_device, "lmc_proto_reopen in");100- switch(sc->if_type){101- case LMC_PPP:102- SPPP_reopen(sc);103- break;104- case LMC_RAW: /* Reset the interface after being down, prerape to receive packets again */105- break;106- default:107- break;108- }109- lmc_trace(sc->lmc_device, "lmc_proto_reopen out");00110}111112-113-// ioctl114-int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd) /*FOLD00*/115{116- lmc_trace(sc->lmc_device, "lmc_proto_ioctl out");117- switch(sc->if_type){118- case LMC_PPP:119- return SPPP_do_ioctl (sc, ifr, cmd);120- break;121- default:122- return -EOPNOTSUPP;123- break;124- }125- lmc_trace(sc->lmc_device, "lmc_proto_ioctl out");126-}127128-// open129-void lmc_proto_open(lmc_softc_t *sc) /*FOLD00*/130-{131- int ret;132133- lmc_trace(sc->lmc_device, "lmc_proto_open in");134- switch(sc->if_type){135- case LMC_PPP:136- ret = SPPP_open(sc);137- if(ret < 0)138- printk("%s: syncPPP open failed: %d\n", sc->name, ret);139- break;140- case LMC_RAW: /* We're about to start getting packets! 
*/141- break;142- default:143- break;144- }145- lmc_trace(sc->lmc_device, "lmc_proto_open out");146-}147-148-// close149-150-void lmc_proto_close(lmc_softc_t *sc) /*FOLD00*/151-{152- lmc_trace(sc->lmc_device, "lmc_proto_close in");153- switch(sc->if_type){154- case LMC_PPP:155- SPPP_close(sc);156- break;157- case LMC_RAW: /* Interface going down */158- break;159- default:160- break;161- }162- lmc_trace(sc->lmc_device, "lmc_proto_close out");163}164165__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/···118 lmc_trace(sc->lmc_device, "lmc_proto_type in");119 switch(sc->if_type){120 case LMC_PPP:121- return htons(ETH_P_WAN_PPP);122- break;123 case LMC_NET:124 return htons(ETH_P_802_2);125 break;···150 }151 lmc_trace(sc->lmc_device, "lmc_proto_netif out");152}153-
···36#include <linux/workqueue.h>37#include <linux/proc_fs.h>38#include <linux/bitops.h>00039#include <asm/processor.h> /* Processor type for cache alignment. */40#include <asm/io.h>41#include <asm/dma.h>···50#include "lmc_ioctl.h"51#include "lmc_proto.h"5200000000000000000000000000000000000000000053// attach54void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/55{···100 case LMC_PPP:101 {102 struct net_device *dev = sc->lmc_device;0103 dev->do_ioctl = lmc_ioctl;104 }105 break;···108 {109 struct net_device *dev = sc->lmc_device;110 /*111+ * They set a few basics because they don't use HDLC112 */113 dev->flags |= IFF_POINTOPOINT;114···124 lmc_trace(sc->lmc_device, "lmc_proto_attach out");125}126127+int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd)0128{129+ lmc_trace(sc->lmc_device, "lmc_proto_ioctl");130+ if (sc->if_type == LMC_PPP)131+ return hdlc_ioctl(sc->lmc_device, ifr, cmd);132+ return -EOPNOTSUPP;000000133}134135+int lmc_proto_open(lmc_softc_t *sc)0136{137+ int ret = 0;138+139+ lmc_trace(sc->lmc_device, "lmc_proto_open in");140+141+ if (sc->if_type == LMC_PPP) {142+ ret = hdlc_open(sc->lmc_device);143+ if (ret < 0)144+ printk(KERN_WARNING "%s: HDLC open failed: %d\n",145+ sc->name, ret);146+ }147+148+ lmc_trace(sc->lmc_device, "lmc_proto_open out");149+ return ret;150}151152+void lmc_proto_close(lmc_softc_t *sc)00153{154+ lmc_trace(sc->lmc_device, "lmc_proto_close in");0000000000155156+ if (sc->if_type == LMC_PPP)157+ hdlc_close(sc->lmc_device);00158159+ lmc_trace(sc->lmc_device, "lmc_proto_close out");00000000000000000000000000000160}161162__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/···213 lmc_trace(sc->lmc_device, "lmc_proto_type in");214 switch(sc->if_type){215 case LMC_PPP:216+ return hdlc_type_trans(skb, sc->lmc_device);217+ break;218 case LMC_NET:219 return htons(ETH_P_802_2);220 break;···245 }246 lmc_trace(sc->lmc_device, "lmc_proto_netif out");247}0
drivers/net/wan/sealevel.c
···8 *9 * (c) Copyright 1999, 2001 Alan Cox10 * (c) Copyright 2001 Red Hat Inc.011 *12 */13···20#include <linux/netdevice.h>21#include <linux/if_arp.h>22#include <linux/delay.h>023#include <linux/ioport.h>24#include <linux/init.h>25#include <net/arp.h>···29#include <asm/io.h>30#include <asm/dma.h>31#include <asm/byteorder.h>32-#include <net/syncppp.h>33#include "z85230.h"343536struct slvl_device37{38- void *if_ptr; /* General purpose pointer (used by SPPP) */39 struct z8530_channel *chan;40- struct ppp_device pppdev;41 int channel;42};434445struct slvl_board46{47- struct slvl_device *dev[2];48 struct z8530_dev board;49 int iobase;50};···50 * Network driver support routines51 */520000053/*54- * Frame receive. Simple for our card as we do sync ppp and there55 * is no funny garbage involved56 */57-58static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)59{60 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */61- skb_trim(skb, skb->len-2);62- skb->protocol=htons(ETH_P_WAN_PPP);63 skb_reset_mac_header(skb);64- skb->dev=c->netdevice;65- /*66- * Send it to the PPP layer. We don't have time to process67- * it right now.68- */69 netif_rx(skb);70 c->netdevice->last_rx = jiffies;71}72-73/*74 * We've been placed in the UP state75- */ 76-77static int sealevel_open(struct net_device *d)78{79- struct slvl_device *slvl=d->priv;80 int err = -1;81 int unit = slvl->channel;82-83 /*84- * Link layer up. 85 */8687- switch(unit)88 {89 case 0:90- err=z8530_sync_dma_open(d, slvl->chan);91 break;92 case 1:93- err=z8530_sync_open(d, slvl->chan);94 break;95 }96-97- if(err)98 return err;99- /*100- * Begin PPP101- */102- err=sppp_open(d);103- if(err)104- {105- switch(unit)106- {107 case 0:108 z8530_sync_dma_close(d, slvl->chan);109 break;110 case 1:111 z8530_sync_close(d, slvl->chan);112 break;113- } 114 return err;115 }116-117- slvl->chan->rx_function=sealevel_input;118-119 /*120 * Go go go121 */···122123static int sealevel_close(struct net_device *d)124{125- struct slvl_device *slvl=d->priv;126 int unit = slvl->channel;127-128 /*129 * Discard new frames130 */131-132- slvl->chan->rx_function=z8530_null_rx;133-134- /*135- * PPP off136- */137- sppp_close(d);138- /*139- * Link layer down140- */141000142 netif_stop_queue(d);143-144- switch(unit)145 {146 case 0:147 z8530_sync_dma_close(d, slvl->chan);···148149static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)150{151- /* struct slvl_device *slvl=d->priv;152 z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */153- return sppp_do_ioctl(d, ifr,cmd);154-}155-156-static struct net_device_stats *sealevel_get_stats(struct net_device *d)157-{158- struct slvl_device *slvl=d->priv;159- if(slvl)160- return z8530_get_stats(slvl->chan);161- else162- return NULL;163}164165/*166- * Passed PPP frames, fire them downwind.167 */168-169static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d)170{171- struct slvl_device *slvl=d->priv;172- return z8530_queue_xmit(slvl->chan, skb);173}174175-static int sealevel_neigh_setup(struct neighbour *n)0176{177- if (n->nud_state == NUD_NONE) {178- n->ops = &arp_broken_ops;179- n->output = n->ops->output;00000000000000000000180 }00181 return 0;182-}183-184-static int sealevel_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)185-{186- if (p->tbl->family == AF_INET) {187- p->neigh_setup = sealevel_neigh_setup;188- p->ucast_probes = 0;189- p->mcast_probes = 0;190- }191- return 0;192-}193-194-static int sealevel_attach(struct net_device *dev)195-{196- struct slvl_device *sv = 
dev->priv;197- sppp_attach(&sv->pppdev);198- return 0;199-}200-201-static void sealevel_detach(struct net_device *dev)202-{203- sppp_detach(dev);204-}205-206-static void slvl_setup(struct net_device *d)207-{208- d->open = sealevel_open;209- d->stop = sealevel_close;210- d->init = sealevel_attach;211- d->uninit = sealevel_detach;212- d->hard_start_xmit = sealevel_queue_xmit;213- d->get_stats = sealevel_get_stats;214- d->set_multicast_list = NULL;215- d->do_ioctl = sealevel_ioctl;216- d->neigh_setup = sealevel_neigh_setup_dev;217- d->set_mac_address = NULL;218-219-}220-221-static inline struct slvl_device *slvl_alloc(int iobase, int irq)222-{223- struct net_device *d;224- struct slvl_device *sv;225-226- d = alloc_netdev(sizeof(struct slvl_device), "hdlc%d",227- slvl_setup);228-229- if (!d) 230- return NULL;231-232- sv = d->priv;233- d->ml_priv = sv;234- sv->if_ptr = &sv->pppdev;235- sv->pppdev.dev = d;236- d->base_addr = iobase;237- d->irq = irq;238-239- return sv;240}241242243/*244 * Allocate and setup Sealevel board.245 */246-247-static __init struct slvl_board *slvl_init(int iobase, int irq, 248 int txdma, int rxdma, int slow)249{250 struct z8530_dev *dev;251 struct slvl_board *b;252-253 /*254 * Get the needed I/O space255 */256257- if(!request_region(iobase, 8, "Sealevel 4021")) 258- { 259- printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n", iobase);260 return NULL;261 }262-263 b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);264- if(!b)265- goto fail3;266267- if (!(b->dev[0]= slvl_alloc(iobase, irq)))268- goto fail2;269270- b->dev[0]->chan = &b->board.chanA; 271- b->dev[0]->channel = 0;272-273- if (!(b->dev[1] = slvl_alloc(iobase, irq)))274- goto fail1_0;275-276- b->dev[1]->chan = &b->board.chanB;277- b->dev[1]->channel = 1;278279 dev = &b->board;280-281 /*282 * Stuff in the I/O addressing283 */284-285 dev->active = 0;286287 b->iobase = iobase;288-289 /*290 * Select 8530 delays for the old board291 */292-293- if(slow)294 iobase |= Z8530_PORT_SLEEP;295-296- dev->chanA.ctrlio=iobase+1;297- dev->chanA.dataio=iobase;298- dev->chanB.ctrlio=iobase+3;299- dev->chanB.dataio=iobase+2;300-301- dev->chanA.irqs=&z8530_nop;302- dev->chanB.irqs=&z8530_nop;303-304 /*305 * Assert DTR enable DMA306 */307-308- outb(3|(1<<7), b->iobase+4); 309-310311 /* We want a fast IRQ for this device. 
Actually we'd like an even faster312 IRQ ;) - This is one driver RtLinux is made for */313-314- if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "SeaLevel", dev)<0)315- {316- printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq);317- goto fail1_1;318- }319-320- dev->irq=irq;321- dev->chanA.private=&b->dev[0];322- dev->chanB.private=&b->dev[1];323- dev->chanA.netdevice=b->dev[0]->pppdev.dev;324- dev->chanB.netdevice=b->dev[1]->pppdev.dev;325- dev->chanA.dev=dev;326- dev->chanB.dev=dev;327328- dev->chanA.txdma=3;329- dev->chanA.rxdma=1;330- if(request_dma(dev->chanA.txdma, "SeaLevel (TX)")!=0)331- goto fail;332-333- if(request_dma(dev->chanA.rxdma, "SeaLevel (RX)")!=0)334- goto dmafail;335-000000000000336 disable_irq(irq);337-338 /*339 * Begin normal initialise340 */341-342- if(z8530_init(dev)!=0)343- {344 printk(KERN_ERR "Z8530 series device not found.\n");345 enable_irq(irq);346- goto dmafail2;347 }348- if(dev->type==Z85C30)349- {350 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);351 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);352- }353- else354- {355 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);356 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);357 }···302 /*303 * Now we can take the IRQ304 */305-306 enable_irq(irq);307308- if (register_netdev(b->dev[0]->pppdev.dev)) 309- goto dmafail2;310-311- if (register_netdev(b->dev[1]->pppdev.dev)) 312- goto fail_unit;313314 z8530_describe(dev, "I/O", iobase);315- dev->active=1;316 return b;317318-fail_unit:319- unregister_netdev(b->dev[0]->pppdev.dev);320-321-dmafail2:322 free_dma(dev->chanA.rxdma);323-dmafail:324 free_dma(dev->chanA.txdma);325-fail:326 free_irq(irq, dev);327-fail1_1:328- free_netdev(b->dev[1]->pppdev.dev);329-fail1_0:330- free_netdev(b->dev[0]->pppdev.dev);331-fail2:332 kfree(b);333-fail3:334- release_region(iobase,8);335 return NULL;336}337···335 int u;336337 z8530_shutdown(&b->board);338-339- for(u=0; u<2; u++)340 {341- struct net_device *d = b->dev[u]->pppdev.dev;342- unregister_netdev(d);343 free_netdev(d);344 }345-346 free_irq(b->board.irq, &b->board);347 free_dma(b->board.chanA.rxdma);348 free_dma(b->board.chanA.txdma);···378379static int __init slvl_init_module(void)380{381-#ifdef MODULE382- printk(KERN_INFO "SeaLevel Z85230 Synchronous Driver v 0.02.\n");383- printk(KERN_INFO "(c) Copyright 1998, Building Number Three Ltd.\n");384-#endif385 slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);386387 return slvl_unit ? 0 : -ENODEV;
···8 *9 * (c) Copyright 1999, 2001 Alan Cox10 * (c) Copyright 2001 Red Hat Inc.11+ * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>12 *13 */14···19#include <linux/netdevice.h>20#include <linux/if_arp.h>21#include <linux/delay.h>22+#include <linux/hdlc.h>23#include <linux/ioport.h>24#include <linux/init.h>25#include <net/arp.h>···27#include <asm/io.h>28#include <asm/dma.h>29#include <asm/byteorder.h>030#include "z85230.h"313233struct slvl_device34{035 struct z8530_channel *chan;036 int channel;37};383940struct slvl_board41{42+ struct slvl_device dev[2];43 struct z8530_dev board;44 int iobase;45};···51 * Network driver support routines52 */5354+static inline struct slvl_device* dev_to_chan(struct net_device *dev)55+{56+ return (struct slvl_device *)dev_to_hdlc(dev)->priv;57+}58+59/*60+ * Frame receive. Simple for our card as we do HDLC and there61 * is no funny garbage involved62 */63+64static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)65{66 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */67+ skb_trim(skb, skb->len - 2);68+ skb->protocol = hdlc_type_trans(skb, c->netdevice);69 skb_reset_mac_header(skb);70+ skb->dev = c->netdevice;000071 netif_rx(skb);72 c->netdevice->last_rx = jiffies;73}74+75/*76 * We've been placed in the UP state77+ */78+79static int sealevel_open(struct net_device *d)80{81+ struct slvl_device *slvl = dev_to_chan(d);82 int err = -1;83 int unit = slvl->channel;84+85 /*86+ * Link layer up.87 */8889+ switch (unit)90 {91 case 0:92+ err = z8530_sync_dma_open(d, slvl->chan);93 break;94 case 1:95+ err = z8530_sync_open(d, slvl->chan);96 break;97 }98+99+ if (err)100 return err;101+102+ err = hdlc_open(d);103+ if (err) {104+ switch (unit) {0000105 case 0:106 z8530_sync_dma_close(d, slvl->chan);107 break;108 case 1:109 z8530_sync_close(d, slvl->chan);110 break;111+ }112 return err;113 }114+115+ slvl->chan->rx_function = sealevel_input;116+117 /*118 * Go go go119 */···126127static int sealevel_close(struct net_device *d)128{129+ struct slvl_device *slvl = dev_to_chan(d);130 int unit = slvl->channel;131+132 /*133 * Discard new frames134 */0000000000135136+ slvl->chan->rx_function = z8530_null_rx;137+138+ hdlc_close(d);139 netif_stop_queue(d);140+141+ switch (unit)142 {143 case 0:144 z8530_sync_dma_close(d, slvl->chan);···159160static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)161{162+ /* struct slvl_device *slvl=dev_to_chan(d);163 z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */164+ return hdlc_ioctl(d, ifr, cmd);000000000165}166167/*168+ * Passed network frames, fire them downwind.169 */170+171static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d)172{173+ return z8530_queue_xmit(dev_to_chan(d)->chan, skb);0174}175176+static int sealevel_attach(struct net_device *dev, unsigned short encoding,177+ unsigned short parity)178{179+ if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)180+ return 0;181+ return -EINVAL;182+}183+184+static int slvl_setup(struct slvl_device *sv, int iobase, int irq)185+{186+ struct net_device *dev = alloc_hdlcdev(sv);187+ if (!dev)188+ return -1;189+190+ dev_to_hdlc(dev)->attach = sealevel_attach;191+ dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;192+ dev->open = sealevel_open;193+ dev->stop = sealevel_close;194+ dev->do_ioctl = sealevel_ioctl;195+ dev->base_addr = iobase;196+ dev->irq = irq;197+198+ if (register_hdlc_device(dev)) {199+ printk(KERN_ERR "sealevel: unable to register HDLC device\n");200+ free_netdev(dev);201+ return -1;202 
}203+204+ sv->chan->netdevice = dev;205 return 0;0000000000000000000000000000000000000000000000000000000000206}207208209/*210 * Allocate and setup Sealevel board.211 */212+213+static __init struct slvl_board *slvl_init(int iobase, int irq,214 int txdma, int rxdma, int slow)215{216 struct z8530_dev *dev;217 struct slvl_board *b;218+219 /*220 * Get the needed I/O space221 */222223+ if (!request_region(iobase, 8, "Sealevel 4021")) {224+ printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n",225+ iobase);226 return NULL;227 }228+229 b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);230+ if (!b)231+ goto err_kzalloc;232233+ b->dev[0].chan = &b->board.chanA;234+ b->dev[0].channel = 0;235236+ b->dev[1].chan = &b->board.chanB;237+ b->dev[1].channel = 1;000000238239 dev = &b->board;240+241 /*242 * Stuff in the I/O addressing243 */244+245 dev->active = 0;246247 b->iobase = iobase;248+249 /*250 * Select 8530 delays for the old board251 */252+253+ if (slow)254 iobase |= Z8530_PORT_SLEEP;255+256+ dev->chanA.ctrlio = iobase + 1;257+ dev->chanA.dataio = iobase;258+ dev->chanB.ctrlio = iobase + 3;259+ dev->chanB.dataio = iobase + 2;260+261+ dev->chanA.irqs = &z8530_nop;262+ dev->chanB.irqs = &z8530_nop;263+264 /*265 * Assert DTR enable DMA266 */267+268+ outb(3 | (1 << 7), b->iobase + 4);269+270271 /* We want a fast IRQ for this device. Actually we'd like an even faster272 IRQ ;) - This is one driver RtLinux is made for */00000000000000273274+ if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED,275+ "SeaLevel", dev) < 0) {276+ printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq);277+ goto err_request_irq;278+ }279+280+ dev->irq = irq;281+ dev->chanA.private = &b->dev[0];282+ dev->chanB.private = &b->dev[1];283+ dev->chanA.dev = dev;284+ dev->chanB.dev = dev;285+286+ dev->chanA.txdma = 3;287+ dev->chanA.rxdma = 1;288+ if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))289+ goto err_dma_tx;290+291+ if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))292+ goto err_dma_rx;293+294 disable_irq(irq);295+296 /*297 * Begin normal initialise298 */299+300+ if (z8530_init(dev) != 0) {0301 printk(KERN_ERR "Z8530 series device not found.\n");302 enable_irq(irq);303+ goto free_hw;304 }305+ if (dev->type == Z85C30) {0306 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);307 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);308+ } else {00309 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);310 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);311 }···370 /*371 * Now we can take the IRQ372 */373+374 enable_irq(irq);375376+ if (slvl_setup(&b->dev[0], iobase, irq))377+ goto free_hw;378+ if (slvl_setup(&b->dev[1], iobase, irq))379+ goto free_netdev0;0380381 z8530_describe(dev, "I/O", iobase);382+ dev->active = 1;383 return b;384385+free_netdev0:386+ unregister_hdlc_device(b->dev[0].chan->netdevice);387+ free_netdev(b->dev[0].chan->netdevice);388+free_hw:389 free_dma(dev->chanA.rxdma);390+err_dma_rx:391 free_dma(dev->chanA.txdma);392+err_dma_tx:393 free_irq(irq, dev);394+err_request_irq:0000395 kfree(b);396+err_kzalloc:397+ release_region(iobase, 8);398 return NULL;399}400···408 int u;409410 z8530_shutdown(&b->board);411+412+ for (u = 0; u < 2; u++)413 {414+ struct net_device *d = b->dev[u].chan->netdevice;415+ unregister_hdlc_device(d);416 free_netdev(d);417 }418+419 free_irq(b->board.irq, &b->board);420 free_dma(b->board.chanA.rxdma);421 free_dma(b->board.chanA.txdma);···451452static int __init slvl_init_module(void)453{0000454 slvl_unit = slvl_init(io, irq, txdma, rxdma, 
slow);455456 return slvl_unit ? 0 : -ENODEV;
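The sealevel and LMC conversions follow the same bring-up recipe: allocate the net_device with alloc_hdlcdev(), fill in the HDLC attach/xmit hooks, then register with register_hdlc_device() instead of register_netdev(). On receive, both drivers now stamp skb->protocol with hdlc_type_trans() before netif_rx(), as sealevel_input() shows. A condensed sketch of the recipe, with the my_* names purely hypothetical stand-ins for the driver-specific parts:

#include <linux/hdlc.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

/* Hypothetical per-device state; a real driver keeps its softc here. */
struct my_priv {
	int channel;
};

/* Accept only line settings the hardware supports (cf. lmc_attach and
 * sealevel_attach above). The HDLC core calls this when the user
 * configures the line. */
static int my_attach(struct net_device *dev, unsigned short encoding,
		     unsigned short parity)
{
	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
		return 0;
	return -EINVAL;
}

/* Stub transmit hook; a real driver queues the frame to hardware. */
static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);
	return 0;
}

static int my_probe(void)
{
	struct my_priv *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct net_device *dev;
	int err;

	if (!p)
		return -ENOMEM;
	dev = alloc_hdlcdev(p);		/* p becomes dev_to_hdlc(dev)->priv */
	if (!dev) {
		kfree(p);
		return -ENOMEM;
	}
	dev_to_hdlc(dev)->attach = my_attach;
	dev_to_hdlc(dev)->xmit = my_start_xmit;
	err = register_hdlc_device(dev);	/* not register_netdev() */
	if (err) {
		free_netdev(dev);
		kfree(p);
	}
	return err;
}

The line protocol (PPP, Cisco HDLC, frame relay, ...) is then chosen at run time through the HDLC layer rather than being compiled into the driver.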
-9
drivers/net/wan/syncppp.c
···230 skb->dev=dev;231 skb_reset_mac_header(skb);232233- if (dev->flags & IFF_RUNNING)234- {235- /* Count received bytes, add FCS and one flag */236- sp->ibytes+= skb->len + 3;237- sp->ipkts++;238- }239-240 if (!pskb_may_pull(skb, PPP_HEADER_LEN)) {241 /* Too small packet, drop it. */242 if (sp->pp_flags & PP_DEBUG)···825 sppp_print_bytes ((u8*) (lh+1), len);826 printk (">\n");827 }828- sp->obytes += skb->len;829 /* Control is high priority so it doesn't get queued behind data */830 skb->priority=TC_PRIO_CONTROL;831 skb->dev = dev;···867 printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n",868 dev->name, ntohl (ch->type), ch->par1,869 ch->par2, ch->rel, ch->time0, ch->time1);870- sp->obytes += skb->len;871 skb->priority=TC_PRIO_CONTROL;872 skb->dev = dev;873 skb_queue_tail(&tx_queue, skb);
···230 skb->dev=dev;231 skb_reset_mac_header(skb);2320000000233 if (!pskb_may_pull(skb, PPP_HEADER_LEN)) {234 /* Too small packet, drop it. */235 if (sp->pp_flags & PP_DEBUG)···832 sppp_print_bytes ((u8*) (lh+1), len);833 printk (">\n");834 }0835 /* Control is high priority so it doesn't get queued behind data */836 skb->priority=TC_PRIO_CONTROL;837 skb->dev = dev;···875 printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n",876 dev->name, ntohl (ch->type), ch->par1,877 ch->par2, ch->rel, ch->time0, ch->time1);0878 skb->priority=TC_PRIO_CONTROL;879 skb->dev = dev;880 skb_queue_tail(&tx_queue, skb);
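The lines deleted from syncppp.c are its private byte and packet accounting (sp->ipkts, sp->ibytes, sp->obytes). After this patch the converted drivers charge traffic to the standard counters embedded in struct net_device instead, exactly as the lmc and z85230 hunks do with sc->lmc_device->stats and c->netdevice->stats. The pattern, as a minimal sketch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: account a received frame against the generic netdevice
 * counters rather than protocol-private ones. */
static void example_count_rx(struct net_device *dev, const struct sk_buff *skb)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
}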
+77-116
drivers/net/wan/z85230.c
···43#include <linux/netdevice.h>44#include <linux/if_arp.h>45#include <linux/delay.h>046#include <linux/ioport.h>47#include <linux/init.h>48#include <asm/dma.h>···52#define RT_UNLOCK53#include <linux/spinlock.h>5455-#include <net/syncppp.h>56#include "z85230.h"5758···440 * A status event occurred in PIO synchronous mode. There are several441 * reasons the chip will bother us here. A transmit underrun means we442 * failed to feed the chip fast enough and just broke a packet. A DCD443- * change is a line up or down. We communicate that back to the protocol444- * layer for synchronous PPP to renegotiate.445 */446447static void z8530_status(struct z8530_channel *chan)448{449 u8 status, altered;450451- status=read_zsreg(chan, R0);452- altered=chan->status^status;453-454- chan->status=status;455-456- if(status&TxEOM)457- {458/* printk("%s: Tx underrun.\n", chan->dev->name); */459- chan->stats.tx_fifo_errors++;460 write_zsctrl(chan, ERR_RES);461 z8530_tx_done(chan);462 }463-464- if(altered&chan->dcdcheck)465 {466- if(status&chan->dcdcheck)467- {468 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);469- write_zsreg(chan, R3, chan->regs[3]|RxENABLE);470- if(chan->netdevice &&471- ((chan->netdevice->type == ARPHRD_HDLC) ||472- (chan->netdevice->type == ARPHRD_PPP)))473- sppp_reopen(chan->netdevice);474- }475- else476- {477 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);478- write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);479 z8530_flush_fifo(chan);00480 }481-482- } 483 write_zsctrl(chan, RES_EXT_INT);484 write_zsctrl(chan, RES_H_IUS);485}486487-struct z8530_irqhandler z8530_sync=488{489 z8530_rx,490 z8530_tx,···551 * 552 * A status event occurred on the Z8530. We receive these for two reasons553 * when in DMA mode. Firstly if we finished a packet transfer we get one554- * and kick the next packet out. Secondly we may see a DCD change and555- * have to poke the protocol layer.556 *557 */558···580 }581 }582583- if(altered&chan->dcdcheck)584 {585- if(status&chan->dcdcheck)586- {587 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);588- write_zsreg(chan, R3, chan->regs[3]|RxENABLE);589- if(chan->netdevice &&590- ((chan->netdevice->type == ARPHRD_HDLC) ||591- (chan->netdevice->type == ARPHRD_PPP)))592- sppp_reopen(chan->netdevice);593- }594- else595- {596 printk(KERN_INFO "%s:DCD lost\n", chan->dev->name);597- write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);598 z8530_flush_fifo(chan);00599 }600- } 601602 write_zsctrl(chan, RES_EXT_INT);603 write_zsctrl(chan, RES_H_IUS);···1450 /*1451 * Check if we crapped out.1452 */1453- if(get_dma_residue(c->txdma))1454 {1455- c->stats.tx_dropped++;1456- c->stats.tx_fifo_errors++;1457 }1458 release_dma_lock(flags);1459 }···1525 * packet. This code is fairly timing sensitive.1526 *1527 * Called with the register lock held.1528- */ 1529-1530static void z8530_tx_done(struct z8530_channel *c)1531{1532 struct sk_buff *skb;15331534 /* Actually this can happen.*/1535- if(c->tx_skb==NULL)1536 return;15371538- skb=c->tx_skb;1539- c->tx_skb=NULL;1540 z8530_tx_begin(c);1541- c->stats.tx_packets++;1542- c->stats.tx_bytes+=skb->len;1543 dev_kfree_skb_irq(skb);1544}1545···1549 * @skb: The buffer1550 *1551 * We point the receive handler at this function when idle. 
Instead1552- * of syncppp processing the frames we get to throw them away.1553 */15541555void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)···1626 else1627 /* Can't occur as we dont reenable the DMA irq until1628 after the flip is done */1629- printk(KERN_WARNING "%s: DMA flip overrun!\n", c->netdevice->name);1630-01631 release_dma_lock(flags);1632-1633 /*1634 * Shove the old buffer into an sk_buff. We can't DMA1635 * directly into one on a PC - it might be above the 16Mb···1638 * can avoid the copy. Optimisation 2 - make the memcpy1639 * a copychecksum.1640 */1641-1642- skb=dev_alloc_skb(ct);1643- if(skb==NULL)1644- {1645- c->stats.rx_dropped++;1646- printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name);1647- }1648- else1649- {1650 skb_put(skb, ct);1651 skb_copy_to_linear_data(skb, rxb, ct);1652- c->stats.rx_packets++;1653- c->stats.rx_bytes+=ct;1654 }1655- c->dma_ready=1;1656- }1657- else1658- {1659- RT_LOCK; 1660- skb=c->skb;1661-1662 /*1663 * The game we play for non DMA is similar. We want to1664 * get the controller set up for the next packet as fast···1665 * if you build a system where the sync irq isnt blocked1666 * by the kernel IRQ disable then you need only block the1667 * sync IRQ for the RT_LOCK area.1668- * 1669 */1670 ct=c->count;1671-1672 c->skb = c->skb2;1673 c->count = 0;1674 c->max = c->mtu;1675- if(c->skb)1676- {1677 c->dptr = c->skb->data;1678 c->max = c->mtu;1679- }1680- else1681- {1682- c->count= 0;1683 c->max = 0;1684 }1685 RT_UNLOCK;16861687 c->skb2 = dev_alloc_skb(c->mtu);1688- if(c->skb2==NULL)1689 printk(KERN_WARNING "%s: memory squeeze.\n",1690- c->netdevice->name);1691 else1692- {1693- skb_put(c->skb2,c->mtu);1694- }1695- c->stats.rx_packets++;1696- c->stats.rx_bytes+=ct;1697-1698 }1699 /*1700 * If we received a frame we must now process it.1701 */1702- if(skb)1703- {1704 skb_trim(skb, ct);1705- c->rx_function(c,skb);1706- }1707- else1708- {1709- c->stats.rx_dropped++;1710 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);1711 }1712}···1709 * Returns true if the buffer cross a DMA boundary on a PC. The poor1710 * thing can only DMA within a 64K block not across the edges of it.1711 */1712-1713static inline int spans_boundary(struct sk_buff *skb)1714{1715 unsigned long a=(unsigned long)skb->data;···1777}17781779EXPORT_SYMBOL(z8530_queue_xmit);1780-1781-/**1782- * z8530_get_stats - Get network statistics1783- * @c: The channel to use1784- *1785- * Get the statistics block. We keep the statistics in software as1786- * the chip doesn't do it for us.1787- *1788- * Locking is ignored here - we could lock for a copy but its1789- * not likely to be that big an issue1790- */1791-1792-struct net_device_stats *z8530_get_stats(struct z8530_channel *c)1793-{1794- return &c->stats;1795-}1796-1797-EXPORT_SYMBOL(z8530_get_stats);17981799/*1800 * Module support
···43#include <linux/netdevice.h>44#include <linux/if_arp.h>45#include <linux/delay.h>46+#include <linux/hdlc.h>47#include <linux/ioport.h>48#include <linux/init.h>49#include <asm/dma.h>···51#define RT_UNLOCK52#include <linux/spinlock.h>53054#include "z85230.h"5556···440 * A status event occurred in PIO synchronous mode. There are several441 * reasons the chip will bother us here. A transmit underrun means we442 * failed to feed the chip fast enough and just broke a packet. A DCD443+ * change is a line up or down.0444 */445446static void z8530_status(struct z8530_channel *chan)447{448 u8 status, altered;449450+ status = read_zsreg(chan, R0);451+ altered = chan->status ^ status;452+453+ chan->status = status;454+455+ if (status & TxEOM) {0456/* printk("%s: Tx underrun.\n", chan->dev->name); */457+ chan->netdevice->stats.tx_fifo_errors++;458 write_zsctrl(chan, ERR_RES);459 z8530_tx_done(chan);460 }461+462+ if (altered & chan->dcdcheck)463 {464+ if (status & chan->dcdcheck) {0465 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);466+ write_zsreg(chan, R3, chan->regs[3] | RxENABLE);467+ if (chan->netdevice)468+ netif_carrier_on(chan->netdevice);469+ } else {0000470 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);471+ write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);472 z8530_flush_fifo(chan);473+ if (chan->netdevice)474+ netif_carrier_off(chan->netdevice);475 }476+477+ }478 write_zsctrl(chan, RES_EXT_INT);479 write_zsctrl(chan, RES_H_IUS);480}481482+struct z8530_irqhandler z8530_sync =483{484 z8530_rx,485 z8530_tx,···556 * 557 * A status event occurred on the Z8530. We receive these for two reasons558 * when in DMA mode. Firstly if we finished a packet transfer we get one559+ * and kick the next packet out. Secondly we may see a DCD change.0560 *561 */562···586 }587 }588589+ if (altered & chan->dcdcheck)590 {591+ if (status & chan->dcdcheck) {0592 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);593+ write_zsreg(chan, R3, chan->regs[3] | RxENABLE);594+ if (chan->netdevice)595+ netif_carrier_on(chan->netdevice);596+ } else {0000597 printk(KERN_INFO "%s:DCD lost\n", chan->dev->name);598+ write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);599 z8530_flush_fifo(chan);600+ if (chan->netdevice)601+ netif_carrier_off(chan->netdevice);602 }603+ }604605 write_zsctrl(chan, RES_EXT_INT);606 write_zsctrl(chan, RES_H_IUS);···1459 /*1460 * Check if we crapped out.1461 */1462+ if (get_dma_residue(c->txdma))1463 {1464+ c->netdevice->stats.tx_dropped++;1465+ c->netdevice->stats.tx_fifo_errors++;1466 }1467 release_dma_lock(flags);1468 }···1534 * packet. This code is fairly timing sensitive.1535 *1536 * Called with the register lock held.1537+ */1538+1539static void z8530_tx_done(struct z8530_channel *c)1540{1541 struct sk_buff *skb;15421543 /* Actually this can happen.*/1544+ if (c->tx_skb == NULL)1545 return;15461547+ skb = c->tx_skb;1548+ c->tx_skb = NULL;1549 z8530_tx_begin(c);1550+ c->netdevice->stats.tx_packets++;1551+ c->netdevice->stats.tx_bytes += skb->len;1552 dev_kfree_skb_irq(skb);1553}1554···1558 * @skb: The buffer1559 *1560 * We point the receive handler at this function when idle. Instead1561+ * of processing the frames we get to throw them away.1562 */15631564void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)···1635 else1636 /* Can't occur as we dont reenable the DMA irq until1637 after the flip is done */1638+ printk(KERN_WARNING "%s: DMA flip overrun!\n",1639+ c->netdevice->name);1640+1641 release_dma_lock(flags);1642+1643 /*1644 * Shove the old buffer into an sk_buff. 
We can't DMA1645 * directly into one on a PC - it might be above the 16Mb···1646 * can avoid the copy. Optimisation 2 - make the memcpy1647 * a copychecksum.1648 */1649+1650+ skb = dev_alloc_skb(ct);1651+ if (skb == NULL) {1652+ c->netdevice->stats.rx_dropped++;1653+ printk(KERN_WARNING "%s: Memory squeeze.\n",1654+ c->netdevice->name);1655+ } else {001656 skb_put(skb, ct);1657 skb_copy_to_linear_data(skb, rxb, ct);1658+ c->netdevice->stats.rx_packets++;1659+ c->netdevice->stats.rx_bytes += ct;1660 }1661+ c->dma_ready = 1;1662+ } else {1663+ RT_LOCK;1664+ skb = c->skb;1665+001666 /*1667 * The game we play for non DMA is similar. We want to1668 * get the controller set up for the next packet as fast···1677 * if you build a system where the sync irq isnt blocked1678 * by the kernel IRQ disable then you need only block the1679 * sync IRQ for the RT_LOCK area.1680+ *1681 */1682 ct=c->count;1683+1684 c->skb = c->skb2;1685 c->count = 0;1686 c->max = c->mtu;1687+ if (c->skb) {01688 c->dptr = c->skb->data;1689 c->max = c->mtu;1690+ } else {1691+ c->count = 0;001692 c->max = 0;1693 }1694 RT_UNLOCK;16951696 c->skb2 = dev_alloc_skb(c->mtu);1697+ if (c->skb2 == NULL)1698 printk(KERN_WARNING "%s: memory squeeze.\n",1699+ c->netdevice->name);1700 else1701+ skb_put(c->skb2, c->mtu);1702+ c->netdevice->stats.rx_packets++;1703+ c->netdevice->stats.rx_bytes += ct;0001704 }1705 /*1706 * If we received a frame we must now process it.1707 */1708+ if (skb) {01709 skb_trim(skb, ct);1710+ c->rx_function(c, skb);1711+ } else {1712+ c->netdevice->stats.rx_dropped++;001713 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);1714 }1715}···1730 * Returns true if the buffer cross a DMA boundary on a PC. The poor1731 * thing can only DMA within a 64K block not across the edges of it.1732 */1733+1734static inline int spans_boundary(struct sk_buff *skb)1735{1736 unsigned long a=(unsigned long)skb->data;···1798}17991800EXPORT_SYMBOL(z8530_queue_xmit);00000000000000000018011802/*1803 * Module support
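Note how the DCD handling changes in z8530_status() and the DMA status path: instead of calling sppp_reopen() to force a PPP renegotiation, the channel code reports line state through the generic carrier flag and lets whatever protocol is attached react to the carrier event. Those two calls are all a driver needs:

#include <linux/netdevice.h>

/* Sketch: map a modem-status (DCD) transition onto the net core's
 * carrier state; upper layers observe it via netif_carrier_ok(). */
static void example_dcd_change(struct net_device *dev, int dcd_up)
{
	if (dcd_up)
		netif_carrier_on(dev);	/* line up: protocol may (re)start */
	else
		netif_carrier_off(dev);	/* line down: protocol stops */
}

The removal of z8530_get_stats() above is the same story: the counters now live in c->netdevice->stats, so the separate accessor has nothing left to return.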
+4-6
drivers/net/wan/z85230.h
···325326 void *private; /* For our owner */327 struct net_device *netdevice; /* Network layer device */328- struct net_device_stats stats; /* Network layer statistics */329330 /*331 * Async features···365 unsigned char tx_active; /* character is being xmitted */366 unsigned char tx_stopped; /* output is suspended */367368- spinlock_t *lock; /* Devicr lock */369-}; 370371/*372 * Each Z853x0 device.373- */ 374-375struct z8530_dev376{377 char *name; /* Device instance name */···407extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);408extern int z8530_channel_load(struct z8530_channel *, u8 *);409extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);410-extern struct net_device_stats *z8530_get_stats(struct z8530_channel *c);411extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);412413
···325326 void *private; /* For our owner */327 struct net_device *netdevice; /* Network layer device */0328329 /*330 * Async features···366 unsigned char tx_active; /* character is being xmitted */367 unsigned char tx_stopped; /* output is suspended */368369+ spinlock_t *lock; /* Device lock */370+};371372/*373 * Each Z853x0 device.374+ */375+376struct z8530_dev377{378 char *name; /* Device instance name */···408extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);409extern int z8530_channel_load(struct z8530_channel *, u8 *);410extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);0411extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);412413
-7
drivers/net/wireless/orinoco.c
···1998 else1999 priv->mc_count = mc_count;2000 }2001-2002- /* Since we can set the promiscuous flag when it wasn't asked2003- for, make sure the net_device knows about it. */2004- if (priv->promiscuous)2005- dev->flags |= IFF_PROMISC;2006- else2007- dev->flags &= ~IFF_PROMISC;2008}20092010/* This must be called from user context, without locks held - use
···1998 else1999 priv->mc_count = mc_count;2000 }00000002001}20022003/* This must be called from user context, without locks held - use
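The deleted orinoco lines (and the matching wavelan hunks below) wrote IFF_PROMISC or IFF_ALLMULTI back into dev->flags from inside the multicast callback. Drivers are expected to treat dev->flags as read-only there and only program the hardware to match. A sketch of the expected shape; the hw_* hooks are purely hypothetical:

#include <linux/netdevice.h>

/* Hypothetical hardware hooks, for illustration only. */
static void hw_set_promisc(struct net_device *dev, int on);
static void hw_set_all_multicast(struct net_device *dev);
static void hw_load_filter(struct net_device *dev,
			   struct dev_mc_list *mc, int count);

/* Sketch: a set_multicast_list that reads, but never writes, dev->flags. */
static void example_set_multicast_list(struct net_device *dev)
{
	if (dev->flags & IFF_PROMISC)
		hw_set_promisc(dev, 1);
	else if (dev->flags & IFF_ALLMULTI)
		hw_set_all_multicast(dev);
	else
		hw_load_filter(dev, dev->mc_list, dev->mc_count);
}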
-3
drivers/net/wireless/wavelan.c
···1409 lp->mc_count = 0;14101411 wv_82586_reconfig(dev);1412-1413- /* Tell the kernel that we are doing a really bad job. */1414- dev->flags |= IFF_PROMISC;1415 }1416 } else1417 /* Are there multicast addresses to send? */
···1409 lp->mc_count = 0;14101411 wv_82586_reconfig(dev);0001412 }1413 } else1414 /* Are there multicast addresses to send? */
-6
drivers/net/wireless/wavelan_cs.c
···1412 lp->mc_count = 0;14131414 wv_82593_reconfig(dev);1415-1416- /* Tell the kernel that we are doing a really bad job... */1417- dev->flags |= IFF_PROMISC;1418 }1419 }1420 else···1430 lp->mc_count = 0;14311432 wv_82593_reconfig(dev);1433-1434- /* Tell the kernel that we are doing a really bad job... */1435- dev->flags |= IFF_ALLMULTI;1436 }1437 }1438 else
···1412 lp->mc_count = 0;14131414 wv_82593_reconfig(dev);0001415 }1416 }1417 else···1427 lp->mc_count = 0;14281429 wv_82593_reconfig(dev);0001430 }1431 }1432 else
include/linux/ethtool.h
···27 __u8 autoneg; /* Enable or disable autonegotiation */28 __u32 maxtxpkt; /* Tx pkts before generating tx int */29 __u32 maxrxpkt; /* Rx pkts before generating rx int */30- __u32 reserved[4];0031};00000000000003233#define ETHTOOL_BUSINFO_LEN 3234/* these strings are set to whatever the driver author decides... */
···27 __u8 autoneg; /* Enable or disable autonegotiation */28 __u32 maxtxpkt; /* Tx pkts before generating tx int */29 __u32 maxrxpkt; /* Rx pkts before generating rx int */30+ __u16 speed_hi;31+ __u16 reserved2;32+ __u32 reserved[3];33};34+35+static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep,36+ __u32 speed)37+{38+39+ ep->speed = (__u16)speed;40+ ep->speed_hi = (__u16)(speed >> 16);41+}42+43+static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep)44+{45+ return (ep->speed_hi << 16) | ep->speed;46+}4748#define ETHTOOL_BUSINFO_LEN 3249/* these strings are set to whatever the driver author decides... */
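The new speed_hi field extends the reported link speed past the old 16-bit limit without changing the structure size (it takes its space from reserved[]), and the two inline helpers keep callers from touching the split halves directly. A usage sketch with a hypothetical value too large for the old field:

#include <linux/ethtool.h>

/* Sketch: report a wide link speed through the split speed fields,
 * using the accessors introduced above. */
static void example_fill_speed(struct ethtool_cmd *ecmd)
{
	ethtool_cmd_speed_set(ecmd, 100000);	/* 100000 Mb/s = 100 Gb/s */
}

/* ...and recombine it on the reading side. */
static __u32 example_read_speed(struct ethtool_cmd *ecmd)
{
	return ethtool_cmd_speed(ecmd);		/* (speed_hi << 16) | speed */
}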