Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (39 commits)
gigaset: fix build failure
bridge: Make first arg to deliver_clone const.
bridge br_multicast: Don't refer to BR_INPUT_SKB_CB(skb)->mrouters_only without IGMP snooping.
route: Fix caught BUG_ON during rt_secret_rebuild_oneshot()
bridge br_multicast: Fix skb leakage in error path.
bridge br_multicast: Fix handling of Max Response Code in IGMPv3 message.
NET: netpoll, fix potential NULL ptr dereference
tipc: fix lockdep warning on address assignment
l2tp: Fix UDP socket reference count bugs in the pppol2tp driver
smsc95xx: wait for PHY to complete reset during init
l2tp: Fix oops in pppol2tp_xmit
smsc75xx: SMSC LAN75xx USB gigabit ethernet adapter driver
ne: Do not use slashes in irq name string
NET: ksz884x, fix lock imbalance
gigaset: correct range checking off by one error
bridge: Fix br_forward crash in promiscuous mode
bridge: Move NULL mdb check into br_mdb_ip_get
ISDN: Add PCI ID for HFC-2S/4S Beronet Card PCIe
net-2.6 [Bug-Fix][dccp]: fix oops caused after failed initialisation
myri: remove dead code
...

+1986 -215
+2
Documentation/networking/Makefile
···
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
+
+obj-m := timestamping/
+9 -2
Documentation/networking/timestamping/Makefile
···
-CPPFLAGS = -I../../../include
+# kbuild trick to avoid linker error. Can be omitted if a module is built.
+obj- := dummy.o
 
-timestamping: timestamping.c
+# List of programs to build
+hostprogs-y := timestamping
+
+# Tell kbuild to always build the programs
+always := $(hostprogs-y)
+
+HOSTCFLAGS_timestamping.o += -I$(objtree)/usr/include
 
 clean:
 	rm -f timestamping
+5 -5
Documentation/networking/timestamping/timestamping.c
···
 #include <arpa/inet.h>
 #include <net/if.h>
 
-#include "asm/types.h"
-#include "linux/net_tstamp.h"
-#include "linux/errqueue.h"
+#include <asm/types.h>
+#include <linux/net_tstamp.h>
+#include <linux/errqueue.h>
 
 #ifndef SO_TIMESTAMPING
 # define SO_TIMESTAMPING 37
···
 
 	gettimeofday(&now, 0);
 
-	printf("%ld.%06ld: received %s data, %d bytes from %s, %d bytes control messages\n",
+	printf("%ld.%06ld: received %s data, %d bytes from %s, %zu bytes control messages\n",
 	       (long)now.tv_sec, (long)now.tv_usec,
 	       (recvmsg_flags & MSG_ERRQUEUE) ? "error" : "regular",
 	       res,
···
 	for (cmsg = CMSG_FIRSTHDR(msg);
 	     cmsg;
 	     cmsg = CMSG_NXTHDR(msg, cmsg)) {
-		printf(" cmsg len %d: ", cmsg->cmsg_len);
+		printf(" cmsg len %zu: ", cmsg->cmsg_len);
 		switch (cmsg->cmsg_level) {
 		case SOL_SOCKET:
 			printf("SOL_SOCKET ");
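The two printf changes above swap %d for %zu because the lengths involved are size_t, not int. A minimal standalone sketch of the rule (the variable here is purely illustrative):

	#include <stdio.h>
	#include <stddef.h>

	int main(void)
	{
		size_t len = sizeof(long double);	/* a size_t value, not an int */

		/* %zu is the C99 conversion for size_t; %d risks a width mismatch on 64-bit */
		printf("len is %zu bytes\n", len);
		return 0;
	}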
+3 -4
drivers/atm/lanai.c
···
 		const unsigned long *lp,
 		void (*func)(struct lanai_dev *,vci_t vci))
 {
-	vci_t vci = find_first_bit(lp, NUM_VCI);
-	while (vci < NUM_VCI) {
+	vci_t vci;
+
+	for_each_set_bit(vci, lp, NUM_VCI)
 		func(lanai, vci);
-		vci = find_next_bit(lp, NUM_VCI, vci + 1);
-	}
 }
 
 /* -------------------- BUFFER UTILITIES: */
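The lanai.c change replaces an open-coded bitmap walk with the for_each_set_bit() helper. As a rough sketch (simplified from the kernel's bitops header), the macro wraps essentially the same find_first_bit()/find_next_bit() loop that was removed:

	/* Sketch of the for_each_set_bit() pattern used above */
	#define for_each_set_bit(bit, addr, size)			\
		for ((bit) = find_first_bit((addr), (size));		\
		     (bit) < (size);					\
		     (bit) = find_next_bit((addr), (size), (bit) + 1))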
+26 -20
drivers/isdn/gigaset/capi.c
···
 	}
 
 	/* check parameter: CIP Value */
-	if (cmsg->CIPValue > ARRAY_SIZE(cip2bchlc) ||
+	if (cmsg->CIPValue >= ARRAY_SIZE(cip2bchlc) ||
 	    (cmsg->CIPValue > 0 && cip2bchlc[cmsg->CIPValue].bc == NULL)) {
 		dev_notice(cs->dev, "%s: unknown CIP value %d\n",
 			   "CONNECT_REQ", cmsg->CIPValue);
···
 	.release = single_release,
 };
 
-static struct capi_driver capi_driver_gigaset = {
-	.name = "gigaset",
-	.revision = "1.0",
-};
-
 /**
- * gigaset_isdn_register() - register to LL
+ * gigaset_isdn_regdev() - register device to LL
  * @cs: device descriptor structure.
  * @isdnid: device name.
  *
- * Called by main module to register the device with the LL.
- *
  * Return value: 1 for success, 0 for failure
  */
-int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
+int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
 {
 	struct gigaset_capi_ctr *iif;
 	int rc;
-
-	pr_info("Kernel CAPI interface\n");
 
 	iif = kmalloc(sizeof(*iif), GFP_KERNEL);
 	if (!iif) {
 		pr_err("%s: out of memory\n", __func__);
 		return 0;
 	}
-
-	/* register driver with CAPI (ToDo: what for?) */
-	register_capi_driver(&capi_driver_gigaset);
 
 	/* prepare controller structure */
 	iif->ctr.owner = THIS_MODULE;
···
 	rc = attach_capi_ctr(&iif->ctr);
 	if (rc) {
 		pr_err("attach_capi_ctr failed (%d)\n", rc);
-		unregister_capi_driver(&capi_driver_gigaset);
 		kfree(iif);
 		return 0;
 	}
···
 }
 
 /**
- * gigaset_isdn_unregister() - unregister from LL
+ * gigaset_isdn_unregdev() - unregister device from LL
  * @cs: device descriptor structure.
- *
- * Called by main module to unregister the device from the LL.
  */
-void gigaset_isdn_unregister(struct cardstate *cs)
+void gigaset_isdn_unregdev(struct cardstate *cs)
 {
 	struct gigaset_capi_ctr *iif = cs->iif;
 
 	detach_capi_ctr(&iif->ctr);
 	kfree(iif);
 	cs->iif = NULL;
+}
+
+static struct capi_driver capi_driver_gigaset = {
+	.name = "gigaset",
+	.revision = "1.0",
+};
+
+/**
+ * gigaset_isdn_regdrv() - register driver to LL
+ */
+void gigaset_isdn_regdrv(void)
+{
+	pr_info("Kernel CAPI interface\n");
+	register_capi_driver(&capi_driver_gigaset);
+}
+
+/**
+ * gigaset_isdn_unregdrv() - unregister driver from LL
+ */
+void gigaset_isdn_unregdrv(void)
+{
 	unregister_capi_driver(&capi_driver_gigaset);
 }
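The first hunk is the off-by-one fix from the shortlog ("gigaset: correct range checking off by one error"): valid indices run from 0 to ARRAY_SIZE-1, so the reject test must use >=. A minimal standalone sketch of the rule, with a hypothetical lookup table:

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	static const char *cip2text[4] = { "none", "speech", "3.1kHz", "fax" };

	/* With '>' an index equal to ARRAY_SIZE slips through and reads one
	 * element past the end of the table; '>=' rejects it. */
	static const char *lookup(unsigned int cip)
	{
		if (cip >= ARRAY_SIZE(cip2text))
			return "unknown";
		return cip2text[cip];
	}

	int main(void)
	{
		printf("%s\n", lookup(4));	/* "unknown", not an out-of-bounds read */
		return 0;
	}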
+4 -2
drivers/isdn/gigaset/common.c
···
 	case 2: /* error in initcshw */
 		/* Deregister from LL */
 		make_invalid(cs, VALID_ID);
-		gigaset_isdn_unregister(cs);
+		gigaset_isdn_unregdev(cs);
 
 		/* fall through */
 	case 1: /* error when registering to LL */
···
 	cs->cmdbytes = 0;
 
 	gig_dbg(DEBUG_INIT, "setting up iif");
-	if (!gigaset_isdn_register(cs, modulename)) {
+	if (!gigaset_isdn_regdev(cs, modulename)) {
 		pr_err("error registering ISDN device\n");
 		goto error;
 	}
···
 	gigaset_debuglevel = DEBUG_DEFAULT;
 
 	pr_info(DRIVER_DESC DRIVER_DESC_DEBUG "\n");
+	gigaset_isdn_regdrv();
 	return 0;
 }
 
 static void __exit gigaset_exit_module(void)
 {
+	gigaset_isdn_unregdrv();
 }
 
 module_init(gigaset_init_module);
+11 -3
drivers/isdn/gigaset/dummyll.c
···
 {
 }
 
-int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
+int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
 {
-	pr_info("no ISDN subsystem interface\n");
 	return 1;
 }
 
-void gigaset_isdn_unregister(struct cardstate *cs)
+void gigaset_isdn_unregdev(struct cardstate *cs)
+{
+}
+
+void gigaset_isdn_regdrv(void)
+{
+	pr_info("no ISDN subsystem interface\n");
+}
+
+void gigaset_isdn_unregdrv(void)
 {
 }
+4 -8
drivers/isdn/gigaset/ev-layer.c
···
 		 * note that bcs may be NULL if no B channel is free
 		 */
 		at_state2->ConState = 700;
-		kfree(at_state2->str_var[STR_NMBR]);
-		at_state2->str_var[STR_NMBR] = NULL;
-		kfree(at_state2->str_var[STR_ZCPN]);
-		at_state2->str_var[STR_ZCPN] = NULL;
-		kfree(at_state2->str_var[STR_ZBC]);
-		at_state2->str_var[STR_ZBC] = NULL;
-		kfree(at_state2->str_var[STR_ZHLC]);
-		at_state2->str_var[STR_ZHLC] = NULL;
+		for (i = 0; i < STR_NUM; ++i) {
+			kfree(at_state2->str_var[i]);
+			at_state2->str_var[i] = NULL;
+		}
 		at_state2->int_var[VAR_ZCTP] = -1;
 
 		spin_lock_irqsave(&cs->lock, flags);
+4 -2
drivers/isdn/gigaset/gigaset.h
···
  */
 
 /* Called from common.c for setting up/shutting down with the ISDN subsystem */
-int gigaset_isdn_register(struct cardstate *cs, const char *isdnid);
-void gigaset_isdn_unregister(struct cardstate *cs);
+void gigaset_isdn_regdrv(void);
+void gigaset_isdn_unregdrv(void);
+int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid);
+void gigaset_isdn_unregdev(struct cardstate *cs);
 
 /* Called from hardware module to indicate completion of an skb */
 void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
+20 -8
drivers/isdn/gigaset/i4l.c
···
 }
 
 /**
- * gigaset_isdn_register() - register to LL
+ * gigaset_isdn_regdev() - register to LL
  * @cs: device descriptor structure.
  * @isdnid: device name.
  *
- * Called by main module to register the device with the LL.
- *
  * Return value: 1 for success, 0 for failure
  */
-int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
+int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
 {
 	isdn_if *iif;
 
···
 }
 
 /**
- * gigaset_isdn_unregister() - unregister from LL
+ * gigaset_isdn_unregdev() - unregister device from LL
  * @cs: device descriptor structure.
- *
- * Called by main module to unregister the device from the LL.
  */
-void gigaset_isdn_unregister(struct cardstate *cs)
+void gigaset_isdn_unregdev(struct cardstate *cs)
 {
 	gig_dbg(DEBUG_CMD, "sending UNLOAD");
 	gigaset_i4l_cmd(cs, ISDN_STAT_UNLOAD);
 	kfree(cs->iif);
 	cs->iif = NULL;
+}
+
+/**
+ * gigaset_isdn_regdrv() - register driver to LL
+ */
+void gigaset_isdn_regdrv(void)
+{
+	/* nothing to do */
+}
+
+/**
+ * gigaset_isdn_unregdrv() - unregister driver from LL
+ */
+void gigaset_isdn_unregdrv(void)
+{
+	/* nothing to do */
 }
-1
drivers/isdn/gigaset/interface.c
···
 	if (tty == NULL)
 		gig_dbg(DEBUG_IF, "receive on closed device");
 	else {
-		tty_buffer_request_room(tty, len);
 		tty_insert_flip_string(tty, buffer, len);
 		tty_flip_buffer_push(tty);
 	}
+17 -18
drivers/isdn/hardware/eicon/message.c
··· 2754 2754 for (i = 0; i < w; i++) 2755 2755 ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id[i] = fax_parms[4].info[1+i]; 2756 2756 ((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0; 2757 - len = offsetof(T30_INFO, station_id) + 20; 2757 + len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH; 2758 2758 w = fax_parms[5].length; 2759 2759 if (w > 20) 2760 2760 w = 20; ··· 2892 2892 && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF) 2893 2893 && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP)) 2894 2894 { 2895 - len = offsetof(T30_INFO, station_id) + 20; 2895 + len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH; 2896 2896 if (plci->fax_connect_info_length < len) 2897 2897 { 2898 2898 ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0; ··· 3802 3802 break; 3803 3803 } 3804 3804 ncpi = &m_parms[1]; 3805 - len = offsetof(T30_INFO, station_id) + 20; 3805 + len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH; 3806 3806 if (plci->fax_connect_info_length < len) 3807 3807 { 3808 3808 ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0; ··· 6830 6830 if(((T30_INFO *)plci->NL.RBuffer->P)->station_id_len) 6831 6831 { 6832 6832 plci->ncpi_buffer[len] = 20; 6833 - for (i = 0; i < 20; i++) 6833 + for (i = 0; i < T30_MAX_STATION_ID_LENGTH; i++) 6834 6834 plci->ncpi_buffer[++len] = ((T30_INFO *)plci->NL.RBuffer->P)->station_id[i]; 6835 6835 } 6836 6836 if (((plci->NL.Ind & 0x0f) == N_DISC) || ((plci->NL.Ind & 0x0f) == N_DISC_ACK)) ··· 6844 6844 if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id-1]) 6845 6845 & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD))) 6846 6846 { 6847 - i = offsetof(T30_INFO, station_id) + 20 + ((T30_INFO *)plci->NL.RBuffer->P)->head_line_len; 6847 + i = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + ((T30_INFO *)plci->NL.RBuffer->P)->head_line_len; 6848 6848 while (i < plci->NL.RBuffer->length) 6849 6849 plci->ncpi_buffer[++len] = plci->NL.RBuffer->P[i++]; 6850 6850 } ··· 8400 8400 } 8401 8401 } 8402 8402 /* copy station id to NLC */ 8403 - for(i=0; i<20; i++) 8403 + for(i=0; i < T30_MAX_STATION_ID_LENGTH; i++) 8404 8404 { 8405 8405 if(i<b3_config_parms[2].length) 8406 8406 { ··· 8411 8411 ((T30_INFO *)&nlc[1])->station_id[i] = ' '; 8412 8412 } 8413 8413 } 8414 - ((T30_INFO *)&nlc[1])->station_id_len = 20; 8414 + ((T30_INFO *)&nlc[1])->station_id_len = T30_MAX_STATION_ID_LENGTH; 8415 8415 /* copy head line to NLC */ 8416 8416 if(b3_config_parms[3].length) 8417 8417 { 8418 8418 8419 - pos = (byte)(fax_head_line_time (&(((T30_INFO *)&nlc[1])->station_id[20]))); 8419 + pos = (byte)(fax_head_line_time (&(((T30_INFO *)&nlc[1])->station_id[T30_MAX_STATION_ID_LENGTH]))); 8420 8420 if (pos != 0) 8421 8421 { 8422 8422 if (CAPI_MAX_DATE_TIME_LENGTH + 2 + b3_config_parms[3].length > CAPI_MAX_HEAD_LINE_SPACE) 8423 8423 pos = 0; 8424 8424 else 8425 8425 { 8426 - ((T30_INFO *)&nlc[1])->station_id[20 + pos++] = ' '; 8427 - ((T30_INFO *)&nlc[1])->station_id[20 + pos++] = ' '; 8426 + nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' '; 8427 + nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' '; 8428 8428 len = (byte)b3_config_parms[2].length; 8429 8429 if (len > 20) 8430 8430 len = 20; 8431 8431 if (CAPI_MAX_DATE_TIME_LENGTH + 2 + len + 2 + b3_config_parms[3].length <= CAPI_MAX_HEAD_LINE_SPACE) 8432 8432 { 8433 8433 for (i = 0; i < len; 
i++) 8434 - ((T30_INFO *)&nlc[1])->station_id[20 + pos++] = ((byte *)b3_config_parms[2].info)[1+i]; 8435 - ((T30_INFO *)&nlc[1])->station_id[20 + pos++] = ' '; 8436 - ((T30_INFO *)&nlc[1])->station_id[20 + pos++] = ' '; 8434 + nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ((byte *)b3_config_parms[2].info)[1+i]; 8435 + nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' '; 8436 + nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' '; 8437 8437 } 8438 8438 } 8439 8439 } ··· 8444 8444 ((T30_INFO *)&nlc[1])->head_line_len = (byte)(pos + len); 8445 8445 nlc[0] += (byte)(pos + len); 8446 8446 for (i = 0; i < len; i++) 8447 - ((T30_INFO *)&nlc[1])->station_id[20 + pos++] = ((byte *)b3_config_parms[3].info)[1+i]; 8448 - } 8449 - else 8447 + nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ((byte *)b3_config_parms[3].info)[1+i]; 8448 + } else 8450 8449 ((T30_INFO *)&nlc[1])->head_line_len = 0; 8451 8450 8452 8451 plci->nsf_control_bits = 0; ··· 8472 8473 fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SEL_POLLING; 8473 8474 } 8474 8475 len = nlc[0]; 8475 - pos = offsetof(T30_INFO, station_id) + 20; 8476 + pos = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH; 8476 8477 if (pos < plci->fax_connect_info_length) 8477 8478 { 8478 8479 for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--) ··· 8524 8525 } 8525 8526 8526 8527 PUT_WORD(&(((T30_INFO *)&nlc[1])->control_bits_low), fax_control_bits); 8527 - len = offsetof(T30_INFO, station_id) + 20; 8528 + len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH; 8528 8529 for (i = 0; i < len; i++) 8529 8530 plci->fax_connect_info_buffer[i] = nlc[1+i]; 8530 8531 ((T30_INFO *) plci->fax_connect_info_buffer)->head_line_len = 0;
+6
drivers/isdn/hardware/mISDN/hfcmulti.c
···
 /*31*/	{VENDOR_CCD, "XHFC-4S Speech Design", 5, 4, 0, 0, 0, 0,
 		HFC_IO_MODE_EMBSD, XHFC_IRQ},
 /*32*/	{VENDOR_JH, "HFC-8S (junghanns)", 8, 8, 1, 0, 0, 0, 0, 0},
+/*33*/	{VENDOR_BN, "HFC-2S Beronet Card PCIe", 4, 2, 1, 3, 0, DIP_4S, 0, 0},
+/*34*/	{VENDOR_BN, "HFC-4S Beronet Card PCIe", 4, 4, 1, 2, 0, DIP_4S, 0, 0},
 };
 
 #undef H
···
 		PCI_SUBDEVICE_ID_CCD_OV4S, 0, 0, H(28)}, /* OpenVox 4 */
 	{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
 		PCI_SUBDEVICE_ID_CCD_OV2S, 0, 0, H(29)}, /* OpenVox 2 */
+	{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
+		0xb761, 0, 0, H(33)}, /* BN2S PCIe */
+	{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
+		0xb762, 0, 0, H(34)}, /* BN4S PCIe */
 
 	/* Cards with HFC-8S Chip */
 	{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
+1 -1
drivers/isdn/hysdn/hysdn_boot.c
···
 			 (boot->pof_recid == TAG_CABSDATA) ? "CABSDATA" : "ABSDATA",
 			 datlen, boot->pof_recoffset);
 
-		if ((boot->last_error = card->writebootseq(card, boot->buf.BootBuf, datlen) < 0))
+		if ((boot->last_error = card->writebootseq(card, boot->buf.BootBuf, datlen)) < 0)
 			return (boot->last_error); /* error writing data */
 
 		if (boot->pof_recoffset + datlen >= boot->pof_reclen)
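The hysdn_boot.c hunk is a one-character parenthesis move: '<' binds tighter than '=', so the old form stored the 0/1 result of the comparison in last_error instead of the (possibly negative) return value. A minimal sketch of the pitfall, with hypothetical names:

	#include <stdio.h>

	static int write_seq(void)
	{
		return -5;	/* pretend the hardware reported an error */
	}

	int main(void)
	{
		int last_error;

		/* Buggy: parsed as last_error = (write_seq() < 0), so last_error is 1 */
		if ((last_error = write_seq() < 0))
			printf("buggy form saved %d\n", last_error);

		/* Fixed: assign first, then compare, so last_error keeps -5 */
		if ((last_error = write_seq()) < 0)
			printf("fixed form saved %d\n", last_error);

		return 0;
	}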
+1 -1
drivers/net/benet/be_cmds.c
···
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 			OPCODE_COMMON_MCC_CREATE, sizeof(*req));
 
-	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 
 	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
 	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
+6 -4
drivers/net/bnx2x_main.c
···
 	u16 prod;
 	u16 cons;
 
-	barrier(); /* Tell compiler that prod and cons can change */
 	prod = fp->tx_bd_prod;
 	cons = fp->tx_bd_cons;
 
···
 	 * start_xmit() will miss it and cause the queue to be stopped
 	 * forever.
 	 */
-	smp_wmb();
+	smp_mb();
 
 	/* TBD need a thresh? */
 	if (unlikely(netif_tx_queue_stopped(txq))) {
···
 
 	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
 		netif_tx_stop_queue(txq);
-		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
-		   if we put Tx into XOFF state. */
+
+		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
+		 * ordering of set_bit() in netif_tx_stop_queue() and read of
+		 * fp->bd_tx_cons */
 		smp_mb();
+
 		fp->eth_q_stats.driver_xoff++;
 		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
 			netif_tx_wake_queue(txq);
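The bnx2x hunks pair a full barrier on the stop side with one on the completion side: each path publishes its own write (stop bit, or consumer index), issues a full barrier, then reads the other side's variable, so at least one of the two observes the other and the queue cannot stay stopped forever. A compressed userspace sketch of that pairing, with hypothetical field names and C11 fences standing in for smp_mb():

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct ring {
		atomic_uint cons;	/* advanced by the completion path */
		atomic_bool stopped;	/* set by the transmit path */
		unsigned int prod;
	};

	static bool ring_has_room(struct ring *r)
	{
		return r->prod - atomic_load_explicit(&r->cons, memory_order_relaxed) < 8;
	}

	static void xmit_path(struct ring *r)
	{
		atomic_store_explicit(&r->stopped, true, memory_order_relaxed);
		/* full barrier: publish the stop before re-reading cons */
		atomic_thread_fence(memory_order_seq_cst);
		if (ring_has_room(r))
			atomic_store_explicit(&r->stopped, false, memory_order_relaxed); /* wake */
	}

	static void completion_path(struct ring *r)
	{
		atomic_fetch_add_explicit(&r->cons, 1, memory_order_relaxed);
		/* paired full barrier: publish cons before reading 'stopped' */
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load_explicit(&r->stopped, memory_order_relaxed) && ring_has_room(r))
			atomic_store_explicit(&r->stopped, false, memory_order_relaxed); /* wake */
	}

	int main(void)
	{
		struct ring r = { .prod = 4 };

		xmit_path(&r);
		completion_path(&r);
		printf("stopped=%d\n", (int)atomic_load(&r.stopped));
		return 0;
	}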
+42 -30
drivers/net/davinci_emac.c
··· 29 29 * PHY layer usage 30 30 */ 31 31 32 - /** Pending Items in this driver: 33 - * 1. Use Linux cache infrastcture for DMA'ed memory (dma_xxx functions) 34 - */ 35 - 36 32 #include <linux/module.h> 37 33 #include <linux/kernel.h> 38 34 #include <linux/sched.h> ··· 500 504 501 505 /* Cache macros - Packet buffers would be from skb pool which is cached */ 502 506 #define EMAC_VIRT_NOCACHE(addr) (addr) 503 - #define EMAC_CACHE_INVALIDATE(addr, size) \ 504 - dma_cache_maint((void *)addr, size, DMA_FROM_DEVICE) 505 - #define EMAC_CACHE_WRITEBACK(addr, size) \ 506 - dma_cache_maint((void *)addr, size, DMA_TO_DEVICE) 507 - #define EMAC_CACHE_WRITEBACK_INVALIDATE(addr, size) \ 508 - dma_cache_maint((void *)addr, size, DMA_BIDIRECTIONAL) 509 507 510 508 /* DM644x does not have BD's in cached memory - so no cache functions */ 511 509 #define BD_CACHE_INVALIDATE(addr, size) ··· 1225 1235 if (1 == txch->queue_active) { 1226 1236 curr_bd = txch->active_queue_head; 1227 1237 while (curr_bd != NULL) { 1238 + dma_unmap_single(emac_dev, curr_bd->buff_ptr, 1239 + curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE, 1240 + DMA_TO_DEVICE); 1241 + 1228 1242 emac_net_tx_complete(priv, (void __force *) 1229 1243 &curr_bd->buf_token, 1, ch); 1230 1244 if (curr_bd != txch->active_queue_tail) ··· 1321 1327 txch->queue_active = 0; /* end of queue */ 1322 1328 } 1323 1329 } 1330 + 1331 + dma_unmap_single(emac_dev, curr_bd->buff_ptr, 1332 + curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE, 1333 + DMA_TO_DEVICE); 1334 + 1324 1335 *tx_complete_ptr = (u32) curr_bd->buf_token; 1325 1336 ++tx_complete_ptr; 1326 1337 ++tx_complete_cnt; ··· 1386 1387 1387 1388 txch->bd_pool_head = curr_bd->next; 1388 1389 curr_bd->buf_token = buf_list->buf_token; 1389 - /* FIXME buff_ptr = dma_map_single(... data_ptr ...) */ 1390 - curr_bd->buff_ptr = virt_to_phys(buf_list->data_ptr); 1390 + curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buf_list->data_ptr, 1391 + buf_list->length, DMA_TO_DEVICE); 1391 1392 curr_bd->off_b_len = buf_list->length; 1392 1393 curr_bd->h_next = 0; 1393 1394 curr_bd->next = NULL; ··· 1467 1468 tx_buf.length = skb->len; 1468 1469 tx_buf.buf_token = (void *)skb; 1469 1470 tx_buf.data_ptr = skb->data; 1470 - EMAC_CACHE_WRITEBACK((unsigned long)skb->data, skb->len); 1471 1471 ndev->trans_start = jiffies; 1472 1472 ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH); 1473 1473 if (unlikely(ret_code != 0)) { ··· 1541 1543 p_skb->dev = ndev; 1542 1544 skb_reserve(p_skb, NET_IP_ALIGN); 1543 1545 *data_token = (void *) p_skb; 1544 - EMAC_CACHE_WRITEBACK_INVALIDATE((unsigned long)p_skb->data, buf_size); 1545 1546 return p_skb->data; 1546 1547 } 1547 1548 ··· 1609 1612 /* populate the hardware descriptor */ 1610 1613 curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head, 1611 1614 priv); 1612 - /* FIXME buff_ptr = dma_map_single(... data_ptr ...) 
*/ 1613 - curr_bd->buff_ptr = virt_to_phys(curr_bd->data_ptr); 1615 + curr_bd->buff_ptr = dma_map_single(emac_dev, curr_bd->data_ptr, 1616 + rxch->buf_size, DMA_FROM_DEVICE); 1614 1617 curr_bd->off_b_len = rxch->buf_size; 1615 1618 curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT; 1616 1619 ··· 1694 1697 curr_bd = rxch->active_queue_head; 1695 1698 while (curr_bd) { 1696 1699 if (curr_bd->buf_token) { 1700 + dma_unmap_single(&priv->ndev->dev, 1701 + curr_bd->buff_ptr, 1702 + curr_bd->off_b_len 1703 + & EMAC_RX_BD_BUF_SIZE, 1704 + DMA_FROM_DEVICE); 1705 + 1697 1706 dev_kfree_skb_any((struct sk_buff *)\ 1698 1707 curr_bd->buf_token); 1699 1708 } ··· 1874 1871 1875 1872 /* populate the hardware descriptor */ 1876 1873 curr_bd->h_next = 0; 1877 - /* FIXME buff_ptr = dma_map_single(... buffer ...) */ 1878 - curr_bd->buff_ptr = virt_to_phys(buffer); 1874 + curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buffer, 1875 + rxch->buf_size, DMA_FROM_DEVICE); 1879 1876 curr_bd->off_b_len = rxch->buf_size; 1880 1877 curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT; 1881 1878 curr_bd->next = NULL; ··· 1930 1927 p_skb = (struct sk_buff *)net_pkt_list->pkt_token; 1931 1928 /* set length of packet */ 1932 1929 skb_put(p_skb, net_pkt_list->pkt_length); 1933 - EMAC_CACHE_INVALIDATE((unsigned long)p_skb->data, p_skb->len); 1934 1930 p_skb->protocol = eth_type_trans(p_skb, priv->ndev); 1935 1931 netif_receive_skb(p_skb); 1936 1932 priv->net_dev_stats.rx_bytes += net_pkt_list->pkt_length; ··· 1992 1990 rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr; 1993 1991 rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE; 1994 1992 rx_buf_obj->buf_token = curr_bd->buf_token; 1993 + 1994 + dma_unmap_single(&priv->ndev->dev, curr_bd->buff_ptr, 1995 + curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE, 1996 + DMA_FROM_DEVICE); 1997 + 1995 1998 curr_pkt->pkt_token = curr_pkt->buf_list->buf_token; 1996 1999 curr_pkt->num_bufs = 1; 1997 2000 curr_pkt->pkt_length = ··· 2827 2820 return 0; 2828 2821 } 2829 2822 2830 - static 2831 - int davinci_emac_suspend(struct platform_device *pdev, pm_message_t state) 2823 + static int davinci_emac_suspend(struct device *dev) 2832 2824 { 2833 - struct net_device *dev = platform_get_drvdata(pdev); 2825 + struct platform_device *pdev = to_platform_device(dev); 2826 + struct net_device *ndev = platform_get_drvdata(pdev); 2834 2827 2835 - if (netif_running(dev)) 2836 - emac_dev_stop(dev); 2828 + if (netif_running(ndev)) 2829 + emac_dev_stop(ndev); 2837 2830 2838 2831 clk_disable(emac_clk); 2839 2832 2840 2833 return 0; 2841 2834 } 2842 2835 2843 - static int davinci_emac_resume(struct platform_device *pdev) 2836 + static int davinci_emac_resume(struct device *dev) 2844 2837 { 2845 - struct net_device *dev = platform_get_drvdata(pdev); 2838 + struct platform_device *pdev = to_platform_device(dev); 2839 + struct net_device *ndev = platform_get_drvdata(pdev); 2846 2840 2847 2841 clk_enable(emac_clk); 2848 2842 2849 - if (netif_running(dev)) 2850 - emac_dev_open(dev); 2843 + if (netif_running(ndev)) 2844 + emac_dev_open(ndev); 2851 2845 2852 2846 return 0; 2853 2847 } 2848 + 2849 + static const struct dev_pm_ops davinci_emac_pm_ops = { 2850 + .suspend = davinci_emac_suspend, 2851 + .resume = davinci_emac_resume, 2852 + }; 2854 2853 2855 2854 /** 2856 2855 * davinci_emac_driver: EMAC platform driver structure ··· 2865 2852 .driver = { 2866 2853 .name = "davinci_emac", 2867 2854 .owner = THIS_MODULE, 2855 + .pm = &davinci_emac_pm_ops, 2868 2856 }, 2869 2857 .probe = davinci_emac_probe, 2870 2858 .remove = 
__devexit_p(davinci_emac_remove), 2871 - .suspend = davinci_emac_suspend, 2872 - .resume = davinci_emac_resume, 2873 2859 }; 2874 2860 2875 2861 /**
+1 -1
drivers/net/e100.c
···
 	}
 	nic->cbs_pool = pci_pool_create(netdev->name,
 			   nic->pdev,
-			   nic->params.cbs.count * sizeof(struct cb),
+			   nic->params.cbs.max * sizeof(struct cb),
 			   sizeof(u32),
 			   0);
 	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
-36
drivers/net/irda/w83977af_ir.c
···
 #undef  CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
 #define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
 #endif
-#undef  CONFIG_USE_INTERNAL_TIMER  /* Just cannot make that timer work */
 #define CONFIG_USE_W977_PNP        /* Currently needed */
 #define PIO_MAX_SPEED       115200
 
···
 	self->tx_buff.len = skb->len;
 
 	mtt = irda_get_mtt(skb);
-#ifdef CONFIG_USE_INTERNAL_TIMER
-	if (mtt > 50) {
-		/* Adjust for timer resolution */
-		mtt /= 1000+1;
-
-		/* Setup timer */
-		switch_bank(iobase, SET4);
-		outb(mtt & 0xff, iobase+TMRL);
-		outb((mtt >> 8) & 0x0f, iobase+TMRH);
-
-		/* Start timer */
-		outb(IR_MSL_EN_TMR, iobase+IR_MSL);
-		self->io.direction = IO_XMIT;
-
-		/* Enable timer interrupt */
-		switch_bank(iobase, SET0);
-		outb(ICR_ETMRI, iobase+ICR);
-	} else {
-#endif
 		IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
 		if (mtt)
 			udelay(mtt);
···
 		switch_bank(iobase, SET0);
 		outb(ICR_EDMAI, iobase+ICR);
 		w83977af_dma_write(self, iobase);
-#ifdef CONFIG_USE_INTERNAL_TIMER
-	}
-#endif
 	} else {
 		self->tx_buff.data = self->tx_buff.head;
 		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
···
 		/* Check if we have transferred all data to memory */
 		switch_bank(iobase, SET0);
 		if (inb(iobase+USR) & USR_RDR) {
-#ifdef CONFIG_USE_INTERNAL_TIMER
-			/* Put this entry back in fifo */
-			st_fifo->head--;
-			st_fifo->len++;
-			st_fifo->entries[st_fifo->head].status = status;
-			st_fifo->entries[st_fifo->head].len = len;
-
-			/* Restore set register */
-			outb(set, iobase+SSR);
-
-			return FALSE; /* I'll be back! */
-#else
 			udelay(80); /* Should be enough!? */
-#endif
 		}
 
 		skb = dev_alloc_skb(len+1);
+5 -3
drivers/net/ksz884x.c
···
 		struct sk_buff *org_skb = skb;
 
 		skb = dev_alloc_skb(org_skb->len);
-		if (!skb)
-			return NETDEV_TX_BUSY;
+		if (!skb) {
+			rc = NETDEV_TX_BUSY;
+			goto unlock;
+		}
 		skb_copy_and_csum_dev(org_skb, skb->data);
 		org_skb->ip_summed = 0;
 		skb->len = org_skb->len;
···
 		netif_stop_queue(dev);
 		rc = NETDEV_TX_BUSY;
 	}
-
+unlock:
 	spin_unlock_irq(&hw_priv->hwlock);
 
 	return rc;
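The ksz884x hunks fix the lock imbalance from the shortlog: the early return of NETDEV_TX_BUSY skipped spin_unlock_irq(). A minimal userspace sketch of the single-exit "goto unlock" pattern, with a pthread mutex standing in for the spinlock and all names hypothetical:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

	static int send_packet(size_t len)
	{
		int rc = 0;
		void *buf;

		pthread_mutex_lock(&hw_lock);

		buf = malloc(len);
		if (!buf) {
			rc = -1;		/* report "busy" */
			goto unlock;		/* never return with the lock still held */
		}

		/* ... hand buf to the hardware here ... */
		free(buf);

	unlock:
		pthread_mutex_unlock(&hw_lock);
		return rc;
	}

	int main(void)
	{
		printf("send_packet: %d\n", send_packet(64));
		return 0;
	}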
-1
drivers/net/myri10ge/myri10ge.c
···
 	if (status != 0) {
 		dev_err(&mgp->pdev->dev, "failed reset\n");
 		goto abort_with_fw;
-		return;
 	}
 
 	mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);
+1 -1
drivers/net/ne.c
···
 	{"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}},	/* Broken Advantech MoBo */
 	{"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}},	/* no-name with Realtek chip */
 #ifdef CONFIG_MACH_TX49XX
-	{"RBHMA4X00-RTL8019", "RBHMA4X00/RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */
+	{"RBHMA4X00-RTL8019", "RBHMA4X00-RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */
 #endif
 	{"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}},	/* ShinyNet (SET) */
 	{NULL,}
+5 -1
drivers/net/pppol2tp.c
···
 
 	/* Try to dequeue as many skbs from reorder_q as we can. */
 	pppol2tp_recv_dequeue(session);
+	sock_put(sock);
 
 	return 0;
 
···
 	UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0);
 	tunnel->stats.rx_errors++;
 	kfree_skb(skb);
+	sock_put(sock);
 
 	return 0;
 
···
 	/* Calculate UDP checksum if configured to do so */
 	if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
 		skb->ip_summed = CHECKSUM_NONE;
-	else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
+	else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
+		 (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		csum = skb_checksum(skb, 0, udp_len, 0);
 		uh->check = csum_tcpudp_magic(inet->inet_saddr,
···
 		if (tunnel_sock == NULL)
 			goto end;
 
+		sock_hold(tunnel_sock);
 		tunnel = tunnel_sock->sk_user_data;
 	} else {
 		tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);
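The pppol2tp hunks balance the UDP socket reference counts: take a reference before stashing or using the socket, and drop it on every exit path, including the error path. A minimal refcounting sketch with hypothetical hold/put helpers modelled on that rule:

	#include <stdio.h>
	#include <stdlib.h>

	struct sock { int refcnt; };

	static void sock_hold(struct sock *sk) { sk->refcnt++; }

	static void sock_put(struct sock *sk)
	{
		if (--sk->refcnt == 0)
			free(sk);	/* last reference frees the object */
	}

	static int recv_frame(struct sock *sk, int bad_frame)
	{
		sock_hold(sk);			/* we are about to use sk */

		if (bad_frame) {
			sock_put(sk);		/* error path must drop it too */
			return -1;
		}

		/* ... deliver the payload ... */
		sock_put(sk);			/* normal path drops it as well */
		return 0;
	}

	int main(void)
	{
		struct sock *sk = calloc(1, sizeof(*sk));

		sk->refcnt = 1;			/* creator's reference */
		recv_frame(sk, 1);
		recv_frame(sk, 0);
		printf("refcnt back to %d\n", sk->refcnt);
		sock_put(sk);
		return 0;
	}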
+1 -3
drivers/net/s2io.c
···
 		}
 	}
 
-	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
-		memset(nic->product_name, 0, vpd_data[1]);
+	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN))
 		memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
-	}
 	kfree(vpd_data);
 	swstats->mem_freed += 256;
 }
+8
drivers/net/usb/Kconfig
···
 	  This option adds support for Davicom DM9601 based USB 1.1
 	  10/100 Ethernet adapters.
 
+config USB_NET_SMSC75XX
+	tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
+	depends on USB_USBNET
+	select CRC32
+	help
+	  This option adds support for SMSC LAN95XX based USB 2.0
+	  Gigabit Ethernet adapters.
+
 config USB_NET_SMSC95XX
 	tristate "SMSC LAN95XX based USB 2.0 10/100 ethernet devices"
 	depends on USB_USBNET
+1
drivers/net/usb/Makefile
···
 obj-$(CONFIG_USB_NET_CDCETHER)	+= cdc_ether.o
 obj-$(CONFIG_USB_NET_CDC_EEM)	+= cdc_eem.o
 obj-$(CONFIG_USB_NET_DM9601)	+= dm9601.o
+obj-$(CONFIG_USB_NET_SMSC75XX)	+= smsc75xx.o
 obj-$(CONFIG_USB_NET_SMSC95XX)	+= smsc95xx.o
 obj-$(CONFIG_USB_NET_GL620A)	+= gl620a.o
 obj-$(CONFIG_USB_NET_NET1080)	+= net1080.o
-3
drivers/net/usb/hso.c
···
 static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial, struct urb *urb)
 {
 	int result;
-#ifdef CONFIG_HSO_AUTOPM
-	usb_mark_last_busy(urb->dev);
-#endif
 	/* We are done with this URB, resubmit it. Prep the USB to wait for
 	 * another frame */
 	usb_fill_bulk_urb(urb, serial->parent->usb,
+1288
drivers/net/usb/smsc75xx.c
··· 1 + /*************************************************************************** 2 + * 3 + * Copyright (C) 2007-2010 SMSC 4 + * 5 + * This program is free software; you can redistribute it and/or 6 + * modify it under the terms of the GNU General Public License 7 + * as published by the Free Software Foundation; either version 2 8 + * of the License, or (at your option) any later version. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software 17 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 + * 19 + *****************************************************************************/ 20 + 21 + #include <linux/module.h> 22 + #include <linux/kmod.h> 23 + #include <linux/init.h> 24 + #include <linux/netdevice.h> 25 + #include <linux/etherdevice.h> 26 + #include <linux/ethtool.h> 27 + #include <linux/mii.h> 28 + #include <linux/usb.h> 29 + #include <linux/crc32.h> 30 + #include <linux/usb/usbnet.h> 31 + #include "smsc75xx.h" 32 + 33 + #define SMSC_CHIPNAME "smsc75xx" 34 + #define SMSC_DRIVER_VERSION "1.0.0" 35 + #define HS_USB_PKT_SIZE (512) 36 + #define FS_USB_PKT_SIZE (64) 37 + #define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE) 38 + #define DEFAULT_FS_BURST_CAP_SIZE (6 * 1024 + 33 * FS_USB_PKT_SIZE) 39 + #define DEFAULT_BULK_IN_DELAY (0x00002000) 40 + #define MAX_SINGLE_PACKET_SIZE (9000) 41 + #define LAN75XX_EEPROM_MAGIC (0x7500) 42 + #define EEPROM_MAC_OFFSET (0x01) 43 + #define DEFAULT_TX_CSUM_ENABLE (true) 44 + #define DEFAULT_RX_CSUM_ENABLE (true) 45 + #define DEFAULT_TSO_ENABLE (true) 46 + #define SMSC75XX_INTERNAL_PHY_ID (1) 47 + #define SMSC75XX_TX_OVERHEAD (8) 48 + #define MAX_RX_FIFO_SIZE (20 * 1024) 49 + #define MAX_TX_FIFO_SIZE (12 * 1024) 50 + #define USB_VENDOR_ID_SMSC (0x0424) 51 + #define USB_PRODUCT_ID_LAN7500 (0x7500) 52 + #define USB_PRODUCT_ID_LAN7505 (0x7505) 53 + 54 + #define check_warn(ret, fmt, args...) \ 55 + ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); }) 56 + 57 + #define check_warn_return(ret, fmt, args...) \ 58 + ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } }) 59 + 60 + #define check_warn_goto_done(ret, fmt, args...) 
\ 61 + ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } }) 62 + 63 + struct smsc75xx_priv { 64 + struct usbnet *dev; 65 + u32 rfe_ctl; 66 + u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN]; 67 + bool use_rx_csum; 68 + struct mutex dataport_mutex; 69 + spinlock_t rfe_ctl_lock; 70 + struct work_struct set_multicast; 71 + }; 72 + 73 + struct usb_context { 74 + struct usb_ctrlrequest req; 75 + struct usbnet *dev; 76 + }; 77 + 78 + static int turbo_mode = true; 79 + module_param(turbo_mode, bool, 0644); 80 + MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); 81 + 82 + static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index, 83 + u32 *data) 84 + { 85 + u32 *buf = kmalloc(4, GFP_KERNEL); 86 + int ret; 87 + 88 + BUG_ON(!dev); 89 + 90 + if (!buf) 91 + return -ENOMEM; 92 + 93 + ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 94 + USB_VENDOR_REQUEST_READ_REGISTER, 95 + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 96 + 00, index, buf, 4, USB_CTRL_GET_TIMEOUT); 97 + 98 + if (unlikely(ret < 0)) 99 + netdev_warn(dev->net, 100 + "Failed to read register index 0x%08x", index); 101 + 102 + le32_to_cpus(buf); 103 + *data = *buf; 104 + kfree(buf); 105 + 106 + return ret; 107 + } 108 + 109 + static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index, 110 + u32 data) 111 + { 112 + u32 *buf = kmalloc(4, GFP_KERNEL); 113 + int ret; 114 + 115 + BUG_ON(!dev); 116 + 117 + if (!buf) 118 + return -ENOMEM; 119 + 120 + *buf = data; 121 + cpu_to_le32s(buf); 122 + 123 + ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 124 + USB_VENDOR_REQUEST_WRITE_REGISTER, 125 + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 126 + 00, index, buf, 4, USB_CTRL_SET_TIMEOUT); 127 + 128 + if (unlikely(ret < 0)) 129 + netdev_warn(dev->net, 130 + "Failed to write register index 0x%08x", index); 131 + 132 + kfree(buf); 133 + 134 + return ret; 135 + } 136 + 137 + /* Loop until the read is completed with timeout 138 + * called with phy_mutex held */ 139 + static int smsc75xx_phy_wait_not_busy(struct usbnet *dev) 140 + { 141 + unsigned long start_time = jiffies; 142 + u32 val; 143 + int ret; 144 + 145 + do { 146 + ret = smsc75xx_read_reg(dev, MII_ACCESS, &val); 147 + check_warn_return(ret, "Error reading MII_ACCESS"); 148 + 149 + if (!(val & MII_ACCESS_BUSY)) 150 + return 0; 151 + } while (!time_after(jiffies, start_time + HZ)); 152 + 153 + return -EIO; 154 + } 155 + 156 + static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx) 157 + { 158 + struct usbnet *dev = netdev_priv(netdev); 159 + u32 val, addr; 160 + int ret; 161 + 162 + mutex_lock(&dev->phy_mutex); 163 + 164 + /* confirm MII not busy */ 165 + ret = smsc75xx_phy_wait_not_busy(dev); 166 + check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_read"); 167 + 168 + /* set the address, index & direction (read from PHY) */ 169 + phy_id &= dev->mii.phy_id_mask; 170 + idx &= dev->mii.reg_num_mask; 171 + addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) 172 + | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) 173 + | MII_ACCESS_READ; 174 + ret = smsc75xx_write_reg(dev, MII_ACCESS, addr); 175 + check_warn_goto_done(ret, "Error writing MII_ACCESS"); 176 + 177 + ret = smsc75xx_phy_wait_not_busy(dev); 178 + check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx); 179 + 180 + ret = smsc75xx_read_reg(dev, MII_DATA, &val); 181 + check_warn_goto_done(ret, "Error reading MII_DATA"); 182 + 183 + ret = (u16)(val & 0xFFFF); 184 + 185 + done: 
186 + mutex_unlock(&dev->phy_mutex); 187 + return ret; 188 + } 189 + 190 + static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx, 191 + int regval) 192 + { 193 + struct usbnet *dev = netdev_priv(netdev); 194 + u32 val, addr; 195 + int ret; 196 + 197 + mutex_lock(&dev->phy_mutex); 198 + 199 + /* confirm MII not busy */ 200 + ret = smsc75xx_phy_wait_not_busy(dev); 201 + check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_write"); 202 + 203 + val = regval; 204 + ret = smsc75xx_write_reg(dev, MII_DATA, val); 205 + check_warn_goto_done(ret, "Error writing MII_DATA"); 206 + 207 + /* set the address, index & direction (write to PHY) */ 208 + phy_id &= dev->mii.phy_id_mask; 209 + idx &= dev->mii.reg_num_mask; 210 + addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) 211 + | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) 212 + | MII_ACCESS_WRITE; 213 + ret = smsc75xx_write_reg(dev, MII_ACCESS, addr); 214 + check_warn_goto_done(ret, "Error writing MII_ACCESS"); 215 + 216 + ret = smsc75xx_phy_wait_not_busy(dev); 217 + check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx); 218 + 219 + done: 220 + mutex_unlock(&dev->phy_mutex); 221 + } 222 + 223 + static int smsc75xx_wait_eeprom(struct usbnet *dev) 224 + { 225 + unsigned long start_time = jiffies; 226 + u32 val; 227 + int ret; 228 + 229 + do { 230 + ret = smsc75xx_read_reg(dev, E2P_CMD, &val); 231 + check_warn_return(ret, "Error reading E2P_CMD"); 232 + 233 + if (!(val & E2P_CMD_BUSY) || (val & E2P_CMD_TIMEOUT)) 234 + break; 235 + udelay(40); 236 + } while (!time_after(jiffies, start_time + HZ)); 237 + 238 + if (val & (E2P_CMD_TIMEOUT | E2P_CMD_BUSY)) { 239 + netdev_warn(dev->net, "EEPROM read operation timeout"); 240 + return -EIO; 241 + } 242 + 243 + return 0; 244 + } 245 + 246 + static int smsc75xx_eeprom_confirm_not_busy(struct usbnet *dev) 247 + { 248 + unsigned long start_time = jiffies; 249 + u32 val; 250 + int ret; 251 + 252 + do { 253 + ret = smsc75xx_read_reg(dev, E2P_CMD, &val); 254 + check_warn_return(ret, "Error reading E2P_CMD"); 255 + 256 + if (!(val & E2P_CMD_BUSY)) 257 + return 0; 258 + 259 + udelay(40); 260 + } while (!time_after(jiffies, start_time + HZ)); 261 + 262 + netdev_warn(dev->net, "EEPROM is busy"); 263 + return -EIO; 264 + } 265 + 266 + static int smsc75xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length, 267 + u8 *data) 268 + { 269 + u32 val; 270 + int i, ret; 271 + 272 + BUG_ON(!dev); 273 + BUG_ON(!data); 274 + 275 + ret = smsc75xx_eeprom_confirm_not_busy(dev); 276 + if (ret) 277 + return ret; 278 + 279 + for (i = 0; i < length; i++) { 280 + val = E2P_CMD_BUSY | E2P_CMD_READ | (offset & E2P_CMD_ADDR); 281 + ret = smsc75xx_write_reg(dev, E2P_CMD, val); 282 + check_warn_return(ret, "Error writing E2P_CMD"); 283 + 284 + ret = smsc75xx_wait_eeprom(dev); 285 + if (ret < 0) 286 + return ret; 287 + 288 + ret = smsc75xx_read_reg(dev, E2P_DATA, &val); 289 + check_warn_return(ret, "Error reading E2P_DATA"); 290 + 291 + data[i] = val & 0xFF; 292 + offset++; 293 + } 294 + 295 + return 0; 296 + } 297 + 298 + static int smsc75xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length, 299 + u8 *data) 300 + { 301 + u32 val; 302 + int i, ret; 303 + 304 + BUG_ON(!dev); 305 + BUG_ON(!data); 306 + 307 + ret = smsc75xx_eeprom_confirm_not_busy(dev); 308 + if (ret) 309 + return ret; 310 + 311 + /* Issue write/erase enable command */ 312 + val = E2P_CMD_BUSY | E2P_CMD_EWEN; 313 + ret = smsc75xx_write_reg(dev, E2P_CMD, val); 314 + check_warn_return(ret, "Error writing 
E2P_CMD"); 315 + 316 + ret = smsc75xx_wait_eeprom(dev); 317 + if (ret < 0) 318 + return ret; 319 + 320 + for (i = 0; i < length; i++) { 321 + 322 + /* Fill data register */ 323 + val = data[i]; 324 + ret = smsc75xx_write_reg(dev, E2P_DATA, val); 325 + check_warn_return(ret, "Error writing E2P_DATA"); 326 + 327 + /* Send "write" command */ 328 + val = E2P_CMD_BUSY | E2P_CMD_WRITE | (offset & E2P_CMD_ADDR); 329 + ret = smsc75xx_write_reg(dev, E2P_CMD, val); 330 + check_warn_return(ret, "Error writing E2P_CMD"); 331 + 332 + ret = smsc75xx_wait_eeprom(dev); 333 + if (ret < 0) 334 + return ret; 335 + 336 + offset++; 337 + } 338 + 339 + return 0; 340 + } 341 + 342 + static int smsc75xx_dataport_wait_not_busy(struct usbnet *dev) 343 + { 344 + int i, ret; 345 + 346 + for (i = 0; i < 100; i++) { 347 + u32 dp_sel; 348 + ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); 349 + check_warn_return(ret, "Error reading DP_SEL"); 350 + 351 + if (dp_sel & DP_SEL_DPRDY) 352 + return 0; 353 + 354 + udelay(40); 355 + } 356 + 357 + netdev_warn(dev->net, "smsc75xx_dataport_wait_not_busy timed out"); 358 + 359 + return -EIO; 360 + } 361 + 362 + static int smsc75xx_dataport_write(struct usbnet *dev, u32 ram_select, u32 addr, 363 + u32 length, u32 *buf) 364 + { 365 + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 366 + u32 dp_sel; 367 + int i, ret; 368 + 369 + mutex_lock(&pdata->dataport_mutex); 370 + 371 + ret = smsc75xx_dataport_wait_not_busy(dev); 372 + check_warn_goto_done(ret, "smsc75xx_dataport_write busy on entry"); 373 + 374 + ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); 375 + check_warn_goto_done(ret, "Error reading DP_SEL"); 376 + 377 + dp_sel &= ~DP_SEL_RSEL; 378 + dp_sel |= ram_select; 379 + ret = smsc75xx_write_reg(dev, DP_SEL, dp_sel); 380 + check_warn_goto_done(ret, "Error writing DP_SEL"); 381 + 382 + for (i = 0; i < length; i++) { 383 + ret = smsc75xx_write_reg(dev, DP_ADDR, addr + i); 384 + check_warn_goto_done(ret, "Error writing DP_ADDR"); 385 + 386 + ret = smsc75xx_write_reg(dev, DP_DATA, buf[i]); 387 + check_warn_goto_done(ret, "Error writing DP_DATA"); 388 + 389 + ret = smsc75xx_write_reg(dev, DP_CMD, DP_CMD_WRITE); 390 + check_warn_goto_done(ret, "Error writing DP_CMD"); 391 + 392 + ret = smsc75xx_dataport_wait_not_busy(dev); 393 + check_warn_goto_done(ret, "smsc75xx_dataport_write timeout"); 394 + } 395 + 396 + done: 397 + mutex_unlock(&pdata->dataport_mutex); 398 + return ret; 399 + } 400 + 401 + /* returns hash bit number for given MAC address */ 402 + static u32 smsc75xx_hash(char addr[ETH_ALEN]) 403 + { 404 + return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff; 405 + } 406 + 407 + static void smsc75xx_deferred_multicast_write(struct work_struct *param) 408 + { 409 + struct smsc75xx_priv *pdata = 410 + container_of(param, struct smsc75xx_priv, set_multicast); 411 + struct usbnet *dev = pdata->dev; 412 + int ret; 413 + 414 + netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x", 415 + pdata->rfe_ctl); 416 + 417 + smsc75xx_dataport_write(dev, DP_SEL_VHF, DP_SEL_VHF_VLAN_LEN, 418 + DP_SEL_VHF_HASH_LEN, pdata->multicast_hash_table); 419 + 420 + ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); 421 + check_warn(ret, "Error writing RFE_CRL"); 422 + } 423 + 424 + static void smsc75xx_set_multicast(struct net_device *netdev) 425 + { 426 + struct usbnet *dev = netdev_priv(netdev); 427 + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 428 + unsigned long flags; 429 + int i; 430 + 431 + spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); 432 + 433 
+ pdata->rfe_ctl &= 434 + ~(RFE_CTL_AU | RFE_CTL_AM | RFE_CTL_DPF | RFE_CTL_MHF); 435 + pdata->rfe_ctl |= RFE_CTL_AB; 436 + 437 + for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++) 438 + pdata->multicast_hash_table[i] = 0; 439 + 440 + if (dev->net->flags & IFF_PROMISC) { 441 + netif_dbg(dev, drv, dev->net, "promiscuous mode enabled"); 442 + pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_AU; 443 + } else if (dev->net->flags & IFF_ALLMULTI) { 444 + netif_dbg(dev, drv, dev->net, "receive all multicast enabled"); 445 + pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF; 446 + } else if (!netdev_mc_empty(dev->net)) { 447 + struct dev_mc_list *mc_list; 448 + 449 + netif_dbg(dev, drv, dev->net, "receive multicast hash filter"); 450 + 451 + pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF; 452 + 453 + netdev_for_each_mc_addr(mc_list, netdev) { 454 + u32 bitnum = smsc75xx_hash(mc_list->dmi_addr); 455 + pdata->multicast_hash_table[bitnum / 32] |= 456 + (1 << (bitnum % 32)); 457 + } 458 + } else { 459 + netif_dbg(dev, drv, dev->net, "receive own packets only"); 460 + pdata->rfe_ctl |= RFE_CTL_DPF; 461 + } 462 + 463 + spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); 464 + 465 + /* defer register writes to a sleepable context */ 466 + schedule_work(&pdata->set_multicast); 467 + } 468 + 469 + static int smsc75xx_update_flowcontrol(struct usbnet *dev, u8 duplex, 470 + u16 lcladv, u16 rmtadv) 471 + { 472 + u32 flow = 0, fct_flow = 0; 473 + int ret; 474 + 475 + if (duplex == DUPLEX_FULL) { 476 + u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 477 + 478 + if (cap & FLOW_CTRL_TX) { 479 + flow = (FLOW_TX_FCEN | 0xFFFF); 480 + /* set fct_flow thresholds to 20% and 80% */ 481 + fct_flow = (8 << 8) | 32; 482 + } 483 + 484 + if (cap & FLOW_CTRL_RX) 485 + flow |= FLOW_RX_FCEN; 486 + 487 + netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s", 488 + (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), 489 + (cap & FLOW_CTRL_TX ? 
"enabled" : "disabled")); 490 + } else { 491 + netif_dbg(dev, link, dev->net, "half duplex"); 492 + } 493 + 494 + ret = smsc75xx_write_reg(dev, FLOW, flow); 495 + check_warn_return(ret, "Error writing FLOW"); 496 + 497 + ret = smsc75xx_write_reg(dev, FCT_FLOW, fct_flow); 498 + check_warn_return(ret, "Error writing FCT_FLOW"); 499 + 500 + return 0; 501 + } 502 + 503 + static int smsc75xx_link_reset(struct usbnet *dev) 504 + { 505 + struct mii_if_info *mii = &dev->mii; 506 + struct ethtool_cmd ecmd; 507 + u16 lcladv, rmtadv; 508 + int ret; 509 + 510 + /* clear interrupt status */ 511 + ret = smsc75xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC); 512 + check_warn_return(ret, "Error reading PHY_INT_SRC"); 513 + 514 + ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); 515 + check_warn_return(ret, "Error writing INT_STS"); 516 + 517 + mii_check_media(mii, 1, 1); 518 + mii_ethtool_gset(&dev->mii, &ecmd); 519 + lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE); 520 + rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA); 521 + 522 + netif_dbg(dev, link, dev->net, "speed: %d duplex: %d lcladv: %04x" 523 + " rmtadv: %04x", ecmd.speed, ecmd.duplex, lcladv, rmtadv); 524 + 525 + return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv); 526 + } 527 + 528 + static void smsc75xx_status(struct usbnet *dev, struct urb *urb) 529 + { 530 + u32 intdata; 531 + 532 + if (urb->actual_length != 4) { 533 + netdev_warn(dev->net, 534 + "unexpected urb length %d", urb->actual_length); 535 + return; 536 + } 537 + 538 + memcpy(&intdata, urb->transfer_buffer, 4); 539 + le32_to_cpus(&intdata); 540 + 541 + netif_dbg(dev, link, dev->net, "intdata: 0x%08X", intdata); 542 + 543 + if (intdata & INT_ENP_PHY_INT) 544 + usbnet_defer_kevent(dev, EVENT_LINK_RESET); 545 + else 546 + netdev_warn(dev->net, 547 + "unexpected interrupt, intdata=0x%08X", intdata); 548 + } 549 + 550 + /* Enable or disable Rx checksum offload engine */ 551 + static int smsc75xx_set_rx_csum_offload(struct usbnet *dev) 552 + { 553 + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 554 + unsigned long flags; 555 + int ret; 556 + 557 + spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); 558 + 559 + if (pdata->use_rx_csum) 560 + pdata->rfe_ctl |= RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM; 561 + else 562 + pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM); 563 + 564 + spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); 565 + 566 + ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); 567 + check_warn_return(ret, "Error writing RFE_CTL"); 568 + 569 + return 0; 570 + } 571 + 572 + static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net) 573 + { 574 + return MAX_EEPROM_SIZE; 575 + } 576 + 577 + static int smsc75xx_ethtool_get_eeprom(struct net_device *netdev, 578 + struct ethtool_eeprom *ee, u8 *data) 579 + { 580 + struct usbnet *dev = netdev_priv(netdev); 581 + 582 + ee->magic = LAN75XX_EEPROM_MAGIC; 583 + 584 + return smsc75xx_read_eeprom(dev, ee->offset, ee->len, data); 585 + } 586 + 587 + static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev, 588 + struct ethtool_eeprom *ee, u8 *data) 589 + { 590 + struct usbnet *dev = netdev_priv(netdev); 591 + 592 + if (ee->magic != LAN75XX_EEPROM_MAGIC) { 593 + netdev_warn(dev->net, 594 + "EEPROM: magic value mismatch: 0x%x", ee->magic); 595 + return -EINVAL; 596 + } 597 + 598 + return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data); 599 + } 600 + 601 + static u32 smsc75xx_ethtool_get_rx_csum(struct net_device *netdev) 602 + { 603 + 
struct usbnet *dev = netdev_priv(netdev); 604 + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 605 + 606 + return pdata->use_rx_csum; 607 + } 608 + 609 + static int smsc75xx_ethtool_set_rx_csum(struct net_device *netdev, u32 val) 610 + { 611 + struct usbnet *dev = netdev_priv(netdev); 612 + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 613 + 614 + pdata->use_rx_csum = !!val; 615 + 616 + return smsc75xx_set_rx_csum_offload(dev); 617 + } 618 + 619 + static int smsc75xx_ethtool_set_tso(struct net_device *netdev, u32 data) 620 + { 621 + if (data) 622 + netdev->features |= NETIF_F_TSO | NETIF_F_TSO6; 623 + else 624 + netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); 625 + 626 + return 0; 627 + } 628 + 629 + static const struct ethtool_ops smsc75xx_ethtool_ops = { 630 + .get_link = usbnet_get_link, 631 + .nway_reset = usbnet_nway_reset, 632 + .get_drvinfo = usbnet_get_drvinfo, 633 + .get_msglevel = usbnet_get_msglevel, 634 + .set_msglevel = usbnet_set_msglevel, 635 + .get_settings = usbnet_get_settings, 636 + .set_settings = usbnet_set_settings, 637 + .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len, 638 + .get_eeprom = smsc75xx_ethtool_get_eeprom, 639 + .set_eeprom = smsc75xx_ethtool_set_eeprom, 640 + .get_tx_csum = ethtool_op_get_tx_csum, 641 + .set_tx_csum = ethtool_op_set_tx_hw_csum, 642 + .get_rx_csum = smsc75xx_ethtool_get_rx_csum, 643 + .set_rx_csum = smsc75xx_ethtool_set_rx_csum, 644 + .get_tso = ethtool_op_get_tso, 645 + .set_tso = smsc75xx_ethtool_set_tso, 646 + }; 647 + 648 + static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 649 + { 650 + struct usbnet *dev = netdev_priv(netdev); 651 + 652 + if (!netif_running(netdev)) 653 + return -EINVAL; 654 + 655 + return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); 656 + } 657 + 658 + static void smsc75xx_init_mac_address(struct usbnet *dev) 659 + { 660 + /* try reading mac address from EEPROM */ 661 + if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, 662 + dev->net->dev_addr) == 0) { 663 + if (is_valid_ether_addr(dev->net->dev_addr)) { 664 + /* eeprom values are valid so use them */ 665 + netif_dbg(dev, ifup, dev->net, 666 + "MAC address read from EEPROM"); 667 + return; 668 + } 669 + } 670 + 671 + /* no eeprom, or eeprom values are invalid. 
generate random MAC */ 672 + random_ether_addr(dev->net->dev_addr); 673 + netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr"); 674 + } 675 + 676 + static int smsc75xx_set_mac_address(struct usbnet *dev) 677 + { 678 + u32 addr_lo = dev->net->dev_addr[0] | dev->net->dev_addr[1] << 8 | 679 + dev->net->dev_addr[2] << 16 | dev->net->dev_addr[3] << 24; 680 + u32 addr_hi = dev->net->dev_addr[4] | dev->net->dev_addr[5] << 8; 681 + 682 + int ret = smsc75xx_write_reg(dev, RX_ADDRH, addr_hi); 683 + check_warn_return(ret, "Failed to write RX_ADDRH: %d", ret); 684 + 685 + ret = smsc75xx_write_reg(dev, RX_ADDRL, addr_lo); 686 + check_warn_return(ret, "Failed to write RX_ADDRL: %d", ret); 687 + 688 + addr_hi |= ADDR_FILTX_FB_VALID; 689 + ret = smsc75xx_write_reg(dev, ADDR_FILTX, addr_hi); 690 + check_warn_return(ret, "Failed to write ADDR_FILTX: %d", ret); 691 + 692 + ret = smsc75xx_write_reg(dev, ADDR_FILTX + 4, addr_lo); 693 + check_warn_return(ret, "Failed to write ADDR_FILTX+4: %d", ret); 694 + 695 + return 0; 696 + } 697 + 698 + static int smsc75xx_phy_initialize(struct usbnet *dev) 699 + { 700 + int bmcr, timeout = 0; 701 + 702 + /* Initialize MII structure */ 703 + dev->mii.dev = dev->net; 704 + dev->mii.mdio_read = smsc75xx_mdio_read; 705 + dev->mii.mdio_write = smsc75xx_mdio_write; 706 + dev->mii.phy_id_mask = 0x1f; 707 + dev->mii.reg_num_mask = 0x1f; 708 + dev->mii.phy_id = SMSC75XX_INTERNAL_PHY_ID; 709 + 710 + /* reset phy and wait for reset to complete */ 711 + smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET); 712 + 713 + do { 714 + msleep(10); 715 + bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR); 716 + check_warn_return(bmcr, "Error reading MII_BMCR"); 717 + timeout++; 718 + } while ((bmcr & MII_BMCR) && (timeout < 100)); 719 + 720 + if (timeout >= 100) { 721 + netdev_warn(dev->net, "timeout on PHY Reset"); 722 + return -EIO; 723 + } 724 + 725 + smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, 726 + ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | 727 + ADVERTISE_PAUSE_ASYM); 728 + 729 + /* read to clear */ 730 + smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC); 731 + check_warn_return(bmcr, "Error reading PHY_INT_SRC"); 732 + 733 + smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK, 734 + PHY_INT_MASK_DEFAULT); 735 + mii_nway_restart(&dev->mii); 736 + 737 + netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); 738 + return 0; 739 + } 740 + 741 + static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size) 742 + { 743 + int ret = 0; 744 + u32 buf; 745 + bool rxenabled; 746 + 747 + ret = smsc75xx_read_reg(dev, MAC_RX, &buf); 748 + check_warn_return(ret, "Failed to read MAC_RX: %d", ret); 749 + 750 + rxenabled = ((buf & MAC_RX_RXEN) != 0); 751 + 752 + if (rxenabled) { 753 + buf &= ~MAC_RX_RXEN; 754 + ret = smsc75xx_write_reg(dev, MAC_RX, buf); 755 + check_warn_return(ret, "Failed to write MAC_RX: %d", ret); 756 + } 757 + 758 + /* add 4 to size for FCS */ 759 + buf &= ~MAC_RX_MAX_SIZE; 760 + buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT) & MAC_RX_MAX_SIZE); 761 + 762 + ret = smsc75xx_write_reg(dev, MAC_RX, buf); 763 + check_warn_return(ret, "Failed to write MAC_RX: %d", ret); 764 + 765 + if (rxenabled) { 766 + buf |= MAC_RX_RXEN; 767 + ret = smsc75xx_write_reg(dev, MAC_RX, buf); 768 + check_warn_return(ret, "Failed to write MAC_RX: %d", ret); 769 + } 770 + 771 + return 0; 772 + } 773 + 774 + static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) 775 + { 776 + struct 
usbnet *dev = netdev_priv(netdev); 777 + 778 + int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu); 779 + check_warn_return(ret, "Failed to set mac rx frame length"); 780 + 781 + return usbnet_change_mtu(netdev, new_mtu); 782 + } 783 + 784 + static int smsc75xx_reset(struct usbnet *dev) 785 + { 786 + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 787 + u32 buf; 788 + int ret = 0, timeout; 789 + 790 + netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset"); 791 + 792 + ret = smsc75xx_read_reg(dev, HW_CFG, &buf); 793 + check_warn_return(ret, "Failed to read HW_CFG: %d", ret); 794 + 795 + buf |= HW_CFG_LRST; 796 + 797 + ret = smsc75xx_write_reg(dev, HW_CFG, buf); 798 + check_warn_return(ret, "Failed to write HW_CFG: %d", ret); 799 + 800 + timeout = 0; 801 + do { 802 + msleep(10); 803 + ret = smsc75xx_read_reg(dev, HW_CFG, &buf); 804 + check_warn_return(ret, "Failed to read HW_CFG: %d", ret); 805 + timeout++; 806 + } while ((buf & HW_CFG_LRST) && (timeout < 100)); 807 + 808 + if (timeout >= 100) { 809 + netdev_warn(dev->net, "timeout on completion of Lite Reset"); 810 + return -EIO; 811 + } 812 + 813 + netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY"); 814 + 815 + ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); 816 + check_warn_return(ret, "Failed to read PMT_CTL: %d", ret); 817 + 818 + buf |= PMT_CTL_PHY_RST; 819 + 820 + ret = smsc75xx_write_reg(dev, PMT_CTL, buf); 821 + check_warn_return(ret, "Failed to write PMT_CTL: %d", ret); 822 + 823 + timeout = 0; 824 + do { 825 + msleep(10); 826 + ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); 827 + check_warn_return(ret, "Failed to read PMT_CTL: %d", ret); 828 + timeout++; 829 + } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100)); 830 + 831 + if (timeout >= 100) { 832 + netdev_warn(dev->net, "timeout waiting for PHY Reset"); 833 + return -EIO; 834 + } 835 + 836 + netif_dbg(dev, ifup, dev->net, "PHY reset complete"); 837 + 838 + smsc75xx_init_mac_address(dev); 839 + 840 + ret = smsc75xx_set_mac_address(dev); 841 + check_warn_return(ret, "Failed to set mac address"); 842 + 843 + netif_dbg(dev, ifup, dev->net, "MAC Address: %pM", dev->net->dev_addr); 844 + 845 + ret = smsc75xx_read_reg(dev, HW_CFG, &buf); 846 + check_warn_return(ret, "Failed to read HW_CFG: %d", ret); 847 + 848 + netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x", buf); 849 + 850 + buf |= HW_CFG_BIR; 851 + 852 + ret = smsc75xx_write_reg(dev, HW_CFG, buf); 853 + check_warn_return(ret, "Failed to write HW_CFG: %d", ret); 854 + 855 + ret = smsc75xx_read_reg(dev, HW_CFG, &buf); 856 + check_warn_return(ret, "Failed to read HW_CFG: %d", ret); 857 + 858 + netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after " 859 + "writing HW_CFG_BIR: 0x%08x", buf); 860 + 861 + if (!turbo_mode) { 862 + buf = 0; 863 + dev->rx_urb_size = MAX_SINGLE_PACKET_SIZE; 864 + } else if (dev->udev->speed == USB_SPEED_HIGH) { 865 + buf = DEFAULT_HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE; 866 + dev->rx_urb_size = DEFAULT_HS_BURST_CAP_SIZE; 867 + } else { 868 + buf = DEFAULT_FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE; 869 + dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE; 870 + } 871 + 872 + netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld", 873 + (ulong)dev->rx_urb_size); 874 + 875 + ret = smsc75xx_write_reg(dev, BURST_CAP, buf); 876 + check_warn_return(ret, "Failed to write BURST_CAP: %d", ret); 877 + 878 + ret = smsc75xx_read_reg(dev, BURST_CAP, &buf); 879 + check_warn_return(ret, "Failed to read BURST_CAP: %d", ret); 880 + 881 + netif_dbg(dev, ifup, dev->net, 882 + "Read 
Value from BURST_CAP after writing: 0x%08x", buf); 883 + 884 + ret = smsc75xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); 885 + check_warn_return(ret, "Failed to write BULK_IN_DLY: %d", ret); 886 + 887 + ret = smsc75xx_read_reg(dev, BULK_IN_DLY, &buf); 888 + check_warn_return(ret, "Failed to read BULK_IN_DLY: %d", ret); 889 + 890 + netif_dbg(dev, ifup, dev->net, 891 + "Read Value from BULK_IN_DLY after writing: 0x%08x", buf); 892 + 893 + if (turbo_mode) { 894 + ret = smsc75xx_read_reg(dev, HW_CFG, &buf); 895 + check_warn_return(ret, "Failed to read HW_CFG: %d", ret); 896 + 897 + netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf); 898 + 899 + buf |= (HW_CFG_MEF | HW_CFG_BCE); 900 + 901 + ret = smsc75xx_write_reg(dev, HW_CFG, buf); 902 + check_warn_return(ret, "Failed to write HW_CFG: %d", ret); 903 + 904 + ret = smsc75xx_read_reg(dev, HW_CFG, &buf); 905 + check_warn_return(ret, "Failed to read HW_CFG: %d", ret); 906 + 907 + netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf); 908 + } 909 + 910 + /* set FIFO sizes */ 911 + buf = (MAX_RX_FIFO_SIZE - 512) / 512; 912 + ret = smsc75xx_write_reg(dev, FCT_RX_FIFO_END, buf); 913 + check_warn_return(ret, "Failed to write FCT_RX_FIFO_END: %d", ret); 914 + 915 + netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x", buf); 916 + 917 + buf = (MAX_TX_FIFO_SIZE - 512) / 512; 918 + ret = smsc75xx_write_reg(dev, FCT_TX_FIFO_END, buf); 919 + check_warn_return(ret, "Failed to write FCT_TX_FIFO_END: %d", ret); 920 + 921 + netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x", buf); 922 + 923 + ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); 924 + check_warn_return(ret, "Failed to write INT_STS: %d", ret); 925 + 926 + ret = smsc75xx_read_reg(dev, ID_REV, &buf); 927 + check_warn_return(ret, "Failed to read ID_REV: %d", ret); 928 + 929 + netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x", buf); 930 + 931 + /* Configure GPIO pins as LED outputs */ 932 + ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf); 933 + check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d", ret); 934 + 935 + buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL); 936 + buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL; 937 + 938 + ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf); 939 + check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d", ret); 940 + 941 + ret = smsc75xx_write_reg(dev, FLOW, 0); 942 + check_warn_return(ret, "Failed to write FLOW: %d", ret); 943 + 944 + ret = smsc75xx_write_reg(dev, FCT_FLOW, 0); 945 + check_warn_return(ret, "Failed to write FCT_FLOW: %d", ret); 946 + 947 + /* Don't need rfe_ctl_lock during initialisation */ 948 + ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); 949 + check_warn_return(ret, "Failed to read RFE_CTL: %d", ret); 950 + 951 + pdata->rfe_ctl |= RFE_CTL_AB | RFE_CTL_DPF; 952 + 953 + ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); 954 + check_warn_return(ret, "Failed to write RFE_CTL: %d", ret); 955 + 956 + ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); 957 + check_warn_return(ret, "Failed to read RFE_CTL: %d", ret); 958 + 959 + netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x", pdata->rfe_ctl); 960 + 961 + /* Enable or disable checksum offload engines */ 962 + ethtool_op_set_tx_hw_csum(dev->net, DEFAULT_TX_CSUM_ENABLE); 963 + ret = smsc75xx_set_rx_csum_offload(dev); 964 + check_warn_return(ret, "Failed to set rx csum offload: %d", ret); 965 + 966 + smsc75xx_ethtool_set_tso(dev->net, DEFAULT_TSO_ENABLE); 967 + 968 + smsc75xx_set_multicast(dev->net); 969 + 
970 + ret = smsc75xx_phy_initialize(dev); 971 + check_warn_return(ret, "Failed to initialize PHY: %d", ret); 972 + 973 + ret = smsc75xx_read_reg(dev, INT_EP_CTL, &buf); 974 + check_warn_return(ret, "Failed to read INT_EP_CTL: %d", ret); 975 + 976 + /* enable PHY interrupts */ 977 + buf |= INT_ENP_PHY_INT; 978 + 979 + ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf); 980 + check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret); 981 + 982 + ret = smsc75xx_read_reg(dev, MAC_TX, &buf); 983 + check_warn_return(ret, "Failed to read MAC_TX: %d", ret); 984 + 985 + buf |= MAC_TX_TXEN; 986 + 987 + ret = smsc75xx_write_reg(dev, MAC_TX, buf); 988 + check_warn_return(ret, "Failed to write MAC_TX: %d", ret); 989 + 990 + netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x", buf); 991 + 992 + ret = smsc75xx_read_reg(dev, FCT_TX_CTL, &buf); 993 + check_warn_return(ret, "Failed to read FCT_TX_CTL: %d", ret); 994 + 995 + buf |= FCT_TX_CTL_EN; 996 + 997 + ret = smsc75xx_write_reg(dev, FCT_TX_CTL, buf); 998 + check_warn_return(ret, "Failed to write FCT_TX_CTL: %d", ret); 999 + 1000 + netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x", buf); 1001 + 1002 + ret = smsc75xx_set_rx_max_frame_length(dev, 1514); 1003 + check_warn_return(ret, "Failed to set max rx frame length"); 1004 + 1005 + ret = smsc75xx_read_reg(dev, MAC_RX, &buf); 1006 + check_warn_return(ret, "Failed to read MAC_RX: %d", ret); 1007 + 1008 + buf |= MAC_RX_RXEN; 1009 + 1010 + ret = smsc75xx_write_reg(dev, MAC_RX, buf); 1011 + check_warn_return(ret, "Failed to write MAC_RX: %d", ret); 1012 + 1013 + netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x", buf); 1014 + 1015 + ret = smsc75xx_read_reg(dev, FCT_RX_CTL, &buf); 1016 + check_warn_return(ret, "Failed to read FCT_RX_CTL: %d", ret); 1017 + 1018 + buf |= FCT_RX_CTL_EN; 1019 + 1020 + ret = smsc75xx_write_reg(dev, FCT_RX_CTL, buf); 1021 + check_warn_return(ret, "Failed to write FCT_RX_CTL: %d", ret); 1022 + 1023 + netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x", buf); 1024 + 1025 + netif_dbg(dev, ifup, dev->net, "smsc75xx_reset, return 0"); 1026 + return 0; 1027 + } 1028 + 1029 + static const struct net_device_ops smsc75xx_netdev_ops = { 1030 + .ndo_open = usbnet_open, 1031 + .ndo_stop = usbnet_stop, 1032 + .ndo_start_xmit = usbnet_start_xmit, 1033 + .ndo_tx_timeout = usbnet_tx_timeout, 1034 + .ndo_change_mtu = smsc75xx_change_mtu, 1035 + .ndo_set_mac_address = eth_mac_addr, 1036 + .ndo_validate_addr = eth_validate_addr, 1037 + .ndo_do_ioctl = smsc75xx_ioctl, 1038 + .ndo_set_multicast_list = smsc75xx_set_multicast, 1039 + }; 1040 + 1041 + static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) 1042 + { 1043 + struct smsc75xx_priv *pdata = NULL; 1044 + int ret; 1045 + 1046 + printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n"); 1047 + 1048 + ret = usbnet_get_endpoints(dev, intf); 1049 + check_warn_return(ret, "usbnet_get_endpoints failed: %d", ret); 1050 + 1051 + dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv), 1052 + GFP_KERNEL); 1053 + 1054 + pdata = (struct smsc75xx_priv *)(dev->data[0]); 1055 + if (!pdata) { 1056 + netdev_warn(dev->net, "Unable to allocate smsc75xx_priv"); 1057 + return -ENOMEM; 1058 + } 1059 + 1060 + pdata->dev = dev; 1061 + 1062 + spin_lock_init(&pdata->rfe_ctl_lock); 1063 + mutex_init(&pdata->dataport_mutex); 1064 + 1065 + INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write); 1066 + 1067 + pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE; 1068 + 1069 + /* We have to advertise SG otherwise TSO 
cannot be enabled */ 1070 + dev->net->features |= NETIF_F_SG; 1071 + 1072 + /* Init all registers */ 1073 + ret = smsc75xx_reset(dev); 1074 + 1075 + dev->net->netdev_ops = &smsc75xx_netdev_ops; 1076 + dev->net->ethtool_ops = &smsc75xx_ethtool_ops; 1077 + dev->net->flags |= IFF_MULTICAST; 1078 + dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; 1079 + return 0; 1080 + } 1081 + 1082 + static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf) 1083 + { 1084 + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 1085 + if (pdata) { 1086 + netif_dbg(dev, ifdown, dev->net, "free pdata"); 1087 + kfree(pdata); 1088 + pdata = NULL; 1089 + dev->data[0] = 0; 1090 + } 1091 + } 1092 + 1093 + static void smsc75xx_rx_csum_offload(struct sk_buff *skb, u32 rx_cmd_a, 1094 + u32 rx_cmd_b) 1095 + { 1096 + if (unlikely(rx_cmd_a & RX_CMD_A_LCSM)) { 1097 + skb->ip_summed = CHECKSUM_NONE; 1098 + } else { 1099 + skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT)); 1100 + skb->ip_summed = CHECKSUM_COMPLETE; 1101 + } 1102 + } 1103 + 1104 + static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 1105 + { 1106 + struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 1107 + 1108 + while (skb->len > 0) { 1109 + u32 rx_cmd_a, rx_cmd_b, align_count, size; 1110 + struct sk_buff *ax_skb; 1111 + unsigned char *packet; 1112 + 1113 + memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a)); 1114 + le32_to_cpus(&rx_cmd_a); 1115 + skb_pull(skb, 4); 1116 + 1117 + memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b)); 1118 + le32_to_cpus(&rx_cmd_b); 1119 + skb_pull(skb, 4 + NET_IP_ALIGN); 1120 + 1121 + packet = skb->data; 1122 + 1123 + /* get the packet length */ 1124 + size = (rx_cmd_a & RX_CMD_A_LEN) - NET_IP_ALIGN; 1125 + align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4; 1126 + 1127 + if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { 1128 + netif_dbg(dev, rx_err, dev->net, 1129 + "Error rx_cmd_a=0x%08x", rx_cmd_a); 1130 + dev->net->stats.rx_errors++; 1131 + dev->net->stats.rx_dropped++; 1132 + 1133 + if (rx_cmd_a & RX_CMD_A_FCS) 1134 + dev->net->stats.rx_crc_errors++; 1135 + else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) 1136 + dev->net->stats.rx_frame_errors++; 1137 + } else { 1138 + /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ 1139 + if (unlikely(size > (ETH_FRAME_LEN + 12))) { 1140 + netif_dbg(dev, rx_err, dev->net, 1141 + "size err rx_cmd_a=0x%08x", rx_cmd_a); 1142 + return 0; 1143 + } 1144 + 1145 + /* last frame in this batch */ 1146 + if (skb->len == size) { 1147 + if (pdata->use_rx_csum) 1148 + smsc75xx_rx_csum_offload(skb, rx_cmd_a, 1149 + rx_cmd_b); 1150 + else 1151 + skb->ip_summed = CHECKSUM_NONE; 1152 + 1153 + skb_trim(skb, skb->len - 4); /* remove fcs */ 1154 + skb->truesize = size + sizeof(struct sk_buff); 1155 + 1156 + return 1; 1157 + } 1158 + 1159 + ax_skb = skb_clone(skb, GFP_ATOMIC); 1160 + if (unlikely(!ax_skb)) { 1161 + netdev_warn(dev->net, "Error allocating skb"); 1162 + return 0; 1163 + } 1164 + 1165 + ax_skb->len = size; 1166 + ax_skb->data = packet; 1167 + skb_set_tail_pointer(ax_skb, size); 1168 + 1169 + if (pdata->use_rx_csum) 1170 + smsc75xx_rx_csum_offload(ax_skb, rx_cmd_a, 1171 + rx_cmd_b); 1172 + else 1173 + ax_skb->ip_summed = CHECKSUM_NONE; 1174 + 1175 + skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */ 1176 + ax_skb->truesize = size + sizeof(struct sk_buff); 1177 + 1178 + usbnet_skb_return(dev, ax_skb); 1179 + } 1180 + 1181 + skb_pull(skb, size); 1182 + 1183 + /* padding bytes before the next frame starts */ 1184 + if (skb->len) 1185 + 
skb_pull(skb, align_count); 1186 + } 1187 + 1188 + if (unlikely(skb->len < 0)) { 1189 + netdev_warn(dev->net, "invalid rx length<0 %d", skb->len); 1190 + return 0; 1191 + } 1192 + 1193 + return 1; 1194 + } 1195 + 1196 + static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev, 1197 + struct sk_buff *skb, gfp_t flags) 1198 + { 1199 + u32 tx_cmd_a, tx_cmd_b; 1200 + 1201 + skb_linearize(skb); 1202 + 1203 + if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) { 1204 + struct sk_buff *skb2 = 1205 + skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags); 1206 + dev_kfree_skb_any(skb); 1207 + skb = skb2; 1208 + if (!skb) 1209 + return NULL; 1210 + } 1211 + 1212 + tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS; 1213 + 1214 + if (skb->ip_summed == CHECKSUM_PARTIAL) 1215 + tx_cmd_a |= TX_CMD_A_IPE | TX_CMD_A_TPE; 1216 + 1217 + if (skb_is_gso(skb)) { 1218 + u16 mss = max(skb_shinfo(skb)->gso_size, TX_MSS_MIN); 1219 + tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT) & TX_CMD_B_MSS; 1220 + 1221 + tx_cmd_a |= TX_CMD_A_LSO; 1222 + } else { 1223 + tx_cmd_b = 0; 1224 + } 1225 + 1226 + skb_push(skb, 4); 1227 + cpu_to_le32s(&tx_cmd_b); 1228 + memcpy(skb->data, &tx_cmd_b, 4); 1229 + 1230 + skb_push(skb, 4); 1231 + cpu_to_le32s(&tx_cmd_a); 1232 + memcpy(skb->data, &tx_cmd_a, 4); 1233 + 1234 + return skb; 1235 + } 1236 + 1237 + static const struct driver_info smsc75xx_info = { 1238 + .description = "smsc75xx USB 2.0 Gigabit Ethernet", 1239 + .bind = smsc75xx_bind, 1240 + .unbind = smsc75xx_unbind, 1241 + .link_reset = smsc75xx_link_reset, 1242 + .reset = smsc75xx_reset, 1243 + .rx_fixup = smsc75xx_rx_fixup, 1244 + .tx_fixup = smsc75xx_tx_fixup, 1245 + .status = smsc75xx_status, 1246 + .flags = FLAG_ETHER | FLAG_SEND_ZLP, 1247 + }; 1248 + 1249 + static const struct usb_device_id products[] = { 1250 + { 1251 + /* SMSC7500 USB Gigabit Ethernet Device */ 1252 + USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7500), 1253 + .driver_info = (unsigned long) &smsc75xx_info, 1254 + }, 1255 + { 1256 + /* SMSC7500 USB Gigabit Ethernet Device */ 1257 + USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7505), 1258 + .driver_info = (unsigned long) &smsc75xx_info, 1259 + }, 1260 + { }, /* END */ 1261 + }; 1262 + MODULE_DEVICE_TABLE(usb, products); 1263 + 1264 + static struct usb_driver smsc75xx_driver = { 1265 + .name = SMSC_CHIPNAME, 1266 + .id_table = products, 1267 + .probe = usbnet_probe, 1268 + .suspend = usbnet_suspend, 1269 + .resume = usbnet_resume, 1270 + .disconnect = usbnet_disconnect, 1271 + }; 1272 + 1273 + static int __init smsc75xx_init(void) 1274 + { 1275 + return usb_register(&smsc75xx_driver); 1276 + } 1277 + module_init(smsc75xx_init); 1278 + 1279 + static void __exit smsc75xx_exit(void) 1280 + { 1281 + usb_deregister(&smsc75xx_driver); 1282 + } 1283 + module_exit(smsc75xx_exit); 1284 + 1285 + MODULE_AUTHOR("Nancy Lin"); 1286 + MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>"); 1287 + MODULE_DESCRIPTION("SMSC75XX USB 2.0 Gigabit Ethernet Devices"); 1288 + MODULE_LICENSE("GPL");
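The reset sequence above repeats one idiom several times: set a self-clearing control bit (HW_CFG_LRST, PMT_CTL_PHY_RST), then poll the register every 10 ms until the hardware drops the bit, giving up after 100 attempts. A minimal sketch of that poll loop factored into a helper follows; the helper name is hypothetical, and only smsc75xx_read_reg() and msleep() are taken from the code above.

/* Hypothetical helper mirroring the open-coded wait loops in
 * smsc75xx_reset(); not part of the driver itself. */
static int smsc75xx_wait_bit_clear(struct usbnet *dev, u32 reg, u32 bit)
{
	u32 val;
	int ret, timeout = 0;

	do {
		msleep(10);			/* let the device make progress */
		ret = smsc75xx_read_reg(dev, reg, &val);
		if (ret < 0)
			return ret;		/* the register access itself failed */
		timeout++;
	} while ((val & bit) && (timeout < 100));

	/* roughly one second without the bit clearing: treat as a fault */
	return (val & bit) ? -EIO : 0;
}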
+421
drivers/net/usb/smsc75xx.h
··· 1 + /*************************************************************************** 2 + * 3 + * Copyright (C) 2007-2010 SMSC 4 + * 5 + * This program is free software; you can redistribute it and/or 6 + * modify it under the terms of the GNU General Public License 7 + * as published by the Free Software Foundation; either version 2 8 + * of the License, or (at your option) any later version. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software 17 + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 + * 19 + *****************************************************************************/ 20 + 21 + #ifndef _SMSC75XX_H 22 + #define _SMSC75XX_H 23 + 24 + /* Tx command words */ 25 + #define TX_CMD_A_LSO (0x08000000) 26 + #define TX_CMD_A_IPE (0x04000000) 27 + #define TX_CMD_A_TPE (0x02000000) 28 + #define TX_CMD_A_IVTG (0x01000000) 29 + #define TX_CMD_A_RVTG (0x00800000) 30 + #define TX_CMD_A_FCS (0x00400000) 31 + #define TX_CMD_A_LEN (0x000FFFFF) 32 + 33 + #define TX_CMD_B_MSS (0x3FFF0000) 34 + #define TX_CMD_B_MSS_SHIFT (16) 35 + #define TX_MSS_MIN ((u16)8) 36 + #define TX_CMD_B_VTAG (0x0000FFFF) 37 + 38 + /* Rx command words */ 39 + #define RX_CMD_A_ICE (0x80000000) 40 + #define RX_CMD_A_TCE (0x40000000) 41 + #define RX_CMD_A_IPV (0x20000000) 42 + #define RX_CMD_A_PID (0x18000000) 43 + #define RX_CMD_A_PID_NIP (0x00000000) 44 + #define RX_CMD_A_PID_TCP (0x08000000) 45 + #define RX_CMD_A_PID_UDP (0x10000000) 46 + #define RX_CMD_A_PID_PP (0x18000000) 47 + #define RX_CMD_A_PFF (0x04000000) 48 + #define RX_CMD_A_BAM (0x02000000) 49 + #define RX_CMD_A_MAM (0x01000000) 50 + #define RX_CMD_A_FVTG (0x00800000) 51 + #define RX_CMD_A_RED (0x00400000) 52 + #define RX_CMD_A_RWT (0x00200000) 53 + #define RX_CMD_A_RUNT (0x00100000) 54 + #define RX_CMD_A_LONG (0x00080000) 55 + #define RX_CMD_A_RXE (0x00040000) 56 + #define RX_CMD_A_DRB (0x00020000) 57 + #define RX_CMD_A_FCS (0x00010000) 58 + #define RX_CMD_A_UAM (0x00008000) 59 + #define RX_CMD_A_LCSM (0x00004000) 60 + #define RX_CMD_A_LEN (0x00003FFF) 61 + 62 + #define RX_CMD_B_CSUM (0xFFFF0000) 63 + #define RX_CMD_B_CSUM_SHIFT (16) 64 + #define RX_CMD_B_VTAG (0x0000FFFF) 65 + 66 + /* SCSRs */ 67 + #define ID_REV (0x0000) 68 + 69 + #define FPGA_REV (0x0004) 70 + 71 + #define BOND_CTL (0x0008) 72 + 73 + #define INT_STS (0x000C) 74 + #define INT_STS_RDFO_INT (0x00400000) 75 + #define INT_STS_TXE_INT (0x00200000) 76 + #define INT_STS_MACRTO_INT (0x00100000) 77 + #define INT_STS_TX_DIS_INT (0x00080000) 78 + #define INT_STS_RX_DIS_INT (0x00040000) 79 + #define INT_STS_PHY_INT_ (0x00020000) 80 + #define INT_STS_MAC_ERR_INT (0x00008000) 81 + #define INT_STS_TDFU (0x00004000) 82 + #define INT_STS_TDFO (0x00002000) 83 + #define INT_STS_GPIOS (0x00000FFF) 84 + #define INT_STS_CLEAR_ALL (0xFFFFFFFF) 85 + 86 + #define HW_CFG (0x0010) 87 + #define HW_CFG_SMDET_STS (0x00008000) 88 + #define HW_CFG_SMDET_EN (0x00004000) 89 + #define HW_CFG_EEM (0x00002000) 90 + #define HW_CFG_RST_PROTECT (0x00001000) 91 + #define HW_CFG_PORT_SWAP (0x00000800) 92 + #define HW_CFG_PHY_BOOST (0x00000600) 93 + #define HW_CFG_PHY_BOOST_NORMAL (0x00000000) 94 + #define HW_CFG_PHY_BOOST_4 (0x00002000) 95 + 
#define HW_CFG_PHY_BOOST_8 (0x00004000) 96 + #define HW_CFG_PHY_BOOST_12 (0x00006000) 97 + #define HW_CFG_LEDB (0x00000100) 98 + #define HW_CFG_BIR (0x00000080) 99 + #define HW_CFG_SBP (0x00000040) 100 + #define HW_CFG_IME (0x00000020) 101 + #define HW_CFG_MEF (0x00000010) 102 + #define HW_CFG_ETC (0x00000008) 103 + #define HW_CFG_BCE (0x00000004) 104 + #define HW_CFG_LRST (0x00000002) 105 + #define HW_CFG_SRST (0x00000001) 106 + 107 + #define PMT_CTL (0x0014) 108 + #define PMT_CTL_PHY_PWRUP (0x00000400) 109 + #define PMT_CTL_RES_CLR_WKP_EN (0x00000100) 110 + #define PMT_CTL_DEV_RDY (0x00000080) 111 + #define PMT_CTL_SUS_MODE (0x00000060) 112 + #define PMT_CTL_SUS_MODE_0 (0x00000000) 113 + #define PMT_CTL_SUS_MODE_1 (0x00000020) 114 + #define PMT_CTL_SUS_MODE_2 (0x00000040) 115 + #define PMT_CTL_SUS_MODE_3 (0x00000060) 116 + #define PMT_CTL_PHY_RST (0x00000010) 117 + #define PMT_CTL_WOL_EN (0x00000008) 118 + #define PMT_CTL_ED_EN (0x00000004) 119 + #define PMT_CTL_WUPS (0x00000003) 120 + #define PMT_CTL_WUPS_NO (0x00000000) 121 + #define PMT_CTL_WUPS_ED (0x00000001) 122 + #define PMT_CTL_WUPS_WOL (0x00000002) 123 + #define PMT_CTL_WUPS_MULTI (0x00000003) 124 + 125 + #define LED_GPIO_CFG (0x0018) 126 + #define LED_GPIO_CFG_LED2_FUN_SEL (0x80000000) 127 + #define LED_GPIO_CFG_LED10_FUN_SEL (0x40000000) 128 + #define LED_GPIO_CFG_LEDGPIO_EN (0x0000F000) 129 + #define LED_GPIO_CFG_LEDGPIO_EN_0 (0x00001000) 130 + #define LED_GPIO_CFG_LEDGPIO_EN_1 (0x00002000) 131 + #define LED_GPIO_CFG_LEDGPIO_EN_2 (0x00004000) 132 + #define LED_GPIO_CFG_LEDGPIO_EN_3 (0x00008000) 133 + #define LED_GPIO_CFG_GPBUF (0x00000F00) 134 + #define LED_GPIO_CFG_GPBUF_0 (0x00000100) 135 + #define LED_GPIO_CFG_GPBUF_1 (0x00000200) 136 + #define LED_GPIO_CFG_GPBUF_2 (0x00000400) 137 + #define LED_GPIO_CFG_GPBUF_3 (0x00000800) 138 + #define LED_GPIO_CFG_GPDIR (0x000000F0) 139 + #define LED_GPIO_CFG_GPDIR_0 (0x00000010) 140 + #define LED_GPIO_CFG_GPDIR_1 (0x00000020) 141 + #define LED_GPIO_CFG_GPDIR_2 (0x00000040) 142 + #define LED_GPIO_CFG_GPDIR_3 (0x00000080) 143 + #define LED_GPIO_CFG_GPDATA (0x0000000F) 144 + #define LED_GPIO_CFG_GPDATA_0 (0x00000001) 145 + #define LED_GPIO_CFG_GPDATA_1 (0x00000002) 146 + #define LED_GPIO_CFG_GPDATA_2 (0x00000004) 147 + #define LED_GPIO_CFG_GPDATA_3 (0x00000008) 148 + 149 + #define GPIO_CFG (0x001C) 150 + #define GPIO_CFG_SHIFT (24) 151 + #define GPIO_CFG_GPEN (0xFF000000) 152 + #define GPIO_CFG_GPBUF (0x00FF0000) 153 + #define GPIO_CFG_GPDIR (0x0000FF00) 154 + #define GPIO_CFG_GPDATA (0x000000FF) 155 + 156 + #define GPIO_WAKE (0x0020) 157 + #define GPIO_WAKE_PHY_LINKUP_EN (0x80000000) 158 + #define GPIO_WAKE_POL (0x0FFF0000) 159 + #define GPIO_WAKE_POL_SHIFT (16) 160 + #define GPIO_WAKE_WK (0x00000FFF) 161 + 162 + #define DP_SEL (0x0024) 163 + #define DP_SEL_DPRDY (0x80000000) 164 + #define DP_SEL_RSEL (0x0000000F) 165 + #define DP_SEL_URX (0x00000000) 166 + #define DP_SEL_VHF (0x00000001) 167 + #define DP_SEL_VHF_HASH_LEN (16) 168 + #define DP_SEL_VHF_VLAN_LEN (128) 169 + #define DP_SEL_LSO_HEAD (0x00000002) 170 + #define DP_SEL_FCT_RX (0x00000003) 171 + #define DP_SEL_FCT_TX (0x00000004) 172 + #define DP_SEL_DESCRIPTOR (0x00000005) 173 + #define DP_SEL_WOL (0x00000006) 174 + 175 + #define DP_CMD (0x0028) 176 + #define DP_CMD_WRITE (0x01) 177 + #define DP_CMD_READ (0x00) 178 + 179 + #define DP_ADDR (0x002C) 180 + 181 + #define DP_DATA (0x0030) 182 + 183 + #define BURST_CAP (0x0034) 184 + #define BURST_CAP_MASK (0x0000000F) 185 + 186 + #define INT_EP_CTL (0x0038) 187 + #define 
INT_EP_CTL_INTEP_ON (0x80000000) 188 + #define INT_EP_CTL_RDFO_EN (0x00400000) 189 + #define INT_EP_CTL_TXE_EN (0x00200000) 190 + #define INT_EP_CTL_MACROTO_EN (0x00100000) 191 + #define INT_EP_CTL_TX_DIS_EN (0x00080000) 192 + #define INT_EP_CTL_RX_DIS_EN (0x00040000) 193 + #define INT_EP_CTL_PHY_EN_ (0x00020000) 194 + #define INT_EP_CTL_MAC_ERR_EN (0x00008000) 195 + #define INT_EP_CTL_TDFU_EN (0x00004000) 196 + #define INT_EP_CTL_TDFO_EN (0x00002000) 197 + #define INT_EP_CTL_RX_FIFO_EN (0x00001000) 198 + #define INT_EP_CTL_GPIOX_EN (0x00000FFF) 199 + 200 + #define BULK_IN_DLY (0x003C) 201 + #define BULK_IN_DLY_MASK (0xFFFF) 202 + 203 + #define E2P_CMD (0x0040) 204 + #define E2P_CMD_BUSY (0x80000000) 205 + #define E2P_CMD_MASK (0x70000000) 206 + #define E2P_CMD_READ (0x00000000) 207 + #define E2P_CMD_EWDS (0x10000000) 208 + #define E2P_CMD_EWEN (0x20000000) 209 + #define E2P_CMD_WRITE (0x30000000) 210 + #define E2P_CMD_WRAL (0x40000000) 211 + #define E2P_CMD_ERASE (0x50000000) 212 + #define E2P_CMD_ERAL (0x60000000) 213 + #define E2P_CMD_RELOAD (0x70000000) 214 + #define E2P_CMD_TIMEOUT (0x00000400) 215 + #define E2P_CMD_LOADED (0x00000200) 216 + #define E2P_CMD_ADDR (0x000001FF) 217 + 218 + #define MAX_EEPROM_SIZE (512) 219 + 220 + #define E2P_DATA (0x0044) 221 + #define E2P_DATA_MASK_ (0x000000FF) 222 + 223 + #define RFE_CTL (0x0060) 224 + #define RFE_CTL_TCPUDP_CKM (0x00001000) 225 + #define RFE_CTL_IP_CKM (0x00000800) 226 + #define RFE_CTL_AB (0x00000400) 227 + #define RFE_CTL_AM (0x00000200) 228 + #define RFE_CTL_AU (0x00000100) 229 + #define RFE_CTL_VS (0x00000080) 230 + #define RFE_CTL_UF (0x00000040) 231 + #define RFE_CTL_VF (0x00000020) 232 + #define RFE_CTL_SPF (0x00000010) 233 + #define RFE_CTL_MHF (0x00000008) 234 + #define RFE_CTL_DHF (0x00000004) 235 + #define RFE_CTL_DPF (0x00000002) 236 + #define RFE_CTL_RST_RF (0x00000001) 237 + 238 + #define VLAN_TYPE (0x0064) 239 + #define VLAN_TYPE_MASK (0x0000FFFF) 240 + 241 + #define FCT_RX_CTL (0x0090) 242 + #define FCT_RX_CTL_EN (0x80000000) 243 + #define FCT_RX_CTL_RST (0x40000000) 244 + #define FCT_RX_CTL_SBF (0x02000000) 245 + #define FCT_RX_CTL_OVERFLOW (0x01000000) 246 + #define FCT_RX_CTL_FRM_DROP (0x00800000) 247 + #define FCT_RX_CTL_RX_NOT_EMPTY (0x00400000) 248 + #define FCT_RX_CTL_RX_EMPTY (0x00200000) 249 + #define FCT_RX_CTL_RX_DISABLED (0x00100000) 250 + #define FCT_RX_CTL_RXUSED (0x0000FFFF) 251 + 252 + #define FCT_TX_CTL (0x0094) 253 + #define FCT_TX_CTL_EN (0x80000000) 254 + #define FCT_TX_CTL_RST (0x40000000) 255 + #define FCT_TX_CTL_TX_NOT_EMPTY (0x00400000) 256 + #define FCT_TX_CTL_TX_EMPTY (0x00200000) 257 + #define FCT_TX_CTL_TX_DISABLED (0x00100000) 258 + #define FCT_TX_CTL_TXUSED (0x0000FFFF) 259 + 260 + #define FCT_RX_FIFO_END (0x0098) 261 + #define FCT_RX_FIFO_END_MASK (0x0000007F) 262 + 263 + #define FCT_TX_FIFO_END (0x009C) 264 + #define FCT_TX_FIFO_END_MASK (0x0000003F) 265 + 266 + #define FCT_FLOW (0x00A0) 267 + #define FCT_FLOW_THRESHOLD_OFF (0x00007F00) 268 + #define FCT_FLOW_THRESHOLD_OFF_SHIFT (8) 269 + #define FCT_FLOW_THRESHOLD_ON (0x0000007F) 270 + 271 + /* MAC CSRs */ 272 + #define MAC_CR (0x100) 273 + #define MAC_CR_ADP (0x00002000) 274 + #define MAC_CR_ADD (0x00001000) 275 + #define MAC_CR_ASD (0x00000800) 276 + #define MAC_CR_INT_LOOP (0x00000400) 277 + #define MAC_CR_BOLMT (0x000000C0) 278 + #define MAC_CR_FDPX (0x00000008) 279 + #define MAC_CR_CFG (0x00000006) 280 + #define MAC_CR_CFG_10 (0x00000000) 281 + #define MAC_CR_CFG_100 (0x00000002) 282 + #define MAC_CR_CFG_1000 (0x00000004) 283 + 
#define MAC_CR_RST (0x00000001) 284 + 285 + #define MAC_RX (0x104) 286 + #define MAC_RX_MAX_SIZE (0x3FFF0000) 287 + #define MAC_RX_MAX_SIZE_SHIFT (16) 288 + #define MAC_RX_FCS_STRIP (0x00000010) 289 + #define MAC_RX_FSE (0x00000004) 290 + #define MAC_RX_RXD (0x00000002) 291 + #define MAC_RX_RXEN (0x00000001) 292 + 293 + #define MAC_TX (0x108) 294 + #define MAC_TX_BFCS (0x00000004) 295 + #define MAC_TX_TXD (0x00000002) 296 + #define MAC_TX_TXEN (0x00000001) 297 + 298 + #define FLOW (0x10C) 299 + #define FLOW_FORCE_FC (0x80000000) 300 + #define FLOW_TX_FCEN (0x40000000) 301 + #define FLOW_RX_FCEN (0x20000000) 302 + #define FLOW_FPF (0x10000000) 303 + #define FLOW_PAUSE_TIME (0x0000FFFF) 304 + 305 + #define RAND_SEED (0x110) 306 + #define RAND_SEED_MASK (0x0000FFFF) 307 + 308 + #define ERR_STS (0x114) 309 + #define ERR_STS_FCS_ERR (0x00000100) 310 + #define ERR_STS_LFRM_ERR (0x00000080) 311 + #define ERR_STS_RUNT_ERR (0x00000040) 312 + #define ERR_STS_COLLISION_ERR (0x00000010) 313 + #define ERR_STS_ALIGN_ERR (0x00000008) 314 + #define ERR_STS_URUN_ERR (0x00000004) 315 + 316 + #define RX_ADDRH (0x118) 317 + #define RX_ADDRH_MASK (0x0000FFFF) 318 + 319 + #define RX_ADDRL (0x11C) 320 + 321 + #define MII_ACCESS (0x120) 322 + #define MII_ACCESS_PHY_ADDR (0x0000F800) 323 + #define MII_ACCESS_PHY_ADDR_SHIFT (11) 324 + #define MII_ACCESS_REG_ADDR (0x000007C0) 325 + #define MII_ACCESS_REG_ADDR_SHIFT (6) 326 + #define MII_ACCESS_READ (0x00000000) 327 + #define MII_ACCESS_WRITE (0x00000002) 328 + #define MII_ACCESS_BUSY (0x00000001) 329 + 330 + #define MII_DATA (0x124) 331 + #define MII_DATA_MASK (0x0000FFFF) 332 + 333 + #define WUCSR (0x140) 334 + #define WUCSR_PFDA_FR (0x00000080) 335 + #define WUCSR_WUFR (0x00000040) 336 + #define WUCSR_MPR (0x00000020) 337 + #define WUCSR_BCAST_FR (0x00000010) 338 + #define WUCSR_PFDA_EN (0x00000008) 339 + #define WUCSR_WUEN (0x00000004) 340 + #define WUCSR_MPEN (0x00000002) 341 + #define WUCSR_BCST_EN (0x00000001) 342 + 343 + #define WUF_CFGX (0x144) 344 + #define WUF_CFGX_EN (0x80000000) 345 + #define WUF_CFGX_ATYPE (0x03000000) 346 + #define WUF_CFGX_ATYPE_UNICAST (0x00000000) 347 + #define WUF_CFGX_ATYPE_MULTICAST (0x02000000) 348 + #define WUF_CFGX_ATYPE_ALL (0x03000000) 349 + #define WUF_CFGX_PATTERN_OFFSET (0x007F0000) 350 + #define WUF_CFGX_PATTERN_OFFSET_SHIFT (16) 351 + #define WUF_CFGX_CRC16 (0x0000FFFF) 352 + #define WUF_NUM (8) 353 + 354 + #define WUF_MASKX (0x170) 355 + #define WUF_MASKX_AVALID (0x80000000) 356 + #define WUF_MASKX_ATYPE (0x40000000) 357 + 358 + #define ADDR_FILTX (0x300) 359 + #define ADDR_FILTX_FB_VALID (0x80000000) 360 + #define ADDR_FILTX_FB_TYPE (0x40000000) 361 + #define ADDR_FILTX_FB_ADDRHI (0x0000FFFF) 362 + #define ADDR_FILTX_SB_ADDRLO (0xFFFFFFFF) 363 + 364 + #define WUCSR2 (0x500) 365 + #define WUCSR2_NS_RCD (0x00000040) 366 + #define WUCSR2_ARP_RCD (0x00000020) 367 + #define WUCSR2_TCPSYN_RCD (0x00000010) 368 + #define WUCSR2_NS_OFFLOAD (0x00000004) 369 + #define WUCSR2_ARP_OFFLOAD (0x00000002) 370 + #define WUCSR2_TCPSYN_OFFLOAD (0x00000001) 371 + 372 + #define WOL_FIFO_STS (0x504) 373 + 374 + #define IPV6_ADDRX (0x510) 375 + 376 + #define IPV4_ADDRX (0x590) 377 + 378 + 379 + /* Vendor-specific PHY Definitions */ 380 + 381 + /* Mode Control/Status Register */ 382 + #define PHY_MODE_CTRL_STS (17) 383 + #define MODE_CTRL_STS_EDPWRDOWN ((u16)0x2000) 384 + #define MODE_CTRL_STS_ENERGYON ((u16)0x0002) 385 + 386 + #define PHY_INT_SRC (29) 387 + #define PHY_INT_SRC_ENERGY_ON ((u16)0x0080) 388 + #define PHY_INT_SRC_ANEG_COMP 
((u16)0x0040) 389 + #define PHY_INT_SRC_REMOTE_FAULT ((u16)0x0020) 390 + #define PHY_INT_SRC_LINK_DOWN ((u16)0x0010) 391 + 392 + #define PHY_INT_MASK (30) 393 + #define PHY_INT_MASK_ENERGY_ON ((u16)0x0080) 394 + #define PHY_INT_MASK_ANEG_COMP ((u16)0x0040) 395 + #define PHY_INT_MASK_REMOTE_FAULT ((u16)0x0020) 396 + #define PHY_INT_MASK_LINK_DOWN ((u16)0x0010) 397 + #define PHY_INT_MASK_DEFAULT (PHY_INT_MASK_ANEG_COMP | \ 398 + PHY_INT_MASK_LINK_DOWN) 399 + 400 + #define PHY_SPECIAL (31) 401 + #define PHY_SPECIAL_SPD ((u16)0x001C) 402 + #define PHY_SPECIAL_SPD_10HALF ((u16)0x0004) 403 + #define PHY_SPECIAL_SPD_10FULL ((u16)0x0014) 404 + #define PHY_SPECIAL_SPD_100HALF ((u16)0x0008) 405 + #define PHY_SPECIAL_SPD_100FULL ((u16)0x0018) 406 + 407 + /* USB Vendor Requests */ 408 + #define USB_VENDOR_REQUEST_WRITE_REGISTER 0xA0 409 + #define USB_VENDOR_REQUEST_READ_REGISTER 0xA1 410 + #define USB_VENDOR_REQUEST_GET_STATS 0xA2 411 + 412 + /* Interrupt Endpoint status word bitfields */ 413 + #define INT_ENP_RDFO_INT ((u32)BIT(22)) 414 + #define INT_ENP_TXE_INT ((u32)BIT(21)) 415 + #define INT_ENP_TX_DIS_INT ((u32)BIT(19)) 416 + #define INT_ENP_RX_DIS_INT ((u32)BIT(18)) 417 + #define INT_ENP_PHY_INT ((u32)BIT(17)) 418 + #define INT_ENP_MAC_ERR_INT ((u32)BIT(15)) 419 + #define INT_ENP_RX_FIFO_DATA_INT ((u32)BIT(12)) 420 + 421 + #endif /* _SMSC75XX_H */
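The MII_ACCESS field layout defined above is enough to show how a clause-22 PHY register access word is put together: the PHY and register addresses are shifted into their masked fields, the read or write opcode is selected, and the busy bit is set for the hardware to clear once the cycle finishes. A small standalone sketch, with the constants copied from the header (the function itself is illustrative, not driver code):

#include <stdint.h>

#define MII_ACCESS_PHY_ADDR		(0x0000F800)
#define MII_ACCESS_PHY_ADDR_SHIFT	(11)
#define MII_ACCESS_REG_ADDR		(0x000007C0)
#define MII_ACCESS_REG_ADDR_SHIFT	(6)
#define MII_ACCESS_READ			(0x00000000)
#define MII_ACCESS_WRITE		(0x00000002)
#define MII_ACCESS_BUSY			(0x00000001)

/* Build the MII_ACCESS command word for one PHY register cycle. */
static uint32_t mii_access_cmd(unsigned int phy_id, unsigned int reg, int write)
{
	uint32_t cmd = ((uint32_t)phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR;

	cmd |= ((uint32_t)reg << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR;
	cmd |= write ? MII_ACCESS_WRITE : MII_ACCESS_READ;
	cmd |= MII_ACCESS_BUSY;	/* cleared by the device when the cycle completes */
	return cmd;
}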
+15
drivers/net/usb/smsc95xx.c
··· 709 709 710 710 static int smsc95xx_phy_initialize(struct usbnet *dev) 711 711 { 712 + int bmcr, timeout = 0; 713 + 712 714 /* Initialize MII structure */ 713 715 dev->mii.dev = dev->net; 714 716 dev->mii.mdio_read = smsc95xx_mdio_read; ··· 719 717 dev->mii.reg_num_mask = 0x1f; 720 718 dev->mii.phy_id = SMSC95XX_INTERNAL_PHY_ID; 721 719 720 + /* reset phy and wait for reset to complete */ 722 721 smsc95xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET); 722 + 723 + do { 724 + msleep(10); 725 + bmcr = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR); 726 + timeout++; 727 + } while ((bmcr & MII_BMCR) && (timeout < 100)); 728 + 729 + if (timeout >= 100) { 730 + netdev_warn(dev->net, "timeout on PHY Reset"); 731 + return -EIO; 732 + } 733 + 723 734 smsc95xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, 724 735 ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | 725 736 ADVERTISE_PAUSE_ASYM);
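The loop added to smsc95xx_phy_initialize() above waits for the PHY reset it just issued to finish. For a standard IEEE 802.3 clause-22 PHY, BMCR_RESET is defined to self-clear when the reset completes, so the generic shape of such a wait is sketched below; mdio_read() stands in for whatever MDIO accessor a driver provides, and the function is illustrative rather than taken from this patch.

/* Generic clause-22 "wait for PHY reset to self-clear" sketch. */
static int phy_wait_reset(struct net_device *ndev, int phy_id,
			  int (*mdio_read)(struct net_device *, int, int))
{
	int bmcr, timeout = 0;

	do {
		msleep(10);
		bmcr = mdio_read(ndev, phy_id, MII_BMCR);
		timeout++;
	} while ((bmcr & BMCR_RESET) && (timeout < 100));

	return (bmcr & BMCR_RESET) ? -EIO : 0;
}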
+1 -1
net/bridge/br_device.c
··· 40 40 goto out; 41 41 42 42 mdst = br_mdb_get(br, skb); 43 - if (mdst || BR_INPUT_SKB_CB(skb)->mrouters_only) 43 + if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) 44 44 br_multicast_deliver(mdst, skb); 45 45 else 46 46 br_flood_deliver(br, skb);
+14 -4
net/bridge/br_forward.c
··· 19 19 #include <linux/netfilter_bridge.h> 20 20 #include "br_private.h" 21 21 22 + static int deliver_clone(const struct net_bridge_port *prev, 23 + struct sk_buff *skb, 24 + void (*__packet_hook)(const struct net_bridge_port *p, 25 + struct sk_buff *skb)); 26 + 22 27 /* Don't forward packets to originating port or forwarding diasabled */ 23 28 static inline int should_deliver(const struct net_bridge_port *p, 24 29 const struct sk_buff *skb) ··· 99 94 } 100 95 101 96 /* called with rcu_read_lock */ 102 - void br_forward(const struct net_bridge_port *to, struct sk_buff *skb) 97 + void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0) 103 98 { 104 99 if (should_deliver(to, skb)) { 105 - __br_forward(to, skb); 100 + if (skb0) 101 + deliver_clone(to, skb, __br_forward); 102 + else 103 + __br_forward(to, skb); 106 104 return; 107 105 } 108 106 109 - kfree_skb(skb); 107 + if (!skb0) 108 + kfree_skb(skb); 110 109 } 111 110 112 - static int deliver_clone(struct net_bridge_port *prev, struct sk_buff *skb, 111 + static int deliver_clone(const struct net_bridge_port *prev, 112 + struct sk_buff *skb, 113 113 void (*__packet_hook)(const struct net_bridge_port *p, 114 114 struct sk_buff *skb)) 115 115 {
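The new third argument to br_forward() tells it whether the caller still needs the original skb (skb0 is the copy being passed up locally); in that case the port delivery must operate on a clone rather than consume the original. Below is a minimal sketch of that clone-then-deliver step, in the spirit of the deliver_clone() helper declared above; its real body sits outside this hunk, so this is only a plausible shape, not the actual implementation.

/* Sketch: clone the skb so the caller keeps the original, then hand
 * the clone to the delivery hook.  Illustrative only. */
static int deliver_clone_sketch(const struct net_bridge_port *prev,
				struct sk_buff *skb,
				void (*__packet_hook)(const struct net_bridge_port *p,
						      struct sk_buff *skb))
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return -ENOMEM;

	__packet_hook(prev, clone);
	return 0;
}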
+2 -2
net/bridge/br_input.c
··· 70 70 71 71 if (is_multicast_ether_addr(dest)) { 72 72 mdst = br_mdb_get(br, skb); 73 - if (mdst || BR_INPUT_SKB_CB(skb)->mrouters_only) { 73 + if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { 74 74 if ((mdst && !hlist_unhashed(&mdst->mglist)) || 75 75 br_multicast_is_router(br)) 76 76 skb2 = skb; ··· 90 90 91 91 if (skb) { 92 92 if (dst) 93 - br_forward(dst->dst, skb); 93 + br_forward(dst->dst, skb, skb2); 94 94 else 95 95 br_flood_forward(br, skb, skb2); 96 96 }
+10 -8
net/bridge/br_multicast.c
··· 49 49 static struct net_bridge_mdb_entry *br_mdb_ip_get( 50 50 struct net_bridge_mdb_htable *mdb, __be32 dst) 51 51 { 52 + if (!mdb) 53 + return NULL; 54 + 52 55 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst)); 53 56 } 54 57 55 58 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 56 59 struct sk_buff *skb) 57 60 { 58 - struct net_bridge_mdb_htable *mdb = br->mdb; 59 - 60 - if (!mdb || br->multicast_disabled) 61 + if (br->multicast_disabled) 61 62 return NULL; 62 63 63 64 switch (skb->protocol) { 64 65 case htons(ETH_P_IP): 65 66 if (BR_INPUT_SKB_CB(skb)->igmp) 66 67 break; 67 - return br_mdb_ip_get(mdb, ip_hdr(skb)->daddr); 68 + return br_mdb_ip_get(br->mdb, ip_hdr(skb)->daddr); 68 69 } 69 70 70 71 return NULL; ··· 852 851 if (ih3->nsrcs) 853 852 goto out; 854 853 855 - max_delay = ih3->code ? 1 : 856 - IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE); 854 + max_delay = ih3->code ? 855 + IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; 857 856 } 858 857 859 858 if (!group) ··· 991 990 992 991 err = pskb_trim_rcsum(skb2, len); 993 992 if (err) 994 - return err; 993 + goto err_out; 995 994 } 996 995 997 996 len -= ip_hdrlen(skb2); ··· 1013 1012 case CHECKSUM_NONE: 1014 1013 skb2->csum = 0; 1015 1014 if (skb_checksum_complete(skb2)) 1016 - return -EINVAL; 1015 + goto out; 1017 1016 } 1018 1017 1019 1018 err = 0; ··· 1040 1039 1041 1040 out: 1042 1041 __skb_push(skb2, offset); 1042 + err_out: 1043 1043 if (skb2 != skb) 1044 1044 kfree_skb(skb2); 1045 1045 return err;
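The max_delay fix above swaps the branches of the ternary so that a non-zero Max Response Code is actually decoded instead of being collapsed to a single jiffy. The decoding itself follows RFC 3376 section 4.1.1: codes below 128 are tenths of a second directly, while codes with the top bit set pack a 3-bit exponent and a 4-bit mantissa. A standalone sketch of that computation, which is effectively what the IGMPV3_MRC() macro used in the hunk evaluates:

#include <stdint.h>

/* Decode an IGMPv3 Max Response Code into Max Response Time,
 * in units of 1/10 second (RFC 3376, section 4.1.1). */
static unsigned int igmpv3_max_resp_time(uint8_t code)
{
	if (code < 128)
		return code;

	/* 1 | exp (3 bits) | mant (4 bits)  =>  (mant | 0x10) << (exp + 3) */
	return (unsigned int)((code & 0x0f) | 0x10) << (((code >> 4) & 0x07) + 3);
}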
+9 -1
net/bridge/br_private.h
··· 206 206 207 207 struct br_input_skb_cb { 208 208 struct net_device *brdev; 209 + #ifdef CONFIG_BRIDGE_IGMP_SNOOPING 209 210 int igmp; 210 211 int mrouters_only; 212 + #endif 211 213 }; 212 214 213 215 #define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb) 216 + 217 + #ifdef CONFIG_BRIDGE_IGMP_SNOOPING 218 + # define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (BR_INPUT_SKB_CB(__skb)->mrouters_only) 219 + #else 220 + # define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (0) 221 + #endif 214 222 215 223 extern struct notifier_block br_device_notifier; 216 224 extern const u8 br_group_address[ETH_ALEN]; ··· 260 252 struct sk_buff *skb); 261 253 extern int br_dev_queue_push_xmit(struct sk_buff *skb); 262 254 extern void br_forward(const struct net_bridge_port *to, 263 - struct sk_buff *skb); 255 + struct sk_buff *skb, struct sk_buff *skb0); 264 256 extern int br_forward_finish(struct sk_buff *skb); 265 257 extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb); 266 258 extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
+2 -2
net/core/netpoll.c
··· 735 735 npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); 736 736 if (!npinfo) { 737 737 err = -ENOMEM; 738 - goto release; 738 + goto put; 739 739 } 740 740 741 741 npinfo->rx_flags = 0; ··· 845 845 846 846 kfree(npinfo); 847 847 } 848 - 848 + put: 849 849 dev_put(ndev); 850 850 return err; 851 851 }
+4 -4
net/dccp/ipv4.c
··· 998 998 999 999 static int __net_init dccp_v4_init_net(struct net *net) 1000 1000 { 1001 - int err; 1001 + if (dccp_hashinfo.bhash == NULL) 1002 + return -ESOCKTNOSUPPORT; 1002 1003 1003 - err = inet_ctl_sock_create(&net->dccp.v4_ctl_sk, PF_INET, 1004 - SOCK_DCCP, IPPROTO_DCCP, net); 1005 - return err; 1004 + return inet_ctl_sock_create(&net->dccp.v4_ctl_sk, PF_INET, 1005 + SOCK_DCCP, IPPROTO_DCCP, net); 1006 1006 } 1007 1007 1008 1008 static void __net_exit dccp_v4_exit_net(struct net *net)
+4 -4
net/dccp/ipv6.c
··· 1191 1191 1192 1192 static int __net_init dccp_v6_init_net(struct net *net) 1193 1193 { 1194 - int err; 1194 + if (dccp_hashinfo.bhash == NULL) 1195 + return -ESOCKTNOSUPPORT; 1195 1196 1196 - err = inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6, 1197 - SOCK_DCCP, IPPROTO_DCCP, net); 1198 - return err; 1197 + return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6, 1198 + SOCK_DCCP, IPPROTO_DCCP, net); 1199 1199 } 1200 1200 1201 1201 static void __net_exit dccp_v6_exit_net(struct net *net)
+9 -7
net/dccp/proto.c
··· 1036 1036 FIELD_SIZEOF(struct sk_buff, cb)); 1037 1037 rc = percpu_counter_init(&dccp_orphan_count, 0); 1038 1038 if (rc) 1039 - goto out; 1039 + goto out_fail; 1040 1040 rc = -ENOBUFS; 1041 1041 inet_hashinfo_init(&dccp_hashinfo); 1042 1042 dccp_hashinfo.bind_bucket_cachep = ··· 1125 1125 goto out_sysctl_exit; 1126 1126 1127 1127 dccp_timestamping_init(); 1128 - out: 1129 - return rc; 1128 + 1129 + return 0; 1130 + 1130 1131 out_sysctl_exit: 1131 1132 dccp_sysctl_exit(); 1132 1133 out_ackvec_exit: ··· 1136 1135 dccp_mib_exit(); 1137 1136 out_free_dccp_bhash: 1138 1137 free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order); 1139 - dccp_hashinfo.bhash = NULL; 1140 1138 out_free_dccp_locks: 1141 1139 inet_ehash_locks_free(&dccp_hashinfo); 1142 1140 out_free_dccp_ehash: 1143 1141 free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order); 1144 - dccp_hashinfo.ehash = NULL; 1145 1142 out_free_bind_bucket_cachep: 1146 1143 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); 1147 - dccp_hashinfo.bind_bucket_cachep = NULL; 1148 1144 out_free_percpu: 1149 1145 percpu_counter_destroy(&dccp_orphan_count); 1150 - goto out; 1146 + out_fail: 1147 + dccp_hashinfo.bhash = NULL; 1148 + dccp_hashinfo.ehash = NULL; 1149 + dccp_hashinfo.bind_bucket_cachep = NULL; 1150 + return rc; 1151 1151 } 1152 1152 1153 1153 static void __exit dccp_fini(void)
+6 -10
net/ipv4/route.c
··· 932 932 { 933 933 del_timer_sync(&net->ipv4.rt_secret_timer); 934 934 rt_cache_invalidate(net); 935 - if (ip_rt_secret_interval) { 936 - net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval; 937 - add_timer(&net->ipv4.rt_secret_timer); 938 - } 935 + if (ip_rt_secret_interval) 936 + mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval); 939 937 } 940 938 941 939 static void rt_emergency_hash_rebuild(struct net *net) ··· 3101 3103 rtnl_lock(); 3102 3104 for_each_net(net) { 3103 3105 int deleted = del_timer_sync(&net->ipv4.rt_secret_timer); 3106 + long time; 3104 3107 3105 3108 if (!new) 3106 3109 continue; 3107 3110 3108 3111 if (deleted) { 3109 - long time = net->ipv4.rt_secret_timer.expires - jiffies; 3112 + time = net->ipv4.rt_secret_timer.expires - jiffies; 3110 3113 3111 3114 if (time <= 0 || (time += diff) <= 0) 3112 3115 time = 0; 3113 - 3114 - net->ipv4.rt_secret_timer.expires = time; 3115 3116 } else 3116 - net->ipv4.rt_secret_timer.expires = new; 3117 + time = new; 3117 3118 3118 - net->ipv4.rt_secret_timer.expires += jiffies; 3119 - add_timer(&net->ipv4.rt_secret_timer); 3119 + mod_timer(&net->ipv4.rt_secret_timer, jiffies + time); 3120 3120 } 3121 3121 rtnl_unlock(); 3122 3122 }
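Both route.c hunks replace the open-coded "set rt_secret_timer.expires, then add_timer()" sequence with mod_timer(). The difference is behavioural, not cosmetic: add_timer() may only be called on a timer that is not already pending (it triggers a BUG_ON otherwise), whereas mod_timer() atomically deactivates a pending timer and re-arms it with the new expiry. An illustrative fragment of the two patterns (my_timer and delay are placeholder names):

/* Racy form: if the timer got re-armed between the pending check and
 * here, add_timer() trips its BUG_ON(timer_pending(...)). */
my_timer.expires = jiffies + delay;
add_timer(&my_timer);

/* Safe form: (re)arm in one step, whether or not it was pending. */
mod_timer(&my_timer, jiffies + delay);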
+1 -2
net/phonet/pn_dev.c
··· 107 107 if (pnd) { 108 108 u8 addr; 109 109 110 - for (addr = find_first_bit(pnd->addrs, 64); addr < 64; 111 - addr = find_next_bit(pnd->addrs, 64, 1+addr)) 110 + for_each_set_bit(addr, pnd->addrs, 64) 112 111 phonet_address_notify(RTM_DELADDR, dev, addr); 113 112 kfree(pnd); 114 113 }
+1 -2
net/phonet/pn_netlink.c
··· 141 141 continue; 142 142 143 143 addr_idx = 0; 144 - for (addr = find_first_bit(pnd->addrs, 64); addr < 64; 145 - addr = find_next_bit(pnd->addrs, 64, 1+addr)) { 144 + for_each_set_bit(addr, pnd->addrs, 64) { 146 145 if (addr_idx++ < addr_start_idx) 147 146 continue; 148 147
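Both phonet hunks replace an explicit find_first_bit()/find_next_bit() loop with the for_each_set_bit() iterator. The iterator is only syntactic sugar over the same primitives; its definition in <linux/bitops.h> is essentially the following (shape shown from memory, not quoted verbatim):

#define for_each_set_bit(bit, addr, size)			\
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

So the rewritten loops in pn_dev.c and pn_netlink.c visit exactly the bits the removed ones did, with the boilerplate hidden behind the macro.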
+16 -10
net/tipc/ref.c
··· 153 153 154 154 u32 tipc_ref_acquire(void *object, spinlock_t **lock) 155 155 { 156 - struct reference *entry; 157 156 u32 index; 158 157 u32 index_mask; 159 158 u32 next_plus_upper; 160 159 u32 ref; 160 + struct reference *entry = NULL; 161 161 162 162 if (!object) { 163 163 err("Attempt to acquire reference to non-existent object\n"); ··· 175 175 index = tipc_ref_table.first_free; 176 176 entry = &(tipc_ref_table.entries[index]); 177 177 index_mask = tipc_ref_table.index_mask; 178 - /* take lock in case a previous user of entry still holds it */ 179 - spin_lock_bh(&entry->lock); 180 178 next_plus_upper = entry->ref; 181 179 tipc_ref_table.first_free = next_plus_upper & index_mask; 182 180 ref = (next_plus_upper & ~index_mask) + index; 183 - entry->ref = ref; 184 - entry->object = object; 185 - *lock = &entry->lock; 186 181 } 187 182 else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { 188 183 index = tipc_ref_table.init_point++; 189 184 entry = &(tipc_ref_table.entries[index]); 190 185 spin_lock_init(&entry->lock); 191 - spin_lock_bh(&entry->lock); 192 186 ref = tipc_ref_table.start_mask + index; 193 - entry->ref = ref; 194 - entry->object = object; 195 - *lock = &entry->lock; 196 187 } 197 188 else { 198 189 ref = 0; 199 190 } 200 191 write_unlock_bh(&ref_table_lock); 192 + 193 + /* 194 + * Grab the lock so no one else can modify this entry 195 + * While we assign its ref value & object pointer 196 + */ 197 + if (entry) { 198 + spin_lock_bh(&entry->lock); 199 + entry->ref = ref; 200 + entry->object = object; 201 + *lock = &entry->lock; 202 + /* 203 + * keep it locked, the caller is responsible 204 + * for unlocking this when they're done with it 205 + */ 206 + } 201 207 202 208 return ref; 203 209 }