Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-2.6.28' of git://git.marvell.com/mv643xx_eth into upstream-next

Authored by Jeff Garzik and committed by Jeff Garzik
ae19161e 152cbcf9

+839 -796
+1 -1
arch/arm/mach-kirkwood/db88f6281-bp-setup.c
··· 25 25 #include "common.h" 26 26 27 27 static struct mv643xx_eth_platform_data db88f6281_ge00_data = { 28 - .phy_addr = 8, 28 + .phy_addr = MV643XX_ETH_PHY_ADDR(8), 29 29 }; 30 30 31 31 static struct mv_sata_platform_data db88f6281_sata_data = {
+1 -1
arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
··· 30 30 #define RD88F6192_GPIO_USB_VBUS 10 31 31 32 32 static struct mv643xx_eth_platform_data rd88f6192_ge00_data = { 33 - .phy_addr = 8, 33 + .phy_addr = MV643XX_ETH_PHY_ADDR(8), 34 34 }; 35 35 36 36 static struct mv_sata_platform_data rd88f6192_sata_data = {
+1 -1
arch/arm/mach-kirkwood/rd88f6281-setup.c
··· 69 69 }; 70 70 71 71 static struct mv643xx_eth_platform_data rd88f6281_ge00_data = { 72 - .phy_addr = -1, 72 + .phy_addr = MV643XX_ETH_PHY_NONE, 73 73 .speed = SPEED_1000, 74 74 .duplex = DUPLEX_FULL, 75 75 };
+1 -1
arch/arm/mach-loki/lb88rc8480-setup.c
··· 67 67 }; 68 68 69 69 static struct mv643xx_eth_platform_data lb88rc8480_ge0_data = { 70 - .phy_addr = 1, 70 + .phy_addr = MV643XX_ETH_PHY_ADDR(1), 71 71 .mac_addr = { 0x00, 0x50, 0x43, 0x11, 0x22, 0x33 }, 72 72 }; 73 73
+3 -3
arch/arm/mach-mv78xx0/common.c
··· 330 330 struct mv643xx_eth_shared_platform_data mv78xx0_ge01_shared_data = { 331 331 .t_clk = 0, 332 332 .dram = &mv78xx0_mbus_dram_info, 333 + .shared_smi = &mv78xx0_ge00_shared, 333 334 }; 334 335 335 336 static struct resource mv78xx0_ge01_shared_resources[] = { ··· 371 370 void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data) 372 371 { 373 372 eth_data->shared = &mv78xx0_ge01_shared; 374 - eth_data->shared_smi = &mv78xx0_ge00_shared; 375 373 mv78xx0_ge01.dev.platform_data = eth_data; 376 374 377 375 platform_device_register(&mv78xx0_ge01_shared); ··· 384 384 struct mv643xx_eth_shared_platform_data mv78xx0_ge10_shared_data = { 385 385 .t_clk = 0, 386 386 .dram = &mv78xx0_mbus_dram_info, 387 + .shared_smi = &mv78xx0_ge00_shared, 387 388 }; 388 389 389 390 static struct resource mv78xx0_ge10_shared_resources[] = { ··· 425 424 void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data) 426 425 { 427 426 eth_data->shared = &mv78xx0_ge10_shared; 428 - eth_data->shared_smi = &mv78xx0_ge00_shared; 429 427 mv78xx0_ge10.dev.platform_data = eth_data; 430 428 431 429 platform_device_register(&mv78xx0_ge10_shared); ··· 438 438 struct mv643xx_eth_shared_platform_data mv78xx0_ge11_shared_data = { 439 439 .t_clk = 0, 440 440 .dram = &mv78xx0_mbus_dram_info, 441 + .shared_smi = &mv78xx0_ge00_shared, 441 442 }; 442 443 443 444 static struct resource mv78xx0_ge11_shared_resources[] = { ··· 479 478 void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data) 480 479 { 481 480 eth_data->shared = &mv78xx0_ge11_shared; 482 - eth_data->shared_smi = &mv78xx0_ge00_shared; 483 481 mv78xx0_ge11.dev.platform_data = eth_data; 484 482 485 483 platform_device_register(&mv78xx0_ge11_shared);
+4 -4
arch/arm/mach-mv78xx0/db78x00-bp-setup.c
··· 19 19 #include "common.h" 20 20 21 21 static struct mv643xx_eth_platform_data db78x00_ge00_data = { 22 - .phy_addr = 8, 22 + .phy_addr = MV643XX_ETH_PHY_ADDR(8), 23 23 }; 24 24 25 25 static struct mv643xx_eth_platform_data db78x00_ge01_data = { 26 - .phy_addr = 9, 26 + .phy_addr = MV643XX_ETH_PHY_ADDR(9), 27 27 }; 28 28 29 29 static struct mv643xx_eth_platform_data db78x00_ge10_data = { 30 - .phy_addr = -1, 30 + .phy_addr = MV643XX_ETH_PHY_NONE, 31 31 }; 32 32 33 33 static struct mv643xx_eth_platform_data db78x00_ge11_data = { 34 - .phy_addr = -1, 34 + .phy_addr = MV643XX_ETH_PHY_NONE, 35 35 }; 36 36 37 37 static struct mv_sata_platform_data db78x00_sata_data = {
+1 -1
arch/arm/mach-orion5x/db88f5281-setup.c
··· 285 285 * Ethernet 286 286 ****************************************************************************/ 287 287 static struct mv643xx_eth_platform_data db88f5281_eth_data = { 288 - .phy_addr = 8, 288 + .phy_addr = MV643XX_ETH_PHY_ADDR(8), 289 289 }; 290 290 291 291 /*****************************************************************************
+1 -1
arch/arm/mach-orion5x/dns323-setup.c
··· 79 79 */ 80 80 81 81 static struct mv643xx_eth_platform_data dns323_eth_data = { 82 - .phy_addr = 8, 82 + .phy_addr = MV643XX_ETH_PHY_ADDR(8), 83 83 }; 84 84 85 85 /****************************************************************************
+1 -1
arch/arm/mach-orion5x/kurobox_pro-setup.c
··· 161 161 ****************************************************************************/ 162 162 163 163 static struct mv643xx_eth_platform_data kurobox_pro_eth_data = { 164 - .phy_addr = 8, 164 + .phy_addr = MV643XX_ETH_PHY_ADDR(8), 165 165 }; 166 166 167 167 /*****************************************************************************
+1 -1
arch/arm/mach-orion5x/mss2-setup.c
··· 109 109 ****************************************************************************/ 110 110 111 111 static struct mv643xx_eth_platform_data mss2_eth_data = { 112 - .phy_addr = 8, 112 + .phy_addr = MV643XX_ETH_PHY_ADDR(8), 113 113 }; 114 114 115 115 /*****************************************************************************
+1 -1
arch/arm/mach-orion5x/mv2120-setup.c
··· 39 39 * Ethernet 40 40 ****************************************************************************/ 41 41 static struct mv643xx_eth_platform_data mv2120_eth_data = { 42 - .phy_addr = 8, 42 + .phy_addr = MV643XX_ETH_PHY_ADDR(8), 43 43 }; 44 44 45 45 static struct mv_sata_platform_data mv2120_sata_data = {
+1 -1
arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
··· 88 88 }; 89 89 90 90 static struct mv643xx_eth_platform_data rd88f5181l_fxo_eth_data = { 91 - .phy_addr = -1, 91 + .phy_addr = MV643XX_ETH_PHY_NONE, 92 92 .speed = SPEED_1000, 93 93 .duplex = DUPLEX_FULL, 94 94 };
+1 -1
arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
··· 89 89 }; 90 90 91 91 static struct mv643xx_eth_platform_data rd88f5181l_ge_eth_data = { 92 - .phy_addr = -1, 92 + .phy_addr = MV643XX_ETH_PHY_NONE, 93 93 .speed = SPEED_1000, 94 94 .duplex = DUPLEX_FULL, 95 95 };
+1 -1
arch/arm/mach-orion5x/rd88f5182-setup.c
··· 221 221 ****************************************************************************/ 222 222 223 223 static struct mv643xx_eth_platform_data rd88f5182_eth_data = { 224 - .phy_addr = 8, 224 + .phy_addr = MV643XX_ETH_PHY_ADDR(8), 225 225 }; 226 226 227 227 /*****************************************************************************
+1 -2
arch/arm/mach-orion5x/ts78xx-setup.c
··· 103 103 * Ethernet 104 104 ****************************************************************************/ 105 105 static struct mv643xx_eth_platform_data ts78xx_eth_data = { 106 - .phy_addr = 0, 107 - .force_phy_addr = 1, 106 + .phy_addr = MV643XX_ETH_PHY_ADDR(0), 108 107 }; 109 108 110 109 /*****************************************************************************
+1 -1
arch/arm/mach-orion5x/tsx09-common.c
··· 48 48 ****************************************************************************/ 49 49 50 50 struct mv643xx_eth_platform_data qnap_tsx09_eth_data = { 51 - .phy_addr = 8, 51 + .phy_addr = MV643XX_ETH_PHY_ADDR(8), 52 52 }; 53 53 54 54 static int __init qnap_tsx09_parse_hex_nibble(char n)
+1 -1
arch/arm/mach-orion5x/wnr854t-setup.c
··· 92 92 }; 93 93 94 94 static struct mv643xx_eth_platform_data wnr854t_eth_data = { 95 - .phy_addr = -1, 95 + .phy_addr = MV643XX_ETH_PHY_NONE, 96 96 .speed = SPEED_1000, 97 97 .duplex = DUPLEX_FULL, 98 98 };
+1 -1
arch/arm/mach-orion5x/wrt350n-v2-setup.c
··· 100 100 }; 101 101 102 102 static struct mv643xx_eth_platform_data wrt350n_v2_eth_data = { 103 - .phy_addr = -1, 103 + .phy_addr = MV643XX_ETH_PHY_NONE, 104 104 .speed = SPEED_1000, 105 105 .duplex = DUPLEX_FULL, 106 106 };
+2 -4
arch/powerpc/sysdev/mv64x60_dev.c
··· 293 293 return -ENODEV; 294 294 295 295 prop = of_get_property(phy, "reg", NULL); 296 - if (prop) { 297 - pdata.force_phy_addr = 1; 298 - pdata.phy_addr = *prop; 299 - } 296 + if (prop) 297 + pdata.phy_addr = MV643XX_ETH_PHY_ADDR(*prop); 300 298 301 299 of_node_put(phy); 302 300
+1 -1
drivers/net/Kconfig
··· 2274 2274 config MV643XX_ETH 2275 2275 tristate "Marvell Discovery (643XX) and Orion ethernet support" 2276 2276 depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32) || PLAT_ORION 2277 - select MII 2277 + select PHYLIB 2278 2278 help 2279 2279 This driver supports the gigabit ethernet MACs in the 2280 2280 Marvell Discovery PPC/MIPS chipset family (MV643XX) and
+755 -721
drivers/net/mv643xx_eth.c
··· 48 48 #include <linux/kernel.h> 49 49 #include <linux/spinlock.h> 50 50 #include <linux/workqueue.h> 51 - #include <linux/mii.h> 51 + #include <linux/phy.h> 52 52 #include <linux/mv643xx_eth.h> 53 53 #include <asm/io.h> 54 54 #include <asm/types.h> 55 55 #include <asm/system.h> 56 56 57 57 static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 58 - static char mv643xx_eth_driver_version[] = "1.3"; 58 + static char mv643xx_eth_driver_version[] = "1.4"; 59 59 60 - #define MV643XX_ETH_CHECKSUM_OFFLOAD_TX 61 - #define MV643XX_ETH_NAPI 62 - #define MV643XX_ETH_TX_FAST_REFILL 63 - 64 - #ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX 65 - #define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1) 66 - #else 67 - #define MAX_DESCS_PER_SKB 1 68 - #endif 69 60 70 61 /* 71 62 * Registers shared between all ports. 72 63 */ 73 64 #define PHY_ADDR 0x0000 74 65 #define SMI_REG 0x0004 66 + #define SMI_BUSY 0x10000000 67 + #define SMI_READ_VALID 0x08000000 68 + #define SMI_OPCODE_READ 0x04000000 69 + #define SMI_OPCODE_WRITE 0x00000000 70 + #define ERR_INT_CAUSE 0x0080 71 + #define ERR_INT_SMI_DONE 0x00000010 72 + #define ERR_INT_MASK 0x0084 75 73 #define WINDOW_BASE(w) (0x0200 + ((w) << 3)) 76 74 #define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) 77 75 #define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) ··· 102 104 #define TX_BW_MTU(p) (0x0458 + ((p) << 10)) 103 105 #define TX_BW_BURST(p) (0x045c + ((p) << 10)) 104 106 #define INT_CAUSE(p) (0x0460 + ((p) << 10)) 105 - #define INT_TX_END_0 0x00080000 106 107 #define INT_TX_END 0x07f80000 107 - #define INT_RX 0x0007fbfc 108 + #define INT_RX 0x000003fc 108 109 #define INT_EXT 0x00000002 109 110 #define INT_CAUSE_EXT(p) (0x0464 + ((p) << 10)) 110 - #define INT_EXT_LINK 0x00100000 111 - #define INT_EXT_PHY 0x00010000 112 - #define INT_EXT_TX_ERROR_0 0x00000100 113 - #define INT_EXT_TX_0 0x00000001 114 - #define INT_EXT_TX 0x0000ffff 111 + #define INT_EXT_LINK_PHY 0x00110000 112 + #define INT_EXT_TX 0x000000ff 115 113 #define INT_MASK(p) (0x0468 + ((p) << 10)) 
116 114 #define INT_MASK_EXT(p) (0x046c + ((p) << 10)) 117 115 #define TX_FIFO_URGENT_THRESHOLD(p) (0x0474 + ((p) << 10)) ··· 165 171 #define FORCE_LINK_PASS (1 << 1) 166 172 #define SERIAL_PORT_ENABLE (1 << 0) 167 173 168 - #define DEFAULT_RX_QUEUE_SIZE 400 169 - #define DEFAULT_TX_QUEUE_SIZE 800 174 + #define DEFAULT_RX_QUEUE_SIZE 128 175 + #define DEFAULT_TX_QUEUE_SIZE 256 170 176 171 177 172 178 /* ··· 243 249 void __iomem *base; 244 250 245 251 /* 246 - * Protects access to SMI_REG, which is shared between ports. 252 + * Points at the right SMI instance to use. 247 253 */ 248 - spinlock_t phy_lock; 254 + struct mv643xx_eth_shared_private *smi; 255 + 256 + /* 257 + * Provides access to local SMI interface. 258 + */ 259 + struct mii_bus smi_bus; 260 + 261 + /* 262 + * If we have access to the error interrupt pin (which is 263 + * somewhat misnamed as it not only reflects internal errors 264 + * but also reflects SMI completion), use that to wait for 265 + * SMI access completion instead of polling the SMI busy bit. 266 + */ 267 + int err_interrupt; 268 + wait_queue_head_t smi_busy_wait; 249 269 250 270 /* 251 271 * Per-port MBUS window access register value. 
··· 271 263 */ 272 264 unsigned int t_clk; 273 265 int extended_rx_coal_limit; 274 - int tx_bw_control_moved; 266 + int tx_bw_control; 275 267 }; 268 + 269 + #define TX_BW_CONTROL_ABSENT 0 270 + #define TX_BW_CONTROL_OLD_LAYOUT 1 271 + #define TX_BW_CONTROL_NEW_LAYOUT 2 276 272 277 273 278 274 /* per-port *****************************************************************/ ··· 326 314 dma_addr_t rx_desc_dma; 327 315 int rx_desc_area_size; 328 316 struct sk_buff **rx_skb; 329 - 330 - struct timer_list rx_oom; 331 317 }; 332 318 333 319 struct tx_queue { ··· 340 330 struct tx_desc *tx_desc_area; 341 331 dma_addr_t tx_desc_dma; 342 332 int tx_desc_area_size; 343 - struct sk_buff **tx_skb; 333 + 334 + struct sk_buff_head tx_skb; 335 + 336 + unsigned long tx_packets; 337 + unsigned long tx_bytes; 338 + unsigned long tx_dropped; 344 339 }; 345 340 346 341 struct mv643xx_eth_private { ··· 354 339 355 340 struct net_device *dev; 356 341 357 - struct mv643xx_eth_shared_private *shared_smi; 358 - int phy_addr; 342 + struct phy_device *phy; 359 343 360 - spinlock_t lock; 361 - 344 + struct timer_list mib_counters_timer; 345 + spinlock_t mib_counters_lock; 362 346 struct mib_counters mib_counters; 347 + 363 348 struct work_struct tx_timeout_task; 364 - struct mii_if_info mii; 349 + 350 + struct napi_struct napi; 351 + u8 work_link; 352 + u8 work_tx; 353 + u8 work_tx_end; 354 + u8 work_rx; 355 + u8 work_rx_refill; 356 + u8 work_rx_oom; 365 357 366 358 /* 367 359 * RX state. 
··· 376 354 int default_rx_ring_size; 377 355 unsigned long rx_desc_sram_addr; 378 356 int rx_desc_sram_size; 379 - u8 rxq_mask; 380 - int rxq_primary; 381 - struct napi_struct napi; 357 + int rxq_count; 358 + struct timer_list rx_oom; 382 359 struct rx_queue rxq[8]; 383 360 384 361 /* ··· 386 365 int default_tx_ring_size; 387 366 unsigned long tx_desc_sram_addr; 388 367 int tx_desc_sram_size; 389 - u8 txq_mask; 390 - int txq_primary; 368 + int txq_count; 391 369 struct tx_queue txq[8]; 392 - #ifdef MV643XX_ETH_TX_FAST_REFILL 393 - int tx_clean_threshold; 394 - #endif 395 370 }; 396 371 397 372 ··· 457 440 udelay(10); 458 441 } 459 442 460 - static void __txq_maybe_wake(struct tx_queue *txq) 443 + static void txq_maybe_wake(struct tx_queue *txq) 461 444 { 462 445 struct mv643xx_eth_private *mp = txq_to_mp(txq); 446 + struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 463 447 464 - /* 465 - * netif_{stop,wake}_queue() flow control only applies to 466 - * the primary queue. 467 - */ 468 - BUG_ON(txq->index != mp->txq_primary); 469 - 470 - if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB) 471 - netif_wake_queue(mp->dev); 472 - } 473 - 474 - 475 - /* rx ***********************************************************************/ 476 - static void txq_reclaim(struct tx_queue *txq, int force); 477 - 478 - static void rxq_refill(struct rx_queue *rxq) 479 - { 480 - struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 481 - unsigned long flags; 482 - 483 - spin_lock_irqsave(&mp->lock, flags); 484 - 485 - while (rxq->rx_desc_count < rxq->rx_ring_size) { 486 - int skb_size; 487 - struct sk_buff *skb; 488 - int unaligned; 489 - int rx; 490 - 491 - /* 492 - * Reserve 2+14 bytes for an ethernet header (the 493 - * hardware automatically prepends 2 bytes of dummy 494 - * data to each received packet), 16 bytes for up to 495 - * four VLAN tags, and 4 bytes for the trailing FCS 496 - * -- 36 bytes total. 
497 - */ 498 - skb_size = mp->dev->mtu + 36; 499 - 500 - /* 501 - * Make sure that the skb size is a multiple of 8 502 - * bytes, as the lower three bits of the receive 503 - * descriptor's buffer size field are ignored by 504 - * the hardware. 505 - */ 506 - skb_size = (skb_size + 7) & ~7; 507 - 508 - skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1); 509 - if (skb == NULL) 510 - break; 511 - 512 - unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1); 513 - if (unaligned) 514 - skb_reserve(skb, dma_get_cache_alignment() - unaligned); 515 - 516 - rxq->rx_desc_count++; 517 - rx = rxq->rx_used_desc; 518 - rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size; 519 - 520 - rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data, 521 - skb_size, DMA_FROM_DEVICE); 522 - rxq->rx_desc_area[rx].buf_size = skb_size; 523 - rxq->rx_skb[rx] = skb; 524 - wmb(); 525 - rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA | 526 - RX_ENABLE_INTERRUPT; 527 - wmb(); 528 - 529 - /* 530 - * The hardware automatically prepends 2 bytes of 531 - * dummy data to each received packet, so that the 532 - * IP header ends up 16-byte aligned. 
533 - */ 534 - skb_reserve(skb, 2); 448 + if (netif_tx_queue_stopped(nq)) { 449 + __netif_tx_lock(nq, smp_processor_id()); 450 + if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1) 451 + netif_tx_wake_queue(nq); 452 + __netif_tx_unlock(nq); 535 453 } 536 - 537 - if (rxq->rx_desc_count != rxq->rx_ring_size) 538 - mod_timer(&rxq->rx_oom, jiffies + (HZ / 10)); 539 - 540 - spin_unlock_irqrestore(&mp->lock, flags); 541 454 } 542 455 543 - static inline void rxq_refill_timer_wrapper(unsigned long data) 544 - { 545 - rxq_refill((struct rx_queue *)data); 546 - } 547 456 457 + /* rx napi ******************************************************************/ 548 458 static int rxq_process(struct rx_queue *rxq, int budget) 549 459 { 550 460 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); ··· 483 539 struct rx_desc *rx_desc; 484 540 unsigned int cmd_sts; 485 541 struct sk_buff *skb; 486 - unsigned long flags; 487 - 488 - spin_lock_irqsave(&mp->lock, flags); 542 + u16 byte_cnt; 489 543 490 544 rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc]; 491 545 492 546 cmd_sts = rx_desc->cmd_sts; 493 - if (cmd_sts & BUFFER_OWNED_BY_DMA) { 494 - spin_unlock_irqrestore(&mp->lock, flags); 547 + if (cmd_sts & BUFFER_OWNED_BY_DMA) 495 548 break; 496 - } 497 549 rmb(); 498 550 499 551 skb = rxq->rx_skb[rxq->rx_curr_desc]; 500 552 rxq->rx_skb[rxq->rx_curr_desc] = NULL; 501 553 502 - rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size; 554 + rxq->rx_curr_desc++; 555 + if (rxq->rx_curr_desc == rxq->rx_ring_size) 556 + rxq->rx_curr_desc = 0; 503 557 504 - spin_unlock_irqrestore(&mp->lock, flags); 505 - 506 - dma_unmap_single(NULL, rx_desc->buf_ptr + 2, 558 + dma_unmap_single(NULL, rx_desc->buf_ptr, 507 559 rx_desc->buf_size, DMA_FROM_DEVICE); 508 560 rxq->rx_desc_count--; 509 561 rx++; 562 + 563 + mp->work_rx_refill |= 1 << rxq->index; 564 + 565 + byte_cnt = rx_desc->byte_cnt; 510 566 511 567 /* 512 568 * Update statistics. 
··· 517 573 * byte CRC at the end of the packet (which we do count). 518 574 */ 519 575 stats->rx_packets++; 520 - stats->rx_bytes += rx_desc->byte_cnt - 2; 576 + stats->rx_bytes += byte_cnt - 2; 521 577 522 578 /* 523 579 * In case we received a packet without first / last bits ··· 540 596 if (cmd_sts & ERROR_SUMMARY) 541 597 stats->rx_errors++; 542 598 543 - dev_kfree_skb_irq(skb); 599 + dev_kfree_skb(skb); 544 600 } else { 545 601 /* 546 602 * The -4 is for the CRC in the trailer of the 547 603 * received packet 548 604 */ 549 - skb_put(skb, rx_desc->byte_cnt - 2 - 4); 605 + skb_put(skb, byte_cnt - 2 - 4); 550 606 551 - if (cmd_sts & LAYER_4_CHECKSUM_OK) { 607 + if (cmd_sts & LAYER_4_CHECKSUM_OK) 552 608 skb->ip_summed = CHECKSUM_UNNECESSARY; 553 - skb->csum = htons( 554 - (cmd_sts & 0x0007fff8) >> 3); 555 - } 556 609 skb->protocol = eth_type_trans(skb, mp->dev); 557 - #ifdef MV643XX_ETH_NAPI 558 610 netif_receive_skb(skb); 559 - #else 560 - netif_rx(skb); 561 - #endif 562 611 } 563 612 564 613 mp->dev->last_rx = jiffies; 565 614 } 566 615 567 - rxq_refill(rxq); 616 + if (rx < budget) 617 + mp->work_rx &= ~(1 << rxq->index); 568 618 569 619 return rx; 570 620 } 571 621 572 - #ifdef MV643XX_ETH_NAPI 573 - static int mv643xx_eth_poll(struct napi_struct *napi, int budget) 622 + static int rxq_refill(struct rx_queue *rxq, int budget) 574 623 { 575 - struct mv643xx_eth_private *mp; 576 - int rx; 577 - int i; 624 + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 625 + int skb_size; 626 + int refilled; 578 627 579 - mp = container_of(napi, struct mv643xx_eth_private, napi); 628 + /* 629 + * Reserve 2+14 bytes for an ethernet header (the hardware 630 + * automatically prepends 2 bytes of dummy data to each 631 + * received packet), 16 bytes for up to four VLAN tags, and 632 + * 4 bytes for the trailing FCS -- 36 bytes total. 
633 + */ 634 + skb_size = rxq_to_mp(rxq)->dev->mtu + 36; 580 635 581 - #ifdef MV643XX_ETH_TX_FAST_REFILL 582 - if (++mp->tx_clean_threshold > 5) { 583 - mp->tx_clean_threshold = 0; 584 - for (i = 0; i < 8; i++) 585 - if (mp->txq_mask & (1 << i)) 586 - txq_reclaim(mp->txq + i, 0); 636 + /* 637 + * Make sure that the skb size is a multiple of 8 bytes, as 638 + * the lower three bits of the receive descriptor's buffer 639 + * size field are ignored by the hardware. 640 + */ 641 + skb_size = (skb_size + 7) & ~7; 587 642 588 - if (netif_carrier_ok(mp->dev)) { 589 - spin_lock_irq(&mp->lock); 590 - __txq_maybe_wake(mp->txq + mp->txq_primary); 591 - spin_unlock_irq(&mp->lock); 643 + refilled = 0; 644 + while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) { 645 + struct sk_buff *skb; 646 + int unaligned; 647 + int rx; 648 + 649 + skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1); 650 + if (skb == NULL) { 651 + mp->work_rx_oom |= 1 << rxq->index; 652 + goto oom; 592 653 } 654 + 655 + unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1); 656 + if (unaligned) 657 + skb_reserve(skb, dma_get_cache_alignment() - unaligned); 658 + 659 + refilled++; 660 + rxq->rx_desc_count++; 661 + 662 + rx = rxq->rx_used_desc++; 663 + if (rxq->rx_used_desc == rxq->rx_ring_size) 664 + rxq->rx_used_desc = 0; 665 + 666 + rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data, 667 + skb_size, DMA_FROM_DEVICE); 668 + rxq->rx_desc_area[rx].buf_size = skb_size; 669 + rxq->rx_skb[rx] = skb; 670 + wmb(); 671 + rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA | 672 + RX_ENABLE_INTERRUPT; 673 + wmb(); 674 + 675 + /* 676 + * The hardware automatically prepends 2 bytes of 677 + * dummy data to each received packet, so that the 678 + * IP header ends up 16-byte aligned. 
679 + */ 680 + skb_reserve(skb, 2); 593 681 } 594 - #endif 595 682 596 - rx = 0; 597 - for (i = 7; rx < budget && i >= 0; i--) 598 - if (mp->rxq_mask & (1 << i)) 599 - rx += rxq_process(mp->rxq + i, budget - rx); 683 + if (refilled < budget) 684 + mp->work_rx_refill &= ~(1 << rxq->index); 600 685 601 - if (rx < budget) { 602 - netif_rx_complete(mp->dev, napi); 603 - wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); 604 - } 605 - 606 - return rx; 686 + oom: 687 + return refilled; 607 688 } 608 - #endif 609 689 610 690 611 691 /* tx ***********************************************************************/ ··· 652 684 653 685 BUG_ON(txq->tx_desc_count >= txq->tx_ring_size); 654 686 655 - tx_desc_curr = txq->tx_curr_desc; 656 - txq->tx_curr_desc = (tx_desc_curr + 1) % txq->tx_ring_size; 687 + tx_desc_curr = txq->tx_curr_desc++; 688 + if (txq->tx_curr_desc == txq->tx_ring_size) 689 + txq->tx_curr_desc = 0; 657 690 658 691 BUG_ON(txq->tx_curr_desc == txq->tx_used_desc); 659 692 ··· 683 714 desc->cmd_sts = BUFFER_OWNED_BY_DMA | 684 715 ZERO_PADDING | TX_LAST_DESC | 685 716 TX_ENABLE_INTERRUPT; 686 - txq->tx_skb[tx_index] = skb; 687 717 } else { 688 718 desc->cmd_sts = BUFFER_OWNED_BY_DMA; 689 - txq->tx_skb[tx_index] = NULL; 690 719 } 691 720 692 721 desc->l4i_chk = 0; ··· 701 734 return (__force __be16)sum; 702 735 } 703 736 704 - static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) 737 + static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) 705 738 { 706 739 struct mv643xx_eth_private *mp = txq_to_mp(txq); 707 740 int nr_frags = skb_shinfo(skb)->nr_frags; 708 741 int tx_index; 709 742 struct tx_desc *desc; 710 743 u32 cmd_sts; 744 + u16 l4i_chk; 711 745 int length; 712 746 713 747 cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; 748 + l4i_chk = 0; 749 + 750 + if (skb->ip_summed == CHECKSUM_PARTIAL) { 751 + int tag_bytes; 752 + 753 + BUG_ON(skb->protocol != htons(ETH_P_IP) && 754 + skb->protocol != htons(ETH_P_8021Q)); 
755 + 756 + tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN; 757 + if (unlikely(tag_bytes & ~12)) { 758 + if (skb_checksum_help(skb) == 0) 759 + goto no_csum; 760 + kfree_skb(skb); 761 + return 1; 762 + } 763 + 764 + if (tag_bytes & 4) 765 + cmd_sts |= MAC_HDR_EXTRA_4_BYTES; 766 + if (tag_bytes & 8) 767 + cmd_sts |= MAC_HDR_EXTRA_8_BYTES; 768 + 769 + cmd_sts |= GEN_TCP_UDP_CHECKSUM | 770 + GEN_IP_V4_CHECKSUM | 771 + ip_hdr(skb)->ihl << TX_IHL_SHIFT; 772 + 773 + switch (ip_hdr(skb)->protocol) { 774 + case IPPROTO_UDP: 775 + cmd_sts |= UDP_FRAME; 776 + l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check)); 777 + break; 778 + case IPPROTO_TCP: 779 + l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check)); 780 + break; 781 + default: 782 + BUG(); 783 + } 784 + } else { 785 + no_csum: 786 + /* Errata BTS #50, IHL must be 5 if no HW checksum */ 787 + cmd_sts |= 5 << TX_IHL_SHIFT; 788 + } 714 789 715 790 tx_index = txq_alloc_desc_index(txq); 716 791 desc = &txq->tx_desc_area[tx_index]; 717 792 718 793 if (nr_frags) { 719 794 txq_submit_frag_skb(txq, skb); 720 - 721 795 length = skb_headlen(skb); 722 - txq->tx_skb[tx_index] = NULL; 723 796 } else { 724 797 cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; 725 798 length = skb->len; 726 - txq->tx_skb[tx_index] = skb; 727 799 } 728 800 801 + desc->l4i_chk = l4i_chk; 729 802 desc->byte_cnt = length; 730 803 desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); 731 804 732 - if (skb->ip_summed == CHECKSUM_PARTIAL) { 733 - int mac_hdr_len; 734 - 735 - BUG_ON(skb->protocol != htons(ETH_P_IP) && 736 - skb->protocol != htons(ETH_P_8021Q)); 737 - 738 - cmd_sts |= GEN_TCP_UDP_CHECKSUM | 739 - GEN_IP_V4_CHECKSUM | 740 - ip_hdr(skb)->ihl << TX_IHL_SHIFT; 741 - 742 - mac_hdr_len = (void *)ip_hdr(skb) - (void *)skb->data; 743 - switch (mac_hdr_len - ETH_HLEN) { 744 - case 0: 745 - break; 746 - case 4: 747 - cmd_sts |= MAC_HDR_EXTRA_4_BYTES; 748 - break; 749 - case 8: 750 - cmd_sts |= 
MAC_HDR_EXTRA_8_BYTES; 751 - break; 752 - case 12: 753 - cmd_sts |= MAC_HDR_EXTRA_4_BYTES; 754 - cmd_sts |= MAC_HDR_EXTRA_8_BYTES; 755 - break; 756 - default: 757 - if (net_ratelimit()) 758 - dev_printk(KERN_ERR, &txq_to_mp(txq)->dev->dev, 759 - "mac header length is %d?!\n", mac_hdr_len); 760 - break; 761 - } 762 - 763 - switch (ip_hdr(skb)->protocol) { 764 - case IPPROTO_UDP: 765 - cmd_sts |= UDP_FRAME; 766 - desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check)); 767 - break; 768 - case IPPROTO_TCP: 769 - desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check)); 770 - break; 771 - default: 772 - BUG(); 773 - } 774 - } else { 775 - /* Errata BTS #50, IHL must be 5 if no HW checksum */ 776 - cmd_sts |= 5 << TX_IHL_SHIFT; 777 - desc->l4i_chk = 0; 778 - } 805 + __skb_queue_tail(&txq->tx_skb, skb); 779 806 780 807 /* ensure all other descriptors are written before first cmd_sts */ 781 808 wmb(); 782 809 desc->cmd_sts = cmd_sts; 783 810 784 - /* clear TX_END interrupt status */ 785 - wrl(mp, INT_CAUSE(mp->port_num), ~(INT_TX_END_0 << txq->index)); 786 - rdl(mp, INT_CAUSE(mp->port_num)); 811 + /* clear TX_END status */ 812 + mp->work_tx_end &= ~(1 << txq->index); 787 813 788 814 /* ensure all descriptors are written before poking hardware */ 789 815 wmb(); 790 816 txq_enable(txq); 791 817 792 818 txq->tx_desc_count += nr_frags + 1; 819 + 820 + return 0; 793 821 } 794 822 795 823 static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) 796 824 { 797 825 struct mv643xx_eth_private *mp = netdev_priv(dev); 798 - struct net_device_stats *stats = &dev->stats; 826 + int queue; 799 827 struct tx_queue *txq; 800 - unsigned long flags; 828 + struct netdev_queue *nq; 829 + 830 + queue = skb_get_queue_mapping(skb); 831 + txq = mp->txq + queue; 832 + nq = netdev_get_tx_queue(dev, queue); 801 833 802 834 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { 803 - stats->tx_dropped++; 835 + txq->tx_dropped++; 804 836 dev_printk(KERN_DEBUG, &dev->dev, 805 837 
"failed to linearize skb with tiny " 806 838 "unaligned fragment\n"); 807 839 return NETDEV_TX_BUSY; 808 840 } 809 841 810 - spin_lock_irqsave(&mp->lock, flags); 811 - 812 - txq = mp->txq + mp->txq_primary; 813 - 814 - if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) { 815 - spin_unlock_irqrestore(&mp->lock, flags); 816 - if (txq->index == mp->txq_primary && net_ratelimit()) 817 - dev_printk(KERN_ERR, &dev->dev, 818 - "primary tx queue full?!\n"); 842 + if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { 843 + if (net_ratelimit()) 844 + dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n"); 819 845 kfree_skb(skb); 820 846 return NETDEV_TX_OK; 821 847 } 822 848 823 - txq_submit_skb(txq, skb); 824 - stats->tx_bytes += skb->len; 825 - stats->tx_packets++; 826 - dev->trans_start = jiffies; 827 - 828 - if (txq->index == mp->txq_primary) { 849 + if (!txq_submit_skb(txq, skb)) { 829 850 int entries_left; 830 851 852 + txq->tx_bytes += skb->len; 853 + txq->tx_packets++; 854 + dev->trans_start = jiffies; 855 + 831 856 entries_left = txq->tx_ring_size - txq->tx_desc_count; 832 - if (entries_left < MAX_DESCS_PER_SKB) 833 - netif_stop_queue(dev); 857 + if (entries_left < MAX_SKB_FRAGS + 1) 858 + netif_tx_stop_queue(nq); 834 859 } 835 860 836 - spin_unlock_irqrestore(&mp->lock, flags); 837 - 838 861 return NETDEV_TX_OK; 862 + } 863 + 864 + 865 + /* tx napi ******************************************************************/ 866 + static void txq_kick(struct tx_queue *txq) 867 + { 868 + struct mv643xx_eth_private *mp = txq_to_mp(txq); 869 + struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 870 + u32 hw_desc_ptr; 871 + u32 expected_ptr; 872 + 873 + __netif_tx_lock(nq, smp_processor_id()); 874 + 875 + if (rdl(mp, TXQ_COMMAND(mp->port_num)) & (1 << txq->index)) 876 + goto out; 877 + 878 + hw_desc_ptr = rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index)); 879 + expected_ptr = (u32)txq->tx_desc_dma + 880 + txq->tx_curr_desc * 
sizeof(struct tx_desc); 881 + 882 + if (hw_desc_ptr != expected_ptr) 883 + txq_enable(txq); 884 + 885 + out: 886 + __netif_tx_unlock(nq); 887 + 888 + mp->work_tx_end &= ~(1 << txq->index); 889 + } 890 + 891 + static int txq_reclaim(struct tx_queue *txq, int budget, int force) 892 + { 893 + struct mv643xx_eth_private *mp = txq_to_mp(txq); 894 + struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 895 + int reclaimed; 896 + 897 + __netif_tx_lock(nq, smp_processor_id()); 898 + 899 + reclaimed = 0; 900 + while (reclaimed < budget && txq->tx_desc_count > 0) { 901 + int tx_index; 902 + struct tx_desc *desc; 903 + u32 cmd_sts; 904 + struct sk_buff *skb; 905 + 906 + tx_index = txq->tx_used_desc; 907 + desc = &txq->tx_desc_area[tx_index]; 908 + cmd_sts = desc->cmd_sts; 909 + 910 + if (cmd_sts & BUFFER_OWNED_BY_DMA) { 911 + if (!force) 912 + break; 913 + desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA; 914 + } 915 + 916 + txq->tx_used_desc = tx_index + 1; 917 + if (txq->tx_used_desc == txq->tx_ring_size) 918 + txq->tx_used_desc = 0; 919 + 920 + reclaimed++; 921 + txq->tx_desc_count--; 922 + 923 + skb = NULL; 924 + if (cmd_sts & TX_LAST_DESC) 925 + skb = __skb_dequeue(&txq->tx_skb); 926 + 927 + if (cmd_sts & ERROR_SUMMARY) { 928 + dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n"); 929 + mp->dev->stats.tx_errors++; 930 + } 931 + 932 + if (cmd_sts & TX_FIRST_DESC) { 933 + dma_unmap_single(NULL, desc->buf_ptr, 934 + desc->byte_cnt, DMA_TO_DEVICE); 935 + } else { 936 + dma_unmap_page(NULL, desc->buf_ptr, 937 + desc->byte_cnt, DMA_TO_DEVICE); 938 + } 939 + 940 + if (skb) 941 + dev_kfree_skb(skb); 942 + } 943 + 944 + __netif_tx_unlock(nq); 945 + 946 + if (reclaimed < budget) 947 + mp->work_tx &= ~(1 << txq->index); 948 + 949 + return reclaimed; 839 950 } 840 951 841 952 ··· 940 895 if (bucket_size > 65535) 941 896 bucket_size = 65535; 942 897 943 - if (mp->shared->tx_bw_control_moved) { 944 - wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate); 945 - wrl(mp, 
TX_BW_MTU_MOVED(mp->port_num), mtu); 946 - wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size); 947 - } else { 898 + switch (mp->shared->tx_bw_control) { 899 + case TX_BW_CONTROL_OLD_LAYOUT: 948 900 wrl(mp, TX_BW_RATE(mp->port_num), token_rate); 949 901 wrl(mp, TX_BW_MTU(mp->port_num), mtu); 950 902 wrl(mp, TX_BW_BURST(mp->port_num), bucket_size); 903 + break; 904 + case TX_BW_CONTROL_NEW_LAYOUT: 905 + wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate); 906 + wrl(mp, TX_BW_MTU_MOVED(mp->port_num), mtu); 907 + wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size); 908 + break; 951 909 } 952 910 } 953 911 ··· 982 934 /* 983 935 * Turn on fixed priority mode. 984 936 */ 985 - if (mp->shared->tx_bw_control_moved) 986 - off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num); 987 - else 937 + off = 0; 938 + switch (mp->shared->tx_bw_control) { 939 + case TX_BW_CONTROL_OLD_LAYOUT: 988 940 off = TXQ_FIX_PRIO_CONF(mp->port_num); 941 + break; 942 + case TX_BW_CONTROL_NEW_LAYOUT: 943 + off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num); 944 + break; 945 + } 989 946 990 - val = rdl(mp, off); 991 - val |= 1 << txq->index; 992 - wrl(mp, off, val); 947 + if (off) { 948 + val = rdl(mp, off); 949 + val |= 1 << txq->index; 950 + wrl(mp, off, val); 951 + } 993 952 } 994 953 995 954 static void txq_set_wrr(struct tx_queue *txq, int weight) ··· 1008 953 /* 1009 954 * Turn off fixed priority mode. 
1010 955 */ 1011 - if (mp->shared->tx_bw_control_moved) 1012 - off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num); 1013 - else 956 + off = 0; 957 + switch (mp->shared->tx_bw_control) { 958 + case TX_BW_CONTROL_OLD_LAYOUT: 1014 959 off = TXQ_FIX_PRIO_CONF(mp->port_num); 960 + break; 961 + case TX_BW_CONTROL_NEW_LAYOUT: 962 + off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num); 963 + break; 964 + } 1015 965 1016 - val = rdl(mp, off); 1017 - val &= ~(1 << txq->index); 1018 - wrl(mp, off, val); 966 + if (off) { 967 + val = rdl(mp, off); 968 + val &= ~(1 << txq->index); 969 + wrl(mp, off, val); 1019 970 1020 - /* 1021 - * Configure WRR weight for this queue. 1022 - */ 1023 - off = TXQ_BW_WRR_CONF(mp->port_num, txq->index); 971 + /* 972 + * Configure WRR weight for this queue. 973 + */ 974 + off = TXQ_BW_WRR_CONF(mp->port_num, txq->index); 1024 975 1025 - val = rdl(mp, off); 1026 - val = (val & ~0xff) | (weight & 0xff); 1027 - wrl(mp, off, val); 976 + val = rdl(mp, off); 977 + val = (val & ~0xff) | (weight & 0xff); 978 + wrl(mp, off, val); 979 + } 1028 980 } 1029 981 1030 982 1031 983 /* mii management interface *************************************************/ 1032 - #define SMI_BUSY 0x10000000 1033 - #define SMI_READ_VALID 0x08000000 1034 - #define SMI_OPCODE_READ 0x04000000 1035 - #define SMI_OPCODE_WRITE 0x00000000 1036 - 1037 - static void smi_reg_read(struct mv643xx_eth_private *mp, unsigned int addr, 1038 - unsigned int reg, unsigned int *value) 984 + static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id) 1039 985 { 1040 - void __iomem *smi_reg = mp->shared_smi->base + SMI_REG; 1041 - unsigned long flags; 1042 - int i; 986 + struct mv643xx_eth_shared_private *msp = dev_id; 1043 987 1044 - /* the SMI register is a shared resource */ 1045 - spin_lock_irqsave(&mp->shared_smi->phy_lock, flags); 988 + if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) { 989 + writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE); 990 + wake_up(&msp->smi_busy_wait); 991 + return 
IRQ_HANDLED; 992 + } 1046 993 1047 - /* wait for the SMI register to become available */ 1048 - for (i = 0; readl(smi_reg) & SMI_BUSY; i++) { 1049 - if (i == 1000) { 1050 - printk("%s: PHY busy timeout\n", mp->dev->name); 1051 - goto out; 994 + return IRQ_NONE; 995 + } 996 + 997 + static int smi_is_done(struct mv643xx_eth_shared_private *msp) 998 + { 999 + return !(readl(msp->base + SMI_REG) & SMI_BUSY); 1000 + } 1001 + 1002 + static int smi_wait_ready(struct mv643xx_eth_shared_private *msp) 1003 + { 1004 + if (msp->err_interrupt == NO_IRQ) { 1005 + int i; 1006 + 1007 + for (i = 0; !smi_is_done(msp); i++) { 1008 + if (i == 10) 1009 + return -ETIMEDOUT; 1010 + msleep(10); 1052 1011 } 1053 - udelay(10); 1012 + 1013 + return 0; 1014 + } 1015 + 1016 + if (!wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp), 1017 + msecs_to_jiffies(100))) 1018 + return -ETIMEDOUT; 1019 + 1020 + return 0; 1021 + } 1022 + 1023 + static int smi_bus_read(struct mii_bus *bus, int addr, int reg) 1024 + { 1025 + struct mv643xx_eth_shared_private *msp = bus->priv; 1026 + void __iomem *smi_reg = msp->base + SMI_REG; 1027 + int ret; 1028 + 1029 + if (smi_wait_ready(msp)) { 1030 + printk("mv643xx_eth: SMI bus busy timeout\n"); 1031 + return -ETIMEDOUT; 1054 1032 } 1055 1033 1056 1034 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg); 1057 1035 1058 - /* now wait for the data to be valid */ 1059 - for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) { 1060 - if (i == 1000) { 1061 - printk("%s: PHY read timeout\n", mp->dev->name); 1062 - goto out; 1063 - } 1064 - udelay(10); 1036 + if (smi_wait_ready(msp)) { 1037 + printk("mv643xx_eth: SMI bus busy timeout\n"); 1038 + return -ETIMEDOUT; 1065 1039 } 1066 1040 1067 - *value = readl(smi_reg) & 0xffff; 1068 - out: 1069 - spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags); 1041 + ret = readl(smi_reg); 1042 + if (!(ret & SMI_READ_VALID)) { 1043 + printk("mv643xx_eth: SMI bus read not valid\n"); 1044 + return -ENODEV; 1045 + } 1046 + 
1047 + return ret & 0xffff; 1070 1048 } 1071 1049 1072 - static void smi_reg_write(struct mv643xx_eth_private *mp, 1073 - unsigned int addr, 1074 - unsigned int reg, unsigned int value) 1050 + static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val) 1075 1051 { 1076 - void __iomem *smi_reg = mp->shared_smi->base + SMI_REG; 1077 - unsigned long flags; 1078 - int i; 1052 + struct mv643xx_eth_shared_private *msp = bus->priv; 1053 + void __iomem *smi_reg = msp->base + SMI_REG; 1079 1054 1080 - /* the SMI register is a shared resource */ 1081 - spin_lock_irqsave(&mp->shared_smi->phy_lock, flags); 1082 - 1083 - /* wait for the SMI register to become available */ 1084 - for (i = 0; readl(smi_reg) & SMI_BUSY; i++) { 1085 - if (i == 1000) { 1086 - printk("%s: PHY busy timeout\n", mp->dev->name); 1087 - goto out; 1088 - } 1089 - udelay(10); 1055 + if (smi_wait_ready(msp)) { 1056 + printk("mv643xx_eth: SMI bus busy timeout\n"); 1057 + return -ETIMEDOUT; 1090 1058 } 1091 1059 1092 1060 writel(SMI_OPCODE_WRITE | (reg << 21) | 1093 - (addr << 16) | (value & 0xffff), smi_reg); 1094 - out: 1095 - spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags); 1061 + (addr << 16) | (val & 0xffff), smi_reg); 1062 + 1063 + if (smi_wait_ready(msp)) { 1064 + printk("mv643xx_eth: SMI bus busy timeout\n"); 1065 + return -ETIMEDOUT; 1066 + } 1067 + 1068 + return 0; 1096 1069 } 1097 1070 1098 1071 1099 - /* mib counters *************************************************************/ 1072 + /* statistics ***************************************************************/ 1073 + static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) 1074 + { 1075 + struct mv643xx_eth_private *mp = netdev_priv(dev); 1076 + struct net_device_stats *stats = &dev->stats; 1077 + unsigned long tx_packets = 0; 1078 + unsigned long tx_bytes = 0; 1079 + unsigned long tx_dropped = 0; 1080 + int i; 1081 + 1082 + for (i = 0; i < mp->txq_count; i++) { 1083 + struct tx_queue *txq = mp->txq + 
i; 1084 + 1085 + tx_packets += txq->tx_packets; 1086 + tx_bytes += txq->tx_bytes; 1087 + tx_dropped += txq->tx_dropped; 1088 + } 1089 + 1090 + stats->tx_packets = tx_packets; 1091 + stats->tx_bytes = tx_bytes; 1092 + stats->tx_dropped = tx_dropped; 1093 + 1094 + return stats; 1095 + } 1096 + 1100 1097 static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) 1101 1098 { 1102 1099 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); ··· 1166 1059 { 1167 1060 struct mib_counters *p = &mp->mib_counters; 1168 1061 1062 + spin_lock(&mp->mib_counters_lock); 1169 1063 p->good_octets_received += mib_read(mp, 0x00); 1170 1064 p->good_octets_received += (u64)mib_read(mp, 0x04) << 32; 1171 1065 p->bad_octets_received += mib_read(mp, 0x08); ··· 1199 1091 p->bad_crc_event += mib_read(mp, 0x74); 1200 1092 p->collision += mib_read(mp, 0x78); 1201 1093 p->late_collision += mib_read(mp, 0x7c); 1094 + spin_unlock(&mp->mib_counters_lock); 1095 + 1096 + mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); 1097 + } 1098 + 1099 + static void mib_counters_timer_wrapper(unsigned long _mp) 1100 + { 1101 + struct mv643xx_eth_private *mp = (void *)_mp; 1102 + 1103 + mib_counters_update(mp); 1202 1104 } 1203 1105 1204 1106 ··· 1274 1156 struct mv643xx_eth_private *mp = netdev_priv(dev); 1275 1157 int err; 1276 1158 1277 - spin_lock_irq(&mp->lock); 1278 - err = mii_ethtool_gset(&mp->mii, cmd); 1279 - spin_unlock_irq(&mp->lock); 1159 + err = phy_read_status(mp->phy); 1160 + if (err == 0) 1161 + err = phy_ethtool_gset(mp->phy, cmd); 1280 1162 1281 1163 /* 1282 1164 * The MAC does not support 1000baseT_Half. ··· 1324 1206 static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1325 1207 { 1326 1208 struct mv643xx_eth_private *mp = netdev_priv(dev); 1327 - int err; 1328 1209 1329 1210 /* 1330 1211 * The MAC does not support 1000baseT_Half. 
1331 1212 */ 1332 1213 cmd->advertising &= ~ADVERTISED_1000baseT_Half; 1333 1214 1334 - spin_lock_irq(&mp->lock); 1335 - err = mii_ethtool_sset(&mp->mii, cmd); 1336 - spin_unlock_irq(&mp->lock); 1337 - 1338 - return err; 1215 + return phy_ethtool_sset(mp->phy, cmd); 1339 1216 } 1340 1217 1341 1218 static int mv643xx_eth_set_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd) ··· 1352 1239 { 1353 1240 struct mv643xx_eth_private *mp = netdev_priv(dev); 1354 1241 1355 - return mii_nway_restart(&mp->mii); 1242 + return genphy_restart_aneg(mp->phy); 1356 1243 } 1357 1244 1358 1245 static int mv643xx_eth_nway_reset_phyless(struct net_device *dev) ··· 1362 1249 1363 1250 static u32 mv643xx_eth_get_link(struct net_device *dev) 1364 1251 { 1365 - struct mv643xx_eth_private *mp = netdev_priv(dev); 1366 - 1367 - return mii_link_ok(&mp->mii); 1368 - } 1369 - 1370 - static u32 mv643xx_eth_get_link_phyless(struct net_device *dev) 1371 - { 1372 - return 1; 1252 + return !!netif_carrier_ok(dev); 1373 1253 } 1374 1254 1375 1255 static void mv643xx_eth_get_strings(struct net_device *dev, ··· 1383 1277 struct ethtool_stats *stats, 1384 1278 uint64_t *data) 1385 1279 { 1386 - struct mv643xx_eth_private *mp = dev->priv; 1280 + struct mv643xx_eth_private *mp = netdev_priv(dev); 1387 1281 int i; 1388 1282 1283 + mv643xx_eth_get_stats(dev); 1389 1284 mib_counters_update(mp); 1390 1285 1391 1286 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { ··· 1430 1323 .set_settings = mv643xx_eth_set_settings_phyless, 1431 1324 .get_drvinfo = mv643xx_eth_get_drvinfo, 1432 1325 .nway_reset = mv643xx_eth_nway_reset_phyless, 1433 - .get_link = mv643xx_eth_get_link_phyless, 1326 + .get_link = mv643xx_eth_get_link, 1434 1327 .set_sg = ethtool_op_set_sg, 1435 1328 .get_strings = mv643xx_eth_get_strings, 1436 1329 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, ··· 1594 1487 1595 1488 size = rxq->rx_ring_size * sizeof(struct rx_desc); 1596 1489 1597 - if (index == mp->rxq_primary && 
size <= mp->rx_desc_sram_size) { 1490 + if (index == 0 && size <= mp->rx_desc_sram_size) { 1598 1491 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, 1599 1492 mp->rx_desc_sram_size); 1600 1493 rxq->rx_desc_dma = mp->rx_desc_sram_addr; ··· 1622 1515 1623 1516 rx_desc = (struct rx_desc *)rxq->rx_desc_area; 1624 1517 for (i = 0; i < rxq->rx_ring_size; i++) { 1625 - int nexti = (i + 1) % rxq->rx_ring_size; 1518 + int nexti; 1519 + 1520 + nexti = i + 1; 1521 + if (nexti == rxq->rx_ring_size) 1522 + nexti = 0; 1523 + 1626 1524 rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + 1627 1525 nexti * sizeof(struct rx_desc); 1628 1526 } 1629 - 1630 - init_timer(&rxq->rx_oom); 1631 - rxq->rx_oom.data = (unsigned long)rxq; 1632 - rxq->rx_oom.function = rxq_refill_timer_wrapper; 1633 1527 1634 1528 return 0; 1635 1529 1636 1530 1637 1531 out_free: 1638 - if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size) 1532 + if (index == 0 && size <= mp->rx_desc_sram_size) 1639 1533 iounmap(rxq->rx_desc_area); 1640 1534 else 1641 1535 dma_free_coherent(NULL, size, ··· 1654 1546 1655 1547 rxq_disable(rxq); 1656 1548 1657 - del_timer_sync(&rxq->rx_oom); 1658 - 1659 1549 for (i = 0; i < rxq->rx_ring_size; i++) { 1660 1550 if (rxq->rx_skb[i]) { 1661 1551 dev_kfree_skb(rxq->rx_skb[i]); ··· 1667 1561 rxq->rx_desc_count); 1668 1562 } 1669 1563 1670 - if (rxq->index == mp->rxq_primary && 1564 + if (rxq->index == 0 && 1671 1565 rxq->rx_desc_area_size <= mp->rx_desc_sram_size) 1672 1566 iounmap(rxq->rx_desc_area); 1673 1567 else ··· 1694 1588 1695 1589 size = txq->tx_ring_size * sizeof(struct tx_desc); 1696 1590 1697 - if (index == mp->txq_primary && size <= mp->tx_desc_sram_size) { 1591 + if (index == 0 && size <= mp->tx_desc_sram_size) { 1698 1592 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, 1699 1593 mp->tx_desc_sram_size); 1700 1594 txq->tx_desc_dma = mp->tx_desc_sram_addr; ··· 1707 1601 if (txq->tx_desc_area == NULL) { 1708 1602 dev_printk(KERN_ERR, &mp->dev->dev, 1709 1603 "can't 
allocate tx ring (%d bytes)\n", size); 1710 - goto out; 1604 + return -ENOMEM; 1711 1605 } 1712 1606 memset(txq->tx_desc_area, 0, size); 1713 1607 1714 1608 txq->tx_desc_area_size = size; 1715 - txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb), 1716 - GFP_KERNEL); 1717 - if (txq->tx_skb == NULL) { 1718 - dev_printk(KERN_ERR, &mp->dev->dev, 1719 - "can't allocate tx skb ring\n"); 1720 - goto out_free; 1721 - } 1722 1609 1723 1610 tx_desc = (struct tx_desc *)txq->tx_desc_area; 1724 1611 for (i = 0; i < txq->tx_ring_size; i++) { 1725 1612 struct tx_desc *txd = tx_desc + i; 1726 - int nexti = (i + 1) % txq->tx_ring_size; 1613 + int nexti; 1614 + 1615 + nexti = i + 1; 1616 + if (nexti == txq->tx_ring_size) 1617 + nexti = 0; 1727 1618 1728 1619 txd->cmd_sts = 0; 1729 1620 txd->next_desc_ptr = txq->tx_desc_dma + 1730 1621 nexti * sizeof(struct tx_desc); 1731 1622 } 1732 1623 1624 + skb_queue_head_init(&txq->tx_skb); 1625 + 1733 1626 return 0; 1734 - 1735 - 1736 - out_free: 1737 - if (index == mp->txq_primary && size <= mp->tx_desc_sram_size) 1738 - iounmap(txq->tx_desc_area); 1739 - else 1740 - dma_free_coherent(NULL, size, 1741 - txq->tx_desc_area, 1742 - txq->tx_desc_dma); 1743 - 1744 - out: 1745 - return -ENOMEM; 1746 - } 1747 - 1748 - static void txq_reclaim(struct tx_queue *txq, int force) 1749 - { 1750 - struct mv643xx_eth_private *mp = txq_to_mp(txq); 1751 - unsigned long flags; 1752 - 1753 - spin_lock_irqsave(&mp->lock, flags); 1754 - while (txq->tx_desc_count > 0) { 1755 - int tx_index; 1756 - struct tx_desc *desc; 1757 - u32 cmd_sts; 1758 - struct sk_buff *skb; 1759 - dma_addr_t addr; 1760 - int count; 1761 - 1762 - tx_index = txq->tx_used_desc; 1763 - desc = &txq->tx_desc_area[tx_index]; 1764 - cmd_sts = desc->cmd_sts; 1765 - 1766 - if (cmd_sts & BUFFER_OWNED_BY_DMA) { 1767 - if (!force) 1768 - break; 1769 - desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA; 1770 - } 1771 - 1772 - txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size; 1773 - 
txq->tx_desc_count--; 1774 - 1775 - addr = desc->buf_ptr; 1776 - count = desc->byte_cnt; 1777 - skb = txq->tx_skb[tx_index]; 1778 - txq->tx_skb[tx_index] = NULL; 1779 - 1780 - if (cmd_sts & ERROR_SUMMARY) { 1781 - dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n"); 1782 - mp->dev->stats.tx_errors++; 1783 - } 1784 - 1785 - /* 1786 - * Drop mp->lock while we free the skb. 1787 - */ 1788 - spin_unlock_irqrestore(&mp->lock, flags); 1789 - 1790 - if (cmd_sts & TX_FIRST_DESC) 1791 - dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE); 1792 - else 1793 - dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE); 1794 - 1795 - if (skb) 1796 - dev_kfree_skb_irq(skb); 1797 - 1798 - spin_lock_irqsave(&mp->lock, flags); 1799 - } 1800 - spin_unlock_irqrestore(&mp->lock, flags); 1801 1627 } 1802 1628 1803 1629 static void txq_deinit(struct tx_queue *txq) ··· 1737 1699 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1738 1700 1739 1701 txq_disable(txq); 1740 - txq_reclaim(txq, 1); 1702 + txq_reclaim(txq, txq->tx_ring_size, 1); 1741 1703 1742 1704 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); 1743 1705 1744 - if (txq->index == mp->txq_primary && 1706 + if (txq->index == 0 && 1745 1707 txq->tx_desc_area_size <= mp->tx_desc_sram_size) 1746 1708 iounmap(txq->tx_desc_area); 1747 1709 else 1748 1710 dma_free_coherent(NULL, txq->tx_desc_area_size, 1749 1711 txq->tx_desc_area, txq->tx_desc_dma); 1750 - 1751 - kfree(txq->tx_skb); 1752 1712 } 1753 1713 1754 1714 1755 1715 /* netdev ops and related ***************************************************/ 1716 + static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp) 1717 + { 1718 + u32 int_cause; 1719 + u32 int_cause_ext; 1720 + 1721 + int_cause = rdl(mp, INT_CAUSE(mp->port_num)) & 1722 + (INT_TX_END | INT_RX | INT_EXT); 1723 + if (int_cause == 0) 1724 + return 0; 1725 + 1726 + int_cause_ext = 0; 1727 + if (int_cause & INT_EXT) 1728 + int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num)); 1729 + 1730 + int_cause &= INT_TX_END | INT_RX; 
1731 + if (int_cause) { 1732 + wrl(mp, INT_CAUSE(mp->port_num), ~int_cause); 1733 + mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & 1734 + ~(rdl(mp, TXQ_COMMAND(mp->port_num)) & 0xff); 1735 + mp->work_rx |= (int_cause & INT_RX) >> 2; 1736 + } 1737 + 1738 + int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX; 1739 + if (int_cause_ext) { 1740 + wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext); 1741 + if (int_cause_ext & INT_EXT_LINK_PHY) 1742 + mp->work_link = 1; 1743 + mp->work_tx |= int_cause_ext & INT_EXT_TX; 1744 + } 1745 + 1746 + return 1; 1747 + } 1748 + 1749 + static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) 1750 + { 1751 + struct net_device *dev = (struct net_device *)dev_id; 1752 + struct mv643xx_eth_private *mp = netdev_priv(dev); 1753 + 1754 + if (unlikely(!mv643xx_eth_collect_events(mp))) 1755 + return IRQ_NONE; 1756 + 1757 + wrl(mp, INT_MASK(mp->port_num), 0); 1758 + napi_schedule(&mp->napi); 1759 + 1760 + return IRQ_HANDLED; 1761 + } 1762 + 1756 1763 static void handle_link_event(struct mv643xx_eth_private *mp) 1757 1764 { 1758 1765 struct net_device *dev = mp->dev; ··· 1814 1731 printk(KERN_INFO "%s: link down\n", dev->name); 1815 1732 1816 1733 netif_carrier_off(dev); 1817 - netif_stop_queue(dev); 1818 1734 1819 - for (i = 0; i < 8; i++) { 1735 + for (i = 0; i < mp->txq_count; i++) { 1820 1736 struct tx_queue *txq = mp->txq + i; 1821 1737 1822 - if (mp->txq_mask & (1 << i)) { 1823 - txq_reclaim(txq, 1); 1824 - txq_reset_hw_ptr(txq); 1825 - } 1738 + txq_reclaim(txq, txq->tx_ring_size, 1); 1739 + txq_reset_hw_ptr(txq); 1826 1740 } 1827 1741 } 1828 1742 return; ··· 1847 1767 speed, duplex ? "full" : "half", 1848 1768 fc ? 
"en" : "dis"); 1849 1769 1850 - if (!netif_carrier_ok(dev)) { 1770 + if (!netif_carrier_ok(dev)) 1851 1771 netif_carrier_on(dev); 1852 - netif_wake_queue(dev); 1853 - } 1854 1772 } 1855 1773 1856 - static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) 1774 + static int mv643xx_eth_poll(struct napi_struct *napi, int budget) 1857 1775 { 1858 - struct net_device *dev = (struct net_device *)dev_id; 1859 - struct mv643xx_eth_private *mp = netdev_priv(dev); 1860 - u32 int_cause; 1861 - u32 int_cause_ext; 1776 + struct mv643xx_eth_private *mp; 1777 + int work_done; 1862 1778 1863 - int_cause = rdl(mp, INT_CAUSE(mp->port_num)) & 1864 - (INT_TX_END | INT_RX | INT_EXT); 1865 - if (int_cause == 0) 1866 - return IRQ_NONE; 1779 + mp = container_of(napi, struct mv643xx_eth_private, napi); 1867 1780 1868 - int_cause_ext = 0; 1869 - if (int_cause & INT_EXT) { 1870 - int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num)) 1871 - & (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX); 1872 - wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext); 1873 - } 1781 + mp->work_rx_refill |= mp->work_rx_oom; 1782 + mp->work_rx_oom = 0; 1874 1783 1875 - if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK)) 1876 - handle_link_event(mp); 1784 + work_done = 0; 1785 + while (work_done < budget) { 1786 + u8 queue_mask; 1787 + int queue; 1788 + int work_tbd; 1877 1789 1878 - /* 1879 - * RxBuffer or RxError set for any of the 8 queues? 1880 - */ 1881 - #ifdef MV643XX_ETH_NAPI 1882 - if (int_cause & INT_RX) { 1883 - wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_RX)); 1884 - wrl(mp, INT_MASK(mp->port_num), 0x00000000); 1885 - rdl(mp, INT_MASK(mp->port_num)); 1886 - 1887 - netif_rx_schedule(dev, &mp->napi); 1888 - } 1889 - #else 1890 - if (int_cause & INT_RX) { 1891 - int i; 1892 - 1893 - for (i = 7; i >= 0; i--) 1894 - if (mp->rxq_mask & (1 << i)) 1895 - rxq_process(mp->rxq + i, INT_MAX); 1896 - } 1897 - #endif 1898 - 1899 - /* 1900 - * TxBuffer or TxError set for any of the 8 queues? 
1901 - */ 1902 - if (int_cause_ext & INT_EXT_TX) { 1903 - int i; 1904 - 1905 - for (i = 0; i < 8; i++) 1906 - if (mp->txq_mask & (1 << i)) 1907 - txq_reclaim(mp->txq + i, 0); 1908 - 1909 - /* 1910 - * Enough space again in the primary TX queue for a 1911 - * full packet? 1912 - */ 1913 - if (netif_carrier_ok(dev)) { 1914 - spin_lock(&mp->lock); 1915 - __txq_maybe_wake(mp->txq + mp->txq_primary); 1916 - spin_unlock(&mp->lock); 1790 + if (mp->work_link) { 1791 + mp->work_link = 0; 1792 + handle_link_event(mp); 1793 + continue; 1917 1794 } 1918 - } 1919 1795 1920 - /* 1921 - * Any TxEnd interrupts? 1922 - */ 1923 - if (int_cause & INT_TX_END) { 1924 - int i; 1925 - 1926 - wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END)); 1927 - 1928 - spin_lock(&mp->lock); 1929 - for (i = 0; i < 8; i++) { 1930 - struct tx_queue *txq = mp->txq + i; 1931 - u32 hw_desc_ptr; 1932 - u32 expected_ptr; 1933 - 1934 - if ((int_cause & (INT_TX_END_0 << i)) == 0) 1796 + queue_mask = mp->work_tx | mp->work_tx_end | 1797 + mp->work_rx | mp->work_rx_refill; 1798 + if (!queue_mask) { 1799 + if (mv643xx_eth_collect_events(mp)) 1935 1800 continue; 1936 - 1937 - hw_desc_ptr = 1938 - rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, i)); 1939 - expected_ptr = (u32)txq->tx_desc_dma + 1940 - txq->tx_curr_desc * sizeof(struct tx_desc); 1941 - 1942 - if (hw_desc_ptr != expected_ptr) 1943 - txq_enable(txq); 1801 + break; 1944 1802 } 1945 - spin_unlock(&mp->lock); 1803 + 1804 + queue = fls(queue_mask) - 1; 1805 + queue_mask = 1 << queue; 1806 + 1807 + work_tbd = budget - work_done; 1808 + if (work_tbd > 16) 1809 + work_tbd = 16; 1810 + 1811 + if (mp->work_tx_end & queue_mask) { 1812 + txq_kick(mp->txq + queue); 1813 + } else if (mp->work_tx & queue_mask) { 1814 + work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); 1815 + txq_maybe_wake(mp->txq + queue); 1816 + } else if (mp->work_rx & queue_mask) { 1817 + work_done += rxq_process(mp->rxq + queue, work_tbd); 1818 + } else if (mp->work_rx_refill & 
queue_mask) { 1819 + work_done += rxq_refill(mp->rxq + queue, work_tbd); 1820 + } else { 1821 + BUG(); 1822 + } 1946 1823 } 1947 1824 1948 - return IRQ_HANDLED; 1825 + if (work_done < budget) { 1826 + if (mp->work_rx_oom) 1827 + mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); 1828 + napi_complete(napi); 1829 + wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); 1830 + } 1831 + 1832 + return work_done; 1833 + } 1834 + 1835 + static inline void oom_timer_wrapper(unsigned long data) 1836 + { 1837 + struct mv643xx_eth_private *mp = (void *)data; 1838 + 1839 + napi_schedule(&mp->napi); 1949 1840 } 1950 1841 1951 1842 static void phy_reset(struct mv643xx_eth_private *mp) 1952 1843 { 1953 - unsigned int data; 1844 + int data; 1954 1845 1955 - smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data); 1846 + data = phy_read(mp->phy, MII_BMCR); 1847 + if (data < 0) 1848 + return; 1849 + 1956 1850 data |= BMCR_RESET; 1957 - smi_reg_write(mp, mp->phy_addr, MII_BMCR, data); 1851 + if (phy_write(mp->phy, MII_BMCR, data) < 0) 1852 + return; 1958 1853 1959 1854 do { 1960 - udelay(1); 1961 - smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data); 1962 - } while (data & BMCR_RESET); 1855 + data = phy_read(mp->phy, MII_BMCR); 1856 + } while (data >= 0 && data & BMCR_RESET); 1963 1857 } 1964 1858 1965 1859 static void port_start(struct mv643xx_eth_private *mp) ··· 1944 1890 /* 1945 1891 * Perform PHY reset, if there is a PHY. 1946 1892 */ 1947 - if (mp->phy_addr != -1) { 1893 + if (mp->phy != NULL) { 1948 1894 struct ethtool_cmd cmd; 1949 1895 1950 1896 mv643xx_eth_get_settings(mp->dev, &cmd); ··· 1961 1907 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr); 1962 1908 1963 1909 pscr |= DO_NOT_FORCE_LINK_FAIL; 1964 - if (mp->phy_addr == -1) 1910 + if (mp->phy == NULL) 1965 1911 pscr |= FORCE_LINK_PASS; 1966 1912 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr); 1967 1913 ··· 1971 1917 * Configure TX path and queues. 
1972 1918 */ 1973 1919 tx_set_rate(mp, 1000000000, 16777216); 1974 - for (i = 0; i < 8; i++) { 1920 + for (i = 0; i < mp->txq_count; i++) { 1975 1921 struct tx_queue *txq = mp->txq + i; 1976 - 1977 - if ((mp->txq_mask & (1 << i)) == 0) 1978 - continue; 1979 1922 1980 1923 txq_reset_hw_ptr(txq); 1981 1924 txq_set_rate(txq, 1000000000, 16777216); ··· 1986 1935 1987 1936 /* 1988 1937 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast 1989 - * frames to RX queue #0. 1938 + * frames to RX queue #0, and include the pseudo-header when 1939 + * calculating receive checksums. 1990 1940 */ 1991 - wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000); 1941 + wrl(mp, PORT_CONFIG(mp->port_num), 0x02000000); 1992 1942 1993 1943 /* 1994 1944 * Treat BPDUs as normal multicasts, and disable partition mode. ··· 1999 1947 /* 2000 1948 * Enable the receive queues. 2001 1949 */ 2002 - for (i = 0; i < 8; i++) { 1950 + for (i = 0; i < mp->rxq_count; i++) { 2003 1951 struct rx_queue *rxq = mp->rxq + i; 2004 1952 int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i); 2005 1953 u32 addr; 2006 - 2007 - if ((mp->rxq_mask & (1 << i)) == 0) 2008 - continue; 2009 1954 2010 1955 addr = (u32)rxq->rx_desc_dma; 2011 1956 addr += rxq->rx_curr_desc * sizeof(struct rx_desc); ··· 2053 2004 rdl(mp, INT_CAUSE_EXT(mp->port_num)); 2054 2005 2055 2006 err = request_irq(dev->irq, mv643xx_eth_irq, 2056 - IRQF_SHARED | IRQF_SAMPLE_RANDOM, 2057 - dev->name, dev); 2007 + IRQF_SHARED, dev->name, dev); 2058 2008 if (err) { 2059 2009 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); 2060 2010 return -EAGAIN; ··· 2061 2013 2062 2014 init_mac_tables(mp); 2063 2015 2064 - for (i = 0; i < 8; i++) { 2065 - if ((mp->rxq_mask & (1 << i)) == 0) 2066 - continue; 2016 + napi_enable(&mp->napi); 2067 2017 2018 + for (i = 0; i < mp->rxq_count; i++) { 2068 2019 err = rxq_init(mp, i); 2069 2020 if (err) { 2070 2021 while (--i >= 0) 2071 - if (mp->rxq_mask & (1 << i)) 2072 - rxq_deinit(mp->rxq + i); 2022 + rxq_deinit(mp->rxq + 
i); 2073 2023 goto out; 2074 2024 } 2075 2025 2076 - rxq_refill(mp->rxq + i); 2026 + rxq_refill(mp->rxq + i, INT_MAX); 2077 2027 } 2078 2028 2079 - for (i = 0; i < 8; i++) { 2080 - if ((mp->txq_mask & (1 << i)) == 0) 2081 - continue; 2029 + if (mp->work_rx_oom) { 2030 + mp->rx_oom.expires = jiffies + (HZ / 10); 2031 + add_timer(&mp->rx_oom); 2032 + } 2082 2033 2034 + for (i = 0; i < mp->txq_count; i++) { 2083 2035 err = txq_init(mp, i); 2084 2036 if (err) { 2085 2037 while (--i >= 0) 2086 - if (mp->txq_mask & (1 << i)) 2087 - txq_deinit(mp->txq + i); 2038 + txq_deinit(mp->txq + i); 2088 2039 goto out_free; 2089 2040 } 2090 2041 } 2091 2042 2092 - #ifdef MV643XX_ETH_NAPI 2093 - napi_enable(&mp->napi); 2094 - #endif 2095 - 2096 2043 netif_carrier_off(dev); 2097 - netif_stop_queue(dev); 2098 2044 2099 2045 port_start(mp); 2100 2046 2101 2047 set_rx_coal(mp, 0); 2102 2048 set_tx_coal(mp, 0); 2103 2049 2104 - wrl(mp, INT_MASK_EXT(mp->port_num), 2105 - INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX); 2106 - 2050 + wrl(mp, INT_MASK_EXT(mp->port_num), INT_EXT_LINK_PHY | INT_EXT_TX); 2107 2051 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); 2108 2052 2109 2053 return 0; 2110 2054 2111 2055 2112 2056 out_free: 2113 - for (i = 0; i < 8; i++) 2114 - if (mp->rxq_mask & (1 << i)) 2115 - rxq_deinit(mp->rxq + i); 2057 + for (i = 0; i < mp->rxq_count; i++) 2058 + rxq_deinit(mp->rxq + i); 2116 2059 out: 2117 2060 free_irq(dev->irq, dev); 2118 2061 ··· 2115 2076 unsigned int data; 2116 2077 int i; 2117 2078 2118 - for (i = 0; i < 8; i++) { 2119 - if (mp->rxq_mask & (1 << i)) 2120 - rxq_disable(mp->rxq + i); 2121 - if (mp->txq_mask & (1 << i)) 2122 - txq_disable(mp->txq + i); 2123 - } 2079 + for (i = 0; i < mp->rxq_count; i++) 2080 + rxq_disable(mp->rxq + i); 2081 + for (i = 0; i < mp->txq_count; i++) 2082 + txq_disable(mp->txq + i); 2124 2083 2125 2084 while (1) { 2126 2085 u32 ps = rdl(mp, PORT_STATUS(mp->port_num)); ··· 2144 2107 wrl(mp, INT_MASK(mp->port_num), 
0x00000000); 2145 2108 rdl(mp, INT_MASK(mp->port_num)); 2146 2109 2147 - #ifdef MV643XX_ETH_NAPI 2110 + del_timer_sync(&mp->mib_counters_timer); 2111 + 2148 2112 napi_disable(&mp->napi); 2149 - #endif 2113 + 2114 + del_timer_sync(&mp->rx_oom); 2115 + 2150 2116 netif_carrier_off(dev); 2151 - netif_stop_queue(dev); 2152 2117 2153 2118 free_irq(dev->irq, dev); 2154 2119 2155 2120 port_reset(mp); 2121 + mv643xx_eth_get_stats(dev); 2156 2122 mib_counters_update(mp); 2157 2123 2158 - for (i = 0; i < 8; i++) { 2159 - if (mp->rxq_mask & (1 << i)) 2160 - rxq_deinit(mp->rxq + i); 2161 - if (mp->txq_mask & (1 << i)) 2162 - txq_deinit(mp->txq + i); 2163 - } 2124 + for (i = 0; i < mp->rxq_count; i++) 2125 + rxq_deinit(mp->rxq + i); 2126 + for (i = 0; i < mp->txq_count; i++) 2127 + txq_deinit(mp->txq + i); 2164 2128 2165 2129 return 0; 2166 2130 } ··· 2170 2132 { 2171 2133 struct mv643xx_eth_private *mp = netdev_priv(dev); 2172 2134 2173 - if (mp->phy_addr != -1) 2174 - return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL); 2135 + if (mp->phy != NULL) 2136 + return phy_mii_ioctl(mp->phy, if_mii(ifr), cmd); 2175 2137 2176 2138 return -EOPNOTSUPP; 2177 2139 } ··· 2211 2173 2212 2174 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); 2213 2175 if (netif_running(mp->dev)) { 2214 - netif_stop_queue(mp->dev); 2215 - 2176 + netif_tx_stop_all_queues(mp->dev); 2216 2177 port_reset(mp); 2217 2178 port_start(mp); 2218 - 2219 - __txq_maybe_wake(mp->txq + mp->txq_primary); 2179 + netif_tx_wake_all_queues(mp->dev); 2220 2180 } 2221 2181 } 2222 2182 ··· 2240 2204 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); 2241 2205 } 2242 2206 #endif 2243 - 2244 - static int mv643xx_eth_mdio_read(struct net_device *dev, int addr, int reg) 2245 - { 2246 - struct mv643xx_eth_private *mp = netdev_priv(dev); 2247 - int val; 2248 - 2249 - smi_reg_read(mp, addr, reg, &val); 2250 - 2251 - return val; 2252 - } 2253 - 2254 - static void mv643xx_eth_mdio_write(struct 
net_device *dev, int addr, int reg, int val) 2255 - { 2256 - struct mv643xx_eth_private *mp = netdev_priv(dev); 2257 - smi_reg_write(mp, addr, reg, val); 2258 - } 2259 2207 2260 2208 2261 2209 /* platform glue ************************************************************/ ··· 2292 2272 msp->extended_rx_coal_limit = 0; 2293 2273 2294 2274 /* 2295 - * Check whether the TX rate control registers are in the 2296 - * old or the new place. 2275 + * Check whether the MAC supports TX rate control, and if 2276 + * yes, whether its associated registers are in the old or 2277 + * the new place. 2297 2278 */ 2298 2279 writel(1, msp->base + TX_BW_MTU_MOVED(0)); 2299 - if (readl(msp->base + TX_BW_MTU_MOVED(0)) & 1) 2300 - msp->tx_bw_control_moved = 1; 2301 - else 2302 - msp->tx_bw_control_moved = 0; 2280 + if (readl(msp->base + TX_BW_MTU_MOVED(0)) & 1) { 2281 + msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT; 2282 + } else { 2283 + writel(7, msp->base + TX_BW_RATE(0)); 2284 + if (readl(msp->base + TX_BW_RATE(0)) & 7) 2285 + msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT; 2286 + else 2287 + msp->tx_bw_control = TX_BW_CONTROL_ABSENT; 2288 + } 2303 2289 } 2304 2290 2305 2291 static int mv643xx_eth_shared_probe(struct platform_device *pdev) ··· 2335 2309 if (msp->base == NULL) 2336 2310 goto out_free; 2337 2311 2338 - spin_lock_init(&msp->phy_lock); 2312 + /* 2313 + * Set up and register SMI bus. 
2314 + */ 2315 + if (pd == NULL || pd->shared_smi == NULL) { 2316 + msp->smi_bus.priv = msp; 2317 + msp->smi_bus.name = "mv643xx_eth smi"; 2318 + msp->smi_bus.read = smi_bus_read; 2319 + msp->smi_bus.write = smi_bus_write, 2320 + snprintf(msp->smi_bus.id, MII_BUS_ID_SIZE, "%d", pdev->id); 2321 + msp->smi_bus.dev = &pdev->dev; 2322 + msp->smi_bus.phy_mask = 0xffffffff; 2323 + if (mdiobus_register(&msp->smi_bus) < 0) 2324 + goto out_unmap; 2325 + msp->smi = msp; 2326 + } else { 2327 + msp->smi = platform_get_drvdata(pd->shared_smi); 2328 + } 2329 + 2330 + msp->err_interrupt = NO_IRQ; 2331 + init_waitqueue_head(&msp->smi_busy_wait); 2332 + 2333 + /* 2334 + * Check whether the error interrupt is hooked up. 2335 + */ 2336 + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2337 + if (res != NULL) { 2338 + int err; 2339 + 2340 + err = request_irq(res->start, mv643xx_eth_err_irq, 2341 + IRQF_SHARED, "mv643xx_eth", msp); 2342 + if (!err) { 2343 + writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK); 2344 + msp->err_interrupt = res->start; 2345 + } 2346 + } 2339 2347 2340 2348 /* 2341 2349 * (Re-)program MBUS remapping windows if we are asked to. 
··· 2387 2327 2388 2328 return 0; 2389 2329 2330 + out_unmap: 2331 + iounmap(msp->base); 2390 2332 out_free: 2391 2333 kfree(msp); 2392 2334 out: ··· 2398 2336 static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2399 2337 { 2400 2338 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); 2339 + struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2401 2340 2341 + if (pd == NULL || pd->shared_smi == NULL) 2342 + mdiobus_unregister(&msp->smi_bus); 2343 + if (msp->err_interrupt != NO_IRQ) 2344 + free_irq(msp->err_interrupt, msp); 2402 2345 iounmap(msp->base); 2403 2346 kfree(msp); 2404 2347 ··· 2449 2382 else 2450 2383 uc_addr_get(mp, dev->dev_addr); 2451 2384 2452 - if (pd->phy_addr == -1) { 2453 - mp->shared_smi = NULL; 2454 - mp->phy_addr = -1; 2455 - } else { 2456 - mp->shared_smi = mp->shared; 2457 - if (pd->shared_smi != NULL) 2458 - mp->shared_smi = platform_get_drvdata(pd->shared_smi); 2459 - 2460 - if (pd->force_phy_addr || pd->phy_addr) { 2461 - mp->phy_addr = pd->phy_addr & 0x3f; 2462 - phy_addr_set(mp, mp->phy_addr); 2463 - } else { 2464 - mp->phy_addr = phy_addr_get(mp); 2465 - } 2466 - } 2467 - 2468 2385 mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE; 2469 2386 if (pd->rx_queue_size) 2470 2387 mp->default_rx_ring_size = pd->rx_queue_size; 2471 2388 mp->rx_desc_sram_addr = pd->rx_sram_addr; 2472 2389 mp->rx_desc_sram_size = pd->rx_sram_size; 2473 2390 2474 - if (pd->rx_queue_mask) 2475 - mp->rxq_mask = pd->rx_queue_mask; 2476 - else 2477 - mp->rxq_mask = 0x01; 2478 - mp->rxq_primary = fls(mp->rxq_mask) - 1; 2391 + mp->rxq_count = pd->rx_queue_count ? 
: 1; 2479 2392 2480 2393 mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE; 2481 2394 if (pd->tx_queue_size) ··· 2463 2416 mp->tx_desc_sram_addr = pd->tx_sram_addr; 2464 2417 mp->tx_desc_sram_size = pd->tx_sram_size; 2465 2418 2466 - if (pd->tx_queue_mask) 2467 - mp->txq_mask = pd->tx_queue_mask; 2468 - else 2469 - mp->txq_mask = 0x01; 2470 - mp->txq_primary = fls(mp->txq_mask) - 1; 2419 + mp->txq_count = pd->tx_queue_count ? : 1; 2471 2420 } 2472 2421 2473 - static int phy_detect(struct mv643xx_eth_private *mp) 2422 + static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, 2423 + int phy_addr) 2474 2424 { 2475 - unsigned int data; 2476 - unsigned int data2; 2425 + struct mii_bus *bus = &mp->shared->smi->smi_bus; 2426 + struct phy_device *phydev; 2427 + int start; 2428 + int num; 2429 + int i; 2477 2430 2478 - smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data); 2479 - smi_reg_write(mp, mp->phy_addr, MII_BMCR, data ^ BMCR_ANENABLE); 2480 - 2481 - smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data2); 2482 - if (((data ^ data2) & BMCR_ANENABLE) == 0) 2483 - return -ENODEV; 2484 - 2485 - smi_reg_write(mp, mp->phy_addr, MII_BMCR, data); 2486 - 2487 - return 0; 2488 - } 2489 - 2490 - static int phy_init(struct mv643xx_eth_private *mp, 2491 - struct mv643xx_eth_platform_data *pd) 2492 - { 2493 - struct ethtool_cmd cmd; 2494 - int err; 2495 - 2496 - err = phy_detect(mp); 2497 - if (err) { 2498 - dev_printk(KERN_INFO, &mp->dev->dev, 2499 - "no PHY detected at addr %d\n", mp->phy_addr); 2500 - return err; 2431 + if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { 2432 + start = phy_addr_get(mp) & 0x1f; 2433 + num = 32; 2434 + } else { 2435 + start = phy_addr & 0x1f; 2436 + num = 1; 2501 2437 } 2438 + 2439 + phydev = NULL; 2440 + for (i = 0; i < num; i++) { 2441 + int addr = (start + i) & 0x1f; 2442 + 2443 + if (bus->phy_map[addr] == NULL) 2444 + mdiobus_scan(bus, addr); 2445 + 2446 + if (phydev == NULL) { 2447 + phydev = bus->phy_map[addr]; 2448 + if (phydev != NULL) 2449 + 
phy_addr_set(mp, addr); 2450 + } 2451 + } 2452 + 2453 + return phydev; 2454 + } 2455 + 2456 + static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) 2457 + { 2458 + struct phy_device *phy = mp->phy; 2459 + 2502 2460 phy_reset(mp); 2503 2461 2504 - mp->mii.phy_id = mp->phy_addr; 2505 - mp->mii.phy_id_mask = 0x3f; 2506 - mp->mii.reg_num_mask = 0x1f; 2507 - mp->mii.dev = mp->dev; 2508 - mp->mii.mdio_read = mv643xx_eth_mdio_read; 2509 - mp->mii.mdio_write = mv643xx_eth_mdio_write; 2462 + phy_attach(mp->dev, phy->dev.bus_id, 0, PHY_INTERFACE_MODE_GMII); 2510 2463 2511 - mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii); 2512 - 2513 - memset(&cmd, 0, sizeof(cmd)); 2514 - 2515 - cmd.port = PORT_MII; 2516 - cmd.transceiver = XCVR_INTERNAL; 2517 - cmd.phy_address = mp->phy_addr; 2518 - if (pd->speed == 0) { 2519 - cmd.autoneg = AUTONEG_ENABLE; 2520 - cmd.speed = SPEED_100; 2521 - cmd.advertising = ADVERTISED_10baseT_Half | 2522 - ADVERTISED_10baseT_Full | 2523 - ADVERTISED_100baseT_Half | 2524 - ADVERTISED_100baseT_Full; 2525 - if (mp->mii.supports_gmii) 2526 - cmd.advertising |= ADVERTISED_1000baseT_Full; 2464 + if (speed == 0) { 2465 + phy->autoneg = AUTONEG_ENABLE; 2466 + phy->speed = 0; 2467 + phy->duplex = 0; 2468 + phy->advertising = phy->supported | ADVERTISED_Autoneg; 2527 2469 } else { 2528 - cmd.autoneg = AUTONEG_DISABLE; 2529 - cmd.speed = pd->speed; 2530 - cmd.duplex = pd->duplex; 2470 + phy->autoneg = AUTONEG_DISABLE; 2471 + phy->advertising = 0; 2472 + phy->speed = speed; 2473 + phy->duplex = duplex; 2531 2474 } 2532 - 2533 - mv643xx_eth_set_settings(mp->dev, &cmd); 2534 - 2535 - return 0; 2475 + phy_start_aneg(phy); 2536 2476 } 2537 2477 2538 2478 static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) ··· 2533 2499 } 2534 2500 2535 2501 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; 2536 - if (mp->phy_addr == -1) { 2502 + if (mp->phy == NULL) { 2537 2503 pscr |= DISABLE_AUTO_NEG_SPEED_GMII; 2538 
2504 if (speed == SPEED_1000) 2539 2505 pscr |= SET_GMII_SPEED_TO_1000; ··· 2572 2538 return -ENODEV; 2573 2539 } 2574 2540 2575 - dev = alloc_etherdev(sizeof(struct mv643xx_eth_private)); 2541 + dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8); 2576 2542 if (!dev) 2577 2543 return -ENOMEM; 2578 2544 ··· 2583 2549 mp->port_num = pd->port_number; 2584 2550 2585 2551 mp->dev = dev; 2586 - #ifdef MV643XX_ETH_NAPI 2587 - netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64); 2588 - #endif 2589 2552 2590 2553 set_params(mp, pd); 2554 + dev->real_num_tx_queues = mp->txq_count; 2591 2555 2592 - spin_lock_init(&mp->lock); 2556 + if (pd->phy_addr != MV643XX_ETH_PHY_NONE) 2557 + mp->phy = phy_scan(mp, pd->phy_addr); 2593 2558 2594 - mib_counters_clear(mp); 2595 - INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); 2596 - 2597 - if (mp->phy_addr != -1) { 2598 - err = phy_init(mp, pd); 2599 - if (err) 2600 - goto out; 2601 - 2559 + if (mp->phy != NULL) { 2560 + phy_init(mp, pd->speed, pd->duplex); 2602 2561 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); 2603 2562 } else { 2604 2563 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless); 2605 2564 } 2565 + 2606 2566 init_pscr(mp, pd->speed, pd->duplex); 2567 + 2568 + 2569 + mib_counters_clear(mp); 2570 + 2571 + init_timer(&mp->mib_counters_timer); 2572 + mp->mib_counters_timer.data = (unsigned long)mp; 2573 + mp->mib_counters_timer.function = mib_counters_timer_wrapper; 2574 + mp->mib_counters_timer.expires = jiffies + 30 * HZ; 2575 + add_timer(&mp->mib_counters_timer); 2576 + 2577 + spin_lock_init(&mp->mib_counters_lock); 2578 + 2579 + INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); 2580 + 2581 + netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128); 2582 + 2583 + init_timer(&mp->rx_oom); 2584 + mp->rx_oom.data = (unsigned long)mp; 2585 + mp->rx_oom.function = oom_timer_wrapper; 2607 2586 2608 2587 2609 2588 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2610 2589 BUG_ON(!res); 2611 2590 dev->irq = 
res->start; 2612 2591 2592 + dev->get_stats = mv643xx_eth_get_stats; 2613 2593 dev->hard_start_xmit = mv643xx_eth_xmit; 2614 2594 dev->open = mv643xx_eth_open; 2615 2595 dev->stop = mv643xx_eth_stop; ··· 2638 2590 dev->watchdog_timeo = 2 * HZ; 2639 2591 dev->base_addr = 0; 2640 2592 2641 - #ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX 2642 - /* 2643 - * Zero copy can only work if we use Discovery II memory. Else, we will 2644 - * have to map the buffers to ISA memory which is only 16 MB 2645 - */ 2646 2593 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; 2647 2594 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; 2648 - #endif 2649 2595 2650 2596 SET_NETDEV_DEV(dev, &pdev->dev); 2651 2597 ··· 2652 2610 2653 2611 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n", 2654 2612 mp->port_num, print_mac(mac, dev->dev_addr)); 2655 - 2656 - if (dev->features & NETIF_F_SG) 2657 - dev_printk(KERN_NOTICE, &dev->dev, "scatter/gather enabled\n"); 2658 - 2659 - if (dev->features & NETIF_F_IP_CSUM) 2660 - dev_printk(KERN_NOTICE, &dev->dev, "tx checksum offload\n"); 2661 - 2662 - #ifdef MV643XX_ETH_NAPI 2663 - dev_printk(KERN_NOTICE, &dev->dev, "napi enabled\n"); 2664 - #endif 2665 2613 2666 2614 if (mp->tx_desc_sram_size > 0) 2667 2615 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n"); ··· 2669 2637 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 2670 2638 2671 2639 unregister_netdev(mp->dev); 2640 + if (mp->phy != NULL) 2641 + phy_detach(mp->phy); 2672 2642 flush_scheduled_work(); 2673 2643 free_netdev(mp->dev); 2674 2644
+48 -41
drivers/net/phy/mdio_bus.c
··· 60 60 bus->reset(bus); 61 61 62 62 for (i = 0; i < PHY_MAX_ADDR; i++) { 63 - struct phy_device *phydev; 63 + bus->phy_map[i] = NULL; 64 + if ((bus->phy_mask & (1 << i)) == 0) { 65 + struct phy_device *phydev; 64 66 65 - if (bus->phy_mask & (1 << i)) { 66 - bus->phy_map[i] = NULL; 67 - continue; 67 + phydev = mdiobus_scan(bus, i); 68 + if (IS_ERR(phydev)) 69 + err = PTR_ERR(phydev); 68 70 } 69 - 70 - phydev = get_phy_device(bus, i); 71 - 72 - if (IS_ERR(phydev)) 73 - return PTR_ERR(phydev); 74 - 75 - /* There's a PHY at this address 76 - * We need to set: 77 - * 1) IRQ 78 - * 2) bus_id 79 - * 3) parent 80 - * 4) bus 81 - * 5) mii_bus 82 - * And, we need to register it */ 83 - if (phydev) { 84 - phydev->irq = bus->irq[i]; 85 - 86 - phydev->dev.parent = bus->dev; 87 - phydev->dev.bus = &mdio_bus_type; 88 - snprintf(phydev->dev.bus_id, BUS_ID_SIZE, PHY_ID_FMT, bus->id, i); 89 - 90 - phydev->bus = bus; 91 - 92 - /* Run all of the fixups for this PHY */ 93 - phy_scan_fixups(phydev); 94 - 95 - err = device_register(&phydev->dev); 96 - 97 - if (err) { 98 - printk(KERN_ERR "phy %d failed to register\n", 99 - i); 100 - phy_device_free(phydev); 101 - phydev = NULL; 102 - } 103 - } 104 - 105 - bus->phy_map[i] = phydev; 106 71 } 107 72 108 73 pr_info("%s: probed\n", bus->name); ··· 86 121 } 87 122 } 88 123 EXPORT_SYMBOL(mdiobus_unregister); 124 + 125 + struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) 126 + { 127 + struct phy_device *phydev; 128 + int err; 129 + 130 + phydev = get_phy_device(bus, addr); 131 + if (IS_ERR(phydev) || phydev == NULL) 132 + return phydev; 133 + 134 + /* There's a PHY at this address 135 + * We need to set: 136 + * 1) IRQ 137 + * 2) bus_id 138 + * 3) parent 139 + * 4) bus 140 + * 5) mii_bus 141 + * And, we need to register it */ 142 + 143 + phydev->irq = bus->irq != NULL ? 
bus->irq[addr] : PHY_POLL; 144 + 145 + phydev->dev.parent = bus->dev; 146 + phydev->dev.bus = &mdio_bus_type; 147 + snprintf(phydev->dev.bus_id, BUS_ID_SIZE, PHY_ID_FMT, bus->id, addr); 148 + 149 + phydev->bus = bus; 150 + 151 + /* Run all of the fixups for this PHY */ 152 + phy_scan_fixups(phydev); 153 + 154 + err = device_register(&phydev->dev); 155 + if (err) { 156 + printk(KERN_ERR "phy %d failed to register\n", addr); 157 + phy_device_free(phydev); 158 + phydev = NULL; 159 + } 160 + 161 + bus->phy_map[addr] = phydev; 162 + 163 + return phydev; 164 + } 165 + EXPORT_SYMBOL(mdiobus_scan); 89 166 90 167 /** 91 168 * mdio_bus_match - determine if given PHY driver supports the given PHY device
+8 -5
include/linux/mv643xx_eth.h
··· 17 17 18 18 struct mv643xx_eth_shared_platform_data { 19 19 struct mbus_dram_target_info *dram; 20 + struct platform_device *shared_smi; 20 21 unsigned int t_clk; 21 22 }; 23 + 24 + #define MV643XX_ETH_PHY_ADDR_DEFAULT 0 25 + #define MV643XX_ETH_PHY_ADDR(x) (0x80 | (x)) 26 + #define MV643XX_ETH_PHY_NONE 0xff 22 27 23 28 struct mv643xx_eth_platform_data { 24 29 /* ··· 35 30 /* 36 31 * Whether a PHY is present, and if yes, at which address. 37 32 */ 38 - struct platform_device *shared_smi; 39 - int force_phy_addr; 40 33 int phy_addr; 41 34 42 35 /* ··· 52 49 int duplex; 53 50 54 51 /* 55 - * Which RX/TX queues to use. 52 + * How many RX/TX queues to use. 56 53 */ 57 - int rx_queue_mask; 58 - int tx_queue_mask; 54 + int rx_queue_count; 55 + int tx_queue_count; 59 56 60 57 /* 61 58 * Override default RX/TX queue sizes if nonzero.
+2
include/linux/phy.h
··· 410 410 411 411 int mdiobus_register(struct mii_bus *bus); 412 412 void mdiobus_unregister(struct mii_bus *bus); 413 + struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); 414 + 413 415 void phy_sanitize_settings(struct phy_device *phydev); 414 416 int phy_stop_interrupts(struct phy_device *phydev); 415 417 int phy_enable_interrupts(struct phy_device *phydev);