	  The module will be called smc91x.  If you want to compile it as a
	  module, say M here and read <file:Documentation/kbuild/modules.txt>.

config PXA168_ETH
	tristate "Marvell pxa168 ethernet support"
	depends on CPU_PXA168
	select PHYLIB
	help
	  This driver supports the pxa168 Ethernet ports.

	  To compile this driver as a module, choose M here. The module
	  will be called pxa168_eth.

config NET_NETX
	tristate "NetX Ethernet support"
	select MII
/*
 * PXA168 ethernet driver.
 * Most of the code is derived from mv643xx ethernet driver.
 *
 * Copyright (C) 2010 Marvell International Ltd.
 *		Sachin Sanap <ssanap@marvell.com>
 *		Philip Rakity <prakity@marvell.com>
 *		Mark Brown <markb@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/phy.h>
#include <linux/io.h>
#include <linux/types.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <linux/pxa168_eth.h>

#define DRIVER_NAME	"pxa168-eth"
#define DRIVER_VERSION	"0.3"

/*
 * Registers
 */

#define PHY_ADDRESS		0x0000
#define SMI			0x0010
#define PORT_CONFIG		0x0400
#define PORT_CONFIG_EXT		0x0408
#define PORT_COMMAND		0x0410
#define PORT_STATUS		0x0418
#define HTPR			0x0428
#define SDMA_CONFIG		0x0440
#define SDMA_CMD		0x0448
#define INT_CAUSE		0x0450
#define INT_W_CLEAR		0x0454
#define INT_MASK		0x0458
#define ETH_F_RX_DESC_0		0x0480
#define ETH_C_RX_DESC_0		0x04A0
#define ETH_C_TX_DESC_1		0x04E4

/* smi register */
#define SMI_BUSY		(1 << 28)	/* 0 - Write, 1 - Read  */
#define SMI_R_VALID		(1 << 27)	/* 0 - Write, 1 - Read  */
#define SMI_OP_W		(0 << 26)	/* Write operation */
#define SMI_OP_R		(1 << 26)	/* Read operation */

#define PHY_WAIT_ITERATIONS	10

#define PXA168_ETH_PHY_ADDR_DEFAULT	0
/* RX & TX descriptor command */
#define BUF_OWNED_BY_DMA	(1 << 31)

/* RX descriptor status */
#define RX_EN_INT		(1 << 23)
#define RX_FIRST_DESC		(1 << 17)
#define RX_LAST_DESC		(1 << 16)
#define RX_ERROR		(1 << 15)

/* TX descriptor command */
#define TX_EN_INT		(1 << 23)
#define TX_GEN_CRC		(1 << 22)
#define TX_ZERO_PADDING		(1 << 18)
#define TX_FIRST_DESC		(1 << 17)
#define TX_LAST_DESC		(1 << 16)
#define TX_ERROR		(1 << 15)

/* SDMA_CMD */
#define SDMA_CMD_AT		(1 << 31)
#define SDMA_CMD_TXDL		(1 << 24)
#define SDMA_CMD_TXDH		(1 << 23)
#define SDMA_CMD_AR		(1 << 15)
#define SDMA_CMD_ERD		(1 << 7)

/* Bit definitions of the Port Config Reg */
#define PCR_HS			(1 << 12)
#define PCR_EN			(1 << 7)
#define PCR_PM			(1 << 0)

/* Bit definitions of the Port Config Extend Reg */
#define PCXR_2BSM		(1 << 28)
#define PCXR_DSCP_EN		(1 << 21)
#define PCXR_MFL_1518		(0 << 14)
#define PCXR_MFL_1536		(1 << 14)
#define PCXR_MFL_2048		(2 << 14)
#define PCXR_MFL_64K		(3 << 14)
#define PCXR_FLP		(1 << 11)
#define PCXR_PRIO_TX_OFF	3
#define PCXR_TX_HIGH_PRI	(7 << PCXR_PRIO_TX_OFF)

/* Bit definitions of the SDMA Config Reg */
#define SDCR_BSZ_OFF		12
#define SDCR_BSZ8		(3 << SDCR_BSZ_OFF)
#define SDCR_BSZ4		(2 << SDCR_BSZ_OFF)
#define SDCR_BSZ2		(1 << SDCR_BSZ_OFF)
#define SDCR_BSZ1		(0 << SDCR_BSZ_OFF)
#define SDCR_BLMR		(1 << 6)
#define SDCR_BLMT		(1 << 7)
#define SDCR_RIFB		(1 << 9)
#define SDCR_RC_OFF		2
#define SDCR_RC_MAX_RETRANS	(0xf << SDCR_RC_OFF)

/*
 * Bit definitions of the Interrupt Cause Reg
 * and Interrupt MASK Reg is the same
 */
#define ICR_RXBUF		(1 << 0)
#define ICR_TXBUF_H		(1 << 2)
#define ICR_TXBUF_L		(1 << 3)
#define ICR_TXEND_H		(1 << 6)
#define ICR_TXEND_L		(1 << 7)
#define ICR_RXERR		(1 << 8)
#define ICR_TXERR_H		(1 << 10)
#define ICR_TXERR_L		(1 << 11)
#define ICR_TX_UDR		(1 << 13)
#define ICR_MII_CH		(1 << 28)

#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
				ICR_TXERR_H | ICR_TXERR_L |\
				ICR_TXEND_H | ICR_TXEND_L |\
				ICR_RXBUF | ICR_RXERR | ICR_MII_CH)

#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */

#define NUM_RX_DESCS		64
#define NUM_TX_DESCS		64

#define HASH_ADD		0
#define HASH_DELETE		1
#define HASH_ADDR_TABLE_SIZE	0x4000	/* 16K (1/2K address - PCR_HS == 1) */
#define HOP_NUMBER		12

/* Bit definitions for Port status */
#define PORT_SPEED_100		(1 << 0)
#define FULL_DUPLEX		(1 << 1)
#define FLOW_CONTROL_ENABLED	(1 << 2)
#define LINK_UP			(1 << 3)

/* Bit definitions for work to be done */
#define WORK_LINK		(1 << 0)
#define WORK_TX_DONE		(1 << 1)

/*
 * Misc definitions.
 */
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)

/* Hardware receive descriptor layout (shared with the DMA engine). */
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status */
	u16 byte_cnt;		/* Descriptor buffer byte count */
	u16 buf_size;		/* Buffer size */
	u32 buf_ptr;		/* Descriptor buffer pointer */
	u32 next_desc_ptr;	/* Next descriptor pointer */
};

/* Hardware transmit descriptor layout (shared with the DMA engine). */
struct tx_desc {
	u32 cmd_sts;		/* Command/status field */
	u16 reserved;
	u16 byte_cnt;		/* buffer byte count */
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor */
};

/* Per-port driver state, stored in netdev_priv(). */
struct pxa168_eth_private {
	int port_num;		/* User Ethernet port number */

	int rx_resource_err;	/* Rx ring resource error flag */

	/* Next available and first returning Rx resource */
	int rx_curr_desc_q, rx_used_desc_q;

	/* Next available and first returning Tx resource */
	int tx_curr_desc_q, tx_used_desc_q;

	struct rx_desc *p_rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct tx_desc *p_tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;

	struct work_struct tx_timeout_task;

	struct net_device *dev;
	struct napi_struct napi;
	u8 work_todo;
	int skb_size;

	struct net_device_stats stats;
	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;

	/*
	 * Used in case RX Ring is empty, which can occur when
	 * system does not have resources (skb's)
	 */
	struct timer_list timeout;
	struct mii_bus *smi_bus;
	struct phy_device *phy;

	/* clock */
	struct clk *clk;
	struct pxa168_eth_platform_data *pd;
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/* Pointer to the hardware address filter table */
	void *htpr;
	dma_addr_t htpr_dma;
};

/* One 64-bit entry of the hardware address filter (hash) table. */
struct addr_table_entry {
	__le32 lo;
	__le32 hi;
};

/* Bit fields of a Hash Table Entry */
enum hash_table_entry {
	HASH_ENTRY_VALID = 1,
	SKIP = 2,
	HASH_ENTRY_RECEIVE_DISCARD = 4,
	HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
};

static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static int pxa168_init_hw(struct pxa168_eth_private *pep);
static void eth_port_reset(struct net_device *dev);
static void eth_port_start(struct net_device *dev);
static int pxa168_eth_open(struct net_device *dev);
static int pxa168_eth_stop(struct net_device *dev);
static int ethernet_phy_setup(struct net_device *dev);

/* Read a 32-bit controller register at the given byte offset. */
static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
{
	return readl(pep->base + offset);
}

/* Write a 32-bit controller register at the given byte offset. */
static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
{
	writel(data, pep->base + offset);
}

/*
 * Force the SDMA engine to abort any in-flight RX/TX activity, polling
 * until the abort bits clear (bounded by max_retries so we never hang).
 */
static void abort_dma(struct pxa168_eth_private *pep)
{
	int delay;
	int max_retries = 40;

	do {
		wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
		udelay(100);

		delay = 10;
		while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
		       && delay-- > 0) {
			udelay(10);
		}
	} while (max_retries-- > 0 && delay <= 0);

	if (max_retries <= 0)
		printk(KERN_ERR "%s : DMA Stuck\n", __func__);
}

/* Return the PHY address currently programmed for this port (5 bits/port). */
static int ethernet_phy_get(struct pxa168_eth_private *pep)
{
	unsigned int reg_data;

	reg_data = rdl(pep, PHY_ADDRESS);

	return (reg_data >> (5 * pep->port_num)) & 0x1f;
}

/* Program the PHY address for this port into the PHY_ADDRESS register. */
static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
{
	u32 reg_data;
	int addr_shift = 5 * pep->port_num;

	reg_data = rdl(pep, PHY_ADDRESS);
	reg_data &= ~(0x1f << addr_shift);
	reg_data |= (phy_addr & 0x1f) << addr_shift;
	wrl(pep, PHY_ADDRESS, reg_data);
}

/* Soft-reset the PHY via BMCR and wait for the reset bit to self-clear. */
static void ethernet_phy_reset(struct pxa168_eth_private *pep)
{
	int data;

	data = phy_read(pep->phy, MII_BMCR);
	if (data < 0)
		return;

	data |= BMCR_RESET;
	if (phy_write(pep->phy, MII_BMCR, data) < 0)
		return;

	do {
		data = phy_read(pep->phy, MII_BMCR);
	} while (data >= 0 && data & BMCR_RESET);
}

/*
 * Top up the RX ring with freshly allocated skbs, handing each buffer
 * back to DMA ownership.  If no skb could be allocated at all, arm a
 * timer to retry later.
 */
static void rxq_refill(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct sk_buff *skb;
	struct rx_desc *p_used_rx_desc;
	int used_rx_desc;

	while (pep->rx_desc_count < pep->rx_ring_size) {
		int size;

		skb = dev_alloc_skb(pep->skb_size);
		if (!skb)
			break;
		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);
		pep->rx_desc_count++;
		/* Get 'used' Rx descriptor */
		used_rx_desc = pep->rx_used_desc_q;
		p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
		size = skb->end - skb->data;
		p_used_rx_desc->buf_ptr = dma_map_single(NULL,
							 skb->data,
							 size,
							 DMA_FROM_DEVICE);
		p_used_rx_desc->buf_size = size;
		pep->rx_skb[used_rx_desc] = skb;

		/* Return the descriptor to DMA ownership */
		wmb();
		p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
		wmb();

		/* Move the used descriptor pointer to the next descriptor */
		pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;

		/* Any Rx return cancels the Rx resource error status */
		pep->rx_resource_err = 0;

		skb_reserve(skb, ETH_HW_IP_ALIGN);
	}

	/*
	 * If RX ring is empty of SKB, set a timer to try allocating
	 * again at a later time.
	 */
	if (pep->rx_desc_count == 0) {
		pep->timeout.expires = jiffies + (HZ / 10);
		add_timer(&pep->timeout);
	}
}

/* Timer callback: kick NAPI so the poll loop re-runs rxq_refill(). */
static inline void rxq_refill_timer_wrapper(unsigned long data)
{
	struct pxa168_eth_private *pep = (void *)data;
	napi_schedule(&pep->napi);
}

/* Reverse the bit order within each nibble of a byte (hash helper). */
static inline u8 flip_8_bits(u8 x)
{
	return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
	    | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
	    | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
	    | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
}

/* Swap the high and low nibbles of every byte in the MAC address. */
static void nibble_swap_every_byte(unsigned char *mac_addr)
{
	int i;
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
				((mac_addr[i] & 0xf0) >> 4);
	}
}

/* Bit-reverse each nibble of every byte in the MAC address. */
static void inverse_every_nibble(unsigned char *mac_addr)
{
	int i;
	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = flip_8_bits(mac_addr[i]);
}

/*
 * ----------------------------------------------------------------------------
 * This function will calculate the hash function of the address.
 * Inputs
 * mac_addr_orig    - MAC address.
 * Outputs
 * return the calculated entry.
 */
static u32 hash_function(unsigned char *mac_addr_orig)
{
	u32 hash_result;
	u32 addr0;
	u32 addr1;
	u32 addr2;
	u32 addr3;
	unsigned char mac_addr[ETH_ALEN];

	/* Make a copy of MAC address since we are going to performe bit
	 * operations on it
	 */
	memcpy(mac_addr, mac_addr_orig, ETH_ALEN);

	nibble_swap_every_byte(mac_addr);
	inverse_every_nibble(mac_addr);

	addr0 = (mac_addr[5] >> 2) & 0x3f;
	addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
	addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
	addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);

	/* 11-bit result: 2-bit bank index (addr0) over a 9-bit XOR fold */
	hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
	hash_result = hash_result & 0x07ff;
	return hash_result;
}

/*
 * ----------------------------------------------------------------------------
 * This function will add/del an entry to the address table.
 * Inputs
 * pep - ETHERNET .
 * mac_addr - MAC address.
 * skip - if 1, skip this address.Used in case of deleting an entry which is a
 *	  part of chain in the hash table.We cant just delete the entry since
 *	  that will break the chain.We need to defragment the tables time to
 *	  time.
 * rd   - 0 Discard packet upon match.
 *	- 1 Receive packet upon match.
 * Outputs
 * address table entry is added/deleted.
 * 0 if success.
 * -ENOSPC if table full
 */
static int add_del_hash_entry(struct pxa168_eth_private *pep,
			      unsigned char *mac_addr,
			      u32 rd, u32 skip, int del)
{
	struct addr_table_entry *entry, *start;
	u32 new_high;
	u32 new_low;
	u32 i;

	/* Pack the MAC nibbles plus control bits into the entry's low word */
	new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
	    | (((mac_addr[1] >> 0) & 0xf) << 11)
	    | (((mac_addr[0] >> 4) & 0xf) << 7)
	    | (((mac_addr[0] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 4) & 0x1) << 31)
	    | (((mac_addr[3] >> 0) & 0xf) << 27)
	    | (((mac_addr[2] >> 4) & 0xf) << 23)
	    | (((mac_addr[2] >> 0) & 0xf) << 19)
	    | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
	    | HASH_ENTRY_VALID;

	new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
	    | (((mac_addr[5] >> 0) & 0xf) << 11)
	    | (((mac_addr[4] >> 4) & 0xf) << 7)
	    | (((mac_addr[4] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 5) & 0x7) << 0);

	/*
	 * Pick the appropriate table, start scanning for free/reusable
	 * entries at the index obtained by hashing the specified MAC address
	 */
	start = (struct addr_table_entry *)(pep->htpr);
	entry = start + hash_function(mac_addr);
	for (i = 0; i < HOP_NUMBER; i++) {
		if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
			break;
		} else {
			/* if same address put in same position */
			if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
			     (new_low & 0xfffffff8)) &&
			    (le32_to_cpu(entry->hi) == new_high)) {
				break;
			}
		}
		/* Linear probe, wrapping at the 0x7ff (2K-entry) table end */
		if (entry == start + 0x7ff)
			entry = start;
		else
			entry++;
	}

	/* Deleting an entry that was never present is a no-op */
	if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
	    (le32_to_cpu(entry->hi) != new_high) && del)
		return 0;

	if (i == HOP_NUMBER) {
		if (!del) {
			printk(KERN_INFO "%s: table section is full, need to "
					"move to 16kB implementation?\n",
				 __FILE__);
			return -ENOSPC;
		} else
			return 0;
	}

	/*
	 * Update the selected entry
	 */
	if (del) {
		entry->hi = 0;
		entry->lo = 0;
	} else {
		entry->hi = cpu_to_le32(new_high);
		entry->lo = cpu_to_le32(new_low);
	}

	return 0;
}

/*
 * ----------------------------------------------------------------------------
 *  Create an addressTable entry from MAC address info
 *  found in the specifed net_device struct
 *
 *  Input : pointer to ethernet interface network device structure
 *  Output : N/A
 */
static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
					  unsigned char *oaddr,
					  unsigned char *addr)
{
	/* Delete old entry */
	if (oaddr)
		add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
	/* Add new entry */
	add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
}

/* Allocate (once) and zero the DMA-coherent hash table, then point HTPR at it. */
static int init_hash_table(struct pxa168_eth_private *pep)
{
	/*
	 * Hardware expects CPU to build a hash table based on a predefined
	 * hash function and populate it based on hardware address. The
	 * location of the hash table is identified by 32-bit pointer stored
	 * in HTPR internal register. Two possible sizes exists for the hash
	 * table 8kB (256kB of DRAM required (4 x 64 kB banks)) and 1/2kB
	 * (16kB of DRAM required (4 x 4 kB banks)).We currently only support
	 * 1/2kB.
	 */
	/* TODO: Add support for 8kB hash table and alternative hash
	 * function.Driver can dynamically switch to them if the 1/2kB hash
	 * table is full.
	 */
	if (pep->htpr == NULL) {
		pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
					       HASH_ADDR_TABLE_SIZE,
					       &pep->htpr_dma, GFP_KERNEL);
		if (pep->htpr == NULL)
			return -ENOMEM;
	}
	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	wrl(pep, HTPR, pep->htpr_dma);
	return 0;
}

/* ndo_set_rx_mode: set promiscuous bit and rebuild the hash filter table. */
static void pxa168_eth_set_rx_mode(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u32 val;

	val = rdl(pep, PORT_CONFIG);
	if (dev->flags & IFF_PROMISC)
		val |= PCR_PM;
	else
		val &= ~PCR_PM;
	wrl(pep, PORT_CONFIG, val);

	/*
	 * Remove the old list of MAC address and add dev->addr
	 * and multicast address.
	 */
	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	update_hash_table_mac_address(pep, NULL, dev->dev_addr);

	netdev_for_each_mc_addr(ha, dev)
		update_hash_table_mac_address(pep, NULL, ha->addr);
}

/* ndo_set_mac_address: validate, store, and swap hash-table entries. */
static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned char oldMac[ETH_ALEN];

	if (!is_valid_ether_addr(sa->sa_data))
		return -EINVAL;
	memcpy(oldMac, dev->dev_addr, ETH_ALEN);
	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
	netif_addr_lock_bh(dev);
	update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
	netif_addr_unlock_bh(dev);
	return 0;
}

/* Program descriptor pointers, unmask interrupts, enable port and RX DMA. */
static void eth_port_start(struct net_device *dev)
{
	unsigned int val = 0;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int tx_curr_desc, rx_curr_desc;

	/* Perform PHY reset, if there is a PHY. */
	if (pep->phy != NULL) {
		struct ethtool_cmd cmd;

		pxa168_get_settings(pep->dev, &cmd);
		ethernet_phy_reset(pep);
		pxa168_set_settings(pep->dev, &cmd);
	}

	/* Assignment of Tx CTRP of given queue */
	tx_curr_desc = pep->tx_curr_desc_q;
	wrl(pep, ETH_C_TX_DESC_1,
	    (u32) ((struct tx_desc *)pep->tx_desc_dma + tx_curr_desc));

	/* Assignment of Rx CRDP of given queue */
	rx_curr_desc = pep->rx_curr_desc_q;
	wrl(pep, ETH_C_RX_DESC_0,
	    (u32) ((struct rx_desc *)pep->rx_desc_dma + rx_curr_desc));

	wrl(pep, ETH_F_RX_DESC_0,
	    (u32) ((struct rx_desc *)pep->rx_desc_dma + rx_curr_desc));

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Enable all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, ALL_INTS);

	val = rdl(pep, PORT_CONFIG);
	val |= PCR_EN;
	wrl(pep, PORT_CONFIG, val);

	/* Start RX DMA engine */
	val = rdl(pep, SDMA_CMD);
	val |= SDMA_CMD_ERD;
	wrl(pep, SDMA_CMD, val);
}

/* Mask/clear interrupts, abort DMA, and disable the port. */
static void eth_port_reset(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned int val = 0;

	/* Stop all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, 0);

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Stop RX DMA */
	val = rdl(pep, SDMA_CMD);
	val &= ~SDMA_CMD_ERD;	/* abort dma command */

	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);

	/* Disable port */
	val = rdl(pep, PORT_CONFIG);
	val &= ~PCR_EN;
	wrl(pep, PORT_CONFIG, val);
}

/*
 * txq_reclaim - Free the tx desc data for completed descriptors
 * If force is non-zero, frees uncompleted descriptors as well
 */
static int txq_reclaim(struct net_device *dev, int force)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *desc;
	u32 cmd_sts;
	struct sk_buff *skb;
	int tx_index;
	dma_addr_t addr;
	int count;
	int released = 0;

	netif_tx_lock(dev);

	pep->work_todo &= ~WORK_TX_DONE;
	while (pep->tx_desc_count > 0) {
		tx_index = pep->tx_used_desc_q;
		desc = &pep->p_tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;
		if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
			/* -1 signals "nothing reclaimable yet" to the caller */
			if (released > 0) {
				goto txq_reclaim_end;
			} else {
				released = -1;
				goto txq_reclaim_end;
			}
		}
		pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
		pep->tx_desc_count--;
		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = pep->tx_skb[tx_index];
		if (skb)
			pep->tx_skb[tx_index] = NULL;

		if (cmd_sts & TX_ERROR) {
			if (net_ratelimit())
				printk(KERN_ERR "%s: Error in TX\n", dev->name);
			dev->stats.tx_errors++;
		}
		dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		if (skb)
			dev_kfree_skb_irq(skb);
		released++;
	}
txq_reclaim_end:
	netif_tx_unlock(dev);
	return released;
}

/* ndo_tx_timeout: defer recovery to process context via the workqueue. */
static void pxa168_eth_tx_timeout(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	printk(KERN_INFO "%s: TX timeout desc_count %d\n",
	       dev->name, pep->tx_desc_count);

	schedule_work(&pep->tx_timeout_task);
}

/* Workqueue handler: recover from a TX timeout by restarting the port. */
static void pxa168_eth_tx_timeout_task(struct work_struct *work)
{
	struct pxa168_eth_private *pep = container_of(work,
						 struct pxa168_eth_private,
						 tx_timeout_task);
	struct net_device *dev = pep->dev;
	pxa168_eth_stop(dev);
	pxa168_eth_open(dev);
}

/* NAPI RX path: harvest up to 'budget' completed descriptors, then refill. */
static int rxq_process(struct net_device *dev, int budget)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned int received_packets = 0;
	struct sk_buff *skb;

	while (budget-- > 0) {
		int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;

		/* Do not process Rx ring in case of Rx ring resource error */
		if (pep->rx_resource_err)
			break;
		rx_curr_desc = pep->rx_curr_desc_q;
		rx_used_desc = pep->rx_used_desc_q;
		rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
		cmd_sts = rx_desc->cmd_sts;
		rmb();
		if (cmd_sts & (BUF_OWNED_BY_DMA))
			break;
		skb = pep->rx_skb[rx_curr_desc];
		pep->rx_skb[rx_curr_desc] = NULL;

		rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
		pep->rx_curr_desc_q = rx_next_curr_desc;

		/* Rx descriptors exhausted. */
		/* Set the Rx ring resource error flag */
		if (rx_next_curr_desc == rx_used_desc)
			pep->rx_resource_err = 1;
		pep->rx_desc_count--;
		dma_unmap_single(NULL, rx_desc->buf_ptr,
				 rx_desc->buf_size,
				 DMA_FROM_DEVICE);
		received_packets++;
		/*
		 * Update statistics.
		 * Note byte count includes 4 byte CRC count
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt;
		/*
		 * In case received a packet without first / last bits on OR
		 * the error summary bit is on, the packets needs to be droped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		     (RX_FIRST_DESC | RX_LAST_DESC))
		    || (cmd_sts & RX_ERROR)) {

			stats->rx_dropped++;
			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			    (RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					printk(KERN_ERR
					       "%s: Rx pkt on multiple desc\n",
					       dev->name);
			}
			if (cmd_sts & RX_ERROR)
				stats->rx_errors++;
			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - 4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
		}
		dev->last_rx = jiffies;
	}
	/* Fill RX ring with skb's */
	rxq_refill(dev);
	return received_packets;
}

/* Acknowledge INT_CAUSE bits and record pending work; returns 0 if spurious. */
static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
				     struct net_device *dev)
{
	u32 icr;
	int ret = 0;

	icr = rdl(pep, INT_CAUSE);
	if (icr == 0)
		return IRQ_NONE;

	wrl(pep, INT_CAUSE, ~icr);
	if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
		pep->work_todo |= WORK_TX_DONE;
		ret = 1;
	}
	if (icr & ICR_RXBUF)
		ret = 1;
	if (icr & ICR_MII_CH) {
		pep->work_todo |= WORK_LINK;
		ret = 1;
	}
	return ret;
}

/* React to a link state change reported in PORT_STATUS. */
static void handle_link_event(struct pxa168_eth_private *pep)
{
	struct net_device *dev = pep->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdl(pep, PORT_STATUS);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			printk(KERN_INFO "%s: link down\n", dev->name);
			netif_carrier_off(dev);
			txq_reclaim(dev, 1);
		}
		return;
	}
	if (port_status & PORT_SPEED_100)
		speed = 100;
	else
		speed = 10;

	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
	printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
	       "flow control %sabled\n", dev->name,
	       speed, duplex ? "full" : "half", fc ? "en" : "dis");
	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
}

/* IRQ handler: mask interrupts and hand processing off to NAPI. */
static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (unlikely(!pxa168_eth_collect_events(pep, dev)))
		return IRQ_NONE;
	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	napi_schedule(&pep->napi);
	return IRQ_HANDLED;
}

/* Recompute the RX buffer size needed for the current MTU. */
static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = pep->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	pep->skb_size = (skb_size + 7) & ~7;

	/*
	 * If NET_SKB_PAD is smaller than a cache line,
	 * netdev_alloc_skb() will cause skb->data to be misaligned
	 * to a cache line boundary. If this is the case, include
	 * some extra space to allow re-aligning the data area.
	 */
	pep->skb_size += SKB_DMA_REALIGN;

}

/* Pick the max-frame-length bucket for the skb size and program PORT_CONFIG_EXT. */
static int set_port_config_ext(struct pxa168_eth_private *pep)
{
	int skb_size;

	pxa168_eth_recalc_skb_size(pep);
	if (pep->skb_size <= 1518)
		skb_size = PCXR_MFL_1518;
	else if (pep->skb_size <= 1536)
		skb_size = PCXR_MFL_1536;
	else if (pep->skb_size <= 2048)
		skb_size = PCXR_MFL_2048;
	else
		skb_size = PCXR_MFL_64K;

	/* Extended Port Configuration */
	wrl(pep,
	    PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
	    PCXR_DSCP_EN |		 /* Enable DSCP in IP */
	    skb_size | PCXR_FLP |	 /* do not force link pass */
	    PCXR_TX_HIGH_PRI);		 /* Transmit - high priority queue */

	return 0;
}

/* One-time controller bring-up: quiesce, init hash table, configure SDMA/port. */
static int pxa168_init_hw(struct pxa168_eth_private *pep)
{
	int err = 0;

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);
	/* Initialize address hash table */
	err = init_hash_table(pep);
	if (err)
		return err;
	/* SDMA configuration */
	wrl(pep, SDMA_CONFIG, SDCR_BSZ8 |	/* Burst size = 32 bytes */
	    SDCR_RIFB |				/* Rx interrupt on frame */
	    SDCR_BLMT |				/* Little endian transmit */
	    SDCR_BLMR |				/* Little endian receive */
	    SDCR_RC_MAX_RETRANS);		/* Max retransmit count */
	/* Port Configuration */
	wrl(pep, PORT_CONFIG, PCR_HS);		/* Hash size is 1/2kb */
	set_port_config_ext(pep);

	return err;
}

/* Allocate the RX skb array and DMA-coherent descriptor ring, link the ring. */
static int rxq_init(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct rx_desc *p_rx_desc;
	int size = 0, i = 0;
	int rx_desc_num = pep->rx_ring_size;

	/* Allocate RX skb rings */
	pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
			      GFP_KERNEL);
	if (!pep->rx_skb) {
		printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
		return -ENOMEM;
	}
	/* Allocate RX ring */
	pep->rx_desc_count = 0;
	size = pep->rx_ring_size * sizeof(struct rx_desc);
	pep->rx_desc_area_size = size;
	pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
						 &pep->rx_desc_dma, GFP_KERNEL);
	if (!pep->p_rx_desc_area) {
		printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
		       dev->name, size);
		goto out;
	}
	memset((void *)pep->p_rx_desc_area, 0, size);
	/* initialize the next_desc_ptr links in the Rx descriptors ring */
	p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
	for (i = 0; i < rx_desc_num; i++) {
		p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
		    ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
	}
	/* Save Rx desc pointer to driver struct. */
	pep->rx_curr_desc_q = 0;
	pep->rx_used_desc_q = 0;
	pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
	return 0;
out:
	kfree(pep->rx_skb);
	return -ENOMEM;
}

/* Free RX skbs still on the ring, then the descriptor ring and skb array. */
static void rxq_deinit(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int curr;

	/* Free preallocated skb's on RX rings */
	for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
		if (pep->rx_skb[curr]) {
			dev_kfree_skb(pep->rx_skb[curr]);
			pep->rx_desc_count--;
		}
	}
	if (pep->rx_desc_count)
		printk(KERN_ERR
		       "Error in freeing Rx Ring. %d skb's still\n",
		       pep->rx_desc_count);
	/* Free RX ring */
	if (pep->p_rx_desc_area)
		dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
				  pep->p_rx_desc_area, pep->rx_desc_dma);
	kfree(pep->rx_skb);
}

/* Allocate the TX skb array and DMA-coherent descriptor ring. */
static int txq_init(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *p_tx_desc;
	int size = 0, i = 0;
	int tx_desc_num = pep->tx_ring_size;

	pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
			      GFP_KERNEL);
	if (!pep->tx_skb) {
		printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
		return -ENOMEM;
	}
	/* Allocate TX ring */
	pep->tx_desc_count = 0;
	size = pep->tx_ring_size * sizeof(struct tx_desc);
	pep->tx_desc_area_size = size;
	pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
						 &pep->tx_desc_dma, GFP_KERNEL);
	if (!pep->p_tx_desc_area) {
		printk(KERN_ERR
"%s: Cannot allocate Tx Ring (size %d bytes)\n",10971097+ dev->name, size);10981098+ goto out;10991099+ }11001100+ memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);11011101+ /* Initialize the next_desc_ptr links in the Tx descriptors ring */11021102+ p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;11031103+ for (i = 0; i < tx_desc_num; i++) {11041104+ p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +11051105+ ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);11061106+ }11071107+ pep->tx_curr_desc_q = 0;11081108+ pep->tx_used_desc_q = 0;11091109+ pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);11101110+ return 0;11111111+out:11121112+ kfree(pep->tx_skb);11131113+ return -ENOMEM;11141114+}11151115+11161116+static void txq_deinit(struct net_device *dev)11171117+{11181118+ struct pxa168_eth_private *pep = netdev_priv(dev);11191119+11201120+ /* Free outstanding skb's on TX ring */11211121+ txq_reclaim(dev, 1);11221122+ BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);11231123+ /* Free TX ring */11241124+ if (pep->p_tx_desc_area)11251125+ dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,11261126+ pep->p_tx_desc_area, pep->tx_desc_dma);11271127+ kfree(pep->tx_skb);11281128+}11291129+11301130+static int pxa168_eth_open(struct net_device *dev)11311131+{11321132+ struct pxa168_eth_private *pep = netdev_priv(dev);11331133+ int err;11341134+11351135+ err = request_irq(dev->irq, pxa168_eth_int_handler,11361136+ IRQF_DISABLED, dev->name, dev);11371137+ if (err) {11381138+ dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");11391139+ return -EAGAIN;11401140+ }11411141+ pep->rx_resource_err = 0;11421142+ err = rxq_init(dev);11431143+ if (err != 0)11441144+ goto out_free_irq;11451145+ err = txq_init(dev);11461146+ if (err != 0)11471147+ goto out_free_rx_skb;11481148+ pep->rx_used_desc_q = 0;11491149+ pep->rx_curr_desc_q = 0;11501150+11511151+ /* Fill RX ring with skb's */11521152+ rxq_refill(dev);11531153+ pep->rx_used_desc_q = 
0;11541154+ pep->rx_curr_desc_q = 0;11551155+ netif_carrier_off(dev);11561156+ eth_port_start(dev);11571157+ napi_enable(&pep->napi);11581158+ return 0;11591159+out_free_rx_skb:11601160+ rxq_deinit(dev);11611161+out_free_irq:11621162+ free_irq(dev->irq, dev);11631163+ return err;11641164+}11651165+11661166+static int pxa168_eth_stop(struct net_device *dev)11671167+{11681168+ struct pxa168_eth_private *pep = netdev_priv(dev);11691169+ eth_port_reset(dev);11701170+11711171+ /* Disable interrupts */11721172+ wrl(pep, INT_MASK, 0);11731173+ wrl(pep, INT_CAUSE, 0);11741174+ /* Write to ICR to clear interrupts. */11751175+ wrl(pep, INT_W_CLEAR, 0);11761176+ napi_disable(&pep->napi);11771177+ del_timer_sync(&pep->timeout);11781178+ netif_carrier_off(dev);11791179+ free_irq(dev->irq, dev);11801180+ rxq_deinit(dev);11811181+ txq_deinit(dev);11821182+11831183+ return 0;11841184+}11851185+11861186+static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)11871187+{11881188+ int retval;11891189+ struct pxa168_eth_private *pep = netdev_priv(dev);11901190+11911191+ if ((mtu > 9500) || (mtu < 68))11921192+ return -EINVAL;11931193+11941194+ dev->mtu = mtu;11951195+ retval = set_port_config_ext(pep);11961196+11971197+ if (!netif_running(dev))11981198+ return 0;11991199+12001200+ /*12011201+ * Stop and then re-open the interface. 
This will allocate RX12021202+ * skbs of the new MTU.12031203+ * There is a possible danger that the open will not succeed,12041204+ * due to memory being full.12051205+ */12061206+ pxa168_eth_stop(dev);12071207+ if (pxa168_eth_open(dev)) {12081208+ dev_printk(KERN_ERR, &dev->dev,12091209+ "fatal error on re-opening device after "12101210+ "MTU change\n");12111211+ }12121212+12131213+ return 0;12141214+}12151215+12161216+static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)12171217+{12181218+ int tx_desc_curr;12191219+12201220+ tx_desc_curr = pep->tx_curr_desc_q;12211221+ pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;12221222+ BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);12231223+ pep->tx_desc_count++;12241224+12251225+ return tx_desc_curr;12261226+}12271227+12281228+static int pxa168_rx_poll(struct napi_struct *napi, int budget)12291229+{12301230+ struct pxa168_eth_private *pep =12311231+ container_of(napi, struct pxa168_eth_private, napi);12321232+ struct net_device *dev = pep->dev;12331233+ int work_done = 0;12341234+12351235+ if (unlikely(pep->work_todo & WORK_LINK)) {12361236+ pep->work_todo &= ~(WORK_LINK);12371237+ handle_link_event(pep);12381238+ }12391239+ /*12401240+ * We call txq_reclaim every time since in NAPI interupts are disabled12411241+ * and due to this we miss the TX_DONE interrupt,which is not updated in12421242+ * interrupt status register.12431243+ */12441244+ txq_reclaim(dev, 0);12451245+ if (netif_queue_stopped(dev)12461246+ && pep->tx_ring_size - pep->tx_desc_count > 1) {12471247+ netif_wake_queue(dev);12481248+ }12491249+ work_done = rxq_process(dev, budget);12501250+ if (work_done < budget) {12511251+ napi_complete(napi);12521252+ wrl(pep, INT_MASK, ALL_INTS);12531253+ }12541254+12551255+ return work_done;12561256+}12571257+12581258+static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)12591259+{12601260+ struct pxa168_eth_private *pep = netdev_priv(dev);12611261+ struct 
net_device_stats *stats = &dev->stats;12621262+ struct tx_desc *desc;12631263+ int tx_index;12641264+ int length;12651265+12661266+ tx_index = eth_alloc_tx_desc_index(pep);12671267+ desc = &pep->p_tx_desc_area[tx_index];12681268+ length = skb->len;12691269+ pep->tx_skb[tx_index] = skb;12701270+ desc->byte_cnt = length;12711271+ desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);12721272+ wmb();12731273+ desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |12741274+ TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;12751275+ wmb();12761276+ wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);12771277+12781278+ stats->tx_bytes += skb->len;12791279+ stats->tx_packets++;12801280+ dev->trans_start = jiffies;12811281+ if (pep->tx_ring_size - pep->tx_desc_count <= 1) {12821282+ /* We handled the current skb, but now we are out of space.*/12831283+ netif_stop_queue(dev);12841284+ }12851285+12861286+ return NETDEV_TX_OK;12871287+}12881288+12891289+static int smi_wait_ready(struct pxa168_eth_private *pep)12901290+{12911291+ int i = 0;12921292+12931293+ /* wait for the SMI register to become available */12941294+ for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {12951295+ if (i == PHY_WAIT_ITERATIONS)12961296+ return -ETIMEDOUT;12971297+ msleep(10);12981298+ }12991299+13001300+ return 0;13011301+}13021302+13031303+static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)13041304+{13051305+ struct pxa168_eth_private *pep = bus->priv;13061306+ int i = 0;13071307+ int val;13081308+13091309+ if (smi_wait_ready(pep)) {13101310+ printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");13111311+ return -ETIMEDOUT;13121312+ }13131313+ wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);13141314+ /* now wait for the data to be valid */13151315+ for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {13161316+ if (i == PHY_WAIT_ITERATIONS) {13171317+ printk(KERN_WARNING13181318+ "pxa168_eth: SMI bus read not valid\n");13191319+ return 
-ENODEV;13201320+ }13211321+ msleep(10);13221322+ }13231323+13241324+ return val & 0xffff;13251325+}13261326+13271327+static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,13281328+ u16 value)13291329+{13301330+ struct pxa168_eth_private *pep = bus->priv;13311331+13321332+ if (smi_wait_ready(pep)) {13331333+ printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");13341334+ return -ETIMEDOUT;13351335+ }13361336+13371337+ wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |13381338+ SMI_OP_W | (value & 0xffff));13391339+13401340+ if (smi_wait_ready(pep)) {13411341+ printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");13421342+ return -ETIMEDOUT;13431343+ }13441344+13451345+ return 0;13461346+}13471347+13481348+static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,13491349+ int cmd)13501350+{13511351+ struct pxa168_eth_private *pep = netdev_priv(dev);13521352+ if (pep->phy != NULL)13531353+ return phy_mii_ioctl(pep->phy, if_mii(ifr), cmd);13541354+13551355+ return -EOPNOTSUPP;13561356+}13571357+13581358+static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)13591359+{13601360+ struct mii_bus *bus = pep->smi_bus;13611361+ struct phy_device *phydev;13621362+ int start;13631363+ int num;13641364+ int i;13651365+13661366+ if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {13671367+ /* Scan entire range */13681368+ start = ethernet_phy_get(pep);13691369+ num = 32;13701370+ } else {13711371+ /* Use phy addr specific to platform */13721372+ start = phy_addr & 0x1f;13731373+ num = 1;13741374+ }13751375+ phydev = NULL;13761376+ for (i = 0; i < num; i++) {13771377+ int addr = (start + i) & 0x1f;13781378+ if (bus->phy_map[addr] == NULL)13791379+ mdiobus_scan(bus, addr);13801380+13811381+ if (phydev == NULL) {13821382+ phydev = bus->phy_map[addr];13831383+ if (phydev != NULL)13841384+ ethernet_phy_set_addr(pep, addr);13851385+ }13861386+ }13871387+13881388+ return phydev;13891389+}13901390+13911391+static void 
phy_init(struct pxa168_eth_private *pep, int speed, int duplex)13921392+{13931393+ struct phy_device *phy = pep->phy;13941394+ ethernet_phy_reset(pep);13951395+13961396+ phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);13971397+13981398+ if (speed == 0) {13991399+ phy->autoneg = AUTONEG_ENABLE;14001400+ phy->speed = 0;14011401+ phy->duplex = 0;14021402+ phy->supported &= PHY_BASIC_FEATURES;14031403+ phy->advertising = phy->supported | ADVERTISED_Autoneg;14041404+ } else {14051405+ phy->autoneg = AUTONEG_DISABLE;14061406+ phy->advertising = 0;14071407+ phy->speed = speed;14081408+ phy->duplex = duplex;14091409+ }14101410+ phy_start_aneg(phy);14111411+}14121412+14131413+static int ethernet_phy_setup(struct net_device *dev)14141414+{14151415+ struct pxa168_eth_private *pep = netdev_priv(dev);14161416+14171417+ if (pep->pd != NULL) {14181418+ if (pep->pd->init)14191419+ pep->pd->init();14201420+ }14211421+ pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);14221422+ if (pep->phy != NULL)14231423+ phy_init(pep, pep->pd->speed, pep->pd->duplex);14241424+ update_hash_table_mac_address(pep, NULL, dev->dev_addr);14251425+14261426+ return 0;14271427+}14281428+14291429+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)14301430+{14311431+ struct pxa168_eth_private *pep = netdev_priv(dev);14321432+ int err;14331433+14341434+ err = phy_read_status(pep->phy);14351435+ if (err == 0)14361436+ err = phy_ethtool_gset(pep->phy, cmd);14371437+14381438+ return err;14391439+}14401440+14411441+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)14421442+{14431443+ struct pxa168_eth_private *pep = netdev_priv(dev);14441444+14451445+ return phy_ethtool_sset(pep->phy, cmd);14461446+}14471447+14481448+static void pxa168_get_drvinfo(struct net_device *dev,14491449+ struct ethtool_drvinfo *info)14501450+{14511451+ strncpy(info->driver, DRIVER_NAME, 32);14521452+ strncpy(info->version, DRIVER_VERSION, 32);14531453+ 
strncpy(info->fw_version, "N/A", 32);14541454+ strncpy(info->bus_info, "N/A", 32);14551455+}14561456+14571457+static u32 pxa168_get_link(struct net_device *dev)14581458+{14591459+ return !!netif_carrier_ok(dev);14601460+}14611461+14621462+static const struct ethtool_ops pxa168_ethtool_ops = {14631463+ .get_settings = pxa168_get_settings,14641464+ .set_settings = pxa168_set_settings,14651465+ .get_drvinfo = pxa168_get_drvinfo,14661466+ .get_link = pxa168_get_link,14671467+};14681468+14691469+static const struct net_device_ops pxa168_eth_netdev_ops = {14701470+ .ndo_open = pxa168_eth_open,14711471+ .ndo_stop = pxa168_eth_stop,14721472+ .ndo_start_xmit = pxa168_eth_start_xmit,14731473+ .ndo_set_rx_mode = pxa168_eth_set_rx_mode,14741474+ .ndo_set_mac_address = pxa168_eth_set_mac_address,14751475+ .ndo_validate_addr = eth_validate_addr,14761476+ .ndo_do_ioctl = pxa168_eth_do_ioctl,14771477+ .ndo_change_mtu = pxa168_eth_change_mtu,14781478+ .ndo_tx_timeout = pxa168_eth_tx_timeout,14791479+};14801480+14811481+static int pxa168_eth_probe(struct platform_device *pdev)14821482+{14831483+ struct pxa168_eth_private *pep = NULL;14841484+ struct net_device *dev = NULL;14851485+ struct resource *res;14861486+ struct clk *clk;14871487+ int err;14881488+14891489+ printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");14901490+14911491+ clk = clk_get(&pdev->dev, "MFUCLK");14921492+ if (IS_ERR(clk)) {14931493+ printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",14941494+ DRIVER_NAME);14951495+ return -ENODEV;14961496+ }14971497+ clk_enable(clk);14981498+14991499+ dev = alloc_etherdev(sizeof(struct pxa168_eth_private));15001500+ if (!dev) {15011501+ err = -ENOMEM;15021502+ goto out;15031503+ }15041504+15051505+ platform_set_drvdata(pdev, dev);15061506+ pep = netdev_priv(dev);15071507+ pep->dev = dev;15081508+ pep->clk = clk;15091509+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);15101510+ if (res == NULL) {15111511+ err = -ENODEV;15121512+ goto out;15131513+ }15141514+ 
pep->base = ioremap(res->start, res->end - res->start + 1);15151515+ if (pep->base == NULL) {15161516+ err = -ENOMEM;15171517+ goto out;15181518+ }15191519+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);15201520+ BUG_ON(!res);15211521+ dev->irq = res->start;15221522+ dev->netdev_ops = &pxa168_eth_netdev_ops;15231523+ dev->watchdog_timeo = 2 * HZ;15241524+ dev->base_addr = 0;15251525+ SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);15261526+15271527+ INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);15281528+15291529+ printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME);15301530+ random_ether_addr(dev->dev_addr);15311531+15321532+ pep->pd = pdev->dev.platform_data;15331533+ pep->rx_ring_size = NUM_RX_DESCS;15341534+ if (pep->pd->rx_queue_size)15351535+ pep->rx_ring_size = pep->pd->rx_queue_size;15361536+15371537+ pep->tx_ring_size = NUM_TX_DESCS;15381538+ if (pep->pd->tx_queue_size)15391539+ pep->tx_ring_size = pep->pd->tx_queue_size;15401540+15411541+ pep->port_num = pep->pd->port_number;15421542+ /* Hardware supports only 3 ports */15431543+ BUG_ON(pep->port_num > 2);15441544+ netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);15451545+15461546+ memset(&pep->timeout, 0, sizeof(struct timer_list));15471547+ init_timer(&pep->timeout);15481548+ pep->timeout.function = rxq_refill_timer_wrapper;15491549+ pep->timeout.data = (unsigned long)pep;15501550+15511551+ pep->smi_bus = mdiobus_alloc();15521552+ if (pep->smi_bus == NULL) {15531553+ err = -ENOMEM;15541554+ goto out;15551555+ }15561556+ pep->smi_bus->priv = pep;15571557+ pep->smi_bus->name = "pxa168_eth smi";15581558+ pep->smi_bus->read = pxa168_smi_read;15591559+ pep->smi_bus->write = pxa168_smi_write;15601560+ snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);15611561+ pep->smi_bus->parent = &pdev->dev;15621562+ pep->smi_bus->phy_mask = 0xffffffff;15631563+ if (mdiobus_register(pep->smi_bus) < 0) {15641564+ err = -ENOMEM;15651565+ goto out;15661566+ }15671567+ 
pxa168_init_hw(pep);15681568+ err = ethernet_phy_setup(dev);15691569+ if (err)15701570+ goto out;15711571+ SET_NETDEV_DEV(dev, &pdev->dev);15721572+ err = register_netdev(dev);15731573+ if (err)15741574+ goto out;15751575+ return 0;15761576+out:15771577+ if (pep->clk) {15781578+ clk_disable(pep->clk);15791579+ clk_put(pep->clk);15801580+ pep->clk = NULL;15811581+ }15821582+ if (pep->base) {15831583+ iounmap(pep->base);15841584+ pep->base = NULL;15851585+ }15861586+ if (dev)15871587+ free_netdev(dev);15881588+ return err;15891589+}15901590+15911591+static int pxa168_eth_remove(struct platform_device *pdev)15921592+{15931593+ struct net_device *dev = platform_get_drvdata(pdev);15941594+ struct pxa168_eth_private *pep = netdev_priv(dev);15951595+15961596+ if (pep->htpr) {15971597+ dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,15981598+ pep->htpr, pep->htpr_dma);15991599+ pep->htpr = NULL;16001600+ }16011601+ if (pep->clk) {16021602+ clk_disable(pep->clk);16031603+ clk_put(pep->clk);16041604+ pep->clk = NULL;16051605+ }16061606+ if (pep->phy != NULL)16071607+ phy_detach(pep->phy);16081608+16091609+ iounmap(pep->base);16101610+ pep->base = NULL;16111611+ unregister_netdev(dev);16121612+ flush_scheduled_work();16131613+ free_netdev(dev);16141614+ platform_set_drvdata(pdev, NULL);16151615+ return 0;16161616+}16171617+16181618+static void pxa168_eth_shutdown(struct platform_device *pdev)16191619+{16201620+ struct net_device *dev = platform_get_drvdata(pdev);16211621+ eth_port_reset(dev);16221622+}16231623+16241624+#ifdef CONFIG_PM16251625+static int pxa168_eth_resume(struct platform_device *pdev)16261626+{16271627+ return -ENOSYS;16281628+}16291629+16301630+static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)16311631+{16321632+ return -ENOSYS;16331633+}16341634+16351635+#else16361636+#define pxa168_eth_resume NULL16371637+#define pxa168_eth_suspend NULL16381638+#endif16391639+16401640+static struct platform_driver pxa168_eth_driver 
= {16411641+ .probe = pxa168_eth_probe,16421642+ .remove = pxa168_eth_remove,16431643+ .shutdown = pxa168_eth_shutdown,16441644+ .resume = pxa168_eth_resume,16451645+ .suspend = pxa168_eth_suspend,16461646+ .driver = {16471647+ .name = DRIVER_NAME,16481648+ },16491649+};16501650+16511651+static int __init pxa168_init_module(void)16521652+{16531653+ return platform_driver_register(&pxa168_eth_driver);16541654+}16551655+16561656+static void __exit pxa168_cleanup_module(void)16571657+{16581658+ platform_driver_unregister(&pxa168_eth_driver);16591659+}16601660+16611661+module_init(pxa168_init_module);16621662+module_exit(pxa168_cleanup_module);16631663+16641664+MODULE_LICENSE("GPL");16651665+MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");16661666+MODULE_ALIAS("platform:pxa168_eth");
+30
include/linux/pxa168_eth.h
/*
 * pxa168 ethernet platform device data definition file.
 */
#ifndef __LINUX_PXA168_ETH_H
#define __LINUX_PXA168_ETH_H

/* Board-specific configuration handed to the pxa168_eth driver. */
struct pxa168_eth_platform_data {
	int port_number;
	int phy_addr;

	/*
	 * If speed is 0, then speed and duplex are autonegotiated.
	 */
	int speed;		/* 0, SPEED_10, SPEED_100 */
	int duplex;		/* DUPLEX_HALF or DUPLEX_FULL */

	/*
	 * Override default RX/TX queue sizes if nonzero.
	 */
	int rx_queue_size;
	int tx_queue_size;

	/*
	 * init callback is used for board specific initialization,
	 * e.g. on Aspenite it is used to initialize the PHY transceiver.
	 */
	int (*init)(void);
};

#endif /* __LINUX_PXA168_ETH_H */