Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'ioc3-eth-improvements'

Thomas Bogendoerfer says:

====================
ioc3-eth improvements

In my patch series for splitting out the serial code from ioc3-eth
by using an MFD device there was one big patch for ioc3-eth.c,
which wasn't really useful for review. This series contains the
ioc3-eth changes split into smaller steps and a few more cleanups.
Only the conversion to MFD will be done later in a different series.

Changes in v3:
- no need to check skb == NULL before passing it to dev_kfree_skb_any
- free memory allocated with get_page(s) with free_page(s)
- allocate rx ring with just GFP_KERNEL
- add required alignment for rings in comments

Changes in v2:
- use net_err_ratelimited for printing various ioc3 errors
- added missing clearing of rx buf valid flags into ioc3_alloc_rings
- use __func__ for printing out of memory messages
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+666 -749
+145 -216
arch/mips/include/asm/sn/ioc3.h
··· 3 3 * Copyright (C) 1999, 2000 Ralf Baechle 4 4 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 5 5 */ 6 - #ifndef _IOC3_H 7 - #define _IOC3_H 6 + #ifndef MIPS_SN_IOC3_H 7 + #define MIPS_SN_IOC3_H 8 8 9 9 #include <linux/types.h> 10 10 11 - /* SUPERIO uart register map */ 12 - typedef volatile struct ioc3_uartregs { 13 - union { 14 - volatile u8 rbr; /* read only, DLAB == 0 */ 15 - volatile u8 thr; /* write only, DLAB == 0 */ 16 - volatile u8 dll; /* DLAB == 1 */ 17 - } u1; 18 - union { 19 - volatile u8 ier; /* DLAB == 0 */ 20 - volatile u8 dlm; /* DLAB == 1 */ 21 - } u2; 22 - union { 23 - volatile u8 iir; /* read only */ 24 - volatile u8 fcr; /* write only */ 25 - } u3; 26 - volatile u8 iu_lcr; 27 - volatile u8 iu_mcr; 28 - volatile u8 iu_lsr; 29 - volatile u8 iu_msr; 30 - volatile u8 iu_scr; 31 - } ioc3_uregs_t; 11 + /* serial port register map */ 12 + struct ioc3_serialregs { 13 + u32 sscr; 14 + u32 stpir; 15 + u32 stcir; 16 + u32 srpir; 17 + u32 srcir; 18 + u32 srtr; 19 + u32 shadow; 20 + }; 32 21 33 - #define iu_rbr u1.rbr 34 - #define iu_thr u1.thr 35 - #define iu_dll u1.dll 36 - #define iu_ier u2.ier 37 - #define iu_dlm u2.dlm 38 - #define iu_iir u3.iir 39 - #define iu_fcr u3.fcr 22 + /* SUPERIO uart register map */ 23 + struct ioc3_uartregs { 24 + union { 25 + u8 iu_rbr; /* read only, DLAB == 0 */ 26 + u8 iu_thr; /* write only, DLAB == 0 */ 27 + u8 iu_dll; /* DLAB == 1 */ 28 + }; 29 + union { 30 + u8 iu_ier; /* DLAB == 0 */ 31 + u8 iu_dlm; /* DLAB == 1 */ 32 + }; 33 + union { 34 + u8 iu_iir; /* read only */ 35 + u8 iu_fcr; /* write only */ 36 + }; 37 + u8 iu_lcr; 38 + u8 iu_mcr; 39 + u8 iu_lsr; 40 + u8 iu_msr; 41 + u8 iu_scr; 42 + }; 40 43 41 44 struct ioc3_sioregs { 42 - volatile u8 fill[0x141]; /* starts at 0x141 */ 45 + u8 fill[0x141]; /* starts at 0x141 */ 43 46 44 - volatile u8 uartc; 45 - volatile u8 kbdcg; 47 + u8 uartc; 48 + u8 kbdcg; 46 49 47 - volatile u8 fill0[0x150 - 0x142 - 1]; 50 + u8 fill0[0x150 - 0x142 - 1]; 48 51 49 - volatile u8 
pp_data; 50 - volatile u8 pp_dsr; 51 - volatile u8 pp_dcr; 52 + u8 pp_data; 53 + u8 pp_dsr; 54 + u8 pp_dcr; 52 55 53 - volatile u8 fill1[0x158 - 0x152 - 1]; 56 + u8 fill1[0x158 - 0x152 - 1]; 54 57 55 - volatile u8 pp_fifa; 56 - volatile u8 pp_cfgb; 57 - volatile u8 pp_ecr; 58 + u8 pp_fifa; 59 + u8 pp_cfgb; 60 + u8 pp_ecr; 58 61 59 - volatile u8 fill2[0x168 - 0x15a - 1]; 62 + u8 fill2[0x168 - 0x15a - 1]; 60 63 61 - volatile u8 rtcad; 62 - volatile u8 rtcdat; 64 + u8 rtcad; 65 + u8 rtcdat; 63 66 64 - volatile u8 fill3[0x170 - 0x169 - 1]; 67 + u8 fill3[0x170 - 0x169 - 1]; 65 68 66 69 struct ioc3_uartregs uartb; /* 0x20170 */ 67 70 struct ioc3_uartregs uarta; /* 0x20178 */ 68 71 }; 69 72 73 + struct ioc3_ethregs { 74 + u32 emcr; /* 0x000f0 */ 75 + u32 eisr; /* 0x000f4 */ 76 + u32 eier; /* 0x000f8 */ 77 + u32 ercsr; /* 0x000fc */ 78 + u32 erbr_h; /* 0x00100 */ 79 + u32 erbr_l; /* 0x00104 */ 80 + u32 erbar; /* 0x00108 */ 81 + u32 ercir; /* 0x0010c */ 82 + u32 erpir; /* 0x00110 */ 83 + u32 ertr; /* 0x00114 */ 84 + u32 etcsr; /* 0x00118 */ 85 + u32 ersr; /* 0x0011c */ 86 + u32 etcdc; /* 0x00120 */ 87 + u32 ebir; /* 0x00124 */ 88 + u32 etbr_h; /* 0x00128 */ 89 + u32 etbr_l; /* 0x0012c */ 90 + u32 etcir; /* 0x00130 */ 91 + u32 etpir; /* 0x00134 */ 92 + u32 emar_h; /* 0x00138 */ 93 + u32 emar_l; /* 0x0013c */ 94 + u32 ehar_h; /* 0x00140 */ 95 + u32 ehar_l; /* 0x00144 */ 96 + u32 micr; /* 0x00148 */ 97 + u32 midr_r; /* 0x0014c */ 98 + u32 midr_w; /* 0x00150 */ 99 + }; 100 + 101 + struct ioc3_serioregs { 102 + u32 km_csr; /* 0x0009c */ 103 + u32 k_rd; /* 0x000a0 */ 104 + u32 m_rd; /* 0x000a4 */ 105 + u32 k_wd; /* 0x000a8 */ 106 + u32 m_wd; /* 0x000ac */ 107 + }; 108 + 70 109 /* Register layout of IOC3 in configuration space. 
*/ 71 110 struct ioc3 { 72 - volatile u32 pad0[7]; /* 0x00000 */ 73 - volatile u32 sio_ir; /* 0x0001c */ 74 - volatile u32 sio_ies; /* 0x00020 */ 75 - volatile u32 sio_iec; /* 0x00024 */ 76 - volatile u32 sio_cr; /* 0x00028 */ 77 - volatile u32 int_out; /* 0x0002c */ 78 - volatile u32 mcr; /* 0x00030 */ 111 + /* PCI Config Space registers */ 112 + u32 pci_id; /* 0x00000 */ 113 + u32 pci_scr; /* 0x00004 */ 114 + u32 pci_rev; /* 0x00008 */ 115 + u32 pci_lat; /* 0x0000c */ 116 + u32 pci_addr; /* 0x00010 */ 117 + u32 pci_err_addr_l; /* 0x00014 */ 118 + u32 pci_err_addr_h; /* 0x00018 */ 119 + 120 + u32 sio_ir; /* 0x0001c */ 121 + u32 sio_ies; /* 0x00020 */ 122 + u32 sio_iec; /* 0x00024 */ 123 + u32 sio_cr; /* 0x00028 */ 124 + u32 int_out; /* 0x0002c */ 125 + u32 mcr; /* 0x00030 */ 79 126 80 127 /* General Purpose I/O registers */ 81 - volatile u32 gpcr_s; /* 0x00034 */ 82 - volatile u32 gpcr_c; /* 0x00038 */ 83 - volatile u32 gpdr; /* 0x0003c */ 84 - volatile u32 gppr_0; /* 0x00040 */ 85 - volatile u32 gppr_1; /* 0x00044 */ 86 - volatile u32 gppr_2; /* 0x00048 */ 87 - volatile u32 gppr_3; /* 0x0004c */ 88 - volatile u32 gppr_4; /* 0x00050 */ 89 - volatile u32 gppr_5; /* 0x00054 */ 90 - volatile u32 gppr_6; /* 0x00058 */ 91 - volatile u32 gppr_7; /* 0x0005c */ 92 - volatile u32 gppr_8; /* 0x00060 */ 93 - volatile u32 gppr_9; /* 0x00064 */ 94 - volatile u32 gppr_10; /* 0x00068 */ 95 - volatile u32 gppr_11; /* 0x0006c */ 96 - volatile u32 gppr_12; /* 0x00070 */ 97 - volatile u32 gppr_13; /* 0x00074 */ 98 - volatile u32 gppr_14; /* 0x00078 */ 99 - volatile u32 gppr_15; /* 0x0007c */ 128 + u32 gpcr_s; /* 0x00034 */ 129 + u32 gpcr_c; /* 0x00038 */ 130 + u32 gpdr; /* 0x0003c */ 131 + u32 gppr[16]; /* 0x00040 */ 100 132 101 133 /* Parallel Port Registers */ 102 - volatile u32 ppbr_h_a; /* 0x00080 */ 103 - volatile u32 ppbr_l_a; /* 0x00084 */ 104 - volatile u32 ppcr_a; /* 0x00088 */ 105 - volatile u32 ppcr; /* 0x0008c */ 106 - volatile u32 ppbr_h_b; /* 0x00090 */ 107 - volatile 
u32 ppbr_l_b; /* 0x00094 */ 108 - volatile u32 ppcr_b; /* 0x00098 */ 134 + u32 ppbr_h_a; /* 0x00080 */ 135 + u32 ppbr_l_a; /* 0x00084 */ 136 + u32 ppcr_a; /* 0x00088 */ 137 + u32 ppcr; /* 0x0008c */ 138 + u32 ppbr_h_b; /* 0x00090 */ 139 + u32 ppbr_l_b; /* 0x00094 */ 140 + u32 ppcr_b; /* 0x00098 */ 109 141 110 142 /* Keyboard and Mouse Registers */ 111 - volatile u32 km_csr; /* 0x0009c */ 112 - volatile u32 k_rd; /* 0x000a0 */ 113 - volatile u32 m_rd; /* 0x000a4 */ 114 - volatile u32 k_wd; /* 0x000a8 */ 115 - volatile u32 m_wd; /* 0x000ac */ 143 + struct ioc3_serioregs serio; 116 144 117 145 /* Serial Port Registers */ 118 - volatile u32 sbbr_h; /* 0x000b0 */ 119 - volatile u32 sbbr_l; /* 0x000b4 */ 120 - volatile u32 sscr_a; /* 0x000b8 */ 121 - volatile u32 stpir_a; /* 0x000bc */ 122 - volatile u32 stcir_a; /* 0x000c0 */ 123 - volatile u32 srpir_a; /* 0x000c4 */ 124 - volatile u32 srcir_a; /* 0x000c8 */ 125 - volatile u32 srtr_a; /* 0x000cc */ 126 - volatile u32 shadow_a; /* 0x000d0 */ 127 - volatile u32 sscr_b; /* 0x000d4 */ 128 - volatile u32 stpir_b; /* 0x000d8 */ 129 - volatile u32 stcir_b; /* 0x000dc */ 130 - volatile u32 srpir_b; /* 0x000e0 */ 131 - volatile u32 srcir_b; /* 0x000e4 */ 132 - volatile u32 srtr_b; /* 0x000e8 */ 133 - volatile u32 shadow_b; /* 0x000ec */ 146 + u32 sbbr_h; /* 0x000b0 */ 147 + u32 sbbr_l; /* 0x000b4 */ 148 + struct ioc3_serialregs port_a; 149 + struct ioc3_serialregs port_b; 134 150 135 - /* Ethernet Registers */ 136 - volatile u32 emcr; /* 0x000f0 */ 137 - volatile u32 eisr; /* 0x000f4 */ 138 - volatile u32 eier; /* 0x000f8 */ 139 - volatile u32 ercsr; /* 0x000fc */ 140 - volatile u32 erbr_h; /* 0x00100 */ 141 - volatile u32 erbr_l; /* 0x00104 */ 142 - volatile u32 erbar; /* 0x00108 */ 143 - volatile u32 ercir; /* 0x0010c */ 144 - volatile u32 erpir; /* 0x00110 */ 145 - volatile u32 ertr; /* 0x00114 */ 146 - volatile u32 etcsr; /* 0x00118 */ 147 - volatile u32 ersr; /* 0x0011c */ 148 - volatile u32 etcdc; /* 0x00120 */ 149 - 
volatile u32 ebir; /* 0x00124 */ 150 - volatile u32 etbr_h; /* 0x00128 */ 151 - volatile u32 etbr_l; /* 0x0012c */ 152 - volatile u32 etcir; /* 0x00130 */ 153 - volatile u32 etpir; /* 0x00134 */ 154 - volatile u32 emar_h; /* 0x00138 */ 155 - volatile u32 emar_l; /* 0x0013c */ 156 - volatile u32 ehar_h; /* 0x00140 */ 157 - volatile u32 ehar_l; /* 0x00144 */ 158 - volatile u32 micr; /* 0x00148 */ 159 - volatile u32 midr_r; /* 0x0014c */ 160 - volatile u32 midr_w; /* 0x00150 */ 161 - volatile u32 pad1[(0x20000 - 0x00154) / 4]; 151 + /* Ethernet Registers */ 152 + struct ioc3_ethregs eth; 153 + u32 pad1[(0x20000 - 0x00154) / 4]; 162 154 163 155 /* SuperIO Registers XXX */ 164 156 struct ioc3_sioregs sregs; /* 0x20000 */ 165 - volatile u32 pad2[(0x40000 - 0x20180) / 4]; 157 + u32 pad2[(0x40000 - 0x20180) / 4]; 166 158 167 159 /* SSRAM Diagnostic Access */ 168 - volatile u32 ssram[(0x80000 - 0x40000) / 4]; 160 + u32 ssram[(0x80000 - 0x40000) / 4]; 169 161 170 162 /* Bytebus device offsets 171 163 0x80000 - Access to the generic devices selected with DEV0 ··· 169 177 0xE0000 - Access to the generic devices selected with DEV3 170 178 0xFFFFF bytebus DEV_SEL_3 */ 171 179 }; 180 + 181 + 182 + #define PCI_LAT 0xc /* Latency Timer */ 183 + #define PCI_SCR_DROP_MODE_EN 0x00008000 /* drop pios on parity err */ 184 + #define UARTA_BASE 0x178 185 + #define UARTB_BASE 0x170 186 + 187 + /* 188 + * Bytebus device space 189 + */ 190 + #define IOC3_BYTEBUS_DEV0 0x80000L 191 + #define IOC3_BYTEBUS_DEV1 0xa0000L 192 + #define IOC3_BYTEBUS_DEV2 0xc0000L 193 + #define IOC3_BYTEBUS_DEV3 0xe0000L 172 194 173 195 /* 174 196 * Ethernet RX Buffer ··· 239 233 #define ETXD_B2CNT_MASK 0x7ff00000 240 234 #define ETXD_B2CNT_SHIFT 20 241 235 242 - /* 243 - * Bytebus device space 244 - */ 245 - #define IOC3_BYTEBUS_DEV0 0x80000L 246 - #define IOC3_BYTEBUS_DEV1 0xa0000L 247 - #define IOC3_BYTEBUS_DEV2 0xc0000L 248 - #define IOC3_BYTEBUS_DEV3 0xe0000L 249 - 250 236 /* 
------------------------------------------------------------------------- */ 251 237 252 238 /* Superio Registers (PIO Access) */ 253 239 #define IOC3_SIO_BASE 0x20000 254 240 #define IOC3_SIO_UARTC (IOC3_SIO_BASE+0x141) /* UART Config */ 255 241 #define IOC3_SIO_KBDCG (IOC3_SIO_BASE+0x142) /* KBD Config */ 256 - #define IOC3_SIO_PP_BASE (IOC3_SIO_BASE+PP_BASE) /* Parallel Port */ 242 + #define IOC3_SIO_PP_BASE (IOC3_SIO_BASE+PP_BASE) /* Parallel Port */ 257 243 #define IOC3_SIO_RTC_BASE (IOC3_SIO_BASE+0x168) /* Real Time Clock */ 258 244 #define IOC3_SIO_UB_BASE (IOC3_SIO_BASE+UARTB_BASE) /* UART B */ 259 245 #define IOC3_SIO_UA_BASE (IOC3_SIO_BASE+UARTA_BASE) /* UART A */ 260 246 261 247 /* SSRAM Diagnostic Access */ 262 248 #define IOC3_SSRAM IOC3_RAM_OFF /* base of SSRAM diagnostic access */ 263 - #define IOC3_SSRAM_LEN 0x40000 /* 256kb (address space size, may not be fully populated) */ 249 + #define IOC3_SSRAM_LEN 0x40000 /* 256kb (addrspc sz, may not be populated) */ 264 250 #define IOC3_SSRAM_DM 0x0000ffff /* data mask */ 265 251 #define IOC3_SSRAM_PM 0x00010000 /* parity mask */ 266 252 ··· 292 294 SIO_IR to assert */ 293 295 #define KM_CSR_M_TO_EN 0x00080000 /* KM_CSR_M_TO + KM_CSR_M_TO_EN = cause 294 296 SIO_IR to assert */ 295 - #define KM_CSR_K_CLAMP_ONE 0x00100000 /* Pull K_CLK low after rec. one char */ 296 - #define KM_CSR_M_CLAMP_ONE 0x00200000 /* Pull M_CLK low after rec. one char */ 297 - #define KM_CSR_K_CLAMP_THREE 0x00400000 /* Pull K_CLK low after rec. three chars */ 298 - #define KM_CSR_M_CLAMP_THREE 0x00800000 /* Pull M_CLK low after rec. 
three char */ 297 + #define KM_CSR_K_CLAMP_1 0x00100000 /* Pull K_CLK low aft recv 1 char */ 298 + #define KM_CSR_M_CLAMP_1 0x00200000 /* Pull M_CLK low aft recv 1 char */ 299 + #define KM_CSR_K_CLAMP_3 0x00400000 /* Pull K_CLK low aft recv 3 chars */ 300 + #define KM_CSR_M_CLAMP_3 0x00800000 /* Pull M_CLK low aft recv 3 chars */ 299 301 300 302 /* bitmasks for IOC3_K_RD and IOC3_M_RD */ 301 303 #define KM_RD_DATA_2 0x000000ff /* 3rd char recvd since last read */ ··· 438 440 SIO_IR_PP_INTB | SIO_IR_PP_MEMERR) 439 441 #define SIO_IR_RT (SIO_IR_RT_INT | SIO_IR_GEN_INT1) 440 442 441 - /* macro to load pending interrupts */ 442 - #define IOC3_PENDING_INTRS(mem) (PCI_INW(&((mem)->sio_ir)) & \ 443 - PCI_INW(&((mem)->sio_ies_ro))) 444 - 445 443 /* bitmasks for SIO_CR */ 446 444 #define SIO_CR_SIO_RESET 0x00000001 /* reset the SIO */ 447 445 #define SIO_CR_SER_A_BASE 0x000000fe /* DMA poll addr port A */ ··· 494 500 #define GPCR_UARTB_MODESEL 0x40 /* pin is output to port B mode sel */ 495 501 #define GPCR_UARTA_MODESEL 0x80 /* pin is output to port A mode sel */ 496 502 497 - #define GPPR_PHY_RESET_PIN 5 /* GIO pin controlling phy reset */ 498 - #define GPPR_UARTB_MODESEL_PIN 6 /* GIO pin controlling uart b mode select */ 499 - #define GPPR_UARTA_MODESEL_PIN 7 /* GIO pin controlling uart a mode select */ 503 + #define GPPR_PHY_RESET_PIN 5 /* GIO pin cntrlling phy reset */ 504 + #define GPPR_UARTB_MODESEL_PIN 6 /* GIO pin cntrlling uart b mode sel */ 505 + #define GPPR_UARTA_MODESEL_PIN 7 /* GIO pin cntrlling uart a mode sel */ 500 506 507 + /* ethernet */ 501 508 #define EMCR_DUPLEX 0x00000001 502 509 #define EMCR_PROMISC 0x00000002 503 510 #define EMCR_PADEN 0x00000004 ··· 590 595 591 596 #define MIDR_DATA_MASK 0x0000ffff 592 597 593 - #define ERXBUF_IPCKSUM_MASK 0x0000ffff 594 - #define ERXBUF_BYTECNT_MASK 0x07ff0000 595 - #define ERXBUF_BYTECNT_SHIFT 16 596 - #define ERXBUF_V 0x80000000 597 - 598 - #define ERXBUF_CRCERR 0x00000001 /* aka RSV15 */ 599 - #define 
ERXBUF_FRAMERR 0x00000002 /* aka RSV14 */ 600 - #define ERXBUF_CODERR 0x00000004 /* aka RSV13 */ 601 - #define ERXBUF_INVPREAMB 0x00000008 /* aka RSV18 */ 602 - #define ERXBUF_LOLEN 0x00007000 /* aka RSV2_0 */ 603 - #define ERXBUF_HILEN 0x03ff0000 /* aka RSV12_3 */ 604 - #define ERXBUF_MULTICAST 0x04000000 /* aka RSV16 */ 605 - #define ERXBUF_BROADCAST 0x08000000 /* aka RSV17 */ 606 - #define ERXBUF_LONGEVENT 0x10000000 /* aka RSV19 */ 607 - #define ERXBUF_BADPKT 0x20000000 /* aka RSV20 */ 608 - #define ERXBUF_GOODPKT 0x40000000 /* aka RSV21 */ 609 - #define ERXBUF_CARRIER 0x80000000 /* aka RSV22 */ 610 - 611 - #define ETXD_BYTECNT_MASK 0x000007ff /* total byte count */ 612 - #define ETXD_INTWHENDONE 0x00001000 /* intr when done */ 613 - #define ETXD_D0V 0x00010000 /* data 0 valid */ 614 - #define ETXD_B1V 0x00020000 /* buf 1 valid */ 615 - #define ETXD_B2V 0x00040000 /* buf 2 valid */ 616 - #define ETXD_DOCHECKSUM 0x00080000 /* insert ip cksum */ 617 - #define ETXD_CHKOFF_MASK 0x07f00000 /* cksum byte offset */ 618 - #define ETXD_CHKOFF_SHIFT 20 619 - 620 - #define ETXD_D0CNT_MASK 0x0000007f 621 - #define ETXD_B1CNT_MASK 0x0007ff00 622 - #define ETXD_B1CNT_SHIFT 8 623 - #define ETXD_B2CNT_MASK 0x7ff00000 624 - #define ETXD_B2CNT_SHIFT 20 625 - 626 - typedef enum ioc3_subdevs_e { 627 - ioc3_subdev_ether, 628 - ioc3_subdev_generic, 629 - ioc3_subdev_nic, 630 - ioc3_subdev_kbms, 631 - ioc3_subdev_ttya, 632 - ioc3_subdev_ttyb, 633 - ioc3_subdev_ecpp, 634 - ioc3_subdev_rt, 635 - ioc3_nsubdevs 636 - } ioc3_subdev_t; 637 - 638 - /* subdevice disable bits, 639 - * from the standard INFO_LBL_SUBDEVS 640 - */ 641 - #define IOC3_SDB_ETHER (1<<ioc3_subdev_ether) 642 - #define IOC3_SDB_GENERIC (1<<ioc3_subdev_generic) 643 - #define IOC3_SDB_NIC (1<<ioc3_subdev_nic) 644 - #define IOC3_SDB_KBMS (1<<ioc3_subdev_kbms) 645 - #define IOC3_SDB_TTYA (1<<ioc3_subdev_ttya) 646 - #define IOC3_SDB_TTYB (1<<ioc3_subdev_ttyb) 647 - #define IOC3_SDB_ECPP (1<<ioc3_subdev_ecpp) 648 - #define 
IOC3_SDB_RT (1<<ioc3_subdev_rt) 649 - 650 - #define IOC3_ALL_SUBDEVS ((1<<ioc3_nsubdevs)-1) 651 - 652 - #define IOC3_SDB_SERIAL (IOC3_SDB_TTYA|IOC3_SDB_TTYB) 653 - 654 - #define IOC3_STD_SUBDEVS IOC3_ALL_SUBDEVS 655 - 656 - #define IOC3_INTA_SUBDEVS IOC3_SDB_ETHER 657 - #define IOC3_INTB_SUBDEVS (IOC3_SDB_GENERIC|IOC3_SDB_KBMS|IOC3_SDB_SERIAL|IOC3_SDB_ECPP|IOC3_SDB_RT) 658 - 659 - #endif /* _IOC3_H */ 598 + #endif /* MIPS_SN_IOC3_H */
+3 -2
arch/mips/sgi-ip27/ip27-console.c
··· 35 35 { 36 36 struct ioc3_uartregs *uart = console_uart(); 37 37 38 - while ((uart->iu_lsr & 0x20) == 0); 39 - uart->iu_thr = c; 38 + while ((readb(&uart->iu_lsr) & 0x20) == 0) 39 + ; 40 + writeb(c, &uart->iu_thr); 40 41 }
-13
arch/mips/sgi-ip27/ip27-init.c
··· 130 130 return NASID_TO_COMPACT_NODEID(get_nasid()); 131 131 } 132 132 133 - static inline void ioc3_eth_init(void) 134 - { 135 - struct ioc3 *ioc3; 136 - nasid_t nid; 137 - 138 - nid = get_nasid(); 139 - ioc3 = (struct ioc3 *) KL_CONFIG_CH_CONS_INFO(nid)->memory_base; 140 - 141 - ioc3->eier = 0; 142 - } 143 - 144 133 extern void ip27_reboot_setup(void); 145 134 146 135 void __init plat_mem_setup(void) ··· 170 181 if (n_mode) 171 182 panic("Kernel compiled for N mode."); 172 183 #endif 173 - 174 - ioc3_eth_init(); 175 184 176 185 ioport_resource.start = 0; 177 186 ioport_resource.end = ~0UL;
+518 -518
drivers/net/ethernet/sgi/ioc3-eth.c
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card. 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Driver for SGI's IOC3 based Ethernet cards as found in the PCI card. 7 3 * 8 4 * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle 9 5 * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc. ··· 11 15 * 12 16 * To do: 13 17 * 14 - * o Handle allocation failures in ioc3_alloc_skb() more gracefully. 15 - * o Handle allocation failures in ioc3_init_rings(). 16 18 * o Use prefetching for large packets. What is a good lower limit for 17 19 * prefetching? 18 - * o We're probably allocating a bit too much memory. 19 20 * o Use hardware checksums. 20 21 * o Convert to using a IOC3 meta driver. 21 22 * o Which PHYs might possibly be attached to the IOC3 in real live, ··· 32 39 #include <linux/crc32.h> 33 40 #include <linux/mii.h> 34 41 #include <linux/in.h> 42 + #include <linux/io.h> 35 43 #include <linux/ip.h> 36 44 #include <linux/tcp.h> 37 45 #include <linux/udp.h> 38 - #include <linux/dma-mapping.h> 39 46 #include <linux/gfp.h> 40 47 41 48 #ifdef CONFIG_SERIAL_8250 ··· 48 55 #include <linux/etherdevice.h> 49 56 #include <linux/ethtool.h> 50 57 #include <linux/skbuff.h> 58 + #include <linux/dma-direct.h> 59 + 51 60 #include <net/ip.h> 52 61 53 62 #include <asm/byteorder.h> 54 - #include <asm/io.h> 55 63 #include <asm/pgtable.h> 56 64 #include <linux/uaccess.h> 57 65 #include <asm/sn/types.h> 58 66 #include <asm/sn/ioc3.h> 59 67 #include <asm/pci/bridge.h> 60 68 61 - /* 62 - * 64 RX buffers. This is tunable in the range of 16 <= x < 512. The 63 - * value must be a power of two. 69 + /* Number of RX buffers. This is tunable in the range of 16 <= x < 512. 70 + * The value must be a power of two. 
64 71 */ 65 - #define RX_BUFFS 64 72 + #define RX_BUFFS 64 73 + #define RX_RING_ENTRIES 512 /* fixed in hardware */ 74 + #define RX_RING_MASK (RX_RING_ENTRIES - 1) 75 + #define RX_RING_SIZE (RX_RING_ENTRIES * sizeof(u64)) 66 76 67 - #define ETCSR_FD ((17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21) 68 - #define ETCSR_HD ((21<<ETCSR_IPGR2_SHIFT) | (21<<ETCSR_IPGR1_SHIFT) | 21) 77 + /* 128 TX buffers (not tunable) */ 78 + #define TX_RING_ENTRIES 128 79 + #define TX_RING_MASK (TX_RING_ENTRIES - 1) 80 + #define TX_RING_SIZE (TX_RING_ENTRIES * sizeof(struct ioc3_etxd)) 81 + 82 + /* IOC3 does dma transfers in 128 byte blocks */ 83 + #define IOC3_DMA_XFER_LEN 128UL 84 + 85 + /* Every RX buffer starts with 8 byte descriptor data */ 86 + #define RX_OFFSET (sizeof(struct ioc3_erxbuf) + NET_IP_ALIGN) 87 + #define RX_BUF_SIZE (13 * IOC3_DMA_XFER_LEN) 88 + 89 + #define ETCSR_FD ((21 << ETCSR_IPGR2_SHIFT) | (21 << ETCSR_IPGR1_SHIFT) | 21) 90 + #define ETCSR_HD ((17 << ETCSR_IPGR2_SHIFT) | (11 << ETCSR_IPGR1_SHIFT) | 21) 69 91 70 92 /* Private per NIC data of the driver. 
*/ 71 93 struct ioc3_private { 72 - struct ioc3 *regs; 94 + struct ioc3_ethregs *regs; 95 + struct ioc3 *all_regs; 96 + struct device *dma_dev; 97 + u32 *ssram; 73 98 unsigned long *rxr; /* pointer to receiver ring */ 74 99 struct ioc3_etxd *txr; 75 - struct sk_buff *rx_skbs[512]; 76 - struct sk_buff *tx_skbs[128]; 100 + dma_addr_t rxr_dma; 101 + dma_addr_t txr_dma; 102 + struct sk_buff *rx_skbs[RX_RING_ENTRIES]; 103 + struct sk_buff *tx_skbs[TX_RING_ENTRIES]; 77 104 int rx_ci; /* RX consumer index */ 78 105 int rx_pi; /* RX producer index */ 79 106 int tx_ci; /* TX consumer index */ ··· 115 102 static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); 116 103 static void ioc3_timeout(struct net_device *dev); 117 104 static inline unsigned int ioc3_hash(const unsigned char *addr); 105 + static void ioc3_start(struct ioc3_private *ip); 118 106 static inline void ioc3_stop(struct ioc3_private *ip); 119 107 static void ioc3_init(struct net_device *dev); 108 + static int ioc3_alloc_rx_bufs(struct net_device *dev); 109 + static void ioc3_free_rx_bufs(struct ioc3_private *ip); 110 + static inline void ioc3_clean_tx_ring(struct ioc3_private *ip); 120 111 121 112 static const char ioc3_str[] = "IOC3 Ethernet"; 122 113 static const struct ethtool_ops ioc3_ethtool_ops; 123 114 124 - /* We use this to acquire receive skb's that we can DMA directly into. 
*/ 125 - 126 - #define IOC3_CACHELINE 128UL 127 115 128 116 static inline unsigned long aligned_rx_skb_addr(unsigned long addr) 129 117 { 130 - return (~addr + 1) & (IOC3_CACHELINE - 1UL); 118 + return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL); 131 119 } 132 120 133 - static inline struct sk_buff * ioc3_alloc_skb(unsigned long length, 134 - unsigned int gfp_mask) 121 + static inline int ioc3_alloc_skb(struct ioc3_private *ip, struct sk_buff **skb, 122 + struct ioc3_erxbuf **rxb, dma_addr_t *rxb_dma) 135 123 { 136 - struct sk_buff *skb; 124 + struct sk_buff *new_skb; 125 + dma_addr_t d; 126 + int offset; 137 127 138 - skb = alloc_skb(length + IOC3_CACHELINE - 1, gfp_mask); 139 - if (likely(skb)) { 140 - int offset = aligned_rx_skb_addr((unsigned long) skb->data); 141 - if (offset) 142 - skb_reserve(skb, offset); 128 + new_skb = alloc_skb(RX_BUF_SIZE + IOC3_DMA_XFER_LEN - 1, GFP_ATOMIC); 129 + if (!new_skb) 130 + return -ENOMEM; 131 + 132 + /* ensure buffer is aligned to IOC3_DMA_XFER_LEN */ 133 + offset = aligned_rx_skb_addr((unsigned long)new_skb->data); 134 + if (offset) 135 + skb_reserve(new_skb, offset); 136 + 137 + d = dma_map_single(ip->dma_dev, new_skb->data, 138 + RX_BUF_SIZE, DMA_FROM_DEVICE); 139 + 140 + if (dma_mapping_error(ip->dma_dev, d)) { 141 + dev_kfree_skb_any(new_skb); 142 + return -ENOMEM; 143 143 } 144 + *rxb_dma = d; 145 + *rxb = (struct ioc3_erxbuf *)new_skb->data; 146 + skb_reserve(new_skb, RX_OFFSET); 147 + *skb = new_skb; 144 148 145 - return skb; 149 + return 0; 146 150 } 147 151 148 - static inline unsigned long ioc3_map(void *ptr, unsigned long vdev) 152 + #ifdef CONFIG_PCI_XTALK_BRIDGE 153 + static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr) 149 154 { 150 - #ifdef CONFIG_SGI_IP27 151 - vdev <<= 57; /* Shift to PCI64_ATTR_VIRTUAL */ 152 - 153 - return vdev | (0xaUL << PCI64_ATTR_TARG_SHFT) | PCI64_ATTR_PREF | 154 - ((unsigned long)ptr & TO_PHYS_MASK); 155 - #else 156 - return virt_to_bus(ptr); 157 - #endif 155 + 
return (addr & ~PCI64_ATTR_BAR) | attr; 158 156 } 159 157 160 - /* BEWARE: The IOC3 documentation documents the size of rx buffers as 161 - 1644 while it's actually 1664. This one was nasty to track down ... */ 162 - #define RX_OFFSET 10 163 - #define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + IOC3_CACHELINE) 158 + #define ERBAR_VAL (ERBAR_BARRIER_BIT << ERBAR_RXBARR_SHIFT) 159 + #else 160 + static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr) 161 + { 162 + return addr; 163 + } 164 164 165 - /* DMA barrier to separate cached and uncached accesses. */ 166 - #define BARRIER() \ 167 - __asm__("sync" ::: "memory") 168 - 165 + #define ERBAR_VAL 0 166 + #endif 169 167 170 168 #define IOC3_SIZE 0x100000 171 - 172 - /* 173 - * IOC3 is a big endian device 174 - * 175 - * Unorthodox but makes the users of these macros more readable - the pointer 176 - * to the IOC3's memory mapped registers is expected as struct ioc3 * ioc3 177 - * in the environment. 178 - */ 179 - #define ioc3_r_mcr() be32_to_cpu(ioc3->mcr) 180 - #define ioc3_w_mcr(v) do { ioc3->mcr = cpu_to_be32(v); } while (0) 181 - #define ioc3_w_gpcr_s(v) do { ioc3->gpcr_s = cpu_to_be32(v); } while (0) 182 - #define ioc3_r_emcr() be32_to_cpu(ioc3->emcr) 183 - #define ioc3_w_emcr(v) do { ioc3->emcr = cpu_to_be32(v); } while (0) 184 - #define ioc3_r_eisr() be32_to_cpu(ioc3->eisr) 185 - #define ioc3_w_eisr(v) do { ioc3->eisr = cpu_to_be32(v); } while (0) 186 - #define ioc3_r_eier() be32_to_cpu(ioc3->eier) 187 - #define ioc3_w_eier(v) do { ioc3->eier = cpu_to_be32(v); } while (0) 188 - #define ioc3_r_ercsr() be32_to_cpu(ioc3->ercsr) 189 - #define ioc3_w_ercsr(v) do { ioc3->ercsr = cpu_to_be32(v); } while (0) 190 - #define ioc3_r_erbr_h() be32_to_cpu(ioc3->erbr_h) 191 - #define ioc3_w_erbr_h(v) do { ioc3->erbr_h = cpu_to_be32(v); } while (0) 192 - #define ioc3_r_erbr_l() be32_to_cpu(ioc3->erbr_l) 193 - #define ioc3_w_erbr_l(v) do { ioc3->erbr_l = cpu_to_be32(v); } while (0) 194 - #define ioc3_r_erbar() 
be32_to_cpu(ioc3->erbar) 195 - #define ioc3_w_erbar(v) do { ioc3->erbar = cpu_to_be32(v); } while (0) 196 - #define ioc3_r_ercir() be32_to_cpu(ioc3->ercir) 197 - #define ioc3_w_ercir(v) do { ioc3->ercir = cpu_to_be32(v); } while (0) 198 - #define ioc3_r_erpir() be32_to_cpu(ioc3->erpir) 199 - #define ioc3_w_erpir(v) do { ioc3->erpir = cpu_to_be32(v); } while (0) 200 - #define ioc3_r_ertr() be32_to_cpu(ioc3->ertr) 201 - #define ioc3_w_ertr(v) do { ioc3->ertr = cpu_to_be32(v); } while (0) 202 - #define ioc3_r_etcsr() be32_to_cpu(ioc3->etcsr) 203 - #define ioc3_w_etcsr(v) do { ioc3->etcsr = cpu_to_be32(v); } while (0) 204 - #define ioc3_r_ersr() be32_to_cpu(ioc3->ersr) 205 - #define ioc3_w_ersr(v) do { ioc3->ersr = cpu_to_be32(v); } while (0) 206 - #define ioc3_r_etcdc() be32_to_cpu(ioc3->etcdc) 207 - #define ioc3_w_etcdc(v) do { ioc3->etcdc = cpu_to_be32(v); } while (0) 208 - #define ioc3_r_ebir() be32_to_cpu(ioc3->ebir) 209 - #define ioc3_w_ebir(v) do { ioc3->ebir = cpu_to_be32(v); } while (0) 210 - #define ioc3_r_etbr_h() be32_to_cpu(ioc3->etbr_h) 211 - #define ioc3_w_etbr_h(v) do { ioc3->etbr_h = cpu_to_be32(v); } while (0) 212 - #define ioc3_r_etbr_l() be32_to_cpu(ioc3->etbr_l) 213 - #define ioc3_w_etbr_l(v) do { ioc3->etbr_l = cpu_to_be32(v); } while (0) 214 - #define ioc3_r_etcir() be32_to_cpu(ioc3->etcir) 215 - #define ioc3_w_etcir(v) do { ioc3->etcir = cpu_to_be32(v); } while (0) 216 - #define ioc3_r_etpir() be32_to_cpu(ioc3->etpir) 217 - #define ioc3_w_etpir(v) do { ioc3->etpir = cpu_to_be32(v); } while (0) 218 - #define ioc3_r_emar_h() be32_to_cpu(ioc3->emar_h) 219 - #define ioc3_w_emar_h(v) do { ioc3->emar_h = cpu_to_be32(v); } while (0) 220 - #define ioc3_r_emar_l() be32_to_cpu(ioc3->emar_l) 221 - #define ioc3_w_emar_l(v) do { ioc3->emar_l = cpu_to_be32(v); } while (0) 222 - #define ioc3_r_ehar_h() be32_to_cpu(ioc3->ehar_h) 223 - #define ioc3_w_ehar_h(v) do { ioc3->ehar_h = cpu_to_be32(v); } while (0) 224 - #define ioc3_r_ehar_l() be32_to_cpu(ioc3->ehar_l) 
225 - #define ioc3_w_ehar_l(v) do { ioc3->ehar_l = cpu_to_be32(v); } while (0) 226 - #define ioc3_r_micr() be32_to_cpu(ioc3->micr) 227 - #define ioc3_w_micr(v) do { ioc3->micr = cpu_to_be32(v); } while (0) 228 - #define ioc3_r_midr_r() be32_to_cpu(ioc3->midr_r) 229 - #define ioc3_w_midr_r(v) do { ioc3->midr_r = cpu_to_be32(v); } while (0) 230 - #define ioc3_r_midr_w() be32_to_cpu(ioc3->midr_w) 231 - #define ioc3_w_midr_w(v) do { ioc3->midr_w = cpu_to_be32(v); } while (0) 232 169 233 170 static inline u32 mcr_pack(u32 pulse, u32 sample) 234 171 { 235 172 return (pulse << 10) | (sample << 2); 236 173 } 237 174 238 - static int nic_wait(struct ioc3 *ioc3) 175 + static int nic_wait(u32 __iomem *mcr) 239 176 { 240 - u32 mcr; 177 + u32 m; 241 178 242 - do { 243 - mcr = ioc3_r_mcr(); 244 - } while (!(mcr & 2)); 179 + do { 180 + m = readl(mcr); 181 + } while (!(m & 2)); 245 182 246 - return mcr & 1; 183 + return m & 1; 247 184 } 248 185 249 - static int nic_reset(struct ioc3 *ioc3) 186 + static int nic_reset(u32 __iomem *mcr) 250 187 { 251 - int presence; 188 + int presence; 252 189 253 - ioc3_w_mcr(mcr_pack(500, 65)); 254 - presence = nic_wait(ioc3); 190 + writel(mcr_pack(500, 65), mcr); 191 + presence = nic_wait(mcr); 255 192 256 - ioc3_w_mcr(mcr_pack(0, 500)); 257 - nic_wait(ioc3); 193 + writel(mcr_pack(0, 500), mcr); 194 + nic_wait(mcr); 258 195 259 - return presence; 196 + return presence; 260 197 } 261 198 262 - static inline int nic_read_bit(struct ioc3 *ioc3) 199 + static inline int nic_read_bit(u32 __iomem *mcr) 263 200 { 264 201 int result; 265 202 266 - ioc3_w_mcr(mcr_pack(6, 13)); 267 - result = nic_wait(ioc3); 268 - ioc3_w_mcr(mcr_pack(0, 100)); 269 - nic_wait(ioc3); 203 + writel(mcr_pack(6, 13), mcr); 204 + result = nic_wait(mcr); 205 + writel(mcr_pack(0, 100), mcr); 206 + nic_wait(mcr); 270 207 271 208 return result; 272 209 } 273 210 274 - static inline void nic_write_bit(struct ioc3 *ioc3, int bit) 211 + static inline void nic_write_bit(u32 __iomem *mcr, 
int bit) 275 212 { 276 213 if (bit) 277 - ioc3_w_mcr(mcr_pack(6, 110)); 214 + writel(mcr_pack(6, 110), mcr); 278 215 else 279 - ioc3_w_mcr(mcr_pack(80, 30)); 216 + writel(mcr_pack(80, 30), mcr); 280 217 281 - nic_wait(ioc3); 218 + nic_wait(mcr); 282 219 } 283 220 284 - /* 285 - * Read a byte from an iButton device 221 + /* Read a byte from an iButton device 286 222 */ 287 - static u32 nic_read_byte(struct ioc3 *ioc3) 223 + static u32 nic_read_byte(u32 __iomem *mcr) 288 224 { 289 225 u32 result = 0; 290 226 int i; 291 227 292 228 for (i = 0; i < 8; i++) 293 - result = (result >> 1) | (nic_read_bit(ioc3) << 7); 229 + result = (result >> 1) | (nic_read_bit(mcr) << 7); 294 230 295 231 return result; 296 232 } 297 233 298 - /* 299 - * Write a byte to an iButton device 234 + /* Write a byte to an iButton device 300 235 */ 301 - static void nic_write_byte(struct ioc3 *ioc3, int byte) 236 + static void nic_write_byte(u32 __iomem *mcr, int byte) 302 237 { 303 238 int i, bit; 304 239 ··· 254 293 bit = byte & 1; 255 294 byte >>= 1; 256 295 257 - nic_write_bit(ioc3, bit); 296 + nic_write_bit(mcr, bit); 258 297 } 259 298 } 260 299 261 - static u64 nic_find(struct ioc3 *ioc3, int *last) 300 + static u64 nic_find(u32 __iomem *mcr, int *last) 262 301 { 263 302 int a, b, index, disc; 264 303 u64 address = 0; 265 304 266 - nic_reset(ioc3); 305 + nic_reset(mcr); 267 306 /* Search ROM. */ 268 - nic_write_byte(ioc3, 0xf0); 307 + nic_write_byte(mcr, 0xf0); 269 308 270 309 /* Algorithm from ``Book of iButton Standards''. 
*/ 271 310 for (index = 0, disc = 0; index < 64; index++) { 272 - a = nic_read_bit(ioc3); 273 - b = nic_read_bit(ioc3); 311 + a = nic_read_bit(mcr); 312 + b = nic_read_bit(mcr); 274 313 275 314 if (a && b) { 276 - printk("NIC search failed (not fatal).\n"); 315 + pr_warn("NIC search failed (not fatal).\n"); 277 316 *last = 0; 278 317 return 0; 279 318 } ··· 284 323 } else if (index > *last) { 285 324 address &= ~(1UL << index); 286 325 disc = index; 287 - } else if ((address & (1UL << index)) == 0) 326 + } else if ((address & (1UL << index)) == 0) { 288 327 disc = index; 289 - nic_write_bit(ioc3, address & (1UL << index)); 328 + } 329 + nic_write_bit(mcr, address & (1UL << index)); 290 330 continue; 291 331 } else { 292 332 if (a) 293 333 address |= 1UL << index; 294 334 else 295 335 address &= ~(1UL << index); 296 - nic_write_bit(ioc3, a); 336 + nic_write_bit(mcr, a); 297 337 continue; 298 338 } 299 339 } ··· 304 342 return address; 305 343 } 306 344 307 - static int nic_init(struct ioc3 *ioc3) 345 + static int nic_init(u32 __iomem *mcr) 308 346 { 309 347 const char *unknown = "unknown"; 310 348 const char *type = unknown; ··· 314 352 315 353 while (1) { 316 354 u64 reg; 317 - reg = nic_find(ioc3, &save); 355 + 356 + reg = nic_find(mcr, &save); 318 357 319 358 switch (reg & 0xff) { 320 359 case 0x91: ··· 329 366 continue; 330 367 } 331 368 332 - nic_reset(ioc3); 369 + nic_reset(mcr); 333 370 334 371 /* Match ROM. */ 335 - nic_write_byte(ioc3, 0x55); 372 + nic_write_byte(mcr, 0x55); 336 373 for (i = 0; i < 8; i++) 337 - nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff); 374 + nic_write_byte(mcr, (reg >> (i << 3)) & 0xff); 338 375 339 376 reg >>= 8; /* Shift out type. 
*/ 340 377 for (i = 0; i < 6; i++) { ··· 345 382 break; 346 383 } 347 384 348 - printk("Found %s NIC", type); 385 + pr_info("Found %s NIC", type); 349 386 if (type != unknown) 350 - printk (" registration number %pM, CRC %02x", serial, crc); 351 - printk(".\n"); 387 + pr_cont(" registration number %pM, CRC %02x", serial, crc); 388 + pr_cont(".\n"); 352 389 353 390 return 0; 354 391 } 355 392 356 - /* 357 - * Read the NIC (Number-In-a-Can) device used to store the MAC address on 393 + /* Read the NIC (Number-In-a-Can) device used to store the MAC address on 358 394 * SN0 / SN00 nodeboards and PCI cards. 359 395 */ 360 396 static void ioc3_get_eaddr_nic(struct ioc3_private *ip) 361 397 { 362 - struct ioc3 *ioc3 = ip->regs; 363 - u8 nic[14]; 398 + u32 __iomem *mcr = &ip->all_regs->mcr; 364 399 int tries = 2; /* There may be some problem with the battery? */ 400 + u8 nic[14]; 365 401 int i; 366 402 367 - ioc3_w_gpcr_s(1 << 21); 403 + writel(1 << 21, &ip->all_regs->gpcr_s); 368 404 369 405 while (tries--) { 370 - if (!nic_init(ioc3)) 406 + if (!nic_init(mcr)) 371 407 break; 372 408 udelay(500); 373 409 } 374 410 375 411 if (tries < 0) { 376 - printk("Failed to read MAC address\n"); 412 + pr_err("Failed to read MAC address\n"); 377 413 return; 378 414 } 379 415 380 416 /* Read Memory. */ 381 - nic_write_byte(ioc3, 0xf0); 382 - nic_write_byte(ioc3, 0x00); 383 - nic_write_byte(ioc3, 0x00); 417 + nic_write_byte(mcr, 0xf0); 418 + nic_write_byte(mcr, 0x00); 419 + nic_write_byte(mcr, 0x00); 384 420 385 421 for (i = 13; i >= 0; i--) 386 - nic[i] = nic_read_byte(ioc3); 422 + nic[i] = nic_read_byte(mcr); 387 423 388 424 for (i = 2; i < 8; i++) 389 425 ip->dev->dev_addr[i - 2] = nic[i]; 390 426 } 391 427 392 - /* 393 - * Ok, this is hosed by design. It's necessary to know what machine the 428 + /* Ok, this is hosed by design. It's necessary to know what machine the 394 429 * NIC is in in order to know how to read the NIC address. 
We also have 395 430 * to know if it's a PCI card or a NIC in on the node board ... 396 431 */ ··· 396 435 { 397 436 ioc3_get_eaddr_nic(ip); 398 437 399 - printk("Ethernet address is %pM.\n", ip->dev->dev_addr); 438 + pr_info("Ethernet address is %pM.\n", ip->dev->dev_addr); 400 439 } 401 440 402 441 static void __ioc3_set_mac_address(struct net_device *dev) 403 442 { 404 443 struct ioc3_private *ip = netdev_priv(dev); 405 - struct ioc3 *ioc3 = ip->regs; 406 444 407 - ioc3_w_emar_h((dev->dev_addr[5] << 8) | dev->dev_addr[4]); 408 - ioc3_w_emar_l((dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | 409 - (dev->dev_addr[1] << 8) | dev->dev_addr[0]); 445 + writel((dev->dev_addr[5] << 8) | 446 + dev->dev_addr[4], 447 + &ip->regs->emar_h); 448 + writel((dev->dev_addr[3] << 24) | 449 + (dev->dev_addr[2] << 16) | 450 + (dev->dev_addr[1] << 8) | 451 + dev->dev_addr[0], 452 + &ip->regs->emar_l); 410 453 } 411 454 412 455 static int ioc3_set_mac_address(struct net_device *dev, void *addr) ··· 427 462 return 0; 428 463 } 429 464 430 - /* 431 - * Caller must hold the ioc3_lock ever for MII readers. This is also 465 + /* Caller must hold the ioc3_lock ever for MII readers. This is also 432 466 * used to protect the transmitter side but it's low contention. 
433 467 */ 434 468 static int ioc3_mdio_read(struct net_device *dev, int phy, int reg) 435 469 { 436 470 struct ioc3_private *ip = netdev_priv(dev); 437 - struct ioc3 *ioc3 = ip->regs; 471 + struct ioc3_ethregs *regs = ip->regs; 438 472 439 - while (ioc3_r_micr() & MICR_BUSY); 440 - ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG); 441 - while (ioc3_r_micr() & MICR_BUSY); 473 + while (readl(&regs->micr) & MICR_BUSY) 474 + ; 475 + writel((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG, 476 + &regs->micr); 477 + while (readl(&regs->micr) & MICR_BUSY) 478 + ; 442 479 443 - return ioc3_r_midr_r() & MIDR_DATA_MASK; 480 + return readl(&regs->midr_r) & MIDR_DATA_MASK; 444 481 } 445 482 446 483 static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data) 447 484 { 448 485 struct ioc3_private *ip = netdev_priv(dev); 449 - struct ioc3 *ioc3 = ip->regs; 486 + struct ioc3_ethregs *regs = ip->regs; 450 487 451 - while (ioc3_r_micr() & MICR_BUSY); 452 - ioc3_w_midr_w(data); 453 - ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg); 454 - while (ioc3_r_micr() & MICR_BUSY); 488 + while (readl(&regs->micr) & MICR_BUSY) 489 + ; 490 + writel(data, &regs->midr_w); 491 + writel((phy << MICR_PHYADDR_SHIFT) | reg, &regs->micr); 492 + while (readl(&regs->micr) & MICR_BUSY) 493 + ; 455 494 } 456 495 457 496 static int ioc3_mii_init(struct ioc3_private *ip); ··· 463 494 static struct net_device_stats *ioc3_get_stats(struct net_device *dev) 464 495 { 465 496 struct ioc3_private *ip = netdev_priv(dev); 466 - struct ioc3 *ioc3 = ip->regs; 497 + struct ioc3_ethregs *regs = ip->regs; 467 498 468 - dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK); 499 + dev->stats.collisions += readl(&regs->etcdc) & ETCDC_COLLCNT_MASK; 469 500 return &dev->stats; 470 501 } 471 502 472 - static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len) 503 + static void ioc3_tcpudp_checksum(struct sk_buff *skb, u32 hwsum, int len) 473 504 { 474 505 struct 
ethhdr *eh = eth_hdr(skb); 475 - uint32_t csum, ehsum; 476 506 unsigned int proto; 477 - struct iphdr *ih; 478 - uint16_t *ew; 479 507 unsigned char *cp; 508 + struct iphdr *ih; 509 + u32 csum, ehsum; 510 + u16 *ew; 480 511 481 - /* 482 - * Did hardware handle the checksum at all? The cases we can handle 512 + /* Did hardware handle the checksum at all? The cases we can handle 483 513 * are: 484 514 * 485 515 * - TCP and UDP checksums of IPv4 only. ··· 494 526 if (eh->h_proto != htons(ETH_P_IP)) 495 527 return; 496 528 497 - ih = (struct iphdr *) ((char *)eh + ETH_HLEN); 529 + ih = (struct iphdr *)((char *)eh + ETH_HLEN); 498 530 if (ip_is_fragment(ih)) 499 531 return; 500 532 ··· 505 537 /* Same as tx - compute csum of pseudo header */ 506 538 csum = hwsum + 507 539 (ih->tot_len - (ih->ihl << 2)) + 508 - htons((uint16_t)ih->protocol) + 540 + htons((u16)ih->protocol) + 509 541 (ih->saddr >> 16) + (ih->saddr & 0xffff) + 510 542 (ih->daddr >> 16) + (ih->daddr & 0xffff); 511 543 512 544 /* Sum up ethernet dest addr, src addr and protocol */ 513 - ew = (uint16_t *) eh; 545 + ew = (u16 *)eh; 514 546 ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6]; 515 547 516 548 ehsum = (ehsum & 0xffff) + (ehsum >> 16); ··· 519 551 csum += 0xffff ^ ehsum; 520 552 521 553 /* In the next step we also subtract the 1's complement 522 - checksum of the trailing ethernet CRC. */ 554 + * checksum of the trailing ethernet CRC. 
555 + */ 523 556 cp = (char *)eh + len; /* points at trailing CRC */ 524 557 if (len & 1) { 525 - csum += 0xffff ^ (uint16_t) ((cp[1] << 8) | cp[0]); 526 - csum += 0xffff ^ (uint16_t) ((cp[3] << 8) | cp[2]); 558 + csum += 0xffff ^ (u16)((cp[1] << 8) | cp[0]); 559 + csum += 0xffff ^ (u16)((cp[3] << 8) | cp[2]); 527 560 } else { 528 - csum += 0xffff ^ (uint16_t) ((cp[0] << 8) | cp[1]); 529 - csum += 0xffff ^ (uint16_t) ((cp[2] << 8) | cp[3]); 561 + csum += 0xffff ^ (u16)((cp[0] << 8) | cp[1]); 562 + csum += 0xffff ^ (u16)((cp[2] << 8) | cp[3]); 530 563 } 531 564 532 565 csum = (csum & 0xffff) + (csum >> 16); ··· 541 572 { 542 573 struct ioc3_private *ip = netdev_priv(dev); 543 574 struct sk_buff *skb, *new_skb; 544 - struct ioc3 *ioc3 = ip->regs; 545 575 int rx_entry, n_entry, len; 546 576 struct ioc3_erxbuf *rxb; 547 577 unsigned long *rxr; 578 + dma_addr_t d; 548 579 u32 w0, err; 549 580 550 581 rxr = ip->rxr; /* Ring base */ ··· 552 583 n_entry = ip->rx_pi; 553 584 554 585 skb = ip->rx_skbs[rx_entry]; 555 - rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET); 586 + rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET); 556 587 w0 = be32_to_cpu(rxb->w0); 557 588 558 589 while (w0 & ERXBUF_V) { 559 590 err = be32_to_cpu(rxb->err); /* It's valid ... */ 560 591 if (err & ERXBUF_GOODPKT) { 561 592 len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4; 562 - skb_trim(skb, len); 593 + skb_put(skb, len); 563 594 skb->protocol = eth_type_trans(skb, dev); 564 595 565 - new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); 566 - if (!new_skb) { 596 + if (ioc3_alloc_skb(ip, &new_skb, &rxb, &d)) { 567 597 /* Ouch, drop packet and just recycle packet 568 - to keep the ring filled. */ 598 + * to keep the ring filled. 
599 + */ 569 600 dev->stats.rx_dropped++; 570 601 new_skb = skb; 602 + d = rxr[rx_entry]; 571 603 goto next; 572 604 } 573 605 574 606 if (likely(dev->features & NETIF_F_RXCSUM)) 575 607 ioc3_tcpudp_checksum(skb, 576 - w0 & ERXBUF_IPCKSUM_MASK, len); 608 + w0 & ERXBUF_IPCKSUM_MASK, 609 + len); 610 + 611 + dma_unmap_single(ip->dma_dev, rxr[rx_entry], 612 + RX_BUF_SIZE, DMA_FROM_DEVICE); 577 613 578 614 netif_rx(skb); 579 615 580 616 ip->rx_skbs[rx_entry] = NULL; /* Poison */ 581 617 582 - /* Because we reserve afterwards. */ 583 - skb_put(new_skb, (1664 + RX_OFFSET)); 584 - rxb = (struct ioc3_erxbuf *) new_skb->data; 585 - skb_reserve(new_skb, RX_OFFSET); 586 - 587 618 dev->stats.rx_packets++; /* Statistics */ 588 619 dev->stats.rx_bytes += len; 589 620 } else { 590 621 /* The frame is invalid and the skb never 591 - reached the network layer so we can just 592 - recycle it. */ 622 + * reached the network layer so we can just 623 + * recycle it. 624 + */ 593 625 new_skb = skb; 626 + d = rxr[rx_entry]; 594 627 dev->stats.rx_errors++; 595 628 } 596 629 if (err & ERXBUF_CRCERR) /* Statistics */ 597 630 dev->stats.rx_crc_errors++; 598 631 if (err & ERXBUF_FRAMERR) 599 632 dev->stats.rx_frame_errors++; 633 + 600 634 next: 601 635 ip->rx_skbs[n_entry] = new_skb; 602 - rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1)); 636 + rxr[n_entry] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR)); 603 637 rxb->w0 = 0; /* Clear valid flag */ 604 - n_entry = (n_entry + 1) & 511; /* Update erpir */ 638 + n_entry = (n_entry + 1) & RX_RING_MASK; /* Update erpir */ 605 639 606 640 /* Now go on to the next ring entry. 
*/ 607 - rx_entry = (rx_entry + 1) & 511; 641 + rx_entry = (rx_entry + 1) & RX_RING_MASK; 608 642 skb = ip->rx_skbs[rx_entry]; 609 - rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET); 643 + rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET); 610 644 w0 = be32_to_cpu(rxb->w0); 611 645 } 612 - ioc3_w_erpir((n_entry << 3) | ERPIR_ARM); 646 + writel((n_entry << 3) | ERPIR_ARM, &ip->regs->erpir); 613 647 ip->rx_pi = n_entry; 614 648 ip->rx_ci = rx_entry; 615 649 } ··· 620 648 static inline void ioc3_tx(struct net_device *dev) 621 649 { 622 650 struct ioc3_private *ip = netdev_priv(dev); 651 + struct ioc3_ethregs *regs = ip->regs; 623 652 unsigned long packets, bytes; 624 - struct ioc3 *ioc3 = ip->regs; 625 653 int tx_entry, o_entry; 626 654 struct sk_buff *skb; 627 655 u32 etcir; 628 656 629 657 spin_lock(&ip->ioc3_lock); 630 - etcir = ioc3_r_etcir(); 658 + etcir = readl(&regs->etcir); 631 659 632 - tx_entry = (etcir >> 7) & 127; 660 + tx_entry = (etcir >> 7) & TX_RING_MASK; 633 661 o_entry = ip->tx_ci; 634 662 packets = 0; 635 663 bytes = 0; ··· 641 669 dev_consume_skb_irq(skb); 642 670 ip->tx_skbs[o_entry] = NULL; 643 671 644 - o_entry = (o_entry + 1) & 127; /* Next */ 672 + o_entry = (o_entry + 1) & TX_RING_MASK; /* Next */ 645 673 646 - etcir = ioc3_r_etcir(); /* More pkts sent? */ 647 - tx_entry = (etcir >> 7) & 127; 674 + etcir = readl(&regs->etcir); /* More pkts sent? */ 675 + tx_entry = (etcir >> 7) & TX_RING_MASK; 648 676 } 649 677 650 678 dev->stats.tx_packets += packets; 651 679 dev->stats.tx_bytes += bytes; 652 680 ip->txqlen -= packets; 653 681 654 - if (ip->txqlen < 128) 682 + if (netif_queue_stopped(dev) && ip->txqlen < TX_RING_ENTRIES) 655 683 netif_wake_queue(dev); 656 684 657 685 ip->tx_ci = o_entry; 658 686 spin_unlock(&ip->ioc3_lock); 659 687 } 660 688 661 - /* 662 - * Deal with fatal IOC3 errors. This condition might be caused by a hard or 689 + /* Deal with fatal IOC3 errors. 
This condition might be caused by a hard or 663 690 * software problems, so we should try to recover 664 691 * more gracefully if this ever happens. In theory we might be flooded 665 692 * with such error interrupts if something really goes wrong, so we might ··· 667 696 static void ioc3_error(struct net_device *dev, u32 eisr) 668 697 { 669 698 struct ioc3_private *ip = netdev_priv(dev); 670 - unsigned char *iface = dev->name; 671 699 672 700 spin_lock(&ip->ioc3_lock); 673 701 674 702 if (eisr & EISR_RXOFLO) 675 - printk(KERN_ERR "%s: RX overflow.\n", iface); 703 + net_err_ratelimited("%s: RX overflow.\n", dev->name); 676 704 if (eisr & EISR_RXBUFOFLO) 677 - printk(KERN_ERR "%s: RX buffer overflow.\n", iface); 705 + net_err_ratelimited("%s: RX buffer overflow.\n", dev->name); 678 706 if (eisr & EISR_RXMEMERR) 679 - printk(KERN_ERR "%s: RX PCI error.\n", iface); 707 + net_err_ratelimited("%s: RX PCI error.\n", dev->name); 680 708 if (eisr & EISR_RXPARERR) 681 - printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface); 709 + net_err_ratelimited("%s: RX SSRAM parity error.\n", dev->name); 682 710 if (eisr & EISR_TXBUFUFLO) 683 - printk(KERN_ERR "%s: TX buffer underflow.\n", iface); 711 + net_err_ratelimited("%s: TX buffer underflow.\n", dev->name); 684 712 if (eisr & EISR_TXMEMERR) 685 - printk(KERN_ERR "%s: TX PCI error.\n", iface); 713 + net_err_ratelimited("%s: TX PCI error.\n", dev->name); 686 714 687 715 ioc3_stop(ip); 716 + ioc3_free_rx_bufs(ip); 717 + ioc3_clean_tx_ring(ip); 718 + 688 719 ioc3_init(dev); 720 + if (ioc3_alloc_rx_bufs(dev)) { 721 + netdev_err(dev, "%s: rx buffer allocation failed\n", __func__); 722 + spin_unlock(&ip->ioc3_lock); 723 + return; 724 + } 725 + ioc3_start(ip); 689 726 ioc3_mii_init(ip); 690 727 691 728 netif_wake_queue(dev); ··· 702 723 } 703 724 704 725 /* The interrupt handler does all of the Rx thread work and cleans up 705 - after the Tx thread. 
*/ 706 - static irqreturn_t ioc3_interrupt(int irq, void *_dev) 726 + * after the Tx thread. 727 + */ 728 + static irqreturn_t ioc3_interrupt(int irq, void *dev_id) 707 729 { 708 - struct net_device *dev = (struct net_device *)_dev; 709 - struct ioc3_private *ip = netdev_priv(dev); 710 - struct ioc3 *ioc3 = ip->regs; 711 - const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO | 712 - EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO | 713 - EISR_TXEXPLICIT | EISR_TXMEMERR; 730 + struct ioc3_private *ip = netdev_priv(dev_id); 731 + struct ioc3_ethregs *regs = ip->regs; 714 732 u32 eisr; 715 733 716 - eisr = ioc3_r_eisr() & enabled; 717 - 718 - ioc3_w_eisr(eisr); 719 - (void) ioc3_r_eisr(); /* Flush */ 734 + eisr = readl(&regs->eisr); 735 + writel(eisr, &regs->eisr); 736 + readl(&regs->eisr); /* Flush */ 720 737 721 738 if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR | 722 - EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR)) 723 - ioc3_error(dev, eisr); 739 + EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR)) 740 + ioc3_error(dev_id, eisr); 724 741 if (eisr & EISR_RXTIMERINT) 725 - ioc3_rx(dev); 742 + ioc3_rx(dev_id); 726 743 if (eisr & EISR_TXEXPLICIT) 727 - ioc3_tx(dev); 744 + ioc3_tx(dev_id); 728 745 729 746 return IRQ_HANDLED; 730 747 } 731 748 732 749 static inline void ioc3_setup_duplex(struct ioc3_private *ip) 733 750 { 734 - struct ioc3 *ioc3 = ip->regs; 751 + struct ioc3_ethregs *regs = ip->regs; 752 + 753 + spin_lock_irq(&ip->ioc3_lock); 735 754 736 755 if (ip->mii.full_duplex) { 737 - ioc3_w_etcsr(ETCSR_FD); 756 + writel(ETCSR_FD, &regs->etcsr); 738 757 ip->emcr |= EMCR_DUPLEX; 739 758 } else { 740 - ioc3_w_etcsr(ETCSR_HD); 759 + writel(ETCSR_HD, &regs->etcsr); 741 760 ip->emcr &= ~EMCR_DUPLEX; 742 761 } 743 - ioc3_w_emcr(ip->emcr); 762 + writel(ip->emcr, &regs->emcr); 763 + 764 + spin_unlock_irq(&ip->ioc3_lock); 744 765 } 745 766 746 767 static void ioc3_timer(struct timer_list *t) ··· 751 772 mii_check_media(&ip->mii, 1, 0); 752 773 
ioc3_setup_duplex(ip); 753 774 754 - ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2s */ 775 + ip->ioc3_timer.expires = jiffies + ((12 * HZ) / 10); /* 1.2s */ 755 776 add_timer(&ip->ioc3_timer); 756 777 } 757 778 758 - /* 759 - * Try to find a PHY. There is no apparent relation between the MII addresses 779 + /* Try to find a PHY. There is no apparent relation between the MII addresses 760 780 * in the SGI documentation and what we find in reality, so we simply probe 761 781 * for the PHY. It seems IOC3 PHYs usually live on address 31. One of my 762 782 * onboard IOC3s has the special oddity that probing doesn't seem to find it ··· 764 786 */ 765 787 static int ioc3_mii_init(struct ioc3_private *ip) 766 788 { 767 - int i, found = 0, res = 0; 768 789 int ioc3_phy_workaround = 1; 790 + int i, found = 0, res = 0; 769 791 u16 word; 770 792 771 793 for (i = 0; i < 32; i++) { ··· 778 800 } 779 801 780 802 if (!found) { 781 - if (ioc3_phy_workaround) 803 + if (ioc3_phy_workaround) { 782 804 i = 31; 783 - else { 805 + } else { 784 806 ip->mii.phy_id = -1; 785 807 res = -ENODEV; 786 808 goto out; ··· 795 817 796 818 static void ioc3_mii_start(struct ioc3_private *ip) 797 819 { 798 - ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */ 820 + ip->ioc3_timer.expires = jiffies + (12 * HZ) / 10; /* 1.2 sec. 
*/ 799 821 add_timer(&ip->ioc3_timer); 800 822 } 801 823 802 - static inline void ioc3_clean_rx_ring(struct ioc3_private *ip) 824 + static inline void ioc3_tx_unmap(struct ioc3_private *ip, int entry) 803 825 { 804 - struct sk_buff *skb; 805 - int i; 826 + struct ioc3_etxd *desc; 827 + u32 cmd, bufcnt, len; 806 828 807 - for (i = ip->rx_ci; i & 15; i++) { 808 - ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci]; 809 - ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++]; 829 + desc = &ip->txr[entry]; 830 + cmd = be32_to_cpu(desc->cmd); 831 + bufcnt = be32_to_cpu(desc->bufcnt); 832 + if (cmd & ETXD_B1V) { 833 + len = (bufcnt & ETXD_B1CNT_MASK) >> ETXD_B1CNT_SHIFT; 834 + dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p1), 835 + len, DMA_TO_DEVICE); 810 836 } 811 - ip->rx_pi &= 511; 812 - ip->rx_ci &= 511; 813 - 814 - for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) { 815 - struct ioc3_erxbuf *rxb; 816 - skb = ip->rx_skbs[i]; 817 - rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET); 818 - rxb->w0 = 0; 837 + if (cmd & ETXD_B2V) { 838 + len = (bufcnt & ETXD_B2CNT_MASK) >> ETXD_B2CNT_SHIFT; 839 + dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p2), 840 + len, DMA_TO_DEVICE); 819 841 } 820 842 } 821 843 ··· 824 846 struct sk_buff *skb; 825 847 int i; 826 848 827 - for (i=0; i < 128; i++) { 849 + for (i = 0; i < TX_RING_ENTRIES; i++) { 828 850 skb = ip->tx_skbs[i]; 829 851 if (skb) { 852 + ioc3_tx_unmap(ip, i); 830 853 ip->tx_skbs[i] = NULL; 831 854 dev_kfree_skb_any(skb); 832 855 } ··· 837 858 ip->tx_ci = 0; 838 859 } 839 860 840 - static void ioc3_free_rings(struct ioc3_private *ip) 861 + static void ioc3_free_rx_bufs(struct ioc3_private *ip) 841 862 { 842 - struct sk_buff *skb; 843 863 int rx_entry, n_entry; 864 + struct sk_buff *skb; 844 865 845 - if (ip->txr) { 846 - ioc3_clean_tx_ring(ip); 847 - free_pages((unsigned long)ip->txr, 2); 848 - ip->txr = NULL; 849 - } 866 + n_entry = ip->rx_ci; 867 + rx_entry = ip->rx_pi; 850 868 851 - if (ip->rxr) { 852 - n_entry = ip->rx_ci; 
853 - rx_entry = ip->rx_pi; 854 - 855 - while (n_entry != rx_entry) { 856 - skb = ip->rx_skbs[n_entry]; 857 - if (skb) 858 - dev_kfree_skb_any(skb); 859 - 860 - n_entry = (n_entry + 1) & 511; 869 + while (n_entry != rx_entry) { 870 + skb = ip->rx_skbs[n_entry]; 871 + if (skb) { 872 + dma_unmap_single(ip->dma_dev, 873 + be64_to_cpu(ip->rxr[n_entry]), 874 + RX_BUF_SIZE, DMA_FROM_DEVICE); 875 + dev_kfree_skb_any(skb); 861 876 } 862 - free_page((unsigned long)ip->rxr); 863 - ip->rxr = NULL; 877 + n_entry = (n_entry + 1) & RX_RING_MASK; 864 878 } 865 879 } 866 880 867 - static void ioc3_alloc_rings(struct net_device *dev) 881 + static int ioc3_alloc_rx_bufs(struct net_device *dev) 868 882 { 869 883 struct ioc3_private *ip = netdev_priv(dev); 870 884 struct ioc3_erxbuf *rxb; 871 - unsigned long *rxr; 885 + dma_addr_t d; 872 886 int i; 873 887 874 - if (ip->rxr == NULL) { 875 - /* Allocate and initialize rx ring. 4kb = 512 entries */ 876 - ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC); 877 - rxr = ip->rxr; 878 - if (!rxr) 879 - printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n"); 888 + /* Now the rx buffers. The RX ring may be larger but 889 + * we only allocate 16 buffers for now. Need to tune 890 + * this for performance and memory later. 891 + */ 892 + for (i = 0; i < RX_BUFFS; i++) { 893 + if (ioc3_alloc_skb(ip, &ip->rx_skbs[i], &rxb, &d)) 894 + return -ENOMEM; 880 895 881 - /* Now the rx buffers. The RX ring may be larger but 882 - we only allocate 16 buffers for now. Need to tune 883 - this for performance and memory later. */ 884 - for (i = 0; i < RX_BUFFS; i++) { 885 - struct sk_buff *skb; 886 - 887 - skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); 888 - if (!skb) { 889 - show_free_areas(0, NULL); 890 - continue; 891 - } 892 - 893 - ip->rx_skbs[i] = skb; 894 - 895 - /* Because we reserve afterwards. 
*/ 896 - skb_put(skb, (1664 + RX_OFFSET)); 897 - rxb = (struct ioc3_erxbuf *) skb->data; 898 - rxr[i] = cpu_to_be64(ioc3_map(rxb, 1)); 899 - skb_reserve(skb, RX_OFFSET); 900 - } 901 - ip->rx_ci = 0; 902 - ip->rx_pi = RX_BUFFS; 896 + rxb->w0 = 0; /* Clear valid flag */ 897 + ip->rxr[i] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR)); 903 898 } 899 + ip->rx_ci = 0; 900 + ip->rx_pi = RX_BUFFS; 904 901 905 - if (ip->txr == NULL) { 906 - /* Allocate and initialize tx rings. 16kb = 128 bufs. */ 907 - ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2); 908 - if (!ip->txr) 909 - printk("ioc3_alloc_rings(): __get_free_pages() failed!\n"); 910 - ip->tx_pi = 0; 911 - ip->tx_ci = 0; 912 - } 913 - } 914 - 915 - static void ioc3_init_rings(struct net_device *dev) 916 - { 917 - struct ioc3_private *ip = netdev_priv(dev); 918 - struct ioc3 *ioc3 = ip->regs; 919 - unsigned long ring; 920 - 921 - ioc3_free_rings(ip); 922 - ioc3_alloc_rings(dev); 923 - 924 - ioc3_clean_rx_ring(ip); 925 - ioc3_clean_tx_ring(ip); 926 - 927 - /* Now the rx ring base, consume & produce registers. */ 928 - ring = ioc3_map(ip->rxr, 0); 929 - ioc3_w_erbr_h(ring >> 32); 930 - ioc3_w_erbr_l(ring & 0xffffffff); 931 - ioc3_w_ercir(ip->rx_ci << 3); 932 - ioc3_w_erpir((ip->rx_pi << 3) | ERPIR_ARM); 933 - 934 - ring = ioc3_map(ip->txr, 0); 935 - 936 - ip->txqlen = 0; /* nothing queued */ 937 - 938 - /* Now the tx ring base, consume & produce registers. 
*/ 939 - ioc3_w_etbr_h(ring >> 32); 940 - ioc3_w_etbr_l(ring & 0xffffffff); 941 - ioc3_w_etpir(ip->tx_pi << 7); 942 - ioc3_w_etcir(ip->tx_ci << 7); 943 - (void) ioc3_r_etcir(); /* Flush */ 902 + return 0; 944 903 } 945 904 946 905 static inline void ioc3_ssram_disc(struct ioc3_private *ip) 947 906 { 948 - struct ioc3 *ioc3 = ip->regs; 949 - volatile u32 *ssram0 = &ioc3->ssram[0x0000]; 950 - volatile u32 *ssram1 = &ioc3->ssram[0x4000]; 951 - unsigned int pattern = 0x5555; 907 + struct ioc3_ethregs *regs = ip->regs; 908 + u32 *ssram0 = &ip->ssram[0x0000]; 909 + u32 *ssram1 = &ip->ssram[0x4000]; 910 + u32 pattern = 0x5555; 952 911 953 912 /* Assume the larger size SSRAM and enable parity checking */ 954 - ioc3_w_emcr(ioc3_r_emcr() | (EMCR_BUFSIZ | EMCR_RAMPAR)); 913 + writel(readl(&regs->emcr) | (EMCR_BUFSIZ | EMCR_RAMPAR), &regs->emcr); 914 + readl(&regs->emcr); /* Flush */ 955 915 956 - *ssram0 = pattern; 957 - *ssram1 = ~pattern & IOC3_SSRAM_DM; 916 + writel(pattern, ssram0); 917 + writel(~pattern & IOC3_SSRAM_DM, ssram1); 958 918 959 - if ((*ssram0 & IOC3_SSRAM_DM) != pattern || 960 - (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) { 919 + if ((readl(ssram0) & IOC3_SSRAM_DM) != pattern || 920 + (readl(ssram1) & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) { 961 921 /* set ssram size to 64 KB */ 962 - ip->emcr = EMCR_RAMPAR; 963 - ioc3_w_emcr(ioc3_r_emcr() & ~EMCR_BUFSIZ); 964 - } else 965 - ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR; 922 + ip->emcr |= EMCR_RAMPAR; 923 + writel(readl(&regs->emcr) & ~EMCR_BUFSIZ, &regs->emcr); 924 + } else { 925 + ip->emcr |= EMCR_BUFSIZ | EMCR_RAMPAR; 926 + } 966 927 } 967 928 968 929 static void ioc3_init(struct net_device *dev) 969 930 { 970 931 struct ioc3_private *ip = netdev_priv(dev); 971 - struct ioc3 *ioc3 = ip->regs; 932 + struct ioc3_ethregs *regs = ip->regs; 972 933 973 934 del_timer_sync(&ip->ioc3_timer); /* Kill if running */ 974 935 975 - ioc3_w_emcr(EMCR_RST); /* Reset */ 976 - (void) ioc3_r_emcr(); /* Flush WB 
*/ 936 + writel(EMCR_RST, &regs->emcr); /* Reset */ 937 + readl(&regs->emcr); /* Flush WB */ 977 938 udelay(4); /* Give it time ... */ 978 - ioc3_w_emcr(0); 979 - (void) ioc3_r_emcr(); 939 + writel(0, &regs->emcr); 940 + readl(&regs->emcr); 980 941 981 942 /* Misc registers */ 982 - #ifdef CONFIG_SGI_IP27 983 - ioc3_w_erbar(PCI64_ATTR_BAR >> 32); /* Barrier on last store */ 984 - #else 985 - ioc3_w_erbar(0); /* Let PCI API get it right */ 986 - #endif 987 - (void) ioc3_r_etcdc(); /* Clear on read */ 988 - ioc3_w_ercsr(15); /* RX low watermark */ 989 - ioc3_w_ertr(0); /* Interrupt immediately */ 943 + writel(ERBAR_VAL, &regs->erbar); 944 + readl(&regs->etcdc); /* Clear on read */ 945 + writel(15, &regs->ercsr); /* RX low watermark */ 946 + writel(0, &regs->ertr); /* Interrupt immediately */ 990 947 __ioc3_set_mac_address(dev); 991 - ioc3_w_ehar_h(ip->ehar_h); 992 - ioc3_w_ehar_l(ip->ehar_l); 993 - ioc3_w_ersr(42); /* XXX should be random */ 948 + writel(ip->ehar_h, &regs->ehar_h); 949 + writel(ip->ehar_l, &regs->ehar_l); 950 + writel(42, &regs->ersr); /* XXX should be random */ 951 + } 994 952 995 - ioc3_init_rings(dev); 953 + static void ioc3_start(struct ioc3_private *ip) 954 + { 955 + struct ioc3_ethregs *regs = ip->regs; 956 + unsigned long ring; 957 + 958 + /* Now the rx ring base, consume & produce registers. */ 959 + ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC); 960 + writel(ring >> 32, &regs->erbr_h); 961 + writel(ring & 0xffffffff, &regs->erbr_l); 962 + writel(ip->rx_ci << 3, &regs->ercir); 963 + writel((ip->rx_pi << 3) | ERPIR_ARM, &regs->erpir); 964 + 965 + ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC); 966 + 967 + ip->txqlen = 0; /* nothing queued */ 968 + 969 + /* Now the tx ring base, consume & produce registers. 
*/ 970 + writel(ring >> 32, &regs->etbr_h); 971 + writel(ring & 0xffffffff, &regs->etbr_l); 972 + writel(ip->tx_pi << 7, &regs->etpir); 973 + writel(ip->tx_ci << 7, &regs->etcir); 974 + readl(&regs->etcir); /* Flush */ 996 975 997 976 ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN | 998 - EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN; 999 - ioc3_w_emcr(ip->emcr); 1000 - ioc3_w_eier(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO | 1001 - EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO | 1002 - EISR_TXEXPLICIT | EISR_TXMEMERR); 1003 - (void) ioc3_r_eier(); 977 + EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN; 978 + writel(ip->emcr, &regs->emcr); 979 + writel(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO | 980 + EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO | 981 + EISR_TXEXPLICIT | EISR_TXMEMERR, &regs->eier); 982 + readl(&regs->eier); 1004 983 } 1005 984 1006 985 static inline void ioc3_stop(struct ioc3_private *ip) 1007 986 { 1008 - struct ioc3 *ioc3 = ip->regs; 987 + struct ioc3_ethregs *regs = ip->regs; 1009 988 1010 - ioc3_w_emcr(0); /* Shutup */ 1011 - ioc3_w_eier(0); /* Disable interrupts */ 1012 - (void) ioc3_r_eier(); /* Flush */ 989 + writel(0, &regs->emcr); /* Shutup */ 990 + writel(0, &regs->eier); /* Disable interrupts */ 991 + readl(&regs->eier); /* Flush */ 1013 992 } 1014 993 1015 994 static int ioc3_open(struct net_device *dev) ··· 975 1038 struct ioc3_private *ip = netdev_priv(dev); 976 1039 977 1040 if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) { 978 - printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq); 1041 + netdev_err(dev, "Can't get irq %d\n", dev->irq); 979 1042 980 1043 return -EAGAIN; 981 1044 } 982 1045 983 1046 ip->ehar_h = 0; 984 1047 ip->ehar_l = 0; 1048 + 985 1049 ioc3_init(dev); 1050 + if (ioc3_alloc_rx_bufs(dev)) { 1051 + netdev_err(dev, "%s: rx buffer allocation failed\n", __func__); 1052 + return -ENOMEM; 1053 + } 1054 + ioc3_start(ip); 986 1055 ioc3_mii_start(ip); 987 
1056 988 1057 netif_start_queue(dev); ··· 1006 1063 ioc3_stop(ip); 1007 1064 free_irq(dev->irq, dev); 1008 1065 1009 - ioc3_free_rings(ip); 1066 + ioc3_free_rx_bufs(ip); 1067 + ioc3_clean_tx_ring(ip); 1068 + 1010 1069 return 0; 1011 1070 } 1012 1071 1013 - /* 1014 - * MENET cards have four IOC3 chips, which are attached to two sets of 1072 + /* MENET cards have four IOC3 chips, which are attached to two sets of 1015 1073 * PCI slot resources each: the primary connections are on slots 1016 1074 * 0..3 and the secondaries are on 4..7 1017 1075 * ··· 1029 1085 1030 1086 if (dev) { 1031 1087 if (dev->vendor == PCI_VENDOR_ID_SGI && 1032 - dev->device == PCI_DEVICE_ID_SGI_IOC3) 1088 + dev->device == PCI_DEVICE_ID_SGI_IOC3) 1033 1089 ret = 1; 1034 1090 pci_dev_put(dev); 1035 1091 } ··· 1039 1095 1040 1096 static int ioc3_is_menet(struct pci_dev *pdev) 1041 1097 { 1042 - return pdev->bus->parent == NULL && 1098 + return !pdev->bus->parent && 1043 1099 ioc3_adjacent_is_ioc3(pdev, 0) && 1044 1100 ioc3_adjacent_is_ioc3(pdev, 1) && 1045 1101 ioc3_adjacent_is_ioc3(pdev, 2); 1046 1102 } 1047 1103 1048 1104 #ifdef CONFIG_SERIAL_8250 1049 - /* 1050 - * Note about serial ports and consoles: 1105 + /* Note about serial ports and consoles: 1051 1106 * For console output, everyone uses the IOC3 UARTA (offset 0x178) 1052 1107 * connected to the master node (look in ip27_setup_console() and 1053 1108 * ip27prom_console_write()). 
··· 1083 1140 #define COSMISC_CONSTANT 6 1084 1141 1085 1142 struct uart_8250_port port = { 1086 - .port = { 1143 + .port = { 1087 1144 .irq = 0, 1088 1145 .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF, 1089 1146 .iotype = UPIO_MEM, 1090 1147 .regshift = 0, 1091 1148 .uartclk = (22000000 << 1) / COSMISC_CONSTANT, 1092 1149 1093 - .membase = (unsigned char __iomem *) uart, 1094 - .mapbase = (unsigned long) uart, 1095 - } 1150 + .membase = (unsigned char __iomem *)uart, 1151 + .mapbase = (unsigned long)uart, 1152 + } 1096 1153 }; 1097 1154 unsigned char lcr; 1098 1155 1099 - lcr = uart->iu_lcr; 1100 - uart->iu_lcr = lcr | UART_LCR_DLAB; 1101 - uart->iu_scr = COSMISC_CONSTANT, 1102 - uart->iu_lcr = lcr; 1103 - uart->iu_lcr; 1156 + lcr = readb(&uart->iu_lcr); 1157 + writeb(lcr | UART_LCR_DLAB, &uart->iu_lcr); 1158 + writeb(COSMISC_CONSTANT, &uart->iu_scr); 1159 + writeb(lcr, &uart->iu_lcr); 1160 + readb(&uart->iu_lcr); 1104 1161 serial8250_register_8250_port(&port); 1105 1162 } 1106 1163 1107 1164 static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3) 1108 1165 { 1109 - /* 1110 - * We need to recognice and treat the fourth MENET serial as it 1166 + u32 sio_iec; 1167 + 1168 + /* We need to recognice and treat the fourth MENET serial as it 1111 1169 * does not have an SuperIO chip attached to it, therefore attempting 1112 1170 * to access it will result in bus errors. We call something an 1113 1171 * MENET if PCI slot 0, 1, 2 and 3 of a master PCI bus all have an IOC3 ··· 1119 1175 if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3) 1120 1176 return; 1121 1177 1122 - /* 1123 - * Switch IOC3 to PIO mode. It probably already was but let's be 1178 + /* Switch IOC3 to PIO mode. 
It probably already was but let's be 1124 1179 * paranoid 1125 1180 */ 1126 - ioc3->gpcr_s = GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL; 1127 - ioc3->gpcr_s; 1128 - ioc3->gppr_6 = 0; 1129 - ioc3->gppr_6; 1130 - ioc3->gppr_7 = 0; 1131 - ioc3->gppr_7; 1132 - ioc3->sscr_a = ioc3->sscr_a & ~SSCR_DMA_EN; 1133 - ioc3->sscr_a; 1134 - ioc3->sscr_b = ioc3->sscr_b & ~SSCR_DMA_EN; 1135 - ioc3->sscr_b; 1181 + writel(GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL, &ioc3->gpcr_s); 1182 + readl(&ioc3->gpcr_s); 1183 + writel(0, &ioc3->gppr[6]); 1184 + readl(&ioc3->gppr[6]); 1185 + writel(0, &ioc3->gppr[7]); 1186 + readl(&ioc3->gppr[7]); 1187 + writel(readl(&ioc3->port_a.sscr) & ~SSCR_DMA_EN, &ioc3->port_a.sscr); 1188 + readl(&ioc3->port_a.sscr); 1189 + writel(readl(&ioc3->port_b.sscr) & ~SSCR_DMA_EN, &ioc3->port_b.sscr); 1190 + readl(&ioc3->port_b.sscr); 1136 1191 /* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC. */ 1137 - ioc3->sio_iec &= ~ (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL | 1138 - SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER | 1139 - SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS | 1140 - SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR); 1141 - ioc3->sio_iec |= SIO_IR_SA_INT; 1142 - ioc3->sscr_a = 0; 1143 - ioc3->sio_iec &= ~ (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL | 1144 - SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER | 1145 - SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS | 1146 - SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR); 1147 - ioc3->sio_iec |= SIO_IR_SB_INT; 1148 - ioc3->sscr_b = 0; 1192 + sio_iec = readl(&ioc3->sio_iec); 1193 + sio_iec &= ~(SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL | 1194 + SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER | 1195 + SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS | 1196 + SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR); 1197 + sio_iec |= SIO_IR_SA_INT; 1198 + sio_iec &= ~(SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL | 1199 + SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER | 1200 + SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS | 1201 + SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR); 1202 + sio_iec |= SIO_IR_SB_INT; 1203 + 
writel(sio_iec, &ioc3->sio_iec); 1204 + writel(0, &ioc3->port_a.sscr); 1205 + writel(0, &ioc3->port_b.sscr); 1149 1206 1150 1207 ioc3_8250_register(&ioc3->sregs.uarta); 1151 1208 ioc3_8250_register(&ioc3->sregs.uartb); ··· 1181 1236 pci_using_dac = 1; 1182 1237 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1183 1238 if (err < 0) { 1184 - printk(KERN_ERR "%s: Unable to obtain 64 bit DMA " 1185 - "for consistent allocations\n", pci_name(pdev)); 1239 + pr_err("%s: Unable to obtain 64 bit DMA for consistent allocations\n", 1240 + pci_name(pdev)); 1186 1241 goto out; 1187 1242 } 1188 1243 } else { 1189 1244 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1190 1245 if (err) { 1191 - printk(KERN_ERR "%s: No usable DMA configuration, " 1192 - "aborting.\n", pci_name(pdev)); 1246 + pr_err("%s: No usable DMA configuration, aborting.\n", 1247 + pci_name(pdev)); 1193 1248 goto out; 1194 1249 } 1195 1250 pci_using_dac = 0; ··· 1215 1270 1216 1271 ip = netdev_priv(dev); 1217 1272 ip->dev = dev; 1273 + ip->dma_dev = &pdev->dev; 1218 1274 1219 1275 dev->irq = pdev->irq; 1220 1276 1221 1277 ioc3_base = pci_resource_start(pdev, 0); 1222 1278 ioc3_size = pci_resource_len(pdev, 0); 1223 - ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size); 1279 + ioc3 = (struct ioc3 *)ioremap(ioc3_base, ioc3_size); 1224 1280 if (!ioc3) { 1225 - printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n", 1281 + pr_err("ioc3eth(%s): ioremap failed, goodbye.\n", 1226 1282 pci_name(pdev)); 1227 1283 err = -ENOMEM; 1228 1284 goto out_res; 1229 1285 } 1230 - ip->regs = ioc3; 1286 + ip->regs = &ioc3->eth; 1287 + ip->ssram = ioc3->ssram; 1288 + ip->all_regs = ioc3; 1231 1289 1232 1290 #ifdef CONFIG_SERIAL_8250 1233 1291 ioc3_serial_probe(pdev, ioc3); ··· 1240 1292 timer_setup(&ip->ioc3_timer, ioc3_timer, 0); 1241 1293 1242 1294 ioc3_stop(ip); 1295 + 1296 + /* Allocate rx ring. 
4kb = 512 entries, must be 4kb aligned */ 1297 + ip->rxr = dma_direct_alloc_pages(ip->dma_dev, RX_RING_SIZE, 1298 + &ip->rxr_dma, GFP_ATOMIC, 0); 1299 + if (!ip->rxr) { 1300 + pr_err("ioc3-eth: rx ring allocation failed\n"); 1301 + err = -ENOMEM; 1302 + goto out_stop; 1303 + } 1304 + 1305 + /* Allocate tx rings. 16kb = 128 bufs, must be 16kb aligned */ 1306 + ip->txr = dma_direct_alloc_pages(ip->dma_dev, TX_RING_SIZE, 1307 + &ip->txr_dma, 1308 + GFP_KERNEL | __GFP_ZERO, 0); 1309 + if (!ip->txr) { 1310 + pr_err("ioc3-eth: tx ring allocation failed\n"); 1311 + err = -ENOMEM; 1312 + goto out_stop; 1313 + } 1314 + 1243 1315 ioc3_init(dev); 1244 1316 1245 1317 ip->pdev = pdev; ··· 1273 1305 ioc3_mii_init(ip); 1274 1306 1275 1307 if (ip->mii.phy_id == -1) { 1276 - printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n", 1308 + pr_err("ioc3-eth(%s): Didn't find a PHY, goodbye.\n", 1277 1309 pci_name(pdev)); 1278 1310 err = -ENODEV; 1279 1311 goto out_stop; ··· 1303 1335 vendor = (sw_physid1 << 12) | (sw_physid2 >> 4); 1304 1336 model = (sw_physid2 >> 4) & 0x3f; 1305 1337 rev = sw_physid2 & 0xf; 1306 - printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, " 1307 - "rev %d.\n", dev->name, ip->mii.phy_id, vendor, model, rev); 1308 - printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name, 1309 - ip->emcr & EMCR_BUFSIZ ? 128 : 64); 1338 + netdev_info(dev, "Using PHY %d, vendor 0x%x, model %d, rev %d.\n", 1339 + ip->mii.phy_id, vendor, model, rev); 1340 + netdev_info(dev, "IOC3 SSRAM has %d kbyte.\n", 1341 + ip->emcr & EMCR_BUFSIZ ? 
128 : 64); 1310 1342 1311 1343 return 0; 1312 1344 1313 1345 out_stop: 1314 - ioc3_stop(ip); 1315 1346 del_timer_sync(&ip->ioc3_timer); 1316 - ioc3_free_rings(ip); 1347 + if (ip->rxr) 1348 + dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr, 1349 + ip->rxr_dma, 0); 1350 + if (ip->txr) 1351 + dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr, 1352 + ip->txr_dma, 0); 1317 1353 out_res: 1318 1354 pci_release_regions(pdev); 1319 1355 out_free: 1320 1356 free_netdev(dev); 1321 1357 out_disable: 1322 - /* 1323 - * We should call pci_disable_device(pdev); here if the IOC3 wasn't 1358 + /* We should call pci_disable_device(pdev); here if the IOC3 wasn't 1324 1359 * such a weird device ... 1325 1360 */ 1326 1361 out: ··· 1334 1363 { 1335 1364 struct net_device *dev = pci_get_drvdata(pdev); 1336 1365 struct ioc3_private *ip = netdev_priv(dev); 1337 - struct ioc3 *ioc3 = ip->regs; 1366 + 1367 + dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr, 1368 + ip->rxr_dma, 0); 1369 + dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr, 1370 + ip->txr_dma, 0); 1338 1371 1339 1372 unregister_netdev(dev); 1340 1373 del_timer_sync(&ip->ioc3_timer); 1341 1374 1342 - iounmap(ioc3); 1375 + iounmap(ip->all_regs); 1343 1376 pci_release_regions(pdev); 1344 1377 free_netdev(dev); 1345 - /* 1346 - * We should call pci_disable_device(pdev); here if the IOC3 wasn't 1378 + /* We should call pci_disable_device(pdev); here if the IOC3 wasn't 1347 1379 * such a weird device ... 
1348 1380 */ 1349 1381 } ··· 1366 1392 1367 1393 static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) 1368 1394 { 1369 - unsigned long data; 1370 1395 struct ioc3_private *ip = netdev_priv(dev); 1371 - struct ioc3 *ioc3 = ip->regs; 1372 - unsigned int len; 1373 1396 struct ioc3_etxd *desc; 1374 - uint32_t w0 = 0; 1397 + unsigned long data; 1398 + unsigned int len; 1375 1399 int produce; 1400 + u32 w0 = 0; 1376 1401 1377 - /* 1378 - * IOC3 has a fairly simple minded checksumming hardware which simply 1402 + /* IOC3 has a fairly simple minded checksumming hardware which simply 1379 1403 * adds up the 1's complement checksum for the entire packet and 1380 1404 * inserts it at an offset which can be specified in the descriptor 1381 1405 * into the transmit packet. This means we have to compensate for the ··· 1384 1412 const struct iphdr *ih = ip_hdr(skb); 1385 1413 const int proto = ntohs(ih->protocol); 1386 1414 unsigned int csoff; 1387 - uint32_t csum, ehsum; 1388 - uint16_t *eh; 1415 + u32 csum, ehsum; 1416 + u16 *eh; 1389 1417 1390 1418 /* The MAC header. skb->mac seem the logic approach 1391 - to find the MAC header - except it's a NULL pointer ... */ 1392 - eh = (uint16_t *) skb->data; 1419 + * to find the MAC header - except it's a NULL pointer ... 1420 + */ 1421 + eh = (u16 *)skb->data; 1393 1422 1394 1423 /* Sum up dest addr, src addr and protocol */ 1395 1424 ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6]; 1396 1425 1397 - /* Fold ehsum. can't use csum_fold which negates also ... 
*/ 1398 - ehsum = (ehsum & 0xffff) + (ehsum >> 16); 1399 - ehsum = (ehsum & 0xffff) + (ehsum >> 16); 1400 - 1401 1426 /* Skip IP header; it's sum is always zero and was 1402 - already filled in by ip_output.c */ 1427 + * already filled in by ip_output.c 1428 + */ 1403 1429 csum = csum_tcpudp_nofold(ih->saddr, ih->daddr, 1404 - ih->tot_len - (ih->ihl << 2), 1405 - proto, 0xffff ^ ehsum); 1430 + ih->tot_len - (ih->ihl << 2), 1431 + proto, csum_fold(ehsum)); 1406 1432 1407 1433 csum = (csum & 0xffff) + (csum >> 16); /* Fold again */ 1408 1434 csum = (csum & 0xffff) + (csum >> 16); ··· 1420 1450 1421 1451 spin_lock_irq(&ip->ioc3_lock); 1422 1452 1423 - data = (unsigned long) skb->data; 1453 + data = (unsigned long)skb->data; 1424 1454 len = skb->len; 1425 1455 1426 1456 produce = ip->tx_pi; ··· 1440 1470 unsigned long b2 = (data | 0x3fffUL) + 1UL; 1441 1471 unsigned long s1 = b2 - data; 1442 1472 unsigned long s2 = data + len - b2; 1473 + dma_addr_t d1, d2; 1443 1474 1444 1475 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | 1445 - ETXD_B1V | ETXD_B2V | w0); 1476 + ETXD_B1V | ETXD_B2V | w0); 1446 1477 desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) | 1447 - (s2 << ETXD_B2CNT_SHIFT)); 1448 - desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1)); 1449 - desc->p2 = cpu_to_be64(ioc3_map((void *) b2, 1)); 1478 + (s2 << ETXD_B2CNT_SHIFT)); 1479 + d1 = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE); 1480 + if (dma_mapping_error(ip->dma_dev, d1)) 1481 + goto drop_packet; 1482 + d2 = dma_map_single(ip->dma_dev, (void *)b2, s1, DMA_TO_DEVICE); 1483 + if (dma_mapping_error(ip->dma_dev, d2)) { 1484 + dma_unmap_single(ip->dma_dev, d1, len, DMA_TO_DEVICE); 1485 + goto drop_packet; 1486 + } 1487 + desc->p1 = cpu_to_be64(ioc3_map(d1, PCI64_ATTR_PREF)); 1488 + desc->p2 = cpu_to_be64(ioc3_map(d2, PCI64_ATTR_PREF)); 1450 1489 } else { 1490 + dma_addr_t d; 1491 + 1451 1492 /* Normal sized packet that doesn't cross a page boundary. 
*/ 1452 1493 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0); 1453 1494 desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT); 1454 - desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1)); 1495 + d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE); 1496 + if (dma_mapping_error(ip->dma_dev, d)) 1497 + goto drop_packet; 1498 + desc->p1 = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF)); 1455 1499 } 1456 1500 1457 - BARRIER(); 1501 + mb(); /* make sure all descriptor changes are visible */ 1458 1502 1459 1503 ip->tx_skbs[produce] = skb; /* Remember skb */ 1460 - produce = (produce + 1) & 127; 1504 + produce = (produce + 1) & TX_RING_MASK; 1461 1505 ip->tx_pi = produce; 1462 - ioc3_w_etpir(produce << 7); /* Fire ... */ 1506 + writel(produce << 7, &ip->regs->etpir); /* Fire ... */ 1463 1507 1464 1508 ip->txqlen++; 1465 1509 1466 - if (ip->txqlen >= 127) 1510 + if (ip->txqlen >= (TX_RING_ENTRIES - 1)) 1467 1511 netif_stop_queue(dev); 1512 + 1513 + spin_unlock_irq(&ip->ioc3_lock); 1514 + 1515 + return NETDEV_TX_OK; 1516 + 1517 + drop_packet: 1518 + dev_kfree_skb_any(skb); 1519 + dev->stats.tx_dropped++; 1468 1520 1469 1521 spin_unlock_irq(&ip->ioc3_lock); 1470 1522 ··· 1497 1505 { 1498 1506 struct ioc3_private *ip = netdev_priv(dev); 1499 1507 1500 - printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); 1508 + netdev_err(dev, "transmit timed out, resetting\n"); 1501 1509 1502 1510 spin_lock_irq(&ip->ioc3_lock); 1503 1511 1504 1512 ioc3_stop(ip); 1513 + ioc3_free_rx_bufs(ip); 1514 + ioc3_clean_tx_ring(ip); 1515 + 1505 1516 ioc3_init(dev); 1517 + if (ioc3_alloc_rx_bufs(dev)) { 1518 + netdev_err(dev, "%s: rx buffer allocation failed\n", __func__); 1519 + spin_unlock_irq(&ip->ioc3_lock); 1520 + return; 1521 + } 1522 + ioc3_start(ip); 1506 1523 ioc3_mii_init(ip); 1507 1524 ioc3_mii_start(ip); 1508 1525 ··· 1520 1519 netif_wake_queue(dev); 1521 1520 } 1522 1521 1523 - /* 1524 - * Given a multicast ethernet address, this routine calculates the 1522 + /* 
Given a multicast ethernet address, this routine calculates the 1525 1523 * address's bit index in the logical address filter mask 1526 1524 */ 1527 - 1528 1525 static inline unsigned int ioc3_hash(const unsigned char *addr) 1529 1526 { 1530 1527 unsigned int temp = 0; 1531 - u32 crc; 1532 1528 int bits; 1529 + u32 crc; 1533 1530 1534 1531 crc = ether_crc_le(ETH_ALEN, addr); 1535 1532 ··· 1541 1542 return temp; 1542 1543 } 1543 1544 1544 - static void ioc3_get_drvinfo (struct net_device *dev, 1545 - struct ethtool_drvinfo *info) 1545 + static void ioc3_get_drvinfo(struct net_device *dev, 1546 + struct ethtool_drvinfo *info) 1546 1547 { 1547 1548 struct ioc3_private *ip = netdev_priv(dev); 1548 1549 ··· 1622 1623 1623 1624 static void ioc3_set_multicast_list(struct net_device *dev) 1624 1625 { 1625 - struct netdev_hw_addr *ha; 1626 1626 struct ioc3_private *ip = netdev_priv(dev); 1627 - struct ioc3 *ioc3 = ip->regs; 1627 + struct ioc3_ethregs *regs = ip->regs; 1628 + struct netdev_hw_addr *ha; 1628 1629 u64 ehar = 0; 1629 1630 1630 - netif_stop_queue(dev); /* Lock out others. */ 1631 + spin_lock_irq(&ip->ioc3_lock); 1631 1632 1632 1633 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1633 1634 ip->emcr |= EMCR_PROMISC; 1634 - ioc3_w_emcr(ip->emcr); 1635 - (void) ioc3_r_emcr(); 1635 + writel(ip->emcr, &regs->emcr); 1636 + readl(&regs->emcr); 1636 1637 } else { 1637 1638 ip->emcr &= ~EMCR_PROMISC; 1638 - ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */ 1639 - (void) ioc3_r_emcr(); 1639 + writel(ip->emcr, &regs->emcr); /* Clear promiscuous. */ 1640 + readl(&regs->emcr); 1640 1641 1641 1642 if ((dev->flags & IFF_ALLMULTI) || 1642 1643 (netdev_mc_count(dev) > 64)) { 1643 1644 /* Too many for hashing to make sense or we want all 1644 - multicast packets anyway, so skip computing all the 1645 - hashes and just accept all packets. */ 1645 + * multicast packets anyway, so skip computing all the 1646 + * hashes and just accept all packets. 
1647 + */ 1646 1648 ip->ehar_h = 0xffffffff; 1647 1649 ip->ehar_l = 0xffffffff; 1648 1650 } else { ··· 1653 1653 ip->ehar_h = ehar >> 32; 1654 1654 ip->ehar_l = ehar & 0xffffffff; 1655 1655 } 1656 - ioc3_w_ehar_h(ip->ehar_h); 1657 - ioc3_w_ehar_l(ip->ehar_l); 1656 + writel(ip->ehar_h, &regs->ehar_h); 1657 + writel(ip->ehar_l, &regs->ehar_l); 1658 1658 } 1659 1659 1660 - netif_wake_queue(dev); /* Let us get going again. */ 1660 + spin_unlock_irq(&ip->ioc3_lock); 1661 1661 } 1662 1662 1663 1663 module_pci_driver(ioc3_driver);