Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

skfp annotations

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Jeff Garzik <jeff@garzik.org>

Authored by Al Viro and committed by Jeff Garzik
2f220e30 eca1ad82

+60 -62
+6 -6
drivers/net/skfp/fplustm.c
··· 401 401 /* int len ; length of the frame including the FC */ 402 402 { 403 403 int i ; 404 - u_int *p ; 404 + __le32 *p ; 405 405 406 406 CHECK_NPP() ; 407 407 MARW(off) ; /* set memory address reg for writes */ 408 408 409 - p = (u_int *) mac ; 409 + p = (__le32 *) mac ; 410 410 for (i = (len + 3)/4 ; i ; i--) { 411 411 if (i == 1) { 412 412 /* last word, set the tag bit */ 413 413 outpw(FM_A(FM_CMDREG2),FM_ISTTB) ; 414 414 } 415 - write_mdr(smc,MDR_REVERSE(*p)) ; 415 + write_mdr(smc,le32_to_cpu(*p)) ; 416 416 p++ ; 417 417 } 418 418 ··· 444 444 */ 445 445 static void directed_beacon(struct s_smc *smc) 446 446 { 447 - SK_LOC_DECL(u_int,a[2]) ; 447 + SK_LOC_DECL(__le32,a[2]) ; 448 448 449 449 /* 450 450 * set UNA in frame ··· 458 458 CHECK_NPP() ; 459 459 /* set memory address reg for writes */ 460 460 MARW(smc->hw.fp.fifo.rbc_ram_start+DBEACON_FRAME_OFF+4) ; 461 - write_mdr(smc,MDR_REVERSE(a[0])) ; 461 + write_mdr(smc,le32_to_cpu(a[0])) ; 462 462 outpw(FM_A(FM_CMDREG2),FM_ISTTB) ; /* set the tag bit */ 463 - write_mdr(smc,MDR_REVERSE(a[1])) ; 463 + write_mdr(smc,le32_to_cpu(a[1])) ; 464 464 465 465 outpw(FM_A(FM_SABC),smc->hw.fp.fifo.rbc_ram_start + DBEACON_FRAME_OFF) ; 466 466 }
+10 -10
drivers/net/skfp/h/fplustm.h
··· 50 50 * Transmit Descriptor struct 51 51 */ 52 52 struct s_smt_fp_txd { 53 - u_int txd_tbctrl ; /* transmit buffer control */ 54 - u_int txd_txdscr ; /* transmit frame status word */ 55 - u_int txd_tbadr ; /* physical tx buffer address */ 56 - u_int txd_ntdadr ; /* physical pointer to the next TxD */ 53 + __le32 txd_tbctrl ; /* transmit buffer control */ 54 + __le32 txd_txdscr ; /* transmit frame status word */ 55 + __le32 txd_tbadr ; /* physical tx buffer address */ 56 + __le32 txd_ntdadr ; /* physical pointer to the next TxD */ 57 57 #ifdef ENA_64BIT_SUP 58 - u_int txd_tbadr_hi ; /* physical tx buffer addr (high dword)*/ 58 + __le32 txd_tbadr_hi ; /* physical tx buffer addr (high dword)*/ 59 59 #endif 60 60 char far *txd_virt ; /* virtual pointer to the data frag */ 61 61 /* virt pointer to the next TxD */ ··· 67 67 * Receive Descriptor struct 68 68 */ 69 69 struct s_smt_fp_rxd { 70 - u_int rxd_rbctrl ; /* receive buffer control */ 71 - u_int rxd_rfsw ; /* receive frame status word */ 72 - u_int rxd_rbadr ; /* physical rx buffer address */ 73 - u_int rxd_nrdadr ; /* physical pointer to the next RxD */ 70 + __le32 rxd_rbctrl ; /* receive buffer control */ 71 + __le32 rxd_rfsw ; /* receive frame status word */ 72 + __le32 rxd_rbadr ; /* physical rx buffer address */ 73 + __le32 rxd_nrdadr ; /* physical pointer to the next RxD */ 74 74 #ifdef ENA_64BIT_SUP 75 - u_int rxd_rbadr_hi ; /* physical tx buffer addr (high dword)*/ 75 + __le32 rxd_rbadr_hi ; /* physical tx buffer addr (high dword)*/ 76 76 #endif 77 77 char far *rxd_virt ; /* virtual pointer to the data frag */ 78 78 /* virt pointer to the next RxD */
+42 -44
drivers/net/skfp/hwmtm.c
··· 208 208 #if defined(NDIS_OS2) || defined(ODI2) 209 209 #define CR_READ(var) ((var) & 0xffff0000 | ((var) & 0xffff)) 210 210 #else 211 - #define CR_READ(var) (u_long)(var) 211 + #define CR_READ(var) (__le32)(var) 212 212 #endif 213 213 214 214 #define IMASK_SLOW (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \ ··· 343 343 for (i=count-1, d1=start; i ; i--) { 344 344 d2 = d1 ; 345 345 d1++ ; /* descr is owned by the host */ 346 - d2->r.rxd_rbctrl = AIX_REVERSE(BMU_CHECK) ; 346 + d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ; 347 347 d2->r.rxd_next = &d1->r ; 348 348 phys = mac_drv_virt2phys(smc,(void *)d1) ; 349 - d2->r.rxd_nrdadr = AIX_REVERSE(phys) ; 349 + d2->r.rxd_nrdadr = cpu_to_le32(phys) ; 350 350 } 351 351 DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ; 352 - d1->r.rxd_rbctrl = AIX_REVERSE(BMU_CHECK) ; 352 + d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ; 353 353 d1->r.rxd_next = &start->r ; 354 354 phys = mac_drv_virt2phys(smc,(void *)start) ; 355 - d1->r.rxd_nrdadr = AIX_REVERSE(phys) ; 355 + d1->r.rxd_nrdadr = cpu_to_le32(phys) ; 356 356 357 357 for (i=count, d1=start; i ; i--) { 358 358 DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ; ··· 376 376 DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ; 377 377 (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, 378 378 HWM_ASYNC_TXD_COUNT) ; 379 - phys = AIX_REVERSE(ds->txd_ntdadr) ; 379 + phys = le32_to_cpu(ds->txd_ntdadr) ; 380 380 ds++ ; 381 381 queue->tx_curr_put = queue->tx_curr_get = ds ; 382 382 ds-- ; ··· 390 390 DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ; 391 391 (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, 392 392 HWM_SYNC_TXD_COUNT) ; 393 - phys = AIX_REVERSE(ds->txd_ntdadr) ; 393 + phys = le32_to_cpu(ds->txd_ntdadr) ; 394 394 ds++ ; 395 395 queue->tx_curr_put = queue->tx_curr_get = ds ; 396 396 queue->tx_free = HWM_SYNC_TXD_COUNT ; ··· 412 412 DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ; 413 413 (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, 414 414 SMT_R1_RXD_COUNT) ; 415 - phys = AIX_REVERSE(ds->rxd_nrdadr) ; 415 + phys = le32_to_cpu(ds->rxd_nrdadr) ; 416 416 ds++ ; 417 417 queue->rx_curr_put = queue->rx_curr_get = ds ; 418 418 queue->rx_free = SMT_R1_RXD_COUNT ; ··· 607 607 for (i = tx_used+queue->tx_free-1 ; i ; i-- ) { 608 608 t = t->txd_next ; 609 609 } 610 - phys = AIX_REVERSE(t->txd_ntdadr) ; 610 + phys = le32_to_cpu(t->txd_ntdadr) ; 611 611 612 612 t = queue->tx_curr_get ; 613 613 while (tx_used) { 614 614 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ; 615 - tbctrl = AIX_REVERSE(t->txd_tbctrl) ; 615 + tbctrl = le32_to_cpu(t->txd_tbctrl) ; 616 616 617 617 if (tbctrl & BMU_OWN) { 618 618 if (tbctrl & BMU_STF) { ··· 622 622 /* 623 623 * repair the descriptor 624 624 */ 625 - t->txd_tbctrl &= AIX_REVERSE(~BMU_OWN) ; 625 + t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ; 626 626 } 627 627 } 628 - phys = AIX_REVERSE(t->txd_ntdadr) ; 628 + phys = le32_to_cpu(t->txd_ntdadr) ; 629 629 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; 630 630 t = t->txd_next ; 631 631 tx_used-- ; ··· 659 659 for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) { 660 660 r = r->rxd_next ; 661 661 } 662 - phys = AIX_REVERSE(r->rxd_nrdadr) ; 662 + phys = le32_to_cpu(r->rxd_nrdadr) ; 663 663 664 664 r = queue->rx_curr_get ; 665 665 while (rx_used) { 666 666 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 667 - rbctrl = AIX_REVERSE(r->rxd_rbctrl) ; 667 + rbctrl = le32_to_cpu(r->rxd_rbctrl) ; 668 668 669 669 if (rbctrl & BMU_OWN) { 670 670 if (rbctrl & BMU_STF) { ··· 674 674 /* 675 675 * repair the descriptor 676 676 */ 677 - r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ; 677 + r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ; 678 678 } 679 679 } 680 - phys = AIX_REVERSE(r->rxd_nrdadr) ; 680 + phys = le32_to_cpu(r->rxd_nrdadr) ; 681 681 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; 682 682 r = r->rxd_next ; 683 683 rx_used-- ; ··· 1094 1094 do { 1095 1095 DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ; 1096 1096 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1097 - rbctrl = CR_READ(r->rxd_rbctrl) ; 1098 - rbctrl = AIX_REVERSE(rbctrl) ; 1097 + rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl)); 1099 1098 1100 1099 if (rbctrl & BMU_OWN) { 1101 1100 NDD_TRACE("RHxE",r,rfsw,rbctrl) ; ··· 1117 1118 smc->os.hwm.detec_count = 0 ; 1118 1119 goto rx_end ; 1119 1120 } 1120 - rfsw = AIX_REVERSE(r->rxd_rfsw) ; 1121 + rfsw = le32_to_cpu(r->rxd_rfsw) ; 1121 1122 if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) { 1122 1123 /* 1123 1124 * The BMU_STF bit is deleted, 1 frame is ··· 1150 1151 /* may be next 2 DRV_BUF_FLUSH() can be skipped, because */ 1151 1152 /* BMU_ST_BUF will not be changed by the ASIC */ 1152 1153 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1153 - while (rx_used && !(r->rxd_rbctrl & AIX_REVERSE(BMU_ST_BUF))) { 1154 + while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) { 1154 1155 DB_RX("Check STF bit in %x",(void *)r,0,5) ; 1155 1156 r = r->rxd_next ; 1156 1157 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; ··· 1170 1171 /* 1171 1172 * ASIC Errata no. 7 (STF - Bit Bug) 1172 1173 */ 1173 - rxd->rxd_rbctrl &= AIX_REVERSE(~BMU_STF) ; 1174 + rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ; 1174 1175 1175 1176 for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){ 1176 1177 DB_RX("dma_complete for RxD %x",(void *)r,0,5) ; ··· 1286 1287 hwm_cpy_rxd2mb(rxd,data,len) ; 1287 1288 #else 1288 1289 for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){ 1289 - n = AIX_REVERSE(r->rxd_rbctrl) & RD_LENGTH ; 1290 + n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ; 1290 1291 DB_RX("cp SMT frame to mb: len = %d",n,0,6) ; 1291 1292 memcpy(data,r->rxd_virt,n) ; 1292 1293 data += n ; ··· 1425 1426 int frame_status) 1426 1427 { 1427 1428 struct s_smt_fp_rxd volatile *r ; 1428 - u_int rbctrl ; 1429 + __le32 rbctrl; 1429 1430 1430 1431 NDD_TRACE("RHfB",virt,len,frame_status) ; 1431 1432 DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ; 1432 1433 r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ; 1433 1434 r->rxd_virt = virt ; 1434 - r->rxd_rbadr = AIX_REVERSE(phys) ; 1435 - rbctrl = AIX_REVERSE( (((u_long)frame_status & 1435 + r->rxd_rbadr = cpu_to_le32(phys) ; 1436 + rbctrl = cpu_to_le32( (((__u32)frame_status & 1436 1437 (FIRST_FRAG|LAST_FRAG))<<26) | 1437 1438 (((u_long) frame_status & FIRST_FRAG) << 21) | 1438 1439 BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ; ··· 1443 1444 smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ; 1444 1445 smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ; 1445 1446 smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ; 1446 - NDD_TRACE("RHfE",r,AIX_REVERSE(r->rxd_rbadr),0) ; 1447 + NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ; 1447 1448 } 1448 1449 1449 1450 /* ··· 1493 1494 while (queue->rx_used) { 1494 1495 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1495 1496 DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ; 1496 - r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ; 1497 + r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ; 1497 1498 frag_count = 1 ; 1498 1499 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; 1499 1500 r = r->rxd_next ; 1500 1501 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1501 1502 while (r != queue->rx_curr_put && 1502 1503 !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) { 1503 1504 DB_RX("Check STF bit in %x",(void *)r,0,5) ; 1504 - r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ; 1505 + r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ; 1505 1506 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; 1506 1507 r = r->rxd_next ; 1507 1508 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; ··· 1639 1640 { 1640 1641 struct s_smt_fp_txd volatile *t ; 1641 1642 struct s_smt_tx_queue *queue ; 1642 - u_int tbctrl ; 1643 + __le32 tbctrl ; 1643 1644 1644 1645 queue = smc->os.hwm.tx_p ; 1645 1646 ··· 1656 1657 /* '*t' is already defined */ 1657 1658 DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ; 1658 1659 t->txd_virt = virt ; 1659 - t->txd_txdscr = AIX_REVERSE(smc->os.hwm.tx_descr) ; 1660 - t->txd_tbadr = AIX_REVERSE(phys) ; 1661 - tbctrl = AIX_REVERSE((((u_long)frame_status & 1660 + t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ; 1661 + t->txd_tbadr = cpu_to_le32(phys) ; 1662 + tbctrl = cpu_to_le32((((__u32)frame_status & 1662 1663 (FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) | 1663 1664 BMU_OWN|BMU_CHECK |len) ; 1664 1665 t->txd_tbctrl = tbctrl ; ··· 1825 1826 struct s_smt_tx_queue *queue ; 1826 1827 struct s_smt_fp_txd volatile *t ; 1827 1828 u_long phys ; 1828 - u_int tbctrl ; 1829 + __le32 tbctrl; 1829 1830 1830 1831 NDD_TRACE("THSB",mb,fc,0) ; 1831 1832 DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ; ··· 1893 1894 DB_TX("init TxD = 0x%x",(void *)t,0,5) ; 1894 1895 if (i == frag_count-1) { 1895 1896 frame_status |= LAST_FRAG ; 1896 - t->txd_txdscr = AIX_REVERSE(TX_DESCRIPTOR | 1897 - (((u_long)(mb->sm_len-1)&3) << 27)) ; 1897 + t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR | 1898 + (((__u32)(mb->sm_len-1)&3) << 27)) ; 1898 1899 } 1899 1900 t->txd_virt = virt[i] ; 1900 1901 phys = dma_master(smc, (void far *)virt[i], 1901 1902 frag_len[i], DMA_RD|SMT_BUF) ; 1902 - t->txd_tbadr = AIX_REVERSE(phys) ; 1903 + t->txd_tbadr = cpu_to_le32(phys) ; 1903 - tbctrl = AIX_REVERSE((((u_long) frame_status & 1904 + tbctrl = cpu_to_le32((((__u32)frame_status & 1904 1905 (FIRST_FRAG|LAST_FRAG)) << 26) | 1905 1906 BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ; 1906 1907 t->txd_tbctrl = tbctrl ; ··· 1970 1971 do { 1971 1972 DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ; 1972 1973 DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ; 1973 - tbctrl = CR_READ(t1->txd_tbctrl) ; 1974 - tbctrl = AIX_REVERSE(tbctrl) ; 1974 + tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl)); 1975 1975 1976 1976 if (tbctrl & BMU_OWN || !queue->tx_used){ 1977 1977 DB_TX("End of TxDs queue %d",i,0,4) ; ··· 1982 1984 1983 1985 t1 = queue->tx_curr_get ; 1984 1986 for (n = frag_count; n; n--) { 1985 - tbctrl = AIX_REVERSE(t1->txd_tbctrl) ; 1987 + tbctrl = le32_to_cpu(t1->txd_tbctrl) ; 1986 1988 dma_complete(smc, 1987 1989 (union s_fp_descr volatile *) t1, 1988 1990 (int) (DMA_RD | ··· 2062 2064 while (tx_used) { 2063 2065 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ; 2064 2066 DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ; 2065 - t->txd_tbctrl &= AIX_REVERSE(~BMU_OWN) ; 2067 + t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ; 2066 2068 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; 2067 2069 t = t->txd_next ; 2068 2070 tx_used-- ; ··· 2084 2086 * tx_curr_get and tx_curr_put to this position 2085 2087 */ 2086 2088 if (i == QUEUE_S) { 2087 - outpd(ADDR(B5_XS_DA),AIX_REVERSE(t->txd_ntdadr)) ; 2089 + outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ; 2088 2090 } 2089 2091 else { 2090 - outpd(ADDR(B5_XA_DA),AIX_REVERSE(t->txd_ntdadr)) ; 2092 + outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ; 2091 2093 } 2092 2094 2093 2095 queue->tx_curr_put = queue->tx_curr_get->txd_next ;
+2 -2
drivers/net/skfp/skfddi.c
··· 495 495 496 496 PRINTK(KERN_INFO "entering skfp_open\n"); 497 497 /* Register IRQ - support shared interrupts by passing device ptr */ 498 - err = request_irq(dev->irq, (void *) skfp_interrupt, IRQF_SHARED, 498 + err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED, 499 499 dev->name, dev); 500 500 if (err) 501 501 return err; ··· 1644 1644 // Get RIF length from Routing Control (RC) field. 1645 1645 cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header. 1646 1646 1647 - ri = ntohs(*((unsigned short *) cp)); 1647 + ri = ntohs(*((__be16 *) cp)); 1648 1648 RifLength = ri & FDDI_RCF_LEN_MASK; 1649 1649 if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) { 1650 1650 printk("fddi: Invalid RIF.\n");