Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

brcm80211: smac: use bcma function for register access in dma.c

The dma.c source file now uses the register access functions
provided by bcma.

Reviewed-by: Pieter-Paul Giesberts <pieterpg@broadcom.com>
Reviewed-by: Alwin Beukers <alwin@broadcom.com>
Signed-off-by: Arend van Spriel <arend@broadcom.com>
Signed-off-by: Franky Lin <frankyl@broadcom.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>

Authored by Arend van Spriel; committed by John W. Linville.
e81da650 2e81b9b1

+108 -91
+97 -77
drivers/net/wireless/brcm80211/brcmsmac/dma.c
··· 27 27 #include "soc.h" 28 28 29 29 /* 30 + * dma register field offset calculation 31 + */ 32 + #define DMA64REGOFFS(field) offsetof(struct dma64regs, field) 33 + #define DMA64TXREGOFFS(di, field) (di->d64txregbase + DMA64REGOFFS(field)) 34 + #define DMA64RXREGOFFS(di, field) (di->d64rxregbase + DMA64REGOFFS(field)) 35 + 36 + /* 30 37 * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within 31 38 * a contiguous 8kB physical address. 32 39 */ ··· 234 227 bool addrext; /* this dma engine supports DmaExtendedAddrChanges */ 235 228 236 229 /* 64-bit dma tx engine registers */ 237 - struct dma64regs __iomem *d64txregs; 230 + uint d64txregbase; 238 231 /* 64-bit dma rx engine registers */ 239 - struct dma64regs __iomem *d64rxregs; 232 + uint d64rxregbase; 240 233 /* pointer to dma64 tx descriptor ring */ 241 234 struct dma64desc *txd64; 242 235 /* pointer to dma64 rx descriptor ring */ ··· 383 376 if (dmactrlflags & DMA_CTRL_PEN) { 384 377 u32 control; 385 378 386 - control = R_REG(&di->d64txregs->control); 387 - W_REG(&di->d64txregs->control, 379 + control = bcma_read32(di->d11core, DMA64TXREGOFFS(di, control)); 380 + bcma_write32(di->d11core, DMA64TXREGOFFS(di, control), 388 381 control | D64_XC_PD); 389 - if (R_REG(&di->d64txregs->control) & D64_XC_PD) 382 + if (bcma_read32(di->d11core, DMA64TXREGOFFS(di, control)) & 383 + D64_XC_PD) 390 384 /* We *can* disable it so it is supported, 391 385 * restore control register 392 386 */ 393 - W_REG(&di->d64txregs->control, 394 - control); 387 + bcma_write32(di->d11core, DMA64TXREGOFFS(di, control), 388 + control); 395 389 else 396 390 /* Not supported, don't allow it to be enabled */ 397 391 dmactrlflags &= ~DMA_CTRL_PEN; ··· 403 395 return dmactrlflags; 404 396 } 405 397 406 - static bool _dma64_addrext(struct dma64regs __iomem *dma64regs) 398 + static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset) 407 399 { 408 400 u32 w; 409 - OR_REG(&dma64regs->control, D64_XC_AE); 410 - w = 
R_REG(&dma64regs->control); 411 - AND_REG(&dma64regs->control, ~D64_XC_AE); 401 + bcma_set32(di->d11core, ctrl_offset, D64_XC_AE); 402 + w = bcma_read32(di->d11core, ctrl_offset); 403 + bcma_mask32(di->d11core, ctrl_offset, ~D64_XC_AE); 412 404 return (w & D64_XC_AE) == D64_XC_AE; 413 405 } 414 406 ··· 421 413 /* DMA64 supports full 32- or 64-bit operation. AE is always valid */ 422 414 423 415 /* not all tx or rx channel are available */ 424 - if (di->d64txregs != NULL) { 425 - if (!_dma64_addrext(di->d64txregs)) 416 + if (di->d64txregbase != 0) { 417 + if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control))) 426 418 DMA_ERROR("%s: DMA64 tx doesn't have AE set\n", 427 419 di->name); 428 420 return true; 429 - } else if (di->d64rxregs != NULL) { 430 - if (!_dma64_addrext(di->d64rxregs)) 421 + } else if (di->d64rxregbase != 0) { 422 + if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control))) 431 423 DMA_ERROR("%s: DMA64 rx doesn't have AE set\n", 432 424 di->name); 433 425 return true; ··· 441 433 u32 addrl; 442 434 443 435 /* Check to see if the descriptors need to be aligned on 4K/8K or not */ 444 - if (di->d64txregs != NULL) { 445 - W_REG(&di->d64txregs->addrlow, 0xff0); 446 - addrl = R_REG(&di->d64txregs->addrlow); 436 + if (di->d64txregbase != 0) { 437 + bcma_write32(di->d11core, DMA64TXREGOFFS(di, addrlow), 0xff0); 438 + addrl = bcma_read32(di->d11core, DMA64TXREGOFFS(di, addrlow)); 447 439 if (addrl != 0) 448 440 return false; 449 - } else if (di->d64rxregs != NULL) { 450 - W_REG(&di->d64rxregs->addrlow, 0xff0); 451 - addrl = R_REG(&di->d64rxregs->addrlow); 441 + } else if (di->d64rxregbase != 0) { 442 + bcma_write32(di->d11core, DMA64RXREGOFFS(di, addrlow), 0xff0); 443 + addrl = bcma_read32(di->d11core, DMA64RXREGOFFS(di, addrlow)); 452 444 if (addrl != 0) 453 445 return false; 454 446 } ··· 566 558 567 559 struct dma_pub *dma_attach(char *name, struct si_pub *sih, 568 560 struct bcma_device *d11core, 569 - void __iomem *dmaregstx, void __iomem *dmaregsrx, 570 - 
uint ntxd, uint nrxd, 561 + uint txregbase, uint rxregbase, uint ntxd, uint nrxd, 571 562 uint rxbufsize, int rxextheadroom, 572 563 uint nrxpost, uint rxoffset, uint *msg_level) 573 564 { ··· 583 576 584 577 di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64); 585 578 586 - /* init dma reg pointer */ 579 + /* init dma reg info */ 587 580 di->d11core = d11core; 588 - di->d64txregs = (struct dma64regs __iomem *) dmaregstx; 589 - di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx; 581 + di->d64txregbase = txregbase; 582 + di->d64rxregbase = rxregbase; 590 583 591 584 /* 592 585 * Default flags (which can be changed by the driver calling ··· 595 588 */ 596 589 _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0); 597 590 598 - DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n", 599 - name, "DMA64", 591 + DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d " 592 + "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d " 593 + "txregbase %u rxregbase %u\n", name, "DMA64", 600 594 di->dma.dmactrlflags, ntxd, nrxd, rxbufsize, 601 - rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx); 595 + rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase); 602 596 603 597 /* make a private copy of our callers name */ 604 598 strncpy(di->name, name, MAXNAMEL); ··· 791 783 if ((di->ddoffsetlow == 0) 792 784 || !(pa & PCI32ADDR_HIGH)) { 793 785 if (direction == DMA_TX) { 794 - W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow); 795 - W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh); 786 + bcma_write32(di->d11core, DMA64TXREGOFFS(di, addrlow), 787 + pa + di->ddoffsetlow); 788 + bcma_write32(di->d11core, DMA64TXREGOFFS(di, addrhigh), 789 + di->ddoffsethigh); 796 790 } else { 797 - W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow); 798 - W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh); 791 + bcma_write32(di->d11core, DMA64RXREGOFFS(di, addrlow), 792 + pa + di->ddoffsetlow); 793 + 
bcma_write32(di->d11core, DMA64RXREGOFFS(di, addrhigh), 794 + di->ddoffsethigh); 799 795 } 800 796 } else { 801 797 /* DMA64 32bits address extension */ ··· 810 798 pa &= ~PCI32ADDR_HIGH; 811 799 812 800 if (direction == DMA_TX) { 813 - W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow); 814 - W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh); 815 - SET_REG(&di->d64txregs->control, 816 - D64_XC_AE, (ae << D64_XC_AE_SHIFT)); 801 + bcma_write32(di->d11core, DMA64TXREGOFFS(di, addrlow), 802 + pa + di->ddoffsetlow); 803 + bcma_write32(di->d11core, DMA64TXREGOFFS(di, addrhigh), 804 + di->ddoffsethigh); 805 + bcma_maskset32(di->d11core, DMA64TXREGOFFS(di, control), 806 + D64_XC_AE, (ae << D64_XC_AE_SHIFT)); 817 807 } else { 818 - W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow); 819 - W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh); 820 - SET_REG(&di->d64rxregs->control, 821 - D64_RC_AE, (ae << D64_RC_AE_SHIFT)); 808 + bcma_write32(di->d11core, DMA64RXREGOFFS(di, addrlow), 809 + pa + di->ddoffsetlow); 810 + bcma_write32(di->d11core, DMA64RXREGOFFS(di, addrhigh), 811 + di->ddoffsethigh); 812 + bcma_maskset32(di->d11core, DMA64RXREGOFFS(di, control), 813 + D64_RC_AE, (ae << D64_RC_AE_SHIFT)); 822 814 } 823 815 } 824 816 } ··· 834 818 835 819 DMA_TRACE("%s:\n", di->name); 836 820 837 - control = 838 - (R_REG(&di->d64rxregs->control) & D64_RC_AE) | 839 - D64_RC_RE; 821 + control = D64_RC_RE | (bcma_read32(di->d11core, 822 + DMA64RXREGOFFS(di, control)) & 823 + D64_RC_AE); 840 824 841 825 if ((dmactrlflags & DMA_CTRL_PEN) == 0) 842 826 control |= D64_RC_PD; ··· 844 828 if (dmactrlflags & DMA_CTRL_ROC) 845 829 control |= D64_RC_OC; 846 830 847 - W_REG(&di->d64rxregs->control, 831 + bcma_write32(di->d11core, DMA64RXREGOFFS(di, control), 848 832 ((di->rxoffset << D64_RC_RO_SHIFT) | control)); 849 833 } 850 834 ··· 887 871 return NULL; 888 872 889 873 curr = 890 - B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) - 874 + B2I(((bcma_read32(di->d11core, 875 + 
DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) - 891 876 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc); 892 877 893 878 /* ignore curr if forceall */ ··· 970 953 if (resid > 0) { 971 954 uint cur; 972 955 cur = 973 - B2I(((R_REG(&di->d64rxregs->status0) & 974 - D64_RS0_CD_MASK) - 975 - di->rcvptrbase) & D64_RS0_CD_MASK, 976 - struct dma64desc); 956 + B2I(((bcma_read32(di->d11core, 957 + DMA64RXREGOFFS(di, status0)) & 958 + D64_RS0_CD_MASK) - di->rcvptrbase) & 959 + D64_RS0_CD_MASK, struct dma64desc); 977 960 DMA_ERROR("rxin %d rxout %d, hw_curr %d\n", 978 - di->rxin, di->rxout, cur); 961 + di->rxin, di->rxout, cur); 979 962 } 980 963 #endif /* BCMDBG */ 981 964 ··· 1003 986 if (di->nrxd == 0) 1004 987 return true; 1005 988 1006 - return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) == 1007 - (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK)); 989 + return ((bcma_read32(di->d11core, 990 + DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) == 991 + (bcma_read32(di->d11core, DMA64RXREGOFFS(di, ptr)) & 992 + D64_RS0_CD_MASK)); 1008 993 } 1009 994 1010 995 /* ··· 1089 1070 di->rxout = rxout; 1090 1071 1091 1072 /* update the chip lastdscr pointer */ 1092 - W_REG(&di->d64rxregs->ptr, 1073 + bcma_write32(di->d11core, DMA64RXREGOFFS(di, ptr), 1093 1074 di->rcvptrbase + I2B(rxout, struct dma64desc)); 1094 1075 1095 1076 return ring_empty; ··· 1150 1131 1151 1132 if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0) 1152 1133 control |= D64_XC_PD; 1153 - OR_REG(&di->d64txregs->control, control); 1134 + bcma_set32(di->d11core, DMA64TXREGOFFS(di, control), control); 1154 1135 1155 1136 /* DMA engine with alignment requirement requires table to be inited 1156 1137 * before enabling the engine ··· 1168 1149 if (di->ntxd == 0) 1169 1150 return; 1170 1151 1171 - OR_REG(&di->d64txregs->control, D64_XC_SE); 1152 + bcma_set32(di->d11core, DMA64TXREGOFFS(di, control), D64_XC_SE); 1172 1153 } 1173 1154 1174 1155 void dma_txresume(struct dma_pub *pub) ··· 1180 1161 if (di->ntxd == 0) 
1181 1162 return; 1182 1163 1183 - AND_REG(&di->d64txregs->control, ~D64_XC_SE); 1164 + bcma_mask32(di->d11core, DMA64TXREGOFFS(di, control), ~D64_XC_SE); 1184 1165 } 1185 1166 1186 1167 bool dma_txsuspended(struct dma_pub *pub) ··· 1188 1169 struct dma_info *di = (struct dma_info *)pub; 1189 1170 1190 1171 return (di->ntxd == 0) || 1191 - ((R_REG(&di->d64txregs->control) & D64_XC_SE) == 1192 - D64_XC_SE); 1172 + ((bcma_read32(di->d11core, 1173 + DMA64TXREGOFFS(di, control)) & D64_XC_SE) == 1174 + D64_XC_SE); 1193 1175 } 1194 1176 1195 1177 void dma_txreclaim(struct dma_pub *pub, enum txd_range range) ··· 1223 1203 return true; 1224 1204 1225 1205 /* suspend tx DMA first */ 1226 - W_REG(&di->d64txregs->control, D64_XC_SE); 1206 + bcma_write32(di->d11core, DMA64TXREGOFFS(di, control), D64_XC_SE); 1227 1207 SPINWAIT(((status = 1228 - (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) 1229 - != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE) 1230 - && (status != D64_XS0_XS_STOPPED), 10000); 1208 + (bcma_read32(di->d11core, DMA64TXREGOFFS(di, status0)) & 1209 + D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) && 1210 + (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED), 1211 + 10000); 1231 1212 1232 - W_REG(&di->d64txregs->control, 0); 1213 + bcma_write32(di->d11core, DMA64TXREGOFFS(di, control), 0); 1233 1214 SPINWAIT(((status = 1234 - (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) 1235 - != D64_XS0_XS_DISABLED), 10000); 1215 + (bcma_read32(di->d11core, DMA64TXREGOFFS(di, status0)) & 1216 + D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000); 1236 1217 1237 1218 /* wait for the last transaction to complete */ 1238 1219 udelay(300); ··· 1249 1228 if (di->nrxd == 0) 1250 1229 return true; 1251 1230 1252 - W_REG(&di->d64rxregs->control, 0); 1231 + bcma_write32(di->d11core, DMA64RXREGOFFS(di, control), 0); 1253 1232 SPINWAIT(((status = 1254 - (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK)) 1255 - != D64_RS0_RS_DISABLED), 10000); 1233 + 
(bcma_read32(di->d11core, DMA64RXREGOFFS(di, status0)) & 1234 + D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000); 1256 1235 1257 1236 return status == D64_RS0_RS_DISABLED; 1258 1237 } ··· 1314 1293 1315 1294 /* kick the chip */ 1316 1295 if (commit) 1317 - W_REG(&di->d64txregs->ptr, 1296 + bcma_write32(di->d11core, DMA64TXREGOFFS(di, ptr), 1318 1297 di->xmtptrbase + I2B(txout, struct dma64desc)); 1319 1298 1320 1299 /* tx flow control */ ··· 1362 1341 if (range == DMA_RANGE_ALL) 1363 1342 end = di->txout; 1364 1343 else { 1365 - struct dma64regs __iomem *dregs = di->d64txregs; 1366 - 1367 - end = (u16) (B2I(((R_REG(&dregs->status0) & 1368 - D64_XS0_CD_MASK) - 1369 - di->xmtptrbase) & D64_XS0_CD_MASK, 1370 - struct dma64desc)); 1344 + end = (u16) (B2I(((bcma_read32(di->d11core, 1345 + DMA64TXREGOFFS(di, status0)) & 1346 + D64_XS0_CD_MASK) - di->xmtptrbase) & 1347 + D64_XS0_CD_MASK, struct dma64desc)); 1371 1348 1372 1349 if (range == DMA_RANGE_TRANSFERED) { 1373 1350 active_desc = 1374 - (u16) (R_REG(&dregs->status1) & 1351 + (u16)(bcma_read32(di->d11core, 1352 + DMA64TXREGOFFS(di, status1)) & 1375 1353 D64_XS1_AD_MASK); 1376 1354 active_desc = 1377 1355 (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
+1 -2
drivers/net/wireless/brcm80211/brcmsmac/dma.h
··· 76 76 77 77 extern struct dma_pub *dma_attach(char *name, struct si_pub *sih, 78 78 struct bcma_device *d11core, 79 - void __iomem *dmaregstx, 80 - void __iomem *dmaregsrx, 79 + uint txregbase, uint rxregbase, 81 80 uint ntxd, uint nrxd, 82 81 uint rxbufsize, int rxextheadroom, 83 82 uint nrxpost, uint rxoffset, uint *msg_level);
+10 -12
drivers/net/wireless/brcm80211/brcmsmac/main.c
··· 1065 1065 } 1066 1066 } 1067 1067 1068 - static struct dma64regs __iomem * 1069 - dmareg(struct brcms_hardware *hw, uint direction, uint fifonum) 1068 + static uint 1069 + dmareg(uint direction, uint fifonum) 1070 1070 { 1071 - struct d11regs __iomem *regs = hw->d11core->bus->mmio; 1072 - 1073 1071 if (direction == DMA_TX) 1074 - return &(regs->fifo64regs[fifonum].dmaxmt); 1075 - return &(regs->fifo64regs[fifonum].dmarcv); 1072 + return offsetof(struct d11regs, fifo64regs[fifonum].dmaxmt); 1073 + return offsetof(struct d11regs, fifo64regs[fifonum].dmarcv); 1076 1074 } 1077 1075 1078 1076 static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme) ··· 1097 1099 * RX: RX_FIFO (RX data packets) 1098 1100 */ 1099 1101 wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, 1100 - (wme ? dmareg(wlc_hw, DMA_TX, 0) : 1101 - NULL), dmareg(wlc_hw, DMA_RX, 0), 1102 + (wme ? dmareg(DMA_TX, 0) : 0), 1103 + dmareg(DMA_RX, 0), 1102 1104 (wme ? NTXD : 0), NRXD, 1103 1105 RXBUFSZ, -1, NRXBUFPOST, 1104 1106 BRCMS_HWRXOFF, &brcm_msg_level); ··· 1111 1113 * RX: UNUSED 1112 1114 */ 1113 1115 wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, 1114 - dmareg(wlc_hw, DMA_TX, 1), NULL, 1116 + dmareg(DMA_TX, 1), 0, 1115 1117 NTXD, 0, 0, -1, 0, 0, 1116 1118 &brcm_msg_level); 1117 1119 dma_attach_err |= (NULL == wlc_hw->di[1]); ··· 1122 1124 * RX: UNUSED 1123 1125 */ 1124 1126 wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, 1125 - dmareg(wlc_hw, DMA_TX, 2), NULL, 1127 + dmareg(DMA_TX, 2), 0, 1126 1128 NTXD, 0, 0, -1, 0, 0, 1127 1129 &brcm_msg_level); 1128 1130 dma_attach_err |= (NULL == wlc_hw->di[2]); ··· 1132 1134 * (legacy) TX_CTL_FIFO (TX control & mgmt packets) 1133 1135 */ 1134 1136 wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, 1135 - dmareg(wlc_hw, DMA_TX, 3), 1136 - NULL, NTXD, 0, 0, -1, 1137 + dmareg(DMA_TX, 3), 1138 + 0, NTXD, 0, 0, -1, 1137 1139 0, 0, &brcm_msg_level); 1138 1140 dma_attach_err |= (NULL == 
wlc_hw->di[3]); 1139 1141 /* Cleaner to leave this as if with AP defined */