Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

gianfar: Cleanup/Fix gfar_probe and the hw init code

Factor out gfar_hw_init() to contain all the controller hw
initialization steps for better control of register writes,
and to significantly simplify the tangled code from gfar_probe().
This results in code size and stack usage reduction (besides
code readability).

Fix memory leak on device removal, by freeing the rx_/tx_queue
structures.

Replace custom bit swapping function with a library one (bitrev8).

Move allocation of rx_/tx_queue struct arrays before the group
structure init, because in order to assign Rx/Tx queues
to groups we need to have the queues first. This also allows
earlier bail out of gfar_probe(), in case the memory allocation
fails.

The flow control checks for maccfg1 were removed from gfar_probe(),
since flow control is disabled at probe time (priv->rx_/tx_pause_en
are 0). Redundant initializations (to 0) were also removed.

Signed-off-by: Claudiu Manoil <claudiu.manoil@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Claudiu Manoil and committed by
David S. Miller
20862788 c85fde83

+191 -180
+160 -177
drivers/net/ethernet/freescale/gianfar.c
··· 9 9 * Maintainer: Kumar Gala 10 10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> 11 11 * 12 - * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. 12 + * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc. 13 13 * Copyright 2007 MontaVista Software, Inc. 14 14 * 15 15 * This program is free software; you can redistribute it and/or modify it ··· 511 511 spin_unlock(&priv->tx_queue[i]->txlock); 512 512 } 513 513 514 - static void free_tx_pointers(struct gfar_private *priv) 514 + static int gfar_alloc_tx_queues(struct gfar_private *priv) 515 + { 516 + int i; 517 + 518 + for (i = 0; i < priv->num_tx_queues; i++) { 519 + priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), 520 + GFP_KERNEL); 521 + if (!priv->tx_queue[i]) 522 + return -ENOMEM; 523 + 524 + priv->tx_queue[i]->tx_skbuff = NULL; 525 + priv->tx_queue[i]->qindex = i; 526 + priv->tx_queue[i]->dev = priv->ndev; 527 + spin_lock_init(&(priv->tx_queue[i]->txlock)); 528 + } 529 + return 0; 530 + } 531 + 532 + static int gfar_alloc_rx_queues(struct gfar_private *priv) 533 + { 534 + int i; 535 + 536 + for (i = 0; i < priv->num_rx_queues; i++) { 537 + priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), 538 + GFP_KERNEL); 539 + if (!priv->rx_queue[i]) 540 + return -ENOMEM; 541 + 542 + priv->rx_queue[i]->rx_skbuff = NULL; 543 + priv->rx_queue[i]->qindex = i; 544 + priv->rx_queue[i]->dev = priv->ndev; 545 + spin_lock_init(&(priv->rx_queue[i]->rxlock)); 546 + } 547 + return 0; 548 + } 549 + 550 + static void gfar_free_tx_queues(struct gfar_private *priv) 515 551 { 516 552 int i; 517 553 ··· 555 519 kfree(priv->tx_queue[i]); 556 520 } 557 521 558 - static void free_rx_pointers(struct gfar_private *priv) 522 + static void gfar_free_rx_queues(struct gfar_private *priv) 559 523 { 560 524 int i; 561 525 ··· 644 608 grp->rx_bit_map = 0xFF; 645 609 grp->tx_bit_map = 0xFF; 646 610 } 611 + 612 + /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses 613 + * right to left, so 
we need to revert the 8 bits to get the q index 614 + */ 615 + grp->rx_bit_map = bitrev8(grp->rx_bit_map); 616 + grp->tx_bit_map = bitrev8(grp->tx_bit_map); 617 + 618 + /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, 619 + * also assign queues to groups 620 + */ 621 + for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { 622 + grp->num_rx_queues++; 623 + grp->rstat |= (RSTAT_CLEAR_RHALT >> i); 624 + priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i); 625 + priv->rx_queue[i]->grp = grp; 626 + } 627 + 628 + for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { 629 + grp->num_tx_queues++; 630 + grp->tstat |= (TSTAT_CLEAR_THALT >> i); 631 + priv->tqueue |= (TQUEUE_EN0 >> i); 632 + priv->tx_queue[i]->grp = grp; 633 + } 634 + 647 635 priv->num_grps++; 648 636 649 637 return 0; ··· 724 664 priv->num_tx_queues = num_tx_qs; 725 665 netif_set_real_num_rx_queues(dev, num_rx_qs); 726 666 priv->num_rx_queues = num_rx_qs; 727 - priv->num_grps = 0x0; 667 + 668 + err = gfar_alloc_tx_queues(priv); 669 + if (err) 670 + goto tx_alloc_failed; 671 + 672 + err = gfar_alloc_rx_queues(priv); 673 + if (err) 674 + goto rx_alloc_failed; 728 675 729 676 /* Init Rx queue filer rule set linked list */ 730 677 INIT_LIST_HEAD(&priv->rx_list.list); ··· 757 690 if (err) 758 691 goto err_grp_init; 759 692 } 760 - 761 - for (i = 0; i < priv->num_tx_queues; i++) 762 - priv->tx_queue[i] = NULL; 763 - for (i = 0; i < priv->num_rx_queues; i++) 764 - priv->rx_queue[i] = NULL; 765 - 766 - for (i = 0; i < priv->num_tx_queues; i++) { 767 - priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), 768 - GFP_KERNEL); 769 - if (!priv->tx_queue[i]) { 770 - err = -ENOMEM; 771 - goto tx_alloc_failed; 772 - } 773 - priv->tx_queue[i]->tx_skbuff = NULL; 774 - priv->tx_queue[i]->qindex = i; 775 - priv->tx_queue[i]->dev = dev; 776 - spin_lock_init(&(priv->tx_queue[i]->txlock)); 777 - } 778 - 779 - for (i = 0; i < priv->num_rx_queues; i++) { 780 - priv->rx_queue[i] = kzalloc(sizeof(struct 
gfar_priv_rx_q), 781 - GFP_KERNEL); 782 - if (!priv->rx_queue[i]) { 783 - err = -ENOMEM; 784 - goto rx_alloc_failed; 785 - } 786 - priv->rx_queue[i]->rx_skbuff = NULL; 787 - priv->rx_queue[i]->qindex = i; 788 - priv->rx_queue[i]->dev = dev; 789 - spin_lock_init(&(priv->rx_queue[i]->rxlock)); 790 - } 791 - 792 693 793 694 stash = of_get_property(np, "bd-stash", NULL); 794 695 ··· 819 784 820 785 return 0; 821 786 822 - rx_alloc_failed: 823 - free_rx_pointers(priv); 824 - tx_alloc_failed: 825 - free_tx_pointers(priv); 826 787 err_grp_init: 827 788 unmap_group_regs(priv); 789 + rx_alloc_failed: 790 + gfar_free_rx_queues(priv); 791 + tx_alloc_failed: 792 + gfar_free_tx_queues(priv); 828 793 free_gfar_dev(priv); 829 794 return err; 830 795 } ··· 908 873 return -ENODEV; 909 874 910 875 return phy_mii_ioctl(priv->phydev, rq, cmd); 911 - } 912 - 913 - static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) 914 - { 915 - unsigned int new_bit_map = 0x0; 916 - int mask = 0x1 << (max_qs - 1), i; 917 - 918 - for (i = 0; i < max_qs; i++) { 919 - if (bit_map & mask) 920 - new_bit_map = new_bit_map + (1 << i); 921 - mask = mask >> 0x1; 922 - } 923 - return new_bit_map; 924 876 } 925 877 926 878 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, ··· 1027 1005 priv->errata); 1028 1006 } 1029 1007 1030 - /* Set up the ethernet device structure, private data, 1031 - * and anything else we need before we start 1032 - */ 1033 - static int gfar_probe(struct platform_device *ofdev) 1008 + static void gfar_hw_init(struct gfar_private *priv) 1034 1009 { 1010 + struct gfar __iomem *regs = priv->gfargrp[0].regs; 1035 1011 u32 tempval; 1036 - struct net_device *dev = NULL; 1037 - struct gfar_private *priv = NULL; 1038 - struct gfar __iomem *regs = NULL; 1039 - int err = 0, i, grp_idx = 0; 1040 - u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; 1041 - u32 isrg = 0; 1042 - u32 __iomem *baddr; 1043 - 1044 - err = gfar_of_init(ofdev, &dev); 1045 - 1046 
- if (err) 1047 - return err; 1048 - 1049 - priv = netdev_priv(dev); 1050 - priv->ndev = dev; 1051 - priv->ofdev = ofdev; 1052 - priv->dev = &ofdev->dev; 1053 - SET_NETDEV_DEV(dev, &ofdev->dev); 1054 - 1055 - spin_lock_init(&priv->bflock); 1056 - INIT_WORK(&priv->reset_task, gfar_reset_task); 1057 - 1058 - platform_set_drvdata(ofdev, priv); 1059 - regs = priv->gfargrp[0].regs; 1060 - 1061 - gfar_detect_errata(priv); 1062 - 1063 - /* Stop the DMA engine now, in case it was running before 1064 - * (The firmware could have used it, and left it running). 1065 - */ 1066 - gfar_halt(dev); 1067 1012 1068 1013 /* Reset MAC layer */ 1069 1014 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET); ··· 1038 1049 /* We need to delay at least 3 TX clocks */ 1039 1050 udelay(2); 1040 1051 1041 - tempval = 0; 1042 - if (!priv->pause_aneg_en && priv->tx_pause_en) 1043 - tempval |= MACCFG1_TX_FLOW; 1044 - if (!priv->pause_aneg_en && priv->rx_pause_en) 1045 - tempval |= MACCFG1_RX_FLOW; 1046 1052 /* the soft reset bit is not self-resetting, so we need to 1047 1053 * clear it before resuming normal operation 1048 1054 */ 1049 - gfar_write(&regs->maccfg1, tempval); 1055 + gfar_write(&regs->maccfg1, 0); 1050 1056 1051 1057 /* Initialize MACCFG2. 
*/ 1052 1058 tempval = MACCFG2_INIT_SETTINGS; ··· 1052 1068 /* Initialize ECNTRL */ 1053 1069 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS); 1054 1070 1055 - /* Set the dev->base_addr to the gfar reg region */ 1056 - dev->base_addr = (unsigned long) regs; 1071 + /* Program the interrupt steering regs, only for MG devices */ 1072 + if (priv->num_grps > 1) 1073 + gfar_write_isrg(priv); 1057 1074 1058 - /* Fill in the dev structure */ 1059 - dev->watchdog_timeo = TX_TIMEOUT; 1060 - dev->mtu = 1500; 1061 - dev->netdev_ops = &gfar_netdev_ops; 1062 - dev->ethtool_ops = &gfar_ethtool_ops; 1075 + /* Enable all Rx/Tx queues after MAC reset */ 1076 + gfar_write(&regs->rqueue, priv->rqueue); 1077 + gfar_write(&regs->tqueue, priv->tqueue); 1078 + } 1063 1079 1064 - /* Register for napi ...We are registering NAPI for each grp */ 1065 - if (priv->mode == SQ_SG_MODE) 1066 - netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq, 1067 - GFAR_DEV_WEIGHT); 1068 - else 1069 - for (i = 0; i < priv->num_grps; i++) 1070 - netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, 1071 - GFAR_DEV_WEIGHT); 1072 - 1073 - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 1074 - dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 1075 - NETIF_F_RXCSUM; 1076 - dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | 1077 - NETIF_F_RXCSUM | NETIF_F_HIGHDMA; 1078 - } 1079 - 1080 - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 1081 - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | 1082 - NETIF_F_HW_VLAN_CTAG_RX; 1083 - dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 1084 - } 1080 + static void __init gfar_init_addr_hash_table(struct gfar_private *priv) 1081 + { 1082 + struct gfar __iomem *regs = priv->gfargrp[0].regs; 1085 1083 1086 1084 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 1087 1085 priv->extended_hash = 1; ··· 1099 1133 priv->hash_regs[6] = &regs->gaddr6; 1100 1134 priv->hash_regs[7] = &regs->gaddr7; 1101 1135 } 1136 + } 1137 + 1138 + /* Set up the ethernet device structure, 
private data, 1139 + * and anything else we need before we start 1140 + */ 1141 + static int gfar_probe(struct platform_device *ofdev) 1142 + { 1143 + struct net_device *dev = NULL; 1144 + struct gfar_private *priv = NULL; 1145 + int err = 0, i; 1146 + 1147 + err = gfar_of_init(ofdev, &dev); 1148 + 1149 + if (err) 1150 + return err; 1151 + 1152 + priv = netdev_priv(dev); 1153 + priv->ndev = dev; 1154 + priv->ofdev = ofdev; 1155 + priv->dev = &ofdev->dev; 1156 + SET_NETDEV_DEV(dev, &ofdev->dev); 1157 + 1158 + spin_lock_init(&priv->bflock); 1159 + INIT_WORK(&priv->reset_task, gfar_reset_task); 1160 + 1161 + platform_set_drvdata(ofdev, priv); 1162 + 1163 + gfar_detect_errata(priv); 1164 + 1165 + /* Stop the DMA engine now, in case it was running before 1166 + * (The firmware could have used it, and left it running). 1167 + */ 1168 + gfar_halt(dev); 1169 + 1170 + gfar_hw_init(priv); 1171 + 1172 + /* Set the dev->base_addr to the gfar reg region */ 1173 + dev->base_addr = (unsigned long) priv->gfargrp[0].regs; 1174 + 1175 + /* Fill in the dev structure */ 1176 + dev->watchdog_timeo = TX_TIMEOUT; 1177 + dev->mtu = 1500; 1178 + dev->netdev_ops = &gfar_netdev_ops; 1179 + dev->ethtool_ops = &gfar_ethtool_ops; 1180 + 1181 + /* Register for napi ...We are registering NAPI for each grp */ 1182 + if (priv->mode == SQ_SG_MODE) 1183 + netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq, 1184 + GFAR_DEV_WEIGHT); 1185 + else 1186 + for (i = 0; i < priv->num_grps; i++) 1187 + netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, 1188 + GFAR_DEV_WEIGHT); 1189 + 1190 + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 1191 + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 1192 + NETIF_F_RXCSUM; 1193 + dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | 1194 + NETIF_F_RXCSUM | NETIF_F_HIGHDMA; 1195 + } 1196 + 1197 + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 1198 + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | 1199 + NETIF_F_HW_VLAN_CTAG_RX; 1200 + dev->features |= 
NETIF_F_HW_VLAN_CTAG_RX; 1201 + } 1202 + 1203 + gfar_init_addr_hash_table(priv); 1102 1204 1103 1205 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) 1104 1206 priv->padding = DEFAULT_PADDING; ··· 1176 1142 if (dev->features & NETIF_F_IP_CSUM || 1177 1143 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1178 1144 dev->needed_headroom = GMAC_FCB_LEN; 1179 - 1180 - /* Program the isrg regs only if number of grps > 1 */ 1181 - if (priv->num_grps > 1) { 1182 - baddr = &regs->isrg0; 1183 - for (i = 0; i < priv->num_grps; i++) { 1184 - isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); 1185 - isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); 1186 - gfar_write(baddr, isrg); 1187 - baddr++; 1188 - isrg = 0x0; 1189 - } 1190 - } 1191 - 1192 - /* Need to reverse the bit maps as bit_map's MSB is q0 1193 - * but, for_each_set_bit parses from right to left, which 1194 - * basically reverses the queue numbers 1195 - */ 1196 - for (i = 0; i< priv->num_grps; i++) { 1197 - priv->gfargrp[i].tx_bit_map = 1198 - reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS); 1199 - priv->gfargrp[i].rx_bit_map = 1200 - reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS); 1201 - } 1202 - 1203 - /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, 1204 - * also assign queues to groups 1205 - */ 1206 - for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { 1207 - priv->gfargrp[grp_idx].num_rx_queues = 0x0; 1208 - 1209 - for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, 1210 - priv->num_rx_queues) { 1211 - priv->gfargrp[grp_idx].num_rx_queues++; 1212 - priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; 1213 - rstat = rstat | (RSTAT_CLEAR_RHALT >> i); 1214 - rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); 1215 - } 1216 - priv->gfargrp[grp_idx].num_tx_queues = 0x0; 1217 - 1218 - for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map, 1219 - priv->num_tx_queues) { 1220 - priv->gfargrp[grp_idx].num_tx_queues++; 1221 - priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; 1222 - 
tstat = tstat | (TSTAT_CLEAR_THALT >> i); 1223 - tqueue = tqueue | (TQUEUE_EN0 >> i); 1224 - } 1225 - priv->gfargrp[grp_idx].rstat = rstat; 1226 - priv->gfargrp[grp_idx].tstat = tstat; 1227 - rstat = tstat =0; 1228 - } 1229 - 1230 - gfar_write(&regs->rqueue, rqueue); 1231 - gfar_write(&regs->tqueue, tqueue); 1232 1145 1233 1146 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 1234 1147 ··· 1253 1272 1254 1273 register_fail: 1255 1274 unmap_group_regs(priv); 1256 - free_tx_pointers(priv); 1257 - free_rx_pointers(priv); 1275 + gfar_free_rx_queues(priv); 1276 + gfar_free_tx_queues(priv); 1258 1277 if (priv->phy_node) 1259 1278 of_node_put(priv->phy_node); 1260 1279 if (priv->tbi_node) ··· 1274 1293 1275 1294 unregister_netdev(priv->ndev); 1276 1295 unmap_group_regs(priv); 1296 + gfar_free_rx_queues(priv); 1297 + gfar_free_tx_queues(priv); 1277 1298 free_gfar_dev(priv); 1278 1299 1279 1300 return 0;
+31 -3
drivers/net/ethernet/freescale/gianfar.h
··· 9 9 * Maintainer: Kumar Gala 10 10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> 11 11 * 12 - * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. 12 + * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc. 13 13 * 14 14 * This program is free software; you can redistribute it and/or modify it 15 15 * under the terms of the GNU General Public License as published by the ··· 892 892 #define DEFAULT_MAPPING 0xFF 893 893 #endif 894 894 895 - #define ISRG_SHIFT_TX 0x10 896 - #define ISRG_SHIFT_RX 0x18 895 + #define ISRG_RR0 0x80000000 896 + #define ISRG_TR0 0x00800000 897 897 898 898 /* The same driver can operate in two modes */ 899 899 /* SQ_SG_MODE: Single Queue Single Group Mode ··· 1113 1113 unsigned int total_tx_ring_size; 1114 1114 unsigned int total_rx_ring_size; 1115 1115 1116 + u32 rqueue; 1117 + u32 tqueue; 1118 + 1116 1119 /* RX per device parameters */ 1117 1120 unsigned int rx_stash_size; 1118 1121 unsigned int rx_stash_index; ··· 1177 1174 gfar_write(&regs->rqfar, far); 1178 1175 *fcr = gfar_read(&regs->rqfcr); 1179 1176 *fpr = gfar_read(&regs->rqfpr); 1177 + } 1178 + 1179 + static inline void gfar_write_isrg(struct gfar_private *priv) 1180 + { 1181 + struct gfar __iomem *regs = priv->gfargrp[0].regs; 1182 + u32 __iomem *baddr = &regs->isrg0; 1183 + u32 isrg = 0; 1184 + int grp_idx, i; 1185 + 1186 + for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { 1187 + struct gfar_priv_grp *grp = &priv->gfargrp[grp_idx]; 1188 + 1189 + for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { 1190 + isrg |= (ISRG_RR0 >> i); 1191 + } 1192 + 1193 + for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { 1194 + isrg |= (ISRG_TR0 >> i); 1195 + } 1196 + 1197 + gfar_write(baddr, isrg); 1198 + 1199 + baddr++; 1200 + isrg = 0; 1201 + } 1180 1202 } 1181 1203 1182 1204 void lock_rx_qs(struct gfar_private *priv);