
IPoIB/cm: spin_lock_irqsave() -> spin_lock_irq() replacements

There are quite a few places in ipoib_cm.c where we know IRQs are
enabled, because the same function does something that sleeps, so we
can convert several occurrences of spin_lock_irqsave() to a plain
spin_lock_irq() (the pattern is sketched after the sign-off below).
This cleans up the source a little and makes the code smaller too:

add/remove: 0/0 grow/shrink: 1/5 up/down: 3/-51 (-48)
function                                     old     new   delta
ipoib_cm_tx_reap                             403     406      +3
ipoib_cm_stale_task                          146     145      -1
ipoib_cm_dev_stop                            173     172      -1
ipoib_cm_tx_handler                          964     956      -8
ipoib_cm_rx_handler                          956     937     -19
ipoib_cm_skb_reap                            212     190     -22
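
The table above is in the format printed by the kernel's
scripts/bloat-o-meter. Assuming the pre- and post-patch builds of the
object file were kept around (the two file names below are
placeholders, not files from the tree), it can be regenerated with:

    ./scripts/bloat-o-meter ipoib_cm.o.before ipoib_cm.o.after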

Signed-off-by: Roland Dreier <rolandd@cisco.com>
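
A minimal sketch of the conversion pattern, reusing the passive_ids
critical section from the patch; the two wrapper functions are
illustrative, not code from ipoib_cm.c. spin_lock_irqsave() must save
the caller's IRQ state because it may be entered with IRQs either on
or off, while spin_lock_irq()/spin_unlock_irq() blindly disable and
re-enable IRQs, which is only correct when IRQs are known to be on:

    /* Illustrative only -- not part of the patch. */
    static void example_before(struct ipoib_dev_priv *priv, struct ipoib_cm_rx *p)
    {
            unsigned long flags;

            spin_lock_irqsave(&priv->lock, flags);      /* save state, disable IRQs */
            list_add(&p->list, &priv->cm.passive_ids);
            spin_unlock_irqrestore(&priv->lock, flags); /* restore saved state */
    }

    static void example_after(struct ipoib_dev_priv *priv, struct ipoib_cm_rx *p)
    {
            spin_lock_irq(&priv->lock);                 /* disable IRQs unconditionally */
            list_add(&p->list, &priv->cm.passive_ids);
            spin_unlock_irq(&priv->lock);               /* re-enable IRQs unconditionally */
    }

Dropping the on-stack flags word and the save/restore work presumably
accounts for most of the size reduction reported above.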

+24 -32
drivers/infiniband/ulp/ipoib/ipoib_cm.c
···
         struct net_device *dev = cm_id->context;
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct ipoib_cm_rx *p;
-        unsigned long flags;
         unsigned psn;
         int ret;
···
 
         cm_id->context = p;
         p->jiffies = jiffies;
-        spin_lock_irqsave(&priv->lock, flags);
+        spin_lock_irq(&priv->lock);
         list_add(&p->list, &priv->cm.passive_ids);
-        spin_unlock_irqrestore(&priv->lock, flags);
+        spin_unlock_irq(&priv->lock);
         queue_delayed_work(ipoib_workqueue,
                            &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
         return 0;
···
 {
         struct ipoib_cm_rx *p;
         struct ipoib_dev_priv *priv;
-        unsigned long flags;
         int ret;
 
         switch (event->event) {
···
         case IB_CM_REJ_RECEIVED:
                 p = cm_id->context;
                 priv = netdev_priv(p->dev);
-                spin_lock_irqsave(&priv->lock, flags);
+                spin_lock_irq(&priv->lock);
                 if (list_empty(&p->list))
                         ret = 0; /* Connection is going away already. */
                 else {
                         list_del_init(&p->list);
                         ret = -ECONNRESET;
                 }
-                spin_unlock_irqrestore(&priv->lock, flags);
+                spin_unlock_irq(&priv->lock);
                 if (ret) {
                         ib_destroy_qp(p->qp);
                         kfree(p);
···
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct ipoib_cm_rx *p;
-        unsigned long flags;
 
         if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
                 return;
 
         ib_destroy_cm_id(priv->cm.id);
-        spin_lock_irqsave(&priv->lock, flags);
+        spin_lock_irq(&priv->lock);
         while (!list_empty(&priv->cm.passive_ids)) {
                 p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
                 list_del_init(&p->list);
-                spin_unlock_irqrestore(&priv->lock, flags);
+                spin_unlock_irq(&priv->lock);
                 ib_destroy_cm_id(p->id);
                 ib_destroy_qp(p->qp);
                 kfree(p);
-                spin_lock_irqsave(&priv->lock, flags);
+                spin_lock_irq(&priv->lock);
         }
-        spin_unlock_irqrestore(&priv->lock, flags);
+        spin_unlock_irq(&priv->lock);
 
         cancel_delayed_work(&priv->cm.stale_task);
 }
···
         struct ib_qp_attr qp_attr;
         int qp_attr_mask, ret;
         struct sk_buff *skb;
-        unsigned long flags;
 
         p->mtu = be32_to_cpu(data->mtu);
···
 
         skb_queue_head_init(&skqueue);
 
-        spin_lock_irqsave(&priv->lock, flags);
+        spin_lock_irq(&priv->lock);
         set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
         if (p->neigh)
                 while ((skb = __skb_dequeue(&p->neigh->queue)))
                         __skb_queue_tail(&skqueue, skb);
-        spin_unlock_irqrestore(&priv->lock, flags);
+        spin_unlock_irq(&priv->lock);
 
         while ((skb = __skb_dequeue(&skqueue))) {
                 skb->dev = p->dev;
···
         struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
         struct net_device *dev = priv->dev;
         struct ipoib_neigh *neigh;
-        unsigned long flags;
         int ret;
 
         switch (event->event) {
···
         case IB_CM_REJ_RECEIVED:
         case IB_CM_TIMEWAIT_EXIT:
                 ipoib_dbg(priv, "CM error %d.\n", event->event);
-                spin_lock_irqsave(&priv->tx_lock, flags);
+                spin_lock_irq(&priv->tx_lock);
                 spin_lock(&priv->lock);
                 neigh = tx->neigh;
···
                 }
 
                 spin_unlock(&priv->lock);
-                spin_unlock_irqrestore(&priv->tx_lock, flags);
+                spin_unlock_irq(&priv->tx_lock);
                 break;
         default:
                 break;
···
         struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                    cm.reap_task);
         struct ipoib_cm_tx *p;
-        unsigned long flags;
 
-        spin_lock_irqsave(&priv->tx_lock, flags);
+        spin_lock_irq(&priv->tx_lock);
         spin_lock(&priv->lock);
         while (!list_empty(&priv->cm.reap_list)) {
                 p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
                 list_del(&p->list);
                 spin_unlock(&priv->lock);
-                spin_unlock_irqrestore(&priv->tx_lock, flags);
+                spin_unlock_irq(&priv->tx_lock);
                 ipoib_cm_tx_destroy(p);
-                spin_lock_irqsave(&priv->tx_lock, flags);
+                spin_lock_irq(&priv->tx_lock);
                 spin_lock(&priv->lock);
         }
         spin_unlock(&priv->lock);
-        spin_unlock_irqrestore(&priv->tx_lock, flags);
+        spin_unlock_irq(&priv->tx_lock);
 }
 
 static void ipoib_cm_skb_reap(struct work_struct *work)
···
                                                    cm.skb_task);
         struct net_device *dev = priv->dev;
         struct sk_buff *skb;
-        unsigned long flags;
 
         unsigned mtu = priv->mcast_mtu;
 
-        spin_lock_irqsave(&priv->tx_lock, flags);
+        spin_lock_irq(&priv->tx_lock);
         spin_lock(&priv->lock);
         while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
                 spin_unlock(&priv->lock);
-                spin_unlock_irqrestore(&priv->tx_lock, flags);
+                spin_unlock_irq(&priv->tx_lock);
                 if (skb->protocol == htons(ETH_P_IP))
                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
···
                         icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
 #endif
                 dev_kfree_skb_any(skb);
-                spin_lock_irqsave(&priv->tx_lock, flags);
+                spin_lock_irq(&priv->tx_lock);
                 spin_lock(&priv->lock);
         }
         spin_unlock(&priv->lock);
-        spin_unlock_irqrestore(&priv->tx_lock, flags);
+        spin_unlock_irq(&priv->tx_lock);
 }
 
 void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
···
         struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                    cm.stale_task.work);
         struct ipoib_cm_rx *p;
-        unsigned long flags;
 
-        spin_lock_irqsave(&priv->lock, flags);
+        spin_lock_irq(&priv->lock);
         while (!list_empty(&priv->cm.passive_ids)) {
                 /* List is sorted by LRU, start from tail,
                  * stop when we see a recently used entry */
···
                 if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
                         break;
                 list_del_init(&p->list);
-                spin_unlock_irqrestore(&priv->lock, flags);
+                spin_unlock_irq(&priv->lock);
                 ib_destroy_cm_id(p->id);
                 ib_destroy_qp(p->qp);
                 kfree(p);
-                spin_lock_irqsave(&priv->lock, flags);
+                spin_lock_irq(&priv->lock);
         }
-        spin_unlock_irqrestore(&priv->lock, flags);
+        spin_unlock_irq(&priv->lock);
 }
···
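
Why the "we know IRQs are enabled" justification matters: since
spin_unlock_irq() re-enables IRQs unconditionally, this conversion
would be a bug on any path that can run with IRQs disabled. An
illustrative sketch of the hazard (hypothetical functions, not code
from the patch):

    /*
     * Illustrative only: the plain _irq variants corrupt the IRQ
     * state of a caller that already has IRQs disabled.
     */
    static void callee(spinlock_t *lock)
    {
            spin_lock_irq(lock);    /* disables IRQs */
            /* ... critical section ... */
            spin_unlock_irq(lock);  /* IRQs unconditionally back on here */
    }

    static void caller_with_irqs_off(spinlock_t *lock)
    {
            local_irq_disable();
            callee(lock);           /* BUG: returns with IRQs enabled */
            local_irq_enable();
    }

Functions that sleep cannot sit on such a path -- sleeping with IRQs
disabled is itself a bug -- which is exactly the property each
converted function in this patch relies on.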