Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mld: add new workqueues for processing mld events

When query/report packets are received, the mld module processes them.
But they are processed in BH (softirq) context, where sleepable
functions cannot be used. So, in order to switch context, two
workqueues are added which process the query and report events.
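
For background: BH context must not sleep, so sleepable calls such as rtnl_lock() or GFP_KERNEL allocations are unavailable there, while a work item runs in process context where blocking is allowed. A minimal sketch of this kind of hand-off (illustrative only; the struct and function names here are hypothetical, not the ones added by this patch):

/* Illustration: defer packet handling from BH (softirq) to process context. */
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct rx_defer {
	struct sk_buff_head	queue;	/* skbs handed over from BH */
	spinlock_t		lock;	/* protects queue */
	struct work_struct	work;	/* drains the queue later */
};

/* Worker: runs in process context, sleeping locks/allocations are fine here. */
static void rx_defer_work(struct work_struct *work)
{
	struct rx_defer *rd = container_of(work, struct rx_defer, work);
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&rd->lock);
		skb = __skb_dequeue(&rd->queue);
		spin_unlock_bh(&rd->lock);
		if (!skb)
			break;
		/* ... real processing, may sleep ... */
		consume_skb(skb);
	}
}

static void rx_defer_init(struct rx_defer *rd)
{
	skb_queue_head_init(&rd->queue);
	spin_lock_init(&rd->lock);
	INIT_WORK(&rd->work, rx_defer_work);
}

/* Receive path: called in BH context, so only enqueue and kick the worker. */
static void rx_defer_packet(struct rx_defer *rd, struct sk_buff *skb)
{
	spin_lock_bh(&rd->lock);
	__skb_queue_tail(&rd->queue, skb);
	spin_unlock_bh(&rd->lock);
	schedule_work(&rd->work);
}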

In struct inet6_dev, mc_{query | report}_queue are added as
per-interface queues, and mc_{query | report}_work are the
corresponding delayed work structures.

When a query or report event is received, the skb is queued onto the
proper queue and the worker function is scheduled immediately.
The queues and pending work are protected by a spinlock,
mc_{query | report}_lock, and the worker functions themselves run under RTNL.
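
Abridged from the net/ipv6/mcast.c hunks below, the query side of that flow looks like this (the report side is symmetric): the receive path, still in BH context, only parks the skb on the per-interface queue and schedules the work item, holding a reference on the inet6_dev for the pending work; the worker then moves a bounded batch onto a private list (MLD_MAX_QUEUE skbs per run, with the queue itself capped at MLD_MAX_SKBS) and does the actual protocol processing under RTNL:

/* Enqueue side, called from BH context (abridged from the patch). */
int igmp6_event_query(struct sk_buff *skb)
{
	struct inet6_dev *idev = __in6_dev_get(skb->dev);

	if (!idev)
		return -EINVAL;
	if (idev->dead) {
		kfree_skb(skb);
		return -ENODEV;
	}

	spin_lock_bh(&idev->mc_query_lock);
	if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
		__skb_queue_tail(&idev->mc_query_queue, skb);
		/* hold a reference on idev for the newly scheduled work */
		if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
			in6_dev_hold(idev);
	}
	spin_unlock_bh(&idev->mc_query_lock);
	return 0;
}

/* Worker side, process context (abridged): drain a bounded batch under RTNL. */
static void mld_query_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev, mc_query_work);
	struct sk_buff_head q;
	struct sk_buff *skb;
	bool rework = false;
	int cnt = 0;

	skb_queue_head_init(&q);
	spin_lock_bh(&idev->mc_query_lock);
	while ((skb = __skb_dequeue(&idev->mc_query_queue))) {
		__skb_queue_tail(&q, skb);
		if (++cnt >= MLD_MAX_QUEUE) {
			rework = true;	/* more queued, run again */
			schedule_delayed_work(&idev->mc_query_work, 0);
			break;
		}
	}
	spin_unlock_bh(&idev->mc_query_lock);

	rtnl_lock();
	while ((skb = __skb_dequeue(&q)))
		__mld_query_work(skb);	/* may use sleepable APIs now */
	rtnl_unlock();

	if (!rework)
		in6_dev_put(idev);
}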

Suggested-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: Taehee Yoo <ap420073@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Taehee Yoo, committed by David S. Miller
f185de28 88e2ca30

4 files changed, +210 -86
include/net/if_inet6.h (+8 -1)
@@ -125,7 +125,6 @@
 	unsigned int		mca_flags;
 	int			mca_users;
 	refcount_t		mca_refcnt;
-	spinlock_t		mca_lock;
 	unsigned long		mca_cstamp;
 	unsigned long		mca_tstamp;
 	struct rcu_head		rcu;
@@ -182,6 +183,14 @@
 	struct delayed_work	mc_gq_work;	/* general query work */
 	struct delayed_work	mc_ifc_work;	/* interface change work */
 	struct delayed_work	mc_dad_work;	/* dad complete mc work */
+	struct delayed_work	mc_query_work;	/* mld query work */
+	struct delayed_work	mc_report_work;	/* mld report work */
+
+	struct sk_buff_head	mc_query_queue;		/* mld query queue */
+	struct sk_buff_head	mc_report_queue;	/* mld report queue */
+
+	spinlock_t		mc_query_lock;	/* mld query queue lock */
+	spinlock_t		mc_report_lock;	/* mld query report lock */
 
 	struct ifacaddr6	*ac_list;
 	rwlock_t		lock;
include/net/mld.h (+3)
@@ -92,6 +92,9 @@
 #define MLD_EXP_MIN_LIMIT	32768UL
 #define MLDV1_MRD_MAX_COMPAT	(MLD_EXP_MIN_LIMIT - 1)
 
+#define MLD_MAX_QUEUE		8
+#define MLD_MAX_SKBS		32
+
 static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2)
 {
 	/* RFC3810, 5.1.3. Maximum Response Code */
net/ipv6/icmp.c (+2 -2)
@@ -944,11 +944,11 @@
 
 	case ICMPV6_MGM_QUERY:
 		igmp6_event_query(skb);
-		break;
+		return 0;
 
 	case ICMPV6_MGM_REPORT:
 		igmp6_event_report(skb);
-		break;
+		return 0;
 
 	case ICMPV6_MGM_REDUCTION:
 	case ICMPV6_NI_QUERY:
net/ipv6/mcast.c (+197 -83)
@@ -439,7 +439,7 @@
 
 		if (psl)
 			count += psl->sl_max;
-		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
+		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_KERNEL);
 		if (!newpsl) {
 			err = -ENOBUFS;
 			goto done;
@@ -517,7 +517,7 @@
 	}
 	if (gsf->gf_numsrc) {
 		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
-				      GFP_ATOMIC);
+				      GFP_KERNEL);
 		if (!newpsl) {
 			err = -ENOBUFS;
 			goto done;
@@ -659,13 +659,11 @@
 	    IPV6_ADDR_SCOPE_LINKLOCAL)
 		return;
 
-	spin_lock_bh(&mc->mca_lock);
 	if (!(mc->mca_flags&MAF_LOADED)) {
 		mc->mca_flags |= MAF_LOADED;
 		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
 			dev_mc_add(dev, buf);
 	}
-	spin_unlock_bh(&mc->mca_lock);
 
 	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
 		return;
@@ -693,24 +695,20 @@
 	    IPV6_ADDR_SCOPE_LINKLOCAL)
 		return;
 
-	spin_lock_bh(&mc->mca_lock);
 	if (mc->mca_flags&MAF_LOADED) {
 		mc->mca_flags &= ~MAF_LOADED;
 		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
 			dev_mc_del(dev, buf);
 	}
 
-	spin_unlock_bh(&mc->mca_lock);
 	if (mc->mca_flags & MAF_NOREPORT)
 		return;
 
 	if (!mc->idev->dead)
 		igmp6_leave_group(mc);
 
-	spin_lock_bh(&mc->mca_lock);
 	if (cancel_delayed_work(&mc->mca_work))
 		refcount_dec(&mc->mca_refcnt);
-	spin_unlock_bh(&mc->mca_lock);
 }
 
 /*
@@ -722,12 +728,10 @@
 	 * for deleted items allows change reports to use common code with
 	 * non-deleted or query-response MCA's.
 	 */
-	pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
+	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
 	if (!pmc)
 		return;
 
-	spin_lock_bh(&im->mca_lock);
-	spin_lock_init(&pmc->mca_lock);
 	pmc->idev = im->idev;
 	in6_dev_hold(idev);
 	pmc->mca_addr = im->mca_addr;
@@ -744,7 +752,6 @@
 		for_each_psf_rtnl(pmc, psf)
 			psf->sf_crcount = pmc->mca_crcount;
 	}
-	spin_unlock_bh(&im->mca_lock);
 
 	rcu_assign_pointer(pmc->next, idev->mc_tomb);
 	rcu_assign_pointer(idev->mc_tomb, pmc);
@@ -768,7 +777,6 @@
 		rcu_assign_pointer(idev->mc_tomb, pmc->next);
 	}
 
-	spin_lock_bh(&im->mca_lock);
 	if (pmc) {
 		im->idev = pmc->idev;
 		if (im->mca_sfmode == MCAST_INCLUDE) {
@@ -789,7 +799,6 @@
 		ip6_mc_clear_src(pmc);
 		kfree_rcu(pmc, rcu);
 	}
-	spin_unlock_bh(&im->mca_lock);
 }
 
 static void mld_clear_delrec(struct inet6_dev *idev)
@@ -809,15 +820,33 @@
 	for_each_mc_rtnl(idev, pmc) {
 		struct ip6_sf_list *psf, *psf_next;
 
-		spin_lock_bh(&pmc->mca_lock);
 		psf = rtnl_dereference(pmc->mca_tomb);
 		RCU_INIT_POINTER(pmc->mca_tomb, NULL);
-		spin_unlock_bh(&pmc->mca_lock);
 		for (; psf; psf = psf_next) {
 			psf_next = rtnl_dereference(psf->sf_next);
 			kfree_rcu(psf, rcu);
 		}
 	}
+}
+
+static void mld_clear_query(struct inet6_dev *idev)
+{
+	struct sk_buff *skb;
+
+	spin_lock_bh(&idev->mc_query_lock);
+	while ((skb = __skb_dequeue(&idev->mc_query_queue)))
+		kfree_skb(skb);
+	spin_unlock_bh(&idev->mc_query_lock);
+}
+
+static void mld_clear_report(struct inet6_dev *idev)
+{
+	struct sk_buff *skb;
+
+	spin_lock_bh(&idev->mc_report_lock);
+	while ((skb = __skb_dequeue(&idev->mc_report_queue)))
+		kfree_skb(skb);
+	spin_unlock_bh(&idev->mc_report_lock);
 }
 
 static void mca_get(struct ifmcaddr6 *mc)
@@ -857,7 +850,7 @@
 {
 	struct ifmcaddr6 *mc;
 
-	mc = kzalloc(sizeof(*mc), GFP_ATOMIC);
+	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
 	if (!mc)
 		return NULL;
 
@@ -869,7 +862,6 @@
 	/* mca_stamp should be updated upon changes */
 	mc->mca_cstamp = mc->mca_tstamp = jiffies;
 	refcount_set(&mc->mca_refcnt, 1);
-	spin_lock_init(&mc->mca_lock);
 
 	mc->mca_sfmode = mode;
 	mc->mca_sfcount[mode] = 1;
@@ -1001,7 +995,6 @@
 		if (src_addr && !ipv6_addr_any(src_addr)) {
 			struct ip6_sf_list *psf;
 
-			spin_lock_bh(&mc->mca_lock);
 			for_each_psf_rcu(mc, psf) {
 				if (ipv6_addr_equal(&psf->sf_addr, src_addr))
 					break;
@@ -1011,7 +1006,6 @@
 					mc->mca_sfcount[MCAST_EXCLUDE];
 			else
 				rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
-			spin_unlock_bh(&mc->mca_lock);
 		} else
 			rv = true; /* don't filter unspecified source */
 	}
@@ -1064,6 +1060,20 @@
 		__in6_dev_put(idev);
 }
 
+static void mld_query_stop_work(struct inet6_dev *idev)
+{
+	spin_lock_bh(&idev->mc_query_lock);
+	if (cancel_delayed_work(&idev->mc_query_work))
+		__in6_dev_put(idev);
+	spin_unlock_bh(&idev->mc_query_lock);
+}
+
+static void mld_report_stop_work(struct inet6_dev *idev)
+{
+	if (cancel_delayed_work_sync(&idev->mc_report_work))
+		__in6_dev_put(idev);
+}
+
 /*
  *	IGMP handling (alias multicast ICMPv6 messages)
  */
@@ -1111,7 +1093,7 @@
 	int i, scount;
 
 	scount = 0;
-	for_each_psf_rcu(pmc, psf) {
+	for_each_psf_rtnl(pmc, psf) {
 		if (scount == nsrcs)
 			break;
 		for (i = 0; i < nsrcs; i++) {
@@ -1144,7 +1126,7 @@
 	/* mark INCLUDE-mode sources */
 
 	scount = 0;
-	for_each_psf_rcu(pmc, psf) {
+	for_each_psf_rtnl(pmc, psf) {
 		if (scount == nsrcs)
 			break;
 		for (i = 0; i < nsrcs; i++) {
@@ -1336,18 +1318,41 @@
 /* called with rcu_read_lock() */
 int igmp6_event_query(struct sk_buff *skb)
 {
+	struct inet6_dev *idev = __in6_dev_get(skb->dev);
+
+	if (!idev)
+		return -EINVAL;
+
+	if (idev->dead) {
+		kfree_skb(skb);
+		return -ENODEV;
+	}
+
+	spin_lock_bh(&idev->mc_query_lock);
+	if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
+		__skb_queue_tail(&idev->mc_query_queue, skb);
+		if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
+			in6_dev_hold(idev);
+	}
+	spin_unlock_bh(&idev->mc_query_lock);
+
+	return 0;
+}
+
+static void __mld_query_work(struct sk_buff *skb)
+{
 	struct mld2_query *mlh2 = NULL;
-	struct ifmcaddr6 *ma;
 	const struct in6_addr *group;
 	unsigned long max_delay;
 	struct inet6_dev *idev;
+	struct ifmcaddr6 *ma;
 	struct mld_msg *mld;
 	int group_type;
 	int mark = 0;
 	int len, err;
 
 	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
-		return -EINVAL;
+		goto out;
 
 	/* compute payload length excluding extension headers */
 	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
@@ -1387,11 +1346,11 @@
 	    ipv6_hdr(skb)->hop_limit != 1 ||
 	    !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
 	    IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
-		return -EINVAL;
+		goto out;
 
 	idev = __in6_dev_get(skb->dev);
 	if (!idev)
-		return 0;
+		goto out;
 
 	mld = (struct mld_msg *)icmp6_hdr(skb);
 	group = &mld->mld_mca;
@@ -1399,59 +1358,56 @@
 
 	if (group_type != IPV6_ADDR_ANY &&
 	    !(group_type&IPV6_ADDR_MULTICAST))
-		return -EINVAL;
+		goto out;
 
 	if (len < MLD_V1_QUERY_LEN) {
-		return -EINVAL;
+		goto out;
 	} else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
 		err = mld_process_v1(idev, mld, &max_delay,
 				     len == MLD_V1_QUERY_LEN);
 		if (err < 0)
-			return err;
+			goto out;
 	} else if (len >= MLD_V2_QUERY_LEN_MIN) {
 		int srcs_offset = sizeof(struct mld2_query) -
 				  sizeof(struct icmp6hdr);
 
 		if (!pskb_may_pull(skb, srcs_offset))
-			return -EINVAL;
+			goto out;
 
 		mlh2 = (struct mld2_query *)skb_transport_header(skb);
 
 		err = mld_process_v2(idev, mlh2, &max_delay);
 		if (err < 0)
-			return err;
+			goto out;
 
 		if (group_type == IPV6_ADDR_ANY) { /* general query */
 			if (mlh2->mld2q_nsrcs)
-				return -EINVAL; /* no sources allowed */
+				goto out; /* no sources allowed */
 
 			mld_gq_start_work(idev);
-			return 0;
+			goto out;
 		}
 		/* mark sources to include, if group & source-specific */
 		if (mlh2->mld2q_nsrcs != 0) {
 			if (!pskb_may_pull(skb, srcs_offset +
 			    ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
-				return -EINVAL;
+				goto out;
 
 			mlh2 = (struct mld2_query *)skb_transport_header(skb);
 			mark = 1;
 		}
 	} else {
-		return -EINVAL;
+		goto out;
 	}
 
 	if (group_type == IPV6_ADDR_ANY) {
-		for_each_mc_rcu(idev, ma) {
-			spin_lock_bh(&ma->mca_lock);
+		for_each_mc_rtnl(idev, ma) {
 			igmp6_group_queried(ma, max_delay);
-			spin_unlock_bh(&ma->mca_lock);
 		}
 	} else {
-		for_each_mc_rcu(idev, ma) {
+		for_each_mc_rtnl(idev, ma) {
 			if (!ipv6_addr_equal(group, &ma->mca_addr))
 				continue;
-			spin_lock_bh(&ma->mca_lock);
 			if (ma->mca_flags & MAF_TIMER_RUNNING) {
 				/* gsquery <- gsquery && mark */
 				if (!mark)
@@ -1463,16 +1425,72 @@
 			if (!(ma->mca_flags & MAF_GSQUERY) ||
 			    mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
 				igmp6_group_queried(ma, max_delay);
-			spin_unlock_bh(&ma->mca_lock);
 			break;
 		}
 	}
 
-	return 0;
+out:
+	consume_skb(skb);
+}
+
+static void mld_query_work(struct work_struct *work)
+{
+	struct inet6_dev *idev = container_of(to_delayed_work(work),
+					      struct inet6_dev,
+					      mc_query_work);
+	struct sk_buff_head q;
+	struct sk_buff *skb;
+	bool rework = false;
+	int cnt = 0;
+
+	skb_queue_head_init(&q);
+
+	spin_lock_bh(&idev->mc_query_lock);
+	while ((skb = __skb_dequeue(&idev->mc_query_queue))) {
+		__skb_queue_tail(&q, skb);
+
+		if (++cnt >= MLD_MAX_QUEUE) {
+			rework = true;
+			schedule_delayed_work(&idev->mc_query_work, 0);
+			break;
+		}
+	}
+	spin_unlock_bh(&idev->mc_query_lock);
+
+	rtnl_lock();
+	while ((skb = __skb_dequeue(&q)))
+		__mld_query_work(skb);
+	rtnl_unlock();
+
+	if (!rework)
+		in6_dev_put(idev);
 }
 
 /* called with rcu_read_lock() */
 int igmp6_event_report(struct sk_buff *skb)
+{
+	struct inet6_dev *idev = __in6_dev_get(skb->dev);
+
+	if (!idev)
+		return -EINVAL;
+
+	if (idev->dead) {
+		kfree_skb(skb);
+		return -ENODEV;
+	}
+
+	spin_lock_bh(&idev->mc_report_lock);
+	if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) {
+		__skb_queue_tail(&idev->mc_report_queue, skb);
+		if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0))
+			in6_dev_hold(idev);
+	}
+	spin_unlock_bh(&idev->mc_report_lock);
+
+	return 0;
+}
+
+static void __mld_report_work(struct sk_buff *skb)
 {
 	struct ifmcaddr6 *ma;
 	struct inet6_dev *idev;
@@ -1537,15 +1443,15 @@
 
 	/* Our own report looped back. Ignore it. */
 	if (skb->pkt_type == PACKET_LOOPBACK)
-		return 0;
+		goto out;
 
 	/* send our report if the MC router may not have heard this report */
 	if (skb->pkt_type != PACKET_MULTICAST &&
 	    skb->pkt_type != PACKET_BROADCAST)
-		return 0;
+		goto out;
 
 	if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
-		return -EINVAL;
+		goto out;
 
 	mld = (struct mld_msg *)icmp6_hdr(skb);
 
@@ -1553,28 +1459,60 @@
 	addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
 	if (addr_type != IPV6_ADDR_ANY &&
 	    !(addr_type&IPV6_ADDR_LINKLOCAL))
-		return -EINVAL;
+		goto out;
 
 	idev = __in6_dev_get(skb->dev);
 	if (!idev)
-		return -ENODEV;
+		goto out;
 
 	/*
 	 *	Cancel the work for this group
 	 */
 
-	for_each_mc_rcu(idev, ma) {
+	for_each_mc_rtnl(idev, ma) {
 		if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
-			spin_lock(&ma->mca_lock);
 			if (cancel_delayed_work(&ma->mca_work))
 				refcount_dec(&ma->mca_refcnt);
 			ma->mca_flags &= ~(MAF_LAST_REPORTER |
 					   MAF_TIMER_RUNNING);
-			spin_unlock(&ma->mca_lock);
 			break;
 		}
 	}
-	return 0;
+
+out:
+	consume_skb(skb);
+}
+
+static void mld_report_work(struct work_struct *work)
+{
+	struct inet6_dev *idev = container_of(to_delayed_work(work),
+					      struct inet6_dev,
+					      mc_report_work);
+	struct sk_buff_head q;
+	struct sk_buff *skb;
+	bool rework = false;
+	int cnt = 0;
+
+	skb_queue_head_init(&q);
+	spin_lock_bh(&idev->mc_report_lock);
+	while ((skb = __skb_dequeue(&idev->mc_report_queue))) {
+		__skb_queue_tail(&q, skb);
+
+		if (++cnt >= MLD_MAX_QUEUE) {
+			rework = true;
+			schedule_delayed_work(&idev->mc_report_work, 0);
+			break;
+		}
+	}
+	spin_unlock_bh(&idev->mc_report_lock);
+
+	rtnl_lock();
+	while ((skb = __skb_dequeue(&q)))
+		__mld_report_work(skb);
+	rtnl_unlock();
+
+	if (!rework)
+		in6_dev_put(idev);
 }
 
 static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
@@ -1973,22 +1847,18 @@
 		for_each_mc_rtnl(idev, pmc) {
 			if (pmc->mca_flags & MAF_NOREPORT)
 				continue;
-			spin_lock_bh(&pmc->mca_lock);
 			if (pmc->mca_sfcount[MCAST_EXCLUDE])
 				type = MLD2_MODE_IS_EXCLUDE;
 			else
 				type = MLD2_MODE_IS_INCLUDE;
 			skb = add_grec(skb, pmc, type, 0, 0, 0);
-			spin_unlock_bh(&pmc->mca_lock);
 		}
 	} else {
-		spin_lock_bh(&pmc->mca_lock);
 		if (pmc->mca_sfcount[MCAST_EXCLUDE])
 			type = MLD2_MODE_IS_EXCLUDE;
 		else
 			type = MLD2_MODE_IS_INCLUDE;
 		skb = add_grec(skb, pmc, type, 0, 0, 0);
-		spin_unlock_bh(&pmc->mca_lock);
 	}
 	if (skb)
 		mld_sendpack(skb);
@@ -2060,7 +1938,6 @@
 
 	/* change recs */
 	for_each_mc_rtnl(idev, pmc) {
-		spin_lock_bh(&pmc->mca_lock);
 		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
 			type = MLD2_BLOCK_OLD_SOURCES;
 			dtype = MLD2_ALLOW_NEW_SOURCES;
@@ -2079,7 +1958,6 @@
 			skb = add_grec(skb, pmc, type, 0, 0, 0);
 			pmc->mca_crcount--;
 		}
-		spin_unlock_bh(&pmc->mca_lock);
 	}
 	if (!skb)
 		return;
@@ -2192,13 +2072,11 @@
 
 	skb = NULL;
 	for_each_mc_rtnl(idev, pmc) {
-		spin_lock_bh(&pmc->mca_lock);
 		if (pmc->mca_sfcount[MCAST_EXCLUDE])
 			type = MLD2_CHANGE_TO_EXCLUDE;
 		else
 			type = MLD2_ALLOW_NEW_SOURCES;
 		skb = add_grec(skb, pmc, type, 0, 0, 1);
-		spin_unlock_bh(&pmc->mca_lock);
 	}
 	if (skb)
 		mld_sendpack(skb);
@@ -2222,12 +2104,12 @@
 
 	rtnl_lock();
 	mld_send_initial_cr(idev);
-	rtnl_unlock();
 	if (idev->mc_dad_count) {
 		idev->mc_dad_count--;
 		if (idev->mc_dad_count)
 			mld_dad_start_work(idev,
 					   unsolicited_report_interval(idev));
 	}
+	rtnl_unlock();
 	in6_dev_put(idev);
 }
 
@@ -2291,12 +2173,10 @@
 	}
 	if (!pmc)
 		return -ESRCH;
-	spin_lock_bh(&pmc->mca_lock);
 
 	sf_markstate(pmc);
 	if (!delta) {
 		if (!pmc->mca_sfcount[sfmode]) {
-			spin_unlock_bh(&pmc->mca_lock);
 			return -EINVAL;
 		}
 
@@ -2322,6 +2206,5 @@
 		mld_ifc_event(pmc->idev);
 	} else if (sf_setstate(pmc) || changerec)
 		mld_ifc_event(pmc->idev);
-	spin_unlock_bh(&pmc->mca_lock);
 	return err;
 }
@@ -2340,7 +2225,7 @@
 		psf_prev = psf;
 	}
 	if (!psf) {
-		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
+		psf = kzalloc(sizeof(*psf), GFP_KERNEL);
 		if (!psf)
 			return -ENOBUFS;
 
@@ -2419,7 +2304,7 @@
 					    &psf->sf_addr))
 					break;
 			if (!dpsf) {
-				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
+				dpsf = kmalloc(sizeof(*dpsf), GFP_KERNEL);
 				if (!dpsf)
 					continue;
 				*dpsf = *psf;
@@ -2454,7 +2339,6 @@
 	}
 	if (!pmc)
 		return -ESRCH;
-	spin_lock_bh(&pmc->mca_lock);
 
 	sf_markstate(pmc);
 	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
@@ -2490,6 +2376,5 @@
 	} else if (sf_setstate(pmc)) {
 		mld_ifc_event(idev);
 	}
-	spin_unlock_bh(&pmc->mca_lock);
 	return err;
 }
@@ -2528,7 +2415,6 @@
 
 	delay = prandom_u32() % unsolicited_report_interval(ma->idev);
 
-	spin_lock_bh(&ma->mca_lock);
 	if (cancel_delayed_work(&ma->mca_work)) {
 		refcount_dec(&ma->mca_refcnt);
 		delay = ma->mca_work.timer.expires - jiffies;
@@ -2536,7 +2424,6 @@
 	if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
 		refcount_inc(&ma->mca_refcnt);
 	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
-	spin_unlock_bh(&ma->mca_lock);
 }
 
 static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
@@ -2580,9 +2469,8 @@
 
 	rtnl_lock();
 	mld_send_report(idev, NULL);
-	rtnl_unlock();
-
 	idev->mc_gq_running = 0;
+	rtnl_unlock();
 
 	in6_dev_put(idev);
 }
@@ -2594,7 +2484,6 @@
 
 	rtnl_lock();
 	mld_send_cr(idev);
-	rtnl_unlock();
 
 	if (idev->mc_ifc_count) {
 		idev->mc_ifc_count--;
@@ -2601,5 +2492,6 @@
 			mld_ifc_start_work(idev,
 					   unsolicited_report_interval(idev));
 	}
+	rtnl_unlock();
 	in6_dev_put(idev);
 }
@@ -2624,11 +2514,9 @@
 		igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
 	else
 		mld_send_report(ma->idev, ma);
-	rtnl_unlock();
-
-	spin_lock_bh(&ma->mca_lock);
 	ma->mca_flags |= MAF_LAST_REPORTER;
 	ma->mca_flags &= ~MAF_TIMER_RUNNING;
-	spin_unlock_bh(&ma->mca_lock);
+	rtnl_unlock();
+
 	ma_put(ma);
 }
 
@@ -2661,6 +2553,9 @@
 	/* Should stop work after group drop. or we will
 	 * start work again in mld_ifc_event()
 	 */
+	synchronize_net();
+	mld_query_stop_work(idev);
+	mld_report_stop_work(idev);
 	mld_ifc_stop_work(idev);
 	mld_gq_stop_work(idev);
 	mld_dad_stop_work(idev);
@@ -2703,5 +2592,11 @@
 	idev->mc_ifc_count = 0;
 	INIT_DELAYED_WORK(&idev->mc_ifc_work, mld_ifc_work);
 	INIT_DELAYED_WORK(&idev->mc_dad_work, mld_dad_work);
+	INIT_DELAYED_WORK(&idev->mc_query_work, mld_query_work);
+	INIT_DELAYED_WORK(&idev->mc_report_work, mld_report_work);
+	skb_queue_head_init(&idev->mc_query_queue);
+	skb_queue_head_init(&idev->mc_report_queue);
+	spin_lock_init(&idev->mc_query_lock);
+	spin_lock_init(&idev->mc_report_lock);
 	ipv6_mc_reset(idev);
 }
@@ -2723,6 +2606,8 @@
 	/* Deactivate works */
 	ipv6_mc_down(idev);
 	mld_clear_delrec(idev);
+	mld_clear_query(idev);
+	mld_clear_report(idev);
 
 	/* Delete all-nodes address. */
 	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
@@ -2914,14 +2795,12 @@
 
 		im = rcu_dereference(idev->mc_list);
 		if (likely(im)) {
-			spin_lock_bh(&im->mca_lock);
 			psf = rcu_dereference(im->mca_sources);
 			if (likely(psf)) {
 				state->im = im;
 				state->idev = idev;
 				break;
 			}
-			spin_unlock_bh(&im->mca_lock);
 		}
 	}
 	return psf;
@@ -2931,7 +2814,6 @@
 
 	psf = rcu_dereference(psf->sf_next);
 	while (!psf) {
-		spin_unlock_bh(&state->im->mca_lock);
 		state->im = rcu_dereference(state->im->next);
 		while (!state->im) {
 			state->dev = next_net_device_rcu(state->dev);
@@ -2945,7 +2829,6 @@
 		}
 		if (!state->im)
 			break;
-		spin_lock_bh(&state->im->mca_lock);
 		psf = rcu_dereference(state->im->mca_sources);
 	}
 out:
@@ -2983,10 +2868,8 @@
 {
 	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
 
-	if (likely(state->im)) {
-		spin_unlock_bh(&state->im->mca_lock);
+	if (likely(state->im))
 		state->im = NULL;
-	}
 	if (likely(state->idev))
 		state->idev = NULL;
 
@@ -3068,6 +2955,7 @@
 	}
 
 	inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
+	net->ipv6.igmp_sk->sk_allocation = GFP_KERNEL;
 
 	err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
 				   SOCK_RAW, IPPROTO_ICMPV6, net);