Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NET_SCHED]: act_api: qdisc internal reclassify support

The behaviour of NET_CLS_POLICE for TC_POLICE_RECLASSIFY was to return
it to the qdisc, which could handle it internally or ignore it. With
NET_CLS_ACT however, tc_classify starts over at the first classifier
and never returns it to the qdisc. This makes it impossible to support
qdisc-internal reclassification, which in turn makes it impossible to
remove the old NET_CLS_POLICE code without breaking compatibility since
we have two qdiscs (CBQ and ATM) that support this.

This patch adds a tc_classify_compat function that handles
reclassification the old way and changes CBQ and ATM to use it.

This again is of course not fully backwards compatible with the previous
NET_CLS_ACT behaviour. Unfortunately there is no way to fully maintain
compatibility *and* support qdisc internal reclassification with
NET_CLS_ACT, but this seems like the better choice over keeping the two
incompatible options around forever.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Patrick McHardy and committed by David S. Miller.
73ca4918 f6853e2d

+72 -51
+3 -1
include/net/pkt_sched.h
··· 89 89 __qdisc_run(dev); 90 90 } 91 91 92 + extern int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp, 93 + struct tcf_result *res); 92 94 extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, 93 - struct tcf_result *res); 95 + struct tcf_result *res); 94 96 95 97 /* Calculate maximal size of packet seen by hard_start_xmit 96 98 routine of this device.
+1 -1
include/net/sch_generic.h
··· 290 290 { 291 291 sch->qstats.drops++; 292 292 293 - #ifdef CONFIG_NET_CLS_POLICE 293 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 294 294 if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) 295 295 goto drop; 296 296
+37 -28
net/sched/sch_api.c
··· 1145 1145 to this qdisc, (optionally) tests for protocol and asks 1146 1146 specific classifiers. 1147 1147 */ 1148 + int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp, 1149 + struct tcf_result *res) 1150 + { 1151 + __be16 protocol = skb->protocol; 1152 + int err = 0; 1153 + 1154 + for (; tp; tp = tp->next) { 1155 + if ((tp->protocol == protocol || 1156 + tp->protocol == htons(ETH_P_ALL)) && 1157 + (err = tp->classify(skb, tp, res)) >= 0) { 1158 + #ifdef CONFIG_NET_CLS_ACT 1159 + if (err != TC_ACT_RECLASSIFY && skb->tc_verd) 1160 + skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0); 1161 + #endif 1162 + return err; 1163 + } 1164 + } 1165 + return -1; 1166 + } 1167 + EXPORT_SYMBOL(tc_classify_compat); 1168 + 1148 1169 int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, 1149 - struct tcf_result *res) 1170 + struct tcf_result *res) 1150 1171 { 1151 1172 int err = 0; 1152 - __be16 protocol = skb->protocol; 1173 + __be16 protocol; 1153 1174 #ifdef CONFIG_NET_CLS_ACT 1154 1175 struct tcf_proto *otp = tp; 1155 1176 reclassify: 1156 1177 #endif 1157 1178 protocol = skb->protocol; 1158 1179 1159 - for ( ; tp; tp = tp->next) { 1160 - if ((tp->protocol == protocol || 1161 - tp->protocol == htons(ETH_P_ALL)) && 1162 - (err = tp->classify(skb, tp, res)) >= 0) { 1180 + err = tc_classify_compat(skb, tp, res); 1163 1181 #ifdef CONFIG_NET_CLS_ACT 1164 - if ( TC_ACT_RECLASSIFY == err) { 1165 - __u32 verd = (__u32) G_TC_VERD(skb->tc_verd); 1166 - tp = otp; 1182 + if (err == TC_ACT_RECLASSIFY) { 1183 + u32 verd = G_TC_VERD(skb->tc_verd); 1184 + tp = otp; 1167 1185 1168 - if (MAX_REC_LOOP < verd++) { 1169 - printk("rule prio %d protocol %02x reclassify is buggy packet dropped\n", 1170 - tp->prio&0xffff, ntohs(tp->protocol)); 1171 - return TC_ACT_SHOT; 1172 - } 1173 - skb->tc_verd = SET_TC_VERD(skb->tc_verd,verd); 1174 - goto reclassify; 1175 - } else { 1176 - if (skb->tc_verd) 1177 - skb->tc_verd = SET_TC_VERD(skb->tc_verd,0); 1178 - return err; 1179 - } 1180 - #else 
1181 - 1182 - return err; 1183 - #endif 1186 + if (verd++ >= MAX_REC_LOOP) { 1187 + printk("rule prio %u protocol %02x reclassify loop, " 1188 + "packet dropped\n", 1189 + tp->prio&0xffff, ntohs(tp->protocol)); 1190 + return TC_ACT_SHOT; 1184 1191 } 1185 - 1192 + skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); 1193 + goto reclassify; 1186 1194 } 1187 - return -1; 1195 + #endif 1196 + return err; 1188 1197 } 1198 + EXPORT_SYMBOL(tc_classify); 1189 1199 1190 1200 void tcf_destroy(struct tcf_proto *tp) 1191 1201 { ··· 1262 1252 EXPORT_SYMBOL(qdisc_put_rtab); 1263 1253 EXPORT_SYMBOL(register_qdisc); 1264 1254 EXPORT_SYMBOL(unregister_qdisc); 1265 - EXPORT_SYMBOL(tc_classify);
+9 -2
net/sched/sch_atm.c
··· 396 396 !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) 397 397 for (flow = p->flows; flow; flow = flow->next) 398 398 if (flow->filter_list) { 399 - result = tc_classify(skb, flow->filter_list, 400 - &res); 399 + result = tc_classify_compat(skb, 400 + flow->filter_list, 401 + &res); 401 402 if (result < 0) 402 403 continue; 403 404 flow = (struct atm_flow_data *)res.class; ··· 421 420 case TC_ACT_SHOT: 422 421 kfree_skb(skb); 423 422 goto drop; 423 + case TC_POLICE_RECLASSIFY: 424 + if (flow->excess) 425 + flow = flow->excess; 426 + else 427 + ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP; 428 + break; 424 429 } 425 430 #elif defined(CONFIG_NET_CLS_POLICE) 426 431 switch (result) {
+21 -18
net/sched/sch_cbq.c
··· 82 82 unsigned char priority2; /* priority to be used after overlimit */ 83 83 unsigned char ewma_log; /* time constant for idle time calculation */ 84 84 unsigned char ovl_strategy; 85 - #ifdef CONFIG_NET_CLS_POLICE 85 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 86 86 unsigned char police; 87 87 #endif 88 88 ··· 154 154 struct cbq_class *active[TC_CBQ_MAXPRIO+1]; /* List of all classes 155 155 with backlog */ 156 156 157 - #ifdef CONFIG_NET_CLS_POLICE 157 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 158 158 struct cbq_class *rx_class; 159 159 #endif 160 160 struct cbq_class *tx_class; ··· 196 196 return NULL; 197 197 } 198 198 199 - #ifdef CONFIG_NET_CLS_POLICE 199 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 200 200 201 201 static struct cbq_class * 202 202 cbq_reclassify(struct sk_buff *skb, struct cbq_class *this) ··· 247 247 /* 248 248 * Step 2+n. Apply classifier. 249 249 */ 250 - if (!head->filter_list || (result = tc_classify(skb, head->filter_list, &res)) < 0) 250 + if (!head->filter_list || 251 + (result = tc_classify_compat(skb, head->filter_list, &res)) < 0) 251 252 goto fallback; 252 253 253 254 if ((cl = (void*)res.class) == NULL) { ··· 268 267 *qerr = NET_XMIT_SUCCESS; 269 268 case TC_ACT_SHOT: 270 269 return NULL; 270 + case TC_ACT_RECLASSIFY: 271 + return cbq_reclassify(skb, cl); 271 272 } 272 273 #elif defined(CONFIG_NET_CLS_POLICE) 273 274 switch (result) { ··· 392 389 int ret; 393 390 struct cbq_class *cl = cbq_classify(skb, sch, &ret); 394 391 395 - #ifdef CONFIG_NET_CLS_POLICE 392 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 396 393 q->rx_class = cl; 397 394 #endif 398 395 if (cl == NULL) { ··· 402 399 return ret; 403 400 } 404 401 405 - #ifdef CONFIG_NET_CLS_POLICE 402 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 406 403 cl->q->__parent = sch; 407 404 #endif 408 405 if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) 
{ ··· 437 434 438 435 cbq_mark_toplevel(q, cl); 439 436 440 - #ifdef CONFIG_NET_CLS_POLICE 437 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 441 438 q->rx_class = cl; 442 439 cl->q->__parent = sch; 443 440 #endif ··· 673 670 } 674 671 675 672 676 - #ifdef CONFIG_NET_CLS_POLICE 673 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 677 674 678 675 static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) 679 676 { ··· 1367 1364 return 0; 1368 1365 } 1369 1366 1370 - #ifdef CONFIG_NET_CLS_POLICE 1367 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 1371 1368 static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p) 1372 1369 { 1373 1370 cl->police = p->police; ··· 1535 1532 return -1; 1536 1533 } 1537 1534 1538 - #ifdef CONFIG_NET_CLS_POLICE 1535 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 1539 1536 static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) 1540 1537 { 1541 1538 unsigned char *b = skb_tail_pointer(skb); ··· 1561 1558 cbq_dump_rate(skb, cl) < 0 || 1562 1559 cbq_dump_wrr(skb, cl) < 0 || 1563 1560 cbq_dump_ovl(skb, cl) < 0 || 1564 - #ifdef CONFIG_NET_CLS_POLICE 1561 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 1565 1562 cbq_dump_police(skb, cl) < 0 || 1566 1563 #endif 1567 1564 cbq_dump_fopt(skb, cl) < 0) ··· 1656 1653 cl->classid)) == NULL) 1657 1654 return -ENOBUFS; 1658 1655 } else { 1659 - #ifdef CONFIG_NET_CLS_POLICE 1656 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 1660 1657 if (cl->police == TC_POLICE_RECLASSIFY) 1661 1658 new->reshape_fail = cbq_reshape_fail; 1662 1659 #endif ··· 1721 1718 struct cbq_class *cl; 1722 1719 unsigned h; 1723 1720 1724 - #ifdef CONFIG_NET_CLS_POLICE 1721 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 1725 1722 q->rx_class = NULL; 1726 1723 #endif 1727 1724 /* ··· 1750 1747 struct cbq_class *cl = (struct cbq_class*)arg; 
1751 1748 1752 1749 if (--cl->refcnt == 0) { 1753 - #ifdef CONFIG_NET_CLS_POLICE 1750 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 1754 1751 struct cbq_sched_data *q = qdisc_priv(sch); 1755 1752 1756 1753 spin_lock_bh(&sch->dev->queue_lock); ··· 1798 1795 RTA_PAYLOAD(tb[TCA_CBQ_WRROPT-1]) < sizeof(struct tc_cbq_wrropt)) 1799 1796 return -EINVAL; 1800 1797 1801 - #ifdef CONFIG_NET_CLS_POLICE 1798 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 1802 1799 if (tb[TCA_CBQ_POLICE-1] && 1803 1800 RTA_PAYLOAD(tb[TCA_CBQ_POLICE-1]) < sizeof(struct tc_cbq_police)) 1804 1801 return -EINVAL; ··· 1841 1838 if (tb[TCA_CBQ_OVL_STRATEGY-1]) 1842 1839 cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1])); 1843 1840 1844 - #ifdef CONFIG_NET_CLS_POLICE 1841 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 1845 1842 if (tb[TCA_CBQ_POLICE-1]) 1846 1843 cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1])); 1847 1844 #endif ··· 1934 1931 cl->overlimit = cbq_ovl_classic; 1935 1932 if (tb[TCA_CBQ_OVL_STRATEGY-1]) 1936 1933 cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1])); 1937 - #ifdef CONFIG_NET_CLS_POLICE 1934 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 1938 1935 if (tb[TCA_CBQ_POLICE-1]) 1939 1936 cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1])); 1940 1937 #endif ··· 1978 1975 q->tx_class = NULL; 1979 1976 q->tx_borrowed = NULL; 1980 1977 } 1981 - #ifdef CONFIG_NET_CLS_POLICE 1978 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 1982 1979 if (q->rx_class == cl) 1983 1980 q->rx_class = NULL; 1984 1981 #endif
+1 -1
net/sched/sch_tbf.c
··· 125 125 126 126 if (skb->len > q->max_size) { 127 127 sch->qstats.drops++; 128 - #ifdef CONFIG_NET_CLS_POLICE 128 + #if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE) 129 129 if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) 130 130 #endif 131 131 kfree_skb(skb);