Commit 73ca4918 authored by Patrick McHardy, committed by David S. Miller

[NET_SCHED]: act_api: qdisc internal reclassify support

The behaviour of NET_CLS_POLICE for TC_POLICE_RECLASSIFY was to return
the verdict to the qdisc, which could handle it internally or ignore it.
With NET_CLS_ACT however, tc_classify starts over at the first classifier
and never returns it to the qdisc. This makes it impossible to support
qdisc-internal reclassification, which in turn makes it impossible to
remove the old NET_CLS_POLICE code without breaking compatibility, since
we have two qdiscs (CBQ and ATM) that support this.

This patch adds a tc_classify_compat function that handles
reclassification the old way and changes CBQ and ATM to use it.

This again is of course not fully backwards compatible with the previous
NET_CLS_ACT behaviour. Unfortunately there is no way to fully maintain
compatibility *and* support qdisc internal reclassification with
NET_CLS_ACT, but this seems like the better choice over keeping the two
incompatible options around forever.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f6853e2d
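
As a rough sketch of the pattern this enables (hypothetical names, not part of the patch): a qdisc that wants internal reclassification now calls tc_classify_compat() and decides what TC_ACT_RECLASSIFY means itself, instead of tc_classify() silently restarting at the first filter. The functions example_qdisc_classify() and example_reclassify() below are placeholders; the real changes are the CBQ and ATM hunks further down.

	#include <linux/pkt_cls.h>	/* TC_ACT_* verdicts */
	#include <net/sch_generic.h>	/* struct tcf_proto, struct tcf_result */
	#include <net/pkt_sched.h>	/* tc_classify_compat(), added by this patch */

	/* Placeholder for the qdisc's own reclassification policy, e.g. CBQ's
	 * cbq_reclassify() or ATM's fallback to the excess flow. */
	static unsigned long example_reclassify(struct sk_buff *skb, unsigned long cl)
	{
		return cl;
	}

	/* Hypothetical qdisc classify helper mirroring the new CBQ/ATM pattern. */
	static unsigned long example_qdisc_classify(struct sk_buff *skb,
						    struct tcf_proto *filter_list)
	{
		struct tcf_result res;
		int result;

		result = tc_classify_compat(skb, filter_list, &res);
		if (result < 0)
			return 0;		/* no match: caller falls back to a default class */

	#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_SHOT:
			return 0;		/* packet consumed or to be dropped */
		case TC_ACT_RECLASSIFY:		/* verdict is returned to the qdisc again */
			return example_reclassify(skb, res.class);
		}
	#endif
		return res.class;		/* normal case: class chosen by the filters */
	}
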
@@ -89,8 +89,10 @@ static inline void qdisc_run(struct net_device *dev)
 		__qdisc_run(dev);
 }
 
+extern int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
+			      struct tcf_result *res);
 extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
 		       struct tcf_result *res);
 
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
...
@@ -290,7 +290,7 @@ static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
 {
 	sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
 		goto drop;
...
@@ -1145,47 +1145,57 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
    to this qdisc, (optionally) tests for protocol and asks
    specific classifiers.
  */
+int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
+		       struct tcf_result *res)
+{
+	__be16 protocol = skb->protocol;
+	int err = 0;
+
+	for (; tp; tp = tp->next) {
+		if ((tp->protocol == protocol ||
+		     tp->protocol == htons(ETH_P_ALL)) &&
+		    (err = tp->classify(skb, tp, res)) >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
+				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
+#endif
+			return err;
+		}
+	}
+	return -1;
+}
+EXPORT_SYMBOL(tc_classify_compat);
+
 int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
 		struct tcf_result *res)
 {
 	int err = 0;
-	__be16 protocol = skb->protocol;
+	__be16 protocol;
 #ifdef CONFIG_NET_CLS_ACT
 	struct tcf_proto *otp = tp;
 reclassify:
 #endif
 	protocol = skb->protocol;
 
-	for ( ; tp; tp = tp->next) {
-		if ((tp->protocol == protocol ||
-		     tp->protocol == htons(ETH_P_ALL)) &&
-		    (err = tp->classify(skb, tp, res)) >= 0) {
+	err = tc_classify_compat(skb, tp, res);
 #ifdef CONFIG_NET_CLS_ACT
-			if ( TC_ACT_RECLASSIFY == err) {
-				__u32 verd = (__u32) G_TC_VERD(skb->tc_verd);
-				tp = otp;
-
-				if (MAX_REC_LOOP < verd++) {
-					printk("rule prio %d protocol %02x reclassify is buggy packet dropped\n",
-					       tp->prio&0xffff, ntohs(tp->protocol));
-					return TC_ACT_SHOT;
-				}
-				skb->tc_verd = SET_TC_VERD(skb->tc_verd,verd);
-				goto reclassify;
-			} else {
-				if (skb->tc_verd)
-					skb->tc_verd = SET_TC_VERD(skb->tc_verd,0);
-				return err;
-			}
-#else
-			return err;
-#endif
+	if (err == TC_ACT_RECLASSIFY) {
+		u32 verd = G_TC_VERD(skb->tc_verd);
+		tp = otp;
+
+		if (verd++ >= MAX_REC_LOOP) {
+			printk("rule prio %u protocol %02x reclassify loop, "
+			       "packet dropped\n",
+			       tp->prio&0xffff, ntohs(tp->protocol));
+			return TC_ACT_SHOT;
 		}
+		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
+		goto reclassify;
 	}
-	return -1;
+#endif
+	return err;
 }
+EXPORT_SYMBOL(tc_classify);
 
 void tcf_destroy(struct tcf_proto *tp)
 {
@@ -1252,4 +1262,3 @@ EXPORT_SYMBOL(qdisc_get_rtab);
 EXPORT_SYMBOL(qdisc_put_rtab);
 EXPORT_SYMBOL(register_qdisc);
 EXPORT_SYMBOL(unregister_qdisc);
-EXPORT_SYMBOL(tc_classify);
...
@@ -396,8 +396,9 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	    !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority)))
 		for (flow = p->flows; flow; flow = flow->next)
 			if (flow->filter_list) {
-				result = tc_classify(skb, flow->filter_list,
-						     &res);
+				result = tc_classify_compat(skb,
+							    flow->filter_list,
+							    &res);
 				if (result < 0)
 					continue;
 				flow = (struct atm_flow_data *)res.class;
@@ -420,6 +421,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	case TC_ACT_SHOT:
 		kfree_skb(skb);
 		goto drop;
+	case TC_POLICE_RECLASSIFY:
+		if (flow->excess)
+			flow = flow->excess;
+		else
+			ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
+		break;
 	}
 #elif defined(CONFIG_NET_CLS_POLICE)
 	switch (result) {
...
@@ -82,7 +82,7 @@ struct cbq_class
 	unsigned char		priority2;	/* priority to be used after overlimit */
 	unsigned char		ewma_log;	/* time constant for idle time calculation */
 	unsigned char		ovl_strategy;
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	unsigned char		police;
 #endif
@@ -154,7 +154,7 @@ struct cbq_sched_data
 	struct cbq_class	*active[TC_CBQ_MAXPRIO+1];	/* List of all classes
 								   with backlog */
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	struct cbq_class	*rx_class;
 #endif
 	struct cbq_class	*tx_class;
@@ -196,7 +196,7 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
 	return NULL;
 }
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 
 static struct cbq_class *
 cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
@@ -247,7 +247,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		/*
 		 * Step 2+n. Apply classifier.
 		 */
-		if (!head->filter_list || (result = tc_classify(skb, head->filter_list, &res)) < 0)
+		if (!head->filter_list ||
+		    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
 			goto fallback;
 
 		if ((cl = (void*)res.class) == NULL) {
@@ -267,6 +268,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 			*qerr = NET_XMIT_SUCCESS;
 		case TC_ACT_SHOT:
 			return NULL;
+		case TC_ACT_RECLASSIFY:
+			return cbq_reclassify(skb, cl);
 		}
 #elif defined(CONFIG_NET_CLS_POLICE)
 		switch (result) {
@@ -389,7 +392,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	int ret;
 	struct cbq_class *cl = cbq_classify(skb, sch, &ret);
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	q->rx_class = cl;
 #endif
 	if (cl == NULL) {
@@ -399,7 +402,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return ret;
 	}
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	cl->q->__parent = sch;
 #endif
 	if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
@@ -434,7 +437,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	cbq_mark_toplevel(q, cl);
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	q->rx_class = cl;
 	cl->q->__parent = sch;
 #endif
@@ -670,7 +673,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 }
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 
 static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 {
@@ -1364,7 +1367,7 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
 	return 0;
 }
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
 {
 	cl->police = p->police;
@@ -1532,7 +1535,7 @@ rtattr_failure:
 	return -1;
 }
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
@@ -1558,7 +1561,7 @@ static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
 	    cbq_dump_rate(skb, cl) < 0 ||
 	    cbq_dump_wrr(skb, cl) < 0 ||
 	    cbq_dump_ovl(skb, cl) < 0 ||
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	    cbq_dump_police(skb, cl) < 0 ||
 #endif
 	    cbq_dump_fopt(skb, cl) < 0)
@@ -1653,7 +1656,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 					     cl->classid)) == NULL)
 			return -ENOBUFS;
 	} else {
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 		if (cl->police == TC_POLICE_RECLASSIFY)
 			new->reshape_fail = cbq_reshape_fail;
 #endif
@@ -1718,7 +1721,7 @@ cbq_destroy(struct Qdisc* sch)
 	struct cbq_class *cl;
 	unsigned h;
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	q->rx_class = NULL;
 #endif
 	/*
@@ -1747,7 +1750,7 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
 	struct cbq_class *cl = (struct cbq_class*)arg;
 
 	if (--cl->refcnt == 0) {
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 		struct cbq_sched_data *q = qdisc_priv(sch);
 
 		spin_lock_bh(&sch->dev->queue_lock);
@@ -1795,7 +1798,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 	    RTA_PAYLOAD(tb[TCA_CBQ_WRROPT-1]) < sizeof(struct tc_cbq_wrropt))
 		return -EINVAL;
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	if (tb[TCA_CBQ_POLICE-1] &&
 	    RTA_PAYLOAD(tb[TCA_CBQ_POLICE-1]) < sizeof(struct tc_cbq_police))
 		return -EINVAL;
@@ -1838,7 +1841,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 		if (tb[TCA_CBQ_OVL_STRATEGY-1])
 			cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
 
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 		if (tb[TCA_CBQ_POLICE-1])
 			cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
 #endif
@@ -1931,7 +1934,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
 	cl->overlimit = cbq_ovl_classic;
 	if (tb[TCA_CBQ_OVL_STRATEGY-1])
 		cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	if (tb[TCA_CBQ_POLICE-1])
 		cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
 #endif
@@ -1975,7 +1978,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 		q->tx_class = NULL;
 		q->tx_borrowed = NULL;
 	}
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 	if (q->rx_class == cl)
 		q->rx_class = NULL;
 #endif
...
@@ -125,7 +125,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	if (skb->len > q->max_size) {
 		sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_POLICE
+#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
 		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
 #endif
 			kfree_skb(skb);
...