Commit c865e5d9 authored by Stephen Hemminger, committed by David S. Miller

[PKT_SCHED] netem: packet corruption option

Here is a new feature for netem in 2.6.16. It adds the ability to
randomly corrupt packets with netem. A version was done by
Hagen Paul Pfeifer, but I redid it to handle the cases of backwards
compatibility with netlink interface and presence of hardware checksum
offload. It is useful for testing hardware offload in devices.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8cbb512e
...@@ -429,6 +429,7 @@ enum ...@@ -429,6 +429,7 @@ enum
TCA_NETEM_CORR, TCA_NETEM_CORR,
TCA_NETEM_DELAY_DIST, TCA_NETEM_DELAY_DIST,
TCA_NETEM_REORDER, TCA_NETEM_REORDER,
TCA_NETEM_CORRUPT,
__TCA_NETEM_MAX, __TCA_NETEM_MAX,
}; };
...@@ -457,6 +458,12 @@ struct tc_netem_reorder ...@@ -457,6 +458,12 @@ struct tc_netem_reorder
__u32 correlation; __u32 correlation;
}; };
/* Netlink payload for TCA_NETEM_CORRUPT: random single-bit packet
 * corruption parameters.  Layout is userspace ABI — do not reorder
 * or resize fields.
 */
struct tc_netem_corrupt
{
	__u32 probability;	/* chance a packet is corrupted, scaled so ~0 == 100% (same fixed-point scheme as the other netem probabilities) */
	__u32 correlation;	/* correlation with the previous corruption decision, fed to init_crandom() */
};
#define NETEM_DIST_SCALE 8192 #define NETEM_DIST_SCALE 8192
#endif #endif
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
#include <net/pkt_sched.h> #include <net/pkt_sched.h>
#define VERSION "1.1" #define VERSION "1.2"
/* Network Emulation Queuing algorithm. /* Network Emulation Queuing algorithm.
==================================== ====================================
...@@ -65,11 +65,12 @@ struct netem_sched_data { ...@@ -65,11 +65,12 @@ struct netem_sched_data {
u32 jitter; u32 jitter;
u32 duplicate; u32 duplicate;
u32 reorder; u32 reorder;
u32 corrupt;
struct crndstate { struct crndstate {
unsigned long last; unsigned long last;
unsigned long rho; unsigned long rho;
} delay_cor, loss_cor, dup_cor, reorder_cor; } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
struct disttable { struct disttable {
u32 size; u32 size;
...@@ -183,6 +184,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) ...@@ -183,6 +184,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->duplicate = dupsave; q->duplicate = dupsave;
} }
/*
* Randomized packet corruption.
* Make copy if needed since we are modifying
* If packet is going to be hardware checksummed, then
* do it now in software before we mangle it.
*/
if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
if (!(skb = skb_unshare(skb, GFP_ATOMIC))
|| (skb->ip_summed == CHECKSUM_HW
&& skb_checksum_help(skb, 0))) {
sch->qstats.drops++;
return NET_XMIT_DROP;
}
skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
}
if (q->gap == 0 /* not doing reordering */ if (q->gap == 0 /* not doing reordering */
|| q->counter < q->gap /* inside last reordering gap */ || q->counter < q->gap /* inside last reordering gap */
|| q->reorder < get_crandom(&q->reorder_cor)) { || q->reorder < get_crandom(&q->reorder_cor)) {
...@@ -382,6 +400,20 @@ static int get_reorder(struct Qdisc *sch, const struct rtattr *attr) ...@@ -382,6 +400,20 @@ static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
return 0; return 0;
} }
/* Parse the TCA_NETEM_CORRUPT netlink attribute and store the
 * corruption probability and correlation in the qdisc private data.
 * Returns 0 on success, -EINVAL if the attribute payload has the
 * wrong size.
 */
static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *c;

	/* Reject malformed attributes before touching the payload. */
	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	c = RTA_DATA(attr);
	q->corrupt = c->probability;
	init_crandom(&q->corrupt_cor, c->correlation);
	return 0;
}
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt) static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{ {
struct netem_sched_data *q = qdisc_priv(sch); struct netem_sched_data *q = qdisc_priv(sch);
...@@ -432,13 +464,19 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt) ...@@ -432,13 +464,19 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
if (ret) if (ret)
return ret; return ret;
} }
if (tb[TCA_NETEM_REORDER-1]) { if (tb[TCA_NETEM_REORDER-1]) {
ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]); ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
if (ret) if (ret)
return ret; return ret;
} }
}
if (tb[TCA_NETEM_CORRUPT-1]) {
ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
if (ret)
return ret;
}
}
return 0; return 0;
} }
...@@ -564,6 +602,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) ...@@ -564,6 +602,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
struct tc_netem_qopt qopt; struct tc_netem_qopt qopt;
struct tc_netem_corr cor; struct tc_netem_corr cor;
struct tc_netem_reorder reorder; struct tc_netem_reorder reorder;
struct tc_netem_corrupt corrupt;
qopt.latency = q->latency; qopt.latency = q->latency;
qopt.jitter = q->jitter; qopt.jitter = q->jitter;
...@@ -582,6 +621,10 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) ...@@ -582,6 +621,10 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
reorder.correlation = q->reorder_cor.rho; reorder.correlation = q->reorder_cor.rho;
RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder); RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
corrupt.probability = q->corrupt;
corrupt.correlation = q->corrupt_cor.rho;
RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
rta->rta_len = skb->tail - b; rta->rta_len = skb->tail - b;
return skb->len; return skb->len;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment