Commit d6c8f6aa authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

parents d540c742 0dec456d
......@@ -427,6 +427,23 @@ icmp_ignore_bogus_error_responses - BOOLEAN
will avoid log file clutter.
Default: FALSE
icmp_errors_use_inbound_ifaddr - BOOLEAN
If zero, icmp error messages are sent with the primary address of
the exiting interface.
If non-zero, the message will be sent with the primary address of
the interface that received the packet that caused the icmp error.
This is the behaviour many network administrators will expect from
a router, and it can make debugging complicated network layouts
much easier.
Note that if no primary address exists for the interface selected,
then the primary address of the first non-loopback interface that
has one will be used regardless of this setting.
Default: 0
igmp_max_memberships - INTEGER
Change the maximum number of multicast groups we can subscribe to.
Default: 20
......
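(Aside on the documentation hunk above: a minimal userspace sketch of flipping the new icmp_errors_use_inbound_ifaddr knob, assuming the usual procfs path for ipv4 sysctls; this program is purely illustrative and is not part of the commit.)

#include <stdio.h>

int main(void)
{
        /* Enable the router-friendly behaviour: ICMP errors are sourced from
         * the address of the interface that received the offending packet. */
        FILE *f = fopen("/proc/sys/net/ipv4/icmp_errors_use_inbound_ifaddr", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fputs("1\n", f);
        fclose(f);
        return 0;
}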
......@@ -69,8 +69,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "3.48"
#define DRV_MODULE_RELDATE "Jan 16, 2006"
#define DRV_MODULE_VERSION "3.49"
#define DRV_MODULE_RELDATE "Feb 2, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
......@@ -3482,6 +3482,17 @@ static void tg3_reset_task(void *_data)
struct tg3 *tp = _data;
unsigned int restart_timer;
tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
if (!netif_running(tp->dev)) {
tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
tg3_full_unlock(tp);
return;
}
tg3_full_unlock(tp);
tg3_netif_stop(tp);
tg3_full_lock(tp, 1);
......@@ -3494,10 +3505,12 @@ static void tg3_reset_task(void *_data)
tg3_netif_start(tp);
tg3_full_unlock(tp);
if (restart_timer)
mod_timer(&tp->timer, jiffies + 1);
tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
tg3_full_unlock(tp);
}
static void tg3_tx_timeout(struct net_device *dev)
......@@ -6786,6 +6799,13 @@ static int tg3_close(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
/* Calling flush_scheduled_work() may deadlock because
* linkwatch_event() may be on the workqueue and it will try to get
* the rtnl_lock which we are holding.
*/
while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
msleep(1);
netif_stop_queue(dev);
del_timer_sync(&tp->timer);
......@@ -10880,6 +10900,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
if (dev) {
struct tg3 *tp = netdev_priv(dev);
flush_scheduled_work();
unregister_netdev(dev);
if (tp->regs) {
iounmap(tp->regs);
......@@ -10901,6 +10922,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
if (!netif_running(dev))
return 0;
flush_scheduled_work();
tg3_netif_stop(tp);
del_timer_sync(&tp->timer);
......
......@@ -2162,6 +2162,7 @@ struct tg3 {
#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000
#define TG3_FLAG_10_100_ONLY 0x01000000
#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
#define TG3_FLAG_IN_RESET_TASK 0x04000000
#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
#define TG3_FLAG_GOT_SERDES_FLOWCTL 0x20000000
#define TG3_FLAG_SPLIT_MODE 0x40000000
......
......@@ -700,7 +700,7 @@ struct sctp_chunk {
__u8 ecn_ce_done; /* Have we processed the ECN CE bit? */
__u8 pdiscard; /* Discard the whole packet now? */
__u8 tsn_gap_acked; /* Is this chunk acked by a GAP ACK? */
__u8 fast_retransmit; /* Is this chunk fast retransmitted? */
__s8 fast_retransmit; /* Is this chunk fast retransmitted? */
__u8 tsn_missing_report; /* Data chunk missing counter. */
};
......
......@@ -1354,12 +1354,12 @@ extern int sock_get_timestamp(struct sock *, struct timeval __user *);
* Enable debug/info messages
*/
#if 0
#define NETDEBUG(fmt, args...) do { } while (0)
#define LIMIT_NETDEBUG(fmt, args...) do { } while(0)
#else
#ifdef CONFIG_NETDEBUG
#define NETDEBUG(fmt, args...) printk(fmt,##args)
#define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt,##args); } while(0)
#else
#define NETDEBUG(fmt, args...) do { } while (0)
#define LIMIT_NETDEBUG(fmt, args...) do { } while(0)
#endif
/*
......
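(Aside on the NETDEBUG/LIMIT_NETDEBUG hunk above: with CONFIG_NETDEBUG=y both macros expand to printk calls, the LIMIT_ variant rate-limited via net_ratelimit(); without it they compile to empty statements. A hedged sketch of a typical call site follows; the function name and messages are made up for illustration.)

static void example_rx_drop(struct sk_buff *skb, int reason)
{
        /* Compile-time gated debug message. */
        NETDEBUG(KERN_DEBUG "dropping skb %p, reason %d\n", skb, reason);

        /* Rate-limited variant for paths a remote sender can trigger at will. */
        LIMIT_NETDEBUG(KERN_DEBUG "malformed packet on receive path\n");

        kfree_skb(skb);
}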
......@@ -94,10 +94,28 @@ next: bs = bm->bad_shift[text[shift-i]];
return UINT_MAX;
}
static int subpattern(u8 *pattern, int i, int j, int g)
{
int x = i+g-1, y = j+g-1, ret = 0;
while(pattern[x--] == pattern[y--]) {
if (y < 0) {
ret = 1;
break;
}
if (--g == 0) {
ret = pattern[i-1] != pattern[j-1];
break;
}
}
return ret;
}
static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern,
unsigned int len)
{
int i, j, ended, l[ASIZE];
int i, j, g;
for (i = 0; i < ASIZE; i++)
bm->bad_shift[i] = len;
......@@ -106,23 +124,15 @@ static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern,
/* Compute the good shift array, used to match reoccurrences
* of a subpattern */
for (i = 1; i < bm->patlen; i++) {
for (j = 0; j < bm->patlen && bm->pattern[bm->patlen - 1 - j]
== bm->pattern[bm->patlen - 1 - i - j]; j++);
l[i] = j;
}
bm->good_shift[0] = 1;
for (i = 1; i < bm->patlen; i++)
bm->good_shift[i] = bm->patlen;
for (i = bm->patlen - 1; i > 0; i--)
bm->good_shift[l[i]] = i;
ended = 0;
for (i = 0; i < bm->patlen; i++) {
if (l[i] == bm->patlen - 1 - i)
ended = i;
if (ended)
bm->good_shift[i] = ended;
for (i = bm->patlen-1, g = 1; i > 0; g++, i--) {
for (j = i-1; j >= 1-g ; j--)
if (subpattern(bm->pattern, i, j, g)) {
bm->good_shift[g] = bm->patlen-j-g;
break;
}
}
}
......
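(Aside on the ts_bm hunk above: the rewritten compute_prefix_tbl() builds the Boyer-Moore "good shift" table indexed by the length g of the suffix that matched before a mismatch; entry 0 stays at 1. Below is a standalone, deliberately naive sketch of the same strong good-suffix rule; it is not the kernel routine, just an illustration of what the table encodes.)

#include <stdio.h>
#include <string.h>

/* Return non-zero if shifting the pattern right by 'shift' is consistent
 * with a matched suffix of length 'g': the suffix must re-align with equal
 * characters (positions before the start of the pattern act as wildcards),
 * and the character just before the suffix must differ, otherwise the same
 * mismatch would simply repeat after the shift. */
static int shift_ok(const char *p, int m, int shift, int g)
{
        int k;

        for (k = m - g; k < m; k++)
                if (k - shift >= 0 && p[k] != p[k - shift])
                        return 0;
        if (m - g - 1 - shift >= 0 && p[m - g - 1] == p[m - g - 1 - shift])
                return 0;
        return 1;
}

int main(void)
{
        const char *p = "ANPANMAN";
        int m = strlen(p), g, shift;

        for (g = 1; g < m; g++) {
                for (shift = 1; shift <= m; shift++)
                        if (shift_ok(p, m, shift, g))
                                break;
                printf("good_shift[%d] = %d\n", g, shift);
        }
        return 0;
}

For the classic pattern "ANPANMAN" this prints, for example, good_shift[2] = 3: after matching the two-character suffix "AN" and then mismatching, the search window may safely jump three positions.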
......@@ -59,8 +59,10 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
proto = find_snap_client(skb->h.raw);
if (proto) {
/* Pass the frame on. */
u8 *hdr = skb->data;
skb->h.raw += 5;
skb_pull(skb, 5);
skb_postpull_rcsum(skb, hdr, 5);
rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
} else {
skb->sk = NULL;
......
......@@ -27,6 +27,13 @@ if NET
menu "Networking options"
config NETDEBUG
bool "Network packet debugging"
help
You can say Y here if you want to get additional messages useful in
debugging bad packets, but be aware that these messages can overwhelm
your logs under denial of service attacks.
source "net/packet/Kconfig"
source "net/unix/Kconfig"
source "net/xfrm/Kconfig"
......
......@@ -385,7 +385,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
u32 daddr;
if (ip_options_echo(&icmp_param->replyopts, skb))
goto out;
return;
if (icmp_xmit_lock())
return;
......@@ -416,7 +416,6 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
ip_rt_put(rt);
out_unlock:
icmp_xmit_unlock();
out:;
}
......
......@@ -228,7 +228,7 @@ static void wrandom_set_nhinfo(__u32 network,
struct multipath_dest *d, *target_dest = NULL;
/* store the weight information for a certain route */
spin_lock(&state[state_idx].lock);
spin_lock_bh(&state[state_idx].lock);
/* find state entry for gateway or add one if necessary */
list_for_each_entry_rcu(r, &state[state_idx].head, list) {
......@@ -276,7 +276,7 @@ static void wrandom_set_nhinfo(__u32 network,
* we are finished
*/
spin_unlock(&state[state_idx].lock);
spin_unlock_bh(&state[state_idx].lock);
}
static void __multipath_free(struct rcu_head *head)
......@@ -302,7 +302,7 @@ static void wrandom_flush(void)
for (i = 0; i < MULTIPATH_STATE_SIZE; ++i) {
struct multipath_route *r;
spin_lock(&state[i].lock);
spin_lock_bh(&state[i].lock);
list_for_each_entry_rcu(r, &state[i].head, list) {
struct multipath_dest *d;
list_for_each_entry_rcu(d, &r->dests, list) {
......@@ -315,7 +315,7 @@ static void wrandom_flush(void)
__multipath_free);
}
spin_unlock(&state[i].lock);
spin_unlock_bh(&state[i].lock);
}
}
......
......@@ -3321,9 +3321,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
switch (event) {
case RTM_NEWADDR:
dst_hold(&ifp->rt->u.dst);
if (ip6_ins_rt(ifp->rt, NULL, NULL, NULL))
dst_release(&ifp->rt->u.dst);
ip6_ins_rt(ifp->rt, NULL, NULL, NULL);
if (ifp->idev->cnf.forwarding)
addrconf_join_anycast(ifp);
break;
......@@ -3334,8 +3332,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
dst_hold(&ifp->rt->u.dst);
if (ip6_del_rt(ifp->rt, NULL, NULL, NULL))
dst_free(&ifp->rt->u.dst);
else
dst_release(&ifp->rt->u.dst);
break;
}
}
......
......@@ -369,12 +369,6 @@ int inet6_destroy_sock(struct sock *sk)
struct sk_buff *skb;
struct ipv6_txoptions *opt;
/*
* Release destination entry
*/
sk_dst_reset(sk);
/* Release rx options */
if ((skb = xchg(&np->pktoptions, NULL)) != NULL)
......
......@@ -608,7 +608,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
* When a Fast Retransmit is being performed the sender SHOULD
* ignore the value of cwnd and SHOULD NOT delay retransmission.
*/
if (!chunk->fast_retransmit)
if (chunk->fast_retransmit <= 0)
if (transport->flight_size >= transport->cwnd) {
retval = SCTP_XMIT_RWND_FULL;
goto finish;
......
......@@ -406,7 +406,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
* chunks that are not yet acked should be added to the
* retransmit queue.
*/
if ((fast_retransmit && chunk->fast_retransmit) ||
if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
(!fast_retransmit && !chunk->tsn_gap_acked)) {
/* RFC 2960 6.2.1 Processing a Received SACK
*
......@@ -603,7 +603,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
/* Mark the chunk as ineligible for fast retransmit
* after it is retransmitted.
*/
chunk->fast_retransmit = 0;
if (chunk->fast_retransmit > 0)
chunk->fast_retransmit = -1;
*start_timer = 1;
q->empty = 0;
......@@ -621,7 +622,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
list_for_each(lchunk1, lqueue) {
chunk1 = list_entry(lchunk1, struct sctp_chunk,
transmitted_list);
chunk1->fast_retransmit = 0;
if (chunk1->fast_retransmit > 0)
chunk1->fast_retransmit = -1;
}
}
}
......@@ -1562,11 +1564,11 @@ static void sctp_mark_missing(struct sctp_outq *q,
/*
* M4) If any DATA chunk is found to have a
* 'TSN.Missing.Report'
* value larger than or equal to 4, mark that chunk for
* value larger than or equal to 3, mark that chunk for
* retransmission and start the fast retransmit procedure.
*/
if (chunk->tsn_missing_report >= 4) {
if (chunk->tsn_missing_report >= 3) {
chunk->fast_retransmit = 1;
do_fast_retransmit = 1;
}
......
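(Aside on the SCTP hunks above: the fast_retransmit field becomes a signed tri-state rather than a boolean, and the fast-retransmit trigger drops from 4 to 3 missing reports. Below is a small standalone sketch of the intended state transitions, with hypothetical enum names standing in for the raw -1/0/1 values the kernel stores.)

#include <stdio.h>

/* Hypothetical names for illustration; the kernel uses the bare values. */
enum frtx_state {
        FRTX_DONE     = -1,     /* already fast-retransmitted, never again */
        FRTX_ELIGIBLE =  0,     /* ordinary chunk */
        FRTX_MARKED   =  1,     /* marked for fast retransmit */
};

int main(void)
{
        enum frtx_state state = FRTX_ELIGIBLE;
        int missing_reports = 0, i;

        /* Simulate three SACKs that each report the chunk as missing;
         * the patched threshold marks the chunk after 3 reports (was 4). */
        for (i = 0; i < 3; i++)
                missing_reports++;
        if (missing_reports >= 3 && state == FRTX_ELIGIBLE)
                state = FRTX_MARKED;

        /* Once the fast retransmission goes out, the chunk is permanently
         * excluded from any further fast retransmit. */
        if (state == FRTX_MARKED)
                state = FRTX_DONE;

        printf("fast_retransmit = %d\n", state);
        return 0;
}

The -1 state is what sctp_outq_flush_rtx() now sets so a chunk that has already been fast-retransmitted is never marked again, while sctp_packet_append_data() applies the cwnd check only to chunks whose value is <= 0.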