linux / linux-davinci-2.6.23
Commit 60279944, authored Nov 17, 2005 by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Parents: d0fa7e9f 4a59a810
Showing 3 changed files with 31 additions and 52 deletions (+31 -52):

    net/ipv4/ip_fragment.c              +14  -26
    net/ipv6/reassembly.c               +16  -25
    net/netfilter/nf_conntrack_core.c    +1   -1
net/ipv4/ip_fragment.c

@@ -71,7 +71,7 @@ struct ipfrag_skb_cb
 /* Describe an entry in the "incomplete datagrams" queue. */
 struct ipq {
-        struct ipq      *next;          /* linked list pointers */
+        struct hlist_node list;
         struct list_head lru_list;      /* lru list member */
         u32             user;
         u32             saddr;
@@ -89,7 +89,6 @@ struct ipq {
         spinlock_t      lock;
         atomic_t        refcnt;
         struct timer_list timer;        /* when will this queue expire? */
-        struct ipq      **pprev;
         int             iif;
         struct timeval  stamp;
 };
@@ -99,7 +98,7 @@ struct ipq {
 #define IPQ_HASHSZ      64

 /* Per-bucket lock is easy to add now. */
-static struct ipq *ipq_hash[IPQ_HASHSZ];
+static struct hlist_head ipq_hash[IPQ_HASHSZ];
 static DEFINE_RWLOCK(ipfrag_lock);
 static u32 ipfrag_hash_rnd;
 static LIST_HEAD(ipq_lru_list);
@@ -107,9 +106,7 @@ int ip_frag_nqueues = 0;
 static __inline__ void __ipq_unlink(struct ipq *qp)
 {
-        if (qp->next)
-                qp->next->pprev = qp->pprev;
-        *qp->pprev = qp->next;
+        hlist_del(&qp->list);
         list_del(&qp->lru_list);
         ip_frag_nqueues--;
 }
@@ -139,27 +136,18 @@ static void ipfrag_secret_rebuild(unsigned long dummy)
         get_random_bytes(&ipfrag_hash_rnd, sizeof(u32));
         for (i = 0; i < IPQ_HASHSZ; i++) {
                 struct ipq *q;
+                struct hlist_node *p, *n;

-                q = ipq_hash[i];
-                while (q) {
-                        struct ipq *next = q->next;
+                hlist_for_each_entry_safe(q, p, n, &ipq_hash[i], list) {
                         unsigned int hval = ipqhashfn(q->id, q->saddr,
                                                       q->daddr, q->protocol);

                         if (hval != i) {
-                                /* Unlink. */
-                                if (q->next)
-                                        q->next->pprev = q->pprev;
-                                *q->pprev = q->next;
+                                hlist_del(&q->list);

                                 /* Relink to new hash chain. */
-                                if ((q->next = ipq_hash[hval]) != NULL)
-                                        q->next->pprev = &q->next;
-                                ipq_hash[hval] = q;
-                                q->pprev = &ipq_hash[hval];
+                                hlist_add_head(&q->list, &ipq_hash[hval]);
                         }
-
-                        q = next;
                 }
         }
         write_unlock(&ipfrag_lock);
@@ -310,14 +298,16 @@ out:
 static struct ipq *ip_frag_intern(unsigned int hash, struct ipq *qp_in)
 {
         struct ipq *qp;
+#ifdef CONFIG_SMP
+        struct hlist_node *n;
+#endif

         write_lock(&ipfrag_lock);
 #ifdef CONFIG_SMP
         /* With SMP race we have to recheck hash table, because
          * such entry could be created on other cpu, while we
          * promoted read lock to write lock.
          */
-        for (qp = ipq_hash[hash]; qp; qp = qp->next) {
+        hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
                 if (qp->id == qp_in->id         &&
                     qp->saddr == qp_in->saddr   &&
                     qp->daddr == qp_in->daddr   &&
@@ -337,10 +327,7 @@ static struct ipq *ip_frag_intern(unsigned int hash, struct ipq *qp_in)
         atomic_inc(&qp->refcnt);

         atomic_inc(&qp->refcnt);
-        if ((qp->next = ipq_hash[hash]) != NULL)
-                qp->next->pprev = &qp->next;
-        ipq_hash[hash] = qp;
-        qp->pprev = &ipq_hash[hash];
+        hlist_add_head(&qp->list, &ipq_hash[hash]);
         INIT_LIST_HEAD(&qp->lru_list);
         list_add_tail(&qp->lru_list, &ipq_lru_list);
         ip_frag_nqueues++;
@@ -392,9 +379,10 @@ static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
         __u8 protocol = iph->protocol;
         unsigned int hash = ipqhashfn(id, saddr, daddr, protocol);
         struct ipq *qp;
+        struct hlist_node *n;

         read_lock(&ipfrag_lock);
-        for (qp = ipq_hash[hash]; qp; qp = qp->next) {
+        hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
                 if (qp->id == id        &&
                     qp->saddr == saddr  &&
                     qp->daddr == daddr  &&
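Both the IPv4 diff above and the IPv6 diff below follow the same pattern: the open-coded singly linked hash chains (a next pointer plus a pprev back-pointer embedded in each queue entry) are replaced by the generic hlist primitives from <linux/list.h>. The sketch below shows that pattern in isolation; the example_* names are placeholders, not kernel symbols, and the iterator is the four-argument hlist_for_each_entry() of the 2.6 era, as used in the hunks above.

/* Illustrative sketch only; not part of the commit above. */
#include <linux/types.h>
#include <linux/list.h>

#define EXAMPLE_HASHSZ 64

/* Each entry embeds a hlist_node instead of its own next/pprev pair. */
struct example_entry {
        struct hlist_node list;
        u32 key;
};

/* A table of hlist_head buckets replaces the bare pointer array. */
static struct hlist_head example_hash[EXAMPLE_HASHSZ];

static void example_insert(struct example_entry *e, unsigned int hash)
{
        /* Replaces the removed "(e->next = head) != NULL ... pprev" sequence. */
        hlist_add_head(&e->list, &example_hash[hash]);
}

static void example_remove(struct example_entry *e)
{
        /* hlist_del() unlinks and fixes up both neighbours in one call. */
        hlist_del(&e->list);
}

static struct example_entry *example_find(u32 key, unsigned int hash)
{
        struct example_entry *e;
        struct hlist_node *n;   /* cursor required by the 2.6-era iterator */

        hlist_for_each_entry(e, n, &example_hash[hash], list) {
                if (e->key == key)
                        return e;
        }
        return NULL;
}

Since hlist_head is a single pointer, the per-bucket footprint matches the old bare pointer array while the list manipulation moves into the shared helpers.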
net/ipv6/reassembly.c

@@ -74,7 +74,7 @@ struct ip6frag_skb_cb
 struct frag_queue
 {
-        struct frag_queue       *next;
+        struct hlist_node       list;
         struct list_head        lru_list;       /* lru list member */
         __u32                   id;             /* fragment id */
@@ -95,14 +95,13 @@ struct frag_queue
 #define FIRST_IN                2
 #define LAST_IN                 1
         __u16                   nhoffset;
-        struct frag_queue       **pprev;
 };

 /* Hash table. */
 #define IP6Q_HASHSZ     64

-static struct frag_queue *ip6_frag_hash[IP6Q_HASHSZ];
+static struct hlist_head ip6_frag_hash[IP6Q_HASHSZ];
 static DEFINE_RWLOCK(ip6_frag_lock);
 static u32 ip6_frag_hash_rnd;
 static LIST_HEAD(ip6_frag_lru_list);
@@ -110,9 +109,7 @@ int ip6_frag_nqueues = 0;
 static __inline__ void __fq_unlink(struct frag_queue *fq)
 {
-        if (fq->next)
-                fq->next->pprev = fq->pprev;
-        *fq->pprev = fq->next;
+        hlist_del(&fq->list);
         list_del(&fq->lru_list);
         ip6_frag_nqueues--;
 }
@@ -163,28 +160,21 @@ static void ip6_frag_secret_rebuild(unsigned long dummy)
         get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32));
         for (i = 0; i < IP6Q_HASHSZ; i++) {
                 struct frag_queue *q;
+                struct hlist_node *p, *n;

-                q = ip6_frag_hash[i];
-                while (q) {
-                        struct frag_queue *next = q->next;
+                hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], list) {
                         unsigned int hval = ip6qhashfn(q->id, &q->saddr,
                                                        &q->daddr);

                         if (hval != i) {
-                                /* Unlink. */
-                                if (q->next)
-                                        q->next->pprev = q->pprev;
-                                *q->pprev = q->next;
+                                hlist_del(&q->list);

                                 /* Relink to new hash chain. */
-                                if ((q->next = ip6_frag_hash[hval]) != NULL)
-                                        q->next->pprev = &q->next;
-                                ip6_frag_hash[hval] = q;
-                                q->pprev = &ip6_frag_hash[hval];
+                                hlist_add_head(&q->list, &ip6_frag_hash[hval]);
                         }
-
-                        q = next;
                 }
         }
         write_unlock(&ip6_frag_lock);
@@ -337,10 +327,13 @@ static struct frag_queue *ip6_frag_intern(unsigned int hash,
                                           struct frag_queue *fq_in)
 {
         struct frag_queue *fq;
+#ifdef CONFIG_SMP
+        struct hlist_node *n;
+#endif

         write_lock(&ip6_frag_lock);
 #ifdef CONFIG_SMP
-        for (fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
+        hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
                 if (fq->id == fq_in->id &&
                     ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
                     ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
@@ -358,10 +351,7 @@ static struct frag_queue *ip6_frag_intern(unsigned int hash,
         atomic_inc(&fq->refcnt);

         atomic_inc(&fq->refcnt);
-        if ((fq->next = ip6_frag_hash[hash]) != NULL)
-                fq->next->pprev = &fq->next;
-        ip6_frag_hash[hash] = fq;
-        fq->pprev = &ip6_frag_hash[hash];
+        hlist_add_head(&fq->list, &ip6_frag_hash[hash]);
         INIT_LIST_HEAD(&fq->lru_list);
         list_add_tail(&fq->lru_list, &ip6_frag_lru_list);
         ip6_frag_nqueues++;
@@ -401,10 +391,11 @@ static __inline__ struct frag_queue *
 fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
 {
         struct frag_queue *fq;
+        struct hlist_node *n;
         unsigned int hash = ip6qhashfn(id, src, dst);

         read_lock(&ip6_frag_lock);
-        for (fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
+        hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
                 if (fq->id == id &&
                     ipv6_addr_equal(src, &fq->saddr) &&
                     ipv6_addr_equal(dst, &fq->daddr)) {
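One detail common to both files: the lookup paths (ip_find(), fq_find(), and the SMP recheck loops in the *_intern() functions) use the plain iterator, while the *_secret_rebuild() loops use hlist_for_each_entry_safe(), because they unlink and re-add the entry under the cursor. Continuing the illustrative sketch above (again, the example_* names and example_hashfn() are placeholders, not kernel symbols):

/* Illustrative sketch only: move entries whose bucket changed after rekeying. */
static unsigned int example_hashfn(u32 key)
{
        return key % EXAMPLE_HASHSZ;    /* placeholder hash function */
}

static void example_rehash(void)
{
        unsigned int i;

        for (i = 0; i < EXAMPLE_HASHSZ; i++) {
                struct example_entry *e;
                struct hlist_node *pos, *next;

                /* The _safe variant caches the next node, so unlinking the
                 * current entry inside the loop body is allowed. */
                hlist_for_each_entry_safe(e, pos, next, &example_hash[i], list) {
                        unsigned int hval = example_hashfn(e->key);

                        if (hval != i) {
                                hlist_del(&e->list);
                                hlist_add_head(&e->list, &example_hash[hval]);
                        }
                }
        }
}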
net/netfilter/nf_conntrack_core.c

@@ -387,7 +387,7 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
 static void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
 {
         ASSERT_WRITE_LOCK(&nf_conntrack_lock);
-        NF_CT_ASSERT(!timer_pending(&exp_timeout));
+        NF_CT_ASSERT(!timer_pending(&exp->timeout));
         list_del(&exp->list);
         NF_CT_STAT_INC(expect_delete);
         exp->master->expecting--;