Commit e3703b3d authored by Thomas Graf, committed by David S. Miller

[RTNETLINK]: Add rtnl_put_cacheinfo() to unify some code

IPv4, IPv6, and DECnet all use struct rta_cacheinfo in a similar
way, therefore rtnl_put_cacheinfo() is added to reuse code.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4e9b8269
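
Each protocol's conversion in the diff below follows the same pattern. As a minimal sketch (not part of this commit) of how a fill routine calls the new helper, assuming the caller has a struct dst_entry *dst and an nla_put_failure label:

	/* Sketch only: 'expires' is a relative interval in jiffies (0 = no expiry);
	 * id, ts and tsage come from inet_peer data in IPv4 and are simply 0
	 * for DECnet and IPv6. */
	long expires = dst->expires ? dst->expires - jiffies : 0;

	if (rtnl_put_cacheinfo(skb, dst, 0, 0, 0, expires, dst->error) < 0)
		goto nla_put_failure;
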
@@ -585,6 +585,9 @@ extern int rtnl_notify(struct sk_buff *skb, u32 pid, u32 group,
 		       struct nlmsghdr *nlh, gfp_t flags);
 extern void rtnl_set_sk_err(u32 group, int error);
 extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
+extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
+			      u32 id, u32 ts, u32 tsage, long expires,
+			      u32 error);
 extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
...
@@ -212,6 +212,26 @@ nla_put_failure:
 	return nla_nest_cancel(skb, mx);
 }
 
+int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
+		       u32 ts, u32 tsage, long expires, u32 error)
+{
+	struct rta_cacheinfo ci = {
+		.rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse),
+		.rta_used = dst->__use,
+		.rta_clntref = atomic_read(&(dst->__refcnt)),
+		.rta_error = error,
+		.rta_id = id,
+		.rta_ts = ts,
+		.rta_tsage = tsage,
+	};
+
+	if (expires)
+		ci.rta_expires = jiffies_to_clock_t(expires);
+
+	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
+}
+
+EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
 
 static void set_operstate(struct net_device *dev, unsigned char transition)
 {
...
@@ -1469,7 +1469,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
 	struct rtmsg *r;
 	struct nlmsghdr *nlh;
 	unsigned char *b = skb->tail;
-	struct rta_cacheinfo ci;
+	long expires;
 
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
 	r = NLMSG_DATA(nlh);
@@ -1502,16 +1502,10 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
 		RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
 	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
 		goto rtattr_failure;
-	ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
-	ci.rta_used = rt->u.dst.__use;
-	ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
-	if (rt->u.dst.expires)
-		ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
-	else
-		ci.rta_expires = 0;
-	ci.rta_error = rt->u.dst.error;
-	ci.rta_id = ci.rta_ts = ci.rta_tsage = 0;
-	RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
+	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
+	if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, expires,
+			       rt->u.dst.error) < 0)
+		goto rtattr_failure;
 	if (rt->fl.iif)
 		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
...
@@ -2629,7 +2629,8 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 	struct rtable *rt = (struct rtable*)skb->dst;
 	struct rtmsg *r;
 	struct nlmsghdr *nlh;
-	struct rta_cacheinfo ci;
+	long expires;
+	u32 id = 0, ts = 0, tsage = 0, error;
 
 	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
 	if (nlh == NULL)
@@ -2676,20 +2677,13 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
 		goto nla_put_failure;
-	ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
-	ci.rta_used = rt->u.dst.__use;
-	ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
-	if (rt->u.dst.expires)
-		ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
-	else
-		ci.rta_expires = 0;
-	ci.rta_error = rt->u.dst.error;
-	ci.rta_id = ci.rta_ts = ci.rta_tsage = 0;
+	error = rt->u.dst.error;
+	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
 	if (rt->peer) {
-		ci.rta_id = rt->peer->ip_id_count;
+		id = rt->peer->ip_id_count;
 		if (rt->peer->tcp_ts_stamp) {
-			ci.rta_ts = rt->peer->tcp_ts;
-			ci.rta_tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp;
+			ts = rt->peer->tcp_ts;
+			tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp;
 		}
 	}
@@ -2708,7 +2702,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 			} else {
 				if (err == -EMSGSIZE)
 					goto nla_put_failure;
-				ci.rta_error = err;
+				error = err;
 			}
 		}
 	} else
@@ -2716,7 +2710,9 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 		NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
 	}
 
-	NLA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
+	if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
+			       expires, error) < 0)
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
...
@@ -2027,7 +2027,7 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
 {
 	struct rtmsg *rtm;
 	struct nlmsghdr *nlh;
-	struct rta_cacheinfo ci;
+	long expires;
 	u32 table;
 
 	if (prefix) {	/* user wants prefix routes only */
@@ -2101,18 +2101,11 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
 	NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
 	NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
 
-	ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
-	if (rt->rt6i_expires)
-		ci.rta_expires = jiffies_to_clock_t(rt->rt6i_expires - jiffies);
-	else
-		ci.rta_expires = 0;
-	ci.rta_used = rt->u.dst.__use;
-	ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
-	ci.rta_error = rt->u.dst.error;
-	ci.rta_id = 0;
-	ci.rta_ts = 0;
-	ci.rta_tsage = 0;
-	NLA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
+	expires = rt->rt6i_expires ? rt->rt6i_expires - jiffies : 0;
+	if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0,
+			       expires, rt->u.dst.error) < 0)
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
...