mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  netfilter: xt_connbytes: handle negation correctly
  net: relax rcvbuf limits
  rps: fix insufficient bounds checking in store_rps_dev_flow_table_cnt()
  net: introduce DST_NOPEER dst flag
  mqprio: Avoid panic if no options are provided
  bridge: provide a mtu() method for fake_dst_ops
commit 155d4551bd
include/net/dst.h
@@ -53,6 +53,7 @@ struct dst_entry {
 #define DST_NOHASH		0x0008
 #define DST_NOCACHE		0x0010
 #define DST_NOCOUNT		0x0020
+#define DST_NOPEER		0x0040
 
 	short			error;
 	short			obsolete;
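For context, DST_NOPEER marks dst entries that never get an inet_peer bound to them (the bridge's fake rtable below is the first user), so the IP ident-selection paths must skip rt_bind_peer() for them. A minimal standalone sketch of how the flag is meant to be tested; the struct and helper names are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

#define DST_NOPEER	0x0040

struct fake_dst { int flags; };

static bool may_bind_peer(const struct fake_dst *dst)
{
	/* __ip_select_ident()/ipv6_select_ident() in the hunks below
	 * skip peer binding entirely when this flag is set */
	return !(dst->flags & DST_NOPEER);
}

int main(void)
{
	struct fake_dst bridge_fake = { .flags = DST_NOPEER };
	struct fake_dst ordinary = { .flags = 0 };

	printf("bridge fake dst: bind peer? %d\n", may_bind_peer(&bridge_fake));
	printf("ordinary dst:    bind peer? %d\n", may_bind_peer(&ordinary));
	return 0;
}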
include/net/sock.h
@@ -637,12 +637,14 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 
 /*
  * Take into account size of receive queue and backlog queue
+ * Do not take into account this skb truesize,
+ * to allow even a single big packet to come.
  */
 static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-	return qsize + skb->truesize > sk->sk_rcvbuf;
+	return qsize > sk->sk_rcvbuf;
 }
 
 /* The per-socket spinlock must be held here. */
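The relaxed check judges fullness only on what is already queued, ignoring the incoming skb's truesize, so even a single oversized packet can be accepted on an empty queue; the sock.c and af_packet.c hunks below apply the same rule to their receive paths. A standalone sketch contrasting the two predicates, with made-up sizes:

#include <stdbool.h>
#include <stdio.h>

static bool full_old(unsigned int queued, unsigned int truesize,
		     unsigned int rcvbuf)
{
	return queued + truesize > rcvbuf;	/* counts the new skb */
}

static bool full_new(unsigned int queued, unsigned int truesize,
		     unsigned int rcvbuf)
{
	(void)truesize;				/* incoming skb ignored */
	return queued > rcvbuf;
}

int main(void)
{
	/* An empty queue and a 64KB aggregated packet against a small
	 * rcvbuf: the old check drops it, the relaxed one lets it in. */
	printf("old: full=%d  new: full=%d\n",
	       full_old(0, 65536, 4096), full_new(0, 65536, 4096));
	return 0;
}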
net/bridge/br_netfilter.c
@@ -114,12 +114,18 @@ static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const void *daddr)
 	return NULL;
 }
 
+static unsigned int fake_mtu(const struct dst_entry *dst)
+{
+	return dst->dev->mtu;
+}
+
 static struct dst_ops fake_dst_ops = {
 	.family =		AF_INET,
 	.protocol =		cpu_to_be16(ETH_P_IP),
 	.update_pmtu =		fake_update_pmtu,
 	.cow_metrics =		fake_cow_metrics,
 	.neigh_lookup =		fake_neigh_lookup,
+	.mtu =			fake_mtu,
 };
 
 /*
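The .mtu method matters because dst_mtu() in this kernel dispatches unconditionally through the ops table, so a fake_dst_ops with a NULL .mtu slot oopses as soon as bridge-netfilter traffic asks for the path MTU. A standalone sketch of that failure mode; the names are illustrative, not the kernel's:

#include <stdio.h>

struct dst;
struct dst_ops_sketch {
	unsigned int (*mtu)(const struct dst *);
};
struct dst {
	const struct dst_ops_sketch *ops;
	unsigned int dev_mtu;
};

static unsigned int fake_mtu(const struct dst *dst)
{
	return dst->dev_mtu;		/* mirrors the new fake_mtu() */
}

int main(void)
{
	static const struct dst_ops_sketch with_mtu = { .mtu = fake_mtu };
	struct dst d = { .ops = &with_mtu, .dev_mtu = 1500 };

	/* dst_mtu() is essentially dst->ops->mtu(dst); before the fix,
	 * fake_dst_ops.mtu was NULL, so the same call dereferenced a
	 * NULL function pointer. */
	printf("mtu = %u\n", d.ops->mtu(&d));
	return 0;
}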
@@ -141,7 +147,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
 	rt->dst.dev = br->dev;
 	rt->dst.path = &rt->dst;
 	dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
-	rt->dst.flags	= DST_NOXFRM;
+	rt->dst.flags	= DST_NOXFRM | DST_NOPEER;
 	rt->dst.ops = &fake_dst_ops;
 }
 
net/core/net-sysfs.c
@@ -665,11 +665,14 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 	if (count) {
 		int i;
 
-		if (count > 1<<30) {
+		if (count > INT_MAX)
+			return -EINVAL;
+		count = roundup_pow_of_two(count);
+		if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
+				/ sizeof(struct rps_dev_flow)) {
 			/* Enforce a limit to prevent overflow */
 			return -EINVAL;
 		}
-		count = roundup_pow_of_two(count);
 		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
 		if (!table)
 			return -ENOMEM;
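The old count > 1<<30 test still allowed counts whose table size overflows unsigned long (notably on 32-bit), so vmalloc() could be asked for a wrapped-around, too-small allocation. A standalone sketch of the bound the new check enforces; the header and entry sizes below are stand-ins, not the kernel's real struct sizes:

#include <limits.h>
#include <stdio.h>

#define HDR_SIZE	16UL	/* stand-in for sizeof(struct rps_dev_flow_table) */
#define ENTRY_SIZE	8UL	/* stand-in for sizeof(struct rps_dev_flow) */

static unsigned long table_bytes(unsigned long count)
{
	return HDR_SIZE + count * ENTRY_SIZE;	/* wraps if unchecked */
}

int main(void)
{
	unsigned long max_count = (ULONG_MAX - HDR_SIZE) / ENTRY_SIZE;

	/* Anything above max_count makes table_bytes() wrap, so the
	 * allocator would hand back a tiny buffer and later writes
	 * would run past it; the new check rejects such counts. */
	printf("largest safe count: %lu\n", max_count);
	printf("bytes at max_count: %lu\n", table_bytes(max_count));
	return 0;
}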
net/core/sock.c
@@ -288,11 +288,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 
-	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
-	   number of warnings when compiling with -W --ANK
-	 */
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf) {
+	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
 		atomic_inc(&sk->sk_drops);
 		trace_sock_rcvqueue_full(sk, skb);
 		return -ENOMEM;
net/ipv4/route.c
@@ -1367,7 +1367,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 {
 	struct rtable *rt = (struct rtable *) dst;
 
-	if (rt) {
+	if (rt && !(rt->dst.flags & DST_NOPEER)) {
 		if (rt->peer == NULL)
 			rt_bind_peer(rt, rt->rt_dst, 1);
 
@@ -1378,7 +1378,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 			iph->id = htons(inet_getid(rt->peer, more));
 			return;
 		}
-	} else
+	} else if (!rt)
 		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
 		       __builtin_return_address(0));
 
net/ipv6/ip6_output.c
@@ -603,7 +603,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
 	static atomic_t ipv6_fragmentation_id;
 	int old, new;
 
-	if (rt) {
+	if (rt && !(rt->dst.flags & DST_NOPEER)) {
 		struct inet_peer *peer;
 
 		if (!rt->rt6i_peer)
net/netfilter/xt_connbytes.c
@@ -87,10 +87,10 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
 		break;
 	}
 
-	if (sinfo->count.to)
+	if (sinfo->count.to >= sinfo->count.from)
 		return what <= sinfo->count.to && what >= sinfo->count.from;
-	else
-		return what >= sinfo->count.from;
+	else /* inverted */
+		return what < sinfo->count.to || what > sinfo->count.from;
 }
 
 static int connbytes_mt_check(const struct xt_mtchk_param *par)
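iptables userspace encodes "! --connbytes from:to" by swapping the two bounds, so to < from is the in-kernel signal for an inverted range; the old if (sinfo->count.to) treated a swapped range as an ordinary one. A standalone sketch of the fixed predicate, with made-up byte counts:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool connbytes_match(uint64_t what, uint64_t from, uint64_t to)
{
	if (to >= from)			/* plain range */
		return what <= to && what >= from;
	else				/* inverted: swapped bounds */
		return what < to || what > from;
}

int main(void)
{
	/* --connbytes 100:200   -> from=100, to=200 */
	printf("150 in 100:200  -> %d\n", connbytes_match(150, 100, 200));
	/* ! --connbytes 100:200 -> kernel sees from=200, to=100 */
	printf("150 in !100:200 -> %d\n", connbytes_match(150, 200, 100));
	printf("250 in !100:200 -> %d\n", connbytes_match(250, 200, 100));
	return 0;
}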
net/packet/af_packet.c
@@ -1630,8 +1630,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (snaplen > res)
 		snaplen = res;
 
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf)
+	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		goto drop_n_acct;
 
 	if (skb_shared(skb)) {
@@ -1762,8 +1761,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (po->tp_version <= TPACKET_V2) {
 		if (macoff + snaplen > po->rx_ring.frame_size) {
 			if (po->copy_thresh &&
-			    atomic_read(&sk->sk_rmem_alloc) + skb->truesize
-			    < (unsigned)sk->sk_rcvbuf) {
+			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
 				if (skb_shared(skb)) {
 					copy_skb = skb_clone(skb, GFP_ATOMIC);
 				} else {
net/sched/sch_mqprio.c
@@ -107,7 +107,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
 	if (!netif_is_multiqueue(dev))
 		return -EOPNOTSUPP;
 
-	if (nla_len(opt) < sizeof(*qopt))
+	if (!opt || nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
 
 	qopt = nla_data(opt);
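nla_len() reads through its argument (nla->nla_len minus the attribute header), so adding the qdisc with no options, which hands mqprio_init() a NULL opt, dereferenced NULL before this check. A standalone sketch of the failure mode and of why the short-circuit ordering makes it safe; the struct only mimics struct nlattr:

#include <stdint.h>
#include <stdio.h>

struct nlattr_sketch {
	uint16_t nla_len;
	uint16_t nla_type;
};

static int nla_len_sketch(const struct nlattr_sketch *nla)
{
	return nla->nla_len - sizeof(*nla);	/* oopses when nla == NULL */
}

static int mqprio_init_check(const struct nlattr_sketch *opt, size_t need)
{
	/* !opt is tested first, so nla_len_sketch() never sees NULL */
	if (!opt || (size_t)nla_len_sketch(opt) < need)
		return -1;			/* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	/* "tc qdisc add dev eth0 root mqprio" with no options used to
	 * reach nla_len(NULL) and panic; now it is rejected cleanly. */
	printf("NULL opt rejected: %d\n", mqprio_init_check(NULL, 4));
	return 0;
}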