mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-07 14:32:23 +00:00
pkt_sched: fq: qdisc dismantle fixes
fq_reset() should drop all packets in the queue, including those of throttled flows.

This patch moves code from fq_destroy() to fq_reset() to do the cleaning.

fq_change() must stop calling fq_dequeue() if all remaining packets belong to throttled flows.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent b86783587b
commit 8d34ce10c5
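Context for the fq_change() hunk below: fq_dequeue() returns NULL once only throttled flows remain, even though sch->q.qlen is still nonzero, so a drain loop keyed on qlen alone, as in the old "while (sch->q.qlen > sch->limit)" body, could spin forever. The following standalone userspace sketch illustrates the hazard and the fix; toy_queue and toy_dequeue() are invented names for illustration, not kernel APIs.

/* Toy model (not kernel code) of the fq_change() drain loop. */
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	unsigned int qlen;      /* total packets queued */
	unsigned int throttled; /* packets stuck in throttled flows */
};

/* Mimics fq_dequeue(): hands out nothing once only throttled packets remain. */
static bool toy_dequeue(struct toy_queue *q)
{
	if (q->qlen <= q->throttled)
		return false;   /* fq_dequeue() would return NULL here */
	q->qlen--;
	return true;
}

int main(void)
{
	struct toy_queue q = { .qlen = 10, .throttled = 4 };
	unsigned int limit = 2, drop_count = 0;

	while (q.qlen > limit) {
		if (!toy_dequeue(&q))
			break;  /* the fix: bail out instead of spinning */
		drop_count++;
	}
	printf("dropped %u, %u still queued (throttled)\n", drop_count, q.qlen);
	return 0;
}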
net/sched/sch_fq.c
@@ -285,7 +285,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
 
 
 /* remove one skb from head of flow queue */
-static struct sk_buff *fq_dequeue_head(struct fq_flow *flow)
+static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
 {
 	struct sk_buff *skb = flow->head;
 
@@ -293,6 +293,8 @@ static struct sk_buff *fq_dequeue_head(struct fq_flow *flow)
 		flow->head = skb->next;
 		skb->next = NULL;
 		flow->qlen--;
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		sch->q.qlen--;
 	}
 	return skb;
 }
@@ -419,7 +421,7 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 	struct sk_buff *skb;
 	struct fq_flow *f;
 
-	skb = fq_dequeue_head(&q->internal);
+	skb = fq_dequeue_head(sch, &q->internal);
 	if (skb)
 		goto out;
 	fq_check_throttled(q, now);
@@ -449,7 +451,7 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 			goto begin;
 		}
 
-	skb = fq_dequeue_head(f);
+	skb = fq_dequeue_head(sch, f);
 	if (!skb) {
 		head->first = f->next;
 		/* force a pass through old_flows to prevent starvation */
@@ -490,19 +492,44 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 		}
 	}
 out:
-	sch->qstats.backlog -= qdisc_pkt_len(skb);
 	qdisc_bstats_update(sch, skb);
-	sch->q.qlen--;
 	qdisc_unthrottled(sch);
 	return skb;
 }
 
 static void fq_reset(struct Qdisc *sch)
 {
+	struct fq_sched_data *q = qdisc_priv(sch);
+	struct rb_root *root;
 	struct sk_buff *skb;
+	struct rb_node *p;
+	struct fq_flow *f;
+	unsigned int idx;
 
-	while ((skb = fq_dequeue(sch)) != NULL)
+	while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL)
 		kfree_skb(skb);
+
+	if (!q->fq_root)
+		return;
+
+	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
+		root = &q->fq_root[idx];
+		while ((p = rb_first(root)) != NULL) {
+			f = container_of(p, struct fq_flow, fq_node);
+			rb_erase(p, root);
+
+			while ((skb = fq_dequeue_head(sch, f)) != NULL)
+				kfree_skb(skb);
+
+			kmem_cache_free(fq_flow_cachep, f);
+		}
+	}
+	q->new_flows.first = NULL;
+	q->old_flows.first = NULL;
+	q->delayed = RB_ROOT;
+	q->flows = 0;
+	q->inactive_flows = 0;
+	q->throttled_flows = 0;
 }
 
 static void fq_rehash(struct fq_sched_data *q,
@@ -645,6 +672,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
 	while (sch->q.qlen > sch->limit) {
 		struct sk_buff *skb = fq_dequeue(sch);
 
+		if (!skb)
+			break;
 		kfree_skb(skb);
 		drop_count++;
 	}
@@ -657,21 +686,9 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
 static void fq_destroy(struct Qdisc *sch)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
-	struct rb_root *root;
-	struct rb_node *p;
-	unsigned int idx;
 
-	if (q->fq_root) {
-		for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
-			root = &q->fq_root[idx];
-			while ((p = rb_first(root)) != NULL) {
-				rb_erase(p, root);
-				kmem_cache_free(fq_flow_cachep,
-						container_of(p, struct fq_flow, fq_node));
-			}
-		}
-		kfree(q->fq_root);
-	}
+	fq_reset(sch);
+	kfree(q->fq_root);
 	qdisc_watchdog_cancel(&q->watchdog);
 }
 
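Design note on the fq_dequeue_head() change above: the per-packet accounting (sch->q.qlen and sch->qstats.backlog) now sits next to the structural unlink, so every caller, including the new fq_reset() tree walk, keeps the qdisc counters consistent without repeating the bookkeeping. A minimal userspace sketch of that pattern, using invented names (counted_queue, pop_counted) rather than kernel APIs:

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int len;
};

struct counted_queue {
	struct node *head;
	unsigned int qlen;    /* number of queued nodes */
	unsigned int backlog; /* sum of node lengths */
};

/* Accounting lives inside the dequeue helper, as in the patched
 * fq_dequeue_head(): callers cannot forget to update the counters. */
static struct node *pop_counted(struct counted_queue *q)
{
	struct node *n = q->head;

	if (n) {
		q->head = n->next;
		n->next = NULL;
		q->qlen--;
		q->backlog -= n->len;
	}
	return n;
}

int main(void)
{
	struct counted_queue q = { .head = NULL, .qlen = 0, .backlog = 0 };
	struct node *n = calloc(1, sizeof(*n));

	n->len = 100;         /* enqueue one node by hand */
	n->next = q.head;
	q.head = n;
	q.qlen++;
	q.backlog += n->len;

	free(pop_counted(&q));
	printf("qlen=%u backlog=%u\n", q.qlen, q.backlog); /* prints 0 0 */
	return 0;
}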