diff options
| author | Cong Wang <xiyou.wangcong@gmail.com> | 2025-05-06 21:35:58 -0700 | 
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2025-05-09 12:34:38 +0100 | 
| commit | 2d3cbfd6d54a2c39ce3244f33f85c595844bd7b8 (patch) | |
| tree | 200ad600bde2b564d2f488fa49534cd6fea7f403 | |
| parent | 6b3ab7f2cbfaeb6580709cd8ef4d72cfd01bfde4 (diff) | |
net_sched: Flush gso_skb list too during ->change()
Previously, when reducing a qdisc's limit via the ->change() operation, only
the main skb queue was trimmed, potentially leaving packets in the gso_skb
list. This could result in a NULL pointer dereference when we only check
sch->limit against sch->q.qlen.
This patch introduces a new helper, qdisc_dequeue_internal(), which ensures
both the gso_skb list and the main queue are properly flushed when trimming
excess packets. All relevant qdiscs (codel, fq, fq_codel, fq_pie, hhf, pie)
are updated to use this helper in their ->change() routines.
Fixes: 76e3cc126bb2 ("codel: Controlled Delay AQM")
Fixes: 4b549a2ef4be ("fq_codel: Fair Queue Codel AQM")
Fixes: afe4fd062416 ("pkt_sched: fq: Fair Queue packet scheduler")
Fixes: ec97ecf1ebe4 ("net: sched: add Flow Queue PIE packet scheduler")
Fixes: 10239edf86f1 ("net-qdisc-hhf: Heavy-Hitter Filter (HHF) qdisc")
Fixes: d4b36210c2e6 ("net: pkt_sched: PIE AQM scheme")
Reported-by: Will <willsroot@protonmail.com>
Reported-by: Savy <savy@syst3mfailure.io>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| -rw-r--r-- | include/net/sch_generic.h | 15 | ||||
| -rw-r--r-- | net/sched/sch_codel.c | 2 | ||||
| -rw-r--r-- | net/sched/sch_fq.c | 2 | ||||
| -rw-r--r-- | net/sched/sch_fq_codel.c | 2 | ||||
| -rw-r--r-- | net/sched/sch_fq_pie.c | 2 | ||||
| -rw-r--r-- | net/sched/sch_hhf.c | 2 | ||||
| -rw-r--r-- | net/sched/sch_pie.c | 2 | 
7 files changed, 21 insertions(+), 6 deletions(-)
| diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index d48c657191cd..1c05fed05f2b 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -1031,6 +1031,21 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)  	return skb;  } +static inline struct sk_buff *qdisc_dequeue_internal(struct Qdisc *sch, bool direct) +{ +	struct sk_buff *skb; + +	skb = __skb_dequeue(&sch->gso_skb); +	if (skb) { +		sch->q.qlen--; +		return skb; +	} +	if (direct) +		return __qdisc_dequeue_head(&sch->q); +	else +		return sch->dequeue(sch); +} +  static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)  {  	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index 12dd71139da3..c93761040c6e 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -144,7 +144,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt,  	qlen = sch->q.qlen;  	while (sch->q.qlen > sch->limit) { -		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); +		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);  		dropped += qdisc_pkt_len(skb);  		qdisc_qstats_backlog_dec(sch, skb); diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 2ca5332cfcc5..902ff5470607 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -1136,7 +1136,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,  		sch_tree_lock(sch);  	}  	while (sch->q.qlen > sch->limit) { -		struct sk_buff *skb = fq_dequeue(sch); +		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);  		if (!skb)  			break; diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 6c9029f71e88..2a0f3a513bfa 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -441,7 +441,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,  	while (sch->q.qlen > sch->limit ||  	       q->memory_usage > q->memory_limit) { -		struct sk_buff *skb = 
fq_codel_dequeue(sch); +		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);  		q->cstats.drop_len += qdisc_pkt_len(skb);  		rtnl_kfree_skbs(skb, skb); diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index f3b8203d3e85..df7fac95ab15 100644 --- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ -366,7 +366,7 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,  	/* Drop excess packets if new limit is lower */  	while (sch->q.qlen > sch->limit) { -		struct sk_buff *skb = fq_pie_qdisc_dequeue(sch); +		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);  		len_dropped += qdisc_pkt_len(skb);  		num_dropped += 1; diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index 44d9efe1a96a..5aa434b46707 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -564,7 +564,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,  	qlen = sch->q.qlen;  	prev_backlog = sch->qstats.backlog;  	while (sch->q.qlen > sch->limit) { -		struct sk_buff *skb = hhf_dequeue(sch); +		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);  		rtnl_kfree_skbs(skb, skb);  	} diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index 3771d000b30d..ff49a6c97033 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -195,7 +195,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,  	/* Drop excess packets if new limit is lower */  	qlen = sch->q.qlen;  	while (sch->q.qlen > sch->limit) { -		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); +		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);  		dropped += qdisc_pkt_len(skb);  		qdisc_qstats_backlog_dec(sch, skb); | 
