Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_bpf.c      |  6
-rw-r--r--  net/sched/act_ct.c       |  8
-rw-r--r--  net/sched/act_ife.c      |  6
-rw-r--r--  net/sched/cls_api.c      |  6
-rw-r--r--  net/sched/cls_bpf.c      |  6
-rw-r--r--  net/sched/cls_flower.c   |  2
-rw-r--r--  net/sched/em_canid.c     |  3
-rw-r--r--  net/sched/em_cmp.c       |  5
-rw-r--r--  net/sched/em_nbyte.c     |  2
-rw-r--r--  net/sched/em_text.c      | 11
-rw-r--r--  net/sched/sch_cake.c     | 19
-rw-r--r--  net/sched/sch_codel.c    |  4
-rw-r--r--  net/sched/sch_dualpi2.c  |  1
-rw-r--r--  net/sched/sch_fq.c       |  9
-rw-r--r--  net/sched/sch_fq_codel.c |  5
-rw-r--r--  net/sched/sch_netem.c    |  1
-rw-r--r--  net/sched/sch_qfq.c      |  2
-rw-r--r--  net/sched/sch_taprio.c   |  1
-rw-r--r--  net/sched/sch_tbf.c      |  1
19 files changed, 59 insertions(+), 39 deletions(-)
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 396b576390d0..c2b5bc19e091 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -47,12 +47,10 @@ TC_INDIRECT_SCOPE int tcf_bpf_act(struct sk_buff *skb,
filter = rcu_dereference(prog->filter);
if (at_ingress) {
__skb_push(skb, skb->mac_len);
- bpf_compute_data_pointers(skb);
- filter_res = bpf_prog_run(filter, skb);
+ filter_res = bpf_prog_run_data_pointers(filter, skb);
__skb_pull(skb, skb->mac_len);
} else {
- bpf_compute_data_pointers(skb);
- filter_res = bpf_prog_run(filter, skb);
+ filter_res = bpf_prog_run_data_pointers(filter, skb);
}
if (unlikely(!skb->tstamp && skb->tstamp_type))
skb->tstamp_type = SKB_CLOCK_REALTIME;
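
Note: both call sites previously paired bpf_compute_data_pointers() with
bpf_prog_run(); the new helper folds the two into one call. A minimal sketch
of what bpf_prog_run_data_pointers() is assumed to do, inferred from these
call sites: saving and restoring the data-pointer slots keeps the qdisc-owned
part of skb->cb intact across the program run (struct bpf_skb_data_end is the
real cb overlay from include/linux/filter.h, but treat the body as an
approximation):

static inline u32 bpf_prog_run_data_pointers(const struct bpf_prog *prog,
					     struct sk_buff *skb)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
	void *saved_data_meta = cb->data_meta;
	void *saved_data_end = cb->data_end;
	u32 res;

	bpf_compute_data_pointers(skb);
	res = bpf_prog_run(prog, skb);

	/* undo the scribbling over the shared cb area */
	cb->data_meta = saved_data_meta;
	cb->data_end = saved_data_end;

	return res;
}
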
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 6749a4a9a9cd..2b6ac7069dc1 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -948,9 +948,9 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
return err & NF_VERDICT_MASK;
if (action & BIT(NF_NAT_MANIP_SRC))
- tc_skb_cb(skb)->post_ct_snat = 1;
+ qdisc_skb_cb(skb)->post_ct_snat = 1;
if (action & BIT(NF_NAT_MANIP_DST))
- tc_skb_cb(skb)->post_ct_dnat = 1;
+ qdisc_skb_cb(skb)->post_ct_dnat = 1;
return err;
#else
@@ -986,7 +986,7 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
tcf_action_update_bstats(&c->common, skb);
if (clear) {
- tc_skb_cb(skb)->post_ct = false;
+ qdisc_skb_cb(skb)->post_ct = false;
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
nf_ct_put(ct);
@@ -1097,7 +1097,7 @@ do_nat:
out_push:
skb_push_rcsum(skb, nh_ofs);
- tc_skb_cb(skb)->post_ct = true;
+ qdisc_skb_cb(skb)->post_ct = true;
tc_skb_cb(skb)->zone = p->zone;
out_clear:
if (defrag)
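
Note: the conntrack flags move from the tc-private tc_skb_cb into the shared
qdisc_skb_cb, while the zone stays tc-private (see the unchanged
tc_skb_cb(skb)->zone line above). A sketch of the assumed layout after this
change; the flag bits are what moves, the remaining fields are illustrative:

struct qdisc_skb_cb {
	unsigned int	pkt_len;
	u16		pkt_segs;	/* segment count, also added in this series */
	u8		post_ct:1;	/* moved here from tc_skb_cb */
	u8		post_ct_snat:1;
	u8		post_ct_dnat:1;
	unsigned char	data[QDISC_CB_PRIV_LEN];
};

struct tc_skb_cb {
	struct qdisc_skb_cb cb;		/* must stay the first member */
	u16 mru;
	u16 zone;			/* still tc-private */
};
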
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 7c6975632fc2..1dfdda6c2d4c 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -649,9 +649,9 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
memset(&opt, 0, sizeof(opt));
- opt.index = ife->tcf_index,
- opt.refcnt = refcount_read(&ife->tcf_refcnt) - ref,
- opt.bindcnt = atomic_read(&ife->tcf_bindcnt) - bind,
+ opt.index = ife->tcf_index;
+ opt.refcnt = refcount_read(&ife->tcf_refcnt) - ref;
+ opt.bindcnt = atomic_read(&ife->tcf_bindcnt) - bind;
spin_lock_bh(&ife->tcf_lock);
opt.action = ife->tcf_action;
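
Note: the old lines ended in commas, so the three assignments parsed as one
comma expression. Runtime behavior was identical, but the form is fragile; a
contrived example of how it bites:

	/* Both assignments run whenever cond holds - the comma operator
	 * chains them into a single statement, which is rarely intended:
	 */
	if (cond)
		opt.index = ife->tcf_index,
		opt.refcnt = refcount_read(&ife->tcf_refcnt) - ref;
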
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f751cd5eeac8..ebca4b926dcf 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1872,9 +1872,9 @@ int tcf_classify(struct sk_buff *skb,
}
ext->chain = last_executed_chain;
ext->mru = cb->mru;
- ext->post_ct = cb->post_ct;
- ext->post_ct_snat = cb->post_ct_snat;
- ext->post_ct_dnat = cb->post_ct_dnat;
+ ext->post_ct = qdisc_skb_cb(skb)->post_ct;
+ ext->post_ct_snat = qdisc_skb_cb(skb)->post_ct_snat;
+ ext->post_ct_dnat = qdisc_skb_cb(skb)->post_ct_dnat;
ext->zone = cb->zone;
}
}
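
Note: same split as in act_ct.c - the post_ct bits are read through the
shared qdisc prefix, while mru and zone keep coming from the tc-private cb.
Sketch of the surrounding context, assuming cb is tc_skb_cb(skb) as elsewhere
in tcf_classify():

	struct tc_skb_cb *cb = tc_skb_cb(skb);

	ext->mru = cb->mru;				/* tc-private */
	ext->post_ct = qdisc_skb_cb(skb)->post_ct;	/* shared qdisc cb */
	ext->zone = cb->zone;				/* tc-private */
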
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 7fbe42f0e5c2..a32754a2658b 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -97,12 +97,10 @@ TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb,
} else if (at_ingress) {
/* It is safe to push/pull even if skb_shared() */
__skb_push(skb, skb->mac_len);
- bpf_compute_data_pointers(skb);
- filter_res = bpf_prog_run(prog->filter, skb);
+ filter_res = bpf_prog_run_data_pointers(prog->filter, skb);
__skb_pull(skb, skb->mac_len);
} else {
- bpf_compute_data_pointers(skb);
- filter_res = bpf_prog_run(prog->filter, skb);
+ filter_res = bpf_prog_run_data_pointers(prog->filter, skb);
}
if (unlikely(!skb->tstamp && skb->tstamp_type))
skb->tstamp_type = SKB_CLOCK_REALTIME;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 099ff6a3e1f5..7669371c1354 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -326,7 +326,7 @@ TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb,
struct tcf_result *res)
{
struct cls_fl_head *head = rcu_dereference_bh(tp->root);
- bool post_ct = tc_skb_cb(skb)->post_ct;
+ bool post_ct = qdisc_skb_cb(skb)->post_ct;
u16 zone = tc_skb_cb(skb)->zone;
struct fl_flow_key skb_key;
struct fl_flow_mask *mask;
diff --git a/net/sched/em_canid.c b/net/sched/em_canid.c
index 5337bc462755..2d27f91d8441 100644
--- a/net/sched/em_canid.c
+++ b/net/sched/em_canid.c
@@ -99,6 +99,9 @@ static int em_canid_match(struct sk_buff *skb, struct tcf_ematch *m,
int i;
const struct can_filter *lp;
+ if (!pskb_may_pull(skb, CAN_MTU))
+ return 0;
+
can_id = em_canid_get_id(skb);
if (can_id & CAN_EFF_FLAG) {
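
Note: the new check guarantees at least CAN_MTU linear bytes before the frame
is dereferenced. For context, the getter is assumed to read the CAN header
straight out of the linear data, roughly:

static canid_t em_canid_get_id(struct sk_buff *skb)
{
	/* the CAN header sits at the start of the linear data */
	struct can_frame *cf = (struct can_frame *)skb->data;

	return cf->can_id;
}
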
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index 64b637f18bc7..48c1bce74f49 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -22,9 +22,12 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
struct tcf_pkt_info *info)
{
struct tcf_em_cmp *cmp = (struct tcf_em_cmp *) em->data;
- unsigned char *ptr = tcf_get_base_ptr(skb, cmp->layer) + cmp->off;
+ unsigned char *ptr = tcf_get_base_ptr(skb, cmp->layer);
u32 val = 0;
+ if (!ptr)
+ return 0;
+ ptr += cmp->off;
if (!tcf_valid_offset(skb, ptr, cmp->align))
return 0;
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index 4f9f21a05d5e..c65ffa5fff94 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -42,6 +42,8 @@ static int em_nbyte_match(struct sk_buff *skb, struct tcf_ematch *em,
struct nbyte_data *nbyte = (struct nbyte_data *) em->data;
unsigned char *ptr = tcf_get_base_ptr(skb, nbyte->hdr.layer);
+ if (!ptr)
+ return 0;
ptr += nbyte->hdr.off;
if (!tcf_valid_offset(skb, ptr, nbyte->hdr.len))
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 6b3d0af72c39..692e2be1793e 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -29,12 +29,19 @@ static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m,
struct tcf_pkt_info *info)
{
struct text_match *tm = EM_TEXT_PRIV(m);
+ unsigned char *ptr;
int from, to;
- from = tcf_get_base_ptr(skb, tm->from_layer) - skb->data;
+ ptr = tcf_get_base_ptr(skb, tm->from_layer);
+ if (!ptr)
+ return 0;
+ from = ptr - skb->data;
from += tm->from_offset;
- to = tcf_get_base_ptr(skb, tm->to_layer) - skb->data;
+ ptr = tcf_get_base_ptr(skb, tm->to_layer);
+ if (!ptr)
+ return 0;
+ to = ptr - skb->data;
to += tm->to_offset;
return skb_find_text(skb, from, to, tm->config) != UINT_MAX;
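
Note: em_cmp, em_nbyte and em_text all gain the same NULL check because
tcf_get_base_ptr() can return NULL when the requested header was never set on
the skb. A sketch of the assumed helper (see include/net/pkt_cls.h for the
real one; the *_was_set guards are an assumption about this series):

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header_was_set(skb) ?
		       skb_mac_header(skb) : NULL;
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header_was_set(skb) ?
		       skb_transport_header(skb) : NULL;
	}
	return NULL;
}
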
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 32bacfc314c2..0ea9440f68c6 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1398,15 +1398,15 @@ static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
const struct skb_shared_info *shinfo = skb_shinfo(skb);
unsigned int hdr_len, last_len = 0;
u32 off = skb_network_offset(skb);
+ u16 segs = qdisc_pkt_segs(skb);
u32 len = qdisc_pkt_len(skb);
- u16 segs = 1;
q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
- if (!shinfo->gso_size)
+ if (segs == 1)
return cake_calc_overhead(q, len, off);
- /* borrowed from qdisc_pkt_len_init() */
+ /* borrowed from qdisc_pkt_len_segs_init() */
if (!skb->encapsulation)
hdr_len = skb_transport_offset(skb);
else
@@ -1430,12 +1430,6 @@ static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
hdr_len += sizeof(struct udphdr);
}
- if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
- segs = DIV_ROUND_UP(skb->len - hdr_len,
- shinfo->gso_size);
- else
- segs = shinfo->gso_segs;
-
len = shinfo->gso_size + hdr_len;
last_len = skb->len - shinfo->gso_size * (segs - 1);
@@ -1788,7 +1782,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (unlikely(len > b->max_skblen))
b->max_skblen = len;
- if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+ if (qdisc_pkt_segs(skb) > 1 && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
unsigned int slen = 0, numsegs = 0;
@@ -1800,6 +1794,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
skb_list_walk_safe(segs, segs, nskb) {
skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
+ qdisc_skb_cb(segs)->pkt_segs = 1;
cobalt_set_enqueue_time(segs, now);
get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
segs);
@@ -2188,7 +2183,7 @@ retry:
b->tin_dropped++;
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_qstats_drop(sch);
- kfree_skb_reason(skb, reason);
+ qdisc_dequeue_drop(sch, skb, reason);
if (q->rate_flags & CAKE_FLAG_INGRESS)
goto retry;
}
@@ -2729,6 +2724,8 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
int i, j, err;
sch->limit = 10240;
+ sch->flags |= TCQ_F_DEQUEUE_DROPS;
+
q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
q->flow_mode = CAKE_FLOW_TRIPLE;
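
Note: cake now reads a segment count assumed to be cached in the qdisc cb at
enqueue time by the renamed qdisc_pkt_len_segs_init() (the helper the updated
comment cites), instead of recomputing the SKB_GSO_DODGY estimate locally. A
sketch of the assumed accessor pair:

static inline u16 qdisc_pkt_segs(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_segs;	/* 1 for non-GSO packets */
}

/* presumably filled once on entry to the qdisc layer, roughly: */
static inline void qdisc_pkt_len_segs_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	u16 segs = 1;

	if (shinfo->gso_size) {
		if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
			/* untrusted gso_segs: rederive it (the real code
			 * computes hdr_len as in cake_overhead() above)
			 */
			segs = DIV_ROUND_UP(skb->len - skb_transport_offset(skb),
					    shinfo->gso_size);
		else
			segs = shinfo->gso_segs;
	}
	qdisc_skb_cb(skb)->pkt_segs = segs;
}
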
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index fa0314679e43..c6551578f1cf 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -52,7 +52,7 @@ static void drop_func(struct sk_buff *skb, void *ctx)
{
struct Qdisc *sch = ctx;
- kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_CONGESTED);
+ qdisc_dequeue_drop(sch, skb, SKB_DROP_REASON_QDISC_CONGESTED);
qdisc_qstats_drop(sch);
}
@@ -182,6 +182,8 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt,
else
sch->flags &= ~TCQ_F_CAN_BYPASS;
+ sch->flags |= TCQ_F_DEQUEUE_DROPS;
+
return 0;
}
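
Note: with TCQ_F_DEQUEUE_DROPS advertised, packets dropped on the dequeue
path go through qdisc_dequeue_drop() instead of kfree_skb_reason(). The
contract is assumed to be deferred freeing, so the skbs can be released after
the qdisc lock is dropped; a heavily hedged sketch (the to_free_list field
and the reason handling are hypothetical):

static inline void qdisc_dequeue_drop(struct Qdisc *sch, struct sk_buff *skb,
				      enum skb_drop_reason reason)
{
	if (sch->flags & TCQ_F_DEQUEUE_DROPS) {
		/* hypothetical per-qdisc list, drained by the caller once
		 * the qdisc lock is released; the reason would have to
		 * travel with the skb, e.g. in its cb
		 */
		skb->next = sch->to_free_list;
		sch->to_free_list = skb;
	} else {
		kfree_skb_reason(skb, reason);
	}
}
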
diff --git a/net/sched/sch_dualpi2.c b/net/sched/sch_dualpi2.c
index 4b975feb52b1..6d7e6389758d 100644
--- a/net/sched/sch_dualpi2.c
+++ b/net/sched/sch_dualpi2.c
@@ -475,6 +475,7 @@ static int dualpi2_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
* (3) Enqueue fragment & set ts in dualpi2_enqueue_skb
*/
qdisc_skb_cb(nskb)->pkt_len = nskb->len;
+ qdisc_skb_cb(nskb)->pkt_segs = 1;
dualpi2_skb_cb(nskb)->classified =
dualpi2_skb_cb(skb)->classified;
dualpi2_skb_cb(nskb)->ect = dualpi2_skb_cb(skb)->ect;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index fee922da2f99..6e5f2f4f2415 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -480,7 +480,10 @@ static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
struct sk_buff *skb)
{
if (skb == flow->head) {
- flow->head = skb->next;
+ struct sk_buff *next = skb->next;
+
+ prefetch(next);
+ flow->head = next;
} else {
rb_erase(&skb->rbnode, &flow->t_root);
skb->dev = qdisc_dev(sch);
@@ -497,6 +500,7 @@ static void fq_dequeue_skb(struct Qdisc *sch, struct fq_flow *flow,
skb_mark_not_on_list(skb);
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
+ qdisc_bstats_update(sch, skb);
}
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
@@ -711,6 +715,7 @@ begin:
goto begin;
}
prefetch(&skb->end);
+ fq_dequeue_skb(sch, f, skb);
if ((s64)(now - time_next_packet - q->ce_threshold) > 0) {
INET_ECN_set_ce(skb);
q->stat_ce_mark++;
@@ -718,7 +723,6 @@ begin:
if (--f->qlen == 0)
q->inactive_flows++;
q->band_pkt_count[fq_skb_cb(skb)->band]--;
- fq_dequeue_skb(sch, f, skb);
} else {
head->first = f->next;
/* force a pass through old_flows to prevent starvation */
@@ -776,7 +780,6 @@ begin:
f->time_next_packet = now + len;
}
out:
- qdisc_bstats_update(sch, skb);
return skb;
}
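
Note: net effect of the sch_fq hunks - advancing the flow head prefetches the
next skb, and fq_dequeue_skb() now both unlinks and accounts the packet, so
qdisc_bstats_update() no longer sits on the common exit path. Reassembled
from the hunks above, the helper ends up as:

static void fq_dequeue_skb(struct Qdisc *sch, struct fq_flow *flow,
			   struct sk_buff *skb)
{
	fq_erase_head(sch, flow, skb);	/* prefetches the next head skb */
	skb_mark_not_on_list(skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;
	qdisc_bstats_update(sch, skb);	/* moved in from fq_dequeue() */
}
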
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index a14142392939..dc187c7f06b1 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -275,7 +275,7 @@ static void drop_func(struct sk_buff *skb, void *ctx)
{
struct Qdisc *sch = ctx;
- kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_CONGESTED);
+ qdisc_dequeue_drop(sch, skb, SKB_DROP_REASON_QDISC_CONGESTED);
qdisc_qstats_drop(sch);
}
@@ -519,6 +519,9 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
sch->flags |= TCQ_F_CAN_BYPASS;
else
sch->flags &= ~TCQ_F_CAN_BYPASS;
+
+ sch->flags |= TCQ_F_DEQUEUE_DROPS;
+
return 0;
alloc_failure:
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index eafc316ae319..32a5f3304046 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -429,6 +429,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff *segs;
netdev_features_t features = netif_skb_features(skb);
+ qdisc_skb_cb(skb)->pkt_segs = 1;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs)) {
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 2255355e51d3..d920f57dc6d7 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1250,7 +1250,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
}
- gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+ gso_segs = qdisc_pkt_segs(skb);
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
pr_debug("qfq_enqueue: enqueue failed %d\n", err);
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 39b735386996..300d577b3286 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -595,6 +595,7 @@ static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch,
skb_list_walk_safe(segs, segs, nskb) {
skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
+ qdisc_skb_cb(segs)->pkt_segs = 1;
slen += segs->len;
/* FIXME: we should be segmenting to a smaller size
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 4c977f049670..f2340164f579 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -221,6 +221,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
skb_mark_not_on_list(segs);
seg_len = segs->len;
qdisc_skb_cb(segs)->pkt_len = seg_len;
+ qdisc_skb_cb(segs)->pkt_segs = 1;
ret = qdisc_enqueue(segs, q->qdisc, to_free);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))