-rw-r--r--	include/net/pkt_sched.h	1
-rw-r--r--	net/sched/sch_cbq.c	7
-rw-r--r--	net/sched/sch_netem.c	6
3 files changed, 6 insertions, 8 deletions
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 32cdf0137cb2..49325ffb00b1 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -54,7 +54,6 @@ typedef long psched_tdiff_t;
 #define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2))
 #define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
 	min_t(long long, (tv1) - (tv2), bound)
-#define PSCHED_TLESS(tv1, tv2) ((tv1) < (tv2))
 #define PSCHED_SET_PASTPERFECT(t) ((t) = 0)
 #define PSCHED_IS_PASTPERFECT(t) ((t) == 0)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 290b26bdc89d..9e6cdab6af3b 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,7 +390,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 		now = q->now + incr;
 
 		do {
-			if (PSCHED_TLESS(cl->undertime, now)) {
+			if (cl->undertime < now) {
 				q->toplevel = cl->level;
 				return;
 			}
@@ -845,8 +845,7 @@ cbq_under_limit(struct cbq_class *cl)
 	if (cl->tparent == NULL)
 		return cl;
 
-	if (PSCHED_IS_PASTPERFECT(cl->undertime) ||
-	    !PSCHED_TLESS(q->now, cl->undertime)) {
+	if (PSCHED_IS_PASTPERFECT(cl->undertime) || q->now >= cl->undertime) {
 		cl->delayed = 0;
 		return cl;
 	}
@@ -870,7 +869,7 @@ cbq_under_limit(struct cbq_class *cl)
 		if (cl->level > q->toplevel)
 			return NULL;
 	} while (!PSCHED_IS_PASTPERFECT(cl->undertime) &&
-		 PSCHED_TLESS(q->now, cl->undertime));
+		 q->now < cl->undertime);
 
 	cl->delayed = 0;
 	return cl;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 6044ae77d5da..5d571aa04a76 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -286,7 +286,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 		/* if more time remaining? */
 		PSCHED_GET_TIME(now);
 
-		if (!PSCHED_TLESS(now, cb->time_to_send)) {
+		if (cb->time_to_send <= now) {
 			pr_debug("netem_dequeue: return skb=%p\n", skb);
 			sch->q.qlen--;
 			return skb;
@@ -494,7 +494,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 
 	if (likely(skb_queue_len(list) < q->limit)) {
 		/* Optimize for add at tail */
-		if (likely(skb_queue_empty(list) || !PSCHED_TLESS(tnext, q->oldest))) {
+		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
 			q->oldest = tnext;
 			return qdisc_enqueue_tail(nskb, sch);
 		}
@@ -503,7 +503,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 			const struct netem_skb_cb *cb
 				= (const struct netem_skb_cb *)skb->cb;
 
-			if (!PSCHED_TLESS(tnext, cb->time_to_send))
+			if (tnext >= cb->time_to_send)
 				break;
 		}
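For reference, the changes above are mechanical substitutions: PSCHED_TLESS(tv1, tv2) expanded to ((tv1) < (tv2)), so !PSCHED_TLESS(a, b) becomes a >= b and PSCHED_TLESS(a, b) becomes a < b. The standalone sketch below (not part of the patch; it models psched_time_t as a plain unsigned 64-bit integer purely for this example) checks that equivalence for the forms used in cbq_under_limit() and cbq_mark_toplevel():

/* Standalone sketch, not kernel code: shows that dropping the
 * PSCHED_TLESS() wrapper does not change the comparisons.
 */
#include <assert.h>
#include <stdint.h>

typedef uint64_t psched_time_t;	/* stand-in for the kernel typedef */

/* The macro removed by this patch. */
#define PSCHED_TLESS(tv1, tv2) ((tv1) < (tv2))

int main(void)
{
	psched_time_t now = 1000, undertime = 1500;

	/* cbq_under_limit(): !PSCHED_TLESS(q->now, cl->undertime)
	 * is the same test as q->now >= cl->undertime.
	 */
	assert((!PSCHED_TLESS(now, undertime)) == (now >= undertime));

	/* cbq_mark_toplevel(): PSCHED_TLESS(cl->undertime, now)
	 * is the same test as cl->undertime < now.
	 */
	assert(PSCHED_TLESS(undertime, now) == (undertime < now));

	return 0;
}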