author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-05-03 17:24:08 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-05-03 17:24:08 -0700
commit    8800cea62025a5209d110c5fa5990429239d6eee
tree      2aa9e4732d7014dcda4c0e80d2e377f52e2262e9 /net/sched
parent    84e48b6d64fdc29586bc7d9329f986cdae591a80
parent    14d50e78f947d340066ee0465dd892ad1d9162c0
Merge of rsync://rsync.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git/
Diffstat (limited to 'net/sched')
 net/sched/Kconfig       |   2
 net/sched/act_api.c     |   4
 net/sched/sch_api.c     |   1
 net/sched/sch_generic.c |   5
 net/sched/sch_htb.c     |   4
 net/sched/sch_netem.c   | 131
 6 files changed, 94 insertions(+), 53 deletions(-)
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 9c118baed9d..b0941186f86 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -185,7 +185,7 @@ config NET_SCH_GRED
 	depends on NET_SCHED
 	help
 	  Say Y here if you want to use the Generic Random Early Detection
-	  (RED) packet scheduling algorithm for some of your network devices
+	  (GRED) packet scheduling algorithm for some of your network devices
 	  (see the top of <file:net/sched/sch_red.c> for details and
 	  references about the algorithm).
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 5e6cc371b39..cafcb084098 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -171,10 +171,10 @@ repeat:
 				skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
 				skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
 			}
-			if (ret != TC_ACT_PIPE)
-				goto exec_done;
 			if (ret == TC_ACT_REPEAT)
 				goto repeat;	/* we need a ttl - JHS */
+			if (ret != TC_ACT_PIPE)
+				goto exec_done;
 		}
 		act = a->next;
 	}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 4323a74eea3..07977f8f267 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1289,6 +1289,7 @@ static int __init pktsched_init(void)
 
 subsys_initcall(pktsched_init);
 
+EXPORT_SYMBOL(qdisc_lookup);
 EXPORT_SYMBOL(qdisc_get_rtab);
 EXPORT_SYMBOL(qdisc_put_rtab);
 EXPORT_SYMBOL(register_qdisc);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 8c01e023f02..87e48a4e105 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -179,6 +179,7 @@ requeue:
 		netif_schedule(dev);
 		return 1;
 	}
+	BUG_ON((int) q->q.qlen < 0);
 	return q->q.qlen;
 }
 
@@ -539,6 +540,10 @@ void dev_activate(struct net_device *dev)
 		write_unlock_bh(&qdisc_tree_lock);
 	}
 
+	if (!netif_carrier_ok(dev))
+		/* Delay activation until next carrier-on event */
+		return;
+
 	spin_lock_bh(&dev->queue_lock);
 	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
 	if (dev->qdisc != &noqueue_qdisc) {
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index a85935e7d53..558cc087e60 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -717,6 +717,10 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		if (q->direct_queue.qlen < q->direct_qlen) {
 			__skb_queue_tail(&q->direct_queue, skb);
 			q->direct_pkts++;
+		} else {
+			kfree_skb(skb);
+			sch->qstats.drops++;
+			return NET_XMIT_DROP;
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
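
Two of the hunks above change behavior rather than bookkeeping: dev_activate() now returns early while the link has no carrier, leaving the device on its placeholder qdisc until a carrier-on event re-activates it, and htb_enqueue() now frees and counts packets that overflow the direct queue instead of leaking them. Below is a minimal user-space sketch of the carrier-gating pattern only; struct dev and carrier_on() are hypothetical stand-ins, and the kernel's actual re-activation presumably happens via the link-watch path, which lies outside net/sched and so outside this diff.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct net_device and its qdisc state. */
struct dev {
        bool carrier_ok;
        bool qdisc_active;
};

static void dev_activate(struct dev *d)
{
        if (!d->carrier_ok) {
                /* Mirrors the new early return: defer until carrier-on. */
                puts("activate: no carrier, deferred");
                return;
        }
        d->qdisc_active = true;
        puts("activate: qdisc installed");
}

/* In the kernel, something reacting to netif_carrier_on() would re-run
 * activation; here it is modeled as a direct callback. */
static void carrier_on(struct dev *d)
{
        d->carrier_ok = true;
        dev_activate(d);
}

int main(void)
{
        struct dev eth0 = { false, false };

        dev_activate(&eth0);    /* interface brought up, cable unplugged */
        carrier_on(&eth0);      /* cable plugged in later */
        return 0;
}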
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 31c29deb139..e0c9fbe73b1 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -138,38 +138,77 @@ static long tabledist(unsigned long mu, long sigma,
 }
 
 /* Put skb in the private delayed queue.
  */
-static int delay_skb(struct Qdisc *sch, struct sk_buff *skb)
+static int netem_delay(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
-	struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
 	psched_tdiff_t td;
 	psched_time_t now;
 
 	PSCHED_GET_TIME(now);
 	td = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist);
-	PSCHED_TADD2(now, td, cb->time_to_send);
 
 	/* Always queue at tail to keep packets in order */
 	if (likely(q->delayed.qlen < q->limit)) {
+		struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
+
+		PSCHED_TADD2(now, td, cb->time_to_send);
+
+		pr_debug("netem_delay: skb=%p now=%llu tosend=%llu\n", skb,
+			 now, cb->time_to_send);
+
 		__skb_queue_tail(&q->delayed, skb);
-		if (!timer_pending(&q->timer)) {
-			q->timer.expires = jiffies + PSCHED_US2JIFFIE(td);
-			add_timer(&q->timer);
-		}
 		return NET_XMIT_SUCCESS;
 	}
 
+	pr_debug("netem_delay: queue over limit %d\n", q->limit);
+	sch->qstats.overlimits++;
 	kfree_skb(skb);
 	return NET_XMIT_DROP;
 }
 
+/*
+ * Move a packet that is ready to send from the delay holding
+ * list to the underlying qdisc.
+ */
+static int netem_run(struct Qdisc *sch)
+{
+	struct netem_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *skb;
+	psched_time_t now;
+
+	PSCHED_GET_TIME(now);
+
+	skb = skb_peek(&q->delayed);
+	if (skb) {
+		const struct netem_skb_cb *cb
+			= (const struct netem_skb_cb *)skb->cb;
+		long delay
+			= PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
+		pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
+
+		/* if more time remaining? */
+		if (delay > 0) {
+			mod_timer(&q->timer, jiffies + delay);
+			return 1;
+		}
+
+		__skb_unlink(skb, &q->delayed);
+
+		if (q->qdisc->enqueue(skb, q->qdisc)) {
+			sch->q.qlen--;
+			sch->qstats.drops++;
+		}
+	}
+
+	return 0;
+}
+
 static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
-	struct sk_buff *skb2;
 	int ret;
 
-	pr_debug("netem_enqueue skb=%p @%lu\n", skb, jiffies);
+	pr_debug("netem_enqueue skb=%p\n", skb);
 
 	/* Random packet drop 0 => none, ~0 => all */
 	if (q->loss && q->loss >= get_crandom(&q->loss_cor)) {
@@ -180,11 +219,21 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	/* Random duplication */
-	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)
-	    && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
-		pr_debug("netem_enqueue: dup %p\n", skb2);
+	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) {
+		struct sk_buff *skb2;
+
+		skb2 = skb_clone(skb, GFP_ATOMIC);
+		if (skb2 && netem_delay(sch, skb2) == NET_XMIT_SUCCESS) {
+			struct Qdisc *qp;
+
+			/* Since one packet can generate two packets in the
+			 * queue, the parent's qlen accounting gets confused,
+			 * so fix it.
+			 */
+			qp = qdisc_lookup(sch->dev, TC_H_MAJ(sch->parent));
+			if (qp)
+				qp->q.qlen++;
 
-		if (delay_skb(sch, skb2)) {
 			sch->q.qlen++;
 			sch->bstats.bytes += skb2->len;
 			sch->bstats.packets++;
@@ -202,7 +251,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		ret = q->qdisc->enqueue(skb, q->qdisc);
 	} else {
 		q->counter = 0;
-		ret = delay_skb(sch, skb);
+		ret = netem_delay(sch, skb);
+		netem_run(sch);
 	}
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
@@ -212,6 +262,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	} else
 		sch->qstats.drops++;
 
+	pr_debug("netem: enqueue ret %d\n", ret);
 	return ret;
 }
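
The duplication rework above is also why this merge exports qdisc_lookup() from sch_api.c: a clone made inside netem means the child queue grows by two packets for the single enqueue the parent observed, so netem looks up the parent qdisc and bumps its qlen by hand. Below is a compilable sketch of just that accounting; the user-space names are hypothetical stand-ins.

#include <stdio.h>

struct qdisc { int qlen; };

static void child_enqueue(struct qdisc *parent, struct qdisc *child,
                          int duplicate)
{
        child->qlen++;                  /* the packet itself */
        if (duplicate) {
                child->qlen++;          /* its clone */
                parent->qlen++;         /* fix up the parent's view, as the
                                         * qdisc_lookup() call does above */
        }
}

int main(void)
{
        struct qdisc parent = {0}, child = {0};

        parent.qlen++;                  /* parent counts one enqueue */
        child_enqueue(&parent, &child, 1);
        printf("parent=%d child=%d\n", parent.qlen, child.qlen); /* 2 and 2 */
        return 0;
}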
@@ -241,56 +292,35 @@ static unsigned int netem_drop(struct Qdisc* sch)
 	return len;
 }
 
-/* Dequeue packet.
- * Move all packets that are ready to send from the delay holding
- * list to the underlying qdisc, then just call dequeue
- */
 static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
+	int pending;
+
+	pending = netem_run(sch);
 
 	skb = q->qdisc->dequeue(q->qdisc);
-	if (skb)
+	if (skb) {
+		pr_debug("netem_dequeue: return skb=%p\n", skb);
 		sch->q.qlen--;
+		sch->flags &= ~TCQ_F_THROTTLED;
+	}
+	else if (pending) {
+		pr_debug("netem_dequeue: throttling\n");
+		sch->flags |= TCQ_F_THROTTLED;
+	}
+
 	return skb;
 }
 
 static void netem_watchdog(unsigned long arg)
 {
 	struct Qdisc *sch = (struct Qdisc *)arg;
-	struct netem_sched_data *q = qdisc_priv(sch);
-	struct net_device *dev = sch->dev;
-	struct sk_buff *skb;
-	psched_time_t now;
-
-	pr_debug("netem_watchdog: fired @%lu\n", jiffies);
-
-	spin_lock_bh(&dev->queue_lock);
-	PSCHED_GET_TIME(now);
-
-	while ((skb = skb_peek(&q->delayed)) != NULL) {
-		const struct netem_skb_cb *cb
-			= (const struct netem_skb_cb *)skb->cb;
-		long delay
-			= PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
-		pr_debug("netem_watchdog: skb %p@%lu %ld\n",
-			 skb, jiffies, delay);
-
-		/* if more time remaining? */
-		if (delay > 0) {
-			mod_timer(&q->timer, jiffies + delay);
-			break;
-		}
-
-		__skb_unlink(skb, &q->delayed);
-
-		if (q->qdisc->enqueue(skb, q->qdisc)) {
-			sch->q.qlen--;
-			sch->qstats.drops++;
-		}
-	}
-	qdisc_run(dev);
-	spin_unlock_bh(&dev->queue_lock);
+	pr_debug("netem_watchdog qlen=%d\n", sch->q.qlen);
+	sch->flags &= ~TCQ_F_THROTTLED;
+	netif_schedule(sch->dev);
 }
 
 static void netem_reset(struct Qdisc *sch)
@@ -301,6 +331,7 @@ static void netem_reset(struct Qdisc *sch)
 	skb_queue_purge(&q->delayed);
 
 	sch->q.qlen = 0;
+	sch->flags &= ~TCQ_F_THROTTLED;
 	del_timer_sync(&q->timer);
 }
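
Taken together, the sch_netem.c changes replace the old design, in which the watchdog walked the delayed list under the queue lock and force-fed the inner qdisc, with a poll-from-dequeue design: netem_run() moves due packets into the inner qdisc, netem_dequeue() sets TCQ_F_THROTTLED when a packet exists but is not yet due, and the watchdog merely clears the flag and calls netif_schedule(). The following self-contained user-space model of that flow uses a hand-advanced fake clock, and every name in it is a hypothetical stand-in for the corresponding kernel structure.

#include <stdbool.h>
#include <stdio.h>

#define THROTTLED 0x1
#define MAXQ 16

struct pkt { long due_ms; int id; };

static struct pkt delayed[MAXQ];        /* netem's private delay queue */
static int dhead, dtail;
static int inner[MAXQ];                 /* stands in for q->qdisc */
static int ihead, itail;
static unsigned flags;
static long now_ms;                     /* fake clock, advanced by hand */

/* Model of netem_run(): move one due packet to the inner queue, or
 * report that the head packet is still in the future. */
static bool run(void)
{
        if (dhead == dtail)
                return false;           /* nothing delayed */
        if (delayed[dhead % MAXQ].due_ms > now_ms)
                return true;            /* kernel would mod_timer() here */
        inner[itail++ % MAXQ] = delayed[dhead++ % MAXQ].id;
        return false;
}

static int dequeue(void)
{
        bool pending = run();

        if (ihead != itail) {
                flags &= ~THROTTLED;    /* progress: clear the throttle */
                return inner[ihead++ % MAXQ];
        }
        if (pending)
                flags |= THROTTLED;     /* due later: throttle instead of
                                         * spinning on dequeue */
        return -1;
}

/* Model of netem_watchdog(): just unthrottle; the next dequeue pass
 * (netif_schedule() in the kernel) does the real work. */
static void watchdog(void)
{
        flags &= ~THROTTLED;
}

int main(void)
{
        int id;

        delayed[dtail++ % MAXQ] = (struct pkt){ 0, 1 };  /* due now */
        delayed[dtail++ % MAXQ] = (struct pkt){ 50, 2 }; /* due at t=50ms */

        id = dequeue();
        printf("t=0  got %d flags=%u\n", id, flags);    /* 1, unthrottled */
        id = dequeue();
        printf("t=0  got %d flags=%u\n", id, flags);    /* -1, throttled */

        now_ms = 50;                    /* delay elapsed; timer would fire */
        watchdog();
        id = dequeue();
        printf("t=50 got %d flags=%u\n", id, flags);    /* 2, unthrottled */
        return 0;
}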