net_sched: cleanups

Clean up net/sched code to match current CodingStyle and practices.

Reduce inline abuse.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eric Dumazet, 2011-01-19 19:26:56 +00:00, committed by David S. Miller
commit cc7ec456f8
41 changed files with 835 additions and 794 deletions
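
Before the per-file hunks, a hedged summary of the recurring transformations may help: assignments move out of `if` conditions, bare `unsigned` becomes `unsigned int`, struct opening braces move onto the declaration line, printk(KERN_xxx ...) becomes the pr_xxx() helpers, multi-line comments gain leading `*`, and inline/__inline__ is dropped from static functions. The sketch below is plain userspace C with invented identifiers (example_head, do_work, a faked pr_info), not code from the tree:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel helper; pr_info() replaces printk(KERN_INFO ...). */
#define pr_info(...) printf(__VA_ARGS__)

/* Struct braces per CodingStyle: "struct example_head {" on one line,
 * and bare "unsigned" spelled out as "unsigned int".
 */
struct example_head {
	unsigned int handle;
	struct example_head *next;
};

static int do_work(struct example_head *h)
{
	return h->handle ? 0 : -1;
}

/* Was "static inline" in the old style; the compiler can already inline a
 * file-local function on its own, so the keyword is dropped.
 */
static int cleanup_pattern(struct example_head *h)
{
	int err;

	/* Assignment moved out of the condition.
	 * Old style: if ((err = do_work(h)) < 0) return err;
	 */
	err = do_work(h);
	if (err < 0)
		return err;

	pr_info("handle %u ok\n", h->handle);
	return 0;
}

int main(void)
{
	struct example_head h = { .handle = 1, .next = NULL };

	return cleanup_pattern(&h);
}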

View file

@ -447,7 +447,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
nest = nla_nest_start(skb, TCA_OPTIONS); nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL) if (nest == NULL)
goto nla_put_failure; goto nla_put_failure;
if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) { err = tcf_action_dump_old(skb, a, bind, ref);
if (err > 0) {
nla_nest_end(skb, nest); nla_nest_end(skb, nest);
return err; return err;
} }
@ -549,8 +550,8 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
goto err_free; goto err_free;
/* module count goes up only when brand new policy is created /* module count goes up only when brand new policy is created
if it exists and is only bound to in a_o->init() then * if it exists and is only bound to in a_o->init() then
ACT_P_CREATED is not returned (a zero is). * ACT_P_CREATED is not returned (a zero is).
*/ */
if (err != ACT_P_CREATED) if (err != ACT_P_CREATED)
module_put(a_o->owner); module_put(a_o->owner);
@ -821,7 +822,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
nlh->nlmsg_flags |= NLM_F_ROOT; nlh->nlmsg_flags |= NLM_F_ROOT;
module_put(a->ops->owner); module_put(a->ops->owner);
kfree(a); kfree(a);
err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); err = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
if (err > 0) if (err > 0)
return 0; return 0;
@ -967,7 +969,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
/* dump then free all the actions after update; inserted policy /* dump then free all the actions after update; inserted policy
* stays intact * stays intact
* */ */
ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags); ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
for (a = act; a; a = act) { for (a = act; a; a = act) {
act = a->next; act = a->next;
@ -993,8 +995,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return -EINVAL; return -EINVAL;
} }
/* n->nlmsg_flags&NLM_F_CREATE /* n->nlmsg_flags & NLM_F_CREATE */
* */
switch (n->nlmsg_type) { switch (n->nlmsg_type) {
case RTM_NEWACTION: case RTM_NEWACTION:
/* we are going to assume all other flags /* we are going to assume all other flags
@ -1071,9 +1072,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
} }
a_o = tc_lookup_action(kind); a_o = tc_lookup_action(kind);
if (a_o == NULL) { if (a_o == NULL)
return 0; return 0;
}
memset(&a, 0, sizeof(struct tc_action)); memset(&a, 0, sizeof(struct tc_action));
a.ops = a_o; a.ops = a_o;

View file

@ -205,9 +205,9 @@ MODULE_LICENSE("GPL");
static int __init gact_init_module(void) static int __init gact_init_module(void)
{ {
#ifdef CONFIG_GACT_PROB #ifdef CONFIG_GACT_PROB
printk(KERN_INFO "GACT probability on\n"); pr_info("GACT probability on\n");
#else #else
printk(KERN_INFO "GACT probability NOT on\n"); pr_info("GACT probability NOT on\n");
#endif #endif
return tcf_register_action(&act_gact_ops); return tcf_register_action(&act_gact_ops);
} }
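
For context, pr_info() is just the kernel's wrapper around printk(KERN_INFO pr_fmt(fmt), ...), so the gact hunk above only changes spelling, not behaviour. A rough standalone approximation follows; printf stands in for printk, CONFIG_GACT_PROB is the real Kconfig symbol, everything else is simplified:

#include <stdio.h>

/* Rough model only: in the kernel, pr_info(fmt, ...) expands to
 * printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__); here printf stands in
 * so the snippet builds on its own.
 */
#define pr_info(...) printf(__VA_ARGS__)

int main(void)
{
#ifdef CONFIG_GACT_PROB
	pr_info("GACT probability on\n");
#else
	pr_info("GACT probability NOT on\n");
#endif
	return 0;
}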

View file

@ -162,7 +162,8 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
if (unlikely(!t)) if (unlikely(!t))
goto err2; goto err2;
if ((err = ipt_init_target(t, tname, hook)) < 0) err = ipt_init_target(t, tname, hook);
if (err < 0)
goto err3; goto err3;
spin_lock_bh(&ipt->tcf_lock); spin_lock_bh(&ipt->tcf_lock);
@ -212,8 +213,9 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
bstats_update(&ipt->tcf_bstats, skb); bstats_update(&ipt->tcf_bstats, skb);
/* yes, we have to worry about both in and out dev /* yes, we have to worry about both in and out dev
worry later - danger - this API seems to have changed * worry later - danger - this API seems to have changed
from earlier kernels */ * from earlier kernels
*/
par.in = skb->dev; par.in = skb->dev;
par.out = NULL; par.out = NULL;
par.hooknum = ipt->tcfi_hook; par.hooknum = ipt->tcfi_hook;
@ -253,8 +255,8 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
struct tc_cnt c; struct tc_cnt c;
/* for simple targets kernel size == user size /* for simple targets kernel size == user size
** user name = target name * user name = target name
** for foolproof you need to not assume this * for foolproof you need to not assume this
*/ */
t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC); t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);

View file

@ -41,7 +41,7 @@ static struct tcf_hashinfo mirred_hash_info = {
.lock = &mirred_lock, .lock = &mirred_lock,
}; };
static inline int tcf_mirred_release(struct tcf_mirred *m, int bind) static int tcf_mirred_release(struct tcf_mirred *m, int bind)
{ {
if (m) { if (m) {
if (bind) if (bind)
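
The mirred hunk is typical of the "reduce inline abuse" theme: `inline` is dropped from static functions because the compiler already sees the whole translation unit and makes the inlining decision itself. The refcount logic below is an invented userspace illustration of the shape of tcf_mirred_release(), not the real implementation:

#include <stdlib.h>

struct mirred {
	int refcnt;
	int bindcnt;
};

/* Previously this would have been "static inline int ...": the keyword adds
 * nothing for a file-local function, so the commit drops it and lets the
 * optimizer decide.
 */
static int mirred_release(struct mirred *m, int bind)
{
	if (!m)
		return 0;
	if (bind)
		m->bindcnt--;
	if (--m->refcnt == 0) {
		free(m);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct mirred *m = calloc(1, sizeof(*m));

	if (!m)
		return 1;
	m->refcnt = 1;
	return mirred_release(m, 0) ? 0 : 1;
}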

View file

@ -127,11 +127,9 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
int i, munged = 0; int i, munged = 0;
unsigned int off; unsigned int off;
if (skb_cloned(skb)) { if (skb_cloned(skb) &&
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
return p->tcf_action; return p->tcf_action;
}
}
off = skb_network_offset(skb); off = skb_network_offset(skb);

View file

@ -37,8 +37,7 @@ static struct tcf_hashinfo police_hash_info = {
}; };
/* old policer structure from before tc actions */ /* old policer structure from before tc actions */
struct tc_police_compat struct tc_police_compat {
{
u32 index; u32 index;
int action; int action;
u32 limit; u32 limit;
@ -139,7 +138,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est, static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est,
struct tc_action *a, int ovr, int bind) struct tc_action *a, int ovr, int bind)
{ {
unsigned h; unsigned int h;
int ret = 0, err; int ret = 0, err;
struct nlattr *tb[TCA_POLICE_MAX + 1]; struct nlattr *tb[TCA_POLICE_MAX + 1];
struct tc_police *parm; struct tc_police *parm;

View file

@ -47,7 +47,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
/* print policy string followed by _ then packet count /* print policy string followed by _ then packet count
* Example if this was the 3rd packet and the string was "hello" * Example if this was the 3rd packet and the string was "hello"
* then it would look like "hello_3" (without quotes) * then it would look like "hello_3" (without quotes)
**/ */
pr_info("simple: %s_%d\n", pr_info("simple: %s_%d\n",
(char *)d->tcfd_defdata, d->tcf_bstats.packets); (char *)d->tcfd_defdata, d->tcf_bstats.packets);
spin_unlock(&d->tcf_lock); spin_unlock(&d->tcf_lock);
@ -149,7 +149,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
return ret; return ret;
} }
static inline int tcf_simp_cleanup(struct tc_action *a, int bind) static int tcf_simp_cleanup(struct tc_action *a, int bind)
{ {
struct tcf_defact *d = a->priv; struct tcf_defact *d = a->priv;
@ -158,7 +158,7 @@ static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
return 0; return 0;
} }
static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref) int bind, int ref)
{ {
unsigned char *b = skb_tail_pointer(skb); unsigned char *b = skb_tail_pointer(skb);

View file

@ -144,7 +144,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
return ret; return ret;
} }
static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind) static int tcf_skbedit_cleanup(struct tc_action *a, int bind)
{ {
struct tcf_skbedit *d = a->priv; struct tcf_skbedit *d = a->priv;
@ -153,7 +153,7 @@ static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
return 0; return 0;
} }
static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref) int bind, int ref)
{ {
unsigned char *b = skb_tail_pointer(skb); unsigned char *b = skb_tail_pointer(skb);

View file

@ -149,7 +149,8 @@ replay:
if (prio == 0) { if (prio == 0) {
/* If no priority is given, user wants we allocated it. */ /* If no priority is given, user wants we allocated it. */
if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE)) if (n->nlmsg_type != RTM_NEWTFILTER ||
!(n->nlmsg_flags & NLM_F_CREATE))
return -ENOENT; return -ENOENT;
prio = TC_H_MAKE(0x80000000U, 0U); prio = TC_H_MAKE(0x80000000U, 0U);
} }
@ -176,7 +177,8 @@ replay:
} }
/* Is it classful? */ /* Is it classful? */
if ((cops = q->ops->cl_ops) == NULL) cops = q->ops->cl_ops;
if (!cops)
return -EINVAL; return -EINVAL;
if (cops->tcf_chain == NULL) if (cops->tcf_chain == NULL)
@ -199,7 +201,8 @@ replay:
for (back = chain; (tp = *back) != NULL; back = &tp->next) { for (back = chain; (tp = *back) != NULL; back = &tp->next) {
if (tp->prio >= prio) { if (tp->prio >= prio) {
if (tp->prio == prio) { if (tp->prio == prio) {
if (!nprio || (tp->protocol != protocol && protocol)) if (!nprio ||
(tp->protocol != protocol && protocol))
goto errout; goto errout;
} else } else
tp = NULL; tp = NULL;
@ -216,7 +219,8 @@ replay:
goto errout; goto errout;
err = -ENOENT; err = -ENOENT;
if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE)) if (n->nlmsg_type != RTM_NEWTFILTER ||
!(n->nlmsg_flags & NLM_F_CREATE))
goto errout; goto errout;
@ -420,7 +424,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return skb->len; return skb->len;
if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return skb->len; return skb->len;
if (!tcm->tcm_parent) if (!tcm->tcm_parent)
@ -429,7 +434,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
if (!q) if (!q)
goto out; goto out;
if ((cops = q->ops->cl_ops) == NULL) cops = q->ops->cl_ops;
if (!cops)
goto errout; goto errout;
if (cops->tcf_chain == NULL) if (cops->tcf_chain == NULL)
goto errout; goto errout;
@ -445,7 +451,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0]; s_t = cb->args[0];
for (tp = *chain, t = 0; tp; tp = tp->next, t++) { for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
if (t < s_t) continue; if (t < s_t)
continue;
if (TC_H_MAJ(tcm->tcm_info) && if (TC_H_MAJ(tcm->tcm_info) &&
TC_H_MAJ(tcm->tcm_info) != tp->prio) TC_H_MAJ(tcm->tcm_info) != tp->prio)
continue; continue;

View file

@ -21,14 +21,12 @@
#include <net/act_api.h> #include <net/act_api.h>
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
struct basic_head struct basic_head {
{
u32 hgenerator; u32 hgenerator;
struct list_head flist; struct list_head flist;
}; };
struct basic_filter struct basic_filter {
{
u32 handle; u32 handle;
struct tcf_exts exts; struct tcf_exts exts;
struct tcf_ematch_tree ematches; struct tcf_ematch_tree ematches;
@ -92,8 +90,7 @@ static int basic_init(struct tcf_proto *tp)
return 0; return 0;
} }
static inline void basic_delete_filter(struct tcf_proto *tp, static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f)
struct basic_filter *f)
{ {
tcf_unbind_filter(tp, &f->res); tcf_unbind_filter(tp, &f->res);
tcf_exts_destroy(tp, &f->exts); tcf_exts_destroy(tp, &f->exts);
@ -135,7 +132,7 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
[TCA_BASIC_EMATCHES] = { .type = NLA_NESTED }, [TCA_BASIC_EMATCHES] = { .type = NLA_NESTED },
}; };
static inline int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f, static int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
unsigned long base, struct nlattr **tb, unsigned long base, struct nlattr **tb,
struct nlattr *est) struct nlattr *est)
{ {
@ -203,7 +200,7 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
} while (--i > 0 && basic_get(tp, head->hgenerator)); } while (--i > 0 && basic_get(tp, head->hgenerator));
if (i <= 0) { if (i <= 0) {
printk(KERN_ERR "Insufficient number of handles\n"); pr_err("Insufficient number of handles\n");
goto errout; goto errout;
} }

View file

@ -56,7 +56,8 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
{ {
struct cgroup_cls_state *cs; struct cgroup_cls_state *cs;
if (!(cs = kzalloc(sizeof(*cs), GFP_KERNEL))) cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
if (cgrp->parent) if (cgrp->parent)
@ -94,8 +95,7 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
} }
struct cls_cgroup_head struct cls_cgroup_head {
{
u32 handle; u32 handle;
struct tcf_exts exts; struct tcf_exts exts;
struct tcf_ematch_tree ematches; struct tcf_ematch_tree ematches;

View file

@ -31,14 +31,12 @@
#define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *)) #define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *))
struct fw_head struct fw_head {
{
struct fw_filter *ht[HTSIZE]; struct fw_filter *ht[HTSIZE];
u32 mask; u32 mask;
}; };
struct fw_filter struct fw_filter {
{
struct fw_filter *next; struct fw_filter *next;
u32 id; u32 id;
struct tcf_result res; struct tcf_result res;
@ -53,7 +51,7 @@ static const struct tcf_ext_map fw_ext_map = {
.police = TCA_FW_POLICE .police = TCA_FW_POLICE
}; };
static __inline__ int fw_hash(u32 handle) static inline int fw_hash(u32 handle)
{ {
if (HTSIZE == 4096) if (HTSIZE == 4096)
return ((handle >> 24) & 0xFFF) ^ return ((handle >> 24) & 0xFFF) ^
@ -105,7 +103,8 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
} }
} else { } else {
/* old method */ /* old method */
if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id^tp->q->handle)))) { if (id && (TC_H_MAJ(id) == 0 ||
!(TC_H_MAJ(id ^ tp->q->handle)))) {
res->classid = id; res->classid = id;
res->class = 0; res->class = 0;
return 0; return 0;
@ -139,8 +138,7 @@ static int fw_init(struct tcf_proto *tp)
return 0; return 0;
} }
static inline void static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
{ {
tcf_unbind_filter(tp, &f->res); tcf_unbind_filter(tp, &f->res);
tcf_exts_destroy(tp, &f->exts); tcf_exts_destroy(tp, &f->exts);

View file

@ -23,34 +23,30 @@
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
/* /*
1. For now we assume that route tags < 256. * 1. For now we assume that route tags < 256.
It allows to use direct table lookups, instead of hash tables. * It allows to use direct table lookups, instead of hash tables.
2. For now we assume that "from TAG" and "fromdev DEV" statements * 2. For now we assume that "from TAG" and "fromdev DEV" statements
are mutually exclusive. * are mutually exclusive.
3. "to TAG from ANY" has higher priority, than "to ANY from XXX" * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
*/ */
struct route4_fastmap struct route4_fastmap {
{
struct route4_filter *filter; struct route4_filter *filter;
u32 id; u32 id;
int iif; int iif;
}; };
struct route4_head struct route4_head {
{
struct route4_fastmap fastmap[16]; struct route4_fastmap fastmap[16];
struct route4_bucket *table[256 + 1]; struct route4_bucket *table[256 + 1];
}; };
struct route4_bucket struct route4_bucket {
{
/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */ /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
struct route4_filter *ht[16 + 16 + 1]; struct route4_filter *ht[16 + 16 + 1];
}; };
struct route4_filter struct route4_filter {
{
struct route4_filter *next; struct route4_filter *next;
u32 id; u32 id;
int iif; int iif;
@ -68,13 +64,13 @@ static const struct tcf_ext_map route_ext_map = {
.action = TCA_ROUTE4_ACT .action = TCA_ROUTE4_ACT
}; };
static __inline__ int route4_fastmap_hash(u32 id, int iif) static inline int route4_fastmap_hash(u32 id, int iif)
{ {
return id & 0xF; return id & 0xF;
} }
static inline static void
void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id) route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{ {
spinlock_t *root_lock = qdisc_root_sleeping_lock(q); spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
@ -83,32 +79,33 @@ void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
spin_unlock_bh(root_lock); spin_unlock_bh(root_lock);
} }
static inline void static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif, route4_set_fastmap(struct route4_head *head, u32 id, int iif,
struct route4_filter *f) struct route4_filter *f)
{ {
int h = route4_fastmap_hash(id, iif); int h = route4_fastmap_hash(id, iif);
head->fastmap[h].id = id; head->fastmap[h].id = id;
head->fastmap[h].iif = iif; head->fastmap[h].iif = iif;
head->fastmap[h].filter = f; head->fastmap[h].filter = f;
} }
static __inline__ int route4_hash_to(u32 id) static inline int route4_hash_to(u32 id)
{ {
return id & 0xFF; return id & 0xFF;
} }
static __inline__ int route4_hash_from(u32 id) static inline int route4_hash_from(u32 id)
{ {
return (id >> 16) & 0xF; return (id >> 16) & 0xF;
} }
static __inline__ int route4_hash_iif(int iif) static inline int route4_hash_iif(int iif)
{ {
return 16 + ((iif >> 16) & 0xF); return 16 + ((iif >> 16) & 0xF);
} }
static __inline__ int route4_hash_wild(void) static inline int route4_hash_wild(void)
{ {
return 32; return 32;
} }
@ -138,7 +135,8 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
u32 id, h; u32 id, h;
int iif, dont_cache = 0; int iif, dont_cache = 0;
if ((dst = skb_dst(skb)) == NULL) dst = skb_dst(skb);
if (!dst)
goto failure; goto failure;
id = dst->tclassid; id = dst->tclassid;
@ -161,7 +159,8 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
h = route4_hash_to(id); h = route4_hash_to(id);
restart: restart:
if ((b = head->table[h]) != NULL) { b = head->table[h];
if (b) {
for (f = b->ht[route4_hash_from(id)]; f; f = f->next) for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
if (f->id == id) if (f->id == id)
ROUTE4_APPLY_RESULT(); ROUTE4_APPLY_RESULT();
@ -198,6 +197,7 @@ old_method:
static inline u32 to_hash(u32 id) static inline u32 to_hash(u32 id)
{ {
u32 h = id & 0xFF; u32 h = id & 0xFF;
if (id & 0x8000) if (id & 0x8000)
h += 256; h += 256;
return h; return h;
@ -221,7 +221,7 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
struct route4_head *head = (struct route4_head *)tp->root; struct route4_head *head = (struct route4_head *)tp->root;
struct route4_bucket *b; struct route4_bucket *b;
struct route4_filter *f; struct route4_filter *f;
unsigned h1, h2; unsigned int h1, h2;
if (!head) if (!head)
return 0; return 0;
@ -234,7 +234,8 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
if (h2 > 32) if (h2 > 32)
return 0; return 0;
if ((b = head->table[h1]) != NULL) { b = head->table[h1];
if (b) {
for (f = b->ht[h2]; f; f = f->next) for (f = b->ht[h2]; f; f = f->next)
if (f->handle == handle) if (f->handle == handle)
return (unsigned long)f; return (unsigned long)f;
@ -251,7 +252,7 @@ static int route4_init(struct tcf_proto *tp)
return 0; return 0;
} }
static inline void static void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f) route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{ {
tcf_unbind_filter(tp, &f->res); tcf_unbind_filter(tp, &f->res);
@ -270,7 +271,8 @@ static void route4_destroy(struct tcf_proto *tp)
for (h1 = 0; h1 <= 256; h1++) { for (h1 = 0; h1 <= 256; h1++) {
struct route4_bucket *b; struct route4_bucket *b;
if ((b = head->table[h1]) != NULL) { b = head->table[h1];
if (b) {
for (h2 = 0; h2 <= 32; h2++) { for (h2 = 0; h2 <= 32; h2++) {
struct route4_filter *f; struct route4_filter *f;
@ -289,7 +291,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{ {
struct route4_head *head = (struct route4_head *)tp->root; struct route4_head *head = (struct route4_head *)tp->root;
struct route4_filter **fp, *f = (struct route4_filter *)arg; struct route4_filter **fp, *f = (struct route4_filter *)arg;
unsigned h = 0; unsigned int h = 0;
struct route4_bucket *b; struct route4_bucket *b;
int i; int i;
@ -380,7 +382,8 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
} }
h1 = to_hash(nhandle); h1 = to_hash(nhandle);
if ((b = head->table[h1]) == NULL) { b = head->table[h1];
if (!b) {
err = -ENOBUFS; err = -ENOBUFS;
b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL); b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
if (b == NULL) if (b == NULL)
@ -391,6 +394,7 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
tcf_tree_unlock(tp); tcf_tree_unlock(tp);
} else { } else {
unsigned int h2 = from_hash(nhandle >> 16); unsigned int h2 = from_hash(nhandle >> 16);
err = -EEXIST; err = -EEXIST;
for (fp = b->ht[h2]; fp; fp = fp->next) for (fp = b->ht[h2]; fp; fp = fp->next)
if (fp->handle == f->handle) if (fp->handle == f->handle)
@ -444,7 +448,8 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
if (err < 0) if (err < 0)
return err; return err;
if ((f = (struct route4_filter*)*arg) != NULL) { f = (struct route4_filter *)*arg;
if (f) {
if (f->handle != handle && handle) if (f->handle != handle && handle)
return -EINVAL; return -EINVAL;
@ -492,7 +497,8 @@ reinsert:
if (old_handle && f->handle != old_handle) { if (old_handle && f->handle != old_handle) {
th = to_hash(old_handle); th = to_hash(old_handle);
h = from_hash(old_handle >> 16); h = from_hash(old_handle >> 16);
if ((b = head->table[th]) != NULL) { b = head->table[th];
if (b) {
for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) { for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
if (*fp == f) { if (*fp == f) {
*fp = f->next; *fp = f->next;
@ -515,7 +521,7 @@ errout:
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg) static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{ {
struct route4_head *head = tp->root; struct route4_head *head = tp->root;
unsigned h, h1; unsigned int h, h1;
if (head == NULL) if (head == NULL)
arg->stop = 1; arg->stop = 1;
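
The route classifier, like cls_rsvp, cls_u32, act_police and the CBQ scheduler elsewhere in this commit, also spells bare `unsigned` as `unsigned int`; the two name the same type, so only the checkpatch warning goes away. A trivial standalone echo of that change, reusing the masks from route4_hash_to()/route4_hash_from() above with a made-up value:

#include <stdio.h>

int main(void)
{
	/* "unsigned" and "unsigned int" are the same type; the commit merely
	 * spells it out, matching what checkpatch.pl asks for.
	 */
	unsigned int id = 0x00871234u;

	/* Same masks as route4_hash_to()/route4_hash_from() in the hunk above. */
	printf("to=%u from=%u\n", id & 0xFFu, (id >> 16) & 0xFu);
	return 0;
}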

View file

@ -66,16 +66,14 @@
powerful classification engine. */ powerful classification engine. */
struct rsvp_head struct rsvp_head {
{
u32 tmap[256/32]; u32 tmap[256/32];
u32 hgenerator; u32 hgenerator;
u8 tgenerator; u8 tgenerator;
struct rsvp_session *ht[256]; struct rsvp_session *ht[256];
}; };
struct rsvp_session struct rsvp_session {
{
struct rsvp_session *next; struct rsvp_session *next;
__be32 dst[RSVP_DST_LEN]; __be32 dst[RSVP_DST_LEN];
struct tc_rsvp_gpi dpi; struct tc_rsvp_gpi dpi;
@ -86,8 +84,7 @@ struct rsvp_session
}; };
struct rsvp_filter struct rsvp_filter {
{
struct rsvp_filter *next; struct rsvp_filter *next;
__be32 src[RSVP_DST_LEN]; __be32 src[RSVP_DST_LEN];
struct tc_rsvp_gpi spi; struct tc_rsvp_gpi spi;
@ -100,17 +97,19 @@ struct rsvp_filter
struct rsvp_session *sess; struct rsvp_session *sess;
}; };
static __inline__ unsigned hash_dst(__be32 *dst, u8 protocol, u8 tunnelid) static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
{ {
unsigned h = (__force __u32)dst[RSVP_DST_LEN-1]; unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
h ^= h>>16; h ^= h>>16;
h ^= h>>8; h ^= h>>8;
return (h ^ protocol ^ tunnelid) & 0xFF; return (h ^ protocol ^ tunnelid) & 0xFF;
} }
static __inline__ unsigned hash_src(__be32 *src) static inline unsigned int hash_src(__be32 *src)
{ {
unsigned h = (__force __u32)src[RSVP_DST_LEN-1]; unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
h ^= h>>16; h ^= h>>16;
h ^= h>>8; h ^= h>>8;
h ^= h>>4; h ^= h>>4;
@ -137,7 +136,7 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht; struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
struct rsvp_session *s; struct rsvp_session *s;
struct rsvp_filter *f; struct rsvp_filter *f;
unsigned h1, h2; unsigned int h1, h2;
__be32 *dst, *src; __be32 *dst, *src;
u8 protocol; u8 protocol;
u8 tunnelid = 0; u8 tunnelid = 0;
@ -227,8 +226,8 @@ static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht; struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
struct rsvp_session *s; struct rsvp_session *s;
struct rsvp_filter *f; struct rsvp_filter *f;
unsigned h1 = handle&0xFF; unsigned int h1 = handle & 0xFF;
unsigned h2 = (handle>>8)&0xFF; unsigned int h2 = (handle >> 8) & 0xFF;
if (h2 > 16) if (h2 > 16)
return 0; return 0;
@ -258,7 +257,7 @@ static int rsvp_init(struct tcf_proto *tp)
return -ENOBUFS; return -ENOBUFS;
} }
static inline void static void
rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f) rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
{ {
tcf_unbind_filter(tp, &f->res); tcf_unbind_filter(tp, &f->res);
@ -300,7 +299,7 @@ static void rsvp_destroy(struct tcf_proto *tp)
static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
{ {
struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg; struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg;
unsigned h = f->handle; unsigned int h = f->handle;
struct rsvp_session **sp; struct rsvp_session **sp;
struct rsvp_session *s = f->sess; struct rsvp_session *s = f->sess;
int i; int i;
@ -337,13 +336,14 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
return 0; return 0;
} }
static unsigned gen_handle(struct tcf_proto *tp, unsigned salt) static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
{ {
struct rsvp_head *data = tp->root; struct rsvp_head *data = tp->root;
int i = 0xFFFF; int i = 0xFFFF;
while (i-- > 0) { while (i-- > 0) {
u32 h; u32 h;
if ((data->hgenerator += 0x10000) == 0) if ((data->hgenerator += 0x10000) == 0)
data->hgenerator = 0x10000; data->hgenerator = 0x10000;
h = data->hgenerator|salt; h = data->hgenerator|salt;
@ -428,7 +428,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
struct nlattr *opt = tca[TCA_OPTIONS-1]; struct nlattr *opt = tca[TCA_OPTIONS-1];
struct nlattr *tb[TCA_RSVP_MAX + 1]; struct nlattr *tb[TCA_RSVP_MAX + 1];
struct tcf_exts e; struct tcf_exts e;
unsigned h1, h2; unsigned int h1, h2;
__be32 *dst; __be32 *dst;
int err; int err;
@ -443,7 +443,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
if (err < 0) if (err < 0)
return err; return err;
if ((f = (struct rsvp_filter*)*arg) != NULL) { f = (struct rsvp_filter *)*arg;
if (f) {
/* Node exists: adjust only classid */ /* Node exists: adjust only classid */
if (f->handle != handle && handle) if (f->handle != handle && handle)
@ -567,7 +568,7 @@ errout2:
static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg) static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{ {
struct rsvp_head *head = tp->root; struct rsvp_head *head = tp->root;
unsigned h, h1; unsigned int h, h1;
if (arg->stop) if (arg->stop)
return; return;

View file

@ -42,8 +42,7 @@
#include <net/act_api.h> #include <net/act_api.h>
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
struct tc_u_knode struct tc_u_knode {
{
struct tc_u_knode *next; struct tc_u_knode *next;
u32 handle; u32 handle;
struct tc_u_hnode *ht_up; struct tc_u_hnode *ht_up;
@ -63,19 +62,17 @@ struct tc_u_knode
struct tc_u32_sel sel; struct tc_u32_sel sel;
}; };
struct tc_u_hnode struct tc_u_hnode {
{
struct tc_u_hnode *next; struct tc_u_hnode *next;
u32 handle; u32 handle;
u32 prio; u32 prio;
struct tc_u_common *tp_c; struct tc_u_common *tp_c;
int refcnt; int refcnt;
unsigned divisor; unsigned int divisor;
struct tc_u_knode *ht[1]; struct tc_u_knode *ht[1];
}; };
struct tc_u_common struct tc_u_common {
{
struct tc_u_hnode *hlist; struct tc_u_hnode *hlist;
struct Qdisc *q; struct Qdisc *q;
int refcnt; int refcnt;
@ -87,9 +84,11 @@ static const struct tcf_ext_map u32_ext_map = {
.police = TCA_U32_POLICE .police = TCA_U32_POLICE
}; };
static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift) static inline unsigned int u32_hash_fold(__be32 key,
const struct tc_u32_sel *sel,
u8 fshift)
{ {
unsigned h = ntohl(key & sel->hmask)>>fshift; unsigned int h = ntohl(key & sel->hmask) >> fshift;
return h; return h;
} }
@ -236,11 +235,11 @@ out:
deadloop: deadloop:
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_WARNING "cls_u32: dead loop\n"); pr_warning("cls_u32: dead loop\n");
return -1; return -1;
} }
static __inline__ struct tc_u_hnode * static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle) u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{ {
struct tc_u_hnode *ht; struct tc_u_hnode *ht;
@ -252,10 +251,10 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
return ht; return ht;
} }
static __inline__ struct tc_u_knode * static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle) u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{ {
unsigned sel; unsigned int sel;
struct tc_u_knode *n = NULL; struct tc_u_knode *n = NULL;
sel = TC_U32_HASH(handle); sel = TC_U32_HASH(handle);
@ -378,7 +377,7 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{ {
struct tc_u_knode *n; struct tc_u_knode *n;
unsigned h; unsigned int h;
for (h = 0; h <= ht->divisor; h++) { for (h = 0; h <= ht->divisor; h++) {
while ((n = ht->ht[h]) != NULL) { while ((n = ht->ht[h]) != NULL) {
@ -470,7 +469,7 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle) static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{ {
struct tc_u_knode *n; struct tc_u_knode *n;
unsigned i = 0x7FF; unsigned int i = 0x7FF;
for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next) for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
if (i < TC_U32_NODE(n->handle)) if (i < TC_U32_NODE(n->handle))
@ -566,7 +565,8 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
if (err < 0) if (err < 0)
return err; return err;
if ((n = (struct tc_u_knode*)*arg) != NULL) { n = (struct tc_u_knode *)*arg;
if (n) {
if (TC_U32_KEY(n->handle) == 0) if (TC_U32_KEY(n->handle) == 0)
return -EINVAL; return -EINVAL;
@ -574,7 +574,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
} }
if (tb[TCA_U32_DIVISOR]) { if (tb[TCA_U32_DIVISOR]) {
unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]); unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
if (--divisor > 0x100) if (--divisor > 0x100)
return -EINVAL; return -EINVAL;
@ -683,7 +683,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
struct tc_u_common *tp_c = tp->data; struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *ht; struct tc_u_hnode *ht;
struct tc_u_knode *n; struct tc_u_knode *n;
unsigned h; unsigned int h;
if (arg->stop) if (arg->stop)
return; return;
@ -732,6 +732,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
if (TC_U32_KEY(n->handle) == 0) { if (TC_U32_KEY(n->handle) == 0) {
struct tc_u_hnode *ht = (struct tc_u_hnode *)fh; struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
u32 divisor = ht->divisor + 1; u32 divisor = ht->divisor + 1;
NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor); NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
} else { } else {
NLA_PUT(skb, TCA_U32_SEL, NLA_PUT(skb, TCA_U32_SEL,

View file

@ -46,7 +46,8 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
case TCF_EM_ALIGN_U32: case TCF_EM_ALIGN_U32:
/* Worth checking boundries? The branching seems /* Worth checking boundries? The branching seems
* to get worse. Visit again. */ * to get worse. Visit again.
*/
val = get_unaligned_be32(ptr); val = get_unaligned_be32(ptr);
if (cmp_needs_transformation(cmp)) if (cmp_needs_transformation(cmp))

View file

@ -73,21 +73,18 @@
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
#include <net/sock.h> #include <net/sock.h>
struct meta_obj struct meta_obj {
{
unsigned long value; unsigned long value;
unsigned int len; unsigned int len;
}; };
struct meta_value struct meta_value {
{
struct tcf_meta_val hdr; struct tcf_meta_val hdr;
unsigned long val; unsigned long val;
unsigned int len; unsigned int len;
}; };
struct meta_match struct meta_match {
{
struct meta_value lvalue; struct meta_value lvalue;
struct meta_value rvalue; struct meta_value rvalue;
}; };
@ -483,8 +480,7 @@ META_COLLECTOR(int_sk_write_pend)
* Meta value collectors assignment table * Meta value collectors assignment table
**************************************************************************/ **************************************************************************/
struct meta_ops struct meta_ops {
{
void (*get)(struct sk_buff *, struct tcf_pkt_info *, void (*get)(struct sk_buff *, struct tcf_pkt_info *,
struct meta_value *, struct meta_obj *, int *); struct meta_value *, struct meta_obj *, int *);
}; };
@ -649,9 +645,8 @@ static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{ {
if (v->len == sizeof(unsigned long)) if (v->len == sizeof(unsigned long))
NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val); NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val);
else if (v->len == sizeof(u32)) { else if (v->len == sizeof(u32))
NLA_PUT_U32(skb, tlv, v->val); NLA_PUT_U32(skb, tlv, v->val);
}
return 0; return 0;
@ -663,8 +658,7 @@ nla_put_failure:
* Type specific operations table * Type specific operations table
**************************************************************************/ **************************************************************************/
struct meta_type_ops struct meta_type_ops {
{
void (*destroy)(struct meta_value *); void (*destroy)(struct meta_value *);
int (*compare)(struct meta_obj *, struct meta_obj *); int (*compare)(struct meta_obj *, struct meta_obj *);
int (*change)(struct meta_value *, struct nlattr *); int (*change)(struct meta_value *, struct nlattr *);
@ -771,7 +765,7 @@ static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
static inline int meta_is_supported(struct meta_value *val) static inline int meta_is_supported(struct meta_value *val)
{ {
return (!meta_id(val) || meta_ops(val)->get); return !meta_id(val) || meta_ops(val)->get;
} }
static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = { static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {

View file

@ -18,8 +18,7 @@
#include <linux/tc_ematch/tc_em_nbyte.h> #include <linux/tc_ematch/tc_em_nbyte.h>
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
struct nbyte_data struct nbyte_data {
{
struct tcf_em_nbyte hdr; struct tcf_em_nbyte hdr;
char pattern[0]; char pattern[0];
}; };

View file

@ -19,8 +19,7 @@
#include <linux/tc_ematch/tc_em_text.h> #include <linux/tc_ematch/tc_em_text.h>
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
struct text_match struct text_match {
{
u16 from_offset; u16 from_offset;
u16 to_offset; u16 to_offset;
u8 from_layer; u8 from_layer;

View file

@ -93,7 +93,7 @@
static LIST_HEAD(ematch_ops); static LIST_HEAD(ematch_ops);
static DEFINE_RWLOCK(ematch_mod_lock); static DEFINE_RWLOCK(ematch_mod_lock);
static inline struct tcf_ematch_ops * tcf_em_lookup(u16 kind) static struct tcf_ematch_ops *tcf_em_lookup(u16 kind)
{ {
struct tcf_ematch_ops *e = NULL; struct tcf_ematch_ops *e = NULL;
@ -184,7 +184,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
if (em_hdr->kind == TCF_EM_CONTAINER) { if (em_hdr->kind == TCF_EM_CONTAINER) {
/* Special ematch called "container", carries an index /* Special ematch called "container", carries an index
* referencing an external ematch sequence. */ * referencing an external ematch sequence.
*/
u32 ref; u32 ref;
if (data_len < sizeof(ref)) if (data_len < sizeof(ref))
@ -195,7 +196,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
goto errout; goto errout;
/* We do not allow backward jumps to avoid loops and jumps /* We do not allow backward jumps to avoid loops and jumps
* to our own position are of course illegal. */ * to our own position are of course illegal.
*/
if (ref <= idx) if (ref <= idx)
goto errout; goto errout;
@ -208,7 +210,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
* which automatically releases the reference again, therefore * which automatically releases the reference again, therefore
* the module MUST not be given back under any circumstances * the module MUST not be given back under any circumstances
* here. Be aware, the destroy function assumes that the * here. Be aware, the destroy function assumes that the
* module is held if the ops field is non zero. */ * module is held if the ops field is non zero.
*/
em->ops = tcf_em_lookup(em_hdr->kind); em->ops = tcf_em_lookup(em_hdr->kind);
if (em->ops == NULL) { if (em->ops == NULL) {
@ -221,7 +224,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
if (em->ops) { if (em->ops) {
/* We dropped the RTNL mutex in order to /* We dropped the RTNL mutex in order to
* perform the module load. Tell the caller * perform the module load. Tell the caller
* to replay the request. */ * to replay the request.
*/
module_put(em->ops->owner); module_put(em->ops->owner);
err = -EAGAIN; err = -EAGAIN;
} }
@ -230,7 +234,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
} }
/* ematch module provides expected length of data, so we /* ematch module provides expected length of data, so we
* can do a basic sanity check. */ * can do a basic sanity check.
*/
if (em->ops->datalen && data_len < em->ops->datalen) if (em->ops->datalen && data_len < em->ops->datalen)
goto errout; goto errout;
@ -246,7 +251,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
* TCF_EM_SIMPLE may be specified stating that the * TCF_EM_SIMPLE may be specified stating that the
* data only consists of a u32 integer and the module * data only consists of a u32 integer and the module
* does not expected a memory reference but rather * does not expected a memory reference but rather
* the value carried. */ * the value carried.
*/
if (em_hdr->flags & TCF_EM_SIMPLE) { if (em_hdr->flags & TCF_EM_SIMPLE) {
if (data_len < sizeof(u32)) if (data_len < sizeof(u32))
goto errout; goto errout;
@ -334,7 +340,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
* The array of rt attributes is parsed in the order as they are * The array of rt attributes is parsed in the order as they are
* provided, their type must be incremental from 1 to n. Even * provided, their type must be incremental from 1 to n. Even
* if it does not serve any real purpose, a failure of sticking * if it does not serve any real purpose, a failure of sticking
* to this policy will result in parsing failure. */ * to this policy will result in parsing failure.
*/
for (idx = 0; nla_ok(rt_match, list_len); idx++) { for (idx = 0; nla_ok(rt_match, list_len); idx++) {
err = -EINVAL; err = -EINVAL;
@ -359,7 +366,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
/* Check if the number of matches provided by userspace actually /* Check if the number of matches provided by userspace actually
* complies with the array of matches. The number was used for * complies with the array of matches. The number was used for
* the validation of references and a mismatch could lead to * the validation of references and a mismatch could lead to
* undefined references during the matching process. */ * undefined references during the matching process.
*/
if (idx != tree_hdr->nmatches) { if (idx != tree_hdr->nmatches) {
err = -EINVAL; err = -EINVAL;
goto errout_abort; goto errout_abort;
@ -478,6 +486,7 @@ static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em,
struct tcf_pkt_info *info) struct tcf_pkt_info *info)
{ {
int r = em->ops->match(skb, em, info); int r = em->ops->match(skb, em, info);
return tcf_em_is_inverted(em) ? !r : r; return tcf_em_is_inverted(em) ? !r : r;
} }
@ -527,7 +536,7 @@ pop_stack:
stack_overflow: stack_overflow:
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_WARNING "tc ematch: local stack overflow," pr_warning("tc ematch: local stack overflow,"
" increase NET_EMATCH_STACK\n"); " increase NET_EMATCH_STACK\n");
return -1; return -1;
} }

View file

@ -321,7 +321,9 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab)
if (!tab || --tab->refcnt) if (!tab || --tab->refcnt)
return; return;
for (rtabp = &qdisc_rtab_list; (rtab=*rtabp) != NULL; rtabp = &rtab->next) { for (rtabp = &qdisc_rtab_list;
(rtab = *rtabp) != NULL;
rtabp = &rtab->next) {
if (rtab == tab) { if (rtab == tab) {
*rtabp = rtab->next; *rtabp = rtab->next;
kfree(rtab); kfree(rtab);
@ -459,8 +461,7 @@ EXPORT_SYMBOL(qdisc_calculate_pkt_len);
void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc) void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
{ {
if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
printk(KERN_WARNING pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
"%s: %s qdisc %X: is non-work-conserving?\n",
txt, qdisc->ops->id, qdisc->handle >> 16); txt, qdisc->ops->id, qdisc->handle >> 16);
qdisc->flags |= TCQ_F_WARN_NONWC; qdisc->flags |= TCQ_F_WARN_NONWC;
} }
@ -915,8 +916,7 @@ out:
return 0; return 0;
} }
struct check_loop_arg struct check_loop_arg {
{
struct qdisc_walker w; struct qdisc_walker w;
struct Qdisc *p; struct Qdisc *p;
int depth; int depth;
@ -970,7 +970,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
struct Qdisc *p = NULL; struct Qdisc *p = NULL;
int err; int err;
if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return -ENODEV; return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@ -980,11 +981,11 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (clid) { if (clid) {
if (clid != TC_H_ROOT) { if (clid != TC_H_ROOT) {
if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) { if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL) p = qdisc_lookup(dev, TC_H_MAJ(clid));
if (!p)
return -ENOENT; return -ENOENT;
q = qdisc_leaf(p, clid); q = qdisc_leaf(p, clid);
} else { /* ingress */ } else if (dev_ingress_queue(dev)) {
if (dev_ingress_queue(dev))
q = dev_ingress_queue(dev)->qdisc_sleeping; q = dev_ingress_queue(dev)->qdisc_sleeping;
} }
} else { } else {
@ -996,7 +997,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (tcm->tcm_handle && q->handle != tcm->tcm_handle) if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
return -EINVAL; return -EINVAL;
} else { } else {
if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL) q = qdisc_lookup(dev, tcm->tcm_handle);
if (!q)
return -ENOENT; return -ENOENT;
} }
@ -1008,7 +1010,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return -EINVAL; return -EINVAL;
if (q->handle == 0) if (q->handle == 0)
return -ENOENT; return -ENOENT;
if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0) err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
if (err != 0)
return err; return err;
} else { } else {
qdisc_notify(net, skb, n, clid, NULL, q); qdisc_notify(net, skb, n, clid, NULL, q);
@ -1017,7 +1020,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
} }
/* /*
Create/change qdisc. * Create/change qdisc.
*/ */
static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
@ -1036,7 +1039,8 @@ replay:
clid = tcm->tcm_parent; clid = tcm->tcm_parent;
q = p = NULL; q = p = NULL;
if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return -ENODEV; return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@ -1046,11 +1050,11 @@ replay:
if (clid) { if (clid) {
if (clid != TC_H_ROOT) { if (clid != TC_H_ROOT) {
if (clid != TC_H_INGRESS) { if (clid != TC_H_INGRESS) {
if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL) p = qdisc_lookup(dev, TC_H_MAJ(clid));
if (!p)
return -ENOENT; return -ENOENT;
q = qdisc_leaf(p, clid); q = qdisc_leaf(p, clid);
} else { /* ingress */ } else if (dev_ingress_queue_create(dev)) {
if (dev_ingress_queue_create(dev))
q = dev_ingress_queue(dev)->qdisc_sleeping; q = dev_ingress_queue(dev)->qdisc_sleeping;
} }
} else { } else {
@ -1067,7 +1071,8 @@ replay:
return -EEXIST; return -EEXIST;
if (TC_H_MIN(tcm->tcm_handle)) if (TC_H_MIN(tcm->tcm_handle))
return -EINVAL; return -EINVAL;
if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL) q = qdisc_lookup(dev, tcm->tcm_handle);
if (!q)
goto create_n_graft; goto create_n_graft;
if (n->nlmsg_flags & NLM_F_EXCL) if (n->nlmsg_flags & NLM_F_EXCL)
return -EEXIST; return -EEXIST;
@ -1079,7 +1084,7 @@ replay:
atomic_inc(&q->refcnt); atomic_inc(&q->refcnt);
goto graft; goto graft;
} else { } else {
if (q == NULL) if (!q)
goto create_n_graft; goto create_n_graft;
/* This magic test requires explanation. /* This magic test requires explanation.
@ -1234,16 +1239,19 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
return -ENOBUFS; return -ENOBUFS;
if (old && !tc_qdisc_dump_ignore(old)) { if (old && !tc_qdisc_dump_ignore(old)) {
if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0) if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq,
0, RTM_DELQDISC) < 0)
goto err_out; goto err_out;
} }
if (new && !tc_qdisc_dump_ignore(new)) { if (new && !tc_qdisc_dump_ignore(new)) {
if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq,
old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
goto err_out; goto err_out;
} }
if (skb->len) if (skb->len)
return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
err_out: err_out:
kfree_skb(skb); kfree_skb(skb);
@ -1356,7 +1364,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
u32 qid = TC_H_MAJ(clid); u32 qid = TC_H_MAJ(clid);
int err; int err;
if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return -ENODEV; return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@ -1391,9 +1400,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
qid = dev->qdisc->handle; qid = dev->qdisc->handle;
/* Now qid is genuine qdisc handle consistent /* Now qid is genuine qdisc handle consistent
both with parent and child. * both with parent and child.
*
TC_H_MAJ(pid) still may be unspecified, complete it now. * TC_H_MAJ(pid) still may be unspecified, complete it now.
*/ */
if (pid) if (pid)
pid = TC_H_MAKE(qid, pid); pid = TC_H_MAKE(qid, pid);
@ -1403,7 +1412,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
} }
/* OK. Locate qdisc */ /* OK. Locate qdisc */
if ((q = qdisc_lookup(dev, qid)) == NULL) q = qdisc_lookup(dev, qid);
if (!q)
return -ENOENT; return -ENOENT;
/* An check that it supports classes */ /* An check that it supports classes */
@ -1423,7 +1433,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (cl == 0) { if (cl == 0) {
err = -ENOENT; err = -ENOENT;
if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE)) if (n->nlmsg_type != RTM_NEWTCLASS ||
!(n->nlmsg_flags & NLM_F_CREATE))
goto out; goto out;
} else { } else {
switch (n->nlmsg_type) { switch (n->nlmsg_type) {
@ -1521,11 +1532,11 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
return -EINVAL; return -EINVAL;
} }
return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
} }
struct qdisc_dump_args struct qdisc_dump_args {
{
struct qdisc_walker w; struct qdisc_walker w;
struct sk_buff *skb; struct sk_buff *skb;
struct netlink_callback *cb; struct netlink_callback *cb;
@ -1598,7 +1609,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return 0; return 0;
if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) dev = dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return 0; return 0;
s_t = cb->args[0]; s_t = cb->args[0];
@ -1621,19 +1633,22 @@ done:
} }
/* Main classifier routine: scans classifier chain attached /* Main classifier routine: scans classifier chain attached
to this qdisc, (optionally) tests for protocol and asks * to this qdisc, (optionally) tests for protocol and asks
specific classifiers. * specific classifiers.
*/ */
int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp, int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res) struct tcf_result *res)
{ {
__be16 protocol = skb->protocol; __be16 protocol = skb->protocol;
int err = 0; int err;
for (; tp; tp = tp->next) { for (; tp; tp = tp->next) {
if ((tp->protocol == protocol || if (tp->protocol != protocol &&
tp->protocol == htons(ETH_P_ALL)) && tp->protocol != htons(ETH_P_ALL))
(err = tp->classify(skb, tp, res)) >= 0) { continue;
err = tp->classify(skb, tp, res);
if (err >= 0) {
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
if (err != TC_ACT_RECLASSIFY && skb->tc_verd) if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0); skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
@ -1664,11 +1679,11 @@ reclassify:
if (verd++ >= MAX_REC_LOOP) { if (verd++ >= MAX_REC_LOOP) {
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_NOTICE pr_notice("%s: packet reclassify loop"
"%s: packet reclassify loop"
" rule prio %u protocol %02x\n", " rule prio %u protocol %02x\n",
tp->q->ops->id, tp->q->ops->id,
tp->prio & 0xffff, ntohs(tp->protocol)); tp->prio & 0xffff,
ntohs(tp->protocol));
return TC_ACT_SHOT; return TC_ACT_SHOT;
} }
skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
@ -1761,7 +1776,7 @@ static int __init pktsched_init(void)
err = register_pernet_subsys(&psched_net_ops); err = register_pernet_subsys(&psched_net_ops);
if (err) { if (err) {
printk(KERN_ERR "pktsched_init: " pr_err("pktsched_init: "
"cannot initialize per netns operations\n"); "cannot initialize per netns operations\n");
return err; return err;
} }
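
The most visible rewrite in the qdisc API hunks above is the tc_classify_compat() loop: the compound condition with an embedded assignment becomes an early `continue` plus a plain assignment, keeping the success path at one indentation level. The sketch below mocks the filter chain with invented types and omits the CONFIG_NET_CLS_ACT handling; it only mirrors the control-flow change:

#include <stddef.h>

struct filter {
	struct filter *next;
	int protocol;
	int (*classify)(const struct filter *f, int pkt_proto);
};

/* Shape of the old loop (embedded assignment in the condition):
 *
 *	for (; tp; tp = tp->next) {
 *		if ((tp->protocol == protocol || tp->protocol == ANY) &&
 *		    (err = tp->classify(...)) >= 0)
 *			return err;
 *	}
 *
 * The cleaned-up form skips non-matching protocols with "continue" and
 * performs the assignment as a plain statement.
 */
static int classify(struct filter *tp, int protocol, int any)
{
	int err;

	for (; tp; tp = tp->next) {
		if (tp->protocol != protocol && tp->protocol != any)
			continue;
		err = tp->classify(tp, protocol);
		if (err >= 0)
			return err;
	}
	return -1;
}

static int match_all(const struct filter *f, int pkt_proto)
{
	(void)f;
	(void)pkt_proto;
	return 0;
}

int main(void)
{
	struct filter f = { .next = NULL, .protocol = 0x0800, .classify = match_all };

	return classify(&f, 0x0800, 0x0003) == 0 ? 0 : 1;
}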

View file

@ -319,7 +319,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
* creation), and one for the reference held when calling delete. * creation), and one for the reference held when calling delete.
*/ */
if (flow->ref < 2) { if (flow->ref < 2) {
printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref); pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
return -EINVAL; return -EINVAL;
} }
if (flow->ref > 2) if (flow->ref > 2)
@ -387,9 +387,9 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
done: done:
; ;
} }
if (!flow) if (!flow) {
flow = &p->link; flow = &p->link;
else { } else {
if (flow->vcc) if (flow->vcc)
ATM_SKB(skb)->atm_options = flow->vcc->atm_options; ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
/*@@@ looks good ... but it's not supposed to work :-) */ /*@@@ looks good ... but it's not supposed to work :-) */
@ -576,8 +576,7 @@ static void atm_tc_destroy(struct Qdisc *sch)
list_for_each_entry_safe(flow, tmp, &p->flows, list) { list_for_each_entry_safe(flow, tmp, &p->flows, list) {
if (flow->ref > 1) if (flow->ref > 1)
printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow, pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
flow->ref);
atm_tc_put(sch, (unsigned long)flow); atm_tc_put(sch, (unsigned long)flow);
} }
tasklet_kill(&p->task); tasklet_kill(&p->task);
@ -616,9 +615,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
} }
if (flow->excess) if (flow->excess)
NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid); NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
else { else
NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0); NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
}
nla_nest_end(skb, nest); nla_nest_end(skb, nest);
return skb->len; return skb->len;

View file

@ -72,8 +72,7 @@
struct cbq_sched_data; struct cbq_sched_data;
struct cbq_class struct cbq_class {
{
struct Qdisc_class_common common; struct Qdisc_class_common common;
struct cbq_class *next_alive; /* next class with backlog in this priority band */ struct cbq_class *next_alive; /* next class with backlog in this priority band */
@ -142,15 +141,14 @@ struct cbq_class
struct cbq_class *defaults[TC_PRIO_MAX + 1]; struct cbq_class *defaults[TC_PRIO_MAX + 1];
}; };
struct cbq_sched_data struct cbq_sched_data {
{
struct Qdisc_class_hash clhash; /* Hash table of all classes */ struct Qdisc_class_hash clhash; /* Hash table of all classes */
int nclasses[TC_CBQ_MAXPRIO + 1]; int nclasses[TC_CBQ_MAXPRIO + 1];
unsigned quanta[TC_CBQ_MAXPRIO+1]; unsigned int quanta[TC_CBQ_MAXPRIO + 1];
struct cbq_class link; struct cbq_class link;
unsigned activemask; unsigned int activemask;
struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes
with backlog */ with backlog */
@ -162,7 +160,7 @@ struct cbq_sched_data
int tx_len; int tx_len;
psched_time_t now; /* Cached timestamp */ psched_time_t now; /* Cached timestamp */
psched_time_t now_rt; /* Cached real time */ psched_time_t now_rt; /* Cached real time */
unsigned pmask; unsigned int pmask;
struct hrtimer delay_timer; struct hrtimer delay_timer;
struct qdisc_watchdog watchdog; /* Watchdog timer, struct qdisc_watchdog watchdog; /* Watchdog timer,
@ -177,7 +175,7 @@ struct cbq_sched_data
#define L2T(cl, len) qdisc_l2t((cl)->R_tab, len) #define L2T(cl, len) qdisc_l2t((cl)->R_tab, len)
static __inline__ struct cbq_class * static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid) cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{ {
struct Qdisc_class_common *clc; struct Qdisc_class_common *clc;
@ -193,25 +191,27 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
static struct cbq_class * static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this) cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{ {
struct cbq_class *cl, *new; struct cbq_class *cl;
for (cl = this->tparent; cl; cl = cl->tparent) for (cl = this->tparent; cl; cl = cl->tparent) {
if ((new = cl->defaults[TC_PRIO_BESTEFFORT]) != NULL && new != this) struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
if (new != NULL && new != this)
return new; return new;
}
return NULL; return NULL;
} }
#endif #endif
/* Classify packet. The procedure is pretty complicated, but /* Classify packet. The procedure is pretty complicated, but
it allows us to combine link sharing and priority scheduling * it allows us to combine link sharing and priority scheduling
transparently. * transparently.
*
Namely, you can put link sharing rules (f.e. route based) at root of CBQ, * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
so that it resolves to split nodes. Then packets are classified * so that it resolves to split nodes. Then packets are classified
by logical priority, or a more specific classifier may be attached * by logical priority, or a more specific classifier may be attached
to the split node. * to the split node.
*/ */
static struct cbq_class * static struct cbq_class *
@ -243,7 +243,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
(result = tc_classify_compat(skb, head->filter_list, &res)) < 0) (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
goto fallback; goto fallback;
if ((cl = (void*)res.class) == NULL) { cl = (void *)res.class;
if (!cl) {
if (TC_H_MAJ(res.classid)) if (TC_H_MAJ(res.classid))
cl = cbq_class_lookup(q, res.classid); cl = cbq_class_lookup(q, res.classid);
else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL) else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
@ -290,12 +291,12 @@ fallback:
} }
/* /*
A packet has just been enqueued on the empty class. * A packet has just been enqueued on the empty class.
cbq_activate_class adds it to the tail of active class list * cbq_activate_class adds it to the tail of active class list
of its priority band. * of its priority band.
*/ */
static __inline__ void cbq_activate_class(struct cbq_class *cl) static inline void cbq_activate_class(struct cbq_class *cl)
{ {
struct cbq_sched_data *q = qdisc_priv(cl->qdisc); struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
int prio = cl->cpriority; int prio = cl->cpriority;
@ -314,9 +315,9 @@ static __inline__ void cbq_activate_class(struct cbq_class *cl)
} }
/* /*
Unlink class from active chain. * Unlink class from active chain.
Note that this same procedure is done directly in cbq_dequeue* * Note that this same procedure is done directly in cbq_dequeue*
during round-robin procedure. * during round-robin procedure.
*/ */
static void cbq_deactivate_class(struct cbq_class *this) static void cbq_deactivate_class(struct cbq_class *this)
@ -418,11 +419,11 @@ static void cbq_ovl_classic(struct cbq_class *cl)
delay += cl->offtime; delay += cl->offtime;
/* /*
Class goes to sleep, so that it will have no * Class goes to sleep, so that it will have no
chance to work avgidle. Let's forgive it 8) * chance to work avgidle. Let's forgive it 8)
*
BTW cbq-2.0 has a crap in this * BTW cbq-2.0 has a crap in this
place, apparently they forgot to shift it by cl->ewma_log. * place, apparently they forgot to shift it by cl->ewma_log.
*/ */
if (cl->avgidle < 0) if (cl->avgidle < 0)
delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log); delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
@ -439,8 +440,8 @@ static void cbq_ovl_classic(struct cbq_class *cl)
q->wd_expires = delay; q->wd_expires = delay;
/* Dirty work! We must schedule wakeups based on /* Dirty work! We must schedule wakeups based on
real available rate, rather than leaf rate, * real available rate, rather than leaf rate,
which may be tiny (even zero). * which may be tiny (even zero).
*/ */
if (q->toplevel == TC_CBQ_MAXLEVEL) { if (q->toplevel == TC_CBQ_MAXLEVEL) {
struct cbq_class *b; struct cbq_class *b;
@ -460,7 +461,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
} }
/* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when /* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
they go overlimit * they go overlimit
*/ */
static void cbq_ovl_rclassic(struct cbq_class *cl) static void cbq_ovl_rclassic(struct cbq_class *cl)
@ -595,7 +596,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
struct Qdisc *sch = q->watchdog.qdisc; struct Qdisc *sch = q->watchdog.qdisc;
psched_time_t now; psched_time_t now;
psched_tdiff_t delay = 0; psched_tdiff_t delay = 0;
unsigned pmask; unsigned int pmask;
now = psched_get_time(); now = psched_get_time();
@ -665,15 +666,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
#endif #endif
/* /*
It is mission critical procedure. * It is mission critical procedure.
*
We "regenerate" toplevel cutoff, if transmitting class * We "regenerate" toplevel cutoff, if transmitting class
has backlog and it is not regulated. It is not part of * has backlog and it is not regulated. It is not part of
original CBQ description, but looks more reasonable. * original CBQ description, but looks more reasonable.
Probably, it is wrong. This question needs further investigation. * Probably, it is wrong. This question needs further investigation.
*/ */
static __inline__ void static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
struct cbq_class *borrowed) struct cbq_class *borrowed)
{ {
@ -712,10 +713,10 @@ cbq_update(struct cbq_sched_data *q)
cl->bstats.bytes += len; cl->bstats.bytes += len;
/* /*
(now - last) is total time between packet right edges. * (now - last) is total time between packet right edges.
(last_pktlen/rate) is "virtual" busy time, so that * (last_pktlen/rate) is "virtual" busy time, so that
*
idle = (now - last) - last_pktlen/rate * idle = (now - last) - last_pktlen/rate
*/ */
idle = q->now - cl->last; idle = q->now - cl->last;
@ -725,9 +726,9 @@ cbq_update(struct cbq_sched_data *q)
idle -= L2T(cl, len); idle -= L2T(cl, len);
/* true_avgidle := (1-W)*true_avgidle + W*idle, /* true_avgidle := (1-W)*true_avgidle + W*idle,
where W=2^{-ewma_log}. But cl->avgidle is scaled: * where W=2^{-ewma_log}. But cl->avgidle is scaled:
cl->avgidle == true_avgidle/W, * cl->avgidle == true_avgidle/W,
hence: * hence:
*/ */
avgidle += idle - (avgidle>>cl->ewma_log); avgidle += idle - (avgidle>>cl->ewma_log);
} }
@ -741,22 +742,22 @@ cbq_update(struct cbq_sched_data *q)
cl->avgidle = avgidle; cl->avgidle = avgidle;
/* Calculate expected time, when this class /* Calculate expected time, when this class
will be allowed to send. * will be allowed to send.
It will occur, when: * It will occur, when:
(1-W)*true_avgidle + W*delay = 0, i.e. * (1-W)*true_avgidle + W*delay = 0, i.e.
idle = (1/W - 1)*(-true_avgidle) * idle = (1/W - 1)*(-true_avgidle)
or * or
idle = (1 - W)*(-cl->avgidle); * idle = (1 - W)*(-cl->avgidle);
*/ */
idle = (-avgidle) - ((-avgidle) >> cl->ewma_log); idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
/* /*
That is not all. * That is not all.
To maintain the rate allocated to the class, * To maintain the rate allocated to the class,
we add to undertime virtual clock, * we add to undertime virtual clock,
necessary to complete transmitted packet. * necessary to complete transmitted packet.
(len/phys_bandwidth has been already passed * (len/phys_bandwidth has been already passed
to the moment of cbq_update) * to the moment of cbq_update)
*/ */
idle -= L2T(&q->link, len); idle -= L2T(&q->link, len);
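
The scaled-average arithmetic spelled out in the comments above is easier to check outside the kernel. Below is a minimal user-space sketch (an illustration under the stated assumptions, not code from this patch) that runs the integer update next to the floating-point recurrence it encodes, then derives the idle time needed to bring a negative average back to zero:

/*
 * Standalone demo of the two formulas above. It assumes an arithmetic
 * right shift for negative values, as the kernel code does. "avgidle" is
 * stored pre-divided by W = 2^-ewma_log, so the EWMA
 * true_avgidle = (1-W)*true_avgidle + W*idle becomes a shift-and-add,
 * and the wait that returns the average to zero is (1-W)*(-avgidle)
 * in the same scaled units.
 */
#include <stdio.h>

int main(void)
{
	const int ewma_log = 5;			/* W = 1/32 */
	const double W = 1.0 / (1 << ewma_log);
	long long avgidle = 0;			/* scaled: true_avgidle / W */
	double true_avgidle = 0.0;		/* unscaled reference value */
	const long long idle_samples[] = { 40, -120, 25, -300, 10 };
	unsigned int i;

	for (i = 0; i < sizeof(idle_samples) / sizeof(idle_samples[0]); i++) {
		long long idle = idle_samples[i];

		avgidle += idle - (avgidle >> ewma_log);	/* kernel form */
		true_avgidle = (1.0 - W) * true_avgidle + W * (double)idle;
		printf("idle=%5lld  scaled*W=%9.3f  float=%9.3f\n",
		       idle, (double)avgidle * W, true_avgidle);
	}

	if (avgidle < 0) {
		/* idle time that would pull the average back up to zero */
		long long wait = (-avgidle) - ((-avgidle) >> ewma_log);

		printf("class must stay idle for %lld ticks\n", wait);
	}
	return 0;
}

The shift form avoids any multiply or divide on the per-packet path, which is the point of keeping avgidle pre-scaled by 1/W.
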
@ -778,7 +779,7 @@ cbq_update(struct cbq_sched_data *q)
cbq_update_toplevel(q, this, q->tx_borrowed); cbq_update_toplevel(q, this, q->tx_borrowed);
} }
static __inline__ struct cbq_class * static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl) cbq_under_limit(struct cbq_class *cl)
{ {
struct cbq_sched_data *q = qdisc_priv(cl->qdisc); struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
@ -794,16 +795,17 @@ cbq_under_limit(struct cbq_class *cl)
do { do {
/* It is very suspicious place. Now overlimit /* It is very suspicious place. Now overlimit
action is generated for not bounded classes * action is generated for not bounded classes
only if link is completely congested. * only if link is completely congested.
Though it is in agree with ancestor-only paradigm, * Though it is in agree with ancestor-only paradigm,
it looks very stupid. Particularly, * it looks very stupid. Particularly,
it means that this chunk of code will either * it means that this chunk of code will either
never be called or result in strong amplification * never be called or result in strong amplification
of burstiness. Dangerous, silly, and, however, * of burstiness. Dangerous, silly, and, however,
no another solution exists. * no another solution exists.
*/ */
if ((cl = cl->borrow) == NULL) { cl = cl->borrow;
if (!cl) {
this_cl->qstats.overlimits++; this_cl->qstats.overlimits++;
this_cl->overlimit(this_cl); this_cl->overlimit(this_cl);
return NULL; return NULL;
@ -816,7 +818,7 @@ cbq_under_limit(struct cbq_class *cl)
return cl; return cl;
} }
static __inline__ struct sk_buff * static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio) cbq_dequeue_prio(struct Qdisc *sch, int prio)
{ {
struct cbq_sched_data *q = qdisc_priv(sch); struct cbq_sched_data *q = qdisc_priv(sch);
@ -840,7 +842,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
if (cl->deficit <= 0) { if (cl->deficit <= 0) {
/* Class exhausted its allotment per /* Class exhausted its allotment per
this round. Switch to the next one. * this round. Switch to the next one.
*/ */
deficit = 1; deficit = 1;
cl->deficit += cl->quantum; cl->deficit += cl->quantum;
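
For readers less familiar with the weighted round-robin side of cbq_dequeue_prio, here is a compact sketch of the deficit idea the comment describes (a generic illustration with invented names, not CBQ's exact control flow):

/* Generic deficit round-robin sketch; names are illustrative only. */
struct wrr_class {
	int deficit;	/* bytes the class may still send this round */
	int quantum;	/* refill added when the allotment runs out */
};

/*
 * Returns 1 if the class may transmit a packet of 'len' bytes now,
 * 0 if its allotment is exhausted and the next class should be tried.
 */
static int wrr_may_send(struct wrr_class *cl, int len)
{
	if (cl->deficit <= 0) {
		cl->deficit += cl->quantum;	/* credit for the next round */
		return 0;
	}
	cl->deficit -= len;			/* charge this transmission */
	return 1;
}
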
@ -850,8 +852,8 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
skb = cl->q->dequeue(cl->q); skb = cl->q->dequeue(cl->q);
/* Class did not give us any skb :-( /* Class did not give us any skb :-(
It could occur even if cl->q->q.qlen != 0 * It could occur even if cl->q->q.qlen != 0
f.e. if cl->q == "tbf" * f.e. if cl->q == "tbf"
*/ */
if (skb == NULL) if (skb == NULL)
goto skip_class; goto skip_class;
@ -880,7 +882,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
skip_class: skip_class:
if (cl->q->q.qlen == 0 || prio != cl->cpriority) { if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
/* Class is empty or penalized. /* Class is empty or penalized.
Unlink it from active chain. * Unlink it from active chain.
*/ */
cl_prev->next_alive = cl->next_alive; cl_prev->next_alive = cl->next_alive;
cl->next_alive = NULL; cl->next_alive = NULL;
@ -919,12 +921,12 @@ next_class:
return NULL; return NULL;
} }
static __inline__ struct sk_buff * static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch) cbq_dequeue_1(struct Qdisc *sch)
{ {
struct cbq_sched_data *q = qdisc_priv(sch); struct cbq_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb; struct sk_buff *skb;
unsigned activemask; unsigned int activemask;
activemask = q->activemask & 0xFF; activemask = q->activemask & 0xFF;
while (activemask) { while (activemask) {
@ -951,11 +953,11 @@ cbq_dequeue(struct Qdisc *sch)
if (q->tx_class) { if (q->tx_class) {
psched_tdiff_t incr2; psched_tdiff_t incr2;
/* Time integrator. We calculate EOS time /* Time integrator. We calculate EOS time
by adding expected packet transmission time. * by adding expected packet transmission time.
If real time is greater, we warp artificial clock, * If real time is greater, we warp artificial clock,
so that: * so that:
*
cbq_time = max(real_time, work); * cbq_time = max(real_time, work);
*/ */
incr2 = L2T(&q->link, q->tx_len); incr2 = L2T(&q->link, q->tx_len);
q->now += incr2; q->now += incr2;
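
The "time integrator" comment above is the whole trick: the virtual clock advances by the expected transmission time of the packet just sent, but is never allowed to lag behind real time. A small model of that invariant (hypothetical user-space code, not the kernel's):

#include <stdint.h>

struct cbq_clock {
	uint64_t now;	/* virtual work clock, in scheduler ticks */
};

/* Advance by the expected tx time, then warp so now never trails real time. */
static void cbq_clock_advance(struct cbq_clock *c, uint64_t real_now,
			      uint64_t expected_tx_time)
{
	c->now += expected_tx_time;
	if (c->now < real_now)
		c->now = real_now;	/* cbq_time = max(real_time, work) */
}
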
@ -977,21 +979,21 @@ cbq_dequeue(struct Qdisc *sch)
} }
/* All the classes are overlimit. /* All the classes are overlimit.
*
It is possible, if: * It is possible, if:
*
1. Scheduler is empty. * 1. Scheduler is empty.
2. Toplevel cutoff inhibited borrowing. * 2. Toplevel cutoff inhibited borrowing.
3. Root class is overlimit. * 3. Root class is overlimit.
*
Reset 2d and 3d conditions and retry. * Reset 2d and 3d conditions and retry.
*
Note, that NS and cbq-2.0 are buggy, peeking * Note, that NS and cbq-2.0 are buggy, peeking
an arbitrary class is appropriate for ancestor-only * an arbitrary class is appropriate for ancestor-only
sharing, but not for toplevel algorithm. * sharing, but not for toplevel algorithm.
*
Our version is better, but slower, because it requires * Our version is better, but slower, because it requires
two passes, but it is unavoidable with top-level sharing. * two passes, but it is unavoidable with top-level sharing.
*/ */
if (q->toplevel == TC_CBQ_MAXLEVEL && if (q->toplevel == TC_CBQ_MAXLEVEL &&
@ -1003,7 +1005,8 @@ cbq_dequeue(struct Qdisc *sch)
} }
/* No packets in scheduler or nobody wants to give them to us :-( /* No packets in scheduler or nobody wants to give them to us :-(
Sigh... start watchdog timer in the last case. */ * Sigh... start watchdog timer in the last case.
*/
if (sch->q.qlen) { if (sch->q.qlen) {
sch->qstats.overlimits++; sch->qstats.overlimits++;
@ -1025,7 +1028,8 @@ static void cbq_adjust_levels(struct cbq_class *this)
int level = 0; int level = 0;
struct cbq_class *cl; struct cbq_class *cl;
if ((cl = this->children) != NULL) { cl = this->children;
if (cl) {
do { do {
if (cl->level > level) if (cl->level > level)
level = cl->level; level = cl->level;
@ -1047,14 +1051,15 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
for (h = 0; h < q->clhash.hashsize; h++) { for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
/* BUGGGG... Beware! This expression suffer of /* BUGGGG... Beware! This expression suffer of
arithmetic overflows! * arithmetic overflows!
*/ */
if (cl->priority == prio) { if (cl->priority == prio) {
cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
q->quanta[prio]; q->quanta[prio];
} }
if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) { if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum); pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n",
cl->common.classid, cl->quantum);
cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1; cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
} }
} }
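
The normalization above multiplies weight, allot and the class count before dividing, which is exactly where the "arithmetic overflows" warning bites on 32-bit longs. A hedged user-space sketch of the same computation done in 64 bits, including the repair rule for out-of-range results (illustration only; the helper name and sample numbers are invented):

#include <stdint.h>
#include <stdio.h>

static long normalize_quantum(uint64_t weight, uint64_t allot,
			      uint64_t nclasses, uint64_t quanta,
			      unsigned int mtu)
{
	long quantum = (long)(weight * allot * nclasses / quanta);

	/* same repair rule as above: clamp absurd values to mtu/2 + 1 */
	if (quantum <= 0 || quantum > 32L * mtu)
		quantum = mtu / 2 + 1;
	return quantum;
}

int main(void)
{
	/* e.g. weight 1000, allot 1514, 3 classes in the band, quanta 3000 */
	printf("quantum = %ld\n", normalize_quantum(1000, 1514, 3, 3000, 1500));
	return 0;
}
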
@ -1065,7 +1070,7 @@ static void cbq_sync_defmap(struct cbq_class *cl)
{ {
struct cbq_sched_data *q = qdisc_priv(cl->qdisc); struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
struct cbq_class *split = cl->split; struct cbq_class *split = cl->split;
unsigned h; unsigned int h;
int i; int i;
if (split == NULL) if (split == NULL)
@ -1103,7 +1108,8 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
struct cbq_class *split = NULL; struct cbq_class *split = NULL;
if (splitid == 0) { if (splitid == 0) {
if ((split = cl->split) == NULL) split = cl->split;
if (!split)
return; return;
splitid = split->common.classid; splitid = split->common.classid;
} }
@ -1183,7 +1189,8 @@ static unsigned int cbq_drop(struct Qdisc* sch)
unsigned int len; unsigned int len;
for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) { for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
if ((cl_head = q->active[prio]) == NULL) cl_head = q->active[prio];
if (!cl_head)
continue; continue;
cl = cl_head; cl = cl_head;
@ -1206,7 +1213,7 @@ cbq_reset(struct Qdisc* sch)
struct cbq_class *cl; struct cbq_class *cl;
struct hlist_node *n; struct hlist_node *n;
int prio; int prio;
unsigned h; unsigned int h;
q->activemask = 0; q->activemask = 0;
q->pmask = 0; q->pmask = 0;
@ -1415,7 +1422,7 @@ put_rtab:
return err; return err;
} }
static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{ {
unsigned char *b = skb_tail_pointer(skb); unsigned char *b = skb_tail_pointer(skb);
@ -1427,7 +1434,7 @@ nla_put_failure:
return -1; return -1;
} }
static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{ {
unsigned char *b = skb_tail_pointer(skb); unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_lssopt opt; struct tc_cbq_lssopt opt;
@ -1452,7 +1459,7 @@ nla_put_failure:
return -1; return -1;
} }
static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{ {
unsigned char *b = skb_tail_pointer(skb); unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_wrropt opt; struct tc_cbq_wrropt opt;
@ -1470,7 +1477,7 @@ nla_put_failure:
return -1; return -1;
} }
static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
{ {
unsigned char *b = skb_tail_pointer(skb); unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_ovl opt; struct tc_cbq_ovl opt;
@ -1487,7 +1494,7 @@ nla_put_failure:
return -1; return -1;
} }
static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{ {
unsigned char *b = skb_tail_pointer(skb); unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_fopt opt; struct tc_cbq_fopt opt;
@ -1506,7 +1513,7 @@ nla_put_failure:
} }
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{ {
unsigned char *b = skb_tail_pointer(skb); unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_police opt; struct tc_cbq_police opt;
@ -1641,8 +1648,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
return 0; return 0;
} }
static struct Qdisc * static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
cbq_leaf(struct Qdisc *sch, unsigned long arg)
{ {
struct cbq_class *cl = (struct cbq_class *)arg; struct cbq_class *cl = (struct cbq_class *)arg;
@ -1683,13 +1689,12 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
kfree(cl); kfree(cl);
} }
static void static void cbq_destroy(struct Qdisc *sch)
cbq_destroy(struct Qdisc* sch)
{ {
struct cbq_sched_data *q = qdisc_priv(sch); struct cbq_sched_data *q = qdisc_priv(sch);
struct hlist_node *n, *next; struct hlist_node *n, *next;
struct cbq_class *cl; struct cbq_class *cl;
unsigned h; unsigned int h;
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
q->rx_class = NULL; q->rx_class = NULL;
@ -1828,7 +1833,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (classid) { if (classid) {
err = -EINVAL; err = -EINVAL;
if (TC_H_MAJ(classid^sch->handle) || cbq_class_lookup(q, classid)) if (TC_H_MAJ(classid ^ sch->handle) ||
cbq_class_lookup(q, classid))
goto failure; goto failure;
} else { } else {
int i; int i;
@ -2003,7 +2009,7 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
struct cbq_sched_data *q = qdisc_priv(sch); struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl; struct cbq_class *cl;
struct hlist_node *n; struct hlist_node *n;
unsigned h; unsigned int h;
if (arg->stop) if (arg->stop)
return; return;


@ -304,8 +304,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
* and don't need yet another qdisc as a bypass. * and don't need yet another qdisc as a bypass.
*/ */
if (p->mask[index] != 0xff || p->value[index]) if (p->mask[index] != 0xff || p->value[index])
printk(KERN_WARNING pr_warning("dsmark_dequeue: unsupported protocol %d\n",
"dsmark_dequeue: unsupported protocol %d\n",
ntohs(skb->protocol)); ntohs(skb->protocol));
break; break;
} }


@ -19,8 +19,7 @@
/* 1 band FIFO pseudo-"scheduler" */ /* 1 band FIFO pseudo-"scheduler" */
struct fifo_sched_data struct fifo_sched_data {
{
u32 limit; u32 limit;
}; };


@ -87,8 +87,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
*/ */
kfree_skb(skb); kfree_skb(skb);
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_WARNING "Dead loop on netdevice %s, " pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
"fix it urgently!\n", dev_queue->dev->name); dev_queue->dev->name);
ret = qdisc_qlen(q); ret = qdisc_qlen(q);
} else { } else {
/* /*
@ -137,7 +137,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
} else { } else {
/* Driver returned NETDEV_TX_BUSY - requeue skb */ /* Driver returned NETDEV_TX_BUSY - requeue skb */
if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
printk(KERN_WARNING "BUG %s code %d qlen %d\n", pr_warning("BUG %s code %d qlen %d\n",
dev->name, ret, q->q.qlen); dev->name, ret, q->q.qlen);
ret = dev_requeue_skb(skb, q); ret = dev_requeue_skb(skb, q);
@ -412,8 +412,9 @@ static struct Qdisc noqueue_qdisc = {
}; };
static const u8 prio2band[TC_PRIO_MAX+1] = static const u8 prio2band[TC_PRIO_MAX + 1] = {
{ 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 }; 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
};
/* 3-band FIFO queue: old style, but should be a bit faster than /* 3-band FIFO queue: old style, but should be a bit faster than
generic prio+fifo combination. generic prio+fifo combination.
@ -681,20 +682,18 @@ static void attach_one_default_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue, struct netdev_queue *dev_queue,
void *_unused) void *_unused)
{ {
struct Qdisc *qdisc; struct Qdisc *qdisc = &noqueue_qdisc;
if (dev->tx_queue_len) { if (dev->tx_queue_len) {
qdisc = qdisc_create_dflt(dev_queue, qdisc = qdisc_create_dflt(dev_queue,
&pfifo_fast_ops, TC_H_ROOT); &pfifo_fast_ops, TC_H_ROOT);
if (!qdisc) { if (!qdisc) {
printk(KERN_INFO "%s: activation failed\n", dev->name); netdev_info(dev, "activation failed\n");
return; return;
} }
/* Can by-pass the queue discipline for default qdisc */ /* Can by-pass the queue discipline for default qdisc */
qdisc->flags |= TCQ_F_CAN_BYPASS; qdisc->flags |= TCQ_F_CAN_BYPASS;
} else {
qdisc = &noqueue_qdisc;
} }
dev_queue->qdisc_sleeping = qdisc; dev_queue->qdisc_sleeping = qdisc;
} }


@ -32,8 +32,7 @@
struct gred_sched_data; struct gred_sched_data;
struct gred_sched; struct gred_sched;
struct gred_sched_data struct gred_sched_data {
{
u32 limit; /* HARD maximal queue length */ u32 limit; /* HARD maximal queue length */
u32 DP; /* the drop pramaters */ u32 DP; /* the drop pramaters */
u32 bytesin; /* bytes seen on virtualQ so far*/ u32 bytesin; /* bytes seen on virtualQ so far*/
@ -50,8 +49,7 @@ enum {
GRED_RIO_MODE, GRED_RIO_MODE,
}; };
struct gred_sched struct gred_sched {
{
struct gred_sched_data *tab[MAX_DPs]; struct gred_sched_data *tab[MAX_DPs];
unsigned long flags; unsigned long flags;
u32 red_flags; u32 red_flags;
@ -160,7 +158,8 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
dp = t->def; dp = t->def;
if ((q = t->tab[dp]) == NULL) { q = t->tab[dp];
if (!q) {
/* Pass through packets not assigned to a DP /* Pass through packets not assigned to a DP
* if no default DP has been configured. This * if no default DP has been configured. This
* allows for DP flows to be left untouched. * allows for DP flows to be left untouched.
@ -254,8 +253,8 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_WARNING "GRED: Unable to relocate " pr_warning("GRED: Unable to relocate VQ 0x%x "
"VQ 0x%x after dequeue, screwing up " "after dequeue, screwing up "
"backlog.\n", tc_index_to_dp(skb)); "backlog.\n", tc_index_to_dp(skb));
} else { } else {
q->backlog -= qdisc_pkt_len(skb); q->backlog -= qdisc_pkt_len(skb);
@ -286,8 +285,8 @@ static unsigned int gred_drop(struct Qdisc* sch)
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_WARNING "GRED: Unable to relocate " pr_warning("GRED: Unable to relocate VQ 0x%x "
"VQ 0x%x while dropping, screwing up " "while dropping, screwing up "
"backlog.\n", tc_index_to_dp(skb)); "backlog.\n", tc_index_to_dp(skb));
} else { } else {
q->backlog -= len; q->backlog -= len;
@ -369,7 +368,7 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
for (i = table->DPs; i < MAX_DPs; i++) { for (i = table->DPs; i < MAX_DPs; i++) {
if (table->tab[i]) { if (table->tab[i]) {
printk(KERN_WARNING "GRED: Warning: Destroying " pr_warning("GRED: Warning: Destroying "
"shadowed VQ 0x%x\n", i); "shadowed VQ 0x%x\n", i);
gred_destroy_vq(table->tab[i]); gred_destroy_vq(table->tab[i]);
table->tab[i] = NULL; table->tab[i] = NULL;


@ -81,8 +81,7 @@
* that are expensive on 32-bit architectures. * that are expensive on 32-bit architectures.
*/ */
struct internal_sc struct internal_sc {
{
u64 sm1; /* scaled slope of the 1st segment */ u64 sm1; /* scaled slope of the 1st segment */
u64 ism1; /* scaled inverse-slope of the 1st segment */ u64 ism1; /* scaled inverse-slope of the 1st segment */
u64 dx; /* the x-projection of the 1st segment */ u64 dx; /* the x-projection of the 1st segment */
@ -92,8 +91,7 @@ struct internal_sc
}; };
/* runtime service curve */ /* runtime service curve */
struct runtime_sc struct runtime_sc {
{
u64 x; /* current starting position on x-axis */ u64 x; /* current starting position on x-axis */
u64 y; /* current starting position on y-axis */ u64 y; /* current starting position on y-axis */
u64 sm1; /* scaled slope of the 1st segment */ u64 sm1; /* scaled slope of the 1st segment */
@ -104,15 +102,13 @@ struct runtime_sc
u64 ism2; /* scaled inverse-slope of the 2nd segment */ u64 ism2; /* scaled inverse-slope of the 2nd segment */
}; };
enum hfsc_class_flags enum hfsc_class_flags {
{
HFSC_RSC = 0x1, HFSC_RSC = 0x1,
HFSC_FSC = 0x2, HFSC_FSC = 0x2,
HFSC_USC = 0x4 HFSC_USC = 0x4
}; };
struct hfsc_class struct hfsc_class {
{
struct Qdisc_class_common cl_common; struct Qdisc_class_common cl_common;
unsigned int refcnt; /* usage count */ unsigned int refcnt; /* usage count */
@ -176,8 +172,7 @@ struct hfsc_class
unsigned long cl_nactive; /* number of active children */ unsigned long cl_nactive; /* number of active children */
}; };
struct hfsc_sched struct hfsc_sched {
{
u16 defcls; /* default class id */ u16 defcls; /* default class id */
struct hfsc_class root; /* root class */ struct hfsc_class root; /* root class */
struct Qdisc_class_hash clhash; /* class hash */ struct Qdisc_class_hash clhash; /* class hash */
@ -1177,8 +1172,10 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
return NULL; return NULL;
} }
#endif #endif
if ((cl = (struct hfsc_class *)res.class) == NULL) { cl = (struct hfsc_class *)res.class;
if ((cl = hfsc_find_class(res.classid, sch)) == NULL) if (!cl) {
cl = hfsc_find_class(res.classid, sch);
if (!cl)
break; /* filter selected invalid classid */ break; /* filter selected invalid classid */
if (cl->level >= head->level) if (cl->level >= head->level)
break; /* filter may only point downwards */ break; /* filter may only point downwards */
@ -1316,7 +1313,7 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
return -1; return -1;
} }
static inline int static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl) hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{ {
if ((cl->cl_flags & HFSC_RSC) && if ((cl->cl_flags & HFSC_RSC) &&
@ -1420,7 +1417,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
struct hfsc_class *cl; struct hfsc_class *cl;
u64 next_time = 0; u64 next_time = 0;
if ((cl = eltree_get_minel(q)) != NULL) cl = eltree_get_minel(q);
if (cl)
next_time = cl->cl_e; next_time = cl->cl_e;
if (q->root.cl_cfmin != 0) { if (q->root.cl_cfmin != 0) {
if (next_time == 0 || next_time > q->root.cl_cfmin) if (next_time == 0 || next_time > q->root.cl_cfmin)
@ -1626,7 +1624,8 @@ hfsc_dequeue(struct Qdisc *sch)
* find the class with the minimum deadline among * find the class with the minimum deadline among
* the eligible classes. * the eligible classes.
*/ */
if ((cl = eltree_get_mindl(q, cur_time)) != NULL) { cl = eltree_get_mindl(q, cur_time);
if (cl) {
realtime = 1; realtime = 1;
} else { } else {
/* /*


@ -99,9 +99,10 @@ struct htb_class {
struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */ struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */ struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
/* When class changes from state 1->2 and disconnects from /* When class changes from state 1->2 and disconnects from
parent's feed then we lost ptr value and start from the * parent's feed then we lost ptr value and start from the
first child again. Here we store classid of the * first child again. Here we store classid of the
last valid ptr (used when ptr is NULL). */ * last valid ptr (used when ptr is NULL).
*/
u32 last_ptr_id[TC_HTB_NUMPRIO]; u32 last_ptr_id[TC_HTB_NUMPRIO];
} inner; } inner;
} un; } un;
@ -185,7 +186,7 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
* have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull
* then finish and return direct queue. * then finish and return direct queue.
*/ */
#define HTB_DIRECT (struct htb_class*)-1 #define HTB_DIRECT ((struct htb_class *)-1L)
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
int *qerr) int *qerr)
@ -197,11 +198,13 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
int result; int result;
/* allow to select class by setting skb->priority to valid classid; /* allow to select class by setting skb->priority to valid classid;
note that nfmark can be used too by attaching filter fw with no * note that nfmark can be used too by attaching filter fw with no
rules in it */ * rules in it
*/
if (skb->priority == sch->handle) if (skb->priority == sch->handle)
return HTB_DIRECT; /* X:0 (direct flow) selected */ return HTB_DIRECT; /* X:0 (direct flow) selected */
if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0) cl = htb_find(skb->priority, sch);
if (cl && cl->level == 0)
return cl; return cl;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
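
The shortcut described above (skb->priority carrying a classid) can be exercised from user space: a process with CAP_NET_ADMIN may set SO_PRIORITY to the classid, so HTB picks the class without consulting any filter. A small hedged example; the classid 1:10 and the qdisc layout are assumptions, not something this patch sets up:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	/* tc classid 1:10 encoded as major<<16 | minor (hex), assumed to exist */
	unsigned int classid = 0x00010010;

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_PRIORITY,
				 &classid, sizeof(classid)) < 0) {
		perror("SO_PRIORITY");
		return 1;
	}
	printf("skb->priority for this socket is now 0x%x\n", classid);
	return 0;
}
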
@ -216,10 +219,12 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
return NULL; return NULL;
} }
#endif #endif
if ((cl = (void *)res.class) == NULL) { cl = (void *)res.class;
if (!cl) {
if (res.classid == sch->handle) if (res.classid == sch->handle)
return HTB_DIRECT; /* X:0 (direct flow) */ return HTB_DIRECT; /* X:0 (direct flow) */
if ((cl = htb_find(res.classid, sch)) == NULL) cl = htb_find(res.classid, sch);
if (!cl)
break; /* filter selected invalid classid */ break; /* filter selected invalid classid */
} }
if (!cl->level) if (!cl->level)
@ -378,7 +383,8 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
if (p->un.inner.feed[prio].rb_node) if (p->un.inner.feed[prio].rb_node)
/* parent already has its feed in use so that /* parent already has its feed in use so that
reset bit in mask as parent is already ok */ * reset bit in mask as parent is already ok
*/
mask &= ~(1 << prio); mask &= ~(1 << prio);
htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio); htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
@ -413,8 +419,9 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
if (p->un.inner.ptr[prio] == cl->node + prio) { if (p->un.inner.ptr[prio] == cl->node + prio) {
/* we are removing child which is pointed to from /* we are removing child which is pointed to from
parent feed - forget the pointer but remember * parent feed - forget the pointer but remember
classid */ * classid
*/
p->un.inner.last_ptr_id[prio] = cl->common.classid; p->un.inner.last_ptr_id[prio] = cl->common.classid;
p->un.inner.ptr[prio] = NULL; p->un.inner.ptr[prio] = NULL;
} }
@ -664,8 +671,9 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
unsigned long start) unsigned long start)
{ {
/* don't run for longer than 2 jiffies; 2 is used instead of /* don't run for longer than 2 jiffies; 2 is used instead of
1 to simplify things when jiffy is going to be incremented * 1 to simplify things when jiffy is going to be incremented
too soon */ * too soon
*/
unsigned long stop_at = start + 2; unsigned long stop_at = start + 2;
while (time_before(jiffies, stop_at)) { while (time_before(jiffies, stop_at)) {
struct htb_class *cl; struct htb_class *cl;
@ -688,7 +696,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
/* too much load - let's continue after a break for scheduling */ /* too much load - let's continue after a break for scheduling */
if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
printk(KERN_WARNING "htb: too many events!\n"); pr_warning("htb: too many events!\n");
q->warned |= HTB_WARN_TOOMANYEVENTS; q->warned |= HTB_WARN_TOOMANYEVENTS;
} }
@ -696,7 +704,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
} }
/* Returns class->node+prio from id-tree where classe's id is >= id. NULL /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
is no such one exists. */ * is no such one exists.
*/
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n, static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
u32 id) u32 id)
{ {
@ -740,12 +749,14 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
for (i = 0; i < 65535; i++) { for (i = 0; i < 65535; i++) {
if (!*sp->pptr && *sp->pid) { if (!*sp->pptr && *sp->pid) {
/* ptr was invalidated but id is valid - try to recover /* ptr was invalidated but id is valid - try to recover
the original or next ptr */ * the original or next ptr
*/
*sp->pptr = *sp->pptr =
htb_id_find_next_upper(prio, sp->root, *sp->pid); htb_id_find_next_upper(prio, sp->root, *sp->pid);
} }
*sp->pid = 0; /* ptr is valid now so that remove this hint as it *sp->pid = 0; /* ptr is valid now so that remove this hint as it
can become out of date quickly */ * can become out of date quickly
*/
if (!*sp->pptr) { /* we are at right end; rewind & go up */ if (!*sp->pptr) { /* we are at right end; rewind & go up */
*sp->pptr = sp->root; *sp->pptr = sp->root;
while ((*sp->pptr)->rb_left) while ((*sp->pptr)->rb_left)
@ -773,7 +784,8 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
} }
/* dequeues packet at given priority and level; call only if /* dequeues packet at given priority and level; call only if
you are sure that there is active class at prio/level */ * you are sure that there is active class at prio/level
*/
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio, static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
int level) int level)
{ {
@ -790,9 +802,10 @@ next:
return NULL; return NULL;
/* class can be empty - it is unlikely but can be true if leaf /* class can be empty - it is unlikely but can be true if leaf
qdisc drops packets in enqueue routine or if someone used * qdisc drops packets in enqueue routine or if someone used
graft operation on the leaf since last dequeue; * graft operation on the leaf since last dequeue;
simply deactivate and skip such class */ * simply deactivate and skip such class
*/
if (unlikely(cl->un.leaf.q->q.qlen == 0)) { if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
struct htb_class *next; struct htb_class *next;
htb_deactivate(q, cl); htb_deactivate(q, cl);
@ -832,7 +845,8 @@ next:
ptr[0]) + prio); ptr[0]) + prio);
} }
/* this used to be after charge_class but this constelation /* this used to be after charge_class but this constelation
gives us slightly better performance */ * gives us slightly better performance
*/
if (!cl->un.leaf.q->q.qlen) if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl); htb_deactivate(q, cl);
htb_charge_class(q, cl, level, skb); htb_charge_class(q, cl, level, skb);
@ -882,6 +896,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
m = ~q->row_mask[level]; m = ~q->row_mask[level];
while (m != (int)(-1)) { while (m != (int)(-1)) {
int prio = ffz(m); int prio = ffz(m);
m |= 1 << prio; m |= 1 << prio;
skb = htb_dequeue_tree(q, prio, level); skb = htb_dequeue_tree(q, prio, level);
if (likely(skb != NULL)) { if (likely(skb != NULL)) {
@ -989,13 +1004,12 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
return err; return err;
if (tb[TCA_HTB_INIT] == NULL) { if (tb[TCA_HTB_INIT] == NULL) {
printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n"); pr_err("HTB: hey probably you have bad tc tool ?\n");
return -EINVAL; return -EINVAL;
} }
gopt = nla_data(tb[TCA_HTB_INIT]); gopt = nla_data(tb[TCA_HTB_INIT]);
if (gopt->version != HTB_VER >> 16) { if (gopt->version != HTB_VER >> 16) {
printk(KERN_ERR pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
"HTB: need tc/htb version %d (minor is %d), you have %d\n",
HTB_VER >> 16, HTB_VER & 0xffff, gopt->version); HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
return -EINVAL; return -EINVAL;
} }
@ -1208,9 +1222,10 @@ static void htb_destroy(struct Qdisc *sch)
cancel_work_sync(&q->work); cancel_work_sync(&q->work);
qdisc_watchdog_cancel(&q->watchdog); qdisc_watchdog_cancel(&q->watchdog);
/* This line used to be after htb_destroy_class call below /* This line used to be after htb_destroy_class call below
and surprisingly it worked in 2.4. But it must precede it * and surprisingly it worked in 2.4. But it must precede it
because filter need its target class alive to be able to call * because filter need its target class alive to be able to call
unbind_filter on it (without Oops). */ * unbind_filter on it (without Oops).
*/
tcf_destroy_chain(&q->filter_list); tcf_destroy_chain(&q->filter_list);
for (i = 0; i < q->clhash.hashsize; i++) { for (i = 0; i < q->clhash.hashsize; i++) {
@ -1344,11 +1359,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
/* check maximal depth */ /* check maximal depth */
if (parent && parent->parent && parent->parent->level < 2) { if (parent && parent->parent && parent->parent->level < 2) {
printk(KERN_ERR "htb: tree is too deep\n"); pr_err("htb: tree is too deep\n");
goto failure; goto failure;
} }
err = -ENOBUFS; err = -ENOBUFS;
if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL) cl = kzalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
goto failure; goto failure;
err = gen_new_estimator(&cl->bstats, &cl->rate_est, err = gen_new_estimator(&cl->bstats, &cl->rate_est,
@ -1368,8 +1384,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
RB_CLEAR_NODE(&cl->node[prio]); RB_CLEAR_NODE(&cl->node[prio]);
/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
so that can't be used inside of sch_tree_lock * so that can't be used inside of sch_tree_lock
-- thanks to Karlis Peisenieks */ * -- thanks to Karlis Peisenieks
*/
new_q = qdisc_create_dflt(sch->dev_queue, new_q = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, classid); &pfifo_qdisc_ops, classid);
sch_tree_lock(sch); sch_tree_lock(sch);
@ -1421,17 +1438,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
} }
/* it used to be a nasty bug here, we have to check that node /* it used to be a nasty bug here, we have to check that node
is really leaf before changing cl->un.leaf ! */ * is really leaf before changing cl->un.leaf !
*/
if (!cl->level) { if (!cl->level) {
cl->quantum = rtab->rate.rate / q->rate2quantum; cl->quantum = rtab->rate.rate / q->rate2quantum;
if (!hopt->quantum && cl->quantum < 1000) { if (!hopt->quantum && cl->quantum < 1000) {
printk(KERN_WARNING pr_warning(
"HTB: quantum of class %X is small. Consider r2q change.\n", "HTB: quantum of class %X is small. Consider r2q change.\n",
cl->common.classid); cl->common.classid);
cl->quantum = 1000; cl->quantum = 1000;
} }
if (!hopt->quantum && cl->quantum > 200000) { if (!hopt->quantum && cl->quantum > 200000) {
printk(KERN_WARNING pr_warning(
"HTB: quantum of class %X is big. Consider r2q change.\n", "HTB: quantum of class %X is big. Consider r2q change.\n",
cl->common.classid); cl->common.classid);
cl->quantum = 200000; cl->quantum = 200000;
@ -1480,13 +1498,13 @@ static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
struct htb_class *cl = htb_find(classid, sch); struct htb_class *cl = htb_find(classid, sch);
/*if (cl && !cl->level) return 0; /*if (cl && !cl->level) return 0;
The line above used to be there to prevent attaching filters to * The line above used to be there to prevent attaching filters to
leaves. But at least tc_index filter uses this just to get class * leaves. But at least tc_index filter uses this just to get class
for other reasons so that we have to allow for it. * for other reasons so that we have to allow for it.
---- * ----
19.6.2002 As Werner explained it is ok - bind filter is just * 19.6.2002 As Werner explained it is ok - bind filter is just
another way to "lock" the class - unlike "get" this lock can * another way to "lock" the class - unlike "get" this lock can
be broken by class during destroy IIUC. * be broken by class during destroy IIUC.
*/ */
if (cl) if (cl)
cl->filter_cnt++; cl->filter_cnt++;


@ -22,8 +22,7 @@
#include <net/pkt_sched.h> #include <net/pkt_sched.h>
struct prio_sched_data struct prio_sched_data {
{
int bands; int bands;
struct tcf_proto *filter_list; struct tcf_proto *filter_list;
u8 prio2band[TC_PRIO_MAX+1]; u8 prio2band[TC_PRIO_MAX+1];
@ -199,6 +198,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
for (i = 0; i < q->bands; i++) { for (i = 0; i < q->bands; i++) {
if (q->queues[i] == &noop_qdisc) { if (q->queues[i] == &noop_qdisc) {
struct Qdisc *child, *old; struct Qdisc *child, *old;
child = qdisc_create_dflt(sch->dev_queue, child = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, &pfifo_qdisc_ops,
TC_H_MAKE(sch->handle, i + 1)); TC_H_MAKE(sch->handle, i + 1));


@ -36,8 +36,7 @@
if RED works correctly. if RED works correctly.
*/ */
struct red_sched_data struct red_sched_data {
{
u32 limit; /* HARD maximal queue length */ u32 limit; /* HARD maximal queue length */
unsigned char flags; unsigned char flags;
struct red_parms parms; struct red_parms parms;


@ -92,8 +92,7 @@ typedef unsigned char sfq_index;
* while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1] * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
* are 'pointers' to dep[] array * are 'pointers' to dep[] array
*/ */
struct sfq_head struct sfq_head {
{
sfq_index next; sfq_index next;
sfq_index prev; sfq_index prev;
}; };
@ -108,11 +107,10 @@ struct sfq_slot {
short allot; /* credit for this slot */ short allot; /* credit for this slot */
}; };
struct sfq_sched_data struct sfq_sched_data {
{
/* Parameters */ /* Parameters */
int perturb_period; int perturb_period;
unsigned quantum; /* Allotment per round: MUST BE >= MTU */ unsigned int quantum; /* Allotment per round: MUST BE >= MTU */
int limit; int limit;
/* Variables */ /* Variables */
@ -137,12 +135,12 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
return &q->dep[val - SFQ_SLOTS]; return &q->dep[val - SFQ_SLOTS];
} }
static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1) static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{ {
return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1); return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
} }
static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{ {
u32 h, h2; u32 h, h2;


@ -97,8 +97,7 @@
changed the limit is not effective anymore. changed the limit is not effective anymore.
*/ */
struct tbf_sched_data struct tbf_sched_data {
{
/* Parameters */ /* Parameters */
u32 limit; /* Maximal length of backlog: bytes */ u32 limit; /* Maximal length of backlog: bytes */
u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */ u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */
@ -259,15 +258,18 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
} }
for (n = 0; n < 256; n++) for (n = 0; n < 256; n++)
if (rtab->data[n] > qopt->buffer) break; if (rtab->data[n] > qopt->buffer)
break;
max_size = (n << qopt->rate.cell_log) - 1; max_size = (n << qopt->rate.cell_log) - 1;
if (ptab) { if (ptab) {
int size; int size;
for (n = 0; n < 256; n++) for (n = 0; n < 256; n++)
if (ptab->data[n] > qopt->mtu) break; if (ptab->data[n] > qopt->mtu)
break;
size = (n << qopt->peakrate.cell_log) - 1; size = (n << qopt->peakrate.cell_log) - 1;
if (size < max_size) max_size = size; if (size < max_size)
max_size = size;
} }
if (max_size < 0) if (max_size < 0)
goto done; goto done;
@ -422,8 +424,7 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
} }
} }
static const struct Qdisc_class_ops tbf_class_ops = static const struct Qdisc_class_ops tbf_class_ops = {
{
.graft = tbf_graft, .graft = tbf_graft,
.leaf = tbf_leaf, .leaf = tbf_leaf,
.get = tbf_get, .get = tbf_get,


@ -53,8 +53,7 @@
which will not break load balancing, though native slave which will not break load balancing, though native slave
traffic will have the highest priority. */ traffic will have the highest priority. */
struct teql_master struct teql_master {
{
struct Qdisc_ops qops; struct Qdisc_ops qops;
struct net_device *dev; struct net_device *dev;
struct Qdisc *slaves; struct Qdisc *slaves;
@ -65,8 +64,7 @@ struct teql_master
unsigned long tx_dropped; unsigned long tx_dropped;
}; };
struct teql_sched_data struct teql_sched_data {
{
struct Qdisc *next; struct Qdisc *next;
struct teql_master *m; struct teql_master *m;
struct neighbour *ncache; struct neighbour *ncache;
@ -123,7 +121,7 @@ teql_peek(struct Qdisc* sch)
return NULL; return NULL;
} }
static __inline__ void static inline void
teql_neigh_release(struct neighbour *n) teql_neigh_release(struct neighbour *n)
{ {
if (n) if (n)
@ -147,7 +145,8 @@ teql_destroy(struct Qdisc* sch)
struct teql_sched_data *dat = qdisc_priv(sch); struct teql_sched_data *dat = qdisc_priv(sch);
struct teql_master *master = dat->m; struct teql_master *master = dat->m;
if ((prev = master->slaves) != NULL) { prev = master->slaves;
if (prev) {
do { do {
q = NEXT_SLAVE(prev); q = NEXT_SLAVE(prev);
if (q == sch) { if (q == sch) {
@ -290,7 +289,8 @@ restart:
nores = 0; nores = 0;
busy = 0; busy = 0;
if ((q = start) == NULL) q = start;
if (!q)
goto drop; goto drop;
do { do {
@ -358,7 +358,7 @@ static int teql_master_open(struct net_device *dev)
struct Qdisc *q; struct Qdisc *q;
struct teql_master *m = netdev_priv(dev); struct teql_master *m = netdev_priv(dev);
int mtu = 0xFFFE; int mtu = 0xFFFE;
unsigned flags = IFF_NOARP|IFF_MULTICAST; unsigned int flags = IFF_NOARP | IFF_MULTICAST;
if (m->slaves == NULL) if (m->slaves == NULL)
return -EUNATCH; return -EUNATCH;