/*
 * net/sched/sch_api.c  Packet scheduler API.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/proc_fs.h>

#include <net/sock.h>
#include <net/pkt_sched.h>

#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#define BUG_TRAP(x) if (!(x)) { printk("Assertion (" #x ") failed at " __FILE__ "(%d):" __FUNCTION__ "\n", __LINE__); }
#ifdef CONFIG_RTNETLINK
static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
                        struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
                         struct Qdisc *q, unsigned long cl, int event);
#endif
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in the order and at the times
   determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (see cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them etc. etc. etc.

   The goal of the routines in this file is to translate
   the information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to perform some sanity
   checks and the part of the work that is common to all qdiscs,
   and to provide rtnetlink notifications.

   All real intelligent work is done inside qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns a skb to send. It is allowed to return NULL,
   but this does not mean that the queue is empty, it just means that
   the discipline does not want to send anything this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues q->q is not
   the real packet queue, but q->q.qlen must still be valid.

   ---enqueue

   enqueue returns the number of enqueued packets, i.e. this number is 1
   if the packet was enqueued successfully, and <1 if something (not
   necessarily THIS packet) was dropped.

   Auxiliary routines:

   ---requeue

   requeues a once-dequeued packet. It is used by non-standard or
   just buggy devices, which can defer output even if dev->tbusy=0.

   ---reset

   returns the qdisc to its initial state: purge all buffers, clear all
   timers, counters (except for statistics) etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.
 */
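/*
   To make the contract above concrete, here is a minimal sketch of what a
   trivial "queue"-type discipline looks like against this interface.  It is
   illustrative only and not compiled in; the function names are invented for
   this example (the real minimal disciplines are the pfifo/bfifo qdiscs
   registered in pktsched_init() below).
 */
#if 0
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
    __skb_queue_tail(&sch->q, skb); /* sch->q.qlen now reflects the backlog */
    return 1;                       /* 1 == this packet was accepted */
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
    /* NULL here means "nothing to send right now"; the queue is really
       empty only when sch->q.qlen == 0. */
    return __skb_dequeue(&sch->q);
}
#endif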
/************************************************
 *      Queueing disciplines manipulation.      *
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base = NULL;

/* Register/unregister queueing discipline */
int register_qdisc(struct Qdisc_ops *qops)
{
    struct Qdisc_ops *q, **qp;

    for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
        if (strcmp(qops->id, q->id) == 0)
            return -EEXIST;

    if (qops->enqueue == NULL)
        qops->enqueue = noop_qdisc_ops.enqueue;
    if (qops->requeue == NULL)
        qops->requeue = noop_qdisc_ops.requeue;
    if (qops->dequeue == NULL)
        qops->dequeue = noop_qdisc_ops.dequeue;

    qops->next = NULL;
    *qp = qops;
    return 0;
}

int unregister_qdisc(struct Qdisc_ops *qops)
{
    struct Qdisc_ops *q, **qp;
    for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
        if (q == qops)
            break;
    if (!q)
        return -ENOENT;
    *qp = q->next;
    q->next = NULL;
    return 0;
}
/* We know the handle. Find the qdisc among all qdiscs attached to the device
   (root qdisc, all its children, children of children etc.)
 */

struct Qdisc *qdisc_lookup(struct device *dev, u32 handle)
{
    struct Qdisc *q;

    for (q = dev->qdisc_list; q; q = q->next) {
        if (q->handle == handle)
            return q;
    }
    return NULL;
}
/* We know the classid. Find the qdisc among all qdiscs attached to the device
   (root qdisc, all its children, children of children etc.)
 */

struct Qdisc *qdisc_lookup_class(struct device *dev, u32 classid)
{
    struct Qdisc *q;

    for (q = dev->qdisc_list; q; q = q->next) {
        if (q->classid == classid)
            return q;
    }
    return NULL;
}
/* Find queueing discipline by name */

struct Qdisc_ops *qdisc_lookup_ops(struct rtattr *kind)
{
    struct Qdisc_ops *q;

    if (kind) {
        for (q = qdisc_base; q; q = q->next) {
            if (rtattr_strcmp(kind, q->id) == 0)
                return q;
        }
    }
    return NULL;
}
static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct rtattr *tab)
{
    struct qdisc_rate_table *rtab;

    for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
        if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
            rtab->refcnt++;
            return rtab;
        }
    }

    if (tab == NULL || r->rate == 0 || r->cell_log == 0 || RTA_PAYLOAD(tab) != 1024)
        return NULL;

    rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
    if (rtab) {
        rtab->rate = *r;
        rtab->refcnt = 1;
        memcpy(rtab->data, RTA_DATA(tab), 1024);
        rtab->next = qdisc_rtab_list;
        qdisc_rtab_list = rtab;
    }
    return rtab;
}

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
    struct qdisc_rate_table *rtab, **rtabp;

    if (!tab || --tab->refcnt)
        return;

    for (rtabp = &qdisc_rtab_list; (rtab = *rtabp) != NULL; rtabp = &rtab->next) {
        if (rtab == tab) {
            *rtabp = rtab->next;
            kfree(rtab);
            return;
        }
    }
}
/* Allocate a unique handle from the space managed by the kernel */

u32 qdisc_alloc_handle(struct device *dev)
{
    int i = 0x10000;
    static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

    do {
        autohandle += TC_H_MAKE(0x10000U, 0);
        if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
            autohandle = TC_H_MAKE(0x80000000U, 0);
    } while (qdisc_lookup(dev, autohandle) && --i > 0);

    return i > 0 ? autohandle : 0;
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
   to device "dev".

   The old qdisc is not destroyed but returned in *old.
 */

int qdisc_graft(struct device *dev, struct Qdisc *parent, u32 classid,
                struct Qdisc *new, struct Qdisc **old)
{
    int err = 0;

    if (parent == NULL) {
        BUG_TRAP(classid == TC_H_ROOT);
        if (new) {
            new->parent = NULL;
            new->classid = TC_H_ROOT;
        }
        *old = dev_set_scheduler(dev, new);
    } else {
        struct Qdisc_class_ops *cops = parent->ops->cl_ops;

        BUG_TRAP(classid != TC_H_ROOT);

        err = -EINVAL;

        if (cops) {
            unsigned long cl = cops->get(parent, classid);
            if (cl) {
                err = cops->graft(parent, cl, new, old);
                cops->put(parent, cl);
            }
        }
    }
    return err;
}
#ifdef CONFIG_RTNETLINK

/*
   Allocate and initialize a new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *
qdisc_create(struct device *dev, struct Qdisc_ops *ops, u32 handle,
             u32 parentid, struct rtattr **tca, int *errp)
{
    int err;
    struct rtattr *kind = tca[TCA_KIND-1];
    struct Qdisc *sch = NULL;
    int size;
    int new = 0;

    if (ops == NULL) {
        ops = qdisc_lookup_ops(kind);
        err = -EINVAL;
        if (ops == NULL)
            goto err_out;
        new = 1;
    }

    size = sizeof(*sch) + ops->priv_size;

    sch = kmalloc(size, GFP_KERNEL);
    err = -ENOBUFS;
    if (!sch)
        goto err_out;

    /* Grrr... Resolve race condition with module unload */

    err = -EINVAL;
    if (new) {
        if (ops != qdisc_lookup_ops(kind))
            goto err_out;
    } else if (kind) {
        if (rtattr_strcmp(kind, ops->id))
            goto err_out;
    }

    memset(sch, 0, size);

    skb_queue_head_init(&sch->q);
    sch->ops = ops;
    sch->enqueue = ops->enqueue;
    sch->dequeue = ops->dequeue;
    sch->dev = dev;
    if (handle == 0) {
        handle = qdisc_alloc_handle(dev);
        err = -ENOMEM;
        if (handle == 0)
            goto err_out;
    }
    sch->handle = handle;
    sch->classid = parentid;

    if (ops->init && (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
        sch->next = dev->qdisc_list;
        dev->qdisc_list = sch;
#ifdef CONFIG_NET_ESTIMATOR
        if (tca[TCA_RATE-1])
            qdisc_new_estimator(&sch->stats, tca[TCA_RATE-1]);
#endif
        return sch;
    }

err_out:
    *errp = err;
    if (sch)
        kfree(sch);
    return NULL;
}
/*
   Create/delete/change/get qdisc.
 */

static int tc_ctl_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
    struct tcmsg *tcm = NLMSG_DATA(n);
    struct rtattr **tca = arg;
    struct device *dev;
    u32 clid = tcm->tcm_parent;
    struct Qdisc *old_q;
    struct Qdisc *q = NULL;
    struct Qdisc *p = NULL;
    struct Qdisc *leaf = NULL;
    struct Qdisc_ops *qops = NULL;
    int err;

    /* Find device */
    if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
        return -ENODEV;

    /* If a parent is specified, it must exist, and tcm_parent selects
       the class in the parent to which the new qdisc will be attached.

       The place may already be occupied by another qdisc; remember this
       fact, if it was not an auto-created discipline.
     */
    if (clid) {
        if (clid != TC_H_ROOT) {
            p = qdisc_lookup(dev, TC_H_MAJ(clid));
            if (p == NULL)
                return -ENOENT;
            leaf = qdisc_lookup_class(dev, clid);
        } else
            leaf = dev->qdisc_sleeping;

        if (leaf && leaf->flags&TCQ_F_DEFAULT && n->nlmsg_type == RTM_NEWQDISC)
            leaf = NULL;

        /*
           Also, leaf may be exactly the qdisc that we want
           to control. Remember this to avoid one more qdisc_lookup.
         */

        if (leaf && leaf->handle == tcm->tcm_handle)
            q = leaf;
    }

    /* Try to locate the discipline */
    if (tcm->tcm_handle && q == NULL) {
        if (TC_H_MIN(tcm->tcm_handle))
            return -EINVAL;
        q = qdisc_lookup(dev, tcm->tcm_handle);
    }

    /* If the discipline already exists, check that its real parent
       matches the one selected by tcm_parent.
     */

    if (q) {
        if (clid && p != q->parent)
            return -EINVAL;
        BUG_TRAP(!leaf || leaf == q);
        if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))
            return -EINVAL;
        clid = q->classid;
        goto process_existing;
    }

    /* The discipline is known not to exist.
       If the parent was not selected either, return an error.
     */
    if (clid == 0)
        return tcm->tcm_handle ? -ENOENT : -EINVAL;

    /* Check for the case when leaf is exactly the thing
       that you want.
     */

    if (leaf && tcm->tcm_handle == 0) {
        q = leaf;
        if (!tca[TCA_KIND-1] || rtattr_strcmp(tca[TCA_KIND-1], q->ops->id) == 0)
            goto process_existing;
    }

    if (n->nlmsg_type != RTM_NEWQDISC || !(n->nlmsg_flags&NLM_F_CREATE))
        return -ENOENT;
    if (leaf && n->nlmsg_flags&NLM_F_EXCL)
        return -EEXIST;

create_and_graft:
    q = qdisc_create(dev, qops, tcm->tcm_handle, clid, tca, &err);
    if (q == NULL)
        return err;

graft:
    err = qdisc_graft(dev, p, clid, q, &old_q);
    if (err) {
        if (q)
            qdisc_destroy(q);
        return err;
    }
    qdisc_notify(skb, n, old_q, q);
    if (old_q)
        qdisc_destroy(old_q);
    return 0;

process_existing:

    switch (n->nlmsg_type) {
    case RTM_NEWQDISC:
        if (n->nlmsg_flags&NLM_F_EXCL)
            return -EEXIST;
        qops = q->ops;
        goto create_and_graft;
    case RTM_GETQDISC:
        qdisc_notify(skb, n, NULL, q);
        return 0;
    case RTM_DELQDISC:
        q = NULL;
        goto graft;
    default:
        return -EINVAL;
    }
}
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q,
                         u32 pid, u32 seq, unsigned flags, int event)
{
    struct tcmsg *tcm;
    struct nlmsghdr *nlh;
    unsigned char *b = skb->tail;

    nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*tcm));
    nlh->nlmsg_flags = flags;
    tcm = NLMSG_DATA(nlh);
    tcm->tcm_family = AF_UNSPEC;
    tcm->tcm_ifindex = q->dev ? q->dev->ifindex : 0;
    tcm->tcm_parent = q->classid;
    tcm->tcm_handle = q->handle;
    tcm->tcm_info = 0;
    RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id);
    if (q->ops->dump && q->ops->dump(q, skb) < 0)
        goto rtattr_failure;
    q->stats.qlen = q->q.qlen;
    RTA_PUT(skb, TCA_STATS, sizeof(q->stats), &q->stats);
    nlh->nlmsg_len = skb->tail - b;
    return skb->len;

nlmsg_failure:
rtattr_failure:
    skb_trim(skb, b - skb->data);
    return -1;
}
static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
                        struct Qdisc *old, struct Qdisc *new)
{
    struct sk_buff *skb;
    u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

    skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
    if (!skb)
        return -ENOBUFS;

    if (old && !(old->flags&TCQ_F_DEFAULT)) {
        if (tc_fill_qdisc(skb, old, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
            goto err_out;
    }
    if (new) {
        if (tc_fill_qdisc(skb, new, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
            goto err_out;
    }

    if (skb->len)
        return rtnetlink_send(skb, pid, RTMGRP_TC, n->nlmsg_flags&NLM_F_ECHO);

err_out:
    kfree_skb(skb);
    return -EINVAL;
}
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
    int idx, q_idx;
    int s_idx, s_q_idx;
    struct device *dev;
    struct Qdisc *q;

    s_idx = cb->args[0];
    s_q_idx = q_idx = cb->args[1];
    for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
        if (idx < s_idx)
            continue;
        if (idx > s_idx)
            s_q_idx = 0;
        for (q = dev->qdisc_list, q_idx = 0; q;
             q = q->next, q_idx++) {
            if (q_idx < s_q_idx)
                continue;
            if (tc_fill_qdisc(skb, q, NETLINK_CB(cb->skb).pid,
                              cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
                goto done;
        }
    }

done:
    cb->args[0] = idx;
    cb->args[1] = q_idx;

    return skb->len;
}
/************************************************
 *      Traffic classes manipulation.           *
 ************************************************/
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
    struct tcmsg *tcm = NLMSG_DATA(n);
    struct rtattr **tca = arg;
    struct device *dev;
    struct Qdisc *q = NULL;
    struct Qdisc_class_ops *cops;
    unsigned long cl = 0;
    unsigned long new_cl;
    u32 pid = tcm->tcm_parent;
    u32 clid = tcm->tcm_handle;
    u32 qid = TC_H_MAJ(clid);
    int err;

    if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
        return -ENODEV;

    /*
       parent == TC_H_UNSPEC - unspecified parent.
       parent == TC_H_ROOT   - class is root, which has no parent.
       parent == X:0         - parent is root class.
       parent == X:Y         - parent is a node in hierarchy.
       parent == 0:Y         - parent is X:Y, where X:0 is qdisc.

       handle == 0:0         - generate handle from kernel pool.
       handle == 0:Y         - class is X:Y, where X:0 is qdisc.
       handle == X:Y         - clear.
       handle == X:0         - root class.
     */
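    /*
       As a worked example of the handle layout used in the table above
       (the concrete numbers are illustrative, not taken from this file):
       the major number lives in the upper 16 bits and the minor number in
       the lower 16 bits, so, assuming the usual TC_H_* macro definitions,

           TC_H_MAKE(0x10000U, 2) == 0x00010002   ("1:2")
           TC_H_MAJ(0x00010002)   == 0x00010000   (qdisc "1:0")
           TC_H_MIN(0x00010002)   == 0x00000002   (minor 2)

       i.e. "X:Y" above is simply (X<<16)|Y, which is what the code below
       manipulates with TC_H_MAJ()/TC_H_MAKE().
     */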
    /* Step 1. Determine qdisc handle X:0 */

    if (pid != TC_H_ROOT) {
        u32 qid1 = TC_H_MAJ(pid);

        if (qid && qid1) {
            /* If both majors are known, they must be identical. */
            if (qid != qid1)
                return -EINVAL;
        } else if (qid1) {
            qid = qid1;
        } else if (qid == 0)
            qid = dev->qdisc_sleeping->handle;

        /* Now qid is a genuine qdisc handle consistent
           both with parent and child.

           TC_H_MAJ(pid) may still be unspecified, complete it now.
         */
        if (pid)
            pid = TC_H_MAKE(qid, pid);
    } else {
        if (qid == 0)
            qid = dev->qdisc_sleeping->handle;
    }

    /* OK. Locate qdisc */
    if ((q = qdisc_lookup(dev, qid)) == NULL)
        return -ENOENT;

    /* And check that it supports classes */
    cops = q->ops->cl_ops;
    if (cops == NULL)
        return -EINVAL;

    /* Now try to get the class */
    if (clid == 0) {
        if (pid == TC_H_ROOT)
            clid = qid;
    } else
        clid = TC_H_MAKE(qid, clid);

    if (clid)
        cl = cops->get(q, clid);

    if (cl == 0) {
        err = -ENOENT;
        if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE))
            goto out;
    } else {
        switch (n->nlmsg_type) {
        case RTM_NEWTCLASS:
            err = -EEXIST;
            if (n->nlmsg_flags&NLM_F_EXCL)
                goto out;
            break;
        case RTM_DELTCLASS:
            err = cops->delete(q, cl);
            if (err == 0)
                tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
            goto out;
        case RTM_GETTCLASS:
            err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS);
            goto out;
        default:
            err = -EINVAL;
            goto out;
        }
    }

    new_cl = cl;
    err = cops->change(q, clid, pid, tca, &new_cl);
    if (err == 0)
        tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);

out:
    if (cl)
        cops->put(q, cl);

    return err;
}
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
                          unsigned long cl,
                          u32 pid, u32 seq, unsigned flags, int event)
{
    struct tcmsg *tcm;
    struct nlmsghdr *nlh;
    unsigned char *b = skb->tail;

    nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*tcm));
    nlh->nlmsg_flags = flags;
    tcm = NLMSG_DATA(nlh);
    tcm->tcm_family = AF_UNSPEC;
    tcm->tcm_ifindex = q->dev ? q->dev->ifindex : 0;
    tcm->tcm_parent = q->handle;
    tcm->tcm_handle = q->handle;
    tcm->tcm_info = 0;
    RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id);
    if (q->ops->cl_ops->dump && q->ops->cl_ops->dump(q, cl, skb, tcm) < 0)
        goto rtattr_failure;
    nlh->nlmsg_len = skb->tail - b;
    return skb->len;

nlmsg_failure:
rtattr_failure:
    skb_trim(skb, b - skb->data);
    return -1;
}
static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
                         struct Qdisc *q, unsigned long cl, int event)
{
    struct sk_buff *skb;
    u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

    skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
    if (!skb)
        return -ENOBUFS;

    if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
        kfree_skb(skb);
        return -EINVAL;
    }

    return rtnetlink_send(skb, pid, RTMGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
}
struct qdisc_dump_args
{
    struct qdisc_walker w;
    struct sk_buff *skb;
    struct netlink_callback *cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
    struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

    return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
                          a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
    int t;
    int s_t;
    struct device *dev;
    struct Qdisc *q;
    struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
    struct qdisc_dump_args arg;

    if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
        return 0;
    if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
        return 0;

    s_t = cb->args[0];

    for (q = dev->qdisc_list, t = 0; q; q = q->next, t++) {
        if (t < s_t) continue;
        if (!q->ops->cl_ops) continue;
        if (tcm->tcm_parent && TC_H_MAJ(tcm->tcm_parent) != q->handle
            && (tcm->tcm_parent != TC_H_ROOT || q->parent != NULL))
            continue;
        if (t > s_t)
            memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(int));
        arg.w.fn = qdisc_class_dump;
        arg.skb = skb;
        arg.cb = cb;
        arg.w.stop = 0;
        arg.w.skip = cb->args[1];
        arg.w.count = 0;
        q->ops->cl_ops->walk(q, &arg.w);
        cb->args[1] = arg.w.count;
        if (arg.w.stop)
            break;
    }

    cb->args[0] = t;

    return skb->len;
}

#endif
int psched_us_per_tick = 1;
int psched_tick_per_us = 1;

#ifdef CONFIG_PROC_FS
static int psched_read_proc(char *buffer, char **start, off_t offset,
                            int length, int *eof, void *data)
{
    int len;

    len = sprintf(buffer, "%08x %08x\n",
                  psched_tick_per_us, psched_us_per_tick);

    len -= offset;

    if (len > length)
        len = length;
    if (len < 0)
        len = 0;

    *start = buffer + offset;
    *eof = 1;

    return len;
}
#endif
psched_time_t psched_time_base;

#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
psched_tdiff_t psched_clock_per_hz;
int psched_clock_scale;
#endif

#ifdef PSCHED_WATCHER
PSCHED_WATCHER psched_time_mark;

static void psched_tick(unsigned long);

static struct timer_list psched_timer =
    { NULL, NULL, 0, 0L, psched_tick };

static void psched_tick(unsigned long dummy)
{
#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
    psched_time_t dummy_stamp;
    PSCHED_GET_TIME(dummy_stamp);
    psched_timer.expires = jiffies + 4*HZ;
#else
    unsigned long now = jiffies;
    psched_time_base = ((u64)now)<<PSCHED_JSCALE;
    psched_time_mark = now;
    psched_timer.expires = now + 60*60*HZ;
#endif
    add_timer(&psched_timer);
}
#endif
#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
__initfunc(int psched_calibrate_clock(void))
{
    psched_time_t stamp, stamp1;
    struct timeval tv, tv1;
    psched_tdiff_t delay;
    long rdelay;
    unsigned long stop;

#if CPU == 586 || CPU == 686
    if (!(boot_cpu_data.x86_capability & X86_FEATURE_TSC))
        return -1;
#endif

    start_bh_atomic();
#ifdef PSCHED_WATCHER
    psched_tick(0);
#endif
    stop = jiffies + HZ/10;
    PSCHED_GET_TIME(stamp);
    do_gettimeofday(&tv);
    while (jiffies < stop)
        barrier();
    PSCHED_GET_TIME(stamp1);
    do_gettimeofday(&tv1);
    end_bh_atomic();

    delay = PSCHED_TDIFF(stamp1, stamp);
    rdelay = tv1.tv_usec - tv.tv_usec;
    rdelay += (tv1.tv_sec - tv.tv_sec)*1000000;
    if (rdelay > delay)
        return -1;
    delay /= rdelay;
    psched_tick_per_us = delay;
    while ((delay>>=1) != 0)
        psched_clock_scale++;
    psched_us_per_tick = 1<<psched_clock_scale;
    psched_clock_per_hz = (psched_tick_per_us*(1000000/HZ))>>psched_clock_scale;
    return 0;
}
#endif
__initfunc(int pktsched_init(void))
{
#ifdef CONFIG_PROC_FS
    struct proc_dir_entry *ent;
#endif

#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
    if (psched_calibrate_clock() < 0)
        return -1;
#elif PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES
    psched_tick_per_us = HZ<<PSCHED_JSCALE;
    psched_us_per_tick = 1000000;
#endif

#ifdef CONFIG_RTNETLINK
    struct rtnetlink_link *link_p = rtnetlink_links[PF_UNSPEC];

    /* Set up rtnetlink links. It is done here to avoid
       exporting a large number of public symbols.
     */

    if (link_p) {
        link_p[RTM_NEWQDISC-RTM_BASE].doit = tc_ctl_qdisc;
        link_p[RTM_DELQDISC-RTM_BASE].doit = tc_ctl_qdisc;
        link_p[RTM_GETQDISC-RTM_BASE].doit = tc_ctl_qdisc;
        link_p[RTM_GETQDISC-RTM_BASE].dumpit = tc_dump_qdisc;
        link_p[RTM_NEWTCLASS-RTM_BASE].doit = tc_ctl_tclass;
        link_p[RTM_DELTCLASS-RTM_BASE].doit = tc_ctl_tclass;
        link_p[RTM_GETTCLASS-RTM_BASE].doit = tc_ctl_tclass;
        link_p[RTM_GETTCLASS-RTM_BASE].dumpit = tc_dump_tclass;
    }
#endif

#define INIT_QDISC(name) { \
    extern struct Qdisc_ops name##_qdisc_ops; \
    register_qdisc(&name##_qdisc_ops); \
}
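    /*
       For clarity, a sketch of what one invocation below expands to,
       e.g. INIT_QDISC(pfifo):

           {
               extern struct Qdisc_ops pfifo_qdisc_ops;
               register_qdisc(&pfifo_qdisc_ops);
           }

       i.e. each invocation declares the ops structure, which is defined in
       the corresponding scheduler module elsewhere in net/sched/, and adds
       it to the qdisc_base list maintained by register_qdisc() above.
     */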
    INIT_QDISC(pfifo);
    INIT_QDISC(bfifo);

#ifdef CONFIG_NET_SCH_CBQ
    INIT_QDISC(cbq);
#endif
#ifdef CONFIG_NET_SCH_CSZ
    INIT_QDISC(csz);
#endif
#ifdef CONFIG_NET_SCH_HPFQ
    INIT_QDISC(hpfq);
#endif
#ifdef CONFIG_NET_SCH_HFSC
    INIT_QDISC(hfsc);
#endif
#ifdef CONFIG_NET_SCH_RED
    INIT_QDISC(red);
#endif
#ifdef CONFIG_NET_SCH_SFQ
    INIT_QDISC(sfq);
#endif
#ifdef CONFIG_NET_SCH_TBF
    INIT_QDISC(tbf);
#endif
#ifdef CONFIG_NET_SCH_TEQL
    teql_init();
#endif
#ifdef CONFIG_NET_SCH_PRIO
    INIT_QDISC(prio);
#endif
#ifdef CONFIG_NET_CLS
    tc_filter_init();
#endif

#ifdef CONFIG_PROC_FS
    ent = create_proc_entry("net/psched", 0, 0);
    ent->read_proc = psched_read_proc;
#endif

    return 0;
}