Lines matching +full:1 +full:q in net/sched/sch_cbq.c (the Linux Class Based Queueing packet scheduler)

23 	Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource Management Models for Packet Networks", IEEE/ACM Transactions on Networking, Vol. 3, No. 4, 1995
48 allotment is W*r_i, and r_1+...+r_k = r < 1
103 struct Qdisc *q; /* Elementary queueing discipline */ member
111 level of children + 1 for nodes.
129 struct cbq_class *defaults[TC_PRIO_MAX + 1];
134 int nclasses[TC_CBQ_MAXPRIO + 1];
135 unsigned int quanta[TC_CBQ_MAXPRIO + 1];
140 struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes
166 cbq_class_lookup(struct cbq_sched_data *q, u32 classid) in cbq_class_lookup() argument
170 clc = qdisc_class_find(&q->clhash, classid); in cbq_class_lookup()
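
A hedged sketch of what cbq_class_lookup() relies on: qdisc_class_find() hashes the classid into q->clhash and walks a collision chain. The toy table below stands in for the kernel's hlist buckets; all names are illustrative, not the kernel's.

    #include <stdio.h>

    #define NBUCKETS 16

    struct toy_class {
            unsigned classid;
            struct toy_class *next;         /* collision chain */
    };

    static struct toy_class *buckets[NBUCKETS];

    static struct toy_class *toy_lookup(unsigned classid)
    {
            struct toy_class *cl;

            for (cl = buckets[classid % NBUCKETS]; cl; cl = cl->next)
                    if (cl->classid == classid)
                            return cl;
            return NULL;
    }

    int main(void)
    {
            struct toy_class a = { .classid = 0x10001, .next = NULL };

            buckets[a.classid % NBUCKETS] = &a;
            printf("found: %d\n", toy_lookup(0x10001) != NULL);
            return 0;
    }
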
207 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_classify() local
208 struct cbq_class *head = &q->link; in cbq_classify()
216 * Step 1. If skb->priority points to one of our classes, use it. in cbq_classify()
219 (cl = cbq_class_lookup(q, prio)) != NULL) in cbq_classify()
238 cl = cbq_class_lookup(q, res.classid); in cbq_classify()
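
The two lookups above happen in a fixed order. A minimal userspace sketch, assuming small integer classids (the kernel additionally checks that the major part of skb->priority matches the qdisc handle before trusting it):

    #include <stdio.h>

    #define NCLASSES 4

    static const unsigned known[NCLASSES] = { 0x10001, 0x10002, 0x10003, 0x10010 };

    static int is_known(unsigned classid)
    {
            int i;

            for (i = 0; i < NCLASSES; i++)
                    if (known[i] == classid)
                            return 1;
            return 0;
    }

    static unsigned classify(unsigned skb_priority, unsigned filter_verdict)
    {
            /* Step 1: skb->priority may carry a classid directly. */
            if (skb_priority && is_known(skb_priority))
                    return skb_priority;
            /* Step 2: otherwise trust the filter chain's verdict. */
            if (is_known(filter_verdict))
                    return filter_verdict;
            return 0;       /* the kernel falls back to a default class */
    }

    int main(void)
    {
            printf("0x%x\n", classify(0x10002, 0));         /* skb->priority wins */
            printf("0x%x\n", classify(7, 0x10010));         /* filters decide */
            return 0;
    }
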
293 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_activate_class() local
297 cl_tail = q->active[prio]; in cbq_activate_class()
298 q->active[prio] = cl; in cbq_activate_class()
305 q->activemask |= (1<<prio); in cbq_activate_class()
317 struct cbq_sched_data *q = qdisc_priv(this->qdisc); in cbq_deactivate_class() local
320 struct cbq_class *cl_prev = q->active[prio]; in cbq_deactivate_class()
328 if (cl == q->active[prio]) { in cbq_deactivate_class()
329 q->active[prio] = cl_prev; in cbq_deactivate_class()
330 if (cl == q->active[prio]) { in cbq_deactivate_class()
331 q->active[prio] = NULL; in cbq_deactivate_class()
332 q->activemask &= ~(1<<prio); in cbq_deactivate_class()
338 } while ((cl_prev = cl) != q->active[prio]); in cbq_deactivate_class()
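
cbq_activate_class() and cbq_deactivate_class() maintain one circular, singly linked ring per priority. Reading lines 297-298, q->active[prio] appears to point at the most recently added class, i.e. the tail, so the head is tail->next. A self-contained sketch of that ring discipline (types are illustrative):

    #include <stdio.h>

    struct ring_cl {
            struct ring_cl *next;
            int id;
    };

    static struct ring_cl *ring_add_tail(struct ring_cl *tail, struct ring_cl *cl)
    {
            if (tail) {
                    cl->next = tail->next;  /* new tail points at old head */
                    tail->next = cl;
            } else {
                    cl->next = cl;          /* single element: self-loop */
            }
            return cl;                      /* cl becomes the new tail */
    }

    int main(void)
    {
            struct ring_cl a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
            struct ring_cl *tail = NULL, *cl;

            tail = ring_add_tail(tail, &a);
            tail = ring_add_tail(tail, &b);
            tail = ring_add_tail(tail, &c);

            /* Walk one full round starting at the head. */
            cl = tail->next;
            do {
                    printf("class %d\n", cl->id);
                    cl = cl->next;
            } while (cl != tail->next);
            return 0;
    }
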
342 cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_mark_toplevel() argument
344 int toplevel = q->toplevel; in cbq_mark_toplevel()
351 q->toplevel = cl->level; in cbq_mark_toplevel()
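
A sketch of the walk cbq_mark_toplevel() performs, reconstructed from the fragment above plus the usual CBQ top-level rule: after a packet lands in class cl, lower q->toplevel to the level of the first class on cl's borrow chain that is currently under limit. The borrow-chain step is an assumption here, not shown in the matched lines.

    #include <stdio.h>

    struct tl_class {
            struct tl_class *borrow;        /* assumed borrow-parent link */
            int level;
            long undertime;
    };

    static void mark_toplevel(int *toplevel, struct tl_class *cl, long now)
    {
            int old = *toplevel;

            for (; cl && old > cl->level; cl = cl->borrow)
                    if (cl->undertime < now) {      /* under limit again */
                            *toplevel = cl->level;
                            return;
                    }
    }

    int main(void)
    {
            struct tl_class root = { .borrow = NULL,  .level = 2, .undertime = 0 };
            struct tl_class leaf = { .borrow = &root, .level = 0, .undertime = 99 };
            int toplevel = 3;       /* TC_CBQ_MAXLEVEL-like sentinel */

            mark_toplevel(&toplevel, &leaf, 50 /* now */);
            printf("toplevel: %d\n", toplevel);     /* 2: root is under limit */
            return 0;
    }
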
362 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_enqueue() local
367 q->rx_class = cl; in cbq_enqueue()
376 ret = qdisc_enqueue(skb, cl->q, to_free); in cbq_enqueue()
378 sch->q.qlen++; in cbq_enqueue()
379 cbq_mark_toplevel(q, cl); in cbq_enqueue()
387 cbq_mark_toplevel(q, cl); in cbq_enqueue()
396 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_overlimit() local
397 psched_tdiff_t delay = cl->undertime - q->now; in cbq_overlimit()
414 delay = 1; in cbq_overlimit()
415 cl->undertime = q->now + delay; in cbq_overlimit()
418 cl->delayed = 1; in cbq_overlimit()
420 if (q->wd_expires == 0 || q->wd_expires > delay) in cbq_overlimit()
421 q->wd_expires = delay; in cbq_overlimit()
427 if (q->toplevel == TC_CBQ_MAXLEVEL) { in cbq_overlimit()
429 psched_tdiff_t base_delay = q->wd_expires; in cbq_overlimit()
432 delay = b->undertime - q->now; in cbq_overlimit()
435 delay = 1; in cbq_overlimit()
440 q->wd_expires = base_delay; in cbq_overlimit()
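
The matched lines show only the clamp to delay = 1 and the new undertime; the offtime and negative-avgidle terms below are recalled from the surrounding kernel code and should be treated as assumptions. A userspace sketch of the arithmetic:

    #include <stdio.h>

    static long overlimit_delay(long undertime, long now, long offtime,
                                long avgidle, int ewma_log)
    {
            long delay = undertime - now;

            delay += offtime;                       /* assumed extra penalty */
            if (avgidle < 0)                        /* deeper in debt: wait more */
                    delay -= (-avgidle) - ((-avgidle) >> ewma_log);
            if (delay <= 0)
                    delay = 1;                      /* as in the source: never 0 */
            return delay;
    }

    int main(void)
    {
            /* undertime 10 ticks away, offtime 16, mildly negative avgidle */
            printf("delay=%ld\n", overlimit_delay(100, 90, 16, -8, 5));  /* 18 */
            return 0;
    }
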
444 static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio, in cbq_undelay_prio() argument
448 struct cbq_class *cl_prev = q->active[prio]; in cbq_undelay_prio()
463 if (cl == q->active[prio]) { in cbq_undelay_prio()
464 q->active[prio] = cl_prev; in cbq_undelay_prio()
465 if (cl == q->active[prio]) { in cbq_undelay_prio()
466 q->active[prio] = NULL; in cbq_undelay_prio()
474 } while ((cl_prev = cl) != q->active[prio]); in cbq_undelay_prio()
481 struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data, in cbq_undelay() local
483 struct Qdisc *sch = q->watchdog.qdisc; in cbq_undelay()
490 pmask = q->pmask; in cbq_undelay()
491 q->pmask = 0; in cbq_undelay()
497 pmask &= ~(1<<prio); in cbq_undelay()
499 tmp = cbq_undelay_prio(q, prio, now); in cbq_undelay()
501 q->pmask |= 1<<prio; in cbq_undelay()
512 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED); in cbq_undelay()
529 cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, in cbq_update_toplevel() argument
532 if (cl && q->toplevel >= borrowed->level) { in cbq_update_toplevel()
533 if (cl->q->q.qlen > 1) { in cbq_update_toplevel()
536 q->toplevel = borrowed->level; in cbq_update_toplevel()
545 q->toplevel = TC_CBQ_MAXLEVEL; in cbq_update_toplevel()
551 cbq_update(struct cbq_sched_data *q) in cbq_update() argument
553 struct cbq_class *this = q->tx_class; in cbq_update()
555 int len = q->tx_len; in cbq_update()
558 q->tx_class = NULL; in cbq_update()
562 now = q->now + L2T(&q->link, len); in cbq_update()
584 /* true_avgidle := (1-W)*true_avgidle + W*idle, in cbq_update()
603 * (1-W)*true_avgidle + W*delay = 0, i.e. in cbq_update()
604 * idle = (1/W - 1)*(-true_avgidle) in cbq_update()
606 * idle = (1 - W)*(-cl->avgidle); in cbq_update()
619 idle -= L2T(&q->link, len); in cbq_update()
636 cbq_update_toplevel(q, this, q->tx_borrowed); in cbq_update()
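
The comments at lines 584-606 carry the key derivation: with filter constant W = 2^-ewma_log, true_avgidle := (1-W)*true_avgidle + W*idle, and cl->avgidle stores true_avgidle/W, so the update collapses to one add and one shift. Setting the EWMA to zero gives the steady-state compensation idle = (1/W - 1)*(-true_avgidle), i.e. idle = (1 - W)*(-cl->avgidle) in scaled form. A runnable sketch of the scaled update:

    #include <stdio.h>

    static long avgidle_update(long avgidle, long idle, int ewma_log)
    {
            /* scaled form of (1-W)*avg + W*idle, since avgidle == true/W */
            return avgidle + idle - (avgidle >> ewma_log);
    }

    int main(void)
    {
            long avgidle = 0;
            int ewma_log = 5;                       /* W = 1/32 */
            long idle_samples[] = { 40, -10, -10, -10, 25 };
            size_t i;

            for (i = 0; i < sizeof(idle_samples)/sizeof(idle_samples[0]); i++) {
                    avgidle = avgidle_update(avgidle, idle_samples[i], ewma_log);
                    printf("idle=%4ld -> avgidle=%4ld\n", idle_samples[i], avgidle);
            }
            return 0;
    }
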
642 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_under_limit() local
648 if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) { in cbq_under_limit()
670 if (cl->level > q->toplevel) in cbq_under_limit()
672 } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime); in cbq_under_limit()
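
cbq_under_limit() walks the borrow chain: an over-limit class may still transmit by borrowing from an under-limit ancestor, but never from above q->toplevel (line 670). A compact sketch under illustrative types:

    #include <stdio.h>

    struct toy_class {
            struct toy_class *borrow;       /* ancestor to borrow from, NULL at root */
            int level;
            int under_limit;                /* 1 once undertime has passed */
    };

    static struct toy_class *find_lender(struct toy_class *cl, int toplevel)
    {
            while (!cl->under_limit) {
                    cl = cl->borrow;
                    if (!cl || cl->level > toplevel)
                            return NULL;    /* nobody may lend: packet waits */
            }
            return cl;
    }

    int main(void)
    {
            struct toy_class root   = { NULL,    2, 1 };
            struct toy_class agency = { &root,   1, 0 };
            struct toy_class leaf   = { &agency, 0, 0 };
            struct toy_class *lender = find_lender(&leaf, 2);

            printf("lender level: %d\n", lender ? lender->level : -1);  /* 2 */
            return 0;
    }
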
681 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dequeue_prio() local
686 cl_tail = cl_prev = q->active[prio]; in cbq_dequeue_prio()
696 if (cl->q->q.qlen && in cbq_dequeue_prio()
704 deficit = 1; in cbq_dequeue_prio()
709 skb = cl->q->dequeue(cl->q); in cbq_dequeue_prio()
712 * It could occur even if cl->q->q.qlen != 0 in cbq_dequeue_prio()
713 * f.e. if cl->q == "tbf" in cbq_dequeue_prio()
719 q->tx_class = cl; in cbq_dequeue_prio()
720 q->tx_borrowed = borrow; in cbq_dequeue_prio()
730 q->tx_len = qdisc_pkt_len(skb); in cbq_dequeue_prio()
733 q->active[prio] = cl; in cbq_dequeue_prio()
740 if (cl->q->q.qlen == 0 || prio != cl->cpriority) { in cbq_dequeue_prio()
755 q->active[prio] = NULL; in cbq_dequeue_prio()
756 q->activemask &= ~(1<<prio); in cbq_dequeue_prio()
757 if (cl->q->q.qlen) in cbq_dequeue_prio()
762 q->active[prio] = cl_tail; in cbq_dequeue_prio()
764 if (cl->q->q.qlen) in cbq_dequeue_prio()
776 q->active[prio] = cl_prev; in cbq_dequeue_prio()
784 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dequeue_1() local
788 activemask = q->activemask & 0xFF; in cbq_dequeue_1()
791 activemask &= ~(1<<prio); in cbq_dequeue_1()
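
cbq_dequeue_1() serves the lowest-numbered (best) priority whose bit is set in activemask, clearing bits as it goes. In the sketch below __builtin_ctz plays the role the kernel gives to ffz(~activemask); that equivalence is my reading, not shown in the matched lines.

    #include <stdio.h>

    int main(void)
    {
            unsigned activemask = 0x25;     /* priorities 0, 2 and 5 active */

            while (activemask) {
                    int prio = __builtin_ctz(activemask);   /* lowest set bit */

                    activemask &= ~(1u << prio);
                    printf("dequeue priority %d\n", prio);
                    /* the kernel would try cbq_dequeue_prio(sch, prio) here */
            }
            return 0;
    }
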
803 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dequeue() local
808 if (q->tx_class) in cbq_dequeue()
809 cbq_update(q); in cbq_dequeue()
811 q->now = now; in cbq_dequeue()
814 q->wd_expires = 0; in cbq_dequeue()
819 sch->q.qlen--; in cbq_dequeue()
827 * 1. Scheduler is empty. in cbq_dequeue()
841 if (q->toplevel == TC_CBQ_MAXLEVEL && in cbq_dequeue()
842 q->link.undertime == PSCHED_PASTPERFECT) in cbq_dequeue()
845 q->toplevel = TC_CBQ_MAXLEVEL; in cbq_dequeue()
846 q->link.undertime = PSCHED_PASTPERFECT; in cbq_dequeue()
853 if (sch->q.qlen) { in cbq_dequeue()
855 if (q->wd_expires) in cbq_dequeue()
856 qdisc_watchdog_schedule(&q->watchdog, in cbq_dequeue()
857 now + q->wd_expires); in cbq_dequeue()
880 this->level = level + 1; in cbq_adjust_levels()
884 static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) in cbq_normalize_quanta() argument
889 if (q->quanta[prio] == 0) in cbq_normalize_quanta()
892 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_normalize_quanta()
893 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_normalize_quanta()
898 cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ in cbq_normalize_quanta()
899 q->quanta[prio]; in cbq_normalize_quanta()
905 cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1; in cbq_normalize_quanta()
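
Lines 898-899 give the normalization: quantum = weight*allot*nclasses/quanta, where q->quanta[prio] is the sum of weights at that priority, so the mean quantum per WRR round stays near allot; line 905 is the sanity fallback (mtu/2 + 1) for out-of-range results. A worked example with illustrative structures:

    #include <stdio.h>

    struct toy_class { unsigned weight, allot, quantum; };

    int main(void)
    {
            /* three classes at one priority; weight_sum is q->quanta[prio] */
            struct toy_class cls[] = {
                    { .weight = 1, .allot = 1500 },
                    { .weight = 2, .allot = 1500 },
                    { .weight = 5, .allot = 1500 },
            };
            unsigned n = 3, weight_sum = 0, i;

            for (i = 0; i < n; i++)
                    weight_sum += cls[i].weight;

            for (i = 0; i < n; i++) {
                    cls[i].quantum = cls[i].weight * cls[i].allot * n / weight_sum;
                    printf("class %u: quantum=%u\n", i, cls[i].quantum);
            }
            return 0;   /* prints 562, 1125, 2812: proportional to weight */
    }
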
913 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_sync_defmap() local
922 if (split->defaults[i] == cl && !(cl->defmap & (1<<i))) in cbq_sync_defmap()
932 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_sync_defmap()
935 hlist_for_each_entry(c, &q->clhash.hash[h], in cbq_sync_defmap()
938 c->defmap & (1<<i)) { in cbq_sync_defmap()
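
cbq_sync_defmap() keeps split->defaults[prio] consistent with the per-class defmap bitmaps: bit i in cl->defmap means cl wants to be the default class for TC_PRIO value i. A toy resync under illustrative structures:

    #include <stdio.h>

    #define TOY_PRIO_MAX 8

    struct toy_class {
            const char *name;
            unsigned defmap;        /* bit i set: default for priority i */
    };

    static void sync_defaults(struct toy_class *defaults[TOY_PRIO_MAX],
                              struct toy_class *classes[], int n)
    {
            int i, c;

            for (i = 0; i < TOY_PRIO_MAX; i++) {
                    defaults[i] = NULL;
                    for (c = 0; c < n; c++)
                            if (classes[c]->defmap & (1u << i)) {
                                    defaults[i] = classes[c];
                                    break;
                            }
            }
    }

    int main(void)
    {
            struct toy_class bulk = { "bulk", 0x03 };        /* prio 0 and 1 */
            struct toy_class intr = { "interactive", 0x04 }; /* prio 2 */
            struct toy_class *all[] = { &bulk, &intr };
            struct toy_class *defaults[TOY_PRIO_MAX];
            int i;

            sync_defaults(defaults, all, 2);
            for (i = 0; i < 3; i++)
                    printf("prio %d -> %s\n", i, defaults[i] ? defaults[i]->name : "-");
            return 0;
    }
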
981 struct cbq_sched_data *q = qdisc_priv(this->qdisc); in cbq_unlink_class() local
983 qdisc_class_hash_remove(&q->clhash, &this->common); in cbq_unlink_class()
1008 struct cbq_sched_data *q = qdisc_priv(this->qdisc); in cbq_link_class() local
1012 qdisc_class_hash_insert(&q->clhash, &this->common); in cbq_link_class()
1028 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_reset() local
1033 q->activemask = 0; in cbq_reset()
1034 q->pmask = 0; in cbq_reset()
1035 q->tx_class = NULL; in cbq_reset()
1036 q->tx_borrowed = NULL; in cbq_reset()
1037 qdisc_watchdog_cancel(&q->watchdog); in cbq_reset()
1038 hrtimer_cancel(&q->delay_timer); in cbq_reset()
1039 q->toplevel = TC_CBQ_MAXLEVEL; in cbq_reset()
1040 q->now = psched_get_time(); in cbq_reset()
1043 q->active[prio] = NULL; in cbq_reset()
1045 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_reset()
1046 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_reset()
1047 qdisc_reset(cl->q); in cbq_reset()
1056 sch->q.qlen = 0; in cbq_reset()
1081 static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_rmprio() argument
1083 q->nclasses[cl->priority]--; in cbq_rmprio()
1084 q->quanta[cl->priority] -= cl->weight; in cbq_rmprio()
1085 cbq_normalize_quanta(q, cl->priority); in cbq_rmprio()
1088 static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_addprio() argument
1090 q->nclasses[cl->priority]++; in cbq_addprio()
1091 q->quanta[cl->priority] += cl->weight; in cbq_addprio()
1092 cbq_normalize_quanta(q, cl->priority); in cbq_addprio()
1097 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_set_wrr() local
1104 cl->priority = wrr->priority - 1; in cbq_set_wrr()
1107 cl->priority2 = TC_CBQ_MAXPRIO - 1; in cbq_set_wrr()
1110 cbq_addprio(q, cl); in cbq_set_wrr()
1120 static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
1130 static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1], in cbq_opt_parse() argument
1160 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_init() local
1161 struct nlattr *tb[TCA_CBQ_MAX + 1]; in cbq_init()
1165 qdisc_watchdog_init(&q->watchdog, sch); in cbq_init()
1166 hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); in cbq_init()
1167 q->delay_timer.function = cbq_undelay; in cbq_init()
1180 q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack); in cbq_init()
1181 if (!q->link.R_tab) in cbq_init()
1184 err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack); in cbq_init()
1188 err = qdisc_class_hash_init(&q->clhash); in cbq_init()
1192 q->link.sibling = &q->link; in cbq_init()
1193 q->link.common.classid = sch->handle; in cbq_init()
1194 q->link.qdisc = sch; in cbq_init()
1195 q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, in cbq_init()
1197 if (!q->link.q) in cbq_init()
1198 q->link.q = &noop_qdisc; in cbq_init()
1200 qdisc_hash_add(q->link.q, true); in cbq_init()
1202 q->link.priority = TC_CBQ_MAXPRIO - 1; in cbq_init()
1203 q->link.priority2 = TC_CBQ_MAXPRIO - 1; in cbq_init()
1204 q->link.cpriority = TC_CBQ_MAXPRIO - 1; in cbq_init()
1205 q->link.allot = psched_mtu(qdisc_dev(sch)); in cbq_init()
1206 q->link.quantum = q->link.allot; in cbq_init()
1207 q->link.weight = q->link.R_tab->rate.rate; in cbq_init()
1209 q->link.ewma_log = TC_CBQ_DEF_EWMA; in cbq_init()
1210 q->link.avpkt = q->link.allot/2; in cbq_init()
1211 q->link.minidle = -0x7FFFFFFF; in cbq_init()
1213 q->toplevel = TC_CBQ_MAXLEVEL; in cbq_init()
1214 q->now = psched_get_time(); in cbq_init()
1216 cbq_link_class(&q->link); in cbq_init()
1219 cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT])); in cbq_init()
1221 cbq_addprio(q, &q->link); in cbq_init()
1225 tcf_block_put(q->link.block); in cbq_init()
1228 qdisc_put_rtab(q->link.R_tab); in cbq_init()
1242 return -1; in cbq_dump_rate()
1268 return -1; in cbq_dump_lss()
1279 opt.priority = cl->priority + 1; in cbq_dump_wrr()
1280 opt.cpriority = cl->cpriority + 1; in cbq_dump_wrr()
1288 return -1; in cbq_dump_wrr()
1307 return -1; in cbq_dump_fopt()
1316 return -1; in cbq_dump_attr()
1322 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dump() local
1328 if (cbq_dump_attr(skb, &q->link) < 0) in cbq_dump()
1334 return -1; in cbq_dump()
1340 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dump_stats() local
1342 q->link.xstats.avgidle = q->link.avgidle; in cbq_dump_stats()
1343 return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats)); in cbq_dump_stats()
1358 tcm->tcm_info = cl->q->handle; in cbq_dump_class()
1369 return -1; in cbq_dump_class()
1376 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_dump_class_stats() local
1382 qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog); in cbq_dump_class_stats()
1385 cl->xstats.undertime = cl->undertime - q->now; in cbq_dump_class_stats()
1391 return -1; in cbq_dump_class_stats()
1408 *old = qdisc_replace(sch, new, &cl->q); in cbq_graft()
1416 return cl->q; in cbq_leaf()
1428 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_find() local
1430 return (unsigned long)cbq_class_lookup(q, classid); in cbq_find()
1435 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_destroy_class() local
1440 qdisc_put(cl->q); in cbq_destroy_class()
1443 if (cl != &q->link) in cbq_destroy_class()
1449 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_destroy() local
1455 q->rx_class = NULL; in cbq_destroy()
1462 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_destroy()
1463 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_destroy()
1468 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_destroy()
1469 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h], in cbq_destroy()
1473 qdisc_class_hash_destroy(&q->clhash); in cbq_destroy()
1481 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_change_class() local
1484 struct nlattr *tb[TCA_CBQ_MAX + 1]; in cbq_change_class()
1546 cbq_rmprio(q, cl); in cbq_change_class()
1553 if (cl->q->q.qlen) in cbq_change_class()
1577 cbq_class_lookup(q, classid)) { in cbq_change_class()
1586 if (++q->hgenerator >= 0x8000) in cbq_change_class()
1587 q->hgenerator = 1; in cbq_change_class()
1588 if (cbq_class_lookup(q, classid|q->hgenerator) == NULL) in cbq_change_class()
1596 classid = classid|q->hgenerator; in cbq_change_class()
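
Lines 1586-1596 are the minor-id allocator: hgenerator cycles through 1..0x7fff until a minor that is free under the qdisc's major handle turns up. A standalone sketch with a toy in_use() predicate in place of cbq_class_lookup():

    #include <stdio.h>

    static int in_use(unsigned classid)
    {
            unsigned minor = classid & 0xFFFF;

            return minor == 1 || minor == 2;        /* toy: minors 1, 2 taken */
    }

    static unsigned alloc_classid(unsigned major, unsigned *hgen)
    {
            int i;

            for (i = 0; i < 0x8000; i++) {
                    if (++(*hgen) >= 0x8000)
                            *hgen = 1;              /* wrap, skipping minor 0 */
                    if (!in_use(major | *hgen))
                            return major | *hgen;
            }
            return 0;                               /* all 32767 minors taken */
    }

    int main(void)
    {
            unsigned hgen = 0;

            printf("classid=0x%x\n", alloc_classid(0x10000, &hgen)); /* 0x10003 */
            return 0;
    }
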
1599 parent = &q->link; in cbq_change_class()
1601 parent = cbq_class_lookup(q, parentid); in cbq_change_class()
1635 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid, in cbq_change_class()
1637 if (!cl->q) in cbq_change_class()
1638 cl->q = &noop_qdisc; in cbq_change_class()
1640 qdisc_hash_add(cl->q, true); in cbq_change_class()
1652 if (cl->tparent != &q->link) in cbq_change_class()
1659 cl->ewma_log = q->link.ewma_log; in cbq_change_class()
1661 cl->maxidle = q->link.maxidle; in cbq_change_class()
1663 cl->avpkt = q->link.avpkt; in cbq_change_class()
1668 qdisc_class_hash_grow(sch, &q->clhash); in cbq_change_class()
1681 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_delete() local
1684 if (cl->filters || cl->children || cl == &q->link) in cbq_delete()
1689 qdisc_purge_queue(cl->q); in cbq_delete()
1694 if (q->tx_borrowed == cl) in cbq_delete()
1695 q->tx_borrowed = q->tx_class; in cbq_delete()
1696 if (q->tx_class == cl) { in cbq_delete()
1697 q->tx_class = NULL; in cbq_delete()
1698 q->tx_borrowed = NULL; in cbq_delete()
1701 if (q->rx_class == cl) in cbq_delete()
1702 q->rx_class = NULL; in cbq_delete()
1710 cbq_rmprio(q, cl); in cbq_delete()
1720 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_tcf_block() local
1724 cl = &q->link; in cbq_tcf_block()
1732 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_bind_filter() local
1734 struct cbq_class *cl = cbq_class_lookup(q, classid); in cbq_bind_filter()
1754 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_walk() local
1761 for (h = 0; h < q->clhash.hashsize; h++) { in cbq_walk()
1762 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_walk()
1768 arg->stop = 1; in cbq_walk()