Lines matching `refs:cl` — a cross-reference of the identifier `cl` (`struct cbq_class *`, the per-class cursor) in the Linux CBQ packet scheduler, net/sched/sch_cbq.c. Each entry shows the source line number, the matching line, and the enclosing function; the trailing `argument`/`local` tags mark lines where `cl` is bound as a parameter or declared as a local variable.

168 #define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)  argument
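The first hit is the `L2T` helper: it converts a packet length into that class's transmission time by indexing the class rate table (`R_tab`) through `qdisc_l2t()`. A minimal user-space stand-in for what the lookup computes, assuming a plain bytes-per-second rate (all names below are hypothetical):

```c
/* Rough stand-in for L2T(cl, len): the kernel version is a rate-table
 * lookup, but the quantity it yields is the time needed to transmit
 * len bytes at the class's configured rate, in scheduler ticks.
 */
static long l2t_approx(long len_bytes, long rate_bytes_per_sec,
		       long ticks_per_sec)
{
	return (long)(((long long)len_bytes * ticks_per_sec) /
		      rate_bytes_per_sec);
}
```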
186 struct cbq_class *cl; in cbq_reclassify() local
188 for (cl = this->tparent; cl; cl = cl->tparent) { in cbq_reclassify()
189 struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT]; in cbq_reclassify()
215 struct cbq_class *cl = NULL; in cbq_classify() local
224 (cl = cbq_class_lookup(q, prio)) != NULL) in cbq_classify()
225 return cl; in cbq_classify()
240 cl = (void *)res.class; in cbq_classify()
241 if (!cl) { in cbq_classify()
243 cl = cbq_class_lookup(q, res.classid); in cbq_classify()
244 else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL) in cbq_classify()
245 cl = defmap[TC_PRIO_BESTEFFORT]; in cbq_classify()
247 if (cl == NULL) in cbq_classify()
250 if (cl->level >= head->level) in cbq_classify()
262 return cbq_reclassify(skb, cl); in cbq_classify()
265 if (cl->level == 0) in cbq_classify()
266 return cl; in cbq_classify()
273 head = cl; in cbq_classify()
277 cl = head; in cbq_classify()
283 !(cl = head->defaults[prio & TC_PRIO_MAX]) && in cbq_classify()
284 !(cl = head->defaults[TC_PRIO_BESTEFFORT])) in cbq_classify()
287 return cl; in cbq_classify()
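The `cbq_classify()` hits above show a two-stage resolution: the filter result is mapped to a class, and when that fails (or the match is a split node) the class is taken from the node's `defaults[]` table, first by packet priority and then by the best-effort slot. A minimal sketch of that fallback, assuming `defaults[]` is indexed exactly as in the lines above (struct heavily simplified):

```c
#define TC_PRIO_MAX        15
#define TC_PRIO_BESTEFFORT  0

struct cbq_class_sketch {
	struct cbq_class_sketch *defaults[TC_PRIO_MAX + 1];
};

/* Defmap fallback mirrored from the listing: try the slot for this
 * priority, then the best-effort slot; a null result means "no match"
 * and the caller falls back to its own default handling.
 */
static struct cbq_class_sketch *
defmap_resolve(struct cbq_class_sketch *head, unsigned int prio)
{
	struct cbq_class_sketch *cl = head->defaults[prio & TC_PRIO_MAX];

	if (!cl)
		cl = head->defaults[TC_PRIO_BESTEFFORT];
	return cl;
}
```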
296 static inline void cbq_activate_class(struct cbq_class *cl) in cbq_activate_class() argument
298 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_activate_class()
299 int prio = cl->cpriority; in cbq_activate_class()
303 q->active[prio] = cl; in cbq_activate_class()
306 cl->next_alive = cl_tail->next_alive; in cbq_activate_class()
307 cl_tail->next_alive = cl; in cbq_activate_class()
309 cl->next_alive = cl; in cbq_activate_class()
324 struct cbq_class *cl; in cbq_deactivate_class() local
328 cl = cl_prev->next_alive; in cbq_deactivate_class()
329 if (cl == this) { in cbq_deactivate_class()
330 cl_prev->next_alive = cl->next_alive; in cbq_deactivate_class()
331 cl->next_alive = NULL; in cbq_deactivate_class()
333 if (cl == q->active[prio]) { in cbq_deactivate_class()
335 if (cl == q->active[prio]) { in cbq_deactivate_class()
343 } while ((cl_prev = cl) != q->active[prio]); in cbq_deactivate_class()
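`cbq_activate_class()` and `cbq_deactivate_class()` maintain one circular singly linked list of alive classes per priority, threaded through `next_alive`, with `q->active[prio]` pointing at the ring's tail (so `tail->next_alive` is the head). A self-contained sketch of that ring discipline, assuming the same tail-pointer convention as the lines above:

```c
#include <stddef.h>

struct ring_node {
	struct ring_node *next_alive;
};

/* Insertion as in cbq_activate_class(): the new node becomes the
 * tail; alone in the ring it points at itself.
 */
static void ring_activate(struct ring_node **tail, struct ring_node *n)
{
	struct ring_node *old_tail = *tail;

	*tail = n;
	if (old_tail) {
		n->next_alive = old_tail->next_alive;	/* old head */
		old_tail->next_alive = n;
	} else {
		n->next_alive = n;
	}
}

/* Removal as in cbq_deactivate_class(): walk from the tail to find
 * n's predecessor, splice n out, and fix up the tail pointer when n
 * was the tail (or the only member).
 */
static void ring_deactivate(struct ring_node **tail, struct ring_node *n)
{
	struct ring_node *prev = *tail;

	do {
		struct ring_node *cur = prev->next_alive;

		if (cur == n) {
			prev->next_alive = cur->next_alive;
			cur->next_alive = NULL;
			if (cur == *tail)
				*tail = (prev == cur) ? NULL : prev;
			return;
		}
		prev = cur;
	} while (prev != *tail);
}
```

This also explains the `if (!cl->next_alive) cbq_activate_class(cl)` test in `cbq_enqueue()` below: `next_alive == NULL` doubles as the "not in any ring" marker.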
347 cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_mark_toplevel() argument
351 if (toplevel > cl->level) { in cbq_mark_toplevel()
355 if (cl->undertime < now) { in cbq_mark_toplevel()
356 q->toplevel = cl->level; in cbq_mark_toplevel()
359 } while ((cl = cl->borrow) != NULL && toplevel > cl->level); in cbq_mark_toplevel()
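`cbq_mark_toplevel()` is the top-level borrowing heuristic: climbing the `borrow` chain, it lowers the scheduler-wide `toplevel` to the level of the first ancestor that is already under its limit (`undertime` in the past), capping how far up later borrowing may reach. A rough sketch reconstructed from the loop above (fields trimmed, time type simplified):

```c
#include <stdint.h>

struct tl_class {
	struct tl_class *borrow;	/* ancestor to borrow from */
	int level;
	int64_t undertime;
};

/* Lower *toplevel to the first under-limit ancestor's level, but only
 * while that would actually lower it (*toplevel > cl->level).
 */
static void mark_toplevel(int *toplevel, struct tl_class *cl, int64_t now)
{
	while (cl && *toplevel > cl->level) {
		if (cl->undertime < now) {
			*toplevel = cl->level;
			return;
		}
		cl = cl->borrow;
	}
}
```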
369 struct cbq_class *cl = cbq_classify(skb, sch, &ret); in cbq_enqueue() local
372 q->rx_class = cl; in cbq_enqueue()
374 if (cl == NULL) { in cbq_enqueue()
381 ret = qdisc_enqueue(skb, cl->q, to_free); in cbq_enqueue()
384 cbq_mark_toplevel(q, cl); in cbq_enqueue()
385 if (!cl->next_alive) in cbq_enqueue()
386 cbq_activate_class(cl); in cbq_enqueue()
392 cbq_mark_toplevel(q, cl); in cbq_enqueue()
393 cl->qstats.drops++; in cbq_enqueue()
399 static void cbq_overlimit(struct cbq_class *cl) in cbq_overlimit() argument
401 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_overlimit()
402 psched_tdiff_t delay = cl->undertime - q->now; in cbq_overlimit()
404 if (!cl->delayed) { in cbq_overlimit()
405 delay += cl->offtime; in cbq_overlimit()
414 if (cl->avgidle < 0) in cbq_overlimit()
415 delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log); in cbq_overlimit()
416 if (cl->avgidle < cl->minidle) in cbq_overlimit()
417 cl->avgidle = cl->minidle; in cbq_overlimit()
420 cl->undertime = q->now + delay; in cbq_overlimit()
422 cl->xstats.overactions++; in cbq_overlimit()
423 cl->delayed = 1; in cbq_overlimit()
436 for (b = cl->borrow; b; b = b->borrow) { in cbq_overlimit()
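Read together, the `cbq_overlimit()` hits give the sleep time charged to a class that has just gone over limit: the distance to `undertime`, plus the configured `offtime`, minus a forgiveness term when `avgidle` is negative (the class will sleep anyway, so part of its idle debt is waived). As a formula, with $W = 2^{-\mathit{ewma\_log}}$:

```latex
\Delta t \;=\; (\mathit{undertime} - \mathit{now}) \;+\; \mathit{offtime}
\;-\; \underbrace{(-\mathit{avgidle})\,(1 - W)}_{\text{only if } \mathit{avgidle} < 0}
```

The trailing `for (b = cl->borrow; ...)` loop appears to shorten the scheduler watchdog to the earliest `undertime` among the ancestors, so a lender that wakes up sooner is not missed.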
452 struct cbq_class *cl; in cbq_undelay_prio() local
460 cl = cl_prev->next_alive; in cbq_undelay_prio()
461 if (now - cl->penalized > 0) { in cbq_undelay_prio()
462 cl_prev->next_alive = cl->next_alive; in cbq_undelay_prio()
463 cl->next_alive = NULL; in cbq_undelay_prio()
464 cl->cpriority = cl->priority; in cbq_undelay_prio()
465 cl->delayed = 0; in cbq_undelay_prio()
466 cbq_activate_class(cl); in cbq_undelay_prio()
468 if (cl == q->active[prio]) { in cbq_undelay_prio()
470 if (cl == q->active[prio]) { in cbq_undelay_prio()
476 cl = cl_prev->next_alive; in cbq_undelay_prio()
477 } else if (sched - cl->penalized > 0) in cbq_undelay_prio()
478 sched = cl->penalized; in cbq_undelay_prio()
479 } while ((cl_prev = cl) != q->active[prio]); in cbq_undelay_prio()
534 cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, in cbq_update_toplevel() argument
537 if (cl && q->toplevel >= borrowed->level) { in cbq_update_toplevel()
538 if (cl->q->q.qlen > 1) { in cbq_update_toplevel()
559 struct cbq_class *cl = this; in cbq_update() local
569 for ( ; cl; cl = cl->share) { in cbq_update()
570 long avgidle = cl->avgidle; in cbq_update()
573 cl->bstats.packets++; in cbq_update()
574 cl->bstats.bytes += len; in cbq_update()
583 idle = now - cl->last; in cbq_update()
585 avgidle = cl->maxidle; in cbq_update()
587 idle -= L2T(cl, len); in cbq_update()
594 avgidle += idle - (avgidle>>cl->ewma_log); in cbq_update()
600 if (avgidle < cl->minidle) in cbq_update()
601 avgidle = cl->minidle; in cbq_update()
603 cl->avgidle = avgidle; in cbq_update()
613 idle = (-avgidle) - ((-avgidle) >> cl->ewma_log); in cbq_update()
625 idle += L2T(cl, len); in cbq_update()
627 cl->undertime = now + idle; in cbq_update()
631 cl->undertime = PSCHED_PASTPERFECT; in cbq_update()
632 if (avgidle > cl->maxidle) in cbq_update()
633 cl->avgidle = cl->maxidle; in cbq_update()
635 cl->avgidle = avgidle; in cbq_update()
637 if ((s64)(now - cl->last) > 0) in cbq_update()
638 cl->last = now; in cbq_update()
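The heart of `cbq_update()` is the Floyd–Jacobson idle-time estimator. `idle` is the gap between packets minus this packet's transmission time `L2T(cl, len)`, and it feeds an EWMA. With $W = 2^{-\mathit{ewma\_log}}$ and true average $\bar a$:

```latex
\bar a \;\leftarrow\; (1 - W)\,\bar a \;+\; W \cdot \mathit{idle}
```

The kernel stores the scaled value $a = \bar a / W$, which turns the update into exactly the shift-and-add in the listing: $a \leftarrow a + \mathit{idle} - (a \gg \mathit{ewma\_log})$. A negative $a$ means the class has been sending faster than its rate; setting $\bar a_{\text{new}} = 0$ in the EWMA and solving gives the silence needed to repay the debt, $\mathit{idle} = (-a)(1 - W)$, which is the `(-avgidle) - ((-avgidle) >> ewma_log)` expression above.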
645 cbq_under_limit(struct cbq_class *cl) in cbq_under_limit() argument
647 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_under_limit()
648 struct cbq_class *this_cl = cl; in cbq_under_limit()
650 if (cl->tparent == NULL) in cbq_under_limit()
651 return cl; in cbq_under_limit()
653 if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) { in cbq_under_limit()
654 cl->delayed = 0; in cbq_under_limit()
655 return cl; in cbq_under_limit()
669 cl = cl->borrow; in cbq_under_limit()
670 if (!cl) { in cbq_under_limit()
675 if (cl->level > q->toplevel) in cbq_under_limit()
677 } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime); in cbq_under_limit()
679 cl->delayed = 0; in cbq_under_limit()
680 return cl; in cbq_under_limit()
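`cbq_under_limit()` answers "may this class send now, and on whose budget?": a class with no parent always may; an under-limit class sends on its own; an over-limit class walks its `borrow` chain for an under-limit ancestor, giving up if the chain ends or climbs above `q->toplevel`. A compact sketch of that walk (simplified types; `0` standing in for `PSCHED_PASTPERFECT`):

```c
#include <stddef.h>
#include <stdint.h>

struct bw_class {
	struct bw_class *borrow;
	int level;
	int64_t undertime;	/* 0 means "never over limit" */
};

/* Returns the class to charge for the next packet, or NULL if the
 * whole borrow chain is over limit or capped by toplevel.
 */
static struct bw_class *
borrow_resolve(struct bw_class *cl, int64_t now, int toplevel)
{
	while (cl->undertime != 0 && now < cl->undertime) {
		cl = cl->borrow;
		if (!cl || cl->level > toplevel)
			return NULL;
	}
	return cl;
}
```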
687 struct cbq_class *cl_tail, *cl_prev, *cl; in cbq_dequeue_prio() local
692 cl = cl_prev->next_alive; in cbq_dequeue_prio()
699 struct cbq_class *borrow = cl; in cbq_dequeue_prio()
701 if (cl->q->q.qlen && in cbq_dequeue_prio()
702 (borrow = cbq_under_limit(cl)) == NULL) in cbq_dequeue_prio()
705 if (cl->deficit <= 0) { in cbq_dequeue_prio()
710 cl->deficit += cl->quantum; in cbq_dequeue_prio()
714 skb = cl->q->dequeue(cl->q); in cbq_dequeue_prio()
723 cl->deficit -= qdisc_pkt_len(skb); in cbq_dequeue_prio()
724 q->tx_class = cl; in cbq_dequeue_prio()
726 if (borrow != cl) { in cbq_dequeue_prio()
729 cl->xstats.borrows++; in cbq_dequeue_prio()
732 cl->xstats.borrows += qdisc_pkt_len(skb); in cbq_dequeue_prio()
737 if (cl->deficit <= 0) { in cbq_dequeue_prio()
738 q->active[prio] = cl; in cbq_dequeue_prio()
739 cl = cl->next_alive; in cbq_dequeue_prio()
740 cl->deficit += cl->quantum; in cbq_dequeue_prio()
745 if (cl->q->q.qlen == 0 || prio != cl->cpriority) { in cbq_dequeue_prio()
749 cl_prev->next_alive = cl->next_alive; in cbq_dequeue_prio()
750 cl->next_alive = NULL; in cbq_dequeue_prio()
753 if (cl == cl_tail) { in cbq_dequeue_prio()
758 if (cl == cl_tail) { in cbq_dequeue_prio()
762 if (cl->q->q.qlen) in cbq_dequeue_prio()
763 cbq_activate_class(cl); in cbq_dequeue_prio()
769 if (cl->q->q.qlen) in cbq_dequeue_prio()
770 cbq_activate_class(cl); in cbq_dequeue_prio()
772 cl = cl_prev; in cbq_dequeue_prio()
776 cl_prev = cl; in cbq_dequeue_prio()
777 cl = cl->next_alive; in cbq_dequeue_prio()
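Within one priority ring, `cbq_dequeue_prio()` is deficit round robin: an exhausted class is recharged with its `quantum` and the turn moves on; a transmitted packet's length is charged against `deficit`; a class that empties (or whose effective priority changed) is unlinked from the ring. The core accounting, as a sketch:

```c
#include <stdbool.h>

struct drr_class {
	long deficit;
	long quantum;
};

/* One DRR decision mirroring the listing: recharge and skip when the
 * deficit is spent, otherwise charge the packet and allow the send.
 */
static bool drr_try_send(struct drr_class *cl, long pkt_len)
{
	if (cl->deficit <= 0) {
		cl->deficit += cl->quantum;
		return false;
	}
	cl->deficit -= pkt_len;
	return true;
}
```

The `borrow != cl` branch above additionally books the packet (or its byte count, depending on the build) as `xstats.borrows`, which is how borrowing shows up in per-class statistics.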
876 struct cbq_class *cl; in cbq_adjust_levels() local
878 cl = this->children; in cbq_adjust_levels()
879 if (cl) { in cbq_adjust_levels()
881 if (cl->level > level) in cbq_adjust_levels()
882 level = cl->level; in cbq_adjust_levels()
883 } while ((cl = cl->sibling) != this->children); in cbq_adjust_levels()
891 struct cbq_class *cl; in cbq_normalize_quanta() local
898 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_normalize_quanta()
902 if (cl->priority == prio) { in cbq_normalize_quanta()
903 cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ in cbq_normalize_quanta()
906 if (cl->quantum <= 0 || in cbq_normalize_quanta()
907 cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) { in cbq_normalize_quanta()
909 cl->common.classid, cl->quantum); in cbq_normalize_quanta()
910 cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1; in cbq_normalize_quanta()
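`cbq_normalize_quanta()` converts weights into DRR quanta: classes at the same priority split the round in proportion to weight, and a quantum that comes out non-positive or larger than 32 device MTUs is warned about and clamped to `mtu/2 + 1`. With $n$ classes at the priority and, in the kernel source, the truncated divisor being the sum of their weights (`q->quanta[prio]`), the formula is:

```latex
\mathit{quantum}_i \;=\; \frac{w_i \cdot \mathit{allot}_i \cdot n}{\sum_{j} w_j}
```

`cbq_addprio()`/`cbq_rmprio()` below keep exactly those two aggregates (`nclasses[prio]` and the weight sum) up to date and re-normalize on every change.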
916 static void cbq_sync_defmap(struct cbq_class *cl) in cbq_sync_defmap() argument
918 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_sync_defmap()
919 struct cbq_class *split = cl->split; in cbq_sync_defmap()
927 if (split->defaults[i] == cl && !(cl->defmap & (1<<i))) in cbq_sync_defmap()
952 static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask) in cbq_change_defmap() argument
957 split = cl->split; in cbq_change_defmap()
964 for (split = cl->tparent; split; split = split->tparent) in cbq_change_defmap()
972 if (cl->split != split) { in cbq_change_defmap()
973 cl->defmap = 0; in cbq_change_defmap()
974 cbq_sync_defmap(cl); in cbq_change_defmap()
975 cl->split = split; in cbq_change_defmap()
976 cl->defmap = def & mask; in cbq_change_defmap()
978 cl->defmap = (cl->defmap & ~mask) | (def & mask); in cbq_change_defmap()
980 cbq_sync_defmap(cl); in cbq_change_defmap()
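`cbq_change_defmap()` updates the per-class default map with a masked read-modify-write, so a change request can touch selected priority slots while leaving the rest alone; `cbq_sync_defmap()` then reconciles the split node's `defaults[]` with every sibling's map. The mask operation itself, isolated:

```c
#include <stdint.h>

/* Bits selected by mask come from def; all other bits keep their old
 * value. This is the expression on the cl->defmap line above.
 */
static uint32_t defmap_apply(uint32_t old, uint32_t def, uint32_t mask)
{
	return (old & ~mask) | (def & mask);
}
```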
985 struct cbq_class *cl, **clp; in cbq_unlink_class() local
992 cl = *clp; in cbq_unlink_class()
994 if (cl == this) { in cbq_unlink_class()
995 *clp = cl->sibling; in cbq_unlink_class()
998 clp = &cl->sibling; in cbq_unlink_class()
999 } while ((cl = *clp) != this->sibling); in cbq_unlink_class()
1034 struct cbq_class *cl; in cbq_reset() local
1051 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_reset()
1052 qdisc_reset(cl->q); in cbq_reset()
1054 cl->next_alive = NULL; in cbq_reset()
1055 cl->undertime = PSCHED_PASTPERFECT; in cbq_reset()
1056 cl->avgidle = cl->maxidle; in cbq_reset()
1057 cl->deficit = cl->quantum; in cbq_reset()
1058 cl->cpriority = cl->priority; in cbq_reset()
1065 static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss) in cbq_set_lss() argument
1068 cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent; in cbq_set_lss()
1069 cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent; in cbq_set_lss()
1072 cl->ewma_log = lss->ewma_log; in cbq_set_lss()
1074 cl->avpkt = lss->avpkt; in cbq_set_lss()
1076 cl->minidle = -(long)lss->minidle; in cbq_set_lss()
1078 cl->maxidle = lss->maxidle; in cbq_set_lss()
1079 cl->avgidle = lss->maxidle; in cbq_set_lss()
1082 cl->offtime = lss->offtime; in cbq_set_lss()
1086 static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_rmprio() argument
1088 q->nclasses[cl->priority]--; in cbq_rmprio()
1089 q->quanta[cl->priority] -= cl->weight; in cbq_rmprio()
1090 cbq_normalize_quanta(q, cl->priority); in cbq_rmprio()
1093 static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl) in cbq_addprio() argument
1095 q->nclasses[cl->priority]++; in cbq_addprio()
1096 q->quanta[cl->priority] += cl->weight; in cbq_addprio()
1097 cbq_normalize_quanta(q, cl->priority); in cbq_addprio()
1100 static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr) in cbq_set_wrr() argument
1102 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_set_wrr()
1105 cl->allot = wrr->allot; in cbq_set_wrr()
1107 cl->weight = wrr->weight; in cbq_set_wrr()
1109 cl->priority = wrr->priority - 1; in cbq_set_wrr()
1110 cl->cpriority = cl->priority; in cbq_set_wrr()
1111 if (cl->priority >= cl->priority2) in cbq_set_wrr()
1112 cl->priority2 = TC_CBQ_MAXPRIO - 1; in cbq_set_wrr()
1115 cbq_addprio(q, cl); in cbq_set_wrr()
1119 static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt) in cbq_set_fopt() argument
1121 cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange); in cbq_set_fopt()
1215 static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_rate() argument
1219 if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate)) in cbq_dump_rate()
1228 static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_lss() argument
1234 if (cl->borrow == NULL) in cbq_dump_lss()
1236 if (cl->share == NULL) in cbq_dump_lss()
1238 opt.ewma_log = cl->ewma_log; in cbq_dump_lss()
1239 opt.level = cl->level; in cbq_dump_lss()
1240 opt.avpkt = cl->avpkt; in cbq_dump_lss()
1241 opt.maxidle = cl->maxidle; in cbq_dump_lss()
1242 opt.minidle = (u32)(-cl->minidle); in cbq_dump_lss()
1243 opt.offtime = cl->offtime; in cbq_dump_lss()
1254 static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_wrr() argument
1261 opt.allot = cl->allot; in cbq_dump_wrr()
1262 opt.priority = cl->priority + 1; in cbq_dump_wrr()
1263 opt.cpriority = cl->cpriority + 1; in cbq_dump_wrr()
1264 opt.weight = cl->weight; in cbq_dump_wrr()
1274 static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_fopt() argument
1279 if (cl->split || cl->defmap) { in cbq_dump_fopt()
1280 opt.split = cl->split ? cl->split->common.classid : 0; in cbq_dump_fopt()
1281 opt.defmap = cl->defmap; in cbq_dump_fopt()
1293 static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl) in cbq_dump_attr() argument
1295 if (cbq_dump_lss(skb, cl) < 0 || in cbq_dump_attr()
1296 cbq_dump_rate(skb, cl) < 0 || in cbq_dump_attr()
1297 cbq_dump_wrr(skb, cl) < 0 || in cbq_dump_attr()
1298 cbq_dump_fopt(skb, cl) < 0) in cbq_dump_attr()
1333 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_dump_class() local
1336 if (cl->tparent) in cbq_dump_class()
1337 tcm->tcm_parent = cl->tparent->common.classid; in cbq_dump_class()
1340 tcm->tcm_handle = cl->common.classid; in cbq_dump_class()
1341 tcm->tcm_info = cl->q->handle; in cbq_dump_class()
1346 if (cbq_dump_attr(skb, cl) < 0) in cbq_dump_class()
1360 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_dump_class_stats() local
1362 cl->xstats.avgidle = cl->avgidle; in cbq_dump_class_stats()
1363 cl->xstats.undertime = 0; in cbq_dump_class_stats()
1365 if (cl->undertime != PSCHED_PASTPERFECT) in cbq_dump_class_stats()
1366 cl->xstats.undertime = cl->undertime - q->now; in cbq_dump_class_stats()
1369 d, NULL, &cl->bstats) < 0 || in cbq_dump_class_stats()
1370 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || in cbq_dump_class_stats()
1371 gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0) in cbq_dump_class_stats()
1374 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); in cbq_dump_class_stats()
1380 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_graft() local
1384 cl->common.classid, extack); in cbq_graft()
1389 *old = qdisc_replace(sch, new, &cl->q); in cbq_graft()
1395 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_leaf() local
1397 return cl->q; in cbq_leaf()
1402 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_qlen_notify() local
1404 cbq_deactivate_class(cl); in cbq_qlen_notify()
1414 static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl) in cbq_destroy_class() argument
1418 WARN_ON(cl->filters); in cbq_destroy_class()
1420 tcf_block_put(cl->block); in cbq_destroy_class()
1421 qdisc_destroy(cl->q); in cbq_destroy_class()
1422 qdisc_put_rtab(cl->R_tab); in cbq_destroy_class()
1423 gen_kill_estimator(&cl->rate_est); in cbq_destroy_class()
1424 if (cl != &q->link) in cbq_destroy_class()
1425 kfree(cl); in cbq_destroy_class()
1432 struct cbq_class *cl; in cbq_destroy() local
1444 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_destroy()
1445 tcf_block_put(cl->block); in cbq_destroy()
1446 cl->block = NULL; in cbq_destroy()
1450 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h], in cbq_destroy()
1452 cbq_destroy_class(sch, cl); in cbq_destroy()
1463 struct cbq_class *cl = (struct cbq_class *)*arg; in cbq_change_class() local
1483 if (cl) { in cbq_change_class()
1486 if (cl->tparent && in cbq_change_class()
1487 cl->tparent->common.classid != parentid) { in cbq_change_class()
1491 if (!cl->tparent && parentid != TC_H_ROOT) { in cbq_change_class()
1505 err = gen_replace_estimator(&cl->bstats, NULL, in cbq_change_class()
1506 &cl->rate_est, in cbq_change_class()
1520 if (cl->next_alive != NULL) in cbq_change_class()
1521 cbq_deactivate_class(cl); in cbq_change_class()
1524 qdisc_put_rtab(cl->R_tab); in cbq_change_class()
1525 cl->R_tab = rtab; in cbq_change_class()
1529 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); in cbq_change_class()
1532 cbq_rmprio(q, cl); in cbq_change_class()
1533 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); in cbq_change_class()
1537 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); in cbq_change_class()
1539 if (cl->q->q.qlen) in cbq_change_class()
1540 cbq_activate_class(cl); in cbq_change_class()
1596 cl = kzalloc(sizeof(*cl), GFP_KERNEL); in cbq_change_class()
1597 if (cl == NULL) in cbq_change_class()
1600 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); in cbq_change_class()
1602 kfree(cl); in cbq_change_class()
1607 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, in cbq_change_class()
1613 tcf_block_put(cl->block); in cbq_change_class()
1614 kfree(cl); in cbq_change_class()
1619 cl->R_tab = rtab; in cbq_change_class()
1621 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid, in cbq_change_class()
1623 if (!cl->q) in cbq_change_class()
1624 cl->q = &noop_qdisc; in cbq_change_class()
1626 qdisc_hash_add(cl->q, true); in cbq_change_class()
1628 cl->common.classid = classid; in cbq_change_class()
1629 cl->tparent = parent; in cbq_change_class()
1630 cl->qdisc = sch; in cbq_change_class()
1631 cl->allot = parent->allot; in cbq_change_class()
1632 cl->quantum = cl->allot; in cbq_change_class()
1633 cl->weight = cl->R_tab->rate.rate; in cbq_change_class()
1636 cbq_link_class(cl); in cbq_change_class()
1637 cl->borrow = cl->tparent; in cbq_change_class()
1638 if (cl->tparent != &q->link) in cbq_change_class()
1639 cl->share = cl->tparent; in cbq_change_class()
1641 cl->minidle = -0x7FFFFFFF; in cbq_change_class()
1642 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); in cbq_change_class()
1643 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); in cbq_change_class()
1644 if (cl->ewma_log == 0) in cbq_change_class()
1645 cl->ewma_log = q->link.ewma_log; in cbq_change_class()
1646 if (cl->maxidle == 0) in cbq_change_class()
1647 cl->maxidle = q->link.maxidle; in cbq_change_class()
1648 if (cl->avpkt == 0) in cbq_change_class()
1649 cl->avpkt = q->link.avpkt; in cbq_change_class()
1651 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); in cbq_change_class()
1656 *arg = (unsigned long)cl; in cbq_change_class()
1667 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_delete() local
1670 if (cl->filters || cl->children || cl == &q->link) in cbq_delete()
1675 qlen = cl->q->q.qlen; in cbq_delete()
1676 backlog = cl->q->qstats.backlog; in cbq_delete()
1677 qdisc_reset(cl->q); in cbq_delete()
1678 qdisc_tree_reduce_backlog(cl->q, qlen, backlog); in cbq_delete()
1680 if (cl->next_alive) in cbq_delete()
1681 cbq_deactivate_class(cl); in cbq_delete()
1683 if (q->tx_borrowed == cl) in cbq_delete()
1685 if (q->tx_class == cl) { in cbq_delete()
1690 if (q->rx_class == cl) in cbq_delete()
1694 cbq_unlink_class(cl); in cbq_delete()
1695 cbq_adjust_levels(cl->tparent); in cbq_delete()
1696 cl->defmap = 0; in cbq_delete()
1697 cbq_sync_defmap(cl); in cbq_delete()
1699 cbq_rmprio(q, cl); in cbq_delete()
1702 cbq_destroy_class(sch, cl); in cbq_delete()
1710 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_tcf_block() local
1712 if (cl == NULL) in cbq_tcf_block()
1713 cl = &q->link; in cbq_tcf_block()
1715 return cl->block; in cbq_tcf_block()
1723 struct cbq_class *cl = cbq_class_lookup(q, classid); in cbq_bind_filter() local
1725 if (cl) { in cbq_bind_filter()
1726 if (p && p->level <= cl->level) in cbq_bind_filter()
1728 cl->filters++; in cbq_bind_filter()
1729 return (unsigned long)cl; in cbq_bind_filter()
1736 struct cbq_class *cl = (struct cbq_class *)arg; in cbq_unbind_filter() local
1738 cl->filters--; in cbq_unbind_filter()
1744 struct cbq_class *cl; in cbq_walk() local
1751 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) { in cbq_walk()
1756 if (arg->fn(sch, (unsigned long)cl, arg) < 0) { in cbq_walk()