Lines matching the identifier q (query full:q) in net/sched/sch_fq_codel.c, the Linux fq_codel queueing discipline
70 static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, in fq_codel_hash() argument
73 return reciprocal_scale(skb_get_hash(skb), q->flows_cnt); in fq_codel_hash()
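fq_codel spreads packets over q->flows_cnt buckets by scaling the 32-bit output of skb_get_hash() instead of taking a modulo. A minimal userspace model of that scaling step, where flow_bucket is a hypothetical stand-in for the kernel's reciprocal_scale():

#include <stdint.h>

/* Maps a 32-bit hash uniformly onto [0, n) with a multiply and a
 * shift, avoiding a division; n plays the role of q->flows_cnt. */
static uint32_t flow_bucket(uint32_t hash, uint32_t n)
{
        return (uint32_t)(((uint64_t)hash * n) >> 32);
}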
79 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_classify() local
86 TC_H_MIN(skb->priority) <= q->flows_cnt) in fq_codel_classify()
89 filter = rcu_dereference_bh(q->filter_list); in fq_codel_classify()
91 return fq_codel_hash(q, skb) + 1; in fq_codel_classify()
107 if (TC_H_MIN(res.classid) <= q->flows_cnt) in fq_codel_classify()
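Classification falls back in three steps: an skb->priority whose major number equals the qdisc handle selects a flow directly, attached tc filters are consulted next, and otherwise the flow hash decides. Flow ids are 1-based so that 0 can mean "no flow". A condensed, hand-paraphrased sketch of that ordering (kernel context assumed, not the verbatim function):

/* Step 1: direct selection via skb->priority. */
if (TC_H_MAJ(skb->priority) == sch->handle &&
    TC_H_MIN(skb->priority) > 0 &&
    TC_H_MIN(skb->priority) <= q->flows_cnt)
        return TC_H_MIN(skb->priority);

/* Steps 2/3: tc filters if attached, else the hash (1-based). */
if (!rcu_dereference_bh(q->filter_list))
        return fq_codel_hash(q, skb) + 1;
/* ... otherwise run tcf_classify() and accept res.classid when
 * TC_H_MIN(res.classid) <= q->flows_cnt; return 0 on no match ... */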
140 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_drop() local
154 for (i = 0; i < q->flows_cnt; i++) { in fq_codel_drop()
155 if (q->backlogs[i] > maxbacklog) { in fq_codel_drop()
156 maxbacklog = q->backlogs[i]; in fq_codel_drop()
164 flow = &q->flows[idx]; in fq_codel_drop()
176 q->backlogs[idx] -= len; in fq_codel_drop()
177 q->memory_usage -= mem; in fq_codel_drop()
180 sch->q.qlen -= i; in fq_codel_drop()
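fq_codel_drop() is the overload escape hatch: it linearly scans q->backlogs[] for the fattest flow, then frees packets from that flow's head. A self-contained model of the scan (fattest_flow is a hypothetical name):

#include <stddef.h>
#include <stdint.h>

/* Return the index of the flow with the largest byte backlog;
 * backlogs mirrors q->backlogs[] and n mirrors q->flows_cnt. */
static size_t fattest_flow(const uint32_t *backlogs, size_t n)
{
        uint32_t maxbacklog = 0;
        size_t i, idx = 0;

        for (i = 0; i < n; i++) {
                if (backlogs[i] > maxbacklog) {
                        maxbacklog = backlogs[i];
                        idx = i;
                }
        }
        return idx;
}

The kernel then drops from the head of q->flows[idx] until it has freed the requested batch or half of that flow's byte backlog, adjusting q->backlogs[idx], q->memory_usage and sch->q.qlen exactly as the matched lines show.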
187 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_enqueue() local
204 flow = &q->flows[idx]; in fq_codel_enqueue()
206 q->backlogs[idx] += qdisc_pkt_len(skb); in fq_codel_enqueue()
210 list_add_tail(&flow->flowchain, &q->new_flows); in fq_codel_enqueue()
211 q->new_flow_count++; in fq_codel_enqueue()
212 flow->deficit = q->quantum; in fq_codel_enqueue()
215 q->memory_usage += get_codel_cb(skb)->mem_usage; in fq_codel_enqueue()
216 memory_limited = q->memory_usage > q->memory_limit; in fq_codel_enqueue()
217 if (++sch->q.qlen <= sch->limit && !memory_limited) in fq_codel_enqueue()
221 prev_qlen = sch->q.qlen; in fq_codel_enqueue()
226 * in q->backlogs[] to find a fat flow. in fq_codel_enqueue()
230 ret = fq_codel_drop(sch, q->drop_batch_size, to_free); in fq_codel_enqueue()
232 prev_qlen -= sch->q.qlen; in fq_codel_enqueue()
234 q->drop_overlimit += prev_qlen; in fq_codel_enqueue()
236 q->drop_overmemory += prev_qlen; in fq_codel_enqueue()
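The enqueue path ties these pieces together: classify to a flow, timestamp the skb for CoDel, append it, and put a previously idle flow on q->new_flows with a fresh deficit of one quantum. Only when sch->q.qlen exceeds sch->limit or q->memory_usage exceeds q->memory_limit does it fall back to a batched fq_codel_drop(). A condensed paraphrase of the accept/overflow tail (kernel context assumed):

/* Accept fast; drop in batches when over a limit. prev_qlen
 * snapshots sch->q.qlen so the freed count can be derived after
 * fq_codel_drop() returns. */
memory_limited = q->memory_usage > q->memory_limit;
if (++sch->q.qlen <= sch->limit && !memory_limited)
        return NET_XMIT_SUCCESS;

prev_qlen = sch->q.qlen;
ret = fq_codel_drop(sch, q->drop_batch_size, to_free);
prev_qlen -= sch->q.qlen;               /* packets actually freed */

q->drop_overlimit += prev_qlen;
if (memory_limited)
        q->drop_overmemory += prev_qlen;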
258 struct fq_codel_sched_data *q = qdisc_priv(sch); in dequeue_func() local
265 q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); in dequeue_func()
266 q->memory_usage -= get_codel_cb(skb)->mem_usage; in dequeue_func()
267 sch->q.qlen--; in dequeue_func()
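dequeue_func() is the callback handed to codel_dequeue(): CoDel itself is queue-agnostic, so fq_codel supplies the routine that pops the head skb of the flow being serviced and settles the accounting. Note the pointer arithmetic that recovers the flow index. A condensed paraphrase (kernel context assumed):

/* The flow is recovered from its embedded codel_vars; flow - q->flows
 * yields the index into q->backlogs[]. */
flow = container_of(vars, struct fq_codel_flow, cvars);
if (flow->head) {
        skb = dequeue_head(flow);
        q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
        q->memory_usage -= get_codel_cb(skb)->mem_usage;
        sch->q.qlen--;
}
return skb;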
283 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_dequeue() local
289 head = &q->new_flows; in fq_codel_dequeue()
291 head = &q->old_flows; in fq_codel_dequeue()
298 flow->deficit += q->quantum; in fq_codel_dequeue()
299 list_move_tail(&flow->flowchain, &q->old_flows); in fq_codel_dequeue()
303 skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams, in fq_codel_dequeue()
304 &flow->cvars, &q->cstats, qdisc_pkt_len, in fq_codel_dequeue()
309 if ((head == &q->new_flows) && !list_empty(&q->old_flows)) in fq_codel_dequeue()
310 list_move_tail(&flow->flowchain, &q->old_flows); in fq_codel_dequeue()
320 if (q->cstats.drop_count && sch->q.qlen) { in fq_codel_dequeue()
321 qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, in fq_codel_dequeue()
322 q->cstats.drop_len); in fq_codel_dequeue()
323 q->cstats.drop_count = 0; in fq_codel_dequeue()
324 q->cstats.drop_len = 0; in fq_codel_dequeue()
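The dequeue loop is DRR with fq_codel's two-tier twist: q->new_flows is always serviced before q->old_flows, a flow whose deficit is spent is recharged by one quantum and rotated to the old list, and a new flow that runs empty while old flows remain is demoted rather than unlinked, which keeps sparse flows from monopolizing new-flow status. A toy, self-contained model of the deficit bookkeeping (toy_flow and may_transmit are hypothetical names):

#include <stdbool.h>
#include <stdint.h>

struct toy_flow { int deficit; };

/* A flow may transmit only while its deficit is positive; once it
 * is spent the flow is recharged and the caller rotates it to the
 * old list. The packet's length is charged when it is sent. */
static bool may_transmit(struct toy_flow *flow, int quantum,
                         uint32_t pkt_len)
{
        if (flow->deficit <= 0) {
                flow->deficit += quantum;
                return false;
        }
        flow->deficit -= pkt_len;
        return true;
}

Deferred CoDel drops are finally reported upward with qdisc_tree_reduce_backlog(), as the last matched lines show, so parent qdiscs see a consistent qlen and backlog.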
337 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_reset() local
340 INIT_LIST_HEAD(&q->new_flows); in fq_codel_reset()
341 INIT_LIST_HEAD(&q->old_flows); in fq_codel_reset()
342 for (i = 0; i < q->flows_cnt; i++) { in fq_codel_reset()
343 struct fq_codel_flow *flow = q->flows + i; in fq_codel_reset()
349 memset(q->backlogs, 0, q->flows_cnt * sizeof(u32)); in fq_codel_reset()
350 q->memory_usage = 0; in fq_codel_reset()
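Reset reinitializes both flow lists, drains every flow and zeroes the byte and memory accounting. The per-flow loop body elided from the match list looks roughly like this in recent kernels (the exact free helper varies by version):

/* Drain queued skbs, unlink the flow, re-init its CoDel state. */
while (flow->head) {
        struct sk_buff *skb = dequeue_head(flow);

        qdisc_qstats_backlog_dec(sch, skb);
        rtnl_kfree_skbs(skb, skb);
}
INIT_LIST_HEAD(&flow->flowchain);
codel_vars_init(&flow->cvars);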
370 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_change() local
380 if (q->flows) in fq_codel_change()
382 q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]); in fq_codel_change()
383 if (!q->flows_cnt || in fq_codel_change()
384 q->flows_cnt > 65536) in fq_codel_change()
399 q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT; in fq_codel_change()
405 q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT; in fq_codel_change()
409 q->cparams.ce_threshold_selector = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]); in fq_codel_change()
411 q->cparams.ce_threshold_mask = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]); in fq_codel_change()
416 q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT; in fq_codel_change()
423 q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]); in fq_codel_change()
426 q->quantum = quantum; in fq_codel_change()
429 q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); in fq_codel_change()
432 q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT])); in fq_codel_change()
434 while (sch->q.qlen > sch->limit || in fq_codel_change()
435 q->memory_usage > q->memory_limit) { in fq_codel_change()
438 q->cstats.drop_len += qdisc_pkt_len(skb); in fq_codel_change()
440 q->cstats.drop_count++; in fq_codel_change()
442 qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len); in fq_codel_change()
443 q->cstats.drop_count = 0; in fq_codel_change()
444 q->cstats.drop_len = 0; in fq_codel_change()
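fq_codel_change() is the netlink configuration path. Times arrive from userspace in microseconds and are stored in CoDel's fixed-point unit of 2^CODEL_SHIFT nanoseconds (CODEL_SHIFT is 10, so one tick is 1024 ns), which is what the (usec * NSEC_PER_USEC) >> CODEL_SHIFT expressions compute. TCA_FQ_CODEL_FLOWS is rejected once q->flows is allocated because the hash table cannot be resized live, and the closing loop dequeues packets until both the packet and the memory limits hold again. A self-contained model of the time conversion (usec_to_codel_time is a hypothetical name):

#include <stdint.h>

#define NSEC_PER_USEC   1000ULL
#define CODEL_SHIFT     10      /* CoDel time unit = 1024 ns */

/* Microseconds from userspace to CoDel's fixed-point time unit. */
static uint32_t usec_to_codel_time(uint32_t usec)
{
        return (uint32_t)(((uint64_t)usec * NSEC_PER_USEC) >> CODEL_SHIFT);
}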
452 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_destroy() local
454 tcf_block_put(q->block); in fq_codel_destroy()
455 kvfree(q->backlogs); in fq_codel_destroy()
456 kvfree(q->flows); in fq_codel_destroy()
462 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_init() local
467 q->flows_cnt = 1024; in fq_codel_init()
468 q->memory_limit = 32 << 20; /* 32 MBytes */ in fq_codel_init()
469 q->drop_batch_size = 64; in fq_codel_init()
470 q->quantum = psched_mtu(qdisc_dev(sch)); in fq_codel_init()
471 INIT_LIST_HEAD(&q->new_flows); in fq_codel_init()
472 INIT_LIST_HEAD(&q->old_flows); in fq_codel_init()
473 codel_params_init(&q->cparams); in fq_codel_init()
474 codel_stats_init(&q->cstats); in fq_codel_init()
475 q->cparams.ecn = true; in fq_codel_init()
476 q->cparams.mtu = psched_mtu(qdisc_dev(sch)); in fq_codel_init()
484 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in fq_codel_init()
488 if (!q->flows) { in fq_codel_init()
489 q->flows = kvcalloc(q->flows_cnt, in fq_codel_init()
492 if (!q->flows) { in fq_codel_init()
496 q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL); in fq_codel_init()
497 if (!q->backlogs) { in fq_codel_init()
501 for (i = 0; i < q->flows_cnt; i++) { in fq_codel_init()
502 struct fq_codel_flow *flow = q->flows + i; in fq_codel_init()
515 kvfree(q->flows); in fq_codel_init()
516 q->flows = NULL; in fq_codel_init()
518 q->flows_cnt = 0; in fq_codel_init()
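fq_codel_init() shows the shipped defaults: 1024 flows, a 32 MB memory cap, a drop batch of 64 packets, a quantum of one device MTU, and ECN marking enabled. Both per-flow arrays are sized by flows_cnt and allocated with kvcalloc(), which falls back from kmalloc to vmalloc for large flow counts; the error path frees q->flows and zeroes flows_cnt so a later destroy stays safe. A condensed paraphrase of the allocation step (kernel context assumed):

/* Both tables scale with q->flows_cnt. */
q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_codel_flow),
                    GFP_KERNEL);
if (!q->flows)
        goto alloc_failure;

q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
if (!q->backlogs)
        goto alloc_failure;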
524 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_dump() local
532 codel_time_to_us(q->cparams.target)) || in fq_codel_dump()
536 codel_time_to_us(q->cparams.interval)) || in fq_codel_dump()
538 q->cparams.ecn) || in fq_codel_dump()
540 q->quantum) || in fq_codel_dump()
542 q->drop_batch_size) || in fq_codel_dump()
544 q->memory_limit) || in fq_codel_dump()
546 q->flows_cnt)) in fq_codel_dump()
549 if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD) { in fq_codel_dump()
551 codel_time_to_us(q->cparams.ce_threshold))) in fq_codel_dump()
553 if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR, q->cparams.ce_threshold_selector)) in fq_codel_dump()
555 if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK, q->cparams.ce_threshold_mask)) in fq_codel_dump()
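fq_codel_dump() mirrors the change path: every parameter goes out as a TCA_FQ_CODEL_* attribute, with codel_time_to_us() undoing the fixed-point conversion, and the three ce_threshold attributes are emitted only while the threshold is enabled (not CODEL_DISABLED_THRESHOLD). A self-contained model of the reverse conversion (codel_time_to_usec is a hypothetical stand-in for the kernel helper):

#include <stdint.h>

#define NSEC_PER_USEC   1000ULL
#define CODEL_SHIFT     10

/* CoDel's fixed-point time unit back to microseconds. */
static uint32_t codel_time_to_usec(uint32_t val)
{
        return (uint32_t)(((uint64_t)val << CODEL_SHIFT) / NSEC_PER_USEC);
}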
567 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_dump_stats() local
573 st.qdisc_stats.maxpacket = q->cstats.maxpacket; in fq_codel_dump_stats()
574 st.qdisc_stats.drop_overlimit = q->drop_overlimit; in fq_codel_dump_stats()
575 st.qdisc_stats.ecn_mark = q->cstats.ecn_mark; in fq_codel_dump_stats()
576 st.qdisc_stats.new_flow_count = q->new_flow_count; in fq_codel_dump_stats()
577 st.qdisc_stats.ce_mark = q->cstats.ce_mark; in fq_codel_dump_stats()
578 st.qdisc_stats.memory_usage = q->memory_usage; in fq_codel_dump_stats()
579 st.qdisc_stats.drop_overmemory = q->drop_overmemory; in fq_codel_dump_stats()
582 list_for_each(pos, &q->new_flows) in fq_codel_dump_stats()
585 list_for_each(pos, &q->old_flows) in fq_codel_dump_stats()
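The stats dump copies the counters accumulated above into tc_fq_codel_xstats and then, under the qdisc tree lock, measures the current length of both flow lists by plain traversal. A condensed paraphrase of that counting (kernel context assumed):

/* Count linked flows on each list while the tree is locked. */
sch_tree_lock(sch);
list_for_each(pos, &q->new_flows)
        st.qdisc_stats.new_flows_len++;
list_for_each(pos, &q->old_flows)
        st.qdisc_stats.old_flows_len++;
sch_tree_unlock(sch);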
608 static void fq_codel_unbind(struct Qdisc *q, unsigned long cl) in fq_codel_unbind() argument
615 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_tcf_block() local
619 return q->block; in fq_codel_tcf_block()
632 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_dump_class_stats() local
637 if (idx < q->flows_cnt) { in fq_codel_dump_class_stats()
638 const struct fq_codel_flow *flow = &q->flows[idx]; in fq_codel_dump_class_stats()
666 qs.backlog = q->backlogs[idx]; in fq_codel_dump_class_stats()
671 if (idx < q->flows_cnt) in fq_codel_dump_class_stats()
678 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_walk() local
684 for (i = 0; i < q->flows_cnt; i++) { in fq_codel_walk()
685 if (list_empty(&q->flows[i].flowchain)) { in fq_codel_walk()
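fq_codel_walk() exposes flows to tc as classes: it visits all q->flows_cnt slots and reports only those whose flowchain is currently linked into a schedule list, using the 1-based flow index as the class id. A condensed paraphrase of the loop (details vary a little across kernel versions):

/* Skip empty slots; hand populated flows to the walker callback. */
for (i = 0; i < q->flows_cnt; i++) {
        if (list_empty(&q->flows[i].flowchain)) {
                arg->count++;
                continue;
        }
        if (arg->fn(sch, i + 1, arg) < 0) {
                arg->stop = 1;
                break;
        }
        arg->count++;
}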