Lines matching refs: q — identifier cross-reference over the netem qdisc (net/sched/sch_netem.c). Only lines referencing q are shown, each with its source line number and enclosing function; intervening lines that do not mention q are omitted.

200 static bool loss_4state(struct netem_sched_data *q)  in loss_4state()  argument
202 struct clgstate *clg = &q->clg; in loss_4state()
265 static bool loss_gilb_ell(struct netem_sched_data *q) in loss_gilb_ell() argument
267 struct clgstate *clg = &q->clg; in loss_gilb_ell()
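The listing only captures the lines of loss_gilb_ell() that reference q; its body steps the two-state Gilbert-Elliott channel over the generic clgstate fields (a1 = p, a2 = r, a3 = h, a4 = k1, as stored by get_loss_clg() further down). A self-contained userspace sketch of that step, reconstructed from the 4.x kernel source with prandom_u32() replaced by a crude stand-in:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

enum { GOOD_STATE = 1, BAD_STATE };

/* clgstate fields as filled in by get_loss_clg():
 * a1 = p (good -> bad transition), a2 = r (bad -> good),
 * a3 = h (packet survives the bad state when the draw stays at or below it),
 * a4 = k1 (loss probability while in the good state). */
struct clgstate {
        uint8_t state;
        uint32_t a1, a2, a3, a4;
};

static uint32_t rnd32(void)
{
        return (uint32_t)rand();        /* crude stand-in for prandom_u32() */
}

/* One packet decision: maybe change state, then report loss
 * according to the current state's probability. */
static bool loss_gilb_ell_sketch(struct clgstate *clg)
{
        switch (clg->state) {
        case GOOD_STATE:
                if (rnd32() < clg->a1)
                        clg->state = BAD_STATE;
                if (rnd32() < clg->a4)
                        return true;
                break;
        case BAD_STATE:
                if (rnd32() < clg->a2)
                        clg->state = GOOD_STATE;
                if (rnd32() > clg->a3)
                        return true;
        }
        return false;
}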
286 static bool loss_event(struct netem_sched_data *q) in loss_event() argument
288 switch (q->loss_model) { in loss_event()
291 return q->loss && q->loss >= get_crandom(&q->loss_cor); in loss_event()
299 return loss_4state(q); in loss_event()
307 return loss_gilb_ell(q); in loss_event()
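The random model at line 291 compares q->loss against get_crandom(&q->loss_cor), a correlated random source shared by the delay, loss, duplication, reordering and corruption knobs. A sketch of that first-order correlated generator, with the crndstate layout (correlation coefficient rho plus last output) assumed from the 4.x kernel source:

#include <stdint.h>
#include <stdlib.h>

/* Correlated random state as used by init_crandom()/get_crandom():
 * rho is the correlation scaled to the full u32 range, last is the
 * previous output. Layout assumed, not shown in the listing. */
struct crndstate {
        uint32_t last;
        uint32_t rho;
};

static uint32_t rnd32(void)
{
        return (uint32_t)rand();        /* stand-in for prandom_u32() */
}

/* Blend a fresh draw with the previous one: the larger rho, the
 * closer consecutive values stay, which turns independent loss
 * events into bursts. */
static uint32_t get_crandom_sketch(struct crndstate *state)
{
        uint64_t value, rho;
        uint32_t answer;

        if (state->rho == 0)            /* no correlation configured */
                return rnd32();

        value = rnd32();
        rho = (uint64_t)state->rho + 1;
        answer = (uint32_t)((value * ((1ULL << 32) - rho) +
                             (uint64_t)state->last * rho) >> 32);
        state->last = answer;
        return answer;
}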
345 static u64 packet_time_ns(u64 len, const struct netem_sched_data *q) in packet_time_ns() argument
347 len += q->packet_overhead; in packet_time_ns()
349 if (q->cell_size) { in packet_time_ns()
350 u32 cells = reciprocal_divide(len, q->cell_size_reciprocal); in packet_time_ns()
352 if (len > cells * q->cell_size) /* extra cell needed for remainder */ in packet_time_ns()
354 len = cells * (q->cell_size + q->cell_overhead); in packet_time_ns()
357 return div64_u64(len * NSEC_PER_SEC, q->rate); in packet_time_ns()
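packet_time_ns() turns a packet length into its serialization delay at q->rate (bytes per second): add the configured per-packet overhead, round up to whole link-layer cells when cell accounting is enabled, then convert bytes to nanoseconds. The same arithmetic as a userspace sketch, with a plain division where the kernel uses the precomputed reciprocal and div64_u64():

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t packet_time_ns_sketch(uint64_t len, uint64_t rate,
                                      int packet_overhead,
                                      uint32_t cell_size, int cell_overhead)
{
        len += packet_overhead;         /* e.g. framing bytes not in skb->len */

        if (cell_size) {
                uint64_t cells = len / cell_size; /* kernel: reciprocal_divide() */

                if (len > cells * cell_size)    /* extra cell for the remainder */
                        cells++;
                len = cells * (cell_size + cell_overhead);
        }

        return len * NSEC_PER_SEC / rate;       /* kernel: div64_u64() */
}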
362 struct netem_sched_data *q = qdisc_priv(sch); in tfifo_reset() local
363 struct rb_node *p = rb_first(&q->t_root); in tfifo_reset()
369 rb_erase(&skb->rbnode, &q->t_root); in tfifo_reset()
376 struct netem_sched_data *q = qdisc_priv(sch); in tfifo_enqueue() local
378 struct rb_node **p = &q->t_root.rb_node, *parent = NULL; in tfifo_enqueue()
391 rb_insert_color(&nskb->rbnode, &q->t_root); in tfifo_enqueue()
392 sch->q.qlen++; in tfifo_enqueue()
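tfifo_enqueue() keeps delayed packets in the rbtree q->t_root keyed by their computed send time, so rb_first() in the dequeue path always yields the next deadline. The descend-and-insert walk, sketched over a plain (unbalanced) binary tree since the kernel's rb_link_node()/rb_insert_color() rebalancing is not reproduced here:

#include <stddef.h>
#include <stdint.h>

/* Minimal stand-in for an skb ordered by its send deadline. */
struct tnode {
        uint64_t time_to_send;
        struct tnode *left, *right;
};

/* Same walk as tfifo_enqueue(): equal-or-later deadlines descend
 * right, so packets with the same deadline keep arrival order. The
 * kernel then recolors the node to keep the red-black tree
 * balanced; this sketch just links it. */
static void tfifo_insert_sketch(struct tnode **root, struct tnode *nskb)
{
        struct tnode **p = root;

        while (*p) {
                if (nskb->time_to_send >= (*p)->time_to_send)
                        p = &(*p)->right;
                else
                        p = &(*p)->left;
        }
        nskb->left = NULL;
        nskb->right = NULL;
        *p = nskb;
}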
434 struct netem_sched_data *q = qdisc_priv(sch); in netem_enqueue() local
445 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) in netem_enqueue()
449 if (loss_event(q)) { in netem_enqueue()
450 if (q->ecn && INET_ECN_set_ce(skb)) in netem_enqueue()
464 if (q->latency || q->jitter || q->rate) in netem_enqueue()
474 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ in netem_enqueue()
476 q->duplicate = 0; in netem_enqueue()
478 q->duplicate = dupsave; in netem_enqueue()
487 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { in netem_enqueue()
514 if (unlikely(sch->q.qlen >= sch->limit)) in netem_enqueue()
520 if (q->gap == 0 || /* not doing reordering */ in netem_enqueue()
521 q->counter < q->gap - 1 || /* inside last reordering gap */ in netem_enqueue()
522 q->reorder < get_crandom(&q->reorder_cor)) { in netem_enqueue()
526 delay = tabledist(q->latency, q->jitter, in netem_enqueue()
527 &q->delay_cor, q->delay_dist); in netem_enqueue()
531 if (q->rate) { in netem_enqueue()
534 if (sch->q.tail) in netem_enqueue()
535 last = netem_skb_cb(sch->q.tail); in netem_enqueue()
536 if (q->t_root.rb_node) { in netem_enqueue()
540 t_skb = skb_rb_last(&q->t_root); in netem_enqueue()
559 delay += packet_time_ns(qdisc_pkt_len(skb), q); in netem_enqueue()
563 ++q->counter; in netem_enqueue()
571 q->counter = 0; in netem_enqueue()
573 netem_enqueue_skb_head(&sch->q, skb); in netem_enqueue()
594 sch->q.qlen += nb; in netem_enqueue()
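The enqueue path applies its impairments in a fixed order: duplication (line 445), loss or ECN marking (449-450), corruption (487), the queue-limit check (514), and finally the reorder-versus-delay decision (520-522) before tabledist() and the rate-based serialization delay are added. A packet that loses that three-way test is instead queued at the head of sch->q for immediate dispatch, with q->counter reset (571-573). The test itself, extracted as a sketch with the correlated draw passed in:

#include <stdbool.h>
#include <stdint.h>

/* Reorder decision from netem_enqueue(): the packet takes the
 * normal delayed path unless reordering is configured and this
 * packet is picked to jump ahead. 'rnd' stands in for
 * get_crandom(&q->reorder_cor). */
static bool delay_this_packet(uint32_t gap, uint32_t counter,
                              uint32_t reorder, uint32_t rnd)
{
        return gap == 0 ||              /* not doing reordering */
               counter < gap - 1 ||     /* inside last reordering gap */
               reorder < rnd;           /* reorder probability not hit */
}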
605 static void get_slot_next(struct netem_sched_data *q, u64 now) in get_slot_next() argument
609 if (!q->slot_dist) in get_slot_next()
610 next_delay = q->slot_config.min_delay + in get_slot_next()
612 (q->slot_config.max_delay - in get_slot_next()
613 q->slot_config.min_delay) >> 32); in get_slot_next()
615 next_delay = tabledist(q->slot_config.dist_delay, in get_slot_next()
616 (s32)(q->slot_config.dist_jitter), in get_slot_next()
617 NULL, q->slot_dist); in get_slot_next()
619 q->slot.slot_next = now + next_delay; in get_slot_next()
620 q->slot.packets_left = q->slot_config.max_packets; in get_slot_next()
621 q->slot.bytes_left = q->slot_config.max_bytes; in get_slot_next()
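get_slot_next() opens a new transmission slot: without a distribution table the delay to the next slot is uniform in [min_delay, max_delay] (the listing omits line 611, the prandom_u32() factor, because it does not reference q); otherwise tabledist() draws from q->slot_dist. The packet and byte budgets are then refilled. The uniform branch scales a 32-bit draw by the configured span without a division:

#include <stdint.h>
#include <stdlib.h>

/* Uniform branch of get_slot_next(): map a 32-bit random value
 * into [min_delay, max_delay] via multiply-and-shift, as the
 * kernel does with prandom_u32(). rand() is a crude stand-in. */
static uint64_t next_slot_delay_sketch(uint64_t min_delay, uint64_t max_delay)
{
        uint32_t r = (uint32_t)rand();

        return min_delay + (((uint64_t)r * (max_delay - min_delay)) >> 32);
}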
626 struct netem_sched_data *q = qdisc_priv(sch); in netem_dequeue() local
631 skb = __qdisc_dequeue_head(&sch->q); in netem_dequeue()
638 p = rb_first(&q->t_root); in netem_dequeue()
647 if (q->slot.slot_next && q->slot.slot_next < time_to_send) in netem_dequeue()
648 get_slot_next(q, now); in netem_dequeue()
650 if (time_to_send <= now && q->slot.slot_next <= now) { in netem_dequeue()
651 rb_erase(p, &q->t_root); in netem_dequeue()
652 sch->q.qlen--; in netem_dequeue()
670 if (q->slot.slot_next) { in netem_dequeue()
671 q->slot.packets_left--; in netem_dequeue()
672 q->slot.bytes_left -= qdisc_pkt_len(skb); in netem_dequeue()
673 if (q->slot.packets_left <= 0 || in netem_dequeue()
674 q->slot.bytes_left <= 0) in netem_dequeue()
675 get_slot_next(q, now); in netem_dequeue()
678 if (q->qdisc) { in netem_dequeue()
683 err = qdisc_enqueue(skb, q->qdisc, &to_free); in netem_dequeue()
696 if (q->qdisc) { in netem_dequeue()
697 skb = q->qdisc->ops->dequeue(q->qdisc); in netem_dequeue()
702 qdisc_watchdog_schedule_ns(&q->watchdog, in netem_dequeue()
704 q->slot.slot_next)); in netem_dequeue()
707 if (q->qdisc) { in netem_dequeue()
708 skb = q->qdisc->ops->dequeue(q->qdisc); in netem_dequeue()
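netem_dequeue() first drains any immediately-sendable packet from sch->q (line 631), then peeks the earliest rbtree entry (638); a packet is released only when both its send time and the current slot allow it (650), decrementing the slot budgets or handing the packet to a child qdisc on the way out. When the head packet is not yet due, the watchdog is armed at lines 702-704; the omitted middle line is max(time_to_send, ...) in the kernel source, so the wake-up is the later of the two deadlines:

#include <stdint.h>

/* Watchdog deadline from netem_dequeue(): sleep until both the
 * head packet's send time has passed and the next slot has opened.
 * The kernel passes max(time_to_send, q->slot.slot_next); slot_next
 * is 0 when slotting is disabled, so it never dominates then. */
static uint64_t netem_wake_time(uint64_t time_to_send, uint64_t slot_next)
{
        return time_to_send > slot_next ? time_to_send : slot_next;
}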
717 struct netem_sched_data *q = qdisc_priv(sch); in netem_reset() local
721 if (q->qdisc) in netem_reset()
722 qdisc_reset(q->qdisc); in netem_reset()
723 qdisc_watchdog_cancel(&q->watchdog); in netem_reset()
766 static void get_slot(struct netem_sched_data *q, const struct nlattr *attr) in get_slot() argument
770 q->slot_config = *c; in get_slot()
771 if (q->slot_config.max_packets == 0) in get_slot()
772 q->slot_config.max_packets = INT_MAX; in get_slot()
773 if (q->slot_config.max_bytes == 0) in get_slot()
774 q->slot_config.max_bytes = INT_MAX; in get_slot()
775 q->slot.packets_left = q->slot_config.max_packets; in get_slot()
776 q->slot.bytes_left = q->slot_config.max_bytes; in get_slot()
777 if (q->slot_config.min_delay | q->slot_config.max_delay | in get_slot()
778 q->slot_config.dist_jitter) in get_slot()
779 q->slot.slot_next = ktime_get_ns(); in get_slot()
781 q->slot.slot_next = 0; in get_slot()
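get_slot() copies the netlink slot configuration verbatim and then normalizes it: zero packet or byte budgets mean "unlimited" and become INT_MAX, and slotting is armed (slot_next set to now) only when some delay or jitter is actually configured. The defaulting step on its own, with the field names assumed from the uapi struct tc_netem_slot:

#include <limits.h>
#include <stdint.h>

/* Field subset of struct tc_netem_slot relevant to get_slot()'s
 * defaulting; layout assumed from the uapi header. */
struct slot_config_sketch {
        int64_t min_delay, max_delay;
        int32_t max_packets, max_bytes;
        int64_t dist_delay, dist_jitter;
};

/* Zero budgets mean "no limit": represent them as INT_MAX so the
 * per-slot countdown in the dequeue path needs no special case. */
static void apply_slot_defaults(struct slot_config_sketch *c)
{
        if (c->max_packets == 0)
                c->max_packets = INT_MAX;
        if (c->max_bytes == 0)
                c->max_bytes = INT_MAX;
}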
784 static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr) in get_correlation() argument
788 init_crandom(&q->delay_cor, c->delay_corr); in get_correlation()
789 init_crandom(&q->loss_cor, c->loss_corr); in get_correlation()
790 init_crandom(&q->dup_cor, c->dup_corr); in get_correlation()
793 static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr) in get_reorder() argument
797 q->reorder = r->probability; in get_reorder()
798 init_crandom(&q->reorder_cor, r->correlation); in get_reorder()
801 static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr) in get_corrupt() argument
805 q->corrupt = r->probability; in get_corrupt()
806 init_crandom(&q->corrupt_cor, r->correlation); in get_corrupt()
809 static void get_rate(struct netem_sched_data *q, const struct nlattr *attr) in get_rate() argument
813 q->rate = r->rate; in get_rate()
814 q->packet_overhead = r->packet_overhead; in get_rate()
815 q->cell_size = r->cell_size; in get_rate()
816 q->cell_overhead = r->cell_overhead; in get_rate()
817 if (q->cell_size) in get_rate()
818 q->cell_size_reciprocal = reciprocal_value(q->cell_size); in get_rate()
820 q->cell_size_reciprocal = (struct reciprocal_value) { 0 }; in get_rate()
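get_rate() precomputes reciprocal_value(q->cell_size) once, so the per-packet cells calculation in packet_time_ns() becomes a multiply-and-shift instead of a division. A simplified fixed-point illustration of the idea; the kernel's actual reciprocal_value() uses the exact Granlund-Montgomery scheme, whereas this classic approximation can be off by one for rare inputs:

#include <stdint.h>

/* Precompute ceil(2^32 / d); dividing by d then costs one 64-bit
 * multiply and a shift. Illustration only, not the kernel's exact
 * algorithm. */
struct recip_sketch {
        uint64_t m;
};

static struct recip_sketch recip_value_sketch(uint32_t d)
{
        struct recip_sketch r = { 0xFFFFFFFFULL / d + 1 };
        return r;
}

static uint32_t recip_divide_sketch(uint32_t a, struct recip_sketch r)
{
        return (uint32_t)(((uint64_t)a * r.m) >> 32);
}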
823 static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr) in get_loss_clg() argument
840 q->loss_model = CLG_4_STATES; in get_loss_clg()
842 q->clg.state = TX_IN_GAP_PERIOD; in get_loss_clg()
843 q->clg.a1 = gi->p13; in get_loss_clg()
844 q->clg.a2 = gi->p31; in get_loss_clg()
845 q->clg.a3 = gi->p32; in get_loss_clg()
846 q->clg.a4 = gi->p14; in get_loss_clg()
847 q->clg.a5 = gi->p23; in get_loss_clg()
859 q->loss_model = CLG_GILB_ELL; in get_loss_clg()
860 q->clg.state = GOOD_STATE; in get_loss_clg()
861 q->clg.a1 = ge->p; in get_loss_clg()
862 q->clg.a2 = ge->r; in get_loss_clg()
863 q->clg.a3 = ge->h; in get_loss_clg()
864 q->clg.a4 = ge->k1; in get_loss_clg()
912 struct netem_sched_data *q = qdisc_priv(sch); in netem_change() local
928 old_clg = q->clg; in netem_change()
929 old_loss_model = q->loss_model; in netem_change()
932 ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); in netem_change()
934 q->loss_model = old_loss_model; in netem_change()
938 q->loss_model = CLG_RANDOM; in netem_change()
942 ret = get_dist_table(sch, &q->delay_dist, in netem_change()
949 ret = get_dist_table(sch, &q->slot_dist, in netem_change()
957 q->latency = PSCHED_TICKS2NS(qopt->latency); in netem_change()
958 q->jitter = PSCHED_TICKS2NS(qopt->jitter); in netem_change()
959 q->limit = qopt->limit; in netem_change()
960 q->gap = qopt->gap; in netem_change()
961 q->counter = 0; in netem_change()
962 q->loss = qopt->loss; in netem_change()
963 q->duplicate = qopt->duplicate; in netem_change()
968 if (q->gap) in netem_change()
969 q->reorder = ~0; in netem_change()
972 get_correlation(q, tb[TCA_NETEM_CORR]); in netem_change()
975 get_reorder(q, tb[TCA_NETEM_REORDER]); in netem_change()
978 get_corrupt(q, tb[TCA_NETEM_CORRUPT]); in netem_change()
981 get_rate(q, tb[TCA_NETEM_RATE]); in netem_change()
984 q->rate = max_t(u64, q->rate, in netem_change()
988 q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]); in netem_change()
991 q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]); in netem_change()
994 q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]); in netem_change()
997 get_slot(q, tb[TCA_NETEM_SLOT]); in netem_change()
1006 q->clg = old_clg; in netem_change()
1007 q->loss_model = old_loss_model; in netem_change()
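netem_change() snapshots the loss-model state (lines 928-929) before parsing, so a failure in any later attribute handler can restore it (1006-1007) and a failed change leaves the qdisc exactly as it was. The shape of that transactional pattern, with a hypothetical parse step standing in for get_loss_clg() and the other TCA_NETEM_* handlers:

/* Condensed error-handling shape of netem_change(); types and the
 * parse helper are stand-ins, not the kernel's definitions. */
struct clg_sketch {
        int state;
        unsigned int a1, a2, a3, a4, a5;
};

struct netem_cfg_sketch {
        struct clg_sketch clg;
        int loss_model;
};

static int parse_attrs_sketch(struct netem_cfg_sketch *q)
{
        (void)q;
        return 0;       /* pretend every attribute parsed cleanly */
}

static int netem_change_sketch(struct netem_cfg_sketch *q)
{
        struct clg_sketch old_clg = q->clg;     /* snapshot before parsing */
        int old_loss_model = q->loss_model;
        int ret = parse_attrs_sketch(q);

        if (ret) {                              /* roll back on any failure */
                q->clg = old_clg;
                q->loss_model = old_loss_model;
        }
        return ret;
}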
1014 struct netem_sched_data *q = qdisc_priv(sch); in netem_init() local
1017 qdisc_watchdog_init(&q->watchdog, sch); in netem_init()
1022 q->loss_model = CLG_RANDOM; in netem_init()
1031 struct netem_sched_data *q = qdisc_priv(sch); in netem_destroy() local
1033 qdisc_watchdog_cancel(&q->watchdog); in netem_destroy()
1034 if (q->qdisc) in netem_destroy()
1035 qdisc_destroy(q->qdisc); in netem_destroy()
1036 dist_free(q->delay_dist); in netem_destroy()
1037 dist_free(q->slot_dist); in netem_destroy()
1040 static int dump_loss_model(const struct netem_sched_data *q, in dump_loss_model() argument
1049 switch (q->loss_model) { in dump_loss_model()
1057 .p13 = q->clg.a1, in dump_loss_model()
1058 .p31 = q->clg.a2, in dump_loss_model()
1059 .p32 = q->clg.a3, in dump_loss_model()
1060 .p14 = q->clg.a4, in dump_loss_model()
1061 .p23 = q->clg.a5, in dump_loss_model()
1070 .p = q->clg.a1, in dump_loss_model()
1071 .r = q->clg.a2, in dump_loss_model()
1072 .h = q->clg.a3, in dump_loss_model()
1073 .k1 = q->clg.a4, in dump_loss_model()
1092 const struct netem_sched_data *q = qdisc_priv(sch); in netem_dump() local
1101 qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency), in netem_dump()
1103 qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter), in netem_dump()
1105 qopt.limit = q->limit; in netem_dump()
1106 qopt.loss = q->loss; in netem_dump()
1107 qopt.gap = q->gap; in netem_dump()
1108 qopt.duplicate = q->duplicate; in netem_dump()
1112 if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency)) in netem_dump()
1115 if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter)) in netem_dump()
1118 cor.delay_corr = q->delay_cor.rho; in netem_dump()
1119 cor.loss_corr = q->loss_cor.rho; in netem_dump()
1120 cor.dup_corr = q->dup_cor.rho; in netem_dump()
1124 reorder.probability = q->reorder; in netem_dump()
1125 reorder.correlation = q->reorder_cor.rho; in netem_dump()
1129 corrupt.probability = q->corrupt; in netem_dump()
1130 corrupt.correlation = q->corrupt_cor.rho; in netem_dump()
1134 if (q->rate >= (1ULL << 32)) { in netem_dump()
1135 if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate, in netem_dump()
1140 rate.rate = q->rate; in netem_dump()
1142 rate.packet_overhead = q->packet_overhead; in netem_dump()
1143 rate.cell_size = q->cell_size; in netem_dump()
1144 rate.cell_overhead = q->cell_overhead; in netem_dump()
1148 if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn)) in netem_dump()
1151 if (dump_loss_model(q, skb) != 0) in netem_dump()
1154 if (q->slot_config.min_delay | q->slot_config.max_delay | in netem_dump()
1155 q->slot_config.dist_jitter) { in netem_dump()
1156 slot = q->slot_config; in netem_dump()
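netem_dump() mirrors the configuration back to userspace. Because the legacy struct tc_netem_rate only carries a 32-bit rate, rates at or above 2^32 bytes/sec are clamped to ~0U there and the exact value is emitted separately as TCA_NETEM_RATE64 (lines 1134-1140). The clamping rule on its own:

#include <stdint.h>

/* Legacy 32-bit rate field from netem_dump(): ~0U signals "read the
 * real value from TCA_NETEM_RATE64". */
static uint32_t legacy_rate(uint64_t rate)
{
        return rate >= (1ULL << 32) ? ~0U : (uint32_t)rate;
}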
1175 struct netem_sched_data *q = qdisc_priv(sch); in netem_dump_class() local
1177 if (cl != 1 || !q->qdisc) /* only one class */ in netem_dump_class()
1181 tcm->tcm_info = q->qdisc->handle; in netem_dump_class()
1189 struct netem_sched_data *q = qdisc_priv(sch); in netem_graft() local
1191 *old = qdisc_replace(sch, new, &q->qdisc); in netem_graft()
1197 struct netem_sched_data *q = qdisc_priv(sch); in netem_leaf() local
1198 return q->qdisc; in netem_leaf()