/Linux-v5.10/net/sched/ |
D | sch_tbf.c |
    145 struct tc_tbf_qopt_offload qopt; in tbf_offload_change() local
    150 qopt.command = TC_TBF_REPLACE; in tbf_offload_change()
    151 qopt.handle = sch->handle; in tbf_offload_change()
    152 qopt.parent = sch->parent; in tbf_offload_change()
    153 qopt.replace_params.rate = q->rate; in tbf_offload_change()
    154 qopt.replace_params.max_size = q->max_size; in tbf_offload_change()
    155 qopt.replace_params.qstats = &sch->qstats; in tbf_offload_change()
    157 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TBF, &qopt); in tbf_offload_change()
    163 struct tc_tbf_qopt_offload qopt; in tbf_offload_destroy() local
    168 qopt.command = TC_TBF_DESTROY; in tbf_offload_destroy()
    [all …]
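The sch_tbf.c hits above show the qdisc side filling a struct tc_tbf_qopt_offload and handing it to the driver through ndo_setup_tc() with TC_SETUP_QDISC_TBF. For orientation, a minimal sketch of the receiving end follows; the mynic_* names are invented for illustration, and the handler only logs what a real driver would program into hardware.

    /* Hypothetical driver-side counterpart to tbf_offload_change() /
     * tbf_offload_destroy(): consume struct tc_tbf_qopt_offload in an
     * ndo_setup_tc() callback. Sketch only, not a real driver.
     */
    #include <linux/netdevice.h>
    #include <net/pkt_cls.h>

    static int mynic_setup_tc_tbf(struct net_device *dev,
                                  struct tc_tbf_qopt_offload *qopt)
    {
        switch (qopt->command) {
        case TC_TBF_REPLACE:
            /* replace_params carries the shaper settings filled in by
             * tbf_offload_change(); a real driver would program hardware
             * here instead of logging.
             */
            netdev_info(dev, "TBF offload: parent %x, max_size %u\n",
                        qopt->parent, qopt->replace_params.max_size);
            return 0;
        case TC_TBF_DESTROY:
            netdev_info(dev, "TBF offload removed, parent %x\n", qopt->parent);
            return 0;
        default:
            return -EOPNOTSUPP;
        }
    }

    /* wired up as .ndo_setup_tc in the hypothetical driver's netdev_ops */
    static int mynic_setup_tc(struct net_device *dev, enum tc_setup_type type,
                              void *type_data)
    {
        if (type == TC_SETUP_QDISC_TBF)
            return mynic_setup_tc_tbf(dev, type_data);
        return -EOPNOTSUPP;
    }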
|
D | sch_fifo.c |
    58 struct tc_fifo_qopt_offload qopt; in fifo_offload_init() local
    63 qopt.command = TC_FIFO_REPLACE; in fifo_offload_init()
    64 qopt.handle = sch->handle; in fifo_offload_init()
    65 qopt.parent = sch->parent; in fifo_offload_init()
    66 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt); in fifo_offload_init()
    72 struct tc_fifo_qopt_offload qopt; in fifo_offload_destroy() local
    77 qopt.command = TC_FIFO_DESTROY; in fifo_offload_destroy()
    78 qopt.handle = sch->handle; in fifo_offload_destroy()
    79 qopt.parent = sch->parent; in fifo_offload_destroy()
    80 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt); in fifo_offload_destroy()
    [all …]
|
D | sch_mqprio.c |
    62 static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt) in mqprio_parse_opt() argument
    67 if (qopt->num_tc > TC_MAX_QUEUE) in mqprio_parse_opt()
    72 if (qopt->prio_tc_map[i] >= qopt->num_tc) in mqprio_parse_opt()
    80 if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX) in mqprio_parse_opt()
    81 qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX; in mqprio_parse_opt()
    88 if (qopt->hw) in mqprio_parse_opt()
    91 for (i = 0; i < qopt->num_tc; i++) { in mqprio_parse_opt()
    92 unsigned int last = qopt->offset[i] + qopt->count[i]; in mqprio_parse_opt()
    97 if (qopt->offset[i] >= dev->real_num_tx_queues || in mqprio_parse_opt()
    98 !qopt->count[i] || in mqprio_parse_opt()
    [all …]
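mqprio_parse_opt() checks that every priority maps to a valid traffic class and that each class's queue range [offset, offset + count) fits inside the device's real TX queue count. The stand-alone sketch below reproduces that arithmetic over a trimmed stand-in struct (not the kernel's struct tc_mqprio_qopt); it compiles as ordinary user-space C.

    /* Trimmed mirror of the range checks mqprio_parse_opt() applies. */
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_TC 16

    struct mqprio_cfg {
        unsigned int num_tc;
        unsigned int prio_tc_map[16];   /* priority -> traffic class */
        unsigned int offset[MAX_TC];    /* first TX queue of each class */
        unsigned int count[MAX_TC];     /* number of TX queues per class */
    };

    static bool mqprio_cfg_valid(const struct mqprio_cfg *c, unsigned int num_txq)
    {
        unsigned int i;

        if (c->num_tc == 0 || c->num_tc > MAX_TC)
            return false;

        /* every priority must map to an existing traffic class */
        for (i = 0; i < 16; i++)
            if (c->prio_tc_map[i] >= c->num_tc)
                return false;

        /* each class's queue range must lie within the device's queues */
        for (i = 0; i < c->num_tc; i++) {
            unsigned int last = c->offset[i] + c->count[i];

            if (c->offset[i] >= num_txq || !c->count[i] || last > num_txq)
                return false;
        }
        return true;
    }

    int main(void)
    {
        struct mqprio_cfg cfg = {
            .num_tc = 2,
            .offset = { 0, 4 },
            .count  = { 4, 4 },
        };

        printf("config %s\n", mqprio_cfg_valid(&cfg, 8) ? "ok" : "rejected");
        return 0;
    }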
|
D | sch_ets.c |
    109 struct tc_ets_qopt_offload qopt; in ets_offload_change() local
    121 qopt.command = TC_ETS_REPLACE; in ets_offload_change()
    122 qopt.handle = sch->handle; in ets_offload_change()
    123 qopt.parent = sch->parent; in ets_offload_change()
    124 qopt.replace_params.bands = q->nbands; in ets_offload_change()
    125 qopt.replace_params.qstats = &sch->qstats; in ets_offload_change()
    126 memcpy(&qopt.replace_params.priomap, in ets_offload_change()
    139 qopt.replace_params.quanta[i] = quantum; in ets_offload_change()
    140 qopt.replace_params.weights[i] = weight; in ets_offload_change()
    143 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETS, &qopt); in ets_offload_change()
    [all …]
|
D | sch_etf.c |
    44 static inline int validate_input_params(struct tc_etf_qopt *qopt, in validate_input_params() argument
    57 if (qopt->clockid < 0) { in validate_input_params()
    62 if (qopt->clockid != CLOCK_TAI) { in validate_input_params()
    67 if (qopt->delta < 0) { in validate_input_params()
    352 struct tc_etf_qopt *qopt; in etf_init() local
    371 qopt = nla_data(tb[TCA_ETF_PARMS]); in etf_init()
    374 qopt->delta, qopt->clockid, in etf_init()
    375 OFFLOAD_IS_ON(qopt) ? "on" : "off", in etf_init()
    376 DEADLINE_MODE_IS_ON(qopt) ? "on" : "off"); in etf_init()
    378 err = validate_input_params(qopt, extack); in etf_init()
    [all …]
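validate_input_params() in sch_etf.c rejects dynamic (negative) clockids, any clock other than CLOCK_TAI, and negative delta values before ETF is installed. Below is a user-space mirror of those three checks, assuming only the two fields the hits reference; the struct is a stand-in, not the UAPI struct tc_etf_qopt.

    #include <stdbool.h>
    #include <time.h>   /* CLOCK_TAI (Linux-specific clock id) */

    struct etf_params {
        int delta;      /* scheduling fudge factor, in ns */
        int clockid;    /* must be CLOCK_TAI for ETF */
    };

    static bool etf_params_valid(const struct etf_params *p)
    {
        if (p->clockid < 0)           /* dynamic, fd-based clockids rejected */
            return false;
        if (p->clockid != CLOCK_TAI)  /* ETF only runs against the TAI clock */
            return false;
        if (p->delta < 0)             /* negative fudge factor is meaningless */
            return false;
        return true;
    }

    int main(void)
    {
        struct etf_params p = { .delta = 300000, .clockid = CLOCK_TAI };

        return etf_params_valid(&p) ? 0 : 1;
    }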
|
D | sch_prio.c |
    142 static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt) in prio_offload() argument
    153 if (qopt) { in prio_offload()
    155 opt.replace_params.bands = qopt->bands; in prio_offload()
    156 memcpy(&opt.replace_params.priomap, qopt->priomap, in prio_offload()
    184 struct tc_prio_qopt *qopt; in prio_tune() local
    186 if (nla_len(opt) < sizeof(*qopt)) in prio_tune()
    188 qopt = nla_data(opt); in prio_tune()
    190 if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2) in prio_tune()
    194 if (qopt->priomap[i] >= qopt->bands) in prio_tune()
    199 for (i = oldbands; i < qopt->bands; i++) { in prio_tune()
    [all …]
|
D | sch_cbs.c |
    366 struct tc_cbs_qopt *qopt; in cbs_change() local
    379 qopt = nla_data(tb[TCA_CBS_PARMS]); in cbs_change()
    381 if (!qopt->offload) { in cbs_change()
    385 err = cbs_enable_offload(dev, q, qopt, extack); in cbs_change()
    391 q->hicredit = qopt->hicredit; in cbs_change()
    392 q->locredit = qopt->locredit; in cbs_change()
    393 q->idleslope = qopt->idleslope * BYTES_PER_KBIT; in cbs_change()
    394 q->sendslope = qopt->sendslope * BYTES_PER_KBIT; in cbs_change()
    395 q->offload = qopt->offload; in cbs_change()
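cbs_change() copies the netlink parameters into the qdisc, converting the slopes from kbit/s (as carried in struct tc_cbs_qopt) into bytes/s via BYTES_PER_KBIT, i.e. a factor of 1000/8 = 125. A stand-alone illustration of that scaling follows; the 98 Mbit/s idle slope is only an example figure for a class-A reservation on a 1 Gbit/s link.

    #include <stdint.h>
    #include <stdio.h>

    #define BYTES_PER_KBIT 125LL   /* 1 kbit/s == 125 bytes/s */

    int main(void)
    {
        int64_t idleslope_kbps = 98000;                     /* example reservation */
        int64_t sendslope_kbps = idleslope_kbps - 1000000;  /* idleslope - port rate */

        printf("idleslope = %lld bytes/s\n",
               (long long)(idleslope_kbps * BYTES_PER_KBIT));
        printf("sendslope = %lld bytes/s\n",
               (long long)(sendslope_kbps * BYTES_PER_KBIT));
        return 0;
    }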
|
D | sch_netem.c |
    959 struct tc_netem_qopt *qopt; in netem_change() local
    967 qopt = nla_data(opt); in netem_change()
    968 ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt)); in netem_change()
    1000 sch->limit = qopt->limit; in netem_change()
    1002 q->latency = PSCHED_TICKS2NS(qopt->latency); in netem_change()
    1003 q->jitter = PSCHED_TICKS2NS(qopt->jitter); in netem_change()
    1004 q->limit = qopt->limit; in netem_change()
    1005 q->gap = qopt->gap; in netem_change()
    1007 q->loss = qopt->loss; in netem_change()
    1008 q->duplicate = qopt->duplicate; in netem_change()
    [all …]
|
D | sch_multiq.c |
    176 struct tc_multiq_qopt *qopt; in multiq_tune() local
    182 if (nla_len(opt) < sizeof(*qopt)) in multiq_tune()
    185 qopt = nla_data(opt); in multiq_tune()
    187 qopt->bands = qdisc_dev(sch)->real_num_tx_queues; in multiq_tune()
    195 q->bands = qopt->bands; in multiq_tune()
|
D | sch_hfsc.c |
    1389 struct tc_hfsc_qopt *qopt; in hfsc_init_qdisc() local
    1394 if (!opt || nla_len(opt) < sizeof(*qopt)) in hfsc_init_qdisc()
    1396 qopt = nla_data(opt); in hfsc_init_qdisc()
    1398 q->defcls = qopt->defcls; in hfsc_init_qdisc()
    1431 struct tc_hfsc_qopt *qopt; in hfsc_change_qdisc() local
    1433 if (opt == NULL || nla_len(opt) < sizeof(*qopt)) in hfsc_change_qdisc()
    1435 qopt = nla_data(opt); in hfsc_change_qdisc()
    1438 q->defcls = qopt->defcls; in hfsc_change_qdisc()
    1519 struct tc_hfsc_qopt qopt; in hfsc_dump_qdisc() local
    1521 qopt.defcls = q->defcls; in hfsc_dump_qdisc()
    [all …]
|
D | sch_taprio.c |
    910 struct tc_mqprio_qopt *qopt, in taprio_parse_mqprio_opt() argument
    916 if (!qopt && !dev->num_tc) { in taprio_parse_mqprio_opt()
    928 if (qopt->num_tc > TC_MAX_QUEUE) { in taprio_parse_mqprio_opt()
    934 if (qopt->num_tc > dev->num_tx_queues) { in taprio_parse_mqprio_opt()
    941 if (qopt->prio_tc_map[i] >= qopt->num_tc) { in taprio_parse_mqprio_opt()
    947 for (i = 0; i < qopt->num_tc; i++) { in taprio_parse_mqprio_opt()
    948 unsigned int last = qopt->offset[i] + qopt->count[i]; in taprio_parse_mqprio_opt()
    953 if (qopt->offset[i] >= dev->num_tx_queues || in taprio_parse_mqprio_opt()
    954 !qopt->count[i] || in taprio_parse_mqprio_opt()
    964 for (j = i + 1; j < qopt->num_tc; j++) { in taprio_parse_mqprio_opt()
    [all …]
|
/Linux-v5.10/drivers/net/ethernet/stmicro/stmmac/ |
D | stmmac_tc.c |
    310 struct tc_cbs_qopt_offload *qopt) in tc_setup_cbs() argument
    313 u32 queue = qopt->queue; in tc_setup_cbs()
    326 if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) { in tc_setup_cbs()
    332 } else if (!qopt->enable) { in tc_setup_cbs()
    341 value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div); in tc_setup_cbs()
    344 value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div); in tc_setup_cbs()
    347 value = qopt->hicredit * 1024ll * 8; in tc_setup_cbs()
    350 value = qopt->locredit * 1024ll * 8; in tc_setup_cbs()
    363 queue, qopt->sendslope, qopt->idleslope, in tc_setup_cbs()
    364 qopt->hicredit, qopt->locredit); in tc_setup_cbs()
    [all …]
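tc_setup_cbs() in the stmmac driver rescales the generic kbit/s slopes into device register units with a 64-bit multiply and divide. The sketch below reproduces only that fixed-point pattern; PTR and SPEED_DIV are placeholder values chosen for illustration, not the factors stmmac actually derives from the negotiated link speed.

    #include <inttypes.h>
    #include <stdio.h>

    #define PTR        8          /* assumed port-transmit-rate factor */
    #define SPEED_DIV  1000000LL  /* assumed link speed in kbit/s (1 Gbit/s) */

    int main(void)
    {
        int64_t idleslope = 98000;    /* kbit/s, as passed in tc_cbs_qopt_offload */
        int64_t sendslope = -902000;  /* kbit/s */

        int64_t idle_reg = idleslope * 1024LL * PTR / SPEED_DIV;
        int64_t send_reg = -sendslope * 1024LL * PTR / SPEED_DIV;

        printf("idle slope register value: %" PRId64 "\n", idle_reg);
        printf("send slope register value: %" PRId64 "\n", send_reg);
        return 0;
    }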
|
D | stmmac_selftests.c |
    1757 struct tc_etf_qopt_offload qopt; in stmmac_test_tbs() local
    1773 qopt.enable = true; in stmmac_test_tbs()
    1774 qopt.queue = i; in stmmac_test_tbs()
    1776 ret = stmmac_tc_setup_etf(priv, priv, &qopt); in stmmac_test_tbs()
    1810 qopt.enable = false; in stmmac_test_tbs()
    1811 stmmac_tc_setup_etf(priv, priv, &qopt); in stmmac_test_tbs()
|
D | hwif.h |
    557 struct tc_cbs_qopt_offload *qopt);
    561 struct tc_taprio_qopt_offload *qopt);
    563 struct tc_etf_qopt_offload *qopt);
|
/Linux-v5.10/drivers/net/ethernet/chelsio/cxgb4/ |
D | cxgb4_tc_mqprio.c |
    19 if (!mqprio->qopt.num_tc) in cxgb4_mqprio_validate()
    22 if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) { in cxgb4_mqprio_validate()
    31 } else if (mqprio->qopt.num_tc > adap->params.nsched_cls) { in cxgb4_mqprio_validate()
    47 for (i = 0; i < mqprio->qopt.num_tc; i++) { in cxgb4_mqprio_validate()
    48 qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset); in cxgb4_mqprio_validate()
    49 qcount += mqprio->qopt.count[i]; in cxgb4_mqprio_validate()
    51 start_a = mqprio->qopt.offset[i]; in cxgb4_mqprio_validate()
    52 end_a = start_a + mqprio->qopt.count[i] - 1; in cxgb4_mqprio_validate()
    53 for (j = i + 1; j < mqprio->qopt.num_tc; j++) { in cxgb4_mqprio_validate()
    54 start_b = mqprio->qopt.offset[j]; in cxgb4_mqprio_validate()
    [all …]
|
D | cxgb4_debugfs.c |
    2863 for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++) in sge_qinfo_show()
    2864 entries += port_mqprio->mqprio.qopt.count[tc]; in sge_qinfo_show()
    3170 for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++) in sge_queue_entries()
    3171 entries += port_mqprio->mqprio.qopt.count[tc]; in sge_queue_entries()
|
/Linux-v5.10/drivers/net/ethernet/aquantia/atlantic/ |
D | aq_main.c |
    390 err = aq_validate_mqprio_opt(aq_nic, mqprio, mqprio->qopt.num_tc); in aq_ndo_setup_tc()
    394 for (i = 0; i < mqprio->qopt.num_tc; i++) { in aq_ndo_setup_tc()
    410 return aq_nic_setup_tc_mqprio(aq_nic, mqprio->qopt.num_tc, in aq_ndo_setup_tc()
    411 mqprio->qopt.prio_tc_map); in aq_ndo_setup_tc()
|
/Linux-v5.10/drivers/net/ethernet/ti/ |
D | cpsw_priv.c |
    931 struct tc_cbs_qopt_offload *qopt) in cpsw_set_cbs() argument
    940 tc = netdev_txq_to_tc(priv->ndev, qopt->queue); in cpsw_set_cbs()
    953 if (!qopt->enable && !priv->fifo_bw[fifo]) in cpsw_set_cbs()
    977 bw = qopt->enable ? qopt->idleslope : 0; in cpsw_set_cbs()
    1002 num_tc = mqprio->qopt.num_tc; in cpsw_set_mqprio()
    1017 tc = mqprio->qopt.prio_tc_map[i]; in cpsw_set_mqprio()
    1024 count = mqprio->qopt.count[i]; in cpsw_set_mqprio()
    1025 offset = mqprio->qopt.offset[i]; in cpsw_set_mqprio()
    1030 if (!mqprio->qopt.hw) { in cpsw_set_mqprio()
    1036 priv->mqprio_hw = mqprio->qopt.hw; in cpsw_set_mqprio()
|
/Linux-v5.10/drivers/net/ethernet/intel/iavf/ |
D | iavf_main.c |
    2549 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS || in iavf_validate_ch_config()
    2550 mqprio_qopt->qopt.num_tc < 1) in iavf_validate_ch_config()
    2553 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) { in iavf_validate_ch_config()
    2554 if (!mqprio_qopt->qopt.count[i] || in iavf_validate_ch_config()
    2555 mqprio_qopt->qopt.offset[i] != num_qps) in iavf_validate_ch_config()
    2566 num_qps += mqprio_qopt->qopt.count[i]; in iavf_validate_ch_config()
    2615 num_tc = mqprio_qopt->qopt.num_tc; in __iavf_setup_tc()
    2619 if (!mqprio_qopt->qopt.hw) { in __iavf_setup_tc()
    2656 mqprio_qopt->qopt.count[i]; in __iavf_setup_tc()
    2658 mqprio_qopt->qopt.offset[i]; in __iavf_setup_tc()
    [all …]
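Beyond the generic mqprio checks, iavf_validate_ch_config() requires the per-class queue ranges to be contiguous: each offset must equal the running total of the preceding counts. A trimmed stand-alone version of that check follows (function and variable names are paraphrased, not taken from the driver):

    #include <stdbool.h>
    #include <stdio.h>

    static bool tc_ranges_contiguous(const unsigned int *offset,
                                     const unsigned int *count,
                                     unsigned int num_tc)
    {
        unsigned int i, num_qps = 0;

        for (i = 0; i < num_tc; i++) {
            /* every class must start right where the previous one ended */
            if (!count[i] || offset[i] != num_qps)
                return false;
            num_qps += count[i];
        }
        return true;
    }

    int main(void)
    {
        unsigned int offset[] = { 0, 4, 8 };
        unsigned int count[]  = { 4, 4, 2 };

        printf("%s\n", tc_ranges_contiguous(offset, count, 3) ?
               "contiguous" : "has gaps");
        return 0;
    }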
|
/Linux-v5.10/drivers/net/ethernet/intel/igc/ |
D | igc_main.c |
    4716 const struct tc_taprio_qopt_offload *qopt) in validate_schedule() argument
    4722 if (qopt->cycle_time_extension) in validate_schedule()
    4732 if (!is_base_time_past(qopt->base_time, &now)) in validate_schedule()
    4735 for (n = 0; n < qopt->num_entries; n++) { in validate_schedule()
    4739 e = &qopt->entries[n]; in validate_schedule()
    4760 struct tc_etf_qopt_offload *qopt) in igc_tsn_enable_launchtime() argument
    4768 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); in igc_tsn_enable_launchtime()
    4776 struct tc_taprio_qopt_offload *qopt) in igc_save_qbv_schedule() argument
    4781 if (!qopt->enable) { in igc_save_qbv_schedule()
    4789 if (!validate_schedule(adapter, qopt)) in igc_save_qbv_schedule()
    [all …]
|
/Linux-v5.10/drivers/net/ethernet/freescale/enetc/ |
D | enetc_qos.c |
    298 struct tc_etf_qopt_offload *qopt = type_data; in enetc_setup_tc_txtime() local
    305 tc = qopt->queue; in enetc_setup_tc_txtime()
    318 priv->tx_ring[tc]->tsd_enable = qopt->enable; in enetc_setup_tc_txtime()
    320 qopt->enable ? ENETC_TSDE : 0); in enetc_setup_tc_txtime()
|
/Linux-v5.10/drivers/net/ethernet/intel/igb/ |
D | igb_main.c |
    2547 struct tc_cbs_qopt_offload *qopt) in igb_offload_cbs() argument
    2557 if (qopt->queue < 0 || qopt->queue > 1) in igb_offload_cbs()
    2560 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable, in igb_offload_cbs()
    2561 qopt->idleslope, qopt->sendslope, in igb_offload_cbs()
    2562 qopt->hicredit, qopt->locredit); in igb_offload_cbs()
    2566 igb_offload_apply(adapter, qopt->queue); in igb_offload_cbs()
    2781 struct tc_etf_qopt_offload *qopt) in igb_offload_txtime() argument
    2791 if (qopt->queue < 0 || qopt->queue > 1) in igb_offload_txtime()
    2794 err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable); in igb_offload_txtime()
    2798 igb_offload_apply(adapter, qopt->queue); in igb_offload_txtime()
|
/Linux-v5.10/drivers/net/ethernet/intel/i40e/ |
D | i40e_main.c |
    1708 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc; in i40e_vsi_setup_queue_map_mqprio()
    1710 num_qps = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
    1720 max_qcount = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
    1724 offset = vsi->mqprio_qopt.qopt.offset[i]; in i40e_vsi_setup_queue_map_mqprio()
    1725 qcount = vsi->mqprio_qopt.qopt.count[i]; in i40e_vsi_setup_queue_map_mqprio()
    1767 override_q = vsi->mqprio_qopt.qopt.count[0]; in i40e_vsi_setup_queue_map_mqprio()
    5132 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc; in i40e_mqprio_get_enabled_tc()
    5154 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; in i40e_pf_get_num_tc()
    5284 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { in i40e_vsi_configure_bw_alloc()
    5473 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) { in i40e_vsi_config_tc()
    [all …]
|
/Linux-v5.10/include/net/ |
D | pkt_cls.h |
    743 struct tc_mqprio_qopt qopt; member
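This single pkt_cls.h hit is why the mqprio driver entries in this listing read fields through mqprio->qopt.…: the in-kernel mqprio offload descriptor embeds the UAPI struct tc_mqprio_qopt as a member named qopt. An abridged user-space sketch of that layering follows; the wrapper's remaining fields are paraphrased, not quoted from the header.

    #include <linux/pkt_sched.h>   /* UAPI definition of struct tc_mqprio_qopt */

    /* Abridged stand-in for the kernel's struct tc_mqprio_qopt_offload: the
     * UAPI block (num_tc, prio_tc_map[], hw, count[], offset[]) sits up front,
     * and offload-only knobs such as mode/shaper and per-class rate limits
     * follow it in the real definition.
     */
    struct mqprio_offload_sketch {
        struct tc_mqprio_qopt qopt;
        /* ... mode, shaper, flags, min_rate[], max_rate[] ... */
    };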
|
/Linux-v5.10/drivers/net/ethernet/hisilicon/hns3/ |
D | hns3_enet.c |
    1711 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; in hns3_setup_tc()
    1713 u8 tc = mqprio_qopt->qopt.num_tc; in hns3_setup_tc()
    1715 u8 hw = mqprio_qopt->qopt.hw; in hns3_setup_tc()
|