Lines Matching +full:ri +full:- +full:override

4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
110 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
127 #define FW4_CFNAME "cxgb4/t4-config.txt"
128 #define FW5_CFNAME "cxgb4/t5-config.txt"
129 #define FW6_CFNAME "cxgb4/t6-config.txt"
145 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
155 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
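The two lines above document the "msi" module parameter, which selects which interrupt type the driver tries first (INTx = 0, MSI = 1, MSI-X = 2). As a hedged sketch only — the default value and mode bits below are assumptions, not copied from cxgb4_main.c — an integer parameter of this kind is normally wired up like this:

#include <linux/moduleparam.h>

/* Sketch: default and permission bits are assumptions. */
static int msi = 2;	/* 0 = INTx, 1 = MSI, 2 = MSI-X */

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");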
159 * offset by 2 bytes in order to have the IP headers line up on 4-byte
161 * a machine check fault if an attempt is made to access one of the 4-byte IP
162 * header fields on a non-4-byte boundary. And it's a major performance issue
165 * edge-case performance sensitive applications (like forwarding large volumes
167 * PCI-E Bus transfers enough to measurably affect performance.
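The comment above concerns receive-buffer packing: a 14-byte Ethernet header leaves the IP header misaligned unless the frame is started 2 bytes into the buffer. A minimal standalone sketch of that arithmetic (the macro names here are illustrative, not the driver's):

#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header length */
#define RX_PKT_PAD	 2	/* illustrative name for the 2-byte pad */

int main(void)
{
	/* Without the pad the IP header starts at offset 14 (not 4-byte
	 * aligned); with a 2-byte pad it starts at offset 16.
	 */
	printf("no pad: IP header at offset %d, aligned=%d\n",
	       ETH_HLEN, ETH_HLEN % 4 == 0);
	printf("padded: IP header at offset %d, aligned=%d\n",
	       RX_PKT_PAD + ETH_HLEN, (RX_PKT_PAD + ETH_HLEN) % 4 == 0);
	return 0;
}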
200 switch (p->link_cfg.speed) { in link_report()
224 dev->name, p->link_cfg.speed); in link_report()
228 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, in link_report()
229 fc[p->link_cfg.fc]); in link_report()
238 struct adapter *adap = pi->adapter; in dcb_tx_queue_prio_enable()
239 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; in dcb_tx_queue_prio_enable()
245 for (i = 0; i < pi->nqsets; i++, txq++) { in dcb_tx_queue_prio_enable()
252 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id)); in dcb_tx_queue_prio_enable()
259 err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, in dcb_tx_queue_prio_enable()
261 -FW_CMD_MAX_TIMEOUT); in dcb_tx_queue_prio_enable()
264 dev_err(adap->pdev_dev, in dcb_tx_queue_prio_enable()
266 enable ? "set" : "unset", pi->port_id, i, -err); in dcb_tx_queue_prio_enable()
268 txq->dcb_prio = enable ? value : 0; in dcb_tx_queue_prio_enable()
276 if (!pi->dcb.enabled) in cxgb4_dcb_enabled()
279 return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) || in cxgb4_dcb_enabled()
280 (pi->dcb.state == CXGB4_DCB_STATE_HOST)); in cxgb4_dcb_enabled()
286 struct net_device *dev = adapter->port[port_id]; in t4_os_link_changed()
312 struct net_device *dev = adap->port[port_id]; in t4_os_portmod_changed()
315 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) in t4_os_portmod_changed()
317 else if (pi->mod_type < ARRAY_SIZE(mod_str)) in t4_os_portmod_changed()
318 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); in t4_os_portmod_changed()
319 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) in t4_os_portmod_changed()
321 dev->name); in t4_os_portmod_changed()
322 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) in t4_os_portmod_changed()
324 dev->name); in t4_os_portmod_changed()
325 else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR) in t4_os_portmod_changed()
326 netdev_info(dev, "%s: transceiver module error\n", dev->name); in t4_os_portmod_changed()
329 dev->name, pi->mod_type); in t4_os_portmod_changed()
334 pi->link_cfg.redo_l1cfg = netif_running(dev); in t4_os_portmod_changed()
351 struct adapter *adap = pi->adapter; in cxgb4_set_addr_hash()
357 list_for_each_entry(entry, &adap->mac_hlist, list) { in cxgb4_set_addr_hash()
358 ucast |= is_unicast_ether_addr(entry->addr); in cxgb4_set_addr_hash()
359 vec |= (1ULL << hash_mac_addr(entry->addr)); in cxgb4_set_addr_hash()
361 return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast, in cxgb4_set_addr_hash()
368 struct adapter *adap = pi->adapter; in cxgb4_mac_sync()
383 ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist, in cxgb4_mac_sync()
394 return -ENOMEM; in cxgb4_mac_sync()
395 ether_addr_copy(new_entry->addr, mac_addr); in cxgb4_mac_sync()
396 list_add_tail(&new_entry->list, &adap->mac_hlist); in cxgb4_mac_sync()
406 struct adapter *adap = pi->adapter; in cxgb4_mac_unsync()
414 list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) { in cxgb4_mac_unsync()
415 if (ether_addr_equal(entry->addr, mac_addr)) { in cxgb4_mac_unsync()
416 list_del(&entry->list); in cxgb4_mac_unsync()
422 ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false); in cxgb4_mac_unsync()
423 return ret < 0 ? -EINVAL : 0; in cxgb4_mac_unsync()
428 * If @mtu is -1 it is left unchanged.
433 struct adapter *adapter = pi->adapter; in set_rxmode()
438 return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror, in set_rxmode()
439 mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, in set_rxmode()
440 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1, in set_rxmode()
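set_rxmode() simply forwards the interface flags to t4_set_rxmode(); per the comment above, passing -1 as @mtu leaves the MTU untouched. A hedged sketch of the calling convention (kernel context assumed; the helper name is made up, but cxgb_set_rxmode() further down this file makes exactly this call):

/* Sketch: refresh promiscuous/allmulti state after dev->flags changed,
 * leaving the MTU alone (-1) and without sleeping (sleep_ok = false).
 */
static void refresh_rxmode(struct net_device *dev)
{
	set_rxmode(dev, -1, false);
}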
445 * cxgb4_change_mac - Update match filter for a MAC address.
449 * or -1
465 struct adapter *adapter = pi->adapter; in cxgb4_change_mac()
469 ret = t4_change_mac(adapter, adapter->mbox, viid, in cxgb4_change_mac()
472 if (ret == -ENOMEM) { in cxgb4_change_mac()
476 list_for_each_entry(entry, &adapter->mac_hlist, list) { in cxgb4_change_mac()
477 if (entry->iface_mac) { in cxgb4_change_mac()
478 ether_addr_copy(entry->addr, addr); in cxgb4_change_mac()
484 return -ENOMEM; in cxgb4_change_mac()
485 ether_addr_copy(new_entry->addr, addr); in cxgb4_change_mac()
486 new_entry->iface_mac = true; in cxgb4_change_mac()
487 list_add_tail(&new_entry->list, &adapter->mac_hlist); in cxgb4_change_mac()
499 * link_start - enable a port
507 unsigned int mb = pi->adapter->mbox; in link_start()
514 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror, in link_start()
515 dev->mtu, -1, -1, -1, in link_start()
516 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); in link_start()
518 ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt, in link_start()
519 dev->dev_addr, true, &pi->smt_idx); in link_start()
521 ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan, in link_start()
522 &pi->link_cfg); in link_start()
525 ret = t4_enable_pi_params(pi->adapter, mb, pi, true, in link_start()
537 int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid)); in dcb_rpl()
538 struct net_device *dev = adap->port[adap->chan_map[port]]; in dcb_rpl()
559 u8 opcode = ((const struct rss_header *)rsp)->opcode; in fwevtq_handler()
566 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) { in fwevtq_handler()
568 opcode = ((const struct rss_header *)rsp)->opcode; in fwevtq_handler()
571 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" in fwevtq_handler()
579 unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid)); in fwevtq_handler()
582 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; in fwevtq_handler()
583 txq->restarts++; in fwevtq_handler()
584 if (txq->q_type == CXGB4_TXQ_ETH) { in fwevtq_handler()
588 t4_sge_eth_txq_egress_update(q->adap, eq, -1); in fwevtq_handler()
593 tasklet_schedule(&oq->qresume_tsk); in fwevtq_handler()
599 const struct fw_port_cmd *pcmd = (const void *)p->data; in fwevtq_handler()
600 unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid)); in fwevtq_handler()
602 FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16)); in fwevtq_handler()
608 be32_to_cpu(pcmd->op_to_portid)); in fwevtq_handler()
612 dev = q->adap->port[q->adap->chan_map[port]]; in fwevtq_handler()
614 ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F) in fwevtq_handler()
615 : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32) in fwevtq_handler()
626 dcb_rpl(q->adap, pcmd); in fwevtq_handler()
629 if (p->type == 0) in fwevtq_handler()
630 t4_handle_fw_rpl(q->adap, p->data); in fwevtq_handler()
634 do_l2t_write_rpl(q->adap, p); in fwevtq_handler()
638 do_smt_write_rpl(q->adap, p); in fwevtq_handler()
642 filter_rpl(q->adap, p); in fwevtq_handler()
646 hash_filter_rpl(q->adap, p); in fwevtq_handler()
650 hash_del_filter_rpl(q->adap, p); in fwevtq_handler()
654 do_srq_table_rpl(q->adap, p); in fwevtq_handler()
656 dev_err(q->adap->pdev_dev, in fwevtq_handler()
664 if (adapter->flags & CXGB4_USING_MSIX) { in disable_msi()
665 pci_disable_msix(adapter->pdev); in disable_msi()
666 adapter->flags &= ~CXGB4_USING_MSIX; in disable_msi()
667 } else if (adapter->flags & CXGB4_USING_MSI) { in disable_msi()
668 pci_disable_msi(adapter->pdev); in disable_msi()
669 adapter->flags &= ~CXGB4_USING_MSI; in disable_msi()
674 * Interrupt handler for non-data events used with MSI-X.
682 adap->swintr = 1; in t4_nondata_intr()
685 if (adap->flags & CXGB4_MASTER_PF) in t4_nondata_intr()
696 dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n"); in cxgb4_set_msix_aff()
697 return -ENOMEM; in cxgb4_set_msix_aff()
700 cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)), in cxgb4_set_msix_aff()
705 dev_warn(adap->pdev_dev, in cxgb4_set_msix_aff()
720 struct sge *s = &adap->sge; in request_msix_queue_irqs()
724 if (s->fwevtq_msix_idx < 0) in request_msix_queue_irqs()
725 return -ENOMEM; in request_msix_queue_irqs()
727 err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec, in request_msix_queue_irqs()
729 adap->msix_info[s->fwevtq_msix_idx].desc, in request_msix_queue_irqs()
730 &s->fw_evtq); in request_msix_queue_irqs()
735 minfo = s->ethrxq[ethqidx].msix; in request_msix_queue_irqs()
736 err = request_irq(minfo->vec, in request_msix_queue_irqs()
738 minfo->desc, in request_msix_queue_irqs()
739 &s->ethrxq[ethqidx].rspq); in request_msix_queue_irqs()
743 cxgb4_set_msix_aff(adap, minfo->vec, in request_msix_queue_irqs()
744 &minfo->aff_mask, ethqidx); in request_msix_queue_irqs()
749 while (--ethqidx >= 0) { in request_msix_queue_irqs()
750 minfo = s->ethrxq[ethqidx].msix; in request_msix_queue_irqs()
751 cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask); in request_msix_queue_irqs()
752 free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq); in request_msix_queue_irqs()
754 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq); in request_msix_queue_irqs()
760 struct sge *s = &adap->sge; in free_msix_queue_irqs()
764 free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq); in free_msix_queue_irqs()
766 minfo = s->ethrxq[i].msix; in free_msix_queue_irqs()
767 cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask); in free_msix_queue_irqs()
768 free_irq(minfo->vec, &s->ethrxq[i].rspq); in free_msix_queue_irqs()
786 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); in setup_ppod_edram()
788 dev_warn(adap->pdev_dev, in setup_ppod_edram()
791 return -1; in setup_ppod_edram()
795 return -1; in setup_ppod_edram()
797 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); in setup_ppod_edram()
799 dev_err(adap->pdev_dev, in setup_ppod_edram()
801 return -1; in setup_ppod_edram()
815 ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0, in adap_config_hpfilter()
822 dev_err(adapter->pdev_dev, in adap_config_hpfilter()
829 struct adapter *adap = pi->adapter; in cxgb4_config_rss()
832 ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss, in cxgb4_config_rss()
842 return t4_config_vi_rss(adap, adap->mbox, viid, in cxgb4_config_rss()
852 * cxgb4_write_rss - write the RSS table for a given port
862 struct adapter *adapter = pi->adapter; in cxgb4_write_rss()
867 rxq = &adapter->sge.ethrxq[pi->first_qset]; in cxgb4_write_rss()
868 rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL); in cxgb4_write_rss()
870 return -ENOMEM; in cxgb4_write_rss()
873 for (i = 0; i < pi->rss_size; i++, queues++) in cxgb4_write_rss()
876 err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid); in cxgb4_write_rss()
882 * setup_rss - configure RSS
895 for (j = 0; j < pi->rss_size; j++) in setup_rss()
896 pi->rss[j] = j % pi->nqsets; in setup_rss()
898 err = cxgb4_write_rss(pi, pi->rss); in setup_rss()
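setup_rss() spreads each port's RSS indirection table round-robin over its queue sets before writing it out with cxgb4_write_rss(). A standalone sketch of that fill, with assumed table and queue-set sizes:

#include <stdio.h>

int main(void)
{
	unsigned int rss_size = 16;	/* assumed indirection table size */
	unsigned int nqsets = 3;	/* assumed Rx queue sets on the port */
	unsigned short rss[16];
	unsigned int j;

	/* Round-robin: bucket j steers packets to queue set (j % nqsets). */
	for (j = 0; j < rss_size; j++)
		rss[j] = j % nqsets;

	for (j = 0; j < rss_size; j++)
		printf("bucket %2u -> qset %u\n", j, rss[j]);
	return 0;
}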
910 qid -= p->ingr_start; in rxq_to_chan()
911 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan; in rxq_to_chan()
916 if (q->handler) in cxgb4_quiesce_rx()
917 napi_disable(&q->napi); in cxgb4_quiesce_rx()
927 for (i = 0; i < adap->sge.ingr_sz; i++) { in quiesce_rx()
928 struct sge_rspq *q = adap->sge.ingr_map[i]; in quiesce_rx()
940 struct sge *s = &adap->sge; in disable_interrupts()
942 if (adap->flags & CXGB4_FULL_INIT_DONE) { in disable_interrupts()
944 if (adap->flags & CXGB4_USING_MSIX) { in disable_interrupts()
946 free_irq(adap->msix_info[s->nd_msix_idx].vec, in disable_interrupts()
949 free_irq(adap->pdev->irq, adap); in disable_interrupts()
957 if (q->handler) in cxgb4_enable_rx()
958 napi_enable(&q->napi); in cxgb4_enable_rx()
960 /* 0-increment GTS to start the timer and enable interrupts */ in cxgb4_enable_rx()
962 SEINTARM_V(q->intr_params) | in cxgb4_enable_rx()
963 INGRESSQID_V(q->cntxt_id)); in cxgb4_enable_rx()
973 for (i = 0; i < adap->sge.ingr_sz; i++) { in enable_rx()
974 struct sge_rspq *q = adap->sge.ingr_map[i]; in enable_rx()
987 adap->sge.nd_msix_idx = -1; in setup_non_data_intr()
988 if (!(adap->flags & CXGB4_USING_MSIX)) in setup_non_data_intr()
991 /* Request MSI-X vector for non-data interrupt */ in setup_non_data_intr()
994 return -ENOMEM; in setup_non_data_intr()
996 snprintf(adap->msix_info[msix].desc, in setup_non_data_intr()
997 sizeof(adap->msix_info[msix].desc), in setup_non_data_intr()
998 "%s", adap->port[0]->name); in setup_non_data_intr()
1000 adap->sge.nd_msix_idx = msix; in setup_non_data_intr()
1006 struct sge *s = &adap->sge; in setup_fw_sge_queues()
1009 bitmap_zero(s->starving_fl, s->egr_sz); in setup_fw_sge_queues()
1010 bitmap_zero(s->txq_maperr, s->egr_sz); in setup_fw_sge_queues()
1012 if (adap->flags & CXGB4_USING_MSIX) { in setup_fw_sge_queues()
1013 s->fwevtq_msix_idx = -1; in setup_fw_sge_queues()
1016 return -ENOMEM; in setup_fw_sge_queues()
1018 snprintf(adap->msix_info[msix].desc, in setup_fw_sge_queues()
1019 sizeof(adap->msix_info[msix].desc), in setup_fw_sge_queues()
1020 "%s-FWeventq", adap->port[0]->name); in setup_fw_sge_queues()
1022 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, in setup_fw_sge_queues()
1023 NULL, NULL, NULL, -1); in setup_fw_sge_queues()
1026 msix = -((int)s->intrq.abs_id + 1); in setup_fw_sge_queues()
1029 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], in setup_fw_sge_queues()
1030 msix, NULL, fwevtq_handler, NULL, -1); in setup_fw_sge_queues()
1034 s->fwevtq_msix_idx = msix; in setup_fw_sge_queues()
1039 * setup_sge_queues - configure SGE Tx/Rx/response queues
1043 * We support multiple queue sets per port if we have MSI-X, otherwise
1049 struct sge *s = &adap->sge; in setup_sge_queues()
1054 rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA]; in setup_sge_queues()
1056 if (!(adap->flags & CXGB4_USING_MSIX)) in setup_sge_queues()
1057 msix = -((int)s->intrq.abs_id + 1); in setup_sge_queues()
1060 struct net_device *dev = adap->port[i]; in setup_sge_queues()
1062 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset]; in setup_sge_queues()
1063 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; in setup_sge_queues()
1065 for (j = 0; j < pi->nqsets; j++, q++) { in setup_sge_queues()
1073 snprintf(adap->msix_info[msix].desc, in setup_sge_queues()
1074 sizeof(adap->msix_info[msix].desc), in setup_sge_queues()
1075 "%s-Rx%d", dev->name, j); in setup_sge_queues()
1076 q->msix = &adap->msix_info[msix]; in setup_sge_queues()
1079 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, in setup_sge_queues()
1080 msix, &q->fl, in setup_sge_queues()
1084 pi->tx_chan)); in setup_sge_queues()
1087 q->rspq.idx = j; in setup_sge_queues()
1088 memset(&q->stats, 0, sizeof(q->stats)); in setup_sge_queues()
1091 q = &s->ethrxq[pi->first_qset]; in setup_sge_queues()
1092 for (j = 0; j < pi->nqsets; j++, t++, q++) { in setup_sge_queues()
1095 q->rspq.cntxt_id, in setup_sge_queues()
1096 !!(adap->flags & CXGB4_SGE_DBQ_TIMER)); in setup_sge_queues()
1107 cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id; in setup_sge_queues()
1109 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], in setup_sge_queues()
1110 s->fw_evtq.cntxt_id, cmplqid); in setup_sge_queues()
1115 if (!is_t4(adap->params.chip)) { in setup_sge_queues()
1116 err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0], in setup_sge_queues()
1117 netdev_get_tx_queue(adap->port[0], 0) in setup_sge_queues()
1118 , s->fw_evtq.cntxt_id, false); in setup_sge_queues()
1123 t4_write_reg(adap, is_t4(adap->params.chip) ? in setup_sge_queues()
1126 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) | in setup_sge_queues()
1127 QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id)); in setup_sge_queues()
1130 dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err); in setup_sge_queues()
1159 if (skb->protocol == htons(ETH_P_FCOE)) in cxgb_select_queue()
1160 txq = skb->priority & 0x7; in cxgb_select_queue()
1167 if (dev->num_tc) { in cxgb_select_queue()
1171 ver = ip_hdr(skb)->version; in cxgb_select_queue()
1172 proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : in cxgb_select_queue()
1173 ip_hdr(skb)->protocol; in cxgb_select_queue()
1178 skb->encapsulation || in cxgb_select_queue()
1181 txq = txq % pi->nqsets; in cxgb_select_queue()
1191 while (unlikely(txq >= dev->real_num_tx_queues)) in cxgb_select_queue()
1192 txq -= dev->real_num_tx_queues; in cxgb_select_queue()
1197 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; in cxgb_select_queue()
1204 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { in closest_timer()
1205 delta = time - s->timer_val[i]; in closest_timer()
1207 delta = -delta; in closest_timer()
1220 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { in closest_thres()
1221 delta = thres - s->counter_val[i]; in closest_thres()
1223 delta = -delta; in closest_thres()
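closest_timer() and closest_thres() both return the index of the table entry nearest to the requested value, since the hardware supports only a small set of hold-off timers and packet-count thresholds. A standalone sketch of the same nearest-entry search, with an assumed timer table:

#include <stdio.h>

static unsigned int closest_entry(const unsigned int *tab, int len, int want)
{
	int i, delta, min_delta = 0x7fffffff;
	unsigned int best = 0;

	for (i = 0; i < len; i++) {
		delta = want - (int)tab[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			best = i;
		}
	}
	return best;		/* index of the closest table entry */
}

int main(void)
{
	/* Assumed example values, not the adapter's real timer table. */
	unsigned int timer_val[] = { 1, 5, 10, 50, 100, 200 };

	printf("closest to 37us -> index %u\n",
	       closest_entry(timer_val, 6, 37));
	return 0;
}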
1233 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
1235 * @us: the hold-off time in us, or 0 to disable timer
1236 * @cnt: the hold-off packet count, or 0 to disable counter
1238 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1244 struct adapter *adap = q->adap; in cxgb4_set_rspq_intr_params()
1253 new_idx = closest_thres(&adap->sge, cnt); in cxgb4_set_rspq_intr_params()
1254 if (q->desc && q->pktcnt_idx != new_idx) { in cxgb4_set_rspq_intr_params()
1259 FW_PARAMS_PARAM_YZ_V(q->cntxt_id); in cxgb4_set_rspq_intr_params()
1260 err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, in cxgb4_set_rspq_intr_params()
1265 q->pktcnt_idx = new_idx; in cxgb4_set_rspq_intr_params()
1268 us = us == 0 ? 6 : closest_timer(&adap->sge, us); in cxgb4_set_rspq_intr_params()
1269 q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0); in cxgb4_set_rspq_intr_params()
1275 netdev_features_t changed = dev->features ^ features; in cxgb_set_features()
1282 err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid, in cxgb_set_features()
1283 pi->viid_mirror, -1, -1, -1, -1, in cxgb_set_features()
1286 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX; in cxgb_set_features()
1292 if (IS_ERR_OR_NULL(adap->debugfs_root)) in setup_debugfs()
1293 return -1; in setup_debugfs()
1304 if ((adap->flags & CXGB4_FULL_INIT_DONE) && in cxgb4_port_mirror_free_rxq()
1305 !(adap->flags & CXGB4_SHUTTING_DOWN)) in cxgb4_port_mirror_free_rxq()
1306 cxgb4_quiesce_rx(&mirror_rxq->rspq); in cxgb4_port_mirror_free_rxq()
1308 if (adap->flags & CXGB4_USING_MSIX) { in cxgb4_port_mirror_free_rxq()
1309 cxgb4_clear_msix_aff(mirror_rxq->msix->vec, in cxgb4_port_mirror_free_rxq()
1310 mirror_rxq->msix->aff_mask); in cxgb4_port_mirror_free_rxq()
1311 free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq); in cxgb4_port_mirror_free_rxq()
1312 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); in cxgb4_port_mirror_free_rxq()
1315 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); in cxgb4_port_mirror_free_rxq()
1323 struct sge *s = &adap->sge; in cxgb4_port_mirror_alloc_queues()
1328 if (!pi->vi_mirror_count) in cxgb4_port_mirror_alloc_queues()
1331 if (s->mirror_rxq[pi->port_id]) in cxgb4_port_mirror_alloc_queues()
1334 mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL); in cxgb4_port_mirror_alloc_queues()
1336 return -ENOMEM; in cxgb4_port_mirror_alloc_queues()
1338 s->mirror_rxq[pi->port_id] = mirror_rxq; in cxgb4_port_mirror_alloc_queues()
1340 if (!(adap->flags & CXGB4_USING_MSIX)) in cxgb4_port_mirror_alloc_queues()
1341 msix = -((int)adap->sge.intrq.abs_id + 1); in cxgb4_port_mirror_alloc_queues()
1343 for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) { in cxgb4_port_mirror_alloc_queues()
1344 mirror_rxq = &s->mirror_rxq[pi->port_id][i]; in cxgb4_port_mirror_alloc_queues()
1354 mirror_rxq->msix = &adap->msix_info[msix]; in cxgb4_port_mirror_alloc_queues()
1355 snprintf(mirror_rxq->msix->desc, in cxgb4_port_mirror_alloc_queues()
1356 sizeof(mirror_rxq->msix->desc), in cxgb4_port_mirror_alloc_queues()
1357 "%s-mirrorrxq%d", dev->name, i); in cxgb4_port_mirror_alloc_queues()
1360 init_rspq(adap, &mirror_rxq->rspq, in cxgb4_port_mirror_alloc_queues()
1366 mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM; in cxgb4_port_mirror_alloc_queues()
1368 ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false, in cxgb4_port_mirror_alloc_queues()
1369 dev, msix, &mirror_rxq->fl, in cxgb4_port_mirror_alloc_queues()
1374 /* Setup MSI-X vectors for Mirror Rxqs */ in cxgb4_port_mirror_alloc_queues()
1375 if (adap->flags & CXGB4_USING_MSIX) { in cxgb4_port_mirror_alloc_queues()
1376 ret = request_irq(mirror_rxq->msix->vec, in cxgb4_port_mirror_alloc_queues()
1378 mirror_rxq->msix->desc, in cxgb4_port_mirror_alloc_queues()
1379 &mirror_rxq->rspq); in cxgb4_port_mirror_alloc_queues()
1383 cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec, in cxgb4_port_mirror_alloc_queues()
1384 &mirror_rxq->msix->aff_mask, i); in cxgb4_port_mirror_alloc_queues()
1388 cxgb4_enable_rx(adap, &mirror_rxq->rspq); in cxgb4_port_mirror_alloc_queues()
1392 rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); in cxgb4_port_mirror_alloc_queues()
1394 ret = -ENOMEM; in cxgb4_port_mirror_alloc_queues()
1398 mirror_rxq = &s->mirror_rxq[pi->port_id][0]; in cxgb4_port_mirror_alloc_queues()
1399 for (i = 0; i < pi->rss_size; i++) in cxgb4_port_mirror_alloc_queues()
1400 rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id; in cxgb4_port_mirror_alloc_queues()
1402 ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror); in cxgb4_port_mirror_alloc_queues()
1410 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); in cxgb4_port_mirror_alloc_queues()
1413 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); in cxgb4_port_mirror_alloc_queues()
1416 while (rxqid-- > 0) in cxgb4_port_mirror_alloc_queues()
1418 &s->mirror_rxq[pi->port_id][rxqid]); in cxgb4_port_mirror_alloc_queues()
1420 kfree(s->mirror_rxq[pi->port_id]); in cxgb4_port_mirror_alloc_queues()
1421 s->mirror_rxq[pi->port_id] = NULL; in cxgb4_port_mirror_alloc_queues()
1429 struct sge *s = &adap->sge; in cxgb4_port_mirror_free_queues()
1432 if (!pi->vi_mirror_count) in cxgb4_port_mirror_free_queues()
1435 if (!s->mirror_rxq[pi->port_id]) in cxgb4_port_mirror_free_queues()
1438 for (i = 0; i < pi->nmirrorqsets; i++) in cxgb4_port_mirror_free_queues()
1440 &s->mirror_rxq[pi->port_id][i]); in cxgb4_port_mirror_free_queues()
1442 kfree(s->mirror_rxq[pi->port_id]); in cxgb4_port_mirror_free_queues()
1443 s->mirror_rxq[pi->port_id] = NULL; in cxgb4_port_mirror_free_queues()
1450 int ret, idx = -1; in cxgb4_port_mirror_start()
1452 if (!pi->vi_mirror_count) in cxgb4_port_mirror_start()
1460 ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror, in cxgb4_port_mirror_start()
1461 dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, in cxgb4_port_mirror_start()
1462 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, in cxgb4_port_mirror_start()
1463 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); in cxgb4_port_mirror_start()
1465 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1467 pi->viid_mirror, ret); in cxgb4_port_mirror_start()
1475 ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx, in cxgb4_port_mirror_start()
1476 dev->dev_addr, true, NULL); in cxgb4_port_mirror_start()
1478 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1480 pi->viid_mirror, ret); in cxgb4_port_mirror_start()
1491 ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true, in cxgb4_port_mirror_start()
1495 dev_err(adap->pdev_dev, in cxgb4_port_mirror_start()
1497 pi->viid_mirror, ret); in cxgb4_port_mirror_start()
1507 if (!pi->vi_mirror_count) in cxgb4_port_mirror_stop()
1510 t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false, in cxgb4_port_mirror_stop()
1520 if (!pi->nmirrorqsets) in cxgb4_port_mirror_alloc()
1521 return -EOPNOTSUPP; in cxgb4_port_mirror_alloc()
1523 mutex_lock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_alloc()
1524 if (pi->viid_mirror) { in cxgb4_port_mirror_alloc()
1525 pi->vi_mirror_count++; in cxgb4_port_mirror_alloc()
1529 ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0, in cxgb4_port_mirror_alloc()
1530 &pi->viid_mirror); in cxgb4_port_mirror_alloc()
1534 pi->vi_mirror_count = 1; in cxgb4_port_mirror_alloc()
1536 if (adap->flags & CXGB4_FULL_INIT_DONE) { in cxgb4_port_mirror_alloc()
1546 mutex_unlock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_alloc()
1553 pi->vi_mirror_count = 0; in cxgb4_port_mirror_alloc()
1554 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); in cxgb4_port_mirror_alloc()
1555 pi->viid_mirror = 0; in cxgb4_port_mirror_alloc()
1558 mutex_unlock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_alloc()
1567 mutex_lock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_free()
1568 if (!pi->viid_mirror) in cxgb4_port_mirror_free()
1571 if (pi->vi_mirror_count > 1) { in cxgb4_port_mirror_free()
1572 pi->vi_mirror_count--; in cxgb4_port_mirror_free()
1579 pi->vi_mirror_count = 0; in cxgb4_port_mirror_free()
1580 t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); in cxgb4_port_mirror_free()
1581 pi->viid_mirror = 0; in cxgb4_port_mirror_free()
1584 mutex_unlock(&pi->vi_mirror_mutex); in cxgb4_port_mirror_free()
1588 * upper-layer driver support
1592 * Allocate an active-open TID and set it to the supplied value.
1596 int atid = -1; in cxgb4_alloc_atid()
1598 spin_lock_bh(&t->atid_lock); in cxgb4_alloc_atid()
1599 if (t->afree) { in cxgb4_alloc_atid()
1600 union aopen_entry *p = t->afree; in cxgb4_alloc_atid()
1602 atid = (p - t->atid_tab) + t->atid_base; in cxgb4_alloc_atid()
1603 t->afree = p->next; in cxgb4_alloc_atid()
1604 p->data = data; in cxgb4_alloc_atid()
1605 t->atids_in_use++; in cxgb4_alloc_atid()
1607 spin_unlock_bh(&t->atid_lock); in cxgb4_alloc_atid()
1613 * Release an active-open TID.
1617 union aopen_entry *p = &t->atid_tab[atid - t->atid_base]; in cxgb4_free_atid()
1619 spin_lock_bh(&t->atid_lock); in cxgb4_free_atid()
1620 p->next = t->afree; in cxgb4_free_atid()
1621 t->afree = p; in cxgb4_free_atid()
1622 t->atids_in_use--; in cxgb4_free_atid()
1623 spin_unlock_bh(&t->atid_lock); in cxgb4_free_atid()
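cxgb4_alloc_atid() and cxgb4_free_atid() keep free active-open TIDs on an intrusive free list: a free slot's next field points at the next free slot, allocation pops the head and stores the caller's cookie, and freeing pushes the slot back. A standalone sketch of that scheme (locking and the atid_base offset omitted; names are illustrative):

#include <stdio.h>
#include <stddef.h>

union aopen_slot {
	void *data;              /* in use: caller's cookie */
	union aopen_slot *next;  /* free: next free slot */
};

static union aopen_slot tab[4];
static union aopen_slot *afree;

static void init_free_list(int n)
{
	int i;

	for (i = 0; i < n - 1; i++)
		tab[i].next = &tab[i + 1];
	tab[n - 1].next = NULL;
	afree = &tab[0];
}

static int alloc_atid(void *data)
{
	union aopen_slot *p = afree;

	if (!p)
		return -1;
	afree = p->next;
	p->data = data;
	return p - tab;		/* the index doubles as the TID */
}

static void free_atid(int atid)
{
	tab[atid].next = afree;
	afree = &tab[atid];
}

int main(void)
{
	int a, b;

	init_free_list(4);
	a = alloc_atid("conn-a");
	b = alloc_atid("conn-b");
	printf("allocated atids %d and %d\n", a, b);
	free_atid(a);
	printf("re-allocated %d\n", alloc_atid("conn-c"));
	return 0;
}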
1634 spin_lock_bh(&t->stid_lock); in cxgb4_alloc_stid()
1636 stid = find_first_zero_bit(t->stid_bmap, t->nstids); in cxgb4_alloc_stid()
1637 if (stid < t->nstids) in cxgb4_alloc_stid()
1638 __set_bit(stid, t->stid_bmap); in cxgb4_alloc_stid()
1640 stid = -1; in cxgb4_alloc_stid()
1642 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1); in cxgb4_alloc_stid()
1644 stid = -1; in cxgb4_alloc_stid()
1647 t->stid_tab[stid].data = data; in cxgb4_alloc_stid()
1648 stid += t->stid_base; in cxgb4_alloc_stid()
1654 t->stids_in_use += 2; in cxgb4_alloc_stid()
1655 t->v6_stids_in_use += 2; in cxgb4_alloc_stid()
1657 t->stids_in_use++; in cxgb4_alloc_stid()
1660 spin_unlock_bh(&t->stid_lock); in cxgb4_alloc_stid()
1671 spin_lock_bh(&t->stid_lock); in cxgb4_alloc_sftid()
1673 stid = find_next_zero_bit(t->stid_bmap, in cxgb4_alloc_sftid()
1674 t->nstids + t->nsftids, t->nstids); in cxgb4_alloc_sftid()
1675 if (stid < (t->nstids + t->nsftids)) in cxgb4_alloc_sftid()
1676 __set_bit(stid, t->stid_bmap); in cxgb4_alloc_sftid()
1678 stid = -1; in cxgb4_alloc_sftid()
1680 stid = -1; in cxgb4_alloc_sftid()
1683 t->stid_tab[stid].data = data; in cxgb4_alloc_sftid()
1684 stid -= t->nstids; in cxgb4_alloc_sftid()
1685 stid += t->sftid_base; in cxgb4_alloc_sftid()
1686 t->sftids_in_use++; in cxgb4_alloc_sftid()
1688 spin_unlock_bh(&t->stid_lock); in cxgb4_alloc_sftid()
1698 if (t->nsftids && (stid >= t->sftid_base)) { in cxgb4_free_stid()
1699 stid -= t->sftid_base; in cxgb4_free_stid()
1700 stid += t->nstids; in cxgb4_free_stid()
1702 stid -= t->stid_base; in cxgb4_free_stid()
1705 spin_lock_bh(&t->stid_lock); in cxgb4_free_stid()
1707 __clear_bit(stid, t->stid_bmap); in cxgb4_free_stid()
1709 bitmap_release_region(t->stid_bmap, stid, 1); in cxgb4_free_stid()
1710 t->stid_tab[stid].data = NULL; in cxgb4_free_stid()
1711 if (stid < t->nstids) { in cxgb4_free_stid()
1713 t->stids_in_use -= 2; in cxgb4_free_stid()
1714 t->v6_stids_in_use -= 2; in cxgb4_free_stid()
1716 t->stids_in_use--; in cxgb4_free_stid()
1719 t->sftids_in_use--; in cxgb4_free_stid()
1722 spin_unlock_bh(&t->stid_lock); in cxgb4_free_stid()
1748 void **p = &t->tid_tab[tid - t->tid_base]; in cxgb4_queue_tid_release()
1750 spin_lock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1751 *p = adap->tid_release_head; in cxgb4_queue_tid_release()
1753 adap->tid_release_head = (void **)((uintptr_t)p | chan); in cxgb4_queue_tid_release()
1754 if (!adap->tid_release_task_busy) { in cxgb4_queue_tid_release()
1755 adap->tid_release_task_busy = true; in cxgb4_queue_tid_release()
1756 queue_work(adap->workq, &adap->tid_release_task); in cxgb4_queue_tid_release()
1758 spin_unlock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1771 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1772 while (adap->tid_release_head) { in process_tid_release_list()
1773 void **p = adap->tid_release_head; in process_tid_release_list()
1775 p = (void *)p - chan; in process_tid_release_list()
1777 adap->tid_release_head = *p; in process_tid_release_list()
1779 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
1785 mk_tid_release(skb, chan, p - adap->tids.tid_tab); in process_tid_release_list()
1787 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1789 adap->tid_release_task_busy = false; in process_tid_release_list()
1790 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
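cxgb4_queue_tid_release() chains pending releases through the tid table and stores the 2-bit channel number in the low bits of each list pointer; process_tid_release_list() recovers it with "& 3" and subtracts it back out to get the real pointer. A standalone sketch of that low-bit tagging, assuming pointer-aligned slots:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Entries in an array of pointers are at least 4-byte aligned, so
	 * the low two bits are free to carry a small tag (the channel).
	 */
	static void *tid_tab[8];
	void **p = &tid_tab[5];
	unsigned int chan = 2;			/* assumed channel, 0..3 */

	void **tagged = (void **)((uintptr_t)p | chan);

	unsigned int got_chan = (uintptr_t)tagged & 3;
	void **got_p = (void **)((uintptr_t)tagged - got_chan);

	printf("chan=%u slot=%td (expected 2 and 5)\n",
	       got_chan, got_p - tid_tab);
	return 0;
}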
1803 WARN_ON(tid_out_of_range(&adap->tids, tid)); in cxgb4_remove_tid()
1805 if (t->tid_tab[tid - adap->tids.tid_base]) { in cxgb4_remove_tid()
1806 t->tid_tab[tid - adap->tids.tid_base] = NULL; in cxgb4_remove_tid()
1807 atomic_dec(&t->conns_in_use); in cxgb4_remove_tid()
1808 if (t->hash_base && (tid >= t->hash_base)) { in cxgb4_remove_tid()
1810 atomic_sub(2, &t->hash_tids_in_use); in cxgb4_remove_tid()
1812 atomic_dec(&t->hash_tids_in_use); in cxgb4_remove_tid()
1815 atomic_sub(2, &t->tids_in_use); in cxgb4_remove_tid()
1817 atomic_dec(&t->tids_in_use); in cxgb4_remove_tid()
1836 unsigned int max_ftids = t->nftids + t->nsftids; in tid_init()
1837 unsigned int natids = t->natids; in tid_init()
1844 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); in tid_init()
1845 ftid_bmap_size = BITS_TO_LONGS(t->nftids); in tid_init()
1846 hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids); in tid_init()
1847 eotid_bmap_size = BITS_TO_LONGS(t->neotids); in tid_init()
1848 size = t->ntids * sizeof(*t->tid_tab) + in tid_init()
1849 natids * sizeof(*t->atid_tab) + in tid_init()
1850 t->nstids * sizeof(*t->stid_tab) + in tid_init()
1851 t->nsftids * sizeof(*t->stid_tab) + in tid_init()
1853 t->nhpftids * sizeof(*t->hpftid_tab) + in tid_init()
1855 max_ftids * sizeof(*t->ftid_tab) + in tid_init()
1857 t->neotids * sizeof(*t->eotid_tab) + in tid_init()
1860 t->tid_tab = kvzalloc(size, GFP_KERNEL); in tid_init()
1861 if (!t->tid_tab) in tid_init()
1862 return -ENOMEM; in tid_init()
1864 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; in tid_init()
1865 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; in tid_init()
1866 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids]; in tid_init()
1867 t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size]; in tid_init()
1868 t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids]; in tid_init()
1869 t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size]; in tid_init()
1870 t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids]; in tid_init()
1871 t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size]; in tid_init()
1872 t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids]; in tid_init()
1873 spin_lock_init(&t->stid_lock); in tid_init()
1874 spin_lock_init(&t->atid_lock); in tid_init()
1875 spin_lock_init(&t->ftid_lock); in tid_init()
1877 t->stids_in_use = 0; in tid_init()
1878 t->v6_stids_in_use = 0; in tid_init()
1879 t->sftids_in_use = 0; in tid_init()
1880 t->afree = NULL; in tid_init()
1881 t->atids_in_use = 0; in tid_init()
1882 atomic_set(&t->tids_in_use, 0); in tid_init()
1883 atomic_set(&t->conns_in_use, 0); in tid_init()
1884 atomic_set(&t->hash_tids_in_use, 0); in tid_init()
1885 atomic_set(&t->eotids_in_use, 0); in tid_init()
1889 while (--natids) in tid_init()
1890 t->atid_tab[natids - 1].next = &t->atid_tab[natids]; in tid_init()
1891 t->afree = t->atid_tab; in tid_init()
1895 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); in tid_init()
1897 if (!t->stid_base && in tid_init()
1898 CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in tid_init()
1899 __set_bit(0, t->stid_bmap); in tid_init()
1901 if (t->neotids) in tid_init()
1902 bitmap_zero(t->eotid_bmap, t->neotids); in tid_init()
1905 if (t->nhpftids) in tid_init()
1906 bitmap_zero(t->hpftid_bmap, t->nhpftids); in tid_init()
1907 bitmap_zero(t->ftid_bmap, t->nftids); in tid_init()
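tid_init() sizes a single kvzalloc() for all of the TID tables and bitmaps and then carves the regions out of it, each one starting where the previous ends. A reduced standalone sketch of the same pattern with just two tables and one bitmap (sizes assumed, calloc standing in for kvzalloc):

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG		(8 * sizeof(long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int ntids = 1024, nstids = 64;	/* assumed counts */
	unsigned long *stid_bmap;
	void **tid_tab, **stid_tab;
	size_t size;

	/* One zeroed allocation covering all three regions... */
	size = ntids * sizeof(void *) +
	       nstids * sizeof(void *) +
	       BITS_TO_LONGS(nstids) * sizeof(long);
	tid_tab = calloc(1, size);
	if (!tid_tab)
		return 1;

	/* ...then slice it: each region starts where the previous ends. */
	stid_tab = &tid_tab[ntids];
	stid_bmap = (unsigned long *)&stid_tab[nstids];

	printf("tid_tab=%p stid_tab=%p stid_bmap=%p\n",
	       (void *)tid_tab, (void *)stid_tab, (void *)stid_bmap);
	free(tid_tab);
	return 0;
}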
1912 * cxgb4_create_server - create an IP server
1935 return -ENOMEM; in cxgb4_create_server()
1941 req->local_port = sport; in cxgb4_create_server()
1942 req->peer_port = htons(0); in cxgb4_create_server()
1943 req->local_ip = sip; in cxgb4_create_server()
1944 req->peer_ip = htonl(0); in cxgb4_create_server()
1945 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server()
1946 req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); in cxgb4_create_server()
1947 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) | in cxgb4_create_server()
1954 /* cxgb4_create_server6 - create an IPv6 server
1976 return -ENOMEM; in cxgb4_create_server6()
1982 req->local_port = sport; in cxgb4_create_server6()
1983 req->peer_port = htons(0); in cxgb4_create_server6()
1984 req->local_ip_hi = *(__be64 *)(sip->s6_addr); in cxgb4_create_server6()
1985 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); in cxgb4_create_server6()
1986 req->peer_ip_hi = cpu_to_be64(0); in cxgb4_create_server6()
1987 req->peer_ip_lo = cpu_to_be64(0); in cxgb4_create_server6()
1988 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server6()
1989 req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); in cxgb4_create_server6()
1990 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) | in cxgb4_create_server6()
2009 return -ENOMEM; in cxgb4_remove_server()
2014 req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) : in cxgb4_remove_server()
2022 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2036 while (i < NMTUS - 1 && mtus[i + 1] <= mtu) in cxgb4_best_mtu()
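cxgb4_best_mtu() scans the sorted 16-entry MTU table and returns the largest entry that does not exceed the requested MTU (the while loop above). A standalone sketch with assumed table contents:

#include <stdio.h>

#define NMTUS 16

static unsigned int best_mtu(const unsigned short *mtus, unsigned short mtu,
			     unsigned int *idx)
{
	unsigned int i = 0;

	/* Largest table entry <= mtu (falls back to mtus[0] if none fit). */
	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}

int main(void)
{
	/* Assumed table contents, roughly like a default MTU table. */
	unsigned short mtus[NMTUS] = {
		 88,  256,  512,  576,  808, 1024, 1280, 1488,
		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
	};
	unsigned int idx;

	printf("best for 1400: %u (index %u)\n", best_mtu(mtus, 1400, &idx), idx);
	printf("best for 9216: %u (index %u)\n", best_mtu(mtus, 9216, &idx), idx);
	return 0;
}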
2045 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
2067 unsigned short data_size_align_mask = data_size_align - 1; in cxgb4_best_aligned_mtu()
2075 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) { in cxgb4_best_aligned_mtu()
2076 unsigned short data_size = mtus[mtu_idx] - header_size; in cxgb4_best_aligned_mtu()
2096 mtu_idx--; in cxgb4_best_aligned_mtu()
2103 mtu_idx - aligned_mtu_idx <= 1) in cxgb4_best_aligned_mtu()
2116 * cxgb4_port_chan - get the HW channel of a port
2123 return netdev2pinfo(dev)->tx_chan; in cxgb4_port_chan()
2128 * cxgb4_port_e2cchan - get the HW c-channel of a port
2131 * Return the HW RX c-channel of the given port.
2135 return netdev2pinfo(dev)->rx_cchan; in cxgb4_port_e2cchan()
2146 if (is_t4(adap->params.chip)) { in cxgb4_dbfifo_count()
2158 * cxgb4_port_viid - get the VI id of a port
2165 return netdev2pinfo(dev)->viid; in cxgb4_port_viid()
2170 * cxgb4_port_idx - get the index of a port
2177 return netdev2pinfo(dev)->port_id; in cxgb4_port_idx()
2186 spin_lock(&adap->stats_lock); in cxgb4_get_tcp_stats()
2188 spin_unlock(&adap->stats_lock); in cxgb4_get_tcp_stats()
2208 return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS); in cxgb4_flush_eq_cache()
2218 spin_lock(&adap->win0_lock); in read_eq_indices()
2222 spin_unlock(&adap->win0_lock); in read_eq_indices()
2246 delta = pidx - hw_pidx; in cxgb4_sync_txq_pidx()
2248 delta = size - hw_pidx + pidx; in cxgb4_sync_txq_pidx()
2250 if (is_t4(adap->params.chip)) in cxgb4_sync_txq_pidx()
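cxgb4_sync_txq_pidx() (and sync_txq_pidx() later on) compute how far the software producer index has moved past the hardware's copy: a plain subtraction when it has not wrapped, otherwise the ring size is added back in. A standalone sketch of that wraparound arithmetic, ring size assumed:

#include <stdio.h>

/* How far sw_pidx is ahead of hw_pidx on a ring of 'size' descriptors. */
static unsigned int ring_delta(unsigned int hw_pidx, unsigned int sw_pidx,
			       unsigned int size)
{
	if (sw_pidx >= hw_pidx)
		return sw_pidx - hw_pidx;
	return size - hw_pidx + sw_pidx;	/* producer wrapped past 0 */
}

int main(void)
{
	unsigned int size = 1024;		/* assumed ring size */

	printf("no wrap: %u\n", ring_delta(100, 160, size));	/* 60 */
	printf("wrapped: %u\n", ring_delta(1000, 8, size));	/* 32 */
	return 0;
}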
2274 offset = ((stag >> 8) * 32) + adap->vres.stag.start; in cxgb4_read_tpte()
2302 memaddr = offset - edc0_end; in cxgb4_read_tpte()
2306 memaddr = offset - edc1_end; in cxgb4_read_tpte()
2309 memaddr = offset - edc1_end; in cxgb4_read_tpte()
2310 } else if (is_t5(adap->params.chip)) { in cxgb4_read_tpte()
2316 memaddr = offset - mc0_end; in cxgb4_read_tpte()
2327 spin_lock(&adap->win0_lock); in cxgb4_read_tpte()
2329 spin_unlock(&adap->win0_lock); in cxgb4_read_tpte()
2333 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n", in cxgb4_read_tpte()
2335 return -EINVAL; in cxgb4_read_tpte()
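cxgb4_read_tpte() maps the flat stag-derived offset onto a (memory type, local address) pair by comparing it against the cumulative ends of EDC0, EDC1 and MC; the local address is the offset minus the end of the preceding region. A standalone sketch of that translation with assumed region sizes:

#include <stdio.h>

int main(void)
{
	/* Assumed region sizes; cumulative ends as in the driver. */
	unsigned int edc0_size = 4 << 20, edc1_size = 4 << 20, mc0_size = 64 << 20;
	unsigned int edc0_end = edc0_size;
	unsigned int edc1_end = edc0_end + edc1_size;
	unsigned int mc0_end = edc1_end + mc0_size;
	unsigned int offsets[] = { 0x1000, edc0_end + 0x20, edc1_end + 0x400 };

	for (int i = 0; i < 3; i++) {
		unsigned int offset = offsets[i], memaddr;
		const char *memtype;

		if (offset < edc0_end) {
			memtype = "EDC0";
			memaddr = offset;
		} else if (offset < edc1_end) {
			memtype = "EDC1";
			memaddr = offset - edc0_end;
		} else if (offset < mc0_end) {
			memtype = "MC0";
			memaddr = offset - edc1_end;
		} else {
			printf("offset %#x out of range\n", offset);
			continue;
		}
		printf("offset %#x -> %s + %#x\n", offset, memtype, memaddr);
	}
	return 0;
}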
2375 const struct net_device *netdev = neigh->dev; in check_neigh_update()
2379 parent = netdev->dev.parent; in check_neigh_update()
2380 if (parent && parent->driver == &cxgb4_driver.driver) in check_neigh_update()
2410 if (is_t4(adap->params.chip)) { in drain_db_fifo()
2429 spin_lock_irqsave(&q->db_lock, flags); in disable_txq_db()
2430 q->db_disabled = 1; in disable_txq_db()
2431 spin_unlock_irqrestore(&q->db_lock, flags); in disable_txq_db()
2436 spin_lock_irq(&q->db_lock); in enable_txq_db()
2437 if (q->db_pidx_inc) { in enable_txq_db()
2443 QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc)); in enable_txq_db()
2444 q->db_pidx_inc = 0; in enable_txq_db()
2446 q->db_disabled = 0; in enable_txq_db()
2447 spin_unlock_irq(&q->db_lock); in enable_txq_db()
2454 for_each_ethrxq(&adap->sge, i) in disable_dbs()
2455 disable_txq_db(&adap->sge.ethtxq[i].q); in disable_dbs()
2458 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in disable_dbs()
2461 for_each_ofldtxq(&adap->sge, i) { in disable_dbs()
2462 struct sge_uld_txq *txq = &txq_info->uldtxq[i]; in disable_dbs()
2464 disable_txq_db(&txq->q); in disable_dbs()
2469 disable_txq_db(&adap->sge.ctrlq[i].q); in disable_dbs()
2476 for_each_ethrxq(&adap->sge, i) in enable_dbs()
2477 enable_txq_db(adap, &adap->sge.ethtxq[i].q); in enable_dbs()
2480 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in enable_dbs()
2483 for_each_ofldtxq(&adap->sge, i) { in enable_dbs()
2484 struct sge_uld_txq *txq = &txq_info->uldtxq[i]; in enable_dbs()
2486 enable_txq_db(adap, &txq->q); in enable_dbs()
2491 enable_txq_db(adap, &adap->sge.ctrlq[i].q); in enable_dbs()
2498 if (adap->uld && adap->uld[type].handle) in notify_rdma_uld()
2499 adap->uld[type].control(adap->uld[type].handle, cmd); in notify_rdma_uld()
2511 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_full()
2525 spin_lock_irq(&q->db_lock); in sync_txq_pidx()
2526 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); in sync_txq_pidx()
2529 if (q->db_pidx != hw_pidx) { in sync_txq_pidx()
2533 if (q->db_pidx >= hw_pidx) in sync_txq_pidx()
2534 delta = q->db_pidx - hw_pidx; in sync_txq_pidx()
2536 delta = q->size - hw_pidx + q->db_pidx; in sync_txq_pidx()
2538 if (is_t4(adap->params.chip)) in sync_txq_pidx()
2544 QID_V(q->cntxt_id) | val); in sync_txq_pidx()
2547 q->db_disabled = 0; in sync_txq_pidx()
2548 q->db_pidx_inc = 0; in sync_txq_pidx()
2549 spin_unlock_irq(&q->db_lock); in sync_txq_pidx()
2558 for_each_ethrxq(&adap->sge, i) in recover_all_queues()
2559 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); in recover_all_queues()
2562 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in recover_all_queues()
2564 for_each_ofldtxq(&adap->sge, i) { in recover_all_queues()
2565 struct sge_uld_txq *txq = &txq_info->uldtxq[i]; in recover_all_queues()
2567 sync_txq_pidx(adap, &txq->q); in recover_all_queues()
2572 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); in recover_all_queues()
2581 if (is_t4(adap->params.chip)) { in process_db_drop()
2589 } else if (is_t5(adap->params.chip)) { in process_db_drop()
2600 dev_err(adap->pdev_dev, "doorbell drop recovery: " in process_db_drop()
2604 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); in process_db_drop()
2606 /* Re-enable BAR2 WC */ in process_db_drop()
2610 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_drop()
2616 if (is_t4(adap->params.chip)) { in t4_db_full()
2621 queue_work(adap->workq, &adap->db_full_task); in t4_db_full()
2627 if (is_t4(adap->params.chip)) { in t4_db_dropped()
2631 queue_work(adap->workq, &adap->db_drop_task); in t4_db_dropped()
2650 list_del(&adap->list_node); in detach_ulds()
2653 if (adap->uld && adap->uld[i].handle) in detach_ulds()
2654 adap->uld[i].state_change(adap->uld[i].handle, in detach_ulds()
2670 if (adap->uld && adap->uld[i].handle) in notify_ulds()
2671 adap->uld[i].state_change(adap->uld[i].handle, in notify_ulds()
2681 struct net_device *event_dev = ifa->idev->dev; in cxgb4_inet6addr_handler()
2689 if (event_dev->flags & IFF_MASTER) { in cxgb4_inet6addr_handler()
2693 cxgb4_clip_get(adap->port[0], in cxgb4_inet6addr_handler()
2697 cxgb4_clip_release(adap->port[0], in cxgb4_inet6addr_handler()
2709 parent = event_dev->dev.parent; in cxgb4_inet6addr_handler()
2711 if (parent && parent->driver == &cxgb4_driver.driver) { in cxgb4_inet6addr_handler()
2740 dev = adap->port[i]; in update_clip()
2754 * cxgb_up - enable the adapter
2765 struct sge *s = &adap->sge; in cxgb_up()
2776 if (adap->flags & CXGB4_USING_MSIX) { in cxgb_up()
2777 if (s->nd_msix_idx < 0) { in cxgb_up()
2778 err = -ENOMEM; in cxgb_up()
2782 err = request_irq(adap->msix_info[s->nd_msix_idx].vec, in cxgb_up()
2784 adap->msix_info[s->nd_msix_idx].desc, adap); in cxgb_up()
2792 err = request_irq(adap->pdev->irq, t4_intr_handler(adap), in cxgb_up()
2793 (adap->flags & CXGB4_USING_MSI) ? 0 in cxgb_up()
2795 adap->port[0]->name, adap); in cxgb_up()
2803 adap->flags |= CXGB4_FULL_INIT_DONE; in cxgb_up()
2813 free_irq(adap->msix_info[s->nd_msix_idx].vec, adap); in cxgb_up()
2815 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); in cxgb_up()
2825 cancel_work_sync(&adapter->tid_release_task); in cxgb_down()
2826 cancel_work_sync(&adapter->db_full_task); in cxgb_down()
2827 cancel_work_sync(&adapter->db_drop_task); in cxgb_down()
2828 adapter->tid_release_task_busy = false; in cxgb_down()
2829 adapter->tid_release_head = NULL; in cxgb_down()
2834 adapter->flags &= ~CXGB4_FULL_INIT_DONE; in cxgb_down()
2843 struct adapter *adapter = pi->adapter; in cxgb_open()
2848 if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_open()
2865 if (pi->nmirrorqsets) { in cxgb_open()
2866 mutex_lock(&pi->vi_mirror_mutex); in cxgb_open()
2874 mutex_unlock(&pi->vi_mirror_mutex); in cxgb_open()
2884 mutex_unlock(&pi->vi_mirror_mutex); in cxgb_open()
2891 struct adapter *adapter = pi->adapter; in cxgb_close()
2896 ret = t4_enable_pi_params(adapter, adapter->pf, pi, in cxgb_close()
2905 if (pi->nmirrorqsets) { in cxgb_close()
2906 mutex_lock(&pi->vi_mirror_mutex); in cxgb_close()
2909 mutex_unlock(&pi->vi_mirror_mutex); in cxgb_close()
2928 stid -= adap->tids.sftid_base; in cxgb4_create_server_filter()
2929 stid += adap->tids.nftids; in cxgb4_create_server_filter()
2933 f = &adap->tids.ftid_tab[stid]; in cxgb4_create_server_filter()
2941 if (f->valid) in cxgb4_create_server_filter()
2945 memset(&f->fs, 0, sizeof(struct ch_filter_specification)); in cxgb4_create_server_filter()
2946 f->fs.val.lport = be16_to_cpu(sport); in cxgb4_create_server_filter()
2947 f->fs.mask.lport = ~0; in cxgb4_create_server_filter()
2951 f->fs.val.lip[i] = val[i]; in cxgb4_create_server_filter()
2952 f->fs.mask.lip[i] = ~0; in cxgb4_create_server_filter()
2954 if (adap->params.tp.vlan_pri_map & PORT_F) { in cxgb4_create_server_filter()
2955 f->fs.val.iport = port; in cxgb4_create_server_filter()
2956 f->fs.mask.iport = mask; in cxgb4_create_server_filter()
2960 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) { in cxgb4_create_server_filter()
2961 f->fs.val.proto = IPPROTO_TCP; in cxgb4_create_server_filter()
2962 f->fs.mask.proto = ~0; in cxgb4_create_server_filter()
2965 f->fs.dirsteer = 1; in cxgb4_create_server_filter()
2966 f->fs.iq = queue; in cxgb4_create_server_filter()
2968 f->locked = 1; in cxgb4_create_server_filter()
2969 f->fs.rpttid = 1; in cxgb4_create_server_filter()
2974 f->tid = stid + adap->tids.ftid_base; in cxgb4_create_server_filter()
2994 stid -= adap->tids.sftid_base; in cxgb4_remove_server_filter()
2995 stid += adap->tids.nftids; in cxgb4_remove_server_filter()
2997 f = &adap->tids.ftid_tab[stid]; in cxgb4_remove_server_filter()
2999 f->locked = 0; in cxgb4_remove_server_filter()
3010 struct adapter *adapter = p->adapter; in cxgb_get_stats()
3016 spin_lock(&adapter->stats_lock); in cxgb_get_stats()
3018 spin_unlock(&adapter->stats_lock); in cxgb_get_stats()
3021 t4_get_port_stats_offset(adapter, p->tx_chan, &stats, in cxgb_get_stats()
3022 &p->stats_base); in cxgb_get_stats()
3023 spin_unlock(&adapter->stats_lock); in cxgb_get_stats()
3025 ns->tx_bytes = stats.tx_octets; in cxgb_get_stats()
3026 ns->tx_packets = stats.tx_frames; in cxgb_get_stats()
3027 ns->rx_bytes = stats.rx_octets; in cxgb_get_stats()
3028 ns->rx_packets = stats.rx_frames; in cxgb_get_stats()
3029 ns->multicast = stats.rx_mcast_frames; in cxgb_get_stats()
3032 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + in cxgb_get_stats()
3034 ns->rx_over_errors = 0; in cxgb_get_stats()
3035 ns->rx_crc_errors = stats.rx_fcs_err; in cxgb_get_stats()
3036 ns->rx_frame_errors = stats.rx_symbol_err; in cxgb_get_stats()
3037 ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 + in cxgb_get_stats()
3041 ns->rx_missed_errors = 0; in cxgb_get_stats()
3044 ns->tx_aborted_errors = 0; in cxgb_get_stats()
3045 ns->tx_carrier_errors = 0; in cxgb_get_stats()
3046 ns->tx_fifo_errors = 0; in cxgb_get_stats()
3047 ns->tx_heartbeat_errors = 0; in cxgb_get_stats()
3048 ns->tx_window_errors = 0; in cxgb_get_stats()
3050 ns->tx_errors = stats.tx_error_frames; in cxgb_get_stats()
3051 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + in cxgb_get_stats()
3052 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; in cxgb_get_stats()
3060 struct adapter *adapter = pi->adapter; in cxgb_ioctl()
3061 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; in cxgb_ioctl()
3065 if (pi->mdio_addr < 0) in cxgb_ioctl()
3066 return -EOPNOTSUPP; in cxgb_ioctl()
3067 data->phy_id = pi->mdio_addr; in cxgb_ioctl()
3071 if (mdio_phy_id_is_c45(data->phy_id)) { in cxgb_ioctl()
3072 prtad = mdio_phy_id_prtad(data->phy_id); in cxgb_ioctl()
3073 devad = mdio_phy_id_devad(data->phy_id); in cxgb_ioctl()
3074 } else if (data->phy_id < 32) { in cxgb_ioctl()
3075 prtad = data->phy_id; in cxgb_ioctl()
3077 data->reg_num &= 0x1f; in cxgb_ioctl()
3079 return -EINVAL; in cxgb_ioctl()
3081 mbox = pi->adapter->pf; in cxgb_ioctl()
3083 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad, in cxgb_ioctl()
3084 data->reg_num, &data->val_out); in cxgb_ioctl()
3086 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad, in cxgb_ioctl()
3087 data->reg_num, data->val_in); in cxgb_ioctl()
3090 return copy_to_user(req->ifr_data, &pi->tstamp_config, in cxgb_ioctl()
3091 sizeof(pi->tstamp_config)) ? in cxgb_ioctl()
3092 -EFAULT : 0; in cxgb_ioctl()
3094 if (copy_from_user(&pi->tstamp_config, req->ifr_data, in cxgb_ioctl()
3095 sizeof(pi->tstamp_config))) in cxgb_ioctl()
3096 return -EFAULT; in cxgb_ioctl()
3098 if (!is_t4(adapter->params.chip)) { in cxgb_ioctl()
3099 switch (pi->tstamp_config.tx_type) { in cxgb_ioctl()
3104 return -ERANGE; in cxgb_ioctl()
3107 switch (pi->tstamp_config.rx_filter) { in cxgb_ioctl()
3109 pi->rxtstamp = false; in cxgb_ioctl()
3113 cxgb4_ptprx_timestamping(pi, pi->port_id, in cxgb_ioctl()
3117 cxgb4_ptprx_timestamping(pi, pi->port_id, in cxgb_ioctl()
3125 pi->rxtstamp = true; in cxgb_ioctl()
3128 pi->tstamp_config.rx_filter = in cxgb_ioctl()
3130 return -ERANGE; in cxgb_ioctl()
3133 if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) && in cxgb_ioctl()
3134 (pi->tstamp_config.rx_filter == in cxgb_ioctl()
3136 if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0) in cxgb_ioctl()
3137 pi->ptp_enable = false; in cxgb_ioctl()
3140 if (pi->tstamp_config.rx_filter != in cxgb_ioctl()
3144 pi->ptp_enable = true; in cxgb_ioctl()
3148 switch (pi->tstamp_config.rx_filter) { in cxgb_ioctl()
3150 pi->rxtstamp = false; in cxgb_ioctl()
3153 pi->rxtstamp = true; in cxgb_ioctl()
3156 pi->tstamp_config.rx_filter = in cxgb_ioctl()
3158 return -ERANGE; in cxgb_ioctl()
3161 return copy_to_user(req->ifr_data, &pi->tstamp_config, in cxgb_ioctl()
3162 sizeof(pi->tstamp_config)) ? in cxgb_ioctl()
3163 -EFAULT : 0; in cxgb_ioctl()
3165 return -EOPNOTSUPP; in cxgb_ioctl()
3173 set_rxmode(dev, -1, false); in cxgb_set_rxmode()
3181 ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid, in cxgb_change_mtu()
3182 pi->viid_mirror, new_mtu, -1, -1, -1, -1, true); in cxgb_change_mtu()
3184 dev->mtu = new_mtu; in cxgb_change_mtu()
3207 err = t4_get_raw_vpd_params(adap, &adap->params.vpd); in cxgb4_mgmt_fill_vf_station_mac_addr()
3211 na = adap->params.vpd.na; in cxgb4_mgmt_fill_vf_station_mac_addr()
3227 for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev); in cxgb4_mgmt_fill_vf_station_mac_addr()
3229 macaddr[5] = adap->pf * nvfs + vf; in cxgb4_mgmt_fill_vf_station_mac_addr()
3230 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr); in cxgb4_mgmt_fill_vf_station_mac_addr()
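cxgb4_mgmt_fill_vf_station_mac_addr() derives one provisional station MAC per VF from the adapter's VPD base address by writing pf * nvfs + vf into the last octet. A standalone sketch of that derivation (base MAC, PF number and VF count are assumptions):

#include <stdio.h>

int main(void)
{
	/* Assumed values: base MAC from VPD, PF 4, 8 VFs per PF. */
	unsigned char base[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x00 };
	unsigned int pf = 4, nvfs = 8, vf;

	for (vf = 0; vf < nvfs; vf++) {
		unsigned char mac[6];

		for (int i = 0; i < 6; i++)
			mac[i] = base[i];
		mac[5] = pf * nvfs + vf;	/* unique last octet per PF/VF */
		printf("VF%u -> %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	}
	return 0;
}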
3237 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_mac()
3242 dev_err(pi->adapter->pdev_dev, in cxgb4_mgmt_set_vf_mac()
3245 return -EINVAL; in cxgb4_mgmt_set_vf_mac()
3248 dev_info(pi->adapter->pdev_dev, in cxgb4_mgmt_set_vf_mac()
3252 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac); in cxgb4_mgmt_set_vf_mac()
3260 struct adapter *adap = pi->adapter; in cxgb4_mgmt_get_vf_config()
3263 if (vf >= adap->num_vfs) in cxgb4_mgmt_get_vf_config()
3264 return -EINVAL; in cxgb4_mgmt_get_vf_config()
3265 vfinfo = &adap->vfinfo[vf]; in cxgb4_mgmt_get_vf_config()
3267 ivi->vf = vf; in cxgb4_mgmt_get_vf_config()
3268 ivi->max_tx_rate = vfinfo->tx_rate; in cxgb4_mgmt_get_vf_config()
3269 ivi->min_tx_rate = 0; in cxgb4_mgmt_get_vf_config()
3270 ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr); in cxgb4_mgmt_get_vf_config()
3271 ivi->vlan = vfinfo->vlan; in cxgb4_mgmt_get_vf_config()
3272 ivi->linkstate = vfinfo->link_state; in cxgb4_mgmt_get_vf_config()
3282 phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id; in cxgb4_mgmt_get_phys_port_id()
3283 ppid->id_len = sizeof(phy_port_id); in cxgb4_mgmt_get_phys_port_id()
3284 memcpy(ppid->id, &phy_port_id, ppid->id_len); in cxgb4_mgmt_get_phys_port_id()
3292 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_rate()
3299 if (vf >= adap->num_vfs) in cxgb4_mgmt_set_vf_rate()
3300 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3303 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3306 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3315 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, in cxgb4_mgmt_set_vf_rate()
3318 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3320 ret, adap->pf, vf); in cxgb4_mgmt_set_vf_rate()
3321 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3323 dev_info(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3325 adap->pf, vf); in cxgb4_mgmt_set_vf_rate()
3326 adap->vfinfo[vf].tx_rate = 0; in cxgb4_mgmt_set_vf_rate()
3332 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3334 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3338 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf); in cxgb4_mgmt_set_vf_rate()
3339 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3343 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3344 "Max tx rate %d for VF %d can't be > link-speed %u", in cxgb4_mgmt_set_vf_rate()
3346 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3351 pktsize = pktsize - sizeof(struct ethhdr) - 4; in cxgb4_mgmt_set_vf_rate()
3353 pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr); in cxgb4_mgmt_set_vf_rate()
3354 /* configure Traffic Class for rate-limiting */ in cxgb4_mgmt_set_vf_rate()
3360 pi->tx_chan, class_id, 0, in cxgb4_mgmt_set_vf_rate()
3363 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n", in cxgb4_mgmt_set_vf_rate()
3365 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3367 dev_info(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3375 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf, in cxgb4_mgmt_set_vf_rate()
3378 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
3380 ret, adap->pf, vf, class_id); in cxgb4_mgmt_set_vf_rate()
3381 return -EINVAL; in cxgb4_mgmt_set_vf_rate()
3383 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n", in cxgb4_mgmt_set_vf_rate()
3384 adap->pf, vf, class_id); in cxgb4_mgmt_set_vf_rate()
3385 adap->vfinfo[vf].tx_rate = max_tx_rate; in cxgb4_mgmt_set_vf_rate()
3393 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_vlan()
3396 if (vf >= adap->num_vfs || vlan > 4095 || qos > 7) in cxgb4_mgmt_set_vf_vlan()
3397 return -EINVAL; in cxgb4_mgmt_set_vf_vlan()
3400 return -EPROTONOSUPPORT; in cxgb4_mgmt_set_vf_vlan()
3402 ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan); in cxgb4_mgmt_set_vf_vlan()
3404 adap->vfinfo[vf].vlan = vlan; in cxgb4_mgmt_set_vf_vlan()
3408 dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n", in cxgb4_mgmt_set_vf_vlan()
3409 ret, (vlan ? "setting" : "clearing"), adap->pf, vf); in cxgb4_mgmt_set_vf_vlan()
3417 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_link_state()
3421 if (vf >= adap->num_vfs) in cxgb4_mgmt_set_vf_link_state()
3422 return -EINVAL; in cxgb4_mgmt_set_vf_link_state()
3438 return -EINVAL; in cxgb4_mgmt_set_vf_link_state()
3443 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, in cxgb4_mgmt_set_vf_link_state()
3446 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_link_state()
3448 ret, adap->pf, vf); in cxgb4_mgmt_set_vf_link_state()
3449 return -EINVAL; in cxgb4_mgmt_set_vf_link_state()
3452 adap->vfinfo[vf].link_state = link; in cxgb4_mgmt_set_vf_link_state()
3463 if (!is_valid_ether_addr(addr->sa_data)) in cxgb_set_mac_addr()
3464 return -EADDRNOTAVAIL; in cxgb_set_mac_addr()
3466 ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt, in cxgb_set_mac_addr()
3467 addr->sa_data, true, &pi->smt_idx); in cxgb_set_mac_addr()
3471 eth_hw_addr_set(dev, addr->sa_data); in cxgb_set_mac_addr()
3479 struct adapter *adap = pi->adapter; in cxgb_netpoll()
3481 if (adap->flags & CXGB4_USING_MSIX) { in cxgb_netpoll()
3483 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; in cxgb_netpoll()
3485 for (i = pi->nqsets; i; i--, rx++) in cxgb_netpoll()
3486 t4_sge_intr_msix(0, &rx->rspq); in cxgb_netpoll()
3495 struct adapter *adap = pi->adapter; in cxgb_set_tx_maxrate()
3503 return -ENOTSUPP; in cxgb_set_tx_maxrate()
3505 if (index < 0 || index > pi->nqsets - 1) in cxgb_set_tx_maxrate()
3506 return -EINVAL; in cxgb_set_tx_maxrate()
3508 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_set_tx_maxrate()
3509 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3512 return -EINVAL; in cxgb_set_tx_maxrate()
3517 if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) { in cxgb_set_tx_maxrate()
3518 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3520 index, e->idx, e->info.u.params.level); in cxgb_set_tx_maxrate()
3521 return -EBUSY; in cxgb_set_tx_maxrate()
3529 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3532 return -ERANGE; in cxgb_set_tx_maxrate()
3542 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3544 index, pi->port_id, err); in cxgb_set_tx_maxrate()
3558 p.u.params.channel = pi->tx_chan; in cxgb_set_tx_maxrate()
3563 p.u.params.pktsize = dev->mtu; in cxgb_set_tx_maxrate()
3567 return -ENOMEM; in cxgb_set_tx_maxrate()
3572 qe.class = e->idx; in cxgb_set_tx_maxrate()
3576 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
3584 switch (cls_flower->command) { in cxgb_setup_tc_flower()
3592 return -EOPNOTSUPP; in cxgb_setup_tc_flower()
3599 switch (cls_u32->command) { in cxgb_setup_tc_cls_u32()
3606 return -EOPNOTSUPP; in cxgb_setup_tc_cls_u32()
3616 if (!adap->tc_matchall) in cxgb_setup_tc_matchall()
3617 return -ENOMEM; in cxgb_setup_tc_matchall()
3619 switch (cls_matchall->command) { in cxgb_setup_tc_matchall()
3632 return -EOPNOTSUPP; in cxgb_setup_tc_matchall()
3642 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_setup_tc_block_ingress_cb()
3643 dev_err(adap->pdev_dev, in cxgb_setup_tc_block_ingress_cb()
3645 pi->port_id); in cxgb_setup_tc_block_ingress_cb()
3646 return -EINVAL; in cxgb_setup_tc_block_ingress_cb()
3650 return -EOPNOTSUPP; in cxgb_setup_tc_block_ingress_cb()
3660 return -EOPNOTSUPP; in cxgb_setup_tc_block_ingress_cb()
3671 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { in cxgb_setup_tc_block_egress_cb()
3672 dev_err(adap->pdev_dev, in cxgb_setup_tc_block_egress_cb()
3674 pi->port_id); in cxgb_setup_tc_block_egress_cb()
3675 return -EINVAL; in cxgb_setup_tc_block_egress_cb()
3679 return -EOPNOTSUPP; in cxgb_setup_tc_block_egress_cb()
3688 return -EOPNOTSUPP; in cxgb_setup_tc_block_egress_cb()
3696 if (!is_ethofld(adap) || !adap->tc_mqprio) in cxgb_setup_tc_mqprio()
3697 return -ENOMEM; in cxgb_setup_tc_mqprio()
3711 pi->tc_block_shared = f->block_shared; in cxgb_setup_tc_block()
3712 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { in cxgb_setup_tc_block()
3733 return -EOPNOTSUPP; in cxgb_setup_tc()
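/* A condensed sketch (not the driver's exact code) of how an ingress
 * callback like cxgb_setup_tc_block_ingress_cb() above is commonly wired
 * to a flow block; the list name is an assumed placeholder.  The real
 * cxgb_setup_tc_block() additionally handles shared blocks and the
 * FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS binder type seen above.
 */
static LIST_HEAD(example_block_cb_list);

static int example_setup_tc_block(struct net_device *dev,
				  struct flow_block_offload *f)
{
	return flow_block_cb_setup_simple(f, &example_block_cb_list,
					  cxgb_setup_tc_block_ingress_cb,
					  dev, dev, true);
}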
3742 struct adapter *adapter = pi->adapter; in cxgb_udp_tunnel_unset_port()
3746 switch (ti->type) { in cxgb_udp_tunnel_unset_port()
3748 adapter->vxlan_port = 0; in cxgb_udp_tunnel_unset_port()
3752 adapter->geneve_port = 0; in cxgb_udp_tunnel_unset_port()
3756 return -EINVAL; in cxgb_udp_tunnel_unset_port()
3762 if (!adapter->rawf_cnt) in cxgb_udp_tunnel_unset_port()
3766 ret = t4_free_raw_mac_filt(adapter, pi->viid, in cxgb_udp_tunnel_unset_port()
3768 adapter->rawf_start + pi->port_id, in cxgb_udp_tunnel_unset_port()
3769 1, pi->port_id, false); in cxgb_udp_tunnel_unset_port()
3785 struct adapter *adapter = pi->adapter; in cxgb_udp_tunnel_set_port()
3789 switch (ti->type) { in cxgb_udp_tunnel_set_port()
3791 adapter->vxlan_port = ti->port; in cxgb_udp_tunnel_set_port()
3793 VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F); in cxgb_udp_tunnel_set_port()
3796 adapter->geneve_port = ti->port; in cxgb_udp_tunnel_set_port()
3798 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F); in cxgb_udp_tunnel_set_port()
3801 return -EINVAL; in cxgb_udp_tunnel_set_port()
3813 ret = t4_alloc_raw_mac_filt(adapter, pi->viid, in cxgb_udp_tunnel_set_port()
3816 adapter->rawf_start + pi->port_id, in cxgb_udp_tunnel_set_port()
3817 1, pi->port_id, false); in cxgb_udp_tunnel_set_port()
3820 be16_to_cpu(ti->port)); in cxgb_udp_tunnel_set_port()
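/* Summary of the two UDP tunnel callbacks above, grounded in the fragments
 * shown: adding a VXLAN/GENEVE port stores it in the adapter, programs it
 * into the matching VXLAN_V()/GENEVE_V() control register together with
 * the enable flag, and allocates a raw MAC filter at index
 * rawf_start + port_id for the tunnelled traffic on this VI; removing the
 * port clears the stored value and frees that same filter.  Nothing is
 * programmed when the adapter exposes no raw filters (rawf_cnt == 0).
 */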
3842 struct adapter *adapter = pi->adapter; in cxgb_features_check()
3844 if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6) in cxgb_features_check()
3848 if (!skb->encapsulation || cxgb_encap_offload_supported(skb)) in cxgb_features_check()
3906 strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); in cxgb4_mgmt_get_drvinfo()
3907 strscpy(info->bus_info, pci_name(adapter->pdev), in cxgb4_mgmt_get_drvinfo()
3908 sizeof(info->bus_info)); in cxgb4_mgmt_get_drvinfo()
3928 if (pci_channel_offline(adap->pdev)) in t4_fatal_err()
3936 struct net_device *dev = adap->port[port]; in t4_fatal_err()
3947 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); in t4_fatal_err()
3948 queue_work(adap->workq, &adap->fatal_err_notify_task); in t4_fatal_err()
3960 if (adap->vres.ocq.size) { in setup_memwin_rdma()
3966 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); in setup_memwin_rdma()
3967 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; in setup_memwin_rdma()
3973 adap->vres.ocq.start); in setup_memwin_rdma()
4006 if (!adapter->hma.sgt) in adap_free_hma_mem()
4009 if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) { in adap_free_hma_mem()
4010 dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl, in adap_free_hma_mem()
4011 adapter->hma.sgt->nents, DMA_BIDIRECTIONAL); in adap_free_hma_mem()
4012 adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG; in adap_free_hma_mem()
4015 for_each_sg(adapter->hma.sgt->sgl, iter, in adap_free_hma_mem()
4016 adapter->hma.sgt->orig_nents, i) { in adap_free_hma_mem()
4022 kfree(adapter->hma.phy_addr); in adap_free_hma_mem()
4023 sg_free_table(adapter->hma.sgt); in adap_free_hma_mem()
4024 kfree(adapter->hma.sgt); in adap_free_hma_mem()
4025 adapter->hma.sgt = NULL; in adap_free_hma_mem()
4044 CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6) in adap_config_hma()
4050 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, in adap_config_hma()
4060 dev_err(adapter->pdev_dev, in adap_config_hma()
4061 "HMA size %uMB beyond bounds(%u-%lu)MB\n", in adap_config_hma()
4063 return -EINVAL; in adap_config_hma()
4068 adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL); in adap_config_hma()
4069 if (unlikely(!adapter->hma.sgt)) { in adap_config_hma()
4070 dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n"); in adap_config_hma()
4071 return -ENOMEM; in adap_config_hma()
4073 sgt = adapter->hma.sgt; in adap_config_hma()
4076 sgt->orig_nents = (hma_size << 20) / (page_size << page_order); in adap_config_hma()
4077 if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) { in adap_config_hma()
4078 dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n"); in adap_config_hma()
4079 kfree(adapter->hma.sgt); in adap_config_hma()
4080 adapter->hma.sgt = NULL; in adap_config_hma()
4081 return -ENOMEM; in adap_config_hma()
4084 sgl = adapter->hma.sgt->sgl; in adap_config_hma()
4085 node = dev_to_node(adapter->pdev_dev); in adap_config_hma()
4086 for_each_sg(sgl, iter, sgt->orig_nents, i) { in adap_config_hma()
4090 dev_err(adapter->pdev_dev, in adap_config_hma()
4092 ret = -ENOMEM; in adap_config_hma()
4098 sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents, in adap_config_hma()
4100 if (!sgt->nents) { in adap_config_hma()
4101 dev_err(adapter->pdev_dev, in adap_config_hma()
4103 ret = -ENOMEM; in adap_config_hma()
4106 adapter->hma.flags |= HMA_DMA_MAPPED_FLAG; in adap_config_hma()
4108 adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t), in adap_config_hma()
4110 if (unlikely(!adapter->hma.phy_addr)) in adap_config_hma()
4113 for_each_sg(sgl, iter, sgt->nents, i) { in adap_config_hma()
4115 adapter->hma.phy_addr[i] = sg_dma_address(iter); in adap_config_hma()
4118 ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD); in adap_config_hma()
4127 eoc = (i == ncmds - 1) ? 1 : 0; in adap_config_hma()
4132 if (i == ncmds - 1) { in adap_config_hma()
4133 naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD; in adap_config_hma()
4158 cpu_to_be64(adapter->hma.phy_addr[j + k]); in adap_config_hma()
4160 ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd, in adap_config_hma()
4163 dev_err(adapter->pdev_dev, in adap_config_hma()
4170 dev_info(adapter->pdev_dev, in adap_config_hma()
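/* Putting the HMA fragments above together, adap_config_hma() proceeds,
 * in outline, as follows:
 *   1. query the firmware for the HMA size in MB and bounds-check it;
 *   2. allocate an sg_table with (hma_size << 20) / (page_size << page_order)
 *      entries and back each entry with pages from the adapter's NUMA node;
 *   3. dma_map_sg() the table and cache every segment's DMA address in
 *      hma.phy_addr[];
 *   4. hand the addresses to firmware in DIV_ROUND_UP(nents,
 *      HMA_MAX_ADDR_IN_CMD) mailbox commands, setting the end-of-command
 *      (eoc) flag on the last one, which carries whatever addresses remain.
 * adap_free_hma_mem() undoes this in reverse: dma_unmap_sg(), free the
 * pages, free the phy_addr array and the sg_table.
 */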
4189 dev_err(adap->pdev_dev, in adap_init1()
4196 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | in adap_init1()
4198 c->cfvalid_to_len16 = htonl(FW_LEN16(*c)); in adap_init1()
4199 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c); in adap_init1()
4203 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | in adap_init1()
4205 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); in adap_init1()
4209 ret = t4_config_glbl_rss(adap, adap->pf, in adap_init1()
4216 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64, in adap_init1()
4226 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12)); in adap_init1()
4232 adap->params.tp.tx_modq_map = 0xE4; in adap_init1()
4234 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map)); in adap_init1()
4260 return t4_early_init(adap, adap->pf); in adap_init1()
4278 * them) but need to be explicitly set if we're using hard-coded
4282 * Configuration Files and hard-coded initialization ...
4287 * Fix up various Host-Dependent Parameters like Page Size, Cache in adap_init0_tweaks()
4297 dev_err(&adapter->pdev->dev, in adap_init0_tweaks()
4316 /* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
4379 /* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
4392 phy_info = find_phy_info(adap->pdev->device); in adap_init0_phy()
4394 dev_warn(adap->pdev_dev, in adap_init0_phy()
4396 return -EOPNOTSUPP; in adap_init0_phy()
4402 * override the PHY firmware File in flash. in adap_init0_phy()
4404 ret = request_firmware_direct(&phyf, phy_info->phy_fw_file, in adap_init0_phy()
4405 adap->pdev_dev); in adap_init0_phy()
4413 dev_err(adap->pdev_dev, "unable to find PHY Firmware image " in adap_init0_phy()
4415 phy_info->phy_fw_file, -ret); in adap_init0_phy()
4416 if (phy_info->phy_flash) { in adap_init0_phy()
4420 dev_warn(adap->pdev_dev, "continuing with, on-adapter " in adap_init0_phy()
4430 ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version, in adap_init0_phy()
4431 (u8 *)phyf->data, phyf->size); in adap_init0_phy()
4433 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n", in adap_init0_phy()
4434 -ret); in adap_init0_phy()
4438 if (phy_info->phy_fw_version) in adap_init0_phy()
4439 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data, in adap_init0_phy()
4440 phyf->size); in adap_init0_phy()
4441 dev_info(adap->pdev_dev, "Successfully transferred PHY " in adap_init0_phy()
4443 phy_info->phy_fw_file, new_phy_fw_ver); in adap_init0_phy()
4469 ret = t4_fw_reset(adapter, adapter->mbox, in adap_init0_config()
4475 /* If this is a 10Gb/s-BT adapter make sure the chip-external in adap_init0_config()
4476 * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs in adap_init0_config()
4480 if (is_10gbt_device(adapter->pdev->device)) { in adap_init0_config()
4490 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { in adap_init0_config()
4501 dev_err(adapter->pdev_dev, "Device %d is not supported\n", in adap_init0_config()
4502 adapter->pdev->device); in adap_init0_config()
4503 ret = -EINVAL; in adap_init0_config()
4507 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); in adap_init0_config()
4519 if (cf->size >= FLASH_CFG_MAX_SIZE) in adap_init0_config()
4520 ret = -ENOMEM; in adap_init0_config()
4524 ret = t4_query_params(adapter, adapter->mbox, in adap_init0_config()
4525 adapter->pf, 0, 1, params, val); in adap_init0_config()
4537 size_t resid = cf->size & 0x3; in adap_init0_config()
4538 size_t size = cf->size & ~0x3; in adap_init0_config()
4539 __be32 *data = (__be32 *)cf->data; in adap_init0_config()
4544 spin_lock(&adapter->win0_lock); in adap_init0_config()
4562 spin_unlock(&adapter->win0_lock); in adap_init0_config()
4578 ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0, in adap_init0_config()
4585 dev_warn(adapter->pdev_dev, in adap_init0_config()
4605 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0_config()
4614 if (ret == -ENOENT) { in adap_init0_config()
4621 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, in adap_init0_config()
4634 dev_warn(adapter->pdev_dev, "Configuration File checksum "\ in adap_init0_config()
4646 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0_config()
4662 dev_err(adapter->pdev_dev, in adap_init0_config()
4665 if (is_t6(adapter->params.chip)) { in adap_init0_config()
4669 dev_info(adapter->pdev_dev, "Successfully enabled " in adap_init0_config()
4677 ret = t4_fw_initialize(adapter, adapter->mbox); in adap_init0_config()
4684 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ in adap_init0_config()
4695 if (config_issued && ret != -ENOENT) in adap_init0_config()
4696 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n", in adap_init0_config()
4697 config_name, -ret); in adap_init0_config()
4711 .intfver_ri = FW_INTFVER(T4, RI),
4724 .intfver_ri = FW_INTFVER(T5, RI),
4738 .intfver_ri = FW_INTFVER(T6, RI),
4779 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, in adap_init0()
4782 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", in adap_init0()
4786 if (ret == adap->mbox) in adap_init0()
4787 adap->flags |= CXGB4_MASTER_PF; in adap_init0()
4802 if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) { in adap_init0()
4812 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
4814 dev_err(adap->pdev_dev, in adap_init0()
4816 CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
4817 return -EINVAL; in adap_init0()
4825 ret = -ENOMEM; in adap_init0()
4830 ret = request_firmware(&fw, fw_info->fw_mod_name, in adap_init0()
4831 adap->pdev_dev); in adap_init0()
4833 dev_err(adap->pdev_dev, in adap_init0()
4835 fw_info->fw_mod_name, ret); in adap_init0()
4837 fw_data = fw->data; in adap_init0()
4838 fw_size = fw->size; in adap_init0()
4859 dev_err(adap->pdev_dev, in adap_init0()
4862 dev_info(adap->pdev_dev, "Coming up as %s: "\ in adap_init0()
4864 adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE"); in adap_init0()
4866 dev_info(adap->pdev_dev, "Coming up as MASTER: "\ in adap_init0()
4874 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, in adap_init0()
4881 dev_err(adap->pdev_dev, "firmware doesn't support " in adap_init0()
4888 * override the Configuration File in flash. in adap_init0()
4891 if (ret == -ENOENT) { in adap_init0()
4892 dev_err(adap->pdev_dev, "no Configuration File " in adap_init0()
4897 dev_err(adap->pdev_dev, "could not initialize " in adap_init0()
4898 "adapter, error %d\n", -ret); in adap_init0()
4909 dev_err(adap->pdev_dev, in adap_init0()
4925 ret = t4_get_vpd_params(adap, &adap->params.vpd); in adap_init0()
4937 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); in adap_init0()
4941 adap->params.nports = hweight32(port_vec); in adap_init0()
4942 adap->params.portvec = port_vec; in adap_init0()
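/* A minimal sketch of the firmware parameter-query idiom used throughout
 * adap_init0(): each parameter is a mnemonic/index pair packed with the
 * FW_PARAMS_* macros and read back with t4_query_params().  The port
 * vector query above follows this shape; the helper below is illustrative
 * rather than the driver's verbatim code.
 */
static int example_query_portvec(struct adapter *adap, u32 *portvec)
{
	u32 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);

	/* one parameter in, one 32-bit value out */
	return t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param,
			       portvec);
}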
4957 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
4961 adap->sge.dbqtimer_tick = val[0]; in adap_init0()
4963 ARRAY_SIZE(adap->sge.dbqtimer_val), in adap_init0()
4964 adap->sge.dbqtimer_val); in adap_init0()
4968 adap->flags |= CXGB4_SGE_DBQ_TIMER; in adap_init0()
4970 if (is_bypass_device(adap->pdev->device)) in adap_init0()
4971 adap->params.bypass = 1; in adap_init0()
4982 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); in adap_init0()
4985 adap->sge.egr_start = val[0]; in adap_init0()
4986 adap->l2t_start = val[1]; in adap_init0()
4987 adap->l2t_end = val[2]; in adap_init0()
4988 adap->tids.ftid_base = val[3]; in adap_init0()
4989 adap->tids.nftids = val[4] - val[3] + 1; in adap_init0()
4990 adap->sge.ingr_start = val[5]; in adap_init0()
4992 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { in adap_init0()
4995 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5000 adap->tids.hpftid_base = val[0]; in adap_init0()
5001 adap->tids.nhpftids = val[1] - val[0] + 1; in adap_init0()
5008 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5011 adap->rawf_start = val[0]; in adap_init0()
5012 adap->rawf_cnt = val[1] - val[0] + 1; in adap_init0()
5015 adap->tids.tid_base = in adap_init0()
5027 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5030 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; in adap_init0()
5031 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; in adap_init0()
5033 adap->sge.egr_map = kcalloc(adap->sge.egr_sz, in adap_init0()
5034 sizeof(*adap->sge.egr_map), GFP_KERNEL); in adap_init0()
5035 if (!adap->sge.egr_map) { in adap_init0()
5036 ret = -ENOMEM; in adap_init0()
5040 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, in adap_init0()
5041 sizeof(*adap->sge.ingr_map), GFP_KERNEL); in adap_init0()
5042 if (!adap->sge.ingr_map) { in adap_init0()
5043 ret = -ENOMEM; in adap_init0()
5050 adap->sge.starving_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL); in adap_init0()
5051 if (!adap->sge.starving_fl) { in adap_init0()
5052 ret = -ENOMEM; in adap_init0()
5056 adap->sge.txq_maperr = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL); in adap_init0()
5057 if (!adap->sge.txq_maperr) { in adap_init0()
5058 ret = -ENOMEM; in adap_init0()
5063 adap->sge.blocked_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL); in adap_init0()
5064 if (!adap->sge.blocked_fl) { in adap_init0()
5065 ret = -ENOMEM; in adap_init0()
5072 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5075 adap->clipt_start = val[0]; in adap_init0()
5076 adap->clipt_end = val[1]; in adap_init0()
5080 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val); in adap_init0()
5086 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16; in adap_init0()
5088 adap->params.nsched_cls = val[0]; in adap_init0()
5094 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
5099 adap->flags |= CXGB4_FW_OFLD_CONN; in adap_init0()
5100 adap->tids.aftid_base = val[0]; in adap_init0()
5101 adap->tids.aftid_end = val[1]; in adap_init0()
5111 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); in adap_init0()
5119 if (is_t4(adap->params.chip)) { in adap_init0()
5120 adap->params.ulptx_memwrite_dsgl = false; in adap_init0()
5123 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5125 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); in adap_init0()
5130 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5132 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0); in adap_init0()
5135 if (is_t4(adap->params.chip)) { in adap_init0()
5136 adap->params.filter2_wr_support = false; in adap_init0()
5139 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5141 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0); in adap_init0()
5149 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5151 adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0); in adap_init0()
5161 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0()
5171 adap->params.offload = 1; in adap_init0()
5176 /* query offload-related parameters */ in adap_init0()
5183 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
5187 adap->tids.ntids = val[0]; in adap_init0()
5188 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); in adap_init0()
5189 adap->tids.stid_base = val[1]; in adap_init0()
5190 adap->tids.nstids = val[2] - val[1] + 1; in adap_init0()
5200 if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) { in adap_init0()
5201 adap->tids.sftid_base = adap->tids.ftid_base + in adap_init0()
5202 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
5203 adap->tids.nsftids = adap->tids.nftids - in adap_init0()
5204 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
5205 adap->tids.nftids = adap->tids.sftid_base - in adap_init0()
5206 adap->tids.ftid_base; in adap_init0()
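/* Worked example of the split above (illustrative numbers): with
 * nftids == 496, DIV_ROUND_UP(496, 3) == 166, so the first 166 filter IDs
 * stay as normal filters (nftids becomes 166) while the remaining 330 IDs
 * starting at sftid_base are set aside as server filter IDs (nsftids).
 */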
5208 adap->vres.ddp.start = val[3]; in adap_init0()
5209 adap->vres.ddp.size = val[4] - val[3] + 1; in adap_init0()
5210 adap->params.ofldq_wr_cred = val[5]; in adap_init0()
5215 adap->num_ofld_uld += 1; in adap_init0()
5221 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5224 adap->tids.eotid_base = val[0]; in adap_init0()
5225 adap->tids.neotids = min_t(u32, MAX_ATIDS, in adap_init0()
5226 val[1] - val[0] + 1); in adap_init0()
5227 adap->params.ethofld = 1; in adap_init0()
5238 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
5242 adap->vres.stag.start = val[0]; in adap_init0()
5243 adap->vres.stag.size = val[1] - val[0] + 1; in adap_init0()
5244 adap->vres.rq.start = val[2]; in adap_init0()
5245 adap->vres.rq.size = val[3] - val[2] + 1; in adap_init0()
5246 adap->vres.pbl.start = val[4]; in adap_init0()
5247 adap->vres.pbl.size = val[5] - val[4] + 1; in adap_init0()
5251 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5254 adap->vres.srq.start = val[0]; in adap_init0()
5255 adap->vres.srq.size = val[1] - val[0] + 1; in adap_init0()
5257 if (adap->vres.srq.size) { in adap_init0()
5258 adap->srq = t4_init_srq(adap->vres.srq.size); in adap_init0()
5259 if (!adap->srq) in adap_init0()
5260 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n"); in adap_init0()
5269 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, in adap_init0()
5273 adap->vres.qp.start = val[0]; in adap_init0()
5274 adap->vres.qp.size = val[1] - val[0] + 1; in adap_init0()
5275 adap->vres.cq.start = val[2]; in adap_init0()
5276 adap->vres.cq.size = val[3] - val[2] + 1; in adap_init0()
5277 adap->vres.ocq.start = val[4]; in adap_init0()
5278 adap->vres.ocq.size = val[5] - val[4] + 1; in adap_init0()
5282 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, in adap_init0()
5285 adap->params.max_ordird_qp = 8; in adap_init0()
5286 adap->params.max_ird_adapter = 32 * adap->tids.ntids; in adap_init0()
5289 adap->params.max_ordird_qp = val[0]; in adap_init0()
5290 adap->params.max_ird_adapter = val[1]; in adap_init0()
5292 dev_info(adap->pdev_dev, in adap_init0()
5294 adap->params.max_ordird_qp, in adap_init0()
5295 adap->params.max_ird_adapter); in adap_init0()
5299 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, in adap_init0()
5301 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0); in adap_init0()
5305 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, in adap_init0()
5307 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0); in adap_init0()
5308 adap->num_ofld_uld += 2; in adap_init0()
5313 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5317 adap->vres.iscsi.start = val[0]; in adap_init0()
5318 adap->vres.iscsi.size = val[1] - val[0] + 1; in adap_init0()
5319 if (is_t6(adap->params.chip)) { in adap_init0()
5322 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
5325 adap->vres.ppod_edram.start = val[0]; in adap_init0()
5326 adap->vres.ppod_edram.size = in adap_init0()
5327 val[1] - val[0] + 1; in adap_init0()
5329 dev_info(adap->pdev_dev, in adap_init0()
5332 adap->vres.ppod_edram.size); in adap_init0()
5336 adap->num_ofld_uld += 2; in adap_init0()
5342 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5345 if (ret != -EINVAL) in adap_init0()
5348 adap->vres.ncrypto_fc = val[0]; in adap_init0()
5350 adap->num_ofld_uld += 1; in adap_init0()
5356 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
5360 adap->vres.key.start = val[0]; in adap_init0()
5361 adap->vres.key.size = val[1] - val[0] + 1; in adap_init0()
5362 adap->num_uld += 1; in adap_init0()
5364 adap->params.crypto = ntohs(caps_cmd.cryptocaps); in adap_init0()
5372 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); in adap_init0()
5378 * a multiple of 8 +/- 4 bytes apart near this popular MTU. in adap_init0()
5383 * options are in use, then we have a 20-byte IP header and a in adap_init0()
5384 * 20-byte TCP header. In this case, a 1500-byte MSS would in adap_init0()
5385 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes in adap_init0()
5388 * is a multiple of 8. On the other hand, if 12-byte TCP Time in adap_init0()
5394 if (adap->params.mtus[i] == 1492) { in adap_init0()
5395 adap->params.mtus[i] = 1488; in adap_init0()
5399 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in adap_init0()
5400 adap->params.b_wnd); in adap_init0()
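/* Worked numbers behind the 1492 -> 1488 swap above (assuming IPv4 with a
 * 20-byte IP header and a 20-byte TCP header):
 *   MTU 1500: payload 1500 - 40 = 1460, a multiple of 4 but not of 8;
 *             with 12-byte TCP timestamps: 1460 - 12 = 1448, a multiple of 8.
 *   MTU 1488: payload 1488 - 40 = 1448, a multiple of 8;
 *             with timestamps: 1448 - 12 = 1436, a multiple of 4 but not of 8.
 * Keeping both 1488 and 1500 in the table therefore guarantees an entry
 * whose TCP payload is a multiple of 8 for either common header layout,
 * which is exactly what the comment above is after.
 */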
5403 adap->flags |= CXGB4_FW_OK; in adap_init0()
5414 kfree(adap->sge.egr_map); in adap_init0()
5415 kfree(adap->sge.ingr_map); in adap_init0()
5416 bitmap_free(adap->sge.starving_fl); in adap_init0()
5417 bitmap_free(adap->sge.txq_maperr); in adap_init0()
5419 bitmap_free(adap->sge.blocked_fl); in adap_init0()
5421 if (ret != -ETIMEDOUT && ret != -EIO) in adap_init0()
5422 t4_fw_bye(adap, adap->mbox); in adap_init0()
5438 adap->flags &= ~CXGB4_FW_OK; in eeh_err_detected()
5440 spin_lock(&adap->stats_lock); in eeh_err_detected()
5442 struct net_device *dev = adap->port[i]; in eeh_err_detected()
5448 spin_unlock(&adap->stats_lock); in eeh_err_detected()
5450 if (adap->flags & CXGB4_FULL_INIT_DONE) in eeh_err_detected()
5453 if ((adap->flags & CXGB4_DEV_ENABLED)) { in eeh_err_detected()
5455 adap->flags &= ~CXGB4_DEV_ENABLED; in eeh_err_detected()
5473 if (!(adap->flags & CXGB4_DEV_ENABLED)) { in eeh_slot_reset()
5475 dev_err(&pdev->dev, "Cannot reenable PCI " in eeh_slot_reset()
5479 adap->flags |= CXGB4_DEV_ENABLED; in eeh_slot_reset()
5486 if (t4_wait_dev_ready(adap->regs) < 0) in eeh_slot_reset()
5488 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0) in eeh_slot_reset()
5490 adap->flags |= CXGB4_FW_OK; in eeh_slot_reset()
5498 ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1, in eeh_slot_reset()
5502 pi->viid = ret; in eeh_slot_reset()
5503 pi->xact_addr_filt = -1; in eeh_slot_reset()
5507 if (adap->params.viid_smt_extn_support) { in eeh_slot_reset()
5508 pi->vivld = vivld; in eeh_slot_reset()
5509 pi->vin = vin; in eeh_slot_reset()
5512 pi->vivld = FW_VIID_VIVLD_G(pi->viid); in eeh_slot_reset()
5513 pi->vin = FW_VIID_VIN_G(pi->viid); in eeh_slot_reset()
5517 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in eeh_slot_reset()
5518 adap->params.b_wnd); in eeh_slot_reset()
5535 struct net_device *dev = adap->port[i]; in eeh_resume()
5552 if (adapter->pf != 4) in eeh_reset_prepare()
5555 adapter->flags &= ~CXGB4_FW_OK; in eeh_reset_prepare()
5560 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in eeh_reset_prepare()
5561 cxgb_close(adapter->port[i]); in eeh_reset_prepare()
5568 if (adapter->flags & CXGB4_FULL_INIT_DONE) in eeh_reset_prepare()
5577 if (adapter->pf != 4) in eeh_reset_done()
5580 err = t4_wait_dev_ready(adapter->regs); in eeh_reset_done()
5582 dev_err(adapter->pdev_dev, in eeh_reset_done()
5591 dev_err(adapter->pdev_dev, in eeh_reset_done()
5598 if (adapter->flags & CXGB4_FW_OK) { in eeh_reset_done()
5599 err = t4_port_init(adapter, adapter->pf, adapter->pf, 0); in eeh_reset_done()
5601 dev_err(adapter->pdev_dev, in eeh_reset_done()
5609 dev_err(adapter->pdev_dev, in eeh_reset_done()
5618 dev_err(adapter->pdev_dev, in eeh_reset_done()
5624 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in eeh_reset_done()
5625 cxgb_open(adapter->port[i]); in eeh_reset_done()
5643 speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps)); in is_x_10g_port()
5659 struct sge *s = &adap->sge; in cfg_queues()
5665 adap->params.offload = 0; in cfg_queues()
5666 adap->params.crypto = 0; in cfg_queues()
5667 adap->params.ethofld = 0; in cfg_queues()
5682 niqflint = adap->params.pfres.niqflint - 1; in cfg_queues()
5683 if (!(adap->flags & CXGB4_USING_MSIX)) in cfg_queues()
5684 niqflint--; in cfg_queues()
5685 neq = adap->params.pfres.neq / 2; in cfg_queues()
5688 if (avail_qsets < adap->params.nports) { in cfg_queues()
5689 dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n", in cfg_queues()
5690 avail_qsets, adap->params.nports); in cfg_queues()
5691 return -ENOMEM; in cfg_queues()
5696 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); in cfg_queues()
5700 /* We default to 1 queue per non-10G port and up to # of cores queues in cfg_queues()
5704 q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; in cfg_queues()
5709 * own TX Queue in order to prevent Head-Of-Line Blocking. in cfg_queues()
5712 if (adap->params.nports * 8 > avail_eth_qsets) { in cfg_queues()
5713 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n", in cfg_queues()
5714 avail_eth_qsets, adap->params.nports * 8); in cfg_queues()
5715 return -ENOMEM; in cfg_queues()
5718 if (adap->params.nports * ncpus < avail_eth_qsets) in cfg_queues()
5724 (avail_eth_qsets - (adap->params.nports - n10g) * q1g)) in cfg_queues()
5725 q10g--; in cfg_queues()
5739 pi->first_qset = qidx; in cfg_queues()
5740 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g; in cfg_queues()
5741 qidx += pi->nqsets; in cfg_queues()
5744 s->ethqsets = qidx; in cfg_queues()
5745 s->max_ethqsets = qidx; /* MSI-X may lower it later */ in cfg_queues()
5746 avail_qsets -= qidx; in cfg_queues()
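/* Worked example of the qset distribution above (illustrative numbers,
 * assuming q1g == 1 and setting aside the num_online_cpus() based
 * adjustment): a 2-port adapter with one 10G+ port (n10g == 1) and
 * avail_eth_qsets == 32 starts from q10g = (32 - (2 - 1)) / 1 == 31; the
 * trim loop leaves it alone because 31 * 1 does not exceed
 * 32 - (2 - 1) * 1 == 31.  The fast port then gets pi->nqsets == 31, the
 * slow port pi->nqsets == 1, and s->ethqsets == s->max_ethqsets == 32.
 */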
5753 num_ulds = adap->num_uld + adap->num_ofld_uld; in cfg_queues()
5755 avail_uld_qsets = roundup(i, adap->params.nports); in cfg_queues()
5756 if (avail_qsets < num_ulds * adap->params.nports) { in cfg_queues()
5757 adap->params.offload = 0; in cfg_queues()
5758 adap->params.crypto = 0; in cfg_queues()
5759 s->ofldqsets = 0; in cfg_queues()
5761 s->ofldqsets = adap->params.nports; in cfg_queues()
5763 s->ofldqsets = avail_uld_qsets; in cfg_queues()
5766 avail_qsets -= num_ulds * s->ofldqsets; in cfg_queues()
5773 if (avail_qsets < s->max_ethqsets) { in cfg_queues()
5774 adap->params.ethofld = 0; in cfg_queues()
5775 s->eoqsets = 0; in cfg_queues()
5777 s->eoqsets = s->max_ethqsets; in cfg_queues()
5779 avail_qsets -= s->eoqsets; in cfg_queues()
5787 if (avail_qsets >= s->max_ethqsets) in cfg_queues()
5788 s->mirrorqsets = s->max_ethqsets; in cfg_queues()
5789 else if (avail_qsets >= adap->params.nports) in cfg_queues()
5790 s->mirrorqsets = adap->params.nports; in cfg_queues()
5792 s->mirrorqsets = 0; in cfg_queues()
5793 avail_qsets -= s->mirrorqsets; in cfg_queues()
5795 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { in cfg_queues()
5796 struct sge_eth_rxq *r = &s->ethrxq[i]; in cfg_queues()
5798 init_rspq(adap, &r->rspq, 5, 10, 1024, 64); in cfg_queues()
5799 r->fl.size = 72; in cfg_queues()
5802 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) in cfg_queues()
5803 s->ethtxq[i].q.size = 1024; in cfg_queues()
5805 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) in cfg_queues()
5806 s->ctrlq[i].q.size = 512; in cfg_queues()
5808 if (!is_t4(adap->params.chip)) in cfg_queues()
5809 s->ptptxq.q.size = 8; in cfg_queues()
5811 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); in cfg_queues()
5812 init_rspq(adap, &s->intrq, 0, 1, 512, 64); in cfg_queues()
5826 while (n < adap->sge.ethqsets) in reduce_ethqs()
5829 if (pi->nqsets > 1) { in reduce_ethqs()
5830 pi->nqsets--; in reduce_ethqs()
5831 adap->sge.ethqsets--; in reduce_ethqs()
5832 if (adap->sge.ethqsets <= n) in reduce_ethqs()
5840 pi->first_qset = n; in reduce_ethqs()
5841 n += pi->nqsets; in reduce_ethqs()
5851 return -ENOMEM; in alloc_msix_info()
5853 adap->msix_bmap.msix_bmap = bitmap_zalloc(num_vec, GFP_KERNEL); in alloc_msix_info()
5854 if (!adap->msix_bmap.msix_bmap) { in alloc_msix_info()
5856 return -ENOMEM; in alloc_msix_info()
5859 spin_lock_init(&adap->msix_bmap.lock); in alloc_msix_info()
5860 adap->msix_bmap.mapsize = num_vec; in alloc_msix_info()
5862 adap->msix_info = msix_info; in alloc_msix_info()
5868 bitmap_free(adap->msix_bmap.msix_bmap); in free_msix_info()
5869 kfree(adap->msix_info); in free_msix_info()
5874 struct msix_bmap *bmap = &adap->msix_bmap; in cxgb4_get_msix_idx_from_bmap()
5878 spin_lock_irqsave(&bmap->lock, flags); in cxgb4_get_msix_idx_from_bmap()
5879 msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize); in cxgb4_get_msix_idx_from_bmap()
5880 if (msix_idx < bmap->mapsize) { in cxgb4_get_msix_idx_from_bmap()
5881 __set_bit(msix_idx, bmap->msix_bmap); in cxgb4_get_msix_idx_from_bmap()
5883 spin_unlock_irqrestore(&bmap->lock, flags); in cxgb4_get_msix_idx_from_bmap()
5884 return -ENOSPC; in cxgb4_get_msix_idx_from_bmap()
5887 spin_unlock_irqrestore(&bmap->lock, flags); in cxgb4_get_msix_idx_from_bmap()
5894 struct msix_bmap *bmap = &adap->msix_bmap; in cxgb4_free_msix_idx_in_bmap()
5897 spin_lock_irqsave(&bmap->lock, flags); in cxgb4_free_msix_idx_in_bmap()
5898 __clear_bit(msix_idx, bmap->msix_bmap); in cxgb4_free_msix_idx_in_bmap()
5899 spin_unlock_irqrestore(&bmap->lock, flags); in cxgb4_free_msix_idx_in_bmap()
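/* A minimal usage sketch (illustrative, not taken from the driver) for the
 * two bitmap helpers above: a queue being brought up grabs a free vector
 * index, requests the IRQ recorded in adap->msix_info[], and hands the
 * index back on failure or at teardown.
 */
static int example_request_queue_irq(struct adapter *adap,
				     struct sge_rspq *rspq, const char *name)
{
	int idx, err;

	idx = cxgb4_get_msix_idx_from_bmap(adap);
	if (idx < 0)
		return idx;		/* -ENOSPC: every vector is in use */

	err = request_irq(adap->msix_info[idx].vec, t4_sge_intr_msix, 0,
			  name, rspq);
	if (err) {
		cxgb4_free_msix_idx_in_bmap(adap, idx);
		return err;
	}

	/* caller remembers idx so teardown can free_irq() and then
	 * cxgb4_free_msix_idx_in_bmap()
	 */
	return idx;
}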
5902 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5909 u8 num_uld = 0, nchan = adap->params.nports; in enable_msix()
5911 struct sge *s = &adap->sge; in enable_msix()
5916 want = s->max_ethqsets; in enable_msix()
5927 num_uld = adap->num_ofld_uld + adap->num_uld; in enable_msix()
5928 want += num_uld * s->ofldqsets; in enable_msix()
5934 want += s->eoqsets; in enable_msix()
5939 if (s->mirrorqsets) { in enable_msix()
5940 want += s->mirrorqsets; in enable_msix()
5950 return -ENOMEM; in enable_msix()
5955 allocated = pci_enable_msix_range(adap->pdev, entries, need, want); in enable_msix()
5960 want = s->max_ethqsets + EXTRA_VECS; in enable_msix()
5962 allocated = pci_enable_msix_range(adap->pdev, entries, in enable_msix()
5965 dev_info(adap->pdev_dev, in enable_msix()
5966 "Disabling MSI-X due to insufficient MSI-X vectors\n"); in enable_msix()
5971 dev_info(adap->pdev_dev, in enable_msix()
5972 "Disabling offload due to insufficient MSI-X vectors\n"); in enable_msix()
5973 adap->params.offload = 0; in enable_msix()
5974 adap->params.crypto = 0; in enable_msix()
5975 adap->params.ethofld = 0; in enable_msix()
5976 s->ofldqsets = 0; in enable_msix()
5977 s->eoqsets = 0; in enable_msix()
5978 s->mirrorqsets = 0; in enable_msix()
5995 if (s->mirrorqsets) in enable_msix()
5998 num_vec -= need; in enable_msix()
6001 ethqsets > s->max_ethqsets) in enable_msix()
6006 if (pi->nqsets < 2) in enable_msix()
6010 num_vec--; in enable_msix()
6013 num_vec--; in enable_msix()
6021 ofldqsets > s->ofldqsets) in enable_msix()
6025 num_vec -= uld_need; in enable_msix()
6029 if (s->mirrorqsets) { in enable_msix()
6032 mirrorqsets > s->mirrorqsets) in enable_msix()
6036 num_vec -= mirror_need; in enable_msix()
6040 ethqsets = s->max_ethqsets; in enable_msix()
6042 ofldqsets = s->ofldqsets; in enable_msix()
6044 eoqsets = s->eoqsets; in enable_msix()
6045 if (s->mirrorqsets) in enable_msix()
6046 mirrorqsets = s->mirrorqsets; in enable_msix()
6049 if (ethqsets < s->max_ethqsets) { in enable_msix()
6050 s->max_ethqsets = ethqsets; in enable_msix()
6055 s->ofldqsets = ofldqsets; in enable_msix()
6056 s->nqs_per_uld = s->ofldqsets; in enable_msix()
6060 s->eoqsets = eoqsets; in enable_msix()
6062 if (s->mirrorqsets) { in enable_msix()
6063 s->mirrorqsets = mirrorqsets; in enable_msix()
6066 pi->nmirrorqsets = s->mirrorqsets / nchan; in enable_msix()
6067 mutex_init(&pi->vi_mirror_mutex); in enable_msix()
6077 adap->msix_info[i].vec = entries[i].vector; in enable_msix()
6078 adap->msix_info[i].idx = i; in enable_msix()
6081 dev_info(adap->pdev_dev, in enable_msix()
6082 "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n", in enable_msix()
6083 allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld, in enable_msix()
6084 s->mirrorqsets); in enable_msix()
6090 pci_disable_msix(adap->pdev); in enable_msix()
6104 err = t4_init_rss_mode(adap, adap->mbox); in init_rss()
6111 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); in init_rss()
6112 if (!pi->rss) in init_rss()
6113 return -ENOMEM; in init_rss()
6125 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n", in print_adapter_info()
6127 ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" : in print_adapter_info()
6128 (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""), in print_adapter_info()
6129 is_offload(adapter) ? "Offload" : "non-Offload"); in print_adapter_info()
6137 const struct adapter *adap = pi->adapter; in print_port_info()
6139 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M) in print_port_info()
6141 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G) in print_port_info()
6143 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G) in print_port_info()
6145 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G) in print_port_info()
6147 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G) in print_port_info()
6149 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G) in print_port_info()
6151 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G) in print_port_info()
6153 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G) in print_port_info()
6155 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G) in print_port_info()
6158 --bufp; in print_port_info()
6159 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); in print_port_info()
6161 netdev_info(dev, "Chelsio %s %s\n", adap->params.vpd.id, buf); in print_port_info()
6166 * - memory used for tables
6167 * - MSI/MSI-X
6168 * - net devices
6169 * - resources FW is holding for us
6175 kvfree(adapter->smt); in free_some_resources()
6176 kvfree(adapter->l2t); in free_some_resources()
6177 kvfree(adapter->srq); in free_some_resources()
6179 kvfree(adapter->tids.tid_tab); in free_some_resources()
6185 kfree(adapter->sge.egr_map); in free_some_resources()
6186 kfree(adapter->sge.ingr_map); in free_some_resources()
6187 bitmap_free(adapter->sge.starving_fl); in free_some_resources()
6188 bitmap_free(adapter->sge.txq_maperr); in free_some_resources()
6190 bitmap_free(adapter->sge.blocked_fl); in free_some_resources()
6195 if (adapter->port[i]) { in free_some_resources()
6198 if (pi->viid != 0) in free_some_resources()
6199 t4_free_vi(adapter, adapter->mbox, adapter->pf, in free_some_resources()
6200 0, pi->viid); in free_some_resources()
6201 kfree(adap2pinfo(adapter, i)->rss); in free_some_resources()
6202 free_netdev(adapter->port[i]); in free_some_resources()
6204 if (adapter->flags & CXGB4_FW_OK) in free_some_resources()
6205 t4_fw_bye(adapter, adapter->pf); in free_some_resources()
6228 return -EINVAL; in t4_get_chip_type()
6234 dev->type = ARPHRD_NONE; in cxgb4_mgmt_setup()
6235 dev->mtu = 0; in cxgb4_mgmt_setup()
6236 dev->hard_header_len = 0; in cxgb4_mgmt_setup()
6237 dev->addr_len = 0; in cxgb4_mgmt_setup()
6238 dev->tx_queue_len = 0; in cxgb4_mgmt_setup()
6239 dev->flags |= IFF_NOARP; in cxgb4_mgmt_setup()
6240 dev->priv_flags |= IFF_NO_QUEUE; in cxgb4_mgmt_setup()
6243 dev->netdev_ops = &cxgb4_mgmt_netdev_ops; in cxgb4_mgmt_setup()
6244 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; in cxgb4_mgmt_setup()
6254 pcie_fw = readl(adap->regs + PCIE_FW_A); in cxgb4_iov_configure()
6257 dev_warn(&pdev->dev, "Device not initialized\n"); in cxgb4_iov_configure()
6258 return -EOPNOTSUPP; in cxgb4_iov_configure()
6265 dev_err(&pdev->dev, in cxgb4_iov_configure()
6266 "Cannot modify SR-IOV while VFs are assigned\n"); in cxgb4_iov_configure()
6269 /* Note that the upper-level code ensures that we're never called with in cxgb4_iov_configure()
6270 * a non-zero "num_vfs" when we already have VFs instantiated. But in cxgb4_iov_configure()
6274 return -EBUSY; in cxgb4_iov_configure()
6284 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6285 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6286 adap->port[0] = NULL; in cxgb4_iov_configure()
6289 adap->num_vfs = 0; in cxgb4_iov_configure()
6290 kfree(adap->vfinfo); in cxgb4_iov_configure()
6291 adap->vfinfo = NULL; in cxgb4_iov_configure()
6306 * parent bridge's PCI-E needs to support Alternative Routing in cxgb4_iov_configure()
6310 pbridge = pdev->bus->self; in cxgb4_iov_configure()
6320 dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n", in cxgb4_iov_configure()
6321 pbridge->bus->number, PCI_SLOT(pbridge->devfn), in cxgb4_iov_configure()
6322 PCI_FUNC(pbridge->devfn)); in cxgb4_iov_configure()
6323 return -ENOTSUPP; in cxgb4_iov_configure()
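/* One way to perform the ARI check described above (a sketch under the
 * assumption that the bridge's PCIe Device Capabilities 2 register is what
 * gets consulted; the constants are the generic ones from pci_regs.h):
 */
{
	u32 devcap2 = 0;

	pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);
	if (!(devcap2 & PCI_EXP_DEVCAP2_ARI))
		return -ENOTSUPP;	/* bridge cannot route VF function numbers */
}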
6329 FW_PFVF_CMD_PFN_V(adap->pf) | in cxgb4_iov_configure()
6332 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd), in cxgb4_iov_configure()
6337 port = ffs(pmask) - 1; in cxgb4_iov_configure()
6339 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx, in cxgb4_iov_configure()
6340 adap->pf); in cxgb4_iov_configure()
6344 return -ENOMEM; in cxgb4_iov_configure()
6347 pi->adapter = adap; in cxgb4_iov_configure()
6348 pi->lport = port; in cxgb4_iov_configure()
6349 pi->tx_chan = port; in cxgb4_iov_configure()
6350 SET_NETDEV_DEV(netdev, &pdev->dev); in cxgb4_iov_configure()
6352 adap->port[0] = netdev; in cxgb4_iov_configure()
6353 pi->port_id = 0; in cxgb4_iov_configure()
6355 err = register_netdev(adap->port[0]); in cxgb4_iov_configure()
6358 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6359 adap->port[0] = NULL; in cxgb4_iov_configure()
6363 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev), in cxgb4_iov_configure()
6365 if (!adap->vfinfo) { in cxgb4_iov_configure()
6366 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6367 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6368 adap->port[0] = NULL; in cxgb4_iov_configure()
6369 return -ENOMEM; in cxgb4_iov_configure()
6378 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
6379 free_netdev(adap->port[0]); in cxgb4_iov_configure()
6380 adap->port[0] = NULL; in cxgb4_iov_configure()
6381 kfree(adap->vfinfo); in cxgb4_iov_configure()
6382 adap->vfinfo = NULL; in cxgb4_iov_configure()
6387 adap->num_vfs = num_vfs; in cxgb4_iov_configure()
6400 if (!adap->uld[CXGB4_ULD_KTLS].handle) { in chcr_offload_state()
6401 dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n"); in chcr_offload_state()
6402 return -EOPNOTSUPP; in chcr_offload_state()
6404 if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) { in chcr_offload_state()
6405 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6407 return -EOPNOTSUPP; in chcr_offload_state()
6413 if (!adap->uld[CXGB4_ULD_IPSEC].handle) { in chcr_offload_state()
6414 dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n"); in chcr_offload_state()
6415 return -EOPNOTSUPP; in chcr_offload_state()
6417 if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) { in chcr_offload_state()
6418 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6420 return -EOPNOTSUPP; in chcr_offload_state()
6425 dev_dbg(adap->pdev_dev, in chcr_offload_state()
6427 return -EOPNOTSUPP; in chcr_offload_state()
6454 ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk, in cxgb4_ktls_dev_add()
6477 adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx, in cxgb4_ktls_dev_del()
6495 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_add_state()
6499 dev_dbg(adap->pdev_dev, in cxgb4_xfrm_add_state()
6501 return -EBUSY; in cxgb4_xfrm_add_state()
6507 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x); in cxgb4_xfrm_add_state()
6517 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_del_state()
6520 dev_dbg(adap->pdev_dev, in cxgb4_xfrm_del_state()
6527 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x); in cxgb4_xfrm_del_state()
6535 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_xfrm_free_state()
6538 dev_dbg(adap->pdev_dev, in cxgb4_xfrm_free_state()
6545 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x); in cxgb4_xfrm_free_state()
6553 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_ipsec_offload_ok()
6557 dev_dbg(adap->pdev_dev, in cxgb4_ipsec_offload_ok()
6564 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x); in cxgb4_ipsec_offload_ok()
6573 struct adapter *adap = netdev2adap(x->xso.dev); in cxgb4_advance_esn_state()
6576 dev_dbg(adap->pdev_dev, in cxgb4_advance_esn_state()
6583 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x); in cxgb4_advance_esn_state()
6616 dev_info(&pdev->dev, "cannot obtain PCI resources\n"); in init_one()
6622 dev_err(&pdev->dev, "cannot enable PCI device\n"); in init_one()
6628 dev_err(&pdev->dev, "cannot map device registers\n"); in init_one()
6629 err = -ENOMEM; in init_one()
6635 err = -ENOMEM; in init_one()
6639 adapter->regs = regs; in init_one()
6649 dev_err(&pdev->dev, "Device %d is not supported\n", device_id); in init_one()
6657 adapter->pdev = pdev; in init_one()
6658 adapter->pdev_dev = &pdev->dev; in init_one()
6659 adapter->name = pci_name(pdev); in init_one()
6660 adapter->mbox = func; in init_one()
6661 adapter->pf = func; in init_one()
6662 adapter->params.chip = chip; in init_one()
6663 adapter->adap_idx = adap_idx; in init_one()
6664 adapter->msg_enable = DFLT_MSG_ENABLE; in init_one()
6665 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + in init_one()
6669 if (!adapter->mbox_log) { in init_one()
6670 err = -ENOMEM; in init_one()
6673 spin_lock_init(&adapter->mbox_lock); in init_one()
6674 INIT_LIST_HEAD(&adapter->mlist.list); in init_one()
6675 adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS; in init_one()
6678 if (func != ent->driver_data) { in init_one()
6680 pci_save_state(pdev); /* to restore SR-IOV later */ in init_one()
6684 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in init_one()
6686 dev_err(&pdev->dev, "no usable DMA configuration\n"); in init_one()
6694 adapter->workq = create_singlethread_workqueue("cxgb4"); in init_one()
6695 if (!adapter->workq) { in init_one()
6696 err = -ENOMEM; in init_one()
6701 adapter->flags |= CXGB4_DEV_ENABLED; in init_one()
6702 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); in init_one()
6719 adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING; in init_one()
6721 spin_lock_init(&adapter->stats_lock); in init_one()
6722 spin_lock_init(&adapter->tid_release_lock); in init_one()
6723 spin_lock_init(&adapter->win0_lock); in init_one()
6725 INIT_WORK(&adapter->tid_release_task, process_tid_release_list); in init_one()
6726 INIT_WORK(&adapter->db_full_task, process_db_full); in init_one()
6727 INIT_WORK(&adapter->db_drop_task, process_db_drop); in init_one()
6728 INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err); in init_one()
6738 dev_warn(adapter->pdev_dev, in init_one()
6745 if (!is_t4(adapter->params.chip)) { in init_one()
6747 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * in init_one()
6748 adapter->pf); in init_one()
6759 dev_err(&pdev->dev, in init_one()
6761 err = -EINVAL; in init_one()
6764 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), in init_one()
6766 if (!adapter->bar2) { in init_one()
6767 dev_err(&pdev->dev, "cannot map device bar2 region\n"); in init_one()
6768 err = -ENOMEM; in init_one()
6781 if (!is_t4(adapter->params.chip)) in init_one()
6783 (is_t5(adapter->params.chip) ? STATMODE_V(0) : in init_one()
6787 INIT_LIST_HEAD(&adapter->mac_hlist); in init_one()
6799 err = -ENOMEM; in init_one()
6803 SET_NETDEV_DEV(netdev, &pdev->dev); in init_one()
6805 adapter->port[i] = netdev; in init_one()
6807 pi->adapter = adapter; in init_one()
6808 pi->xact_addr_filt = -1; in init_one()
6809 pi->port_id = i; in init_one()
6810 netdev->irq = pdev->irq; in init_one()
6812 netdev->hw_features = NETIF_F_SG | TSO_FLAGS | in init_one()
6819 netdev->hw_enc_features |= NETIF_F_IP_CSUM | in init_one()
6826 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | in init_one()
6830 if (adapter->rawf_cnt) in init_one()
6831 netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels; in init_one()
6834 netdev->features |= netdev->hw_features; in init_one()
6835 netdev->vlan_features = netdev->features & VLAN_FEAT; in init_one()
6837 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) { in init_one()
6838 netdev->hw_features |= NETIF_F_HW_TLS_TX; in init_one()
6839 netdev->tlsdev_ops = &cxgb4_ktls_ops; in init_one()
6841 refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0); in init_one()
6845 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) { in init_one()
6846 netdev->hw_enc_features |= NETIF_F_HW_ESP; in init_one()
6847 netdev->features |= NETIF_F_HW_ESP; in init_one()
6848 netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops; in init_one()
6852 netdev->priv_flags |= IFF_UNICAST_FLT; in init_one()
6854 /* MTU range: 81 - 9600 */ in init_one()
6855 netdev->min_mtu = 81; /* accommodate SACK */ in init_one()
6856 netdev->max_mtu = MAX_MTU; in init_one()
6858 netdev->netdev_ops = &cxgb4_netdev_ops; in init_one()
6860 netdev->dcbnl_ops = &cxgb4_dcb_ops; in init_one()
6871 if (adapter->flags & CXGB4_FW_OK) { in init_one()
6875 } else if (adapter->params.nports == 1) { in init_one()
6876 /* If we don't have a connection to the firmware -- possibly in init_one()
6877 * because of an error -- grab the raw VPD parameters so we in init_one()
6882 u8 *na = adapter->params.vpd.na; in init_one()
6884 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd); in init_one()
6893 if (!(adapter->flags & CXGB4_FW_OK)) in init_one()
6903 adapter->smt = t4_init_smt(); in init_one()
6904 if (!adapter->smt) { in init_one()
6906 dev_warn(&pdev->dev, "could not allocate SMT, continuing\n"); in init_one()
6909 adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end); in init_one()
6910 if (!adapter->l2t) { in init_one()
6912 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); in init_one()
6913 adapter->params.offload = 0; in init_one()
6922 dev_warn(&pdev->dev, in init_one()
6924 adapter->params.offload = 0; in init_one()
6926 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start, in init_one()
6927 adapter->clipt_end); in init_one()
6928 if (!adapter->clipt) { in init_one()
6932 dev_warn(&pdev->dev, in init_one()
6934 adapter->params.offload = 0; in init_one()
6941 pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls); in init_one()
6942 if (!pi->sched_tbl) in init_one()
6943 dev_warn(&pdev->dev, in init_one()
6954 adapter->tids.nhash = 1 << HASHTIDSIZE_G(v); in init_one()
6956 adapter->tids.hash_base = v / 4; in init_one()
6958 adapter->tids.nhash = HASHTBLSIZE_G(v) << 3; in init_one()
6961 adapter->tids.hash_base = v; in init_one()
6966 if (tid_init(&adapter->tids) < 0) { in init_one()
6967 dev_warn(&pdev->dev, "could not allocate TID table, " in init_one()
6969 adapter->params.offload = 0; in init_one()
6971 adapter->tc_u32 = cxgb4_init_tc_u32(adapter); in init_one()
6972 if (!adapter->tc_u32) in init_one()
6973 dev_warn(&pdev->dev, in init_one()
6977 dev_warn(&pdev->dev, in init_one()
6981 dev_warn(&pdev->dev, in init_one()
6985 dev_warn(&pdev->dev, in init_one()
6988 dev_warn(&pdev->dev, in init_one()
6994 adapter->flags |= CXGB4_USING_MSIX; in init_one()
6996 adapter->flags |= CXGB4_USING_MSI; in init_one()
7012 dev_err(adapter->pdev_dev, in init_one()
7019 dev_err(adapter->pdev_dev, in init_one()
7033 adapter->port[i]->dev_port = pi->lport; in init_one()
7034 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets); in init_one()
7035 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets); in init_one()
7037 netif_carrier_off(adapter->port[i]); in init_one()
7039 err = register_netdev(adapter->port[i]); in init_one()
7042 adapter->chan_map[pi->tx_chan] = i; in init_one()
7043 print_port_info(adapter->port[i]); in init_one()
7046 dev_err(&pdev->dev, "could not register any net devices\n"); in init_one()
7050 dev_warn(&pdev->dev, "only %d net devices registered\n", i); in init_one()
7055 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), in init_one()
7061 pdev->needs_freset = 1; in init_one()
7066 if (!is_t4(adapter->params.chip)) in init_one()
7070 !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK)) in init_one()
7079 if (adapter->flags & CXGB4_USING_MSIX) in init_one()
7081 if (adapter->num_uld || adapter->num_ofld_uld) in init_one()
7084 if (!is_t4(adapter->params.chip)) in init_one()
7085 iounmap(adapter->bar2); in init_one()
7087 if (adapter->workq) in init_one()
7088 destroy_workqueue(adapter->workq); in init_one()
7090 kfree(adapter->mbox_log); in init_one()
7117 adapter->flags |= CXGB4_SHUTTING_DOWN; in remove_one()
7119 if (adapter->pf == 4) { in remove_one()
7122 /* Tear down per-adapter Work Queue first since it can contain in remove_one()
7125 destroy_workqueue(adapter->workq); in remove_one()
7130 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in remove_one()
7131 unregister_netdev(adapter->port[i]); in remove_one()
7141 debugfs_remove_recursive(adapter->debugfs_root); in remove_one()
7143 if (!is_t4(adapter->params.chip)) in remove_one()
7148 if (adapter->flags & CXGB4_FULL_INIT_DONE) in remove_one()
7151 if (adapter->flags & CXGB4_USING_MSIX) in remove_one()
7153 if (adapter->num_uld || adapter->num_ofld_uld) in remove_one()
7156 list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, in remove_one()
7158 list_del(&entry->list); in remove_one()
7165 if (!is_t4(adapter->params.chip)) in remove_one()
7166 iounmap(adapter->bar2); in remove_one()
7170 cxgb4_iov_configure(adapter->pdev, 0); in remove_one()
7173 iounmap(adapter->regs); in remove_one()
7175 if ((adapter->flags & CXGB4_DEV_ENABLED)) { in remove_one()
7177 adapter->flags &= ~CXGB4_DEV_ENABLED; in remove_one()
7180 kfree(adapter->mbox_log); in remove_one()
7203 adapter->flags |= CXGB4_SHUTTING_DOWN; in shutdown_one()
7205 if (adapter->pf == 4) { in shutdown_one()
7209 if (adapter->port[i]->reg_state == NETREG_REGISTERED) in shutdown_one()
7210 cxgb_close(adapter->port[i]); in shutdown_one()
7225 if (adapter->flags & CXGB4_FW_OK) in shutdown_one()
7226 t4_fw_bye(adapter, adapter->mbox); in shutdown_one()