Lines matching refs: adap (Chelsio cxgb4 driver, cxgb4_main.c)

236 struct adapter *adap = pi->adapter; in dcb_tx_queue_prio_enable() local
237 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; in dcb_tx_queue_prio_enable()
257 err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, in dcb_tx_queue_prio_enable()
262 dev_err(adap->pdev_dev, in dcb_tx_queue_prio_enable()
304 void t4_os_portmod_changed(struct adapter *adap, int port_id) in t4_os_portmod_changed() argument
310 struct net_device *dev = adap->port[port_id]; in t4_os_portmod_changed()
349 struct adapter *adap = pi->adapter; in cxgb4_set_addr_hash() local
355 list_for_each_entry(entry, &adap->mac_hlist, list) { in cxgb4_set_addr_hash()
359 return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast, in cxgb4_set_addr_hash()
366 struct adapter *adap = pi->adapter; in cxgb4_mac_sync() local
375 ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist, in cxgb4_mac_sync()
388 list_add_tail(&new_entry->list, &adap->mac_hlist); in cxgb4_mac_sync()
398 struct adapter *adap = pi->adapter; in cxgb4_mac_unsync() local
406 list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) { in cxgb4_mac_unsync()
414 ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false); in cxgb4_mac_unsync()
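
The cxgb4_mac_sync()/cxgb4_mac_unsync() pair above maintains a driver-private list (adap->mac_hlist) of MAC addresses already programmed into the hardware filter, so unsync can find and release them later. A minimal sketch of that pattern, assuming the driver's own types; hw_add_mac_filt()/hw_del_mac_filt() are hypothetical stand-ins for the mailbox calls (t4_alloc_mac_filt()/t4_free_mac_filt()):

	/* Sketch only: mirrors the sync/unsync flow above with hypothetical
	 * hw_add_mac_filt()/hw_del_mac_filt() in place of the mailbox calls.
	 */
	struct mac_hlist_entry {
		struct list_head list;
		u8 addr[ETH_ALEN];
	};

	static int example_mac_sync(struct net_device *dev, const u8 *mac_addr)
	{
		struct adapter *adap = netdev2adap(dev);
		struct mac_hlist_entry *e;
		int ret;

		ret = hw_add_mac_filt(adap, mac_addr);	/* stand-in for t4_alloc_mac_filt() */
		if (ret < 0)
			return ret;

		e = kzalloc(sizeof(*e), GFP_ATOMIC);	/* called under addr_list_lock */
		if (!e)
			return -ENOMEM;
		ether_addr_copy(e->addr, mac_addr);
		list_add_tail(&e->list, &adap->mac_hlist);
		return 0;
	}

	static int example_mac_unsync(struct net_device *dev, const u8 *mac_addr)
	{
		struct adapter *adap = netdev2adap(dev);
		struct mac_hlist_entry *e, *tmp;

		list_for_each_entry_safe(e, tmp, &adap->mac_hlist, list) {
			if (ether_addr_equal(e->addr, mac_addr)) {
				list_del(&e->list);
				kfree(e);
			}
		}
		return hw_del_mac_filt(adap, mac_addr);	/* stand-in for t4_free_mac_filt() */
	}
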
478 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd) in dcb_rpl() argument
481 struct net_device *dev = adap->port[adap->chan_map[port]]; in dcb_rpl()
485 cxgb4_dcb_handle_fw_update(adap, pcmd); in dcb_rpl()
514 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" in fwevtq_handler()
525 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; in fwevtq_handler()
555 dev = q->adap->port[q->adap->chan_map[port]]; in fwevtq_handler()
569 dcb_rpl(q->adap, pcmd); in fwevtq_handler()
573 t4_handle_fw_rpl(q->adap, p->data); in fwevtq_handler()
577 do_l2t_write_rpl(q->adap, p); in fwevtq_handler()
581 do_smt_write_rpl(q->adap, p); in fwevtq_handler()
585 filter_rpl(q->adap, p); in fwevtq_handler()
589 hash_filter_rpl(q->adap, p); in fwevtq_handler()
593 hash_del_filter_rpl(q->adap, p); in fwevtq_handler()
597 do_srq_table_rpl(q->adap, p); in fwevtq_handler()
599 dev_err(q->adap->pdev_dev, in fwevtq_handler()
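
fwevtq_handler() above is a single demultiplex point: every CPL message arriving on the firmware event queue is dispatched on its opcode to the matching reply handler. An abridged sketch of that switch (the opcode names and handlers are the real ones seen above; the handler set and the FW4/FW6 encapsulation handling are simplified):

	/* Abridged sketch of the opcode dispatch in fwevtq_handler(). */
	static int example_fwevtq_handler(struct sge_rspq *q, const __be64 *rsp)
	{
		u8 opcode = ((const struct rss_header *)rsp)->opcode;

		rsp++;	/* step past the RSS header, as the real handler does */
		switch (opcode) {
		case CPL_FW6_MSG:
			t4_handle_fw_rpl(q->adap,
					 ((const struct cpl_fw6_msg *)rsp)->data);
			break;
		case CPL_L2T_WRITE_RPL:
			do_l2t_write_rpl(q->adap,
					 (const struct cpl_l2t_write_rpl *)rsp);
			break;
		case CPL_SET_TCB_RPL:
			filter_rpl(q->adap, (const struct cpl_set_tcb_rpl *)rsp);
			break;
		default:
			dev_err(q->adap->pdev_dev,
				"unexpected CPL %#x on FW event queue\n", opcode);
		}
		return 0;
	}
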
621 struct adapter *adap = cookie; in t4_nondata_intr() local
622 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A)); in t4_nondata_intr()
625 adap->swintr = 1; in t4_nondata_intr()
626 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v); in t4_nondata_intr()
628 if (adap->flags & MASTER_PF) in t4_nondata_intr()
629 t4_slow_intr_handler(adap); in t4_nondata_intr()
636 static void name_msix_vecs(struct adapter *adap) in name_msix_vecs() argument
638 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc); in name_msix_vecs()
641 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name); in name_msix_vecs()
644 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", in name_msix_vecs()
645 adap->port[0]->name); in name_msix_vecs()
648 for_each_port(adap, j) { in name_msix_vecs()
649 struct net_device *d = adap->port[j]; in name_msix_vecs()
653 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", in name_msix_vecs()
658 static int request_msix_queue_irqs(struct adapter *adap) in request_msix_queue_irqs() argument
660 struct sge *s = &adap->sge; in request_msix_queue_irqs()
664 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, in request_msix_queue_irqs()
665 adap->msix_info[1].desc, &s->fw_evtq); in request_msix_queue_irqs()
670 err = request_irq(adap->msix_info[msi_index].vec, in request_msix_queue_irqs()
672 adap->msix_info[msi_index].desc, in request_msix_queue_irqs()
682 free_irq(adap->msix_info[--msi_index].vec, in request_msix_queue_irqs()
684 free_irq(adap->msix_info[1].vec, &s->fw_evtq); in request_msix_queue_irqs()
688 static void free_msix_queue_irqs(struct adapter *adap) in free_msix_queue_irqs() argument
691 struct sge *s = &adap->sge; in free_msix_queue_irqs()
693 free_irq(adap->msix_info[1].vec, &s->fw_evtq); in free_msix_queue_irqs()
695 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq); in free_msix_queue_irqs()
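
request_msix_queue_irqs()/free_msix_queue_irqs() above show the canonical partial-failure unwind: vector 1 (the FW event queue) and then one vector per ethernet rxq are requested in order, and on error everything already requested is released in reverse. A condensed sketch of that shape, assuming the driver's msix_info layout (vector 0 non-data, vector 1 FW events, queues from index 2):

	/* Condensed sketch of request_msix_queue_irqs(): request in order,
	 * unwind in reverse on failure.
	 */
	static int example_request_msix_queue_irqs(struct adapter *adap)
	{
		struct sge *s = &adap->sge;
		int err, ethqidx, msi_index = 2;

		err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
				  adap->msix_info[1].desc, &s->fw_evtq);
		if (err)
			return err;

		for_each_ethrxq(s, ethqidx) {
			err = request_irq(adap->msix_info[msi_index].vec,
					  t4_sge_intr_msix, 0,
					  adap->msix_info[msi_index].desc,
					  &s->ethrxq[ethqidx].rspq);
			if (err)
				goto unwind;
			msi_index++;
		}
		return 0;

	unwind:
		while (--msi_index >= 2)
			free_irq(adap->msix_info[msi_index].vec,
				 &s->ethrxq[msi_index - 2].rspq);
		free_irq(adap->msix_info[1].vec, &s->fw_evtq);
		return err;
	}
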
748 static int setup_rss(struct adapter *adap) in setup_rss() argument
752 for_each_port(adap, i) { in setup_rss()
753 const struct port_info *pi = adap2pinfo(adap, i); in setup_rss()
778 static void quiesce_rx(struct adapter *adap) in quiesce_rx() argument
782 for (i = 0; i < adap->sge.ingr_sz; i++) { in quiesce_rx()
783 struct sge_rspq *q = adap->sge.ingr_map[i]; in quiesce_rx()
791 static void disable_interrupts(struct adapter *adap) in disable_interrupts() argument
793 if (adap->flags & FULL_INIT_DONE) { in disable_interrupts()
794 t4_intr_disable(adap); in disable_interrupts()
795 if (adap->flags & USING_MSIX) { in disable_interrupts()
796 free_msix_queue_irqs(adap); in disable_interrupts()
797 free_irq(adap->msix_info[0].vec, adap); in disable_interrupts()
799 free_irq(adap->pdev->irq, adap); in disable_interrupts()
801 quiesce_rx(adap); in disable_interrupts()
808 static void enable_rx(struct adapter *adap) in enable_rx() argument
812 for (i = 0; i < adap->sge.ingr_sz; i++) { in enable_rx()
813 struct sge_rspq *q = adap->sge.ingr_map[i]; in enable_rx()
821 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), in enable_rx()
828 static int setup_fw_sge_queues(struct adapter *adap) in setup_fw_sge_queues() argument
830 struct sge *s = &adap->sge; in setup_fw_sge_queues()
836 if (adap->flags & USING_MSIX) in setup_fw_sge_queues()
837 adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */ in setup_fw_sge_queues()
839 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, in setup_fw_sge_queues()
843 adap->msi_idx = -((int)s->intrq.abs_id + 1); in setup_fw_sge_queues()
846 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], in setup_fw_sge_queues()
847 adap->msi_idx, NULL, fwevtq_handler, NULL, -1); in setup_fw_sge_queues()
859 static int setup_sge_queues(struct adapter *adap) in setup_sge_queues() argument
862 struct sge *s = &adap->sge; in setup_sge_queues()
866 if (is_uld(adap)) in setup_sge_queues()
869 for_each_port(adap, i) { in setup_sge_queues()
870 struct net_device *dev = adap->port[i]; in setup_sge_queues()
876 if (adap->msi_idx > 0) in setup_sge_queues()
877 adap->msi_idx++; in setup_sge_queues()
878 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, in setup_sge_queues()
879 adap->msi_idx, &q->fl, in setup_sge_queues()
882 t4_get_tp_ch_map(adap, in setup_sge_queues()
890 err = t4_sge_alloc_eth_txq(adap, t, dev, in setup_sge_queues()
898 for_each_port(adap, i) { in setup_sge_queues()
905 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], in setup_sge_queues()
911 if (!is_t4(adap->params.chip)) { in setup_sge_queues()
912 err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0], in setup_sge_queues()
913 netdev_get_tx_queue(adap->port[0], 0) in setup_sge_queues()
919 t4_write_reg(adap, is_t4(adap->params.chip) ? in setup_sge_queues()
922 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) | in setup_sge_queues()
926 dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err); in setup_sge_queues()
927 t4_free_sge_resources(adap); in setup_sge_queues()
1022 struct adapter *adap = q->adap; in cxgb4_set_rspq_intr_params() local
1031 new_idx = closest_thres(&adap->sge, cnt); in cxgb4_set_rspq_intr_params()
1038 err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, in cxgb4_set_rspq_intr_params()
1046 us = us == 0 ? 6 : closest_timer(&adap->sge, us); in cxgb4_set_rspq_intr_params()
1068 static int setup_debugfs(struct adapter *adap) in setup_debugfs() argument
1070 if (IS_ERR_OR_NULL(adap->debugfs_root)) in setup_debugfs()
1074 t4_setup_debugfs(adap); in setup_debugfs()
1240 struct adapter *adap = container_of(t, struct adapter, tids); in cxgb4_queue_tid_release() local
1242 spin_lock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1243 *p = adap->tid_release_head; in cxgb4_queue_tid_release()
1245 adap->tid_release_head = (void **)((uintptr_t)p | chan); in cxgb4_queue_tid_release()
1246 if (!adap->tid_release_task_busy) { in cxgb4_queue_tid_release()
1247 adap->tid_release_task_busy = true; in cxgb4_queue_tid_release()
1248 queue_work(adap->workq, &adap->tid_release_task); in cxgb4_queue_tid_release()
1250 spin_unlock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1259 struct adapter *adap; in process_tid_release_list() local
1261 adap = container_of(work, struct adapter, tid_release_task); in process_tid_release_list()
1263 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1264 while (adap->tid_release_head) { in process_tid_release_list()
1265 void **p = adap->tid_release_head; in process_tid_release_list()
1269 adap->tid_release_head = *p; in process_tid_release_list()
1271 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
1277 mk_tid_release(skb, chan, p - adap->tids.tid_tab); in process_tid_release_list()
1278 t4_ofld_send(adap, skb); in process_tid_release_list()
1279 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1281 adap->tid_release_task_busy = false; in process_tid_release_list()
1282 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
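
cxgb4_queue_tid_release()/process_tid_release_list() above implement a deferred-release list with a small trick: each queued node is a slot in tid_tab itself, and the channel number rides in the low bits of the next pointer (the slots are pointer-aligned, so those bits are otherwise always zero — hence the `(uintptr_t)p | chan` at source line 1245). A self-contained userspace sketch of the tagging:

	#include <stdint.h>
	#include <stdio.h>

	/* Slots in an array of void* are at least 8-byte aligned on LP64,
	 * leaving the low bits free to carry a small tag (here: the channel).
	 * The driver uses a 2-bit tag, hence the mask of 3.
	 */
	#define TAG_MASK 0x3UL

	static void *tag_ptr(void **p, unsigned int chan)
	{
		return (void *)((uintptr_t)p | (chan & TAG_MASK));
	}

	static void **untag_ptr(void *p, unsigned int *chan)
	{
		*chan = (uintptr_t)p & TAG_MASK;
		return (void **)((uintptr_t)p & ~TAG_MASK);
	}

	int main(void)
	{
		void *tid_tab[8] = { 0 };
		unsigned int chan;

		void *head = tag_ptr(&tid_tab[5], 3);	/* queue slot 5, channel 3 */
		void **slot = untag_ptr(head, &chan);

		printf("slot index %td, channel %u\n", slot - &tid_tab[0], chan);
		return 0;
	}
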
1293 struct adapter *adap = container_of(t, struct adapter, tids); in cxgb4_remove_tid() local
1316 t4_ofld_send(adap, skb); in cxgb4_remove_tid()
1327 struct adapter *adap = container_of(t, struct adapter, tids); in tid_init() local
1373 if (is_offload(adap)) { in tid_init()
1377 CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in tid_init()
1402 struct adapter *adap; in cxgb4_create_server() local
1410 adap = netdev2adap(dev); in cxgb4_create_server()
1418 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server()
1422 ret = t4_mgmt_tx(adap, skb); in cxgb4_create_server()
1443 struct adapter *adap; in cxgb4_create_server6() local
1451 adap = netdev2adap(dev); in cxgb4_create_server6()
1461 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server6()
1465 ret = t4_mgmt_tx(adap, skb); in cxgb4_create_server6()
1474 struct adapter *adap; in cxgb4_remove_server() local
1478 adap = netdev2adap(dev); in cxgb4_remove_server()
1489 ret = t4_mgmt_tx(adap, skb); in cxgb4_remove_server()
1624 struct adapter *adap = netdev2adap(dev); in cxgb4_dbfifo_count() local
1627 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); in cxgb4_dbfifo_count()
1628 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); in cxgb4_dbfifo_count()
1629 if (is_t4(adap->params.chip)) { in cxgb4_dbfifo_count()
1667 struct adapter *adap = pci_get_drvdata(pdev); in cxgb4_get_tcp_stats() local
1669 spin_lock(&adap->stats_lock); in cxgb4_get_tcp_stats()
1670 t4_tp_get_tcp_stats(adap, v4, v6, false); in cxgb4_get_tcp_stats()
1671 spin_unlock(&adap->stats_lock); in cxgb4_get_tcp_stats()
1678 struct adapter *adap = netdev2adap(dev); in cxgb4_iscsi_init() local
1680 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask); in cxgb4_iscsi_init()
1681 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) | in cxgb4_iscsi_init()
1689 struct adapter *adap = netdev2adap(dev); in cxgb4_flush_eq_cache() local
1691 return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS); in cxgb4_flush_eq_cache()
1695 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) in read_eq_indices() argument
1697 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8; in read_eq_indices()
1701 spin_lock(&adap->win0_lock); in read_eq_indices()
1702 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr, in read_eq_indices()
1705 spin_unlock(&adap->win0_lock); in read_eq_indices()
1716 struct adapter *adap = netdev2adap(dev); in cxgb4_sync_txq_pidx() local
1720 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx); in cxgb4_sync_txq_pidx()
1733 if (is_t4(adap->params.chip)) in cxgb4_sync_txq_pidx()
1738 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in cxgb4_sync_txq_pidx()
1751 struct adapter *adap; in cxgb4_read_tpte() local
1755 adap = netdev2adap(dev); in cxgb4_read_tpte()
1757 offset = ((stag >> 8) * 32) + adap->vres.stag.start; in cxgb4_read_tpte()
1765 size = t4_read_reg(adap, MA_EDRAM0_BAR_A); in cxgb4_read_tpte()
1767 size = t4_read_reg(adap, MA_EDRAM1_BAR_A); in cxgb4_read_tpte()
1769 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); in cxgb4_read_tpte()
1772 if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) { in cxgb4_read_tpte()
1773 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); in cxgb4_read_tpte()
1793 } else if (is_t5(adap->params.chip)) { in cxgb4_read_tpte()
1794 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); in cxgb4_read_tpte()
1810 spin_lock(&adap->win0_lock); in cxgb4_read_tpte()
1811 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ); in cxgb4_read_tpte()
1812 spin_unlock(&adap->win0_lock); in cxgb4_read_tpte()
1816 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n", in cxgb4_read_tpte()
1825 struct adapter *adap; in cxgb4_read_sge_timestamp() local
1827 adap = netdev2adap(dev); in cxgb4_read_sge_timestamp()
1828 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A); in cxgb4_read_sge_timestamp()
1829 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A)); in cxgb4_read_sge_timestamp()
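
cxgb4_read_sge_timestamp() above stitches a 64-bit timestamp out of two 32-bit register reads; a sketch of the composition, assuming the usual hi:lo split (TSVAL_G extracts the timestamp field from the HI register):

	/* Sketch: compose the 64-bit SGE timestamp from the two halves read above. */
	u64 example_read_sge_timestamp(struct net_device *dev)
	{
		struct adapter *adap = netdev2adap(dev);
		u32 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
		u32 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

		return ((u64)hi << 32) | (u64)lo;
	}
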
1886 static void drain_db_fifo(struct adapter *adap, int usecs) in drain_db_fifo() argument
1891 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); in drain_db_fifo()
1892 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); in drain_db_fifo()
1893 if (is_t4(adap->params.chip)) { in drain_db_fifo()
1917 static void enable_txq_db(struct adapter *adap, struct sge_txq *q) in enable_txq_db() argument
1925 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in enable_txq_db()
1933 static void disable_dbs(struct adapter *adap) in disable_dbs() argument
1937 for_each_ethrxq(&adap->sge, i) in disable_dbs()
1938 disable_txq_db(&adap->sge.ethtxq[i].q); in disable_dbs()
1939 if (is_offload(adap)) { in disable_dbs()
1941 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in disable_dbs()
1944 for_each_ofldtxq(&adap->sge, i) { in disable_dbs()
1951 for_each_port(adap, i) in disable_dbs()
1952 disable_txq_db(&adap->sge.ctrlq[i].q); in disable_dbs()
1955 static void enable_dbs(struct adapter *adap) in enable_dbs() argument
1959 for_each_ethrxq(&adap->sge, i) in enable_dbs()
1960 enable_txq_db(adap, &adap->sge.ethtxq[i].q); in enable_dbs()
1961 if (is_offload(adap)) { in enable_dbs()
1963 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in enable_dbs()
1966 for_each_ofldtxq(&adap->sge, i) { in enable_dbs()
1969 enable_txq_db(adap, &txq->q); in enable_dbs()
1973 for_each_port(adap, i) in enable_dbs()
1974 enable_txq_db(adap, &adap->sge.ctrlq[i].q); in enable_dbs()
1977 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) in notify_rdma_uld() argument
1981 if (adap->uld && adap->uld[type].handle) in notify_rdma_uld()
1982 adap->uld[type].control(adap->uld[type].handle, cmd); in notify_rdma_uld()
1987 struct adapter *adap; in process_db_full() local
1989 adap = container_of(work, struct adapter, db_full_task); in process_db_full()
1991 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_full()
1992 enable_dbs(adap); in process_db_full()
1993 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); in process_db_full()
1994 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_full()
1995 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in process_db_full()
1999 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in process_db_full()
2003 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) in sync_txq_pidx() argument
2009 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); in sync_txq_pidx()
2021 if (is_t4(adap->params.chip)) in sync_txq_pidx()
2026 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in sync_txq_pidx()
2034 CH_WARN(adap, "DB drop recovery failed.\n"); in sync_txq_pidx()
2037 static void recover_all_queues(struct adapter *adap) in recover_all_queues() argument
2041 for_each_ethrxq(&adap->sge, i) in recover_all_queues()
2042 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); in recover_all_queues()
2043 if (is_offload(adap)) { in recover_all_queues()
2045 adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in recover_all_queues()
2047 for_each_ofldtxq(&adap->sge, i) { in recover_all_queues()
2050 sync_txq_pidx(adap, &txq->q); in recover_all_queues()
2054 for_each_port(adap, i) in recover_all_queues()
2055 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); in recover_all_queues()
2060 struct adapter *adap; in process_db_drop() local
2062 adap = container_of(work, struct adapter, db_drop_task); in process_db_drop()
2064 if (is_t4(adap->params.chip)) { in process_db_drop()
2065 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2066 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); in process_db_drop()
2067 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2068 recover_all_queues(adap); in process_db_drop()
2069 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2070 enable_dbs(adap); in process_db_drop()
2071 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); in process_db_drop()
2072 } else if (is_t5(adap->params.chip)) { in process_db_drop()
2073 u32 dropped_db = t4_read_reg(adap, 0x010ac); in process_db_drop()
2080 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, in process_db_drop()
2083 dev_err(adap->pdev_dev, "doorbell drop recovery: " in process_db_drop()
2087 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); in process_db_drop()
2090 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); in process_db_drop()
2093 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_drop()
2094 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0); in process_db_drop()
2097 void t4_db_full(struct adapter *adap) in t4_db_full() argument
2099 if (is_t4(adap->params.chip)) { in t4_db_full()
2100 disable_dbs(adap); in t4_db_full()
2101 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); in t4_db_full()
2102 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in t4_db_full()
2104 queue_work(adap->workq, &adap->db_full_task); in t4_db_full()
2108 void t4_db_dropped(struct adapter *adap) in t4_db_dropped() argument
2110 if (is_t4(adap->params.chip)) { in t4_db_dropped()
2111 disable_dbs(adap); in t4_db_dropped()
2112 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); in t4_db_dropped()
2114 queue_work(adap->workq, &adap->db_drop_task); in t4_db_dropped()
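
t4_db_full()/t4_db_dropped() above run in interrupt context, so they only quiesce doorbells, notify the RDMA ULD, and punt the slow recovery (drain_db_fifo() polls with delays) to the adapter workqueue, where process_db_full()/process_db_drop() finish the job. The generic shape of that deferral, omitting the SGE register fiddling:

	/* Generic shape: the interrupt path does the minimum and queues the
	 * sleepable recovery onto the adapter's workqueue.
	 */
	static void example_db_full_task(struct work_struct *work)
	{
		struct adapter *adap = container_of(work, struct adapter,
						    db_full_task);

		drain_db_fifo(adap, dbfifo_drain_delay);	/* may sleep */
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	}

	void example_db_full(struct adapter *adap)		/* interrupt context */
	{
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		queue_work(adap->workq, &adap->db_full_task);	/* defer the rest */
	}
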
2125 static void detach_ulds(struct adapter *adap) in detach_ulds() argument
2130 list_del(&adap->list_node); in detach_ulds()
2133 if (adap->uld && adap->uld[i].handle) in detach_ulds()
2134 adap->uld[i].state_change(adap->uld[i].handle, in detach_ulds()
2144 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) in notify_ulds() argument
2150 if (adap->uld && adap->uld[i].handle) in notify_ulds()
2151 adap->uld[i].state_change(adap->uld[i].handle, in notify_ulds()
2164 struct adapter *adap; in cxgb4_inet6addr_handler() local
2170 list_for_each_entry(adap, &adapter_list, list_node) { in cxgb4_inet6addr_handler()
2173 cxgb4_clip_get(adap->port[0], in cxgb4_inet6addr_handler()
2177 cxgb4_clip_release(adap->port[0], in cxgb4_inet6addr_handler()
2211 static void update_clip(const struct adapter *adap) in update_clip() argument
2220 dev = adap->port[i]; in update_clip()
2243 static int cxgb_up(struct adapter *adap) in cxgb_up() argument
2248 err = setup_sge_queues(adap); in cxgb_up()
2251 err = setup_rss(adap); in cxgb_up()
2255 if (adap->flags & USING_MSIX) { in cxgb_up()
2256 name_msix_vecs(adap); in cxgb_up()
2257 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, in cxgb_up()
2258 adap->msix_info[0].desc, adap); in cxgb_up()
2261 err = request_msix_queue_irqs(adap); in cxgb_up()
2263 free_irq(adap->msix_info[0].vec, adap); in cxgb_up()
2267 err = request_irq(adap->pdev->irq, t4_intr_handler(adap), in cxgb_up()
2268 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED, in cxgb_up()
2269 adap->port[0]->name, adap); in cxgb_up()
2274 enable_rx(adap); in cxgb_up()
2275 t4_sge_start(adap); in cxgb_up()
2276 t4_intr_enable(adap); in cxgb_up()
2277 adap->flags |= FULL_INIT_DONE; in cxgb_up()
2280 notify_ulds(adap, CXGB4_STATE_UP); in cxgb_up()
2282 update_clip(adap); in cxgb_up()
2285 INIT_LIST_HEAD(&adap->mac_hlist); in cxgb_up()
2289 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); in cxgb_up()
2291 t4_free_sge_resources(adap); in cxgb_up()
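
cxgb_up() above follows the standard goto-ladder: each successfully acquired resource gets a label, and failures fall through the labels in reverse acquisition order (IRQs are freed before the SGE queues they service). Its skeleton, reduced to the MSI-X path shown in the lines above:

	/* Skeleton of cxgb_up(), MSI-X path only. */
	static int example_cxgb_up(struct adapter *adap)
	{
		int err;

		err = setup_sge_queues(adap);
		if (err)
			goto out;
		err = setup_rss(adap);
		if (err)
			goto freeq;

		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;
		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}

		enable_rx(adap);
		t4_sge_start(adap);
		t4_intr_enable(adap);
		adap->flags |= FULL_INIT_DONE;
		return 0;

	irq_err:
		dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
	freeq:
		t4_free_sge_resources(adap);
	out:
		return err;
	}
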
2363 struct adapter *adap; in cxgb4_create_server_filter() local
2367 adap = netdev2adap(dev); in cxgb4_create_server_filter()
2370 stid -= adap->tids.sftid_base; in cxgb4_create_server_filter()
2371 stid += adap->tids.nftids; in cxgb4_create_server_filter()
2375 f = &adap->tids.ftid_tab[stid]; in cxgb4_create_server_filter()
2384 clear_filter(adap, f); in cxgb4_create_server_filter()
2396 if (adap->params.tp.vlan_pri_map & PORT_F) { in cxgb4_create_server_filter()
2402 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) { in cxgb4_create_server_filter()
2416 f->tid = stid + adap->tids.ftid_base; in cxgb4_create_server_filter()
2417 ret = set_filter_wr(adap, stid); in cxgb4_create_server_filter()
2419 clear_filter(adap, f); in cxgb4_create_server_filter()
2431 struct adapter *adap; in cxgb4_remove_server_filter() local
2433 adap = netdev2adap(dev); in cxgb4_remove_server_filter()
2436 stid -= adap->tids.sftid_base; in cxgb4_remove_server_filter()
2437 stid += adap->tids.nftids; in cxgb4_remove_server_filter()
2439 f = &adap->tids.ftid_tab[stid]; in cxgb4_remove_server_filter()
2443 return delete_filter(adap, stid); in cxgb4_remove_server_filter()
2641 static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap) in cxgb4_mgmt_fill_vf_station_mac_addr() argument
2649 adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev, in cxgb4_mgmt_fill_vf_station_mac_addr()
2651 err = t4_get_raw_vpd_params(adap, &adap->params.vpd); in cxgb4_mgmt_fill_vf_station_mac_addr()
2655 na = adap->params.vpd.na; in cxgb4_mgmt_fill_vf_station_mac_addr()
2671 for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev); in cxgb4_mgmt_fill_vf_station_mac_addr()
2673 macaddr[5] = adap->pf * 16 + vf; in cxgb4_mgmt_fill_vf_station_mac_addr()
2674 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr); in cxgb4_mgmt_fill_vf_station_mac_addr()
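
cxgb4_mgmt_fill_vf_station_mac_addr() above derives per-VF station MACs from the adapter's VPD node address ("na"): the first five octets come from the VPD, and the last octet encodes PF and VF as pf * 16 + vf (source line 2673). A standalone sketch of the derivation; the "na" string and PF number below are hypothetical:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Keep the first five octets of the VPD node address and make the
	 * last octet pf * 16 + vf, as the lines above do.
	 */
	static void fill_vf_macs(const char *na, unsigned int pf,
				 unsigned int nvfs, unsigned char (*macs)[6])
	{
		unsigned char base[6];
		unsigned int i, vf;

		for (i = 0; i < 6; i++) {	/* parse "na" as 12 hex digits */
			char byte[3] = { na[2 * i], na[2 * i + 1], '\0' };

			base[i] = (unsigned char)strtoul(byte, NULL, 16);
		}
		for (vf = 0; vf < nvfs; vf++) {
			memcpy(macs[vf], base, 6);
			macs[vf][5] = (unsigned char)(pf * 16 + vf);
		}
	}

	int main(void)
	{
		unsigned char macs[4][6];
		unsigned int vf;

		fill_vf_macs("0007430abcde", 2, 4, macs);	/* hypothetical "na" */
		for (vf = 0; vf < 4; vf++)
			printf("VF%u: %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
			       macs[vf][0], macs[vf][1], macs[vf][2],
			       macs[vf][3], macs[vf][4], macs[vf][5]);
		return 0;
	}
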
2681 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_mac() local
2694 ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac); in cxgb4_mgmt_set_vf_mac()
2696 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac); in cxgb4_mgmt_set_vf_mac()
2704 struct adapter *adap = pi->adapter; in cxgb4_mgmt_get_vf_config() local
2707 if (vf >= adap->num_vfs) in cxgb4_mgmt_get_vf_config()
2709 vfinfo = &adap->vfinfo[vf]; in cxgb4_mgmt_get_vf_config()
2735 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_rate() local
2742 if (vf >= adap->num_vfs) in cxgb4_mgmt_set_vf_rate()
2746 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
2754 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
2760 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf); in cxgb4_mgmt_set_vf_rate()
2765 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
2777 ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET, in cxgb4_mgmt_set_vf_rate()
2785 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n", in cxgb4_mgmt_set_vf_rate()
2789 dev_info(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
2797 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf, in cxgb4_mgmt_set_vf_rate()
2800 dev_err(adap->pdev_dev, in cxgb4_mgmt_set_vf_rate()
2805 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n", in cxgb4_mgmt_set_vf_rate()
2806 adap->pf, vf, class_id); in cxgb4_mgmt_set_vf_rate()
2807 adap->vfinfo[vf].tx_rate = max_tx_rate; in cxgb4_mgmt_set_vf_rate()
2815 struct adapter *adap = pi->adapter; in cxgb4_mgmt_set_vf_vlan() local
2818 if (vf >= adap->num_vfs || vlan > 4095 || qos > 7) in cxgb4_mgmt_set_vf_vlan()
2824 ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan); in cxgb4_mgmt_set_vf_vlan()
2826 adap->vfinfo[vf].vlan = vlan; in cxgb4_mgmt_set_vf_vlan()
2830 dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n", in cxgb4_mgmt_set_vf_vlan()
2831 ret, (vlan ? "setting" : "clearing"), adap->pf, vf); in cxgb4_mgmt_set_vf_vlan()
2859 struct adapter *adap = pi->adapter; in cxgb_netpoll() local
2861 if (adap->flags & USING_MSIX) { in cxgb_netpoll()
2863 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; in cxgb_netpoll()
2868 t4_intr_handler(adap)(0, adap); in cxgb_netpoll()
2875 struct adapter *adap = pi->adapter; in cxgb_set_tx_maxrate() local
2888 if (!(adap->flags & FULL_INIT_DONE)) { in cxgb_set_tx_maxrate()
2889 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
2900 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
2913 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
2948 dev_err(adap->pdev_dev, in cxgb_set_tx_maxrate()
2987 struct adapter *adap = netdev2adap(dev); in cxgb_setup_tc_block_cb() local
2989 if (!(adap->flags & FULL_INIT_DONE)) { in cxgb_setup_tc_block_cb()
2990 dev_err(adap->pdev_dev, in cxgb_setup_tc_block_cb()
3276 struct adapter *adap; in notify_fatal_err() local
3278 adap = container_of(work, struct adapter, fatal_err_notify_task); in notify_fatal_err()
3279 notify_ulds(adap, CXGB4_STATE_FATAL_ERROR); in notify_fatal_err()
3282 void t4_fatal_err(struct adapter *adap) in t4_fatal_err() argument
3286 if (pci_channel_offline(adap->pdev)) in t4_fatal_err()
3292 t4_shutdown_adapter(adap); in t4_fatal_err()
3293 for_each_port(adap, port) { in t4_fatal_err()
3294 struct net_device *dev = adap->port[port]; in t4_fatal_err()
3305 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); in t4_fatal_err()
3306 queue_work(adap->workq, &adap->fatal_err_notify_task); in t4_fatal_err()
3309 static void setup_memwin(struct adapter *adap) in setup_memwin() argument
3311 u32 nic_win_base = t4_get_util_window(adap); in setup_memwin()
3313 t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC); in setup_memwin()
3316 static void setup_memwin_rdma(struct adapter *adap) in setup_memwin_rdma() argument
3318 if (adap->vres.ocq.size) { in setup_memwin_rdma()
3322 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2); in setup_memwin_rdma()
3324 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); in setup_memwin_rdma()
3325 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; in setup_memwin_rdma()
3326 t4_write_reg(adap, in setup_memwin_rdma()
3329 t4_write_reg(adap, in setup_memwin_rdma()
3331 adap->vres.ocq.start); in setup_memwin_rdma()
3332 t4_read_reg(adap, in setup_memwin_rdma()
3537 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) in adap_init1() argument
3545 ret = t4_get_pfres(adap); in adap_init1()
3547 dev_err(adap->pdev_dev, in adap_init1()
3557 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c); in adap_init1()
3563 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); in adap_init1()
3567 ret = t4_config_glbl_rss(adap, adap->pf, in adap_init1()
3574 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64, in adap_init1()
3580 t4_sge_init(adap); in adap_init1()
3583 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849); in adap_init1()
3584 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12)); in adap_init1()
3585 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A); in adap_init1()
3586 v = t4_read_reg(adap, TP_PIO_DATA_A); in adap_init1()
3587 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F); in adap_init1()
3590 adap->params.tp.tx_modq_map = 0xE4; in adap_init1()
3591 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A, in adap_init1()
3592 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map)); in adap_init1()
3596 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
3598 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
3600 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
3604 if (is_offload(adap)) { in adap_init1()
3605 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A, in adap_init1()
3610 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A, in adap_init1()
3618 return t4_early_init(adap, adap->pf); in adap_init1()
3742 static int adap_init0_phy(struct adapter *adap) in adap_init0_phy() argument
3750 phy_info = find_phy_info(adap->pdev->device); in adap_init0_phy()
3752 dev_warn(adap->pdev_dev, in adap_init0_phy()
3763 adap->pdev_dev); in adap_init0_phy()
3771 dev_err(adap->pdev_dev, "unable to find PHY Firmware image " in adap_init0_phy()
3777 t4_phy_fw_ver(adap, &cur_phy_fw_ver); in adap_init0_phy()
3778 dev_warn(adap->pdev_dev, "continuing with, on-adapter " in adap_init0_phy()
3788 ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock, in adap_init0_phy()
3792 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n", in adap_init0_phy()
3800 dev_info(adap->pdev_dev, "Successfully transferred PHY " in adap_init0_phy()
4095 static int adap_init0(struct adapter *adap) in adap_init0() argument
4107 ret = t4_init_devlog_params(adap); in adap_init0()
4112 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, in adap_init0()
4115 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", in adap_init0()
4119 if (ret == adap->mbox) in adap_init0()
4120 adap->flags |= MASTER_PF; in adap_init0()
4130 t4_get_version_info(adap); in adap_init0()
4131 ret = t4_check_fw_version(adap); in adap_init0()
4135 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { in adap_init0()
4145 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
4147 dev_err(adap->pdev_dev, in adap_init0()
4149 CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
4164 adap->pdev_dev); in adap_init0()
4166 dev_err(adap->pdev_dev, in adap_init0()
4175 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, in adap_init0()
4190 ret = adap_config_hma(adap); in adap_init0()
4192 dev_err(adap->pdev_dev, in adap_init0()
4195 dev_info(adap->pdev_dev, "Coming up as %s: "\ in adap_init0()
4197 adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); in adap_init0()
4199 dev_info(adap->pdev_dev, "Coming up as MASTER: "\ in adap_init0()
4207 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, in adap_init0()
4214 dev_err(adap->pdev_dev, "firmware doesn't support " in adap_init0()
4223 ret = adap_init0_config(adap, reset); in adap_init0()
4225 dev_err(adap->pdev_dev, "no Configuration File " in adap_init0()
4230 dev_err(adap->pdev_dev, "could not initialize " in adap_init0()
4240 ret = t4_get_pfres(adap); in adap_init0()
4242 dev_err(adap->pdev_dev, in adap_init0()
4257 ret = t4_get_vpd_params(adap, &adap->params.vpd); in adap_init0()
4268 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); in adap_init0()
4272 adap->params.nports = hweight32(port_vec); in adap_init0()
4273 adap->params.portvec = port_vec; in adap_init0()
4279 ret = t4_sge_init(adap); in adap_init0()
4283 if (is_bypass_device(adap->pdev->device)) in adap_init0()
4284 adap->params.bypass = 1; in adap_init0()
4305 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); in adap_init0()
4308 adap->sge.egr_start = val[0]; in adap_init0()
4309 adap->l2t_start = val[1]; in adap_init0()
4310 adap->l2t_end = val[2]; in adap_init0()
4311 adap->tids.ftid_base = val[3]; in adap_init0()
4312 adap->tids.nftids = val[4] - val[3] + 1; in adap_init0()
4313 adap->sge.ingr_start = val[5]; in adap_init0()
4315 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { in adap_init0()
4321 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
4324 adap->rawf_start = val[0]; in adap_init0()
4325 adap->rawf_cnt = val[1] - val[0] + 1; in adap_init0()
4337 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
4340 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; in adap_init0()
4341 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; in adap_init0()
4343 adap->sge.egr_map = kcalloc(adap->sge.egr_sz, in adap_init0()
4344 sizeof(*adap->sge.egr_map), GFP_KERNEL); in adap_init0()
4345 if (!adap->sge.egr_map) { in adap_init0()
4350 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, in adap_init0()
4351 sizeof(*adap->sge.ingr_map), GFP_KERNEL); in adap_init0()
4352 if (!adap->sge.ingr_map) { in adap_init0()
4360 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), in adap_init0()
4362 if (!adap->sge.starving_fl) { in adap_init0()
4367 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), in adap_init0()
4369 if (!adap->sge.txq_maperr) { in adap_init0()
4375 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), in adap_init0()
4377 if (!adap->sge.blocked_fl) { in adap_init0()
4385 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
4388 adap->clipt_start = val[0]; in adap_init0()
4389 adap->clipt_end = val[1]; in adap_init0()
4395 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16; in adap_init0()
4400 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
4405 adap->flags |= FW_OFLD_CONN; in adap_init0()
4406 adap->tids.aftid_base = val[0]; in adap_init0()
4407 adap->tids.aftid_end = val[1]; in adap_init0()
4417 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); in adap_init0()
4425 if (is_t4(adap->params.chip)) { in adap_init0()
4426 adap->params.ulptx_memwrite_dsgl = false; in adap_init0()
4429 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
4431 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); in adap_init0()
4436 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
4438 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0); in adap_init0()
4441 if (is_t4(adap->params.chip)) { in adap_init0()
4442 adap->params.filter2_wr_support = 0; in adap_init0()
4445 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
4447 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0); in adap_init0()
4458 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0()
4472 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
4476 adap->tids.ntids = val[0]; in adap_init0()
4477 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); in adap_init0()
4478 adap->tids.stid_base = val[1]; in adap_init0()
4479 adap->tids.nstids = val[2] - val[1] + 1; in adap_init0()
4489 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) { in adap_init0()
4490 adap->tids.sftid_base = adap->tids.ftid_base + in adap_init0()
4491 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
4492 adap->tids.nsftids = adap->tids.nftids - in adap_init0()
4493 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
4494 adap->tids.nftids = adap->tids.sftid_base - in adap_init0()
4495 adap->tids.ftid_base; in adap_init0()
4497 adap->vres.ddp.start = val[3]; in adap_init0()
4498 adap->vres.ddp.size = val[4] - val[3] + 1; in adap_init0()
4499 adap->params.ofldq_wr_cred = val[5]; in adap_init0()
4502 ret = init_hash_filter(adap); in adap_init0()
4506 adap->params.offload = 1; in adap_init0()
4507 adap->num_ofld_uld += 1; in adap_init0()
4517 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
4521 adap->vres.stag.start = val[0]; in adap_init0()
4522 adap->vres.stag.size = val[1] - val[0] + 1; in adap_init0()
4523 adap->vres.rq.start = val[2]; in adap_init0()
4524 adap->vres.rq.size = val[3] - val[2] + 1; in adap_init0()
4525 adap->vres.pbl.start = val[4]; in adap_init0()
4526 adap->vres.pbl.size = val[5] - val[4] + 1; in adap_init0()
4530 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
4533 adap->vres.srq.start = val[0]; in adap_init0()
4534 adap->vres.srq.size = val[1] - val[0] + 1; in adap_init0()
4536 if (adap->vres.srq.size) { in adap_init0()
4537 adap->srq = t4_init_srq(adap->vres.srq.size); in adap_init0()
4538 if (!adap->srq) in adap_init0()
4539 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n"); in adap_init0()
4548 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, in adap_init0()
4552 adap->vres.qp.start = val[0]; in adap_init0()
4553 adap->vres.qp.size = val[1] - val[0] + 1; in adap_init0()
4554 adap->vres.cq.start = val[2]; in adap_init0()
4555 adap->vres.cq.size = val[3] - val[2] + 1; in adap_init0()
4556 adap->vres.ocq.start = val[4]; in adap_init0()
4557 adap->vres.ocq.size = val[5] - val[4] + 1; in adap_init0()
4561 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, in adap_init0()
4564 adap->params.max_ordird_qp = 8; in adap_init0()
4565 adap->params.max_ird_adapter = 32 * adap->tids.ntids; in adap_init0()
4568 adap->params.max_ordird_qp = val[0]; in adap_init0()
4569 adap->params.max_ird_adapter = val[1]; in adap_init0()
4571 dev_info(adap->pdev_dev, in adap_init0()
4573 adap->params.max_ordird_qp, in adap_init0()
4574 adap->params.max_ird_adapter); in adap_init0()
4578 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, in adap_init0()
4580 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0); in adap_init0()
4584 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, in adap_init0()
4586 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0); in adap_init0()
4587 adap->num_ofld_uld += 2; in adap_init0()
4592 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
4596 adap->vres.iscsi.start = val[0]; in adap_init0()
4597 adap->vres.iscsi.size = val[1] - val[0] + 1; in adap_init0()
4599 adap->num_ofld_uld += 2; in adap_init0()
4605 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
4611 adap->vres.ncrypto_fc = val[0]; in adap_init0()
4613 adap->num_ofld_uld += 1; in adap_init0()
4619 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
4623 adap->vres.key.start = val[0]; in adap_init0()
4624 adap->vres.key.size = val[1] - val[0] + 1; in adap_init0()
4625 adap->num_uld += 1; in adap_init0()
4627 adap->params.crypto = ntohs(caps_cmd.cryptocaps); in adap_init0()
4637 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); in adap_init0()
4659 if (adap->params.mtus[i] == 1492) { in adap_init0()
4660 adap->params.mtus[i] = 1488; in adap_init0()
4664 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in adap_init0()
4665 adap->params.b_wnd); in adap_init0()
4667 t4_init_sge_params(adap); in adap_init0()
4668 adap->flags |= FW_OK; in adap_init0()
4669 t4_init_tp_params(adap, true); in adap_init0()
4678 adap_free_hma_mem(adap); in adap_init0()
4679 kfree(adap->sge.egr_map); in adap_init0()
4680 kfree(adap->sge.ingr_map); in adap_init0()
4681 kfree(adap->sge.starving_fl); in adap_init0()
4682 kfree(adap->sge.txq_maperr); in adap_init0()
4684 kfree(adap->sge.blocked_fl); in adap_init0()
4687 t4_fw_bye(adap, adap->mbox); in adap_init0()
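
One detail from adap_init0() worth unpacking: when FW_OFLD_CONN is supported (source lines 4489-4495 above), the firmware-provided filter ID range is split so that roughly the bottom third stays as regular filter IDs and the top two thirds are repurposed as server filter IDs. Worked numerically, assuming a hypothetical nftids of 496:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int ftid_base = 0, nftids = 496;	/* hypothetical FW values */
		unsigned int sftid_base, nsftids;

		/* Same arithmetic as source lines 4490-4495. */
		sftid_base = ftid_base + DIV_ROUND_UP(nftids, 3);
		nsftids    = nftids - DIV_ROUND_UP(nftids, 3);
		nftids     = sftid_base - ftid_base;

		/* -> 166 regular filter IDs at [0,165], 330 server filter IDs from 166 */
		printf("nftids=%u sftid_base=%u nsftids=%u\n",
		       nftids, sftid_base, nsftids);
		return 0;
	}
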
4697 struct adapter *adap = pci_get_drvdata(pdev); in eeh_err_detected() local
4699 if (!adap) in eeh_err_detected()
4703 adap->flags &= ~FW_OK; in eeh_err_detected()
4704 notify_ulds(adap, CXGB4_STATE_START_RECOVERY); in eeh_err_detected()
4705 spin_lock(&adap->stats_lock); in eeh_err_detected()
4706 for_each_port(adap, i) { in eeh_err_detected()
4707 struct net_device *dev = adap->port[i]; in eeh_err_detected()
4713 spin_unlock(&adap->stats_lock); in eeh_err_detected()
4714 disable_interrupts(adap); in eeh_err_detected()
4715 if (adap->flags & FULL_INIT_DONE) in eeh_err_detected()
4716 cxgb_down(adap); in eeh_err_detected()
4718 if ((adap->flags & DEV_ENABLED)) { in eeh_err_detected()
4720 adap->flags &= ~DEV_ENABLED; in eeh_err_detected()
4730 struct adapter *adap = pci_get_drvdata(pdev); in eeh_slot_reset() local
4732 if (!adap) { in eeh_slot_reset()
4738 if (!(adap->flags & DEV_ENABLED)) { in eeh_slot_reset()
4744 adap->flags |= DEV_ENABLED; in eeh_slot_reset()
4752 if (t4_wait_dev_ready(adap->regs) < 0) in eeh_slot_reset()
4754 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0) in eeh_slot_reset()
4756 adap->flags |= FW_OK; in eeh_slot_reset()
4757 if (adap_init1(adap, &c)) in eeh_slot_reset()
4760 for_each_port(adap, i) { in eeh_slot_reset()
4761 struct port_info *p = adap2pinfo(adap, i); in eeh_slot_reset()
4763 ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1, in eeh_slot_reset()
4771 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in eeh_slot_reset()
4772 adap->params.b_wnd); in eeh_slot_reset()
4773 setup_memwin(adap); in eeh_slot_reset()
4774 if (cxgb_up(adap)) in eeh_slot_reset()
4782 struct adapter *adap = pci_get_drvdata(pdev); in eeh_resume() local
4784 if (!adap) in eeh_resume()
4788 for_each_port(adap, i) { in eeh_resume()
4789 struct net_device *dev = adap->port[i]; in eeh_resume()
4826 static int cfg_queues(struct adapter *adap) in cfg_queues() argument
4828 struct sge *s = &adap->sge; in cfg_queues()
4838 if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) { in cfg_queues()
4839 adap->params.offload = 0; in cfg_queues()
4840 adap->params.crypto = 0; in cfg_queues()
4855 niqflint = adap->params.pfres.niqflint - 1; in cfg_queues()
4856 if (!(adap->flags & USING_MSIX)) in cfg_queues()
4858 neq = adap->params.pfres.neq / 2; in cfg_queues()
4864 if (avail_eth_qsets < adap->params.nports) { in cfg_queues()
4865 dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n", in cfg_queues()
4866 avail_eth_qsets, adap->params.nports); in cfg_queues()
4871 for_each_port(adap, i) in cfg_queues()
4872 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); in cfg_queues()
4879 if (adap->params.nports * 8 > avail_eth_qsets) { in cfg_queues()
4880 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n", in cfg_queues()
4881 avail_eth_qsets, adap->params.nports * 8); in cfg_queues()
4885 for_each_port(adap, i) { in cfg_queues()
4886 struct port_info *pi = adap2pinfo(adap, i); in cfg_queues()
4898 q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; in cfg_queues()
4905 for_each_port(adap, i) { in cfg_queues()
4906 struct port_info *pi = adap2pinfo(adap, i); in cfg_queues()
4917 if (is_uld(adap)) { in cfg_queues()
4925 s->ofldqsets = roundup(i, adap->params.nports); in cfg_queues()
4927 s->ofldqsets = adap->params.nports; in cfg_queues()
4934 init_rspq(adap, &r->rspq, 5, 10, 1024, 64); in cfg_queues()
4944 if (!is_t4(adap->params.chip)) in cfg_queues()
4947 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); in cfg_queues()
4948 init_rspq(adap, &s->intrq, 0, 1, 512, 64); in cfg_queues()
4957 static void reduce_ethqs(struct adapter *adap, int n) in reduce_ethqs() argument
4962 while (n < adap->sge.ethqsets) in reduce_ethqs()
4963 for_each_port(adap, i) { in reduce_ethqs()
4964 pi = adap2pinfo(adap, i); in reduce_ethqs()
4967 adap->sge.ethqsets--; in reduce_ethqs()
4968 if (adap->sge.ethqsets <= n) in reduce_ethqs()
4974 for_each_port(adap, i) { in reduce_ethqs()
4975 pi = adap2pinfo(adap, i); in reduce_ethqs()
4981 static int get_msix_info(struct adapter *adap) in get_msix_info() argument
4986 if (is_offload(adap)) in get_msix_info()
4987 max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld; in get_msix_info()
4988 if (is_pci_uld(adap)) in get_msix_info()
4989 max_ingq += MAX_OFLD_QSETS * adap->num_uld; in get_msix_info()
4998 adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq), in get_msix_info()
5000 if (!adap->msix_bmap_ulds.msix_bmap) { in get_msix_info()
5004 spin_lock_init(&adap->msix_bmap_ulds.lock); in get_msix_info()
5005 adap->msix_info_ulds = msix_info; in get_msix_info()
5010 static void free_msix_info(struct adapter *adap) in free_msix_info() argument
5012 if (!(adap->num_uld && adap->num_ofld_uld)) in free_msix_info()
5015 kfree(adap->msix_info_ulds); in free_msix_info()
5016 kfree(adap->msix_bmap_ulds.msix_bmap); in free_msix_info()
5022 static int enable_msix(struct adapter *adap) in enable_msix() argument
5026 struct sge *s = &adap->sge; in enable_msix()
5027 unsigned int nchan = adap->params.nports; in enable_msix()
5031 if (is_pci_uld(adap)) in enable_msix()
5032 max_ingq += (MAX_OFLD_QSETS * adap->num_uld); in enable_msix()
5033 if (is_offload(adap)) in enable_msix()
5034 max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld); in enable_msix()
5041 if (get_msix_info(adap)) { in enable_msix()
5042 adap->params.offload = 0; in enable_msix()
5043 adap->params.crypto = 0; in enable_msix()
5050 if (is_offload(adap)) { in enable_msix()
5051 want += adap->num_ofld_uld * s->ofldqsets; in enable_msix()
5052 ofld_need = adap->num_ofld_uld * nchan; in enable_msix()
5054 if (is_pci_uld(adap)) { in enable_msix()
5055 want += adap->num_uld * s->ofldqsets; in enable_msix()
5056 uld_need = adap->num_uld * nchan; in enable_msix()
5062 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need; in enable_msix()
5064 need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need; in enable_msix()
5066 allocated = pci_enable_msix_range(adap->pdev, entries, need, want); in enable_msix()
5068 dev_info(adap->pdev_dev, "not enough MSI-X vectors left," in enable_msix()
5082 reduce_ethqs(adap, i); in enable_msix()
5084 if (is_uld(adap)) { in enable_msix()
5092 adap->msix_info[i].vec = entries[i].vector; in enable_msix()
5093 if (is_uld(adap)) { in enable_msix()
5095 adap->msix_info_ulds[j].vec = entries[i].vector; in enable_msix()
5096 adap->msix_info_ulds[j].idx = i; in enable_msix()
5098 adap->msix_bmap_ulds.mapsize = j; in enable_msix()
5100 dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, " in enable_msix()
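
enable_msix() above sizes its pci_enable_msix_range() request as a want/need pair: "want" is one vector per queue plus the extra non-data and FW-event vectors, "need" is the floor below which the driver cannot operate (at least one queue set per port). If the allocation lands between the two, reduce_ethqs() trims queue sets to fit. A condensed sketch of that shape, omitting the ULD vector accounting from the lines above:

	/* Condensed want/need computation; pci_enable_msix_range() returns
	 * any count in [need, want] or a negative error.
	 */
	static int example_enable_msix(struct adapter *adap)
	{
		struct sge *s = &adap->sge;
		unsigned int nchan = adap->params.nports;
		int want, need, allocated, i;
		struct msix_entry *entries;

		want = s->max_ethqsets + EXTRA_VECS;	/* all qsets + non-data/FW */
		need = nchan + EXTRA_VECS;		/* one qset per port minimum */

		entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL);
		if (!entries)
			return -ENOMEM;
		for (i = 0; i < want; i++)
			entries[i].entry = i;

		allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
		if (allocated < 0) {
			kfree(entries);
			return allocated;	/* could not even meet 'need' */
		}
		if (allocated < want)
			reduce_ethqs(adap, allocated - EXTRA_VECS);

		for (i = 0; i < allocated; i++)
			adap->msix_info[i].vec = entries[i].vector;
		kfree(entries);
		return 0;
	}
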
5110 static int init_rss(struct adapter *adap) in init_rss() argument
5115 err = t4_init_rss_mode(adap, adap->mbox); in init_rss()
5119 for_each_port(adap, i) { in init_rss()
5120 struct port_info *pi = adap2pinfo(adap, i); in init_rss()
5148 const struct adapter *adap = pi->adapter; in print_port_info() local
5173 dev->name, adap->params.vpd.id, adap->name, buf); in print_port_info()
5223 static int t4_get_chip_type(struct adapter *adap, int ver) in t4_get_chip_type() argument
5225 u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A)); in t4_get_chip_type()
5258 struct adapter *adap = pci_get_drvdata(pdev); in cxgb4_iov_configure() local
5263 pcie_fw = readl(adap->regs + PCIE_FW_A); in cxgb4_iov_configure()
5293 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
5294 free_netdev(adap->port[0]); in cxgb4_iov_configure()
5295 adap->port[0] = NULL; in cxgb4_iov_configure()
5298 adap->num_vfs = 0; in cxgb4_iov_configure()
5299 kfree(adap->vfinfo); in cxgb4_iov_configure()
5300 adap->vfinfo = NULL; in cxgb4_iov_configure()
5340 FW_PFVF_CMD_PFN_V(adap->pf) | in cxgb4_iov_configure()
5343 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd), in cxgb4_iov_configure()
5350 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx, in cxgb4_iov_configure()
5351 adap->pf); in cxgb4_iov_configure()
5358 pi->adapter = adap; in cxgb4_iov_configure()
5363 adap->port[0] = netdev; in cxgb4_iov_configure()
5366 err = register_netdev(adap->port[0]); in cxgb4_iov_configure()
5369 free_netdev(adap->port[0]); in cxgb4_iov_configure()
5370 adap->port[0] = NULL; in cxgb4_iov_configure()
5374 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev), in cxgb4_iov_configure()
5376 if (!adap->vfinfo) { in cxgb4_iov_configure()
5377 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
5378 free_netdev(adap->port[0]); in cxgb4_iov_configure()
5379 adap->port[0] = NULL; in cxgb4_iov_configure()
5382 cxgb4_mgmt_fill_vf_station_mac_addr(adap); in cxgb4_iov_configure()
5389 unregister_netdev(adap->port[0]); in cxgb4_iov_configure()
5390 free_netdev(adap->port[0]); in cxgb4_iov_configure()
5391 adap->port[0] = NULL; in cxgb4_iov_configure()
5392 kfree(adap->vfinfo); in cxgb4_iov_configure()
5393 adap->vfinfo = NULL; in cxgb4_iov_configure()
5398 adap->num_vfs = num_vfs; in cxgb4_iov_configure()