Lines matching refs:adap
56 static int get_msix_idx_from_bmap(struct adapter *adap) in get_msix_idx_from_bmap() argument
58 struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds; in get_msix_idx_from_bmap()
75 static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx) in free_msix_idx_in_bmap() argument
77 struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds; in free_msix_idx_in_bmap()
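
The two bitmap helpers above implement a tiny index allocator over the ULD MSI-X vector pool. A minimal userspace sketch of the same first-free-bit pattern (names illustrative; the driver additionally serializes both operations with a spinlock):

#include <stdio.h>

#define POOL_SIZE 32

static unsigned int msix_bmap;                  /* one bit per MSI-X slot */

static int bmap_get(void)
{
        for (int i = 0; i < POOL_SIZE; i++) {
                if (!(msix_bmap & (1u << i))) {
                        msix_bmap |= 1u << i;   /* mark slot in use */
                        return i;
                }
        }
        return -1;                              /* pool exhausted */
}

static void bmap_put(int idx)
{
        msix_bmap &= ~(1u << idx);              /* return slot to the pool */
}

int main(void)
{
        int a = bmap_get(), b = bmap_get();

        bmap_put(a);
        printf("%d %d %d\n", a, b, bmap_get()); /* 0 1 0: freed slot is reused */
        return 0;
}
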
88 struct adapter *adap = q->adap; in uldrx_flush_handler() local
90 if (adap->uld[q->uld].lro_flush) in uldrx_flush_handler()
91 adap->uld[q->uld].lro_flush(&q->lro_mgr); in uldrx_flush_handler()
106 struct adapter *adap = q->adap; in uldrx_handler() local
116 ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle, in uldrx_handler()
120 ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle, in uldrx_handler()
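
uldrx_handler() and uldrx_flush_handler() dispatch through a per-ULD table of callbacks indexed by q->uld, taking the LRO path only when the ULD registered one. A sketch of that dispatch shape, with illustrative types in place of the driver's sk_buff/LRO machinery:

#include <stdio.h>

struct uld_info {
        void *handle;                           /* ULD-private state */
        int (*rx_handler)(void *handle, int pkt);
        int (*lro_rx_handler)(void *handle, int pkt);
};

static int plain_rx(void *h, int pkt) { printf("rx %d\n", pkt);  return 0; }
static int lro_rx(void *h, int pkt)   { printf("lro %d\n", pkt); return 0; }

static int dispatch(struct uld_info *uld, int pkt, int want_lro)
{
        /* prefer the LRO handler only when the ULD registered one */
        if (want_lro && uld->lro_rx_handler)
                return uld->lro_rx_handler(uld->handle, pkt);
        return uld->rx_handler(uld->handle, pkt);
}

int main(void)
{
        struct uld_info uld = { NULL, plain_rx, lro_rx };

        dispatch(&uld, 1, 1);                   /* LRO path */
        dispatch(&uld, 2, 0);                   /* plain path */
        return 0;
}
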
137 static int alloc_uld_rxqs(struct adapter *adap, in alloc_uld_rxqs() argument
140 struct sge *s = &adap->sge; in alloc_uld_rxqs()
148 per_chan = rxq_info->nrxq / adap->params.nports; in alloc_uld_rxqs()
150 if (adap->flags & USING_MSIX) in alloc_uld_rxqs()
158 per_chan = rxq_info->nciq / adap->params.nports; in alloc_uld_rxqs()
163 bmap_idx = get_msix_idx_from_bmap(adap); in alloc_uld_rxqs()
164 msi_idx = adap->msix_info_ulds[bmap_idx].idx; in alloc_uld_rxqs()
166 err = t4_sge_alloc_rxq(adap, &q->rspq, false, in alloc_uld_rxqs()
167 adap->port[que_idx++ / per_chan], in alloc_uld_rxqs()
186 free_rspq_fl(adap, &q->rspq, in alloc_uld_rxqs()
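
The per_chan arithmetic in alloc_uld_rxqs() spreads the queues across ports in contiguous blocks: with per_chan = nrxq / nports, the expression port[que_idx++ / per_chan] hands the first per_chan queues to port 0, the next block to port 1, and so on. A quick demonstration:

#include <stdio.h>

int main(void)
{
        int nports = 2, nrxq = 6;
        int per_chan = nrxq / nports;

        for (int que_idx = 0; que_idx < nrxq; que_idx++)
                printf("queue %d -> port %d\n", que_idx, que_idx / per_chan);
        /* queues 0-2 land on port 0, queues 3-5 on port 1 */
        return 0;
}
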
193 setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro) in setup_sge_queues_uld() argument
195 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in setup_sge_queues_uld()
198 if (adap->flags & USING_MSIX) { in setup_sge_queues_uld()
206 ret = !(!alloc_uld_rxqs(adap, rxq_info, lro)); in setup_sge_queues_uld()
209 if (adap->flags & FULL_INIT_DONE && in setup_sge_queues_uld()
211 struct sge *s = &adap->sge; in setup_sge_queues_uld()
216 for_each_port(adap, i) { in setup_sge_queues_uld()
221 ret = t4_set_params(adap, adap->mbox, adap->pf, in setup_sge_queues_uld()
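
The double negation in setup_sge_queues_uld() collapses alloc_uld_rxqs()'s return value to a strict 0/1, so any error code reads as failure. A one-liner showing the idiom (values illustrative):

#include <stdio.h>

int main(void)
{
        int rc = -12;                           /* e.g. -ENOMEM from the allocator */

        printf("%d -> %d\n", rc, !(!rc));       /* -12 -> 1 (failure) */
        printf("%d -> %d\n", 0, !(!0));         /*   0 -> 0 (success) */
        return 0;
}
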
228 static void t4_free_uld_rxqs(struct adapter *adap, int n, in t4_free_uld_rxqs() argument
233 free_rspq_fl(adap, &q->rspq, in t4_free_uld_rxqs()
238 static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type) in free_sge_queues_uld() argument
240 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in free_sge_queues_uld()
242 if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) { in free_sge_queues_uld()
243 struct sge *s = &adap->sge; in free_sge_queues_uld()
248 for_each_port(adap, i) { in free_sge_queues_uld()
252 t4_set_params(adap, adap->mbox, adap->pf, in free_sge_queues_uld()
258 t4_free_uld_rxqs(adap, rxq_info->nciq, in free_sge_queues_uld()
260 t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq); in free_sge_queues_uld()
261 if (adap->flags & USING_MSIX) in free_sge_queues_uld()
265 static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type, in cfg_queues_uld() argument
268 struct sge *s = &adap->sge; in cfg_queues_uld()
276 if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) { in cfg_queues_uld()
278 rxq_info->nrxq = roundup(i, adap->params.nports); in cfg_queues_uld()
282 rxq_info->nrxq = roundup(i, adap->params.nports); in cfg_queues_uld()
287 if (adap->flags & USING_MSIX) in cfg_queues_uld()
293 rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) * in cfg_queues_uld()
294 adap->params.nports); in cfg_queues_uld()
296 adap->params.nports); in cfg_queues_uld()
317 init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64); in cfg_queues_uld()
322 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids; in cfg_queues_uld()
324 dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n"); in cfg_queues_uld()
331 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64); in cfg_queues_uld()
336 adap->sge.uld_rxq_info[uld_type] = rxq_info; in cfg_queues_uld()
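
Two different roundings appear in cfg_queues_uld(): nrxq is rounded up to a multiple of the port count, while nciq is trimmed down to one via (nciq / nports) * nports, so either way every channel ends up with an equal share. Sketch of the arithmetic:

#include <stdio.h>

/* same semantics as the kernel's roundup(): round x up to a multiple of y */
#define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
        int nports = 4, want = 10;

        printf("nrxq = %d\n", ROUNDUP(want, nports));           /* 12 */
        printf("nciq = %d\n", (want / nports) * nports);        /*  8 */
        return 0;
}
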
341 static void free_queues_uld(struct adapter *adap, unsigned int uld_type) in free_queues_uld() argument
343 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in free_queues_uld()
345 adap->sge.uld_rxq_info[uld_type] = NULL; in free_queues_uld()
352 request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type) in request_msix_queue_irqs_uld() argument
354 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in request_msix_queue_irqs_uld()
360 err = request_irq(adap->msix_info_ulds[bmap_idx].vec, in request_msix_queue_irqs_uld()
362 adap->msix_info_ulds[bmap_idx].desc, in request_msix_queue_irqs_uld()
371 free_msix_idx_in_bmap(adap, bmap_idx); in request_msix_queue_irqs_uld()
372 free_irq(adap->msix_info_ulds[bmap_idx].vec, in request_msix_queue_irqs_uld()
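
request_msix_queue_irqs_uld() claims a bitmap slot and requests the vector for each queue in turn; when one request fails, the slots and vectors already taken are released in reverse. A sketch of that claim-then-unwind loop, with request_one()/free_one() as illustrative stand-ins for the bitmap and request_irq()/free_irq() calls:

#include <stdio.h>

static int request_one(int vec) { return vec == 2 ? -1 : 0; }   /* third request fails */
static void free_one(int vec)   { printf("releasing vector %d\n", vec); }

static int request_queue_irqs(int nq)
{
        int i, err = 0;

        for (i = 0; i < nq; i++) {
                err = request_one(i);
                if (err)
                        goto unwind;
        }
        return 0;
unwind:
        while (--i >= 0)
                free_one(i);    /* free_msix_idx_in_bmap() + free_irq() in the driver */
        return err;
}

int main(void)
{
        return request_queue_irqs(4) ? 1 : 0;
}
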
379 free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type) in free_msix_queue_irqs_uld() argument
381 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in free_msix_queue_irqs_uld()
387 free_msix_idx_in_bmap(adap, bmap_idx); in free_msix_queue_irqs_uld()
388 free_irq(adap->msix_info_ulds[bmap_idx].vec, in free_msix_queue_irqs_uld()
393 static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type) in name_msix_vecs_uld() argument
395 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in name_msix_vecs_uld()
396 int n = sizeof(adap->msix_info_ulds[0].desc); in name_msix_vecs_uld()
402 snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d", in name_msix_vecs_uld()
403 adap->port[0]->name, rxq_info->name, idx); in name_msix_vecs_uld()
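
name_msix_vecs_uld() builds each vector's description as "<port 0 netdev>-<uld name><queue index>" with a bounded snprintf into the fixed-size desc field. Sketch with illustrative values:

#include <stdio.h>

int main(void)
{
        char desc[32];          /* stands in for msix_info_ulds[].desc */

        for (int idx = 0; idx < 2; idx++) {
                snprintf(desc, sizeof(desc), "%s-%s%d", "eth0", "rdma", idx);
                puts(desc);     /* eth0-rdma0, eth0-rdma1 */
        }
        return 0;
}
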
407 static void enable_rx(struct adapter *adap, struct sge_rspq *q) in enable_rx() argument
416 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), in enable_rx()
421 static void quiesce_rx(struct adapter *adap, struct sge_rspq *q) in quiesce_rx() argument
427 static void enable_rx_uld(struct adapter *adap, unsigned int uld_type) in enable_rx_uld() argument
429 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in enable_rx_uld()
433 enable_rx(adap, &rxq_info->uldrxq[idx].rspq); in enable_rx_uld()
436 static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type) in quiesce_rx_uld() argument
438 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in quiesce_rx_uld()
442 quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq); in quiesce_rx_uld()
446 free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info) in free_sge_txq_uld() argument
456 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, in free_sge_txq_uld()
458 free_tx_desc(adap, &txq->q, txq->q.in_use, false); in free_sge_txq_uld()
461 free_txq(adap, &txq->q); in free_sge_txq_uld()
467 alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info, in alloc_sge_txq_uld() argument
470 struct sge *s = &adap->sge; in alloc_sge_txq_uld()
474 j = nq / adap->params.nports; in alloc_sge_txq_uld()
479 err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j], in alloc_sge_txq_uld()
486 free_sge_txq_uld(adap, txq_info); in alloc_sge_txq_uld()
491 release_sge_txq_uld(struct adapter *adap, unsigned int uld_type) in release_sge_txq_uld() argument
496 txq_info = adap->sge.uld_txq_info[tx_uld_type]; in release_sge_txq_uld()
499 free_sge_txq_uld(adap, txq_info); in release_sge_txq_uld()
502 adap->sge.uld_txq_info[tx_uld_type] = NULL; in release_sge_txq_uld()
507 setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type, in setup_sge_txq_uld() argument
514 txq_info = adap->sge.uld_txq_info[tx_uld_type]; in setup_sge_txq_uld()
525 txq_info->ntxq = roundup(i, adap->params.nports); in setup_sge_txq_uld()
534 if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) { in setup_sge_txq_uld()
541 adap->sge.uld_txq_info[tx_uld_type] = txq_info; in setup_sge_txq_uld()
545 static void uld_queue_init(struct adapter *adap, unsigned int uld_type, in uld_queue_init() argument
548 struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; in uld_queue_init()
556 int t4_uld_mem_alloc(struct adapter *adap) in t4_uld_mem_alloc() argument
558 struct sge *s = &adap->sge; in t4_uld_mem_alloc()
560 adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL); in t4_uld_mem_alloc()
561 if (!adap->uld) in t4_uld_mem_alloc()
580 kfree(adap->uld); in t4_uld_mem_alloc()
584 void t4_uld_mem_free(struct adapter *adap) in t4_uld_mem_free() argument
586 struct sge *s = &adap->sge; in t4_uld_mem_free()
590 kfree(adap->uld); in t4_uld_mem_free()
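
t4_uld_mem_alloc() builds the per-ULD bookkeeping with kcalloc and releases the earlier table if a later allocation fails; t4_uld_mem_free() undoes it all. A userspace sketch of that shape, with calloc standing in for kcalloc and illustrative field names:

#include <stdlib.h>

#define ULD_MAX 8

struct uld_tables {
        void **uld;             /* adap->uld */
        void **rxq_info;        /* sge.uld_rxq_info */
};

static int uld_mem_alloc(struct uld_tables *t)
{
        t->uld = calloc(ULD_MAX, sizeof(*t->uld));      /* zeroed, like kcalloc */
        if (!t->uld)
                return -1;

        t->rxq_info = calloc(ULD_MAX, sizeof(*t->rxq_info));
        if (!t->rxq_info) {
                free(t->uld);   /* kfree(adap->uld) on the error path */
                return -1;
        }
        return 0;
}

static void uld_mem_free(struct uld_tables *t)
{
        free(t->rxq_info);
        free(t->uld);           /* kfree(adap->uld), as in t4_uld_mem_free() */
}

int main(void)
{
        struct uld_tables t;

        if (uld_mem_alloc(&t))
                return 1;
        uld_mem_free(&t);
        return 0;
}
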
594 static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type) in cxgb4_shutdown_uld_adapter() argument
596 if (adap->uld[type].handle) { in cxgb4_shutdown_uld_adapter()
597 adap->uld[type].handle = NULL; in cxgb4_shutdown_uld_adapter()
598 adap->uld[type].add = NULL; in cxgb4_shutdown_uld_adapter()
599 release_sge_txq_uld(adap, type); in cxgb4_shutdown_uld_adapter()
601 if (adap->flags & FULL_INIT_DONE) in cxgb4_shutdown_uld_adapter()
602 quiesce_rx_uld(adap, type); in cxgb4_shutdown_uld_adapter()
604 if (adap->flags & USING_MSIX) in cxgb4_shutdown_uld_adapter()
605 free_msix_queue_irqs_uld(adap, type); in cxgb4_shutdown_uld_adapter()
607 free_sge_queues_uld(adap, type); in cxgb4_shutdown_uld_adapter()
608 free_queues_uld(adap, type); in cxgb4_shutdown_uld_adapter()
612 void t4_uld_clean_up(struct adapter *adap) in t4_uld_clean_up() argument
618 if (!adap->uld[i].handle) in t4_uld_clean_up()
621 cxgb4_shutdown_uld_adapter(adap, i); in t4_uld_clean_up()
626 static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) in uld_init() argument
630 lld->pdev = adap->pdev; in uld_init()
631 lld->pf = adap->pf; in uld_init()
632 lld->l2t = adap->l2t; in uld_init()
633 lld->tids = &adap->tids; in uld_init()
634 lld->ports = adap->port; in uld_init()
635 lld->vr = &adap->vres; in uld_init()
636 lld->mtus = adap->params.mtus; in uld_init()
637 lld->ntxq = adap->sge.ofldqsets; in uld_init()
638 lld->nchan = adap->params.nports; in uld_init()
639 lld->nports = adap->params.nports; in uld_init()
640 lld->wr_cred = adap->params.ofldq_wr_cred; in uld_init()
641 lld->crypto = adap->params.crypto; in uld_init()
642 lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A)); in uld_init()
643 lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A); in uld_init()
644 lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A); in uld_init()
645 lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A); in uld_init()
646 lld->iscsi_ppm = &adap->iscsi_ppm; in uld_init()
647 lld->adapter_type = adap->params.chip; in uld_init()
648 lld->cclk_ps = 1000000000 / adap->params.vpd.cclk; in uld_init()
649 lld->udb_density = 1 << adap->params.sge.eq_qpp; in uld_init()
650 lld->ucq_density = 1 << adap->params.sge.iq_qpp; in uld_init()
651 lld->filt_mode = adap->params.tp.vlan_pri_map; in uld_init()
655 lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A); in uld_init()
656 lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A); in uld_init()
657 lld->fw_vers = adap->params.fw_vers; in uld_init()
659 lld->sge_ingpadboundary = adap->sge.fl_align; in uld_init()
660 lld->sge_egrstatuspagesize = adap->sge.stat_len; in uld_init()
661 lld->sge_pktshift = adap->sge.pktshift; in uld_init()
662 lld->ulp_crypto = adap->params.crypto; in uld_init()
663 lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; in uld_init()
664 lld->max_ordird_qp = adap->params.max_ordird_qp; in uld_init()
665 lld->max_ird_adapter = adap->params.max_ird_adapter; in uld_init()
666 lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl; in uld_init()
667 lld->nodeid = dev_to_node(adap->pdev_dev); in uld_init()
668 lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support; in uld_init()
669 lld->write_w_imm_support = adap->params.write_w_imm_support; in uld_init()
670 lld->write_cmpl_support = adap->params.write_cmpl_support; in uld_init()
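
Most of uld_init() is a plain snapshot of adapter parameters into the lld info block. The one computed field worth noting is cclk_ps: assuming the VPD core clock is stored in kHz (which the 1000000000 divisor implies), 1e9 / cclk yields the clock period in picoseconds:

#include <stdio.h>

int main(void)
{
        unsigned int cclk_khz = 250000;                 /* assumed 250 MHz core clock */
        unsigned int cclk_ps  = 1000000000 / cclk_khz;  /* 4000 ps, i.e. a 4 ns period */

        printf("%u kHz -> %u ps\n", cclk_khz, cclk_ps);
        return 0;
}
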
673 static void uld_attach(struct adapter *adap, unsigned int uld) in uld_attach() argument
678 uld_init(adap, &lli); in uld_attach()
679 uld_queue_init(adap, uld, &lli); in uld_attach()
681 handle = adap->uld[uld].add(&lli); in uld_attach()
683 dev_warn(adap->pdev_dev, in uld_attach()
685 adap->uld[uld].name, PTR_ERR(handle)); in uld_attach()
689 adap->uld[uld].handle = handle; in uld_attach()
692 if (adap->flags & FULL_INIT_DONE) in uld_attach()
693 adap->uld[uld].state_change(handle, CXGB4_STATE_UP); in uld_attach()
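
uld_attach() treats the pointer returned by the ULD's add() callback as either a valid handle or an encoded errno, checked with the kernel's IS_ERR()/PTR_ERR() convention before the handle is stored and state_change() is fired. A userspace sketch reimplementing that error-in-pointer check:

#include <stdio.h>

#define MAX_ERRNO 4095

/* minimal reimplementation of the kernel's IS_ERR()/PTR_ERR() */
static int is_err(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static long ptr_err(const void *p)
{
        return (long)p;
}

static void *uld_add(int fail)
{
        static int state;                       /* stands in for ULD state */

        return fail ? (void *)-12L /* -ENOMEM */ : (void *)&state;
}

int main(void)
{
        void *handle = uld_add(1);

        if (is_err(handle)) {
                printf("could not attach: error %ld\n", ptr_err(handle));
                return 1;
        }
        return 0;
}
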
710 struct adapter *adap; in cxgb4_register_uld() local
716 list_for_each_entry(adap, &adapter_list, list_node) { in cxgb4_register_uld()
717 if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) || in cxgb4_register_uld()
718 (type != CXGB4_ULD_CRYPTO && !is_offload(adap))) in cxgb4_register_uld()
720 if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip)) in cxgb4_register_uld()
722 ret = cfg_queues_uld(adap, type, p); in cxgb4_register_uld()
725 ret = setup_sge_queues_uld(adap, type, p->lro); in cxgb4_register_uld()
728 if (adap->flags & USING_MSIX) { in cxgb4_register_uld()
729 name_msix_vecs_uld(adap, type); in cxgb4_register_uld()
730 ret = request_msix_queue_irqs_uld(adap, type); in cxgb4_register_uld()
734 if (adap->flags & FULL_INIT_DONE) in cxgb4_register_uld()
735 enable_rx_uld(adap, type); in cxgb4_register_uld()
736 if (adap->uld[type].add) { in cxgb4_register_uld()
740 ret = setup_sge_txq_uld(adap, type, p); in cxgb4_register_uld()
743 adap->uld[type] = *p; in cxgb4_register_uld()
744 uld_attach(adap, type); in cxgb4_register_uld()
751 if (adap->flags & FULL_INIT_DONE) in cxgb4_register_uld()
752 quiesce_rx_uld(adap, type); in cxgb4_register_uld()
753 if (adap->flags & USING_MSIX) in cxgb4_register_uld()
754 free_msix_queue_irqs_uld(adap, type); in cxgb4_register_uld()
756 free_sge_queues_uld(adap, type); in cxgb4_register_uld()
758 free_queues_uld(adap, type); in cxgb4_register_uld()
761 list_for_each_entry(adap, &adapter_list, list_node) { in cxgb4_register_uld()
762 if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) || in cxgb4_register_uld()
763 (type != CXGB4_ULD_CRYPTO && !is_offload(adap))) in cxgb4_register_uld()
765 if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip)) in cxgb4_register_uld()
769 adap->uld[type].handle = NULL; in cxgb4_register_uld()
770 adap->uld[type].add = NULL; in cxgb4_register_uld()
771 release_sge_txq_uld(adap, type); in cxgb4_register_uld()
772 if (adap->flags & FULL_INIT_DONE) in cxgb4_register_uld()
773 quiesce_rx_uld(adap, type); in cxgb4_register_uld()
774 if (adap->flags & USING_MSIX) in cxgb4_register_uld()
775 free_msix_queue_irqs_uld(adap, type); in cxgb4_register_uld()
776 free_sge_queues_uld(adap, type); in cxgb4_register_uld()
777 free_queues_uld(adap, type); in cxgb4_register_uld()
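
cxgb4_register_uld() walks every adapter on adapter_list; if setup fails partway, the current adapter's partial state is unwound through the labels above, and a second walk over the list then tears down the adapters that were already brought up. A sketch of that two-pass rollback shape, with illustrative stubs:

#include <stdio.h>

#define NADAP 3

static int setup(int i)     { return i == 2 ? -1 : 0; }         /* third adapter fails */
static void teardown(int i) { printf("tearing down adapter %d\n", i); }

int main(void)
{
        int i, failed = -1;

        for (i = 0; i < NADAP; i++) {
                if (setup(i)) {
                        failed = i;     /* this adapter's partial state already unwound */
                        break;
                }
        }
        if (failed >= 0) {
                for (i = 0; i < failed; i++)
                        teardown(i);    /* second pass over the earlier adapters */
                return 1;
        }
        return 0;
}
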
793 struct adapter *adap; in cxgb4_unregister_uld() local
799 list_for_each_entry(adap, &adapter_list, list_node) { in cxgb4_unregister_uld()
800 if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) || in cxgb4_unregister_uld()
801 (type != CXGB4_ULD_CRYPTO && !is_offload(adap))) in cxgb4_unregister_uld()
803 if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip)) in cxgb4_unregister_uld()
806 cxgb4_shutdown_uld_adapter(adap, type); in cxgb4_unregister_uld()