Lines matching references to hw (Chelsio csiostor interrupt-handling code, csio_isr.c)
47 struct csio_hw *hw = (struct csio_hw *) dev_id; in csio_nondata_isr() local
51 if (unlikely(!hw)) in csio_nondata_isr()
54 if (unlikely(pci_channel_offline(hw->pdev))) { in csio_nondata_isr()
55 CSIO_INC_STATS(hw, n_pcich_offline); in csio_nondata_isr()
59 spin_lock_irqsave(&hw->lock, flags); in csio_nondata_isr()
60 csio_hw_slow_intr_handler(hw); in csio_nondata_isr()
61 rv = csio_mb_isr_handler(hw); in csio_nondata_isr()
63 if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) { in csio_nondata_isr()
64 hw->flags |= CSIO_HWF_FWEVT_PENDING; in csio_nondata_isr()
65 spin_unlock_irqrestore(&hw->lock, flags); in csio_nondata_isr()
66 schedule_work(&hw->evtq_work); in csio_nondata_isr()
69 spin_unlock_irqrestore(&hw->lock, flags); in csio_nondata_isr()
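
The csio_nondata_isr() lines above show the slow-path deferral pattern: under hw->lock the handler sets CSIO_HWF_FWEVT_PENDING at most once and queues hw->evtq_work, so firmware-event processing runs later in process context instead of in hard-IRQ context. Below is a minimal sketch of that pattern; struct my_hw, my_nondata_isr() and MY_FWEVT_PENDING are hypothetical stand-ins, not the driver's real definitions.

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    #define MY_FWEVT_PENDING 0x1            /* hypothetical flag bit */

    struct my_hw {                          /* hypothetical stand-in for struct csio_hw */
            spinlock_t lock;
            unsigned long flags;
            struct work_struct evtq_work;
    };

    static irqreturn_t my_nondata_isr(int irq, void *dev_id)
    {
            struct my_hw *hw = dev_id;
            unsigned long flags;

            spin_lock_irqsave(&hw->lock, flags);
            if (!(hw->flags & MY_FWEVT_PENDING)) {
                    /* Queue the work item only once until it has run. */
                    hw->flags |= MY_FWEVT_PENDING;
                    spin_unlock_irqrestore(&hw->lock, flags);
                    schedule_work(&hw->evtq_work);
                    return IRQ_HANDLED;
            }
            spin_unlock_irqrestore(&hw->lock, flags);

            return IRQ_HANDLED;
    }

The work function would then be expected to clear the pending bit under the same lock before draining the event queue; the flag keeps the hard-IRQ path short while preventing the work item from being queued twice.
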
81 csio_fwevt_handler(struct csio_hw *hw) in csio_fwevt_handler() argument
86 rv = csio_fwevtq_handler(hw); in csio_fwevt_handler()
88 spin_lock_irqsave(&hw->lock, flags); in csio_fwevt_handler()
89 if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) { in csio_fwevt_handler()
90 hw->flags |= CSIO_HWF_FWEVT_PENDING; in csio_fwevt_handler()
91 spin_unlock_irqrestore(&hw->lock, flags); in csio_fwevt_handler()
92 schedule_work(&hw->evtq_work); in csio_fwevt_handler()
95 spin_unlock_irqrestore(&hw->lock, flags); in csio_fwevt_handler()
110 struct csio_hw *hw = (struct csio_hw *) dev_id; in csio_fwevt_isr() local
112 if (unlikely(!hw)) in csio_fwevt_isr()
115 if (unlikely(pci_channel_offline(hw->pdev))) { in csio_fwevt_isr()
116 CSIO_INC_STATS(hw, n_pcich_offline); in csio_fwevt_isr()
120 csio_fwevt_handler(hw); in csio_fwevt_isr()
131 csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len, in csio_fwevt_intx_handler() argument
134 csio_fwevt_handler(hw); in csio_fwevt_intx_handler()
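
csio_nondata_isr(), csio_fwevt_isr() and csio_fcoe_isr() all begin with the same guard: bail out if dev_id is NULL and, if the PCI channel is offline (for example during AER/EEH error recovery), just bump the n_pcich_offline statistic and return without touching the device. A hedged sketch of that guard follows; struct my_hw and the counter name are hypothetical.

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    struct my_hw {                          /* hypothetical stand-in */
            struct pci_dev *pdev;
            unsigned int n_pcich_offline;   /* hypothetical statistic counter */
    };

    static irqreturn_t my_fwevt_isr(int irq, void *dev_id)
    {
            struct my_hw *hw = dev_id;

            if (unlikely(!hw))
                    return IRQ_NONE;

            /* While the PCI channel is offline the device must not be
             * touched; account the interrupt and report it as not ours.
             */
            if (unlikely(pci_channel_offline(hw->pdev))) {
                    hw->n_pcich_offline++;
                    return IRQ_NONE;
            }

            /* ... drain the firmware event queue here ... */
            return IRQ_HANDLED;
    }
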
146 csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len, in csio_process_scsi_cmpl() argument
155 ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr); in csio_process_scsi_cmpl()
162 csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n", in csio_process_scsi_cmpl()
166 spin_lock_irqsave(&hw->lock, flags); in csio_process_scsi_cmpl()
188 spin_unlock_irqrestore(&hw->lock, flags); in csio_process_scsi_cmpl()
191 csio_put_scsi_ioreq_lock(hw, in csio_process_scsi_cmpl()
192 csio_hw_to_scsim(hw), ioreq); in csio_process_scsi_cmpl()
194 spin_lock_irqsave(&hw->lock, flags); in csio_process_scsi_cmpl()
196 spin_unlock_irqrestore(&hw->lock, flags); in csio_process_scsi_cmpl()
214 struct csio_hw *hw = (struct csio_hw *)iq->owner; in csio_scsi_isr_handler() local
221 scm = csio_hw_to_scsim(hw); in csio_scsi_isr_handler()
223 if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl, in csio_scsi_isr_handler()
231 ioreq->io_cbfn(hw, ioreq); in csio_scsi_isr_handler()
234 csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list, in csio_scsi_isr_handler()
240 csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q, in csio_scsi_isr_handler()
259 struct csio_hw *hw; in csio_scsi_isr() local
264 hw = (struct csio_hw *)iq->owner; in csio_scsi_isr()
266 if (unlikely(pci_channel_offline(hw->pdev))) { in csio_scsi_isr()
267 CSIO_INC_STATS(hw, n_pcich_offline); in csio_scsi_isr()
285 csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len, in csio_scsi_intx_handler() argument
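
csio_scsi_isr_handler() (and csio_process_scsi_cmpl() before it) drains completed I/O requests from the ingress queue, runs each request's io_cbfn() completion callback, and then hands the requests and their DDP buffers back to the SCSI module's free lists under the lock. A minimal sketch of that drain-then-callback shape, using a hypothetical request type and list names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_ioreq {                               /* hypothetical I/O request */
            struct list_head sm_list;
            void (*io_cbfn)(struct my_ioreq *);
    };

    static void my_drain_completions(spinlock_t *lock,
                                     struct list_head *cmpl_q,
                                     struct list_head *free_q)
    {
            struct my_ioreq *ioreq;
            unsigned long flags;
            LIST_HEAD(cbfn_q);

            /* Move everything that completed onto a private list ... */
            spin_lock_irqsave(lock, flags);
            list_splice_init(cmpl_q, &cbfn_q);
            spin_unlock_irqrestore(lock, flags);

            /* ... run the completion callbacks without holding the lock ... */
            list_for_each_entry(ioreq, &cbfn_q, sm_list)
                    ioreq->io_cbfn(ioreq);

            /* ... then return the requests to the free list in one splice. */
            spin_lock_irqsave(lock, flags);
            list_splice_tail_init(&cbfn_q, free_q);
            spin_unlock_irqrestore(lock, flags);
    }

Running the callbacks outside the lock avoids holding a driver spinlock across the completion paths the callbacks may enter.
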
304 struct csio_hw *hw = (struct csio_hw *) dev_id; in csio_fcoe_isr() local
310 if (unlikely(!hw)) in csio_fcoe_isr()
313 if (unlikely(pci_channel_offline(hw->pdev))) { in csio_fcoe_isr()
314 CSIO_INC_STATS(hw, n_pcich_offline); in csio_fcoe_isr()
319 if (hw->intr_mode == CSIO_IM_INTX) in csio_fcoe_isr()
320 csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A)); in csio_fcoe_isr()
326 if (csio_hw_slow_intr_handler(hw)) in csio_fcoe_isr()
330 intx_q = csio_get_q(hw, hw->intr_iq_idx); in csio_fcoe_isr()
335 if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0)) in csio_fcoe_isr()
338 spin_lock_irqsave(&hw->lock, flags); in csio_fcoe_isr()
339 rv = csio_mb_isr_handler(hw); in csio_fcoe_isr()
340 if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) { in csio_fcoe_isr()
341 hw->flags |= CSIO_HWF_FWEVT_PENDING; in csio_fcoe_isr()
342 spin_unlock_irqrestore(&hw->lock, flags); in csio_fcoe_isr()
343 schedule_work(&hw->evtq_work); in csio_fcoe_isr()
346 spin_unlock_irqrestore(&hw->lock, flags); in csio_fcoe_isr()
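
csio_fcoe_isr() is the single INTx/MSI entry point: in INTx mode it first clears the PF interrupt cause (the PCIE_PF_CLI_A write), gives the slow-path handler a chance to claim error interrupts, processes the interrupt queue, and finally runs the same mailbox-check-and-defer logic as csio_nondata_isr(). Because the line may be shared, returning IRQ_NONE when nothing was pending is what keeps the kernel's spurious-interrupt accounting sane. A hedged sketch of that shared-line shape, with stubbed helpers standing in for the driver's internals:

    #include <linux/interrupt.h>

    struct my_hw;                           /* hypothetical, left opaque here */

    /* Placeholder helpers standing in for the driver's slow-path and
     * ingress-queue processing; they report whether any work was found.
     */
    static bool my_slow_intr_handler(struct my_hw *hw)
    {
            return false;
    }

    static int my_process_intx_iq(struct my_hw *hw)
    {
            return -1;
    }

    static irqreturn_t my_intx_isr(int irq, void *dev_id)
    {
            struct my_hw *hw = dev_id;
            irqreturn_t ret = IRQ_NONE;

            /* Error/async interrupts are claimed by the slow path first. */
            if (my_slow_intr_handler(hw))
                    ret = IRQ_HANDLED;

            /* Any entries drained from the interrupt queue also mean the
             * interrupt was ours.
             */
            if (my_process_intx_iq(hw) == 0)
                    ret = IRQ_HANDLED;

            return ret;
    }
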
352 csio_add_msix_desc(struct csio_hw *hw) in csio_add_msix_desc() argument
355 struct csio_msix_entries *entryp = &hw->msix_entries[0]; in csio_add_msix_desc()
358 int cnt = hw->num_sqsets + k; in csio_add_msix_desc()
363 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw)); in csio_add_msix_desc()
368 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw)); in csio_add_msix_desc()
375 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), in csio_add_msix_desc()
376 CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS); in csio_add_msix_desc()
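
csio_add_msix_desc() pre-formats one description string per vector, embedding the PCI bus:device.function (and, for the SCSI vectors, the queue-set index) so the entries in /proc/interrupts identify the adapter. A small sketch of that naming; the prefix, buffer length and layout below are hypothetical:

    #include <linux/kernel.h>
    #include <linux/pci.h>

    #define MY_DESC_LEN 24                  /* hypothetical description length */

    static void my_add_msix_desc(struct pci_dev *pdev,
                                 char (*desc)[MY_DESC_LEN], int nqsets)
    {
            u8 bus = pdev->bus->number;
            u8 dev = PCI_SLOT(pdev->devfn);
            u8 fn  = PCI_FUNC(pdev->devfn);
            int i;

            /* Vector 0: non-data/slow path, vector 1: firmware events. */
            snprintf(desc[0], MY_DESC_LEN, "mydrv-nondata-%02x:%02x:%x",
                     bus, dev, fn);
            snprintf(desc[1], MY_DESC_LEN, "mydrv-fwevt-%02x:%02x:%x",
                     bus, dev, fn);

            /* One vector per SCSI queue set after the two extra vectors. */
            for (i = 0; i < nqsets; i++)
                    snprintf(desc[2 + i], MY_DESC_LEN, "mydrv-scsi-%d", i);
    }

These strings are then passed as the name argument of request_irq(), which is what the csio_request_irqs() lines below do with entryp[k].desc.
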
381 csio_request_irqs(struct csio_hw *hw) in csio_request_irqs() argument
384 struct csio_msix_entries *entryp = &hw->msix_entries[0]; in csio_request_irqs()
386 struct pci_dev *pdev = hw->pdev; in csio_request_irqs()
388 if (hw->intr_mode != CSIO_IM_MSIX) { in csio_request_irqs()
390 hw->intr_mode == CSIO_IM_MSI ? 0 : IRQF_SHARED, in csio_request_irqs()
391 KBUILD_MODNAME, hw); in csio_request_irqs()
393 csio_err(hw, "Failed to allocate interrupt line.\n"); in csio_request_irqs()
401 csio_add_msix_desc(hw); in csio_request_irqs()
404 entryp[k].desc, hw); in csio_request_irqs()
406 csio_err(hw, "IRQ request failed for vec %d err:%d\n", in csio_request_irqs()
411 entryp[k++].dev_id = hw; in csio_request_irqs()
414 entryp[k].desc, hw); in csio_request_irqs()
416 csio_err(hw, "IRQ request failed for vec %d err:%d\n", in csio_request_irqs()
421 entryp[k++].dev_id = (void *)hw; in csio_request_irqs()
424 for (i = 0; i < hw->num_pports; i++) { in csio_request_irqs()
425 info = &hw->scsi_cpu_info[i]; in csio_request_irqs()
427 struct csio_scsi_qset *sqset = &hw->sqset[i][j]; in csio_request_irqs()
428 struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx]; in csio_request_irqs()
433 csio_err(hw, in csio_request_irqs()
445 hw->flags |= CSIO_HWF_HOST_INTR_ENABLED; in csio_request_irqs()
450 free_irq(pci_irq_vector(pdev, i), hw->msix_entries[i].dev_id); in csio_request_irqs()
451 pci_free_irq_vectors(hw->pdev); in csio_request_irqs()
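
csio_request_irqs() covers both modes: for MSI/INTx it requests a single interrupt (IRQF_SHARED only when not MSI), while for MSI-X it walks the vectors with pci_irq_vector(), records the dev_id per entry, and on failure unwinds with free_irq() for every vector already requested followed by pci_free_irq_vectors(). A hedged sketch of the MSI-X request-and-unwind portion; the handler, name and vector count are hypothetical:

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    #define MY_NVECS 4                      /* hypothetical number of vectors */

    static irqreturn_t my_vec_isr(int irq, void *dev_id)
    {
            return IRQ_HANDLED;             /* placeholder handler */
    }

    static int my_request_msix(struct pci_dev *pdev, void *hw)
    {
            int i, rv;

            for (i = 0; i < MY_NVECS; i++) {
                    rv = request_irq(pci_irq_vector(pdev, i), my_vec_isr, 0,
                                     "mydrv", hw);
                    if (rv) {
                            dev_err(&pdev->dev,
                                    "IRQ request failed for vec %d err:%d\n",
                                    i, rv);
                            goto err_unwind;
                    }
            }
            return 0;

    err_unwind:
            /* Free only the vectors that were successfully requested. */
            while (--i >= 0)
                    free_irq(pci_irq_vector(pdev, i), hw);
            pci_free_irq_vectors(pdev);
            return rv;
    }
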
457 csio_reduce_sqsets(struct csio_hw *hw, int cnt) in csio_reduce_sqsets() argument
462 while (cnt < hw->num_sqsets) { in csio_reduce_sqsets()
463 for (i = 0; i < hw->num_pports; i++) { in csio_reduce_sqsets()
464 info = &hw->scsi_cpu_info[i]; in csio_reduce_sqsets()
467 hw->num_sqsets--; in csio_reduce_sqsets()
468 if (hw->num_sqsets <= cnt) in csio_reduce_sqsets()
474 csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets); in csio_reduce_sqsets()
479 struct csio_hw *hw = affd->priv; in csio_calc_sets() local
485 if (nvecs < hw->num_pports) { in csio_calc_sets()
491 affd->nr_sets = hw->num_pports; in csio_calc_sets()
492 for (i = 0; i < hw->num_pports; i++) in csio_calc_sets()
493 affd->set_size[i] = nvecs / hw->num_pports; in csio_calc_sets()
497 csio_enable_msix(struct csio_hw *hw) in csio_enable_msix() argument
505 .priv = hw, in csio_enable_msix()
508 if (hw->num_pports > IRQ_AFFINITY_MAX_SETS) in csio_enable_msix()
511 min = hw->num_pports + extra; in csio_enable_msix()
512 cnt = hw->num_sqsets + extra; in csio_enable_msix()
515 if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw)) in csio_enable_msix()
516 cnt = min_t(uint8_t, hw->cfg_niq, cnt); in csio_enable_msix()
518 csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt); in csio_enable_msix()
520 cnt = pci_alloc_irq_vectors_affinity(hw->pdev, min, cnt, in csio_enable_msix()
525 if (cnt < (hw->num_sqsets + extra)) { in csio_enable_msix()
526 csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra); in csio_enable_msix()
527 csio_reduce_sqsets(hw, cnt - extra); in csio_enable_msix()
532 csio_set_nondata_intr_idx(hw, k); in csio_enable_msix()
533 csio_set_mb_intr_idx(csio_hw_to_mbm(hw), k++); in csio_enable_msix()
534 csio_set_fwevt_intr_idx(hw, k++); in csio_enable_msix()
536 for (i = 0; i < hw->num_pports; i++) { in csio_enable_msix()
537 info = &hw->scsi_cpu_info[i]; in csio_enable_msix()
539 for (j = 0; j < hw->num_scsi_msix_cpus; j++) { in csio_enable_msix()
541 hw->sqset[i][j].intr_idx = n; in csio_enable_msix()
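
csio_enable_msix() asks for num_sqsets + extra vectors (capped at the firmware-reported cfg_niq when soft parameters are in use or the function is not the master) and hands a struct irq_affinity, carrying .priv = hw and the csio_calc_sets() callback shown above, to pci_alloc_irq_vectors_affinity() so the queue vectors are spread per port. If fewer vectors come back, csio_reduce_sqsets() trims the queue sets and the remaining vectors are assigned to the non-data, mailbox, firmware-event, and per-queue intr_idx slots. A hedged sketch of the allocation itself, with hypothetical counts and names (treating the extra vectors as pre_vectors is an assumption of this sketch):

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    #define MY_EXTRA_VECS 2                 /* hypothetical: non-data + fw-event */
    #define MY_NUM_PORTS  2                 /* hypothetical port count */

    /* Split the queue vectors into one affinity set per port. */
    static void my_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
    {
            int i;

            affd->nr_sets = MY_NUM_PORTS;
            for (i = 0; i < MY_NUM_PORTS; i++)
                    affd->set_size[i] = nvecs / MY_NUM_PORTS;
    }

    static int my_enable_msix(struct pci_dev *pdev, int num_sqsets)
    {
            struct irq_affinity affd = {
                    .pre_vectors = MY_EXTRA_VECS,   /* assumption: extras not spread */
                    .calc_sets   = my_calc_sets,
            };
            int min  = MY_NUM_PORTS + MY_EXTRA_VECS;
            int want = num_sqsets + MY_EXTRA_VECS;
            int got;

            got = pci_alloc_irq_vectors_affinity(pdev, min, want,
                                                 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                                 &affd);
            if (got < 0)
                    return got;

            /* got < want means the caller must shrink its queue sets to
             * (got - MY_EXTRA_VECS), as csio_reduce_sqsets() does above.
             */
            return got;
    }
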
551 csio_intr_enable(struct csio_hw *hw) in csio_intr_enable() argument
553 hw->intr_mode = CSIO_IM_NONE; in csio_intr_enable()
554 hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED; in csio_intr_enable()
557 if ((csio_msi == 2) && !csio_enable_msix(hw)) in csio_intr_enable()
558 hw->intr_mode = CSIO_IM_MSIX; in csio_intr_enable()
561 if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || in csio_intr_enable()
562 !csio_is_hw_master(hw)) { in csio_intr_enable()
565 if (hw->cfg_niq < (hw->num_sqsets + extra)) { in csio_intr_enable()
566 csio_dbg(hw, "Reducing sqsets to %d\n", in csio_intr_enable()
567 hw->cfg_niq - extra); in csio_intr_enable()
568 csio_reduce_sqsets(hw, hw->cfg_niq - extra); in csio_intr_enable()
572 if ((csio_msi == 1) && !pci_enable_msi(hw->pdev)) in csio_intr_enable()
573 hw->intr_mode = CSIO_IM_MSI; in csio_intr_enable()
575 hw->intr_mode = CSIO_IM_INTX; in csio_intr_enable()
578 csio_dbg(hw, "Using %s interrupt mode.\n", in csio_intr_enable()
579 (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" : in csio_intr_enable()
580 ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx")); in csio_intr_enable()
584 csio_intr_disable(struct csio_hw *hw, bool free) in csio_intr_disable() argument
586 csio_hw_intr_disable(hw); in csio_intr_disable()
591 switch (hw->intr_mode) { in csio_intr_disable()
593 for (i = 0; i < hw->num_sqsets + CSIO_EXTRA_VECS; i++) { in csio_intr_disable()
594 free_irq(pci_irq_vector(hw->pdev, i), in csio_intr_disable()
595 hw->msix_entries[i].dev_id); in csio_intr_disable()
600 free_irq(pci_irq_vector(hw->pdev, 0), hw); in csio_intr_disable()
607 pci_free_irq_vectors(hw->pdev); in csio_intr_disable()
608 hw->intr_mode = CSIO_IM_NONE; in csio_intr_disable()
609 hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED; in csio_intr_disable()
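
csio_intr_disable() undoes the setup: it masks interrupts at the hardware level and, when asked to free them, releases each requested vector with free_irq() (every one of the num_sqsets + CSIO_EXTRA_VECS vectors in MSI-X mode, vector 0 otherwise), calls pci_free_irq_vectors(), and clears intr_mode and the HOST_INTR_ENABLED flag. A hedged sketch of the vector-freeing part, reusing hypothetical names from the earlier sketches:

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static void my_intr_disable(struct pci_dev *pdev, void *hw,
                                int nvecs, bool msix)
    {
            int i;

            if (msix) {
                    /* One free_irq() per requested MSI-X vector. */
                    for (i = 0; i < nvecs; i++)
                            free_irq(pci_irq_vector(pdev, i), hw);
            } else {
                    /* MSI and INTx both use the single vector at index 0. */
                    free_irq(pci_irq_vector(pdev, 0), hw);
            }

            /* Release the vectors themselves (MSI-X table / MSI enable). */
            pci_free_irq_vectors(pdev);
    }
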