Lines Matching +full:vcu +full:- +full:settings

1 /* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
4 * Copyright(c) 2015-2020 Intel Corporation.
13 #include <linux/dma-mapping.h>
27 #include <linux/i2c-algo-bit.h>
74 /* Offline Disabled Reason is 4-bits */
98 * per-driver stats, either not device- or port-specific, or
108 __u64 sps_txerrs; /* tx-related packet errors */
109 __u64 sps_rcverrs; /* non-crc rcv packet errors */
124 * First-cut criterion for "device is active" is
126 * 5-second interval. SMA packets are 64 dwords,
233 /* Timer for re-enabling ASPM if interrupt activity quiets down */
235 /* per-context configuration flags */
284 /* per-context event flags for fileops/intr communication */
298 /* non-zero if ctxt is being shared. */
303 * non-zero if ctxt can be shared, and defines the maximum number of
304 * sub-contexts for this device context.
314 * rcvhdrq_size - return total size in bytes for header queue
322 return PAGE_ALIGN(rcd->rcvhdrq_cnt * in rcvhdrq_size()
323 rcd->rcvhdrqentsize * sizeof(u32)); in rcvhdrq_size()
415 return (u8)(hdr->lrh[2] & OPA_16B_L4_MASK); in hfi1_16B_get_l4()
420 return (u8)((hdr->lrh[1] & OPA_16B_SC_MASK) >> OPA_16B_SC_SHIFT); in hfi1_16B_get_sc()
425 return (u32)((hdr->lrh[1] & OPA_16B_LID_MASK) | in hfi1_16B_get_dlid()
426 (((hdr->lrh[2] & OPA_16B_DLID_MASK) >> in hfi1_16B_get_dlid()
432 return (u32)((hdr->lrh[0] & OPA_16B_LID_MASK) | in hfi1_16B_get_slid()
433 (((hdr->lrh[2] & OPA_16B_SLID_MASK) >> in hfi1_16B_get_slid()
439 return (u8)((hdr->lrh[0] & OPA_16B_BECN_MASK) >> OPA_16B_BECN_SHIFT); in hfi1_16B_get_becn()
444 return (u8)((hdr->lrh[1] & OPA_16B_FECN_MASK) >> OPA_16B_FECN_SHIFT); in hfi1_16B_get_fecn()
449 return (u8)((hdr->lrh[1] & OPA_16B_L2_MASK) >> OPA_16B_L2_SHIFT); in hfi1_16B_get_l2()
454 return (u16)((hdr->lrh[2] & OPA_16B_PKEY_MASK) >> OPA_16B_PKEY_SHIFT); in hfi1_16B_get_pkey()
459 return (u8)((hdr->lrh[1] & OPA_16B_RC_MASK) >> OPA_16B_RC_SHIFT); in hfi1_16B_get_rc()
464 return (u8)((hdr->lrh[3] & OPA_16B_AGE_MASK) >> OPA_16B_AGE_SHIFT); in hfi1_16B_get_age()
469 return (u16)((hdr->lrh[0] & OPA_16B_LEN_MASK) >> OPA_16B_LEN_SHIFT); in hfi1_16B_get_len()
474 return (u16)(hdr->lrh[3] & OPA_16B_ENTROPY_MASK); in hfi1_16B_get_entropy()
485 return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_PAD_SHIFT) & in hfi1_16B_bth_get_pad()
495 return be32_to_cpu(mgmt->dest_qpn) & OPA_16B_MGMT_QPN_MASK; in hfi1_16B_get_dest_qpn()
500 return be32_to_cpu(mgmt->src_qpn) & OPA_16B_MGMT_QPN_MASK; in hfi1_16B_get_src_qpn()
506 mgmt->dest_qpn = cpu_to_be32(dest_qp & OPA_16B_MGMT_QPN_MASK); in hfi1_16B_set_qpn()
507 mgmt->src_qpn = cpu_to_be32(src_qp & OPA_16B_MGMT_QPN_MASK); in hfi1_16B_set_qpn()
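
These two assignments are the whole of hfi1_16B_set_qpn(); a sketch with an assumed signature (the opa_16b_mgmt struct name and parameter order are not shown in the matched lines):

static inline void hfi1_16B_set_qpn(struct opa_16b_mgmt *mgmt,
				    u32 dest_qp, u32 src_qp)
{
	/* both QPNs are masked to the width of the 16B management QPN field */
	mgmt->dest_qpn = cpu_to_be32(dest_qp & OPA_16B_MGMT_QPN_MASK);
	mgmt->src_qpn = cpu_to_be32(src_qp & OPA_16B_MGMT_QPN_MASK);
}
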
511 * hfi1_get_rc_ohdr - get extended header
512 * @opah: the opa header
522 if (opah->hdr_type == HFI1_PKT_TYPE_9B) { in hfi1_get_rc_ohdr()
523 hdr = &opah->ibh; in hfi1_get_rc_ohdr()
525 ohdr = &hdr->u.oth; in hfi1_get_rc_ohdr()
527 ohdr = &hdr->u.l.oth; in hfi1_get_rc_ohdr()
531 hdr_16b = &opah->opah; in hfi1_get_rc_ohdr()
534 ohdr = &hdr_16b->u.oth; in hfi1_get_rc_ohdr()
536 ohdr = &hdr_16b->u.l.oth; in hfi1_get_rc_ohdr()
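
The branches above locate the BTH for either a 9B or a 16B packet. A hedged reconstruction of how they fit together; the LNH and L4 tests on the elided lines are assumptions inferred from the surrounding accessors:

static inline struct ib_other_headers *
hfi1_get_rc_ohdr(struct hfi1_opa_header *opah)
{
	struct ib_other_headers *ohdr;

	if (opah->hdr_type == HFI1_PKT_TYPE_9B) {
		struct ib_header *hdr = &opah->ibh;

		/* the BTH follows the LRH directly, or sits after a GRH */
		if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
			ohdr = &hdr->u.oth;
		else
			ohdr = &hdr->u.l.oth;
	} else {
		struct hfi1_16b_header *hdr_16b = &opah->opah;

		if (hfi1_16B_get_l4(hdr_16b) == OPA_16B_L4_IB_LOCAL)
			ohdr = &hdr_16b->u.oth;
		else
			ohdr = &hdr_16b->u.l.oth;
	}
	return ohdr;
}
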
544 * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
549 #define HFI1_IB_CFG_LWID_DG_ENB 1 /* allowed Link-width downgrade */
550 #define HFI1_IB_CFG_LWID_ENB 2 /* allowed Link-width */
551 #define HFI1_IB_CFG_LWID 3 /* currently active Link-width */
554 #define HFI1_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
555 #define HFI1_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
575 * as bits for easy multi-state checking. The actual state can only be
620 #define FM_TBL_SC2VLNT 4 /* Get/set SC->VLnt */
662 #define CNTR_INVALID_VL -1 /* Specifies invalid VL */
675 if (*cntr < (u64)-1LL) in incr_cntr64()
696 /* per-SL CCA information */
699 struct hfi1_pportdata *ppd; /* read-only */
700 int sl; /* read-only */
701 u16 ccti; /* read/write - current value of CCTI */
706 * SMA-facing value. Should be set from .latest when
707 * HLS_UP_* -> HLS_DN_* transition actually occurs.
729 * port-numbers are one-based. The first or only port is port1.
761 * this address is mapped read-only into user processes so they can
816 u32 port; /* IB port number; index into dd->pports is port - 1 */
831 /* placeholders for IB MAD packet settings */
851 * cca_timer_lock protects access to the per-SL cca_timer
865 * cc_state_lock protects (write) access to the per-port
1008 /* device data struct now contains only "general per-device" info.
1084 /* mem-mapped pointer to base of PIO buffers */
1087 * write-combining mem-mapped pointer to base of RcvArray
1092 * credit return base - a per-NUMA range of DMA address that
1093 * the chip will use to update the per-context free counter
1141 * mapped read-only into user processes so they can get unit and
1190 /* vCU of this device */
1191 u8 vcu; member
1199 * credits are to be kept at 0 and set when handling the link-up
1231 /* MSI-X information */
1250 * per-port counters
1304 /* hfi1_pportdata, points to array of (physical) port-specific
1305 * data structs, indexed by pidx (0..n-1)
1394 /* for cpu affinity; -1 if none */
1413 return (uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) * in uctxt_offset()
1462 * hfi1_rcd_head - return the rcd head
1467 return rcd->head; in hfi1_rcd_head()
1471 * hfi1_set_rcd_head - set the rcd head
1477 rcd->head = head; in hfi1_set_rcd_head()
1483 return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->rhf_offset; in get_rhf_addr()
1489 return !!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL); in get_dma_rtail_setting()
1493 * hfi1_seq_incr_wrap - wrapping increment for sequence
1506 * hfi1_seq_cnt - return seq_cnt member
1513 return rcd->seq_cnt; in hfi1_seq_cnt()
1517 * hfi1_set_seq_cnt - set seq_cnt member
1524 rcd->seq_cnt = cnt; in hfi1_set_seq_cnt()
1528 * last_rcv_seq - return true if seq no longer matches the context seq_cnt
1536 return seq != rcd->seq_cnt; in last_rcv_seq()
1540 * hfi1_seq_incr - increment context sequence number
1548 rcd->seq_cnt = hfi1_seq_incr_wrap(rcd->seq_cnt); in hfi1_seq_incr()
1553 * get_hdrqentsize - return hdrq entry size
1558 return rcd->rcvhdrqentsize; in get_hdrqentsize()
1562 * get_hdrq_cnt - return hdrq count
1567 return rcd->rcvhdrq_cnt; in get_hdrq_cnt()
1571 * hfi1_is_slowpath - check if this context is slow path
1576 return rcd->do_interrupt == rcd->slow_handler; in hfi1_is_slowpath()
1580 * hfi1_is_fastpath - check if this context is fast path
1585 if (rcd->ctxt == HFI1_CTRL_CTXT) in hfi1_is_fastpath()
1588 return rcd->do_interrupt == rcd->fast_handler; in hfi1_is_fastpath()
1592 * hfi1_set_fast - change to the fast handler
1600 rcd->do_interrupt = rcd->fast_handler; in hfi1_set_fast()
1614 #define HFI1_JKEY_MASK (BIT(16) - 1)
1619 * 0 - 31 - users with administrator privileges
1620 * 32 - 63 - kernel protocols using KDETH packets
1621 * 64 - 65535 - all other users using KDETH packets
1628 jkey &= HFI1_ADMIN_JKEY_RANGE - 1; in generate_jkey()
1630 jkey |= BIT(HFI1_JKEY_WIDTH - 1); in generate_jkey()
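
The two masking lines above are the core of generate_jkey(), which maps a caller uid into the job-key ranges listed in the comment. A sketch of the full routine; the kuid_t parameter and the capable(CAP_SYS_ADMIN) test are assumptions, as only the masking lines matched:

static inline u16 generate_jkey(kuid_t uid)
{
	u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;

	if (capable(CAP_SYS_ADMIN))
		jkey &= HFI1_ADMIN_JKEY_RANGE - 1;	/* admin range: 0 - 31 */
	else if (jkey < 64)
		jkey |= BIT(HFI1_JKEY_WIDTH - 1);	/* keep users out of 32 - 63 */

	return jkey;
}
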
1642 u16 link_speed = ppd->link_speed_active; in active_egress_rate()
1643 u16 link_width = ppd->link_width_active; in active_egress_rate()
1685 * --------------------------------------------------- in egress_cycles()
1709 #define PKEY_CHECK_INVALID -1
1723 * sc_to_vlt() - reverse lookup sc to vl
1724 * @dd: devdata
1725 * @sc5: 5-bit sc
1736 seq = read_seqbegin(&dd->sc2vl_lock); in sc_to_vlt()
1737 rval = *(((u8 *)dd->sc2vl) + sc5); in sc_to_vlt()
1738 } while (read_seqretry(&dd->sc2vl_lock, seq)); in sc_to_vlt()
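
The fragments above are the seqlock read side of sc_to_vlt(): the table read is retried if a writer updated sc2vl in the meantime. A sketch of the surrounding loop; the out-of-range guard and the OPA_MAX_SCS limit are assumptions:

static inline u8 sc_to_vlt(struct hfi1_devdata *dd, u8 sc5)
{
	unsigned int seq;
	u8 rval;

	if (sc5 >= OPA_MAX_SCS)
		return (u8)0xff;	/* invalid SC */

	do {
		seq = read_seqbegin(&dd->sc2vl_lock);
		rval = *(((u8 *)dd->sc2vl) + sc5);
	} while (read_seqretry(&dd->sc2vl_lock, seq));

	return rval;
}
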
1747 * ingress_pkey_matches_entry - return 1 if the pkey matches ent (ent
1771 * ingress_pkey_table_search - search the entire pkey table for
1780 if (ingress_pkey_matches_entry(pkey, ppd->pkeys[i])) in ingress_pkey_table_search()
1787 * ingress_pkey_table_fail - record a failure of ingress pkey validation,
1794 struct hfi1_devdata *dd = ppd->dd; in ingress_pkey_table_fail()
1796 incr_cntr64(&ppd->port_rcv_constraint_errors); in ingress_pkey_table_fail()
1797 if (!(dd->err_info_rcv_constraint.status & OPA_EI_STATUS_SMASK)) { in ingress_pkey_table_fail()
1798 dd->err_info_rcv_constraint.status |= OPA_EI_STATUS_SMASK; in ingress_pkey_table_fail()
1799 dd->err_info_rcv_constraint.slid = slid; in ingress_pkey_table_fail()
1800 dd->err_info_rcv_constraint.pkey = pkey; in ingress_pkey_table_fail()
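
These lines bump the constraint-error counter on every failure but latch the slid/pkey details of only the first offender (the err_info status bit acts as the latch). A sketch of the whole helper with an assumed signature:

static inline void ingress_pkey_table_fail(struct hfi1_pportdata *ppd,
					   u16 pkey, u32 slid)
{
	struct hfi1_devdata *dd = ppd->dd;

	incr_cntr64(&ppd->port_rcv_constraint_errors);
	/* record details for the first offending packet only */
	if (!(dd->err_info_rcv_constraint.status & OPA_EI_STATUS_SMASK)) {
		dd->err_info_rcv_constraint.status |= OPA_EI_STATUS_SMASK;
		dd->err_info_rcv_constraint.slid = slid;
		dd->err_info_rcv_constraint.pkey = pkey;
	}
}
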
1805 * ingress_pkey_check - Return 0 if the ingress pkey is valid, return 1
1815 if (!(force) && !(ppd->part_enforce & HFI1_PART_ENFORCE_IN)) in ingress_pkey_check()
1827 if (ingress_pkey_matches_entry(pkey, ppd->pkeys[idx])) in ingress_pkey_check()
1830 /* no match - try the whole table */ in ingress_pkey_check()
1840 * rcv_pkey_check - Return 0 if the ingress pkey is valid, return 1
1848 if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN)) in rcv_pkey_check()
1863 /* MTU enumeration, 256-4k match IB */
1900 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
1906 return ppd->dd; in dd_from_ppd()
1932 u32 pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */ in to_iport()
1934 WARN_ON(pidx >= dd->num_pports); in to_iport()
1935 return &dd->pport[pidx].ibport_data; in to_iport()
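
to_iport() turns the 1-based IB port number into the 0-based dd->pports index, as the comment notes. A sketch; the signature and the dd_from_ibdev() lookup are assumptions since only the body fragments matched:

static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u32 port)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	u32 pidx = port - 1;	/* IB numbers ports from 1, hardware from 0 */

	WARN_ON(pidx >= dd->num_pports);
	return &dd->pport[pidx].ibport_data;
}
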
1940 return &rcd->ppd->ibport_data; in rcd_to_iport()
1944 * hfi1_may_ecn - Check whether FECN or BECN processing should be done
1958 if (pkt->etype == RHF_RCV_TYPE_BYPASS) { in hfi1_may_ecn()
1959 fecn = hfi1_16B_get_fecn(pkt->hdr); in hfi1_may_ecn()
1960 becn = hfi1_16B_get_becn(pkt->hdr); in hfi1_may_ecn()
1962 fecn = ib_bth_get_fecn(pkt->ohdr); in hfi1_may_ecn()
1963 becn = ib_bth_get_becn(pkt->ohdr); in hfi1_may_ecn()
1988 if (index >= ARRAY_SIZE(ppd->pkeys)) in hfi1_get_pkey()
1991 ret = ppd->pkeys[index]; in hfi1_get_pkey()
2004 return cpu_to_be64(ppd->guids[index]); in get_sguid()
2012 return rcu_dereference(ppd->cc_state); in get_cc_state()
2021 return rcu_dereference_protected(ppd->cc_state, in get_cc_state_protected()
2022 lockdep_is_held(&ppd->cc_state_lock)); in get_cc_state_protected()
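
cc_state is RCU-protected: readers wrap get_cc_state() in rcu_read_lock()/rcu_read_unlock(), while writers hold ppd->cc_state_lock and use get_cc_state_protected(). A hypothetical reader sketch (the dereferenced fields are placeholders):

struct cc_state *cc;

rcu_read_lock();
cc = get_cc_state(ppd);
if (cc) {
	/* read congestion-control fields under RCU protection */
}
rcu_read_unlock();
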
2026 * values for dd->flags (_device_ related flags)
2037 #define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)
2074 * ---
2077 * For a 64-byte cache line, KDETH would need to be 36 bytes or 9 DWORDS
2090 * ---
2106 * hfi1_rcvhdrtail_kvaddr - return tail kvaddr
2107 * @rcd: the receive context
2111 return (__le64 *)rcd->rcvhdrtail_kvaddr; in hfi1_rcvhdrtail_kvaddr()
2133 if (likely(!rcd->rcvhdrtail_kvaddr)) { in hfi1_packet_present()
2262 /* turn on send-side job key checks if !A0 */ in hfi1_pkt_default_send_ctxt_mask()
2297 /* turn on send-side job key checks if !A0 */ in hfi1_pkt_base_sdma_integrity()
2306 dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
2307 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2310 dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
2311 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2314 dev_err_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
2315 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
2319 dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
2320 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2323 dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
2324 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
2328 dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
2329 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2332 dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
2333 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
2337 dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
2338 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2341 dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
2342 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), ##__VA_ARGS__)
2368 dd->z_int_counter = get_all_cpu_total(dd->int_counter); in hfi1_reset_cpu_counters()
2369 dd->z_rcv_limit = get_all_cpu_total(dd->rcv_limit); in hfi1_reset_cpu_counters()
2370 dd->z_send_schedule = get_all_cpu_total(dd->send_schedule); in hfi1_reset_cpu_counters()
2373 for (i = 0; i < dd->num_pports; i++, ppd++) { in hfi1_reset_cpu_counters()
2374 ppd->ibport_data.rvp.z_rc_acks = in hfi1_reset_cpu_counters()
2375 get_all_cpu_total(ppd->ibport_data.rvp.rc_acks); in hfi1_reset_cpu_counters()
2376 ppd->ibport_data.rvp.z_rc_qacks = in hfi1_reset_cpu_counters()
2377 get_all_cpu_total(ppd->ibport_data.rvp.rc_qacks); in hfi1_reset_cpu_counters()
2399 return i2c_target(dd->hfi1_id); in qsfp_resource()
2405 return dd->pcidev->device == PCI_DEVICE_ID_INTEL1; in is_integrated()
2409 * hfi1_need_drop - detect need for drop
2410 * @dd: the device
2418 if (unlikely(dd->do_drop && in hfi1_need_drop()
2419 atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) == in hfi1_need_drop()
2421 dd->do_drop = false; in hfi1_need_drop()
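
hfi1_need_drop() consumes a one-shot drop request: the atomic_xchg() clears drop_packet while reporting whether it was armed, and do_drop is cleared so only a single packet is dropped. A sketch of the whole helper; the DROP_PACKET_ON comparison and the return statements are assumptions:

static inline bool hfi1_need_drop(struct hfi1_devdata *dd)
{
	if (unlikely(dd->do_drop &&
		     atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
		     DROP_PACKET_ON)) {
		dd->do_drop = false;	/* disarm after one dropped packet */
		return true;
	}
	return false;
}
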
2429 #define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
2430 #define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev))
2446 (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))) && in hfi1_update_ah_attr()
2453 rdma_ah_set_subnet_prefix(attr, ibp->rvp.gid_prefix); in hfi1_update_ah_attr()
2458 * hfi1_check_mcast - Check if the given lid is
2483 return (lid - opa_get_mcast_base(OPA_MCAST_NR) + in __opa_get_lid()
2488 return (lid - opa_get_mcast_base(OPA_MCAST_NR) + in __opa_get_lid()
2493 return (lid - in __opa_get_lid()
2524 if (ib_is_opa_gid(&grh->dgid)) in hfi1_make_opa_lid()
2525 dlid = opa_get_lid_from_gid(&grh->dgid); in hfi1_make_opa_lid()
2529 dlid = dlid - be16_to_cpu(IB_MULTICAST_LID_BASE) + in hfi1_make_opa_lid()
2560 return (ib_is_opa_gid(&rdma_ah_read_grh(attr)->dgid)) ? in hfi1_get_hdr_type()
2577 struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data; in hfi1_make_ext_grh()
2583 grh->hop_limit = 1; in hfi1_make_ext_grh()
2584 grh->sgid.global.subnet_prefix = ibp->rvp.gid_prefix; in hfi1_make_ext_grh()
2586 grh->sgid.global.interface_id = in hfi1_make_ext_grh()
2589 grh->sgid.global.interface_id = OPA_MAKE_ID(slid); in hfi1_make_ext_grh()
2598 grh->dgid.global.subnet_prefix = ibp->rvp.gid_prefix; in hfi1_make_ext_grh()
2599 grh->dgid.global.interface_id = in hfi1_make_ext_grh()
2600 cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]); in hfi1_make_ext_grh()
2605 return -(hdr_size + payload + (SIZE_OF_CRC << 2) + in hfi1_get_16b_padding()
2613 hdr->lrh[0] = cpu_to_be16(lrh0); in hfi1_make_ib_hdr()
2614 hdr->lrh[1] = cpu_to_be16(dlid); in hfi1_make_ib_hdr()
2615 hdr->lrh[2] = cpu_to_be16(len); in hfi1_make_ib_hdr()
2616 hdr->lrh[3] = cpu_to_be16(slid); in hfi1_make_ib_hdr()
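
The four stores above fill the 9B local route header in network byte order. A sketch with an assumed signature (parameter names and order are not part of the matched lines):

static inline void hfi1_make_ib_hdr(struct ib_header *hdr,
				    u16 lrh0, u16 len, u16 dlid, u16 slid)
{
	/* the 9B LRH is four big-endian 16-bit words */
	hdr->lrh[0] = cpu_to_be16(lrh0);
	hdr->lrh[1] = cpu_to_be16(dlid);
	hdr->lrh[2] = cpu_to_be16(len);
	hdr->lrh[3] = cpu_to_be16(slid);
}
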
2643 hdr->lrh[0] = lrh0; in hfi1_make_16b_hdr()
2644 hdr->lrh[1] = lrh1; in hfi1_make_16b_hdr()
2645 hdr->lrh[2] = lrh2; in hfi1_make_16b_hdr()
2646 hdr->lrh[3] = lrh3; in hfi1_make_16b_hdr()