Lines Matching +full:parallel +full:- +full:memories

1 // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
3 * Copyright(c) 2015 - 2020 Intel Corporation.
32 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
78 #define SEC_SC_HALTED 0x4 /* per-context only */
79 #define SEC_SPC_FREEZE 0x8 /* per-HFI only */
87 * 0 - User Fecn Handling
88 * 1 - Vnic
89 * 2 - AIP
90 * 3 - Verbs
101 #define emulator_rev(dd) ((dd)->irev >> 8)
102 /* parallel and serial emulation versions are 3 and 4 respectively */
103 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
104 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
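The three macros above decode a single irev field: the revision sits above bit 8, and the emulator flavor (3 = parallel, 4 = serial) sits in the low nibble. A minimal standalone sketch of that decoding, using a hypothetical irev value rather than anything read from hardware:

    #include <stdio.h>

    #define emulator_rev(irev)  ((irev) >> 8)
    #define is_emulator_p(irev) (((irev) & 0xf) == 3)  /* parallel emulation */
    #define is_emulator_s(irev) (((irev) & 0xf) == 4)  /* serial emulation */

    int main(void)
    {
        unsigned int irev = 0x2a03;  /* hypothetical: rev 0x2a, parallel */
        printf("rev 0x%x parallel=%d serial=%d\n",
               emulator_rev(irev), is_emulator_p(irev), is_emulator_s(irev));
        return 0;
    }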
113 /* LRH.BTH: QW 0, OFFSET 48 - for match */
122 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
130 /* SC[n..0] QW 0, OFFSET 60 - for select */
156 /* L2_TYPE: QW 0, OFFSET 61 - for match */
164 /* L4_TYPE QW 1, OFFSET 0 - for match */
172 /* 16B VESWID - for select */
174 /* 16B ENTROPY - for select */
236 /* all CceStatus sub-block freeze bits */
241 /* all CceStatus sub-block TXE pause bits */
245 /* all CceStatus sub-block RXE pause bits */
337 /*41-63 reserved*/
454 /*30-31 reserved*/
467 /*36-63 reserved*/
514 /*04-63 reserved*/
546 /* 9-10 reserved */
708 /* 5-63 reserved*/
1070 * in the top-level CceIntStatus.
1080 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
1081 #define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
1082 #define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
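The NUM_* macros above count inclusive [START, END] ranges, hence the END + 1 - START form. A tiny standalone check with hypothetical bounds:

    #include <stdio.h>

    #define IS_DEMO_START 8                       /* hypothetical bounds */
    #define IS_DEMO_END   15
    #define NUM_DEMO (IS_DEMO_END + 1 - IS_DEMO_START)

    int main(void)
    {
        printf("%d sources\n", NUM_DEMO);         /* 8: sources 8..15 inclusive */
        return 0;
    }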
1119 * SDMA error interrupt entry - refers to another register containing more
1150 /* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1284 * hfi1_addr_from_offset - return addr for readq/writeq
1295 if (offset >= dd->base2_start) in hfi1_addr_from_offset()
1296 return dd->kregbase2 + (offset - dd->base2_start); in hfi1_addr_from_offset()
1297 return dd->kregbase1 + offset; in hfi1_addr_from_offset()
1301 * read_csr - read CSR at the indicated offset
1310 if (dd->flags & HFI1_PRESENT) in read_csr()
1312 return -1; in read_csr()
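The fragments above show CSR offsets split across two mapped ranges: offsets below base2_start resolve into kregbase1, everything else is rebased into kregbase2, and read_csr returns all-ones when the device is absent. A userspace analog of just the address math, with hypothetical region sizes and a split point standing in for base2_start:

    #include <stdio.h>
    #include <stdint.h>

    #define BASE2_START 0x1000u                   /* hypothetical split point */
    static uint64_t region1[BASE2_START / 8];     /* stands in for kregbase1 */
    static uint64_t region2[512];                 /* stands in for kregbase2 */

    static uint64_t *addr_from_offset(uint32_t offset)
    {
        if (offset >= BASE2_START)                /* second range: rebase */
            return &region2[(offset - BASE2_START) / 8];
        return &region1[offset / 8];              /* first range: direct */
    }

    int main(void)
    {
        region1[0x800 / 8] = 0x11;
        region2[(0x1800 - BASE2_START) / 8] = 0x22;
        printf("0x800 -> 0x%llx, 0x1800 -> 0x%llx\n",
               (unsigned long long)*addr_from_offset(0x800),
               (unsigned long long)*addr_from_offset(0x1800));
        return 0;
    }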
1316 * write_csr - write CSR at the indicated offset
1323 if (dd->flags & HFI1_PRESENT) { in write_csr()
1327 if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start)) in write_csr()
1334 * get_csr_addr - return the iomem address for offset
1345 if (dd->flags & HFI1_PRESENT) in get_csr_addr()
1374 u64 csr = entry->csr; in dev_access_u32_csr()
1376 if (entry->flags & CNTR_SDMA) { in dev_access_u32_csr()
1392 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_err_cnt()
1393 return dd->per_sdma[idx].err_cnt; in access_sde_err_cnt()
1402 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_int_cnt()
1403 return dd->per_sdma[idx].sdma_int_cnt; in access_sde_int_cnt()
1412 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_idle_int_cnt()
1413 return dd->per_sdma[idx].idle_int_cnt; in access_sde_idle_int_cnt()
1423 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_progress_int_cnt()
1424 return dd->per_sdma[idx].progress_int_cnt; in access_sde_progress_int_cnt()
1434 u64 csr = entry->csr; in dev_access_u64_csr()
1436 if (entry->flags & CNTR_VL) { in dev_access_u64_csr()
1453 u32 csr = entry->csr; in dc_access_lcb_cntr()
1480 return read_write_csr(ppd->dd, entry->csr, mode, data); in port_access_u32_csr()
1488 u64 csr = entry->csr; in port_access_u64_csr()
1490 if (entry->flags & CNTR_VL) { in port_access_u64_csr()
1498 val = read_write_csr(ppd->dd, csr, mode, data); in port_access_u64_csr()
1530 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data); in access_sw_link_dn_cnt()
1540 return read_write_sw(ppd->dd, &ppd->link_up, mode, data); in access_sw_link_up_cnt()
1551 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data); in access_sw_unknown_frame_cnt()
1562 counter = &ppd->port_xmit_discards; in access_sw_xmit_discards()
1564 counter = &ppd->port_xmit_discards_vl[vl]; in access_sw_xmit_discards()
1568 return read_write_sw(ppd->dd, counter, mode, data); in access_sw_xmit_discards()
1580 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors, in access_xmit_constraint_errs()
1592 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors, in access_rcv_constraint_errs()
1616 ret = get_all_cpu_total(cntr) - *z_val; in read_write_cpu()
1636 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl, in access_sw_cpu_intr()
1645 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl, in access_sw_cpu_rcv_limit()
1654 return dd->verbs_dev.n_piowait; in access_sw_pio_wait()
1662 return dd->verbs_dev.n_piodrain; in access_sw_pio_drain()
1670 return dd->ctx0_seq_drop; in access_sw_ctx0_seq_drop()
1678 return dd->verbs_dev.n_txwait; in access_sw_vtx_wait()
1686 return dd->verbs_dev.n_kmem_wait; in access_sw_kmem_wait()
1694 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl, in access_sw_send_schedule()
1705 return dd->misc_err_status_cnt[12]; in access_misc_pll_lock_fail_err_cnt()
1714 return dd->misc_err_status_cnt[11]; in access_misc_mbist_fail_err_cnt()
1723 return dd->misc_err_status_cnt[10]; in access_misc_invalid_eep_cmd_err_cnt()
1732 return dd->misc_err_status_cnt[9]; in access_misc_efuse_done_parity_err_cnt()
1741 return dd->misc_err_status_cnt[8]; in access_misc_efuse_write_err_cnt()
1750 return dd->misc_err_status_cnt[7]; in access_misc_efuse_read_bad_addr_err_cnt()
1759 return dd->misc_err_status_cnt[6]; in access_misc_efuse_csr_parity_err_cnt()
1768 return dd->misc_err_status_cnt[5]; in access_misc_fw_auth_failed_err_cnt()
1777 return dd->misc_err_status_cnt[4]; in access_misc_key_mismatch_err_cnt()
1786 return dd->misc_err_status_cnt[3]; in access_misc_sbus_write_failed_err_cnt()
1795 return dd->misc_err_status_cnt[2]; in access_misc_csr_write_bad_addr_err_cnt()
1804 return dd->misc_err_status_cnt[1]; in access_misc_csr_read_bad_addr_err_cnt()
1813 return dd->misc_err_status_cnt[0]; in access_misc_csr_parity_err_cnt()
1826 return dd->sw_cce_err_status_aggregate; in access_sw_cce_err_status_aggregated_cnt()
1839 return dd->cce_err_status_cnt[40]; in access_cce_msix_csr_parity_err_cnt()
1848 return dd->cce_err_status_cnt[39]; in access_cce_int_map_unc_err_cnt()
1857 return dd->cce_err_status_cnt[38]; in access_cce_int_map_cor_err_cnt()
1866 return dd->cce_err_status_cnt[37]; in access_cce_msix_table_unc_err_cnt()
1875 return dd->cce_err_status_cnt[36]; in access_cce_msix_table_cor_err_cnt()
1884 return dd->cce_err_status_cnt[35]; in access_cce_rxdma_conv_fifo_parity_err_cnt()
1893 return dd->cce_err_status_cnt[34]; in access_cce_rcpl_async_fifo_parity_err_cnt()
1902 return dd->cce_err_status_cnt[33]; in access_cce_seg_write_bad_addr_err_cnt()
1911 return dd->cce_err_status_cnt[32]; in access_cce_seg_read_bad_addr_err_cnt()
1919 return dd->cce_err_status_cnt[31]; in access_la_triggered_cnt()
1928 return dd->cce_err_status_cnt[30]; in access_cce_trgt_cpl_timeout_err_cnt()
1937 return dd->cce_err_status_cnt[29]; in access_pcic_receive_parity_err_cnt()
1946 return dd->cce_err_status_cnt[28]; in access_pcic_transmit_back_parity_err_cnt()
1955 return dd->cce_err_status_cnt[27]; in access_pcic_transmit_front_parity_err_cnt()
1964 return dd->cce_err_status_cnt[26]; in access_pcic_cpl_dat_q_unc_err_cnt()
1973 return dd->cce_err_status_cnt[25]; in access_pcic_cpl_hd_q_unc_err_cnt()
1982 return dd->cce_err_status_cnt[24]; in access_pcic_post_dat_q_unc_err_cnt()
1991 return dd->cce_err_status_cnt[23]; in access_pcic_post_hd_q_unc_err_cnt()
2000 return dd->cce_err_status_cnt[22]; in access_pcic_retry_sot_mem_unc_err_cnt()
2009 return dd->cce_err_status_cnt[21]; in access_pcic_retry_mem_unc_err()
2018 return dd->cce_err_status_cnt[20]; in access_pcic_n_post_dat_q_parity_err_cnt()
2027 return dd->cce_err_status_cnt[19]; in access_pcic_n_post_h_q_parity_err_cnt()
2036 return dd->cce_err_status_cnt[18]; in access_pcic_cpl_dat_q_cor_err_cnt()
2045 return dd->cce_err_status_cnt[17]; in access_pcic_cpl_hd_q_cor_err_cnt()
2054 return dd->cce_err_status_cnt[16]; in access_pcic_post_dat_q_cor_err_cnt()
2063 return dd->cce_err_status_cnt[15]; in access_pcic_post_hd_q_cor_err_cnt()
2072 return dd->cce_err_status_cnt[14]; in access_pcic_retry_sot_mem_cor_err_cnt()
2081 return dd->cce_err_status_cnt[13]; in access_pcic_retry_mem_cor_err_cnt()
2090 return dd->cce_err_status_cnt[12]; in access_cce_cli1_async_fifo_dbg_parity_err_cnt()
2099 return dd->cce_err_status_cnt[11]; in access_cce_cli1_async_fifo_rxdma_parity_err_cnt()
2108 return dd->cce_err_status_cnt[10]; in access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt()
2117 return dd->cce_err_status_cnt[9]; in access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt()
2126 return dd->cce_err_status_cnt[8]; in access_cce_cli2_async_fifo_parity_err_cnt()
2135 return dd->cce_err_status_cnt[7]; in access_cce_csr_cfg_bus_parity_err_cnt()
2144 return dd->cce_err_status_cnt[6]; in access_cce_cli0_async_fifo_parity_err_cnt()
2153 return dd->cce_err_status_cnt[5]; in access_cce_rspd_data_parity_err_cnt()
2162 return dd->cce_err_status_cnt[4]; in access_cce_trgt_access_err_cnt()
2171 return dd->cce_err_status_cnt[3]; in access_cce_trgt_async_fifo_parity_err_cnt()
2180 return dd->cce_err_status_cnt[2]; in access_cce_csr_write_bad_addr_err_cnt()
2189 return dd->cce_err_status_cnt[1]; in access_cce_csr_read_bad_addr_err_cnt()
2198 return dd->cce_err_status_cnt[0]; in access_ccs_csr_parity_err_cnt()
2211 return dd->rcv_err_status_cnt[63]; in access_rx_csr_parity_err_cnt()
2220 return dd->rcv_err_status_cnt[62]; in access_rx_csr_write_bad_addr_err_cnt()
2229 return dd->rcv_err_status_cnt[61]; in access_rx_csr_read_bad_addr_err_cnt()
2238 return dd->rcv_err_status_cnt[60]; in access_rx_dma_csr_unc_err_cnt()
2247 return dd->rcv_err_status_cnt[59]; in access_rx_dma_dq_fsm_encoding_err_cnt()
2256 return dd->rcv_err_status_cnt[58]; in access_rx_dma_eq_fsm_encoding_err_cnt()
2265 return dd->rcv_err_status_cnt[57]; in access_rx_dma_csr_parity_err_cnt()
2274 return dd->rcv_err_status_cnt[56]; in access_rx_rbuf_data_cor_err_cnt()
2283 return dd->rcv_err_status_cnt[55]; in access_rx_rbuf_data_unc_err_cnt()
2292 return dd->rcv_err_status_cnt[54]; in access_rx_dma_data_fifo_rd_cor_err_cnt()
2301 return dd->rcv_err_status_cnt[53]; in access_rx_dma_data_fifo_rd_unc_err_cnt()
2310 return dd->rcv_err_status_cnt[52]; in access_rx_dma_hdr_fifo_rd_cor_err_cnt()
2319 return dd->rcv_err_status_cnt[51]; in access_rx_dma_hdr_fifo_rd_unc_err_cnt()
2328 return dd->rcv_err_status_cnt[50]; in access_rx_rbuf_desc_part2_cor_err_cnt()
2337 return dd->rcv_err_status_cnt[49]; in access_rx_rbuf_desc_part2_unc_err_cnt()
2346 return dd->rcv_err_status_cnt[48]; in access_rx_rbuf_desc_part1_cor_err_cnt()
2355 return dd->rcv_err_status_cnt[47]; in access_rx_rbuf_desc_part1_unc_err_cnt()
2364 return dd->rcv_err_status_cnt[46]; in access_rx_hq_intr_fsm_err_cnt()
2373 return dd->rcv_err_status_cnt[45]; in access_rx_hq_intr_csr_parity_err_cnt()
2382 return dd->rcv_err_status_cnt[44]; in access_rx_lookup_csr_parity_err_cnt()
2391 return dd->rcv_err_status_cnt[43]; in access_rx_lookup_rcv_array_cor_err_cnt()
2400 return dd->rcv_err_status_cnt[42]; in access_rx_lookup_rcv_array_unc_err_cnt()
2409 return dd->rcv_err_status_cnt[41]; in access_rx_lookup_des_part2_parity_err_cnt()
2418 return dd->rcv_err_status_cnt[40]; in access_rx_lookup_des_part1_unc_cor_err_cnt()
2427 return dd->rcv_err_status_cnt[39]; in access_rx_lookup_des_part1_unc_err_cnt()
2436 return dd->rcv_err_status_cnt[38]; in access_rx_rbuf_next_free_buf_cor_err_cnt()
2445 return dd->rcv_err_status_cnt[37]; in access_rx_rbuf_next_free_buf_unc_err_cnt()
2454 return dd->rcv_err_status_cnt[36]; in access_rbuf_fl_init_wr_addr_parity_err_cnt()
2463 return dd->rcv_err_status_cnt[35]; in access_rx_rbuf_fl_initdone_parity_err_cnt()
2472 return dd->rcv_err_status_cnt[34]; in access_rx_rbuf_fl_write_addr_parity_err_cnt()
2481 return dd->rcv_err_status_cnt[33]; in access_rx_rbuf_fl_rd_addr_parity_err_cnt()
2490 return dd->rcv_err_status_cnt[32]; in access_rx_rbuf_empty_err_cnt()
2499 return dd->rcv_err_status_cnt[31]; in access_rx_rbuf_full_err_cnt()
2508 return dd->rcv_err_status_cnt[30]; in access_rbuf_bad_lookup_err_cnt()
2517 return dd->rcv_err_status_cnt[29]; in access_rbuf_ctx_id_parity_err_cnt()
2526 return dd->rcv_err_status_cnt[28]; in access_rbuf_csr_qeopdw_parity_err_cnt()
2535 return dd->rcv_err_status_cnt[27]; in access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt()
2544 return dd->rcv_err_status_cnt[26]; in access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt()
2553 return dd->rcv_err_status_cnt[25]; in access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt()
2562 return dd->rcv_err_status_cnt[24]; in access_rx_rbuf_csr_q_vld_bit_parity_err_cnt()
2571 return dd->rcv_err_status_cnt[23]; in access_rx_rbuf_csr_q_next_buf_parity_err_cnt()
2580 return dd->rcv_err_status_cnt[22]; in access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt()
2589 return dd->rcv_err_status_cnt[21]; in access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt()
2598 return dd->rcv_err_status_cnt[20]; in access_rx_rbuf_block_list_read_cor_err_cnt()
2607 return dd->rcv_err_status_cnt[19]; in access_rx_rbuf_block_list_read_unc_err_cnt()
2616 return dd->rcv_err_status_cnt[18]; in access_rx_rbuf_lookup_des_cor_err_cnt()
2625 return dd->rcv_err_status_cnt[17]; in access_rx_rbuf_lookup_des_unc_err_cnt()
2634 return dd->rcv_err_status_cnt[16]; in access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt()
2643 return dd->rcv_err_status_cnt[15]; in access_rx_rbuf_lookup_des_reg_unc_err_cnt()
2652 return dd->rcv_err_status_cnt[14]; in access_rx_rbuf_free_list_cor_err_cnt()
2661 return dd->rcv_err_status_cnt[13]; in access_rx_rbuf_free_list_unc_err_cnt()
2670 return dd->rcv_err_status_cnt[12]; in access_rx_rcv_fsm_encoding_err_cnt()
2679 return dd->rcv_err_status_cnt[11]; in access_rx_dma_flag_cor_err_cnt()
2688 return dd->rcv_err_status_cnt[10]; in access_rx_dma_flag_unc_err_cnt()
2697 return dd->rcv_err_status_cnt[9]; in access_rx_dc_sop_eop_parity_err_cnt()
2706 return dd->rcv_err_status_cnt[8]; in access_rx_rcv_csr_parity_err_cnt()
2715 return dd->rcv_err_status_cnt[7]; in access_rx_rcv_qp_map_table_cor_err_cnt()
2724 return dd->rcv_err_status_cnt[6]; in access_rx_rcv_qp_map_table_unc_err_cnt()
2733 return dd->rcv_err_status_cnt[5]; in access_rx_rcv_data_cor_err_cnt()
2742 return dd->rcv_err_status_cnt[4]; in access_rx_rcv_data_unc_err_cnt()
2751 return dd->rcv_err_status_cnt[3]; in access_rx_rcv_hdr_cor_err_cnt()
2760 return dd->rcv_err_status_cnt[2]; in access_rx_rcv_hdr_unc_err_cnt()
2769 return dd->rcv_err_status_cnt[1]; in access_rx_dc_intf_parity_err_cnt()
2778 return dd->rcv_err_status_cnt[0]; in access_rx_dma_csr_cor_err_cnt()
2791 return dd->send_pio_err_status_cnt[35]; in access_pio_pec_sop_head_parity_err_cnt()
2800 return dd->send_pio_err_status_cnt[34]; in access_pio_pcc_sop_head_parity_err_cnt()
2809 return dd->send_pio_err_status_cnt[33]; in access_pio_last_returned_cnt_parity_err_cnt()
2818 return dd->send_pio_err_status_cnt[32]; in access_pio_current_free_cnt_parity_err_cnt()
2827 return dd->send_pio_err_status_cnt[31]; in access_pio_reserved_31_err_cnt()
2836 return dd->send_pio_err_status_cnt[30]; in access_pio_reserved_30_err_cnt()
2845 return dd->send_pio_err_status_cnt[29]; in access_pio_ppmc_sop_len_err_cnt()
2854 return dd->send_pio_err_status_cnt[28]; in access_pio_ppmc_bqc_mem_parity_err_cnt()
2863 return dd->send_pio_err_status_cnt[27]; in access_pio_vl_fifo_parity_err_cnt()
2872 return dd->send_pio_err_status_cnt[26]; in access_pio_vlf_sop_parity_err_cnt()
2881 return dd->send_pio_err_status_cnt[25]; in access_pio_vlf_v1_len_parity_err_cnt()
2890 return dd->send_pio_err_status_cnt[24]; in access_pio_block_qw_count_parity_err_cnt()
2899 return dd->send_pio_err_status_cnt[23]; in access_pio_write_qw_valid_parity_err_cnt()
2908 return dd->send_pio_err_status_cnt[22]; in access_pio_state_machine_err_cnt()
2917 return dd->send_pio_err_status_cnt[21]; in access_pio_write_data_parity_err_cnt()
2926 return dd->send_pio_err_status_cnt[20]; in access_pio_host_addr_mem_cor_err_cnt()
2935 return dd->send_pio_err_status_cnt[19]; in access_pio_host_addr_mem_unc_err_cnt()
2944 return dd->send_pio_err_status_cnt[18]; in access_pio_pkt_evict_sm_or_arb_sm_err_cnt()
2953 return dd->send_pio_err_status_cnt[17]; in access_pio_init_sm_in_err_cnt()
2962 return dd->send_pio_err_status_cnt[16]; in access_pio_ppmc_pbl_fifo_err_cnt()
2971 return dd->send_pio_err_status_cnt[15]; in access_pio_credit_ret_fifo_parity_err_cnt()
2980 return dd->send_pio_err_status_cnt[14]; in access_pio_v1_len_mem_bank1_cor_err_cnt()
2989 return dd->send_pio_err_status_cnt[13]; in access_pio_v1_len_mem_bank0_cor_err_cnt()
2998 return dd->send_pio_err_status_cnt[12]; in access_pio_v1_len_mem_bank1_unc_err_cnt()
3007 return dd->send_pio_err_status_cnt[11]; in access_pio_v1_len_mem_bank0_unc_err_cnt()
3016 return dd->send_pio_err_status_cnt[10]; in access_pio_sm_pkt_reset_parity_err_cnt()
3025 return dd->send_pio_err_status_cnt[9]; in access_pio_pkt_evict_fifo_parity_err_cnt()
3034 return dd->send_pio_err_status_cnt[8]; in access_pio_sbrdctrl_crrel_fifo_parity_err_cnt()
3043 return dd->send_pio_err_status_cnt[7]; in access_pio_sbrdctl_crrel_parity_err_cnt()
3052 return dd->send_pio_err_status_cnt[6]; in access_pio_pec_fifo_parity_err_cnt()
3061 return dd->send_pio_err_status_cnt[5]; in access_pio_pcc_fifo_parity_err_cnt()
3070 return dd->send_pio_err_status_cnt[4]; in access_pio_sb_mem_fifo1_err_cnt()
3079 return dd->send_pio_err_status_cnt[3]; in access_pio_sb_mem_fifo0_err_cnt()
3088 return dd->send_pio_err_status_cnt[2]; in access_pio_csr_parity_err_cnt()
3097 return dd->send_pio_err_status_cnt[1]; in access_pio_write_addr_parity_err_cnt()
3106 return dd->send_pio_err_status_cnt[0]; in access_pio_write_bad_ctxt_err_cnt()
3119 return dd->send_dma_err_status_cnt[3]; in access_sdma_pcie_req_tracking_cor_err_cnt()
3128 return dd->send_dma_err_status_cnt[2]; in access_sdma_pcie_req_tracking_unc_err_cnt()
3137 return dd->send_dma_err_status_cnt[1]; in access_sdma_csr_parity_err_cnt()
3146 return dd->send_dma_err_status_cnt[0]; in access_sdma_rpy_tag_err_cnt()
3159 return dd->send_egress_err_status_cnt[63]; in access_tx_read_pio_memory_csr_unc_err_cnt()
3168 return dd->send_egress_err_status_cnt[62]; in access_tx_read_sdma_memory_csr_err_cnt()
3177 return dd->send_egress_err_status_cnt[61]; in access_tx_egress_fifo_cor_err_cnt()
3186 return dd->send_egress_err_status_cnt[60]; in access_tx_read_pio_memory_cor_err_cnt()
3195 return dd->send_egress_err_status_cnt[59]; in access_tx_read_sdma_memory_cor_err_cnt()
3204 return dd->send_egress_err_status_cnt[58]; in access_tx_sb_hdr_cor_err_cnt()
3213 return dd->send_egress_err_status_cnt[57]; in access_tx_credit_overrun_err_cnt()
3222 return dd->send_egress_err_status_cnt[56]; in access_tx_launch_fifo8_cor_err_cnt()
3231 return dd->send_egress_err_status_cnt[55]; in access_tx_launch_fifo7_cor_err_cnt()
3240 return dd->send_egress_err_status_cnt[54]; in access_tx_launch_fifo6_cor_err_cnt()
3249 return dd->send_egress_err_status_cnt[53]; in access_tx_launch_fifo5_cor_err_cnt()
3258 return dd->send_egress_err_status_cnt[52]; in access_tx_launch_fifo4_cor_err_cnt()
3267 return dd->send_egress_err_status_cnt[51]; in access_tx_launch_fifo3_cor_err_cnt()
3276 return dd->send_egress_err_status_cnt[50]; in access_tx_launch_fifo2_cor_err_cnt()
3285 return dd->send_egress_err_status_cnt[49]; in access_tx_launch_fifo1_cor_err_cnt()
3294 return dd->send_egress_err_status_cnt[48]; in access_tx_launch_fifo0_cor_err_cnt()
3303 return dd->send_egress_err_status_cnt[47]; in access_tx_credit_return_vl_err_cnt()
3312 return dd->send_egress_err_status_cnt[46]; in access_tx_hcrc_insertion_err_cnt()
3321 return dd->send_egress_err_status_cnt[45]; in access_tx_egress_fifo_unc_err_cnt()
3330 return dd->send_egress_err_status_cnt[44]; in access_tx_read_pio_memory_unc_err_cnt()
3339 return dd->send_egress_err_status_cnt[43]; in access_tx_read_sdma_memory_unc_err_cnt()
3348 return dd->send_egress_err_status_cnt[42]; in access_tx_sb_hdr_unc_err_cnt()
3357 return dd->send_egress_err_status_cnt[41]; in access_tx_credit_return_partiy_err_cnt()
3366 return dd->send_egress_err_status_cnt[40]; in access_tx_launch_fifo8_unc_or_parity_err_cnt()
3375 return dd->send_egress_err_status_cnt[39]; in access_tx_launch_fifo7_unc_or_parity_err_cnt()
3384 return dd->send_egress_err_status_cnt[38]; in access_tx_launch_fifo6_unc_or_parity_err_cnt()
3393 return dd->send_egress_err_status_cnt[37]; in access_tx_launch_fifo5_unc_or_parity_err_cnt()
3402 return dd->send_egress_err_status_cnt[36]; in access_tx_launch_fifo4_unc_or_parity_err_cnt()
3411 return dd->send_egress_err_status_cnt[35]; in access_tx_launch_fifo3_unc_or_parity_err_cnt()
3420 return dd->send_egress_err_status_cnt[34]; in access_tx_launch_fifo2_unc_or_parity_err_cnt()
3429 return dd->send_egress_err_status_cnt[33]; in access_tx_launch_fifo1_unc_or_parity_err_cnt()
3438 return dd->send_egress_err_status_cnt[32]; in access_tx_launch_fifo0_unc_or_parity_err_cnt()
3447 return dd->send_egress_err_status_cnt[31]; in access_tx_sdma15_disallowed_packet_err_cnt()
3456 return dd->send_egress_err_status_cnt[30]; in access_tx_sdma14_disallowed_packet_err_cnt()
3465 return dd->send_egress_err_status_cnt[29]; in access_tx_sdma13_disallowed_packet_err_cnt()
3474 return dd->send_egress_err_status_cnt[28]; in access_tx_sdma12_disallowed_packet_err_cnt()
3483 return dd->send_egress_err_status_cnt[27]; in access_tx_sdma11_disallowed_packet_err_cnt()
3492 return dd->send_egress_err_status_cnt[26]; in access_tx_sdma10_disallowed_packet_err_cnt()
3501 return dd->send_egress_err_status_cnt[25]; in access_tx_sdma9_disallowed_packet_err_cnt()
3510 return dd->send_egress_err_status_cnt[24]; in access_tx_sdma8_disallowed_packet_err_cnt()
3519 return dd->send_egress_err_status_cnt[23]; in access_tx_sdma7_disallowed_packet_err_cnt()
3528 return dd->send_egress_err_status_cnt[22]; in access_tx_sdma6_disallowed_packet_err_cnt()
3537 return dd->send_egress_err_status_cnt[21]; in access_tx_sdma5_disallowed_packet_err_cnt()
3546 return dd->send_egress_err_status_cnt[20]; in access_tx_sdma4_disallowed_packet_err_cnt()
3555 return dd->send_egress_err_status_cnt[19]; in access_tx_sdma3_disallowed_packet_err_cnt()
3564 return dd->send_egress_err_status_cnt[18]; in access_tx_sdma2_disallowed_packet_err_cnt()
3573 return dd->send_egress_err_status_cnt[17]; in access_tx_sdma1_disallowed_packet_err_cnt()
3582 return dd->send_egress_err_status_cnt[16]; in access_tx_sdma0_disallowed_packet_err_cnt()
3591 return dd->send_egress_err_status_cnt[15]; in access_tx_config_parity_err_cnt()
3600 return dd->send_egress_err_status_cnt[14]; in access_tx_sbrd_ctl_csr_parity_err_cnt()
3609 return dd->send_egress_err_status_cnt[13]; in access_tx_launch_csr_parity_err_cnt()
3618 return dd->send_egress_err_status_cnt[12]; in access_tx_illegal_vl_err_cnt()
3627 return dd->send_egress_err_status_cnt[11]; in access_tx_sbrd_ctl_state_machine_parity_err_cnt()
3636 return dd->send_egress_err_status_cnt[10]; in access_egress_reserved_10_err_cnt()
3645 return dd->send_egress_err_status_cnt[9]; in access_egress_reserved_9_err_cnt()
3654 return dd->send_egress_err_status_cnt[8]; in access_tx_sdma_launch_intf_parity_err_cnt()
3663 return dd->send_egress_err_status_cnt[7]; in access_tx_pio_launch_intf_parity_err_cnt()
3672 return dd->send_egress_err_status_cnt[6]; in access_egress_reserved_6_err_cnt()
3681 return dd->send_egress_err_status_cnt[5]; in access_tx_incorrect_link_state_err_cnt()
3690 return dd->send_egress_err_status_cnt[4]; in access_tx_linkdown_err_cnt()
3699 return dd->send_egress_err_status_cnt[3]; in access_tx_egress_fifi_underrun_or_parity_err_cnt()
3708 return dd->send_egress_err_status_cnt[2]; in access_egress_reserved_2_err_cnt()
3717 return dd->send_egress_err_status_cnt[1]; in access_tx_pkt_integrity_mem_unc_err_cnt()
3726 return dd->send_egress_err_status_cnt[0]; in access_tx_pkt_integrity_mem_cor_err_cnt()
3739 return dd->send_err_status_cnt[2]; in access_send_csr_write_bad_addr_err_cnt()
3748 return dd->send_err_status_cnt[1]; in access_send_csr_read_bad_addr_err_cnt()
3757 return dd->send_err_status_cnt[0]; in access_send_csr_parity_cnt()
3770 return dd->sw_ctxt_err_status_cnt[4]; in access_pio_write_out_of_bounds_err_cnt()
3779 return dd->sw_ctxt_err_status_cnt[3]; in access_pio_write_overflow_err_cnt()
3788 return dd->sw_ctxt_err_status_cnt[2]; in access_pio_write_crosses_boundary_err_cnt()
3797 return dd->sw_ctxt_err_status_cnt[1]; in access_pio_disallowed_packet_err_cnt()
3806 return dd->sw_ctxt_err_status_cnt[0]; in access_pio_inconsistent_sop_err_cnt()
3819 return dd->sw_send_dma_eng_err_status_cnt[23]; in access_sdma_header_request_fifo_cor_err_cnt()
3828 return dd->sw_send_dma_eng_err_status_cnt[22]; in access_sdma_header_storage_cor_err_cnt()
3837 return dd->sw_send_dma_eng_err_status_cnt[21]; in access_sdma_packet_tracking_cor_err_cnt()
3846 return dd->sw_send_dma_eng_err_status_cnt[20]; in access_sdma_assembly_cor_err_cnt()
3855 return dd->sw_send_dma_eng_err_status_cnt[19]; in access_sdma_desc_table_cor_err_cnt()
3864 return dd->sw_send_dma_eng_err_status_cnt[18]; in access_sdma_header_request_fifo_unc_err_cnt()
3873 return dd->sw_send_dma_eng_err_status_cnt[17]; in access_sdma_header_storage_unc_err_cnt()
3882 return dd->sw_send_dma_eng_err_status_cnt[16]; in access_sdma_packet_tracking_unc_err_cnt()
3891 return dd->sw_send_dma_eng_err_status_cnt[15]; in access_sdma_assembly_unc_err_cnt()
3900 return dd->sw_send_dma_eng_err_status_cnt[14]; in access_sdma_desc_table_unc_err_cnt()
3909 return dd->sw_send_dma_eng_err_status_cnt[13]; in access_sdma_timeout_err_cnt()
3918 return dd->sw_send_dma_eng_err_status_cnt[12]; in access_sdma_header_length_err_cnt()
3927 return dd->sw_send_dma_eng_err_status_cnt[11]; in access_sdma_header_address_err_cnt()
3936 return dd->sw_send_dma_eng_err_status_cnt[10]; in access_sdma_header_select_err_cnt()
3945 return dd->sw_send_dma_eng_err_status_cnt[9]; in access_sdma_reserved_9_err_cnt()
3954 return dd->sw_send_dma_eng_err_status_cnt[8]; in access_sdma_packet_desc_overflow_err_cnt()
3963 return dd->sw_send_dma_eng_err_status_cnt[7]; in access_sdma_length_mismatch_err_cnt()
3971 return dd->sw_send_dma_eng_err_status_cnt[6]; in access_sdma_halt_err_cnt()
3980 return dd->sw_send_dma_eng_err_status_cnt[5]; in access_sdma_mem_read_err_cnt()
3989 return dd->sw_send_dma_eng_err_status_cnt[4]; in access_sdma_first_desc_err_cnt()
3998 return dd->sw_send_dma_eng_err_status_cnt[3]; in access_sdma_tail_out_of_bounds_err_cnt()
4007 return dd->sw_send_dma_eng_err_status_cnt[2]; in access_sdma_too_long_err_cnt()
4016 return dd->sw_send_dma_eng_err_status_cnt[1]; in access_sdma_gen_mismatch_err_cnt()
4025 return dd->sw_send_dma_eng_err_status_cnt[0]; in access_sdma_wrong_dw_err_cnt()
4035 u64 csr = entry->csr; in access_dc_rcv_err_cnt()
4039 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ? in access_dc_rcv_err_cnt()
4040 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors; in access_dc_rcv_err_cnt()
4042 dd->sw_rcv_bypass_packet_errors = 0; in access_dc_rcv_err_cnt()
4055 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
4056 ppd->ibport_data.rvp.cntr, vl, \
4073 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
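The two macro fragments above stamp out one accessor per counter by token-pasting the counter name onto a z_ (zero snapshot) or n_ prefix, so a read reports the live value relative to the last zeroing (as the read_write_cpu fragment at 1616 also does). A standalone sketch of that idiom with hypothetical names:

    #include <stdio.h>

    struct port { long z_rx_pkts, rx_pkts; };     /* snapshot + live counter */

    #define DEFINE_PORT_CNTR(cntr)                               \
        static long read_##cntr(const struct port *p)            \
        { return p->cntr - p->z_##cntr; }  /* value since last zero */

    DEFINE_PORT_CNTR(rx_pkts)                     /* defines read_rx_pkts() */

    int main(void)
    {
        struct port p = { .z_rx_pkts = 10, .rx_pkts = 42 };
        printf("%ld\n", read_rx_pkts(&p));        /* prints 32 */
        return 0;
    }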
5206 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT in is_ax()
5215 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT in is_bx()
5224 u32 is = IS_RCVURGENT_START + rcd->ctxt; in is_urg_masked()
5227 mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64))); in is_urg_masked()
5251 len--; in append_str()
5261 len--; in append_str()
5289 len--; /* leave room for a nul */ in flag_string()
5309 --p; in flag_string()
5313 /* add final nul - space already allocated above */ in flag_string()
5528 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) { in handle_cce_err()
5531 start_freeze_handling(dd->pport, FREEZE_SELF); in handle_cce_err()
5536 incr_cntr64(&dd->cce_err_status_cnt[i]); in handle_cce_err()
5538 incr_cntr64(&dd->sw_cce_err_status_aggregate); in handle_cce_err()
5551 struct hfi1_pportdata *ppd = dd->pport; in update_rcverr_timer()
5554 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt && in update_rcverr_timer()
5555 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) { in update_rcverr_timer()
5560 queue_work(ppd->link_wq, &ppd->link_bounce_work); in update_rcverr_timer()
5562 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt; in update_rcverr_timer()
5564 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); in update_rcverr_timer()
5569 timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0); in init_rcverr()
5571 dd->rcv_ovfl_cnt = 0; in init_rcverr()
5572 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); in init_rcverr()
5577 if (dd->rcverr_timer.function) in free_rcverr()
5578 del_timer_sync(&dd->rcverr_timer); in free_rcverr()
5599 start_freeze_handling(dd->pport, flags); in handle_rxe_err()
5604 incr_cntr64(&dd->rcv_err_status_cnt[i]); in handle_rxe_err()
5617 incr_cntr64(&dd->misc_err_status_cnt[i]); in handle_misc_err()
5630 start_freeze_handling(dd->pport, 0); in handle_pio_err()
5634 incr_cntr64(&dd->send_pio_err_status_cnt[i]); in handle_pio_err()
5647 start_freeze_handling(dd->pport, 0); in handle_sdma_err()
5651 incr_cntr64(&dd->send_dma_err_status_cnt[i]); in handle_sdma_err()
5657 incr_cntr64(&ppd->port_xmit_discards); in __count_port_discards()
5662 __count_port_discards(dd->pport); in count_port_inactive()
5677 struct hfi1_pportdata *ppd = dd->pport; in handle_send_egress_err_info()
5719 incr_cntr64(&ppd->port_xmit_discards_vl[vl]); in handle_send_egress_err_info()
5721 incr_cntr64(&ppd->port_xmit_discards_vl in handle_send_egress_err_info()
5754 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET); in disallowed_pkt_engine()
5758 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5768 return -1; in engine_to_vl()
5771 m = rcu_dereference(dd->sdma_map); in engine_to_vl()
5772 vl = m->engine_to_vl[engine]; in engine_to_vl()
5779 * Translate the send context (software index) into a VL. Return -1 if the
5788 sci = &dd->send_contexts[sw_index]; in sc_to_vl()
5791 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15)) in sc_to_vl()
5792 return -1; in sc_to_vl()
5794 sc = sci->sc; in sc_to_vl()
5796 return -1; in sc_to_vl()
5797 if (dd->vld[15].sc == sc) in sc_to_vl()
5800 if (dd->vld[i].sc == sc) in sc_to_vl()
5803 return -1; in sc_to_vl()
5813 start_freeze_handling(dd->pport, 0); in handle_egress_err()
5816 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) in handle_egress_err()
5817 start_freeze_handling(dd->pport, 0); in handle_egress_err()
5821 /* fls64() returns a 1-based offset, we want it zero based */ in handle_egress_err()
5822 int shift = posn - 1; in handle_egress_err()
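As the comment above notes, fls64() reports the most-significant set bit as a 1-based position, so the zero-based shift is posn - 1. A standalone version of the drain-highest-bit loop, with fls64 emulated via a GCC/Clang builtin for userspace:

    #include <stdio.h>
    #include <stdint.h>

    static int fls64(uint64_t x)                  /* 0 if empty, else 1..64 */
    {
        return x ? 64 - __builtin_clzll(x) : 0;   /* GCC/Clang builtin */
    }

    int main(void)
    {
        uint64_t reg = 0x28;                      /* bits 3 and 5 set */
        while (reg) {
            int shift = fls64(reg) - 1;           /* zero-based bit index */
            printf("handling bit %d\n", shift);
            reg &= ~(1ULL << shift);              /* clear, move to next */
        }
        return 0;
    }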
5845 incr_cntr64(&dd->send_egress_err_status_cnt[i]); in handle_egress_err()
5859 incr_cntr64(&dd->send_err_status_cnt[i]); in handle_txe_err()
5871 * through here to have a central location to correctly handle single-
5872 * or multi-shot errors.
5874 * For non per-context registers, call this routine with a context value
5875 * of 0 so the per-context offset is zero.
5890 reg = read_kctxt_csr(dd, context, eri->status); in interrupt_clear_down()
5893 write_kctxt_csr(dd, context, eri->clear, reg); in interrupt_clear_down()
5894 if (likely(eri->handler)) in interrupt_clear_down()
5895 eri->handler(dd, context, reg); in interrupt_clear_down()
5900 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n", in interrupt_clear_down()
5901 eri->desc, reg); in interrupt_clear_down()
5903 * Read-modify-write so any other masked bits in interrupt_clear_down()
5906 mask = read_kctxt_csr(dd, context, eri->mask); in interrupt_clear_down()
5908 write_kctxt_csr(dd, context, eri->mask, mask); in interrupt_clear_down()
5921 if (eri->handler) { in is_misc_err_int()
5924 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n", in is_misc_err_int()
5940 * clear-down mechanism cannot be used because we cannot clear the
5941 * error bits until several other long-running items are done first.
5956 sw_index = dd->hw_to_sw[hw_context]; in is_sendctxt_err_int()
5957 if (sw_index >= dd->num_send_contexts) { in is_sendctxt_err_int()
5963 sci = &dd->send_contexts[sw_index]; in is_sendctxt_err_int()
5964 spin_lock_irqsave(&dd->sc_lock, irq_flags); in is_sendctxt_err_int()
5965 sc = sci->sc; in is_sendctxt_err_int()
5969 spin_unlock_irqrestore(&dd->sc_lock, irq_flags); in is_sendctxt_err_int()
5989 if (sc->type != SC_USER) in is_sendctxt_err_int()
5990 queue_work(dd->pport->hfi1_wq, &sc->halt_work); in is_sendctxt_err_int()
5991 spin_unlock_irqrestore(&dd->sc_lock, irq_flags); in is_sendctxt_err_int()
6000 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]); in is_sendctxt_err_int()
6010 sde = &dd->per_sdma[source]; in handle_sdma_eng_err()
6012 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in handle_sdma_eng_err()
6014 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n", in handle_sdma_eng_err()
6015 sde->this_idx, source, (unsigned long long)status); in handle_sdma_eng_err()
6017 sde->err_cnt++; in handle_sdma_eng_err()
6027 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]); in handle_sdma_eng_err()
6037 struct sdma_engine *sde = &dd->per_sdma[source]; in is_sdma_eng_err_int()
6039 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in is_sdma_eng_err_int()
6041 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx, in is_sdma_eng_err_int()
6062 else if (eri->handler) in is_various_int()
6073 struct hfi1_pportdata *ppd = dd->pport; in handle_qsfp_int()
6082 ppd->driver_link_ready = 0; in handle_qsfp_int()
6088 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); in handle_qsfp_int()
6093 ppd->qsfp_info.cache_valid = 0; in handle_qsfp_int()
6094 ppd->qsfp_info.reset_needed = 0; in handle_qsfp_int()
6095 ppd->qsfp_info.limiting_active = 0; in handle_qsfp_int()
6096 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, in handle_qsfp_int()
6098 /* Invert the ModPresent pin now to detect plug-in */ in handle_qsfp_int()
6099 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : in handle_qsfp_int()
6102 if ((ppd->offline_disabled_reason > in handle_qsfp_int()
6105 (ppd->offline_disabled_reason == in handle_qsfp_int()
6107 ppd->offline_disabled_reason = in handle_qsfp_int()
6111 if (ppd->host_link_state == HLS_DN_POLL) { in handle_qsfp_int()
6118 queue_work(ppd->link_wq, &ppd->link_down_work); in handle_qsfp_int()
6124 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); in handle_qsfp_int()
6125 ppd->qsfp_info.cache_valid = 0; in handle_qsfp_int()
6126 ppd->qsfp_info.cache_refresh_required = 1; in handle_qsfp_int()
6127 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, in handle_qsfp_int()
6135 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : in handle_qsfp_int()
6138 ppd->offline_disabled_reason = in handle_qsfp_int()
6146 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); in handle_qsfp_int()
6147 ppd->qsfp_info.check_interrupt_flags = 1; in handle_qsfp_int()
6148 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); in handle_qsfp_int()
6153 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work); in handle_qsfp_int()
6167 return ret == HCMD_SUCCESS ? 0 : -EBUSY; in request_host_lcb_access()
6181 return ret == HCMD_SUCCESS ? 0 : -EBUSY; in request_8051_lcb_access()
6185 * Set the LCB selector - allow host access. The DCC selector always
6196 * Clear the LCB selector - allow 8051 access. The DCC selector always
6212 * -EBUSY if the 8051 has control and cannot be disturbed
6213 * -errno if unable to acquire access from the 8051
6217 struct hfi1_pportdata *ppd = dd->pport; in acquire_lcb_access()
6227 mutex_lock(&ppd->hls_lock); in acquire_lcb_access()
6229 while (!mutex_trylock(&ppd->hls_lock)) in acquire_lcb_access()
6234 if (ppd->host_link_state & HLS_DOWN) { in acquire_lcb_access()
6236 __func__, link_state_name(ppd->host_link_state)); in acquire_lcb_access()
6237 ret = -EBUSY; in acquire_lcb_access()
6241 if (dd->lcb_access_count == 0) { in acquire_lcb_access()
6251 dd->lcb_access_count++; in acquire_lcb_access()
6253 mutex_unlock(&ppd->hls_lock); in acquire_lcb_access()
6263 * -errno if unable to release access to the 8051
6275 mutex_lock(&dd->pport->hls_lock); in release_lcb_access()
6277 while (!mutex_trylock(&dd->pport->hls_lock)) in release_lcb_access()
6281 if (dd->lcb_access_count == 0) { in release_lcb_access()
6287 if (dd->lcb_access_count == 1) { in release_lcb_access()
6299 dd->lcb_access_count--; in release_lcb_access()
6301 mutex_unlock(&dd->pport->hls_lock); in release_lcb_access()
6310 * leaving access to the 8051. Assign access now - this constrains the call
6311 * to this routine to be after all LCB set-up is done. In particular, after
6312 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6316 dd->lcb_access_count = 0; in init_lcb_access()
6336 struct hfi1_devdata *dd = ppd->dd; in handle_8051_request()
6444 dd->vl15buf_cached = 0; in reset_link_credits()
6467 ppd->sm_trap_qp = 0x0; in set_linkup_defaults()
6468 ppd->sa_qp = 0x1; in set_linkup_defaults()
6484 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN); in lcb_shutdown()
6492 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); in lcb_shutdown()
6503 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6508 lockdep_assert_held(&dd->dc8051_lock); in _dc_shutdown()
6510 if (dd->dc_shutdown) in _dc_shutdown()
6513 dd->dc_shutdown = 1; in _dc_shutdown()
6526 mutex_lock(&dd->dc8051_lock); in dc_shutdown()
6528 mutex_unlock(&dd->dc8051_lock); in dc_shutdown()
6534 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6539 lockdep_assert_held(&dd->dc8051_lock); in _dc_start()
6541 if (!dd->dc_shutdown) in _dc_start()
6554 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); in _dc_start()
6555 dd->dc_shutdown = 0; in _dc_start()
6560 mutex_lock(&dd->dc8051_lock); in dc_start()
6562 mutex_unlock(&dd->dc8051_lock); in dc_start()
6573 if (dd->icode != ICODE_FPGA_EMULATION) in adjust_lcb_for_fpga_serdes()
6653 * This is a work-queue function outside of the interrupt.
6659 struct hfi1_devdata *dd = ppd->dd; in handle_sma_message()
6664 * msg is bytes 1-4 of the 40-bit idle message - the command code in handle_sma_message()
6677 * See OPAv1 table 9-14 - HFI and External Switch Ports Key in handle_sma_message()
6682 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED)) in handle_sma_message()
6683 ppd->neighbor_normal = 1; in handle_sma_message()
6687 * See OPAv1 table 9-14 - HFI and External Switch Ports Key in handle_sma_message()
6692 if (ppd->host_link_state == HLS_UP_ARMED && in handle_sma_message()
6693 ppd->is_active_optimize_enabled) { in handle_sma_message()
6694 ppd->neighbor_normal = 1; in handle_sma_message()
6716 spin_lock_irqsave(&dd->rcvctrl_lock, flags); in adjust_rcvctrl()
6721 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags); in adjust_rcvctrl()
6739 struct hfi1_devdata *dd = ppd->dd; in start_freeze_handling()
6748 dd->flags |= HFI1_FROZEN; in start_freeze_handling()
6755 /* do halt pre-handling on all enabled send contexts */ in start_freeze_handling()
6756 for (i = 0; i < dd->num_send_contexts; i++) { in start_freeze_handling()
6757 sc = dd->send_contexts[i].sc; in start_freeze_handling()
6758 if (sc && (sc->flags & SCF_ENABLED)) in start_freeze_handling()
6770 /* queue non-interrupt handler */ in start_freeze_handling()
6771 queue_work(ppd->hfi1_wq, &ppd->freeze_work); in start_freeze_handling()
6775 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6822 for (i = 0; i < dd->num_rcv_contexts; i++) { in rxe_freeze()
6830 * Unfreeze handling for the RXE block - kernel contexts only.
6832 * handling on a per-context basis as they call into the driver.
6842 for (i = 0; i < dd->num_rcv_contexts; i++) { in rxe_kernel_unfreeze()
6845 /* Ensure all non-user contexts(including vnic) are enabled */ in rxe_kernel_unfreeze()
6847 (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) { in rxe_kernel_unfreeze()
6864 * Non-interrupt SPC freeze handling.
6866 * This is a work-queue function outside of the triggering interrupt.
6872 struct hfi1_devdata *dd = ppd->dd; in handle_freeze()
6885 /* do send egress freeze steps - nothing to do */ in handle_freeze()
6891 * Unfreeze the hardware - clear the freeze, wait for each in handle_freeze()
6910 /* do send egress unfreeze steps - nothing to do */ in handle_freeze()
6917 * it disables and re-enables RXE. Mark the device unfrozen in handle_freeze()
6928 dd->flags &= ~HFI1_FROZEN; in handle_freeze()
6929 wake_up(&dd->event_queue); in handle_freeze()
6935 * update_xmit_counters - update PortXmitWait/PortVlXmitWait
6950 link_speed = get_link_speed(ppd->link_speed_active); in update_xmit_counters()
6963 * This is a work-queue function outside of the interrupt.
6969 struct hfi1_devdata *dd = ppd->dd; in handle_link_up()
6991 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) in handle_link_up()
6992 set_up_vl15(dd, dd->vl15buf_cached); in handle_link_up()
6995 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { in handle_link_up()
6996 /* oops - current speed is not enabled, bounce */ in handle_link_up()
6999 ppd->link_speed_active, ppd->link_speed_enabled); in handle_link_up()
7013 ppd->neighbor_guid = 0; in reset_neighbor_info()
7014 ppd->neighbor_port_number = 0; in reset_neighbor_info()
7015 ppd->neighbor_type = 0; in reset_neighbor_info()
7016 ppd->neighbor_fm_security = 0; in reset_neighbor_info()
7091 * This is a work-queue function outside of the interrupt.
7102 if ((ppd->host_link_state & in handle_link_down()
7104 ppd->port_type == PORT_TYPE_FIXED) in handle_link_down()
7105 ppd->offline_disabled_reason = in handle_link_down()
7109 was_up = !!(ppd->host_link_state & HLS_UP); in handle_link_down()
7111 xchg(&ppd->is_link_down_queued, 0); in handle_link_down()
7116 read_link_down_reason(ppd->dd, &link_down_reason); in handle_link_down()
7120 dd_dev_info(ppd->dd, "%sUnexpected link down\n", in handle_link_down()
7128 read_planned_down_reason_code(ppd->dd, &neigh_reason); in handle_link_down()
7129 dd_dev_info(ppd->dd, in handle_link_down()
7135 dd_dev_info(ppd->dd, in handle_link_down()
7140 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n", in handle_link_down()
7146 * If no reason, assume peer-initiated but missed in handle_link_down()
7159 if (was_up && ppd->local_link_down_reason.sma == 0 && in handle_link_down()
7160 ppd->neigh_link_down_reason.sma == 0) { in handle_link_down()
7161 ppd->local_link_down_reason.sma = in handle_link_down()
7162 ppd->local_link_down_reason.latest; in handle_link_down()
7163 ppd->neigh_link_down_reason.sma = in handle_link_down()
7164 ppd->neigh_link_down_reason.latest; in handle_link_down()
7170 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); in handle_link_down()
7176 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) in handle_link_down()
7177 dc_shutdown(ppd->dd); in handle_link_down()
7190 if (ppd->host_link_state & HLS_UP) { in handle_link_bounce()
7194 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n", in handle_link_bounce()
7195 __func__, link_state_name(ppd->host_link_state)); in handle_link_bounce()
7255 if (ppd->pkeys[2] != 0) { in clear_full_mgmt_pkey()
7256 ppd->pkeys[2] = 0; in clear_full_mgmt_pkey()
7258 hfi1_event_pkey_change(ppd->dd, ppd->port); in clear_full_mgmt_pkey()
7273 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup) in link_width_to_bits()
7331 if ((dd->icode == ICODE_RTL_SILICON) && in get_link_widths()
7332 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) { in get_link_widths()
7336 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G; in get_link_widths()
7339 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; in get_link_widths()
7345 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; in get_link_widths()
7389 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7400 /* get end-of-LNI link widths */ in get_linkup_link_widths()
7401 get_linkup_widths(ppd->dd, &tx_width, &rx_width); in get_linkup_link_widths()
7404 ppd->link_width_active = tx_width; in get_linkup_link_widths()
7406 ppd->link_width_downgrade_tx_active = ppd->link_width_active; in get_linkup_link_widths()
7407 ppd->link_width_downgrade_rx_active = ppd->link_width_active; in get_linkup_link_widths()
7409 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported; in get_linkup_link_widths()
7411 ppd->current_egress_rate = active_egress_rate(ppd); in get_linkup_link_widths()
7417 * This is a work-queue function outside of the interrupt.
7423 struct hfi1_devdata *dd = ppd->dd; in handle_verify_cap()
7470 * about the peer Z value - our sent vAU is 3 (hardwired) and is not in handle_verify_cap()
7479 * credits value and wait for link-up interrupt to set it. in handle_verify_cap()
7482 dd->vl15buf_cached = vl15buf; in handle_verify_cap()
7485 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc; in handle_verify_cap()
7511 ppd->link_speed_active = 0; /* invalid value */ in handle_verify_cap()
7512 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { in handle_verify_cap()
7516 ppd->link_speed_active = OPA_LINK_SPEED_12_5G; in handle_verify_cap()
7519 ppd->link_speed_active = OPA_LINK_SPEED_25G; in handle_verify_cap()
7524 u8 rate = remote_tx_rate & ppd->local_tx_rate; in handle_verify_cap()
7527 ppd->link_speed_active = OPA_LINK_SPEED_25G; in handle_verify_cap()
7529 ppd->link_speed_active = OPA_LINK_SPEED_12_5G; in handle_verify_cap()
7531 if (ppd->link_speed_active == 0) { in handle_verify_cap()
7534 ppd->link_speed_active = OPA_LINK_SPEED_25G; in handle_verify_cap()
7544 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8; in handle_verify_cap()
7546 ppd->port_ltp_crc_mode |= in handle_verify_cap()
7547 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4; in handle_verify_cap()
7549 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val); in handle_verify_cap()
7571 /* pull LCB fifos out of reset - all fifo clocks must be stable */ in handle_verify_cap()
7583 * apply_link_downgrade_policy - Apply the link width downgrade enabled
7609 mutex_lock(&ppd->hls_lock); in apply_link_downgrade_policy()
7611 if (ppd->host_link_state & HLS_DOWN) { in apply_link_downgrade_policy()
7613 if (ppd->host_link_state & HLS_GOING_UP) { in apply_link_downgrade_policy()
7615 mutex_unlock(&ppd->hls_lock); in apply_link_downgrade_policy()
7619 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7626 lwde = ppd->link_width_downgrade_enabled; in apply_link_downgrade_policy()
7629 get_link_widths(ppd->dd, &tx, &rx); in apply_link_downgrade_policy()
7630 ppd->link_width_downgrade_tx_active = tx; in apply_link_downgrade_policy()
7631 ppd->link_width_downgrade_rx_active = rx; in apply_link_downgrade_policy()
7634 if (ppd->link_width_downgrade_tx_active == 0 || in apply_link_downgrade_policy()
7635 ppd->link_width_downgrade_rx_active == 0) { in apply_link_downgrade_policy()
7637 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n"); in apply_link_downgrade_policy()
7643 if ((ppd->link_width_active != in apply_link_downgrade_policy()
7644 ppd->link_width_downgrade_tx_active) || in apply_link_downgrade_policy()
7645 (ppd->link_width_active != in apply_link_downgrade_policy()
7646 ppd->link_width_downgrade_rx_active)) { in apply_link_downgrade_policy()
7647 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7649 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7651 ppd->link_width_active, in apply_link_downgrade_policy()
7652 ppd->link_width_downgrade_tx_active, in apply_link_downgrade_policy()
7653 ppd->link_width_downgrade_rx_active); in apply_link_downgrade_policy()
7657 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 || in apply_link_downgrade_policy()
7658 (lwde & ppd->link_width_downgrade_rx_active) == 0) { in apply_link_downgrade_policy()
7660 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7662 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7664 lwde, ppd->link_width_downgrade_tx_active, in apply_link_downgrade_policy()
7665 ppd->link_width_downgrade_rx_active); in apply_link_downgrade_policy()
7671 mutex_unlock(&ppd->hls_lock); in apply_link_downgrade_policy()
7686 * This is a work-queue function outside of the interrupt.
7693 dd_dev_info(ppd->dd, "8051: Link width downgrade\n"); in handle_link_downgrade()
7695 update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active); in handle_link_downgrade()
7730 struct hfi1_pportdata *ppd = dd->pport; in handle_8051_interrupt()
7756 if (ppd->host_link_state in handle_8051_interrupt()
7769 ppd->unknown_frame_count++; in handle_8051_interrupt()
7795 queue_work(ppd->link_wq, &ppd->sma_message_work); in handle_8051_interrupt()
7800 queue_work(ppd->link_wq, &ppd->link_up_work); in handle_8051_interrupt()
7808 queue_work(ppd->link_wq, &ppd->link_vc_work); in handle_8051_interrupt()
7823 queue_work(ppd->link_wq, &ppd->link_downgrade_work); in handle_8051_interrupt()
7861 if ((ppd->host_link_state & in handle_8051_interrupt()
7863 ppd->link_enabled == 0) { in handle_8051_interrupt()
7865 __func__, ppd->host_link_state, in handle_8051_interrupt()
7866 ppd->link_enabled); in handle_8051_interrupt()
7868 if (xchg(&ppd->is_link_down_queued, 1) == 1) in handle_8051_interrupt()
7873 queue_work(ppd->link_wq, &ppd->link_down_work); in handle_8051_interrupt()
7928 struct hfi1_pportdata *ppd = dd->pport; in handle_dcc_err()
7933 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) { in handle_dcc_err()
7935 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK; in handle_dcc_err()
7937 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK; in handle_dcc_err()
7943 struct hfi1_pportdata *ppd = dd->pport; in handle_dcc_err()
7944 /* this counter saturates at (2^32) - 1 */ in handle_dcc_err()
7945 if (ppd->link_downed < (u32)UINT_MAX) in handle_dcc_err()
7946 ppd->link_downed++; in handle_dcc_err()
7954 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) { in handle_dcc_err()
7955 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK; in handle_dcc_err()
7957 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK; in handle_dcc_err()
7971 if (ppd->port_error_action & in handle_dcc_err()
7990 do_bounce = ppd->port_error_action & in handle_dcc_err()
8007 if (!(dd->err_info_rcvport.status_and_code & in handle_dcc_err()
8009 dd->err_info_rcvport.status_and_code = in handle_dcc_err()
8012 dd->err_info_rcvport.status_and_code |= in handle_dcc_err()
8018 dd->err_info_rcvport.packet_flit1 = hdr0; in handle_dcc_err()
8019 dd->err_info_rcvport.packet_flit2 = hdr1; in handle_dcc_err()
8042 do_bounce = ppd->port_error_action & in handle_dcc_err()
8066 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev))) in handle_dcc_err()
8081 queue_work(ppd->link_wq, &ppd->link_bounce_work); in handle_dcc_err()
8100 if (eri->handler) { in is_dc_int()
8107 * and it is non-maskable. This is because if a parity in is_dc_int()
8131 * 0 - N-1 = SDma
8132 * N - 2N-1 = SDmaProgress
8133 * 2N - 3N-1 = SDmaIdle
8145 sdma_dumpstate(&dd->per_sdma[which]); in is_sdma_eng_int()
8148 if (likely(what < 3 && which < dd->num_sdma)) { in is_sdma_eng_int()
8149 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source); in is_sdma_eng_int()
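The layout comment above (three consecutive banks of N sources: SDma, SDmaProgress, SDmaIdle) pairs with the what/which test at 8148: the bank falls out of a divide and the engine out of a modulo. A standalone decode with a hypothetical N and illustrative names:

    #include <stdio.h>

    #define N 16                                  /* engines per bank (hypothetical) */
    static const char *bank[] = { "SDma", "SDmaProgress", "SDmaIdle" };

    int main(void)
    {
        unsigned int source = 2 * N + 5;          /* SDmaIdle on engine 5 */
        unsigned int what  = source / N;          /* which bank */
        unsigned int which = source % N;          /* which engine */
        if (what < 3 && which < N)
            printf("%s interrupt, engine %u\n", bank[what], which);
        return 0;
    }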
8157 * is_rcv_avail_int() - User receive context available IRQ handler
8164 * and can only be used for non-threaded IRQs.
8171 if (likely(source < dd->num_rcv_contexts)) { in is_rcv_avail_int()
8189 * is_rcv_urgent_int() - User receive context urgent IRQ handler
8202 if (likely(source < dd->num_rcv_contexts)) { in is_rcv_urgent_int()
8258 * Interrupt source interrupt - called when the given source has an interrupt.
8259 * Source is a bit index into an array of 64-bit integers.
8265 /* avoids a double compare by walking the table in-order */ in is_interrupt()
8266 for (entry = &is_table[0]; entry->is_name; entry++) { in is_interrupt()
8267 if (source <= entry->end) { in is_interrupt()
8269 entry->is_int(dd, source - entry->start); in is_interrupt()
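The dispatch above relies on is_table being sorted by source range: the first entry whose end covers the source wins, avoiding a second compare against start, and the handler receives a range-relative index. A standalone sketch of that pattern; the table contents and handler names here are illustrative only:

    #include <stdio.h>

    struct is_entry {
        unsigned int start, end;                  /* inclusive source range */
        const char *name;
        void (*handler)(unsigned int rel);
    };

    static void on_err(unsigned int rel)  { printf("err src %u\n", rel); }
    static void on_sdma(unsigned int rel) { printf("sdma src %u\n", rel); }

    static const struct is_entry is_table[] = {
        {  0, 15, "general", on_err  },
        { 16, 63, "sdma",    on_sdma },
        {  0,  0, NULL,      NULL    },           /* terminator */
    };

    static void is_interrupt(unsigned int source)
    {
        const struct is_entry *e;

        for (e = is_table; e->name; e++)          /* walk in sorted order */
            if (source <= e->end) {
                e->handler(source - e->start);    /* range-relative index */
                return;
            }
        printf("invalid source %u\n", source);
    }

    int main(void) { is_interrupt(20); return 0; }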
8278 * general_interrupt - General interrupt handler
8282 * This is able to correctly handle all non-threaded interrupts. Receive
8294 this_cpu_inc(*dd->int_counter); in general_interrupt()
8298 if (dd->gi_mask[i] == 0) { in general_interrupt()
8303 dd->gi_mask[i]; in general_interrupt()
8322 struct hfi1_devdata *dd = sde->dd; in sdma_interrupt()
8326 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in sdma_interrupt()
8331 this_cpu_inc(*dd->int_counter); in sdma_interrupt()
8336 & sde->imask; in sdma_interrupt()
8347 sde->this_idx); in sdma_interrupt()
8359 struct hfi1_devdata *dd = rcd->dd; in clear_recv_intr()
8360 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg); in clear_recv_intr()
8362 write_csr(dd, addr, rcd->imask); in clear_recv_intr()
8370 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask); in force_recv_intr()
8374 * Return non-zero if a packet is present.
8391 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); in check_packet_present()
8402 struct hfi1_devdata *dd = rcd->dd; in receive_interrupt_common()
8405 this_cpu_inc(*dd->int_counter); in receive_interrupt_common()
8410 * __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt
8418 if (!rcd->rcvhdrq) in __hfi1_rcd_eoi_intr()
8426 * hfi1_rcd_eoi_intr() - End of Interrupt processing action
8447 * hfi1_netdev_rx_napi - napi poll function to move eoi inline
8455 struct hfi1_ctxtdata *rcd = rxq->rcd; in hfi1_netdev_rx_napi()
8458 work_done = rcd->do_interrupt(rcd, budget); in hfi1_netdev_rx_napi()
8475 if (likely(rcd->napi)) { in receive_context_interrupt_napi()
8476 if (likely(napi_schedule_prep(rcd->napi))) in receive_context_interrupt_napi()
8477 __napi_schedule_irqoff(rcd->napi); in receive_context_interrupt_napi()
8482 rcd->ctxt); in receive_context_interrupt_napi()
8505 disposition = rcd->do_interrupt(rcd, 0); in receive_context_interrupt()
8528 (void)rcd->do_interrupt(rcd, 1); in receive_context_thread()
8574 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { in read_lcb_via_8051()
8580 return -EBUSY; in read_lcb_via_8051()
8583 /* register is an index of LCB registers: (offset - base) / 8 */ in read_lcb_via_8051()
8584 regno = (addr - DC_LCB_CFG_RUN) >> 3; in read_lcb_via_8051()
8587 return -EBUSY; in read_lcb_via_8051()
8617 if (likely(ret != -EBUSY)) in update_lcb_cache()
8634 return -1; in read_lcb_cache()
8639 * Return 0 on success, -EBUSY on failure.
8643 struct hfi1_pportdata *ppd = dd->pport; in read_lcb_csr()
8646 if (ppd->host_link_state & HLS_UP) in read_lcb_csr()
8649 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) { in read_lcb_csr()
8651 return -EBUSY; in read_lcb_csr()
8668 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || in write_lcb_via_8051()
8669 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) { in write_lcb_via_8051()
8675 return -EBUSY; in write_lcb_via_8051()
8678 /* register is an index of LCB registers: (offset - base) / 8 */ in write_lcb_via_8051()
8679 regno = (addr - DC_LCB_CFG_RUN) >> 3; in write_lcb_via_8051()
8682 return -EBUSY; in write_lcb_via_8051()
8688 * Return 0 on success, -EBUSY on failure.
8692 struct hfi1_pportdata *ppd = dd->pport; in write_lcb_csr()
8695 if (ppd->host_link_state & HLS_UP) in write_lcb_csr()
8698 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) in write_lcb_csr()
8699 return -EBUSY; in write_lcb_csr()
8719 mutex_lock(&dd->dc8051_lock); in do_8051_command()
8722 if (dd->dc_shutdown) { in do_8051_command()
8723 return_code = -ENODEV; in do_8051_command()
8737 if (dd->dc8051_timed_out) { in do_8051_command()
8738 if (dd->dc8051_timed_out > 1) { in do_8051_command()
8742 return_code = -ENXIO; in do_8051_command()
8761 * 39:00 -> in_data[47:8] in do_8051_command()
8762 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE in do_8051_command()
8763 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA in do_8051_command()
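The register-side layout sketched in the comment above (data in bits [39:0], RETURN_CODE in [47:40], RSP_DATA in [63:48]) is ordinary shift-and-mask packing. A standalone pack/unpack round-trip over that layout; the mask/shift names and sample values are illustrative, not the driver's:

    #include <stdio.h>
    #include <stdint.h>

    #define DATA_MASK  0xffffffffffULL            /* bits 39:0  */
    #define RC_SHIFT   40                         /* bits 47:40 */
    #define RSP_SHIFT  48                         /* bits 63:48 */

    int main(void)
    {
        uint64_t out = ((uint64_t)0xbeef << RSP_SHIFT) |
                       ((uint64_t)0x2a   << RC_SHIFT)  |
                       (0x123456789ULL & DATA_MASK);

        printf("data 0x%llx rc 0x%llx rsp 0x%llx\n",
               (unsigned long long)(out & DATA_MASK),
               (unsigned long long)((out >> RC_SHIFT) & 0xff),
               (unsigned long long)(out >> RSP_SHIFT));
        return 0;
    }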
8767 /* must preserve COMPLETED - it is tied to hardware */ in do_8051_command()
8797 dd->dc8051_timed_out++; in do_8051_command()
8801 return_code = -ETIMEDOUT; in do_8051_command()
8815 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT); in do_8051_command()
8820 dd->dc8051_timed_out = 0; in do_8051_command()
8827 mutex_unlock(&dd->dc8051_lock); in do_8051_command()
8857 * Return 0 on success, -errno on failure
8874 /* read is in 8-byte chunks, hardware will truncate the address down */ in read_8051_config()
9056 if (dd->pport->host_link_state & HLS_UP) { in hfi1_read_link_quality()
9120 * Returns 0 on success, -EINVAL on error
9130 return -EINVAL; in read_idle_message()
9142 * Returns 0 on success, -EINVAL on error
9153 * Returns 0 on success, -EINVAL on error
9164 return -EINVAL; in send_idle_message()
9172 * Returns 0 on success, -EINVAL on error
9187 * return 0 on success, -errno on error
9208 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { in do_quick_linkup()
9224 * sides must be done with LCB set-up before either in do_quick_linkup()
9253 ret = -EINVAL; in do_quick_linkup()
9272 * The simulator has only one loopback option - LCB. Switch in init_loopback()
9277 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) && in init_loopback()
9291 /* LCB loopback - handled at poll time */ in init_loopback()
9296 if (dd->icode == ICODE_FPGA_EMULATION) { in init_loopback()
9299 return -EINVAL; in init_loopback()
9309 return -EINVAL; in init_loopback()
9325 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) }, in opa_to_vc_link_widths()
9326 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) }, in opa_to_vc_link_widths()
9327 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) }, in opa_to_vc_link_widths()
9328 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) }, in opa_to_vc_link_widths()
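/*
 * Editor's note: the table above one-hot encodes each OPA width,
 * nX -> 1 << (n - 1). A minimal sketch of the same encoding:
 */
static inline unsigned int nx_to_vc_width(unsigned int n_lanes)
{
	return 1u << (n_lanes - 1);	/* 1X->0x1, 2X->0x2, 3X->0x4, 4X->0x8 */
}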
9343 struct hfi1_devdata *dd = ppd->dd; in set_local_link_attributes()
9352 /* set the local tx rate - need to read-modify-write */ in set_local_link_attributes()
9354 &rx_polarity_inversion, &ppd->local_tx_rate); in set_local_link_attributes()
9358 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { in set_local_link_attributes()
9360 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G) in set_local_link_attributes()
9361 ppd->local_tx_rate = 1; in set_local_link_attributes()
9363 ppd->local_tx_rate = 0; in set_local_link_attributes()
9366 ppd->local_tx_rate = 0; in set_local_link_attributes()
9367 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G) in set_local_link_attributes()
9368 ppd->local_tx_rate |= 2; in set_local_link_attributes()
9369 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G) in set_local_link_attributes()
9370 ppd->local_tx_rate |= 1; in set_local_link_attributes()
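/*
 * Editor's note: per the branches above, 8051 firmware 0.20 and
 * later takes the tx rate as a bitmask (bit 1 = 25 Gb/s, bit 0 =
 * 12.5 Gb/s), so enabling both speeds yields local_tx_rate = 3;
 * older firmware accepts only the single values 0 or 1.
 */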
9375 rx_polarity_inversion, ppd->local_tx_rate); in set_local_link_attributes()
9397 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init, in set_local_link_attributes()
9398 ppd->port_crc_mode_enabled); in set_local_link_attributes()
9414 if (dd->dc8051_ver >= dc8051_ver(1, 25, 0)) in set_local_link_attributes()
9419 ppd->link_width_enabled)); in set_local_link_attributes()
9424 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev); in set_local_link_attributes()
9448 if (!ppd->driver_link_ready) { in start_link()
9449 dd_dev_info(ppd->dd, in start_link()
9467 struct hfi1_devdata *dd = ppd->dd; in wait_for_qsfp_init()
9473 * effect of power up on plug-in. We ignore this false positive in wait_for_qsfp_init()
9476 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the in wait_for_qsfp_init()
9482 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1) in wait_for_qsfp_init()
9486 mask = read_csr(dd, dd->hfi1_id ? in wait_for_qsfp_init()
9501 struct hfi1_devdata *dd = ppd->dd; in set_qsfp_int_n()
9504 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK); in set_qsfp_int_n()
9508 * when we re-enable the IntN pin in set_qsfp_int_n()
9510 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, in set_qsfp_int_n()
9516 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); in set_qsfp_int_n()
9521 struct hfi1_devdata *dd = ppd->dd; in reset_qsfp()
9531 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); in reset_qsfp()
9534 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); in reset_qsfp()
9540 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); in reset_qsfp()
9561 struct hfi1_devdata *dd = ppd->dd; in handle_qsfp_error_conditions()
9576 if (ppd->host_link_state & HLS_DOWN) in handle_qsfp_error_conditions()
9651 /* Bytes 9-10 and 11-12 are reserved */ in handle_qsfp_error_conditions()
9652 /* Bytes 13-15 are vendor specific */ in handle_qsfp_error_conditions()
9665 ppd = qd->ppd; in qsfp_event()
9666 dd = ppd->dd; in qsfp_event()
9672 if (ppd->host_link_state == HLS_DN_DISABLE) { in qsfp_event()
9673 dd_dev_info(ppd->dd, in qsfp_event()
9680 * Turn DC back on after cable has been re-inserted. Up until in qsfp_event()
9685 if (qd->cache_refresh_required) { in qsfp_event()
9699 if (qd->check_interrupt_flags) { in qsfp_event()
9702 if (one_qsfp_read(ppd, dd->hfi1_id, 6, in qsfp_event()
9712 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); in qsfp_event()
9713 ppd->qsfp_info.check_interrupt_flags = 0; in qsfp_event()
9714 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, in qsfp_event()
9722 struct hfi1_pportdata *ppd = dd->pport; in init_qsfp_int()
9727 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, in init_qsfp_int()
9729 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, in init_qsfp_int()
9738 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT, in init_qsfp_int()
9742 if (!dd->hfi1_id) in init_qsfp_int()
9749 * Do a one-time initialization of the LCB block. in init_lcb()
9754 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) in init_lcb()
9770 * Perform a test read on the QSFP. Return 0 on success, -errno in test_qsfp_read()
9782 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd)) in test_qsfp_read()
9786 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1); in test_qsfp_read()
9790 return -EIO; in test_qsfp_read()
9812 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) { in try_start_link()
9813 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n"); in try_start_link()
9816 dd_dev_info(ppd->dd, in try_start_link()
9818 (int)ppd->qsfp_retry_count); in try_start_link()
9819 ppd->qsfp_retry_count++; in try_start_link()
9820 queue_delayed_work(ppd->link_wq, &ppd->start_link_work, in try_start_link()
9824 ppd->qsfp_retry_count = 0; in try_start_link()
9841 struct hfi1_devdata *dd = ppd->dd; in bringup_serdes()
9848 guid = ppd->guids[HFI1_PORT_GUID_INDEX]; in bringup_serdes()
9850 if (dd->base_guid) in bringup_serdes()
9851 guid = dd->base_guid + ppd->port - 1; in bringup_serdes()
9852 ppd->guids[HFI1_PORT_GUID_INDEX] = guid; in bringup_serdes()
9856 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP; in bringup_serdes()
9858 /* one-time init of the LCB */ in bringup_serdes()
9868 if (ppd->port_type == PORT_TYPE_QSFP) { in bringup_serdes()
9880 struct hfi1_devdata *dd = ppd->dd; in hfi1_quiet_serdes()
9889 ppd->driver_link_ready = 0; in hfi1_quiet_serdes()
9890 ppd->link_enabled = 0; in hfi1_quiet_serdes()
9892 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */ in hfi1_quiet_serdes()
9893 flush_delayed_work(&ppd->start_link_work); in hfi1_quiet_serdes()
9894 cancel_delayed_work_sync(&ppd->start_link_work); in hfi1_quiet_serdes()
9896 ppd->offline_disabled_reason = in hfi1_quiet_serdes()
9904 cancel_work_sync(&ppd->freeze_work); in hfi1_quiet_serdes()
9913 for (i = 0; i < dd->num_pports; i++, ppd++) { in init_cpu_counters()
9914 ppd->ibport_data.rvp.rc_acks = NULL; in init_cpu_counters()
9915 ppd->ibport_data.rvp.rc_qacks = NULL; in init_cpu_counters()
9916 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64); in init_cpu_counters()
9917 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64); in init_cpu_counters()
9918 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64); in init_cpu_counters()
9919 if (!ppd->ibport_data.rvp.rc_acks || in init_cpu_counters()
9920 !ppd->ibport_data.rvp.rc_delayed_comp || in init_cpu_counters()
9921 !ppd->ibport_data.rvp.rc_qacks) in init_cpu_counters()
9922 return -ENOMEM; in init_cpu_counters()
9936 if (!(dd->flags & HFI1_PRESENT)) in hfi1_put_tid()
9955 trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg); in hfi1_put_tid()
9956 writeq(reg, dd->rcvarray_wc + (index * 8)); in hfi1_put_tid()
9971 struct hfi1_devdata *dd = rcd->dd; in hfi1_clear_tids()
9975 for (i = rcd->eager_base; i < rcd->eager_base + in hfi1_clear_tids()
9976 rcd->egrbufs.alloced; i++) in hfi1_clear_tids()
9979 for (i = rcd->expected_base; in hfi1_clear_tids()
9980 i < rcd->expected_base + rcd->expected_count; i++) in hfi1_clear_tids()
10018 struct hfi1_devdata *dd = ppd->dd; in hfi1_get_ib_cfg()
10022 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */ in hfi1_get_ib_cfg()
10023 val = ppd->link_width_enabled; in hfi1_get_ib_cfg()
10025 case HFI1_IB_CFG_LWID: /* currently active Link-width */ in hfi1_get_ib_cfg()
10026 val = ppd->link_width_active; in hfi1_get_ib_cfg()
10029 val = ppd->link_speed_enabled; in hfi1_get_ib_cfg()
10032 val = ppd->link_speed_active; in hfi1_get_ib_cfg()
10035 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */ in hfi1_get_ib_cfg()
10036 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */ in hfi1_get_ib_cfg()
10041 val = ppd->actual_vls_operational; in hfi1_get_ib_cfg()
10050 val = ppd->overrun_threshold; in hfi1_get_ib_cfg()
10053 val = ppd->phy_error_threshold; in hfi1_get_ib_cfg()
10086 * HFI allows this to be set per-receive context, but the
10092 * The maximum non-payload (MTU) bytes in LRH.PktLen are in lrh_max_header_bytes()
10096 * dd->rcd[0].rcvhdrqentsize is in DW. in lrh_max_header_bytes()
10101 return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2; in lrh_max_header_bytes()
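/*
 * Editor's worked example: assuming a hypothetical 32 DW header
 * queue entry, the expression above gives (32 - 2 + 1) << 2 = 124
 * bytes: the entry size minus the 2 DW PBC/RHF, plus 1 DW ICRC,
 * shifted from DW to bytes.
 */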
10117 struct hfi1_devdata *dd = ppd->dd; in set_send_length()
10119 u32 maxvlmtu = dd->vld[15].mtu; in set_send_length()
10120 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) in set_send_length()
10126 for (i = 0; i < ppd->vls_supported; i++) { in set_send_length()
10127 if (dd->vld[i].mtu > maxvlmtu) in set_send_length()
10128 maxvlmtu = dd->vld[i].mtu; in set_send_length()
10130 len1 |= (((dd->vld[i].mtu + max_hb) >> 2) in set_send_length()
10134 len2 |= (((dd->vld[i].mtu + max_hb) >> 2) in set_send_length()
10142 for (i = 0; i < ppd->vls_supported; i++) { in set_send_length()
10143 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50), in set_send_length()
10144 sc_mtu_to_threshold(dd->vld[i].sc, in set_send_length()
10145 dd->vld[i].mtu, in set_send_length()
10146 get_hdrqentsize(dd->rcd[0]))); in set_send_length()
10152 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), in set_send_length()
10153 sc_mtu_to_threshold(dd->vld[15].sc, in set_send_length()
10154 dd->vld[15].mtu, in set_send_length()
10155 dd->rcd[0]->rcvhdrqentsize)); in set_send_length()
10156 sc_set_cr_threshold(dd->vld[15].sc, thres); in set_send_length()
10161 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG); in set_send_length()
10165 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1); in set_send_length()
10172 struct hfi1_devdata *dd = ppd->dd; in set_lidlmc()
10173 u32 mask = ~((1U << ppd->lmc) - 1); in set_lidlmc()
10174 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); in set_lidlmc()
10181 lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid; in set_lidlmc()
10188 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1); in set_lidlmc()
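/*
 * Editor's sketch: the mask above clears the low LMC bits so the
 * port answers 2^LMC consecutive LIDs; e.g. LMC = 2 gives mask
 * ~0x3 and base..base+3 all match. Standalone form (names
 * hypothetical):
 */
static inline int lid_matches(unsigned int lid, unsigned int base,
			      unsigned int lmc)
{
	unsigned int mask = ~((1u << lmc) - 1);

	return (lid & mask) == (base & mask);
}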
10282 struct hfi1_devdata *dd = ppd->dd; in decode_state_complete()
10290 * [ 0: 0] - success in decode_state_complete()
10291 * [ 3: 1] - state in decode_state_complete()
10292 * [ 7: 4] - next state timeout in decode_state_complete()
10293 * [15: 8] - reason code in decode_state_complete()
10294 * [31:16] - lanes in decode_state_complete()
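/*
 * Editor's sketch (hypothetical helper): extracting the fields of a
 * state-complete frame per the bit layout documented above.
 */
#include <stdint.h>

struct lni_frame {
	unsigned int success;	/* [ 0: 0] */
	unsigned int state;	/* [ 3: 1] */
	unsigned int timeout;	/* [ 7: 4] */
	unsigned int reason;	/* [15: 8] */
	unsigned int lanes;	/* [31:16] */
};

static inline void decode_lni_frame(uint32_t frame, struct lni_frame *d)
{
	d->success = frame & 0x1;
	d->state = (frame >> 1) & 0x7;
	d->timeout = (frame >> 4) & 0xf;
	d->reason = (frame >> 8) & 0xff;
	d->lanes = (frame >> 16) & 0xffff;
}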
10322 read_last_local_state(ppd->dd, &last_local_state); in check_lni_states()
10323 read_last_remote_state(ppd->dd, &last_remote_state); in check_lni_states()
10328 * training in progress. in check_lni_states()
10352 return -ETIMEDOUT; in wait_link_transfer_active()
10362 struct hfi1_devdata *dd = ppd->dd; in force_logical_link_state_down()
10391 dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n"); in force_logical_link_state_down()
10396 * Expects ppd->hls_mutex to be held.
10404 struct hfi1_devdata *dd = ppd->dd; in goto_offline()
10411 previous_state = ppd->host_link_state; in goto_offline()
10412 ppd->host_link_state = HLS_GOING_OFFLINE; in goto_offline()
10421 return -EINVAL; in goto_offline()
10423 if (ppd->offline_disabled_reason == in goto_offline()
10425 ppd->offline_disabled_reason = in goto_offline()
10433 if (ppd->port_type == PORT_TYPE_QSFP && in goto_offline()
10434 ppd->qsfp_info.limiting_active && in goto_offline()
10460 * Now in charge of LCB - must be after the physical state is in goto_offline()
10471 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ in goto_offline()
10488 ppd->host_link_state = HLS_DN_OFFLINE; in goto_offline()
10495 * - change our state in goto_offline()
10496 * - notify others if we were previously in a linkup state in goto_offline()
10498 ppd->host_link_state = HLS_DN_OFFLINE; in goto_offline()
10508 ppd->qsfp_info.reset_needed = 0; in goto_offline()
10512 ppd->link_width_active = 0; in goto_offline()
10513 ppd->link_width_downgrade_tx_active = 0; in goto_offline()
10514 ppd->link_width_downgrade_rx_active = 0; in goto_offline()
10515 ppd->current_egress_rate = 0; in goto_offline()
10546 switch (ppd->linkinit_reason) { in link_state_reason_name()
10565 * driver_pstate - convert the driver's notion of a port's
10567 * Return -1 (converted to a u32) to indicate error.
10571 switch (ppd->host_link_state) { in driver_pstate()
10592 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", in driver_pstate()
10593 ppd->host_link_state); in driver_pstate()
10594 return -1; in driver_pstate()
10599 * driver_lstate - convert the driver's notion of a port's
10600 * state (an HLS_*) into a logical state (an IB_PORT_*). Return -1 in driver_lstate()
10605 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN)) in driver_lstate()
10608 switch (ppd->host_link_state & HLS_UP) { in driver_lstate()
10616 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", in driver_lstate()
10617 ppd->host_link_state); in driver_lstate()
10618 return -1; in driver_lstate()
10625 if (ppd->local_link_down_reason.latest == 0 && in set_link_down_reason()
10626 ppd->neigh_link_down_reason.latest == 0) { in set_link_down_reason()
10627 ppd->local_link_down_reason.latest = lcl_reason; in set_link_down_reason()
10628 ppd->neigh_link_down_reason.latest = neigh_reason; in set_link_down_reason()
10629 ppd->remote_link_down_reason = rem_reason; in set_link_down_reason()
10634 * data_vls_operational() - Verify if data VL BCT credits and MTU
10638 * Return: true if Ok, false otherwise. in data_vls_operational()
10645 if (!ppd->actual_vls_operational) in data_vls_operational()
10648 for (i = 0; i < ppd->vls_supported; i++) { in data_vls_operational()
10649 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i)); in data_vls_operational()
10650 if ((reg && !ppd->dd->vld[i].mtu) || in data_vls_operational()
10651 (!reg && ppd->dd->vld[i].mtu)) in data_vls_operational()
10664 * Returns 0 on success, -errno on failure.
10668 struct hfi1_devdata *dd = ppd->dd; in set_link_state()
10673 mutex_lock(&ppd->hls_lock); in set_link_state()
10679 /* interpret poll -> poll as a link bounce */ in set_link_state()
10680 poll_bounce = ppd->host_link_state == HLS_DN_POLL && in set_link_state()
10684 link_state_name(ppd->host_link_state), in set_link_state()
10695 ppd->is_sm_config_started = 0; in set_link_state()
10701 if (ppd->host_link_state == state && !poll_bounce) in set_link_state()
10706 if (ppd->host_link_state == HLS_DN_POLL && in set_link_state()
10707 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) { in set_link_state()
10716 } else if (ppd->host_link_state != HLS_GOING_UP) { in set_link_state()
10728 "%s: physical state did not change to LINK-UP\n", in set_link_state()
10742 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR) in set_link_state()
10743 ppd->linkinit_reason = in set_link_state()
10757 update_xmit_counters(ppd, ppd->link_width_active); in set_link_state()
10759 ppd->host_link_state = HLS_UP_INIT; in set_link_state()
10763 if (ppd->host_link_state != HLS_UP_INIT) in set_link_state()
10770 ret = -EINVAL; in set_link_state()
10782 ppd->host_link_state = HLS_UP_ARMED; in set_link_state()
10789 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) in set_link_state()
10790 ppd->neighbor_normal = 1; in set_link_state()
10793 if (ppd->host_link_state != HLS_UP_ARMED) in set_link_state()
10805 ppd->host_link_state = HLS_UP_ACTIVE; in set_link_state()
10809 event.device = &dd->verbs_dev.rdi.ibdev; in set_link_state()
10810 event.element.port_num = ppd->port; in set_link_state()
10815 if ((ppd->host_link_state == HLS_DN_DISABLE || in set_link_state()
10816 ppd->host_link_state == HLS_DN_OFFLINE) && in set_link_state()
10817 dd->dc_shutdown) in set_link_state()
10822 if (ppd->host_link_state != HLS_DN_OFFLINE) { in set_link_state()
10823 u8 tmp = ppd->link_enabled; in set_link_state()
10825 ret = goto_offline(ppd, ppd->remote_link_down_reason); in set_link_state()
10827 ppd->link_enabled = tmp; in set_link_state()
10830 ppd->remote_link_down_reason = 0; in set_link_state()
10832 if (ppd->driver_link_ready) in set_link_state()
10833 ppd->link_enabled = 1; in set_link_state()
10836 set_all_slowpath(ppd->dd); in set_link_state()
10841 ppd->port_error_action = 0; in set_link_state()
10855 ret = -EINVAL; in set_link_state()
10865 ppd->host_link_state = HLS_DN_POLL; in set_link_state()
10866 ppd->offline_disabled_reason = in set_link_state()
10879 ppd->link_enabled = 0; in set_link_state()
10884 if (ppd->host_link_state != HLS_DN_OFFLINE) { in set_link_state()
10885 ret = goto_offline(ppd, ppd->remote_link_down_reason); in set_link_state()
10888 ppd->remote_link_down_reason = 0; in set_link_state()
10891 if (!dd->dc_shutdown) { in set_link_state()
10897 ret = -EINVAL; in set_link_state()
10909 ppd->host_link_state = HLS_DN_DISABLE; in set_link_state()
10912 if (ppd->host_link_state == HLS_DN_DISABLE) in set_link_state()
10916 ret = goto_offline(ppd, ppd->remote_link_down_reason); in set_link_state()
10918 ppd->remote_link_down_reason = 0; in set_link_state()
10921 if (ppd->host_link_state != HLS_DN_POLL) in set_link_state()
10923 ppd->host_link_state = HLS_VERIFY_CAP; in set_link_state()
10927 if (ppd->host_link_state != HLS_VERIFY_CAP) in set_link_state()
10935 ret = -EINVAL; in set_link_state()
10938 ppd->host_link_state = HLS_GOING_UP; in set_link_state()
10946 ret = -EINVAL; in set_link_state()
10954 __func__, link_state_name(ppd->host_link_state), in set_link_state()
10956 ret = -EINVAL; in set_link_state()
10959 mutex_unlock(&ppd->hls_lock); in set_link_state()
10984 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg); in hfi1_set_ib_cfg()
10989 ret = -EINVAL; in hfi1_set_ib_cfg()
10992 if (ppd->vls_operational != val) { in hfi1_set_ib_cfg()
10993 ppd->vls_operational = val; in hfi1_set_ib_cfg()
10994 if (!ppd->port) in hfi1_set_ib_cfg()
10995 ret = -EINVAL; in hfi1_set_ib_cfg()
11006 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */ in hfi1_set_ib_cfg()
11007 ppd->link_width_enabled = val & ppd->link_width_supported; in hfi1_set_ib_cfg()
11010 ppd->link_width_downgrade_enabled = in hfi1_set_ib_cfg()
11011 val & ppd->link_width_downgrade_supported; in hfi1_set_ib_cfg()
11014 ppd->link_speed_enabled = val & ppd->link_speed_supported; in hfi1_set_ib_cfg()
11021 ppd->overrun_threshold = val; in hfi1_set_ib_cfg()
11028 ppd->phy_error_threshold = val; in hfi1_set_ib_cfg()
11042 dd_dev_info(ppd->dd, in hfi1_set_ib_cfg()
11071 spin_lock_init(&ppd->vl_arb_cache[i].lock); in init_vl_arb_caches()
11085 spin_lock(&ppd->vl_arb_cache[idx].lock); in vl_arb_lock_cache()
11086 return &ppd->vl_arb_cache[idx]; in vl_arb_lock_cache()
11091 spin_unlock(&ppd->vl_arb_cache[idx].lock); in vl_arb_unlock_cache()
11097 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl)); in vl_arb_get_cache()
11103 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); in vl_arb_set_cache()
11109 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); in vl_arb_match_cache()
11117 struct hfi1_devdata *dd = ppd->dd; in set_vl_weights()
11122 mutex_lock(&ppd->hls_lock); in set_vl_weights()
11124 if (ppd->host_link_state & HLS_UP) in set_vl_weights()
11131 * Before adjusting VL arbitration weights, empty per-VL in set_vl_weights()
11141 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n", in set_vl_weights()
11151 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK) in set_vl_weights()
11153 | (((u64)vl->weight in set_vl_weights()
11164 mutex_unlock(&ppd->hls_lock); in set_vl_weights()
11177 vll->dedicated = cpu_to_be16( in read_one_cm_vl()
11180 vll->shared = cpu_to_be16( in read_one_cm_vl()
11197 /* OPA and HFI have a 1-1 mapping */ in get_buffer_control()
11199 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]); in get_buffer_control()
11201 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */ in get_buffer_control()
11202 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]); in get_buffer_control()
11205 bc->overall_shared_limit = cpu_to_be16( in get_buffer_control()
11220 /* each register contains 16 SC->VLnt mappings, 4 bits each */ in get_sc2vlnt()
11225 dp->vlnt[2 * i] = byte & 0xf; in get_sc2vlnt()
11226 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4; in get_sc2vlnt()
11233 dp->vlnt[16 + (2 * i)] = byte & 0xf; in get_sc2vlnt()
11234 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4; in get_sc2vlnt()
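/*
 * Editor's sketch: each byte read above packs two 4-bit SC->VLnt
 * entries, low nibble first. Standalone form (names hypothetical):
 */
static inline void unpack_vlnt_byte(unsigned char byte,
				    unsigned char *even_sc,
				    unsigned char *odd_sc)
{
	*even_sc = byte & 0xf;		/* SC 2i */
	*odd_sc = (byte >> 4) & 0xf;	/* SC 2i + 1 */
}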
11245 vl->vl = 0xf; in get_vlarb_preempt()
11246 vl->weight = 0; in get_vlarb_preempt()
11254 0, dp->vlnt[0] & 0xf, in set_sc2vlnt()
11255 1, dp->vlnt[1] & 0xf, in set_sc2vlnt()
11256 2, dp->vlnt[2] & 0xf, in set_sc2vlnt()
11257 3, dp->vlnt[3] & 0xf, in set_sc2vlnt()
11258 4, dp->vlnt[4] & 0xf, in set_sc2vlnt()
11259 5, dp->vlnt[5] & 0xf, in set_sc2vlnt()
11260 6, dp->vlnt[6] & 0xf, in set_sc2vlnt()
11261 7, dp->vlnt[7] & 0xf, in set_sc2vlnt()
11262 8, dp->vlnt[8] & 0xf, in set_sc2vlnt()
11263 9, dp->vlnt[9] & 0xf, in set_sc2vlnt()
11264 10, dp->vlnt[10] & 0xf, in set_sc2vlnt()
11265 11, dp->vlnt[11] & 0xf, in set_sc2vlnt()
11266 12, dp->vlnt[12] & 0xf, in set_sc2vlnt()
11267 13, dp->vlnt[13] & 0xf, in set_sc2vlnt()
11268 14, dp->vlnt[14] & 0xf, in set_sc2vlnt()
11269 15, dp->vlnt[15] & 0xf)); in set_sc2vlnt()
11272 16, dp->vlnt[16] & 0xf, in set_sc2vlnt()
11273 17, dp->vlnt[17] & 0xf, in set_sc2vlnt()
11274 18, dp->vlnt[18] & 0xf, in set_sc2vlnt()
11275 19, dp->vlnt[19] & 0xf, in set_sc2vlnt()
11276 20, dp->vlnt[20] & 0xf, in set_sc2vlnt()
11277 21, dp->vlnt[21] & 0xf, in set_sc2vlnt()
11278 22, dp->vlnt[22] & 0xf, in set_sc2vlnt()
11279 23, dp->vlnt[23] & 0xf, in set_sc2vlnt()
11280 24, dp->vlnt[24] & 0xf, in set_sc2vlnt()
11281 25, dp->vlnt[25] & 0xf, in set_sc2vlnt()
11282 26, dp->vlnt[26] & 0xf, in set_sc2vlnt()
11283 27, dp->vlnt[27] & 0xf, in set_sc2vlnt()
11284 28, dp->vlnt[28] & 0xf, in set_sc2vlnt()
11285 29, dp->vlnt[29] & 0xf, in set_sc2vlnt()
11286 30, dp->vlnt[30] & 0xf, in set_sc2vlnt()
11287 31, dp->vlnt[31] & 0xf)); in set_sc2vlnt()
11320 /* set the given per-VL shared limit */
11337 /* set the given per-VL dedicated limit */
11354 /* spin until the given per-VL status mask bits clear */
11410 struct hfi1_devdata *dd = ppd->dd; in set_buffer_control()
11443 new_total += be16_to_cpu(new_bc->vl[i].dedicated); in set_buffer_control()
11447 be16_to_cpu(new_bc->vl[i].dedicated)); in set_buffer_control()
11449 be16_to_cpu(new_bc->vl[i].shared)); in set_buffer_control()
11450 new_bc->vl[i].dedicated = 0; in set_buffer_control()
11451 new_bc->vl[i].shared = 0; in set_buffer_control()
11453 new_total += be16_to_cpu(new_bc->overall_shared_limit); in set_buffer_control()
11476 this_shared_changing = new_bc->vl[i].shared in set_buffer_control()
11480 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated || in set_buffer_control()
11486 if (be16_to_cpu(new_bc->vl[i].dedicated) < in set_buffer_control()
11501 if ((be16_to_cpu(new_bc->overall_shared_limit) < in set_buffer_control()
11529 be16_to_cpu(new_bc-> in set_buffer_control()
11532 new_bc->vl[i].dedicated; in set_buffer_control()
11543 if (be16_to_cpu(new_bc->vl[i].dedicated) > in set_buffer_control()
11546 be16_to_cpu(new_bc-> in set_buffer_control()
11556 if (be16_to_cpu(new_bc->vl[i].shared) > in set_buffer_control()
11558 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared)); in set_buffer_control()
11562 if (be16_to_cpu(new_bc->overall_shared_limit) > in set_buffer_control()
11565 be16_to_cpu(new_bc->overall_shared_limit)); in set_buffer_control()
11577 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 || in set_buffer_control()
11578 be16_to_cpu(new_bc->vl[i].shared) > 0) in set_buffer_control()
11580 ppd->actual_vls_operational = vl_count; in set_buffer_control()
11581 ret = sdma_map_init(dd, ppd->port - 1, vl_count ? in set_buffer_control()
11582 ppd->actual_vls_operational : in set_buffer_control()
11583 ppd->vls_operational, in set_buffer_control()
11586 ret = pio_map_init(dd, ppd->port - 1, vl_count ? in set_buffer_control()
11587 ppd->actual_vls_operational : in set_buffer_control()
11588 ppd->vls_operational, NULL); in set_buffer_control()
11628 size = get_buffer_control(ppd->dd, t, NULL); in fm_get_table()
11631 size = get_sc2vlnt(ppd->dd, t); in fm_get_table()
11636 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t); in fm_get_table()
11646 return -EINVAL; in fm_get_table()
11686 set_sc2vlnt(ppd->dd, t); in fm_set_table()
11689 ret = -EINVAL; in fm_set_table()
11697 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11710 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11711 * Just re-enables all data VLs (the "fill" part happens
11712 * automatically - the name was chosen for symmetry with
11715 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11728 * drain_data_vls() - assumes that disable_data_vls() has been called,
11729 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
11740 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11746 * // do things with per-VL resources
11762 * the cclock, a non-zero ns will always have a non-zero result.
11768 if (dd->icode == ICODE_FPGA_EMULATION) in ns_to_cclock()
11779 * the cclock, a non-zero cclocks will always have a non-zero result.
11785 if (dd->icode == ICODE_FPGA_EMULATION) in cclock_to_ns()
11802 struct hfi1_devdata *dd = rcd->dd; in adjust_rcv_timeout()
11803 u32 timeout = rcd->rcvavail_timeout; in adjust_rcv_timeout()
11827 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */ in adjust_rcv_timeout()
11829 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr); in adjust_rcv_timeout()
11832 rcd->rcvavail_timeout = timeout; in adjust_rcv_timeout()
11837 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT, in adjust_rcv_timeout()
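/*
 * Editor's note: the update above backs the receive-available
 * timeout off multiplicatively - double it, clamped at the
 * rcv_intr_timeout_csr maximum - before writing the new value to
 * RCV_AVAIL_TIME_OUT.
 */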
11845 struct hfi1_devdata *dd = rcd->dd; in update_usrhead()
11847 u32 ctxt = rcd->ctxt; in update_usrhead()
11870 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) in hdrqempty()
11876 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); in hdrqempty()
11895 * 0xB-0xF - reserved (Receive Array only)
11918 * encode_rcv_header_entry_size - return chip specific encoding for size
11938 * hfi1_validate_rcvhdrcnt - validate hdrcnt
11946 return -EINVAL; in hfi1_validate_rcvhdrcnt()
11953 return -EINVAL; in hfi1_validate_rcvhdrcnt()
11959 return -EINVAL; in hfi1_validate_rcvhdrcnt()
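/*
 * Editor's sketch of the validation pattern above: a header queue
 * count must fall within [min, max] and be a multiple of the
 * increment; the bounds here are hypothetical placeholders.
 */
#include <errno.h>

static inline int validate_hdrq_count(unsigned int cnt, unsigned int min,
				      unsigned int max, unsigned int incr)
{
	if (cnt < min || cnt > max || cnt % incr)
		return -EINVAL;
	return 0;
}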
11966 * set_hdrq_regs - set header queue registers for context
11992 dd->rcvhdrtail_dummy_dma); in set_hdrq_regs()
12005 ctxt = rcd->ctxt; in hfi1_rcvctrl()
12015 rcd->rcvhdrq_dma); in hfi1_rcvctrl()
12018 rcd->rcvhdrqtailaddr_dma); in hfi1_rcvctrl()
12030 memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd)); in hfi1_rcvctrl()
12033 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr; in hfi1_rcvctrl()
12040 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size) in hfi1_rcvctrl()
12044 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */ in hfi1_rcvctrl()
12052 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT) in hfi1_rcvctrl()
12055 (((rcd->eager_base >> RCV_SHIFT) in hfi1_rcvctrl()
12062 * rcd->expected_count is set to individual RcvArray entries, in hfi1_rcvctrl()
12063 * not pairs, and the CSR takes a pair-count in groups of in hfi1_rcvctrl()
12066 reg = (((rcd->expected_count >> RCV_SHIFT) in hfi1_rcvctrl()
12069 (((rcd->expected_base >> RCV_SHIFT) in hfi1_rcvctrl()
12083 if (dd->rcvhdrtail_dummy_dma) { in hfi1_rcvctrl()
12085 dd->rcvhdrtail_dummy_dma); in hfi1_rcvctrl()
12093 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, in hfi1_rcvctrl()
12094 IS_RCVAVAIL_START + rcd->ctxt, true); in hfi1_rcvctrl()
12098 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, in hfi1_rcvctrl()
12099 IS_RCVAVAIL_START + rcd->ctxt, false); in hfi1_rcvctrl()
12115 * In one-packet-per-eager mode, the size comes from in hfi1_rcvctrl()
12132 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, in hfi1_rcvctrl()
12133 IS_RCVURGENT_START + rcd->ctxt, true); in hfi1_rcvctrl()
12135 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, in hfi1_rcvctrl()
12136 IS_RCVURGENT_START + rcd->ctxt, false); in hfi1_rcvctrl()
12165 (u64)rcd->rcvavail_timeout << in hfi1_rcvctrl()
12180 dd->rcvhdrtail_dummy_dma); in hfi1_rcvctrl()
12189 ret = dd->cntrnameslen; in hfi1_read_cntrs()
12190 *namep = dd->cntrnames; in hfi1_read_cntrs()
12195 ret = (dd->ndevcntrs) * sizeof(u64); in hfi1_read_cntrs()
12198 *cntrp = dd->cntrs; in hfi1_read_cntrs()
12205 hfi1_cdbg(CNTR, "reading %s", entry->name); in hfi1_read_cntrs()
12206 if (entry->flags & CNTR_DISABLED) { in hfi1_read_cntrs()
12210 if (entry->flags & CNTR_VL) { in hfi1_read_cntrs()
12213 val = entry->rw_cntr(entry, in hfi1_read_cntrs()
12221 dd->cntrs[entry->offset + j] = in hfi1_read_cntrs()
12224 } else if (entry->flags & CNTR_SDMA) { in hfi1_read_cntrs()
12230 entry->rw_cntr(entry, dd, j, in hfi1_read_cntrs()
12235 dd->cntrs[entry->offset + j] = in hfi1_read_cntrs()
12239 val = entry->rw_cntr(entry, dd, in hfi1_read_cntrs()
12242 dd->cntrs[entry->offset] = val; in hfi1_read_cntrs()
12260 ret = ppd->dd->portcntrnameslen; in hfi1_read_portcntrs()
12261 *namep = ppd->dd->portcntrnames; in hfi1_read_portcntrs()
12266 ret = ppd->dd->nportcntrs * sizeof(u64); in hfi1_read_portcntrs()
12267 *cntrp = ppd->cntrs; in hfi1_read_portcntrs()
12271 hfi1_cdbg(CNTR, "reading %s", entry->name); in hfi1_read_portcntrs()
12272 if (entry->flags & CNTR_DISABLED) { in hfi1_read_portcntrs()
12278 if (entry->flags & CNTR_VL) { in hfi1_read_portcntrs()
12281 val = entry->rw_cntr(entry, ppd, j, in hfi1_read_portcntrs()
12288 ppd->cntrs[entry->offset + j] = val; in hfi1_read_portcntrs()
12291 val = entry->rw_cntr(entry, ppd, in hfi1_read_portcntrs()
12295 ppd->cntrs[entry->offset] = val; in hfi1_read_portcntrs()
12308 if (dd->synth_stats_timer.function) in free_cntrs()
12309 del_timer_sync(&dd->synth_stats_timer); in free_cntrs()
12311 for (i = 0; i < dd->num_pports; i++, ppd++) { in free_cntrs()
12312 kfree(ppd->cntrs); in free_cntrs()
12313 kfree(ppd->scntrs); in free_cntrs()
12314 free_percpu(ppd->ibport_data.rvp.rc_acks); in free_cntrs()
12315 free_percpu(ppd->ibport_data.rvp.rc_qacks); in free_cntrs()
12316 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp); in free_cntrs()
12317 ppd->cntrs = NULL; in free_cntrs()
12318 ppd->scntrs = NULL; in free_cntrs()
12319 ppd->ibport_data.rvp.rc_acks = NULL; in free_cntrs()
12320 ppd->ibport_data.rvp.rc_qacks = NULL; in free_cntrs()
12321 ppd->ibport_data.rvp.rc_delayed_comp = NULL; in free_cntrs()
12323 kfree(dd->portcntrnames); in free_cntrs()
12324 dd->portcntrnames = NULL; in free_cntrs()
12325 kfree(dd->cntrs); in free_cntrs()
12326 dd->cntrs = NULL; in free_cntrs()
12327 kfree(dd->scntrs); in free_cntrs()
12328 dd->scntrs = NULL; in free_cntrs()
12329 kfree(dd->cntrnames); in free_cntrs()
12330 dd->cntrnames = NULL; in free_cntrs()
12331 if (dd->update_cntr_wq) { in free_cntrs()
12332 destroy_workqueue(dd->update_cntr_wq); in free_cntrs()
12333 dd->update_cntr_wq = NULL; in free_cntrs()
12343 if (entry->flags & CNTR_DISABLED) { in read_dev_port_cntr()
12344 dd_dev_err(dd, "Counter %s not enabled", entry->name); in read_dev_port_cntr()
12348 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval); in read_dev_port_cntr()
12350 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0); in read_dev_port_cntr()
12353 if (entry->flags & CNTR_SYNTH) { in read_dev_port_cntr()
12359 if (entry->flags & CNTR_32BIT) { in read_dev_port_cntr()
12394 if (entry->flags & CNTR_DISABLED) { in write_dev_port_cntr()
12395 dd_dev_err(dd, "Counter %s not enabled", entry->name); in write_dev_port_cntr()
12399 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval); in write_dev_port_cntr()
12401 if (entry->flags & CNTR_SYNTH) { in write_dev_port_cntr()
12403 if (entry->flags & CNTR_32BIT) { in write_dev_port_cntr()
12404 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, in write_dev_port_cntr()
12408 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, in write_dev_port_cntr()
12412 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data); in write_dev_port_cntr()
12428 sval = dd->scntrs + entry->offset; in read_dev_cntr()
12442 sval = dd->scntrs + entry->offset; in write_dev_cntr()
12456 sval = ppd->scntrs + entry->offset; in read_port_cntr()
12461 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && in read_port_cntr()
12467 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl); in read_port_cntr()
12476 sval = ppd->scntrs + entry->offset; in write_port_cntr()
12481 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && in write_port_cntr()
12487 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data); in write_port_cntr()
12509 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); in do_update_synth_timer()
12512 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); in do_update_synth_timer()
12517 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx); in do_update_synth_timer()
12519 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) { in do_update_synth_timer()
12526 dd->unit); in do_update_synth_timer()
12528 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx); in do_update_synth_timer()
12530 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit, in do_update_synth_timer()
12534 dd->unit); in do_update_synth_timer()
12540 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit); in do_update_synth_timer()
12543 if (entry->flags & CNTR_VL) { in do_update_synth_timer()
12551 for (i = 0; i < dd->num_pports; i++, ppd++) { in do_update_synth_timer()
12554 if (entry->flags & CNTR_VL) { in do_update_synth_timer()
12570 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, in do_update_synth_timer()
12574 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, in do_update_synth_timer()
12578 dd->unit, dd->last_tx, dd->last_rx); in do_update_synth_timer()
12581 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); in do_update_synth_timer()
12589 queue_work(dd->update_cntr_wq, &dd->update_cntr_work); in update_synth_timer()
12590 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); in update_synth_timer()
12606 timer_setup(&dd->synth_stats_timer, update_synth_timer, 0); in init_cntrs()
12613 dd->ndevcntrs = 0; in init_cntrs()
12623 dev_cntrs[i].offset = dd->ndevcntrs; in init_cntrs()
12628 /* Add ",32" for 32-bit counters */ in init_cntrs()
12632 dd->ndevcntrs++; in init_cntrs()
12635 dev_cntrs[i].offset = dd->ndevcntrs; in init_cntrs()
12640 /* Add ",32" for 32-bit counters */ in init_cntrs()
12644 dd->ndevcntrs++; in init_cntrs()
12649 /* Add ",32" for 32-bit counters */ in init_cntrs()
12652 dev_cntrs[i].offset = dd->ndevcntrs; in init_cntrs()
12653 dd->ndevcntrs++; in init_cntrs()
12658 dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64), in init_cntrs()
12660 if (!dd->cntrs) in init_cntrs()
12663 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); in init_cntrs()
12664 if (!dd->scntrs) in init_cntrs()
12668 dd->cntrnameslen = sz; in init_cntrs()
12669 dd->cntrnames = kmalloc(sz, GFP_KERNEL); in init_cntrs()
12670 if (!dd->cntrnames) in init_cntrs()
12674 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) { in init_cntrs()
12731 rcv_ctxts = dd->num_rcv_contexts; in init_cntrs()
12739 dd->nportcntrs = 0; in init_cntrs()
12747 port_cntrs[i].offset = dd->nportcntrs; in init_cntrs()
12752 /* Add ",32" for 32-bit counters */ in init_cntrs()
12756 dd->nportcntrs++; in init_cntrs()
12761 /* Add ",32" for 32-bit counters */ in init_cntrs()
12764 port_cntrs[i].offset = dd->nportcntrs; in init_cntrs()
12765 dd->nportcntrs++; in init_cntrs()
12770 dd->portcntrnameslen = sz; in init_cntrs()
12771 dd->portcntrnames = kmalloc(sz, GFP_KERNEL); in init_cntrs()
12772 if (!dd->portcntrnames) in init_cntrs()
12776 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) { in init_cntrs()
12812 for (i = 0; i < dd->num_pports; i++, ppd++) { in init_cntrs()
12813 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); in init_cntrs()
12814 if (!ppd->cntrs) in init_cntrs()
12817 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); in init_cntrs()
12818 if (!ppd->scntrs) in init_cntrs()
12826 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d", in init_cntrs()
12827 WQ_MEM_RECLAIM, dd->unit); in init_cntrs()
12828 if (!dd->update_cntr_wq) in init_cntrs()
12831 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer); in init_cntrs()
12833 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); in init_cntrs()
12837 return -ENOMEM; in init_cntrs()
12861 /* look at the HFI meta-states only */ in chip_to_opa_pstate()
12921 * update_statusp - Update userspace status flag
12935 * memory. Do it here to ensure a reliable state - this is in update_statusp()
12941 if (ppd->statusp) { in update_statusp()
12945 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF | in update_statusp()
12949 *ppd->statusp |= HFI1_STATUS_IB_CONF; in update_statusp()
12952 *ppd->statusp |= HFI1_STATUS_IB_READY; in update_statusp()
12956 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n", in update_statusp()
12961 * wait_logical_linkstate - wait for an IB link state change to occur
12968 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12978 new_state = chip_to_opa_lstate(ppd->dd, in wait_logical_linkstate()
12979 read_logical_state(ppd->dd)); in wait_logical_linkstate()
12983 dd_dev_err(ppd->dd, in wait_logical_linkstate()
12986 return -ETIMEDOUT; in wait_logical_linkstate()
12996 u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state); in log_state_transition()
12998 dd_dev_info(ppd->dd, in log_state_transition()
13009 u32 read_state = read_physical_state(ppd->dd); in log_physical_state()
13014 dd_dev_err(ppd->dd, in log_physical_state()
13021 * wait_physical_linkstate - wait for a physical link state change to occur in wait_physical_linkstate()
13027 * Returns 0 if state reached, otherwise -ETIMEDOUT.
13037 read_state = read_physical_state(ppd->dd); in wait_physical_linkstate()
13041 dd_dev_err(ppd->dd, in wait_physical_linkstate()
13044 return -ETIMEDOUT; in wait_physical_linkstate()
13046 usleep_range(1950, 2050); /* sleep 2ms-ish */ in wait_physical_linkstate()
13054 * wait_phys_link_offline_substates - wait for any offline substate in wait_phys_link_offline_substates()
13060 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
13070 read_state = read_physical_state(ppd->dd); in wait_phys_link_offline_substates()
13074 dd_dev_err(ppd->dd, in wait_phys_link_offline_substates()
13077 return -ETIMEDOUT; in wait_phys_link_offline_substates()
13079 usleep_range(1950, 2050); /* sleep 2ms-ish */ in wait_phys_link_offline_substates()
13087 * wait_phys_link_out_of_offline - wait for any out of offline state
13093 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
13103 read_state = read_physical_state(ppd->dd); in wait_phys_link_out_of_offline()
13107 dd_dev_err(ppd->dd, in wait_phys_link_out_of_offline()
13110 return -ETIMEDOUT; in wait_phys_link_out_of_offline()
13112 usleep_range(1950, 2050); /* sleep 2ms-ish */ in wait_phys_link_out_of_offline()
13128 struct hfi1_devdata *dd = sc->dd; in hfi1_init_ctxt()
13130 u8 set = (sc->type == SC_USER ? in hfi1_init_ctxt()
13133 reg = read_kctxt_csr(dd, sc->hw_context, in hfi1_init_ctxt()
13139 write_kctxt_csr(dd, sc->hw_context, in hfi1_init_ctxt()
13149 if (dd->icode != ICODE_RTL_SILICON) { in hfi1_tempsense_rd()
13153 return -EINVAL; in hfi1_tempsense_rd()
13156 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) & in hfi1_tempsense_rd()
13158 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) & in hfi1_tempsense_rd()
13160 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) & in hfi1_tempsense_rd()
13162 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) & in hfi1_tempsense_rd()
13164 /* triggers is a 3-bit value - 1 bit per trigger. */ in hfi1_tempsense_rd()
13165 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7); in hfi1_tempsense_rd()
13173 * read_mod_write() - Calculate the IRQ register index and set/clear the bits
13186 spin_lock(&dd->irq_src_lock); in read_mod_write()
13193 spin_unlock(&dd->irq_src_lock); in read_mod_write()
13197 * set_intr_bits() - Enable/disable a range (one or more) IRQ sources
13212 return -EINVAL; in set_intr_bits()
13215 return -ERANGE; in set_intr_bits()
13221 read_mod_write(dd, src - 1, bits, set); in set_intr_bits()
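/*
 * Editor's note: interrupt sources are spread across 64-bit mask
 * CSRs, so source n lives in register n / 64, bit n % 64; the
 * read_mod_write() helper above performs the read-modify-write on
 * that register under irq_src_lock.
 */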
13259 * Remap the interrupt source from the general handler to the given MSI-X
13271 dd->gi_mask[m] &= ~((u64)1 << n); in remap_intr()
13277 /* direct the chip source to the given MSI-X interrupt */ in remap_intr()
13290 * engine. Per-engine interrupts are as follows: in remap_sdma_interrupts()
13302 * chip interrupts back to MSI-X 0.
13310 dd->gi_mask[i] = ~(u64)0; in reset_interrupts()
13312 /* all chip interrupts map to MSI-X 0 */ in reset_interrupts()
13318 * set_up_interrupts() - Initialize the IRQ resources and state
13332 /* reset general handler mask, chip MSI-X mappings */ in set_up_interrupts()
13335 /* ask for MSI-X interrupts */ in set_up_interrupts()
13350 * num_rcv_contexts - number of contexts being used
13351 * n_krcv_queues - number of kernel contexts
13352 * first_dyn_alloc_ctxt - first dynamically allocated context
13354 * freectxts - number of free user contexts
13355 * num_send_contexts - number of PIO send contexts being used
13356 * num_netdev_contexts - number of contexts reserved for netdev
13372 * - Context 0 - control context (VL15/multicast/error) in set_up_context_variables()
13373 * - Context 1 - first kernel context in set_up_context_variables()
13374 * - Context 2 - second kernel context in set_up_context_variables()
13388 * one send context is allocated for each VL{0-7} and VL15 in set_up_context_variables()
13390 if (num_kernel_contexts > (send_contexts - num_vls - 1)) { in set_up_context_variables()
13393 send_contexts - num_vls - 1, in set_up_context_variables()
13395 num_kernel_contexts = send_contexts - num_vls - 1; in set_up_context_variables()
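/*
 * Editor's worked example: with a hypothetical 160 send contexts and
 * num_vls = 8, the clamp above leaves at most 160 - 8 - 1 = 151
 * kernel contexts after reserving one PIO send context per VL0-7
 * plus VL15.
 */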
13400 * - default to 1 user context per real (non-HT) CPU core if in set_up_context_variables()
13413 (u32)(rcv_contexts - num_kernel_contexts), in set_up_context_variables()
13416 n_usr_ctxts = rcv_contexts - num_kernel_contexts; in set_up_context_variables()
13420 hfi1_num_netdev_contexts(dd, rcv_contexts - in set_up_context_variables()
13426 * 2. FECN (num_kernel_contexts - 1 + num_user_contexts + in set_up_context_variables()
13431 * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts, in set_up_context_variables()
13437 rmt_count += num_kernel_contexts - 1; in set_up_context_variables()
13439 user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count; in set_up_context_variables()
13449 dd->num_rcv_contexts = in set_up_context_variables()
13451 dd->n_krcv_queues = num_kernel_contexts; in set_up_context_variables()
13452 dd->first_dyn_alloc_ctxt = num_kernel_contexts; in set_up_context_variables()
13453 dd->num_netdev_contexts = num_netdev_contexts; in set_up_context_variables()
13454 dd->num_user_contexts = n_usr_ctxts; in set_up_context_variables()
13455 dd->freectxts = n_usr_ctxts; in set_up_context_variables()
13459 (int)dd->num_rcv_contexts, in set_up_context_variables()
13460 (int)dd->n_krcv_queues, in set_up_context_variables()
13461 dd->num_netdev_contexts, in set_up_context_variables()
13462 dd->num_user_contexts); in set_up_context_variables()
13468 * consecutive entries by using write-combining of the entire in set_up_context_variables()
13475 dd->rcv_entries.group_size = RCV_INCREMENT; in set_up_context_variables()
13476 ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size; in set_up_context_variables()
13477 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts; in set_up_context_variables()
13478 dd->rcv_entries.nctxt_extra = ngroups - in set_up_context_variables()
13479 (dd->num_rcv_contexts * dd->rcv_entries.ngroups); in set_up_context_variables()
13481 dd->rcv_entries.ngroups, in set_up_context_variables()
13482 dd->rcv_entries.nctxt_extra); in set_up_context_variables()
13483 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size > in set_up_context_variables()
13485 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) / in set_up_context_variables()
13486 dd->rcv_entries.group_size; in set_up_context_variables()
13489 dd->rcv_entries.ngroups); in set_up_context_variables()
13490 dd->rcv_entries.nctxt_extra = 0; in set_up_context_variables()
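/*
 * Editor's worked example: with a hypothetical 2048-entry receive
 * array and group_size 8, ngroups = 256; 40 receive contexts then
 * get 256 / 40 = 6 groups each, with nctxt_extra = 256 - 40 * 6 = 16
 * groups left over to spread across the first contexts.
 */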
13497 dd->num_send_contexts = ret; in set_up_context_variables()
13502 dd->num_send_contexts, in set_up_context_variables()
13503 dd->sc_sizes[SC_KERNEL].count, in set_up_context_variables()
13504 dd->sc_sizes[SC_ACK].count, in set_up_context_variables()
13505 dd->sc_sizes[SC_USER].count, in set_up_context_variables()
13506 dd->sc_sizes[SC_VL15].count); in set_up_context_variables()
13520 struct hfi1_devdata *dd = ppd->dd; in set_partition_keys()
13526 reg |= (ppd->pkeys[i] & in set_partition_keys()
13533 ((i - 3) * 2), reg); in set_partition_keys()
13543 * These CSRs and memories are uninitialized on reset and must be
13546 * NOTE: All user context CSRs that are not mmaped write-only
13566 * to be read, so are not pre-initialized in write_uninitialized_csrs_and_memories()
13626 /* CCE_REVISION read-only */ in reset_cce_csrs()
13627 /* CCE_REVISION2 read-only */ in reset_cce_csrs()
13628 /* CCE_CTRL - bits clear automatically */ in reset_cce_csrs()
13629 /* CCE_STATUS read-only, use CceCtrl to clear */ in reset_cce_csrs()
13635 /* CCE_ERR_STATUS read-only */ in reset_cce_csrs()
13649 /* CCE_MSIX_PBA read-only */ in reset_cce_csrs()
13656 /* CCE_INT_STATUS read-only */ in reset_cce_csrs()
13660 /* CCE_INT_BLOCKED read-only */ in reset_cce_csrs()
13677 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can in reset_misc_csrs()
13678 * only be written 128-byte chunks in reset_misc_csrs()
13684 /* MISC_STS_8051_DIGEST read-only */ in reset_misc_csrs()
13685 /* MISC_STS_SBM_DIGEST read-only */ in reset_misc_csrs()
13686 /* MISC_STS_PCIE_DIGEST read-only */ in reset_misc_csrs()
13687 /* MISC_STS_FAB_DIGEST read-only */ in reset_misc_csrs()
13688 /* MISC_ERR_STATUS read-only */ in reset_misc_csrs()
13704 /* SEND_CONTEXTS read-only */ in reset_txe_csrs()
13705 /* SEND_DMA_ENGINES read-only */ in reset_txe_csrs()
13706 /* SEND_PIO_MEM_SIZE read-only */ in reset_txe_csrs()
13707 /* SEND_DMA_MEM_SIZE read-only */ in reset_txe_csrs()
13710 /* SEND_PIO_ERR_STATUS read-only */ in reset_txe_csrs()
13714 /* SEND_DMA_ERR_STATUS read-only */ in reset_txe_csrs()
13718 /* SEND_EGRESS_ERR_STATUS read-only */ in reset_txe_csrs()
13730 /* SEND_ERR_STATUS read-only */ in reset_txe_csrs()
13733 /* SEND_ERR_FORCE read-only */ in reset_txe_csrs()
13746 /* SEND_CM_CREDIT_USED_STATUS read-only */ in reset_txe_csrs()
13755 /* SEND_CM_CREDIT_USED_VL read-only */ in reset_txe_csrs()
13756 /* SEND_CM_CREDIT_USED_VL15 read-only */ in reset_txe_csrs()
13757 /* SEND_EGRESS_CTXT_STATUS read-only */ in reset_txe_csrs()
13758 /* SEND_EGRESS_SEND_DMA_STATUS read-only */ in reset_txe_csrs()
13760 /* SEND_EGRESS_ERR_INFO read-only */ in reset_txe_csrs()
13761 /* SEND_EGRESS_ERR_SOURCE read-only */ in reset_txe_csrs()
13764 * TXE Per-Context CSRs in reset_txe_csrs()
13782 * TXE Per-SDMA CSRs in reset_txe_csrs()
13786 /* SEND_DMA_STATUS read-only */ in reset_txe_csrs()
13790 /* SEND_DMA_HEAD read-only */ in reset_txe_csrs()
13793 /* SEND_DMA_IDLE_CNT read-only */ in reset_txe_csrs()
13796 /* SEND_DMA_DESC_FETCHED_CNT read-only */ in reset_txe_csrs()
13797 /* SEND_DMA_ENG_ERR_STATUS read-only */ in reset_txe_csrs()
13831 * Give up after 1ms - maximum wait time. in init_rbufs()
13839 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n", in init_rbufs()
13843 udelay(2); /* do not busy-wait the CSR */ in init_rbufs()
13846 /* start the init - expect RcvCtrl to be 0 */ in init_rbufs()
13860 /* delay is required first time through - see above */ in init_rbufs()
13861 udelay(2); /* do not busy-wait the CSR */ in init_rbufs()
13866 /* give up after 100us - slowest possible at 33MHz is 73us */ in init_rbufs()
13886 /* RCV_STATUS read-only */ in reset_rxe_csrs()
13887 /* RCV_CONTEXTS read-only */ in reset_rxe_csrs()
13888 /* RCV_ARRAY_CNT read-only */ in reset_rxe_csrs()
13889 /* RCV_BUF_SIZE read-only */ in reset_rxe_csrs()
13894 /* this is a clear-down */ in reset_rxe_csrs()
13897 /* RCV_ERR_STATUS read-only */ in reset_rxe_csrs()
13915 * RXE Kernel and User Per-Context CSRs in reset_rxe_csrs()
13920 /* RCV_CTXT_STATUS read-only */ in reset_rxe_csrs()
13933 /* RCV_HDR_TAIL read-only */ in reset_rxe_csrs()
13935 /* RCV_EGR_INDEX_TAIL read-only */ in reset_rxe_csrs()
13937 /* RCV_EGR_OFFSET_TAIL read-only */ in reset_rxe_csrs()
13951 * SC 0-7 -> VL 0-7 (respectively)
13952 * SC 15 -> VL 15
13954 * otherwise -> VL 0 in init_sc2vl_tables()
14000 *((u8 *)(dd->sc2vl) + i) = (u8)i; in init_sc2vl_tables()
14002 *((u8 *)(dd->sc2vl) + i) = 0; in init_sc2vl_tables()
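/*
 * Editor's sketch of the default table built above: identity for
 * SC0-7 and SC15, VL0 for everything else.
 */
static inline unsigned char default_sc_to_vl(unsigned char sc)
{
	if (sc < 8 || sc == 15)
		return sc;
	return 0;
}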
14008 * depend on the chip going through a power-on reset - a driver may be loaded
14011 * Do not write any CSR values to the chip in this routine - there may be
14063 pcie_flr(dd->pcidev); in init_chip()
14075 pcie_flr(dd->pcidev); in init_chip()
14117 dd->vau = CM_VAU; in init_early_variables()
14118 dd->link_credits = CM_GLOBAL_CREDITS; in init_early_variables()
14120 dd->link_credits--; in init_early_variables()
14121 dd->vcu = cu_to_vcu(hfi1_cu); in init_early_variables()
14122 /* enough room for 8 MAD packets plus header - 17K */ in init_early_variables()
14123 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau); in init_early_variables()
14124 if (dd->vl15_init > dd->link_credits) in init_early_variables()
14125 dd->vl15_init = dd->link_credits; in init_early_variables()
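/*
 * Editor's worked example: 8 * (2048 + 128) = 17408 bytes (the "17K"
 * above); dividing by the AU size from vau_to_au() converts that to
 * VL15 credits, and the clamp keeps the result within link_credits.
 */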
14130 for (i = 0; i < dd->num_pports; i++) { in init_early_variables()
14131 struct hfi1_pportdata *ppd = &dd->pport[i]; in init_early_variables()
14150 * hfi1_get_qp_map - get qp map
14163 * init_qpmap_table - init qp map
14235 memset(rmt->map, rxcontext, sizeof(rmt->map)); in alloc_rsm_map_table()
14236 rmt->used = 0; in alloc_rsm_map_table()
14254 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]); in complete_rsm_map_table()
14274 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT | in add_rsm_rule()
14276 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT); in add_rsm_rule()
14278 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT | in add_rsm_rule()
14279 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT | in add_rsm_rule()
14280 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT | in add_rsm_rule()
14281 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT | in add_rsm_rule()
14282 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT | in add_rsm_rule()
14283 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT); in add_rsm_rule()
14285 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT | in add_rsm_rule()
14286 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT | in add_rsm_rule()
14287 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT | in add_rsm_rule()
14288 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT); in add_rsm_rule()
14310 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS || in qos_rmt_entries()
14346 * init_qos - init RX qos
14375 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES) in init_qos()
14387 idx = rmt->used + ((qpn << n) ^ i); in init_qos()
14391 reg = rmt->map[regidx]; in init_qos()
14395 rmt->map[regidx] = reg; in init_qos()
14402 rrd.offset = rmt->used; in init_qos()
14419 rmt->used += rmt_entries; in init_qos()
14422 dd->qos_shift = n + 1; in init_qos()
14425 dd->qos_shift = 1; in init_qos()
14426 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1); in init_qos()
14442 start = dd->first_dyn_alloc_ctxt; in init_fecn_handling()
14444 total_cnt = dd->num_rcv_contexts - start; in init_fecn_handling()
14447 if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) { in init_fecn_handling()
14448 dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n"); in init_fecn_handling()
14455 * in the range start...num_rcv_contexts-1 (inclusive). in init_fecn_handling()
14458 * the table - as long as the entries themselves do not wrap. in init_fecn_handling()
14462 offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start); in init_fecn_handling()
14464 for (i = start, idx = rmt->used; i < dd->num_rcv_contexts; in init_fecn_handling()
14469 reg = rmt->map[regidx]; in init_fecn_handling()
14472 rmt->map[regidx] = reg; in init_fecn_handling()
14477 * o packet type 0 - expected in init_fecn_handling()
14481 * Use index 1 to extract the 8-bit receive context from DestQP in init_fecn_handling()
14500 rmt->used += total_cnt; in init_fecn_handling()
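/*
 * Editor's worked example: the u8 offset above relies on mod-256
 * wrap so that RSM index = (context + offset) & 0xff starts at
 * rmt->used. With start = 3 and rmt->used = 40, offset =
 * (256 + 40 - 3) & 0xff = 37, so context 3 maps to entry 40,
 * context 4 to entry 41, and so on.
 */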
14529 dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n", in hfi1_netdev_update_rmt()
14533 /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */ in hfi1_netdev_update_rmt()
14540 reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8); in hfi1_netdev_update_rmt()
14545 dev_dbg(&(dd)->pcidev->dev, in hfi1_netdev_update_rmt()
14547 regoff - RCV_RSM_MAP_TABLE, reg); in hfi1_netdev_update_rmt()
14551 if (i < (NUM_NETDEV_MAP_ENTRIES - 1)) in hfi1_netdev_update_rmt()
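/*
 * Editor's sketch: the loop above packs one receive context number
 * per byte, eight per 64-bit RSM map register. Standalone form
 * (names hypothetical):
 */
#include <stdint.h>

static inline uint64_t pack_rmt_map_reg(const uint8_t ctxt[8])
{
	uint64_t reg = 0;
	int j;

	for (j = 0; j < 8; j++)
		reg |= (uint64_t)ctxt[j] << (j * 8);
	return reg;
}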
14577 if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) { in hfi1_init_aip_rsm()
14633 if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1) in hfi1_deinit_aip_rsm()
14647 return -ENOMEM; in init_rxe()
14654 hfi1_netdev_set_free_rmt_idx(dd, rmt->used); in init_rxe()
14740 /* enable all per-context and per-SDMA engine errors */ in init_txe()
14747 assign_local_cm_au_table(dd, dd->vcu); in init_txe()
14751 * Don't set on Simulator - causes it to choke. in init_txe()
14753 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR) in init_txe()
14763 if (!rcd || !rcd->sc) in hfi1_set_ctxt_jkey()
14764 return -EINVAL; in hfi1_set_ctxt_jkey()
14766 hw_ctxt = rcd->sc->hw_context; in hfi1_set_ctxt_jkey()
14771 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY)) in hfi1_set_ctxt_jkey()
14775 * Enable send-side J_KEY integrity check, unless this is A0 h/w in hfi1_set_ctxt_jkey()
14787 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg); in hfi1_set_ctxt_jkey()
14797 if (!rcd || !rcd->sc) in hfi1_clear_ctxt_jkey()
14798 return -EINVAL; in hfi1_clear_ctxt_jkey()
14800 hw_ctxt = rcd->sc->hw_context; in hfi1_clear_ctxt_jkey()
14803 * Disable send-side J_KEY integrity check, unless this is A0 h/w. in hfi1_clear_ctxt_jkey()
14813 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0); in hfi1_clear_ctxt_jkey()
14824 if (!rcd || !rcd->sc) in hfi1_set_ctxt_pkey()
14825 return -EINVAL; in hfi1_set_ctxt_pkey()
14827 hw_ctxt = rcd->sc->hw_context; in hfi1_set_ctxt_pkey()
14844 if (!ctxt || !ctxt->sc) in hfi1_clear_ctxt_pkey()
14845 return -EINVAL; in hfi1_clear_ctxt_pkey()
14847 hw_ctxt = ctxt->sc->hw_context; in hfi1_clear_ctxt_pkey()
14869 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14883 /* pre-allocate the asic structure in case we are the first device */ in init_asic_data()
14884 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL); in init_asic_data()
14886 return -ENOMEM; in init_asic_data()
14892 dd->unit != peer->unit) in init_asic_data()
14898 dd->asic_data = peer->asic_data; in init_asic_data()
14901 dd->asic_data = asic_data; in init_asic_data()
14902 mutex_init(&dd->asic_data->asic_resource_mutex); in init_asic_data()
14904 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */ in init_asic_data()
14907 /* first one through - set up i2c devices */ in init_asic_data()
14909 ret = set_up_i2c(dd, dd->asic_data); in init_asic_data()
14915 * Set dd->boardname. Use a generic name if a name is not returned from
14918 * Return 0 on success, -ENOMEM if space could not be allocated.
14924 "Cornelis Omni-Path Host Fabric Interface Adapter 100 Series"; in obtain_boardname()
14929 (void **)&dd->boardname); in obtain_boardname()
14933 dd->boardname = kstrdup(generic, GFP_KERNEL); in obtain_boardname()
14934 if (!dd->boardname) in obtain_boardname()
14935 return -ENOMEM; in obtain_boardname()
14946 * Return 0 on success, -EINVAL on failure.
14981 return -EINVAL; in check_int_registers()
14985 * hfi1_init_dd() - Initialize most of the dd structure.
14989 * chip-specific function pointers for later use.
14993 struct pci_dev *pdev = dd->pcidev; in hfi1_init_dd()
15003 struct pci_dev *parent = pdev->bus->self; in hfi1_init_dd()
15006 ppd = dd->pport; in hfi1_init_dd()
15007 for (i = 0; i < dd->num_pports; i++, ppd++) { in hfi1_init_dd()
15012 ppd->link_width_supported = in hfi1_init_dd()
15015 ppd->link_width_downgrade_supported = in hfi1_init_dd()
15016 ppd->link_width_supported; in hfi1_init_dd()
15018 ppd->link_width_enabled = OPA_LINK_WIDTH_4X; in hfi1_init_dd()
15019 ppd->link_width_downgrade_enabled = in hfi1_init_dd()
15020 ppd->link_width_downgrade_supported; in hfi1_init_dd()
15030 ppd->vls_supported = num_vls; in hfi1_init_dd()
15031 ppd->vls_operational = ppd->vls_supported; in hfi1_init_dd()
15034 dd->vld[vl].mtu = hfi1_max_mtu; in hfi1_init_dd()
15035 dd->vld[15].mtu = MAX_MAD_PACKET; in hfi1_init_dd()
15040 ppd->overrun_threshold = 0x4; in hfi1_init_dd()
15041 ppd->phy_error_threshold = 0xf; in hfi1_init_dd()
15042 ppd->port_crc_mode_enabled = link_crc_mask; in hfi1_init_dd()
15044 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8; in hfi1_init_dd()
15046 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4; in hfi1_init_dd()
15048 ppd->host_link_state = HLS_DN_OFFLINE; in hfi1_init_dd()
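The two port_ltp_crc_mode stores above pack the same cap_to_port_ltp() result into adjacent nibbles, matching the shifts shown: supported modes at bits 11:8 and enabled modes at bits 7:4, with the active mode filling bits 3:0 once the link negotiates. A worked example, assuming cap_to_port_ltp() maps the 14-bit CRC to 0x1:

#include <stdint.h>

#define LTP_CRC_14B 0x1	/* illustrative cap_to_port_ltp() result */

/* supported (bits 11:8) | enabled (bits 7:4) = 0x0110 */
static uint16_t pack_ltp_crc_mode(void)
{
	return (LTP_CRC_14B << 8) | (LTP_CRC_14B << 4);
}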
15066 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT) in hfi1_init_dd()
15068 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT) in hfi1_init_dd()
15083 * obtain the hardware ID - NOT related to unit, which is a in hfi1_init_dd()
15087 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT) in hfi1_init_dd()
15090 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT; in hfi1_init_dd()
15091 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT; in hfi1_init_dd()
15093 dd->icode < ARRAY_SIZE(inames) ? in hfi1_init_dd()
15094 inames[dd->icode] : "unknown", (int)dd->irev); in hfi1_init_dd()
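Every field pulled out of CCE_REVISION and CCE_REVISION2 above uses the same shift-and-mask idiom. A standalone sketch (shift and mask values are illustrative; the real ones come from the chip register definitions): for csr = 0x201, field(csr, 8, 0xff) yields 2 and field(csr, 0, 0xff) yields 1.

#include <stdint.h>

/* Extract a register field: shift it down, then mask to its width. */
static uint64_t field(uint64_t csr, unsigned int shift, uint64_t mask)
{
	return (csr >> shift) & mask;
}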
15097 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G; in hfi1_init_dd()
15099 dd->pport->link_speed_enabled = dd->pport->link_speed_supported; in hfi1_init_dd()
15101 dd->pport->link_speed_active = OPA_LINK_SPEED_25G; in hfi1_init_dd()
15104 ppd = dd->pport; in hfi1_init_dd()
15105 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) { in hfi1_init_dd()
15106 ppd->link_width_supported = in hfi1_init_dd()
15107 ppd->link_width_enabled = in hfi1_init_dd()
15108 ppd->link_width_downgrade_supported = in hfi1_init_dd()
15109 ppd->link_width_downgrade_enabled = in hfi1_init_dd()
15117 ppd->vls_supported = sdma_engines; in hfi1_init_dd()
15118 ppd->vls_operational = ppd->vls_supported; in hfi1_init_dd()
15124 * non-zero, then the calculated field will be at least 1. in hfi1_init_dd()
15126 * Must be after icode is set up - the cclock rate depends in hfi1_init_dd()
15129 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64; in hfi1_init_dd()
15130 if (dd->rcv_intr_timeout_csr > in hfi1_init_dd()
15132 dd->rcv_intr_timeout_csr = in hfi1_init_dd()
15134 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout) in hfi1_init_dd()
15135 dd->rcv_intr_timeout_csr = 1; in hfi1_init_dd()
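Following the conversion above with concrete numbers: the CSR counts in units of 64 cclocks, so the requested nanoseconds become cclocks via ns_to_cclock() and are then divided by 64. The two clamps cap the result at the field maximum and keep a non-zero request from rounding down to zero, which would disable the timeout. With an illustrative 1.25 ns cclock, an 840 ns request is 672 cclocks, i.e. a programmed value of 10:

#include <stdint.h>

#define CSR_MAX 0xffffu	/* illustrative field maximum */

static uint32_t timeout_ns_to_csr(uint32_t ns)
{
	/* cclocks = ns / 1.25 (illustrative clock); CSR is 64-cclock units */
	uint32_t csr = (ns * 4 / 5) / 64;	/* 840 -> 672 -> 10 */

	if (csr > CSR_MAX)
		csr = CSR_MAX;
	else if (csr == 0 && ns)
		csr = 1;	/* don't let a non-zero request disable it */
	return csr;
}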
15175 * - init_chip() - the chip will not initiate any PCIe transactions in hfi1_init_dd()
15176 * - pcie_speeds() - reads the current link speed in hfi1_init_dd()
15177 * - hfi1_firmware_init() - the needed firmware is ready to be in hfi1_init_dd()
15199 snprintf(dd->boardversion, BOARD_VERS_MAX, in hfi1_init_dd()
15202 (u32)dd->majrev, in hfi1_init_dd()
15203 (u32)dd->minrev, in hfi1_init_dd()
15204 (dd->revision >> CCE_REVISION_SW_SHIFT) in hfi1_init_dd()
15223 /* set initial non-RXE, non-TXE CSRs */ in hfi1_init_dd()
15252 for (i = 0; i < dd->num_pports; ++i) { in hfi1_init_dd()
15267 /* set up LCB access - must be after set_up_interrupts() */ in hfi1_init_dd()
15275 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n", in hfi1_init_dd()
15276 (dd->base_guid & 0xFFFFFF) | in hfi1_init_dd()
15277 ((dd->base_guid >> 11) & 0xF000000)); in hfi1_init_dd()
15279 dd->oui1 = dd->base_guid >> 56 & 0xFF; in hfi1_init_dd()
15280 dd->oui2 = dd->base_guid >> 48 & 0xFF; in hfi1_init_dd()
15281 dd->oui3 = dd->base_guid >> 40 & 0xFF; in hfi1_init_dd()
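Both derivations above are bit surgery on the 64-bit base GUID: the three OUI bytes are simply its top three bytes, and the serial folds a few higher bits down next to the low 24. Worked through with an illustrative GUID of 0x0011750123456789, the OUI comes out as 00:11:75:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t guid = 0x0011750123456789ull;	/* illustrative */

	printf("oui: %02x:%02x:%02x\n",
	       (unsigned)(guid >> 56 & 0xFF),
	       (unsigned)(guid >> 48 & 0xFF),
	       (unsigned)(guid >> 40 & 0xFF));
	/* serial: low 24 bits, with GUID bits 38:35 landing in bits 27:24 */
	printf("serial: 0x%08llx\n",
	       (unsigned long long)((guid & 0xFFFFFF) |
				    ((guid >> 11) & 0xF000000)));
	return 0;
}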
15297 init_completion(&dd->user_comp); in hfi1_init_dd()
15300 refcount_set(&dd->user_refcount, 1); in hfi1_init_dd()
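The completion/refcount pair initialized above is a standard teardown idiom: the driver holds one base reference, every user context takes another, and device removal drops the base reference then waits on the completion until the last user's put fires it. A sketch of the put side (field names mirror the ones above; the surrounding driver plumbing is elided):

#include <linux/completion.h>
#include <linux/refcount.h>

struct user_track {
	refcount_t user_refcount;
	struct completion user_comp;
};

static void user_put(struct user_track *t)
{
	/* the final put, including the driver's own base reference,
	 * wakes whoever is blocked in wait_for_completion() */
	if (refcount_dec_and_test(&t->user_refcount))
		complete(&t->user_comp);
}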
15324 u32 current_egress_rate = ppd->current_egress_rate; in delay_cycles()
15327 if (desired_egress_rate == -1) in delay_cycles()
15333 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) - in delay_cycles()
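delay_cycles() returns the extra hold time needed to emulate a slower egress rate: cycles to emit dw_len dwords (dw_len * 4 bytes) at the desired rate, minus cycles at the current rate. Sketched under the assumption that egress_cycles() converts bytes at a rate in Mbit/s into cycles of an illustrative 800 MHz fabric clock (the driver's actual constants differ):

#include <stdint.h>

#define CCLK_MHZ 800u	/* illustrative fabric-clock frequency */

/* Time to egress len bytes at rate Mbit/s, in fabric-clock cycles. */
static uint32_t egress_cycles_sketch(uint32_t len, uint32_t rate_mbps)
{
	uint64_t ns;

	if (!rate_mbps)
		return 0;
	ns = (uint64_t)len * 8 * 1000 / rate_mbps;	/* bits -> ns */
	return (uint32_t)(ns * CCLK_MHZ / 1000);	/* ns -> cycles */
}

/* Extra cycles to hold the packet when desired is slower than current. */
static uint32_t delay_sketch(uint32_t dw_len, uint32_t desired, uint32_t cur)
{
	return egress_cycles_sketch(dw_len * 4, desired) -
	       egress_cycles_sketch(dw_len * 4, cur);
}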
15340 * create_pbc - build a pbc for transmission
15342 * @flags: special case flags OR'ed into the built pbc in create_pbc()
15349 * NOTE: The PBC created will not insert any HCRC - all callers but one are
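A PBC in this scheme is OR-composed fields: the caller's flag bits, the VL, the static-rate-control delay, and the packet length in dwords, with the HCRC-insertion field left zero per the NOTE above. A sketch with illustrative shifts and masks (the chip's real PBC layout differs):

#include <stdint.h>

#define PBC_DLEN_MASK	0xfffull	/* illustrative length-in-dwords field */
#define PBC_VL_MASK	0xfull
#define PBC_VL_SHIFT	12		/* illustrative */
#define PBC_SRC_SHIFT	32		/* illustrative static-rate position */

static uint64_t build_pbc_sketch(uint64_t flags, uint32_t vl,
				 uint64_t delay, uint32_t dw_len)
{
	return flags |
	       (((uint64_t)vl & PBC_VL_MASK) << PBC_VL_SHIFT) |
	       (delay << PBC_SRC_SHIFT) |
	       ((uint64_t)dw_len & PBC_DLEN_MASK);
}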
15394 if (dd->icode != ICODE_RTL_SILICON || in thermal_init()
15423 /* Step 3: Write clock divider value (100MHz -> 2MHz) */ in thermal_init()
15438 /* Step 5: De-assert block reset and start conversion */ in thermal_init()
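Step 3 is plain arithmetic: dropping the 100 MHz reference to the sensor's 2 MHz needs a divide ratio of 100 / 2 = 50. How that ratio is encoded in the divider field is chip-specific, so the helper below computes the ratio itself rather than the raw register contents:

#include <stdint.h>

/* Divide ratio to derive f_out from f_in, e.g. 100000 / 2000 = 50. */
static uint32_t clk_div_ratio(uint32_t f_in_khz, uint32_t f_out_khz)
{
	return f_in_khz / f_out_khz;
}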
15463 struct hfi1_pportdata *ppd = &dd->pport[0]; in handle_temp_err()
15471 dd->flags |= HFI1_FORCED_FREEZE; in handle_temp_err()
15484 ppd->driver_link_ready = 0; in handle_temp_err()
15485 ppd->link_enabled = 0; in handle_temp_err()