Lines Matching "quirk-frame-length-adjustment"
2 * Copyright(c) 2015 - 2020 Intel Corporation.
24 * - Redistributions of source code must retain the above copyright
26 * - Redistributions in binary form must reproduce the above copyright
30 * - Neither the name of Intel Corporation nor the names of its
73 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
119 #define SEC_SC_HALTED 0x4 /* per-context only */
120 #define SEC_SPC_FREEZE 0x8 /* per-HFI only */
128 * 0 - User Fecn Handling
129 * 1 - Vnic
130 * 2 - AIP
131 * 3 - Verbs
142 #define emulator_rev(dd) ((dd)->irev >> 8)
144 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
145 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
154 /* LRH.BTH: QW 0, OFFSET 48 - for match */
163 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
171 /* SC[n..0] QW 0, OFFSET 60 - for select */
197 /* L2_TYPE: QW 0, OFFSET 61 - for match */
205 /* L4_TYPE QW 1, OFFSET 0 - for match */
213 /* 16B VESWID - for select */
215 /* 16B ENTROPY - for select */
277 /* all CceStatus sub-block freeze bits */
282 /* all CceStatus sub-block TXE pause bits */
286 /* all CceStatus sub-block RXE pause bits */
378 /* 41-63 reserved */
495 /* 30-31 reserved */
508 /* 36-63 reserved */
555 /* 04-63 reserved */
587 /* 9-10 reserved */
749 /* 5-63 reserved */
1008 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
1111 * in the top-level CceIntStatus.
1121 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
1122 #define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
1123 #define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
1160 * SDMA error interrupt entry - refers to another register containing more
1191 /* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1325 * hfi1_addr_from_offset - return addr for readq/writeq
1326 * @dd: the dd device
1327 * @offset: the offset of the CSR within bar0
1336 if (offset >= dd->base2_start) in hfi1_addr_from_offset()
1337 return dd->kregbase2 + (offset - dd->base2_start); in hfi1_addr_from_offset()
1338 return dd->kregbase1 + offset; in hfi1_addr_from_offset()
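The two fragments above implement a split mapping of bar0: any offset at or past base2_start is served from a second mapping. A minimal userspace sketch of the same math, with made-up struct fields standing in for dd->kregbase1/kregbase2/base2_start:

	#include <stdint.h>

	struct dev_map {
		uint8_t *kregbase1;	/* mapping of bar0 starting at offset 0 */
		uint8_t *kregbase2;	/* mapping of bar0 starting at base2_start */
		uint64_t base2_start;	/* first offset served by the second mapping */
	};

	static void *addr_from_offset(const struct dev_map *dd, uint64_t offset)
	{
		if (offset >= dd->base2_start)
			return dd->kregbase2 + (offset - dd->base2_start);
		return dd->kregbase1 + offset;
	}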
1342 * read_csr - read CSR at the indicated offset
1343 * @dd: the dd device
1344 * @offset: the offset of the CSR within bar0
1351 if (dd->flags & HFI1_PRESENT) in read_csr()
1353 return -1; in read_csr()
1357 * write_csr - write CSR at the indicated offset
1358 * @dd: the dd device
1359 * @offset: the offset of the CSR within bar0
1360 * @value: value to write
1364 if (dd->flags & HFI1_PRESENT) { in write_csr()
1368 if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start)) in write_csr()
1375 * get_csr_addr - return the iomem address for offset
1376 * @dd: the dd device
1377 * @offset: the offset of the CSR within bar0
1386 if (dd->flags & HFI1_PRESENT) in get_csr_addr()
1415 u64 csr = entry->csr; in dev_access_u32_csr()
1417 if (entry->flags & CNTR_SDMA) { in dev_access_u32_csr()
1433 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_err_cnt()
1434 return dd->per_sdma[idx].err_cnt; in access_sde_err_cnt()
1443 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_int_cnt()
1444 return dd->per_sdma[idx].sdma_int_cnt; in access_sde_int_cnt()
1453 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_idle_int_cnt()
1454 return dd->per_sdma[idx].idle_int_cnt; in access_sde_idle_int_cnt()
1464 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_progress_int_cnt()
1465 return dd->per_sdma[idx].progress_int_cnt; in access_sde_progress_int_cnt()
1475 u64 csr = entry->csr; in dev_access_u64_csr()
1477 if (entry->flags & CNTR_VL) { in dev_access_u64_csr()
1494 u32 csr = entry->csr; in dc_access_lcb_cntr()
1521 return read_write_csr(ppd->dd, entry->csr, mode, data); in port_access_u32_csr()
1529 u64 csr = entry->csr; in port_access_u64_csr()
1531 if (entry->flags & CNTR_VL) { in port_access_u64_csr()
1539 val = read_write_csr(ppd->dd, csr, mode, data); in port_access_u64_csr()
1571 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data); in access_sw_link_dn_cnt()
1581 return read_write_sw(ppd->dd, &ppd->link_up, mode, data); in access_sw_link_up_cnt()
1592 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data); in access_sw_unknown_frame_cnt()
1603 counter = &ppd->port_xmit_discards; in access_sw_xmit_discards()
1605 counter = &ppd->port_xmit_discards_vl[vl]; in access_sw_xmit_discards()
1609 return read_write_sw(ppd->dd, counter, mode, data); in access_sw_xmit_discards()
1621 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors, in access_xmit_constraint_errs()
1633 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors, in access_rcv_constraint_errs()
1657 ret = get_all_cpu_total(cntr) - *z_val; in read_write_cpu()
1677 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl, in access_sw_cpu_intr()
1686 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl, in access_sw_cpu_rcv_limit()
1695 return dd->verbs_dev.n_piowait; in access_sw_pio_wait()
1703 return dd->verbs_dev.n_piodrain; in access_sw_pio_drain()
1711 return dd->ctx0_seq_drop; in access_sw_ctx0_seq_drop()
1719 return dd->verbs_dev.n_txwait; in access_sw_vtx_wait()
1727 return dd->verbs_dev.n_kmem_wait; in access_sw_kmem_wait()
1735 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl, in access_sw_send_schedule()
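The read_write_cpu() fragments above (ret = get_all_cpu_total(cntr) - *z_val) implement reset-without-clearing for per-CPU counters: a read reports the running total minus a zero-point, and a reset just advances the zero-point. A hedged sketch of that semantic, with illustrative names and types:

	#include <stdint.h>

	struct sw_counter {
		uint64_t total;	/* stands in for the summed per-CPU total */
		uint64_t z_val;	/* zero-point recorded at the last reset */
	};

	static uint64_t counter_read(const struct sw_counter *c)
	{
		return c->total - c->z_val;	/* accumulated since reset */
	}

	static void counter_reset(struct sw_counter *c)
	{
		c->z_val = c->total;	/* no per-CPU variables are touched */
	}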
1746 return dd->misc_err_status_cnt[12]; in access_misc_pll_lock_fail_err_cnt()
1755 return dd->misc_err_status_cnt[11]; in access_misc_mbist_fail_err_cnt()
1764 return dd->misc_err_status_cnt[10]; in access_misc_invalid_eep_cmd_err_cnt()
1773 return dd->misc_err_status_cnt[9]; in access_misc_efuse_done_parity_err_cnt()
1782 return dd->misc_err_status_cnt[8]; in access_misc_efuse_write_err_cnt()
1791 return dd->misc_err_status_cnt[7]; in access_misc_efuse_read_bad_addr_err_cnt()
1800 return dd->misc_err_status_cnt[6]; in access_misc_efuse_csr_parity_err_cnt()
1809 return dd->misc_err_status_cnt[5]; in access_misc_fw_auth_failed_err_cnt()
1818 return dd->misc_err_status_cnt[4]; in access_misc_key_mismatch_err_cnt()
1827 return dd->misc_err_status_cnt[3]; in access_misc_sbus_write_failed_err_cnt()
1836 return dd->misc_err_status_cnt[2]; in access_misc_csr_write_bad_addr_err_cnt()
1845 return dd->misc_err_status_cnt[1]; in access_misc_csr_read_bad_addr_err_cnt()
1854 return dd->misc_err_status_cnt[0]; in access_misc_csr_parity_err_cnt()
1867 return dd->sw_cce_err_status_aggregate; in access_sw_cce_err_status_aggregated_cnt()
1880 return dd->cce_err_status_cnt[40]; in access_cce_msix_csr_parity_err_cnt()
1889 return dd->cce_err_status_cnt[39]; in access_cce_int_map_unc_err_cnt()
1898 return dd->cce_err_status_cnt[38]; in access_cce_int_map_cor_err_cnt()
1907 return dd->cce_err_status_cnt[37]; in access_cce_msix_table_unc_err_cnt()
1916 return dd->cce_err_status_cnt[36]; in access_cce_msix_table_cor_err_cnt()
1925 return dd->cce_err_status_cnt[35]; in access_cce_rxdma_conv_fifo_parity_err_cnt()
1934 return dd->cce_err_status_cnt[34]; in access_cce_rcpl_async_fifo_parity_err_cnt()
1943 return dd->cce_err_status_cnt[33]; in access_cce_seg_write_bad_addr_err_cnt()
1952 return dd->cce_err_status_cnt[32]; in access_cce_seg_read_bad_addr_err_cnt()
1960 return dd->cce_err_status_cnt[31]; in access_la_triggered_cnt()
1969 return dd->cce_err_status_cnt[30]; in access_cce_trgt_cpl_timeout_err_cnt()
1978 return dd->cce_err_status_cnt[29]; in access_pcic_receive_parity_err_cnt()
1987 return dd->cce_err_status_cnt[28]; in access_pcic_transmit_back_parity_err_cnt()
1996 return dd->cce_err_status_cnt[27]; in access_pcic_transmit_front_parity_err_cnt()
2005 return dd->cce_err_status_cnt[26]; in access_pcic_cpl_dat_q_unc_err_cnt()
2014 return dd->cce_err_status_cnt[25]; in access_pcic_cpl_hd_q_unc_err_cnt()
2023 return dd->cce_err_status_cnt[24]; in access_pcic_post_dat_q_unc_err_cnt()
2032 return dd->cce_err_status_cnt[23]; in access_pcic_post_hd_q_unc_err_cnt()
2041 return dd->cce_err_status_cnt[22]; in access_pcic_retry_sot_mem_unc_err_cnt()
2050 return dd->cce_err_status_cnt[21]; in access_pcic_retry_mem_unc_err()
2059 return dd->cce_err_status_cnt[20]; in access_pcic_n_post_dat_q_parity_err_cnt()
2068 return dd->cce_err_status_cnt[19]; in access_pcic_n_post_h_q_parity_err_cnt()
2077 return dd->cce_err_status_cnt[18]; in access_pcic_cpl_dat_q_cor_err_cnt()
2086 return dd->cce_err_status_cnt[17]; in access_pcic_cpl_hd_q_cor_err_cnt()
2095 return dd->cce_err_status_cnt[16]; in access_pcic_post_dat_q_cor_err_cnt()
2104 return dd->cce_err_status_cnt[15]; in access_pcic_post_hd_q_cor_err_cnt()
2113 return dd->cce_err_status_cnt[14]; in access_pcic_retry_sot_mem_cor_err_cnt()
2122 return dd->cce_err_status_cnt[13]; in access_pcic_retry_mem_cor_err_cnt()
2131 return dd->cce_err_status_cnt[12]; in access_cce_cli1_async_fifo_dbg_parity_err_cnt()
2140 return dd->cce_err_status_cnt[11]; in access_cce_cli1_async_fifo_rxdma_parity_err_cnt()
2149 return dd->cce_err_status_cnt[10]; in access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt()
2158 return dd->cce_err_status_cnt[9]; in access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt()
2167 return dd->cce_err_status_cnt[8]; in access_cce_cli2_async_fifo_parity_err_cnt()
2176 return dd->cce_err_status_cnt[7]; in access_cce_csr_cfg_bus_parity_err_cnt()
2185 return dd->cce_err_status_cnt[6]; in access_cce_cli0_async_fifo_parity_err_cnt()
2194 return dd->cce_err_status_cnt[5]; in access_cce_rspd_data_parity_err_cnt()
2203 return dd->cce_err_status_cnt[4]; in access_cce_trgt_access_err_cnt()
2212 return dd->cce_err_status_cnt[3]; in access_cce_trgt_async_fifo_parity_err_cnt()
2221 return dd->cce_err_status_cnt[2]; in access_cce_csr_write_bad_addr_err_cnt()
2230 return dd->cce_err_status_cnt[1]; in access_cce_csr_read_bad_addr_err_cnt()
2239 return dd->cce_err_status_cnt[0]; in access_ccs_csr_parity_err_cnt()
2252 return dd->rcv_err_status_cnt[63]; in access_rx_csr_parity_err_cnt()
2261 return dd->rcv_err_status_cnt[62]; in access_rx_csr_write_bad_addr_err_cnt()
2270 return dd->rcv_err_status_cnt[61]; in access_rx_csr_read_bad_addr_err_cnt()
2279 return dd->rcv_err_status_cnt[60]; in access_rx_dma_csr_unc_err_cnt()
2288 return dd->rcv_err_status_cnt[59]; in access_rx_dma_dq_fsm_encoding_err_cnt()
2297 return dd->rcv_err_status_cnt[58]; in access_rx_dma_eq_fsm_encoding_err_cnt()
2306 return dd->rcv_err_status_cnt[57]; in access_rx_dma_csr_parity_err_cnt()
2315 return dd->rcv_err_status_cnt[56]; in access_rx_rbuf_data_cor_err_cnt()
2324 return dd->rcv_err_status_cnt[55]; in access_rx_rbuf_data_unc_err_cnt()
2333 return dd->rcv_err_status_cnt[54]; in access_rx_dma_data_fifo_rd_cor_err_cnt()
2342 return dd->rcv_err_status_cnt[53]; in access_rx_dma_data_fifo_rd_unc_err_cnt()
2351 return dd->rcv_err_status_cnt[52]; in access_rx_dma_hdr_fifo_rd_cor_err_cnt()
2360 return dd->rcv_err_status_cnt[51]; in access_rx_dma_hdr_fifo_rd_unc_err_cnt()
2369 return dd->rcv_err_status_cnt[50]; in access_rx_rbuf_desc_part2_cor_err_cnt()
2378 return dd->rcv_err_status_cnt[49]; in access_rx_rbuf_desc_part2_unc_err_cnt()
2387 return dd->rcv_err_status_cnt[48]; in access_rx_rbuf_desc_part1_cor_err_cnt()
2396 return dd->rcv_err_status_cnt[47]; in access_rx_rbuf_desc_part1_unc_err_cnt()
2405 return dd->rcv_err_status_cnt[46]; in access_rx_hq_intr_fsm_err_cnt()
2414 return dd->rcv_err_status_cnt[45]; in access_rx_hq_intr_csr_parity_err_cnt()
2423 return dd->rcv_err_status_cnt[44]; in access_rx_lookup_csr_parity_err_cnt()
2432 return dd->rcv_err_status_cnt[43]; in access_rx_lookup_rcv_array_cor_err_cnt()
2441 return dd->rcv_err_status_cnt[42]; in access_rx_lookup_rcv_array_unc_err_cnt()
2450 return dd->rcv_err_status_cnt[41]; in access_rx_lookup_des_part2_parity_err_cnt()
2459 return dd->rcv_err_status_cnt[40]; in access_rx_lookup_des_part1_unc_cor_err_cnt()
2468 return dd->rcv_err_status_cnt[39]; in access_rx_lookup_des_part1_unc_err_cnt()
2477 return dd->rcv_err_status_cnt[38]; in access_rx_rbuf_next_free_buf_cor_err_cnt()
2486 return dd->rcv_err_status_cnt[37]; in access_rx_rbuf_next_free_buf_unc_err_cnt()
2495 return dd->rcv_err_status_cnt[36]; in access_rbuf_fl_init_wr_addr_parity_err_cnt()
2504 return dd->rcv_err_status_cnt[35]; in access_rx_rbuf_fl_initdone_parity_err_cnt()
2513 return dd->rcv_err_status_cnt[34]; in access_rx_rbuf_fl_write_addr_parity_err_cnt()
2522 return dd->rcv_err_status_cnt[33]; in access_rx_rbuf_fl_rd_addr_parity_err_cnt()
2531 return dd->rcv_err_status_cnt[32]; in access_rx_rbuf_empty_err_cnt()
2540 return dd->rcv_err_status_cnt[31]; in access_rx_rbuf_full_err_cnt()
2549 return dd->rcv_err_status_cnt[30]; in access_rbuf_bad_lookup_err_cnt()
2558 return dd->rcv_err_status_cnt[29]; in access_rbuf_ctx_id_parity_err_cnt()
2567 return dd->rcv_err_status_cnt[28]; in access_rbuf_csr_qeopdw_parity_err_cnt()
2576 return dd->rcv_err_status_cnt[27]; in access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt()
2585 return dd->rcv_err_status_cnt[26]; in access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt()
2594 return dd->rcv_err_status_cnt[25]; in access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt()
2603 return dd->rcv_err_status_cnt[24]; in access_rx_rbuf_csr_q_vld_bit_parity_err_cnt()
2612 return dd->rcv_err_status_cnt[23]; in access_rx_rbuf_csr_q_next_buf_parity_err_cnt()
2621 return dd->rcv_err_status_cnt[22]; in access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt()
2630 return dd->rcv_err_status_cnt[21]; in access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt()
2639 return dd->rcv_err_status_cnt[20]; in access_rx_rbuf_block_list_read_cor_err_cnt()
2648 return dd->rcv_err_status_cnt[19]; in access_rx_rbuf_block_list_read_unc_err_cnt()
2657 return dd->rcv_err_status_cnt[18]; in access_rx_rbuf_lookup_des_cor_err_cnt()
2666 return dd->rcv_err_status_cnt[17]; in access_rx_rbuf_lookup_des_unc_err_cnt()
2675 return dd->rcv_err_status_cnt[16]; in access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt()
2684 return dd->rcv_err_status_cnt[15]; in access_rx_rbuf_lookup_des_reg_unc_err_cnt()
2693 return dd->rcv_err_status_cnt[14]; in access_rx_rbuf_free_list_cor_err_cnt()
2702 return dd->rcv_err_status_cnt[13]; in access_rx_rbuf_free_list_unc_err_cnt()
2711 return dd->rcv_err_status_cnt[12]; in access_rx_rcv_fsm_encoding_err_cnt()
2720 return dd->rcv_err_status_cnt[11]; in access_rx_dma_flag_cor_err_cnt()
2729 return dd->rcv_err_status_cnt[10]; in access_rx_dma_flag_unc_err_cnt()
2738 return dd->rcv_err_status_cnt[9]; in access_rx_dc_sop_eop_parity_err_cnt()
2747 return dd->rcv_err_status_cnt[8]; in access_rx_rcv_csr_parity_err_cnt()
2756 return dd->rcv_err_status_cnt[7]; in access_rx_rcv_qp_map_table_cor_err_cnt()
2765 return dd->rcv_err_status_cnt[6]; in access_rx_rcv_qp_map_table_unc_err_cnt()
2774 return dd->rcv_err_status_cnt[5]; in access_rx_rcv_data_cor_err_cnt()
2783 return dd->rcv_err_status_cnt[4]; in access_rx_rcv_data_unc_err_cnt()
2792 return dd->rcv_err_status_cnt[3]; in access_rx_rcv_hdr_cor_err_cnt()
2801 return dd->rcv_err_status_cnt[2]; in access_rx_rcv_hdr_unc_err_cnt()
2810 return dd->rcv_err_status_cnt[1]; in access_rx_dc_intf_parity_err_cnt()
2819 return dd->rcv_err_status_cnt[0]; in access_rx_dma_csr_cor_err_cnt()
2832 return dd->send_pio_err_status_cnt[35]; in access_pio_pec_sop_head_parity_err_cnt()
2841 return dd->send_pio_err_status_cnt[34]; in access_pio_pcc_sop_head_parity_err_cnt()
2850 return dd->send_pio_err_status_cnt[33]; in access_pio_last_returned_cnt_parity_err_cnt()
2859 return dd->send_pio_err_status_cnt[32]; in access_pio_current_free_cnt_parity_err_cnt()
2868 return dd->send_pio_err_status_cnt[31]; in access_pio_reserved_31_err_cnt()
2877 return dd->send_pio_err_status_cnt[30]; in access_pio_reserved_30_err_cnt()
2886 return dd->send_pio_err_status_cnt[29]; in access_pio_ppmc_sop_len_err_cnt()
2895 return dd->send_pio_err_status_cnt[28]; in access_pio_ppmc_bqc_mem_parity_err_cnt()
2904 return dd->send_pio_err_status_cnt[27]; in access_pio_vl_fifo_parity_err_cnt()
2913 return dd->send_pio_err_status_cnt[26]; in access_pio_vlf_sop_parity_err_cnt()
2922 return dd->send_pio_err_status_cnt[25]; in access_pio_vlf_v1_len_parity_err_cnt()
2931 return dd->send_pio_err_status_cnt[24]; in access_pio_block_qw_count_parity_err_cnt()
2940 return dd->send_pio_err_status_cnt[23]; in access_pio_write_qw_valid_parity_err_cnt()
2949 return dd->send_pio_err_status_cnt[22]; in access_pio_state_machine_err_cnt()
2958 return dd->send_pio_err_status_cnt[21]; in access_pio_write_data_parity_err_cnt()
2967 return dd->send_pio_err_status_cnt[20]; in access_pio_host_addr_mem_cor_err_cnt()
2976 return dd->send_pio_err_status_cnt[19]; in access_pio_host_addr_mem_unc_err_cnt()
2985 return dd->send_pio_err_status_cnt[18]; in access_pio_pkt_evict_sm_or_arb_sm_err_cnt()
2994 return dd->send_pio_err_status_cnt[17]; in access_pio_init_sm_in_err_cnt()
3003 return dd->send_pio_err_status_cnt[16]; in access_pio_ppmc_pbl_fifo_err_cnt()
3012 return dd->send_pio_err_status_cnt[15]; in access_pio_credit_ret_fifo_parity_err_cnt()
3021 return dd->send_pio_err_status_cnt[14]; in access_pio_v1_len_mem_bank1_cor_err_cnt()
3030 return dd->send_pio_err_status_cnt[13]; in access_pio_v1_len_mem_bank0_cor_err_cnt()
3039 return dd->send_pio_err_status_cnt[12]; in access_pio_v1_len_mem_bank1_unc_err_cnt()
3048 return dd->send_pio_err_status_cnt[11]; in access_pio_v1_len_mem_bank0_unc_err_cnt()
3057 return dd->send_pio_err_status_cnt[10]; in access_pio_sm_pkt_reset_parity_err_cnt()
3066 return dd->send_pio_err_status_cnt[9]; in access_pio_pkt_evict_fifo_parity_err_cnt()
3075 return dd->send_pio_err_status_cnt[8]; in access_pio_sbrdctrl_crrel_fifo_parity_err_cnt()
3084 return dd->send_pio_err_status_cnt[7]; in access_pio_sbrdctl_crrel_parity_err_cnt()
3093 return dd->send_pio_err_status_cnt[6]; in access_pio_pec_fifo_parity_err_cnt()
3102 return dd->send_pio_err_status_cnt[5]; in access_pio_pcc_fifo_parity_err_cnt()
3111 return dd->send_pio_err_status_cnt[4]; in access_pio_sb_mem_fifo1_err_cnt()
3120 return dd->send_pio_err_status_cnt[3]; in access_pio_sb_mem_fifo0_err_cnt()
3129 return dd->send_pio_err_status_cnt[2]; in access_pio_csr_parity_err_cnt()
3138 return dd->send_pio_err_status_cnt[1]; in access_pio_write_addr_parity_err_cnt()
3147 return dd->send_pio_err_status_cnt[0]; in access_pio_write_bad_ctxt_err_cnt()
3160 return dd->send_dma_err_status_cnt[3]; in access_sdma_pcie_req_tracking_cor_err_cnt()
3169 return dd->send_dma_err_status_cnt[2]; in access_sdma_pcie_req_tracking_unc_err_cnt()
3178 return dd->send_dma_err_status_cnt[1]; in access_sdma_csr_parity_err_cnt()
3187 return dd->send_dma_err_status_cnt[0]; in access_sdma_rpy_tag_err_cnt()
3200 return dd->send_egress_err_status_cnt[63]; in access_tx_read_pio_memory_csr_unc_err_cnt()
3209 return dd->send_egress_err_status_cnt[62]; in access_tx_read_sdma_memory_csr_err_cnt()
3218 return dd->send_egress_err_status_cnt[61]; in access_tx_egress_fifo_cor_err_cnt()
3227 return dd->send_egress_err_status_cnt[60]; in access_tx_read_pio_memory_cor_err_cnt()
3236 return dd->send_egress_err_status_cnt[59]; in access_tx_read_sdma_memory_cor_err_cnt()
3245 return dd->send_egress_err_status_cnt[58]; in access_tx_sb_hdr_cor_err_cnt()
3254 return dd->send_egress_err_status_cnt[57]; in access_tx_credit_overrun_err_cnt()
3263 return dd->send_egress_err_status_cnt[56]; in access_tx_launch_fifo8_cor_err_cnt()
3272 return dd->send_egress_err_status_cnt[55]; in access_tx_launch_fifo7_cor_err_cnt()
3281 return dd->send_egress_err_status_cnt[54]; in access_tx_launch_fifo6_cor_err_cnt()
3290 return dd->send_egress_err_status_cnt[53]; in access_tx_launch_fifo5_cor_err_cnt()
3299 return dd->send_egress_err_status_cnt[52]; in access_tx_launch_fifo4_cor_err_cnt()
3308 return dd->send_egress_err_status_cnt[51]; in access_tx_launch_fifo3_cor_err_cnt()
3317 return dd->send_egress_err_status_cnt[50]; in access_tx_launch_fifo2_cor_err_cnt()
3326 return dd->send_egress_err_status_cnt[49]; in access_tx_launch_fifo1_cor_err_cnt()
3335 return dd->send_egress_err_status_cnt[48]; in access_tx_launch_fifo0_cor_err_cnt()
3344 return dd->send_egress_err_status_cnt[47]; in access_tx_credit_return_vl_err_cnt()
3353 return dd->send_egress_err_status_cnt[46]; in access_tx_hcrc_insertion_err_cnt()
3362 return dd->send_egress_err_status_cnt[45]; in access_tx_egress_fifo_unc_err_cnt()
3371 return dd->send_egress_err_status_cnt[44]; in access_tx_read_pio_memory_unc_err_cnt()
3380 return dd->send_egress_err_status_cnt[43]; in access_tx_read_sdma_memory_unc_err_cnt()
3389 return dd->send_egress_err_status_cnt[42]; in access_tx_sb_hdr_unc_err_cnt()
3398 return dd->send_egress_err_status_cnt[41]; in access_tx_credit_return_partiy_err_cnt()
3407 return dd->send_egress_err_status_cnt[40]; in access_tx_launch_fifo8_unc_or_parity_err_cnt()
3416 return dd->send_egress_err_status_cnt[39]; in access_tx_launch_fifo7_unc_or_parity_err_cnt()
3425 return dd->send_egress_err_status_cnt[38]; in access_tx_launch_fifo6_unc_or_parity_err_cnt()
3434 return dd->send_egress_err_status_cnt[37]; in access_tx_launch_fifo5_unc_or_parity_err_cnt()
3443 return dd->send_egress_err_status_cnt[36]; in access_tx_launch_fifo4_unc_or_parity_err_cnt()
3452 return dd->send_egress_err_status_cnt[35]; in access_tx_launch_fifo3_unc_or_parity_err_cnt()
3461 return dd->send_egress_err_status_cnt[34]; in access_tx_launch_fifo2_unc_or_parity_err_cnt()
3470 return dd->send_egress_err_status_cnt[33]; in access_tx_launch_fifo1_unc_or_parity_err_cnt()
3479 return dd->send_egress_err_status_cnt[32]; in access_tx_launch_fifo0_unc_or_parity_err_cnt()
3488 return dd->send_egress_err_status_cnt[31]; in access_tx_sdma15_disallowed_packet_err_cnt()
3497 return dd->send_egress_err_status_cnt[30]; in access_tx_sdma14_disallowed_packet_err_cnt()
3506 return dd->send_egress_err_status_cnt[29]; in access_tx_sdma13_disallowed_packet_err_cnt()
3515 return dd->send_egress_err_status_cnt[28]; in access_tx_sdma12_disallowed_packet_err_cnt()
3524 return dd->send_egress_err_status_cnt[27]; in access_tx_sdma11_disallowed_packet_err_cnt()
3533 return dd->send_egress_err_status_cnt[26]; in access_tx_sdma10_disallowed_packet_err_cnt()
3542 return dd->send_egress_err_status_cnt[25]; in access_tx_sdma9_disallowed_packet_err_cnt()
3551 return dd->send_egress_err_status_cnt[24]; in access_tx_sdma8_disallowed_packet_err_cnt()
3560 return dd->send_egress_err_status_cnt[23]; in access_tx_sdma7_disallowed_packet_err_cnt()
3569 return dd->send_egress_err_status_cnt[22]; in access_tx_sdma6_disallowed_packet_err_cnt()
3578 return dd->send_egress_err_status_cnt[21]; in access_tx_sdma5_disallowed_packet_err_cnt()
3587 return dd->send_egress_err_status_cnt[20]; in access_tx_sdma4_disallowed_packet_err_cnt()
3596 return dd->send_egress_err_status_cnt[19]; in access_tx_sdma3_disallowed_packet_err_cnt()
3605 return dd->send_egress_err_status_cnt[18]; in access_tx_sdma2_disallowed_packet_err_cnt()
3614 return dd->send_egress_err_status_cnt[17]; in access_tx_sdma1_disallowed_packet_err_cnt()
3623 return dd->send_egress_err_status_cnt[16]; in access_tx_sdma0_disallowed_packet_err_cnt()
3632 return dd->send_egress_err_status_cnt[15]; in access_tx_config_parity_err_cnt()
3641 return dd->send_egress_err_status_cnt[14]; in access_tx_sbrd_ctl_csr_parity_err_cnt()
3650 return dd->send_egress_err_status_cnt[13]; in access_tx_launch_csr_parity_err_cnt()
3659 return dd->send_egress_err_status_cnt[12]; in access_tx_illegal_vl_err_cnt()
3668 return dd->send_egress_err_status_cnt[11]; in access_tx_sbrd_ctl_state_machine_parity_err_cnt()
3677 return dd->send_egress_err_status_cnt[10]; in access_egress_reserved_10_err_cnt()
3686 return dd->send_egress_err_status_cnt[9]; in access_egress_reserved_9_err_cnt()
3695 return dd->send_egress_err_status_cnt[8]; in access_tx_sdma_launch_intf_parity_err_cnt()
3704 return dd->send_egress_err_status_cnt[7]; in access_tx_pio_launch_intf_parity_err_cnt()
3713 return dd->send_egress_err_status_cnt[6]; in access_egress_reserved_6_err_cnt()
3722 return dd->send_egress_err_status_cnt[5]; in access_tx_incorrect_link_state_err_cnt()
3731 return dd->send_egress_err_status_cnt[4]; in access_tx_linkdown_err_cnt()
3740 return dd->send_egress_err_status_cnt[3]; in access_tx_egress_fifi_underrun_or_parity_err_cnt()
3749 return dd->send_egress_err_status_cnt[2]; in access_egress_reserved_2_err_cnt()
3758 return dd->send_egress_err_status_cnt[1]; in access_tx_pkt_integrity_mem_unc_err_cnt()
3767 return dd->send_egress_err_status_cnt[0]; in access_tx_pkt_integrity_mem_cor_err_cnt()
3780 return dd->send_err_status_cnt[2]; in access_send_csr_write_bad_addr_err_cnt()
3789 return dd->send_err_status_cnt[1]; in access_send_csr_read_bad_addr_err_cnt()
3798 return dd->send_err_status_cnt[0]; in access_send_csr_parity_cnt()
3811 return dd->sw_ctxt_err_status_cnt[4]; in access_pio_write_out_of_bounds_err_cnt()
3820 return dd->sw_ctxt_err_status_cnt[3]; in access_pio_write_overflow_err_cnt()
3829 return dd->sw_ctxt_err_status_cnt[2]; in access_pio_write_crosses_boundary_err_cnt()
3838 return dd->sw_ctxt_err_status_cnt[1]; in access_pio_disallowed_packet_err_cnt()
3847 return dd->sw_ctxt_err_status_cnt[0]; in access_pio_inconsistent_sop_err_cnt()
3860 return dd->sw_send_dma_eng_err_status_cnt[23]; in access_sdma_header_request_fifo_cor_err_cnt()
3869 return dd->sw_send_dma_eng_err_status_cnt[22]; in access_sdma_header_storage_cor_err_cnt()
3878 return dd->sw_send_dma_eng_err_status_cnt[21]; in access_sdma_packet_tracking_cor_err_cnt()
3887 return dd->sw_send_dma_eng_err_status_cnt[20]; in access_sdma_assembly_cor_err_cnt()
3896 return dd->sw_send_dma_eng_err_status_cnt[19]; in access_sdma_desc_table_cor_err_cnt()
3905 return dd->sw_send_dma_eng_err_status_cnt[18]; in access_sdma_header_request_fifo_unc_err_cnt()
3914 return dd->sw_send_dma_eng_err_status_cnt[17]; in access_sdma_header_storage_unc_err_cnt()
3923 return dd->sw_send_dma_eng_err_status_cnt[16]; in access_sdma_packet_tracking_unc_err_cnt()
3932 return dd->sw_send_dma_eng_err_status_cnt[15]; in access_sdma_assembly_unc_err_cnt()
3941 return dd->sw_send_dma_eng_err_status_cnt[14]; in access_sdma_desc_table_unc_err_cnt()
3950 return dd->sw_send_dma_eng_err_status_cnt[13]; in access_sdma_timeout_err_cnt()
3959 return dd->sw_send_dma_eng_err_status_cnt[12]; in access_sdma_header_length_err_cnt()
3968 return dd->sw_send_dma_eng_err_status_cnt[11]; in access_sdma_header_address_err_cnt()
3977 return dd->sw_send_dma_eng_err_status_cnt[10]; in access_sdma_header_select_err_cnt()
3986 return dd->sw_send_dma_eng_err_status_cnt[9]; in access_sdma_reserved_9_err_cnt()
3995 return dd->sw_send_dma_eng_err_status_cnt[8]; in access_sdma_packet_desc_overflow_err_cnt()
4004 return dd->sw_send_dma_eng_err_status_cnt[7]; in access_sdma_length_mismatch_err_cnt()
4012 return dd->sw_send_dma_eng_err_status_cnt[6]; in access_sdma_halt_err_cnt()
4021 return dd->sw_send_dma_eng_err_status_cnt[5]; in access_sdma_mem_read_err_cnt()
4030 return dd->sw_send_dma_eng_err_status_cnt[4]; in access_sdma_first_desc_err_cnt()
4039 return dd->sw_send_dma_eng_err_status_cnt[3]; in access_sdma_tail_out_of_bounds_err_cnt()
4048 return dd->sw_send_dma_eng_err_status_cnt[2]; in access_sdma_too_long_err_cnt()
4057 return dd->sw_send_dma_eng_err_status_cnt[1]; in access_sdma_gen_mismatch_err_cnt()
4066 return dd->sw_send_dma_eng_err_status_cnt[0]; in access_sdma_wrong_dw_err_cnt()
4076 u64 csr = entry->csr; in access_dc_rcv_err_cnt()
4080 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ? in access_dc_rcv_err_cnt()
4081 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors; in access_dc_rcv_err_cnt()
4083 dd->sw_rcv_bypass_packet_errors = 0; in access_dc_rcv_err_cnt()
4096 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
4097 ppd->ibport_data.rvp.cntr, vl, \
4114 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
5247 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT in is_ax()
5256 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT in is_bx()
5265 u32 is = IS_RCVURGENT_START + rcd->ctxt; in is_urg_masked()
5268 mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64))); in is_urg_masked()
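is_urg_masked() above maps a global interrupt source number to a (mask register, bit) pair: the mask CSRs are 8 bytes apart and each holds 64 sources, so source "is" lives in register is/64 at bit is%64. A small sketch with a toy register file standing in for read_csr():

	#include <stdint.h>
	#include <stdbool.h>

	static uint64_t toy_csrs[8];	/* toy backing store: 8 mask registers */

	static uint64_t toy_read_csr(uint64_t offset)
	{
		return toy_csrs[offset / 8];
	}

	/* INT_MASK_BASE stands in for CCE_INT_MASK's offset (illustrative) */
	#define INT_MASK_BASE 0

	static bool source_masked(uint32_t is)
	{
		uint64_t mask = toy_read_csr(INT_MASK_BASE + 8 * (is / 64));

		/* masked means the enable bit for this source is clear */
		return !(mask & ((uint64_t)1 << (is % 64)));
	}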
5274 * position and remaining length, respectively.
5292 len--; in append_str()
5302 len--; in append_str()
5330 len--; /* leave room for a nul */ in flag_string()
5350 --p; in flag_string()
5354 /* add final nul - space already allocated above */ in flag_string()
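The append_str()/flag_string() fragments above build a comma-separated list of set-flag names into a caller-supplied buffer, decrementing a remaining-length count as they go and reserving one byte up front for the nul. A self-contained sketch of that pattern (the table contents are invented for illustration):

	#include <stdio.h>

	struct flag_table { unsigned long long mask; const char *str; };

	static void flag_string(char *buf, int buf_len, unsigned long long flags,
				const struct flag_table *table, int table_size)
	{
		char *p = buf;
		int len = buf_len - 1;	/* leave room for a nul */
		int i, n, first = 1;

		for (i = 0; i < table_size && len > 0; i++) {
			if (!(flags & table[i].mask))
				continue;
			n = snprintf(p, (size_t)len + 1, "%s%s",
				     first ? "" : ",", table[i].str);
			if (n > len)	/* truncated: clamp to what fit */
				n = len;
			p += n;
			len -= n;
			first = 0;
		}
		*p = '\0';	/* space reserved above */
	}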
5569 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) { in handle_cce_err()
5572 start_freeze_handling(dd->pport, FREEZE_SELF); in handle_cce_err()
5577 incr_cntr64(&dd->cce_err_status_cnt[i]); in handle_cce_err()
5579 incr_cntr64(&dd->sw_cce_err_status_aggregate); in handle_cce_err()
5592 struct hfi1_pportdata *ppd = dd->pport; in update_rcverr_timer()
5595 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt && in update_rcverr_timer()
5596 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) { in update_rcverr_timer()
5601 queue_work(ppd->link_wq, &ppd->link_bounce_work); in update_rcverr_timer()
5603 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt; in update_rcverr_timer()
5605 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); in update_rcverr_timer()
5610 timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0); in init_rcverr()
5612 dd->rcv_ovfl_cnt = 0; in init_rcverr()
5613 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); in init_rcverr()
5618 if (dd->rcverr_timer.function) in free_rcverr()
5619 del_timer_sync(&dd->rcverr_timer); in free_rcverr()
5640 start_freeze_handling(dd->pport, flags); in handle_rxe_err()
5645 incr_cntr64(&dd->rcv_err_status_cnt[i]); in handle_rxe_err()
5658 incr_cntr64(&dd->misc_err_status_cnt[i]); in handle_misc_err()
5671 start_freeze_handling(dd->pport, 0); in handle_pio_err()
5675 incr_cntr64(&dd->send_pio_err_status_cnt[i]); in handle_pio_err()
5688 start_freeze_handling(dd->pport, 0); in handle_sdma_err()
5692 incr_cntr64(&dd->send_dma_err_status_cnt[i]); in handle_sdma_err()
5698 incr_cntr64(&ppd->port_xmit_discards); in __count_port_discards()
5703 __count_port_discards(dd->pport); in count_port_inactive()
5718 struct hfi1_pportdata *ppd = dd->pport; in handle_send_egress_err_info()
5760 incr_cntr64(&ppd->port_xmit_discards_vl[vl]); in handle_send_egress_err_info()
5762 incr_cntr64(&ppd->port_xmit_discards_vl in handle_send_egress_err_info()
5795 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET); in disallowed_pkt_engine()
5799 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5809 return -1; in engine_to_vl()
5812 m = rcu_dereference(dd->sdma_map); in engine_to_vl()
5813 vl = m->engine_to_vl[engine]; in engine_to_vl()
5820 * Translate the send context (software index) into a VL. Return -1 if the
5829 sci = &dd->send_contexts[sw_index]; in sc_to_vl()
5832 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15)) in sc_to_vl()
5833 return -1; in sc_to_vl()
5835 sc = sci->sc; in sc_to_vl()
5837 return -1; in sc_to_vl()
5838 if (dd->vld[15].sc == sc) in sc_to_vl()
5841 if (dd->vld[i].sc == sc) in sc_to_vl()
5844 return -1; in sc_to_vl()
5854 start_freeze_handling(dd->pport, 0); in handle_egress_err()
5857 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) in handle_egress_err()
5858 start_freeze_handling(dd->pport, 0); in handle_egress_err()
5862 /* fls64() returns a 1-based offset, we want it zero based */ in handle_egress_err()
5863 int shift = posn - 1; in handle_egress_err()
5886 incr_cntr64(&dd->send_egress_err_status_cnt[i]); in handle_egress_err()
5900 incr_cntr64(&dd->send_err_status_cnt[i]); in handle_txe_err()
5912 * through here to have a central location to correctly handle single-
5913 * or multi-shot errors.
5915 * For non per-context registers, call this routine with a context value
5916 * of 0 so the per-context offset is zero.
5931 reg = read_kctxt_csr(dd, context, eri->status); in interrupt_clear_down()
5934 write_kctxt_csr(dd, context, eri->clear, reg); in interrupt_clear_down()
5935 if (likely(eri->handler)) in interrupt_clear_down()
5936 eri->handler(dd, context, reg); in interrupt_clear_down()
5941 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n", in interrupt_clear_down()
5942 eri->desc, reg); in interrupt_clear_down()
5944 * Read-modify-write so any other masked bits in interrupt_clear_down()
5947 mask = read_kctxt_csr(dd, context, eri->mask); in interrupt_clear_down()
5949 write_kctxt_csr(dd, context, eri->mask, mask); in interrupt_clear_down()
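interrupt_clear_down() above is the central single- or multi-shot error handler: read the status register, write-to-clear what was seen, dispatch to the handler, and if the same bits keep re-asserting, mask them off with a read-modify-write so any other already-masked bits survive. A compact sketch over a toy register file (the accessors and repeat limit are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t toy_regs[64];	/* toy register file for illustration */
	static uint64_t rreg(uint32_t csr) { return toy_regs[csr]; }
	static void wreg(uint32_t csr, uint64_t v) { toy_regs[csr] = v; }

	struct err_reg_info {
		uint32_t status, clear, mask;
		void (*handler)(uint64_t bits);
		const char *desc;
	};

	static void clear_down(const struct err_reg_info *eri)
	{
		int count = 0;

		for (;;) {
			uint64_t reg = rreg(eri->status);

			if (!reg)
				break;
			wreg(eri->clear, reg);		/* ack this occurrence */
			if (eri->handler)
				eri->handler(reg);
			if (++count < 10)		/* arbitrary repeat limit */
				continue;
			fprintf(stderr, "Repeating %s bits 0x%llx - masking\n",
				eri->desc, (unsigned long long)reg);
			/* read-modify-write: keep other masked bits intact */
			wreg(eri->mask, rreg(eri->mask) & ~reg);
			break;
		}
	}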
5962 if (eri->handler) { in is_misc_err_int()
5965 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n", in is_misc_err_int()
5981 * clear-down mechanism cannot be used because we cannot clear the
5982 * error bits until several other long-running items are done first.
5997 sw_index = dd->hw_to_sw[hw_context]; in is_sendctxt_err_int()
5998 if (sw_index >= dd->num_send_contexts) { in is_sendctxt_err_int()
6004 sci = &dd->send_contexts[sw_index]; in is_sendctxt_err_int()
6005 spin_lock_irqsave(&dd->sc_lock, irq_flags); in is_sendctxt_err_int()
6006 sc = sci->sc; in is_sendctxt_err_int()
6010 spin_unlock_irqrestore(&dd->sc_lock, irq_flags); in is_sendctxt_err_int()
6030 if (sc->type != SC_USER) in is_sendctxt_err_int()
6031 queue_work(dd->pport->hfi1_wq, &sc->halt_work); in is_sendctxt_err_int()
6032 spin_unlock_irqrestore(&dd->sc_lock, irq_flags); in is_sendctxt_err_int()
6041 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]); in is_sendctxt_err_int()
6051 sde = &dd->per_sdma[source]; in handle_sdma_eng_err()
6053 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in handle_sdma_eng_err()
6055 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n", in handle_sdma_eng_err()
6056 sde->this_idx, source, (unsigned long long)status); in handle_sdma_eng_err()
6058 sde->err_cnt++; in handle_sdma_eng_err()
6068 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]); in handle_sdma_eng_err()
6078 struct sdma_engine *sde = &dd->per_sdma[source]; in is_sdma_eng_err_int()
6080 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in is_sdma_eng_err_int()
6082 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx, in is_sdma_eng_err_int()
6103 else if (eri->handler) in is_various_int()
6114 struct hfi1_pportdata *ppd = dd->pport; in handle_qsfp_int()
6123 ppd->driver_link_ready = 0; in handle_qsfp_int()
6129 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); in handle_qsfp_int()
6134 ppd->qsfp_info.cache_valid = 0; in handle_qsfp_int()
6135 ppd->qsfp_info.reset_needed = 0; in handle_qsfp_int()
6136 ppd->qsfp_info.limiting_active = 0; in handle_qsfp_int()
6137 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, in handle_qsfp_int()
6139 /* Invert the ModPresent pin now to detect plug-in */ in handle_qsfp_int()
6140 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : in handle_qsfp_int()
6143 if ((ppd->offline_disabled_reason > in handle_qsfp_int()
6146 (ppd->offline_disabled_reason == in handle_qsfp_int()
6148 ppd->offline_disabled_reason = in handle_qsfp_int()
6152 if (ppd->host_link_state == HLS_DN_POLL) { in handle_qsfp_int()
6159 queue_work(ppd->link_wq, &ppd->link_down_work); in handle_qsfp_int()
6165 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); in handle_qsfp_int()
6166 ppd->qsfp_info.cache_valid = 0; in handle_qsfp_int()
6167 ppd->qsfp_info.cache_refresh_required = 1; in handle_qsfp_int()
6168 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, in handle_qsfp_int()
6176 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : in handle_qsfp_int()
6179 ppd->offline_disabled_reason = in handle_qsfp_int()
6187 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); in handle_qsfp_int()
6188 ppd->qsfp_info.check_interrupt_flags = 1; in handle_qsfp_int()
6189 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); in handle_qsfp_int()
6194 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work); in handle_qsfp_int()
6208 return ret == HCMD_SUCCESS ? 0 : -EBUSY; in request_host_lcb_access()
6222 return ret == HCMD_SUCCESS ? 0 : -EBUSY; in request_8051_lcb_access()
6226 * Set the LCB selector - allow host access. The DCC selector always
6237 * Clear the LCB selector - allow 8051 access. The DCC selector always
6253 * -EBUSY if the 8051 has control and cannot be disturbed
6254 * -errno if unable to acquire access from the 8051
6258 struct hfi1_pportdata *ppd = dd->pport; in acquire_lcb_access()
6268 mutex_lock(&ppd->hls_lock); in acquire_lcb_access()
6270 while (!mutex_trylock(&ppd->hls_lock)) in acquire_lcb_access()
6275 if (ppd->host_link_state & HLS_DOWN) { in acquire_lcb_access()
6277 __func__, link_state_name(ppd->host_link_state)); in acquire_lcb_access()
6278 ret = -EBUSY; in acquire_lcb_access()
6282 if (dd->lcb_access_count == 0) { in acquire_lcb_access()
6292 dd->lcb_access_count++; in acquire_lcb_access()
6294 mutex_unlock(&ppd->hls_lock); in acquire_lcb_access()
6304 * -errno if unable to release access to the 8051
6316 mutex_lock(&dd->pport->hls_lock); in release_lcb_access()
6318 while (!mutex_trylock(&dd->pport->hls_lock)) in release_lcb_access()
6322 if (dd->lcb_access_count == 0) { in release_lcb_access()
6328 if (dd->lcb_access_count == 1) { in release_lcb_access()
6340 dd->lcb_access_count--; in release_lcb_access()
6342 mutex_unlock(&dd->pport->hls_lock); in release_lcb_access()
6351 * leaving access to the 8051. Assign access now - this constrains the call
6352 * to this routine to be after all LCB set-up is done. In particular, after
6353 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6357 dd->lcb_access_count = 0; in init_lcb_access()
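acquire_lcb_access()/release_lcb_access() above arbitrate the LCB between the host and the 8051 with a count held under a mutex: the hardware hand-off only happens on the 0->1 and 1->0 transitions. A userspace sketch of that shape, with the link-state checks and the real 8051 handshake reduced to stubs:

	#include <pthread.h>

	static pthread_mutex_t hls_lock = PTHREAD_MUTEX_INITIALIZER;
	static int lcb_access_count;

	static int request_host_lcb_access(void) { return 0; }	/* stub */
	static void request_8051_lcb_access(void) { }		/* stub */

	static int acquire_lcb_access(void)
	{
		int ret = 0;

		pthread_mutex_lock(&hls_lock);
		if (lcb_access_count == 0)
			ret = request_host_lcb_access();  /* 0 -> 1: take HW */
		if (ret == 0)
			lcb_access_count++;
		pthread_mutex_unlock(&hls_lock);
		return ret;
	}

	static void release_lcb_access(void)
	{
		pthread_mutex_lock(&hls_lock);
		if (lcb_access_count == 1)
			request_8051_lcb_access();	/* 1 -> 0: give back */
		lcb_access_count--;
		pthread_mutex_unlock(&hls_lock);
	}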
6377 struct hfi1_devdata *dd = ppd->dd; in handle_8051_request()
6485 dd->vl15buf_cached = 0; in reset_link_credits()
6508 ppd->sm_trap_qp = 0x0; in set_linkup_defaults()
6509 ppd->sa_qp = 0x1; in set_linkup_defaults()
6525 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN); in lcb_shutdown()
6533 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); in lcb_shutdown()
6544 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6549 lockdep_assert_held(&dd->dc8051_lock); in _dc_shutdown()
6551 if (dd->dc_shutdown) in _dc_shutdown()
6554 dd->dc_shutdown = 1; in _dc_shutdown()
6567 mutex_lock(&dd->dc8051_lock); in dc_shutdown()
6569 mutex_unlock(&dd->dc8051_lock); in dc_shutdown()
6575 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6580 lockdep_assert_held(&dd->dc8051_lock); in _dc_start()
6582 if (!dd->dc_shutdown) in _dc_start()
6595 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); in _dc_start()
6596 dd->dc_shutdown = 0; in _dc_start()
6601 mutex_lock(&dd->dc8051_lock); in dc_start()
6603 mutex_unlock(&dd->dc8051_lock); in dc_start()
6614 if (dd->icode != ICODE_FPGA_EMULATION) in adjust_lcb_for_fpga_serdes()
6694 * This is a work-queue function outside of the interrupt.
6700 struct hfi1_devdata *dd = ppd->dd; in handle_sma_message()
6705 * msg is bytes 1-4 of the 40-bit idle message - the command code in handle_sma_message()
6718 * See OPAv1 table 9-14 - HFI and External Switch Ports Key in handle_sma_message()
6723 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED)) in handle_sma_message()
6724 ppd->neighbor_normal = 1; in handle_sma_message()
6728 * See OPAv1 table 9-14 - HFI and External Switch Ports Key in handle_sma_message()
6733 if (ppd->host_link_state == HLS_UP_ARMED && in handle_sma_message()
6734 ppd->is_active_optimize_enabled) { in handle_sma_message()
6735 ppd->neighbor_normal = 1; in handle_sma_message()
6757 spin_lock_irqsave(&dd->rcvctrl_lock, flags); in adjust_rcvctrl()
6762 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags); in adjust_rcvctrl()
6780 struct hfi1_devdata *dd = ppd->dd; in start_freeze_handling()
6789 dd->flags |= HFI1_FROZEN; in start_freeze_handling()
6796 /* do halt pre-handling on all enabled send contexts */ in start_freeze_handling()
6797 for (i = 0; i < dd->num_send_contexts; i++) { in start_freeze_handling()
6798 sc = dd->send_contexts[i].sc; in start_freeze_handling()
6799 if (sc && (sc->flags & SCF_ENABLED)) in start_freeze_handling()
6811 /* queue non-interrupt handler */ in start_freeze_handling()
6812 queue_work(ppd->hfi1_wq, &ppd->freeze_work); in start_freeze_handling()
6816 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6863 for (i = 0; i < dd->num_rcv_contexts; i++) { in rxe_freeze()
6871 * Unfreeze handling for the RXE block - kernel contexts only.
6873 * handling on a per-context basis as they call into the driver.
6883 for (i = 0; i < dd->num_rcv_contexts; i++) { in rxe_kernel_unfreeze()
6886 /* Ensure all non-user contexts(including vnic) are enabled */ in rxe_kernel_unfreeze()
6888 (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) { in rxe_kernel_unfreeze()
6905 * Non-interrupt SPC freeze handling.
6907 * This is a work-queue function outside of the triggering interrupt.
6913 struct hfi1_devdata *dd = ppd->dd; in handle_freeze()
6926 /* do send egress freeze steps - nothing to do */ in handle_freeze()
6932 * Unfreeze the hardware - clear the freeze, wait for each in handle_freeze()
6951 /* do send egress unfreeze steps - nothing to do */ in handle_freeze()
6958 * it disables and re-enables RXE. Mark the device unfrozen in handle_freeze()
6969 dd->flags &= ~HFI1_FROZEN; in handle_freeze()
6970 wake_up(&dd->event_queue); in handle_freeze()
6976 * update_xmit_counters - update PortXmitWait/PortVlXmitWait
6991 link_speed = get_link_speed(ppd->link_speed_active); in update_xmit_counters()
7004 * This is a work-queue function outside of the interrupt.
7010 struct hfi1_devdata *dd = ppd->dd; in handle_link_up()
7032 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) in handle_link_up()
7033 set_up_vl15(dd, dd->vl15buf_cached); in handle_link_up()
7036 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { in handle_link_up()
7037 /* oops - current speed is not enabled, bounce */ in handle_link_up()
7040 ppd->link_speed_active, ppd->link_speed_enabled); in handle_link_up()
7054 ppd->neighbor_guid = 0; in reset_neighbor_info()
7055 ppd->neighbor_port_number = 0; in reset_neighbor_info()
7056 ppd->neighbor_type = 0; in reset_neighbor_info()
7057 ppd->neighbor_fm_security = 0; in reset_neighbor_info()
7063 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
7132 * This is a work-queue function outside of the interrupt.
7143 if ((ppd->host_link_state & in handle_link_down()
7145 ppd->port_type == PORT_TYPE_FIXED) in handle_link_down()
7146 ppd->offline_disabled_reason = in handle_link_down()
7150 was_up = !!(ppd->host_link_state & HLS_UP); in handle_link_down()
7152 xchg(&ppd->is_link_down_queued, 0); in handle_link_down()
7157 read_link_down_reason(ppd->dd, &link_down_reason); in handle_link_down()
7161 dd_dev_info(ppd->dd, "%sUnexpected link down\n", in handle_link_down()
7169 read_planned_down_reason_code(ppd->dd, &neigh_reason); in handle_link_down()
7170 dd_dev_info(ppd->dd, in handle_link_down()
7176 dd_dev_info(ppd->dd, in handle_link_down()
7181 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n", in handle_link_down()
7187 * If no reason, assume peer-initiated but missed in handle_link_down()
7200 if (was_up && ppd->local_link_down_reason.sma == 0 && in handle_link_down()
7201 ppd->neigh_link_down_reason.sma == 0) { in handle_link_down()
7202 ppd->local_link_down_reason.sma = in handle_link_down()
7203 ppd->local_link_down_reason.latest; in handle_link_down()
7204 ppd->neigh_link_down_reason.sma = in handle_link_down()
7205 ppd->neigh_link_down_reason.latest; in handle_link_down()
7211 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); in handle_link_down()
7217 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) in handle_link_down()
7218 dc_shutdown(ppd->dd); in handle_link_down()
7231 if (ppd->host_link_state & HLS_UP) { in handle_link_bounce()
7235 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n", in handle_link_bounce()
7236 __func__, link_state_name(ppd->host_link_state)); in handle_link_bounce()
7296 if (ppd->pkeys[2] != 0) { in clear_full_mgmt_pkey()
7297 ppd->pkeys[2] = 0; in clear_full_mgmt_pkey()
7299 hfi1_event_pkey_change(ppd->dd, ppd->port); in clear_full_mgmt_pkey()
7314 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup) in link_width_to_bits()
7372 if ((dd->icode == ICODE_RTL_SILICON) && in get_link_widths()
7373 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) { in get_link_widths()
7377 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G; in get_link_widths()
7380 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; in get_link_widths()
7386 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; in get_link_widths()
7430 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7441 /* get end-of-LNI link widths */ in get_linkup_link_widths()
7442 get_linkup_widths(ppd->dd, &tx_width, &rx_width); in get_linkup_link_widths()
7445 ppd->link_width_active = tx_width; in get_linkup_link_widths()
7447 ppd->link_width_downgrade_tx_active = ppd->link_width_active; in get_linkup_link_widths()
7448 ppd->link_width_downgrade_rx_active = ppd->link_width_active; in get_linkup_link_widths()
7450 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported; in get_linkup_link_widths()
7452 ppd->current_egress_rate = active_egress_rate(ppd); in get_linkup_link_widths()
7458 * This is a work-queue function outside of the interrupt.
7464 struct hfi1_devdata *dd = ppd->dd; in handle_verify_cap()
7511 * about the peer Z value - our sent vAU is 3 (hardwired) and is not in handle_verify_cap()
7520 * credits value and wait for link-up interrupt to set it. in handle_verify_cap()
7523 dd->vl15buf_cached = vl15buf; in handle_verify_cap()
7526 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc; in handle_verify_cap()
7552 ppd->link_speed_active = 0; /* invalid value */ in handle_verify_cap()
7553 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { in handle_verify_cap()
7557 ppd->link_speed_active = OPA_LINK_SPEED_12_5G; in handle_verify_cap()
7560 ppd->link_speed_active = OPA_LINK_SPEED_25G; in handle_verify_cap()
7565 u8 rate = remote_tx_rate & ppd->local_tx_rate; in handle_verify_cap()
7568 ppd->link_speed_active = OPA_LINK_SPEED_25G; in handle_verify_cap()
7570 ppd->link_speed_active = OPA_LINK_SPEED_12_5G; in handle_verify_cap()
7572 if (ppd->link_speed_active == 0) { in handle_verify_cap()
7575 ppd->link_speed_active = OPA_LINK_SPEED_25G; in handle_verify_cap()
7585 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8; in handle_verify_cap()
7587 ppd->port_ltp_crc_mode |= in handle_verify_cap()
7588 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4; in handle_verify_cap()
7590 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val); in handle_verify_cap()
7612 /* pull LCB fifos out of reset - all fifo clocks must be stable */ in handle_verify_cap()
7624 * apply_link_downgrade_policy - Apply the link width downgrade enabled
7650 mutex_lock(&ppd->hls_lock); in apply_link_downgrade_policy()
7652 if (ppd->host_link_state & HLS_DOWN) { in apply_link_downgrade_policy()
7654 if (ppd->host_link_state & HLS_GOING_UP) { in apply_link_downgrade_policy()
7656 mutex_unlock(&ppd->hls_lock); in apply_link_downgrade_policy()
7660 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7667 lwde = ppd->link_width_downgrade_enabled; in apply_link_downgrade_policy()
7670 get_link_widths(ppd->dd, &tx, &rx); in apply_link_downgrade_policy()
7671 ppd->link_width_downgrade_tx_active = tx; in apply_link_downgrade_policy()
7672 ppd->link_width_downgrade_rx_active = rx; in apply_link_downgrade_policy()
7675 if (ppd->link_width_downgrade_tx_active == 0 || in apply_link_downgrade_policy()
7676 ppd->link_width_downgrade_rx_active == 0) { in apply_link_downgrade_policy()
7678 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n"); in apply_link_downgrade_policy()
7684 if ((ppd->link_width_active != in apply_link_downgrade_policy()
7685 ppd->link_width_downgrade_tx_active) || in apply_link_downgrade_policy()
7686 (ppd->link_width_active != in apply_link_downgrade_policy()
7687 ppd->link_width_downgrade_rx_active)) { in apply_link_downgrade_policy()
7688 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7690 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7692 ppd->link_width_active, in apply_link_downgrade_policy()
7693 ppd->link_width_downgrade_tx_active, in apply_link_downgrade_policy()
7694 ppd->link_width_downgrade_rx_active); in apply_link_downgrade_policy()
7698 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 || in apply_link_downgrade_policy()
7699 (lwde & ppd->link_width_downgrade_rx_active) == 0) { in apply_link_downgrade_policy()
7701 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7703 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7705 lwde, ppd->link_width_downgrade_tx_active, in apply_link_downgrade_policy()
7706 ppd->link_width_downgrade_rx_active); in apply_link_downgrade_policy()
7712 mutex_unlock(&ppd->hls_lock); in apply_link_downgrade_policy()
7727 * This is a work-queue function outside of the interrupt.
7734 dd_dev_info(ppd->dd, "8051: Link width downgrade\n"); in handle_link_downgrade()
7736 update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active); in handle_link_downgrade()
7771 struct hfi1_pportdata *ppd = dd->pport; in handle_8051_interrupt()
7797 if (ppd->host_link_state in handle_8051_interrupt()
7810 ppd->unknown_frame_count++; in handle_8051_interrupt()
7836 queue_work(ppd->link_wq, &ppd->sma_message_work); in handle_8051_interrupt()
7841 queue_work(ppd->link_wq, &ppd->link_up_work); in handle_8051_interrupt()
7849 queue_work(ppd->link_wq, &ppd->link_vc_work); in handle_8051_interrupt()
7864 queue_work(ppd->link_wq, &ppd->link_downgrade_work); in handle_8051_interrupt()
7902 if ((ppd->host_link_state & in handle_8051_interrupt()
7904 ppd->link_enabled == 0) { in handle_8051_interrupt()
7906 __func__, ppd->host_link_state, in handle_8051_interrupt()
7907 ppd->link_enabled); in handle_8051_interrupt()
7909 if (xchg(&ppd->is_link_down_queued, 1) == 1) in handle_8051_interrupt()
7914 queue_work(ppd->link_wq, &ppd->link_down_work); in handle_8051_interrupt()
7969 struct hfi1_pportdata *ppd = dd->pport; in handle_dcc_err()
7974 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) { in handle_dcc_err()
7976 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK; in handle_dcc_err()
7978 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK; in handle_dcc_err()
7984 struct hfi1_pportdata *ppd = dd->pport; in handle_dcc_err()
7985 /* this counter saturates at (2^32) - 1 */ in handle_dcc_err()
7986 if (ppd->link_downed < (u32)UINT_MAX) in handle_dcc_err()
7987 ppd->link_downed++; in handle_dcc_err()
7995 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) { in handle_dcc_err()
7996 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK; in handle_dcc_err()
7998 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK; in handle_dcc_err()
8012 if (ppd->port_error_action & in handle_dcc_err()
8031 do_bounce = ppd->port_error_action & in handle_dcc_err()
8048 if (!(dd->err_info_rcvport.status_and_code & in handle_dcc_err()
8050 dd->err_info_rcvport.status_and_code = in handle_dcc_err()
8053 dd->err_info_rcvport.status_and_code |= in handle_dcc_err()
8059 dd->err_info_rcvport.packet_flit1 = hdr0; in handle_dcc_err()
8060 dd->err_info_rcvport.packet_flit2 = hdr1; in handle_dcc_err()
8083 do_bounce = ppd->port_error_action & in handle_dcc_err()
8107 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev))) in handle_dcc_err()
8122 queue_work(ppd->link_wq, &ppd->link_bounce_work); in handle_dcc_err()
8141 if (eri->handler) { in is_dc_int()
8148 * and it is non-maskable. This is because if a parity in is_dc_int()
8172 * 0 - N-1 = SDma
8173 * N - 2N-1 = SDmaProgress
8174 * 2N - 3N-1 = SDmaIdle
8186 sdma_dumpstate(&dd->per_sdma[which]); in is_sdma_eng_int()
8189 if (likely(what < 3 && which < dd->num_sdma)) { in is_sdma_eng_int()
8190 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source); in is_sdma_eng_int()
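The is_sdma_eng_int() fragments above decode one source number into an interrupt kind and an engine: with N engines, consecutive blocks of N sources select SDma, SDmaProgress, and SDmaIdle for the same engine, which is why the code checks "what < 3 && which < dd->num_sdma". A sketch of that decomposition (N is a stand-in value):

	#define NUM_SDMA_ENGINES 16	/* stand-in for the chip's N */

	static void decode_sdma_source(unsigned int source,
				       unsigned int *what, unsigned int *which)
	{
		*what = source / NUM_SDMA_ENGINES;   /* 0=SDma 1=Progress 2=Idle */
		*which = source % NUM_SDMA_ENGINES;  /* engine index */
	}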
8198 * is_rcv_avail_int() - User receive context available IRQ handler
8205 * and can only be used for non-threaded IRQs.
8212 if (likely(source < dd->num_rcv_contexts)) { in is_rcv_avail_int()
8230 * is_rcv_urgent_int() - User receive context urgent IRQ handler
8243 if (likely(source < dd->num_rcv_contexts)) { in is_rcv_urgent_int()
8299 * Interrupt source interrupt - called when the given source has an interrupt.
8300 * Source is a bit index into an array of 64-bit integers.
8306 /* avoids a double compare by walking the table in-order */ in is_interrupt()
8307 for (entry = &is_table[0]; entry->is_name; entry++) { in is_interrupt()
8308 if (source <= entry->end) { in is_interrupt()
8310 entry->is_int(dd, source - entry->start); in is_interrupt()
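is_interrupt() above walks a range table sorted by end value, so a single compare per entry finds the owner of a source bit, and the handler receives a range-relative index. A self-contained sketch of the same walk (the ranges and handlers are examples only):

	#include <stdio.h>

	struct is_entry {
		unsigned int start, end;
		void (*is_int)(unsigned int relative);
		const char *is_name;	/* NULL terminates the table */
	};

	static void handle_general(unsigned int n) { printf("general %u\n", n); }
	static void handle_sdma(unsigned int n)    { printf("sdma %u\n", n); }

	static const struct is_entry is_table[] = {
		{  0, 15, handle_general, "General" },
		{ 16, 63, handle_sdma,    "SDmaInt" },
		{  0,  0, NULL, NULL },
	};

	static void is_interrupt(unsigned int source)
	{
		const struct is_entry *entry;

		/* table is sorted by end, so one compare per entry suffices */
		for (entry = &is_table[0]; entry->is_name; entry++) {
			if (source <= entry->end) {
				entry->is_int(source - entry->start);
				return;
			}
		}
		fprintf(stderr, "invalid interrupt source %u\n", source);
	}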
8319 * general_interrupt() - General interrupt handler
8323 * This is able to correctly handle all non-threaded interrupts. Receive
8335 this_cpu_inc(*dd->int_counter); in general_interrupt()
8339 if (dd->gi_mask[i] == 0) { in general_interrupt()
8344 dd->gi_mask[i]; in general_interrupt()
8363 struct hfi1_devdata *dd = sde->dd; in sdma_interrupt()
8367 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in sdma_interrupt()
8372 this_cpu_inc(*dd->int_counter); in sdma_interrupt()
8377 & sde->imask; in sdma_interrupt()
8388 sde->this_idx); in sdma_interrupt()
8400 struct hfi1_devdata *dd = rcd->dd; in clear_recv_intr()
8401 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg); in clear_recv_intr()
8403 write_csr(dd, addr, rcd->imask); in clear_recv_intr()
8411 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask); in force_recv_intr()
8415 * Return non-zero if a packet is present.
8432 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); in check_packet_present()
8443 struct hfi1_devdata *dd = rcd->dd; in receive_interrupt_common()
8446 this_cpu_inc(*dd->int_counter); in receive_interrupt_common()
8451 * __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt
8465 * hfi1_rcd_eoi_intr() - End of Interrupt processing action
8486 * hfi1_netdev_rx_napi - napi poll function to move eoi inline
8487 * @napi: pointer to napi object
8488 * @budget: netdev budget
8494 struct hfi1_ctxtdata *rcd = rxq->rcd; in hfi1_netdev_rx_napi()
8497 work_done = rcd->do_interrupt(rcd, budget); in hfi1_netdev_rx_napi()
8514 if (likely(rcd->napi)) { in receive_context_interrupt_napi()
8515 if (likely(napi_schedule_prep(rcd->napi))) in receive_context_interrupt_napi()
8516 __napi_schedule_irqoff(rcd->napi); in receive_context_interrupt_napi()
8521 rcd->ctxt); in receive_context_interrupt_napi()
8544 disposition = rcd->do_interrupt(rcd, 0); in receive_context_interrupt()
8567 (void)rcd->do_interrupt(rcd, 1); in receive_context_thread()
8613 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { in read_lcb_via_8051()
8619 return -EBUSY; in read_lcb_via_8051()
8622 /* register is an index of LCB registers: (offset - base) / 8 */ in read_lcb_via_8051()
8623 regno = (addr - DC_LCB_CFG_RUN) >> 3; in read_lcb_via_8051()
8626 return -EBUSY; in read_lcb_via_8051()
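The "(offset - base) / 8" comment above reflects that LCB CSRs sit 8 bytes apart, so the 8051 proxy addresses them by index rather than byte offset. Spelled out, with an illustrative base value:

	#define LCB_BASE 0x0	/* stand-in for DC_LCB_CFG_RUN's byte offset */

	static unsigned int lcb_regno(unsigned long addr)
	{
		return (addr - LCB_BASE) >> 3;	/* same as / 8 */
	}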
8656 if (likely(ret != -EBUSY)) in update_lcb_cache()
8673 return -1; in read_lcb_cache()
8678 * Return 0 on success, -EBUSY on failure.
8682 struct hfi1_pportdata *ppd = dd->pport; in read_lcb_csr()
8685 if (ppd->host_link_state & HLS_UP) in read_lcb_csr()
8688 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) { in read_lcb_csr()
8690 return -EBUSY; in read_lcb_csr()
8707 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || in write_lcb_via_8051()
8708 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) { in write_lcb_via_8051()
8714 return -EBUSY; in write_lcb_via_8051()
8717 /* register is an index of LCB registers: (offset - base) / 8 */ in write_lcb_via_8051()
8718 regno = (addr - DC_LCB_CFG_RUN) >> 3; in write_lcb_via_8051()
8721 return -EBUSY; in write_lcb_via_8051()
8727 * Return 0 on success, -EBUSY on failure.
8731 struct hfi1_pportdata *ppd = dd->pport; in write_lcb_csr()
8734 if (ppd->host_link_state & HLS_UP) in write_lcb_csr()
8737 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) in write_lcb_csr()
8738 return -EBUSY; in write_lcb_csr()
8758 mutex_lock(&dd->dc8051_lock); in do_8051_command()
8761 if (dd->dc_shutdown) { in do_8051_command()
8762 return_code = -ENODEV; in do_8051_command()
8776 if (dd->dc8051_timed_out) { in do_8051_command()
8777 if (dd->dc8051_timed_out > 1) { in do_8051_command()
8781 return_code = -ENXIO; in do_8051_command()
8800 * 39:00 -> in_data[47:8] in do_8051_command()
8801 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE in do_8051_command()
8802 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA in do_8051_command()
8806 /* must preserve COMPLETED - it is tied to hardware */ in do_8051_command()
8836 dd->dc8051_timed_out++; in do_8051_command()
8840 return_code = -ETIMEDOUT; in do_8051_command()
8854 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT); in do_8051_command()
8859 dd->dc8051_timed_out = 0; in do_8051_command()
8866 mutex_unlock(&dd->dc8051_lock); in do_8051_command()
8896 * Return 0 on success, -errno on failure
8913 /* read is in 8-byte chunks, hardware will truncate the address down */ in read_8051_config()
8934 u32 frame; in write_vc_local_phy() local
8936 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT in write_vc_local_phy()
8939 GENERAL_CONFIG, frame); in write_vc_local_phy()
8945 u32 frame; in write_vc_local_fabric() local
8947 frame = (u32)vau << VAU_SHIFT in write_vc_local_fabric()
8953 GENERAL_CONFIG, frame); in write_vc_local_fabric()
8959 u32 frame; in read_vc_local_link_mode() local
8962 &frame); in read_vc_local_link_mode()
8963 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK; in read_vc_local_link_mode()
8964 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK; in read_vc_local_link_mode()
8965 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK; in read_vc_local_link_mode()
8973 u32 frame; in write_vc_local_link_mode() local
8975 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT in write_vc_local_link_mode()
8979 frame); in write_vc_local_link_mode()
8985 u32 frame; in write_local_device_id() local
8987 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT) in write_local_device_id()
8989 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame); in write_local_device_id()
8995 u32 frame; in read_remote_device_id() local
8997 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame); in read_remote_device_id()
8998 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK; in read_remote_device_id()
8999 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT) in read_remote_device_id()
9005 u32 frame; in write_host_interface_version() local
9009 read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame); in write_host_interface_version()
9011 frame &= ~mask; in write_host_interface_version()
9012 frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT); in write_host_interface_version()
9014 frame); in write_host_interface_version()
9020 u32 frame; in read_misc_status() local
9022 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame); in read_misc_status()
9023 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) & in read_misc_status()
9025 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) & in read_misc_status()
9028 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame); in read_misc_status()
9029 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) & in read_misc_status()
9036 u32 frame; in read_vc_remote_phy() local
9038 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame); in read_vc_remote_phy()
9039 *power_management = (frame >> POWER_MANAGEMENT_SHIFT) in read_vc_remote_phy()
9041 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT) in read_vc_remote_phy()
9048 u32 frame; in read_vc_remote_fabric() local
9050 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame); in read_vc_remote_fabric()
9051 *vau = (frame >> VAU_SHIFT) & VAU_MASK; in read_vc_remote_fabric()
9052 *z = (frame >> Z_SHIFT) & Z_MASK; in read_vc_remote_fabric()
9053 *vcu = (frame >> VCU_SHIFT) & VCU_MASK; in read_vc_remote_fabric()
9054 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK; in read_vc_remote_fabric()
9055 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK; in read_vc_remote_fabric()
9062 u32 frame; in read_vc_remote_link_width() local
9065 &frame); in read_vc_remote_link_width()
9066 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT) in read_vc_remote_link_width()
9068 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK; in read_vc_remote_link_width()
9073 u32 frame; in read_local_lni() local
9075 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame); in read_local_lni()
9076 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK; in read_local_lni()
9091 u32 frame; in hfi1_read_link_quality() local
9095 if (dd->pport->host_link_state & HLS_UP) { in hfi1_read_link_quality()
9097 &frame); in hfi1_read_link_quality()
9099 *link_quality = (frame >> LINK_QUALITY_SHIFT) in hfi1_read_link_quality()
9106 u32 frame; in read_planned_down_reason_code() local
9108 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame); in read_planned_down_reason_code()
9109 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK; in read_planned_down_reason_code()
9114 u32 frame; in read_link_down_reason() local
9116 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame); in read_link_down_reason()
9117 *ldr = (frame & 0xff); in read_link_down_reason()
9126 u32 frame; in read_tx_settings() local
9129 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame); in read_tx_settings()
9130 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT) in read_tx_settings()
9132 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT) in read_tx_settings()
9134 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT) in read_tx_settings()
9136 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK; in read_tx_settings()
9146 u32 frame; in write_tx_settings() local
9149 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT in write_tx_settings()
9153 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame); in write_tx_settings()
9159 * Returns 0 on success, -EINVAL on error
9169 return -EINVAL; in read_idle_message()
9181 * Returns 0 on success, -EINVAL on error
9192 * Returns 0 on success, -EINVAL on error
9203 return -EINVAL; in send_idle_message()
9211 * Returns 0 on success, -EINVAL on error
9226 * return 0 on success, -errno on error
9247 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { in do_quick_linkup()
9263 * sides must be done with LCB set-up before either in do_quick_linkup()
9292 ret = -EINVAL; in do_quick_linkup()
9311 * The simulator has only one loopback option - LCB. Switch in init_loopback()
9316 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) && in init_loopback()
9330 /* LCB loopback - handled at poll time */ in init_loopback()
9335 if (dd->icode == ICODE_FPGA_EMULATION) { in init_loopback()
9338 return -EINVAL; in init_loopback()
9348 return -EINVAL; in init_loopback()
9364 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) }, in opa_to_vc_link_widths()
9365 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) }, in opa_to_vc_link_widths()
9366 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) }, in opa_to_vc_link_widths()
9367 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) }, in opa_to_vc_link_widths()
9382 struct hfi1_devdata *dd = ppd->dd; in set_local_link_attributes()
9391 /* set the local tx rate - need to read-modify-write */ in set_local_link_attributes()
9393 &rx_polarity_inversion, &ppd->local_tx_rate); in set_local_link_attributes()
9397 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { in set_local_link_attributes()
9399 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G) in set_local_link_attributes()
9400 ppd->local_tx_rate = 1; in set_local_link_attributes()
9402 ppd->local_tx_rate = 0; in set_local_link_attributes()
9405 ppd->local_tx_rate = 0; in set_local_link_attributes()
9406 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G) in set_local_link_attributes()
9407 ppd->local_tx_rate |= 2; in set_local_link_attributes()
9408 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G) in set_local_link_attributes()
9409 ppd->local_tx_rate |= 1; in set_local_link_attributes()
9414 rx_polarity_inversion, ppd->local_tx_rate); in set_local_link_attributes()
9436 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init, in set_local_link_attributes()
9437 ppd->port_crc_mode_enabled); in set_local_link_attributes()
9453 if (dd->dc8051_ver >= dc8051_ver(1, 25, 0)) in set_local_link_attributes()
9458 ppd->link_width_enabled)); in set_local_link_attributes()
9463 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev); in set_local_link_attributes()
9487 if (!ppd->driver_link_ready) { in start_link()
9488 dd_dev_info(ppd->dd, in start_link()
9506 struct hfi1_devdata *dd = ppd->dd; in wait_for_qsfp_init()
9511 * Some QSFP cables have a quirk that asserts the IntN line as a side in wait_for_qsfp_init()
9512 * effect of power up on plug-in. We ignore this false positive in wait_for_qsfp_init()
9515 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the in wait_for_qsfp_init()
9521 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1) in wait_for_qsfp_init()
9525 mask = read_csr(dd, dd->hfi1_id ? in wait_for_qsfp_init()
9540 struct hfi1_devdata *dd = ppd->dd; in set_qsfp_int_n()
9543 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK); in set_qsfp_int_n()
9547 * when we re-enable the IntN pin in set_qsfp_int_n()
9549 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, in set_qsfp_int_n()
9555 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); in set_qsfp_int_n()
9560 struct hfi1_devdata *dd = ppd->dd; in reset_qsfp()
9570 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); in reset_qsfp()
9573 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); in reset_qsfp()
9579 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); in reset_qsfp()
9600 struct hfi1_devdata *dd = ppd->dd; in handle_qsfp_error_conditions()
9615 if (ppd->host_link_state & HLS_DOWN) in handle_qsfp_error_conditions()
9690 /* Bytes 9-10 and 11-12 are reserved */ in handle_qsfp_error_conditions()
9691 /* Bytes 13-15 are vendor specific */ in handle_qsfp_error_conditions()
9704 ppd = qd->ppd; in qsfp_event()
9705 dd = ppd->dd; in qsfp_event()
9711 if (ppd->host_link_state == HLS_DN_DISABLE) { in qsfp_event()
9712 dd_dev_info(ppd->dd, in qsfp_event()
9719 * Turn DC back on after cable has been re-inserted. Up until in qsfp_event()
9724 if (qd->cache_refresh_required) { in qsfp_event()
9738 if (qd->check_interrupt_flags) { in qsfp_event()
9741 if (one_qsfp_read(ppd, dd->hfi1_id, 6, in qsfp_event()
9751 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); in qsfp_event()
9752 ppd->qsfp_info.check_interrupt_flags = 0; in qsfp_event()
9753 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, in qsfp_event()
9761 struct hfi1_pportdata *ppd = dd->pport; in init_qsfp_int()
9766 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, in init_qsfp_int()
9768 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, in init_qsfp_int()
9777 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT, in init_qsfp_int()
9781 if (!dd->hfi1_id) in init_qsfp_int()
9788 * Do a one-time initialization of the LCB block.
9793 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) in init_lcb()
9809 * Perform a test read on the QSFP. Return 0 on success, -ERRNO
9821 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd)) in test_qsfp_read()
9825 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1); in test_qsfp_read()
9829 return -EIO; in test_qsfp_read()
9851 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) { in try_start_link()
9852 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n"); in try_start_link()
9855 dd_dev_info(ppd->dd, in try_start_link()
9857 (int)ppd->qsfp_retry_count); in try_start_link()
9858 ppd->qsfp_retry_count++; in try_start_link()
9859 queue_delayed_work(ppd->link_wq, &ppd->start_link_work, in try_start_link()
9863 ppd->qsfp_retry_count = 0; in try_start_link()
9880 struct hfi1_devdata *dd = ppd->dd; in bringup_serdes()
9887 guid = ppd->guids[HFI1_PORT_GUID_INDEX]; in bringup_serdes()
9889 if (dd->base_guid) in bringup_serdes()
9890 guid = dd->base_guid + ppd->port - 1; in bringup_serdes()
9891 ppd->guids[HFI1_PORT_GUID_INDEX] = guid; in bringup_serdes()
9895 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP; in bringup_serdes()
9897 /* one-time init of the LCB */ in bringup_serdes()
9907 if (ppd->port_type == PORT_TYPE_QSFP) { in bringup_serdes()
9919 struct hfi1_devdata *dd = ppd->dd; in hfi1_quiet_serdes()
9928 ppd->driver_link_ready = 0; in hfi1_quiet_serdes()
9929 ppd->link_enabled = 0; in hfi1_quiet_serdes()
9931 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */ in hfi1_quiet_serdes()
9932 flush_delayed_work(&ppd->start_link_work); in hfi1_quiet_serdes()
9933 cancel_delayed_work_sync(&ppd->start_link_work); in hfi1_quiet_serdes()
9935 ppd->offline_disabled_reason = in hfi1_quiet_serdes()
9943 cancel_work_sync(&ppd->freeze_work); in hfi1_quiet_serdes()
9952 for (i = 0; i < dd->num_pports; i++, ppd++) { in init_cpu_counters()
9953 ppd->ibport_data.rvp.rc_acks = NULL; in init_cpu_counters()
9954 ppd->ibport_data.rvp.rc_qacks = NULL; in init_cpu_counters()
9955 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64); in init_cpu_counters()
9956 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64); in init_cpu_counters()
9957 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64); in init_cpu_counters()
9958 if (!ppd->ibport_data.rvp.rc_acks || in init_cpu_counters()
9959 !ppd->ibport_data.rvp.rc_delayed_comp || in init_cpu_counters()
9960 !ppd->ibport_data.rvp.rc_qacks) in init_cpu_counters()
9961 return -ENOMEM; in init_cpu_counters()
9975 if (!(dd->flags & HFI1_PRESENT)) in hfi1_put_tid()
9994 trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg); in hfi1_put_tid()
9995 writeq(reg, dd->rcvarray_wc + (index * 8)); in hfi1_put_tid()
10010 struct hfi1_devdata *dd = rcd->dd; in hfi1_clear_tids()
10014 for (i = rcd->eager_base; i < rcd->eager_base + in hfi1_clear_tids()
10015 rcd->egrbufs.alloced; i++) in hfi1_clear_tids()
10018 for (i = rcd->expected_base; in hfi1_clear_tids()
10019 i < rcd->expected_base + rcd->expected_count; i++) in hfi1_clear_tids()
10057 struct hfi1_devdata *dd = ppd->dd; in hfi1_get_ib_cfg()
10061 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */ in hfi1_get_ib_cfg()
10062 val = ppd->link_width_enabled; in hfi1_get_ib_cfg()
10064 case HFI1_IB_CFG_LWID: /* currently active Link-width */ in hfi1_get_ib_cfg()
10065 val = ppd->link_width_active; in hfi1_get_ib_cfg()
10068 val = ppd->link_speed_enabled; in hfi1_get_ib_cfg()
10071 val = ppd->link_speed_active; in hfi1_get_ib_cfg()
10074 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */ in hfi1_get_ib_cfg()
10075 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */ in hfi1_get_ib_cfg()
10080 val = ppd->actual_vls_operational; in hfi1_get_ib_cfg()
10089 val = ppd->overrun_threshold; in hfi1_get_ib_cfg()
10092 val = ppd->phy_error_threshold; in hfi1_get_ib_cfg()
10125 * HFI allows this to be set per-receive context, but the
10131 * The maximum non-payload (MTU) bytes in LRH.PktLen are in lrh_max_header_bytes()
10135 * dd->rcd[0].rcvhdrqentsize is in DW. in lrh_max_header_bytes()
10140 return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2; in lrh_max_header_bytes()
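/*
 * Worked example (editorial, value assumed): with a 32-DW header queue
 * entry size the limit above is (32 - 2 + 1) << 2 = 124 bytes, i.e. the
 * entry minus the PBC/RHF overhead plus the ICRC word, converted from
 * DWs to bytes.
 */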
10144 * Set Send Length
10145 * @ppd - per-port data
10156 struct hfi1_devdata *dd = ppd->dd; in set_send_length()
10158 u32 maxvlmtu = dd->vld[15].mtu; in set_send_length()
10159 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) in set_send_length()
10165 for (i = 0; i < ppd->vls_supported; i++) { in set_send_length()
10166 if (dd->vld[i].mtu > maxvlmtu) in set_send_length()
10167 maxvlmtu = dd->vld[i].mtu; in set_send_length()
10169 len1 |= (((dd->vld[i].mtu + max_hb) >> 2) in set_send_length()
10173 len2 |= (((dd->vld[i].mtu + max_hb) >> 2) in set_send_length()
10181 for (i = 0; i < ppd->vls_supported; i++) { in set_send_length()
10182 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50), in set_send_length()
10183 sc_mtu_to_threshold(dd->vld[i].sc, in set_send_length()
10184 dd->vld[i].mtu, in set_send_length()
10185 get_hdrqentsize(dd->rcd[0]))); in set_send_length()
10191 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), in set_send_length()
10192 sc_mtu_to_threshold(dd->vld[15].sc, in set_send_length()
10193 dd->vld[15].mtu, in set_send_length()
10194 dd->rcd[0]->rcvhdrqentsize)); in set_send_length()
10195 sc_set_cr_threshold(dd->vld[15].sc, thres); in set_send_length()
10200 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG); in set_send_length()
10204 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1); in set_send_length()
10211 struct hfi1_devdata *dd = ppd->dd; in set_lidlmc()
10212 u32 mask = ~((1U << ppd->lmc) - 1); in set_lidlmc()
10213 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); in set_lidlmc()
10220 lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid; in set_lidlmc()
10227 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1); in set_lidlmc()
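/*
 * Worked example (editorial): with LMC = 2 the mask above is
 * ~((1 << 2) - 1) = ~0x3, so the DLID compare ignores the low two bits
 * and the port answers on 2^LMC = 4 consecutive LIDs.
 */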
10271 [0x10] = "Unable to achieve frame sync on any lane",
10275 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10317 /* describe the given last state complete frame */
10318 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame, in decode_state_complete() argument
10321 struct hfi1_devdata *dd = ppd->dd; in decode_state_complete()
10328 * Decode frame: in decode_state_complete()
10329 * [ 0: 0] - success in decode_state_complete()
10330 * [ 3: 1] - state in decode_state_complete()
10331 * [ 7: 4] - next state timeout in decode_state_complete()
10332 * [15: 8] - reason code in decode_state_complete()
10333 * [31:16] - lanes in decode_state_complete()
10335 success = frame & 0x1; in decode_state_complete()
10336 state = (frame >> 1) & 0x7; in decode_state_complete()
10337 reason = (frame >> 8) & 0xff; in decode_state_complete()
10338 lanes = (frame >> 16) & 0xffff; in decode_state_complete()
10340 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n", in decode_state_complete()
10341 prefix, frame); in decode_state_complete()
10361 read_last_local_state(ppd->dd, &last_local_state); in check_lni_states()
10362 read_last_remote_state(ppd->dd, &last_remote_state); in check_lni_states()
10367 * training in progress. in check_lni_states()
10391 return -ETIMEDOUT; in wait_link_transfer_active()
10401 struct hfi1_devdata *dd = ppd->dd; in force_logical_link_state_down()
10430 dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n"); in force_logical_link_state_down()
10435 * Expects ppd->hls_mutex to be held.
10443 struct hfi1_devdata *dd = ppd->dd; in goto_offline()
10450 previous_state = ppd->host_link_state; in goto_offline()
10451 ppd->host_link_state = HLS_GOING_OFFLINE; in goto_offline()
10460 return -EINVAL; in goto_offline()
10462 if (ppd->offline_disabled_reason == in goto_offline()
10464 ppd->offline_disabled_reason = in goto_offline()
10472 if (ppd->port_type == PORT_TYPE_QSFP && in goto_offline()
10473 ppd->qsfp_info.limiting_active && in goto_offline()
10499 * Now in charge of LCB - must be after the physical state is in goto_offline()
10510 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ in goto_offline()
10527 ppd->host_link_state = HLS_DN_OFFLINE; in goto_offline()
10534 * - change our state in goto_offline()
10535 * - notify others if we were previously in a linkup state in goto_offline()
10537 ppd->host_link_state = HLS_DN_OFFLINE; in goto_offline()
10547 ppd->qsfp_info.reset_needed = 0; in goto_offline()
10551 ppd->link_width_active = 0; in goto_offline()
10552 ppd->link_width_downgrade_tx_active = 0; in goto_offline()
10553 ppd->link_width_downgrade_rx_active = 0; in goto_offline()
10554 ppd->current_egress_rate = 0; in goto_offline()
10585 switch (ppd->linkinit_reason) { in link_state_reason_name()
10604 * driver_pstate - convert the driver's notion of a port's
10606 * Return -1 (converted to a u32) to indicate error.
10610 switch (ppd->host_link_state) { in driver_pstate()
10631 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", in driver_pstate()
10632 ppd->host_link_state); in driver_pstate()
10633 return -1; in driver_pstate()
10638 * driver_lstate - convert the driver's notion of a port's
10639 * state (an HLS_*) into a logical state (an IB_PORT_*). Return -1
10644 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN)) in driver_lstate()
10647 switch (ppd->host_link_state & HLS_UP) { in driver_lstate()
10655 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", in driver_lstate()
10656 ppd->host_link_state); in driver_lstate()
10657 return -1; in driver_lstate()
10664 if (ppd->local_link_down_reason.latest == 0 && in set_link_down_reason()
10665 ppd->neigh_link_down_reason.latest == 0) { in set_link_down_reason()
10666 ppd->local_link_down_reason.latest = lcl_reason; in set_link_down_reason()
10667 ppd->neigh_link_down_reason.latest = neigh_reason; in set_link_down_reason()
10668 ppd->remote_link_down_reason = rem_reason; in set_link_down_reason()
10673 * data_vls_operational() - Verify if data VL BCT credits and MTU
10677 * Return: true - OK, false - otherwise.
10684 if (!ppd->actual_vls_operational) in data_vls_operational()
10687 for (i = 0; i < ppd->vls_supported; i++) { in data_vls_operational()
10688 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i)); in data_vls_operational()
10689 if ((reg && !ppd->dd->vld[i].mtu) || in data_vls_operational()
10690 (!reg && ppd->dd->vld[i].mtu)) in data_vls_operational()
10703 * Returns 0 on success, -errno on failure.
10707 struct hfi1_devdata *dd = ppd->dd; in set_link_state()
10712 mutex_lock(&ppd->hls_lock); in set_link_state()
10718 /* interpret poll -> poll as a link bounce */ in set_link_state()
10719 poll_bounce = ppd->host_link_state == HLS_DN_POLL && in set_link_state()
10723 link_state_name(ppd->host_link_state), in set_link_state()
10734 ppd->is_sm_config_started = 0; in set_link_state()
10740 if (ppd->host_link_state == state && !poll_bounce) in set_link_state()
10745 if (ppd->host_link_state == HLS_DN_POLL && in set_link_state()
10746 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) { in set_link_state()
10755 } else if (ppd->host_link_state != HLS_GOING_UP) { in set_link_state()
10767 "%s: physical state did not change to LINK-UP\n", in set_link_state()
10781 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR) in set_link_state()
10782 ppd->linkinit_reason = in set_link_state()
10796 update_xmit_counters(ppd, ppd->link_width_active); in set_link_state()
10798 ppd->host_link_state = HLS_UP_INIT; in set_link_state()
10802 if (ppd->host_link_state != HLS_UP_INIT) in set_link_state()
10809 ret = -EINVAL; in set_link_state()
10821 ppd->host_link_state = HLS_UP_ARMED; in set_link_state()
10828 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) in set_link_state()
10829 ppd->neighbor_normal = 1; in set_link_state()
10832 if (ppd->host_link_state != HLS_UP_ARMED) in set_link_state()
10844 ppd->host_link_state = HLS_UP_ACTIVE; in set_link_state()
10848 event.device = &dd->verbs_dev.rdi.ibdev; in set_link_state()
10849 event.element.port_num = ppd->port; in set_link_state()
10854 if ((ppd->host_link_state == HLS_DN_DISABLE || in set_link_state()
10855 ppd->host_link_state == HLS_DN_OFFLINE) && in set_link_state()
10856 dd->dc_shutdown) in set_link_state()
10861 if (ppd->host_link_state != HLS_DN_OFFLINE) { in set_link_state()
10862 u8 tmp = ppd->link_enabled; in set_link_state()
10864 ret = goto_offline(ppd, ppd->remote_link_down_reason); in set_link_state()
10866 ppd->link_enabled = tmp; in set_link_state()
10869 ppd->remote_link_down_reason = 0; in set_link_state()
10871 if (ppd->driver_link_ready) in set_link_state()
10872 ppd->link_enabled = 1; in set_link_state()
10875 set_all_slowpath(ppd->dd); in set_link_state()
10880 ppd->port_error_action = 0; in set_link_state()
10894 ret = -EINVAL; in set_link_state()
10904 ppd->host_link_state = HLS_DN_POLL; in set_link_state()
10905 ppd->offline_disabled_reason = in set_link_state()
10918 ppd->link_enabled = 0; in set_link_state()
10923 if (ppd->host_link_state != HLS_DN_OFFLINE) { in set_link_state()
10924 ret = goto_offline(ppd, ppd->remote_link_down_reason); in set_link_state()
10927 ppd->remote_link_down_reason = 0; in set_link_state()
10930 if (!dd->dc_shutdown) { in set_link_state()
10936 ret = -EINVAL; in set_link_state()
10948 ppd->host_link_state = HLS_DN_DISABLE; in set_link_state()
10951 if (ppd->host_link_state == HLS_DN_DISABLE) in set_link_state()
10955 ret = goto_offline(ppd, ppd->remote_link_down_reason); in set_link_state()
10957 ppd->remote_link_down_reason = 0; in set_link_state()
10960 if (ppd->host_link_state != HLS_DN_POLL) in set_link_state()
10962 ppd->host_link_state = HLS_VERIFY_CAP; in set_link_state()
10966 if (ppd->host_link_state != HLS_VERIFY_CAP) in set_link_state()
10974 ret = -EINVAL; in set_link_state()
10977 ppd->host_link_state = HLS_GOING_UP; in set_link_state()
10985 ret = -EINVAL; in set_link_state()
10993 __func__, link_state_name(ppd->host_link_state), in set_link_state()
10995 ret = -EINVAL; in set_link_state()
10998 mutex_unlock(&ppd->hls_lock); in set_link_state()
11023 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg); in hfi1_set_ib_cfg()
11028 ret = -EINVAL; in hfi1_set_ib_cfg()
11031 if (ppd->vls_operational != val) { in hfi1_set_ib_cfg()
11032 ppd->vls_operational = val; in hfi1_set_ib_cfg()
11033 if (!ppd->port) in hfi1_set_ib_cfg()
11034 ret = -EINVAL; in hfi1_set_ib_cfg()
11045 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */ in hfi1_set_ib_cfg()
11046 ppd->link_width_enabled = val & ppd->link_width_supported; in hfi1_set_ib_cfg()
11049 ppd->link_width_downgrade_enabled = in hfi1_set_ib_cfg()
11050 val & ppd->link_width_downgrade_supported; in hfi1_set_ib_cfg()
11053 ppd->link_speed_enabled = val & ppd->link_speed_supported; in hfi1_set_ib_cfg()
11060 ppd->overrun_threshold = val; in hfi1_set_ib_cfg()
11067 ppd->phy_error_threshold = val; in hfi1_set_ib_cfg()
11081 dd_dev_info(ppd->dd, in hfi1_set_ib_cfg()
11110 spin_lock_init(&ppd->vl_arb_cache[i].lock); in init_vl_arb_caches()
11124 spin_lock(&ppd->vl_arb_cache[idx].lock); in vl_arb_lock_cache()
11125 return &ppd->vl_arb_cache[idx]; in vl_arb_lock_cache()
11130 spin_unlock(&ppd->vl_arb_cache[idx].lock); in vl_arb_unlock_cache()
11136 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl)); in vl_arb_get_cache()
11142 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); in vl_arb_set_cache()
11148 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); in vl_arb_match_cache()
11156 struct hfi1_devdata *dd = ppd->dd; in set_vl_weights()
11161 mutex_lock(&ppd->hls_lock); in set_vl_weights()
11163 if (ppd->host_link_state & HLS_UP) in set_vl_weights()
11170 * Before adjusting VL arbitration weights, empty per-VL in set_vl_weights()
11180 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n", in set_vl_weights()
11190 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK) in set_vl_weights()
11192 | (((u64)vl->weight in set_vl_weights()
11203 mutex_unlock(&ppd->hls_lock); in set_vl_weights()
11216 vll->dedicated = cpu_to_be16( in read_one_cm_vl()
11219 vll->shared = cpu_to_be16( in read_one_cm_vl()
11236 /* OPA and HFI have a 1-1 mapping */ in get_buffer_control()
11238 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]); in get_buffer_control()
11240 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */ in get_buffer_control()
11241 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]); in get_buffer_control()
11244 bc->overall_shared_limit = cpu_to_be16( in get_buffer_control()
11259 /* each register contains 16 SC->VLnt mappings, 4 bits each */ in get_sc2vlnt()
11264 dp->vlnt[2 * i] = byte & 0xf; in get_sc2vlnt()
11265 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4; in get_sc2vlnt()
11272 dp->vlnt[16 + (2 * i)] = byte & 0xf; in get_sc2vlnt()
11273 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4; in get_sc2vlnt()
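/*
 * Editorial sketch (not driver code): the inverse of the unpacking
 * above - two 4-bit SC-to-VLnt entries pack into each byte, low nibble
 * first.
 */
static u8 pack_vlnt_pair(u8 even_entry, u8 odd_entry)
{
	return (even_entry & 0xf) | ((odd_entry & 0xf) << 4);
}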
11284 vl->vl = 0xf; in get_vlarb_preempt()
11285 vl->weight = 0; in get_vlarb_preempt()
11293 0, dp->vlnt[0] & 0xf, in set_sc2vlnt()
11294 1, dp->vlnt[1] & 0xf, in set_sc2vlnt()
11295 2, dp->vlnt[2] & 0xf, in set_sc2vlnt()
11296 3, dp->vlnt[3] & 0xf, in set_sc2vlnt()
11297 4, dp->vlnt[4] & 0xf, in set_sc2vlnt()
11298 5, dp->vlnt[5] & 0xf, in set_sc2vlnt()
11299 6, dp->vlnt[6] & 0xf, in set_sc2vlnt()
11300 7, dp->vlnt[7] & 0xf, in set_sc2vlnt()
11301 8, dp->vlnt[8] & 0xf, in set_sc2vlnt()
11302 9, dp->vlnt[9] & 0xf, in set_sc2vlnt()
11303 10, dp->vlnt[10] & 0xf, in set_sc2vlnt()
11304 11, dp->vlnt[11] & 0xf, in set_sc2vlnt()
11305 12, dp->vlnt[12] & 0xf, in set_sc2vlnt()
11306 13, dp->vlnt[13] & 0xf, in set_sc2vlnt()
11307 14, dp->vlnt[14] & 0xf, in set_sc2vlnt()
11308 15, dp->vlnt[15] & 0xf)); in set_sc2vlnt()
11311 16, dp->vlnt[16] & 0xf, in set_sc2vlnt()
11312 17, dp->vlnt[17] & 0xf, in set_sc2vlnt()
11313 18, dp->vlnt[18] & 0xf, in set_sc2vlnt()
11314 19, dp->vlnt[19] & 0xf, in set_sc2vlnt()
11315 20, dp->vlnt[20] & 0xf, in set_sc2vlnt()
11316 21, dp->vlnt[21] & 0xf, in set_sc2vlnt()
11317 22, dp->vlnt[22] & 0xf, in set_sc2vlnt()
11318 23, dp->vlnt[23] & 0xf, in set_sc2vlnt()
11319 24, dp->vlnt[24] & 0xf, in set_sc2vlnt()
11320 25, dp->vlnt[25] & 0xf, in set_sc2vlnt()
11321 26, dp->vlnt[26] & 0xf, in set_sc2vlnt()
11322 27, dp->vlnt[27] & 0xf, in set_sc2vlnt()
11323 28, dp->vlnt[28] & 0xf, in set_sc2vlnt()
11324 29, dp->vlnt[29] & 0xf, in set_sc2vlnt()
11325 30, dp->vlnt[30] & 0xf, in set_sc2vlnt()
11326 31, dp->vlnt[31] & 0xf)); in set_sc2vlnt()
11359 /* set the given per-VL shared limit */
11376 /* set the given per-VL dedicated limit */
11393 /* spin until the given per-VL status mask bits clear */
11449 struct hfi1_devdata *dd = ppd->dd; in set_buffer_control()
11482 new_total += be16_to_cpu(new_bc->vl[i].dedicated); in set_buffer_control()
11486 be16_to_cpu(new_bc->vl[i].dedicated)); in set_buffer_control()
11488 be16_to_cpu(new_bc->vl[i].shared)); in set_buffer_control()
11489 new_bc->vl[i].dedicated = 0; in set_buffer_control()
11490 new_bc->vl[i].shared = 0; in set_buffer_control()
11492 new_total += be16_to_cpu(new_bc->overall_shared_limit); in set_buffer_control()
11515 this_shared_changing = new_bc->vl[i].shared in set_buffer_control()
11519 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated || in set_buffer_control()
11525 if (be16_to_cpu(new_bc->vl[i].dedicated) < in set_buffer_control()
11532 /* bracket the credit change with a total adjustment */ in set_buffer_control()
11540 if ((be16_to_cpu(new_bc->overall_shared_limit) < in set_buffer_control()
11568 be16_to_cpu(new_bc-> in set_buffer_control()
11571 new_bc->vl[i].dedicated; in set_buffer_control()
11582 if (be16_to_cpu(new_bc->vl[i].dedicated) > in set_buffer_control()
11585 be16_to_cpu(new_bc-> in set_buffer_control()
11595 if (be16_to_cpu(new_bc->vl[i].shared) > in set_buffer_control()
11597 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared)); in set_buffer_control()
11601 if (be16_to_cpu(new_bc->overall_shared_limit) > in set_buffer_control()
11604 be16_to_cpu(new_bc->overall_shared_limit)); in set_buffer_control()
11606 /* bracket the credit change with a total adjustment */ in set_buffer_control()
11616 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 || in set_buffer_control()
11617 be16_to_cpu(new_bc->vl[i].shared) > 0) in set_buffer_control()
11619 ppd->actual_vls_operational = vl_count; in set_buffer_control()
11620 ret = sdma_map_init(dd, ppd->port - 1, vl_count ? in set_buffer_control()
11621 ppd->actual_vls_operational : in set_buffer_control()
11622 ppd->vls_operational, in set_buffer_control()
11625 ret = pio_map_init(dd, ppd->port - 1, vl_count ? in set_buffer_control()
11626 ppd->actual_vls_operational : in set_buffer_control()
11627 ppd->vls_operational, NULL); in set_buffer_control()
11667 size = get_buffer_control(ppd->dd, t, NULL); in fm_get_table()
11670 size = get_sc2vlnt(ppd->dd, t); in fm_get_table()
11675 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t); in fm_get_table()
11685 return -EINVAL; in fm_get_table()
11725 set_sc2vlnt(ppd->dd, t); in fm_set_table()
11728 ret = -EINVAL; in fm_set_table()
11736 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11749 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11750 * Just re-enables all data VLs (the "fill" part happens
11751 * automatically - the name was chosen for symmetry with
11754 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11767 * drain_data_vls() - assumes that disable_data_vls() has been called,
11768 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
11779 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11785 * // do things with per-VL resources
11801 * the cclock, a non-zero ns will always have a non-zero result.
11807 if (dd->icode == ICODE_FPGA_EMULATION) in ns_to_cclock()
11818 * the cclock, a non-zero cclocks will always have a non-zero result.
11824 if (dd->icode == ICODE_FPGA_EMULATION) in cclock_to_ns()
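/*
 * Editorial sketch of the rounding both conversion comments above
 * describe: dividing with DIV_ROUND_UP() guarantees a non-zero input
 * always produces a non-zero result.  The picosecond cclock period
 * parameter is an assumption for illustration.
 */
static u32 ns_to_cclock_sketch(u32 ns, u32 cclock_ps)
{
	return (u32)DIV_ROUND_UP((u64)ns * 1000, cclock_ps);
}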
11837 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11841 struct hfi1_devdata *dd = rcd->dd; in adjust_rcv_timeout()
11842 u32 timeout = rcd->rcvavail_timeout; in adjust_rcv_timeout()
11866 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */ in adjust_rcv_timeout()
11868 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr); in adjust_rcv_timeout()
11871 rcd->rcvavail_timeout = timeout; in adjust_rcv_timeout()
11876 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT, in adjust_rcv_timeout()
11884 struct hfi1_devdata *dd = rcd->dd; in update_usrhead()
11886 u32 ctxt = rcd->ctxt; in update_usrhead()
11909 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) in hdrqempty()
11915 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); in hdrqempty()
11934 * 0xB-0xF - reserved (Receive Array only)
11957 * encode_rcv_header_entry_size - return chip specific encoding for size
11977 * hfi1_validate_rcvhdrcnt - validate hdrcnt
11985 return -EINVAL; in hfi1_validate_rcvhdrcnt()
11992 return -EINVAL; in hfi1_validate_rcvhdrcnt()
11998 return -EINVAL; in hfi1_validate_rcvhdrcnt()
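/*
 * Editorial reconstruction of the three -EINVAL exits above: the header
 * queue count must sit between a minimum and a maximum and be a
 * multiple of the hardware increment.  The HFI1_MIN/MAX_HDRQ_EGRBUF_CNT
 * and HDRQ_INCREMENT names are assumptions here, not verified against
 * the elided body.
 */
static int validate_rcvhdrcnt_sketch(u32 cnt)
{
	if (cnt <= HFI1_MIN_HDRQ_EGRBUF_CNT)	/* too small */
		return -EINVAL;
	if (cnt > HFI1_MAX_HDRQ_EGRBUF_CNT)	/* too large */
		return -EINVAL;
	if (cnt % HDRQ_INCREMENT)		/* not a legal increment */
		return -EINVAL;
	return 0;
}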
12005 * set_hdrq_regs - set header queue registers for context
12031 dd->rcvhdrtail_dummy_dma); in set_hdrq_regs()
12044 ctxt = rcd->ctxt; in hfi1_rcvctrl()
12054 rcd->rcvhdrq_dma); in hfi1_rcvctrl()
12057 rcd->rcvhdrqtailaddr_dma); in hfi1_rcvctrl()
12069 memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd)); in hfi1_rcvctrl()
12072 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr; in hfi1_rcvctrl()
12079 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size) in hfi1_rcvctrl()
12083 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */ in hfi1_rcvctrl()
12091 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT) in hfi1_rcvctrl()
12094 (((rcd->eager_base >> RCV_SHIFT) in hfi1_rcvctrl()
12101 * rcd->expected_count is set to individual RcvArray entries, in hfi1_rcvctrl()
12102 * not pairs, and the CSR takes a pair-count in groups of in hfi1_rcvctrl()
12105 reg = (((rcd->expected_count >> RCV_SHIFT) in hfi1_rcvctrl()
12108 (((rcd->expected_base >> RCV_SHIFT) in hfi1_rcvctrl()
12122 if (dd->rcvhdrtail_dummy_dma) { in hfi1_rcvctrl()
12124 dd->rcvhdrtail_dummy_dma); in hfi1_rcvctrl()
12132 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, in hfi1_rcvctrl()
12133 IS_RCVAVAIL_START + rcd->ctxt, true); in hfi1_rcvctrl()
12137 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, in hfi1_rcvctrl()
12138 IS_RCVAVAIL_START + rcd->ctxt, false); in hfi1_rcvctrl()
12154 * In one-packet-per-eager mode, the size comes from in hfi1_rcvctrl()
12171 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, in hfi1_rcvctrl()
12172 IS_RCVURGENT_START + rcd->ctxt, true); in hfi1_rcvctrl()
12174 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, in hfi1_rcvctrl()
12175 IS_RCVURGENT_START + rcd->ctxt, false); in hfi1_rcvctrl()
12204 (u64)rcd->rcvavail_timeout << in hfi1_rcvctrl()
12219 dd->rcvhdrtail_dummy_dma); in hfi1_rcvctrl()
12228 ret = dd->cntrnameslen; in hfi1_read_cntrs()
12229 *namep = dd->cntrnames; in hfi1_read_cntrs()
12234 ret = (dd->ndevcntrs) * sizeof(u64); in hfi1_read_cntrs()
12237 *cntrp = dd->cntrs; in hfi1_read_cntrs()
12244 hfi1_cdbg(CNTR, "reading %s", entry->name); in hfi1_read_cntrs()
12245 if (entry->flags & CNTR_DISABLED) { in hfi1_read_cntrs()
12249 if (entry->flags & CNTR_VL) { in hfi1_read_cntrs()
12252 val = entry->rw_cntr(entry, in hfi1_read_cntrs()
12260 dd->cntrs[entry->offset + j] = in hfi1_read_cntrs()
12263 } else if (entry->flags & CNTR_SDMA) { in hfi1_read_cntrs()
12269 entry->rw_cntr(entry, dd, j, in hfi1_read_cntrs()
12274 dd->cntrs[entry->offset + j] = in hfi1_read_cntrs()
12278 val = entry->rw_cntr(entry, dd, in hfi1_read_cntrs()
12281 dd->cntrs[entry->offset] = val; in hfi1_read_cntrs()
12299 ret = ppd->dd->portcntrnameslen; in hfi1_read_portcntrs()
12300 *namep = ppd->dd->portcntrnames; in hfi1_read_portcntrs()
12305 ret = ppd->dd->nportcntrs * sizeof(u64); in hfi1_read_portcntrs()
12306 *cntrp = ppd->cntrs; in hfi1_read_portcntrs()
12310 hfi1_cdbg(CNTR, "reading %s", entry->name); in hfi1_read_portcntrs()
12311 if (entry->flags & CNTR_DISABLED) { in hfi1_read_portcntrs()
12317 if (entry->flags & CNTR_VL) { in hfi1_read_portcntrs()
12320 val = entry->rw_cntr(entry, ppd, j, in hfi1_read_portcntrs()
12327 ppd->cntrs[entry->offset + j] = val; in hfi1_read_portcntrs()
12330 val = entry->rw_cntr(entry, ppd, in hfi1_read_portcntrs()
12334 ppd->cntrs[entry->offset] = val; in hfi1_read_portcntrs()
12347 if (dd->synth_stats_timer.function) in free_cntrs()
12348 del_timer_sync(&dd->synth_stats_timer); in free_cntrs()
12350 for (i = 0; i < dd->num_pports; i++, ppd++) { in free_cntrs()
12351 kfree(ppd->cntrs); in free_cntrs()
12352 kfree(ppd->scntrs); in free_cntrs()
12353 free_percpu(ppd->ibport_data.rvp.rc_acks); in free_cntrs()
12354 free_percpu(ppd->ibport_data.rvp.rc_qacks); in free_cntrs()
12355 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp); in free_cntrs()
12356 ppd->cntrs = NULL; in free_cntrs()
12357 ppd->scntrs = NULL; in free_cntrs()
12358 ppd->ibport_data.rvp.rc_acks = NULL; in free_cntrs()
12359 ppd->ibport_data.rvp.rc_qacks = NULL; in free_cntrs()
12360 ppd->ibport_data.rvp.rc_delayed_comp = NULL; in free_cntrs()
12362 kfree(dd->portcntrnames); in free_cntrs()
12363 dd->portcntrnames = NULL; in free_cntrs()
12364 kfree(dd->cntrs); in free_cntrs()
12365 dd->cntrs = NULL; in free_cntrs()
12366 kfree(dd->scntrs); in free_cntrs()
12367 dd->scntrs = NULL; in free_cntrs()
12368 kfree(dd->cntrnames); in free_cntrs()
12369 dd->cntrnames = NULL; in free_cntrs()
12370 if (dd->update_cntr_wq) { in free_cntrs()
12371 destroy_workqueue(dd->update_cntr_wq); in free_cntrs()
12372 dd->update_cntr_wq = NULL; in free_cntrs()
12382 if (entry->flags & CNTR_DISABLED) { in read_dev_port_cntr()
12383 dd_dev_err(dd, "Counter %s not enabled", entry->name); in read_dev_port_cntr()
12387 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval); in read_dev_port_cntr()
12389 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0); in read_dev_port_cntr()
12392 if (entry->flags & CNTR_SYNTH) { in read_dev_port_cntr()
12398 if (entry->flags & CNTR_32BIT) { in read_dev_port_cntr()
12433 if (entry->flags & CNTR_DISABLED) { in write_dev_port_cntr()
12434 dd_dev_err(dd, "Counter %s not enabled", entry->name); in write_dev_port_cntr()
12438 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval); in write_dev_port_cntr()
12440 if (entry->flags & CNTR_SYNTH) { in write_dev_port_cntr()
12442 if (entry->flags & CNTR_32BIT) { in write_dev_port_cntr()
12443 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, in write_dev_port_cntr()
12447 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, in write_dev_port_cntr()
12451 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data); in write_dev_port_cntr()
12467 sval = dd->scntrs + entry->offset; in read_dev_cntr()
12481 sval = dd->scntrs + entry->offset; in write_dev_cntr()
12495 sval = ppd->scntrs + entry->offset; in read_port_cntr()
12500 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && in read_port_cntr()
12506 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl); in read_port_cntr()
12515 sval = ppd->scntrs + entry->offset; in write_port_cntr()
12520 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && in write_port_cntr()
12526 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data); in write_port_cntr()
12548 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); in do_update_synth_timer()
12551 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); in do_update_synth_timer()
12556 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx); in do_update_synth_timer()
12558 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) { in do_update_synth_timer()
12565 dd->unit); in do_update_synth_timer()
12567 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx); in do_update_synth_timer()
12569 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit, in do_update_synth_timer()
12573 dd->unit); in do_update_synth_timer()
12579 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit); in do_update_synth_timer()
12582 if (entry->flags & CNTR_VL) { in do_update_synth_timer()
12590 for (i = 0; i < dd->num_pports; i++, ppd++) { in do_update_synth_timer()
12593 if (entry->flags & CNTR_VL) { in do_update_synth_timer()
12609 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, in do_update_synth_timer()
12613 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, in do_update_synth_timer()
12617 dd->unit, dd->last_tx, dd->last_rx); in do_update_synth_timer()
12620 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); in do_update_synth_timer()
12628 queue_work(dd->update_cntr_wq, &dd->update_cntr_work); in update_synth_timer()
12629 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); in update_synth_timer()
12645 timer_setup(&dd->synth_stats_timer, update_synth_timer, 0); in init_cntrs()
12652 dd->ndevcntrs = 0; in init_cntrs()
12662 dev_cntrs[i].offset = dd->ndevcntrs; in init_cntrs()
12667 /* Add ",32" for 32-bit counters */ in init_cntrs()
12671 dd->ndevcntrs++; in init_cntrs()
12674 dev_cntrs[i].offset = dd->ndevcntrs; in init_cntrs()
12679 /* Add ",32" for 32-bit counters */ in init_cntrs()
12683 dd->ndevcntrs++; in init_cntrs()
12688 /* Add ",32" for 32-bit counters */ in init_cntrs()
12691 dev_cntrs[i].offset = dd->ndevcntrs; in init_cntrs()
12692 dd->ndevcntrs++; in init_cntrs()
12697 dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64), in init_cntrs()
12699 if (!dd->cntrs) in init_cntrs()
12702 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); in init_cntrs()
12703 if (!dd->scntrs) in init_cntrs()
12707 dd->cntrnameslen = sz; in init_cntrs()
12708 dd->cntrnames = kmalloc(sz, GFP_KERNEL); in init_cntrs()
12709 if (!dd->cntrnames) in init_cntrs()
12713 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) { in init_cntrs()
12770 rcv_ctxts = dd->num_rcv_contexts; in init_cntrs()
12778 dd->nportcntrs = 0; in init_cntrs()
12786 port_cntrs[i].offset = dd->nportcntrs; in init_cntrs()
12791 /* Add ",32" for 32-bit counters */ in init_cntrs()
12795 dd->nportcntrs++; in init_cntrs()
12800 /* Add ",32" for 32-bit counters */ in init_cntrs()
12803 port_cntrs[i].offset = dd->nportcntrs; in init_cntrs()
12804 dd->nportcntrs++; in init_cntrs()
12809 dd->portcntrnameslen = sz; in init_cntrs()
12810 dd->portcntrnames = kmalloc(sz, GFP_KERNEL); in init_cntrs()
12811 if (!dd->portcntrnames) in init_cntrs()
12815 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) { in init_cntrs()
12851 for (i = 0; i < dd->num_pports; i++, ppd++) { in init_cntrs()
12852 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); in init_cntrs()
12853 if (!ppd->cntrs) in init_cntrs()
12856 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); in init_cntrs()
12857 if (!ppd->scntrs) in init_cntrs()
12865 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d", in init_cntrs()
12866 WQ_MEM_RECLAIM, dd->unit); in init_cntrs()
12867 if (!dd->update_cntr_wq) in init_cntrs()
12870 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer); in init_cntrs()
12872 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); in init_cntrs()
12876 return -ENOMEM; in init_cntrs()
12900 /* look at the HFI meta-states only */ in chip_to_opa_pstate()
12960 * update_statusp - Update userspace status flag
12974 * memory. Do it here to ensure a reliable state - this is in update_statusp()
12980 if (ppd->statusp) { in update_statusp()
12984 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF | in update_statusp()
12988 *ppd->statusp |= HFI1_STATUS_IB_CONF; in update_statusp()
12991 *ppd->statusp |= HFI1_STATUS_IB_READY; in update_statusp()
12995 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n", in update_statusp()
13000 * wait_logical_linkstate - wait for an IB link state change to occur
13007 * Returns 0 if state reached, otherwise -ETIMEDOUT.
13017 new_state = chip_to_opa_lstate(ppd->dd, in wait_logical_linkstate()
13018 read_logical_state(ppd->dd)); in wait_logical_linkstate()
13022 dd_dev_err(ppd->dd, in wait_logical_linkstate()
13025 return -ETIMEDOUT; in wait_logical_linkstate()
13035 u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state); in log_state_transition()
13037 dd_dev_info(ppd->dd, in log_state_transition()
13048 u32 read_state = read_physical_state(ppd->dd); in log_physical_state()
13053 dd_dev_err(ppd->dd, in log_physical_state()
13060 * wait_physical_linkstate - wait for a physical link state change to occur
13066 * Returns 0 if state reached, otherwise -ETIMEDOUT.
13076 read_state = read_physical_state(ppd->dd); in wait_physical_linkstate()
13080 dd_dev_err(ppd->dd, in wait_physical_linkstate()
13083 return -ETIMEDOUT; in wait_physical_linkstate()
13085 usleep_range(1950, 2050); /* sleep 2ms-ish */ in wait_physical_linkstate()
13093 * wait_phys_link_offline_substates - wait for any offline substate
13099 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
13109 read_state = read_physical_state(ppd->dd); in wait_phys_link_offline_substates()
13113 dd_dev_err(ppd->dd, in wait_phys_link_offline_substates()
13116 return -ETIMEDOUT; in wait_phys_link_offline_substates()
13118 usleep_range(1950, 2050); /* sleep 2ms-ish */ in wait_phys_link_offline_substates()
13126 * wait_phys_link_out_of_offline - wait for any out-of-offline state
13132 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
13142 read_state = read_physical_state(ppd->dd); in wait_phys_link_out_of_offline()
13146 dd_dev_err(ppd->dd, in wait_phys_link_out_of_offline()
13149 return -ETIMEDOUT; in wait_phys_link_out_of_offline()
13151 usleep_range(1950, 2050); /* sleep 2ms-ish */ in wait_phys_link_out_of_offline()
13167 struct hfi1_devdata *dd = sc->dd; in hfi1_init_ctxt()
13169 u8 set = (sc->type == SC_USER ? in hfi1_init_ctxt()
13172 reg = read_kctxt_csr(dd, sc->hw_context, in hfi1_init_ctxt()
13178 write_kctxt_csr(dd, sc->hw_context, in hfi1_init_ctxt()
13188 if (dd->icode != ICODE_RTL_SILICON) { in hfi1_tempsense_rd()
13192 return -EINVAL; in hfi1_tempsense_rd()
13195 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) & in hfi1_tempsense_rd()
13197 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) & in hfi1_tempsense_rd()
13199 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) & in hfi1_tempsense_rd()
13201 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) & in hfi1_tempsense_rd()
13203 /* triggers is a 3-bit value - 1 bit per trigger. */ in hfi1_tempsense_rd()
13204 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7); in hfi1_tempsense_rd()
13212 * read_mod_write() - Calculate the IRQ register index and set/clear the bits
13225 spin_lock(&dd->irq_src_lock); in read_mod_write()
13232 spin_unlock(&dd->irq_src_lock); in read_mod_write()
13236 * set_intr_bits() - Enable/disable a range (one or more) IRQ sources
13251 return -EINVAL; in set_intr_bits()
13254 return -ERANGE; in set_intr_bits()
13260 read_mod_write(dd, src - 1, bits, set); in set_intr_bits()
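/*
 * Editorial sketch of the index math behind read_mod_write(): interrupt
 * sources are spread across 64-bit mask CSRs, so source number n lives
 * in register n / 64 at bit n % 64, mirroring the CCE_INT_FORCE +
 * (8 * ireg) addressing used earlier in this file.  Illustration only.
 */
static void irq_src_to_reg_bit(int src, int *ireg, u64 *bit)
{
	*ireg = src / 64;		/* which 64-bit interrupt CSR */
	*bit = 1ULL << (src % 64);	/* which bit within it */
}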
13298 * Remap the interrupt source from the general handler to the given MSI-X
13310 dd->gi_mask[m] &= ~((u64)1 << n); in remap_intr()
13316 /* direct the chip source to the given MSI-X interrupt */ in remap_intr()
13329 * engine. Per-engine interrupts are as follows: in remap_sdma_interrupts()
13341 * chip interrupts back to MSI-X 0.
13349 dd->gi_mask[i] = ~(u64)0; in reset_interrupts()
13351 /* all chip interrupts map to MSI-X 0 */ in reset_interrupts()
13357 * set_up_interrupts() - Initialize the IRQ resources and state
13371 /* reset general handler mask, chip MSI-X mappings */ in set_up_interrupts()
13374 /* ask for MSI-X interrupts */ in set_up_interrupts()
13389 * num_rcv_contexts - number of contexts being used
13390 * n_krcv_queues - number of kernel contexts
13391 * first_dyn_alloc_ctxt - first dynamically allocated context
13393 * freectxts - number of free user contexts
13394 * num_send_contexts - number of PIO send contexts being used
13395 * num_netdev_contexts - number of contexts reserved for netdev
13411 * - Context 0 - control context (VL15/multicast/error) in set_up_context_variables()
13412 * - Context 1 - first kernel context in set_up_context_variables()
13413 * - Context 2 - second kernel context in set_up_context_variables()
13427 * one send context is allocated for each VL{0-7} and VL15 in set_up_context_variables()
13429 if (num_kernel_contexts > (send_contexts - num_vls - 1)) { in set_up_context_variables()
13432 send_contexts - num_vls - 1, in set_up_context_variables()
13434 num_kernel_contexts = send_contexts - num_vls - 1; in set_up_context_variables()
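/*
 * Worked example (editorial, totals assumed): with 160 send contexts
 * and num_vls = 8, the clamp above allows at most 160 - 8 - 1 = 151
 * kernel contexts, reserving one send context per VL0-7 plus one for
 * VL15.
 */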
13439 * - default to 1 user context per real (non-HT) CPU core if in set_up_context_variables()
13452 (u32)(rcv_contexts - num_kernel_contexts), in set_up_context_variables()
13455 n_usr_ctxts = rcv_contexts - num_kernel_contexts; in set_up_context_variables()
13459 hfi1_num_netdev_contexts(dd, rcv_contexts - in set_up_context_variables()
13465 * 2. FECN (num_kernel_context - 1 + num_user_contexts + in set_up_context_variables()
13470 * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts, in set_up_context_variables()
13476 rmt_count += num_kernel_contexts - 1; in set_up_context_variables()
13478 user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count; in set_up_context_variables()
13488 dd->num_rcv_contexts = in set_up_context_variables()
13490 dd->n_krcv_queues = num_kernel_contexts; in set_up_context_variables()
13491 dd->first_dyn_alloc_ctxt = num_kernel_contexts; in set_up_context_variables()
13492 dd->num_netdev_contexts = num_netdev_contexts; in set_up_context_variables()
13493 dd->num_user_contexts = n_usr_ctxts; in set_up_context_variables()
13494 dd->freectxts = n_usr_ctxts; in set_up_context_variables()
13498 (int)dd->num_rcv_contexts, in set_up_context_variables()
13499 (int)dd->n_krcv_queues, in set_up_context_variables()
13500 dd->num_netdev_contexts, in set_up_context_variables()
13501 dd->num_user_contexts); in set_up_context_variables()
13507 * consecutive entries by using write-combining of the entire in set_up_context_variables()
13514 dd->rcv_entries.group_size = RCV_INCREMENT; in set_up_context_variables()
13515 ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size; in set_up_context_variables()
13516 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts; in set_up_context_variables()
13517 dd->rcv_entries.nctxt_extra = ngroups - in set_up_context_variables()
13518 (dd->num_rcv_contexts * dd->rcv_entries.ngroups); in set_up_context_variables()
13520 dd->rcv_entries.ngroups, in set_up_context_variables()
13521 dd->rcv_entries.nctxt_extra); in set_up_context_variables()
13522 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size > in set_up_context_variables()
13524 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) / in set_up_context_variables()
13525 dd->rcv_entries.group_size; in set_up_context_variables()
13528 dd->rcv_entries.ngroups); in set_up_context_variables()
13529 dd->rcv_entries.nctxt_extra = 0; in set_up_context_variables()
13536 dd->num_send_contexts = ret; in set_up_context_variables()
13541 dd->num_send_contexts, in set_up_context_variables()
13542 dd->sc_sizes[SC_KERNEL].count, in set_up_context_variables()
13543 dd->sc_sizes[SC_ACK].count, in set_up_context_variables()
13544 dd->sc_sizes[SC_USER].count, in set_up_context_variables()
13545 dd->sc_sizes[SC_VL15].count); in set_up_context_variables()
13559 struct hfi1_devdata *dd = ppd->dd; in set_partition_keys()
13565 reg |= (ppd->pkeys[i] & in set_partition_keys()
13572 ((i - 3) * 2), reg); in set_partition_keys()
13585 * NOTE: All user context CSRs that are not mmapped write-only
13605 * to be read, so are not pre-initialized in write_uninitialized_csrs_and_memories()
13665 /* CCE_REVISION read-only */ in reset_cce_csrs()
13666 /* CCE_REVISION2 read-only */ in reset_cce_csrs()
13667 /* CCE_CTRL - bits clear automatically */ in reset_cce_csrs()
13668 /* CCE_STATUS read-only, use CceCtrl to clear */ in reset_cce_csrs()
13674 /* CCE_ERR_STATUS read-only */ in reset_cce_csrs()
13688 /* CCE_MSIX_PBA read-only */ in reset_cce_csrs()
13695 /* CCE_INT_STATUS read-only */ in reset_cce_csrs()
13699 /* CCE_INT_BLOCKED read-only */ in reset_cce_csrs()
13716 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can in reset_misc_csrs()
13717 * only be written 128-byte chunks in reset_misc_csrs()
13723 /* MISC_STS_8051_DIGEST read-only */ in reset_misc_csrs()
13724 /* MISC_STS_SBM_DIGEST read-only */ in reset_misc_csrs()
13725 /* MISC_STS_PCIE_DIGEST read-only */ in reset_misc_csrs()
13726 /* MISC_STS_FAB_DIGEST read-only */ in reset_misc_csrs()
13727 /* MISC_ERR_STATUS read-only */ in reset_misc_csrs()
13743 /* SEND_CONTEXTS read-only */ in reset_txe_csrs()
13744 /* SEND_DMA_ENGINES read-only */ in reset_txe_csrs()
13745 /* SEND_PIO_MEM_SIZE read-only */ in reset_txe_csrs()
13746 /* SEND_DMA_MEM_SIZE read-only */ in reset_txe_csrs()
13749 /* SEND_PIO_ERR_STATUS read-only */ in reset_txe_csrs()
13753 /* SEND_DMA_ERR_STATUS read-only */ in reset_txe_csrs()
13757 /* SEND_EGRESS_ERR_STATUS read-only */ in reset_txe_csrs()
13769 /* SEND_ERR_STATUS read-only */ in reset_txe_csrs()
13772 /* SEND_ERR_FORCE read-only */ in reset_txe_csrs()
13785 /* SEND_CM_CREDIT_USED_STATUS read-only */ in reset_txe_csrs()
13794 /* SEND_CM_CREDIT_USED_VL read-only */ in reset_txe_csrs()
13795 /* SEND_CM_CREDIT_USED_VL15 read-only */ in reset_txe_csrs()
13796 /* SEND_EGRESS_CTXT_STATUS read-only */ in reset_txe_csrs()
13797 /* SEND_EGRESS_SEND_DMA_STATUS read-only */ in reset_txe_csrs()
13799 /* SEND_EGRESS_ERR_INFO read-only */ in reset_txe_csrs()
13800 /* SEND_EGRESS_ERR_SOURCE read-only */ in reset_txe_csrs()
13803 * TXE Per-Context CSRs in reset_txe_csrs()
13821 * TXE Per-SDMA CSRs in reset_txe_csrs()
13825 /* SEND_DMA_STATUS read-only */ in reset_txe_csrs()
13829 /* SEND_DMA_HEAD read-only */ in reset_txe_csrs()
13832 /* SEND_DMA_IDLE_CNT read-only */ in reset_txe_csrs()
13835 /* SEND_DMA_DESC_FETCHED_CNT read-only */ in reset_txe_csrs()
13836 /* SEND_DMA_ENG_ERR_STATUS read-only */ in reset_txe_csrs()
13870 * Give up after 1ms - maximum wait time. in init_rbufs()
13878 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n", in init_rbufs()
13882 udelay(2); /* do not busy-wait the CSR */ in init_rbufs()
13885 /* start the init - expect RcvCtrl to be 0 */ in init_rbufs()
13899 /* delay is required first time through - see above */ in init_rbufs()
13900 udelay(2); /* do not busy-wait the CSR */ in init_rbufs()
13905 /* give up after 100us - slowest possible at 33MHz is 73us */ in init_rbufs()
13925 /* RCV_STATUS read-only */ in reset_rxe_csrs()
13926 /* RCV_CONTEXTS read-only */ in reset_rxe_csrs()
13927 /* RCV_ARRAY_CNT read-only */ in reset_rxe_csrs()
13928 /* RCV_BUF_SIZE read-only */ in reset_rxe_csrs()
13933 /* this is a clear-down */ in reset_rxe_csrs()
13936 /* RCV_ERR_STATUS read-only */ in reset_rxe_csrs()
13954 * RXE Kernel and User Per-Context CSRs in reset_rxe_csrs()
13959 /* RCV_CTXT_STATUS read-only */ in reset_rxe_csrs()
13972 /* RCV_HDR_TAIL read-only */ in reset_rxe_csrs()
13974 /* RCV_EGR_INDEX_TAIL read-only */ in reset_rxe_csrs()
13976 /* RCV_EGR_OFFSET_TAIL read-only */ in reset_rxe_csrs()
13990 * SC 0-7 -> VL 0-7 (respectively)
13991 * SC 15 -> VL 15
13993 * otherwise -> VL 0
14039 *((u8 *)(dd->sc2vl) + i) = (u8)i; in init_sc2vl_tables()
14041 *((u8 *)(dd->sc2vl) + i) = 0; in init_sc2vl_tables()
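/*
 * Editorial sketch (not driver code) of the default mapping stated
 * above: SCs 0-7 map 1:1, SC 15 stays on VL 15, and everything else
 * collapses to VL 0.
 */
static u8 default_sc_to_vl(u8 sc)
{
	if (sc <= 7)
		return sc;	/* SC 0-7 -> VL 0-7 */
	if (sc == 15)
		return 15;	/* SC 15 -> VL 15 */
	return 0;		/* otherwise -> VL 0 */
}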
14047 * depend on the chip going through a power-on reset - a driver may be loaded
14050 * Do not write any CSR values to the chip in this routine - there may be
14086 * A recommended length of time to hold is one CSR read, in init_chip()
14102 pcie_flr(dd->pcidev); in init_chip()
14114 pcie_flr(dd->pcidev); in init_chip()
14156 dd->vau = CM_VAU; in init_early_variables()
14157 dd->link_credits = CM_GLOBAL_CREDITS; in init_early_variables()
14159 dd->link_credits--; in init_early_variables()
14160 dd->vcu = cu_to_vcu(hfi1_cu); in init_early_variables()
14161 /* enough room for 8 MAD packets plus header - 17K */ in init_early_variables()
14162 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau); in init_early_variables()
14163 if (dd->vl15_init > dd->link_credits) in init_early_variables()
14164 dd->vl15_init = dd->link_credits; in init_early_variables()
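/*
 * Worked example (editorial): 8 MAD packets of 2048 bytes plus a
 * 128-byte header each total 8 * 2176 = 17408 bytes - the "17K" in the
 * comment above.  Assuming a 64-byte allocation unit, that is
 * 17408 / 64 = 272 credits, clamped to link_credits when larger.
 */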
14169 for (i = 0; i < dd->num_pports; i++) { in init_early_variables()
14170 struct hfi1_pportdata *ppd = &dd->pport[i]; in init_early_variables()
14203 * @dd - device data
14204 * @first_ctxt - first context
14205 * @last_ctxt - last context
14274 memset(rmt->map, rxcontext, sizeof(rmt->map)); in alloc_rsm_map_table()
14275 rmt->used = 0; in alloc_rsm_map_table()
14293 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]); in complete_rsm_map_table()
14313 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT | in add_rsm_rule()
14315 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT); in add_rsm_rule()
14317 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT | in add_rsm_rule()
14318 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT | in add_rsm_rule()
14319 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT | in add_rsm_rule()
14320 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT | in add_rsm_rule()
14321 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT | in add_rsm_rule()
14322 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT); in add_rsm_rule()
14324 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT | in add_rsm_rule()
14325 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT | in add_rsm_rule()
14326 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT | in add_rsm_rule()
14327 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT); in add_rsm_rule()
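add_rsm_rule() is a straight pack-shift-write: every descriptor field is widened to u64, shifted to its bit offset, and OR-ed into one of three CSRs (CFG, SELECT, MATCH). A self-contained sketch of the same idiom with illustrative offsets (not the hardware's real layout):

#include <stdint.h>

/* illustrative field placements only */
#define MATCH_VALUE_SHIFT	32
#define MATCH_MASK_SHIFT	16
#define MATCH_ENABLE_SHIFT	0

static uint64_t pack_match_csr(uint16_t value, uint16_t mask, int enable)
{
	/* widen before shifting: a 32-bit operand shifted by 32 or more
	 * bits is undefined behavior in C */
	return (uint64_t)value << MATCH_VALUE_SHIFT |
	       (uint64_t)mask << MATCH_MASK_SHIFT |
	       (uint64_t)enable << MATCH_ENABLE_SHIFT;
}

The (u64) casts in the driver code serve exactly this purpose.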
14349 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS || in qos_rmt_entries()
14385 * init_qos - init RX qos
14386 * @dd - device data
14387 * @rmt - RSM map table
14414 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES) in init_qos()
14426 idx = rmt->used + ((qpn << n) ^ i); in init_qos()
14430 reg = rmt->map[regidx]; in init_qos()
14434 rmt->map[regidx] = reg; in init_qos()
14441 rrd.offset = rmt->used; in init_qos()
14458 rmt->used += rmt_entries; in init_qos()
14461 dd->qos_shift = n + 1; in init_qos()
14464 dd->qos_shift = 1; in init_qos()
14465 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1); in init_qos()
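The population loop above spreads QPNs across the map with idx = rmt->used + ((qpn << n) ^ i), then treats the map as eight one-byte entries per 64-bit register: idx / 8 selects the register and (idx % 8) * 8 the byte lane. A standalone sketch of that byte-lane insertion (names are illustrative):

#include <stdint.h>

#define NUM_MAP_ENTRIES 256	/* matches the driver's table size */

/* store one 8-bit context number into entry idx of the map */
static void rmt_map_set(uint64_t map[NUM_MAP_ENTRIES / 8],
			unsigned int idx, uint8_t ctxt)
{
	unsigned int regidx = idx / 8;		/* which 64-bit register */
	unsigned int shift = (idx % 8) * 8;	/* which byte lane */

	map[regidx] &= ~(0xffull << shift);	/* clear the old byte */
	map[regidx] |= (uint64_t)ctxt << shift;	/* insert the new one */
}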
14481 start = dd->first_dyn_alloc_ctxt; in init_fecn_handling()
14483 total_cnt = dd->num_rcv_contexts - start; in init_fecn_handling()
14486 if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) { in init_fecn_handling()
14487 dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n"); in init_fecn_handling()
14494 * in the range start...num_rcv_contexts-1 (inclusive). in init_fecn_handling()
14497 * the table - as long as the entries themselves do not wrap. in init_fecn_handling()
14501 offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start); in init_fecn_handling()
14503 for (i = start, idx = rmt->used; i < dd->num_rcv_contexts; in init_fecn_handling()
14508 reg = rmt->map[regidx]; in init_fecn_handling()
14511 rmt->map[regidx] = reg; in init_fecn_handling()
14516 * o packet type 0 - expected in init_fecn_handling()
14520 * Use index 1 to extract the 8-bit receive context from DestQP in init_fecn_handling()
14539 rmt->used += total_cnt; in init_fecn_handling()
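The offset computation above leans on u8 wraparound to get mod-256 arithmetic for free: with NUM_MAP_ENTRIES = 256, offset = (256 + rmt->used - start) mod 256, so for a context ctxt the hardware's (ctxt + offset) mod 256 evaluates to rmt->used + (ctxt - start). Contexts start .. num_rcv_contexts-1 therefore land on consecutive map entries beginning at rmt->used, which is exactly the wrap-tolerant placement the comment describes.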
14568 dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n", in hfi1_netdev_update_rmt()
14572 /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */ in hfi1_netdev_update_rmt()
14579 reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8); in hfi1_netdev_update_rmt()
14584 dev_dbg(&(dd)->pcidev->dev, in hfi1_netdev_update_rmt()
14586 regoff - RCV_RSM_MAP_TABLE, reg); in hfi1_netdev_update_rmt()
14590 if (i < (NUM_NETDEV_MAP_ENTRIES - 1)) in hfi1_netdev_update_rmt()
14616 if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) { in hfi1_init_aip_rsm()
14672 if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1) in hfi1_deinit_aip_rsm()
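hfi1_init_aip_rsm()/hfi1_deinit_aip_rsm() gate the shared RSM rules on a user counter: only the 0 -> 1 transition installs them, and only the 1 -> 0 transition, protected against underflow by the add_unless, removes them. The same first-user/last-user pattern in standalone C11 atomics (install_rules()/remove_rules() are placeholders):

#include <stdatomic.h>

static atomic_int users;

extern void install_rules(void);
extern void remove_rules(void);

static void rsm_user_get(void)
{
	/* only the first user installs the shared rules */
	if (atomic_fetch_add(&users, 1) == 0)
		install_rules();
}

static void rsm_user_put(void)
{
	int old = atomic_load(&users);

	/* emulate atomic_fetch_add_unless(&users, -1, 0):
	 * decrement only while the counter is above zero */
	while (old > 0 &&
	       !atomic_compare_exchange_weak(&users, &old, old - 1))
		;	/* CAS failure reloads old; retry */
	if (old == 1)	/* last user out tears the rules down */
		remove_rules();
}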
14686 return -ENOMEM; in init_rxe()
14693 hfi1_netdev_set_free_rmt_idx(dd, rmt->used); in init_rxe()
14779 /* enable all per-context and per-SDMA engine errors */ in init_txe()
14786 assign_local_cm_au_table(dd, dd->vcu); in init_txe()
14790 * Don't set on Simulator - causes it to choke. in init_txe()
14792 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR) in init_txe()
14802 if (!rcd || !rcd->sc) in hfi1_set_ctxt_jkey()
14803 return -EINVAL; in hfi1_set_ctxt_jkey()
14805 hw_ctxt = rcd->sc->hw_context; in hfi1_set_ctxt_jkey()
14810 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY)) in hfi1_set_ctxt_jkey()
14814 * Enable send-side J_KEY integrity check, unless this is A0 h/w in hfi1_set_ctxt_jkey()
14826 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg); in hfi1_set_ctxt_jkey()
14836 if (!rcd || !rcd->sc) in hfi1_clear_ctxt_jkey()
14837 return -EINVAL; in hfi1_clear_ctxt_jkey()
14839 hw_ctxt = rcd->sc->hw_context; in hfi1_clear_ctxt_jkey()
14842 * Disable send-side J_KEY integrity check, unless this is A0 h/w. in hfi1_clear_ctxt_jkey()
14852 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0); in hfi1_clear_ctxt_jkey()
14863 if (!rcd || !rcd->sc) in hfi1_set_ctxt_pkey()
14864 return -EINVAL; in hfi1_set_ctxt_pkey()
14866 hw_ctxt = rcd->sc->hw_context; in hfi1_set_ctxt_pkey()
14883 if (!ctxt || !ctxt->sc) in hfi1_clear_ctxt_pkey()
14884 return -EINVAL; in hfi1_clear_ctxt_pkey()
14886 hw_ctxt = ctxt->sc->hw_context; in hfi1_clear_ctxt_pkey()
14908 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14922 /* pre-allocate the asic structure in case we are the first device */ in init_asic_data()
14923 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL); in init_asic_data()
14925 return -ENOMEM; in init_asic_data()
14931 dd->unit != peer->unit) in init_asic_data()
14937 dd->asic_data = peer->asic_data; in init_asic_data()
14940 dd->asic_data = asic_data; in init_asic_data()
14941 mutex_init(&dd->asic_data->asic_resource_mutex); in init_asic_data()
14943 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */ in init_asic_data()
14946 /* first one through - set up i2c devices */ in init_asic_data()
14948 ret = set_up_i2c(dd, dd->asic_data); in init_asic_data()
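init_asic_data() uses a speculative-allocation idiom for two HFIs sharing one ASIC structure: allocate before searching, adopt the peer's structure if a peer with the same unit exists (discarding the fresh copy), otherwise publish the fresh one; each device then writes its self back-pointer and the first one through sets up the shared i2c devices. A compact standalone sketch with a simplified lookup (find_peer() is a stand-in for the driver's list walk under its lock):

#include <stdlib.h>

struct asic_data { int shared_state; };
struct dev { int unit; struct asic_data *asic; };

extern struct dev *find_peer(int unit);	/* hypothetical peer lookup */

static int adopt_or_publish(struct dev *d)
{
	struct asic_data *fresh = calloc(1, sizeof(*fresh));
	struct dev *peer;

	if (!fresh)
		return -1;

	peer = find_peer(d->unit);
	if (peer) {
		d->asic = peer->asic;	/* share the peer's structure */
		free(fresh);		/* speculative copy unused */
	} else {
		d->asic = fresh;	/* first one through publishes */
	}
	return 0;
}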
14954 * Set dd->boardname. Use a generic name if a name is not returned from
14957 * Return 0 on success, -ENOMEM if space could not be allocated.
14963 "Intel Omni-Path Host Fabric Interface Adapter 100 Series"; in obtain_boardname()
14968 (void **)&dd->boardname); in obtain_boardname()
14972 dd->boardname = kstrdup(generic, GFP_KERNEL); in obtain_boardname()
14973 if (!dd->boardname) in obtain_boardname()
14974 return -ENOMEM; in obtain_boardname()
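obtain_boardname() degrades gracefully: if the platform configuration yields no name, a generic string is duplicated, and the only remaining failure is that allocation. The shape in standalone form (get_platform_name() is a stand-in for the configuration query):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

extern int get_platform_name(char **name);	/* hypothetical query */

static int obtain_name(char **out)
{
	static const char generic[] = "Generic Adapter";

	if (get_platform_name(out) == 0)
		return 0;
	*out = strdup(generic);	/* fall back to the generic name */
	return *out ? 0 : -ENOMEM;
}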
14985 * Return 0 on success, -EINVAL on failure.
15020 return -EINVAL; in check_int_registers()
15024 * hfi1_init_dd() - Initialize most of the dd structure.
15029 * chip-specific function pointers for later use.
15033 struct pci_dev *pdev = dd->pcidev; in hfi1_init_dd()
15043 struct pci_dev *parent = pdev->bus->self; in hfi1_init_dd()
15046 ppd = dd->pport; in hfi1_init_dd()
15047 for (i = 0; i < dd->num_pports; i++, ppd++) { in hfi1_init_dd()
15052 ppd->link_width_supported = in hfi1_init_dd()
15055 ppd->link_width_downgrade_supported = in hfi1_init_dd()
15056 ppd->link_width_supported; in hfi1_init_dd()
15058 ppd->link_width_enabled = OPA_LINK_WIDTH_4X; in hfi1_init_dd()
15059 ppd->link_width_downgrade_enabled = in hfi1_init_dd()
15060 ppd->link_width_downgrade_supported; in hfi1_init_dd()
15070 ppd->vls_supported = num_vls; in hfi1_init_dd()
15071 ppd->vls_operational = ppd->vls_supported; in hfi1_init_dd()
15074 dd->vld[vl].mtu = hfi1_max_mtu; in hfi1_init_dd()
15075 dd->vld[15].mtu = MAX_MAD_PACKET; in hfi1_init_dd()
15080 ppd->overrun_threshold = 0x4; in hfi1_init_dd()
15081 ppd->phy_error_threshold = 0xf; in hfi1_init_dd()
15082 ppd->port_crc_mode_enabled = link_crc_mask; in hfi1_init_dd()
15084 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8; in hfi1_init_dd()
15086 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4; in hfi1_init_dd()
15088 ppd->host_link_state = HLS_DN_OFFLINE; in hfi1_init_dd()
15106 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT) in hfi1_init_dd()
15108 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT) in hfi1_init_dd()
15123 * obtain the hardware ID - NOT related to unit, which is a in hfi1_init_dd()
15127 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT) in hfi1_init_dd()
15130 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT; in hfi1_init_dd()
15131 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT; in hfi1_init_dd()
15133 dd->icode < ARRAY_SIZE(inames) ? in hfi1_init_dd()
15134 inames[dd->icode] : "unknown", (int)dd->irev); in hfi1_init_dd()
15137 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G; in hfi1_init_dd()
15139 dd->pport->link_speed_enabled = dd->pport->link_speed_supported; in hfi1_init_dd()
15141 dd->pport->link_speed_active = OPA_LINK_SPEED_25G; in hfi1_init_dd()
15144 ppd = dd->pport; in hfi1_init_dd()
15145 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) { in hfi1_init_dd()
15146 ppd->link_width_supported = in hfi1_init_dd()
15147 ppd->link_width_enabled = in hfi1_init_dd()
15148 ppd->link_width_downgrade_supported = in hfi1_init_dd()
15149 ppd->link_width_downgrade_enabled = in hfi1_init_dd()
15157 ppd->vls_supported = sdma_engines; in hfi1_init_dd()
15158 ppd->vls_operational = ppd->vls_supported; in hfi1_init_dd()
15164 * non-zero, then the calculated field will be at least 1. in hfi1_init_dd()
15166 * Must be after icode is set up - the cclock rate depends in hfi1_init_dd()
15169 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64; in hfi1_init_dd()
15170 if (dd->rcv_intr_timeout_csr > in hfi1_init_dd()
15172 dd->rcv_intr_timeout_csr = in hfi1_init_dd()
15174 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout) in hfi1_init_dd()
15175 dd->rcv_intr_timeout_csr = 1; in hfi1_init_dd()
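The timeout handling above is convert-then-clamp: the requested nanoseconds become cclock ticks, are scaled down by 64 (the CSR counts in units of 64 ticks), capped at the field maximum, and rounded up to 1 when a non-zero request underflowed to zero. A standalone sketch of the clamp:

#include <stdint.h>

/* fit a converted timeout into a CSR field holding at most max;
 * a non-zero request must never collapse to zero */
static uint32_t clamp_timeout(uint64_t ticks, uint32_t max,
			      int requested_nonzero)
{
	if (ticks > max)
		return max;
	if (ticks == 0 && requested_nonzero)
		return 1;
	return (uint32_t)ticks;
}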
15215 * - init_chip() - the chip will not initiate any PCIe transactions in hfi1_init_dd()
15216 * - pcie_speeds() - reads the current link speed in hfi1_init_dd()
15217 * - hfi1_firmware_init() - the needed firmware is ready to be in hfi1_init_dd()
15239 snprintf(dd->boardversion, BOARD_VERS_MAX, in hfi1_init_dd()
15242 (u32)dd->majrev, in hfi1_init_dd()
15243 (u32)dd->minrev, in hfi1_init_dd()
15244 (dd->revision >> CCE_REVISION_SW_SHIFT) in hfi1_init_dd()
15263 /* set initial non-RXE, non-TXE CSRs */ in hfi1_init_dd()
15292 for (i = 0; i < dd->num_pports; ++i) { in hfi1_init_dd()
15307 /* set up LCB access - must be after set_up_interrupts() */ in hfi1_init_dd()
15315 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n", in hfi1_init_dd()
15316 (dd->base_guid & 0xFFFFFF) | in hfi1_init_dd()
15317 ((dd->base_guid >> 11) & 0xF000000)); in hfi1_init_dd()
15319 dd->oui1 = dd->base_guid >> 56 & 0xFF; in hfi1_init_dd()
15320 dd->oui2 = dd->base_guid >> 48 & 0xFF; in hfi1_init_dd()
15321 dd->oui3 = dd->base_guid >> 40 & 0xFF; in hfi1_init_dd()
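The OUI extraction peels the top three bytes off the 64-bit base GUID, consistent with an EUI-64 whose most significant 24 bits are the OUI. Worked example: for base_guid = 0x0011750123456789, oui1 = (guid >> 56) & 0xFF = 0x00, oui2 = (guid >> 48) & 0xFF = 0x11, and oui3 = (guid >> 40) & 0xFF = 0x75 (shift binds tighter than &, so each mask applies after its shift). The serial string combines the GUID's low 24 bits with GUID bits 35..38, which the >> 11 moves into bit positions 24..27 before the 0xF000000 mask.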
15337 init_completion(&dd->user_comp); in hfi1_init_dd()
15340 atomic_set(&dd->user_refcount, 1); in hfi1_init_dd()
15364 u32 current_egress_rate = ppd->current_egress_rate; in delay_cycles()
15367 if (desired_egress_rate == -1) in delay_cycles()
15373 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) - in delay_cycles()
15380 * create_pbc - build a pbc for transmission
15381 * @flags: special case flags or-ed in built pbc
15384 * @dwlen: dword length (header words + data words + pbc words)
15386 * Create a PBC with the given flags, rate, VL, and length.
15388 * NOTE: The PBC created will not insert any HCRC - all callers but one are
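Per the kernel-doc above, the PBC is assembled by OR-ing the caller's flags with the VL and dword-length fields shifted into place (plus a rate-delay field when a static egress rate is requested). A hedged sketch of that assembly; the shift and mask values below are placeholders, not the hardware's real PBC layout:

#include <stdint.h>

/* placeholder placements - the real PBC_* constants live in the
 * driver's headers */
#define PBC_VL_SHIFT		12
#define PBC_VL_MASK		0xfull
#define PBC_LEN_DWS_MASK	0xfffull

static uint64_t build_pbc(uint64_t flags, uint32_t vl, uint32_t dw_len)
{
	return flags |
	       (((uint64_t)vl & PBC_VL_MASK) << PBC_VL_SHIFT) |
	       ((uint64_t)dw_len & PBC_LEN_DWS_MASK);
}

In keeping with the note above, the sketch sets no HCRC-insertion field; a caller that needs one would pass it in via flags.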
15433 if (dd->icode != ICODE_RTL_SILICON || in thermal_init()
15462 /* Step 3: Write clock divider value (100MHz -> 2MHz) */ in thermal_init()
15477 /* Step 5: De-assert block reset and start conversion */ in thermal_init()
15502 struct hfi1_pportdata *ppd = &dd->pport[0]; in handle_temp_err()
15510 dd->flags |= HFI1_FORCED_FREEZE; in handle_temp_err()
15523 ppd->driver_link_ready = 0; in handle_temp_err()
15524 ppd->link_enabled = 0; in handle_temp_err()