| /Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ |
| D | ktls.h |
46   struct tls_offload_context_tx *tx_ctx;    member
55   struct tls_offload_context_tx tx_ctx;    member
63   struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);    in mlx5e_set_ktls_tx_priv_ctx() local
68   shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;    in mlx5e_set_ktls_tx_priv_ctx()
71   priv_tx->tx_ctx = tx_ctx;    in mlx5e_set_ktls_tx_priv_ctx()
77   struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);    in mlx5e_get_ktls_tx_priv_ctx() local
82   shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;    in mlx5e_get_ktls_tx_priv_ctx()
|
| D | tls.c |
130  struct mlx5e_tls_offload_context_tx *tx_ctx =    in mlx5e_tls_add() local
133  tx_ctx->swid = htonl(swid);    in mlx5e_tls_add()
134  tx_ctx->expected_seq = start_offload_tcp_sn;    in mlx5e_tls_add()
|
| D | ktls_tx.c |
198  struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;    in tx_sync_info_get() local
204  spin_lock_irqsave(&tx_ctx->lock, flags);    in tx_sync_info_get()
205  record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);    in tx_sync_info_get()
232  spin_unlock_irqrestore(&tx_ctx->lock, flags);    in tx_sync_info_get()
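
The ktls.h lines above show the mlx5e driver overlaying a larger "shadow" struct on the TLS core's TX offload context, so a pointer to its own per-connection state can be stashed in the space the core allocates past the visible struct. Below is a minimal user-space sketch of that overlay pattern, assuming the core allocation is larger than the visible struct; all names here (core_ctx_tx, drv_shadow_tx, CORE_CTX_ALLOC_SIZE) are illustrative, not the kernel API.

    #include <assert.h>
    #include <stdlib.h>

    #define CORE_CTX_ALLOC_SIZE 128          /* what the core actually allocates (assumed) */

    struct core_ctx_tx {                     /* the part the core itself touches */
            unsigned long expected_seq;
    };

    struct drv_priv_tx {                     /* driver-private per-flow state */
            unsigned int hw_stream_id;
            struct core_ctx_tx *tx_ctx;      /* back-pointer, as in priv_tx->tx_ctx */
    };

    struct drv_shadow_tx {                   /* overlay: core part + driver slot */
            struct core_ctx_tx core;
            struct drv_priv_tx *priv_tx;
    };

    static void set_priv_ctx(struct core_ctx_tx *tx_ctx, struct drv_priv_tx *priv_tx)
    {
            struct drv_shadow_tx *shadow = (struct drv_shadow_tx *)tx_ctx;

            /* Mirrors the driver's compile-time check that the overlay fits. */
            static_assert(sizeof(struct drv_shadow_tx) <= CORE_CTX_ALLOC_SIZE,
                          "shadow context does not fit the core allocation");

            shadow->priv_tx = priv_tx;       /* stash driver state in the tail */
            priv_tx->tx_ctx = tx_ctx;
    }

    static struct drv_priv_tx *get_priv_ctx(struct core_ctx_tx *tx_ctx)
    {
            return ((struct drv_shadow_tx *)tx_ctx)->priv_tx;
    }

    int main(void)
    {
            struct core_ctx_tx *tx_ctx = calloc(1, CORE_CTX_ALLOC_SIZE);
            struct drv_priv_tx priv = { .hw_stream_id = 7 };

            if (!tx_ctx)
                    return 1;
            set_priv_ctx(tx_ctx, &priv);
            return get_priv_ctx(tx_ctx)->hw_stream_id == 7 ? 0 : 1;
    }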
|
| /Linux-v5.4/net/wireless/ |
| D | lib80211_crypt_wep.c |
35   struct arc4_ctx tx_ctx;    member
138  arc4_setkey(&wep->tx_ctx, key, klen);    in lib80211_wep_encrypt()
139  arc4_crypt(&wep->tx_ctx, pos, pos, len + 4);    in lib80211_wep_encrypt()
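
lib80211_crypt_wep keeps one arc4 context per direction and, because WEP re-keys every packet (the per-packet IV is prepended to the WEP key), it simply re-runs setkey on tx_ctx before encrypting the payload plus the 4-byte ICV in place. The following self-contained user-space RC4 sketch shows that setkey/crypt split; it imitates the shape of the kernel helpers but is not the kernel's lib/crypto/arc4 code, and the key bytes are made up.

    #include <stddef.h>
    #include <stdint.h>

    struct rc4_ctx {
            uint8_t S[256];
            uint8_t i, j;
    };

    static void rc4_setkey(struct rc4_ctx *ctx, const uint8_t *key, size_t klen)
    {
            unsigned int i, j = 0;

            for (i = 0; i < 256; i++)
                    ctx->S[i] = i;
            for (i = 0; i < 256; i++) {
                    uint8_t t = ctx->S[i];

                    j = (j + t + key[i % klen]) & 0xff;
                    ctx->S[i] = ctx->S[j];
                    ctx->S[j] = t;
            }
            ctx->i = ctx->j = 0;
    }

    static void rc4_crypt(struct rc4_ctx *ctx, uint8_t *out, const uint8_t *in, size_t len)
    {
            uint8_t i = ctx->i, j = ctx->j;

            while (len--) {
                    uint8_t a, b;

                    i++;
                    a = ctx->S[i];
                    j += a;
                    b = ctx->S[j];
                    ctx->S[i] = b;              /* swap S[i] and S[j] */
                    ctx->S[j] = a;
                    *out++ = *in++ ^ ctx->S[(uint8_t)(a + b)];
            }
            ctx->i = i;
            ctx->j = j;
    }

    int main(void)
    {
            struct rc4_ctx tx, rx;
            const uint8_t key[] = { 0x01, 0x02, 0x03, 0xaa, 0xbb };  /* IV || WEP key, made up */
            uint8_t frame[] = "payload+icv";

            rc4_setkey(&tx, key, sizeof(key));
            rc4_crypt(&tx, frame, frame, sizeof(frame) - 1);         /* encrypt in place */

            rc4_setkey(&rx, key, sizeof(key));
            rc4_crypt(&rx, frame, frame, sizeof(frame) - 1);         /* decrypt in place */
            return frame[0] == 'p' ? 0 : 1;
    }

Separate per-direction contexts matter because RC4 is stateful stream cipher state: keystream consumed for TX must not be reused for RX, and the per-packet re-key resets the TX state for every frame.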
|
| /Linux-v5.4/drivers/infiniband/sw/siw/ |
| D | siw_qp.c |
134  qp->tx_ctx.tx_suspend = 1;    in siw_qp_llp_close()
229  struct siw_iwarp_tx *c_tx = &qp->tx_ctx;    in siw_qp_enable_crc()
582  if (qp->tx_ctx.mpa_crc_hd) {    in siw_send_terminate()
583  crypto_shash_init(qp->tx_ctx.mpa_crc_hd);    in siw_send_terminate()
584  if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,    in siw_send_terminate()
590  if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,    in siw_send_terminate()
595  crypto_shash_final(qp->tx_ctx.mpa_crc_hd, (u8 *)&crc);    in siw_send_terminate()
659  qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 0;    in siw_qp_nextstate_from_idle()
660  qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 0;    in siw_qp_nextstate_from_idle()
661  qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 0;    in siw_qp_nextstate_from_idle()
[all …]
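
In siw_send_terminate() the MPA CRC is driven as an incremental digest: the shash held in tx_ctx is initialised, updated once per fragment of the terminate FPDU, and finalised into crc. A small user-space sketch of that init/update/final pattern follows, using a bitwise CRC32C (reflected polynomial 0x82F63B78) in place of the kernel's crypto_shash API; the fragment contents are made up.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct crc_ctx { uint32_t crc; };

    static void crc_init(struct crc_ctx *ctx)
    {
            ctx->crc = ~0u;
    }

    static void crc_update(struct crc_ctx *ctx, const void *data, size_t len)
    {
            const uint8_t *p = data;
            uint32_t crc = ctx->crc;

            while (len--) {
                    crc ^= *p++;
                    for (int k = 0; k < 8; k++)
                            crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
            }
            ctx->crc = crc;
    }

    static uint32_t crc_final(struct crc_ctx *ctx)
    {
            return ~ctx->crc;
    }

    int main(void)
    {
            /* Hypothetical two-fragment message: header, then payload. */
            const char hdr[]  = "term-hdr";
            const char body[] = "term-body";
            struct crc_ctx ctx;

            crc_init(&ctx);
            crc_update(&ctx, hdr, sizeof(hdr) - 1);
            crc_update(&ctx, body, sizeof(body) - 1);
            printf("crc32c = 0x%08x\n", crc_final(&ctx));
            return 0;
    }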
|
| D | siw_qp_tx.c |
695   struct siw_iwarp_tx *c_tx = &qp->tx_ctx;    in siw_prepare_fpdu()
783   struct siw_iwarp_tx *c_tx = &qp->tx_ctx;    in siw_qp_sq_proc_tx()
785   int rv = 0, burst_len = qp->tx_ctx.burst;    in siw_qp_sq_proc_tx()
905   qp->tx_ctx.burst = burst_len;    in siw_qp_sq_proc_tx()
1018  if (unlikely(qp->tx_ctx.tx_suspend)) {    in siw_qp_sq_process()
1076  qp->tx_ctx.ctrl_sent, qp->tx_ctx.ctrl_len,    in siw_qp_sq_process()
1077  qp->tx_ctx.bytes_unsent);    in siw_qp_sq_process()
1113  if (!qp->tx_ctx.tx_suspend)    in siw_qp_sq_process()
1158  !qp->tx_ctx.tx_suspend)) {    in siw_sq_resume()
1166  if (!qp->tx_ctx.tx_suspend)    in siw_sq_resume()
|
| D | siw.h |
448  struct siw_iwarp_tx tx_ctx;	/* Transmit context */    member
492  #define tx_qp(tx) container_of(tx, struct siw_qp, tx_ctx)
493  #define tx_wqe(qp) (&(qp)->tx_ctx.wqe_active)
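
siw embeds the transmit context directly inside struct siw_qp and recovers the owning QP from a context pointer with container_of(), which is what the tx_qp() macro on line 492 does. Here is a user-space sketch of the same embed-and-recover pattern, with a hand-rolled container_of and illustrative struct names.

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct tx_context {
            int bytes_unsent;
    };

    struct qp {
            int id;
            struct tx_context tx_ctx;        /* embedded, not a pointer */
    };

    #define tx_qp(tx) container_of(tx, struct qp, tx_ctx)

    int main(void)
    {
            struct qp qp = { .id = 42 };
            struct tx_context *c_tx = &qp.tx_ctx;

            /* Given only the tx context, recover the QP it lives inside. */
            printf("qp id = %d\n", tx_qp(c_tx)->id);
            return 0;
    }

Embedding the context (rather than pointing at a separate allocation) makes tx_qp() pure pointer arithmetic: no extra allocation, no lookup, and the context's lifetime is tied to the QP's.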
|
| D | siw_verbs.c |
448  qp->tx_ctx.gso_seg_limit = 1;    in siw_create_qp()
449  qp->tx_ctx.zcopy_tx = zcopy_tx;    in siw_create_qp()
588  qp->tx_ctx.tx_suspend = 1;    in siw_verbs_modify_qp()
637  kfree(qp->tx_ctx.mpa_crc_hd);    in siw_destroy_qp()
878  qp->tx_ctx.in_syscall = 1;    in siw_post_send()
880  if (siw_qp_sq_process(qp) != 0 && !(qp->tx_ctx.tx_suspend))    in siw_post_send()
883  qp->tx_ctx.in_syscall = 0;    in siw_post_send()
|
| D | siw_cm.c |
377   qp->tx_ctx.tx_suspend = 1;    in siw_qp_cm_drop()
755   qp->tx_ctx.gso_seg_limit = 0;    in siw_proc_mpareply()
1302  cep->qp->tx_ctx.tx_suspend = 1;    in siw_cm_llp_state_change()
1607  qp->tx_ctx.gso_seg_limit = 0;    in siw_accept()
|
| D | siw_qp_rx.c |
1152  if (qp->tx_ctx.orq_fence) {    in siw_check_tx_fence()
1171  qp->tx_ctx.orq_fence = 0;    in siw_check_tx_fence()
1175  qp->tx_ctx.orq_fence = 0;    in siw_check_tx_fence()
|
| /Linux-v5.4/drivers/net/ethernet/intel/i40e/ |
| D | i40e_virtchnl_pf.c |
549  struct i40e_hmc_obj_txq tx_ctx;    in i40e_config_vsi_tx_queue() local
567  memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));    in i40e_config_vsi_tx_queue()
570  tx_ctx.base = info->dma_ring_addr / 128;    in i40e_config_vsi_tx_queue()
571  tx_ctx.qlen = info->ring_len;    in i40e_config_vsi_tx_queue()
572  tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);    in i40e_config_vsi_tx_queue()
573  tx_ctx.rdylist_act = 0;    in i40e_config_vsi_tx_queue()
574  tx_ctx.head_wb_ena = info->headwb_enabled;    in i40e_config_vsi_tx_queue()
575  tx_ctx.head_wb_addr = info->dma_headwb_addr;    in i40e_config_vsi_tx_queue()
588  ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);    in i40e_config_vsi_tx_queue()
|
| D | i40e_main.c |
3136  struct i40e_hmc_obj_txq tx_ctx;    in i40e_configure_tx_ring() local
3155  memset(&tx_ctx, 0, sizeof(tx_ctx));    in i40e_configure_tx_ring()
3157  tx_ctx.new_context = 1;    in i40e_configure_tx_ring()
3158  tx_ctx.base = (ring->dma / 128);    in i40e_configure_tx_ring()
3159  tx_ctx.qlen = ring->count;    in i40e_configure_tx_ring()
3160  tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |    in i40e_configure_tx_ring()
3162  tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);    in i40e_configure_tx_ring()
3165  tx_ctx.head_wb_ena = 1;    in i40e_configure_tx_ring()
3166  tx_ctx.head_wb_addr = ring->dma +    in i40e_configure_tx_ring()
3181  tx_ctx.rdylist =    in i40e_configure_tx_ring()
[all …]
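
Both i40e call sites fill the HMC TX-queue context the same way: zero the whole object, set the ring base in 128-byte units, the ring length, and optionally a head write-back address just past the descriptor ring, then hand the struct to a single programming call. Below is a short sketch of that fill pattern; the struct only mimics the i40e layout, and the 16-byte descriptor size in the write-back offset is an assumption carried over from the driver.

    #include <stdint.h>
    #include <string.h>

    struct hw_txq_ctx {
            uint64_t base;           /* ring base address, in 128-byte units */
            uint16_t qlen;           /* number of descriptors */
            uint8_t  head_wb_ena;    /* head write-back enable */
            uint64_t head_wb_addr;   /* where HW writes the consumed head */
    };

    struct tx_ring {
            uint64_t dma;            /* bus address of the descriptor ring */
            uint16_t count;          /* descriptors in the ring */
    };

    static void fill_txq_ctx(struct hw_txq_ctx *ctx, const struct tx_ring *ring)
    {
            memset(ctx, 0, sizeof(*ctx));        /* unset fields mean "feature off" */
            ctx->base = ring->dma / 128;         /* hardware expects 128-byte units */
            ctx->qlen = ring->count;
            ctx->head_wb_ena = 1;
            /* Head write-back lands just past the descriptors (16 bytes each, assumed). */
            ctx->head_wb_addr = ring->dma + ring->count * 16;
    }

    int main(void)
    {
            struct tx_ring ring = { .dma = 0x100000, .count = 512 };
            struct hw_txq_ctx ctx;

            fill_txq_ctx(&ctx, &ring);
            return ctx.qlen == 512 ? 0 : 1;
    }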
|
| /Linux-v5.4/net/tls/ |
| D | tls_sw.c |
2184  struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);    in tls_sw_write_space() local
2187  if (is_tx_ready(tx_ctx) &&    in tls_sw_write_space()
2188  !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))    in tls_sw_write_space()
2189  schedule_delayed_work(&tx_ctx->tx_work.work, 0);    in tls_sw_write_space()
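
tls_sw_write_space() only queues the delayed TX worker when a record is ready and the BIT_TX_SCHEDULED bit was not already set; the worker clears the bit when it runs, so at most one instance is ever queued. Here is a user-space sketch of that test_and_set_bit gate using C11 atomics, where schedule_tx_work() is only a stand-in for the kernel's delayed-work queueing.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_flag tx_scheduled = ATOMIC_FLAG_INIT;

    static void schedule_tx_work(void)
    {
            puts("tx work queued");
    }

    /* Called whenever write space opens up; may fire many times in a row. */
    static void write_space(bool tx_ready)
    {
            /* Only the caller that flips the flag from clear to set queues work. */
            if (tx_ready && !atomic_flag_test_and_set(&tx_scheduled))
                    schedule_tx_work();
    }

    /* The worker clears the flag when done, re-arming the gate. */
    static void tx_work_done(void)
    {
            atomic_flag_clear(&tx_scheduled);
    }

    int main(void)
    {
            write_space(true);   /* queues the work */
            write_space(true);   /* gate closed: no second queueing */
            tx_work_done();
            write_space(true);   /* queues again */
            return 0;
    }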
|