/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/en/ |
D | xdp.c |
     59  mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,  in mlx5e_xmit_xdp_buff() argument
     89      dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len,  in mlx5e_xmit_xdp_buff()
     91      if (dma_mapping_error(sq->pdev, dma_addr)) {  in mlx5e_xmit_xdp_buff()
    109      dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,  in mlx5e_xmit_xdp_buff()
    117      return INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,  in mlx5e_xmit_xdp_buff()
    118          mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);  in mlx5e_xmit_xdp_buff()
    166  static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size)  in mlx5e_xdpsq_get_next_pi() argument
    168      struct mlx5_wq_cyc *wq = &sq->wq;  in mlx5e_xdpsq_get_next_pi()
    171      pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);  in mlx5e_xdpsq_get_next_pi()
    176      wi = &sq->db.wqe_info[pi];  in mlx5e_xdpsq_get_next_pi()
[all …]
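The INDIRECT_CALL_2() at line 117 avoids a retpoline-era indirect branch by comparing the function pointer against its likely targets and calling them directly. A minimal user-space sketch of that idea, with made-up xmit functions (not the kernel's):

#include <stdio.h>

typedef int (*xmit_fn)(int len);

static int xmit_frame(int len)       { return len; }
static int xmit_frame_mpwqe(int len) { return len * 2; }

/* Hand-expanded equivalent of INDIRECT_CALL_2(f, f1, f2, args...):
 * the compiler emits direct calls on the two hot paths. */
#define INDIRECT_CALL_2_SKETCH(f, f1, f2, ...)          \
        ((f) == (f1) ? (f1)(__VA_ARGS__) :              \
         (f) == (f2) ? (f2)(__VA_ARGS__) : (f)(__VA_ARGS__))

int main(void)
{
        xmit_fn fn = xmit_frame_mpwqe;

        printf("%d\n", INDIRECT_CALL_2_SKETCH(fn, xmit_frame_mpwqe,
                                              xmit_frame, 64));
        return 0;
}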
|
D | reporter_tx.c |
      8  static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)  in mlx5e_wait_for_sq_flush() argument
     14      if (sq->cc == sq->pc)  in mlx5e_wait_for_sq_flush()
     20      netdev_err(sq->netdev,  in mlx5e_wait_for_sq_flush()
     22          sq->sqn, sq->cc, sq->pc);  in mlx5e_wait_for_sq_flush()
     27  static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)  in mlx5e_reset_txqsq_cc_pc() argument
     29      WARN_ONCE(sq->cc != sq->pc,  in mlx5e_reset_txqsq_cc_pc()
     31          sq->sqn, sq->cc, sq->pc);  in mlx5e_reset_txqsq_cc_pc()
     32      sq->cc = 0;  in mlx5e_reset_txqsq_cc_pc()
     33      sq->dma_fifo_cc = 0;  in mlx5e_reset_txqsq_cc_pc()
     34      sq->pc = 0;  in mlx5e_reset_txqsq_cc_pc()
[all …]
|
D | xdp.h |
     52  void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
     54  void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
     55  void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
     60  INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
     64  INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
     68  INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
     69  INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
     99  static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)  in mlx5e_xmit_xdp_doorbell() argument
    101      if (sq->doorbell_cseg) {  in mlx5e_xmit_xdp_doorbell()
    102          mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);  in mlx5e_xmit_xdp_doorbell()
[all …]
|
D | txrx.h |
     52  void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
     74  void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
     92  #define MLX5E_TX_FETCH_WQE(sq, pi) \  argument
     93      ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))
    141  static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)  in mlx5e_txqsq_get_next_pi() argument
    143      struct mlx5_wq_cyc *wq = &sq->wq;  in mlx5e_txqsq_get_next_pi()
    146      pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);  in mlx5e_txqsq_get_next_pi()
    151      wi = &sq->db.wqe_info[pi];  in mlx5e_txqsq_get_next_pi()
    159      mlx5e_post_nop(wq, sq->sqn, &sq->pc);  in mlx5e_txqsq_get_next_pi()
    161      sq->stats->nop += contig_wqebbs;  in mlx5e_txqsq_get_next_pi()
[all …]
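mlx5e_txqsq_get_next_pi() keeps a free-running producer counter and masks it into a power-of-two ring; if a multi-WQEBB request would straddle the wrap point, it pads the remainder with NOP entries so hardware always sees the request contiguously. A user-space sketch of that wrap handling under those assumptions (ring size and counters are illustrative, not the driver's):

#include <stdio.h>

#define RING_SZ 8 /* must be a power of two */

static unsigned int pc; /* free-running producer counter */

static unsigned int ctr2ix(unsigned int ctr) { return ctr & (RING_SZ - 1); }

static unsigned int get_next_pi(unsigned int size)
{
        unsigned int pi = ctr2ix(pc);
        unsigned int contig = RING_SZ - pi;

        if (contig < size) {
                /* pad to the wrap point; each NOP consumes one slot */
                for (unsigned int i = 0; i < contig; i++)
                        pc++;
                pi = ctr2ix(pc); /* now 0 */
        }
        return pi;
}

int main(void)
{
        pc = 6;                            /* 2 slots left before the wrap */
        printf("pi=%u\n", get_next_pi(4)); /* pads 2 NOPs, returns 0 */
        printf("pc=%u\n", pc);             /* 8: counter stays free-running */
        return 0;
}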
|
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/ |
D | en_tx.c |
     43  static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)  in mlx5e_dma_unmap_wqe_err() argument
     49      mlx5e_dma_get(sq, --sq->dma_fifo_pc);  in mlx5e_dma_unmap_wqe_err()
     51      mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);  in mlx5e_dma_unmap_wqe_err()
    217  ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,  in ipsec_txwqe_build_eseg_csum() argument
    227      sq->stats->csum_partial_inner++;  in ipsec_txwqe_build_eseg_csum()
    232  mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,  in mlx5e_txwqe_build_eseg_csum() argument
    237      ipsec_txwqe_build_eseg_csum(sq, skb, eseg);  in mlx5e_txwqe_build_eseg_csum()
    246      sq->stats->csum_partial_inner++;  in mlx5e_txwqe_build_eseg_csum()
    249      sq->stats->csum_partial++;  in mlx5e_txwqe_build_eseg_csum()
    254      sq->stats->csum_partial++;  in mlx5e_txwqe_build_eseg_csum()
[all …]
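mlx5e_dma_unmap_wqe_err() rolls back the DMA mappings of a half-built descriptor by popping the last num_dma pushes off a mapping FIFO in reverse order. A plain-C stand-in for that unwind pattern, with malloc/free standing in for dma map/unmap (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define FIFO_SZ 16

static void *dma_fifo[FIFO_SZ];
static unsigned int dma_fifo_pc; /* producer counter */

static void push_mapping(void *addr)
{
        dma_fifo[dma_fifo_pc++ & (FIFO_SZ - 1)] = addr;
}

static void unmap_wqe_err(unsigned int num)
{
        while (num--) {
                /* pop newest-first, mirroring --sq->dma_fifo_pc above */
                void *last = dma_fifo[--dma_fifo_pc & (FIFO_SZ - 1)];
                free(last); /* kernel: mlx5e_tx_dma_unmap() */
        }
}

int main(void)
{
        push_mapping(malloc(64));
        push_mapping(malloc(64));
        unmap_wqe_err(2);               /* roll back both mappings */
        printf("pc=%u\n", dma_fifo_pc); /* back to 0 */
        return 0;
}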
|
D | en_txrx.c |
     48  static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)  in mlx5e_handle_tx_dim() argument
     50      struct mlx5e_sq_stats *stats = sq->stats;  in mlx5e_handle_tx_dim()
     53      if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))  in mlx5e_handle_tx_dim()
     56      dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);  in mlx5e_handle_tx_dim()
     57      net_dim(&sq->dim, dim_sample);  in mlx5e_handle_tx_dim()
     72  void mlx5e_trigger_irq(struct mlx5e_icosq *sq)  in mlx5e_trigger_irq() argument
     74      struct mlx5_wq_cyc *wq = &sq->wq;  in mlx5e_trigger_irq()
     76      u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);  in mlx5e_trigger_irq()
     78      sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {  in mlx5e_trigger_irq()
     83      nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);  in mlx5e_trigger_irq()
[all …]
|
D | en_main.c |
    204      struct mlx5e_icosq *sq,  in mlx5e_build_umr_wqe() argument
    211      cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |  in mlx5e_build_umr_wqe()
    922  static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)  in mlx5e_free_xdpsq_db() argument
    924      kvfree(sq->db.xdpi_fifo.xi);  in mlx5e_free_xdpsq_db()
    925      kvfree(sq->db.wqe_info);  in mlx5e_free_xdpsq_db()
    928  static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)  in mlx5e_alloc_xdpsq_fifo() argument
    930      struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;  in mlx5e_alloc_xdpsq_fifo()
    931      int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);  in mlx5e_alloc_xdpsq_fifo()
    939      xdpi_fifo->pc = &sq->xdpi_fifo_pc;  in mlx5e_alloc_xdpsq_fifo()
    940      xdpi_fifo->cc = &sq->xdpi_fifo_cc;  in mlx5e_alloc_xdpsq_fifo()
[all …]
|
/Linux-v5.15/tools/io_uring/ |
D | queue.c |
     70      struct io_uring_sq *sq = &ring->sq;  in io_uring_submit() local
     71      const unsigned mask = *sq->kring_mask;  in io_uring_submit()
     81      if (*sq->khead != *sq->ktail) {  in io_uring_submit()
     82          submitted = *sq->kring_entries;  in io_uring_submit()
     86      if (sq->sqe_head == sq->sqe_tail)  in io_uring_submit()
     93      ktail = ktail_next = *sq->ktail;  in io_uring_submit()
     94      to_submit = sq->sqe_tail - sq->sqe_head;  in io_uring_submit()
     99      sq->array[ktail & mask] = sq->sqe_head & mask;  in io_uring_submit()
    102      sq->sqe_head++;  in io_uring_submit()
    109      if (*sq->ktail != ktail) {  in io_uring_submit()
[all …]
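The submit path above uses two levels of indirection: the app tracks its own sqe_head/sqe_tail for prepared SQEs, and publishes each SQE's slot index into the shared array[] at ktail & mask. A plain-memory mock of that index math (no real ring, no kernel side):

#include <stdio.h>

#define ENTRIES 8
#define MASK (ENTRIES - 1)

int main(void)
{
        unsigned int khead = 0, ktail = 0;       /* normally shared with the kernel */
        unsigned int sqe_head = 0, sqe_tail = 3; /* 3 SQEs already prepared */
        unsigned int array[ENTRIES];

        unsigned int to_submit = sqe_tail - sqe_head;
        for (unsigned int i = 0; i < to_submit; i++) {
                /* publish which SQE slot each ring entry refers to */
                array[ktail & MASK] = sqe_head & MASK;
                ktail++;
                sqe_head++;
        }
        /* A real ring would store-release the new ktail here so the kernel
         * observes the array entries before the updated tail. */
        printf("submitted=%u ktail=%u\n", ktail - khead, ktail);
        return 0;
}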
|
D | setup.c |
     11      struct io_uring_sq *sq, struct io_uring_cq *cq)  in io_uring_mmap() argument
     17      sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);  in io_uring_mmap()
     18      ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,  in io_uring_mmap()
     22      sq->khead = ptr + p->sq_off.head;  in io_uring_mmap()
     23      sq->ktail = ptr + p->sq_off.tail;  in io_uring_mmap()
     24      sq->kring_mask = ptr + p->sq_off.ring_mask;  in io_uring_mmap()
     25      sq->kring_entries = ptr + p->sq_off.ring_entries;  in io_uring_mmap()
     26      sq->kflags = ptr + p->sq_off.flags;  in io_uring_mmap()
     27      sq->kdropped = ptr + p->sq_off.dropped;  in io_uring_mmap()
     28      sq->array = ptr + p->sq_off.array;  in io_uring_mmap()
[all …]
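io_uring_mmap() maps the SQ ring once and then derives every shared field from the sq_off offsets the kernel returned from io_uring_setup(). A stand-alone user-space version of the SQ-ring half, using raw syscalls rather than liburing (error handling trimmed; needs a kernel with io_uring):

#define _GNU_SOURCE
#include <linux/io_uring.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct io_uring_params p;
        memset(&p, 0, sizeof(p));

        int fd = syscall(__NR_io_uring_setup, 8, &p);
        if (fd < 0)
                return 1;

        /* Same size computation as setup.c line 17: the array of SQE
         * indices sits at sq_off.array, one unsigned per entry. */
        size_t ring_sz = p.sq_off.array + p.sq_entries * sizeof(unsigned);
        void *ptr = mmap(0, ring_sz, PROT_READ | PROT_WRITE,
                         MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
        if (ptr == MAP_FAILED)
                return 1;

        /* The kernel publishes ring fields at fixed offsets in the map. */
        unsigned *khead = (unsigned *)((char *)ptr + p.sq_off.head);
        unsigned *ktail = (unsigned *)((char *)ptr + p.sq_off.tail);
        unsigned *kring_mask = (unsigned *)((char *)ptr + p.sq_off.ring_mask);

        printf("entries=%u mask=%u head=%u tail=%u\n",
               p.sq_entries, *kring_mask, *khead, *ktail);

        munmap(ptr, ring_sz);
        close(fd);
        return 0;
}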
|
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ |
D | ktls_tx.c |
    147  static void tx_fill_wi(struct mlx5e_txqsq *sq,  in tx_fill_wi() argument
    151      struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];  in tx_fill_wi()
    171  post_static_params(struct mlx5e_txqsq *sq,  in post_static_params() argument
    179      pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);  in post_static_params()
    180      wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);  in post_static_params()
    181      mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,  in post_static_params()
    184      tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);  in post_static_params()
    185      sq->pc += num_wqebbs;  in post_static_params()
    189  post_progress_params(struct mlx5e_txqsq *sq,  in post_progress_params() argument
    197      pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);  in post_progress_params()
[all …]
|
D | ktls_rx.c |
    147  static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi,  in icosq_fill_wi() argument
    150      sq->db.wqe_info[pi] = *wi;  in icosq_fill_wi()
    154  post_static_params(struct mlx5e_icosq *sq,  in post_static_params() argument
    162      if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))  in post_static_params()
    165      pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);  in post_static_params()
    166      wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);  in post_static_params()
    167      mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,  in post_static_params()
    176      icosq_fill_wi(sq, pi, &wi);  in post_static_params()
    177      sq->pc += num_wqebbs;  in post_static_params()
    183  post_progress_params(struct mlx5e_icosq *sq,  in post_progress_params() argument
[all …]
|
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/ |
D | tx.c |
     52  static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,  in mlx5e_xsk_tx_post_err() argument
     55      u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);  in mlx5e_xsk_tx_post_err()
     56      struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];  in mlx5e_xsk_tx_post_err()
     62      nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);  in mlx5e_xsk_tx_post_err()
     63      mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);  in mlx5e_xsk_tx_post_err()
     64      sq->doorbell_cseg = &nopwqe->ctrl;  in mlx5e_xsk_tx_post_err()
     67  bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)  in mlx5e_xsk_tx() argument
     69      struct xsk_buff_pool *pool = sq->xsk_pool;  in mlx5e_xsk_tx()
     78      int check_result = INDIRECT_CALL_2(sq->xmit_xdp_frame_check,  in mlx5e_xsk_tx()
     81          sq);  in mlx5e_xsk_tx()
[all …]
|
D | tx.h |
     14  bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget);
     16  static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq)  in mlx5e_xsk_update_tx_wakeup() argument
     18      if (!xsk_uses_need_wakeup(sq->xsk_pool))  in mlx5e_xsk_update_tx_wakeup()
     21      if (sq->pc != sq->cc)  in mlx5e_xsk_update_tx_wakeup()
     22          xsk_clear_tx_need_wakeup(sq->xsk_pool);  in mlx5e_xsk_update_tx_wakeup()
     24          xsk_set_tx_need_wakeup(sq->xsk_pool);  in mlx5e_xsk_update_tx_wakeup()
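The invariant here: while the producer and consumer counters differ (pc != cc) the driver still has completions coming and will be polled again, so the application need not kick it; only when the queue drains is the need-wakeup flag raised. A trivial sketch of that flag logic with illustrative stand-ins for the XSK helpers:

#include <stdbool.h>
#include <stdio.h>

static bool need_wakeup;

static void update_tx_wakeup(unsigned int pc, unsigned int cc)
{
        /* work in flight: clear the flag; queue drained: set it */
        need_wakeup = (pc == cc);
}

int main(void)
{
        update_tx_wakeup(5, 3);
        printf("%d\n", need_wakeup); /* 0: completions still pending */
        update_tx_wakeup(5, 5);
        printf("%d\n", need_wakeup); /* 1: app must kick the driver */
        return 0;
}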
|
/Linux-v5.15/sound/oss/dmasound/ |
D | dmasound_core.c |
    416  static int sq_allocate_buffers(struct sound_queue *sq, int num, int size)  in sq_allocate_buffers() argument
    420      if (sq->buffers)  in sq_allocate_buffers()
    422      sq->numBufs = num;  in sq_allocate_buffers()
    423      sq->bufSize = size;  in sq_allocate_buffers()
    424      sq->buffers = kmalloc_array(num, sizeof(char *), GFP_KERNEL);  in sq_allocate_buffers()
    425      if (!sq->buffers)  in sq_allocate_buffers()
    428      sq->buffers[i] = dmasound.mach.dma_alloc(size, GFP_KERNEL);  in sq_allocate_buffers()
    429      if (!sq->buffers[i]) {  in sq_allocate_buffers()
    431          dmasound.mach.dma_free(sq->buffers[i], size);  in sq_allocate_buffers()
    432          kfree(sq->buffers);  in sq_allocate_buffers()
[all …]
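sq_allocate_buffers() shows the classic partial-failure cleanup pattern: allocate a pointer array, fill it buffer by buffer, and on a mid-loop failure free exactly the buffers already allocated before freeing the array itself. A user-space analogue (malloc/free replace the machine-specific dma_alloc/dma_free):

#include <stdio.h>
#include <stdlib.h>

static char **buffers;
static int num_bufs;

static int allocate_buffers(int num, int size)
{
        buffers = calloc(num, sizeof(char *));
        if (!buffers)
                return -1;
        for (int i = 0; i < num; i++) {
                buffers[i] = malloc(size);
                if (!buffers[i]) {
                        while (i--)     /* unwind partial progress */
                                free(buffers[i]);
                        free(buffers);
                        buffers = NULL;
                        return -1;
                }
        }
        num_bufs = num;
        return 0;
}

int main(void)
{
        printf("%d\n", allocate_buffers(4, 4096)); /* 0 on success */
        return 0;
}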
|
/Linux-v5.15/drivers/net/ethernet/cavium/thunder/ |
D | nicvf_queues.c |
     19  static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
    504      struct snd_queue *sq, int q_len, int qidx)  in nicvf_init_snd_queue() argument
    508      err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,  in nicvf_init_snd_queue()
    513      sq->desc = sq->dmem.base;  in nicvf_init_snd_queue()
    514      sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);  in nicvf_init_snd_queue()
    515      if (!sq->skbuff)  in nicvf_init_snd_queue()
    518      sq->head = 0;  in nicvf_init_snd_queue()
    519      sq->tail = 0;  in nicvf_init_snd_queue()
    520      sq->thresh = SND_QUEUE_THRESH;  in nicvf_init_snd_queue()
    527      sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL);  in nicvf_init_snd_queue()
[all …]
|
/Linux-v5.15/drivers/net/ethernet/intel/ice/ |
D | ice_controlq.c |
      8      (qinfo)->sq.head = prefix##_ATQH; \
      9      (qinfo)->sq.tail = prefix##_ATQT; \
     10      (qinfo)->sq.len = prefix##_ATQLEN; \
     11      (qinfo)->sq.bah = prefix##_ATQBAH; \
     12      (qinfo)->sq.bal = prefix##_ATQBAL; \
     13      (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
     14      (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
     15      (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M; \
     16      (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
     77      if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)  in ice_check_sq_alive()
[all …]
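The macro body above uses ## token pasting so one macro can stamp out per-queue register assignments for several register prefixes. A minimal sketch of the technique; the register names mimic ice's but the values and struct are invented for illustration:

#include <stdio.h>

struct q_regs { unsigned head, tail, len; };

/* illustrative register addresses, not the real ice values */
#define PF_FW_ATQH   0x100
#define PF_FW_ATQT   0x104
#define PF_FW_ATQLEN 0x108

#define CQ_INIT_REGS(qinfo, prefix)               \
        do {                                      \
                (qinfo)->head = prefix##_ATQH;    \
                (qinfo)->tail = prefix##_ATQT;    \
                (qinfo)->len  = prefix##_ATQLEN;  \
        } while (0)

int main(void)
{
        struct q_regs sq;

        CQ_INIT_REGS(&sq, PF_FW); /* pastes into PF_FW_ATQH etc. */
        printf("head=0x%x tail=0x%x len=0x%x\n", sq.head, sq.tail, sq.len);
        return 0;
}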
|
/Linux-v5.15/drivers/net/ethernet/marvell/octeontx2/nic/ |
D | otx2_txrx.c |
     78      struct otx2_snd_queue *sq,  in otx2_snd_pkt_handler() argument
     94      sg = &sq->sg[snd_comp->sqe_id];  in otx2_snd_pkt_handler()
    100      timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];  in otx2_snd_pkt_handler()
    378      otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx],  in otx2_tx_napi_handler()
    448  void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,  in otx2_sqe_flush() argument
    457      memcpy(sq->lmt_addr, sq->sqe_base, size);  in otx2_sqe_flush()
    458      status = otx2_lmt_flush(sq->io_addr);  in otx2_sqe_flush()
    461      sq->head++;  in otx2_sqe_flush()
    462      sq->head &= (sq->sqe_cnt - 1);  in otx2_sqe_flush()
    467  static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,  in otx2_sqe_add_sg() argument
[all …]
|
/Linux-v5.15/drivers/soc/qcom/ |
D | qmi_interface.c |
     18      struct sockaddr_qrtr *sq);
    167      struct sockaddr_qrtr sq;  in qmi_send_new_lookup() local
    177      sq.sq_family = qmi->sq.sq_family;  in qmi_send_new_lookup()
    178      sq.sq_node = qmi->sq.sq_node;  in qmi_send_new_lookup()
    179      sq.sq_port = QRTR_PORT_CTRL;  in qmi_send_new_lookup()
    181      msg.msg_name = &sq;  in qmi_send_new_lookup()
    182      msg.msg_namelen = sizeof(sq);  in qmi_send_new_lookup()
    230      struct sockaddr_qrtr sq;  in qmi_send_new_server() local
    239      pkt.server.node = cpu_to_le32(qmi->sq.sq_node);  in qmi_send_new_server()
    240      pkt.server.port = cpu_to_le32(qmi->sq.sq_port);  in qmi_send_new_server()
[all …]
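qmi_send_new_lookup() addresses the name-service control port by filling a sockaddr_qrtr and passing it via msg_name. A user-space sketch of the same addressing pattern; it assumes a kernel with AF_QIPCRTR support and a libc that defines AF_QIPCRTR, and the payload is a placeholder, not a real QRTR control packet:

#include <linux/qrtr.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);
        if (fd < 0)
                return 1;

        struct sockaddr_qrtr sq = {
                .sq_family = AF_QIPCRTR,
                .sq_node   = 0,              /* local node, illustrative */
                .sq_port   = QRTR_PORT_CTRL, /* name-service control port */
        };

        char payload[4] = { 0 };             /* placeholder payload */
        struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
        struct msghdr msg = {
                .msg_name    = &sq,
                .msg_namelen = sizeof(sq),
                .msg_iov     = &iov,
                .msg_iovlen  = 1,
        };

        if (sendmsg(fd, &msg, 0) < 0)
                perror("sendmsg");
        close(fd);
        return 0;
}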
|
/Linux-v5.15/drivers/net/ethernet/huawei/hinic/ |
D | hinic_hw_qp.c |
     59  #define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi))  argument
     61  #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)  argument
     93      struct hinic_sq *sq, u16 global_qid)  in hinic_sq_prepare_ctxt() argument
    100      wq = sq->wq;  in hinic_sq_prepare_ctxt()
    219  static int alloc_sq_skb_arr(struct hinic_sq *sq)  in alloc_sq_skb_arr() argument
    221      struct hinic_wq *wq = sq->wq;  in alloc_sq_skb_arr()
    224      skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);  in alloc_sq_skb_arr()
    225      sq->saved_skb = vzalloc(skb_arr_size);  in alloc_sq_skb_arr()
    226      if (!sq->saved_skb)  in alloc_sq_skb_arr()
    236  static void free_sq_skb_arr(struct hinic_sq *sq)  in free_sq_skb_arr() argument
[all …]
|
D | hinic_hw_qp.h |
     57  #define HINIC_MIN_TX_NUM_WQEBBS(sq) \  argument
     58      (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
    122      struct hinic_sq sq;  member
    133      struct hinic_sq *sq, u16 global_qid);
    138  int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
    142  void hinic_clean_sq(struct hinic_sq *sq);
    149  int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
    178  void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
    182  void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
    185  struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
[all …]
|
D | hinic_tx.c |
     46  #define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))  argument
    504      qp = container_of(txq->sq, struct hinic_qp, sq);  in hinic_lb_xmit_frame()
    513      sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);  in hinic_lb_xmit_frame()
    517      sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);  in hinic_lb_xmit_frame()
    534      hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);  in hinic_lb_xmit_frame()
    535      hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);  in hinic_lb_xmit_frame()
    540      hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);  in hinic_lb_xmit_frame()
    565      qp = container_of(txq->sq, struct hinic_qp, sq);  in hinic_xmit_frame()
    594      sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);  in hinic_xmit_frame()
    601      sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);  in hinic_xmit_frame()
[all …]
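The container_of() calls at lines 504 and 565 recover the enclosing hinic_qp from a pointer to its embedded sq member. A standard-C sketch of the macro using offsetof(), with toy struct names rather than hinic's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct sq { int depth; };
struct qp { int id; struct sq sq; };

int main(void)
{
        struct qp qp = { .id = 7 };
        struct sq *sq = &qp.sq;       /* only the member pointer... */

        struct qp *owner = container_of(sq, struct qp, sq);
        printf("id=%d\n", owner->id); /* ...recovers the whole qp */
        return 0;
}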
|
/Linux-v5.15/net/qrtr/ |
D | ns.c |
     52      struct sockaddr_qrtr sq;  member
    190  static int announce_servers(struct sockaddr_qrtr *sq)  in announce_servers() argument
    215      ret = service_announce_new(sq, srv);  in announce_servers()
    297      lookup_notify(&lookup->sq, srv, false);  in server_del()
    329  static int ctrl_cmd_hello(struct sockaddr_qrtr *sq)  in ctrl_cmd_hello() argument
    333      ret = say_hello(sq);  in ctrl_cmd_hello()
    337      return announce_servers(sq);  in ctrl_cmd_hello()
    346      struct sockaddr_qrtr sq;  in ctrl_cmd_bye() local
    398      sq.sq_family = AF_QIPCRTR;  in ctrl_cmd_bye()
    399      sq.sq_node = srv->node;  in ctrl_cmd_bye()
[all …]
|
/Linux-v5.15/drivers/infiniband/hw/cxgb4/ |
D | qp.c |
     95  static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)  in dealloc_oc_sq() argument
     97      c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);  in dealloc_oc_sq()
    100  static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)  in dealloc_host_sq() argument
    102      dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,  in dealloc_host_sq()
    103          dma_unmap_addr(sq, mapping));  in dealloc_host_sq()
    106  static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)  in dealloc_sq() argument
    108      if (t4_sq_onchip(sq))  in dealloc_sq()
    109          dealloc_oc_sq(rdev, sq);  in dealloc_sq()
    111          dealloc_host_sq(rdev, sq);  in dealloc_sq()
    114  static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)  in alloc_oc_sq() argument
[all …]
|
/Linux-v5.15/drivers/infiniband/hw/bnxt_re/ |
D | qplib_fp.c |
     61      qp->sq.condition = false;  in bnxt_qplib_cancel_phantom_processing()
     62      qp->sq.send_phantom = false;  in bnxt_qplib_cancel_phantom_processing()
     63      qp->sq.single = false;  in bnxt_qplib_cancel_phantom_processing()
     74      if (!qp->sq.flushed) {  in __bnxt_qplib_add_flush_qp()
     79          qp->sq.flushed = true;  in __bnxt_qplib_add_flush_qp()
    124      if (qp->sq.flushed) {  in __bnxt_qplib_del_flush_qp()
    125          qp->sq.flushed = false;  in __bnxt_qplib_del_flush_qp()
    142      qp->sq.hwq.prod = 0;  in bnxt_qplib_clean_qp()
    143      qp->sq.hwq.cons = 0;  in bnxt_qplib_clean_qp()
    177      struct bnxt_qplib_q *sq = &qp->sq;  in bnxt_qplib_free_qp_hdr_buf() local
[all …]
|
/Linux-v5.15/drivers/infiniband/hw/mlx5/ |
D | mem.c |
    118      spin_lock_irqsave(&qp->sq.lock, flags);  in post_send_nop()
    120      idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);  in post_send_nop()
    121      ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);  in post_send_nop()
    126      cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP);  in post_send_nop()
    130      qp->sq.wrid[idx] = wr_id;  in post_send_nop()
    131      qp->sq.w_list[idx].opcode = MLX5_OPCODE_NOP;  in post_send_nop()
    132      qp->sq.wqe_head[idx] = qp->sq.head + 1;  in post_send_nop()
    133      qp->sq.cur_post += DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg),  in post_send_nop()
    135      qp->sq.w_list[idx].next = qp->sq.cur_post;  in post_send_nop()
    136      qp->sq.head++;  in post_send_nop()
[all …]
|