
Searched refs:sq (Results 1 – 25 of 186) sorted by relevance


/Linux-v4.19/drivers/net/ethernet/mellanox/mlx5/core/
en_tx.c
69 static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i) in mlx5e_dma_get() argument
71 return &sq->db.dma_fifo[i & sq->dma_fifo_mask]; in mlx5e_dma_get()
74 static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq, in mlx5e_dma_push() argument
79 struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++); in mlx5e_dma_push()
86 static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) in mlx5e_dma_unmap_wqe_err() argument
92 mlx5e_dma_get(sq, --sq->dma_fifo_pc); in mlx5e_dma_unmap_wqe_err()
94 mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma); in mlx5e_dma_unmap_wqe_err()
204 mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *e… in mlx5e_txwqe_build_eseg_csum() argument
211 sq->stats->csum_partial_inner++; in mlx5e_txwqe_build_eseg_csum()
214 sq->stats->csum_partial++; in mlx5e_txwqe_build_eseg_csum()
[all …]
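The en_tx.c hits above are the TX DMA fifo helpers: a free-running producer counter (dma_fifo_pc) is masked with dma_fifo_mask to index a power-of-two ring, and the error path walks the same counter backwards to unmap. Below is a minimal userspace sketch of that masking idiom; the names and types are illustrative, not the driver's.

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    /* Illustrative sketch only, not the mlx5e implementation. */
    struct dma_entry { uint64_t addr; uint32_t len; };

    struct dma_fifo {
        struct dma_entry *ring;   /* length must be a power of two */
        uint32_t mask;            /* length - 1 */
        uint32_t pc;              /* free-running producer counter */
    };

    /* Map a free-running counter onto a slot: &ring[i & mask]. */
    static struct dma_entry *fifo_get(struct dma_fifo *f, uint32_t i)
    {
        return &f->ring[i & f->mask];
    }

    static void fifo_push(struct dma_fifo *f, uint64_t addr, uint32_t len)
    {
        struct dma_entry *e = fifo_get(f, f->pc++);
        e->addr = addr;
        e->len  = len;
    }

    int main(void)
    {
        struct dma_fifo f = {
            .ring = calloc(8, sizeof(struct dma_entry)),
            .mask = 7,
        };
        if (!f.ring)
            return 1;

        for (uint32_t i = 0; i < 10; i++)
            fifo_push(&f, 0x1000 + i, 64);

        /* pc has wrapped past the ring length; slot 0 holds the 9th push */
        printf("slot 0 addr = 0x%llx\n", (unsigned long long)f.ring[0].addr);
        free(f.ring);
        return 0;
    }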
en_main.c
73 struct mlx5e_sq_param sq; member
316 struct mlx5e_icosq *sq, in mlx5e_build_umr_wqe() argument
323 cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | in mlx5e_build_umr_wqe()
944 struct mlx5e_icosq *sq = &rq->channel->icosq; in mlx5e_activate_rq() local
945 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5e_activate_rq()
948 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); in mlx5e_activate_rq()
951 sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; in mlx5e_activate_rq()
952 nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc); in mlx5e_activate_rq()
953 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); in mlx5e_activate_rq()
970 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq) in mlx5e_free_xdpsq_db() argument
[all …]
en_txrx.c
48 static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq) in mlx5e_handle_tx_dim() argument
50 struct mlx5e_sq_stats *stats = sq->stats; in mlx5e_handle_tx_dim()
53 if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state))) in mlx5e_handle_tx_dim()
56 net_dim_sample(sq->cq.event_ctr, stats->packets, stats->bytes, in mlx5e_handle_tx_dim()
58 net_dim(&sq->dim, dim_sample); in mlx5e_handle_tx_dim()
86 busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget); in mlx5e_napi_poll()
114 mlx5e_handle_tx_dim(&c->sq[i]); in mlx5e_napi_poll()
115 mlx5e_cq_arm(&c->sq[i].cq); in mlx5e_napi_poll()
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.c
37 mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di, in mlx5e_xmit_xdp_buff() argument
46 dma_sync_single_for_device(sq->pdev, xdpi.dma_addr, in mlx5e_xmit_xdp_buff()
50 return mlx5e_xmit_xdp_frame(sq, &xdpi); in mlx5e_xmit_xdp_buff()
105 bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi) in mlx5e_xmit_xdp_frame() argument
107 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5e_xmit_xdp_frame()
108 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); in mlx5e_xmit_xdp_frame()
119 struct mlx5e_xdpsq_stats *stats = sq->stats; in mlx5e_xmit_xdp_frame()
123 if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) { in mlx5e_xmit_xdp_frame()
128 if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) { in mlx5e_xmit_xdp_frame()
129 if (sq->doorbell) { in mlx5e_xmit_xdp_frame()
[all …]
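mlx5e_xmit_xdp_frame() above bails out when the cyclic work queue has no room and, if a doorbell is pending, flushes it first. The room test works on free-running producer/consumer counters, where unsigned subtraction gives the in-flight count even across wraparound. A simplified stand-in for that test (not the actual mlx5e_wqc_has_room_for() logic):

    #include <stdint.h>
    #include <stdbool.h>
    #include <assert.h>

    /* Simplified stand-in, not the mlx5 helper. pc and cc are free-running
     * 16-bit counters; unsigned subtraction yields the in-flight count even
     * after either counter wraps. */
    static bool ring_has_room_for(uint16_t size, uint16_t cc, uint16_t pc, uint16_t n)
    {
        uint16_t in_flight = (uint16_t)(pc - cc);
        return in_flight + n <= size;   /* operands promote to int, no overflow */
    }

    int main(void)
    {
        assert(ring_has_room_for(8, 65534, 2, 4));    /* counters wrapped, 4 in flight */
        assert(!ring_has_room_for(8, 65534, 2, 5));   /* only 4 slots left */
        assert(!ring_has_room_for(8, 0, 8, 1));       /* completely full */
        return 0;
    }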
xdp.h
46 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
48 bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
52 static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) in mlx5e_xmit_xdp_doorbell() argument
54 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5e_xmit_xdp_doorbell()
56 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */ in mlx5e_xmit_xdp_doorbell()
60 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl); in mlx5e_xmit_xdp_doorbell()
/Linux-v4.19/drivers/net/ethernet/intel/ice/
ice_controlq.c
16 cq->sq.head = PF_FW_ATQH; in ice_adminq_init_regs()
17 cq->sq.tail = PF_FW_ATQT; in ice_adminq_init_regs()
18 cq->sq.len = PF_FW_ATQLEN; in ice_adminq_init_regs()
19 cq->sq.bah = PF_FW_ATQBAH; in ice_adminq_init_regs()
20 cq->sq.bal = PF_FW_ATQBAL; in ice_adminq_init_regs()
21 cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M; in ice_adminq_init_regs()
22 cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M; in ice_adminq_init_regs()
23 cq->sq.head_mask = PF_FW_ATQH_ATQH_M; in ice_adminq_init_regs()
45 if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) in ice_check_sq_alive()
46 return (rd32(hw, cq->sq.len) & (cq->sq.len_mask | in ice_check_sq_alive()
[all …]
/Linux-v4.19/sound/oss/dmasound/
dmasound_core.c
415 static int sq_allocate_buffers(struct sound_queue *sq, int num, int size) in sq_allocate_buffers() argument
419 if (sq->buffers) in sq_allocate_buffers()
421 sq->numBufs = num; in sq_allocate_buffers()
422 sq->bufSize = size; in sq_allocate_buffers()
423 sq->buffers = kmalloc_array (num, sizeof(char *), GFP_KERNEL); in sq_allocate_buffers()
424 if (!sq->buffers) in sq_allocate_buffers()
427 sq->buffers[i] = dmasound.mach.dma_alloc(size, GFP_KERNEL); in sq_allocate_buffers()
428 if (!sq->buffers[i]) { in sq_allocate_buffers()
430 dmasound.mach.dma_free(sq->buffers[i], size); in sq_allocate_buffers()
431 kfree(sq->buffers); in sq_allocate_buffers()
[all …]
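sq_allocate_buffers() shows the usual two-level allocation with unwind: allocate the pointer array, then each buffer, and on a mid-loop failure free everything already allocated before bailing out. A userspace sketch of the same pattern, with plain malloc standing in for dmasound.mach.dma_alloc() and hypothetical names throughout:

    #include <stdlib.h>
    #include <string.h>

    /* Illustrative sketch; returns NULL on failure with nothing leaked. */
    static char **allocate_buffers(int num, int size)
    {
        char **bufs = calloc(num, sizeof(*bufs));
        if (!bufs)
            return NULL;

        for (int i = 0; i < num; i++) {
            bufs[i] = malloc(size);
            if (!bufs[i]) {
                while (--i >= 0)        /* unwind buffers allocated so far */
                    free(bufs[i]);
                free(bufs);
                return NULL;
            }
            memset(bufs[i], 0, size);
        }
        return bufs;
    }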
/Linux-v4.19/drivers/net/ethernet/cavium/thunder/
nicvf_queues.c
22 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
509 struct snd_queue *sq, int q_len, int qidx) in nicvf_init_snd_queue() argument
513 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, in nicvf_init_snd_queue()
518 sq->desc = sq->dmem.base; in nicvf_init_snd_queue()
519 sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL); in nicvf_init_snd_queue()
520 if (!sq->skbuff) in nicvf_init_snd_queue()
523 sq->head = 0; in nicvf_init_snd_queue()
524 sq->tail = 0; in nicvf_init_snd_queue()
525 sq->thresh = SND_QUEUE_THRESH; in nicvf_init_snd_queue()
532 sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL); in nicvf_init_snd_queue()
[all …]
/Linux-v4.19/drivers/net/ethernet/huawei/hinic/
hinic_tx.c
48 #define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr)) argument
190 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_xmit_frame()
213 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_xmit_frame()
220 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_xmit_frame()
237 hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges); in hinic_xmit_frame()
239 hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); in hinic_xmit_frame()
244 hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); in hinic_xmit_frame()
279 struct hinic_sq *sq = txq->sq; in free_all_tx_skbs() local
286 while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) { in free_all_tx_skbs()
287 sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci); in free_all_tx_skbs()
[all …]
hinic_hw_qp.c
68 #define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi)) argument
70 #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask) argument
104 struct hinic_sq *sq, u16 global_qid) in hinic_sq_prepare_ctxt() argument
111 wq = sq->wq; in hinic_sq_prepare_ctxt()
225 static int alloc_sq_skb_arr(struct hinic_sq *sq) in alloc_sq_skb_arr() argument
227 struct hinic_wq *wq = sq->wq; in alloc_sq_skb_arr()
230 skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb); in alloc_sq_skb_arr()
231 sq->saved_skb = vzalloc(skb_arr_size); in alloc_sq_skb_arr()
232 if (!sq->saved_skb) in alloc_sq_skb_arr()
242 static void free_sq_skb_arr(struct hinic_sq *sq) in free_sq_skb_arr() argument
[all …]
hinic_hw_qp.h
63 #define HINIC_MIN_TX_NUM_WQEBBS(sq) \ argument
64 (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
121 struct hinic_sq sq; member
132 struct hinic_sq *sq, u16 global_qid);
137 int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
141 void hinic_clean_sq(struct hinic_sq *sq);
148 int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
152 void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
156 void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
159 struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
[all …]
/Linux-v4.19/drivers/soc/qcom/
qmi_interface.c
18 struct sockaddr_qrtr *sq);
167 struct sockaddr_qrtr sq; in qmi_send_new_lookup() local
177 sq.sq_family = qmi->sq.sq_family; in qmi_send_new_lookup()
178 sq.sq_node = qmi->sq.sq_node; in qmi_send_new_lookup()
179 sq.sq_port = QRTR_PORT_CTRL; in qmi_send_new_lookup()
181 msg.msg_name = &sq; in qmi_send_new_lookup()
182 msg.msg_namelen = sizeof(sq); in qmi_send_new_lookup()
230 struct sockaddr_qrtr sq; in qmi_send_new_server() local
239 pkt.server.node = cpu_to_le32(qmi->sq.sq_node); in qmi_send_new_server()
240 pkt.server.port = cpu_to_le32(qmi->sq.sq_port); in qmi_send_new_server()
[all …]
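qmi_send_new_lookup() builds its control message by pointing msg.msg_name at a stack sockaddr_qrtr and handing the msghdr to the socket layer. The sketch below shows the same msghdr plumbing from userspace; AF_INET/UDP stands in for AF_QIPCRTR purely for illustration, and the function name is made up.

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>

    /* Illustrative only: send one datagram with an explicit destination in
     * msg_name, the way the QMI code fills a sockaddr_qrtr. */
    static int send_to(int fd, const char *ip, unsigned short port,
                       const void *payload, size_t len)
    {
        struct sockaddr_in dst = { 0 };
        struct iovec iov = { .iov_base = (void *)payload, .iov_len = len };
        struct msghdr msg = { 0 };

        dst.sin_family = AF_INET;
        dst.sin_port   = htons(port);
        if (inet_pton(AF_INET, ip, &dst.sin_addr) != 1)
            return -1;

        msg.msg_name    = &dst;              /* destination address */
        msg.msg_namelen = sizeof(dst);
        msg.msg_iov     = &iov;
        msg.msg_iovlen  = 1;

        return sendmsg(fd, &msg, 0) < 0 ? -1 : 0;
    }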
/Linux-v4.19/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
59 qp->sq.condition = false; in bnxt_qplib_cancel_phantom_processing()
60 qp->sq.send_phantom = false; in bnxt_qplib_cancel_phantom_processing()
61 qp->sq.single = false; in bnxt_qplib_cancel_phantom_processing()
72 if (!qp->sq.flushed) { in __bnxt_qplib_add_flush_qp()
78 qp->sq.flushed = true; in __bnxt_qplib_add_flush_qp()
124 if (qp->sq.flushed) { in __bnxt_qplib_del_flush_qp()
125 qp->sq.flushed = false; in __bnxt_qplib_del_flush_qp()
142 qp->sq.hwq.prod = 0; in bnxt_qplib_clean_qp()
143 qp->sq.hwq.cons = 0; in bnxt_qplib_clean_qp()
177 struct bnxt_qplib_q *sq = &qp->sq; in bnxt_qplib_free_qp_hdr_buf() local
[all …]
/Linux-v4.19/drivers/infiniband/hw/cxgb4/
qp.c
94 static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) in dealloc_oc_sq() argument
96 c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize); in dealloc_oc_sq()
99 static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) in dealloc_host_sq() argument
101 dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue, in dealloc_host_sq()
102 pci_unmap_addr(sq, mapping)); in dealloc_host_sq()
105 static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) in dealloc_sq() argument
107 if (t4_sq_onchip(sq)) in dealloc_sq()
108 dealloc_oc_sq(rdev, sq); in dealloc_sq()
110 dealloc_host_sq(rdev, sq); in dealloc_sq()
113 static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) in alloc_oc_sq() argument
[all …]
cq.c
196 CQE_QPID_V(wq->sq.qid)); in insert_recv_cqe()
230 CQE_QPID_V(wq->sq.qid)); in insert_sq_cqe()
248 if (wq->sq.flush_cidx == -1) in c4iw_flush_sq()
249 wq->sq.flush_cidx = wq->sq.cidx; in c4iw_flush_sq()
250 idx = wq->sq.flush_cidx; in c4iw_flush_sq()
251 while (idx != wq->sq.pidx) { in c4iw_flush_sq()
252 swsqe = &wq->sq.sw_sq[idx]; in c4iw_flush_sq()
255 if (wq->sq.oldest_read == swsqe) { in c4iw_flush_sq()
259 if (++idx == wq->sq.size) in c4iw_flush_sq()
262 wq->sq.flush_cidx += flushed; in c4iw_flush_sq()
[all …]
t4.h
382 struct t4_sq sq; member
526 static inline int t4_sq_onchip(struct t4_sq *sq) in t4_sq_onchip() argument
528 return sq->flags & T4_SQ_ONCHIP; in t4_sq_onchip()
533 return wq->sq.in_use == 0; in t4_sq_empty()
538 return wq->sq.in_use == (wq->sq.size - 1); in t4_sq_full()
543 return wq->sq.size - 1 - wq->sq.in_use; in t4_sq_avail()
548 wq->sq.in_use++; in t4_sq_produce()
549 if (++wq->sq.pidx == wq->sq.size) in t4_sq_produce()
550 wq->sq.pidx = 0; in t4_sq_produce()
551 wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); in t4_sq_produce()
[all …]
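The t4.h helpers track occupancy explicitly: empty at in_use == 0, full at in_use == size - 1, avail is whatever is left, and pidx wraps by comparison rather than a mask. A stripped-down model of those helpers follows; it drops the wq_pidx/len16 bookkeeping the real t4_sq_produce() also does, and the names are illustrative.

    #include <stdint.h>
    #include <stdbool.h>

    /* Simplified model of an explicit-occupancy ring; not the cxgb4 code.
     * One slot is kept unused, and size need not be a power of two. */
    struct swq {
        uint16_t size;
        uint16_t in_use;
        uint16_t pidx;    /* producer index */
        uint16_t cidx;    /* consumer index */
    };

    static bool     swq_empty(const struct swq *q) { return q->in_use == 0; }
    static bool     swq_full(const struct swq *q)  { return q->in_use == q->size - 1; }
    static uint16_t swq_avail(const struct swq *q) { return q->size - 1 - q->in_use; }

    static void swq_produce(struct swq *q)
    {
        q->in_use++;
        if (++q->pidx == q->size)
            q->pidx = 0;
    }

    static void swq_consume(struct swq *q)
    {
        q->in_use--;
        if (++q->cidx == q->size)
            q->cidx = 0;
    }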
restrack.c
42 if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid)) in fill_sq()
46 if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize)) in fill_sq()
48 if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx)) in fill_sq()
50 if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx)) in fill_sq()
52 if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx)) in fill_sq()
54 if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx)) in fill_sq()
56 if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use)) in fill_sq()
58 if (rdma_nl_put_driver_u32(msg, "size", wq->sq.size)) in fill_sq()
60 if (rdma_nl_put_driver_u32_hex(msg, "flags", wq->sq.flags)) in fill_sq()
95 static int fill_swsqe(struct sk_buff *msg, struct t4_sq *sq, u16 idx, in fill_swsqe() argument
[all …]
/Linux-v4.19/drivers/nvme/target/
core.c
505 if (req->sq->size) { in __nvmet_req_complete()
507 old_sqhd = req->sq->sqhd; in __nvmet_req_complete()
508 new_sqhd = (old_sqhd + 1) % req->sq->size; in __nvmet_req_complete()
509 } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) != in __nvmet_req_complete()
512 sqhd = req->sq->sqhd & 0x0000FFFF; in __nvmet_req_complete()
514 req->rsp->sq_id = cpu_to_le16(req->sq->qid); in __nvmet_req_complete()
525 percpu_ref_put(&req->sq->ref); in nvmet_req_complete()
538 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, in nvmet_sq_setup() argument
541 sq->sqhd = 0; in nvmet_sq_setup()
542 sq->qid = qid; in nvmet_sq_setup()
[all …]
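__nvmet_req_complete() advances the submission queue head with a compare-and-swap retry loop so concurrent completions never lose an increment. The same loop written with C11 atomics, as an illustration rather than the nvmet code itself:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Advance a shared head counter modulo `size` without a lock, retrying
     * when another thread updated it first. Illustrative, not nvmet code. */
    static uint32_t sq_head_advance(_Atomic uint32_t *sqhd, uint32_t size)
    {
        uint32_t old = atomic_load(sqhd);
        uint32_t new;

        do {
            new = (old + 1) % size;
            /* on failure `old` is refreshed with the current value */
        } while (!atomic_compare_exchange_weak(sqhd, &old, new));

        return new;
    }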
/Linux-v4.19/block/
blk-throttle.c
250 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq) in sq_to_tg() argument
252 if (sq && sq->parent_sq) in sq_to_tg()
253 return container_of(sq, struct throtl_grp, service_queue); in sq_to_tg()
265 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq) in sq_to_td() argument
267 struct throtl_grp *tg = sq_to_tg(sq); in sq_to_td()
272 return container_of(sq, struct throtl_data, service_queue); in sq_to_td()
367 #define throtl_log(sq, fmt, args...) do { \ argument
368 struct throtl_grp *__tg = sq_to_tg((sq)); \
369 struct throtl_data *__td = sq_to_td((sq)); \
474 static void throtl_service_queue_init(struct throtl_service_queue *sq) in throtl_service_queue_init() argument
[all …]
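sq_to_tg() is a plain container_of() upcast: given a pointer to the service_queue embedded in a throtl_grp, subtract the member offset to get back to the group (returning NULL for the top-level queue, which has no parent). A self-contained illustration of the idiom, with hypothetical structure names:

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace equivalent of the kernel macro used by sq_to_tg(). */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct service_queue { int nr_pending; };

    struct group {
        int id;
        struct service_queue service_queue;   /* embedded, not a pointer */
    };

    int main(void)
    {
        struct group g = { .id = 42 };
        struct service_queue *sq = &g.service_queue;

        struct group *back = container_of(sq, struct group, service_queue);
        printf("id = %d\n", back->id);         /* prints 42 */
        return 0;
    }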
/Linux-v4.19/drivers/infiniband/hw/hns/
hns_roce_qp.c
363 hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; in hns_roce_set_user_sq_size()
364 hr_qp->sq.wqe_shift = ucmd->log_sq_stride; in hns_roce_set_user_sq_size()
368 hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt); in hns_roce_set_user_sq_size()
370 hr_qp->sq.max_gs = max_cnt; in hns_roce_set_user_sq_size()
372 if (hr_qp->sq.max_gs > 2) in hns_roce_set_user_sq_size()
373 hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * in hns_roce_set_user_sq_size()
374 (hr_qp->sq.max_gs - 2)); in hns_roce_set_user_sq_size()
381 HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt << in hns_roce_set_user_sq_size()
382 hr_qp->sq.wqe_shift), PAGE_SIZE); in hns_roce_set_user_sq_size()
384 hr_qp->sq.offset = 0; in hns_roce_set_user_sq_size()
[all …]
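hns_roce_set_user_sq_size() rounds the requested scatter/gather count up to a power of two and then rounds the SQ buffer (wqe_cnt << wqe_shift) up to a page. The same arithmetic shown standalone, assuming a fixed 4 KiB page and made-up input values:

    #include <stdint.h>
    #include <stdio.h>

    /* Round v up to the next power of two (v >= 1). Illustrative helper. */
    static uint32_t roundup_pow_of_two32(uint32_t v)
    {
        v--;
        v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
        v |= v >> 8;  v |= v >> 16;
        return v + 1;
    }

    /* Align size up to a power-of-two boundary, e.g. a 4 KiB page. */
    static uint32_t align_up(uint32_t size, uint32_t boundary)
    {
        return (size + boundary - 1) & ~(boundary - 1);
    }

    int main(void)
    {
        uint32_t wqe_cnt   = roundup_pow_of_two32(100);  /* -> 128 entries */
        uint32_t wqe_shift = 6;                          /* 64-byte WQEs */
        uint32_t buf_size  = align_up(wqe_cnt << wqe_shift, 4096);

        printf("wqe_cnt=%u buf_size=%u\n", wqe_cnt, buf_size);  /* 128, 8192 */
        return 0;
    }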
/Linux-v4.19/drivers/infiniband/hw/mthca/
mthca_qp.c
220 (n << qp->sq.wqe_shift); in get_send_wqe()
223 (n << qp->sq.wqe_shift)) >> in get_send_wqe()
225 ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) & in get_send_wqe()
504 qp_attr->cap.max_send_wr = qp->sq.max; in mthca_query_qp()
506 qp_attr->cap.max_send_sge = qp->sq.max_gs; in mthca_query_qp()
614 if (qp->sq.max) in __mthca_modify_qp()
615 qp_context->sq_size_stride = ilog2(qp->sq.max) << 3; in __mthca_modify_qp()
616 qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; in __mthca_modify_qp()
735 qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index); in __mthca_modify_qp()
836 mthca_wq_reset(&qp->sq); in __mthca_modify_qp()
[all …]
/Linux-v4.19/drivers/net/
virtio_net.c
190 struct send_queue *sq; member
334 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done()
439 struct send_queue *sq, in __virtnet_xdp_xmit_one() argument
459 sg_init_one(sq->sg, xdpf->data, xdpf->len); in __virtnet_xdp_xmit_one()
461 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC); in __virtnet_xdp_xmit_one()
473 return &vi->sq[qp]; in virtnet_xdp_sq()
483 struct send_queue *sq; in virtnet_xdp_xmit() local
490 sq = virtnet_xdp_sq(vi); in virtnet_xdp_xmit()
509 while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) in virtnet_xdp_xmit()
515 err = __virtnet_xdp_xmit_one(vi, sq, xdpf); in virtnet_xdp_xmit()
[all …]
/Linux-v4.19/drivers/infiniband/sw/rxe/
rxe_qp.c
230 qp->sq.max_wr = init->cap.max_send_wr; in rxe_qp_init_req()
231 qp->sq.max_sge = init->cap.max_send_sge; in rxe_qp_init_req()
232 qp->sq.max_inline = init->cap.max_inline_data; in rxe_qp_init_req()
235 qp->sq.max_sge * sizeof(struct ib_sge), in rxe_qp_init_req()
237 qp->sq.max_inline); in rxe_qp_init_req()
239 qp->sq.queue = rxe_queue_init(rxe, in rxe_qp_init_req()
240 &qp->sq.max_wr, in rxe_qp_init_req()
242 if (!qp->sq.queue) in rxe_qp_init_req()
246 qp->sq.queue->buf, qp->sq.queue->buf_size, in rxe_qp_init_req()
247 &qp->sq.queue->ip); in rxe_qp_init_req()
[all …]
/Linux-v4.19/tools/scripts/
utilities.mak
78 # escape-for-shell-sq
80 # Usage: embeddable-text = $(call escape-for-shell-sq,text)
86 escape-for-shell-sq = $(subst ','\'',$(1))
88 # shell-sq
90 # Usage: single-quoted-and-escaped-text = $(call shell-sq,text)
92 shell-sq = '$(escape-for-shell-sq)'
124 # produces the same results as the `$(shell-sq)' function.
126 shell-wordify = $(if $(findstring $(newline),$(1)),$(_sw-esc-nl),$(shell-sq))
128 "$$(echo $(call escape-nl,$(shell-sq),$(2)) | $(call shell-unescape-nl,$(2)))"
135 is-absolute = $(shell echo $(shell-sq) | grep -q ^/ && echo y)
[all …]
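shell-sq in utilities.mak single-quotes a string for the shell after turning every embedded ' into '\'' (close the quote, emit an escaped quote, reopen). The same transformation written as a small C helper, since the rest of this page's code is C; the function name is made up.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* C analogue of shell-sq: wrap `s` in single quotes, expanding each
     * embedded ' to '\''. Caller frees the result. Illustrative only. */
    static char *shell_sq(const char *s)
    {
        /* worst case every byte becomes 4 bytes, plus 2 quotes and a NUL */
        char *out = malloc(strlen(s) * 4 + 3);
        char *p = out;

        if (!out)
            return NULL;

        *p++ = '\'';
        for (; *s; s++) {
            if (*s == '\'') {
                memcpy(p, "'\\''", 4);   /* close, escaped quote, reopen */
                p += 4;
            } else {
                *p++ = *s;
            }
        }
        *p++ = '\'';
        *p = '\0';
        return out;
    }

    int main(void)
    {
        char *q = shell_sq("it's a test");
        printf("%s\n", q);               /* 'it'\''s a test' */
        free(q);
        return 0;
    }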
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx5/core/fpga/
conn.c
135 *conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc); in mlx5_fpga_conn_notify_hw()
149 ix = conn->qp.sq.pc & (conn->qp.sq.size - 1); in mlx5_fpga_conn_post_send()
151 ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix); in mlx5_fpga_conn_post_send()
166 ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) | in mlx5_fpga_conn_post_send()
170 conn->qp.sq.pc++; in mlx5_fpga_conn_post_send()
171 conn->qp.sq.bufs[ix] = buf; in mlx5_fpga_conn_post_send()
189 spin_lock_irqsave(&conn->qp.sq.lock, flags); in mlx5_fpga_conn_send()
191 if (conn->qp.sq.pc - conn->qp.sq.cc >= conn->qp.sq.size) { in mlx5_fpga_conn_send()
192 list_add_tail(&buf->list, &conn->qp.sq.backlog); in mlx5_fpga_conn_send()
199 spin_unlock_irqrestore(&conn->qp.sq.lock, flags); in mlx5_fpga_conn_send()
[all …]
