Lines matching refs:rcd in drivers/infiniband/hw/hfi1/tid_rdma.c

133 				   struct hfi1_ctxtdata *rcd,
197 p->qp = (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt; in tid_rdma_opfn_init()
199 p->jkey = priv->rcd->jkey; in tid_rdma_opfn_init()
203 p->urg = is_urg_masked(priv->rcd); in tid_rdma_opfn_init()
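
The tid_rdma_opfn_init() hits above show how this side advertises its TID RDMA
parameters: the KDETH "QP" is not a real QP number but a prefix tag packed with
the receive-context index, so the peer can address the right hardware context.
A minimal userspace sketch of that packing, with a hypothetical prefix value
standing in for the driver's RVT_KDETH_QP_PREFIX:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical prefix value; the real driver uses RVT_KDETH_QP_PREFIX. */
    #define KDETH_QP_PREFIX 0x80u

    /* Prefix in the upper 16 bits, receive-context index in the lower 16,
     * mirroring (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt above. */
    static uint32_t pack_kdeth_qp(uint16_t ctxt)
    {
            return (KDETH_QP_PREFIX << 16) | ctxt;
    }

    static uint16_t unpack_ctxt(uint32_t kdeth_qp)
    {
            return (uint16_t)(kdeth_qp & 0xffff);
    }

    int main(void)
    {
            uint32_t qp = pack_kdeth_qp(5);

            printf("kdeth qp 0x%08x -> ctxt %u\n", qp, unpack_ctxt(qp));
            return 0;
    }
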
298 int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit) in hfi1_kern_exp_rcv_init() argument
305 rcd->jkey = TID_RDMA_JKEY; in hfi1_kern_exp_rcv_init()
306 hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey); in hfi1_kern_exp_rcv_init()
307 return hfi1_alloc_ctxt_rcv_groups(rcd); in hfi1_kern_exp_rcv_init()
334 return dd->rcd[ctxt]; in qp_to_rcd()
343 qpriv->rcd = qp_to_rcd(rdi, qp); in hfi1_qp_priv_init()
368 struct hfi1_devdata *dd = qpriv->rcd->dd; in hfi1_qp_priv_init()
468 static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd, in first_qp() argument
470 __must_hold(&rcd->exp_lock) in first_qp()
474 lockdep_assert_held(&rcd->exp_lock); in first_qp()
502 static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd, in kernel_tid_waiters() argument
504 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) in kernel_tid_waiters()
510 lockdep_assert_held(&rcd->exp_lock); in kernel_tid_waiters()
511 fqp = first_qp(rcd, queue); in kernel_tid_waiters()
533 static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd, in dequeue_tid_waiter() argument
535 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) in dequeue_tid_waiter()
540 lockdep_assert_held(&rcd->exp_lock); in dequeue_tid_waiter()
559 static void queue_qp_for_tid_wait(struct hfi1_ctxtdata *rcd, in queue_qp_for_tid_wait() argument
561 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock) in queue_qp_for_tid_wait()
566 lockdep_assert_held(&rcd->exp_lock); in queue_qp_for_tid_wait()
571 rcd->dd->verbs_dev.n_tidwait++; in queue_qp_for_tid_wait()
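
first_qp(), kernel_tid_waiters(), dequeue_tid_waiter(), and
queue_qp_for_tid_wait() above all pair a Sparse __must_hold() annotation with a
runtime lockdep_assert_held(), so both static and dynamic checkers can verify
that callers hold rcd->exp_lock (and, where noted, qp->s_lock). A stripped-down
sketch of the same convention in plain pthreads, with the annotation stubbed
out so it compiles outside the kernel:

    #include <pthread.h>
    #include <assert.h>

    /* No Sparse outside the kernel; stub the annotation away. */
    #define __must_hold(x)

    struct ctx {
            pthread_mutex_t exp_lock;
            int waiters;
    };

    /* Caller must hold c->exp_lock. __must_hold() documents the contract
     * for static analysis; the trylock assert is a crude stand-in for
     * lockdep_assert_held() (it only proves *someone* holds the lock). */
    static int peek_waiters(struct ctx *c)
            __must_hold(&c->exp_lock)
    {
            assert(pthread_mutex_trylock(&c->exp_lock) != 0);
            return c->waiters;
    }

    int main(void)
    {
            struct ctx c = { PTHREAD_MUTEX_INITIALIZER, 0 };

            pthread_mutex_lock(&c.exp_lock);
            peek_waiters(&c);
            pthread_mutex_unlock(&c.exp_lock);
            return 0;
    }
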
674 spin_lock(&priv->rcd->exp_lock); in _tid_rdma_flush_wait()
681 spin_unlock(&priv->rcd->exp_lock); in _tid_rdma_flush_wait()
689 _tid_rdma_flush_wait(qp, &priv->rcd->flow_queue); in hfi1_tid_rdma_flush_wait()
690 _tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue); in hfi1_tid_rdma_flush_wait()
711 static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last) in kern_reserve_flow() argument
712 __must_hold(&rcd->exp_lock) in kern_reserve_flow()
718 !test_and_set_bit(last, &rcd->flow_mask)) in kern_reserve_flow()
721 nr = ffz(rcd->flow_mask); in kern_reserve_flow()
723 (sizeof(rcd->flow_mask) * BITS_PER_BYTE)); in kern_reserve_flow()
726 set_bit(nr, &rcd->flow_mask); in kern_reserve_flow()
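
kern_reserve_flow() above is a small bitmask allocator: it first retries the
QP's previous flow index (the "last" fast path with test_and_set_bit()), then
falls back to ffz() -- find first zero -- in rcd->flow_mask, and fails once
all hardware flows in the context are taken. A single-threaded userspace
sketch of the same pattern (the driver runs this under exp_lock with kernel
bitops):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_FLOWS 32                    /* assumed flow count */

    /* Return the reserved flow index, or -1 if every flow is in use. */
    static int reserve_flow(uint32_t *mask, int last)
    {
            /* Fast path: re-grab the flow this QP used last time. */
            if (last >= 0 && last < NUM_FLOWS && !(*mask & (1u << last))) {
                    *mask |= 1u << last;
                    return last;
            }
            if (~*mask == 0)                /* no zero bit anywhere */
                    return -1;
            int nr = __builtin_ctz(~*mask); /* userspace stand-in for ffz() */
            if (nr >= NUM_FLOWS)            /* mirrors the driver's bound check */
                    return -1;
            *mask |= 1u << nr;
            return nr;
    }

    int main(void)
    {
            uint32_t mask = 0;

            printf("%d\n", reserve_flow(&mask, -1)); /* 0 */
            printf("%d\n", reserve_flow(&mask, 0));  /* 0 is taken -> 1 */
            printf("%d\n", reserve_flow(&mask, 0));  /* still taken -> 2 */
            return 0;
    }
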
730 static void kern_set_hw_flow(struct hfi1_ctxtdata *rcd, u32 generation, in kern_set_hw_flow() argument
745 write_uctxt_csr(rcd->dd, rcd->ctxt, in kern_set_hw_flow()
749 static u32 kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx) in kern_setup_hw_flow() argument
750 __must_hold(&rcd->exp_lock) in kern_setup_hw_flow()
752 u32 generation = rcd->flows[flow_idx].generation; in kern_setup_hw_flow()
754 kern_set_hw_flow(rcd, generation, flow_idx); in kern_setup_hw_flow()
767 static void kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx) in kern_clear_hw_flow() argument
768 __must_hold(&rcd->exp_lock) in kern_clear_hw_flow()
770 rcd->flows[flow_idx].generation = in kern_clear_hw_flow()
771 kern_flow_generation_next(rcd->flows[flow_idx].generation); in kern_clear_hw_flow()
772 kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, flow_idx); in kern_clear_hw_flow()
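
kern_clear_hw_flow() retires a flow by advancing its generation and
reprogramming the hardware with KERN_GENERATION_RESERVED, so late packets
carrying the old generation are rejected instead of landing in a recycled
flow. A sketch of a generation counter that wraps within its mask and skips
the reserved sentinel (field width and sentinel value are assumptions here):

    #include <stdint.h>
    #include <stdio.h>

    #define GEN_MASK     0xfffffu   /* assumed 20-bit generation field */
    #define GEN_RESERVED GEN_MASK   /* assumed "flow invalid" sentinel */

    static uint32_t mask_generation(uint32_t g)
    {
            return g & GEN_MASK;
    }

    /* Next generation, skipping the value hardware treats as reserved;
     * this approximates what kern_flow_generation_next() must do. */
    static uint32_t generation_next(uint32_t gen)
    {
            uint32_t next = mask_generation(gen + 1);

            if (next == GEN_RESERVED)
                    next = mask_generation(next + 1);
            return next;
    }

    int main(void)
    {
            /* One step before the sentinel: skips it and wraps to 0. */
            printf("0x%x\n", generation_next(GEN_MASK - 1));
            return 0;
    }
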
775 int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) in hfi1_kern_setup_hw_flow() argument
787 spin_lock_irqsave(&rcd->exp_lock, flags); in hfi1_kern_setup_hw_flow()
788 if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp)) in hfi1_kern_setup_hw_flow()
791 ret = kern_reserve_flow(rcd, fs->last_index); in hfi1_kern_setup_hw_flow()
799 rcd->flows[fs->index].generation = fs->generation; in hfi1_kern_setup_hw_flow()
800 fs->generation = kern_setup_hw_flow(rcd, fs->index); in hfi1_kern_setup_hw_flow()
802 dequeue_tid_waiter(rcd, &rcd->flow_queue, qp); in hfi1_kern_setup_hw_flow()
804 fqp = first_qp(rcd, &rcd->flow_queue); in hfi1_kern_setup_hw_flow()
805 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_setup_hw_flow()
810 queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp); in hfi1_kern_setup_hw_flow()
811 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_setup_hw_flow()
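
hfi1_kern_setup_hw_flow() above shows the locking pattern used throughout this
file: take exp_lock; bail to the wait queue if earlier QPs are already waiting
(FIFO fairness) or no flow is free; on success dequeue this QP and pick up the
first remaining waiter (fqp) to reschedule once the lock drops. A condensed
single-lock simulation of that control flow, with hypothetical helper names:

    #include <stdio.h>

    #define QLEN 8

    struct waitq {
            int qpn[QLEN];
            int head, tail;         /* simple FIFO of waiting QP numbers */
    };

    static int first_waiter(struct waitq *q)
    {
            return q->head == q->tail ? -1 : q->qpn[q->head % QLEN];
    }

    /* 0 on success, 1 if queued (the driver returns -EAGAIN there). */
    static int setup_or_queue(struct waitq *q, int *free_flows, int qpn,
                              int *kick)
    {
            int fw = first_waiter(q);

            *kick = -1;
            /* An earlier waiter that isn't us goes first, or no flow free. */
            if ((fw >= 0 && fw != qpn) || *free_flows == 0) {
                    if (fw != qpn)
                            q->qpn[q->tail++ % QLEN] = qpn;
                    return 1;
            }
            (*free_flows)--;        /* "reserve the hardware flow" */
            if (fw == qpn)
                    q->head++;      /* dequeue ourselves */
            *kick = first_waiter(q); /* next QP to wake, like fqp above */
            return 0;
    }

    int main(void)
    {
            struct waitq q = { {0}, 0, 0 };
            int free_flows = 1, kick;

            printf("qp1: %d\n", setup_or_queue(&q, &free_flows, 1, &kick));
            printf("qp2: %d\n", setup_or_queue(&q, &free_flows, 2, &kick));
            free_flows = 1;         /* qp1 releases its flow */
            printf("qp2: %d\n", setup_or_queue(&q, &free_flows, 2, &kick));
            return 0;
    }
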
815 void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) in hfi1_kern_clear_hw_flow() argument
824 spin_lock_irqsave(&rcd->exp_lock, flags); in hfi1_kern_clear_hw_flow()
825 kern_clear_hw_flow(rcd, fs->index); in hfi1_kern_clear_hw_flow()
826 clear_bit(fs->index, &rcd->flow_mask); in hfi1_kern_clear_hw_flow()
832 fqp = first_qp(rcd, &rcd->flow_queue); in hfi1_kern_clear_hw_flow()
833 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_clear_hw_flow()
843 void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd) in hfi1_kern_init_ctxt_generations() argument
848 rcd->flows[i].generation = mask_generation(prandom_u32()); in hfi1_kern_init_ctxt_generations()
849 kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i); in hfi1_kern_init_ctxt_generations()
1121 dd = flow->req->rcd->dd; in dma_unmap_flow()
1137 struct hfi1_devdata *dd = flow->req->rcd->dd; in dma_map_flow()
1198 struct hfi1_ctxtdata *rcd, char *s, in kern_add_tid_node() argument
1206 dd_dev_err(rcd->dd, in kern_add_tid_node()
1232 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_alloc_tids() local
1233 struct hfi1_devdata *dd = rcd->dd; in kern_alloc_tids()
1244 list_for_each_entry(group, &rcd->tid_group_list.list, list) { in kern_alloc_tids()
1245 kern_add_tid_node(flow, rcd, "complete groups", group, in kern_alloc_tids()
1258 list_for_each_entry(used, &rcd->tid_used_list.list, list) { in kern_alloc_tids()
1261 kern_add_tid_node(flow, rcd, "used groups", used, use); in kern_alloc_tids()
1273 if (group && &group->list == &rcd->tid_group_list.list) in kern_alloc_tids()
1275 group = list_prepare_entry(group, &rcd->tid_group_list.list, in kern_alloc_tids()
1277 if (list_is_last(&group->list, &rcd->tid_group_list.list)) in kern_alloc_tids()
1281 kern_add_tid_node(flow, rcd, "complete continue", group, use); in kern_alloc_tids()
1296 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_program_rcv_group() local
1297 struct hfi1_devdata *dd = rcd->dd; in kern_program_rcv_group()
1321 rcventry -= rcd->expected_base; in kern_program_rcv_group()
1349 tid_group_move(grp, &rcd->tid_used_list, in kern_program_rcv_group()
1350 &rcd->tid_full_list); in kern_program_rcv_group()
1352 tid_group_move(grp, &rcd->tid_group_list, in kern_program_rcv_group()
1353 &rcd->tid_used_list); in kern_program_rcv_group()
1363 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_unprogram_rcv_group() local
1364 struct hfi1_devdata *dd = rcd->dd; in kern_unprogram_rcv_group()
1385 tid_group_move(grp, &rcd->tid_full_list, in kern_unprogram_rcv_group()
1386 &rcd->tid_used_list); in kern_unprogram_rcv_group()
1388 tid_group_move(grp, &rcd->tid_used_list, in kern_unprogram_rcv_group()
1389 &rcd->tid_group_list); in kern_unprogram_rcv_group()
1392 struct hfi1_ctxtdata *rcd = flow->req->rcd; in kern_unprogram_rcv_group() local
1393 struct hfi1_devdata *dd = rcd->dd; in kern_unprogram_rcv_group()
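
kern_program_rcv_group() and kern_unprogram_rcv_group() above move each TID
group between three per-context lists as its entries are consumed and
released: tid_group_list (fully free) -> tid_used_list (partially used) ->
tid_full_list (exhausted), and back again in reverse. A sketch of that
three-list lifecycle with a hand-rolled intrusive list (types and helpers
here are illustrative, not the driver's):

    #include <stdio.h>

    struct list { struct list *prev, *next; };

    static void list_init(struct list *l) { l->prev = l->next = l; }

    static void list_move_tail(struct list *n, struct list *head)
    {
            n->prev->next = n->next;        /* unlink */
            n->next->prev = n->prev;
            n->prev = head->prev;           /* insert at tail */
            n->next = head;
            head->prev->next = n;
            head->prev = n;
    }

    struct tid_grp {
            struct list list;
            int size, used;
    };

    /* After one more TID entry in the group is programmed, reclassify it. */
    static void grp_account_use(struct tid_grp *g, struct list *used_l,
                                struct list *full_l)
    {
            g->used++;
            if (g->used == g->size)
                    list_move_tail(&g->list, full_l);  /* used -> full */
            else if (g->used == 1)
                    list_move_tail(&g->list, used_l);  /* free -> used */
    }

    int main(void)
    {
            struct list free_l, used_l, full_l;
            struct tid_grp g = { .size = 2, .used = 0 };

            list_init(&free_l); list_init(&used_l); list_init(&full_l);
            list_init(&g.list);
            list_move_tail(&g.list, &free_l);

            grp_account_use(&g, &used_l, &full_l);
            grp_account_use(&g, &used_l, &full_l);
            printf("used %d/%d, on full list: %s\n", g.used, g.size,
                   full_l.next == &g.list ? "yes" : "no");
            return 0;
    }
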
1459 struct hfi1_ctxtdata *rcd = req->rcd; in hfi1_kern_exp_rcv_setup() local
1487 spin_lock_irqsave(&rcd->exp_lock, flags); in hfi1_kern_exp_rcv_setup()
1488 if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp)) in hfi1_kern_exp_rcv_setup()
1521 dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp); in hfi1_kern_exp_rcv_setup()
1523 fqp = first_qp(rcd, &rcd->rarr_queue); in hfi1_kern_exp_rcv_setup()
1524 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_exp_rcv_setup()
1530 queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp); in hfi1_kern_exp_rcv_setup()
1531 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_exp_rcv_setup()
1550 struct hfi1_ctxtdata *rcd = req->rcd; in hfi1_kern_exp_rcv_clear() local
1560 spin_lock_irqsave(&rcd->exp_lock, flags); in hfi1_kern_exp_rcv_clear()
1567 fqp = first_qp(rcd, &rcd->rarr_queue); in hfi1_kern_exp_rcv_clear()
1568 spin_unlock_irqrestore(&rcd->exp_lock, flags); in hfi1_kern_exp_rcv_clear()
1633 req->rcd->numa_id); in hfi1_kern_exp_rcv_alloc_flows()
1663 req->rcd = qpriv->rcd; in hfi1_init_trdma_req()
1754 qpriv->rcd->ctxt); in hfi1_build_tid_rdma_read_packet()
1804 hfi1_kern_clear_hw_flow(req->rcd, qp); in hfi1_build_tid_rdma_read_req()
1838 if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp)) in hfi1_build_tid_rdma_read_req()
1983 struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd; in tid_rdma_rcv_error() local
2000 rc_defered_ack(rcd, qp); in tid_rdma_rcv_error()
2223 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_rc_rcv_tid_rdma_read_req() local
2328 rc_defered_ack(rcd, qp); in hfi1_rc_rcv_tid_rdma_read_req()
2447 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_rc_rcv_tid_rdma_read_resp() local
2470 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn); in hfi1_rc_rcv_tid_rdma_read_resp()
2529 if (!do_rc_ack(qp, aeth, ipsn, opcode, 0, rcd)) in hfi1_rc_rcv_tid_rdma_read_resp()
2546 hfi1_kern_clear_hw_flow(priv->rcd, qp); in hfi1_rc_rcv_tid_rdma_read_resp()
2592 hfi1_kern_clear_hw_flow(priv->rcd, qp); in hfi1_kern_read_tid_flow_free()
2622 static void restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd, in restart_tid_rdma_read_req() argument
2636 list_add_tail(&qp->rspwait, &rcd->qp_wait_list); in restart_tid_rdma_read_req()
2648 static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, in handle_read_kdeth_eflags() argument
2653 struct hfi1_pportdata *ppd = rcd->ppd; in handle_read_kdeth_eflags()
2700 restart_tid_rdma_read_req(rcd, qp, in handle_read_kdeth_eflags()
2710 &rcd->qp_wait_list); in handle_read_kdeth_eflags()
2791 last_psn = read_r_next_psn(dd, rcd->ctxt, in handle_read_kdeth_eflags()
2800 restart_tid_rdma_read_req(rcd, qp, in handle_read_kdeth_eflags()
2837 bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd, in hfi1_handle_kdeth_eflags() argument
2868 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf))); in hfi1_handle_kdeth_eflags()
2916 ret = handle_read_kdeth_eflags(rcd, packet, rcv_type, rte, psn, in hfi1_handle_kdeth_eflags()
2952 read_r_next_psn(dd, rcd->ctxt, in hfi1_handle_kdeth_eflags()
3169 hfi1_kern_clear_hw_flow(qpriv->rcd, qp); in hfi1_qp_kern_exp_rcv_clear_all()
3277 struct hfi1_pportdata *ppd = qpriv->rcd->ppd; in setup_tid_rdma_wqe()
3421 bytes_per_us = active_egress_rate(qpriv->rcd->ppd) / 8; in hfi1_compute_tid_rnr_timeout()
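
The hfi1_compute_tid_rnr_timeout() line divides the egress rate by 8 to get
bytes per microsecond, which only works if active_egress_rate() reports
Mbit/s: 1 Mbit/s = 10^6 bits/s = 0.125 * 10^6 bytes/s = 0.125 bytes/us, so
Mbit/s / 8 = bytes/us. A one-line check for a 100 Gb/s link:

    #include <stdio.h>

    int main(void)
    {
            unsigned int rate_mbps = 100000;           /* 100 Gb/s */
            unsigned int bytes_per_us = rate_mbps / 8; /* 12500 B/us */

            printf("%u bytes/us\n", bytes_per_us);
            return 0;
    }
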
3456 struct hfi1_ctxtdata *rcd = qpriv->rcd; in hfi1_tid_write_alloc_resources() local
3488 hfi1_kern_clear_hw_flow(rcd, qp); in hfi1_tid_write_alloc_resources()
3514 hfi1_kern_clear_hw_flow(rcd, qp); in hfi1_tid_write_alloc_resources()
3521 ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp); in hfi1_tid_write_alloc_resources()
3525 &rcd->flow_queue); in hfi1_tid_write_alloc_resources()
3559 to_seg = position_in_queue(qpriv, &rcd->rarr_queue); in hfi1_tid_write_alloc_resources()
3629 rc_defered_ack(rcd, qp); in hfi1_tid_write_alloc_resources()
3647 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_rc_rcv_tid_rdma_write_req() local
3814 rc_defered_ack(rcd, qp); in hfi1_rc_rcv_tid_rdma_write_req()
3916 qpriv->rcd->ctxt); in hfi1_build_tid_rdma_write_resp()
3991 hfi1_kern_clear_hw_flow(qpriv->rcd, qp); in hfi1_tid_timeout()
4031 struct hfi1_ctxtdata *rcd = packet->rcd; in hfi1_rc_rcv_tid_rdma_write_resp() local
4089 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd)) in hfi1_rc_rcv_tid_rdma_write_resp()
4261 struct hfi1_ctxtdata *rcd = priv->rcd; in hfi1_rc_rcv_tid_rdma_write_data() local
4285 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn); in hfi1_rc_rcv_tid_rdma_write_data()
4337 rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK; in hfi1_rc_rcv_tid_rdma_write_data()
4450 qpriv->rcd->ctxt); in hfi1_build_tid_rdma_write_ack()
4862 struct hfi1_ctxtdata *rcd = qpriv->rcd; in hfi1_rc_rcv_tid_rdma_resync() local
4891 spin_lock(&rcd->exp_lock); in hfi1_rc_rcv_tid_rdma_resync()
4900 rcd->flows[fs->index].generation = generation; in hfi1_rc_rcv_tid_rdma_resync()
4901 fs->generation = kern_setup_hw_flow(rcd, fs->index); in hfi1_rc_rcv_tid_rdma_resync()
4956 spin_unlock(&rcd->exp_lock); in hfi1_rc_rcv_tid_rdma_resync()
5505 struct hfi1_ctxtdata *rcd, in update_r_next_psn_fecn() argument
5515 struct hfi1_devdata *dd = rcd->dd; in update_r_next_psn_fecn()
5518 read_r_next_psn(dd, rcd->ctxt, flow->idx); in update_r_next_psn_fecn()