Lines matching refs: pkt (responder path, drivers/infiniband/sw/rxe/rxe_resp.c)
111 struct rxe_pkt_info *pkt = SKB_TO_PKT(skb); in rxe_resp_queue_pkt() local
115 must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) || in rxe_resp_queue_pkt()
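
rxe_resp_queue_pkt() decides whether an arriving request can be handled inline or must be deferred to the responder task: RDMA READ requests (which generate long reply streams) and packets arriving behind a backlog are always scheduled. A minimal standalone sketch of that heuristic, assuming a simplified signature (must_schedule() and queued_pkts are hypothetical names; the opcode constant matches the IBA BTH value):

    #include <stdbool.h>

    #define IB_OPCODE_RC_RDMA_READ_REQUEST 0x0c   /* IBA BTH opcode for RC RDMA READ */

    /* Defer to the responder task for reads or when a backlog exists;
     * a cheap single packet can be processed in the receive path. */
    static bool must_schedule(int opcode, int queued_pkts)
    {
        return opcode == IB_OPCODE_RC_RDMA_READ_REQUEST || queued_pkts > 1;
    }
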
149 struct rxe_pkt_info *pkt) in check_psn() argument
151 int diff = psn_compare(pkt->psn, qp->resp.psn); in check_psn()
176 if (pkt->mask & RXE_START_MASK) { in check_psn()
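
check_psn() orders the incoming PSN against the expected one with a wraparound-aware comparison. PSNs are 24-bit serial numbers, so the helper (psn_compare() in rxe_hdr.h) shifts the difference into the top byte and lets the 32-bit sign decide. A runnable demonstration:

    #include <stdio.h>
    #include <stdint.h>

    /* 24-bit serial-number comparison: shifting the difference left by 8
     * moves the 24-bit wraparound point onto the 32-bit sign bit.
     * Negative: a is behind b; zero: equal; positive: a is ahead. */
    static int psn_compare(uint32_t psn_a, uint32_t psn_b)
    {
        return (int32_t)((psn_a - psn_b) << 8);
    }

    int main(void)
    {
        printf("%d\n", psn_compare(0x000002, 0xfffffe) > 0); /* 1: ahead across the wrap */
        printf("%d\n", psn_compare(0x000001, 0x000005) < 0); /* 1: behind by 4 */
        return 0;
    }
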
193 struct rxe_pkt_info *pkt) in check_op_seq() argument
200 switch (pkt->opcode) { in check_op_seq()
212 switch (pkt->opcode) { in check_op_seq()
222 switch (pkt->opcode) { in check_op_seq()
241 switch (pkt->opcode) { in check_op_seq()
252 switch (pkt->opcode) { in check_op_seq()
262 switch (pkt->opcode) { in check_op_seq()
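
check_op_seq() validates that multi-packet messages arrive in legal opcode order (FIRST, then MIDDLEs, then LAST, or a single ONLY), switching first on the previous opcode and then on the current one per QP type, which is why the listing shows six nested switches. A hypothetical, heavily simplified validator for a single QP type (enum frag and seq_ok() are made-up names):

    #include <stdbool.h>

    enum frag { FIRST, MIDDLE, LAST, ONLY };

    /* A new message (FIRST/ONLY) may start only once the previous one has
     * finished; a continuation (MIDDLE/LAST) is legal only inside one. */
    static bool seq_ok(enum frag prev, enum frag cur)
    {
        switch (cur) {
        case FIRST:
        case ONLY:
            return prev == LAST || prev == ONLY;
        case MIDDLE:
        case LAST:
            return prev == FIRST || prev == MIDDLE;
        }
        return false;
    }
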
283 struct rxe_pkt_info *pkt) in check_op_valid() argument
287 if (((pkt->mask & RXE_READ_MASK) && in check_op_valid()
289 ((pkt->mask & RXE_WRITE_MASK) && in check_op_valid()
291 ((pkt->mask & RXE_ATOMIC_MASK) && in check_op_valid()
299 if ((pkt->mask & RXE_WRITE_MASK) && in check_op_valid()
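
check_op_valid() rejects operations the QP was not granted access rights for. A sketch of that gate, using flag values that mirror the ibverbs IBV_ACCESS_* constants (op_allowed() and its bool parameters are illustrative):

    #include <stdbool.h>

    #define ACC_REMOTE_WRITE  (1u << 1)   /* values mirror IBV_ACCESS_* */
    #define ACC_REMOTE_READ   (1u << 2)
    #define ACC_REMOTE_ATOMIC (1u << 3)

    static bool op_allowed(unsigned qp_access,
                           bool is_read, bool is_write, bool is_atomic)
    {
        if (is_read && !(qp_access & ACC_REMOTE_READ))
            return false;
        if (is_write && !(qp_access & ACC_REMOTE_WRITE))
            return false;
        if (is_atomic && !(qp_access & ACC_REMOTE_ATOMIC))
            return false;
        return true;
    }
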
363 struct rxe_pkt_info *pkt) in check_resource() argument
384 if (pkt->mask & RXE_READ_OR_ATOMIC) { in check_resource()
395 if (pkt->mask & RXE_RWR_MASK) { in check_resource()
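
check_resource() verifies the responder has what the request consumes: inbound reads and atomics need a free slot in the responder-resource ring (bounded by the negotiated responder-resources count), and receive-WQE-consuming operations need a posted receive. A hypothetical condensed check (resources_ok() and its parameters are made up):

    #include <stdbool.h>

    static bool resources_ok(bool read_or_atomic, bool consumes_rwr,
                             int free_rd_atomic_slots, int posted_recvs)
    {
        if (read_or_atomic && free_rd_atomic_slots == 0)
            return false;                /* too many outstanding rd/atomic */
        if (consumes_rwr && posted_recvs == 0)
            return false;                /* no receive WQE: RNR NAK path */
        return true;
    }
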
407 struct rxe_pkt_info *pkt) in check_length() argument
422 struct rxe_pkt_info *pkt) in check_rkey() argument
433 if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) { in check_rkey()
434 if (pkt->mask & RXE_RETH_MASK) { in check_rkey()
435 qp->resp.va = reth_va(pkt); in check_rkey()
436 qp->resp.rkey = reth_rkey(pkt); in check_rkey()
437 qp->resp.resid = reth_len(pkt); in check_rkey()
439 access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ in check_rkey()
441 } else if (pkt->mask & RXE_ATOMIC_MASK) { in check_rkey()
442 qp->resp.va = atmeth_va(pkt); in check_rkey()
443 qp->resp.rkey = atmeth_rkey(pkt); in check_rkey()
451 if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) && in check_rkey()
452 (pkt->mask & RXE_RETH_MASK) && in check_rkey()
453 reth_len(pkt) == 0) { in check_rkey()
460 pktlen = payload_size(pkt); in check_rkey()
478 if (pkt->mask & RXE_WRITE_MASK) { in check_rkey()
480 if (pktlen != mtu || bth_pad(pkt)) { in check_rkey()
489 if ((bth_pad(pkt) != (0x3 & (-resid)))) { in check_rkey()
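
The last test shown for check_rkey() validates the BTH pad count on the final packet of a write: payloads are padded to a 4-byte boundary, so the advertised pad must equal (-resid) & 0x3, where resid is the remaining transfer length. A runnable check of that identity:

    #include <stdio.h>

    int main(void)
    {
        /* (-resid) & 3 == (4 - resid % 4) % 4: bytes of pad needed to
         * round the residual length up to a 4-byte boundary. */
        for (unsigned resid = 0; resid < 8; resid++)
            printf("resid=%u pad=%u\n", resid, (0u - resid) & 0x3u);
        return 0;
    }
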
525 struct rxe_pkt_info *pkt) in write_data_in() argument
529 int data_len = payload_size(pkt); in write_data_in()
531 err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt), in write_data_in()
549 struct rxe_pkt_info *pkt) in process_atomic() argument
551 u64 iova = atmeth_va(pkt); in process_atomic()
573 if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP || in process_atomic()
574 pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) { in process_atomic()
575 if (*vaddr == atmeth_comp(pkt)) in process_atomic()
576 *vaddr = atmeth_swap_add(pkt); in process_atomic()
578 *vaddr += atmeth_swap_add(pkt); in process_atomic()
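
process_atomic() executes the two IB atomics on the target word: COMPARE_SWAP stores the swap value only if the current contents equal the compare value, FETCH_ADD adds unconditionally, and both return the original value in the ATOMIC ACKNOWLEDGE. A runnable sketch of those semantics (in the kernel the read-modify-write runs under a lock to keep it atomic):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t ib_cmp_swap(uint64_t *vaddr, uint64_t compare, uint64_t swap)
    {
        uint64_t orig = *vaddr;
        if (orig == compare)
            *vaddr = swap;
        return orig;                     /* returned in the AtomicAckETH */
    }

    static uint64_t ib_fetch_add(uint64_t *vaddr, uint64_t add)
    {
        uint64_t orig = *vaddr;
        *vaddr += add;
        return orig;
    }

    int main(void)
    {
        uint64_t word = 5, ret;

        ret = ib_cmp_swap(&word, 5, 9);
        printf("cas: ret=%llu word=%llu\n",
               (unsigned long long)ret, (unsigned long long)word); /* 5, 9 */
        ret = ib_fetch_add(&word, 3);
        printf("fadd: ret=%llu word=%llu\n",
               (unsigned long long)ret, (unsigned long long)word); /* 9, 12 */
        return 0;
    }
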
589 struct rxe_pkt_info *pkt, in prepare_ack_packet() argument
618 ack->offset = pkt->offset; in prepare_ack_packet()
622 memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES); in prepare_ack_packet()
765 struct rxe_pkt_info *pkt) in build_rdma_network_hdr() argument
767 struct sk_buff *skb = PKT_TO_SKB(pkt); in build_rdma_network_hdr()
779 static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) in execute() argument
783 if (pkt->mask & RXE_SEND_MASK) { in execute()
789 build_rdma_network_hdr(&hdr, pkt); in execute()
795 err = send_data_in(qp, payload_addr(pkt), payload_size(pkt)); in execute()
798 } else if (pkt->mask & RXE_WRITE_MASK) { in execute()
799 err = write_data_in(qp, pkt); in execute()
802 } else if (pkt->mask & RXE_READ_MASK) { in execute()
806 } else if (pkt->mask & RXE_ATOMIC_MASK) { in execute()
807 err = process_atomic(qp, pkt); in execute()
816 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in execute()
818 qp->resp.opcode = pkt->opcode; in execute()
821 if (pkt->mask & RXE_COMP_MASK) { in execute()
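
After a successful operation, execute() advances the expected PSN modulo 2^24 (the BTH PSN field is 24 bits wide) and records the opcode for the next sequencing check. A quick demonstration of the wrap:

    #include <stdio.h>

    #define BTH_PSN_MASK 0x00ffffff      /* PSN is a 24-bit field */

    int main(void)
    {
        unsigned psn = 0xffffff;
        psn = (psn + 1) & BTH_PSN_MASK;  /* wraps to 0x000000 */
        printf("next psn = 0x%06x\n", psn);
        return 0;
    }
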
832 struct rxe_pkt_info *pkt) in do_complete() argument
850 wc->opcode = (pkt->mask & RXE_IMMDT_MASK && in do_complete()
851 pkt->mask & RXE_WRITE_MASK) ? in do_complete()
862 if (pkt->mask & RXE_IMMDT_MASK) { in do_complete()
864 uwc->ex.imm_data = immdt_imm(pkt); in do_complete()
867 if (pkt->mask & RXE_IETH_MASK) { in do_complete()
869 uwc->ex.invalidate_rkey = ieth_rkey(pkt); in do_complete()
874 if (pkt->mask & RXE_DETH_MASK) in do_complete()
875 uwc->src_qp = deth_sqp(pkt); in do_complete()
879 struct sk_buff *skb = PKT_TO_SKB(pkt); in do_complete()
892 if (pkt->mask & RXE_IMMDT_MASK) { in do_complete()
894 wc->ex.imm_data = immdt_imm(pkt); in do_complete()
897 if (pkt->mask & RXE_IETH_MASK) { in do_complete()
902 wc->ex.invalidate_rkey = ieth_rkey(pkt); in do_complete()
917 if (pkt->mask & RXE_DETH_MASK) in do_complete()
918 wc->src_qp = deth_sqp(pkt); in do_complete()
930 if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1)) in do_complete()
936 if (!pkt) in do_complete()
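
do_complete() fills the completion in two parallel shapes, a user-visible wc written to the shared CQ buffer for userspace QPs and a struct ib_wc for kernel QPs, which is why the immediate-data and invalidate-rkey fields appear copied twice above. It is also reached with pkt == NULL when receive WQEs are flushed in the error state, hence the trailing guards. An illustrative mask-driven opcode pick (the RXE_*_MASK bit values here are invented for the demo; only the selection logic mirrors the kernel):

    #include <stdio.h>

    #define RXE_IMMDT_MASK (1u << 0)     /* demo values, not the kernel's */
    #define RXE_WRITE_MASK (1u << 1)

    enum wc_opcode { WC_RECV, WC_RECV_RDMA_WITH_IMM };

    /* Only an RDMA WRITE that carried immediate data completes as
     * "recv rdma with imm"; a SEND with immediate is still a plain recv. */
    static enum wc_opcode recv_opcode(unsigned mask)
    {
        return (mask & RXE_IMMDT_MASK) && (mask & RXE_WRITE_MASK)
            ? WC_RECV_RDMA_WITH_IMM : WC_RECV;
    }

    int main(void)
    {
        printf("%d\n", recv_opcode(RXE_IMMDT_MASK | RXE_WRITE_MASK)); /* 1 */
        printf("%d\n", recv_opcode(RXE_IMMDT_MASK));                  /* 0 */
        return 0;
    }
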
944 static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, in send_ack() argument
952 skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE, in send_ack()
967 static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, in send_atomic_ack() argument
976 skb = prepare_ack_packet(qp, pkt, &ack_pkt, in send_atomic_ack()
977 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn, in send_atomic_ack()
1011 struct rxe_pkt_info *pkt) in acknowledge() argument
1017 send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn); in acknowledge()
1018 else if (pkt->mask & RXE_ATOMIC_MASK) in acknowledge()
1019 send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED); in acknowledge()
1020 else if (bth_ack(pkt)) in acknowledge()
1021 send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn); in acknowledge()
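
acknowledge() picks one of three outcomes for an RC request: a NAK if an AETH syndrome is pending, an ATOMIC ACKNOWLEDGE for atomics, or a plain ACK only when the request's BTH AckReq bit was set. A hypothetical condensed decision (enum ack_kind and pick_ack() are made-up names):

    #include <stdbool.h>

    enum ack_kind { ACK_NONE, ACK_PLAIN, ACK_ATOMIC, ACK_NAK };

    static enum ack_kind pick_ack(bool nak_pending, bool is_atomic, bool ack_req)
    {
        if (nak_pending)
            return ACK_NAK;              /* carries the saved AETH syndrome */
        if (is_atomic)
            return ACK_ATOMIC;           /* always acked, with the orig value */
        return ack_req ? ACK_PLAIN : ACK_NONE;
    }
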
1027 struct rxe_pkt_info *pkt) in cleanup() argument
1031 if (pkt) { in cleanup()
1065 struct rxe_pkt_info *pkt) in duplicate_request() argument
1070 if (pkt->mask & RXE_SEND_MASK || in duplicate_request()
1071 pkt->mask & RXE_WRITE_MASK) { in duplicate_request()
1073 if (bth_ack(pkt)) in duplicate_request()
1074 send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn); in duplicate_request()
1077 } else if (pkt->mask & RXE_READ_MASK) { in duplicate_request()
1080 res = find_resource(qp, pkt->psn); in duplicate_request()
1091 u64 iova = reth_va(pkt); in duplicate_request()
1092 u32 resid = reth_len(pkt); in duplicate_request()
1102 if (reth_rkey(pkt) != res->read.rkey) { in duplicate_request()
1107 res->cur_psn = pkt->psn; in duplicate_request()
1108 res->state = (pkt->psn == res->first_psn) ? in duplicate_request()
1126 res = find_resource(qp, pkt->psn); in duplicate_request()
1131 pkt, res->atomic.skb); in duplicate_request()
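
duplicate_request() is careful never to re-execute side effects: duplicate sends/writes just get the ack resent at the prior PSN, duplicate reads are replayed from the saved responder resource (after re-validating the rkey and range), and duplicate atomics get the cached ack skb retransmitted rather than the operation rerun. A sketch of that policy (classify_dup() and the enum are illustrative):

    #include <stdbool.h>

    enum dup_action { DUP_REACK, DUP_REPLAY_READ, DUP_RESEND_ATOMIC_ACK, DUP_DROP };

    static enum dup_action classify_dup(bool is_send_or_write, bool is_read,
                                        bool is_atomic, bool resource_found)
    {
        if (is_send_or_write)
            return DUP_REACK;            /* ack again, never re-execute */
        if (is_read)
            return resource_found ? DUP_REPLAY_READ : DUP_DROP;
        if (is_atomic)
            return resource_found ? DUP_RESEND_ATOMIC_ACK : DUP_DROP;
        return DUP_DROP;
    }
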
1213 struct rxe_pkt_info *pkt = NULL; in rxe_responder() local
1240 state = get_req(qp, &pkt); in rxe_responder()
1243 state = check_psn(qp, pkt); in rxe_responder()
1246 state = check_op_seq(qp, pkt); in rxe_responder()
1249 state = check_op_valid(qp, pkt); in rxe_responder()
1252 state = check_resource(qp, pkt); in rxe_responder()
1255 state = check_length(qp, pkt); in rxe_responder()
1258 state = check_rkey(qp, pkt); in rxe_responder()
1261 state = execute(qp, pkt); in rxe_responder()
1264 state = do_complete(qp, pkt); in rxe_responder()
1267 state = read_reply(qp, pkt); in rxe_responder()
1270 state = acknowledge(qp, pkt); in rxe_responder()
1273 state = cleanup(qp, pkt); in rxe_responder()
1276 state = duplicate_request(qp, pkt); in rxe_responder()
1280 send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn); in rxe_responder()
1302 send_ack(qp, pkt, AETH_RNR_NAK | in rxe_responder()
1305 pkt->psn); in rxe_responder()
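
rxe_responder() itself is the state machine driving all of the above: each handler returns the next resp_states value and the loop dispatches until the packet is consumed or the task must exit. A minimal skeleton in the same shape (state names abbreviated; the real machine also has the NAK, RNR, and duplicate paths listed above):

    enum resp_states { ST_GET_REQ, ST_CHK_PSN, ST_EXECUTE, ST_COMPLETE,
                       ST_ACKNOWLEDGE, ST_CLEANUP, ST_EXIT };

    static enum resp_states step(enum resp_states s)
    {
        switch (s) {
        case ST_GET_REQ:     return ST_CHK_PSN;   /* dequeue next request */
        case ST_CHK_PSN:     return ST_EXECUTE;   /* stands in for the check_* chain */
        case ST_EXECUTE:     return ST_COMPLETE;
        case ST_COMPLETE:    return ST_ACKNOWLEDGE;
        case ST_ACKNOWLEDGE: return ST_CLEANUP;
        default:             return ST_EXIT;
        }
    }

    static void responder(void)
    {
        enum resp_states s = ST_GET_REQ;

        while (s != ST_EXIT)
            s = step(s);                          /* one packet per pass */
    }
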