Lines matching refs: pkt  (drivers/infiniband/sw/rxe/rxe_resp.c)

110 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);  in rxe_resp_queue_pkt()  local
114 must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) || in rxe_resp_queue_pkt()
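
Note: the listing cuts line 114 off after the ||. Below is a minimal userspace model of the "must_sched" idea, assuming (an assumption, not shown above) that the other operand checks whether earlier request packets are still queued; RDMA READ requests are pushed to the responder task because they can fan out into long reply streams.

/* Minimal model (not the kernel code) of the must_sched decision:
 * run the responder inline unless the work may be long-running.
 * The queue-depth operand is an assumption used for illustration. */
#include <stdbool.h>
#include <stdio.h>

#define IB_OPCODE_RC_RDMA_READ_REQUEST 0x0c   /* RC READ request opcode per the IBA opcode table */

struct pkt_info { int opcode; };
struct qp_info  { int req_pkts_queued; };     /* hypothetical stand-in for the skb request queue */

static bool must_schedule(const struct qp_info *qp, const struct pkt_info *pkt)
{
    /* READ requests may generate many response packets, and a backlog
     * of queued requests should also go through the task. */
    return pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST ||
           qp->req_pkts_queued > 1;
}

int main(void)
{
    struct qp_info qp = { .req_pkts_queued = 0 };
    struct pkt_info pkt = { .opcode = IB_OPCODE_RC_RDMA_READ_REQUEST };

    printf("must_sched = %d\n", must_schedule(&qp, &pkt));   /* prints 1 */
    return 0;
}
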
145 struct rxe_pkt_info *pkt) in check_psn() argument
147 int diff = psn_compare(pkt->psn, qp->resp.psn); in check_psn()
172 if (pkt->mask & RXE_START_MASK) { in check_psn()
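
Note: check_psn() keys off the sign of psn_compare() on line 147. PSNs are 24-bit sequence numbers that wrap, so the comparison has to be wraparound-safe. The sketch below shows one standard way to do that (shift the 24-bit difference into the sign of a 32-bit int), together with the masked PSN advance that execute() performs on line 814; it is an illustration, not necessarily the driver's psn_compare() verbatim.

/* Self-contained sketch of 24-bit PSN comparison with wraparound:
 * positive means the packet's PSN is ahead of the expected PSN
 * (sequence error), negative means it is behind (duplicate),
 * zero means it is the expected packet. */
#include <stdint.h>
#include <stdio.h>

#define BTH_PSN_MASK 0xffffff   /* PSNs are 24 bits wide */

static int psn_compare(uint32_t psn_a, uint32_t psn_b)
{
    /* Shift the 24-bit difference into the top of a signed 32-bit
     * value so that wrapping across 0xffffff -> 0 keeps the sign. */
    return (int32_t)((psn_a - psn_b) << 8);
}

int main(void)
{
    printf("%d\n", psn_compare(5, 3) > 0);                 /* 1: ahead */
    printf("%d\n", psn_compare(0x000001, 0xfffffe) > 0);   /* 1: ahead across the wrap */
    printf("%d\n", psn_compare(0xfffffe, 0x000001) < 0);   /* 1: behind across the wrap */

    /* The responder advances its expected PSN the same masked way
     * (line 814): next = (psn + 1) & BTH_PSN_MASK, so 0xffffff wraps to 0. */
    printf("%#x\n", (0xffffffu + 1) & BTH_PSN_MASK);        /* 0 */
    return 0;
}
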
189 struct rxe_pkt_info *pkt) in check_op_seq() argument
196 switch (pkt->opcode) { in check_op_seq()
208 switch (pkt->opcode) { in check_op_seq()
218 switch (pkt->opcode) { in check_op_seq()
237 switch (pkt->opcode) { in check_op_seq()
248 switch (pkt->opcode) { in check_op_seq()
258 switch (pkt->opcode) { in check_op_seq()
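
Note: only the switch statements of check_op_seq() match refs:pkt, so none of its cases appear above. Conceptually the check enforces packet ordering inside a multi-packet message; the reduction below is an illustrative model of that rule (FIRST/ONLY may only start a message, MIDDLE/LAST may only continue one), not the driver's per-opcode tables.

/* Simplified model of an opcode-sequence check: after a LAST or ONLY
 * packet (or at stream start) only FIRST or ONLY may arrive; after a
 * FIRST or MIDDLE, only MIDDLE or LAST may follow.  The enum is an
 * illustrative reduction, not the driver's types. */
#include <assert.h>
#include <stdbool.h>

enum frag { FRAG_FIRST, FRAG_MIDDLE, FRAG_LAST, FRAG_ONLY };

static bool op_seq_ok(enum frag prev, enum frag cur)
{
    switch (prev) {
    case FRAG_FIRST:
    case FRAG_MIDDLE:
        /* an operation is open: it may only continue or finish */
        return cur == FRAG_MIDDLE || cur == FRAG_LAST;
    case FRAG_LAST:
    case FRAG_ONLY:
        /* nothing is open: a new operation must start */
        return cur == FRAG_FIRST || cur == FRAG_ONLY;
    }
    return false;
}

int main(void)
{
    assert(op_seq_ok(FRAG_FIRST, FRAG_MIDDLE));   /* continuing a message */
    assert(!op_seq_ok(FRAG_LAST, FRAG_MIDDLE));   /* MIDDLE with nothing open */
    return 0;
}
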
279 struct rxe_pkt_info *pkt) in check_op_valid() argument
283 if (((pkt->mask & RXE_READ_MASK) && in check_op_valid()
285 ((pkt->mask & RXE_WRITE_MASK) && in check_op_valid()
287 ((pkt->mask & RXE_ATOMIC_MASK) && in check_op_valid()
295 if ((pkt->mask & RXE_WRITE_MASK) && in check_op_valid()
359 struct rxe_pkt_info *pkt) in check_resource() argument
380 if (pkt->mask & RXE_READ_OR_ATOMIC) { in check_resource()
391 if (pkt->mask & RXE_RWR_MASK) { in check_resource()
403 struct rxe_pkt_info *pkt) in check_length() argument
418 struct rxe_pkt_info *pkt) in check_rkey() argument
429 if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) { in check_rkey()
430 if (pkt->mask & RXE_RETH_MASK) { in check_rkey()
431 qp->resp.va = reth_va(pkt); in check_rkey()
432 qp->resp.rkey = reth_rkey(pkt); in check_rkey()
433 qp->resp.resid = reth_len(pkt); in check_rkey()
434 qp->resp.length = reth_len(pkt); in check_rkey()
436 access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ in check_rkey()
438 } else if (pkt->mask & RXE_ATOMIC_MASK) { in check_rkey()
439 qp->resp.va = atmeth_va(pkt); in check_rkey()
440 qp->resp.rkey = atmeth_rkey(pkt); in check_rkey()
448 if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) && in check_rkey()
449 (pkt->mask & RXE_RETH_MASK) && in check_rkey()
450 reth_len(pkt) == 0) { in check_rkey()
457 pktlen = payload_size(pkt); in check_rkey()
475 if (pkt->mask & RXE_WRITE_MASK) { in check_rkey()
477 if (pktlen != mtu || bth_pad(pkt)) { in check_rkey()
486 if ((bth_pad(pkt) != (0x3 & (-resid)))) { in check_rkey()
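
Note: the pad test on line 486 is easy to misread. For the final write packet, the BTH pad count must equal the number of bytes that round the remaining DMA length (resid) up to a 4-byte boundary, which is exactly what 0x3 & (-resid) computes in two's complement. A small worked example:

/* Worked example of the pad arithmetic on line 486. */
#include <stdio.h>

static unsigned int expected_pad(int resid)
{
    /* two's complement: the low two bits of -resid are the pad bytes
     * needed to reach the next multiple of 4 */
    return 0x3 & (unsigned int)(-resid);
}

int main(void)
{
    /* resid 5 -> 3 pad bytes, 6 -> 2, 7 -> 1, 8 -> 0 */
    for (int resid = 5; resid <= 8; resid++)
        printf("resid=%d pad=%u\n", resid, expected_pad(resid));
    return 0;
}
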
522 struct rxe_pkt_info *pkt) in write_data_in() argument
526 int data_len = payload_size(pkt); in write_data_in()
528 err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt), in write_data_in()
546 struct rxe_pkt_info *pkt) in process_atomic() argument
548 u64 iova = atmeth_va(pkt); in process_atomic()
570 if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP || in process_atomic()
571 pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) { in process_atomic()
572 if (*vaddr == atmeth_comp(pkt)) in process_atomic()
573 *vaddr = atmeth_swap_add(pkt); in process_atomic()
575 *vaddr += atmeth_swap_add(pkt); in process_atomic()
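
Note: lines 570-575 are the whole atomic payload handling: a COMPARE_SWAP opcode writes the swap operand only when the target word equals the compare operand, otherwise (FETCH_ADD) the operand is added. A minimal standalone model, ignoring the locking and memory mapping the driver performs around it:

/* Minimal model of the atomic handling shown above.  The plain
 * uint64_t target is only an illustration of the responder's mapped
 * word; the original value is what goes back in the ATOMIC ACK. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t do_atomic(uint64_t *vaddr, bool is_compare_swap,
                          uint64_t compare, uint64_t swap_add)
{
    uint64_t orig = *vaddr;          /* returned to the requester */

    if (is_compare_swap) {
        if (*vaddr == compare)
            *vaddr = swap_add;       /* swap only on a match */
    } else {
        *vaddr += swap_add;          /* fetch-and-add */
    }
    return orig;
}

int main(void)
{
    uint64_t word = 7;

    printf("orig=%llu now=%llu\n",
           (unsigned long long)do_atomic(&word, true, 7, 100),
           (unsigned long long)word);   /* orig=7   now=100 */
    printf("orig=%llu now=%llu\n",
           (unsigned long long)do_atomic(&word, false, 0, 5),
           (unsigned long long)word);   /* orig=100 now=105 */
    return 0;
}
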
586 struct rxe_pkt_info *pkt, in prepare_ack_packet() argument
615 ack->offset = pkt->offset; in prepare_ack_packet()
619 memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES); in prepare_ack_packet()
763 struct rxe_pkt_info *pkt) in build_rdma_network_hdr() argument
765 struct sk_buff *skb = PKT_TO_SKB(pkt); in build_rdma_network_hdr()
777 static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) in execute() argument
781 if (pkt->mask & RXE_SEND_MASK) { in execute()
787 build_rdma_network_hdr(&hdr, pkt); in execute()
793 err = send_data_in(qp, payload_addr(pkt), payload_size(pkt)); in execute()
796 } else if (pkt->mask & RXE_WRITE_MASK) { in execute()
797 err = write_data_in(qp, pkt); in execute()
800 } else if (pkt->mask & RXE_READ_MASK) { in execute()
804 } else if (pkt->mask & RXE_ATOMIC_MASK) { in execute()
805 err = process_atomic(qp, pkt); in execute()
814 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; in execute()
817 qp->resp.opcode = pkt->opcode; in execute()
820 if (pkt->mask & RXE_COMP_MASK) { in execute()
831 struct rxe_pkt_info *pkt) in do_complete() argument
856 wc->opcode = (pkt->mask & RXE_IMMDT_MASK && in do_complete()
857 pkt->mask & RXE_WRITE_MASK) ? in do_complete()
860 wc->byte_len = (pkt->mask & RXE_IMMDT_MASK && in do_complete()
861 pkt->mask & RXE_WRITE_MASK) ? in do_complete()
870 if (pkt->mask & RXE_IMMDT_MASK) { in do_complete()
872 uwc->ex.imm_data = immdt_imm(pkt); in do_complete()
875 if (pkt->mask & RXE_IETH_MASK) { in do_complete()
877 uwc->ex.invalidate_rkey = ieth_rkey(pkt); in do_complete()
882 if (pkt->mask & RXE_DETH_MASK) in do_complete()
883 uwc->src_qp = deth_sqp(pkt); in do_complete()
887 struct sk_buff *skb = PKT_TO_SKB(pkt); in do_complete()
900 if (pkt->mask & RXE_IMMDT_MASK) { in do_complete()
902 wc->ex.imm_data = immdt_imm(pkt); in do_complete()
905 if (pkt->mask & RXE_IETH_MASK) { in do_complete()
909 wc->ex.invalidate_rkey = ieth_rkey(pkt); in do_complete()
924 if (pkt->mask & RXE_DETH_MASK) in do_complete()
925 wc->src_qp = deth_sqp(pkt); in do_complete()
937 if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1)) in do_complete()
943 if (!pkt) in do_complete()
951 static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, in send_ack() argument
958 skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE, in send_ack()
973 static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, in send_atomic_ack() argument
981 skb = prepare_ack_packet(qp, pkt, &ack_pkt, in send_atomic_ack()
982 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn, in send_atomic_ack()
1016 struct rxe_pkt_info *pkt) in acknowledge() argument
1022 send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn); in acknowledge()
1023 else if (pkt->mask & RXE_ATOMIC_MASK) in acknowledge()
1024 send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED); in acknowledge()
1025 else if (bth_ack(pkt)) in acknowledge()
1026 send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn); in acknowledge()
1032 struct rxe_pkt_info *pkt) in cleanup() argument
1036 if (pkt) { in cleanup()
1070 struct rxe_pkt_info *pkt) in duplicate_request() argument
1075 if (pkt->mask & RXE_SEND_MASK || in duplicate_request()
1076 pkt->mask & RXE_WRITE_MASK) { in duplicate_request()
1078 if (bth_ack(pkt)) in duplicate_request()
1079 send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn); in duplicate_request()
1082 } else if (pkt->mask & RXE_READ_MASK) { in duplicate_request()
1085 res = find_resource(qp, pkt->psn); in duplicate_request()
1096 u64 iova = reth_va(pkt); in duplicate_request()
1097 u32 resid = reth_len(pkt); in duplicate_request()
1107 if (reth_rkey(pkt) != res->read.rkey) { in duplicate_request()
1112 res->cur_psn = pkt->psn; in duplicate_request()
1113 res->state = (pkt->psn == res->first_psn) ? in duplicate_request()
1132 res = find_resource(qp, pkt->psn); in duplicate_request()
1136 rc = rxe_xmit_packet(qp, pkt, res->atomic.skb); in duplicate_request()
1218 struct rxe_pkt_info *pkt = NULL; in rxe_responder() local
1245 state = get_req(qp, &pkt); in rxe_responder()
1248 state = check_psn(qp, pkt); in rxe_responder()
1251 state = check_op_seq(qp, pkt); in rxe_responder()
1254 state = check_op_valid(qp, pkt); in rxe_responder()
1257 state = check_resource(qp, pkt); in rxe_responder()
1260 state = check_length(qp, pkt); in rxe_responder()
1263 state = check_rkey(qp, pkt); in rxe_responder()
1266 state = execute(qp, pkt); in rxe_responder()
1269 state = do_complete(qp, pkt); in rxe_responder()
1272 state = read_reply(qp, pkt); in rxe_responder()
1275 state = acknowledge(qp, pkt); in rxe_responder()
1278 state = cleanup(qp, pkt); in rxe_responder()
1281 state = duplicate_request(qp, pkt); in rxe_responder()
1285 send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn); in rxe_responder()
1307 send_ack(qp, pkt, AETH_RNR_NAK | in rxe_responder()
1310 pkt->psn); in rxe_responder()
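
Note: lines 1218-1310 are the responder state machine: rxe_responder() loops and each stage returns the next state. The sketch below shows only the dispatch pattern on the happy path; the state names and fixed transitions are illustrative stand-ins, not the driver's enum resp_states, which also covers the NAK and error paths visible at lines 1285 and 1307.

/* Sketch of the dispatch pattern used by rxe_responder(): keep
 * switching on the returned state until the packet is consumed. */
#include <stdio.h>

enum state {
    ST_GET_REQ, ST_CHK_PSN, ST_CHK_OP_SEQ, ST_CHK_OP_VALID,
    ST_CHK_RESOURCE, ST_CHK_LENGTH, ST_CHK_RKEY, ST_EXECUTE,
    ST_COMPLETE, ST_ACKNOWLEDGE, ST_CLEANUP, ST_DONE,
};

int main(void)
{
    enum state s = ST_GET_REQ;

    while (s != ST_DONE) {
        printf("state %d\n", s);
        switch (s) {
        case ST_GET_REQ:      s = ST_CHK_PSN;      break; /* get_req()        */
        case ST_CHK_PSN:      s = ST_CHK_OP_SEQ;   break; /* check_psn()      */
        case ST_CHK_OP_SEQ:   s = ST_CHK_OP_VALID; break; /* check_op_seq()   */
        case ST_CHK_OP_VALID: s = ST_CHK_RESOURCE; break; /* check_op_valid() */
        case ST_CHK_RESOURCE: s = ST_CHK_LENGTH;   break; /* check_resource() */
        case ST_CHK_LENGTH:   s = ST_CHK_RKEY;     break; /* check_length()   */
        case ST_CHK_RKEY:     s = ST_EXECUTE;      break; /* check_rkey()     */
        case ST_EXECUTE:      s = ST_COMPLETE;     break; /* execute()        */
        case ST_COMPLETE:     s = ST_ACKNOWLEDGE;  break; /* do_complete()    */
        case ST_ACKNOWLEDGE:  s = ST_CLEANUP;      break; /* acknowledge()    */
        case ST_CLEANUP:      s = ST_DONE;         break; /* cleanup()        */
        default:              s = ST_DONE;         break;
        }
    }
    return 0;
}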