| /Linux-v6.6/net/dns_resolver/ |
| D | dns_query.c |
|   79  struct key *rkey;  in dns_query() local
|  128  rkey = request_key_net(&key_type_dns_resolver, desc, net, options);  in dns_query()
|  131  if (IS_ERR(rkey)) {  in dns_query()
|  132  ret = PTR_ERR(rkey);  in dns_query()
|  136  down_read(&rkey->sem);  in dns_query()
|  137  set_bit(KEY_FLAG_ROOT_CAN_INVAL, &rkey->flags);  in dns_query()
|  138  rkey->perm |= KEY_USR_VIEW;  in dns_query()
|  140  ret = key_validate(rkey);  in dns_query()
|  145  ret = PTR_ERR(rkey->payload.data[dns_key_error]);  in dns_query()
|  149  upayload = user_key_payload_locked(rkey);  in dns_query()
|  [all …]
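The dns_query() hits above follow the standard pattern for consuming a key payload: request the key, take key->sem for read, key_validate() it, then read the payload under the lock. Below is a minimal sketch of that sequence, assuming a user-format key payload; the helper name and the reduced error handling are mine, not the DNS resolver's.

	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/key.h>
	#include <linux/minmax.h>
	#include <linux/string.h>
	#include <keys/user-type.h>

	/* Hypothetical helper: look up "desc" under the given key type and copy
	 * its user-format payload out, mirroring the request/validate/read
	 * steps visible in dns_query(). */
	static long read_key_payload(struct key_type *type, const char *desc,
				     void *buf, size_t buflen)
	{
		const struct user_key_payload *upayload;
		struct key *rkey;
		long ret;

		rkey = request_key(type, desc, NULL);	/* may upcall to userspace */
		if (IS_ERR(rkey))
			return PTR_ERR(rkey);

		down_read(&rkey->sem);			/* pin the payload */
		ret = key_validate(rkey);		/* reject expired/revoked keys */
		if (ret < 0)
			goto out;

		upayload = user_key_payload_locked(rkey);
		if (!upayload) {			/* negatively instantiated */
			ret = -ENOKEY;
			goto out;
		}

		ret = min_t(size_t, upayload->datalen, buflen);
		memcpy(buf, upayload->data, ret);
	out:
		up_read(&rkey->sem);
		key_put(rkey);
		return ret;
	}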
|
| /Linux-v6.6/drivers/infiniband/sw/rxe/ |
| D | rxe_mw.c |
|   31  mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);  in rxe_alloc_mw()
|  138  u32 key = wqe->wr.wr.mw.rkey & 0xff;  in rxe_do_bind_mw()
|  140  mw->rkey = (mw->rkey & ~0xff) | key;  in rxe_do_bind_mw()
|  180  if (unlikely(mw->rkey != mw_rkey)) {  in rxe_bind_mw()
|  258  int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)  in rxe_invalidate_mw() argument
|  264  mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);  in rxe_invalidate_mw()
|  270  if (rkey != mw->rkey) {  in rxe_invalidate_mw()
|  290  struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)  in rxe_lookup_mw() argument
|  295  int index = rkey >> 8;  in rxe_lookup_mw()
|  301  if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd ||  in rxe_lookup_mw()
|
| D | rxe_hdr.h |
|  528  __be32 rkey;  member
|  550  return be32_to_cpu(reth->rkey);  in __reth_rkey()
|  553  static inline void __reth_set_rkey(void *arg, u32 rkey)  in __reth_set_rkey() argument
|  557  reth->rkey = cpu_to_be32(rkey);  in __reth_set_rkey()
|  592  static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)  in reth_set_rkey() argument
|  595  rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);  in reth_set_rkey()
|  661  __be32 rkey;  member
|  684  return be32_to_cpu(atmeth->rkey);  in __atmeth_rkey()
|  687  static inline void __atmeth_set_rkey(void *arg, u32 rkey)  in __atmeth_set_rkey() argument
|  691  atmeth->rkey = cpu_to_be32(rkey);  in __atmeth_set_rkey()
|  [all …]
|
| D | rxe_resp.c |
|  410  qp->resp.rkey = 0;  in qp_resp_from_reth()
|  412  qp->resp.rkey = reth_rkey(pkt);  in qp_resp_from_reth()
|  419  qp->resp.rkey = atmeth_rkey(pkt);  in qp_resp_from_atmeth()
|  433  u32 rkey;  in check_rkey() local
|  478  rkey = qp->resp.rkey;  in check_rkey()
|  482  if (rkey_is_mw(rkey)) {  in check_rkey()
|  483  mw = rxe_lookup_mw(qp, access, rkey);  in check_rkey()
|  485  rxe_dbg_qp(qp, "no MW matches rkey %#x\n", rkey);  in check_rkey()
|  504  mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);  in check_rkey()
|  506  rxe_dbg_qp(qp, "no MR matches rkey %#x\n", rkey);  in check_rkey()
|  [all …]
|
| D | rxe_verbs.h |
|  150  u32 rkey;  member
|  183  u32 rkey;  member
|  293  static inline int rkey_is_mw(u32 rkey)  in rkey_is_mw() argument
|  295  u32 index = rkey >> 8;  in rkey_is_mw()
|  307  u32 rkey;  member
|  340  u32 rkey;  member
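In rxe, an rkey is a composite value: the upper 24 bits are the object-pool index of the MR or MW (which is why rxe_alloc_mw() builds it as `(index << 8) | key` and the lookup paths shift by 8), and the low byte is a variable key that a bind can replace, as rxe_do_bind_mw() shows. A tiny standalone illustration of that layout; the helper names are mine, not rxe's.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative only: mirrors how rxe packs an rkey, not rxe's own helpers.
	 * Upper 24 bits: object-pool index of the MR/MW.  Low 8 bits: variable key. */
	static uint32_t rkey_pack(uint32_t pool_index, uint8_t var_key)
	{
		return (pool_index << 8) | var_key;
	}

	static uint32_t rkey_pool_index(uint32_t rkey)
	{
		return rkey >> 8;		/* what rkey_is_mw() and the lookups use */
	}

	static uint32_t rkey_rebind(uint32_t rkey, uint8_t new_key)
	{
		return (rkey & ~0xffu) | new_key;	/* cf. rxe_do_bind_mw() */
	}

	int main(void)
	{
		uint32_t rkey = rkey_pack(0x1234, 0xab);

		printf("rkey=%#x index=%#x\n", rkey, rkey_pool_index(rkey));
		printf("rebound=%#x\n", rkey_rebind(rkey, 0xcd));
		return 0;
	}

Invalidation and lookup only honour the rkey if both halves still match the stored object, which is exactly what the rxe_invalidate_mw() and rxe_lookup_mw() hits above check.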
|
| D | rxe_req.c |
|  459  reth_set_rkey(pkt, ibwr->wr.flush.rkey);  in init_req_packet()
|  461  reth_set_rkey(pkt, ibwr->wr.rdma.rkey);  in init_req_packet()
|  484  atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);  in init_req_packet()
|  618  u32 rkey;  in rxe_do_local_ops() local
|  623  rkey = wqe->wr.ex.invalidate_rkey;  in rxe_do_local_ops()
|  624  if (rkey_is_mw(rkey))  in rxe_do_local_ops()
|  625  ret = rxe_invalidate_mw(qp, rkey);  in rxe_do_local_ops()
|  627  ret = rxe_invalidate_mr(qp, rkey);  in rxe_do_local_ops()
|
| /Linux-v6.6/drivers/infiniband/ulp/iser/ |
| D | iser_memory.c |
|  135  reg->rkey = device->pd->unsafe_global_rkey;  in iser_reg_dma()
|  137  reg->rkey = 0;  in iser_reg_dma()
|  142  " length=0x%x\n", reg->sge.lkey, reg->rkey,  in iser_reg_dma()
|  241  inv_wr->ex.invalidate_rkey = mr->rkey;  in iser_inv_rkey()
|  270  ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));  in iser_reg_sig_mr()
|  287  wr->key = mr->rkey;  in iser_reg_sig_mr()
|  294  sig_reg->rkey = mr->rkey;  in iser_reg_sig_mr()
|  299  sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,  in iser_reg_sig_mr()
|  319  ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));  in iser_fast_reg_mr()
|  334  wr->key = mr->rkey;  in iser_fast_reg_mr()
|  [all …]
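Both iser_fast_reg_mr() and iser_reg_sig_mr() bump the MR's variable key with ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)) before each registration, so an rkey left over from a previous I/O cannot be replayed, and then describe the new mapping in an IB_WR_REG_MR work request whose key field becomes the rkey handed to the peer. A hedged sketch of that sequence follows; the function, its parameters, and the access mask are illustrative, only the verbs calls are the stock API.

	#include <linux/scatterlist.h>
	#include <linux/string.h>
	#include <rdma/ib_verbs.h>

	/*
	 * Hypothetical helper: map sg_nents entries of sgl through a
	 * pre-allocated fast-registration MR and post the REG_MR work
	 * request that activates the new mapping.
	 */
	static int fastreg_map_sg(struct ib_qp *qp, struct ib_mr *mr,
				  struct scatterlist *sgl, int sg_nents,
				  struct ib_reg_wr *reg_wr, struct ib_cqe *cqe)
	{
		int n;

		/* Refresh the low byte so this registration gets a new rkey. */
		ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

		n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
		if (n < sg_nents)
			return n < 0 ? n : -EINVAL;

		memset(reg_wr, 0, sizeof(*reg_wr));
		reg_wr->wr.opcode = IB_WR_REG_MR;
		reg_wr->wr.wr_cqe = cqe;
		reg_wr->mr = mr;
		reg_wr->key = mr->rkey;		/* the rkey the remote side will use */
		reg_wr->access = IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE;

		return ib_post_send(qp, &reg_wr->wr, NULL);
	}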
|
| D | iser_initiator.c |
|   70  hdr->read_stag = cpu_to_be32(mem_reg->rkey);  in iser_prepare_read_cmd()
|   74  task->itt, mem_reg->rkey,  in iser_prepare_read_cmd()
|  117  hdr->write_stag = cpu_to_be32(mem_reg->rkey);  in iser_prepare_write_cmd()
|  122  task->itt, mem_reg->rkey,  in iser_prepare_write_cmd()
|  576  static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)  in iser_inv_desc() argument
|  578  if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||  in iser_inv_desc()
|  579  (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {  in iser_inv_desc()
|  580  iser_err("Bogus remote invalidation for rkey %#x\n", rkey);  in iser_inv_desc()
|  594  u32 rkey = wc->ex.invalidate_rkey;  in iser_check_remote_inv() local
|  597  iser_conn, rkey);  in iser_check_remote_inv()
|  [all …]
|
| /Linux-v6.6/fs/nfs/ |
| D | nfs4idmap.c |
|  283  struct key *rkey = ERR_PTR(-EAGAIN);  in nfs_idmap_request_key() local
|  291  rkey = request_key(&key_type_id_resolver, desc, "");  in nfs_idmap_request_key()
|  292  if (IS_ERR(rkey)) {  in nfs_idmap_request_key()
|  294  rkey = request_key_with_auxdata(&key_type_id_resolver_legacy,  in nfs_idmap_request_key()
|  298  if (!IS_ERR(rkey))  in nfs_idmap_request_key()
|  299  set_bit(KEY_FLAG_ROOT_CAN_INVAL, &rkey->flags);  in nfs_idmap_request_key()
|  302  return rkey;  in nfs_idmap_request_key()
|  310  struct key *rkey;  in nfs_idmap_get_key() local
|  315  rkey = nfs_idmap_request_key(name, namelen, type, idmap);  in nfs_idmap_get_key()
|  318  if (IS_ERR(rkey)) {  in nfs_idmap_get_key()
|  [all …]
|
| /Linux-v6.6/drivers/infiniband/core/ |
| D | rw.c |
|  126  u64 remote_addr, u32 rkey, enum dma_data_direction dir)  in rdma_rw_init_mr_wrs() argument
|  162  reg->wr.rkey = rkey;  in rdma_rw_init_mr_wrs()
|  197  u64 remote_addr, u32 rkey, enum dma_data_direction dir)  in rdma_rw_init_map_wrs() argument
|  223  rdma_wr->rkey = rkey;  in rdma_rw_init_map_wrs()
|  252  struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,  in rdma_rw_init_single_wr() argument
|  271  rdma_wr->rkey = rkey;  in rdma_rw_init_single_wr()
|  294  u64 remote_addr, u32 rkey, enum dma_data_direction dir)  in rdma_rw_ctx_init() argument
|  328  sg_offset, remote_addr, rkey, dir);  in rdma_rw_ctx_init()
|  331  remote_addr, rkey, dir);  in rdma_rw_ctx_init()
|  334  remote_addr, rkey, dir);  in rdma_rw_ctx_init()
|  [all …]
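rdma_rw_ctx_init() wraps the three strategies visible above (a single RDMA WR, a chain of mapped WRs, or MR-based registration when the device requires it) behind one call; the caller only supplies the scatterlist plus the peer's remote address and rkey, then posts and later destroys the context. A hedged usage sketch; the surrounding function and the completion cqe are hypothetical.

	#include <rdma/rw.h>

	/*
	 * Hypothetical example: RDMA-READ the peer's buffer described by
	 * (remote_addr, rkey) into a local scatterlist, tearing the context
	 * down again if the post fails.  qp, port_num and sg are assumed to
	 * have been set up elsewhere.
	 */
	static int start_remote_read(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
				     u32 port_num, struct scatterlist *sg,
				     u32 sg_cnt, u64 remote_addr, u32 rkey,
				     struct ib_cqe *cqe)
	{
		int ret;

		/* Returns the number of WQEs needed on success. */
		ret = rdma_rw_ctx_init(ctx, qp, port_num, sg, sg_cnt, 0,
				       remote_addr, rkey, DMA_FROM_DEVICE);
		if (ret < 0)
			return ret;

		/* Post the whole WR chain; completion lands on cqe->done(). */
		ret = rdma_rw_ctx_post(ctx, qp, port_num, cqe, NULL);
		if (ret) {
			rdma_rw_ctx_destroy(ctx, qp, port_num, sg, sg_cnt,
					    DMA_FROM_DEVICE);
			return ret;
		}
		return 0;
	}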
|
| D | uverbs_std_types_mr.c |
|  152  &mr->rkey, sizeof(mr->rkey));  in UVERBS_HANDLER()
|  169  &mr->rkey, sizeof(mr->rkey));  in UVERBS_HANDLER()
|  265  &mr->rkey, sizeof(mr->rkey));  in UVERBS_HANDLER()
|
| /Linux-v6.6/include/uapi/rdma/ |
| D | rdma_user_rxe.h |
|   88  __u32 rkey;  member
|   94  __u32 rkey;  member
|  101  __u32 rkey;  member
|  118  __u32 rkey;  member
|
| D | vmw_pvrdma-abi.h |
|  251  __u32 rkey;  member
|  258  __u32 rkey;  member
|  264  __u32 rkey;  member
|  277  __u32 rkey;  member
|
| /Linux-v6.6/drivers/infiniband/hw/vmw_pvrdma/ |
| D | pvrdma_mr.c |
|   95  mr->ibmr.rkey = resp->rkey;  in pvrdma_get_dma_mr()
|  183  mr->ibmr.rkey = resp->rkey;  in pvrdma_reg_user_mr()
|  255  mr->ibmr.rkey = resp->rkey;  in pvrdma_alloc_mr()
|
| /Linux-v6.6/fs/reiserfs/ |
| D | dir.c |
|   69  const struct reiserfs_key *rkey;  in reiserfs_readdir_inode() local
|  242  rkey = get_rkey(&path_to_entry, inode->i_sb);  in reiserfs_readdir_inode()
|  243  if (!comp_le_keys(rkey, &MIN_KEY)) {  in reiserfs_readdir_inode()
|  253  if (COMP_SHORT_KEYS(rkey, &pos_key)) {  in reiserfs_readdir_inode()
|  259  le_key_k_offset(KEY_FORMAT_3_5, rkey));  in reiserfs_readdir_inode()
|
| /Linux-v6.6/drivers/infiniband/sw/siw/ |
| D | siw_qp_tx.c |
|  139  c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey);  in siw_qp_prepare_tx()
|  184  c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey);  in siw_qp_prepare_tx()
|  196  c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey);  in siw_qp_prepare_tx()
|  210  c_tx->pkt.rresp.sink_stag = cpu_to_be32(wqe->sqe.rkey);  in siw_qp_prepare_tx()
|  945  siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey);  in siw_fastreg_mr()
|  948  pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);  in siw_fastreg_mr()
|  952  if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) {  in siw_fastreg_mr()
|  953  pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey);  in siw_fastreg_mr()
|  957  mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);  in siw_fastreg_mr()
|  959  pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);  in siw_fastreg_mr()
|  [all …]
|
| /Linux-v6.6/arch/arm64/crypto/ |
| D | sm4-ce-glue.c |
|   29  asmlinkage void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src);
|   30  asmlinkage void sm4_ce_crypt(const u32 *rkey, u8 *dst, const u8 *src,
|   32  asmlinkage void sm4_ce_cbc_enc(const u32 *rkey, u8 *dst, const u8 *src,
|   34  asmlinkage void sm4_ce_cbc_dec(const u32 *rkey, u8 *dst, const u8 *src,
|   36  asmlinkage void sm4_ce_cbc_cts_enc(const u32 *rkey, u8 *dst, const u8 *src,
|   38  asmlinkage void sm4_ce_cbc_cts_dec(const u32 *rkey, u8 *dst, const u8 *src,
|   40  asmlinkage void sm4_ce_cfb_enc(const u32 *rkey, u8 *dst, const u8 *src,
|   42  asmlinkage void sm4_ce_cfb_dec(const u32 *rkey, u8 *dst, const u8 *src,
|   44  asmlinkage void sm4_ce_ctr_enc(const u32 *rkey, u8 *dst, const u8 *src,
|  114  static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey)  in sm4_ecb_do_crypt() argument
|  [all …]
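Here rkey is not an RDMA key at all: it is the expanded SM4 round-key schedule passed to the ARMv8 Crypto Extensions assembly helpers, and sm4_ecb_do_crypt() walks the skcipher request and hands whole blocks to them under kernel_neon_begin()/kernel_neon_end(). Below is a plausible reconstruction of that loop, assuming the truncated fourth parameter of sm4_ce_crypt() is a block count; treat it as a sketch, not the file's exact code.

	#include <crypto/internal/skcipher.h>
	#include <crypto/sm4.h>
	#include <linux/linkage.h>
	#include <asm/neon.h>

	/* Assumed prototype of the CE assembly helper; the listing above
	 * truncates it after "const u8 *src,". */
	asmlinkage void sm4_ce_crypt(const u32 *rkey, u8 *dst, const u8 *src,
				     unsigned int nblocks);

	/* Sketch of the ECB walk loop: process as many full blocks as the
	 * walk hands us, claiming the NEON/CE unit only around the call. */
	static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey)
	{
		struct skcipher_walk walk;
		unsigned int nbytes;
		int err;

		err = skcipher_walk_virt(&walk, req, false);

		while ((nbytes = walk.nbytes) > 0) {
			const u8 *src = walk.src.virt.addr;
			u8 *dst = walk.dst.virt.addr;
			unsigned int nblocks = nbytes / SM4_BLOCK_SIZE;

			if (nblocks) {
				kernel_neon_begin();
				sm4_ce_crypt(rkey, dst, src, nblocks);
				kernel_neon_end();
			}
			err = skcipher_walk_done(&walk, nbytes % SM4_BLOCK_SIZE);
		}

		return err;
	}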
|
| /Linux-v6.6/drivers/infiniband/sw/rdmavt/ |
| D | mr.c |
|  260  mr->ibmr.rkey = mr->mr.lkey;  in __rvt_alloc_mr()
|  630  ibmr->rkey = key;  in rvt_fast_reg_mr()
|  647  int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey)  in rvt_invalidate_rkey() argument
|  653  if (rkey == 0)  in rvt_invalidate_rkey()
|  658  rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);  in rvt_invalidate_rkey()
|  659  if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))  in rvt_invalidate_rkey()
|  827  u32 len, u64 vaddr, u32 rkey, int acc)  in rvt_rkey_ok() argument
|  840  if (rkey == 0) {  in rvt_rkey_ok()
|  861  mr = rcu_dereference(rkt->table[rkey >> rkt->shift]);  in rvt_rkey_ok()
|  869  mr->lkey != rkey || qp->ibqp.pd != mr->pd))  in rvt_rkey_ok()
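rvt_rkey_ok() is the classic software-verbs validation path: the high bits of the rkey index the lkey table (rkey >> rkt->shift), the stored entry must carry exactly that key and belong to the QP's PD, and only then are the address range and access bits checked. A simplified, self-contained sketch of that check; the structures and names here are illustrative, not rdmavt's.

	#include <stdbool.h>
	#include <stdint.h>

	/* Illustrative structures only; rdmavt's real ones live in rdmavt_mr.h. */
	struct demo_mr {
		uint32_t lkey;		/* full key the MR was created with */
		const void *pd;		/* protection domain it belongs to */
		uint64_t iova;		/* start of the registered range */
		uint64_t length;
		int access;		/* IB_ACCESS_* style bits */
	};

	struct demo_rkey_table {
		unsigned int shift;	/* 32 - log2(table size), cf. rkt->shift */
		struct demo_mr **table;
	};

	/* Mirror of the checks in rvt_rkey_ok(): index by the high bits, then
	 * require an exact key, PD, range and access match. */
	static bool demo_rkey_ok(const struct demo_rkey_table *rkt,
				 const void *qp_pd, uint32_t rkey,
				 uint64_t vaddr, uint32_t len, int acc)
	{
		const struct demo_mr *mr = rkt->table[rkey >> rkt->shift];

		if (!mr || mr->lkey != rkey || mr->pd != qp_pd)
			return false;
		if (vaddr < mr->iova || vaddr + len > mr->iova + mr->length)
			return false;
		return (mr->access & acc) == acc;
	}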
|
| /Linux-v6.6/drivers/infiniband/ulp/rtrs/ |
| D | README |
|   51  then pass it to the block layer. A new rkey is generated and registered for the
|   53  The new rkey is sent back to the client along with the IO result.
|  144  using the IMM field, the server invalidates the rkey associated with the memory chunks
|  149  inflight IO and for the error code. The new rkey is sent back using
|  150  SEND_WITH_IMM WR to the client. When it receives the new rkey message, it validates
|  151  the message and finishes the IO after updating the rkey for the rbuffer, then posts
|  186  The server invalidates the rkey associated with the memory chunks first; when it finishes,
|  192  outstanding inflight IO and the error code. The new rkey is sent back using
|  193  SEND_WITH_IMM WR to the client. When it receives the new rkey message, it validates
|  194  the message and finishes the IO after updating the rkey for the rbuffer, then posts
|  [all …]
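The protocol described above has the server invalidate the rkey that covered the just-used memory chunk, register a fresh one, and ship the new rkey back to the client in a SEND_WITH_IMM message; the client validates that message and updates its copy of the rkey for the rbuffer before completing the IO. A hedged sketch of the server-side send follows; the message layout and helper are stand-ins (rtrs defines its own wire format), only the verbs usage is the stock API.

	#include <linux/types.h>
	#include <asm/byteorder.h>
	#include <rdma/ib_verbs.h>

	/* Stand-in for the on-wire message carrying the refreshed rkey. */
	struct demo_rkey_msg {
		__le32 buf_id;
		__le32 rkey;
	};

	/*
	 * Hypothetical server-side helper: advertise mr->rkey for buffer
	 * "buf_id" with a SEND_WITH_IMM, so the client can match it via the
	 * immediate data.  "sge" must already describe a registered copy of
	 * *msg.
	 */
	static int send_new_rkey(struct ib_qp *qp, struct ib_mr *mr, u32 buf_id,
				 struct demo_rkey_msg *msg, struct ib_sge *sge,
				 struct ib_cqe *cqe)
	{
		struct ib_send_wr wr = {};

		msg->buf_id = cpu_to_le32(buf_id);
		msg->rkey = cpu_to_le32(mr->rkey);	/* freshly registered key */

		wr.opcode = IB_WR_SEND_WITH_IMM;
		wr.wr_cqe = cqe;
		wr.sg_list = sge;
		wr.num_sge = 1;
		wr.send_flags = IB_SEND_SIGNALED;
		wr.ex.imm_data = cpu_to_be32(buf_id);	/* lets the client find the rbuffer */

		return ib_post_send(qp, &wr, NULL);
	}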
|
| D | rtrs-srv.c |
|  207  u32 rkey = 0;  in rdma_write_sg() local
|  238  wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key);  in rdma_write_sg()
|  239  if (rkey == 0)  in rdma_write_sg()
|  240  rkey = wr->rkey;  in rdma_write_sg()
|  243  WARN_ON_ONCE(rkey != wr->rkey);  in rdma_write_sg()
|  276  inv_wr.ex.invalidate_rkey = rkey;  in rdma_write_sg()
|  289  rwr.key = srv_mr->mr->rkey;  in rdma_write_sg()
|  295  msg->rkey = cpu_to_le32(srv_mr->mr->rkey);  in rdma_write_sg()
|  410  rwr.key = srv_mr->mr->rkey;  in send_io_resp_imm()
|  416  msg->rkey = cpu_to_le32(srv_mr->mr->rkey);  in send_io_resp_imm()
|  [all …]
|
| /Linux-v6.6/drivers/infiniband/hw/qib/ |
| D | qib_rc.c |
|   350  ohdr->u.rc.reth.rkey =  in qib_make_rc_req()
|   351  cpu_to_be32(wqe->rdma_wr.rkey);  in qib_make_rc_req()
|   393  ohdr->u.rc.reth.rkey =  in qib_make_rc_req()
|   394  cpu_to_be32(wqe->rdma_wr.rkey);  in qib_make_rc_req()
|   435  ohdr->u.atomic_eth.rkey = cpu_to_be32(  in qib_make_rc_req()
|   436  wqe->atomic_wr.rkey);  in qib_make_rc_req()
|   555  ohdr->u.rc.reth.rkey =  in qib_make_rc_req()
|   556  cpu_to_be32(wqe->rdma_wr.rkey);  in qib_make_rc_req()
|  1608  u32 rkey = be32_to_cpu(reth->rkey);  in qib_rc_rcv_error() local
|  1612  ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,  in qib_rc_rcv_error()
|  [all …]
|
| /Linux-v6.6/include/rdma/ |
| D | rw.h |
|  47  u64 remote_addr, u32 rkey, enum dma_data_direction dir);
|  55  struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey,
|
| /Linux-v6.6/drivers/nvme/host/ |
| D | pr.c |
|  231  le64_to_cpu(rse->regctl_eds[i].rkey);  in nvme_pr_read_keys()
|  236  keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey);  in nvme_pr_read_keys()
|  288  resv->key = le64_to_cpu(rse->regctl_eds[i].rkey);  in nvme_pr_read_reservation()
|  296  resv->key = le64_to_cpu(rs->regctl_ds[i].rkey);  in nvme_pr_read_reservation()
|
| /Linux-v6.6/drivers/net/ethernet/mellanox/mlx5/core/steering/ |
| D | dr_send.c |
|   31  u32 rkey;  member
|  483  u32 rkey,  in dr_rdma_handle_icm_write_segments() argument
|  493  wq_raddr->rkey = cpu_to_be32(rkey);  in dr_rdma_handle_icm_write_segments()
|  522  u32 rkey, struct dr_data_seg *data_seg,  in dr_rdma_segments() argument
|  539  rkey, data_seg, &size);  in dr_rdma_segments()
|  571  dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,  in dr_post_send()
|  573  dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,  in dr_post_send()
|  576  dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,  in dr_post_send()
|  795  send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(ste->htbl->chunk);  in mlx5dr_send_postsend_ste()
|  855  send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);  in mlx5dr_send_postsend_htbl()
|  [all …]
|
| /Linux-v6.6/drivers/infiniband/hw/hns/ |
| D | hns_roce_mr.c |
|  211  mr->ibmr.rkey = mr->ibmr.lkey = mr->key;  in hns_roce_get_dma_mr()
|  252  mr->ibmr.rkey = mr->ibmr.lkey = mr->key;  in hns_roce_reg_user_mr()
|  391  mr->ibmr.rkey = mr->ibmr.lkey = mr->key;  in hns_roce_alloc_mr()
|  467  key_to_hw_index(mw->rkey) &  in hns_roce_mw_free()
|  473  key_to_hw_index(mw->rkey));  in hns_roce_mw_free()
|  477  (int)key_to_hw_index(mw->rkey));  in hns_roce_mw_free()
|  486  unsigned long mtpt_idx = key_to_hw_index(mw->rkey);  in hns_roce_mw_enable()
|  545  mw->rkey = hw_index_to_key(id);  in hns_roce_alloc_mw()
|  547  ibmw->rkey = mw->rkey;  in hns_roce_alloc_mw()
|