| /Linux-v5.10/net/smc/ |
| D | smc_core.c |
     47  static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
     49  static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
     54  static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,   [in smc_lgr_list_head() argument]
     57  if (lgr->is_smcd) {   [in smc_lgr_list_head()]
     58  *lgr_lock = &lgr->smcd->lgr_lock;   [in smc_lgr_list_head()]
     59  return &lgr->smcd->lgr_list;   [in smc_lgr_list_head()]
     66  static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)   [in smc_lgr_schedule_free_work() argument]
     72  if (!lgr->freeing) {   [in smc_lgr_schedule_free_work()]
     73  mod_delayed_work(system_wq, &lgr->free_work,   [in smc_lgr_schedule_free_work()]
     74  (!lgr->is_smcd && lgr->role == SMC_CLNT) ?   [in smc_lgr_schedule_free_work()]
     [all …]
|
| D | smc_llc.c |
    189  static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,   [in smc_llc_flow_parallel() argument]
    195  flow_type != msg_type && !lgr->delayed_event) {   [in smc_llc_flow_parallel()]
    196  lgr->delayed_event = qentry;   [in smc_llc_flow_parallel()]
    203  SMC_LGR_ID_SIZE, &lgr->id,   [in smc_llc_flow_parallel()]
    205  flow_type, lgr->role);   [in smc_llc_flow_parallel()]
    213  struct smc_link_group *lgr = qentry->link->lgr;   [in smc_llc_flow_start() local]
    215  spin_lock_bh(&lgr->llc_flow_lock);   [in smc_llc_flow_start()]
    218  smc_llc_flow_parallel(lgr, flow->type, qentry);   [in smc_llc_flow_start()]
    219  spin_unlock_bh(&lgr->llc_flow_lock);   [in smc_llc_flow_start()]
    237  spin_unlock_bh(&lgr->llc_flow_lock);   [in smc_llc_flow_start()]
    [all …]
|
| D | smc_llc.h |
     53  static inline struct smc_link *smc_llc_usable_link(struct smc_link_group *lgr)   [in smc_llc_usable_link() argument]
     58  if (smc_link_usable(&lgr->lnk[i]))   [in smc_llc_usable_link()]
     59  return &lgr->lnk[i];   [in smc_llc_usable_link()]
     64  static inline void smc_llc_set_termination_rsn(struct smc_link_group *lgr,   [in smc_llc_set_termination_rsn() argument]
     67  if (!lgr->llc_termination_rsn)   [in smc_llc_set_termination_rsn()]
     68  lgr->llc_termination_rsn = rsn;   [in smc_llc_set_termination_rsn()]
     81  void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc);
     82  void smc_llc_lgr_clear(struct smc_link_group *lgr);
     88  int smc_llc_do_delete_rkey(struct smc_link_group *lgr,
     90  int smc_llc_flow_initiate(struct smc_link_group *lgr,
     [all …]
|
| D | smc_diag.c |
    105  else if (smc->conn.lgr && smc->conn.lgr->is_smcd)   [in __smc_diag_dump()]
    158  if (smc->conn.lgr && !smc->conn.lgr->is_smcd &&   [in __smc_diag_dump()]
    160  !list_empty(&smc->conn.lgr->list)) {   [in __smc_diag_dump()]
    162  .role = smc->conn.lgr->role,   [in __smc_diag_dump()]
    163  .lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,   [in __smc_diag_dump()]
    164  .lnk[0].link_id = smc->conn.lgr->lnk[0].link_id,   [in __smc_diag_dump()]
    168  smc->conn.lgr->lnk[0].smcibdev->ibdev->name,   [in __smc_diag_dump()]
    169  sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name));   [in __smc_diag_dump()]
    171  smc->conn.lgr->lnk[0].gid);   [in __smc_diag_dump()]
    173  smc->conn.lgr->lnk[0].peer_gid);   [in __smc_diag_dump()]
    [all …]
|
| D | smc_ism.c |
     59  spin_lock_irqsave(&conn->lgr->smcd->lock, flags);   [in smc_ism_set_conn()]
     60  conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = conn;   [in smc_ism_set_conn()]
     61  spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);   [in smc_ism_set_conn()]
     72  spin_lock_irqsave(&conn->lgr->smcd->lock, flags);   [in smc_ism_unset_conn()]
     73  conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = NULL;   [in smc_ism_unset_conn()]
     74  spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);   [in smc_ism_unset_conn()]
    182  int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,   [in smc_ism_register_dmb() argument]
    191  dmb.vlan_id = lgr->vlan_id;   [in smc_ism_register_dmb()]
    192  dmb.rgid = lgr->peer_gid;   [in smc_ism_register_dmb()]
    193  rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb);   [in smc_ism_register_dmb()]
    [all …]
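The smc_ism.c hit at line 193 reaches the ISM device through a function-pointer table (`lgr->smcd->ops->register_dmb(...)`). Below is a minimal userspace sketch of that ops-table indirection only; all names in it (`ism_dev_stub`, `ism_ops_stub`, `dmb_stub`, `fake_register_dmb`) are hypothetical stand-ins, not the kernel's types or interface.

```c
/* Hedged sketch of the call shape at smc_ism.c:193: the group holds a device
 * pointer whose operations are reached through a function-pointer table, so
 * the SMC core never calls a driver function directly.  All names here are
 * made-up stand-ins. */
#include <stdio.h>

struct dmb_stub {
	unsigned short     vlan_id;
	unsigned long long rgid;	/* peer GID, as filled from lgr->peer_gid */
};

struct ism_dev_stub;

struct ism_ops_stub {
	/* mirrors the shape of a ->register_dmb() hook: device + descriptor */
	int (*register_dmb)(struct ism_dev_stub *dev, struct dmb_stub *dmb);
};

struct ism_dev_stub {
	const struct ism_ops_stub *ops;
};

static int fake_register_dmb(struct ism_dev_stub *dev, struct dmb_stub *dmb)
{
	(void)dev;
	printf("registering DMB for peer %llx on vlan %u\n",
	       dmb->rgid, dmb->vlan_id);
	return 0;
}

static const struct ism_ops_stub fake_ops = { .register_dmb = fake_register_dmb };

int main(void)
{
	struct ism_dev_stub dev = { .ops = &fake_ops };
	struct dmb_stub dmb = { .vlan_id = 100, .rgid = 0xfeedULL };

	/* same indirection as: rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb); */
	return dev.ops->register_dmb(&dev, &dmb);
}
```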
|
| D | smc_core.h |
    125  struct smc_link_group *lgr; /* parent link group */   [member]
    327  u32 token, struct smc_link_group *lgr)   [in smc_lgr_find_conn() argument]
    332  node = lgr->conns_all.rb_node;   [in smc_lgr_find_conn()]
    370  void smc_lgr_terminate_sched(struct smc_link_group *lgr);
    383  void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
    385  void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
    395  void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr);
    399  int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
    404  void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type);
    405  void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
    [all …]
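Across the net/smc hits, `lgr` is the back-pointer from a connection (or link) to its parent `struct smc_link_group`; call sites such as smc_cdc.c:177 and smc_clc.c:461 first check that `conn->lgr` is non-NULL and then branch on `lgr->is_smcd` to tell SMC-D (ISM) groups from SMC-R (RDMA) groups. A minimal userspace sketch of that pattern follows, using trimmed stand-in structs rather than the kernel's definitions.

```c
/* Minimal sketch, not the kernel's code: how the conn->lgr back-pointer is
 * typically used.  The field names (lgr, is_smcd) come from the hits above;
 * the structs are trimmed stand-ins for illustration only. */
#include <stdbool.h>
#include <stdio.h>

struct smc_link_group_stub {             /* stand-in for struct smc_link_group */
	bool is_smcd;                    /* SMC-D (ISM) vs. SMC-R (RDMA) group */
};

struct smc_connection_stub {             /* stand-in for struct smc_connection */
	struct smc_link_group_stub *lgr; /* parent link group, may be NULL     */
};

/* Mirrors the guard pattern in smc_cdc.c / smc_clc.c: check that the
 * connection is still attached to a link group before dereferencing it. */
static const char *conn_transport(const struct smc_connection_stub *conn)
{
	if (!conn->lgr)
		return "no link group";
	return conn->lgr->is_smcd ? "SMC-D" : "SMC-R";
}

int main(void)
{
	struct smc_link_group_stub lgr = { .is_smcd = false };
	struct smc_connection_stub conn = { .lgr = &lgr };

	printf("%s\n", conn_transport(&conn)); /* prints "SMC-R" */
	return 0;
}
```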
|
| D | smc_cdc.c |
    177  if (!conn->lgr || (conn->lgr->is_smcd && conn->lgr->peer_shutdown))   [in smc_cdc_get_slot_and_msg_send()]
    180  if (conn->lgr->is_smcd) {   [in smc_cdc_get_slot_and_msg_send()]
    424  struct smc_link_group *lgr;   [in smc_cdc_rx_handler() local]
    433  lgr = smc_get_lgr(link);   [in smc_cdc_rx_handler()]
    434  read_lock_bh(&lgr->conns_lock);   [in smc_cdc_rx_handler()]
    435  conn = smc_lgr_find_conn(ntohl(cdc->token), lgr);   [in smc_cdc_rx_handler()]
    436  read_unlock_bh(&lgr->conns_lock);   [in smc_cdc_rx_handler()]
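The smc_cdc_rx_handler() hits show the receive-side lookup: fetch the link's group, take `conns_lock` for reading, resolve the token carried in the CDC message to a connection, and drop the lock. The kernel keeps the connections in an rb-tree (see `conns_all.rb_node` in the smc_core.h entry); the sketch below is only a userspace analogue with a flat array and a pthread rwlock, and every name in it is a hypothetical stand-in.

```c
/* Hedged analogue of the smc_cdc.c RX lookup: resolve a peer-supplied token
 * to a connection while holding the group's connection lock for reading.
 * group_stub, conn_stub and find_conn_by_token() are made-up stand-ins; the
 * kernel uses an rb-tree, this uses a tiny array for brevity. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CONNS 4

struct conn_stub {
	uint32_t token;		/* alert token carried in each CDC message */
	int      in_use;
};

struct group_stub {
	pthread_rwlock_t conns_lock;	/* read-mostly lookup lock */
	struct conn_stub conns[MAX_CONNS];
};

/* Counterpart of the read_lock_bh()/smc_lgr_find_conn()/read_unlock_bh()
 * sequence: the reader lock keeps the table stable during the search. */
static struct conn_stub *find_conn_by_token(struct group_stub *grp, uint32_t token)
{
	struct conn_stub *found = NULL;

	pthread_rwlock_rdlock(&grp->conns_lock);
	for (int i = 0; i < MAX_CONNS; i++) {
		if (grp->conns[i].in_use && grp->conns[i].token == token) {
			found = &grp->conns[i];
			break;
		}
	}
	pthread_rwlock_unlock(&grp->conns_lock);
	return found;
}

int main(void)
{
	struct group_stub grp = { .conns_lock = PTHREAD_RWLOCK_INITIALIZER };

	grp.conns[0] = (struct conn_stub){ .token = 0xabcd, .in_use = 1 };
	printf("token 0xabcd %s\n",
	       find_conn_by_token(&grp, 0xabcd) ? "found" : "not found");
	return 0;
}
```

Build with `-pthread`; the design point is simply that the token, not a socket pointer, is what arrives on the wire, so the group must provide a locked lookup structure.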
|
| D | smc_tx.c |
    231  queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work,   [in smc_tx_sendmsg()]
    261  rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);   [in smcd_tx_ism_write()]
    271  struct smc_link_group *lgr = conn->lgr;   [in smc_tx_rdma_write() local]
    278  lgr->rtokens[conn->rtoken_idx][link->link_idx].dma_addr +   [in smc_tx_rdma_write()]
    283  rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][link->link_idx].rkey;   [in smc_tx_rdma_write()]
    459  if (conn->lgr->is_smcd)   [in smc_tx_rdma_writes()]
    502  mod_delayed_work(conn->lgr->tx_wq, &conn->tx_work,   [in smcr_tx_sndbuf_nonempty()]
    562  if (conn->lgr->is_smcd)   [in smc_tx_sndbuf_nonempty()]
    626  queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work,   [in smc_tx_consumer_update()]
|
| D | af_smc.c |
    350  struct smc_link_group *lgr = link->lgr;   [in smcr_lgr_reg_rmbs() local]
    353  rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);   [in smcr_lgr_reg_rmbs()]
    359  mutex_lock(&lgr->llc_conf_mutex);   [in smcr_lgr_reg_rmbs()]
    361  if (!smc_link_active(&lgr->lnk[i]))   [in smcr_lgr_reg_rmbs()]
    363  rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc);   [in smcr_lgr_reg_rmbs()]
    376  mutex_unlock(&lgr->llc_conf_mutex);   [in smcr_lgr_reg_rmbs()]
    377  smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);   [in smcr_lgr_reg_rmbs()]
    388  qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,   [in smcr_clnt_conf_first_link()]
    399  smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);   [in smcr_clnt_conf_first_link()]
    421  smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);   [in smcr_clnt_conf_first_link()]
    [all …]
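smcr_lgr_reg_rmbs() in the af_smc.c hits registers a buffer on every usable link of the group while holding `llc_conf_mutex`, skipping links that are not active and stopping on the first error. A rough userspace sketch of that loop shape follows; `MAX_LINKS`, the stub structs, and `register_buf_on_link()` are made-up stand-ins, not the kernel interface.

```c
/* Hedged sketch of the loop shape in smcr_lgr_reg_rmbs(): walk every link
 * slot of the group under a configuration mutex, skip inactive links, and
 * bail out on the first registration failure.  All names are stand-ins. */
#include <pthread.h>
#include <stdio.h>

#define MAX_LINKS 3	/* hypothetical slot count for this sketch */

struct link_stub {
	int active;
	int id;
};

struct link_group_stub {
	pthread_mutex_t conf_mutex;	/* analogue of lgr->llc_conf_mutex */
	struct link_stub lnk[MAX_LINKS];
};

static int register_buf_on_link(struct link_stub *lnk)
{
	printf("registered buffer on link %d\n", lnk->id);
	return 0;	/* pretend registration always succeeds */
}

static int reg_buf_on_all_links(struct link_group_stub *lgr)
{
	int rc = 0;

	pthread_mutex_lock(&lgr->conf_mutex);
	for (int i = 0; i < MAX_LINKS; i++) {
		if (!lgr->lnk[i].active)
			continue;	/* mirrors the !smc_link_active() check */
		rc = register_buf_on_link(&lgr->lnk[i]);
		if (rc)
			break;
	}
	pthread_mutex_unlock(&lgr->conf_mutex);
	return rc;
}

int main(void)
{
	struct link_group_stub lgr = {
		.conf_mutex = PTHREAD_MUTEX_INITIALIZER,
		.lnk = { { .active = 1, .id = 0 }, { .active = 0, .id = 1 },
			 { .active = 1, .id = 2 } },
	};

	return reg_buf_on_all_links(&lgr);
}
```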
|
| D | smc_ism.h |
     47  int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size,
     52  int smc_ism_signal_shutdown(struct smc_link_group *lgr);
|
| D | smc_clc.c |
    435  smc->conn.lgr->sync_err = 1;   [in smc_clc_wait_msg()]
    436  smc_lgr_terminate_sched(smc->conn.lgr);   [in smc_clc_wait_msg()]
    461  if ((!smc->conn.lgr || !smc->conn.lgr->is_smcd) &&   [in smc_clc_send_decline()]
    653  if (conn->lgr->is_smcd) {   [in smc_clc_send_confirm_accept()]
    658  clc->d0.gid = conn->lgr->smcd->local_gid;   [in smc_clc_send_confirm_accept()]
    662  memcpy(&clc->d0.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);   [in smc_clc_send_confirm_accept()]
    668  clc_v2->chid = htons(smc_ism_get_chid(conn->lgr->smcd));   [in smc_clc_send_confirm_accept()]
    669  smc_ism_get_system_eid(conn->lgr->smcd, &eid);   [in smc_clc_send_confirm_accept()]
|
| D | smc_pnet.h |
     65  void smc_pnet_find_alt_roce(struct smc_link_group *lgr,
|
| D | smc.h |
    134  struct smc_link_group *lgr; /* link group of connection */   [member]
|
| D | smc_wr.c |
    199  struct smc_link_group *lgr = smc_get_lgr(link);   [in smc_wr_tx_get_free_slot() local]
    208  if (in_softirq() || lgr->terminating) {   [in smc_wr_tx_get_free_slot()]
    216  lgr->terminating ||   [in smc_wr_tx_get_free_slot()]
|
| D | smc_ib.c |
    114  struct smc_link_group *lgr = smc_get_lgr(lnk);   [in smc_ib_ready_link() local]
    134  if (lgr->role == SMC_SERV) {   [in smc_ib_ready_link()]
|
| /Linux-v5.10/arch/s390/purgatory/ |
| D | head.S |
     37  lgr %r0,\dst
     38  lgr %r1,\len
     39  lgr %r2,\src
     40  lgr %r3,\len
     49  lgr %r4,\len
    137  lgr %r8,%r13
    158  lgr %r7,%r9
    188  lgr %r12,%r7
    189  lgr %r11,%r9
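In this file and the remaining arch/s390 hits below (mem.S, text_dma.S, head_kdump.S, relocate_kernel.S, entry.S, reipl.S), `lgr` is not a C identifier at all: it is the s390 "load register" instruction, a plain 64-bit general-purpose-register copy, which is why lines such as `lgr %r2,%r11 # pass pointer to pt_regs` simply move a value into an argument register. The small sketch below demonstrates the instruction through GCC inline assembly; it only builds when targeting s390x, and the `d` constraint requests a general register.

```c
/* Hedged sketch: "lgr dst,src" copies a 64-bit GPR, nothing more.  This
 * inline-asm wrapper only compiles with GCC/Clang targeting s390x. */
#include <stdio.h>

static unsigned long copy_via_lgr(unsigned long src)
{
	unsigned long dst;

	/* dst = src, exactly what the "lgr %r11,%r2" style lines above do */
	asm("lgr %0,%1" : "=d" (dst) : "d" (src));
	return dst;
}

int main(void)
{
	printf("%lx\n", copy_via_lgr(0x1234abcdUL));	/* prints 1234abcd */
	return 0;
}
```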
|
| /Linux-v5.10/arch/s390/lib/ |
| D | mem.S |
     20  lgr %r1,%r2
     78  lgr %r1,%r2
     91  lgr %r1,%r2
    129  lgr %r1,%r2
    163  lgr %r1,%r2
|
| /Linux-v5.10/arch/s390/boot/ |
| D | text_dma.S |
     41  lgr %r1,%r2
     42  lgr %r2,%r3
     43  lgr %r3,%r4
     61  lgr %r1,%r2
|
| D | head_kdump.S |
     62  lgr %r11,%r2 # Save kdump base address
     63  lgr %r12,%r2
|
| /Linux-v5.10/arch/s390/kernel/ |
| D | relocate_kernel.S |
     38  lgr %r6,%r5 # r6 = r5
     45  lgr %r2,%r5 # move it into the right register,
     54  lgr %r8,%r5 # r8 = r5
|
| D | entry.S |
     95  lgr %r14,%r15
    117  lgr %r14,%r9
    420  lgr %r2,%r11
    507  lgr %r2,%r11 # pass pointer to pt_regs
    521  lgr %r2,%r11 # pass pointer to pt_regs
    530  lgr %r2,%r11 # pass pointer to pt_regs
    539  lgr %r2,%r11 # pass pointer to pt_regs
    557  lgr %r2,%r11 # pass pointer to pt_regs
    575  lgr %r2,%r11 # pass pointer to pt_regs
    593  lgr %r2,%r11 # pass pointer to pt_regs
    [all …]
|
| D | reipl.S |
     73  lgr %r9,%r2
     74  lgr %r2,%r3
|
| D | Makefile |
     39  obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o
|
| /Linux-v5.10/fs/nfs/filelayout/ |
| D | filelayout.c |
    601  struct nfs4_layoutget_res *lgr,   [in filelayout_check_layout() argument]
    609  if (lgr->range.offset != 0 ||   [in filelayout_check_layout()]
    610  lgr->range.length != NFS4_MAX_UINT64) {   [in filelayout_check_layout()]
    616  if (fl->pattern_offset > lgr->range.offset) {   [in filelayout_check_layout()]
    652  struct nfs4_layoutget_res *lgr,   [in filelayout_decode_layout() argument]
    668  xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len);   [in filelayout_decode_layout()]
    767  struct nfs4_layoutget_res *lgr,   [in filelayout_alloc_lseg() argument]
    778  rc = filelayout_decode_layout(layoutid, fl, lgr, gfp_flags);   [in filelayout_alloc_lseg()]
    779  if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, gfp_flags)) {   [in filelayout_alloc_lseg()]
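In the fs/nfs hits, `lgr` is something different again: a pointer to `struct nfs4_layoutget_res`, the result of a LAYOUTGET call. The filelayout_check_layout() lines above reject any layout that is not whole-file, i.e. whose range does not start at offset 0 and cover NFS4_MAX_UINT64 bytes. A minimal sketch of that check with a trimmed stand-in struct (not the kernel's definition):

```c
/* Hedged sketch of the whole-file check quoted from filelayout_check_layout().
 * layoutget_res_stub is a trimmed stand-in for struct nfs4_layoutget_res. */
#include <stdint.h>
#include <stdio.h>

#define NFS4_MAX_UINT64 UINT64_MAX	/* all-ones, matching the kernel's ~(u64)0 */

struct layoutget_res_stub {
	struct {
		uint64_t offset;
		uint64_t length;
	} range;
};

static int layout_covers_whole_file(const struct layoutget_res_stub *lgr)
{
	return lgr->range.offset == 0 && lgr->range.length == NFS4_MAX_UINT64;
}

int main(void)
{
	struct layoutget_res_stub lgr = { .range = { 0, NFS4_MAX_UINT64 } };

	printf("whole file: %d\n", layout_covers_whole_file(&lgr));	/* prints 1 */
	return 0;
}
```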
|
| /Linux-v5.10/fs/nfs/blocklayout/ |
| D | blocklayout.c |
    668  bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,   [in bl_alloc_lseg() argument]
    672  .mode = lgr->range.iomode,   [in bl_alloc_lseg()]
    673  .start = lgr->range.offset >> SECTOR_SHIFT,   [in bl_alloc_lseg()]
    674  .inval = lgr->range.offset >> SECTOR_SHIFT,   [in bl_alloc_lseg()]
    675  .cowread = lgr->range.offset >> SECTOR_SHIFT,   [in bl_alloc_lseg()]
    699  lgr->layoutp->pages, lgr->layoutp->len);   [in bl_alloc_lseg()]
    720  if (lgr->range.offset + lgr->range.length !=   [in bl_alloc_lseg()]
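The bl_alloc_lseg() hits convert the byte-based range from the layoutget result into 512-byte sectors by shifting right by SECTOR_SHIFT (9 in the kernel). A one-function sketch of that conversion:

```c
/* Hedged sketch of the byte-to-sector conversion seen in bl_alloc_lseg(). */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors, as in the kernel */

static uint64_t bytes_to_sectors(uint64_t byte_off)
{
	return byte_off >> SECTOR_SHIFT;
}

int main(void)
{
	/* a 1 MiB byte offset corresponds to sector 2048 */
	printf("%llu\n", (unsigned long long)bytes_to_sectors(1024 * 1024));
	return 0;
}
```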
|