/Linux-v5.15/drivers/thunderbolt/

xdomain.c
    139  int tb_xdomain_response(struct tb_xdomain *xd, const void *response,  in tb_xdomain_response() argument
    142  return __tb_xdomain_response(xd->tb->ctl, response, size, type);  in tb_xdomain_response()
    191  int tb_xdomain_request(struct tb_xdomain *xd, const void *request,  in tb_xdomain_request() argument
    196  return __tb_xdomain_request(xd->tb->ctl, request, request_size,  in tb_xdomain_request()
    392  struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req)  in tb_xdp_properties_response() argument
    404  if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) {  in tb_xdp_properties_response()
    405  tb_xdp_error_response(ctl, xd->route, sequence,  in tb_xdp_properties_response()
    410  mutex_lock(&xd->lock);  in tb_xdp_properties_response()
    412  if (req->offset >= xd->local_property_block_len) {  in tb_xdp_properties_response()
    413  mutex_unlock(&xd->lock);  in tb_xdp_properties_response()
    [all …]
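
tb_xdomain_request() and tb_xdomain_response() are the entry points service
drivers use to speak the XDomain protocol: the first sends a message and blocks
for the peer's reply, the second answers a request received from the peer. A
minimal sketch of the request side, assuming the v5.15 signature from
include/linux/thunderbolt.h; struct my_req/my_resp and my_xdomain_ping() are
hypothetical, and the packet-type constants mirror what the Thunderbolt
networking driver passes:

    /* Hedged sketch: send a request over an XDomain link and wait for the
     * reply.  Real users (e.g. drivers/net/thunderbolt.c) define their own
     * wire-format structs in place of my_req/my_resp. */
    static int my_xdomain_ping(struct tb_xdomain *xd)
    {
            struct my_req request = {};     /* hypothetical request payload */
            struct my_resp reply;

            return tb_xdomain_request(xd, &request, sizeof(request),
                                      TB_CFG_PKG_XDOMAIN_RESP,  /* request type */
                                      &reply, sizeof(reply),
                                      TB_CFG_PKG_XDOMAIN_RESP,  /* reply type */
                                      500);                     /* timeout, ms */
    }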
|
icm.c
    560  static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,  in icm_fr_approve_xdomain_paths() argument
    570  request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;  in icm_fr_approve_xdomain_paths()
    571  memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));  in icm_fr_approve_xdomain_paths()
    590  static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,  in icm_fr_disconnect_xdomain_paths() argument
    597  phy_port = tb_phy_port_from_link(xd->link);  in icm_fr_disconnect_xdomain_paths()
    685  struct tb_xdomain *xd;  in add_xdomain() local
    689  xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);  in add_xdomain()
    690  if (!xd)  in add_xdomain()
    693  xd->link = link;  in add_xdomain()
    694  xd->depth = depth;  in add_xdomain()
    [all …]
|
dma_test.c
    94   struct tb_xdomain *xd;  member
    125  tb_xdomain_release_in_hopid(dt->xd, dt->rx_hopid);  in dma_test_free_rings()
    130  tb_xdomain_release_out_hopid(dt->xd, dt->tx_hopid);  in dma_test_free_rings()
    139  struct tb_xdomain *xd = dt->xd;  in dma_test_start_rings() local
    152  ring = tb_ring_alloc_tx(xd->tb->nhi, -1, DMA_TEST_TX_RING_SIZE,  in dma_test_start_rings()
    160  ret = tb_xdomain_alloc_out_hopid(xd, -1);  in dma_test_start_rings()
    175  ring = tb_ring_alloc_rx(xd->tb->nhi, -1, DMA_TEST_RX_RING_SIZE,  in dma_test_start_rings()
    185  ret = tb_xdomain_alloc_in_hopid(xd, -1);  in dma_test_start_rings()
    194  ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,  in dma_test_start_rings()
    220  ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,  in dma_test_stop_rings()
    [all …]
|
tb.c
    186   struct tb_xdomain *xd;  in tb_scan_xdomain() local
    193   xd = tb_xdomain_find_by_route(tb, route);  in tb_scan_xdomain()
    194   if (xd) {  in tb_scan_xdomain()
    195   tb_xdomain_put(xd);  in tb_scan_xdomain()
    199   xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,  in tb_scan_xdomain()
    201   if (xd) {  in tb_scan_xdomain()
    202   tb_port_at(route, sw)->xdomain = xd;  in tb_scan_xdomain()
    204   tb_xdomain_add(xd);  in tb_scan_xdomain()
    1083  static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,  in tb_approve_xdomain_paths() argument
    1092  sw = tb_to_switch(xd->dev.parent);  in tb_approve_xdomain_paths()
    [all …]
|
domain.c
    805  int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,  in tb_domain_approve_xdomain_paths() argument
    812  return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path,  in tb_domain_approve_xdomain_paths()
    832  int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,  in tb_domain_disconnect_xdomain_paths() argument
    839  return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path,  in tb_domain_disconnect_xdomain_paths()
    845  struct tb_xdomain *xd;  in disconnect_xdomain() local
    849  xd = tb_to_xdomain(dev);  in disconnect_xdomain()
    850  if (xd && xd->tb == tb)  in disconnect_xdomain()
    851  ret = tb_xdomain_disable_all_paths(xd);  in disconnect_xdomain()
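
The tb_domain_* helpers above are thin wrappers that dispatch through the
connection-manager ops table, so the firmware CM (icm.c) and the software CM
(tb.c) can each plug in their own path handling. A sketch of the pattern, with
the ops struct abridged to the two hooks seen here (the full struct tb_cm_ops
lives in drivers/thunderbolt/tb.h):

    /* Abridged sketch of the cm_ops dispatch table; the real struct
     * tb_cm_ops carries many more hooks. */
    struct tb_cm_ops_sketch {
            int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd,
                                         int transmit_path, int transmit_ring,
                                         int receive_path, int receive_ring);
            int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd,
                                            int transmit_path, int transmit_ring,
                                            int receive_path, int receive_ring);
    };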
|
/Linux-v5.15/fs/jffs2/

xattr.c
    71  static int is_xattr_datum_unchecked(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)  in is_xattr_datum_unchecked() argument
    77  for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {  in is_xattr_datum_unchecked()
    87  static void unload_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)  in unload_xattr_datum() argument
    90  D1(dbg_xattr("%s: xid=%u, version=%u\n", __func__, xd->xid, xd->version));  in unload_xattr_datum()
    91  if (xd->xname) {  in unload_xattr_datum()
    92  c->xdatum_mem_usage -= (xd->name_len + 1 + xd->value_len);  in unload_xattr_datum()
    93  kfree(xd->xname);  in unload_xattr_datum()
    96  list_del_init(&xd->xindex);  in unload_xattr_datum()
    97  xd->hashkey = 0;  in unload_xattr_datum()
    98  xd->xname = NULL;  in unload_xattr_datum()
    [all …]
|
malloc.c
    279  struct jffs2_xattr_datum *xd;  in jffs2_alloc_xattr_datum() local
    280  xd = kmem_cache_zalloc(xattr_datum_cache, GFP_KERNEL);  in jffs2_alloc_xattr_datum()
    281  dbg_memalloc("%p\n", xd);  in jffs2_alloc_xattr_datum()
    282  if (!xd)  in jffs2_alloc_xattr_datum()
    285  xd->class = RAWNODE_CLASS_XATTR_DATUM;  in jffs2_alloc_xattr_datum()
    286  xd->node = (void *)xd;  in jffs2_alloc_xattr_datum()
    287  INIT_LIST_HEAD(&xd->xindex);  in jffs2_alloc_xattr_datum()
    288  return xd;  in jffs2_alloc_xattr_datum()
    291  void jffs2_free_xattr_datum(struct jffs2_xattr_datum *xd)  in jffs2_free_xattr_datum() argument
    293  dbg_memalloc("%p\n", xd);  in jffs2_free_xattr_datum()
    [all …]
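
jffs2_alloc_xattr_datum() draws zeroed objects from a dedicated slab cache
rather than plain kmalloc(), the usual kernel pattern for fixed-size
descriptors that are allocated and freed often. The cache itself is created
once at init time; roughly (a sketch, flags and alignment assumed):

    /* One-time setup behind the kmem_cache_zalloc() call above; jffs2's
     * real call sits in its init path in malloc.c. */
    xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum",
                                          sizeof(struct jffs2_xattr_datum),
                                          0, 0, NULL);
    if (!xattr_datum_cache)
            return -ENOMEM;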
|
xattr.h
    59  struct jffs2_xattr_datum *xd;  /* reference to jffs2_xattr_datum */  member
    84  extern int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd,
    89  extern void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd);
|
scan.c
    332  struct jffs2_xattr_datum *xd;  in jffs2_scan_xattr_node() local
    358  xd = jffs2_setup_xattr_datum(c, xid, version);  in jffs2_scan_xattr_node()
    359  if (IS_ERR(xd))  in jffs2_scan_xattr_node()
    360  return PTR_ERR(xd);  in jffs2_scan_xattr_node()
    362  if (xd->version > version) {  in jffs2_scan_xattr_node()
    365  raw->next_in_ino = xd->node->next_in_ino;  in jffs2_scan_xattr_node()
    366  xd->node->next_in_ino = raw;  in jffs2_scan_xattr_node()
    368  xd->version = version;  in jffs2_scan_xattr_node()
    369  xd->xprefix = rx->xprefix;  in jffs2_scan_xattr_node()
    370  xd->name_len = rx->name_len;  in jffs2_scan_xattr_node()
    [all …]
|
summary.c
    492  struct jffs2_xattr_datum *xd;  in jffs2_sum_process_sum_data() local
    501  xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid),  in jffs2_sum_process_sum_data()
    503  if (IS_ERR(xd))  in jffs2_sum_process_sum_data()
    504  return PTR_ERR(xd);  in jffs2_sum_process_sum_data()
    505  if (xd->version > je32_to_cpu(spx->version)) {  in jffs2_sum_process_sum_data()
    510  raw->next_in_ino = xd->node->next_in_ino;  in jffs2_sum_process_sum_data()
    511  xd->node->next_in_ino = raw;  in jffs2_sum_process_sum_data()
    513  xd->version = je32_to_cpu(spx->version);  in jffs2_sum_process_sum_data()
    515  PAD(je32_to_cpu(spx->totlen)), (void *)xd);  in jffs2_sum_process_sum_data()
|
/Linux-v5.15/arch/powerpc/sysdev/xive/

common.c
    207  static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)  in xive_esb_read() argument
    211  if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)  in xive_esb_read()
    214  if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)  in xive_esb_read()
    215  val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);  in xive_esb_read()
    217  val = in_be64(xd->eoi_mmio + offset);  in xive_esb_read()
    222  static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)  in xive_esb_write() argument
    224  if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)  in xive_esb_write()
    225  xive_ops->esb_rw(xd->hw_irq, offset, data, 1);  in xive_esb_write()
    227  out_be64(xd->eoi_mmio + offset, data);  in xive_esb_write()
    294  struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);  in xmon_xive_get_irq_config() local
    [all …]
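
xive_esb_read()/xive_esb_write() hide the two access paths to a source's Event
State Buffer: a hypervisor ESB call when the source is flagged H_INT_ESB, and
direct MMIO through eoi_mmio otherwise. Callers drive the source's PQ state
machine purely by the offset they pass; a sketch of masking a source this way
(offset constant from arch/powerpc/include/asm/xive-regs.h, function name
illustrative):

    /* Sketch: a load at the SET_PQ_01 offset atomically sets PQ=01 (masked)
     * and returns the previous PQ bits. */
    static void example_mask_source(struct xive_irq_data *xd)
    {
            u8 prev = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);

            pr_debug("previous PQ state: %02x\n", prev);
    }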
|
/Linux-v5.15/drivers/dma/

uniphier-xdmac.c
    89   struct uniphier_xdmac_desc *xd;  member
    132  struct uniphier_xdmac_desc *xd)  in uniphier_xdmac_chan_start() argument
    139  src_addr = xd->nodes[xd->cur_node].src;  in uniphier_xdmac_chan_start()
    140  dst_addr = xd->nodes[xd->cur_node].dst;  in uniphier_xdmac_chan_start()
    141  its = xd->nodes[xd->cur_node].burst_size;  in uniphier_xdmac_chan_start()
    142  tnum = xd->nodes[xd->cur_node].nr_burst;  in uniphier_xdmac_chan_start()
    148  if (xd->dir == DMA_DEV_TO_MEM) {  in uniphier_xdmac_chan_start()
    157  if (xd->dir == DMA_MEM_TO_DEV) {  in uniphier_xdmac_chan_start()
    219  struct uniphier_xdmac_desc *xd;  in uniphier_xdmac_start() local
    221  xd = uniphier_xdmac_next_desc(xc);  in uniphier_xdmac_start()
    [all …]
|
/Linux-v5.15/include/linux/

thunderbolt.h
    257  int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd);
    258  void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd);
    259  int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid);
    260  void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid);
    261  int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid);
    262  void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid);
    263  int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
    266  int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
    270  static inline int tb_xdomain_disable_all_paths(struct tb_xdomain *xd)  in tb_xdomain_disable_all_paths() argument
    272  return tb_xdomain_disable_paths(xd, -1, -1, -1, -1);  in tb_xdomain_disable_all_paths()
    [all …]
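
These declarations cover the whole lifecycle a high-bandwidth XDomain service
follows: reserve input/output hop IDs, program the DMA paths with
tb_xdomain_enable_paths(), and undo everything with the matching
disable/release calls (dma_test.c above walks exactly this sequence). A
condensed sketch assuming the v5.15 signatures; ring allocation via
tb_ring_alloc_tx()/tb_ring_alloc_rx() and error unwinding are elided:

    /* Sketch of pairing hop IDs with NHI rings; example_enable() is
     * illustrative, not a kernel function. */
    static int example_enable(struct tb_xdomain *xd, struct tb_ring *tx_ring,
                              struct tb_ring *rx_ring)
    {
            int tx_hopid, rx_hopid;

            tx_hopid = tb_xdomain_alloc_out_hopid(xd, -1);  /* -1: any free ID */
            if (tx_hopid < 0)
                    return tx_hopid;

            rx_hopid = tb_xdomain_alloc_in_hopid(xd, -1);
            if (rx_hopid < 0)
                    return rx_hopid;    /* real code would release tx_hopid */

            return tb_xdomain_enable_paths(xd, tx_hopid, tx_ring->hop,
                                           rx_hopid, rx_ring->hop);
    }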
|
/Linux-v5.15/arch/powerpc/kvm/

book3s_xive_template.c
    57  static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)  in GLUE()
    61  if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)  in GLUE()
    64  val =__x_readq(__x_eoi_page(xd) + offset);  in GLUE()
    72  static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)  in GLUE()
    75  if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)  in GLUE()
    76  __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);  in GLUE()
    77  else if (xd->flags & XIVE_IRQ_FLAG_LSI) {  in GLUE()
    83  __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);  in GLUE()
    96  eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);  in GLUE()
    99  if ((eoi_val & 1) && __x_trig_page(xd))  in GLUE()
    [all …]
|
book3s_hv_rm_xive.c
    40  #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_page))  argument
    41  #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_page))  argument
|
book3s_xive.c
    46   #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))  argument
    47   #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))  argument
    220  static bool xive_irq_trigger(struct xive_irq_data *xd)  in xive_irq_trigger() argument
    223  if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))  in xive_irq_trigger()
    227  if (WARN_ON(!xd->trig_mmio))  in xive_irq_trigger()
    230  out_be64(xd->trig_mmio, 0);  in xive_irq_trigger()
    315  struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);  in kvmppc_xive_attach_escalation() local
    317  xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);  in kvmppc_xive_attach_escalation()
    318  vcpu->arch.xive_esc_raddr = xd->eoi_page;  in kvmppc_xive_attach_escalation()
    319  vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;  in kvmppc_xive_attach_escalation()
    [all …]
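
book3s_hv_rm_xive.c and book3s_xive.c bind __x_eoi_page()/__x_trig_page() to
different fields (real-mode physical pages vs. ioremapped MMIO) and then each
include book3s_xive_template.c, so every ESB helper is compiled twice under a
GLUE(X_PFX, ...) name. A self-contained miniature of that token-pasting trick
(all names illustrative):

    /* One body, two builds: undef/define here stands in for including the
     * template from two different .c files. */
    #define XGLUE(a, b) a##b
    #define GLUE(a, b) XGLUE(a, b)  /* expand X_PFX before pasting */

    #define X_PFX rm_
    #define ACCESSOR(v) ((v) | 1)   /* stand-in for the real-mode accessor */
    static int GLUE(X_PFX, esb_load)(int v) { return ACCESSOR(v); }
    #undef ACCESSOR
    #undef X_PFX

    #define X_PFX virt_
    #define ACCESSOR(v) ((v) | 2)   /* stand-in for the virt-mode accessor */
    static int GLUE(X_PFX, esb_load)(int v) { return ACCESSOR(v); }
    #undef ACCESSOR
    #undef X_PFX
    /* Result: rm_esb_load() and virt_esb_load() share one source body. */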
|
book3s_xive_native.c
    30   static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)  in xive_vm_esb_load() argument
    40   val = in_be64(xd->eoi_mmio + offset);  in xive_vm_esb_load()
    236  struct xive_irq_data *xd;  in xive_native_esb_fault() local
    264  kvmppc_xive_select_irq(state, &hw_num, &xd);  in xive_native_esb_fault()
    272  page = page_offset % 2 ? xd->eoi_page : xd->trig_page;  in xive_native_esb_fault()
    515  struct xive_irq_data *xd;  in kvmppc_xive_native_sync_source() local
    533  kvmppc_xive_select_irq(state, &hw_num, &xd);  in kvmppc_xive_native_sync_source()
    863  struct xive_irq_data *xd;  in kvmppc_xive_native_sync_sources() local
    886  kvmppc_xive_select_irq(state, &hw_num, &xd);  in kvmppc_xive_native_sync_sources()
|
/Linux-v5.15/drivers/net/

thunderbolt.c
    178  struct tb_xdomain *xd;  member
    236  struct tb_xdomain *xd = net->xd;  in tbnet_login_response() local
    239  tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,  in tbnet_login_response()
    240  xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),  in tbnet_login_response()
    245  return tb_xdomain_response(xd, &reply, sizeof(reply),  in tbnet_login_response()
    253  struct tb_xdomain *xd = net->xd;  in tbnet_login_request() local
    256  tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,  in tbnet_login_request()
    257  xd->remote_uuid, TBIP_LOGIN, sizeof(request),  in tbnet_login_request()
    263  return tb_xdomain_request(xd, &request, sizeof(request),  in tbnet_login_request()
    273  struct tb_xdomain *xd = net->xd;  in tbnet_logout_response() local
    [all …]
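
tbnet_login_request()/tbnet_login_response() are a concrete user of the
xdomain.c API above: the driver addresses each packet with the local and
remote UUIDs from its struct tb_xdomain and echoes the peer's sequence number
back in replies. A trimmed sketch of the response side, with my_reply and
my_fill_header() as hypothetical stand-ins for the driver's real wire structs
and tbnet_fill_header():

    /* Sketch of answering an incoming XDomain message; hypothetical
     * helpers, modeled on tbnet_login_response() above. */
    static int example_respond(struct tb_xdomain *xd, u64 route, u8 sequence)
    {
            struct my_reply reply = {};

            my_fill_header(&reply.hdr, route, sequence,
                           xd->local_uuid, xd->remote_uuid);
            return tb_xdomain_response(xd, &reply, sizeof(reply),
                                       TB_CFG_PKG_XDOMAIN_RESP);
    }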
|
/Linux-v5.15/arch/powerpc/platforms/powernv/

vas.c
    54   struct xive_irq_data *xd;  in init_vas_instance() local
    124  xd = irq_get_handler_data(vinst->virq);  in init_vas_instance()
    125  if (!xd) {  in init_vas_instance()
    131  vinst->irq_port = xd->trig_page;  in init_vas_instance()
|
/Linux-v5.15/drivers/gpu/drm/nouveau/nvkm/subdev/fb/

gddr5.c
    37  int pd, lf, xd, vh, vr, vo, l3;  in nvkm_gddr5_calc() local
    41  xd = !ram->next->bios.ramcfg_DLLoff;  in nvkm_gddr5_calc()
    81  ram->mr[1] |= (xd & 0x01) << 7;  in nvkm_gddr5_calc()
|
/Linux-v5.15/drivers/misc/ocxl/

afu_irq.c
    198  struct xive_irq_data *xd;  in ocxl_afu_irq_get_addr() local
    205  xd = irq_get_handler_data(irq->virq);  in ocxl_afu_irq_get_addr()
    206  addr = xd ? xd->trig_page : 0;  in ocxl_afu_irq_get_addr()
|
/Linux-v5.15/tools/testing/selftests/arm64/fp/

sve-test.S
    227  .macro _adrz xd, xn, nrtmp
    228  ldr \xd, =zref
    230  madd \xd, x\nrtmp, \xn, \xd
    234  .macro _adrp xd, xn, nrtmp
    235  ldr \xd, =pref
    239  madd \xd, x\nrtmp, \xn, \xd
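
Both macros resolve the address of register N's reference pattern: load the
buffer base, then let madd fold in N times the per-register stride (the elided
lines between the hits load the stride, which depends on the vector length).
Since "madd \xd, x\nrtmp, \xn, \xd" computes xd = nrtmp * xn + xd, the
arithmetic in C terms is simply (names illustrative):

    /* C rendering of the madd-based indexing used by _adrz/_adrp/_adrv:
     * address of register reg_nr's slot in a per-register buffer. */
    static inline unsigned char *ref_addr(unsigned char *base,
                                          unsigned long stride,
                                          unsigned long reg_nr)
    {
            return base + stride * reg_nr;
    }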
|
fpsimd-test.S
    203  .macro _adrv xd, xn, nrtmp
    204  ldr \xd, =vref
    206  madd \xd, x\nrtmp, \xn, \xd
|
/Linux-v5.15/fs/jfs/

jfs_dtree.h
    20  pxd_t xd;  member
    53  pxd_t xd;  /* 8: child extent descriptor */  member
|
/Linux-v5.15/drivers/staging/rts5208/

Makefile
    5  rtsx_card.o general.o sd.o xd.o ms.o spi.o
|