/Linux-v6.1/drivers/video/fbdev/omap/lcd_mipid.c
     54  static void mipid_transfer(struct mipid_device *md, int cmd, const u8 *wbuf,    in mipid_transfer() argument
     62  BUG_ON(md->spi == NULL);    in mipid_transfer()
    103  r = spi_sync(md->spi, &m);    in mipid_transfer()
    105  dev_dbg(&md->spi->dev, "spi_sync %d\n", r);    in mipid_transfer()
    111  static inline void mipid_cmd(struct mipid_device *md, int cmd)    in mipid_cmd() argument
    113  mipid_transfer(md, cmd, NULL, 0, NULL, 0);    in mipid_cmd()
    116  static inline void mipid_write(struct mipid_device *md,    in mipid_write() argument
    119  mipid_transfer(md, reg, buf, len, NULL, 0);    in mipid_write()
    122  static inline void mipid_read(struct mipid_device *md,    in mipid_read() argument
    125  mipid_transfer(md, reg, NULL, 0, buf, len);    in mipid_read()
    [all …]

/Linux-v6.1/drivers/md/dm.c
    307  int dm_deleting_md(struct mapped_device *md)    in dm_deleting_md() argument
    309  return test_bit(DMF_DELETING, &md->flags);    in dm_deleting_md()
    314  struct mapped_device *md;    in dm_blk_open() local
    318  md = bdev->bd_disk->private_data;    in dm_blk_open()
    319  if (!md)    in dm_blk_open()
    322  if (test_bit(DMF_FREEING, &md->flags) ||    in dm_blk_open()
    323  dm_deleting_md(md)) {    in dm_blk_open()
    324  md = NULL;    in dm_blk_open()
    328  dm_get(md);    in dm_blk_open()
    329  atomic_inc(&md->open_count);    in dm_blk_open()
    [all …]

/Linux-v6.1/drivers/md/dm-ima.c
     68  static int dm_ima_alloc_and_copy_name_uuid(struct mapped_device *md, char **dev_name,    in dm_ima_alloc_and_copy_name_uuid() argument
     84  r = dm_copy_name_and_uuid(md, *dev_name, *dev_uuid);    in dm_ima_alloc_and_copy_name_uuid()
    103  static int dm_ima_alloc_and_copy_device_data(struct mapped_device *md, char **device_data,    in dm_ima_alloc_and_copy_device_data() argument
    109  r = dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio);    in dm_ima_alloc_and_copy_device_data()
    121  dev_name, dev_uuid, md->disk->major, md->disk->first_minor,    in dm_ima_alloc_and_copy_device_data()
    122  md->disk->minors, num_targets);    in dm_ima_alloc_and_copy_device_data()
    150  static int dm_ima_alloc_and_copy_capacity_str(struct mapped_device *md, char **capacity_str,    in dm_ima_alloc_and_copy_capacity_str() argument
    155  capacity = get_capacity(md->disk);    in dm_ima_alloc_and_copy_capacity_str()
    170  void dm_ima_reset_data(struct mapped_device *md)    in dm_ima_reset_data() argument
    172  memset(&(md->ima), 0, sizeof(md->ima));    in dm_ima_reset_data()
    [all …]

/Linux-v6.1/drivers/md/dm-era-target.c
     34  struct writeset_metadata md;    member
     94  ws->md.nr_bits = nr_blocks;    in writeset_init()
     95  r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);    in writeset_init()
    139  r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);    in writeset_test_and_set()
    301  static int superblock_read_lock(struct era_metadata *md,    in superblock_read_lock() argument
    304  return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION,    in superblock_read_lock()
    308  static int superblock_lock_zero(struct era_metadata *md,    in superblock_lock_zero() argument
    311  return dm_bm_write_lock_zero(md->bm, SUPERBLOCK_LOCATION,    in superblock_lock_zero()
    315  static int superblock_lock(struct era_metadata *md,    in superblock_lock() argument
    318  return dm_bm_write_lock(md->bm, SUPERBLOCK_LOCATION,    in superblock_lock()
    [all …]

/Linux-v6.1/drivers/md/dm-rq.c
     18  struct mapped_device *md;    member
     58  int dm_request_based(struct mapped_device *md)    in dm_request_based() argument
     60  return queue_is_mq(md->queue);    in dm_request_based()
    126  static void rq_end_stats(struct mapped_device *md, struct request *orig)    in rq_end_stats() argument
    128  if (unlikely(dm_stats_used(&md->stats))) {    in rq_end_stats()
    131  dm_stats_account_io(&md->stats, rq_data_dir(orig),    in rq_end_stats()
    138  * Don't touch any member of the md after calling this function because
    139  * the md may be freed in dm_put() at the end of this function.
    142  static void rq_completed(struct mapped_device *md)    in rq_completed() argument
    147  dm_put(md);    in rq_completed()
    [all …]

/Linux-v6.1/drivers/md/dm-zone.c
     20  static int dm_blk_do_report_zones(struct mapped_device *md, struct dm_table *t,    in dm_blk_do_report_zones() argument
     24  struct gendisk *disk = md->disk;    in dm_blk_do_report_zones()
     58  struct mapped_device *md = disk->private_data;    in dm_blk_report_zones() local
     62  if (dm_suspended_md(md))    in dm_blk_report_zones()
     65  map = dm_get_live_table(md, &srcu_idx);    in dm_blk_report_zones()
     69  ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data);    in dm_blk_report_zones()
     71  dm_put_live_table(md, srcu_idx);    in dm_blk_report_zones()
    124  bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)    in dm_is_zone_write() argument
    126  struct request_queue *q = md->queue;    in dm_is_zone_write()
    140  void dm_cleanup_zoned_dev(struct mapped_device *md)    in dm_cleanup_zoned_dev() argument
    [all …]

/Linux-v6.1/drivers/md/dm-ioctl.c
     49  struct mapped_device *md;    member
     92  dm_get(hc->md);    in __get_name_cell()
    109  dm_get(hc->md);    in __get_uuid_cell()
    182  struct mapped_device *md;    in __get_dev_cell() local
    185  md = dm_get_md(huge_decode_dev(dev));    in __get_dev_cell()
    186  if (!md)    in __get_dev_cell()
    189  hc = dm_get_mdptr(md);    in __get_dev_cell()
    191  dm_put(md);    in __get_dev_cell()
    202  struct mapped_device *md)    in alloc_cell() argument
    229  hc->md = md;    in alloc_cell()
    [all …]

/Linux-v6.1/drivers/md/dm-sysfs.c
     26  struct mapped_device *md;    in dm_attr_show() local
     33  md = dm_get_from_kobject(kobj);    in dm_attr_show()
     34  if (!md)    in dm_attr_show()
     37  ret = dm_attr->show(md, page);    in dm_attr_show()
     38  dm_put(md);    in dm_attr_show()
     51  struct mapped_device *md;    in dm_attr_store() local
     58  md = dm_get_from_kobject(kobj);    in dm_attr_store()
     59  if (!md)    in dm_attr_store()
     62  ret = dm_attr->store(md, page, count);    in dm_attr_store()
     63  dm_put(md);    in dm_attr_store()
    [all …]

/Linux-v6.1/drivers/md/dm.h
     74  void dm_lock_md_type(struct mapped_device *md);
     75  void dm_unlock_md_type(struct mapped_device *md);
     76  void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
     77  enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
     78  struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
     80  int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
    104  void dm_cleanup_zoned_dev(struct mapped_device *md);
    107  bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
    110  static inline void dm_cleanup_zoned_dev(struct mapped_device *md) {}    in dm_cleanup_zoned_dev() argument
    112  static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)    in dm_is_zone_write() argument
    [all …]
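
Lines 110 and 112 of dm.h show the usual kernel idiom for configuration-gated helpers: real declarations when the feature is built in, empty static-inline stubs otherwise, so call sites never need their own #ifdef. A minimal, self-contained sketch of that pattern follows; the MY_FEATURE macro and the my_feature_*() names are illustrative stand-ins, not dm.h's actual CONFIG option or functions.

    #include <stdbool.h>
    #include <stdio.h>

    struct mapped_device;            /* opaque to callers, as in dm.h */

    #ifdef MY_FEATURE                /* stand-in for a real CONFIG_* option */
    /* Real implementations would live in a separately compiled file. */
    void my_feature_cleanup(struct mapped_device *md);
    bool my_feature_active(struct mapped_device *md);
    #else
    /* Feature compiled out: stubs keep every call site building unchanged. */
    static inline void my_feature_cleanup(struct mapped_device *md) {}
    static inline bool my_feature_active(struct mapped_device *md) { return false; }
    #endif

    int main(void)
    {
            /* Callers never test the option themselves. */
            my_feature_cleanup(NULL);
            printf("feature active: %d\n", my_feature_active(NULL));
            return 0;
    }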

/Linux-v6.1/drivers/md/md-autodetect.c
     12  #include "md.h"
     15  * When md (and any require personalities) are compiled into the kernel
     19  * with md=.....
     42  * actually try to invoke the MD device now; that is handled by
     48  * the MD devices (by specifying multiple "md=" lines)
     51  * md=n,0,factor,fault,device-list uses RAID0 for device n
     52  * md=n,-1,factor,fault,device-list uses LINEAR for device n
     53  * md=n,device-list reads a RAID superblock from the devices
     71  if (get_option(&str, &minor) != 2) { /* MD Number */    in md_setup()
     72  printk(KERN_WARNING "md: Too few arguments supplied to md=.\n");    in md_setup()
    [all …]
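
Read against the md= grammar quoted above (lines 51-53), a boot command line that assembles md0 as RAID0 from two partitions, or md1 from devices carrying a RAID superblock, might look like the lines below. The device names and the factor/fault values (4 and 0) are purely illustrative.

    md=0,0,4,0,/dev/sda1,/dev/sdb1
    md=1,/dev/sdc1,/dev/sdd1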

/Linux-v6.1/drivers/net/wwan/t7xx/t7xx_modem_ops.c
     81  struct t7xx_modem *md = t7xx_dev->md;    in t7xx_pci_mhccif_isr() local
     87  ctl = md->fsm_ctl;    in t7xx_pci_mhccif_isr()
     90  "MHCCIF interrupt received before initializing MD monitor\n");    in t7xx_pci_mhccif_isr()
     94  spin_lock_bh(&md->exp_lock);    in t7xx_pci_mhccif_isr()
     96  md->exp_id |= int_sta;    in t7xx_pci_mhccif_isr()
     97  if (md->exp_id & D2H_INT_EXCEPTION_INIT) {    in t7xx_pci_mhccif_isr()
    102  md->exp_id &= ~D2H_INT_EXCEPTION_INIT;    in t7xx_pci_mhccif_isr()
    105  } else if (md->exp_id & D2H_INT_PORT_ENUM) {    in t7xx_pci_mhccif_isr()
    106  md->exp_id &= ~D2H_INT_PORT_ENUM;    in t7xx_pci_mhccif_isr()
    113  if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) {    in t7xx_pci_mhccif_isr()
    [all …]

/Linux-v6.1/drivers/net/wwan/t7xx/t7xx_state_monitor.c
     50  void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)    in t7xx_fsm_notifier_register() argument
     52  struct t7xx_fsm_ctl *ctl = md->fsm_ctl;    in t7xx_fsm_notifier_register()
     60  void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)    in t7xx_fsm_notifier_unregister() argument
     63  struct t7xx_fsm_ctl *ctl = md->fsm_ctl;    in t7xx_fsm_notifier_unregister()
     74  static void fsm_state_notify(struct t7xx_modem *md, enum md_state state)    in fsm_state_notify() argument
     76  struct t7xx_fsm_ctl *ctl = md->fsm_ctl;    in fsm_state_notify()
     96  t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state);    in t7xx_fsm_broadcast_state()
     97  fsm_state_notify(ctl->md, state);    in t7xx_fsm_broadcast_state()
    118  struct device *dev = &ctl->md->t7xx_dev->pdev->dev;    in fsm_flush_event_cmd_qs()
    172  struct device *dev = &ctl->md->t7xx_dev->pdev->dev;    in fsm_routine_exception()
    [all …]

/Linux-v6.1/drivers/net/mdio/mdio-mux-bcm-iproc.c
     57  static void mdio_mux_iproc_config(struct iproc_mdiomux_desc *md)    in mdio_mux_iproc_config() argument
     63  val = readl(md->base + MDIO_SCAN_CTRL_OFFSET);    in mdio_mux_iproc_config()
     65  writel(val, md->base + MDIO_SCAN_CTRL_OFFSET);    in mdio_mux_iproc_config()
     67  if (md->core_clk) {    in mdio_mux_iproc_config()
     71  divisor = clk_get_rate(md->core_clk) / MDIO_OPERATING_FREQUENCY;    in mdio_mux_iproc_config()
     75  writel(val, md->base + MDIO_RATE_ADJ_EXT_OFFSET);    in mdio_mux_iproc_config()
     76  writel(val, md->base + MDIO_RATE_ADJ_INT_OFFSET);    in mdio_mux_iproc_config()
    136  struct iproc_mdiomux_desc *md = bus->priv;    in iproc_mdiomux_read() local
    139  ret = start_miim_ops(md->base, phyid, reg, 0, MDIO_CTRL_READ_OP);    in iproc_mdiomux_read()
    149  struct iproc_mdiomux_desc *md = bus->priv;    in iproc_mdiomux_write() local
    [all …]

/Linux-v6.1/drivers/net/mdio/mdio-mux-bcm6368.c
     39  struct bcm6368_mdiomux_desc *md = bus->priv;    in bcm6368_mdiomux_read() local
     43  __raw_writel(0, md->base + MDIOC_REG);    in bcm6368_mdiomux_read()
     48  if (md->ext_phy)    in bcm6368_mdiomux_read()
     51  __raw_writel(reg, md->base + MDIOC_REG);    in bcm6368_mdiomux_read()
     53  ret = __raw_readw(md->base + MDIOD_REG);    in bcm6368_mdiomux_read()
     61  struct bcm6368_mdiomux_desc *md = bus->priv;    in bcm6368_mdiomux_write() local
     64  __raw_writel(0, md->base + MDIOC_REG);    in bcm6368_mdiomux_write()
     69  if (md->ext_phy)    in bcm6368_mdiomux_write()
     73  __raw_writel(reg, md->base + MDIOC_REG);    in bcm6368_mdiomux_write()
     82  struct bcm6368_mdiomux_desc *md = data;    in bcm6368_mdiomux_switch_fn() local
    [all …]

/Linux-v6.1/drivers/clk/qcom/clk-regmap-mux-div.c
     23  int mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div)    in mux_div_set_src_div() argument
     27  const char *name = clk_hw_get_name(&md->clkr.hw);    in mux_div_set_src_div()
     29  val = (div << md->hid_shift) | (src << md->src_shift);    in mux_div_set_src_div()
     30  mask = ((BIT(md->hid_width) - 1) << md->hid_shift) |    in mux_div_set_src_div()
     31  ((BIT(md->src_width) - 1) << md->src_shift);    in mux_div_set_src_div()
     33  ret = regmap_update_bits(md->clkr.regmap, CFG_RCGR + md->reg_offset,    in mux_div_set_src_div()
     38  ret = regmap_update_bits(md->clkr.regmap, CMD_RCGR + md->reg_offset,    in mux_div_set_src_div()
     45  ret = regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset,    in mux_div_set_src_div()
     59  static void mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src,    in mux_div_get_src_div() argument
     63  const char *name = clk_hw_get_name(&md->clkr.hw);    in mux_div_get_src_div()
    [all …]
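
The val/mask construction in mux_div_set_src_div() (lines 29-31 above) is ordinary shift-and-mask field packing ahead of a read-modify-write register update. The sketch below redoes the same arithmetic on its own; the shift and width values are illustrative, not any real RCG register layout.

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1U << (n))

    /* Illustrative layout: divider in bits [4:0], source select in bits [10:8]. */
    #define HID_SHIFT 0
    #define HID_WIDTH 5
    #define SRC_SHIFT 8
    #define SRC_WIDTH 3

    int main(void)
    {
            uint32_t src = 2, div = 9;

            /* Same construction as mux_div_set_src_div(): a value and a mask
             * covering only the two fields being changed. */
            uint32_t val  = (div << HID_SHIFT) | (src << SRC_SHIFT);
            uint32_t mask = ((BIT(HID_WIDTH) - 1) << HID_SHIFT) |
                            ((BIT(SRC_WIDTH) - 1) << SRC_SHIFT);

            uint32_t reg = 0xffffffff;              /* pretend current register value */
            reg = (reg & ~mask) | (val & mask);     /* the update regmap_update_bits() performs */

            printf("val=0x%08x mask=0x%08x reg=0x%08x\n", val, mask, reg);
            return 0;
    }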

/Linux-v6.1/drivers/scsi/aic94xx/aic94xx_dump.c
     24  #define MD(x) (1 << (x))    macro
     34  {"LmMnSCBPTR", 0x20, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) },
     35  {"LmMnDDBPTR", 0x22, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) },
     43  {"LmMnDMAERRS", 0x46, 8, MD(0)|MD(1) },
     44  {"LmMnSGDMAERRS", 0x47, 8, MD(0)|MD(1) },
     45  {"LmMnEXPHDRP", 0x48, 8, MD(0) },
     46  {"LmMnSASAALIGN", 0x48, 8, MD(1) },
     47  {"LmMnMSKHDRP", 0x49, 8, MD(0) },
     48  {"LmMnSTPALIGN", 0x49, 8, MD(1) },
     49  {"LmMnRCVHDRP", 0x4A, 8, MD(0) },
    [all …]
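
Here MD(x) is simply a one-bit mode mask, and each table entry ORs several of them together to record which modes a given register is defined in. A small stand-alone sketch of the same membership test follows; the register names and entries are invented for illustration, not taken from the aic94xx table.

    #include <stdio.h>

    #define MD(x) (1 << (x))    /* same one-bit mode mask as in aic94xx_dump.c */

    struct reg_entry {
            const char *name;
            int offset;
            int width;
            int mode_mask;      /* OR of MD(n) for every mode that has this register */
    };

    static const struct reg_entry regs[] = {
            { "DEMO_PTR",   0x20, 16, MD(0) | MD(1) | MD(2) },
            { "DEMO_ALIGN", 0x48,  8, MD(1) },
    };

    int main(void)
    {
            int mode = 1;
            size_t i;

            for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
                    if (regs[i].mode_mask & MD(mode))   /* is the register present in this mode? */
                            printf("%s @0x%02x (%d bits)\n",
                                   regs[i].name, regs[i].offset, regs[i].width);
            return 0;
    }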

/Linux-v6.1/arch/ia64/kernel/efi.c
    269  is_memory_available (efi_memory_desc_t *md)    in is_memory_available() argument
    271  if (!(md->attribute & EFI_MEMORY_WB))    in is_memory_available()
    274  switch (md->type) {    in is_memory_available()
    293  #define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT)    argument
    302  efi_md_end(efi_memory_desc_t *md)    in efi_md_end() argument
    304  return (md->phys_addr + efi_md_size(md));    in efi_md_end()
    308  efi_wb(efi_memory_desc_t *md)    in efi_wb() argument
    310  return (md->attribute & EFI_MEMORY_WB);    in efi_wb()
    314  efi_uc(efi_memory_desc_t *md)    in efi_uc() argument
    316  return (md->attribute & EFI_MEMORY_UC);    in efi_uc()
    [all …]

/Linux-v6.1/drivers/soundwire/master.c
     42  struct sdw_master_device *md = dev_to_sdw_master_device(dev); \
     43  return sprintf(buf, format_string, md->bus->prop.field); \
     59  struct sdw_master_device *md = dev_to_sdw_master_device(dev);    in clock_frequencies_show() local
     63  for (i = 0; i < md->bus->prop.num_clk_freq; i++)    in clock_frequencies_show()
     65  md->bus->prop.clk_freq[i]);    in clock_frequencies_show()
     75  struct sdw_master_device *md = dev_to_sdw_master_device(dev);    in clock_gears_show() local
     79  for (i = 0; i < md->bus->prop.num_clk_gears; i++)    in clock_gears_show()
     81  md->bus->prop.clk_gears[i]);    in clock_gears_show()
    105  struct sdw_master_device *md = dev_to_sdw_master_device(dev);    in sdw_master_device_release() local
    107  kfree(md);    in sdw_master_device_release()
    [all …]

/Linux-v6.1/drivers/rapidio/devices/rio_mport_cdev.c
    123  struct mport_dev *md;    member
    187  * @md master port character device object
    199  struct mport_dev *md;    member
    261  struct rio_mport *mport = priv->md->mport;    in rio_mport_maint_rd()
    306  struct rio_mport *mport = priv->md->mport;    in rio_mport_maint_wr()
    359  rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,    in rio_mport_create_outbound_mapping() argument
    363  struct rio_mport *mport = md->mport;    in rio_mport_create_outbound_mapping()
    383  map->md = md;    in rio_mport_create_outbound_mapping()
    385  list_add_tail(&map->node, &md->mappings);    in rio_mport_create_outbound_mapping()
    393  rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,    in rio_mport_get_outbound_mapping() argument
    [all …]

/Linux-v6.1/arch/arm64/kernel/efi.c
     15  static bool region_is_misaligned(const efi_memory_desc_t *md)    in region_is_misaligned() argument
     19  return !PAGE_ALIGNED(md->phys_addr) ||    in region_is_misaligned()
     20  !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);    in region_is_misaligned()
     28  static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)    in create_mapping_protection() argument
     30  u64 attr = md->attribute;    in create_mapping_protection()
     31  u32 type = md->type;    in create_mapping_protection()
     36  if (region_is_misaligned(md)) {    in create_mapping_protection()
     76  int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)    in efi_create_mapping() argument
     78  pteval_t prot_val = create_mapping_protection(md);    in efi_create_mapping()
     79  bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||    in efi_create_mapping()
    [all …]

/Linux-v6.1/drivers/dma/milbeaut-hdmac.c
     70  struct milbeaut_hdmac_desc *md;    member
    103  mc->md = NULL;    in milbeaut_hdmac_next_desc()
    109  mc->md = to_milbeaut_hdmac_desc(vd);    in milbeaut_hdmac_next_desc()
    111  return mc->md;    in milbeaut_hdmac_next_desc()
    116  struct milbeaut_hdmac_desc *md)    in milbeaut_chan_start() argument
    122  sg = &md->sgl[md->sg_cur];    in milbeaut_chan_start()
    126  if (md->dir == DMA_MEM_TO_DEV) {    in milbeaut_chan_start()
    164  struct milbeaut_hdmac_desc *md;    in milbeaut_hdmac_start() local
    166  md = milbeaut_hdmac_next_desc(mc);    in milbeaut_hdmac_start()
    167  if (md)    in milbeaut_hdmac_start()
    [all …]

/Linux-v6.1/drivers/dma/uniphier-mdmac.c
     62  struct uniphier_mdmac_desc *md;    member
     94  mc->md = NULL;    in uniphier_mdmac_next_desc()
    100  mc->md = to_uniphier_mdmac_desc(vd);    in uniphier_mdmac_next_desc()
    102  return mc->md;    in uniphier_mdmac_next_desc()
    107  struct uniphier_mdmac_desc *md)    in uniphier_mdmac_handle() argument
    114  sg = &md->sgl[md->sg_cur];    in uniphier_mdmac_handle()
    116  if (md->dir == DMA_MEM_TO_DEV) {    in uniphier_mdmac_handle()
    147  struct uniphier_mdmac_desc *md;    in uniphier_mdmac_start() local
    149  md = uniphier_mdmac_next_desc(mc);    in uniphier_mdmac_start()
    150  if (md)    in uniphier_mdmac_start()
    [all …]

/Linux-v6.1/arch/riscv/kernel/efi.c
     19  static __init pgprot_t efimem_to_pgprot_map(efi_memory_desc_t *md)    in efimem_to_pgprot_map() argument
     21  u64 attr = md->attribute;    in efimem_to_pgprot_map()
     22  u32 type = md->type;    in efimem_to_pgprot_map()
     46  int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)    in efi_create_mapping() argument
     48  pgprot_t prot = __pgprot(pgprot_val(efimem_to_pgprot_map(md)) &    in efi_create_mapping()
     53  for (i = 0; i < md->num_pages; i++)    in efi_create_mapping()
     54  create_pgd_mapping(mm->pgd, md->virt_addr + i * PAGE_SIZE,    in efi_create_mapping()
     55  md->phys_addr + i * PAGE_SIZE,    in efi_create_mapping()
     62  efi_memory_desc_t *md = data;    in set_permissions() local
     66  if (md->attribute & EFI_MEMORY_RO) {    in set_permissions()
    [all …]

/Linux-v6.1/drivers/firmware/efi/efi-init.c
     25  static int __init is_memory(efi_memory_desc_t *md)    in is_memory() argument
     27  if (md->attribute & (EFI_MEMORY_WB|EFI_MEMORY_WT|EFI_MEMORY_WC))    in is_memory()
     39  efi_memory_desc_t *md;    in efi_to_phys() local
     41  for_each_efi_memory_desc(md) {    in efi_to_phys()
     42  if (!(md->attribute & EFI_MEMORY_RUNTIME))    in efi_to_phys()
     44  if (md->virt_addr == 0)    in efi_to_phys()
     47  if (md->virt_addr <= addr &&    in efi_to_phys()
     48  (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))    in efi_to_phys()
     49  return md->phys_addr + addr - md->virt_addr;    in efi_to_phys()
    109  static __init int is_usable_memory(efi_memory_desc_t *md)    in is_usable_memory() argument
    [all …]

/Linux-v6.1/Documentation/driver-api/md/raid5-cache.rst
     13  echo "write-back" > /sys/block/md0/md/journal_mode
     17  echo "write-through" > /sys/block/md0/md/journal_mode
     29  unclean shutdown. We call an array degraded if it has inconsistent data. MD
     36  two-step write will guarantee MD can recover correct data after unclean
     39  In write-through mode, MD reports IO completion to upper layer (usually
     53  write. For non-full-stripe writes, MD must read old data before the new parity
     61  In write-back mode, MD reports IO completion to upper layer (usually
     66  In write-back mode, MD also caches data in memory. The memory cache includes
     71  echo "2048" > /sys/block/md0/md/stripe_cache_size
     85  checksum is an optimization because MD can write meta and data freely without
    [all …]