Searched refs:back (Results 1 – 25 of 699) sorted by relevance


/Linux-v4.19/Documentation/scsi/
sd-parameters.txt
12 write back | 1 0 | on | on
13 write back, no read (daft) | 1 1 | on | off
15 To set cache type to "write back" and save this setting to the drive:
17 # echo "write back" > cache_type
22 # echo "temporary write back" > cache_type
/Linux-v4.19/drivers/staging/erofs/
namei.c
49 unsigned ndirents, head, back; in find_target_dirent() local
62 back = ndirents - 1; in find_target_dirent()
65 while (head <= back) { in find_target_dirent()
66 unsigned mid = head + (back - head) / 2; in find_target_dirent()
86 back = mid - 1; in find_target_dirent()
99 unsigned head, back; in find_target_block_classic() local
105 back = inode_datablocks(dir) - 1; in find_target_block_classic()
107 while (head <= back) { in find_target_block_classic()
108 unsigned mid = head + (back - head) / 2; in find_target_block_classic()
156 back = mid - 1; in find_target_block_classic()
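The erofs hits above walk a sorted run of directory entries with a head/back binary search. A minimal sketch of that pattern follows, assuming a plain sorted int array; find_target() and its arguments are illustrative, not the erofs API.

#include <stddef.h>

/* head/back binary search as in the find_target_dirent() hits above;
 * both bounds are unsigned, so guard the back = mid - 1 step at zero. */
static int find_target(const int *sorted, size_t ndirents, int want)
{
	size_t head, back;

	if (!ndirents)
		return -1;

	head = 0;
	back = ndirents - 1;

	while (head <= back) {
		size_t mid = head + (back - head) / 2;	/* overflow-safe midpoint */

		if (sorted[mid] == want)
			return (int)mid;
		if (sorted[mid] < want) {
			head = mid + 1;
		} else {
			if (!mid)
				break;
			back = mid - 1;
		}
	}
	return -1;	/* not found */
}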
/Linux-v4.19/lib/lz4/
lz4hc_compress.c
208 int back = 0; in LZ4HC_InsertAndGetWiderMatch() local
210 while ((ip + back > iLowLimit) in LZ4HC_InsertAndGetWiderMatch()
211 && (matchPtr + back > lowPrefixPtr) in LZ4HC_InsertAndGetWiderMatch()
212 && (ip[back - 1] == matchPtr[back - 1])) in LZ4HC_InsertAndGetWiderMatch()
213 back--; in LZ4HC_InsertAndGetWiderMatch()
215 mlt -= back; in LZ4HC_InsertAndGetWiderMatch()
219 *matchpos = matchPtr + back; in LZ4HC_InsertAndGetWiderMatch()
220 *startpos = ip + back; in LZ4HC_InsertAndGetWiderMatch()
229 int back = 0; in LZ4HC_InsertAndGetWiderMatch() local
241 while ((ip + back > iLowLimit) in LZ4HC_InsertAndGetWiderMatch()
[all …]
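In the LZ4HC hits, back steps toward negative values to widen a candidate match backwards while the bytes just before both pointers still agree and neither pointer crosses its lower limit. A hedged sketch of that loop, with illustrative names:

/* Backward match extension as in the LZ4HC_InsertAndGetWiderMatch() hits:
 * returns a non-positive offset to add to both pointers and subtract from
 * the match length. */
static int extend_match_backward(const unsigned char *ip,
				 const unsigned char *match,
				 const unsigned char *ip_low,
				 const unsigned char *match_low)
{
	int back = 0;

	while (ip + back > ip_low &&
	       match + back > match_low &&
	       ip[back - 1] == match[back - 1])
		back--;

	return back;
}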
/Linux-v4.19/Documentation/md/
raid5-cache.txt
6 since 4.4) or write-back mode (supported since 4.10). mdadm (supported since
9 in write-through mode. A user can switch it to write-back mode by:
11 echo "write-back" > /sys/block/md0/md/journal_mode
13 And switch it back to write-through mode by:
28 tries to resync the array to bring it back to normal state. But before the
46 write-back mode:
48 write-back mode fixes the 'write hole' issue too, since all write data is
49 cached on cache disk. But the main goal of 'write-back' cache is to speed up
54 overhead too. Write-back cache will aggregate the data and flush the data to
59 In write-back mode, MD reports IO completion to upper layer (usually
[all …]
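The raid5-cache document switches journal modes by writing a string to the md sysfs attribute shown above. A minimal userspace sketch of the same write, assuming the /sys/block/md0/md/journal_mode path from the document's example exists on the running system:

#include <stdio.h>

/* Equivalent of: echo "write-back" > /sys/block/md0/md/journal_mode */
int main(void)
{
	FILE *f = fopen("/sys/block/md0/md/journal_mode", "w");

	if (!f) {
		perror("journal_mode");
		return 1;
	}
	fputs("write-back\n", f);
	return fclose(f) ? 1 : 0;
}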
/Linux-v4.19/fs/xfs/libxfs/
xfs_da_format.c
441 to->back = be32_to_cpu(from->hdr.info.back); in xfs_dir2_leaf_hdr_from_disk()
459 to->hdr.info.back = cpu_to_be32(from->back); in xfs_dir2_leaf_hdr_to_disk()
473 to->back = be32_to_cpu(hdr3->info.hdr.back); in xfs_dir3_leaf_hdr_from_disk()
493 hdr3->info.hdr.back = cpu_to_be32(from->back); in xfs_dir3_leaf_hdr_to_disk()
522 to->back = be32_to_cpu(from->hdr.info.back); in xfs_da2_node_hdr_from_disk()
535 to->hdr.info.back = cpu_to_be32(from->back); in xfs_da2_node_hdr_to_disk()
550 to->back = be32_to_cpu(hdr3->info.hdr.back); in xfs_da3_node_hdr_from_disk()
565 hdr3->info.hdr.back = cpu_to_be32(from->back); in xfs_da3_node_hdr_to_disk()
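The xfs_da_format.c hits copy the big-endian on-disk back sibling pointer into a CPU-endian in-core header and back again. A hedged userspace sketch of that round trip, using ntohl()/htonl() as stand-ins for the kernel's be32_to_cpu()/cpu_to_be32(); the struct layouts are illustrative, not the XFS on-disk format.

#include <stdint.h>
#include <arpa/inet.h>

struct disk_blkinfo {		/* on-disk: fields stored big-endian */
	uint32_t forw;
	uint32_t back;
};

struct cpu_blkinfo {		/* in-core: native byte order */
	uint32_t forw;
	uint32_t back;
};

static void blkinfo_from_disk(struct cpu_blkinfo *to, const struct disk_blkinfo *from)
{
	to->forw = ntohl(from->forw);
	to->back = ntohl(from->back);
}

static void blkinfo_to_disk(struct disk_blkinfo *to, const struct cpu_blkinfo *from)
{
	to->forw = htonl(from->forw);
	to->back = htonl(from->back);
}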
xfs_da_btree.c
497 node->hdr.info.back = cpu_to_be32(oldblk->blkno); in xfs_da3_split()
503 if (node->hdr.info.back) { in xfs_da3_split()
504 ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno); in xfs_da3_split()
1047 ASSERT(!blkinfo->back); in xfs_da_blkinfo_onlychild_validate()
1079 ASSERT(oldroothdr.back == 0); in xfs_da3_root_join()
1200 forward = nodehdr.forw < nodehdr.back; in xfs_da3_node_toosmall()
1206 blkno = nodehdr.back; in xfs_da3_node_toosmall()
1727 new_info->back = old_info->back; in xfs_da3_blk_link()
1728 if (old_info->back) { in xfs_da3_blk_link()
1730 be32_to_cpu(old_info->back), in xfs_da3_blk_link()
[all …]
/Linux-v4.19/drivers/net/ethernet/hisilicon/hns3/hns3pf/
hclge_dcb.c
66 struct hclge_dev *hdev = vport->back; in hclge_ieee_getets()
122 struct hclge_dev *hdev = vport->back; in hclge_map_update()
172 struct hclge_dev *hdev = vport->back; in hclge_ieee_setets()
204 struct hclge_dev *hdev = vport->back; in hclge_ieee_getpfc()
239 struct hclge_dev *hdev = vport->back; in hclge_ieee_setpfc()
270 struct hclge_dev *hdev = vport->back; in hclge_getdcbx()
281 struct hclge_dev *hdev = vport->back; in hclge_setdcbx()
298 struct hclge_dev *hdev = vport->back; in hclge_setup_tc()
hclge_mbx.c
21 struct hclge_dev *hdev = vport->back; in hclge_gen_resp_to_vf()
59 struct hclge_dev *hdev = vport->back; in hclge_send_mbx_msg()
201 return hclge_cmd_set_promisc_mode(vport->back, &param); in hclge_set_vf_promisc_mode()
209 struct hclge_dev *hdev = vport->back; in hclge_set_vf_uc_mac_addr()
278 struct hclge_dev *hdev = vport->back; in hclge_set_vf_mc_mac_addr()
355 struct hclge_dev *hdev = vport->back; in hclge_get_vf_tcinfo()
370 struct hclge_dev *hdev = vport->back; in hclge_get_vf_queue_info()
385 struct hclge_dev *hdev = vport->back; in hclge_get_link_info()
420 struct hclge_dev *hdev = vport->back; in hclge_reset_vf()
hclge_main.c
541 struct hclge_dev *hdev = vport->back; in hclge_tqps_update_stats()
733 struct hclge_dev *hdev = vport->back; in hclge_update_stats()
774 struct hclge_dev *hdev = vport->back; in hclge_get_sset_count()
853 struct hclge_dev *hdev = vport->back; in hclge_get_stats()
1273 struct hclge_dev *hdev = vport->back; in hclge_assign_tqp()
1297 struct hclge_dev *hdev = vport->back; in hclge_knic_setup()
1385 struct hclge_dev *hdev = vport->back; in hclge_vport_setup()
1440 vport->back = hdev; in hclge_alloc_vport()
2006 roce->rinfo.num_vectors = vport->back->num_roce_msi; in hclge_init_roce_base_info()
2008 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || in hclge_init_roce_base_info()
[all …]
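In the hclge hits (and the i40e/ice hits further down), back is simply a pointer stored in each per-port or per-VSI structure that points back to its owning device structure, set once when the child is allocated. A sketch of that back-pointer idiom with illustrative type names:

struct parent_dev;

struct child_port {
	int id;
	struct parent_dev *back;	/* back-pointer to the owning device */
};

struct parent_dev {
	int num_ports;
	struct child_port ports[4];
};

static void parent_attach_ports(struct parent_dev *dev)
{
	int i;

	for (i = 0; i < dev->num_ports; i++) {
		dev->ports[i].id = i;
		dev->ports[i].back = dev;	/* as vport->back = hdev above */
	}
}

static struct parent_dev *port_to_dev(struct child_port *port)
{
	return port->back;			/* as hdev = vport->back above */
}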
/Linux-v4.19/scripts/coccinelle/iterators/
list_entry_update.cocci
4 /// that there is a path from the reassignment back to the top of the loop.
38 @back depends on (org || report) && !context exists@
48 @script:python depends on back && org@
56 @script:python depends on back && report@
/Linux-v4.19/drivers/net/ethernet/intel/ice/
ice_main.c
184 struct ice_pf *pf = vsi->back; in ice_add_mac_to_list()
284 struct device *dev = &vsi->back->pdev->dev; in ice_vsi_sync_fltr()
287 struct ice_pf *pf = vsi->back; in ice_vsi_sync_fltr()
706 dev_dbg(&vsi->back->pdev->dev, in ice_vsi_link_event()
1081 struct ice_pf *pf = vsi->back; in ice_vsi_dis_irq()
1132 struct ice_pf *pf = vsi->back; in ice_vsi_ena_irq()
1152 struct ice_pf *pf = vsi->back; in ice_vsi_delete()
1174 struct ice_pf *pf = vsi->back; in ice_vsi_req_irq_msix()
1240 struct ice_pf *pf = vsi->back; in ice_vsi_set_rss_params()
1304 if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) { in ice_vsi_setup_q_map()
[all …]
/Linux-v4.19/drivers/md/bcache/
util.h
120 size_t front, back, size, mask; \
126 c = (fifo)->data[iter], iter != (fifo)->back; \
138 (fifo)->front = (fifo)->back = 0; \
164 #define fifo_used(fifo) (((fifo)->back - (fifo)->front) & (fifo)->mask)
172 ((fifo)->data[((fifo)->back - 1) & (fifo)->mask])
180 (fifo)->data[(fifo)->back++] = (i); \
181 (fifo)->back &= (fifo)->mask; \
211 --(fifo)->back; \
212 (fifo)->back &= (fifo)->mask; \
213 (i) = (fifo)->data[(fifo)->back] \
[all …]
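The bcache macros above implement a power-of-two ring buffer: front and back are indices wrapped with mask, and the fill level is (back - front) & mask. A minimal fixed-type sketch of the same scheme (the util.h macros generate this for arbitrary element types):

#include <stdbool.h>
#include <stddef.h>

struct fifo {
	size_t front, back, size, mask;	/* size is a power of two, mask = size - 1 */
	int *data;
};

static size_t fifo_used(const struct fifo *f)
{
	return (f->back - f->front) & f->mask;
}

static bool fifo_push(struct fifo *f, int v)
{
	if (fifo_used(f) + 1 >= f->size)	/* keep one slot free */
		return false;
	f->data[f->back++] = v;
	f->back &= f->mask;
	return true;
}

static bool fifo_pop(struct fifo *f, int *v)
{
	if (!fifo_used(f))
		return false;
	*v = f->data[f->front++];
	f->front &= f->mask;
	return true;
}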
/Linux-v4.19/drivers/net/ethernet/intel/i40evf/
i40evf_client.c
42 params->link_up = vsi->back->link_up; in i40evf_client_get_params()
65 cinst = vsi->back->cinst; in i40evf_notify_client_message()
68 dev_dbg(&vsi->back->pdev->dev, in i40evf_notify_client_message()
90 cinst = vsi->back->cinst; in i40evf_notify_client_l2_params()
94 dev_dbg(&vsi->back->pdev->dev, in i40evf_notify_client_l2_params()
112 struct i40evf_adapter *adapter = vsi->back; in i40evf_notify_client_open()
118 dev_dbg(&vsi->back->pdev->dev, in i40evf_notify_client_open()
164 struct i40evf_adapter *adapter = vsi->back; in i40evf_notify_client_close()
169 dev_dbg(&vsi->back->pdev->dev, in i40evf_notify_client_close()
/Linux-v4.19/Documentation/ABI/testing/
sysfs-class-bdi
36 total write-back cache that relates to its current average
40 percentage of the write-back cache to a particular device.
46 given percentage of the write-back cache. This is useful in
48 most of the write-back cache. For example in case of an NFS
sysfs-platform-hidma-mgmt
63 read transactions that can be issued back to back.
84 write transactions that can be issued back to back.
/Linux-v4.19/drivers/net/ethernet/intel/i40e/
i40e_main.c
107 struct i40e_pf *pf = (struct i40e_pf *)hw->back; in i40e_allocate_dma_mem_d()
125 struct i40e_pf *pf = (struct i40e_pf *)hw->back; in i40e_free_dma_mem_d()
296 struct i40e_pf *pf = vsi->back; in i40e_tx_timeout()
604 struct i40e_pf *pf = vsi->back; in i40e_update_eth_stats()
753 struct i40e_pf *pf = vsi->back; in i40e_update_vsi_stats()
1085 struct i40e_pf *pf = vsi->back; in i40e_update_stats()
1299 struct i40e_pf *pf = vsi->back; in i40e_rm_default_mac_filter()
1362 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); in i40e_add_filter()
1518 struct i40e_pf *pf = vsi->back; in i40e_set_mac()
1531 if (test_bit(__I40E_VSI_DOWN, vsi->back->state) || in i40e_set_mac()
[all …]
i40e_ethtool.c
748 struct i40e_pf *pf = np->vsi->back; in i40e_get_link_ksettings()
838 struct i40e_pf *pf = np->vsi->back; in i40e_set_link_ksettings()
1068 struct i40e_pf *pf = np->vsi->back; in i40e_nway_reset()
1095 struct i40e_pf *pf = np->vsi->back; in i40e_get_pauseparam()
1130 struct i40e_pf *pf = np->vsi->back; in i40e_set_pauseparam()
1223 struct i40e_pf *pf = np->vsi->back; in i40e_get_msglevel()
1235 struct i40e_pf *pf = np->vsi->back; in i40e_set_msglevel()
1258 struct i40e_pf *pf = np->vsi->back; in i40e_get_regs()
1289 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_get_eeprom()
1290 struct i40e_pf *pf = np->vsi->back; in i40e_get_eeprom()
[all …]
i40e_client.c
49 struct i40e_dcbx_config *dcb_cfg = &vsi->back->hw.local_dcbx_config; in i40e_client_get_params()
64 dev_err(&vsi->back->pdev->dev, "Invalid queue set handle for TC = %d, vsi id = %d\n", in i40e_client_get_params()
86 struct i40e_pf *pf = vsi->back; in i40e_notify_client_of_vf_msg()
112 struct i40e_pf *pf = vsi->back; in i40e_notify_client_of_l2_param_changes()
119 dev_dbg(&vsi->back->pdev->dev, in i40e_notify_client_of_l2_param_changes()
124 dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n"); in i40e_notify_client_of_l2_param_changes()
171 struct i40e_pf *pf = vsi->back; in i40e_notify_client_of_netdev_close()
177 dev_dbg(&vsi->back->pdev->dev, in i40e_notify_client_of_netdev_close()
726 err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_client_update_vsi_ctxt()
/Linux-v4.19/arch/mips/include/asm/octeon/
cvmx-packet.h
52 uint64_t back:4; member
63 uint64_t back:4;
cvmx-helper-util.h
169 start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7; in cvmx_helper_free_packet_data()
183 ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7; in cvmx_helper_free_packet_data()
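In the Octeon packet descriptor, back is a 4-bit count of 128-byte cache lines between the start of the backing buffer and the address held in the pointer, so the helper recovers the buffer start by working at cache-line granularity. A sketch of that computation:

#include <stdint.h>

/* Recover the buffer start from an Octeon-style packet pointer:
 * shift to 128-byte granularity, step back `back` cache lines, shift up. */
static uint64_t buffer_start(uint64_t addr, unsigned int back)
{
	return ((addr >> 7) - back) << 7;
}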
/Linux-v4.19/Documentation/
dell_rbu.txt
36 maintains a link list of packets for reading them back.
74 packets of data arranged back to back. It can be done as follows
79 image file and then arrange all these packets back to back in to one single
120 read back the image downloaded.
/Linux-v4.19/Documentation/powerpc/
DAWR-POWER9.txt
25 PPC_PTRACE_GETHWDBGINFO call. This results in GDB falling back to
48 host. The watchpoint will fail and GDB will fall back to software
57 migrated back to the POWER8 host, it will start working again.
/Linux-v4.19/drivers/gpu/drm/udl/
udl_transfer.c
46 const unsigned long *back = (const unsigned long *) bback;
54 if (back[j] != front[j]) {
61 if (back[k] != front[k]) {
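The udl hits compare the front and back framebuffer copies one machine word at a time to find where they diverge (damage detection before transfer). A hedged sketch of that scan with illustrative names:

#include <stddef.h>

/* Return the index of the first word that differs between the two buffers,
 * or nwords if they are identical over the compared span. */
static size_t first_diff_word(const unsigned long *front,
			      const unsigned long *back,
			      size_t nwords)
{
	size_t j;

	for (j = 0; j < nwords; j++)
		if (back[j] != front[j])
			break;
	return j;
}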
/Linux-v4.19/Documentation/devicetree/bindings/dma/
qcom_hidma_mgmt.txt
32 applied back to back while writing to the destination before yielding
35 applied back to back while reading the source before yielding the bus.
/Linux-v4.19/arch/m68k/fpsp040/
sgetem.S
84 movew %d0,LOCAL_EX(%a0) |move the sign & exp back to fsave stack
85 fmovex (%a0),%fp0 |put new value back in fp0
97 movel %d0,LOCAL_HI(%a0) |put ms mant back on stack
98 movel %d1,LOCAL_LO(%a0) |put ls mant back on stack
