Occurrences of the identifier map_index in Linux v4.19:

/Linux-v4.19/include/trace/events/
  xdp.h
      58  const struct bpf_map *map, u32 map_index),
      60  TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
      69  __field(int, map_index)
      79  __entry->map_index = map_index;
      93  const struct bpf_map *map, u32 map_index),
      94  TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
     101  const struct bpf_map *map, u32 map_index),
     102  TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
     115  const struct bpf_map *map, u32 map_index),
     116  TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
    [all …]
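In these tracepoint definitions, map_index carries the slot of the redirect map that an XDP program targeted, travelling from the TP_PROTO argument list into the recorded event. A minimal sketch of the TRACE_EVENT pattern involved (the event name and field set here are illustrative, and the TRACE_SYSTEM/header boilerplate that normally surrounds such a definition is omitted):

    /* A TP_PROTO argument (map_index) is declared as a ring-buffer
     * field with __field() and copied into the per-event record in
     * TP_fast_assign, which is the flow visible in the hits above. */
    TRACE_EVENT(example_xdp_redirect,

        TP_PROTO(int to_ifindex, int err,
                 const struct bpf_map *map, u32 map_index),

        TP_ARGS(to_ifindex, err, map, map_index),

        TP_STRUCT__entry(
            __field(int, to_ifindex)
            __field(int, err)
            __field(u32, map_id)
            __field(int, map_index)
        ),

        TP_fast_assign(
            __entry->to_ifindex = to_ifindex;
            __entry->err = err;
            __entry->map_id = map ? map->id : 0;
            __entry->map_index = map_index;
        ),

        TP_printk("to_ifindex=%d err=%d map_id=%u map_index=%d",
                  __entry->to_ifindex, __entry->err,
                  __entry->map_id, __entry->map_index)
    );

The resulting record layout can be inspected from userspace under /sys/kernel/debug/tracing/events/.
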
/Linux-v4.19/drivers/gpu/drm/amd/display/modules/freesync/
  freesync.c
    in adjust_vmin_vmax():
       369  int map_index,  (argument)
       376  core_freesync->map[map_index].state.vmin = v_total_min;
       377  core_freesync->map[map_index].state.vmax = v_total_max;
    in set_freesync_on_streams():
       602  unsigned int stream_idx, map_index = 0;  (local)
       610  map_index = map_index_from_stream(core_freesync,
       613  state = &core_freesync->map[map_index].state;
       615  if (core_freesync->map[map_index].caps->supported) {
       630  if (core_freesync->map[map_index].user_enable.
       644  num_streams, map_index,
       650  } else if (core_freesync->map[map_index].user_enable.
    [all …]
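Here map_index selects the per-stream slot of core_freesync->map[] whose state (vmin/vmax, caps, user_enable) the module reads and writes, and map_index_from_stream() resolves a stream pointer to that slot. A minimal userspace-style sketch of that resolution, with made-up stand-in types (stream_entry, core) in place of the module's real structures:

    /* Illustrative types only; the real ones live in the freesync module. */
    struct stream_state { unsigned int vmin, vmax; };
    struct stream_entry { const void *stream; struct stream_state state; };
    struct core { struct stream_entry map[8]; unsigned int num_entries; };

    /* Sketch of the map_index_from_stream() idea: linear-search the
     * per-stream table for the entry that owns this stream, so later
     * code can address its state as core->map[map_index].state. */
    static unsigned int map_index_from_stream(const struct core *core,
                                              const void *stream)
    {
        unsigned int i;

        for (i = 0; i < core->num_entries; i++)
            if (core->map[i].stream == stream)
                return i;
        return 0;  /* fall back to slot 0 if the stream is unknown */
    }
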
/Linux-v4.19/drivers/infiniband/sw/rxe/
  rxe_mr.c
    in lookup_iova():
       272  int map_index;  (local)
       282  map_index = 0;
       285  length = mem->map[map_index]->buf[buf_index].size;
       292  map_index++;
       295  length = mem->map[map_index]->buf[buf_index].size;
       298  *m_out = map_index;
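lookup_iova() resolves an offset into a memory region laid out as map chunks of fixed-size buffer arrays, advancing map_index and buf_index until the remaining offset fits inside one buffer. A self-contained sketch of that walk, with invented types and BUFS_PER_MAP standing in for the driver's per-chunk buffer count:

    #include <stddef.h>

    #define BUFS_PER_MAP 256  /* stand-in for the driver's chunk size */

    /* Illustrative two-level layout: a region is described by an array
     * of map chunks, each holding an array of (addr, size) buffers. */
    struct buf { void *addr; size_t size; };
    struct map { struct buf buf[BUFS_PER_MAP]; };
    struct mem { struct map **map; };

    /* Sketch of the lookup_iova() walk: consume 'offset' buffer by
     * buffer, stepping to the next map chunk when one is exhausted,
     * and return the (map index, buf index, residual offset) triple. */
    static void lookup_offset(struct mem *mem, size_t offset,
                              int *m_out, int *b_out, size_t *off_out)
    {
        int map_index = 0, buf_index = 0;
        size_t length = mem->map[map_index]->buf[buf_index].size;

        while (offset >= length) {
            offset -= length;
            if (++buf_index == BUFS_PER_MAP) {  /* chunk exhausted */
                map_index++;
                buf_index = 0;
            }
            length = mem->map[map_index]->buf[buf_index].size;
        }
        *m_out = map_index;
        *b_out = buf_index;
        *off_out = offset;
    }
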
/Linux-v4.19/samples/bpf/
  xdp_monitor_kern.c
       36  int map_index; // offset:32 size:4; signed:1;  (struct member)
      227  u32 map_index; // offset:16; size:4; signed:0;  (struct member)
  xdp_redirect_cpu_kern.c
      583  int map_index; // offset:32 size:4; signed:1;  (struct member)
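Both sample programs declare local structs that mirror a tracepoint's binary record, with comments documenting the offset/size/signedness of each field as reported by the kernel's per-event format file (under /sys/kernel/debug/tracing/events/). A sketch of such a mirrored context with map_index at offset 32, as in the hits above; treat the exact field set as illustrative and verify it against the format file:

    /* Hand-mirrored tracepoint context for an XDP redirect event.
     * The first 8 bytes of a tracepoint record are common fields that
     * BPF code must not touch, hence the pad. Offsets follow the
     * comments quoted in the samples above. */
    struct xdp_redirect_ctx {
        unsigned long long pad;  /* common fields, not accessible */
        int prog_id;             /* offset:8;  size:4; signed:1;  */
        unsigned int act;        /* offset:12; size:4; signed:0;  */
        int ifindex;             /* offset:16; size:4; signed:1;  */
        int err;                 /* offset:20; size:4; signed:1;  */
        int to_ifindex;          /* offset:24; size:4; signed:1;  */
        unsigned int map_id;     /* offset:28; size:4; signed:0;  */
        int map_index;           /* offset:32; size:4; signed:1;  */
    };
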
/Linux-v4.19/drivers/scsi/smartpqi/
  smartpqi_init.c
    in pqi_raid_bypass_submit_scsi_cmd():
      2070  u32 map_index;  (local)
      2203  map_index = (map_row * total_disks_per_row) + first_column;
      2208  map_index += data_disks_per_row;
      2219  map_index %= data_disks_per_row;
      2226  current_group = map_index / data_disks_per_row;
      2235  map_index += data_disks_per_row;
      2242  map_index %= data_disks_per_row;
      2338  map_index = (first_group *
      2344  if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
      2347  aio_handle = raid_map->disk_data[map_index].aio_handle;
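The RAID bypass path converts a (row, column) position in the RAID map into a flat index: map_index = map_row * total_disks_per_row + first_column, then offsets by data_disks_per_row to reach a mirror copy, and folds with a modulo to stay inside a group. A worked example with invented geometry values:

    #include <stdio.h>

    /* Sketch of the row-major RAID map indexing seen above: a logical
     * block maps to (row, column), and map_index selects the entry in
     * the flat disk_data[] table. All values here are made up. */
    int main(void)
    {
        unsigned int data_disks_per_row = 3;   /* data drives per row   */
        unsigned int total_disks_per_row = 6;  /* data + parity/mirror  */
        unsigned int map_row = 4;              /* from LBA / stripe size */
        unsigned int first_column = 2;         /* from LBA within a row  */

        unsigned int map_index =
            (map_row * total_disks_per_row) + first_column;
        printf("primary map_index = %u\n", map_index);  /* 26 */

        /* RAID 1: steer alternate requests to the mirror copy by
         * skipping over the data drives of the row. */
        map_index += data_disks_per_row;
        printf("mirror map_index  = %u\n", map_index);  /* 29 */
        return 0;
    }
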
/Linux-v4.19/drivers/scsi/
  hpsa.c
    in raid_map_helper():
      5025  int offload_to_mirror, u32 *map_index, u32 *current_group)  (argument)
      5029  *map_index %= le16_to_cpu(map->data_disks_per_row);
      5034  *current_group = *map_index /
      5040  *map_index += le16_to_cpu(map->data_disks_per_row);
      5044  *map_index %= le16_to_cpu(map->data_disks_per_row);
    in hpsa_scsi_ioaccel_raid_map():
      5061  u32 map_index;  (local)
      5198  map_index = (map_row * total_disks_per_row) + first_column;
      5210  map_index += le16_to_cpu(map->data_disks_per_row);
      5221  &map_index, &current_group);
      5318  map_index = (first_group *
    [all …]
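raid_map_helper() rotates mirrored reads across the layout's mirror groups: adding data_disks_per_row moves map_index one group forward, and map_index %= data_disks_per_row folds it back to group 0. A simplified sketch of that selection, which clamps at the last group rather than wrapping round-robin the way the driver does, and uses plain ints in place of the little-endian raid_map fields:

    /* Sketch of the mirror-group selection idea: fold map_index back
     * to a column in group 0, then step forward one group at a time
     * until the load-balancing target (offload_to_mirror) or the last
     * available group is reached. */
    static void pick_mirror_group(unsigned int data_disks_per_row,
                                  unsigned int layout_map_count,
                                  unsigned int offload_to_mirror,
                                  unsigned int *map_index,
                                  unsigned int *current_group)
    {
        /* Fold back to the first mirrored group... */
        *map_index %= data_disks_per_row;
        *current_group = 0;

        /* ...then advance toward the group chosen for this request. */
        while (*current_group < offload_to_mirror &&
               *current_group < layout_map_count - 1) {
            *map_index += data_disks_per_row;
            (*current_group)++;
        }
    }
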
/Linux-v4.19/drivers/infiniband/hw/bnxt_re/
  main.c
    in bnxt_re_net_ring_alloc():
       375  u32 map_index, u16 *fw_ring_id)  (argument)
       397  req.logical_id = cpu_to_le16(map_index);

/Linux-v4.19/drivers/net/ethernet/broadcom/bnxt/
  bnxt.c
    in hwrm_ring_alloc_send_msg():
      4312  u32 ring_type, u32 map_index)  (argument)
      4333  req.logical_id = cpu_to_le16(map_index);
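In both Broadcom drivers, map_index becomes the logical ring id of an HWRM RING_ALLOC firmware request; HWRM fields are little-endian on the wire, hence the cpu_to_le16() conversion. A userspace-flavoured sketch of the same step, with the request struct trimmed to one field and glibc's htole16() standing in for the kernel helper:

    #include <endian.h>   /* htole16(); the kernel uses cpu_to_le16() */
    #include <stdint.h>

    /* Trimmed stand-in for the HWRM ring-allocation request: the
     * firmware expects multi-byte fields in little-endian, so the
     * host-order ring index is converted before being sent. */
    struct ring_alloc_req {
        uint16_t logical_id;  /* little-endian on the wire */
        /* many other HWRM fields elided */
    };

    static void fill_ring_alloc(struct ring_alloc_req *req,
                                uint32_t map_index)
    {
        /* Mirrors the drivers' req.logical_id = cpu_to_le16(map_index); */
        req->logical_id = htole16((uint16_t)map_index);
    }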