/Linux-v5.15/drivers/soc/ti/

knav_qmss_acc.c
    283  cmd->command, cmd->queue_mask, cmd->list_dma,  in knav_acc_write()
    289  writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask);  in knav_acc_write()
    308  u32 queue_mask;  in knav_acc_setup_cmd()  local
    313  queue_mask = BIT(range->num_queues) - 1;  in knav_acc_setup_cmd()
    317  queue_mask = 0;  in knav_acc_setup_cmd()
    322  cmd->queue_mask = queue_mask;  in knav_acc_setup_cmd()

knav_qmss.h
     89  u32 queue_mask;  member

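The accumulator setup above builds a contiguous mask covering the range's queues with BIT(n) - 1, then copies it into the PDSP command block. A minimal userspace sketch of that mask construction, with BIT() expanded to its usual 1UL << n definition and num_queues standing in for range->num_queues:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned int num_queues = 5;	/* stand-in for range->num_queues */
	uint32_t queue_mask;

	/* BIT(n) - 1 sets the n least-significant bits: one per queue */
	queue_mask = BIT(num_queues) - 1;

	printf("queue_mask = 0x%08x\n", queue_mask);	/* prints 0x0000001f */
	return 0;
}
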
/Linux-v5.15/drivers/gpu/drm/amd/amdkfd/

kfd_packet_manager_vi.c
    135  packet->queue_mask_lo = lower_32_bits(res->queue_mask);  in pm_set_resources_vi()
    136  packet->queue_mask_hi = upper_32_bits(res->queue_mask);  in pm_set_resources_vi()

kfd_packet_manager_v9.c
    175  packet->queue_mask_lo = lower_32_bits(res->queue_mask);  in pm_set_resources_v9()
    176  packet->queue_mask_hi = upper_32_bits(res->queue_mask);  in pm_set_resources_v9()

kfd_device_queue_manager.c
   1111  res.queue_mask = 0;  in set_sched_resources()
   1127  if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {  in set_sched_resources()
   1132  res.queue_mask |= 1ull  in set_sched_resources()
   1142  res.vmid_mask, res.queue_mask);  in set_sched_resources()

kfd_priv.h
    557  uint64_t queue_mask;  member

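Both packet managers split the 64-bit scheduler queue mask into two 32-bit packet fields, and set_sched_resources() refuses bit indices that would overflow it. A sketch of that build-and-split flow, with userspace equivalents of the kernel's lower_32_bits()/upper_32_bits() helpers and a made-up set of queue indices:

#include <stdio.h>
#include <stdint.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t queue_mask = 0;
	unsigned int i;

	/* mirror set_sched_resources(): one bit per usable queue, with a
	 * bound check where the kernel code WARN_ONs */
	for (i = 0; i < 40; i += 8) {
		if (i >= sizeof(queue_mask) * 8)
			break;
		queue_mask |= 1ull << i;
	}

	printf("lo = 0x%08x, hi = 0x%08x\n",
	       lower_32_bits(queue_mask), upper_32_bits(queue_mask));
	return 0;
}
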
/Linux-v5.15/drivers/gpu/drm/amd/amdgpu/

amdgpu_gfx.c
    505  uint64_t queue_mask = 0;  in amdgpu_gfx_enable_kcq()  local
    518  if (WARN_ON(i > (sizeof(queue_mask)*8))) {  in amdgpu_gfx_enable_kcq()
    523  queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));  in amdgpu_gfx_enable_kcq()
    538  kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);  in amdgpu_gfx_enable_kcq()

amdgpu_gfx.h
     84  uint64_t queue_mask);

gfx_v8_0.c
   4361  uint64_t queue_mask = 0;  in gfx_v8_0_kiq_kcq_enable()  local
   4371  if (WARN_ON(i >= (sizeof(queue_mask)*8))) {  in gfx_v8_0_kiq_kcq_enable()
   4376  queue_mask |= (1ull << i);  in gfx_v8_0_kiq_kcq_enable()
   4387  amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));  /* queue mask lo */  in gfx_v8_0_kiq_kcq_enable()
   4388  amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));  /* queue mask hi */  in gfx_v8_0_kiq_kcq_enable()

gfx_v9_0.c
    827  uint64_t queue_mask)  in gfx_v9_0_kiq_set_resources()  argument
    835  lower_32_bits(queue_mask));  /* queue mask lo */  in gfx_v9_0_kiq_set_resources()
    837  upper_32_bits(queue_mask));  /* queue mask hi */  in gfx_v9_0_kiq_set_resources()

gfx_v10_0.c
   3610  static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)  in gfx10_kiq_set_resources()  argument
   3615  amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));  /* queue mask lo */  in gfx10_kiq_set_resources()
   3616  amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));  /* queue mask hi */  in gfx10_kiq_set_resources()

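The common path in amdgpu_gfx_enable_kcq() differs from the gfx_v8 one in that each queue index is first remapped to its SET_RESOURCES bit position before being set, and the finished mask is handed to the KIQ as two 32-bit ring words. A sketch of that remap-then-set pattern, where remap_bit() is a hypothetical stand-in for amdgpu_queue_mask_bit_to_set_resource_bit() (the real mapping depends on the ASIC's MEC/pipe/queue geometry):

#include <stdio.h>
#include <stdint.h>

/* hypothetical stand-in for amdgpu_queue_mask_bit_to_set_resource_bit() */
static unsigned int remap_bit(unsigned int i)
{
	return i;	/* identity mapping, as in the gfx_v8 path */
}

int main(void)
{
	uint64_t queue_mask = 0;
	unsigned int i;

	for (i = 0; i < 8; i++) {
		if (i >= sizeof(queue_mask) * 8)
			break;			/* the driver WARN_ONs here */
		queue_mask |= 1ull << remap_bit(i);
	}

	/* the KIQ packet carries the mask as lo/hi 32-bit halves */
	printf("lo = 0x%08x  hi = 0x%08x\n",
	       (uint32_t)queue_mask, (uint32_t)(queue_mask >> 32));
	return 0;
}
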
/Linux-v5.15/drivers/net/ethernet/marvell/

mv643xx_eth.c
   2254  u8 queue_mask;  in mv643xx_eth_poll()  local
   2265  queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;  in mv643xx_eth_poll()
   2267  queue_mask |= mp->work_rx_refill;  in mv643xx_eth_poll()
   2269  if (!queue_mask) {  in mv643xx_eth_poll()
   2275  queue = fls(queue_mask) - 1;  in mv643xx_eth_poll()
   2276  queue_mask = 1 << queue;  in mv643xx_eth_poll()
   2282  if (mp->work_tx_end & queue_mask) {  in mv643xx_eth_poll()
   2284  } else if (mp->work_tx & queue_mask) {  in mv643xx_eth_poll()
   2287  } else if (mp->work_rx & queue_mask) {  in mv643xx_eth_poll()
   2289  } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {  in mv643xx_eth_poll()

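The poll loop ORs the per-queue work bitmaps together, picks the highest-numbered pending queue with fls(), and narrows queue_mask down to that single bit before testing which kind of work it represents. A sketch of that selection step, where fls() is a portable stand-in for the kernel helper (1-based index of the most significant set bit) and the work values are invented examples:

#include <stdio.h>

/* stand-in for the kernel's fls(): 1-based index of the highest set bit,
 * 0 when no bit is set */
static int fls(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned char work_tx = 0x05, work_rx = 0x12;	/* invented pending work */
	unsigned char queue_mask = work_tx | work_rx;
	int queue;

	while (queue_mask) {
		queue = fls(queue_mask) - 1;	/* highest pending queue */
		queue_mask = 1 << queue;	/* isolate that queue's bit */

		printf("servicing queue %d (mask 0x%02x)\n", queue, queue_mask);

		/* clear the handled work and recompute, as the driver does
		 * after servicing a queue */
		work_tx &= ~queue_mask;
		work_rx &= ~queue_mask;
		queue_mask = work_tx | work_rx;
	}
	return 0;
}
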
/Linux-v5.15/net/ethtool/

ioctl.c
   2458  DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE);  in ethtool_get_per_queue_coalesce()
   2465  bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask,  in ethtool_get_per_queue_coalesce()
   2468  for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) {  in ethtool_get_per_queue_coalesce()
   2491  DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE);  in ethtool_set_per_queue_coalesce()
   2499  bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, MAX_NUM_QUEUE);  in ethtool_set_per_queue_coalesce()
   2500  n_queue = bitmap_weight(queue_mask, MAX_NUM_QUEUE);  in ethtool_set_per_queue_coalesce()
   2505  for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) {  in ethtool_set_per_queue_coalesce()
   2534  for_each_set_bit(i, queue_mask, bit) {  in ethtool_set_per_queue_coalesce()

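Both handlers convert the user-supplied array of __u32 words into a kernel bitmap, count the selected queues, and walk the set bits. A userspace sketch of the same word-array-to-bit-iteration flow, with plain loops where the kernel uses bitmap_from_arr32(), bitmap_weight() and for_each_set_bit() (MAX_NUM_QUEUE is 4096 in the kernel header, shortened here):

#include <stdio.h>
#include <stdint.h>

#define MAX_NUM_QUEUE 64	/* 4096 in the kernel; shortened for the sketch */

int main(void)
{
	/* what userspace passes in ethtool_per_queue_op::queue_mask */
	uint32_t user_words[MAX_NUM_QUEUE / 32] = { 0x0000000a, 0x00000001 };
	unsigned int bit, n_queue = 0;

	/* queue bit 'bit' lives in word bit/32, at position bit%32 */
	for (bit = 0; bit < MAX_NUM_QUEUE; bit++) {
		if (user_words[bit / 32] & (1u << (bit % 32))) {
			n_queue++;
			printf("queue %u selected\n", bit);
		}
	}
	printf("%u queues total\n", n_queue);	/* queues 1, 3 and 32 */
	return 0;
}
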
/Linux-v5.15/drivers/net/ethernet/cadence/

macb_main.c
   3721  unsigned int *queue_mask,  in macb_probe_queues()  argument
   3724  *queue_mask = 0x1;  in macb_probe_queues()
   3737  *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff;  in macb_probe_queues()
   3738  *num_queues = hweight32(*queue_mask);  in macb_probe_queues()
   3857  if (!(bp->queue_mask & (1 << hw_q)))  in macb_init()
   4658  unsigned int queue_mask, num_queues;  in macb_probe()  local
   4693  macb_probe_queues(mem, native_io, &queue_mask, &num_queues);  in macb_probe()
   4717  bp->queue_mask = queue_mask;  in macb_probe()

macb.h
   1250  unsigned int queue_mask;  member

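macb_probe_queues() always enables queue 0, ORs in the hardware's queue-present bits read from GEM_DCFG6, and derives num_queues as the population count; macb_init() later skips any hole in the mask. A sketch of that probe logic with the register read replaced by a sample value (0xd5 is an invented DCFG6 reading for illustration):

#include <stdio.h>

/* stand-in for the kernel's hweight32(): number of set bits */
static unsigned int hweight32(unsigned int x)
{
	unsigned int n = 0;

	for (; x; x &= x - 1)
		n++;
	return n;
}

int main(void)
{
	unsigned int dcfg6 = 0xd5;	/* invented GEM_DCFG6 value */
	unsigned int queue_mask, num_queues, hw_q;

	queue_mask = 0x1;		/* queue 0 always exists */
	queue_mask |= dcfg6 & 0xff;	/* hardware-implemented queues */
	num_queues = hweight32(queue_mask);

	printf("mask 0x%02x, %u queues\n", queue_mask, num_queues);

	for (hw_q = 0; hw_q < 8; hw_q++) {
		if (!(queue_mask & (1 << hw_q)))
			continue;	/* macb_init() skips missing queues */
		printf("initialising hw queue %u\n", hw_q);
	}
	return 0;
}
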
/Linux-v5.15/net/sched/

sch_taprio.c
   1204  u32 i, queue_mask = 0;  in tc_map_to_queue_mask()  local
   1215  queue_mask |= GENMASK(offset + count - 1, offset);  in tc_map_to_queue_mask()
   1218  return queue_mask;  in tc_map_to_queue_mask()

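tc_map_to_queue_mask() converts each traffic class's (offset, count) queue range into a run of set bits with GENMASK(). A sketch with a 32-bit variant of the kernel's GENMASK() from include/linux/bits.h, using made-up per-TC (offset, count) pairs:

#include <stdio.h>
#include <stdint.h>

/* 32-bit variant of the kernel's GENMASK(h, l): bits h down to l set */
#define GENMASK(h, l) \
	(((~0u) - (1u << (l)) + 1) & (~0u >> (31 - (h))))

int main(void)
{
	/* made-up layout: TC0 -> queues 0-1, TC1 -> queues 2-4 */
	unsigned int offset[] = { 0, 2 };
	unsigned int count[]  = { 2, 3 };
	uint32_t queue_mask = 0;
	unsigned int i;

	for (i = 0; i < 2; i++)
		queue_mask |= GENMASK(offset[i] + count[i] - 1, offset[i]);

	printf("queue_mask = 0x%08x\n", queue_mask);	/* prints 0x0000001f */
	return 0;
}
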
/Linux-v5.15/include/uapi/linux/

ethtool.h
   1393  __u32 queue_mask[__KERNEL_DIV_ROUND_UP(MAX_NUM_QUEUE, 32)];  member

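The UAPI stores the queue selection as an array of __u32 words, __KERNEL_DIV_ROUND_UP(MAX_NUM_QUEUE, 32) of them. A sketch of how userspace would mark queue q in that array (q is an arbitrary example; the rest of the containing struct is omitted):

#include <stdio.h>
#include <stdint.h>

#define MAX_NUM_QUEUE 4096
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t queue_mask[DIV_ROUND_UP(MAX_NUM_QUEUE, 32)] = { 0 };
	unsigned int q = 40;	/* arbitrary example queue */

	/* queue q lives in word q/32, at bit q%32 */
	queue_mask[q / 32] |= 1u << (q % 32);

	printf("word %u = 0x%08x\n", q / 32, queue_mask[q / 32]);
	return 0;
}
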
/Linux-v5.15/Documentation/networking/device_drivers/ethernet/intel/

ice.rst
    968  # ethtool --per-queue <ethX> queue_mask 0xa --coalesce adaptive-rx off
    973  # ethtool --per-queue <ethX> queue_mask 0xa --show-coalesce

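The queue_mask argument in the ice examples is the same bit-per-queue selection: 0xa is binary 1010, i.e. queues 1 and 3. A quick sketch decoding that value:

#include <stdio.h>

int main(void)
{
	unsigned int queue_mask = 0xa;	/* the mask from the ice.rst example */
	unsigned int q;

	for (q = 0; q < 32; q++)
		if (queue_mask & (1u << q))
			printf("queue %u\n", q);	/* prints 1 and 3 */
	return 0;
}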