/Linux-v5.4/drivers/net/ethernet/sfc/falcon/

D | tx.c
      25  static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue,  in ef4_tx_get_copy_buffer() argument
      28  unsigned int index = ef4_tx_queue_get_insert_index(tx_queue);  in ef4_tx_get_copy_buffer()
      30  &tx_queue->cb_page[index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)];  in ef4_tx_get_copy_buffer()
      35  ef4_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,  in ef4_tx_get_copy_buffer()
      43  u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,  in ef4_tx_get_copy_buffer_limited() argument
      48  return ef4_tx_get_copy_buffer(tx_queue, buffer);  in ef4_tx_get_copy_buffer_limited()
      51  static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue,  in ef4_dequeue_buffer() argument
      57  struct device *dma_dev = &tx_queue->efx->pci_dev->dev;  in ef4_dequeue_buffer()
      72  netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,  in ef4_dequeue_buffer()
      74  tx_queue->queue, tx_queue->read_count);  in ef4_dequeue_buffer()
      [all …]

D | nic.h
      63  ef4_tx_desc(struct ef4_tx_queue *tx_queue, unsigned int index)  in ef4_tx_desc() argument
      65  return ((ef4_qword_t *) (tx_queue->txd.buf.addr)) + index;  in ef4_tx_desc()
      69  static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_queue)  in ef4_tx_queue_partner() argument
      71  if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)  in ef4_tx_queue_partner()
      72  return tx_queue - EF4_TXQ_TYPE_OFFLOAD;  in ef4_tx_queue_partner()
      74  return tx_queue + EF4_TXQ_TYPE_OFFLOAD;  in ef4_tx_queue_partner()
      80  static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue,  in __ef4_nic_tx_is_empty() argument
      83  unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);  in __ef4_nic_tx_is_empty()
      99  static inline bool ef4_nic_may_push_tx_desc(struct ef4_tx_queue *tx_queue,  in ef4_nic_may_push_tx_desc() argument
     102  bool was_empty = __ef4_nic_tx_is_empty(tx_queue, write_count);  in ef4_nic_may_push_tx_desc()
      [all …]

D | farch.c
     272  static inline void ef4_farch_notify_tx_desc(struct ef4_tx_queue *tx_queue)  in ef4_farch_notify_tx_desc() argument
     277  write_ptr = tx_queue->write_count & tx_queue->ptr_mask;  in ef4_farch_notify_tx_desc()
     279  ef4_writed_page(tx_queue->efx, &reg,  in ef4_farch_notify_tx_desc()
     280  FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);  in ef4_farch_notify_tx_desc()
     284  static inline void ef4_farch_push_tx_desc(struct ef4_tx_queue *tx_queue,  in ef4_farch_push_tx_desc() argument
     293  write_ptr = tx_queue->write_count & tx_queue->ptr_mask;  in ef4_farch_push_tx_desc()
     297  ef4_writeo_page(tx_queue->efx, &reg,  in ef4_farch_push_tx_desc()
     298  FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);  in ef4_farch_push_tx_desc()
     306  void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue)  in ef4_farch_tx_write() argument
     311  unsigned old_write_count = tx_queue->write_count;  in ef4_farch_tx_write()
      [all …]

D | net_driver.h
     445  struct ef4_tx_queue tx_queue[EF4_TXQ_TYPES];  member
    1082  int (*tx_probe)(struct ef4_tx_queue *tx_queue);
    1083  void (*tx_init)(struct ef4_tx_queue *tx_queue);
    1084  void (*tx_remove)(struct ef4_tx_queue *tx_queue);
    1085  void (*tx_write)(struct ef4_tx_queue *tx_queue);
    1086  unsigned int (*tx_limit_len)(struct ef4_tx_queue *tx_queue,
    1192  return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];  in ef4_get_tx_queue()
    1206  return &channel->tx_queue[type];  in ef4_channel_get_tx_queue()
    1209  static inline bool ef4_tx_queue_used(struct ef4_tx_queue *tx_queue)  in ef4_tx_queue_used() argument
    1211  return !(tx_queue->efx->net_dev->num_tc < 2 &&  in ef4_tx_queue_used()
      [all …]

D | selftest.c
     410  static int ef4_begin_loopback(struct ef4_tx_queue *tx_queue)  in ef4_begin_loopback() argument
     412  struct ef4_nic *efx = tx_queue->efx;  in ef4_begin_loopback()
     440  rc = ef4_enqueue_skb(tx_queue, skb);  in ef4_begin_loopback()
     446  "%d in %s loopback test\n", tx_queue->queue,  in ef4_begin_loopback()
     466  static int ef4_end_loopback(struct ef4_tx_queue *tx_queue,  in ef4_end_loopback() argument
     469  struct ef4_nic *efx = tx_queue->efx;  in ef4_end_loopback()
     498  tx_queue->queue, tx_done, state->packet_count,  in ef4_end_loopback()
     509  tx_queue->queue, rx_good, state->packet_count,  in ef4_end_loopback()
     516  lb_tests->tx_sent[tx_queue->queue] += state->packet_count;  in ef4_end_loopback()
     517  lb_tests->tx_done[tx_queue->queue] += tx_done;  in ef4_end_loopback()
      [all …]

D | tx.h
      15  unsigned int ef4_tx_limit_len(struct ef4_tx_queue *tx_queue,
      18  u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
      21  int ef4_enqueue_skb_tso(struct ef4_tx_queue *tx_queue, struct sk_buff *skb,

D | efx.h
      23  int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue);
      24  void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue);
      25  void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue);
      26  void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue);
      27  void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue);
      30  netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb);
      31  void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index);

D | efx.c
     240  struct ef4_tx_queue *tx_queue;  in ef4_process_channel() local
     246  ef4_for_each_channel_tx_queue(tx_queue, channel) {  in ef4_process_channel()
     247  tx_queue->pkts_compl = 0;  in ef4_process_channel()
     248  tx_queue->bytes_compl = 0;  in ef4_process_channel()
     261  ef4_for_each_channel_tx_queue(tx_queue, channel) {  in ef4_process_channel()
     262  if (tx_queue->bytes_compl) {  in ef4_process_channel()
     263  netdev_tx_completed_queue(tx_queue->core_txq,  in ef4_process_channel()
     264  tx_queue->pkts_compl, tx_queue->bytes_compl);  in ef4_process_channel()
     429  struct ef4_tx_queue *tx_queue;  in ef4_alloc_channel() local
     441  tx_queue = &channel->tx_queue[j];  in ef4_alloc_channel()
      [all …]
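The falcon listing above centers on two idioms: ef4_tx_get_copy_buffer() packs many small copy buffers into each DMA page and finds the right page with index >> (PAGE_SHIFT - EF4_TX_CB_ORDER), and ef4_tx_queue_partner() flips between a queue and its offload partner by adding or subtracting the type flag. Below is a minimal user-space sketch of the page-packing arithmetic only; the constants and helper names are hypothetical stand-ins, not the driver's.

    #include <stdio.h>

    /* Hypothetical stand-ins: the real driver derives these from PAGE_SHIFT
     * and EF4_TX_CB_ORDER (copy buffers of 2^EF4_TX_CB_ORDER bytes each). */
    #define PAGE_SHIFT   12          /* 4 KiB pages */
    #define TX_CB_ORDER  7           /* 128-byte copy buffers */
    #define TX_CB_SIZE   (1u << TX_CB_ORDER)
    #define CBS_PER_PAGE (1u << (PAGE_SHIFT - TX_CB_ORDER))  /* 32 per page */

    /* Map a descriptor index to (page, offset-within-page), mirroring the
     * cb_page[index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)] lookup in tx.c. */
    static void cb_locate(unsigned int index,
                          unsigned int *page, unsigned int *offset)
    {
            *page   = index >> (PAGE_SHIFT - TX_CB_ORDER);
            *offset = (index & (CBS_PER_PAGE - 1)) << TX_CB_ORDER;
    }

    int main(void)
    {
            unsigned int page, offset;

            cb_locate(37, &page, &offset);
            /* 37 / 32 = page 1; (37 % 32) * 128 = byte offset 640 */
            printf("descriptor 37 -> page %u, byte offset %u\n", page, offset);
            return 0;
    }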
/Linux-v5.4/drivers/net/ethernet/sfc/

D | tx.c
      33  static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,  in efx_tx_get_copy_buffer() argument
      36  unsigned int index = efx_tx_queue_get_insert_index(tx_queue);  in efx_tx_get_copy_buffer()
      38  &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];  in efx_tx_get_copy_buffer()
      43  efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,  in efx_tx_get_copy_buffer()
      51  u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,  in efx_tx_get_copy_buffer_limited() argument
      56  return efx_tx_get_copy_buffer(tx_queue, buffer);  in efx_tx_get_copy_buffer_limited()
      59  static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,  in efx_dequeue_buffer() argument
      65  struct device *dma_dev = &tx_queue->efx->pci_dev->dev;  in efx_dequeue_buffer()
      82  if (tx_queue->timestamping &&  in efx_dequeue_buffer()
      83  (tx_queue->completed_timestamp_major ||  in efx_dequeue_buffer()
      [all …]

D | tx_tso.c
      79  static inline void prefetch_ptr(struct efx_tx_queue *tx_queue)  in prefetch_ptr() argument
      81  unsigned int insert_ptr = efx_tx_queue_get_insert_index(tx_queue);  in prefetch_ptr()
      84  ptr = (char *) (tx_queue->buffer + insert_ptr);  in prefetch_ptr()
      88  ptr = (char *) (((efx_qword_t *)tx_queue->txd.buf.addr) + insert_ptr);  in prefetch_ptr()
     102  static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,  in efx_tx_queue_insert() argument
     112  buffer = efx_tx_queue_get_insert_buffer(tx_queue);  in efx_tx_queue_insert()
     113  ++tx_queue->insert_count;  in efx_tx_queue_insert()
     115  EFX_WARN_ON_ONCE_PARANOID(tx_queue->insert_count -  in efx_tx_queue_insert()
     116  tx_queue->read_count >=  in efx_tx_queue_insert()
     117  tx_queue->efx->txq_entries);  in efx_tx_queue_insert()
      [all …]

D | nic.h
      61  efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)  in efx_tx_desc() argument
      63  return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;  in efx_tx_desc()
      67  static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)  in efx_tx_queue_partner() argument
      69  if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)  in efx_tx_queue_partner()
      70  return tx_queue - EFX_TXQ_TYPE_OFFLOAD;  in efx_tx_queue_partner()
      72  return tx_queue + EFX_TXQ_TYPE_OFFLOAD;  in efx_tx_queue_partner()
      78  static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,  in __efx_nic_tx_is_empty() argument
      81  unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);  in __efx_nic_tx_is_empty()
      94  static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)  in efx_nic_tx_is_empty() argument
      96  EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors);  in efx_nic_tx_is_empty()
      [all …]

D | farch.c
     281  static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)  in efx_farch_notify_tx_desc() argument
     286  write_ptr = tx_queue->write_count & tx_queue->ptr_mask;  in efx_farch_notify_tx_desc()
     288  efx_writed_page(tx_queue->efx, &reg,  in efx_farch_notify_tx_desc()
     289  FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);  in efx_farch_notify_tx_desc()
     293  static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,  in efx_farch_push_tx_desc() argument
     302  write_ptr = tx_queue->write_count & tx_queue->ptr_mask;  in efx_farch_push_tx_desc()
     306  efx_writeo_page(tx_queue->efx, &reg,  in efx_farch_push_tx_desc()
     307  FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);  in efx_farch_push_tx_desc()
     315  void efx_farch_tx_write(struct efx_tx_queue *tx_queue)  in efx_farch_tx_write() argument
     320  unsigned old_write_count = tx_queue->write_count;  in efx_farch_tx_write()
      [all …]

D | selftest.c
     410  static int efx_begin_loopback(struct efx_tx_queue *tx_queue)  in efx_begin_loopback() argument
     412  struct efx_nic *efx = tx_queue->efx;  in efx_begin_loopback()
     440  rc = efx_enqueue_skb(tx_queue, skb);  in efx_begin_loopback()
     446  "%d in %s loopback test\n", tx_queue->queue,  in efx_begin_loopback()
     466  static int efx_end_loopback(struct efx_tx_queue *tx_queue,  in efx_end_loopback() argument
     469  struct efx_nic *efx = tx_queue->efx;  in efx_end_loopback()
     498  tx_queue->queue, tx_done, state->packet_count,  in efx_end_loopback()
     509  tx_queue->queue, rx_good, state->packet_count,  in efx_end_loopback()
     516  lb_tests->tx_sent[tx_queue->queue] += state->packet_count;  in efx_end_loopback()
     517  lb_tests->tx_done[tx_queue->queue] += tx_done;  in efx_end_loopback()
      [all …]

D | net_driver.h
     504  struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];  member
    1321  int (*tx_probe)(struct efx_tx_queue *tx_queue);
    1322  void (*tx_init)(struct efx_tx_queue *tx_queue);
    1323  void (*tx_remove)(struct efx_tx_queue *tx_queue);
    1324  void (*tx_write)(struct efx_tx_queue *tx_queue);
    1325  unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue,
    1473  return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];  in efx_get_tx_queue()
    1487  return &channel->tx_queue[type];  in efx_channel_get_tx_queue()
    1490  static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)  in efx_tx_queue_used() argument
    1492  return !(tx_queue->efx->net_dev->num_tc < 2 &&  in efx_tx_queue_used()
      [all …]

D | tx.h
      15  unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
      18  u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
      21  int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,

D | efx.h
      18  int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
      19  void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
      20  void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
      21  void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
      22  void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
      25  netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
      26  void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);

D | ef10.c
     916  struct efx_tx_queue *tx_queue;  in efx_ef10_link_piobufs() local
     951  efx_for_each_channel_tx_queue(tx_queue, channel) {  in efx_ef10_link_piobufs()
     957  tx_queue->channel->channel - 1) *  in efx_ef10_link_piobufs()
     967  if (tx_queue->queue == nic_data->pio_write_vi_base) {  in efx_ef10_link_piobufs()
     976  tx_queue->queue);  in efx_ef10_link_piobufs()
     988  tx_queue->queue, index, rc);  in efx_ef10_link_piobufs()
     989  tx_queue->piobuf = NULL;  in efx_ef10_link_piobufs()
     991  tx_queue->piobuf =  in efx_ef10_link_piobufs()
     994  tx_queue->piobuf_offset = offset;  in efx_ef10_link_piobufs()
     997  tx_queue->queue, index,  in efx_ef10_link_piobufs()
      [all …]

D | efx.c
     263  struct efx_tx_queue *tx_queue;  in efx_process_channel() local
     275  efx_for_each_channel_tx_queue(tx_queue, channel) {  in efx_process_channel()
     276  tx_queue->pkts_compl = 0;  in efx_process_channel()
     277  tx_queue->bytes_compl = 0;  in efx_process_channel()
     290  efx_for_each_channel_tx_queue(tx_queue, channel) {  in efx_process_channel()
     291  if (tx_queue->bytes_compl) {  in efx_process_channel()
     292  netdev_tx_completed_queue(tx_queue->core_txq,  in efx_process_channel()
     293  tx_queue->pkts_compl, tx_queue->bytes_compl);  in efx_process_channel()
     465  struct efx_tx_queue *tx_queue;  in efx_alloc_channel() local
     477  tx_queue = &channel->tx_queue[j];  in efx_alloc_channel()
      [all …]
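The sfc fragments repeat one counter-based ring idiom: insert_count, write_count and read_count grow without bound, the queue is sized to a power of two, a hardware pointer is recovered by masking with ptr_mask (farch.c lines 286 and 302), and emptiness is judged from a READ_ONCE snapshot of empty_read_count (nic.h). A small sketch of that bookkeeping under the same power-of-two assumption; names are illustrative, not the driver's:

    #include <stdbool.h>

    /* Monotonic counters masked only at the point of use. */
    struct toy_txq {
            unsigned int entries;    /* power of two */
            unsigned int ptr_mask;   /* entries - 1 */
            unsigned int write_count;
            unsigned int read_count;
    };

    static inline unsigned int toy_fill_level(const struct toy_txq *q)
    {
            /* Correct even after the counters wrap, as long as the queue
             * never holds more than 'entries' descriptors at once. */
            return q->write_count - q->read_count;
    }

    static inline bool toy_is_empty(const struct toy_txq *q)
    {
            return toy_fill_level(q) == 0;
    }

    static inline unsigned int toy_write_ptr(const struct toy_txq *q)
    {
            /* Analogue of: write_ptr = tx_queue->write_count & tx_queue->ptr_mask */
            return q->write_count & q->ptr_mask;
    }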
/Linux-v5.4/drivers/net/ethernet/freescale/

D | gianfar.c
     136  gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);  in gfar_init_tx_rx_base()
     254  if (likely(priv->tx_queue[i]->txcoalescing))  in gfar_configure_coalescing()
     255  gfar_write(baddr + i, priv->tx_queue[i]->txic);  in gfar_configure_coalescing()
     269  if (likely(priv->tx_queue[0]->txcoalescing))  in gfar_configure_coalescing()
     270  gfar_write(&regs->txic, priv->tx_queue[0]->txic);  in gfar_configure_coalescing()
     301  tx_bytes += priv->tx_queue[i]->stats.tx_bytes;  in gfar_get_stats()
     302  tx_packets += priv->tx_queue[i]->stats.tx_packets;  in gfar_get_stats()
     404  priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),  in gfar_alloc_tx_queues()
     406  if (!priv->tx_queue[i])  in gfar_alloc_tx_queues()
     409  priv->tx_queue[i]->tx_skbuff = NULL;  in gfar_alloc_tx_queues()
      [all …]
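gianfar keeps one heap-allocated gfar_priv_tx_q per hardware queue and sums the per-queue counters on demand in gfar_get_stats() (lines 301-302 above). A simplified user-space model of that allocate-then-aggregate pattern; the struct layout and queue count are hypothetical:

    #include <stdlib.h>

    struct toy_txq_stats {
            unsigned long tx_bytes;
            unsigned long tx_packets;
    };

    struct toy_priv {
            struct toy_txq_stats *txq[8];  /* one block per TX queue */
            int num_tx_queues;
    };

    /* Zero-initialized per-queue allocation, as gfar_alloc_tx_queues()
     * does with kzalloc(); the caller unwinds on failure. */
    static int toy_alloc_tx_queues(struct toy_priv *priv, int n)
    {
            priv->num_tx_queues = n;
            for (int i = 0; i < n; i++) {
                    priv->txq[i] = calloc(1, sizeof(*priv->txq[i]));
                    if (!priv->txq[i])
                            return -1;
            }
            return 0;
    }

    /* Aggregate on demand, as gfar_get_stats() does. */
    static void toy_get_stats(const struct toy_priv *priv,
                              unsigned long *bytes, unsigned long *pkts)
    {
            *bytes = *pkts = 0;
            for (int i = 0; i < priv->num_tx_queues; i++) {
                    *bytes += priv->txq[i]->tx_bytes;
                    *pkts  += priv->txq[i]->tx_packets;
            }
    }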
/Linux-v5.4/drivers/net/wireless/rsi/

D | rsi_91x_core.c
      36  q_len = skb_queue_len(&common->tx_queue[ii]);  in rsi_determine_min_weight_queue()
      60  q_len = skb_queue_len(&common->tx_queue[ii]);  in rsi_recalculate_weights()
     106  if (skb_queue_len(&common->tx_queue[q_num]))  in rsi_get_num_pkts_dequeue()
     107  skb = skb_peek(&common->tx_queue[q_num]);  in rsi_get_num_pkts_dequeue()
     121  if (skb_queue_len(&common->tx_queue[q_num]) - pkt_cnt)  in rsi_get_num_pkts_dequeue()
     145  if (skb_queue_len(&common->tx_queue[MGMT_BEACON_Q])) {  in rsi_core_determine_hal_queue()
     149  if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {  in rsi_core_determine_hal_queue()
     172  q_len = skb_queue_len(&common->tx_queue[ii]);  in rsi_core_determine_hal_queue()
     187  q_len = skb_queue_len(&common->tx_queue[q_num]);  in rsi_core_determine_hal_queue()
     201  q_len = skb_queue_len(&common->tx_queue[q_num]);  in rsi_core_determine_hal_queue()
      [all …]

D | rsi_91x_debugfs.c
     146  skb_queue_len(&common->tx_queue[MGMT_SOFT_Q]));  in rsi_stats_read()
     154  skb_queue_len(&common->tx_queue[VO_Q]));  in rsi_stats_read()
     160  skb_queue_len(&common->tx_queue[VI_Q]));  in rsi_stats_read()
     166  skb_queue_len(&common->tx_queue[BE_Q]));  in rsi_stats_read()
     172  skb_queue_len(&common->tx_queue[BK_Q]));  in rsi_stats_read()
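rsi_determine_min_weight_queue() scans the per-access-category skb queues, ignores empty ones (skb_queue_len() == 0) and picks the queue with the lowest weight. A sketch of that selection loop, with the driver's sk_buff_head array and per-queue wme parameters reduced to plain counters:

    #include <limits.h>

    #define NUM_EDCA_QUEUES 4   /* BK/BE/VI/VO, as in the debugfs dump above */

    struct toy_common {
            unsigned int q_len[NUM_EDCA_QUEUES];   /* skb_queue_len() stand-in */
            unsigned int weight[NUM_EDCA_QUEUES];  /* lower = more urgent */
    };

    static int toy_min_weight_queue(const struct toy_common *c)
    {
            unsigned int best = UINT_MAX;
            int q_num = -1;   /* -1: nothing pending on any queue */

            for (int ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
                    if (!c->q_len[ii])
                            continue;   /* empty queues don't compete */
                    if (c->weight[ii] < best) {
                            best = c->weight[ii];
                            q_num = ii;
                    }
            }
            return q_num;
    }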
/Linux-v5.4/drivers/net/wireless/ath/ath5k/

D | dma.c
     132  u32 tx_queue;  in ath5k_hw_start_tx_dma() local
     141  tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);  in ath5k_hw_start_tx_dma()
     148  tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;  in ath5k_hw_start_tx_dma()
     151  tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;  in ath5k_hw_start_tx_dma()
     156  tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;  in ath5k_hw_start_tx_dma()
     164  ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);  in ath5k_hw_start_tx_dma()
     191  u32 tx_queue, pending;  in ath5k_hw_stop_tx_dma() local
     200  tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);  in ath5k_hw_stop_tx_dma()
     207  tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;  in ath5k_hw_stop_tx_dma()
     212  tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;  in ath5k_hw_stop_tx_dma()
      [all …]
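ath5k starts and stops TX DMA with a read-modify-write of the AR5K_CR control register, ORing in masks like AR5K_CR_TXE0 & ~AR5K_CR_TXD0 to raise a queue's enable bit without its disable bit. (As quoted, line 212 ORs in AR5K_CR_TXD1 & ~AR5K_CR_TXD1, which is constant zero, so that statement changes nothing.) Below is a sketch of the apparent two-step intent against a fake register file; register offsets, bit values and accessors are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define CR       0x0008u        /* control register offset (hypothetical) */
    #define CR_TXE0  0x00000001u    /* queue 0: enable TX DMA (hypothetical) */
    #define CR_TXD0  0x00000008u    /* queue 0: disable TX DMA (hypothetical) */

    static uint32_t fake_regs[64];  /* stand-in for device MMIO space */

    static uint32_t reg_read(uint32_t addr)          { return fake_regs[addr / 4]; }
    static void reg_write(uint32_t addr, uint32_t v) { fake_regs[addr / 4] = v; }

    static void toy_start_tx_dma_q0(void)
    {
            uint32_t cr = reg_read(CR);   /* read-modify-write, as in dma.c */

            cr |= CR_TXE0;                /* request enable for queue 0 */
            cr &= ~CR_TXD0;               /* clear any pending disable request */
            reg_write(CR, cr);
    }

    int main(void)
    {
            fake_regs[CR / 4] = CR_TXD0;  /* pretend a disable was pending */
            toy_start_tx_dma_q0();
            printf("CR = 0x%08x\n", reg_read(CR));  /* -> 0x00000001 */
            return 0;
    }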
/Linux-v5.4/drivers/net/wireless/intersil/p54/

D | txrx.c
      38  spin_lock_irqsave(&priv->tx_queue.lock, flags);  in p54_dump_tx_queue()
      40  skb_queue_len(&priv->tx_queue));  in p54_dump_tx_queue()
      43  skb_queue_walk(&priv->tx_queue, skb) {  in p54_dump_tx_queue()
      66  spin_unlock_irqrestore(&priv->tx_queue.lock, flags);  in p54_dump_tx_queue()
      94  spin_lock_irqsave(&priv->tx_queue.lock, flags);  in p54_assign_address()
      95  if (unlikely(skb_queue_len(&priv->tx_queue) == 32)) {  in p54_assign_address()
     101  spin_unlock_irqrestore(&priv->tx_queue.lock, flags);  in p54_assign_address()
     105  skb_queue_walk(&priv->tx_queue, entry) {  in p54_assign_address()
     121  target_skb = skb_peek_tail(&priv->tx_queue);  in p54_assign_address()
     128  spin_unlock_irqrestore(&priv->tx_queue.lock, flags);  in p54_assign_address()
      [all …]
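p54 serializes every walk of its tx_queue behind the queue's embedded lock: spin_lock_irqsave(), then skb_queue_walk() or skb_peek_tail(), then spin_unlock_irqrestore(). A user-space analogue of that locked-walk discipline, with a pthread mutex standing in for the spinlock and a plain list for the sk_buff_head:

    #include <pthread.h>
    #include <stdio.h>

    struct toy_entry {
            struct toy_entry *next;
            unsigned int len;
    };

    struct toy_queue {
            pthread_mutex_t lock;    /* sk_buff_head embeds its lock, too */
            struct toy_entry *head;
            unsigned int qlen;
    };

    static void toy_dump_queue(struct toy_queue *q)
    {
            pthread_mutex_lock(&q->lock);        /* spin_lock_irqsave() stand-in */
            printf("%u entries queued\n", q->qlen);
            for (struct toy_entry *e = q->head; e; e = e->next)
                    printf("  entry of %u bytes\n", e->len);
            pthread_mutex_unlock(&q->lock);      /* spin_unlock_irqrestore() */
    }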
/Linux-v5.4/net/nfc/

D | llcp_commands.c
     352  skb_queue_tail(&local->tx_queue, skb);  in nfc_llcp_send_disconnect()
     449  skb_queue_tail(&local->tx_queue, skb);  in nfc_llcp_send_connect()
     509  skb_queue_tail(&local->tx_queue, skb);  in nfc_llcp_send_cc()
     571  skb_queue_tail(&local->tx_queue, skb);  in nfc_llcp_send_snl_sdres()
     605  skb_queue_tail(&local->tx_queue, skb);  in nfc_llcp_send_snl_sdreq()
     638  skb_queue_head(&local->tx_queue, skb);  in nfc_llcp_send_dm()
     662  skb_queue_len(&sock->tx_queue) >= 2 * sock->remote_rw)) {  in nfc_llcp_send_i_frame()
     670  skb_queue_len(&sock->tx_queue) >= 2 * sock->remote_rw)) {  in nfc_llcp_send_i_frame()
     672  skb_queue_len(&sock->tx_queue));  in nfc_llcp_send_i_frame()
     709  skb_queue_tail(&sock->tx_queue, pdu);  in nfc_llcp_send_i_frame()
      [all …]
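The LLCP code queues most control PDUs with skb_queue_tail(), pushes DM to the head of the line with skb_queue_head(), and throttles I-frames once the socket backlog reaches twice the remote receive window (remote_rw). A tiny sketch of that window check, with the socket fields reduced to plain counters:

    #include <stdbool.h>

    struct toy_sock {
            unsigned int tx_qlen;    /* skb_queue_len() stand-in */
            unsigned int remote_rw;  /* peer's advertised receive window */
    };

    /* Mirrors the guard: skb_queue_len(&sock->tx_queue) >= 2 * sock->remote_rw
     * blocks (or fails) the send until completions drain the backlog. */
    static bool toy_may_queue_i_frame(const struct toy_sock *sk)
    {
            return sk->tx_qlen < 2 * sk->remote_rw;
    }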
/Linux-v5.4/drivers/net/ethernet/qlogic/qed/

D | qed_ll2.c
      67  #define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registered)
     309  p_tx = &p_ll2_conn->tx_queue;  in qed_ll2_txq_flush()
     349  struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;  in qed_ll2_txq_completion()
     866  struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;  in qed_ll2_lb_txq_completion()
    1006  struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;  in qed_sp_ll2_tx_queue_start()
    1127  struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;  in qed_sp_ll2_tx_queue_stop()
    1218  &p_ll2_info->tx_queue.txq_chain, NULL);  in qed_ll2_acquire_connection_tx()
    1222  capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);  in qed_ll2_acquire_connection_tx()
    1233  p_ll2_info->tx_queue.descq_mem = p_descq;  in qed_ll2_acquire_connection_tx()
    1432  &p_ll2_info->tx_queue.tx_sb_index,  in qed_ll2_acquire_connection()
      [all …]
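qed sizes the LL2 TX shadow descriptor array from the hardware chain's capacity at acquire time (qed_chain_get_capacity() feeding descq_mem above) and gates callbacks on the b_cb_registered flag wrapped by QED_LL2_TX_REGISTERED(). A rough model of that acquire step with invented types; only the shape of the bookkeeping is taken from the fragments:

    #include <stdlib.h>

    struct toy_tx_desc { void *cookie; };

    struct toy_tx_queue {
            unsigned int capacity;         /* chain capacity stand-in */
            struct toy_tx_desc *descq_mem; /* one SW descriptor per element */
            int b_cb_registered;           /* see QED_LL2_TX_REGISTERED() */
    };

    #define TOY_TX_REGISTERED(q) ((q)->b_cb_registered)

    static int toy_acquire_tx(struct toy_tx_queue *q, unsigned int capacity)
    {
            struct toy_tx_desc *p_descq;

            /* Allocate shadow state for every element the chain can hold. */
            p_descq = calloc(capacity, sizeof(*p_descq));
            if (!p_descq)
                    return -1;

            q->capacity  = capacity;
            q->descq_mem = p_descq;
            return 0;
    }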