Lines matching full:trans — each entry shows the file line number, the matching source line, and the enclosing function; lines that declare trans are additionally flagged as argument or local.

65 void iwl_txq_gen2_tx_stop(struct iwl_trans *trans) in iwl_txq_gen2_tx_stop() argument
74 memset(trans->txqs.queue_stopped, 0, in iwl_txq_gen2_tx_stop()
75 sizeof(trans->txqs.queue_stopped)); in iwl_txq_gen2_tx_stop()
76 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); in iwl_txq_gen2_tx_stop()
79 for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) { in iwl_txq_gen2_tx_stop()
80 if (!trans->txqs.txq[txq_id]) in iwl_txq_gen2_tx_stop()
82 iwl_txq_gen2_unmap(trans, txq_id); in iwl_txq_gen2_tx_stop()
89 static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, in iwl_pcie_gen2_update_byte_tbl() argument
113 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { in iwl_pcie_gen2_update_byte_tbl()
117 WARN_ON(trans->txqs.bc_table_dword); in iwl_pcie_gen2_update_byte_tbl()
125 WARN_ON(!trans->txqs.bc_table_dword); in iwl_pcie_gen2_update_byte_tbl()
136 void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_inc_wr_ptr() argument
140 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); in iwl_txq_inc_wr_ptr()
146 iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); in iwl_txq_inc_wr_ptr()
149 static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans, in iwl_txq_gen2_get_num_tbs() argument
155 void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, in iwl_txq_gen2_tfd_unmap() argument
161 num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd); in iwl_txq_gen2_tfd_unmap()
163 if (num_tbs > trans->txqs.tfd.max_tbs) { in iwl_txq_gen2_tfd_unmap()
164 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); in iwl_txq_gen2_tfd_unmap()
171 dma_unmap_page(trans->dev, in iwl_txq_gen2_tfd_unmap()
176 dma_unmap_single(trans->dev, in iwl_txq_gen2_tfd_unmap()
185 void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_gen2_free_tfd() argument
194 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, in iwl_txq_gen2_free_tfd()
195 iwl_txq_get_tfd(trans, txq, idx)); in iwl_txq_gen2_free_tfd()
208 iwl_op_mode_free_skb(trans->op_mode, skb); in iwl_txq_gen2_free_tfd()
214 int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, in iwl_txq_gen2_set_tb() argument
217 int idx = iwl_txq_gen2_get_num_tbs(trans, tfd); in iwl_txq_gen2_set_tb()
236 if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) { in iwl_txq_gen2_set_tb()
237 IWL_ERR(trans, "Error can not send more than %d chunks\n", in iwl_txq_gen2_set_tb()
238 trans->txqs.tfd.max_tbs); in iwl_txq_gen2_set_tb()
250 static struct page *get_workaround_page(struct iwl_trans *trans, in get_workaround_page() argument
256 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); in get_workaround_page()
275 static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, in iwl_txq_gen2_set_tb_with_wa() argument
285 if (unlikely(dma_mapping_error(trans->dev, phys))) in iwl_txq_gen2_set_tb_with_wa()
289 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); in iwl_txq_gen2_set_tb_with_wa()
315 page = get_workaround_page(trans, skb); in iwl_txq_gen2_set_tb_with_wa()
323 phys = dma_map_single(trans->dev, page_address(page), len, in iwl_txq_gen2_set_tb_with_wa()
325 if (unlikely(dma_mapping_error(trans->dev, phys))) in iwl_txq_gen2_set_tb_with_wa()
327 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); in iwl_txq_gen2_set_tb_with_wa()
334 IWL_WARN(trans, in iwl_txq_gen2_set_tb_with_wa()
341 dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); in iwl_txq_gen2_set_tb_with_wa()
343 dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); in iwl_txq_gen2_set_tb_with_wa()
345 trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); in iwl_txq_gen2_set_tb_with_wa()
351 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, in get_page_hdr() argument
354 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page); in get_page_hdr()
357 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); in get_page_hdr()
396 static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, in iwl_txq_gen2_build_amsdu() argument
412 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), in iwl_txq_gen2_build_amsdu()
425 hdr_page = get_page_hdr(trans, hdr_room, skb); in iwl_txq_gen2_build_amsdu()
478 tb_phys = dma_map_single(trans->dev, start_hdr, in iwl_txq_gen2_build_amsdu()
480 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { in iwl_txq_gen2_build_amsdu()
489 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len); in iwl_txq_gen2_build_amsdu()
490 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, in iwl_txq_gen2_build_amsdu()
503 tb_phys = dma_map_single(trans->dev, tso.data, in iwl_txq_gen2_build_amsdu()
505 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, in iwl_txq_gen2_build_amsdu()
529 iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans, in iwl_txq_gen2_build_tx_amsdu() argument
538 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tx_amsdu()
550 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); in iwl_txq_gen2_build_tx_amsdu()
565 tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); in iwl_txq_gen2_build_tx_amsdu()
566 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) in iwl_txq_gen2_build_tx_amsdu()
572 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len); in iwl_txq_gen2_build_tx_amsdu()
574 if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE, in iwl_txq_gen2_build_tx_amsdu()
583 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); in iwl_txq_gen2_build_tx_amsdu()
587 static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans, in iwl_txq_gen2_tx_add_frags() argument
603 tb_phys = skb_frag_dma_map(trans->dev, frag, 0, in iwl_txq_gen2_tx_add_frags()
605 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, in iwl_txq_gen2_tx_add_frags()
616 iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, in iwl_txq_gen2_build_tx() argument
626 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tx()
642 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); in iwl_txq_gen2_build_tx()
660 tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); in iwl_txq_gen2_build_tx()
661 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) in iwl_txq_gen2_build_tx()
667 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len); in iwl_txq_gen2_build_tx()
668 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, in iwl_txq_gen2_build_tx()
677 tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, in iwl_txq_gen2_build_tx()
679 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, in iwl_txq_gen2_build_tx()
686 if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta)) in iwl_txq_gen2_build_tx()
692 tb_phys = dma_map_single(trans->dev, frag->data, in iwl_txq_gen2_build_tx()
694 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, in iwl_txq_gen2_build_tx()
699 if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta)) in iwl_txq_gen2_build_tx()
706 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); in iwl_txq_gen2_build_tx()
711 struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans, in iwl_txq_gen2_build_tfd() argument
719 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tfd()
728 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) in iwl_txq_gen2_build_tfd()
745 return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, in iwl_txq_gen2_build_tfd()
747 return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, in iwl_txq_gen2_build_tfd()
751 int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q) in iwl_txq_space() argument
762 if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) in iwl_txq_space()
765 max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; in iwl_txq_space()
772 (trans->trans_cfg->base_params->max_tfd_queue_size - 1); in iwl_txq_space()
780 int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, in iwl_txq_gen2_tx() argument
784 struct iwl_txq *txq = trans->txqs.txq[txq_id]; in iwl_txq_gen2_tx()
793 if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), in iwl_txq_gen2_tx()
798 skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && in iwl_txq_gen2_tx()
804 if (iwl_txq_space(trans, txq) < txq->high_mark) { in iwl_txq_gen2_tx()
805 iwl_txq_stop(trans, txq); in iwl_txq_gen2_tx()
808 if (unlikely(iwl_txq_space(trans, txq) < 3)) { in iwl_txq_gen2_tx()
812 trans->txqs.dev_cmd_offs); in iwl_txq_gen2_tx()
835 tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); in iwl_txq_gen2_tx()
841 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { in iwl_txq_gen2_tx()
854 iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len, in iwl_txq_gen2_tx()
855 iwl_txq_gen2_get_num_tbs(trans, tfd)); in iwl_txq_gen2_tx()
862 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); in iwl_txq_gen2_tx()
863 iwl_txq_inc_wr_ptr(trans, txq); in iwl_txq_gen2_tx()
877 void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id) in iwl_txq_gen2_unmap() argument
879 struct iwl_txq *txq = trans->txqs.txq[txq_id]; in iwl_txq_gen2_unmap()
883 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", in iwl_txq_gen2_unmap()
886 if (txq_id != trans->txqs.cmd.q_id) { in iwl_txq_gen2_unmap()
893 iwl_txq_free_tso_page(trans, skb); in iwl_txq_gen2_unmap()
895 iwl_txq_gen2_free_tfd(trans, txq); in iwl_txq_gen2_unmap()
896 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); in iwl_txq_gen2_unmap()
902 iwl_op_mode_free_skb(trans->op_mode, skb); in iwl_txq_gen2_unmap()
908 iwl_wake_queue(trans, txq); in iwl_txq_gen2_unmap()
911 static void iwl_txq_gen2_free_memory(struct iwl_trans *trans, in iwl_txq_gen2_free_memory() argument
914 struct device *dev = trans->dev; in iwl_txq_gen2_free_memory()
919 trans->txqs.tfd.size * txq->n_window, in iwl_txq_gen2_free_memory()
928 dma_pool_free(trans->txqs.bc_pool, in iwl_txq_gen2_free_memory()
941 static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id) in iwl_txq_gen2_free() argument
950 txq = trans->txqs.txq[txq_id]; in iwl_txq_gen2_free()
955 iwl_txq_gen2_unmap(trans, txq_id); in iwl_txq_gen2_free()
958 if (txq_id == trans->txqs.cmd.q_id) in iwl_txq_gen2_free()
965 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_gen2_free()
967 trans->txqs.txq[txq_id] = NULL; in iwl_txq_gen2_free()
969 clear_bit(txq_id, trans->txqs.queue_used); in iwl_txq_gen2_free()
998 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, in iwl_txq_init() argument
1003 trans->trans_cfg->base_params->max_tfd_queue_size; in iwl_txq_init()
1032 void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb) in iwl_txq_free_tso_page() argument
1037 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); in iwl_txq_free_tso_page()
1050 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_log_scd_error() argument
1057 if (trans->trans_cfg->use_tfh) { in iwl_txq_log_scd_error()
1058 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, in iwl_txq_log_scd_error()
1064 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); in iwl_txq_log_scd_error()
1068 IWL_ERR(trans, in iwl_txq_log_scd_error()
1073 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & in iwl_txq_log_scd_error()
1074 (trans->trans_cfg->base_params->max_tfd_queue_size - 1), in iwl_txq_log_scd_error()
1075 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & in iwl_txq_log_scd_error()
1076 (trans->trans_cfg->base_params->max_tfd_queue_size - 1), in iwl_txq_log_scd_error()
1077 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); in iwl_txq_log_scd_error()
1083 struct iwl_trans *trans = txq->trans; in iwl_txq_stuck_timer() local
1093 iwl_txq_log_scd_error(trans, txq); in iwl_txq_stuck_timer()
1095 iwl_force_nmi(trans); in iwl_txq_stuck_timer()
1098 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, in iwl_txq_alloc() argument
1101 size_t tfd_sz = trans->txqs.tfd.size * in iwl_txq_alloc()
1102 trans->trans_cfg->base_params->max_tfd_queue_size; in iwl_txq_alloc()
1109 if (trans->trans_cfg->use_tfh) in iwl_txq_alloc()
1110 tfd_sz = trans->txqs.tfd.size * slots_num; in iwl_txq_alloc()
1113 txq->trans = trans; in iwl_txq_alloc()
1135 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, in iwl_txq_alloc()
1144 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, in iwl_txq_alloc()
1152 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); in iwl_txq_alloc()
1163 static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, in iwl_txq_dyn_alloc_dma() argument
1171 WARN_ON(!trans->txqs.bc_tbl_size); in iwl_txq_dyn_alloc_dma()
1173 bc_tbl_size = trans->txqs.bc_tbl_size; in iwl_txq_dyn_alloc_dma()
1183 txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL, in iwl_txq_dyn_alloc_dma()
1186 IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); in iwl_txq_dyn_alloc_dma()
1191 ret = iwl_txq_alloc(trans, txq, size, false); in iwl_txq_dyn_alloc_dma()
1193 IWL_ERR(trans, "Tx queue alloc failed\n"); in iwl_txq_dyn_alloc_dma()
1196 ret = iwl_txq_init(trans, txq, size, false); in iwl_txq_dyn_alloc_dma()
1198 IWL_ERR(trans, "Tx queue init failed\n"); in iwl_txq_dyn_alloc_dma()
1208 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_dyn_alloc_dma()
1212 static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, in iwl_txq_alloc_response() argument
1229 if (qid >= ARRAY_SIZE(trans->txqs.txq)) { in iwl_txq_alloc_response()
1235 if (test_and_set_bit(qid, trans->txqs.queue_used)) { in iwl_txq_alloc_response()
1242 trans->txqs.txq[qid] = txq; in iwl_txq_alloc_response()
1243 wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); in iwl_txq_alloc_response()
1249 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); in iwl_txq_alloc_response()
1256 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_alloc_response()
1260 int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid, in iwl_txq_dyn_alloc() argument
1277 ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout); in iwl_txq_dyn_alloc()
1285 ret = iwl_trans_send_cmd(trans, &hcmd); in iwl_txq_dyn_alloc()
1289 return iwl_txq_alloc_response(trans, txq, &hcmd); in iwl_txq_dyn_alloc()
1292 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_dyn_alloc()
1296 void iwl_txq_dyn_free(struct iwl_trans *trans, int queue) in iwl_txq_dyn_free() argument
1308 if (!test_and_clear_bit(queue, trans->txqs.queue_used)) { in iwl_txq_dyn_free()
1309 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), in iwl_txq_dyn_free()
1314 iwl_txq_gen2_unmap(trans, queue); in iwl_txq_dyn_free()
1316 iwl_txq_gen2_free_memory(trans, trans->txqs.txq[queue]); in iwl_txq_dyn_free()
1318 trans->txqs.txq[queue] = NULL; in iwl_txq_dyn_free()
1320 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); in iwl_txq_dyn_free()
1323 void iwl_txq_gen2_tx_free(struct iwl_trans *trans) in iwl_txq_gen2_tx_free() argument
1327 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); in iwl_txq_gen2_tx_free()
1330 for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) { in iwl_txq_gen2_tx_free()
1331 if (!trans->txqs.txq[i]) in iwl_txq_gen2_tx_free()
1334 iwl_txq_gen2_free(trans, i); in iwl_txq_gen2_tx_free()
1338 int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size) in iwl_txq_gen2_init() argument
1344 if (!trans->txqs.txq[txq_id]) { in iwl_txq_gen2_init()
1347 IWL_ERR(trans, "Not enough memory for tx queue\n"); in iwl_txq_gen2_init()
1350 trans->txqs.txq[txq_id] = queue; in iwl_txq_gen2_init()
1351 ret = iwl_txq_alloc(trans, queue, queue_size, true); in iwl_txq_gen2_init()
1353 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); in iwl_txq_gen2_init()
1357 queue = trans->txqs.txq[txq_id]; in iwl_txq_gen2_init()
1360 ret = iwl_txq_init(trans, queue, queue_size, in iwl_txq_gen2_init()
1361 (txq_id == trans->txqs.cmd.q_id)); in iwl_txq_gen2_init()
1363 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); in iwl_txq_gen2_init()
1366 trans->txqs.txq[txq_id]->id = txq_id; in iwl_txq_gen2_init()
1367 set_bit(txq_id, trans->txqs.queue_used); in iwl_txq_gen2_init()
1372 iwl_txq_gen2_tx_free(trans); in iwl_txq_gen2_init()
1376 static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans, in iwl_txq_gen1_tfd_tb_get_addr() argument
1384 if (trans->trans_cfg->use_tfh) { in iwl_txq_gen1_tfd_tb_get_addr()
1408 void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, in iwl_txq_gen1_tfd_unmap() argument
1413 void *tfd = iwl_txq_get_tfd(trans, txq, index); in iwl_txq_gen1_tfd_unmap()
1416 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); in iwl_txq_gen1_tfd_unmap()
1418 if (num_tbs > trans->txqs.tfd.max_tbs) { in iwl_txq_gen1_tfd_unmap()
1419 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); in iwl_txq_gen1_tfd_unmap()
1428 dma_unmap_page(trans->dev, in iwl_txq_gen1_tfd_unmap()
1429 iwl_txq_gen1_tfd_tb_get_addr(trans, in iwl_txq_gen1_tfd_unmap()
1431 iwl_txq_gen1_tfd_tb_get_len(trans, in iwl_txq_gen1_tfd_unmap()
1435 dma_unmap_single(trans->dev, in iwl_txq_gen1_tfd_unmap()
1436 iwl_txq_gen1_tfd_tb_get_addr(trans, in iwl_txq_gen1_tfd_unmap()
1438 iwl_txq_gen1_tfd_tb_get_len(trans, in iwl_txq_gen1_tfd_unmap()
1445 if (trans->trans_cfg->use_tfh) { in iwl_txq_gen1_tfd_unmap()
1462 void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, in iwl_txq_gen1_update_byte_cnt_tbl() argument
1476 scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; in iwl_txq_gen1_update_byte_cnt_tbl()
1491 if (trans->txqs.bc_table_dword) in iwl_txq_gen1_update_byte_cnt_tbl()
1506 void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, in iwl_txq_gen1_inval_byte_cnt_tbl() argument
1509 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; in iwl_txq_gen1_inval_byte_cnt_tbl()
1519 if (txq_id != trans->txqs.cmd.q_id) in iwl_txq_gen1_inval_byte_cnt_tbl()
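
The fragments from iwl_txq_space() (file lines 751–772 above) hint at a power-of-two ring-buffer space check: occupancy is the masked difference of the write and read pointers, and one slot is reserved when the usable window spans the whole ring so a full queue never looks empty. Below is a minimal, self-contained user-space sketch of that arithmetic only; the demo_txq struct, demo_txq_space(), and the values in main() are illustrative assumptions, not iwlwifi code.

/*
 * Hypothetical sketch -- not the iwlwifi implementation.  It mirrors the
 * wrap-around arithmetic suggested by the iwl_txq_space() matches above:
 * used = (write - read) & (size - 1), with one slot reserved when the
 * window covers the whole ring.
 */
#include <assert.h>
#include <stdio.h>

struct demo_txq {
	unsigned int write_ptr;   /* next slot to fill */
	unsigned int read_ptr;    /* next slot to reclaim */
	unsigned int n_window;    /* usable window (<= queue size) */
};

static unsigned int demo_txq_space(const struct demo_txq *q,
				   unsigned int max_tfd_queue_size)
{
	unsigned int max, used;

	/* Reserve one slot only when the window spans the whole ring,
	 * so "full" and "empty" stay distinguishable. */
	if (q->n_window < max_tfd_queue_size)
		max = q->n_window;
	else
		max = max_tfd_queue_size - 1;

	/* Power-of-two size, so masking implements the modulo. */
	used = (q->write_ptr - q->read_ptr) & (max_tfd_queue_size - 1);

	assert(used <= max);
	return max - used;
}

int main(void)
{
	/* Pointers have wrapped: (5 - 250) & 255 == 11 used slots. */
	struct demo_txq q = { .write_ptr = 5, .read_ptr = 250, .n_window = 256 };

	printf("free slots: %u\n", demo_txq_space(&q, 256));
	return 0;
}

The mask-as-modulo step only works because the queue size is a power of two, which is what the listed expression max_tfd_queue_size - 1 used as a mask suggests the driver relies on.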