Lines Matching refs:txq
(each entry: source line number, the matched code, and the enclosing function; declaration sites are tagged "argument" or "local")
23 struct iwl_txq *txq, u16 byte_cnt, in iwl_pcie_gen2_update_byte_tbl() argument
26 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_pcie_gen2_update_byte_tbl()
31 if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) in iwl_pcie_gen2_update_byte_tbl()
47 struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr; in iwl_pcie_gen2_update_byte_tbl()
55 struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; in iwl_pcie_gen2_update_byte_tbl()
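
The matches above (lines 23-55) are the gen2 byte-count table update: before a TFD is handed to the hardware scheduler, the driver writes the frame length into a per-slot entry of the DMA table at txq->bc_tbl.addr, guarded by the n_window bound check on line 31. A minimal userspace sketch of the packing, assuming the usual layout of length in the low bits and fetch-chunk count in the high bits; the 12-bit split is an assumption for the example, and the entry struct differs between the iwlagn_scd_bc_tbl and iwl_gen3_bc_tbl_entry forms matched above:

    #include <stdint.h>

    /* Illustrative entry packing, not the driver's exact layout. */
    static uint16_t bc_entry(uint16_t byte_cnt, unsigned int num_fetch_chunks)
    {
        return (uint16_t)((byte_cnt & 0x0fff) | (num_fetch_chunks << 12));
    }

    static void update_byte_tbl(uint16_t *bc_tbl, int idx, uint16_t byte_cnt,
                                unsigned int num_fetch_chunks, int n_window)
    {
        if (idx >= n_window)    /* mirrors the WARN() on line 31 */
            return;
        bc_tbl[idx] = bc_entry(byte_cnt, num_fetch_chunks);
    }
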
69 void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_inc_wr_ptr() argument
71 lockdep_assert_held(&txq->lock); in iwl_txq_inc_wr_ptr()
73 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); in iwl_txq_inc_wr_ptr()
79 iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); in iwl_txq_inc_wr_ptr()
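
iwl_txq_inc_wr_ptr() publishes the new software write pointer to the hardware; line 79 shows the doorbell format directly: pointer in the low halfword, queue id in the high halfword. In isolation:

    #include <stdint.h>

    /* Value written to HBUS_TARG_WRPTR, per line 79. */
    static uint32_t wrptr_doorbell(uint16_t write_ptr, uint16_t txq_id)
    {
        return (uint32_t)write_ptr | ((uint32_t)txq_id << 16);
    }
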
162 void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_gen2_free_tfd() argument
167 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); in iwl_txq_gen2_free_tfd()
170 lockdep_assert_held(&txq->lock); in iwl_txq_gen2_free_tfd()
172 if (!txq->entries) in iwl_txq_gen2_free_tfd()
175 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, in iwl_txq_gen2_free_tfd()
176 iwl_txq_get_tfd(trans, txq, idx)); in iwl_txq_gen2_free_tfd()
178 skb = txq->entries[idx].skb; in iwl_txq_gen2_free_tfd()
186 txq->entries[idx].skb = NULL; in iwl_txq_gen2_free_tfd()
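
iwl_txq_gen2_free_tfd() retires the descriptor under the read pointer while txq->lock is held: the TFD's DMA buffers are unmapped first, the skb is released, and the slot is cleared so a later pass cannot free it twice. A toy rendition of that ordering and of the window indexing (iwl_txq_get_cmd_index() reduces a ring pointer to a power-of-two window slot; the helpers here are stand-ins):

    #include <stddef.h>

    struct toy_entry { void *skb; };

    static int cmd_index(int ptr, int n_window)
    {
        return ptr & (n_window - 1);   /* n_window is a power of two */
    }

    static void retire_slot(struct toy_entry *entries, int read_ptr,
                            int n_window, void (*unmap)(struct toy_entry *),
                            void (*free_skb)(void *))
    {
        int idx = cmd_index(read_ptr, n_window);

        unmap(&entries[idx]);          /* unmap before the pages go away */
        if (entries[idx].skb)
            free_skb(entries[idx].skb);
        entries[idx].skb = NULL;       /* line 186: guard against double free */
    }
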
465 struct iwl_txq *txq, in iwl_txq_gen2_build_tx_amsdu() argument
472 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_txq_gen2_build_tx_amsdu()
473 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tx_amsdu()
478 tb_phys = iwl_txq_get_first_tb_dma(txq, idx); in iwl_txq_gen2_build_tx_amsdu()
514 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); in iwl_txq_gen2_build_tx_amsdu()
552 struct iwl_txq *txq, in iwl_txq_gen2_build_tx() argument
560 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_txq_gen2_build_tx()
561 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tx()
567 tb_phys = iwl_txq_get_first_tb_dma(txq, idx); in iwl_txq_gen2_build_tx()
570 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); in iwl_txq_gen2_build_tx()
647 struct iwl_txq *txq, in iwl_txq_gen2_build_tfd() argument
653 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_txq_gen2_build_tfd()
654 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tfd()
687 return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, in iwl_txq_gen2_build_tfd()
689 return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, in iwl_txq_gen2_build_tfd()
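
Both builders copy the first bytes of the device command into txq->first_tb_bufs[idx] (lines 514 and 570) so that TB0 of every TFD points into a buffer that was DMA-mapped once at queue allocation time; iwl_txq_gen2_build_tfd() then merely dispatches to the A-MSDU or the plain path. The copy, in miniature (FIRST_TB_SIZE is a stand-in for the driver's IWL_FIRST_TB_SIZE):

    #include <string.h>
    #include <stdint.h>

    #define FIRST_TB_SIZE 20   /* illustrative stand-in */

    struct first_tb_buf { uint8_t buf[FIRST_TB_SIZE]; };

    /* Stash the command header in the per-slot bounce area; hardware
     * fetches TB0 from its fixed DMA address, so no per-packet mapping
     * is needed for these first bytes. */
    static void stash_first_tb(struct first_tb_buf *bufs, int idx,
                               const void *dev_cmd)
    {
        memcpy(bufs[idx].buf, dev_cmd, FIRST_TB_SIZE);
    }
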
726 struct iwl_txq *txq = trans->txqs.txq[txq_id]; in iwl_txq_gen2_tx() local
744 spin_lock(&txq->lock); in iwl_txq_gen2_tx()
746 if (iwl_txq_space(trans, txq) < txq->high_mark) { in iwl_txq_gen2_tx()
747 iwl_txq_stop(trans, txq); in iwl_txq_gen2_tx()
750 if (unlikely(iwl_txq_space(trans, txq) < 3)) { in iwl_txq_gen2_tx()
757 __skb_queue_tail(&txq->overflow_q, skb); in iwl_txq_gen2_tx()
758 spin_unlock(&txq->lock); in iwl_txq_gen2_tx()
763 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); in iwl_txq_gen2_tx()
766 txq->entries[idx].skb = skb; in iwl_txq_gen2_tx()
767 txq->entries[idx].cmd = dev_cmd; in iwl_txq_gen2_tx()
774 out_meta = &txq->entries[idx].meta; in iwl_txq_gen2_tx()
777 tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); in iwl_txq_gen2_tx()
779 spin_unlock(&txq->lock); in iwl_txq_gen2_tx()
796 iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len, in iwl_txq_gen2_tx()
800 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) in iwl_txq_gen2_tx()
801 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); in iwl_txq_gen2_tx()
804 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); in iwl_txq_gen2_tx()
805 iwl_txq_inc_wr_ptr(trans, txq); in iwl_txq_gen2_tx()
810 spin_unlock(&txq->lock); in iwl_txq_gen2_tx()
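
iwl_txq_gen2_tx() is the data-path entry point: under txq->lock it checks free space against high_mark (line 746) and stops the queue when the mark is crossed; with fewer than three free slots the frame is parked on overflow_q instead of being built (lines 750-758). Otherwise the entry is filled, the TFD built, the byte-count table updated, the watchdog armed on the empty-to-busy transition (line 800), and the write pointer advanced and rung. A userspace toy of the admission decision, with an illustrative free-space formula (the driver's iwl_txq_space() additionally caps at the window size):

    #include <stdbool.h>

    struct toy_ring { int size, read_ptr, write_ptr; };

    static int ring_space(const struct toy_ring *q)
    {
        int used = (q->write_ptr - q->read_ptr) & (q->size - 1);
        return q->size - used - 1;     /* keep one slot free */
    }

    /* Admission check shaped like lines 746-758: crossing the high-water
     * mark stops the upper layers; near-exhaustion overflows the frame
     * to a software queue rather than dropping it. */
    static bool admit_frame(struct toy_ring *q, int high_mark,
                            void (*stop_queue)(void), bool *overflowed)
    {
        *overflowed = false;
        if (ring_space(q) < high_mark) {
            stop_queue();
            if (ring_space(q) < 3) {
                *overflowed = true;    /* caller does __skb_queue_tail() */
                return false;
            }
        }
        return true;
    }
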
821 struct iwl_txq *txq = trans->txqs.txq[txq_id]; in iwl_txq_gen2_unmap() local
823 spin_lock_bh(&txq->lock); in iwl_txq_gen2_unmap()
824 while (txq->write_ptr != txq->read_ptr) { in iwl_txq_gen2_unmap()
826 txq_id, txq->read_ptr); in iwl_txq_gen2_unmap()
829 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); in iwl_txq_gen2_unmap()
830 struct sk_buff *skb = txq->entries[idx].skb; in iwl_txq_gen2_unmap()
835 iwl_txq_gen2_free_tfd(trans, txq); in iwl_txq_gen2_unmap()
836 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); in iwl_txq_gen2_unmap()
839 while (!skb_queue_empty(&txq->overflow_q)) { in iwl_txq_gen2_unmap()
840 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); in iwl_txq_gen2_unmap()
845 spin_unlock_bh(&txq->lock); in iwl_txq_gen2_unmap()
848 iwl_wake_queue(trans, txq); in iwl_txq_gen2_unmap()
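
iwl_txq_gen2_unmap() drains everything still in flight: the read pointer chases the write pointer, one TFD freed per step (lines 824-836), then the overflow queue is flushed and the queue woken. The loop shape, with wrap as a power-of-two mask and retire() standing in for iwl_txq_gen2_free_tfd():

    struct drain_ring { int size, read_ptr, write_ptr; };

    static void drain(struct drain_ring *q, void (*retire)(int idx))
    {
        while (q->read_ptr != q->write_ptr) {
            retire(q->read_ptr);
            q->read_ptr = (q->read_ptr + 1) & (q->size - 1);
        }
    }
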
852 struct iwl_txq *txq) in iwl_txq_gen2_free_memory() argument
857 if (txq->tfds) { in iwl_txq_gen2_free_memory()
859 trans->txqs.tfd.size * txq->n_window, in iwl_txq_gen2_free_memory()
860 txq->tfds, txq->dma_addr); in iwl_txq_gen2_free_memory()
862 sizeof(*txq->first_tb_bufs) * txq->n_window, in iwl_txq_gen2_free_memory()
863 txq->first_tb_bufs, txq->first_tb_dma); in iwl_txq_gen2_free_memory()
866 kfree(txq->entries); in iwl_txq_gen2_free_memory()
867 if (txq->bc_tbl.addr) in iwl_txq_gen2_free_memory()
869 txq->bc_tbl.addr, txq->bc_tbl.dma); in iwl_txq_gen2_free_memory()
870 kfree(txq); in iwl_txq_gen2_free_memory()
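
iwl_txq_gen2_free_memory() releases resources in roughly the reverse order of allocation: the coherent TFD ring and the first-TB bounce buffers, the entries array, the byte-count table back into its dma_pool, then the queue struct itself. A kernel-style sketch assembled from the matched lines (field names follow the listing; treat it as a reconstruction under the driver's headers, not the verbatim function):

    /* Kernel context; assumes the driver's struct definitions. */
    static void txq_free_memory_sketch(struct device *dev,
                                       struct dma_pool *bc_pool,
                                       struct iwl_txq *txq, size_t tfd_size)
    {
        if (txq->tfds) {
            dma_free_coherent(dev, tfd_size * txq->n_window,
                              txq->tfds, txq->dma_addr);
            dma_free_coherent(dev,
                              sizeof(*txq->first_tb_bufs) * txq->n_window,
                              txq->first_tb_bufs, txq->first_tb_dma);
        }
        kfree(txq->entries);
        if (txq->bc_tbl.addr)
            dma_pool_free(bc_pool, txq->bc_tbl.addr, txq->bc_tbl.dma);
        kfree(txq);
    }
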
883 struct iwl_txq *txq; in iwl_txq_gen2_free() local
890 txq = trans->txqs.txq[txq_id]; in iwl_txq_gen2_free()
892 if (WARN_ON(!txq)) in iwl_txq_gen2_free()
899 for (i = 0; i < txq->n_window; i++) { in iwl_txq_gen2_free()
900 kfree_sensitive(txq->entries[i].cmd); in iwl_txq_gen2_free()
901 kfree_sensitive(txq->entries[i].free_buf); in iwl_txq_gen2_free()
903 del_timer_sync(&txq->stuck_timer); in iwl_txq_gen2_free()
905 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_gen2_free()
907 trans->txqs.txq[txq_id] = NULL; in iwl_txq_gen2_free()
938 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, in iwl_txq_init() argument
945 txq->need_update = false; in iwl_txq_init()
955 ret = iwl_queue_init(txq, slots_num); in iwl_txq_init()
959 spin_lock_init(&txq->lock); in iwl_txq_init()
964 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class); in iwl_txq_init()
967 __skb_queue_head_init(&txq->overflow_q); in iwl_txq_init()
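
iwl_txq_init() finishes the software setup: ring indices via iwl_queue_init(), the spinlock, and the overflow queue. Line 964 deserves a note: the command queue's lock gets its own lockdep class, presumably because it nests differently from data-queue locks (it is taken on the synchronous-command path) and would otherwise trip false lockdep reports. A kernel-style sketch of that setup, with a stand-in key:

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    static struct lock_class_key toy_cmd_queue_lock_class;

    static void toy_txq_lock_init(spinlock_t *lock, bool cmd_queue)
    {
        spin_lock_init(lock);
        if (cmd_queue)
            lockdep_set_class(lock, &toy_cmd_queue_lock_class);
    }
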
990 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_log_scd_error() argument
992 u32 txq_id = txq->id; in iwl_txq_log_scd_error()
999 txq->read_ptr, txq->write_ptr); in iwl_txq_log_scd_error()
1011 jiffies_to_msecs(txq->wd_timeout), in iwl_txq_log_scd_error()
1012 txq->read_ptr, txq->write_ptr, in iwl_txq_log_scd_error()
1022 struct iwl_txq *txq = from_timer(txq, t, stuck_timer); in iwl_txq_stuck_timer() local
1023 struct iwl_trans *trans = txq->trans; in iwl_txq_stuck_timer()
1025 spin_lock(&txq->lock); in iwl_txq_stuck_timer()
1027 if (txq->read_ptr == txq->write_ptr) { in iwl_txq_stuck_timer()
1028 spin_unlock(&txq->lock); in iwl_txq_stuck_timer()
1031 spin_unlock(&txq->lock); in iwl_txq_stuck_timer()
1033 iwl_txq_log_scd_error(trans, txq); in iwl_txq_stuck_timer()
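
The stuck-queue watchdog (lines 1022-1033) takes the lock just long enough to decide whether the alarm is real: if read_ptr has caught up with write_ptr, the queue drained while the timer was pending and nothing is logged; otherwise iwl_txq_log_scd_error() dumps the scheduler state (lines 992-1012). In miniature, minus the locking:

    struct wd_ring { int read_ptr, write_ptr; };

    /* A drained queue is a false alarm; anything else is reported. */
    static void stuck_timer_body(const struct wd_ring *q,
                                 void (*log_scd_error)(void))
    {
        if (q->read_ptr == q->write_ptr)
            return;
        log_scd_error();
    }
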
1047 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, in iwl_txq_alloc() argument
1059 if (WARN_ON(txq->entries || txq->tfds)) in iwl_txq_alloc()
1064 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); in iwl_txq_alloc()
1065 txq->trans = trans; in iwl_txq_alloc()
1067 txq->n_window = slots_num; in iwl_txq_alloc()
1069 txq->entries = kcalloc(slots_num, in iwl_txq_alloc()
1073 if (!txq->entries) in iwl_txq_alloc()
1078 txq->entries[i].cmd = in iwl_txq_alloc()
1081 if (!txq->entries[i].cmd) in iwl_txq_alloc()
1087 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, in iwl_txq_alloc()
1088 &txq->dma_addr, GFP_KERNEL); in iwl_txq_alloc()
1089 if (!txq->tfds) in iwl_txq_alloc()
1092 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN); in iwl_txq_alloc()
1094 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; in iwl_txq_alloc()
1096 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, in iwl_txq_alloc()
1097 &txq->first_tb_dma, in iwl_txq_alloc()
1099 if (!txq->first_tb_bufs) in iwl_txq_alloc()
1103 void *tfd = iwl_txq_get_tfd(trans, txq, i); in iwl_txq_alloc()
1113 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); in iwl_txq_alloc()
1114 txq->tfds = NULL; in iwl_txq_alloc()
1116 if (txq->entries && cmd_queue) in iwl_txq_alloc()
1118 kfree(txq->entries[i].cmd); in iwl_txq_alloc()
1119 kfree(txq->entries); in iwl_txq_alloc()
1120 txq->entries = NULL; in iwl_txq_alloc()
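
iwl_txq_alloc() (lines 1059-1120) is a textbook goto-unwind allocation: the entries array, optional per-slot command buffers for the command queue, the coherent TFD ring, then the first-TB buffers, with each failure path releasing exactly what was already obtained. The shape of it, with hypothetical toy allocators standing in for kcalloc/dma_alloc_coherent:

    #include <stddef.h>

    void *toy_alloc(size_t n);      /* assumed helpers for the sketch */
    void  toy_free(void *p);

    struct toy_q { void *entries, *tfds, *first_tb_bufs; };

    static int alloc_queue(struct toy_q *q, size_t n_entries,
                           size_t ring_bytes, size_t tb0_bytes)
    {
        q->entries = toy_alloc(n_entries);
        if (!q->entries)
            goto err;
        q->tfds = toy_alloc(ring_bytes);
        if (!q->tfds)
            goto err_free_entries;
        q->first_tb_bufs = toy_alloc(tb0_bytes);
        if (!q->first_tb_bufs)
            goto err_free_tfds;
        return 0;

    err_free_tfds:
        toy_free(q->tfds);
        q->tfds = NULL;             /* as line 1114 does */
    err_free_entries:
        toy_free(q->entries);
        q->entries = NULL;          /* as line 1120 does */
    err:
        return -1;                  /* the driver returns -ENOMEM */
    }
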
1129 struct iwl_txq *txq; in iwl_txq_dyn_alloc_dma() local
1140 txq = kzalloc(sizeof(*txq), GFP_KERNEL); in iwl_txq_dyn_alloc_dma()
1141 if (!txq) in iwl_txq_dyn_alloc_dma()
1144 txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL, in iwl_txq_dyn_alloc_dma()
1145 &txq->bc_tbl.dma); in iwl_txq_dyn_alloc_dma()
1146 if (!txq->bc_tbl.addr) { in iwl_txq_dyn_alloc_dma()
1148 kfree(txq); in iwl_txq_dyn_alloc_dma()
1152 ret = iwl_txq_alloc(trans, txq, size, false); in iwl_txq_dyn_alloc_dma()
1157 ret = iwl_txq_init(trans, txq, size, false); in iwl_txq_dyn_alloc_dma()
1163 txq->wd_timeout = msecs_to_jiffies(timeout); in iwl_txq_dyn_alloc_dma()
1165 return txq; in iwl_txq_dyn_alloc_dma()
1168 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_dyn_alloc_dma()
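
iwl_txq_dyn_alloc_dma() builds a queue on demand: kzalloc the struct, grab a byte-count table from the shared dma_pool, then reuse iwl_txq_alloc()/iwl_txq_init(). On failure it returns an encoded error pointer, which the caller at lines 1244-1245 unpacks with IS_ERR()/PTR_ERR(). A userspace rendition of that idiom (the kernel reserves the top 4095 pointer values for errno codes):

    /* Toy ERR_PTR/IS_ERR/PTR_ERR, mirroring the kernel's encoding. */
    static inline void *toy_err_ptr(long err) { return (void *)err; }
    static inline long  toy_ptr_err(const void *p) { return (long)p; }
    static inline int   toy_is_err(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-4095;
    }
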
1172 static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, in iwl_txq_alloc_response() argument
1189 if (qid >= ARRAY_SIZE(trans->txqs.txq)) { in iwl_txq_alloc_response()
1201 if (WARN_ONCE(trans->txqs.txq[qid], in iwl_txq_alloc_response()
1207 txq->id = qid; in iwl_txq_alloc_response()
1208 trans->txqs.txq[qid] = txq; in iwl_txq_alloc_response()
1212 txq->read_ptr = wr_ptr; in iwl_txq_alloc_response()
1213 txq->write_ptr = wr_ptr; in iwl_txq_alloc_response()
1222 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_alloc_response()
1229 struct iwl_txq *txq; in iwl_txq_dyn_alloc() local
1243 txq = iwl_txq_dyn_alloc_dma(trans, size, timeout); in iwl_txq_dyn_alloc()
1244 if (IS_ERR(txq)) in iwl_txq_dyn_alloc()
1245 return PTR_ERR(txq); in iwl_txq_dyn_alloc()
1249 cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr); in iwl_txq_dyn_alloc()
1250 cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); in iwl_txq_dyn_alloc()
1267 cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr); in iwl_txq_dyn_alloc()
1268 cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma); in iwl_txq_dyn_alloc()
1286 return iwl_txq_alloc_response(trans, txq, &hcmd); in iwl_txq_dyn_alloc()
1289 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_dyn_alloc()
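
Both command layouts hand the firmware the TFD ring and byte-count table DMA addresses as explicit little-endian 64-bit fields (cpu_to_le64 at lines 1249-1250 and 1267-1268), since firmware byte order does not track host byte order. A portable equivalent of such a store:

    #include <stdint.h>

    /* Serialize a 64-bit value little-endian regardless of host order;
     * cpu_to_le64() achieves the same for in-struct fields. */
    static void put_le64(uint8_t dst[8], uint64_t v)
    {
        for (int i = 0; i < 8; i++)
            dst[i] = (uint8_t)(v >> (8 * i));
    }
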
1323 for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) { in iwl_txq_gen2_tx_free()
1324 if (!trans->txqs.txq[i]) in iwl_txq_gen2_tx_free()
1337 if (!trans->txqs.txq[txq_id]) { in iwl_txq_gen2_init()
1343 trans->txqs.txq[txq_id] = queue; in iwl_txq_gen2_init()
1350 queue = trans->txqs.txq[txq_id]; in iwl_txq_gen2_init()
1359 trans->txqs.txq[txq_id]->id = txq_id; in iwl_txq_gen2_init()
1393 struct iwl_txq *txq, int index) in iwl_txq_gen1_tfd_unmap() argument
1396 struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index); in iwl_txq_gen1_tfd_unmap()
1438 struct iwl_txq *txq, u16 byte_cnt, in iwl_txq_gen1_update_byte_cnt_tbl() argument
1442 int write_ptr = txq->write_ptr; in iwl_txq_gen1_update_byte_cnt_tbl()
1443 int txq_id = txq->id; in iwl_txq_gen1_update_byte_cnt_tbl()
1447 struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; in iwl_txq_gen1_update_byte_cnt_tbl()
1482 struct iwl_txq *txq) in iwl_txq_gen1_inval_byte_cnt_tbl() argument
1485 int txq_id = txq->id; in iwl_txq_gen1_inval_byte_cnt_tbl()
1486 int read_ptr = txq->read_ptr; in iwl_txq_gen1_inval_byte_cnt_tbl()
1489 struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; in iwl_txq_gen1_inval_byte_cnt_tbl()
1515 void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_free_tfd() argument
1520 int rd_ptr = txq->read_ptr; in iwl_txq_free_tfd()
1521 int idx = iwl_txq_get_cmd_index(txq, rd_ptr); in iwl_txq_free_tfd()
1524 lockdep_assert_held(&txq->lock); in iwl_txq_free_tfd()
1526 if (!txq->entries) in iwl_txq_free_tfd()
1533 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, in iwl_txq_free_tfd()
1534 iwl_txq_get_tfd(trans, txq, rd_ptr)); in iwl_txq_free_tfd()
1536 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, in iwl_txq_free_tfd()
1537 txq, rd_ptr); in iwl_txq_free_tfd()
1540 skb = txq->entries[idx].skb; in iwl_txq_free_tfd()
1548 txq->entries[idx].skb = NULL; in iwl_txq_free_tfd()
1552 void iwl_txq_progress(struct iwl_txq *txq) in iwl_txq_progress() argument
1554 lockdep_assert_held(&txq->lock); in iwl_txq_progress()
1556 if (!txq->wd_timeout) in iwl_txq_progress()
1563 if (txq->frozen) in iwl_txq_progress()
1570 if (txq->read_ptr == txq->write_ptr) in iwl_txq_progress()
1571 del_timer(&txq->stuck_timer); in iwl_txq_progress()
1573 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); in iwl_txq_progress()
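
iwl_txq_progress() centralizes the watchdog policy (lines 1554-1573): no configured timeout or a frozen queue leaves the timer untouched, an empty queue cancels it, and any progress on a busy queue pushes the deadline out by wd_timeout. Modeled with plain integers for jiffies and an armed flag for the timer:

    struct toy_wd { unsigned long wd_timeout, expires; int frozen, armed; };

    static void txq_progress_toy(struct toy_wd *wd, int read_ptr,
                                 int write_ptr, unsigned long now)
    {
        if (!wd->wd_timeout || wd->frozen)
            return;
        if (read_ptr == write_ptr) {
            wd->armed = 0;                    /* del_timer() */
        } else {
            wd->armed = 1;                    /* mod_timer() */
            wd->expires = now + wd->wd_timeout;
        }
    }
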
1580 struct iwl_txq *txq = trans->txqs.txq[txq_id]; in iwl_txq_reclaim() local
1587 if (WARN_ON(!txq)) in iwl_txq_reclaim()
1590 tfd_num = iwl_txq_get_cmd_index(txq, ssn); in iwl_txq_reclaim()
1591 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); in iwl_txq_reclaim()
1593 spin_lock_bh(&txq->lock); in iwl_txq_reclaim()
1605 txq_id, txq->read_ptr, tfd_num, ssn); in iwl_txq_reclaim()
1611 if (!iwl_txq_used(txq, last_to_free)) { in iwl_txq_reclaim()
1616 txq->write_ptr, txq->read_ptr); in iwl_txq_reclaim()
1629 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr), in iwl_txq_reclaim()
1630 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) { in iwl_txq_reclaim()
1631 struct sk_buff *skb = txq->entries[read_ptr].skb; in iwl_txq_reclaim()
1640 txq->entries[read_ptr].skb = NULL; in iwl_txq_reclaim()
1643 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq); in iwl_txq_reclaim()
1645 iwl_txq_free_tfd(trans, txq); in iwl_txq_reclaim()
1648 iwl_txq_progress(txq); in iwl_txq_reclaim()
1650 if (iwl_txq_space(trans, txq) > txq->low_mark && in iwl_txq_reclaim()
1655 skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); in iwl_txq_reclaim()
1664 txq->overflow_tx = true; in iwl_txq_reclaim()
1673 spin_unlock_bh(&txq->lock); in iwl_txq_reclaim()
1690 if (iwl_txq_space(trans, txq) > txq->low_mark) in iwl_txq_reclaim()
1691 iwl_wake_queue(trans, txq); in iwl_txq_reclaim()
1693 spin_lock_bh(&txq->lock); in iwl_txq_reclaim()
1694 txq->overflow_tx = false; in iwl_txq_reclaim()
1698 spin_unlock_bh(&txq->lock); in iwl_txq_reclaim()
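
iwl_txq_reclaim() runs on firmware completions: after validating that ssn actually lies in the in-flight range (lines 1605-1616), it walks read_ptr forward to the acked sequence number, clearing each skb slot and freeing each TFD; once space rises above low_mark it replays any frames parked on overflow_q, with overflow_tx set so the re-entrant TX path tolerates the still-full queue, and finally wakes the queue. The walk itself, reduced to ring arithmetic with a stand-in for iwl_txq_free_tfd() plus the skb bookkeeping:

    struct rc_ring { int size, read_ptr; };

    /* Free every slot from read_ptr up to (not including) the slot the
     * acked sequence number maps to. */
    static void reclaim_to(struct rc_ring *q, int ssn, void (*free_tfd)(int))
    {
        int tfd_num = ssn & (q->size - 1);

        while (q->read_ptr != tfd_num) {
            free_tfd(q->read_ptr);
            q->read_ptr = (q->read_ptr + 1) & (q->size - 1);
        }
    }
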
1704 struct iwl_txq *txq = trans->txqs.txq[txq_id]; in iwl_txq_set_q_ptrs() local
1706 spin_lock_bh(&txq->lock); in iwl_txq_set_q_ptrs()
1708 txq->write_ptr = ptr; in iwl_txq_set_q_ptrs()
1709 txq->read_ptr = txq->write_ptr; in iwl_txq_set_q_ptrs()
1711 spin_unlock_bh(&txq->lock); in iwl_txq_set_q_ptrs()
1720 struct iwl_txq *txq = trans->txqs.txq[queue]; in iwl_trans_txq_freeze_timer() local
1723 spin_lock_bh(&txq->lock); in iwl_trans_txq_freeze_timer()
1727 if (txq->frozen == freeze) in iwl_trans_txq_freeze_timer()
1733 txq->frozen = freeze; in iwl_trans_txq_freeze_timer()
1735 if (txq->read_ptr == txq->write_ptr) in iwl_trans_txq_freeze_timer()
1740 txq->stuck_timer.expires))) { in iwl_trans_txq_freeze_timer()
1748 txq->frozen_expiry_remainder = in iwl_trans_txq_freeze_timer()
1749 txq->stuck_timer.expires - now; in iwl_trans_txq_freeze_timer()
1750 del_timer(&txq->stuck_timer); in iwl_trans_txq_freeze_timer()
1758 mod_timer(&txq->stuck_timer, in iwl_trans_txq_freeze_timer()
1759 now + txq->frozen_expiry_remainder); in iwl_trans_txq_freeze_timer()
1762 spin_unlock_bh(&txq->lock); in iwl_trans_txq_freeze_timer()
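
iwl_trans_txq_freeze_timer() must not lose watchdog budget across a freeze: on freeze it records how much of the period remained (lines 1748-1750) and kills the timer; on thaw it re-arms with exactly that remainder (lines 1758-1759). With integer jiffies and an armed flag for the timer:

    struct frz_timer { unsigned long expires, remainder; int armed; };

    static void freeze_wd(struct frz_timer *t, unsigned long now)
    {
        t->remainder = t->expires - now;   /* budget left on the clock */
        t->armed = 0;                      /* del_timer() */
    }

    static void thaw_wd(struct frz_timer *t, unsigned long now)
    {
        t->expires = now + t->remainder;   /* mod_timer() */
        t->armed = 1;
    }
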
1772 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; in iwl_trans_txq_send_hcmd_sync() local
1803 txq->read_ptr, txq->write_ptr); in iwl_trans_txq_send_hcmd_sync()
1847 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; in iwl_trans_txq_send_hcmd_sync()
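
The synchronous command path ends with the defensive move on line 1847: if the wait for a response times out, the CMD_WANT_SKB flag is cleared on the slot, so a late firmware reply is dropped rather than handed to (or freed on behalf of) a caller that has already given up. Reduced to the flag operation, with a stand-in flag value:

    #include <stdint.h>

    #define TOY_WANT_RESP 0x1u     /* stand-in for CMD_WANT_SKB */

    struct toy_cmd_meta { uint32_t flags; };

    static void abandon_sync_cmd(struct toy_cmd_meta *meta)
    {
        meta->flags &= ~TOY_WANT_RESP;
    }
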