/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_queue_tx_h__
#define __iwl_trans_queue_tx_h__
#include "iwl-fh.h"
#include "fw/api/tx.h"

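/*
 * A scratch page on which TSO headers are built; 'pos' is the current
 * write position within 'page'.
 */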
struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

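/* DMA address of the "first TB" scratch buffer backing entry 'idx' */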
static inline dma_addr_t
iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

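/*
 * Map a ring index to an index within the n_window software entries;
 * the mask works only because n_window is always a power of two.
 */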
static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);

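/*
 * iwl_wake_queue() and iwl_txq_stop() form a pair around the
 * queue_stopped bitmap: the atomic test_and_*_bit() calls ensure the
 * op mode is notified only on a real stopped/running transition, not
 * on redundant wake or stop calls.
 */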
static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

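/*
 * With TFH the TFD array is allocated with only n_window entries, so
 * the ring index must first be reduced to a command index; pre-TFH
 * hardware keeps one TFD per ring slot and can use the index as-is.
 */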
static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
				    struct iwl_txq *txq, int idx)
{
	if (trans->trans_cfg->use_tfh)
		idx = iwl_txq_get_cmd_index(txq, idx);

	return txq->tfds + trans->txqs.tfd.size * idx;
}

int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue);
/*
 * We need this inline in case dma_addr_t is only 32-bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
 */
static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}
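
/*
 * Example: phys = 0xfffff000 and len = 0x2000 give phys + len =
 * 0x100001000; upper_32_bits() of start (0) and end (1) differ, so the
 * buffer crosses a 4 GiB boundary and must be split.  With 32-bit
 * arithmetic the sum would truncate to 0x1000 and hide the crossing,
 * hence the u64 argument above.
 */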

int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);

static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
{
	if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}
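
/*
 * Both wrap helpers below rely on max_tfd_queue_size being a power of
 * two: masking with (size - 1) is a cheap modulo over the ring size.
 */
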
/**
 * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
 * @trans: the transport (for configuration data)
 * @index: current index
 */
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_txq_dec_wrap - decrement queue index, wrap back to end
 * @trans: the transport (for configuration data)
 * @index: current index
 */
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
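/*
 * An entry is "used" when it lies in the half-open range
 * [read_ptr, write_ptr).  The first arm handles the linear case
 * (write ahead of read); the second handles the case where the write
 * pointer has wrapped around the end of the window.
 */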
static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
{
	int index = iwl_txq_get_cmd_index(q, i);
	int r = iwl_txq_get_cmd_index(q, q->read_ptr);
	int w = iwl_txq_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}

void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);

void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);

int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
			struct iwl_tfh_tfd *tfd, dma_addr_t addr,
			u16 len);

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd);

int iwl_txq_dyn_alloc(struct iwl_trans *trans,
		      __le16 flags, u8 sta_id, u8 tid,
		      int cmd_id, int size,
		      unsigned int timeout);

int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id);

void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_gen2_tx_stop(struct iwl_trans *trans);
void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue);
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);
#endif
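
/*
 * The gen1 helpers below are also used by common code on TFH devices,
 * so each one branches on use_tfh and parses whichever TFD layout the
 * device actually uses.
 */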
static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
					      void *_tfd)
{
	struct iwl_tfd *tfd;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;

		return le16_to_cpu(tfd->num_tbs) & 0x1f;
	}

	tfd = (struct iwl_tfd *)_tfd;
	return tfd->num_tbs & 0x1f;
}

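/*
 * Legacy TFDs pack each TB as a 32-bit 'lo' address plus 'hi_n_len',
 * whose low nibble holds address bits 32-35 and whose upper 12 bits
 * hold the length - hence the '>> 4'.  TFH TBs carry a plain 16-bit
 * tb_len instead.
 */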
static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
					      void *_tfd, u8 idx)
{
	struct iwl_tfd *tfd;
	struct iwl_tfd_tb *tb;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	}

	tfd = (struct iwl_tfd *)_tfd;
	tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index);
void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
				     struct iwl_txq *txq);
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt,
				      int num_tbs);
#endif /* __iwl_trans_queue_tx_h__ */