/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#include "ena_com.h"

/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4
/* we allow 2 DMA descriptors per LLQ entry */
#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE	(2 * sizeof(struct ena_eth_io_tx_desc))
#define ENA_LLQ_HEADER		(128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
#define ENA_LLQ_LARGE_HEADER	(256UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)

struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For a regular queue, indicates the size of the header.
	 * For LLQ, indicates the size of the pushed buffer.
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};

struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	int max_bufs;
	u8 pkt_offset;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);

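/* Unmask the completion queue's interrupt by writing the caller-prepared
 * interrupt control value (the unmask bit, along with any interrupt
 * moderation intervals packed into intr_reg) to the queue's unmask register.
 */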
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	writel(intr_reg->intr_control, io_cq->unmask_reg);
}

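/* Return the number of free entries in the submission queue. One entry is
 * always kept unused so that a completely full queue can be distinguished
 * from an empty one, hence the "q_depth - 1" upper bound.
 */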
static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_q_entries(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. To reduce the
	 * calculation overhead, just add 2 entries to the required count:
	 * one for the header entry and one to compensate for the rounding
	 * down of the division.
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_q_entries(io_sq) > temp;
}

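/* Return true if the TX metadata supplied in ena_tx_ctx differs from the
 * metadata most recently cached on this submission queue, i.e. a new meta
 * descriptor must be written before the packet's data descriptors.
 */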
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

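/* Return true if this is an LLQ (device memory placement) submission queue
 * that enforces a limit on the number of entries written per TX burst.
 */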
static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

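/* Check whether the doorbell must be rung before queueing this packet.
 * Count the LLQ entries the packet will consume: the first entry holds the
 * header plus up to descs_num_before_header descriptors (and possibly an
 * extra meta descriptor), and any remaining descriptors are packed
 * descs_per_entry to an entry. The doorbell is needed when that count
 * exceeds what is left of the current TX burst allowance.
 */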
static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	u16 num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	if (llq_info->disable_meta_caching ||
	    unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "Queue: %d num_descs: %d num_entries_needed: %d\n",
		   io_sq->qid, num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}

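/* Ring the submission queue's doorbell by writing the current tail to the
 * doorbell register, and refill the TX burst allowance for LLQ queues.
 */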
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "Write submission queue doorbell for queue: %d tail: %d\n",
		   io_sq->qid, tail);

	writel(tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Reset available entries in tx burst for queue %d to %d\n",
			   io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}

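/* Report the completion queue head to the device, but only once at least
 * 1/ENA_COMP_HEAD_THRESH of the queue depth has completed since the last
 * report, to keep doorbell writes infrequent. A NULL cq_head_db_reg means
 * the device doesn't take head updates for this queue.
 */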
static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
	u16 unreported_comp, head;
	bool need_update;

	if (unlikely(io_cq->cq_head_db_reg)) {
		head = io_cq->head;
		unreported_comp = head - io_cq->last_head_update;
		need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

		if (unlikely(need_update)) {
			netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
				   "Write completion queue doorbell for queue %d: head: %d\n",
				   io_cq->qid, head);
			writel(head, io_cq->cq_head_db_reg);
			io_cq->last_head_update = head;
		}
	}

	return 0;
}

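/* Advertise the NUMA node handling this queue to the device, if the queue
 * exposes a NUMA configuration register.
 */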
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

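/* Acknowledge elem completed descriptors, freeing their submission queue
 * entries for reuse.
 */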
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}

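/* Advance the completion queue head, flipping the expected phase bit on
 * queue wrap-around (the queue depth is a power of two).
 */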
static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

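/* Fetch the req_id of the next TX completion descriptor, if the device has
 * posted one. Returns -EAGAIN when no new completion is available and
 * -EINVAL when the device reports an out-of-range req_id.
 */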
static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		 (masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected phase, it means that the device hasn't updated
	 * this completion yet.
	 */
	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return -EAGAIN;

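	/* Make sure the rest of the descriptor (notably req_id below) is
	 * read only after the phase bit has been observed as valid.
	 */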
	dma_rmb();

	*req_id = READ_ONCE(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "Invalid req id %d\n", cdesc->req_id);
		return -EINVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}

#endif /* ENA_ETH_COM_H_ */