1 /*
2 * Copyright 2015 Amazon.com, Inc. or its affiliates.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #ifndef ENA_ETH_COM_H_
34 #define ENA_ETH_COM_H_
35
36 #include "ena_com.h"
37
38 /* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
39 #define ENA_COMP_HEAD_THRESH 4
40
/* Per-packet Tx context handed to ena_com_prepare_tx().
 * Describes the data buffers, the requested HW offloads and, for LLQ,
 * the header pushed directly to device memory.
 */
struct ena_com_tx_ctx {
	/* TSO/checksum offload metadata; consulted only when meta_valid
	 * is set
	 */
	struct ena_com_tx_meta ena_meta;
	/* Array of num_bufs data buffers composing the packet */
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	/* Number of entries in ena_bufs */
	u16 num_bufs;
	/* Caller-chosen id; presumably echoed back on Tx completion via
	 * ena_com_tx_comp_req_id_get() to match this packet — confirm in
	 * ena_com.c
	 */
	u16 req_id;
	/* For regular queue, indicate the size of the header
	 * For LLQ, indicate the size of the pushed buffer
	 */
	u16 header_len;

	/* Boolean (0/1) per-packet offload controls */
	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};
63
/* Per-packet Rx context filled in by ena_com_rx_pkt().
 * Caller provides ena_bufs (capacity max_bufs); the receive path reports
 * the descriptor count, protocol indices, checksum status and RSS hash.
 */
struct ena_com_rx_ctx {
	/* Caller-supplied array describing the received buffers */
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	/* Checksum verification results reported by the device */
	bool l3_csum_err;
	bool l4_csum_err;
	/* fragmented packet */
	bool frag;
	/* RSS hash of the packet */
	u32 hash;
	/* Number of descriptors (and ena_bufs entries) used by the packet */
	u16 descs;
	/* Capacity of ena_bufs; upper bound for descs */
	int max_bufs;
};
76
/* Build and queue the HW descriptors for the packet described by
 * ena_tx_ctx on io_sq. On success, *nb_hw_desc holds the number of HW
 * descriptors consumed.
 */
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

/* Retrieve the next received packet from io_cq, filling ena_rx_ctx
 * (buffers in ena_rx_ctx->ena_bufs, count in ena_rx_ctx->descs).
 */
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

/* Post a single Rx buffer (tagged with req_id) to the Rx submission
 * queue.
 */
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

/* Fetch the req_id of the next Tx completion from io_cq into *req_id. */
int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);

/* Return true if io_cq currently has no completions pending. */
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
92
/* Unmask (re-arm) the interrupt associated with io_cq by writing the
 * caller-prepared interrupt control value to the queue's unmask register.
 */
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	writel(intr_reg->intr_control, io_cq->unmask_reg);
}
98
ena_com_sq_empty_space(struct ena_com_io_sq * io_sq)99 static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
100 {
101 u16 tail, next_to_comp, cnt;
102
103 next_to_comp = io_sq->next_to_comp;
104 tail = io_sq->tail;
105 cnt = tail - next_to_comp;
106
107 return io_sq->q_depth - 1 - cnt;
108 }
109
/* Ring the submission queue doorbell: notify the device that descriptors
 * up to the current tail are ready for processing.
 * Always returns 0.
 */
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 tail;

	tail = io_sq->tail;

	pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
		 io_sq->qid, tail);

	writel(tail, io_sq->db_addr);

	return 0;
}
123
/* Report the completion queue head position back to the device, but only
 * once more than q_depth / ENA_COMP_HEAD_THRESH completions have
 * accumulated since the last report — this batches the MMIO doorbell
 * writes. A queue without a head doorbell register is skipped entirely.
 * Always returns 0.
 */
static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
	u16 unreported_comp, head;
	bool need_update;

	head = io_cq->head;
	/* u16 subtraction handles head wrap-around past last_head_update */
	unreported_comp = head - io_cq->last_head_update;
	need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

	if (io_cq->cq_head_db_reg && need_update) {
		pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
			 io_cq->qid, head);
		writel(head, io_cq->cq_head_db_reg);
		io_cq->last_head_update = head;
	}

	return 0;
}
142
/* Tell the device which NUMA node services io_cq, so it can steer the
 * queue's traffic accordingly. A no-op when the queue has no NUMA config
 * register.
 */
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	/* Encode the node id in the NUMA field and set the enable bit */
	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}
156
/* Acknowledge elem completed descriptors on io_sq by advancing the
 * next-to-complete counter, which frees that many SQ entries for reuse.
 */
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp = io_sq->next_to_comp + elem;
}
161
162 #endif /* ENA_ETH_COM_H_ */
163