1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * RSS and Classifier helpers for Marvell PPv2 Network Controller
4 *
5 * Copyright (C) 2014 Marvell
6 *
7 * Marcin Wojtas <mw@semihalf.com>
8 */
9
10 #include "mvpp2.h"
11 #include "mvpp2_cls.h"
12 #include "mvpp2_prs.h"
13
/* Helper to declare one classifier flow table entry: the ethtool flow type it
 * belongs to, the flow table id, the hash fields it supports, and the parser
 * Result Info (value + mask) that packets must match to hit this entry.
 */
#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask)	\
{								\
	.flow_type = _type,					\
	.flow_id = _id,						\
	.supported_hash_opts = _opts,				\
	.prs_ri = {						\
		.ri = _ri,					\
		.ri_mask = _ri_mask				\
	}							\
}
24
25 static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
26 /* TCP over IPv4 flows, Not fragmented, no vlan tag */
27 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
28 MVPP22_CLS_HEK_IP4_5T,
29 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
30 MVPP2_PRS_RI_L4_TCP,
31 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
32
33 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
34 MVPP22_CLS_HEK_IP4_5T,
35 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
36 MVPP2_PRS_RI_L4_TCP,
37 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
38
39 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
40 MVPP22_CLS_HEK_IP4_5T,
41 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
42 MVPP2_PRS_RI_L4_TCP,
43 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
44
45 /* TCP over IPv4 flows, Not fragmented, with vlan tag */
46 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
47 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
48 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
49 MVPP2_PRS_IP_MASK),
50
51 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
52 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
53 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
54 MVPP2_PRS_IP_MASK),
55
56 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
57 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
58 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
59 MVPP2_PRS_IP_MASK),
60
61 /* TCP over IPv4 flows, fragmented, no vlan tag */
62 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
63 MVPP22_CLS_HEK_IP4_2T,
64 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
65 MVPP2_PRS_RI_L4_TCP,
66 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
67
68 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
69 MVPP22_CLS_HEK_IP4_2T,
70 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
71 MVPP2_PRS_RI_L4_TCP,
72 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
73
74 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
75 MVPP22_CLS_HEK_IP4_2T,
76 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
77 MVPP2_PRS_RI_L4_TCP,
78 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
79
80 /* TCP over IPv4 flows, fragmented, with vlan tag */
81 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
82 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
83 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
84 MVPP2_PRS_IP_MASK),
85
86 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
87 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
88 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
89 MVPP2_PRS_IP_MASK),
90
91 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
92 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
93 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
94 MVPP2_PRS_IP_MASK),
95
96 /* UDP over IPv4 flows, Not fragmented, no vlan tag */
97 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
98 MVPP22_CLS_HEK_IP4_5T,
99 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
100 MVPP2_PRS_RI_L4_UDP,
101 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
102
103 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
104 MVPP22_CLS_HEK_IP4_5T,
105 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
106 MVPP2_PRS_RI_L4_UDP,
107 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
108
109 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
110 MVPP22_CLS_HEK_IP4_5T,
111 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
112 MVPP2_PRS_RI_L4_UDP,
113 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
114
115 /* UDP over IPv4 flows, Not fragmented, with vlan tag */
116 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
117 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
118 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
119 MVPP2_PRS_IP_MASK),
120
121 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
122 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
123 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
124 MVPP2_PRS_IP_MASK),
125
126 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
127 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
128 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
129 MVPP2_PRS_IP_MASK),
130
131 /* UDP over IPv4 flows, fragmented, no vlan tag */
132 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
133 MVPP22_CLS_HEK_IP4_2T,
134 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
135 MVPP2_PRS_RI_L4_UDP,
136 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
137
138 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
139 MVPP22_CLS_HEK_IP4_2T,
140 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
141 MVPP2_PRS_RI_L4_UDP,
142 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
143
144 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
145 MVPP22_CLS_HEK_IP4_2T,
146 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
147 MVPP2_PRS_RI_L4_UDP,
148 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
149
150 /* UDP over IPv4 flows, fragmented, with vlan tag */
151 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
152 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
153 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
154 MVPP2_PRS_IP_MASK),
155
156 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
157 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
158 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
159 MVPP2_PRS_IP_MASK),
160
161 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
162 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
163 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
164 MVPP2_PRS_IP_MASK),
165
166 /* TCP over IPv6 flows, not fragmented, no vlan tag */
167 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
168 MVPP22_CLS_HEK_IP6_5T,
169 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
170 MVPP2_PRS_RI_L4_TCP,
171 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
172
173 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
174 MVPP22_CLS_HEK_IP6_5T,
175 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
176 MVPP2_PRS_RI_L4_TCP,
177 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
178
179 /* TCP over IPv6 flows, not fragmented, with vlan tag */
180 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
181 MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
182 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
183 MVPP2_PRS_IP_MASK),
184
185 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
186 MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
187 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
188 MVPP2_PRS_IP_MASK),
189
190 /* TCP over IPv6 flows, fragmented, no vlan tag */
191 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
192 MVPP22_CLS_HEK_IP6_2T,
193 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
194 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
195 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
196
197 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
198 MVPP22_CLS_HEK_IP6_2T,
199 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
200 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
201 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
202
203 /* TCP over IPv6 flows, fragmented, with vlan tag */
204 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
205 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
206 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
207 MVPP2_PRS_RI_L4_TCP,
208 MVPP2_PRS_IP_MASK),
209
210 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
211 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
212 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
213 MVPP2_PRS_RI_L4_TCP,
214 MVPP2_PRS_IP_MASK),
215
216 /* UDP over IPv6 flows, not fragmented, no vlan tag */
217 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
218 MVPP22_CLS_HEK_IP6_5T,
219 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
220 MVPP2_PRS_RI_L4_UDP,
221 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
222
223 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
224 MVPP22_CLS_HEK_IP6_5T,
225 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
226 MVPP2_PRS_RI_L4_UDP,
227 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
228
229 /* UDP over IPv6 flows, not fragmented, with vlan tag */
230 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
231 MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
232 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
233 MVPP2_PRS_IP_MASK),
234
235 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
236 MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
237 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
238 MVPP2_PRS_IP_MASK),
239
240 /* UDP over IPv6 flows, fragmented, no vlan tag */
241 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
242 MVPP22_CLS_HEK_IP6_2T,
243 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
244 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
245 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
246
247 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
248 MVPP22_CLS_HEK_IP6_2T,
249 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
250 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
251 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
252
253 /* UDP over IPv6 flows, fragmented, with vlan tag */
254 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
255 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
256 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
257 MVPP2_PRS_RI_L4_UDP,
258 MVPP2_PRS_IP_MASK),
259
260 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
261 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
262 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
263 MVPP2_PRS_RI_L4_UDP,
264 MVPP2_PRS_IP_MASK),
265
266 /* IPv4 flows, no vlan tag */
267 MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
268 MVPP22_CLS_HEK_IP4_2T,
269 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
270 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
271 MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
272 MVPP22_CLS_HEK_IP4_2T,
273 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
274 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
275 MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
276 MVPP22_CLS_HEK_IP4_2T,
277 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
278 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
279
280 /* IPv4 flows, with vlan tag */
281 MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
282 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
283 MVPP2_PRS_RI_L3_IP4,
284 MVPP2_PRS_RI_L3_PROTO_MASK),
285 MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
286 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
287 MVPP2_PRS_RI_L3_IP4_OPT,
288 MVPP2_PRS_RI_L3_PROTO_MASK),
289 MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
290 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
291 MVPP2_PRS_RI_L3_IP4_OTHER,
292 MVPP2_PRS_RI_L3_PROTO_MASK),
293
294 /* IPv6 flows, no vlan tag */
295 MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
296 MVPP22_CLS_HEK_IP6_2T,
297 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
298 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
299 MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
300 MVPP22_CLS_HEK_IP6_2T,
301 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
302 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
303
304 /* IPv6 flows, with vlan tag */
305 MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
306 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
307 MVPP2_PRS_RI_L3_IP6,
308 MVPP2_PRS_RI_L3_PROTO_MASK),
309 MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
310 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
311 MVPP2_PRS_RI_L3_IP6,
312 MVPP2_PRS_RI_L3_PROTO_MASK),
313
314 /* Non IP flow, no vlan tag */
315 MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
316 0,
317 MVPP2_PRS_RI_VLAN_NONE,
318 MVPP2_PRS_RI_VLAN_MASK),
319 /* Non IP flow, with vlan tag */
320 MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
321 MVPP22_CLS_HEK_OPT_VLAN,
322 0, 0),
323 };
324
/* Return the hit counter of the flow table entry at @index. The counter is
 * selected by writing the index register first, then reading the counter.
 */
u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);

	return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR);
}
331
/* Read the flow table entry at @index into @fe. The index register must be
 * written before the three data words are read back.
 */
void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
			 struct mvpp2_cls_flow_entry *fe)
{
	fe->index = index;
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index);
	fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG);
	fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG);
	fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG);
}
341
/* Update classification flow table registers: select the entry via the index
 * register, then program its three data words.
 */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
351
/* Return the hit counter of the lookup (decoding) table entry at @index */
u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);

	return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR);
}
358
mvpp2_cls_lookup_read(struct mvpp2 * priv,int lkpid,int way,struct mvpp2_cls_lookup_entry * le)359 void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
360 struct mvpp2_cls_lookup_entry *le)
361 {
362 u32 val;
363
364 val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid;
365 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
366 le->way = way;
367 le->lkpid = lkpid;
368 le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG);
369 }
370
371 /* Update classification lookup table register */
mvpp2_cls_lookup_write(struct mvpp2 * priv,struct mvpp2_cls_lookup_entry * le)372 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
373 struct mvpp2_cls_lookup_entry *le)
374 {
375 u32 val;
376
377 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
378 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
379 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
380 }
381
/* Operations on flow entry */
/* Return the number of Header Extracted Key fields encoded in data word 1 */
static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe)
{
	return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
}
387
mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry * fe,int num_of_fields)388 static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe,
389 int num_of_fields)
390 {
391 fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
392 fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields);
393 }
394
/* Return the HEK field id stored at @field_index in data word 2 */
static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe,
				  int field_index)
{
	return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) &
		MVPP2_CLS_FLOW_TBL2_FLD_MASK;
}
401
mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry * fe,int field_index,int field_id)402 static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe,
403 int field_index, int field_id)
404 {
405 fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index,
406 MVPP2_CLS_FLOW_TBL2_FLD_MASK);
407 fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id);
408 }
409
mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry * fe,int engine)410 static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe,
411 int engine)
412 {
413 fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK);
414 fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine);
415 }
416
/* Return the classification engine configured in this flow entry */
int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe)
{
	return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) &
		MVPP2_CLS_FLOW_TBL0_ENG_MASK;
}
422
/* Choose whether the port id used for matching comes from the packet
 * (@from_packet == true) or from the flow entry configuration.
 */
static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
				       bool from_packet)
{
	fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
	if (from_packet)
		fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
}
431
/* Set the position of this entry in the per-flow lookup sequence
 * (first / middle / last).
 */
static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq)
{
	fe->data[1] = (fe->data[1] &
		       ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK)) |
		      MVPP2_CLS_FLOW_TBL1_SEQ(seq);
}
437
/* Mark (or unmark) this entry as the last one of the lookup sequence.
 *
 * Use the MVPP2_CLS_FLOW_TBL0_LAST flag for the set path too: the original
 * code cleared the named flag but then OR-ed in '!!is_last', silently
 * assuming the flag is bit 0. Using the macro keeps both paths consistent
 * with the register layout definition.
 */
static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
				    bool is_last)
{
	fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST;
	if (is_last)
		fe->data[0] |= MVPP2_CLS_FLOW_TBL0_LAST;
}
444
mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry * fe,int prio)445 static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
446 {
447 fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
448 fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio);
449 }
450
/* Add @port (a bitmask, e.g. BIT(port_id)) to the entry's port map.
 * Ports accumulate: existing bits in the map are preserved.
 */
static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
				    u32 port)
{
	fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
}
456
/* Initialize the parser entry for the given flow: map the parser Result Info
 * (value + mask) of this flow to its flow table id.
 */
static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
				    struct mvpp2_cls_flow *flow)
{
	mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
			   flow->prs_ri.ri_mask);
}
464
465 /* Initialize the Lookup Id table entry for the given flow */
mvpp2_cls_flow_lkp_init(struct mvpp2 * priv,struct mvpp2_cls_flow * flow)466 static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
467 struct mvpp2_cls_flow *flow)
468 {
469 struct mvpp2_cls_lookup_entry le;
470
471 le.way = 0;
472 le.lkpid = flow->flow_id;
473
474 /* The default RxQ for this port is set in the C2 lookup */
475 le.data = 0;
476
477 /* We point on the first lookup in the sequence for the flow, that is
478 * the C2 lookup.
479 */
480 le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
481
482 /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
483 le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
484
485 mvpp2_cls_lookup_write(priv, &le);
486 }
487
/* Initialize the flow table entries for the given flow: one shared C2 entry
 * (first of the lookup sequence) followed by one C3Hx hash entry per port.
 */
static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
{
	struct mvpp2_cls_flow_entry fe;
	int i;

	/* C2 lookup: highest priority (0), matched against all ports */
	memset(&fe, 0, sizeof(fe));
	fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);

	mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
	mvpp2_cls_flow_port_id_sel(&fe, true);
	mvpp2_cls_flow_last_set(&fe, 0);
	mvpp2_cls_flow_pri_set(&fe, 0);
	mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1);

	/* Add all ports */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_cls_flow_port_add(&fe, BIT(i));

	mvpp2_cls_flow_write(priv, &fe);

	/* C3Hx lookups: one entry per port at priority i + 1, so they run
	 * after the C2 lookup above. The hash engine itself is selected
	 * later, when hash options are configured for the port.
	 */
	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
		memset(&fe, 0, sizeof(fe));
		fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id);

		mvpp2_cls_flow_port_id_sel(&fe, true);
		mvpp2_cls_flow_pri_set(&fe, i + 1);
		mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE);
		mvpp2_cls_flow_port_add(&fe, BIT(i));

		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Update the last entry: fe still holds the last C3Hx entry written
	 * in the loop above, so re-write it flagged as end of sequence.
	 */
	mvpp2_cls_flow_last_set(&fe, 1);
	mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST);

	mvpp2_cls_flow_write(priv, &fe);
}
529
530 /* Adds a field to the Header Extracted Key generation parameters*/
mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry * fe,u32 field_id)531 static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
532 u32 field_id)
533 {
534 int nb_fields = mvpp2_cls_flow_hek_num_get(fe);
535
536 if (nb_fields == MVPP2_FLOW_N_FIELDS)
537 return -EINVAL;
538
539 mvpp2_cls_flow_hek_set(fe, nb_fields, field_id);
540
541 mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1);
542
543 return 0;
544 }
545
mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry * fe,unsigned long hash_opts)546 static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
547 unsigned long hash_opts)
548 {
549 u32 field_id;
550 int i;
551
552 /* Clear old fields */
553 mvpp2_cls_flow_hek_num_set(fe, 0);
554 fe->data[2] = 0;
555
556 for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
557 switch (BIT(i)) {
558 case MVPP22_CLS_HEK_OPT_VLAN:
559 field_id = MVPP22_CLS_FIELD_VLAN;
560 break;
561 case MVPP22_CLS_HEK_OPT_IP4SA:
562 field_id = MVPP22_CLS_FIELD_IP4SA;
563 break;
564 case MVPP22_CLS_HEK_OPT_IP4DA:
565 field_id = MVPP22_CLS_FIELD_IP4DA;
566 break;
567 case MVPP22_CLS_HEK_OPT_IP6SA:
568 field_id = MVPP22_CLS_FIELD_IP6SA;
569 break;
570 case MVPP22_CLS_HEK_OPT_IP6DA:
571 field_id = MVPP22_CLS_FIELD_IP6DA;
572 break;
573 case MVPP22_CLS_HEK_OPT_L4SIP:
574 field_id = MVPP22_CLS_FIELD_L4SIP;
575 break;
576 case MVPP22_CLS_HEK_OPT_L4DIP:
577 field_id = MVPP22_CLS_FIELD_L4DIP;
578 break;
579 default:
580 return -EINVAL;
581 }
582 if (mvpp2_flow_add_hek_field(fe, field_id))
583 return -EINVAL;
584 }
585
586 return 0;
587 }
588
mvpp2_cls_flow_get(int flow)589 struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
590 {
591 if (flow >= MVPP2_N_FLOWS)
592 return NULL;
593
594 return &cls_flows[flow];
595 }
596
597 /* Set the hash generation options for the given traffic flow.
598 * One traffic flow (in the ethtool sense) has multiple classification flows,
599 * to handle specific cases such as fragmentation, or the presence of a
600 * VLAN / DSA Tag.
601 *
602 * Each of these individual flows has different constraints, for example we
603 * can't hash fragmented packets on L4 data (else we would risk having packet
604 * re-ordering), so each classification flows masks the options with their
605 * supported ones.
606 *
607 */
mvpp2_port_rss_hash_opts_set(struct mvpp2_port * port,int flow_type,u16 requested_opts)608 static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
609 u16 requested_opts)
610 {
611 struct mvpp2_cls_flow_entry fe;
612 struct mvpp2_cls_flow *flow;
613 int i, engine, flow_index;
614 u16 hash_opts;
615
616 for (i = 0; i < MVPP2_N_FLOWS; i++) {
617 flow = mvpp2_cls_flow_get(i);
618 if (!flow)
619 return -EINVAL;
620
621 if (flow->flow_type != flow_type)
622 continue;
623
624 flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
625 flow->flow_id);
626
627 mvpp2_cls_flow_read(port->priv, flow_index, &fe);
628
629 hash_opts = flow->supported_hash_opts & requested_opts;
630
631 /* Use C3HB engine to access L4 infos. This adds L4 infos to the
632 * hash parameters
633 */
634 if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
635 engine = MVPP22_CLS_ENGINE_C3HB;
636 else
637 engine = MVPP22_CLS_ENGINE_C3HA;
638
639 if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
640 return -EINVAL;
641
642 mvpp2_cls_flow_eng_set(&fe, engine);
643
644 mvpp2_cls_flow_write(port->priv, &fe);
645 }
646
647 return 0;
648 }
649
mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry * fe)650 u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
651 {
652 u16 hash_opts = 0;
653 int n_fields, i, field;
654
655 n_fields = mvpp2_cls_flow_hek_num_get(fe);
656
657 for (i = 0; i < n_fields; i++) {
658 field = mvpp2_cls_flow_hek_get(fe, i);
659
660 switch (field) {
661 case MVPP22_CLS_FIELD_MAC_DA:
662 hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
663 break;
664 case MVPP22_CLS_FIELD_VLAN:
665 hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
666 break;
667 case MVPP22_CLS_FIELD_L3_PROTO:
668 hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
669 break;
670 case MVPP22_CLS_FIELD_IP4SA:
671 hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
672 break;
673 case MVPP22_CLS_FIELD_IP4DA:
674 hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
675 break;
676 case MVPP22_CLS_FIELD_IP6SA:
677 hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
678 break;
679 case MVPP22_CLS_FIELD_IP6DA:
680 hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
681 break;
682 case MVPP22_CLS_FIELD_L4SIP:
683 hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
684 break;
685 case MVPP22_CLS_FIELD_L4DIP:
686 hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
687 break;
688 default:
689 break;
690 }
691 }
692 return hash_opts;
693 }
694
695 /* Returns the hash opts for this flow. There are several classifier flows
696 * for one traffic flow, this returns an aggregation of all configurations.
697 */
mvpp2_port_rss_hash_opts_get(struct mvpp2_port * port,int flow_type)698 static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
699 {
700 struct mvpp2_cls_flow_entry fe;
701 struct mvpp2_cls_flow *flow;
702 int i, flow_index;
703 u16 hash_opts = 0;
704
705 for (i = 0; i < MVPP2_N_FLOWS; i++) {
706 flow = mvpp2_cls_flow_get(i);
707 if (!flow)
708 return 0;
709
710 if (flow->flow_type != flow_type)
711 continue;
712
713 flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
714 flow->flow_id);
715
716 mvpp2_cls_flow_read(port->priv, flow_index, &fe);
717
718 hash_opts |= mvpp2_flow_get_hek_fields(&fe);
719 }
720
721 return hash_opts;
722 }
723
mvpp2_cls_port_init_flows(struct mvpp2 * priv)724 static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
725 {
726 struct mvpp2_cls_flow *flow;
727 int i;
728
729 for (i = 0; i < MVPP2_N_FLOWS; i++) {
730 flow = mvpp2_cls_flow_get(i);
731 if (!flow)
732 break;
733
734 mvpp2_cls_flow_prs_init(priv, flow);
735 mvpp2_cls_flow_lkp_init(priv, flow);
736 mvpp2_cls_flow_init(priv, flow);
737 }
738 }
739
/* Program a C2 TCAM entry: select the index, then write the TCAM match data,
 * the action word and the four attribute words.
 */
static void mvpp2_cls_c2_write(struct mvpp2 *priv,
			       struct mvpp2_cls_c2_entry *c2)
{
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);

	/* Write TCAM */
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);

	mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);

	mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
}
759
/* Read the C2 TCAM entry at @index into @c2 (TCAM data, action word and
 * attribute words). The index register must be written first.
 */
void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
		       struct mvpp2_cls_c2_entry *c2)
{
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);

	c2->index = index;

	c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
	c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
	c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
	c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
	c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);

	c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);

	c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
	c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
	c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
	c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
}
780
/* Program the per-port C2 RSS entry: match packets from this port only, and
 * set the default rx queue and RSS/forwarding actions.
 */
static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
{
	struct mvpp2_cls_c2_entry c2;
	u8 qh, ql, pmap;

	memset(&c2, 0, sizeof(c2));

	c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);

	/* Match on this port's bit in the port map, with the corresponding
	 * TCAM enable bits set so only the port id field is compared.
	 */
	pmap = BIT(port->id);
	c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));

	/* Update RSS status after matching this entry */
	c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);

	/* Mark packet as "forwarded to software", needed for RSS */
	c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);

	/* Configure the default rx queue : Update Queue Low and Queue High, but
	 * don't lock, since the rx queue selection might be overridden by RSS
	 */
	c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
		   MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);

	/* The queue number is split into a 3-bit low part and a high part */
	qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
	ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;

	c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
		      MVPP22_CLS_C2_ATTR0_QLOW(ql);

	mvpp2_cls_c2_write(port->priv, &c2);
}
814
/* Classifier default initialization */
void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table: both ways of every lookup id */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}

	/* Program parser/lookup/flow entries for all supported flows */
	mvpp2_cls_port_init_flows(priv);
}
845
/* Per-port classifier configuration: set the lookup way, program the port's
 * default lookup table entry, and install the C2 RSS entry.
 */
void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);

	/* Install the per-port C2 entry used for RSS and default RxQ */
	mvpp2_port_c2_cls_init(port);
}
875
/* Return the hit counter of the C2 TCAM entry at @c2_index */
u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
{
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index);

	return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
}
882
mvpp2_rss_port_c2_enable(struct mvpp2_port * port)883 static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
884 {
885 struct mvpp2_cls_c2_entry c2;
886
887 mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
888
889 c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
890
891 mvpp2_cls_c2_write(port->priv, &c2);
892 }
893
mvpp2_rss_port_c2_disable(struct mvpp2_port * port)894 static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
895 {
896 struct mvpp2_cls_c2_entry c2;
897
898 mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
899
900 c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
901
902 mvpp2_cls_c2_write(port->priv, &c2);
903 }
904
/* Enable RSS for the given port (via its C2 entry) */
void mvpp22_rss_enable(struct mvpp2_port *port)
{
	mvpp2_rss_port_c2_enable(port);
}
909
/* Disable RSS for the given port (via its C2 entry) */
void mvpp22_rss_disable(struct mvpp2_port *port)
{
	mvpp2_rss_port_c2_disable(port);
}
914
/* Set CPU queue number for oversize packets */
void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	/* The queue number is split between a low-bits register and a
	 * high-bits register.
	 */
	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	/* Forward oversize packets from this port to software */
	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
930
/* Translate a port-relative RxQ number from the user-supplied RSS
 * indirection table into the global RxQ number to program into the
 * hardware RSS table, spreading queues across the possible CPUs.
 *
 * Returns the port's first RxQ as a safe fallback when the target CPU
 * is offline, or when the port has fewer RxQs than possible CPUs.
 */
static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
{
	int nrxqs, cpu, cpus = num_possible_cpus();

	/* Number of RXQs per CPU */
	nrxqs = port->nrxqs / cpus;

	/* Guard against a division by zero below: nrxqs is 0 whenever the
	 * port has fewer RxQs than there are possible CPUs.
	 */
	if (!nrxqs)
		return port->first_rxq;

	/* CPU that will handle this rx queue */
	cpu = rxq / nrxqs;

	if (!cpu_online(cpu))
		return port->first_rxq;

	/* Indirection to better distribute the packets on the CPUs when
	 * configuring the RSS queues.
	 */
	return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
}
949
/* Program one hardware RSS table from the port's indirection table */
void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
{
	struct mvpp2 *priv = port->priv;
	int entry;

	for (entry = 0; entry < MVPP22_RSS_TABLE_ENTRIES; entry++) {
		u32 sel;

		/* Select the (table, entry) pair, then write the RxQ
		 * resolved from the indirection table.
		 */
		sel = MVPP22_RSS_INDEX_TABLE(table) |
		      MVPP22_RSS_INDEX_TABLE_ENTRY(entry);
		mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
		mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
			    mvpp22_rxfh_indir(port, port->indir[entry]));
	}
}
964
mvpp2_ethtool_rxfh_set(struct mvpp2_port * port,struct ethtool_rxnfc * info)965 int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
966 {
967 u16 hash_opts = 0;
968
969 switch (info->flow_type) {
970 case TCP_V4_FLOW:
971 case UDP_V4_FLOW:
972 case TCP_V6_FLOW:
973 case UDP_V6_FLOW:
974 if (info->data & RXH_L4_B_0_1)
975 hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
976 if (info->data & RXH_L4_B_2_3)
977 hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
978 /* Fallthrough */
979 case IPV4_FLOW:
980 case IPV6_FLOW:
981 if (info->data & RXH_L2DA)
982 hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
983 if (info->data & RXH_VLAN)
984 hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
985 if (info->data & RXH_L3_PROTO)
986 hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
987 if (info->data & RXH_IP_SRC)
988 hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA |
989 MVPP22_CLS_HEK_OPT_IP6SA);
990 if (info->data & RXH_IP_DST)
991 hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA |
992 MVPP22_CLS_HEK_OPT_IP6DA);
993 break;
994 default: return -EOPNOTSUPP;
995 }
996
997 return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
998 }
999
mvpp2_ethtool_rxfh_get(struct mvpp2_port * port,struct ethtool_rxnfc * info)1000 int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
1001 {
1002 unsigned long hash_opts;
1003 int i;
1004
1005 hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
1006 info->data = 0;
1007
1008 for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
1009 switch (BIT(i)) {
1010 case MVPP22_CLS_HEK_OPT_MAC_DA:
1011 info->data |= RXH_L2DA;
1012 break;
1013 case MVPP22_CLS_HEK_OPT_VLAN:
1014 info->data |= RXH_VLAN;
1015 break;
1016 case MVPP22_CLS_HEK_OPT_L3_PROTO:
1017 info->data |= RXH_L3_PROTO;
1018 break;
1019 case MVPP22_CLS_HEK_OPT_IP4SA:
1020 case MVPP22_CLS_HEK_OPT_IP6SA:
1021 info->data |= RXH_IP_SRC;
1022 break;
1023 case MVPP22_CLS_HEK_OPT_IP4DA:
1024 case MVPP22_CLS_HEK_OPT_IP6DA:
1025 info->data |= RXH_IP_DST;
1026 break;
1027 case MVPP22_CLS_HEK_OPT_L4SIP:
1028 info->data |= RXH_L4_B_0_1;
1029 break;
1030 case MVPP22_CLS_HEK_OPT_L4DIP:
1031 info->data |= RXH_L4_B_2_3;
1032 break;
1033 default:
1034 return -EINVAL;
1035 }
1036 }
1037 return 0;
1038 }
1039
mvpp22_rss_port_init(struct mvpp2_port * port)1040 void mvpp22_rss_port_init(struct mvpp2_port *port)
1041 {
1042 struct mvpp2 *priv = port->priv;
1043 int i;
1044
1045 /* Set the table width: replace the whole classifier Rx queue number
1046 * with the ones configured in RSS table entries.
1047 */
1048 mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
1049 mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
1050
1051 /* The default RxQ is used as a key to select the RSS table to use.
1052 * We use one RSS table per port.
1053 */
1054 mvpp2_write(priv, MVPP22_RSS_INDEX,
1055 MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
1056 mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
1057 MVPP22_RSS_TABLE_POINTER(port->id));
1058
1059 /* Configure the first table to evenly distribute the packets across
1060 * real Rx Queues. The table entries map a hash to a port Rx Queue.
1061 */
1062 for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
1063 port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
1064
1065 mvpp22_rss_fill_table(port, port->id);
1066
1067 /* Configure default flows */
1068 mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
1069 mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
1070 mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
1071 mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
1072 mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
1073 mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
1074 }
1075