1 /*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34 #include <net/ipv6.h>
35
36 #include "cxgb4.h"
37 #include "t4_regs.h"
38 #include "t4_tcb.h"
39 #include "t4_values.h"
40 #include "clip_tbl.h"
41 #include "l2t.h"
42 #include "smt.h"
43 #include "t4fw_api.h"
44 #include "cxgb4_filter.h"
45
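/* Return true if the user supplied either a value or a mask for this
 * match field, i.e. the field participates in the filter rule.
 */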
46 static inline bool is_field_set(u32 val, u32 mask)
47 {
48 return val || mask;
49 }
50
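/* Return true if a match field is used by the filter rule but the
 * corresponding field is not enabled in the adapter's filter
 * configuration (conf).
 */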
51 static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
52 {
53 return !(conf & conf_mask) && is_field_set(val, mask);
54 }
55
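/* Send a CPL_SET_TCB_FIELD to update one word of the TCB backing a
 * hash filter. The request is queued on the control queue for the
 * filter's ingress port.
 */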
56 static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
57 unsigned int ftid, u16 word, u64 mask, u64 val,
58 int no_reply)
59 {
60 struct cpl_set_tcb_field *req;
61 struct sk_buff *skb;
62
63 skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
64 if (!skb)
65 return -ENOMEM;
66
67 req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
68 INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
69 req->reply_ctrl = htons(REPLY_CHAN_V(0) |
70 QUEUENO_V(adap->sge.fw_evtq.abs_id) |
71 NO_REPLY_V(no_reply));
72 req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid));
73 req->mask = cpu_to_be64(mask);
74 req->val = cpu_to_be64(val);
75 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
76 t4_ofld_send(adap, skb);
77 return 0;
78 }
79
80 /* Set one of the t_flags bits in the TCB.
81 */
82 static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f,
83 unsigned int ftid, unsigned int bit_pos,
84 unsigned int val, int no_reply)
85 {
86 return set_tcb_field(adap, f, ftid, TCB_T_FLAGS_W, 1ULL << bit_pos,
87 (unsigned long long)val << bit_pos, no_reply);
88 }
89
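/* Build a CPL_ABORT_REQ for the given tid, wrapped as immediate data
 * inside a ULP_TX packet.
 */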
90 static void mk_abort_req_ulp(struct cpl_abort_req *abort_req, unsigned int tid)
91 {
92 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
93 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
94
95 txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
96 txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_req), 16));
97 sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
98 sc->len = htonl(sizeof(*abort_req) - sizeof(struct work_request_hdr));
99 OPCODE_TID(abort_req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
100 abort_req->rsvd0 = htonl(0);
101 abort_req->rsvd1 = 0;
102 abort_req->cmd = CPL_ABORT_NO_RST;
103 }
104
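/* Build a CPL_ABORT_RPL for the given tid, wrapped as immediate data
 * inside a ULP_TX packet.
 */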
105 static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl, unsigned int tid)
106 {
107 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
108 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
109
110 txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
111 txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
112 sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
113 sc->len = htonl(sizeof(*abort_rpl) - sizeof(struct work_request_hdr));
114 OPCODE_TID(abort_rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
115 abort_rpl->rsvd0 = htonl(0);
116 abort_rpl->rsvd1 = 0;
117 abort_rpl->cmd = CPL_ABORT_NO_RST;
118 }
119
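/* Build a CPL_SET_TCB_FIELD request as immediate data inside a ULP_TX
 * packet, followed by a ULP_TX_SC_NOOP.
 */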
120 static void mk_set_tcb_ulp(struct filter_entry *f,
121 struct cpl_set_tcb_field *req,
122 unsigned int word, u64 mask, u64 val,
123 u8 cookie, int no_reply)
124 {
125 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
126 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
127
128 txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
129 txpkt->len = htonl(DIV_ROUND_UP(sizeof(*req), 16));
130 sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
131 sc->len = htonl(sizeof(*req) - sizeof(struct work_request_hdr));
132 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
133 req->reply_ctrl = htons(NO_REPLY_V(no_reply) | REPLY_CHAN_V(0) |
134 QUEUENO_V(0));
135 req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
136 req->mask = cpu_to_be64(mask);
137 req->val = cpu_to_be64(val);
138 sc = (struct ulptx_idata *)(req + 1);
139 sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
140 sc->len = htonl(0);
141 }
142
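/* Update a hash filter's TCB with the SMT index used for Source MAC
 * rewriting (and set the CWR flag).
 */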
143 static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
144 {
145 int err;
146
147 /* Do a set-tcb for smac-sel and the CWR bit. */
148 err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
149 if (err)
150 goto smac_err;
151
152 err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
153 TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
154 TCB_SMAC_SEL_V(f->smt->idx), 1);
155 if (!err)
156 return 0;
157
158 smac_err:
159 dev_err(adap->pdev_dev, "filter %u smac config failed with error %d\n",
160 f->tid, err);
161 return err;
162 }
163
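/* Program the NAT rewrite parameters (local/foreign IP addresses and
 * ports) of a hash filter into the TCB, selecting only the fields
 * required by the filter's NAT mode.
 */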
164 static void set_nat_params(struct adapter *adap, struct filter_entry *f,
165 unsigned int tid, bool dip, bool sip, bool dp,
166 bool sp)
167 {
168 if (dip) {
169 if (f->fs.type) {
170 set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
171 WORD_MASK, f->fs.nat_lip[15] |
172 f->fs.nat_lip[14] << 8 |
173 f->fs.nat_lip[13] << 16 |
174 f->fs.nat_lip[12] << 24, 1);
175
176 set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
177 WORD_MASK, f->fs.nat_lip[11] |
178 f->fs.nat_lip[10] << 8 |
179 f->fs.nat_lip[9] << 16 |
180 f->fs.nat_lip[8] << 24, 1);
181
182 set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
183 WORD_MASK, f->fs.nat_lip[7] |
184 f->fs.nat_lip[6] << 8 |
185 f->fs.nat_lip[5] << 16 |
186 f->fs.nat_lip[4] << 24, 1);
187
188 set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
189 WORD_MASK, f->fs.nat_lip[3] |
190 f->fs.nat_lip[2] << 8 |
191 f->fs.nat_lip[1] << 16 |
192 f->fs.nat_lip[0] << 24, 1);
193 } else {
194 set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
195 WORD_MASK, f->fs.nat_lip[3] |
196 f->fs.nat_lip[2] << 8 |
197 f->fs.nat_lip[1] << 16 |
198 f->fs.nat_lip[0] << 24, 1);
199 }
200 }
201
202 if (sip) {
203 if (f->fs.type) {
204 set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W,
205 WORD_MASK, f->fs.nat_fip[15] |
206 f->fs.nat_fip[14] << 8 |
207 f->fs.nat_fip[13] << 16 |
208 f->fs.nat_fip[12] << 24, 1);
209
210 set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
211 WORD_MASK, f->fs.nat_fip[11] |
212 f->fs.nat_fip[10] << 8 |
213 f->fs.nat_fip[9] << 16 |
214 f->fs.nat_fip[8] << 24, 1);
215
216 set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
217 WORD_MASK, f->fs.nat_fip[7] |
218 f->fs.nat_fip[6] << 8 |
219 f->fs.nat_fip[5] << 16 |
220 f->fs.nat_fip[4] << 24, 1);
221
222 set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
223 WORD_MASK, f->fs.nat_fip[3] |
224 f->fs.nat_fip[2] << 8 |
225 f->fs.nat_fip[1] << 16 |
226 f->fs.nat_fip[0] << 24, 1);
227
228 } else {
229 set_tcb_field(adap, f, tid,
230 TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W,
231 WORD_MASK, f->fs.nat_fip[3] |
232 f->fs.nat_fip[2] << 8 |
233 f->fs.nat_fip[1] << 16 |
234 f->fs.nat_fip[0] << 24, 1);
235 }
236 }
237
238 set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
239 (dp ? f->fs.nat_lport : 0) |
240 (sp ? f->fs.nat_fport << 16 : 0), 1);
241 }
242
243 /* Validate filter spec against configuration done on the card. */
244 static int validate_filter(struct net_device *dev,
245 struct ch_filter_specification *fs)
246 {
247 struct adapter *adapter = netdev2adap(dev);
248 u32 fconf, iconf;
249
250 /* Check for unconfigured fields being used. */
251 fconf = adapter->params.tp.vlan_pri_map;
252 iconf = adapter->params.tp.ingress_config;
253
254 if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
255 unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
256 unsupported(fconf, TOS_F, fs->val.tos, fs->mask.tos) ||
257 unsupported(fconf, ETHERTYPE_F, fs->val.ethtype,
258 fs->mask.ethtype) ||
259 unsupported(fconf, MACMATCH_F, fs->val.macidx, fs->mask.macidx) ||
260 unsupported(fconf, MPSHITTYPE_F, fs->val.matchtype,
261 fs->mask.matchtype) ||
262 unsupported(fconf, FRAGMENTATION_F, fs->val.frag, fs->mask.frag) ||
263 unsupported(fconf, PROTOCOL_F, fs->val.proto, fs->mask.proto) ||
264 unsupported(fconf, VNIC_ID_F, fs->val.pfvf_vld,
265 fs->mask.pfvf_vld) ||
266 unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
267 fs->mask.ovlan_vld) ||
268 unsupported(fconf, VNIC_ID_F, fs->val.encap_vld,
269 fs->mask.encap_vld) ||
270 unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
271 return -EOPNOTSUPP;
272
273 /* T4 inconveniently uses the same FT_VNIC_ID_W bits for both the Outer
274 * VLAN Tag and PF/VF/VFvld fields based on VNIC_F being set
275 * in TP_INGRESS_CONFIG. Hence the somewhat crazy checks
276 * below. Additionally, since the T4 firmware interface also
277 * carries that overlap, we need to translate any PF/VF
278 * specification into that internal format below.
279 */
280 if ((is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
281 is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld)) ||
282 (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
283 is_field_set(fs->val.encap_vld, fs->mask.encap_vld)) ||
284 (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
285 is_field_set(fs->val.encap_vld, fs->mask.encap_vld)))
286 return -EOPNOTSUPP;
287 if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
288 (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
289 (iconf & VNIC_F)))
290 return -EOPNOTSUPP;
291 if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
292 return -ERANGE;
293 fs->mask.pf &= 0x7;
294 fs->mask.vf &= 0x7f;
295
296 /* If the user is requesting that the filter action loop
297 * matching packets back out one of our ports, make sure that
298 * the egress port is in range.
299 */
300 if (fs->action == FILTER_SWITCH &&
301 fs->eport >= adapter->params.nports)
302 return -ERANGE;
303
304 /* Don't allow various trivially obvious bogus out-of-range values... */
305 if (fs->val.iport >= adapter->params.nports)
306 return -ERANGE;
307
308 /* T4 doesn't support removing VLAN Tags for loopback filters. */
309 if (is_t4(adapter->params.chip) &&
310 fs->action == FILTER_SWITCH &&
311 (fs->newvlan == VLAN_REMOVE ||
312 fs->newvlan == VLAN_REWRITE))
313 return -EOPNOTSUPP;
314
315 if (fs->val.encap_vld &&
316 CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
317 return -EOPNOTSUPP;
318 return 0;
319 }
320
321 static int get_filter_steerq(struct net_device *dev,
322 struct ch_filter_specification *fs)
323 {
324 struct adapter *adapter = netdev2adap(dev);
325 int iq;
326
327 /* If the user has requested steering matching Ingress Packets
328 * to a specific Queue Set, we need to make sure it's in range
329 * for the port and map that into the Absolute Queue ID of the
330 * Queue Set's Response Queue.
331 */
332 if (!fs->dirsteer) {
333 if (fs->iq)
334 return -EINVAL;
335 iq = 0;
336 } else {
337 struct port_info *pi = netdev_priv(dev);
338
339 /* If the iq id is greater than the number of qsets,
340 * then assume it is an absolute qid.
341 */
342 if (fs->iq < pi->nqsets)
343 iq = adapter->sge.ethrxq[pi->first_qset +
344 fs->iq].rspq.abs_id;
345 else
346 iq = fs->iq;
347 }
348
349 return iq;
350 }
351
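/* Read a filter's packet and byte counters directly from its TCB in
 * adapter memory. T4 only maintains a packet count in hardware.
 */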
352 static int get_filter_count(struct adapter *adapter, unsigned int fidx,
353 u64 *pkts, u64 *bytes, bool hash)
354 {
355 unsigned int tcb_base, tcbaddr;
356 unsigned int word_offset;
357 struct filter_entry *f;
358 __be64 be64_byte_count;
359 int ret;
360
361 tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A);
362 if (is_hashfilter(adapter) && hash) {
363 if (fidx < adapter->tids.ntids) {
364 f = adapter->tids.tid_tab[fidx];
365 if (!f)
366 return -EINVAL;
367 } else {
368 return -E2BIG;
369 }
370 } else {
371 if ((fidx != (adapter->tids.nftids +
372 adapter->tids.nsftids - 1)) &&
373 fidx >= adapter->tids.nftids)
374 return -E2BIG;
375
376 f = &adapter->tids.ftid_tab[fidx];
377 if (!f->valid)
378 return -EINVAL;
379 }
380 tcbaddr = tcb_base + f->tid * TCB_SIZE;
381
382 spin_lock(&adapter->win0_lock);
383 if (is_t4(adapter->params.chip)) {
384 __be64 be64_count;
385
386 /* T4 doesn't maintain byte counts in hw */
387 *bytes = 0;
388
389 /* Get pkts */
390 word_offset = 4;
391 ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
392 tcbaddr + (word_offset * sizeof(__be32)),
393 sizeof(be64_count),
394 (__be32 *)&be64_count,
395 T4_MEMORY_READ);
396 if (ret < 0)
397 goto out;
398 *pkts = be64_to_cpu(be64_count);
399 } else {
400 __be32 be32_count;
401
402 /* Get bytes */
403 word_offset = 4;
404 ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
405 tcbaddr + (word_offset * sizeof(__be32)),
406 sizeof(be64_byte_count),
407 &be64_byte_count,
408 T4_MEMORY_READ);
409 if (ret < 0)
410 goto out;
411 *bytes = be64_to_cpu(be64_byte_count);
412
413 /* Get pkts */
414 word_offset = 6;
415 ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
416 tcbaddr + (word_offset * sizeof(__be32)),
417 sizeof(be32_count),
418 &be32_count,
419 T4_MEMORY_READ);
420 if (ret < 0)
421 goto out;
422 *pkts = (u64)be32_to_cpu(be32_count);
423 }
424
425 out:
426 spin_unlock(&adapter->win0_lock);
427 return ret;
428 }
429
430 int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx,
431 u64 *hitcnt, u64 *bytecnt, bool hash)
432 {
433 struct adapter *adapter = netdev2adap(dev);
434
435 return get_filter_count(adapter, fidx, hitcnt, bytecnt, hash);
436 }
437
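/* Return a free filter index for the given address family, or a
 * negative value if none is available. IPv6 filters need a 2-slot (T6)
 * or 4-slot (pre-T6) aligned region.
 */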
438 int cxgb4_get_free_ftid(struct net_device *dev, int family)
439 {
440 struct adapter *adap = netdev2adap(dev);
441 struct tid_info *t = &adap->tids;
442 int ftid;
443
444 spin_lock_bh(&t->ftid_lock);
445 if (family == PF_INET) {
446 ftid = find_first_zero_bit(t->ftid_bmap, t->nftids);
447 if (ftid >= t->nftids)
448 ftid = -1;
449 } else {
450 if (is_t6(adap->params.chip)) {
451 ftid = bitmap_find_free_region(t->ftid_bmap,
452 t->nftids, 1);
453 if (ftid < 0)
454 goto out_unlock;
455
456 /* this is only a lookup, keep the found region
457 * unallocated
458 */
459 bitmap_release_region(t->ftid_bmap, ftid, 1);
460 } else {
461 ftid = bitmap_find_free_region(t->ftid_bmap,
462 t->nftids, 2);
463 if (ftid < 0)
464 goto out_unlock;
465
466 bitmap_release_region(t->ftid_bmap, ftid, 2);
467 }
468 }
469 out_unlock:
470 spin_unlock_bh(&t->ftid_lock);
471 return ftid;
472 }
473
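/* Mark a filter slot (or, for IPv6, a slot region) as allocated in the
 * filter bitmap. Returns -EBUSY if the base slot is already taken.
 */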
474 static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
475 unsigned int chip_ver)
476 {
477 spin_lock_bh(&t->ftid_lock);
478
479 if (test_bit(fidx, t->ftid_bmap)) {
480 spin_unlock_bh(&t->ftid_lock);
481 return -EBUSY;
482 }
483
484 if (family == PF_INET) {
485 __set_bit(fidx, t->ftid_bmap);
486 } else {
487 if (chip_ver < CHELSIO_T6)
488 bitmap_allocate_region(t->ftid_bmap, fidx, 2);
489 else
490 bitmap_allocate_region(t->ftid_bmap, fidx, 1);
491 }
492
493 spin_unlock_bh(&t->ftid_lock);
494 return 0;
495 }
496
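/* Release a filter slot (or, for IPv6, a slot region) in the filter
 * bitmap.
 */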
497 static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
498 unsigned int chip_ver)
499 {
500 spin_lock_bh(&t->ftid_lock);
501 if (family == PF_INET) {
502 __clear_bit(fidx, t->ftid_bmap);
503 } else {
504 if (chip_ver < CHELSIO_T6)
505 bitmap_release_region(t->ftid_bmap, fidx, 2);
506 else
507 bitmap_release_region(t->ftid_bmap, fidx, 1);
508 }
509 spin_unlock_bh(&t->ftid_lock);
510 }
511
512 /* Delete the filter at a specified index. */
513 static int del_filter_wr(struct adapter *adapter, int fidx)
514 {
515 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
516 struct fw_filter_wr *fwr;
517 struct sk_buff *skb;
518 unsigned int len;
519
520 len = sizeof(*fwr);
521
522 skb = alloc_skb(len, GFP_KERNEL);
523 if (!skb)
524 return -ENOMEM;
525
526 fwr = __skb_put(skb, len);
527 t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & SHUTTING_DOWN) ? -1
528 : adapter->sge.fw_evtq.abs_id);
529
530 /* Mark the filter as "pending" and ship off the Filter Work Request.
531 * When we get the Work Request Reply we'll clear the pending status.
532 */
533 f->pending = 1;
534 t4_mgmt_tx(adapter, skb);
535 return 0;
536 }
537
538 /* Send a Work Request to write the filter at a specified index. We construct
539 * a Firmware Filter Work Request to have the work done and put the indicated
540 * filter into "pending" mode which will prevent any further actions against
541 * it till we get a reply from the firmware on the completion status of the
542 * request.
543 */
544 int set_filter_wr(struct adapter *adapter, int fidx)
545 {
546 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
547 struct fw_filter2_wr *fwr;
548 struct sk_buff *skb;
549
550 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
551 if (!skb)
552 return -ENOMEM;
553
554 /* If the new filter requires loopback Destination MAC and/or VLAN
555 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
556 * the filter.
557 */
558 if (f->fs.newdmac || f->fs.newvlan) {
559 /* allocate L2T entry for new filter */
560 f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
561 f->fs.eport, f->fs.dmac);
562 if (!f->l2t) {
563 kfree_skb(skb);
564 return -ENOMEM;
565 }
566 }
567
568 /* If the new filter requires loopback Source MAC rewriting then
569 * we need to allocate a SMT entry for the filter.
570 */
571 if (f->fs.newsmac) {
572 f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
573 if (!f->smt) {
574 if (f->l2t) {
575 cxgb4_l2t_release(f->l2t);
576 f->l2t = NULL;
577 }
578 kfree_skb(skb);
579 return -ENOMEM;
580 }
581 }
582
583 fwr = __skb_put_zero(skb, sizeof(*fwr));
584
585 /* It would be nice to put most of the following in t4_hw.c but most
586 * of the work is translating the cxgbtool ch_filter_specification
587 * into the Work Request and the definition of that structure is
588 * currently in cxgbtool.h which isn't appropriate to pull into the
589 * common code. We may eventually try to come up with a more neutral
590 * filter specification structure but for now it's easiest to simply
591 * put this fairly direct code in line ...
592 */
593 if (adapter->params.filter2_wr_support)
594 fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER2_WR));
595 else
596 fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
597 fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
598 fwr->tid_to_iq =
599 htonl(FW_FILTER_WR_TID_V(f->tid) |
600 FW_FILTER_WR_RQTYPE_V(f->fs.type) |
601 FW_FILTER_WR_NOREPLY_V(0) |
602 FW_FILTER_WR_IQ_V(f->fs.iq));
603 fwr->del_filter_to_l2tix =
604 htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
605 FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
606 FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
607 FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
608 FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
609 FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
610 FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
611 FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
612 f->fs.newvlan == VLAN_REWRITE) |
613 FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
614 f->fs.newvlan == VLAN_REWRITE) |
615 FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
616 FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
617 FW_FILTER_WR_PRIO_V(f->fs.prio) |
618 FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
619 fwr->ethtype = htons(f->fs.val.ethtype);
620 fwr->ethtypem = htons(f->fs.mask.ethtype);
621 fwr->frag_to_ovlan_vldm =
622 (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
623 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
624 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
625 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
626 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
627 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
628 fwr->smac_sel = 0;
629 fwr->rx_chan_rx_rpl_iq =
630 htons(FW_FILTER_WR_RX_CHAN_V(0) |
631 FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
632 fwr->maci_to_matchtypem =
633 htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
634 FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
635 FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
636 FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
637 FW_FILTER_WR_PORT_V(f->fs.val.iport) |
638 FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
639 FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
640 FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
641 fwr->ptcl = f->fs.val.proto;
642 fwr->ptclm = f->fs.mask.proto;
643 fwr->ttyp = f->fs.val.tos;
644 fwr->ttypm = f->fs.mask.tos;
645 fwr->ivlan = htons(f->fs.val.ivlan);
646 fwr->ivlanm = htons(f->fs.mask.ivlan);
647 fwr->ovlan = htons(f->fs.val.ovlan);
648 fwr->ovlanm = htons(f->fs.mask.ovlan);
649 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
650 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
651 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
652 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
653 fwr->lp = htons(f->fs.val.lport);
654 fwr->lpm = htons(f->fs.mask.lport);
655 fwr->fp = htons(f->fs.val.fport);
656 fwr->fpm = htons(f->fs.mask.fport);
657
658 if (adapter->params.filter2_wr_support) {
659 fwr->natmode_to_ulp_type =
660 FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
661 ULP_MODE_TCPDDP :
662 ULP_MODE_NONE) |
663 FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
664 memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
665 memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
666 fwr->newlport = htons(f->fs.nat_lport);
667 fwr->newfport = htons(f->fs.nat_fport);
668 }
669
670 /* Mark the filter as "pending" and ship off the Filter Work Request.
671 * When we get the Work Request Reply we'll clear the pending status.
672 */
673 f->pending = 1;
674 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
675 t4_ofld_send(adapter, skb);
676 return 0;
677 }
678
679 /* Return an error number if the indicated filter isn't writable ... */
680 int writable_filter(struct filter_entry *f)
681 {
682 if (f->locked)
683 return -EPERM;
684 if (f->pending)
685 return -EBUSY;
686
687 return 0;
688 }
689
690 /* Delete the filter at the specified index (if valid). This checks for all
691 * the common problems with doing so, such as the filter being locked or
692 * currently pending in another operation.
693 */
694 int delete_filter(struct adapter *adapter, unsigned int fidx)
695 {
696 struct filter_entry *f;
697 int ret;
698
699 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
700 return -EINVAL;
701
702 f = &adapter->tids.ftid_tab[fidx];
703 ret = writable_filter(f);
704 if (ret)
705 return ret;
706 if (f->valid)
707 return del_filter_wr(adapter, fidx);
708
709 return 0;
710 }
711
712 /* Clear a filter and release any of its resources that we own. This also
713 * clears the filter's "pending" status.
714 */
715 void clear_filter(struct adapter *adap, struct filter_entry *f)
716 {
717 struct port_info *pi = netdev_priv(f->dev);
718
719 /* If the new or old filter has loopback rewriting rules then we'll
720 * need to free any existing L2T, SMT or CLIP entries of the filter
721 * rule.
722 */
723 if (f->l2t)
724 cxgb4_l2t_release(f->l2t);
725
726 if (f->smt)
727 cxgb4_smt_release(f->smt);
728
729 if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
730 if (atomic_dec_and_test(&adap->mps_encap[f->fs.val.ovlan &
731 0x1ff].refcnt))
732 t4_free_encap_mac_filt(adap, pi->viid,
733 f->fs.val.ovlan & 0x1ff, 0);
734
735 if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
736 cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
737
738 /* The zeroing of the filter rule below clears the filter valid,
739 * pending, locked flags, l2t pointer, etc. so it's all we need for
740 * this operation.
741 */
742 memset(f, 0, sizeof(*f));
743 }
744
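/* Clear every valid or pending filter in the normal filter region. */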
745 void clear_all_filters(struct adapter *adapter)
746 {
747 unsigned int i;
748
749 if (adapter->tids.ftid_tab) {
750 struct filter_entry *f = &adapter->tids.ftid_tab[0];
751 unsigned int max_ftid = adapter->tids.nftids +
752 adapter->tids.nsftids;
753
754 for (i = 0; i < max_ftid; i++, f++)
755 if (f->valid || f->pending)
756 clear_filter(adapter, f);
757 }
758 }
759
760 /* Fill in default (all-ones) masks for match fields that have a value set. */
761 static void fill_default_mask(struct ch_filter_specification *fs)
762 {
763 unsigned int lip = 0, lip_mask = 0;
764 unsigned int fip = 0, fip_mask = 0;
765 unsigned int i;
766
767 if (fs->val.iport && !fs->mask.iport)
768 fs->mask.iport |= ~0;
769 if (fs->val.fcoe && !fs->mask.fcoe)
770 fs->mask.fcoe |= ~0;
771 if (fs->val.matchtype && !fs->mask.matchtype)
772 fs->mask.matchtype |= ~0;
773 if (fs->val.macidx && !fs->mask.macidx)
774 fs->mask.macidx |= ~0;
775 if (fs->val.ethtype && !fs->mask.ethtype)
776 fs->mask.ethtype |= ~0;
777 if (fs->val.ivlan && !fs->mask.ivlan)
778 fs->mask.ivlan |= ~0;
779 if (fs->val.ovlan && !fs->mask.ovlan)
780 fs->mask.ovlan |= ~0;
781 if (fs->val.frag && !fs->mask.frag)
782 fs->mask.frag |= ~0;
783 if (fs->val.tos && !fs->mask.tos)
784 fs->mask.tos |= ~0;
785 if (fs->val.proto && !fs->mask.proto)
786 fs->mask.proto |= ~0;
787
788 for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
789 lip |= fs->val.lip[i];
790 lip_mask |= fs->mask.lip[i];
791 fip |= fs->val.fip[i];
792 fip_mask |= fs->mask.fip[i];
793 }
794
795 if (lip && !lip_mask)
796 memset(fs->mask.lip, ~0, sizeof(fs->mask.lip));
797
798 if (fip && !fip_mask)
799 memset(fs->mask.fip, ~0, sizeof(fs->mask.fip));
800
801 if (fs->val.lport && !fs->mask.lport)
802 fs->mask.lport = ~0;
803 if (fs->val.fport && !fs->mask.fport)
804 fs->mask.fport = ~0;
805 }
806
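/* Return true if the IPv4/IPv6 address mask selects all address bits,
 * i.e. the filter matches the address exactly.
 */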
807 static bool is_addr_all_mask(u8 *ipmask, int family)
808 {
809 if (family == AF_INET) {
810 struct in_addr *addr;
811
812 addr = (struct in_addr *)ipmask;
813 if (addr->s_addr == 0xffffffff)
814 return true;
815 } else if (family == AF_INET6) {
816 struct in6_addr *addr6;
817
818 addr6 = (struct in6_addr *)ipmask;
819 if (addr6->s6_addr32[0] == 0xffffffff &&
820 addr6->s6_addr32[1] == 0xffffffff &&
821 addr6->s6_addr32[2] == 0xffffffff &&
822 addr6->s6_addr32[3] == 0xffffffff)
823 return true;
824 }
825 return false;
826 }
827
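/* Return true if the given address is the IPv4 or IPv6 wildcard (any)
 * address.
 */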
828 static bool is_inaddr_any(u8 *ip, int family)
829 {
830 int addr_type;
831
832 if (family == AF_INET) {
833 struct in_addr *addr;
834
835 addr = (struct in_addr *)ip;
836 if (addr->s_addr == htonl(INADDR_ANY))
837 return true;
838 } else if (family == AF_INET6) {
839 struct in6_addr *addr6;
840
841 addr6 = (struct in6_addr *)ip;
842 addr_type = ipv6_addr_type((const struct in6_addr *)
843 addr6);
844 if (addr_type == IPV6_ADDR_ANY)
845 return true;
846 }
847 return false;
848 }
849
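/* Decide whether a filter specification can be installed as a hash
 * (exact-match) filter: addresses and ports must be fully specified
 * and the resulting tuple mask must match the mask configured in
 * hardware.
 */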
850 bool is_filter_exact_match(struct adapter *adap,
851 struct ch_filter_specification *fs)
852 {
853 struct tp_params *tp = &adap->params.tp;
854 u64 hash_filter_mask = tp->hash_filter_mask;
855 u64 ntuple_mask = 0;
856
857 if (!is_hashfilter(adap))
858 return false;
859
860 /* Keep tunnel VNI match disabled for hash-filters for now */
861 if (fs->mask.encap_vld)
862 return false;
863
864 if (fs->type) {
865 if (is_inaddr_any(fs->val.fip, AF_INET6) ||
866 !is_addr_all_mask(fs->mask.fip, AF_INET6))
867 return false;
868
869 if (is_inaddr_any(fs->val.lip, AF_INET6) ||
870 !is_addr_all_mask(fs->mask.lip, AF_INET6))
871 return false;
872 } else {
873 if (is_inaddr_any(fs->val.fip, AF_INET) ||
874 !is_addr_all_mask(fs->mask.fip, AF_INET))
875 return false;
876
877 if (is_inaddr_any(fs->val.lip, AF_INET) ||
878 !is_addr_all_mask(fs->mask.lip, AF_INET))
879 return false;
880 }
881
882 if (!fs->val.lport || fs->mask.lport != 0xffff)
883 return false;
884
885 if (!fs->val.fport || fs->mask.fport != 0xffff)
886 return false;
887
888 /* calculate tuple mask and compare with mask configured in hw */
889 if (tp->fcoe_shift >= 0)
890 ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;
891
892 if (tp->port_shift >= 0)
893 ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
894
895 if (tp->vnic_shift >= 0) {
896 if ((adap->params.tp.ingress_config & VNIC_F))
897 ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
898 else
899 ntuple_mask |= (u64)fs->mask.ovlan_vld <<
900 tp->vnic_shift;
901 }
902
903 if (tp->vlan_shift >= 0)
904 ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;
905
906 if (tp->tos_shift >= 0)
907 ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
908
909 if (tp->protocol_shift >= 0)
910 ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
911
912 if (tp->ethertype_shift >= 0)
913 ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
914
915 if (tp->macmatch_shift >= 0)
916 ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
917
918 if (tp->matchtype_shift >= 0)
919 ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;
920
921 if (tp->frag_shift >= 0)
922 ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;
923
924 if (ntuple_mask != hash_filter_mask)
925 return false;
926
927 return true;
928 }
929
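/* Build the compressed filter tuple for a hash filter from the filter
 * specification, using the per-field shifts configured in the TP.
 */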
930 static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
931 struct net_device *dev)
932 {
933 struct adapter *adap = netdev2adap(dev);
934 struct tp_params *tp = &adap->params.tp;
935 u64 ntuple = 0;
936
937 /* Initialize each of the fields which we care about which are present
938 * in the Compressed Filter Tuple.
939 */
940 if (tp->vlan_shift >= 0 && fs->mask.ivlan)
941 ntuple |= (FT_VLAN_VLD_F | fs->val.ivlan) << tp->vlan_shift;
942
943 if (tp->port_shift >= 0 && fs->mask.iport)
944 ntuple |= (u64)fs->val.iport << tp->port_shift;
945
946 if (tp->protocol_shift >= 0) {
947 if (!fs->val.proto)
948 ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
949 else
950 ntuple |= (u64)fs->val.proto << tp->protocol_shift;
951 }
952
953 if (tp->tos_shift >= 0 && fs->mask.tos)
954 ntuple |= (u64)(fs->val.tos) << tp->tos_shift;
955
956 if (tp->vnic_shift >= 0) {
957 if ((adap->params.tp.ingress_config & USE_ENC_IDX_F) &&
958 fs->mask.encap_vld)
959 ntuple |= (u64)((fs->val.encap_vld << 16) |
960 (fs->val.ovlan)) << tp->vnic_shift;
961 else if ((adap->params.tp.ingress_config & VNIC_F) &&
962 fs->mask.pfvf_vld)
963 ntuple |= (u64)((fs->val.pfvf_vld << 16) |
964 (fs->val.pf << 13) |
965 (fs->val.vf)) << tp->vnic_shift;
966 else
967 ntuple |= (u64)((fs->val.ovlan_vld << 16) |
968 (fs->val.ovlan)) << tp->vnic_shift;
969 }
970
971 if (tp->macmatch_shift >= 0 && fs->mask.macidx)
972 ntuple |= (u64)(fs->val.macidx) << tp->macmatch_shift;
973
974 if (tp->ethertype_shift >= 0 && fs->mask.ethtype)
975 ntuple |= (u64)(fs->val.ethtype) << tp->ethertype_shift;
976
977 if (tp->matchtype_shift >= 0 && fs->mask.matchtype)
978 ntuple |= (u64)(fs->val.matchtype) << tp->matchtype_shift;
979
980 if (tp->frag_shift >= 0 && fs->mask.frag)
981 ntuple |= (u64)(fs->val.frag) << tp->frag_shift;
982
983 if (tp->fcoe_shift >= 0 && fs->mask.fcoe)
984 ntuple |= (u64)(fs->val.fcoe) << tp->fcoe_shift;
985 return ntuple;
986 }
987
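/* Build the CPL_ACT_OPEN_REQ6 (T6 layout) used to install an IPv6 hash
 * filter for the given atid/queue id.
 */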
988 static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
989 unsigned int qid_filterid, struct adapter *adap)
990 {
991 struct cpl_t6_act_open_req6 *t6req = NULL;
992 struct cpl_act_open_req6 *req = NULL;
993
994 t6req = (struct cpl_t6_act_open_req6 *)__skb_put(skb, sizeof(*t6req));
995 INIT_TP_WR(t6req, 0);
996 req = (struct cpl_act_open_req6 *)t6req;
997 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_filterid));
998 req->local_port = cpu_to_be16(f->fs.val.lport);
999 req->peer_port = cpu_to_be16(f->fs.val.fport);
1000 req->local_ip_hi = *(__be64 *)(&f->fs.val.lip);
1001 req->local_ip_lo = *(((__be64 *)&f->fs.val.lip) + 1);
1002 req->peer_ip_hi = *(__be64 *)(&f->fs.val.fip);
1003 req->peer_ip_lo = *(((__be64 *)&f->fs.val.fip) + 1);
1004 req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
1005 f->fs.newvlan == VLAN_REWRITE) |
1006 DELACK_V(f->fs.hitcnts) |
1007 L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
1008 SMAC_SEL_V((cxgb4_port_viid(f->dev) &
1009 0x7F) << 1) |
1010 TX_CHAN_V(f->fs.eport) |
1011 NO_CONG_V(f->fs.rpttid) |
1012 ULP_MODE_V(f->fs.nat_mode ?
1013 ULP_MODE_TCPDDP : ULP_MODE_NONE) |
1014 TCAM_BYPASS_F | NON_OFFLOAD_F);
1015 t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
1016 f->dev)));
1017 t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
1018 RSS_QUEUE_V(f->fs.iq) |
1019 TX_QUEUE_V(f->fs.nat_mode) |
1020 T5_OPT_2_VALID_F |
1021 RX_CHANNEL_F |
1022 CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
1023 (f->fs.dirsteer << 1)) |
1024 PACE_V((f->fs.maskhash) |
1025 ((f->fs.dirsteerhash) << 1)) |
1026 CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
1027 }
1028
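/* Build the CPL_ACT_OPEN_REQ (T6 layout) used to install an IPv4 hash
 * filter for the given atid/queue id.
 */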
1029 static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
1030 unsigned int qid_filterid, struct adapter *adap)
1031 {
1032 struct cpl_t6_act_open_req *t6req = NULL;
1033 struct cpl_act_open_req *req = NULL;
1034
1035 t6req = (struct cpl_t6_act_open_req *)__skb_put(skb, sizeof(*t6req));
1036 INIT_TP_WR(t6req, 0);
1037 req = (struct cpl_act_open_req *)t6req;
1038 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
1039 req->local_port = cpu_to_be16(f->fs.val.lport);
1040 req->peer_port = cpu_to_be16(f->fs.val.fport);
1041 memcpy(&req->local_ip, f->fs.val.lip, 4);
1042 memcpy(&req->peer_ip, f->fs.val.fip, 4);
1043 req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
1044 f->fs.newvlan == VLAN_REWRITE) |
1045 DELACK_V(f->fs.hitcnts) |
1046 L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
1047 SMAC_SEL_V((cxgb4_port_viid(f->dev) &
1048 0x7F) << 1) |
1049 TX_CHAN_V(f->fs.eport) |
1050 NO_CONG_V(f->fs.rpttid) |
1051 ULP_MODE_V(f->fs.nat_mode ?
1052 ULP_MODE_TCPDDP : ULP_MODE_NONE) |
1053 TCAM_BYPASS_F | NON_OFFLOAD_F);
1054
1055 t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
1056 f->dev)));
1057 t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
1058 RSS_QUEUE_V(f->fs.iq) |
1059 TX_QUEUE_V(f->fs.nat_mode) |
1060 T5_OPT_2_VALID_F |
1061 RX_CHANNEL_F |
1062 CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
1063 (f->fs.dirsteer << 1)) |
1064 PACE_V((f->fs.maskhash) |
1065 ((f->fs.dirsteerhash) << 1)) |
1066 CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
1067 }
1068
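/* Validate a hash (exact-match) filter request, allocate the resources
 * it needs (L2T/SMT/MPS/CLIP entries and an atid) and send the
 * CPL_ACT_OPEN_REQ[6] that creates it. Completion is signalled
 * asynchronously via hash_filter_rpl().
 */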
1069 static int cxgb4_set_hash_filter(struct net_device *dev,
1070 struct ch_filter_specification *fs,
1071 struct filter_ctx *ctx)
1072 {
1073 struct adapter *adapter = netdev2adap(dev);
1074 struct port_info *pi = netdev_priv(dev);
1075 struct tid_info *t = &adapter->tids;
1076 struct filter_entry *f;
1077 struct sk_buff *skb;
1078 int iq, atid, size;
1079 int ret = 0;
1080 u32 iconf;
1081
1082 fill_default_mask(fs);
1083 ret = validate_filter(dev, fs);
1084 if (ret)
1085 return ret;
1086
1087 iq = get_filter_steerq(dev, fs);
1088 if (iq < 0)
1089 return iq;
1090
1091 f = kzalloc(sizeof(*f), GFP_KERNEL);
1092 if (!f)
1093 return -ENOMEM;
1094
1095 f->fs = *fs;
1096 f->ctx = ctx;
1097 f->dev = dev;
1098 f->fs.iq = iq;
1099
1100 /* If the new filter requires loopback Destination MAC and/or VLAN
1101 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1102 * the filter.
1103 */
1104 if (f->fs.newdmac || f->fs.newvlan) {
1105 /* allocate L2T entry for new filter */
1106 f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
1107 f->fs.eport, f->fs.dmac);
1108 if (!f->l2t) {
1109 ret = -ENOMEM;
1110 goto out_err;
1111 }
1112 }
1113
1114 /* If the new filter requires loopback Source MAC rewriting then
1115 * we need to allocate a SMT entry for the filter.
1116 */
1117 if (f->fs.newsmac) {
1118 f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
1119 if (!f->smt) {
1120 if (f->l2t) {
1121 cxgb4_l2t_release(f->l2t);
1122 f->l2t = NULL;
1123 }
1124 ret = -ENOMEM;
1125 goto free_l2t;
1126 }
1127 }
1128
1129 atid = cxgb4_alloc_atid(t, f);
1130 if (atid < 0) {
1131 ret = atid;
1132 goto free_smt;
1133 }
1134
1135 iconf = adapter->params.tp.ingress_config;
1136 if (iconf & VNIC_F) {
1137 f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
1138 f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
1139 f->fs.val.ovlan_vld = fs->val.pfvf_vld;
1140 f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
1141 } else if (iconf & USE_ENC_IDX_F) {
1142 if (f->fs.val.encap_vld) {
1143 struct port_info *pi = netdev_priv(f->dev);
1144 u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
1145
1146 /* allocate MPS TCAM entry */
1147 ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
1148 match_all_mac,
1149 match_all_mac,
1150 f->fs.val.vni,
1151 f->fs.mask.vni,
1152 0, 1, 1);
1153 if (ret < 0)
1154 goto free_atid;
1155
1156 atomic_inc(&adapter->mps_encap[ret].refcnt);
1157 f->fs.val.ovlan = ret;
1158 f->fs.mask.ovlan = 0xffff;
1159 f->fs.val.ovlan_vld = 1;
1160 f->fs.mask.ovlan_vld = 1;
1161 }
1162 }
1163
1164 size = sizeof(struct cpl_t6_act_open_req);
1165 if (f->fs.type) {
1166 ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1);
1167 if (ret)
1168 goto free_mps;
1169
1170 skb = alloc_skb(size, GFP_KERNEL);
1171 if (!skb) {
1172 ret = -ENOMEM;
1173 goto free_clip;
1174 }
1175
1176 mk_act_open_req6(f, skb,
1177 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
1178 adapter);
1179 } else {
1180 skb = alloc_skb(size, GFP_KERNEL);
1181 if (!skb) {
1182 ret = -ENOMEM;
1183 goto free_mps;
1184 }
1185
1186 mk_act_open_req(f, skb,
1187 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
1188 adapter);
1189 }
1190
1191 f->pending = 1;
1192 set_wr_txq(skb, CPL_PRIORITY_SETUP, f->fs.val.iport & 0x3);
1193 t4_ofld_send(adapter, skb);
1194 return 0;
1195
1196 free_clip:
1197 cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
1198
1199 free_mps:
1200 if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
1201 t4_free_encap_mac_filt(adapter, pi->viid, f->fs.val.ovlan, 1);
1202
1203 free_atid:
1204 cxgb4_free_atid(t, atid);
1205
1206 free_smt:
1207 if (f->smt) {
1208 cxgb4_smt_release(f->smt);
1209 f->smt = NULL;
1210 }
1211
1212 free_l2t:
1213 if (f->l2t) {
1214 cxgb4_l2t_release(f->l2t);
1215 f->l2t = NULL;
1216 }
1217
1218 out_err:
1219 kfree(f);
1220 return ret;
1221 }
1222
1223 /* Check a Chelsio Filter Request for validity, convert it into our internal
1224 * format and send it to the hardware. Return 0 on success, an error number
1225 * otherwise. We attach any provided filter operation context to the internal
1226 * filter specification in order to facilitate signaling completion of the
1227 * operation.
1228 */
1229 int __cxgb4_set_filter(struct net_device *dev, int filter_id,
1230 struct ch_filter_specification *fs,
1231 struct filter_ctx *ctx)
1232 {
1233 struct adapter *adapter = netdev2adap(dev);
1234 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
1235 unsigned int max_fidx, fidx;
1236 struct filter_entry *f;
1237 u32 iconf;
1238 int iq, ret;
1239
1240 if (fs->hash) {
1241 if (is_hashfilter(adapter))
1242 return cxgb4_set_hash_filter(dev, fs, ctx);
1243 netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
1244 __func__);
1245 return -EINVAL;
1246 }
1247
1248 max_fidx = adapter->tids.nftids;
1249 if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
1250 filter_id >= max_fidx)
1251 return -E2BIG;
1252
1253 fill_default_mask(fs);
1254
1255 ret = validate_filter(dev, fs);
1256 if (ret)
1257 return ret;
1258
1259 iq = get_filter_steerq(dev, fs);
1260 if (iq < 0)
1261 return iq;
1262
1263 /* IPv6 filters occupy four slots and must be aligned on
1264 * four-slot boundaries. IPv4 filters only occupy a single
1265 * slot and have no alignment requirements but writing a new
1266 * IPv4 filter into the middle of an existing IPv6 filter
1267 * requires clearing the old IPv6 filter and hence we prevent
1268 * insertion.
1269 */
1270 if (fs->type == 0) { /* IPv4 */
1271 /* For T6, If our IPv4 filter isn't being written to a
1272 * multiple of two filter index and there's an IPv6
1273 * filter at the multiple of 2 base slot, then we need
1274 * to delete that IPv6 filter ...
1275 * For adapters below T6, IPv6 filter occupies 4 entries.
1276 * Hence we need to delete the filter in multiple of 4 slot.
1277 */
1278 if (chip_ver < CHELSIO_T6)
1279 fidx = filter_id & ~0x3;
1280 else
1281 fidx = filter_id & ~0x1;
1282
1283 if (fidx != filter_id &&
1284 adapter->tids.ftid_tab[fidx].fs.type) {
1285 f = &adapter->tids.ftid_tab[fidx];
1286 if (f->valid) {
1287 dev_err(adapter->pdev_dev,
1288 "Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
1289 fidx, fidx + 3);
1290 return -EINVAL;
1291 }
1292 }
1293 } else { /* IPv6 */
1294 if (chip_ver < CHELSIO_T6) {
1295 /* Ensure that the IPv6 filter is aligned on a
1296 * multiple of 4 boundary.
1297 */
1298 if (filter_id & 0x3) {
1299 dev_err(adapter->pdev_dev,
1300 "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
1301 return -EINVAL;
1302 }
1303
1304 /* Check all except the base overlapping IPv4 filter
1305 * slots.
1306 */
1307 for (fidx = filter_id + 1; fidx < filter_id + 4;
1308 fidx++) {
1309 f = &adapter->tids.ftid_tab[fidx];
1310 if (f->valid) {
1311 dev_err(adapter->pdev_dev,
1312 "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
1313 fidx);
1314 return -EBUSY;
1315 }
1316 }
1317 } else {
1318 /* For T6, with CLIP enabled, an IPv6 filter occupies
1319 * 2 entries.
1320 */
1321 if (filter_id & 0x1)
1322 return -EINVAL;
1323 /* Check overlapping IPv4 filter slot */
1324 fidx = filter_id + 1;
1325 f = &adapter->tids.ftid_tab[fidx];
1326 if (f->valid) {
1327 pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
1328 __func__, fidx);
1329 return -EBUSY;
1330 }
1331 }
1332 }
1333
1334 /* Check to make sure that the provided filter index is not
1335 * already in use by someone else
1336 */
1337 f = &adapter->tids.ftid_tab[filter_id];
1338 if (f->valid)
1339 return -EBUSY;
1340
1341 fidx = filter_id + adapter->tids.ftid_base;
1342 ret = cxgb4_set_ftid(&adapter->tids, filter_id,
1343 fs->type ? PF_INET6 : PF_INET,
1344 chip_ver);
1345 if (ret)
1346 return ret;
1347
1348 /* Check to make sure the filter requested is writable ... */
1349 ret = writable_filter(f);
1350 if (ret) {
1351 /* Clear the bits we have set above */
1352 cxgb4_clear_ftid(&adapter->tids, filter_id,
1353 fs->type ? PF_INET6 : PF_INET,
1354 chip_ver);
1355 return ret;
1356 }
1357
1358 if (is_t6(adapter->params.chip) && fs->type &&
1359 ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
1360 IPV6_ADDR_ANY) {
1361 ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
1362 if (ret) {
1363 cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6,
1364 chip_ver);
1365 return ret;
1366 }
1367 }
1368
1369 /* Convert the filter specification into our internal format.
1370 * We copy the PF/VF specification into the Outer VLAN field
1371 * here so the rest of the code -- including the interface to
1372 * the firmware -- doesn't have to constantly do these checks.
1373 */
1374 f->fs = *fs;
1375 f->fs.iq = iq;
1376 f->dev = dev;
1377
1378 iconf = adapter->params.tp.ingress_config;
1379 if (iconf & VNIC_F) {
1380 f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
1381 f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
1382 f->fs.val.ovlan_vld = fs->val.pfvf_vld;
1383 f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
1384 } else if (iconf & USE_ENC_IDX_F) {
1385 if (f->fs.val.encap_vld) {
1386 struct port_info *pi = netdev_priv(f->dev);
1387 u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
1388
1389 /* allocate MPS TCAM entry */
1390 ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
1391 match_all_mac,
1392 match_all_mac,
1393 f->fs.val.vni,
1394 f->fs.mask.vni,
1395 0, 1, 1);
1396 if (ret < 0)
1397 goto free_clip;
1398
1399 atomic_inc(&adapter->mps_encap[ret].refcnt);
1400 f->fs.val.ovlan = ret;
1401 f->fs.mask.ovlan = 0x1ff;
1402 f->fs.val.ovlan_vld = 1;
1403 f->fs.mask.ovlan_vld = 1;
1404 }
1405 }
1406
1407 /* Attempt to set the filter. If we don't succeed, we clear
1408 * it and return the failure.
1409 */
1410 f->ctx = ctx;
1411 f->tid = fidx; /* Save the actual tid */
1412 ret = set_filter_wr(adapter, filter_id);
1413 if (ret) {
1414 cxgb4_clear_ftid(&adapter->tids, filter_id,
1415 fs->type ? PF_INET6 : PF_INET,
1416 chip_ver);
1417 clear_filter(adapter, f);
1418 }
1419
1420 return ret;
1421
1422 free_clip:
1423 if (is_t6(adapter->params.chip) && f->fs.type)
1424 cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
1425 cxgb4_clear_ftid(&adapter->tids, filter_id,
1426 fs->type ? PF_INET6 : PF_INET, chip_ver);
1427 return ret;
1428 }
1429
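/* Delete a hash filter by sending an ABORT_REQ/ABORT_RPL pair for its
 * tid, preceded by a SET_TCB_FIELD that steers the reply to the
 * firmware event queue. Completion is signalled via
 * hash_del_filter_rpl().
 */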
1430 static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
1431 struct filter_ctx *ctx)
1432 {
1433 struct adapter *adapter = netdev2adap(dev);
1434 struct tid_info *t = &adapter->tids;
1435 struct cpl_abort_req *abort_req;
1436 struct cpl_abort_rpl *abort_rpl;
1437 struct cpl_set_tcb_field *req;
1438 struct ulptx_idata *aligner;
1439 struct work_request_hdr *wr;
1440 struct filter_entry *f;
1441 struct sk_buff *skb;
1442 unsigned int wrlen;
1443 int ret;
1444
1445 netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
1446 __func__, filter_id, adapter->tids.nftids);
1447
1448 if (filter_id > adapter->tids.ntids)
1449 return -E2BIG;
1450
1451 f = lookup_tid(t, filter_id);
1452 if (!f) {
1453 netdev_err(dev, "%s: no filter entry for filter_id = %d",
1454 __func__, filter_id);
1455 return -EINVAL;
1456 }
1457
1458 ret = writable_filter(f);
1459 if (ret)
1460 return ret;
1461
1462 if (!f->valid)
1463 return -EINVAL;
1464
1465 f->ctx = ctx;
1466 f->pending = 1;
1467 wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner))
1468 + sizeof(*abort_req) + sizeof(*abort_rpl), 16);
1469 skb = alloc_skb(wrlen, GFP_KERNEL);
1470 if (!skb) {
1471 netdev_err(dev, "%s: could not allocate skb ..\n", __func__);
1472 return -ENOMEM;
1473 }
1474 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1475 req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
1476 INIT_ULPTX_WR(req, wrlen, 0, 0);
1477 wr = (struct work_request_hdr *)req;
1478 wr++;
1479 req = (struct cpl_set_tcb_field *)wr;
1480 mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M),
1481 TCB_RSS_INFO_V(adapter->sge.fw_evtq.abs_id), 0, 1);
1482 aligner = (struct ulptx_idata *)(req + 1);
1483 abort_req = (struct cpl_abort_req *)(aligner + 1);
1484 mk_abort_req_ulp(abort_req, f->tid);
1485 abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
1486 mk_abort_rpl_ulp(abort_rpl, f->tid);
1487 t4_ofld_send(adapter, skb);
1488 return 0;
1489 }
1490
1491 /* Check a delete filter request for validity and send it to the hardware.
1492 * Return 0 on success, an error number otherwise. We attach any provided
1493 * filter operation context to the internal filter specification in order to
1494 * facilitate signaling completion of the operation.
1495 */
1496 int __cxgb4_del_filter(struct net_device *dev, int filter_id,
1497 struct ch_filter_specification *fs,
1498 struct filter_ctx *ctx)
1499 {
1500 struct adapter *adapter = netdev2adap(dev);
1501 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
1502 struct filter_entry *f;
1503 unsigned int max_fidx;
1504 int ret;
1505
1506 if (fs && fs->hash) {
1507 if (is_hashfilter(adapter))
1508 return cxgb4_del_hash_filter(dev, filter_id, ctx);
1509 netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
1510 __func__);
1511 return -EINVAL;
1512 }
1513
1514 max_fidx = adapter->tids.nftids;
1515 if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
1516 filter_id >= max_fidx)
1517 return -E2BIG;
1518
1519 f = &adapter->tids.ftid_tab[filter_id];
1520 ret = writable_filter(f);
1521 if (ret)
1522 return ret;
1523
1524 if (f->valid) {
1525 f->ctx = ctx;
1526 cxgb4_clear_ftid(&adapter->tids, filter_id,
1527 f->fs.type ? PF_INET6 : PF_INET,
1528 chip_ver);
1529 return del_filter_wr(adapter, filter_id);
1530 }
1531
1532 /* If the caller has passed in a Completion Context then we need to
1533 * mark it as a successful completion so they don't stall waiting
1534 * for it.
1535 */
1536 if (ctx) {
1537 ctx->result = 0;
1538 complete(&ctx->completion);
1539 }
1540 return ret;
1541 }
1542
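/* Synchronous wrapper around __cxgb4_set_filter() which waits up to
 * ten seconds for the filter reply.
 */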
1543 int cxgb4_set_filter(struct net_device *dev, int filter_id,
1544 struct ch_filter_specification *fs)
1545 {
1546 struct filter_ctx ctx;
1547 int ret;
1548
1549 init_completion(&ctx.completion);
1550
1551 ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
1552 if (ret)
1553 goto out;
1554
1555 /* Wait for reply */
1556 ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
1557 if (!ret)
1558 return -ETIMEDOUT;
1559
1560 ret = ctx.result;
1561 out:
1562 return ret;
1563 }
1564
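/* Synchronous wrapper around __cxgb4_del_filter(); doesn't wait for
 * completion if the adapter is shutting down.
 */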
1565 int cxgb4_del_filter(struct net_device *dev, int filter_id,
1566 struct ch_filter_specification *fs)
1567 {
1568 struct filter_ctx ctx;
1569 int ret;
1570
1571 /* If we are shutting down the adapter do not wait for completion */
1572 if (netdev2adap(dev)->flags & SHUTTING_DOWN)
1573 return __cxgb4_del_filter(dev, filter_id, fs, NULL);
1574
1575 init_completion(&ctx.completion);
1576
1577 ret = __cxgb4_del_filter(dev, filter_id, fs, &ctx);
1578 if (ret)
1579 goto out;
1580
1581 /* Wait for reply */
1582 ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
1583 if (!ret)
1584 return -ETIMEDOUT;
1585
1586 ret = ctx.result;
1587 out:
1588 return ret;
1589 }
1590
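/* Once a hash filter has been created, program the remaining TCB
 * fields: hit counters, MAC/VLAN rewrite flags and any NAT rewrite
 * parameters.
 */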
1591 static int configure_filter_tcb(struct adapter *adap, unsigned int tid,
1592 struct filter_entry *f)
1593 {
1594 if (f->fs.hitcnts)
1595 set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W,
1596 TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) |
1597 TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M),
1598 TCB_TIMESTAMP_V(0ULL) |
1599 TCB_RTT_TS_RECENT_AGE_V(0ULL),
1600 1);
1601
1602 if (f->fs.newdmac)
1603 set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1,
1604 1);
1605
1606 if (f->fs.newvlan == VLAN_INSERT ||
1607 f->fs.newvlan == VLAN_REWRITE)
1608 set_tcb_tflag(adap, f, tid, TF_CCTRL_RFR_S, 1,
1609 1);
1610 if (f->fs.newsmac)
1611 configure_filter_smac(adap, f);
1612
1613 if (f->fs.nat_mode) {
1614 switch (f->fs.nat_mode) {
1615 case NAT_MODE_DIP:
1616 set_nat_params(adap, f, tid, true, false, false, false);
1617 break;
1618
1619 case NAT_MODE_DIP_DP:
1620 set_nat_params(adap, f, tid, true, false, true, false);
1621 break;
1622
1623 case NAT_MODE_DIP_DP_SIP:
1624 set_nat_params(adap, f, tid, true, true, true, false);
1625 break;
1626 case NAT_MODE_DIP_DP_SP:
1627 set_nat_params(adap, f, tid, true, false, true, true);
1628 break;
1629
1630 case NAT_MODE_SIP_SP:
1631 set_nat_params(adap, f, tid, false, true, false, true);
1632 break;
1633
1634 case NAT_MODE_DIP_SIP_SP:
1635 set_nat_params(adap, f, tid, true, true, false, true);
1636 break;
1637
1638 case NAT_MODE_ALL:
1639 set_nat_params(adap, f, tid, true, true, true, true);
1640 break;
1641
1642 default:
1643 pr_err("%s: Invalid NAT mode: %d\n",
1644 __func__, f->fs.nat_mode);
1645 return -EINVAL;
1646 }
1647 }
1648 return 0;
1649 }
1650
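/* Handle the CPL_ABORT_RPL_RSS reply to a hash filter deletion: clear
 * the filter, release its tid and complete any waiting context.
 */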
1651 void hash_del_filter_rpl(struct adapter *adap,
1652 const struct cpl_abort_rpl_rss *rpl)
1653 {
1654 unsigned int status = rpl->status;
1655 struct tid_info *t = &adap->tids;
1656 unsigned int tid = GET_TID(rpl);
1657 struct filter_ctx *ctx = NULL;
1658 struct filter_entry *f;
1659
1660 dev_dbg(adap->pdev_dev, "%s: status = %u; tid = %u\n",
1661 __func__, status, tid);
1662
1663 f = lookup_tid(t, tid);
1664 if (!f) {
1665 dev_err(adap->pdev_dev, "%s: could not find filter entry",
1666 __func__);
1667 return;
1668 }
1669 ctx = f->ctx;
1670 f->ctx = NULL;
1671 clear_filter(adap, f);
1672 cxgb4_remove_tid(t, 0, tid, 0);
1673 kfree(f);
1674 if (ctx) {
1675 ctx->result = 0;
1676 complete(&ctx->completion);
1677 }
1678 }
1679
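/* Handle the CPL_ACT_OPEN_RPL reply to a hash filter creation: on
 * success, move the filter from its atid to the assigned tid and
 * finish programming the TCB; on failure, release the resources and
 * report the error.
 */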
1680 void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
1681 {
1682 unsigned int ftid = TID_TID_G(AOPEN_ATID_G(ntohl(rpl->atid_status)));
1683 unsigned int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
1684 struct tid_info *t = &adap->tids;
1685 unsigned int tid = GET_TID(rpl);
1686 struct filter_ctx *ctx = NULL;
1687 struct filter_entry *f;
1688
1689 dev_dbg(adap->pdev_dev, "%s: tid = %u; atid = %u; status = %u\n",
1690 __func__, tid, ftid, status);
1691
1692 f = lookup_atid(t, ftid);
1693 if (!f) {
1694 dev_err(adap->pdev_dev, "%s: could not find filter entry",
1695 __func__);
1696 return;
1697 }
1698 ctx = f->ctx;
1699 f->ctx = NULL;
1700
1701 switch (status) {
1702 case CPL_ERR_NONE:
1703 f->tid = tid;
1704 f->pending = 0;
1705 f->valid = 1;
1706 cxgb4_insert_tid(t, f, f->tid, 0);
1707 cxgb4_free_atid(t, ftid);
1708 if (ctx) {
1709 ctx->tid = f->tid;
1710 ctx->result = 0;
1711 }
1712 if (configure_filter_tcb(adap, tid, f)) {
1713 clear_filter(adap, f);
1714 cxgb4_remove_tid(t, 0, tid, 0);
1715 kfree(f);
1716 if (ctx) {
1717 ctx->result = -EINVAL;
1718 complete(&ctx->completion);
1719 }
1720 return;
1721 }
1722 break;
1723
1724 default:
1725 dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
1726 __func__, status);
1727
1728 if (ctx) {
1729 if (status == CPL_ERR_TCAM_FULL)
1730 ctx->result = -EAGAIN;
1731 else
1732 ctx->result = -EINVAL;
1733 }
1734 clear_filter(adap, f);
1735 cxgb4_free_atid(t, ftid);
1736 kfree(f);
1737 }
1738 if (ctx)
1739 complete(&ctx->completion);
1740 }
1741
1742 /* Handle a filter write/deletion reply. */
1743 void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
1744 {
1745 unsigned int tid = GET_TID(rpl);
1746 struct filter_entry *f = NULL;
1747 unsigned int max_fidx;
1748 int idx;
1749
1750 max_fidx = adap->tids.nftids + adap->tids.nsftids;
1751 /* Get the corresponding filter entry for this tid */
1752 if (adap->tids.ftid_tab) {
1753 /* Check this in normal filter region */
1754 idx = tid - adap->tids.ftid_base;
1755 if (idx >= max_fidx)
1756 return;
1757 f = &adap->tids.ftid_tab[idx];
1758 if (f->tid != tid)
1759 return;
1760 }
1761
1762 /* We found the filter entry for this tid */
1763 if (f) {
1764 unsigned int ret = TCB_COOKIE_G(rpl->cookie);
1765 struct filter_ctx *ctx;
1766
1767 /* Pull off any filter operation context attached to the
1768 * filter.
1769 */
1770 ctx = f->ctx;
1771 f->ctx = NULL;
1772
1773 if (ret == FW_FILTER_WR_FLT_DELETED) {
1774 /* Clear the filter when we get confirmation from the
1775 * hardware that the filter has been deleted.
1776 */
1777 clear_filter(adap, f);
1778 if (ctx)
1779 ctx->result = 0;
1780 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
1781 int err = 0;
1782
1783 if (f->fs.newsmac)
1784 err = configure_filter_smac(adap, f);
1785
1786 if (!err) {
1787 f->pending = 0; /* async setup completed */
1788 f->valid = 1;
1789 if (ctx) {
1790 ctx->result = 0;
1791 ctx->tid = idx;
1792 }
1793 } else {
1794 clear_filter(adap, f);
1795 if (ctx)
1796 ctx->result = err;
1797 }
1798 } else {
1799 /* Something went wrong. Issue a warning about the
1800 * problem and clear everything out.
1801 */
1802 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
1803 idx, ret);
1804 clear_filter(adap, f);
1805 if (ctx)
1806 ctx->result = -EINVAL;
1807 }
1808 if (ctx)
1809 complete(&ctx->completion);
1810 }
1811 }
1812
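/* Enable hash filter support, after verifying (on T6 only) that the
 * LE response-code registers are configured as expected.
 */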
1813 int init_hash_filter(struct adapter *adap)
1814 {
1815 /* On T6, verify the necessary register configs and warn the user in
1816 * case of improper config
1817 */
1818 if (is_t6(adap->params.chip)) {
1819 if (TCAM_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_0_A)) != 4)
1820 goto err;
1821
1822 if (HASH_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_1_A)) != 4)
1823 goto err;
1824 } else {
1825 dev_err(adap->pdev_dev, "Hash filter supported only on T6\n");
1826 return -EINVAL;
1827 }
1828 adap->params.hash_filter = 1;
1829 return 0;
1830 err:
1831 dev_warn(adap->pdev_dev, "Invalid hash filter config!\n");
1832 return -EINVAL;
1833 }
1834