// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/netdevice.h>
#include <linux/dynamic_debug.h>
#include <linux/etherdevice.h>
#include <linux/list.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_rx_filter.h"

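/* Unlink the filter from both the by_id and by_hash lookup lists
 * and free it.  Callers hold rx_filters.lock.
 */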
void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f)
{
	struct device *dev = lif->ionic->dev;

	hlist_del(&f->by_id);
	hlist_del(&f->by_hash);
	devm_kfree(dev, f);
}

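/* Re-post all saved filters to the device, e.g. after a firmware
 * update or reset.  Filters that fail to replay are dropped; the
 * rest are rehashed into by_id under the new filter ids returned
 * in each completion.
 */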
void ionic_rx_filter_replay(struct ionic_lif *lif)
{
	struct ionic_rx_filter_add_cmd *ac;
	struct hlist_head new_id_list;
	struct ionic_admin_ctx ctx;
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	unsigned int key;
	unsigned int i;
	int err;

	INIT_HLIST_HEAD(&new_id_list);
	ac = &ctx.cmd.rx_filter_add;

	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id) {
			ctx.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work);
			memcpy(ac, &f->cmd, sizeof(f->cmd));
			dev_dbg(&lif->netdev->dev, "replay filter command:\n");
			dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
					 &ctx.cmd, sizeof(ctx.cmd), true);

			err = ionic_adminq_post_wait(lif, &ctx);
			if (err) {
				switch (le16_to_cpu(ac->match)) {
				case IONIC_RX_FILTER_MATCH_VLAN:
					netdev_info(lif->netdev, "Replay failed - %d: vlan %d\n",
						    err,
						    le16_to_cpu(ac->vlan.vlan));
					break;
				case IONIC_RX_FILTER_MATCH_MAC:
					netdev_info(lif->netdev, "Replay failed - %d: mac %pM\n",
						    err, ac->mac.addr);
					break;
				case IONIC_RX_FILTER_MATCH_MAC_VLAN:
					netdev_info(lif->netdev, "Replay failed - %d: vlan %d mac %pM\n",
						    err,
						    le16_to_cpu(ac->vlan.vlan),
						    ac->mac.addr);
					break;
				}
				spin_lock_bh(&lif->rx_filters.lock);
				ionic_rx_filter_free(lif, f);
				spin_unlock_bh(&lif->rx_filters.lock);

				continue;
			}

			/* remove from old id list, save new id in tmp list */
			spin_lock_bh(&lif->rx_filters.lock);
			hlist_del(&f->by_id);
			spin_unlock_bh(&lif->rx_filters.lock);
			f->filter_id = le32_to_cpu(ctx.comp.rx_filter_add.filter_id);
			hlist_add_head(&f->by_id, &new_id_list);
		}
	}

	/* rebuild the by_id hash lists with the new filter ids */
	spin_lock_bh(&lif->rx_filters.lock);
	hlist_for_each_entry_safe(f, tmp, &new_id_list, by_id) {
		key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
		head = &lif->rx_filters.by_id[key];
		hlist_add_head(&f->by_id, head);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
}

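/* Initialize the filter lock and the by_hash/by_id list heads. */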
int ionic_rx_filters_init(struct ionic_lif *lif)
{
	unsigned int i;

	spin_lock_init(&lif->rx_filters.lock);

	spin_lock_bh(&lif->rx_filters.lock);
	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]);
		INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]);
	}
	spin_unlock_bh(&lif->rx_filters.lock);

	return 0;
}

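/* Walk the by_id lists and free every remaining filter. */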
void ionic_rx_filters_deinit(struct ionic_lif *lif)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_bh(&lif->rx_filters.lock);
	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id)
			ionic_rx_filter_free(lif, f);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
}

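/* Remember the filter described by the add command in @ctx, along
 * with the filter id returned in the completion.  If an equivalent
 * VLAN or MAC filter is already known, it is relinked in place
 * rather than duplicated.  Callers hold rx_filters.lock.
 */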
int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
			 u32 hash, struct ionic_admin_ctx *ctx,
			 enum ionic_filter_state state)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_rx_filter_add_cmd *ac;
	struct ionic_rx_filter *f = NULL;
	struct hlist_head *head;
	unsigned int key;

	ac = &ctx->cmd.rx_filter_add;

	switch (le16_to_cpu(ac->match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		key = le16_to_cpu(ac->vlan.vlan);
		f = ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
		break;
	case IONIC_RX_FILTER_MATCH_MAC:
		key = *(u32 *)ac->mac.addr;
		f = ionic_rx_filter_by_addr(lif, ac->mac.addr);
		break;
	case IONIC_RX_FILTER_MATCH_MAC_VLAN:
		key = le16_to_cpu(ac->mac_vlan.vlan);
		break;
	case IONIC_RX_FILTER_STEER_PKTCLASS:
		key = 0;
		break;
	default:
		return -EINVAL;
	}

	if (f) {
		/* remove from current linking so we can refresh it */
		hlist_del(&f->by_id);
		hlist_del(&f->by_hash);
	} else {
		f = devm_kzalloc(dev, sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;
	}

	f->flow_id = flow_id;
	f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id);
	f->state = state;
	f->rxq_index = rxq_index;
	memcpy(&f->cmd, ac, sizeof(f->cmd));
	netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id);

	INIT_HLIST_NODE(&f->by_hash);
	INIT_HLIST_NODE(&f->by_id);

	key = hash_32(key, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];
	hlist_add_head(&f->by_hash, head);

	key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
	head = &lif->rx_filters.by_id[key];
	hlist_add_head(&f->by_id, head);

	return 0;
}

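/* Find a VLAN-match filter by VLAN id, or NULL if none is known. */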
struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	unsigned int key;

	key = hash_32(vid, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];

	hlist_for_each_entry(f, head, by_hash) {
		if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_VLAN)
			continue;
		if (le16_to_cpu(f->cmd.vlan.vlan) == vid)
			return f;
	}

	return NULL;
}

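/* Find a MAC-match filter by address, or NULL if none is known. */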
struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif,
						const u8 *addr)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	unsigned int key;

	key = hash_32(*(u32 *)addr, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];

	hlist_for_each_entry(f, head, by_hash) {
		if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_MAC)
			continue;
		if (memcmp(addr, f->cmd.mac.addr, ETH_ALEN) == 0)
			return f;
	}

	return NULL;
}

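/* Find the packet-class steering filter, if one is set.  These
 * filters carry no match key, so they all land in the hash_32(0)
 * bucket (see ionic_rx_filter_save()).
 */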
struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	unsigned int key;

	key = hash_32(0, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];

	hlist_for_each_entry(f, head, by_hash) {
		if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_STEER_PKTCLASS)
			continue;
		return f;
	}

	return NULL;
}

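/* Record an address-list change for a later sync: a new address gets
 * a filter in NEW state, a deleted address moves its filter to OLD
 * (or is freed outright if it was never pushed to the device), and
 * the lif is flagged so ionic_rx_filter_sync() will push the changes.
 * @mode is ADD_ADDR or DEL_ADDR.
 */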
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode)
{
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_addr(lif, addr);
	if (mode == ADD_ADDR && !f) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_filter_add = {
				.opcode = IONIC_CMD_RX_FILTER_ADD,
				.lif_index = cpu_to_le16(lif->index),
				.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
			},
		};

		memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_NEW);
		if (err) {
			spin_unlock_bh(&lif->rx_filters.lock);
			return err;
		}

	} else if (mode == ADD_ADDR && f) {
		if (f->state == IONIC_FILTER_STATE_OLD)
			f->state = IONIC_FILTER_STATE_SYNCED;

	} else if (mode == DEL_ADDR && f) {
		if (f->state == IONIC_FILTER_STATE_NEW)
			ionic_rx_filter_free(lif, f);
		else if (f->state == IONIC_FILTER_STATE_SYNCED)
			f->state = IONIC_FILTER_STATE_OLD;
	} else if (mode == DEL_ADDR && !f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	spin_unlock_bh(&lif->rx_filters.lock);

	set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);

	return 0;
}

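/* Scratch copy of a filter, used to carry add/del work items
 * outside of rx_filters.lock in ionic_rx_filter_sync().
 */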
struct sync_item {
	struct list_head list;
	struct ionic_rx_filter f;
};

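/* Push the queued filter changes to the device.  The NEW and OLD
 * filters are copied to local lists under the lock, then the deletes
 * are done first to make room in case the device's filter table
 * overflowed, followed by the adds.  A change that fails here stays
 * unsynced and is retried on the next sync pass.
 */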
void ionic_rx_filter_sync(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct list_head sync_add_list;
	struct list_head sync_del_list;
	struct sync_item *sync_item;
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	struct sync_item *spos;
	unsigned int i;

	INIT_LIST_HEAD(&sync_add_list);
	INIT_LIST_HEAD(&sync_del_list);

	clear_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);

	/* Copy the filters to be added and deleted
	 * into separate local lists that need no locking.
	 */
	spin_lock_bh(&lif->rx_filters.lock);
	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id) {
			if (f->state == IONIC_FILTER_STATE_NEW ||
			    f->state == IONIC_FILTER_STATE_OLD) {
				sync_item = devm_kzalloc(dev, sizeof(*sync_item),
							 GFP_ATOMIC);
				if (!sync_item)
					goto loop_out;

				sync_item->f = *f;

				if (f->state == IONIC_FILTER_STATE_NEW)
					list_add(&sync_item->list, &sync_add_list);
				else
					list_add(&sync_item->list, &sync_del_list);
			}
		}
	}
loop_out:
	spin_unlock_bh(&lif->rx_filters.lock);

	/* If the add or delete fails, it won't get marked as sync'd
	 * and will be tried again in the next sync action.
	 * Do the deletes first in case we're in an overflow state and
	 * they can clear room for some new filters.
	 */
	list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) {
		(void)ionic_lif_addr_del(lif, sync_item->f.cmd.mac.addr);

		list_del(&sync_item->list);
		devm_kfree(dev, sync_item);
	}

	list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
		(void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr);

		list_del(&sync_item->list);
		devm_kfree(dev, sync_item);
	}
}