1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright Gavin Shan, IBM Corporation 2016.
4  */
5 
6 #include <linux/module.h>
7 #include <linux/kernel.h>
8 #include <linux/init.h>
9 #include <linux/netdevice.h>
10 #include <linux/skbuff.h>
11 #include <linux/of.h>
12 #include <linux/platform_device.h>
13 
14 #include <net/ncsi.h>
15 #include <net/net_namespace.h>
16 #include <net/sock.h>
17 #include <net/addrconf.h>
18 #include <net/ipv6.h>
19 #include <net/genetlink.h>
20 
21 #include "internal.h"
22 #include "ncsi-pkt.h"
23 #include "ncsi-netlink.h"
24 
25 LIST_HEAD(ncsi_dev_list);
26 DEFINE_SPINLOCK(ncsi_dev_lock);
27 
28 bool ncsi_channel_has_link(struct ncsi_channel *channel)
29 {
30 	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
31 }
32 
33 bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
34 			  struct ncsi_channel *channel)
35 {
36 	struct ncsi_package *np;
37 	struct ncsi_channel *nc;
38 
39 	NCSI_FOR_EACH_PACKAGE(ndp, np)
40 		NCSI_FOR_EACH_CHANNEL(np, nc) {
41 			if (nc == channel)
42 				continue;
43 			if (nc->state == NCSI_CHANNEL_ACTIVE &&
44 			    ncsi_channel_has_link(nc))
45 				return false;
46 		}
47 
48 	return true;
49 }
50 
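/* Report the current link state to the NCSI device's consumer via its
 * handler. Unless force_down is set, link is reported up when at least
 * one active, unqueued channel has link.
 */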
51 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
52 {
53 	struct ncsi_dev *nd = &ndp->ndev;
54 	struct ncsi_package *np;
55 	struct ncsi_channel *nc;
56 	unsigned long flags;
57 
58 	nd->state = ncsi_dev_state_functional;
59 	if (force_down) {
60 		nd->link_up = 0;
61 		goto report;
62 	}
63 
64 	nd->link_up = 0;
65 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
66 		NCSI_FOR_EACH_CHANNEL(np, nc) {
67 			spin_lock_irqsave(&nc->lock, flags);
68 
69 			if (!list_empty(&nc->link) ||
70 			    nc->state != NCSI_CHANNEL_ACTIVE) {
71 				spin_unlock_irqrestore(&nc->lock, flags);
72 				continue;
73 			}
74 
75 			if (ncsi_channel_has_link(nc)) {
76 				spin_unlock_irqrestore(&nc->lock, flags);
77 				nd->link_up = 1;
78 				goto report;
79 			}
80 
81 			spin_unlock_irqrestore(&nc->lock, flags);
82 		}
83 	}
84 
85 report:
86 	nd->handler(nd);
87 }
88 
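/* Per-channel monitor timer: periodically sends GLS (Get Link Status) to
 * check that the channel is still responsive. If no response arrives
 * before the wait window expires, the channel is declared dead, link is
 * reported down and the channel is queued for recovery.
 */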
89 static void ncsi_channel_monitor(struct timer_list *t)
90 {
91 	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
92 	struct ncsi_package *np = nc->package;
93 	struct ncsi_dev_priv *ndp = np->ndp;
94 	struct ncsi_channel_mode *ncm;
95 	struct ncsi_cmd_arg nca;
96 	bool enabled, chained;
97 	unsigned int monitor_state;
98 	unsigned long flags;
99 	int state, ret;
100 
101 	spin_lock_irqsave(&nc->lock, flags);
102 	state = nc->state;
103 	chained = !list_empty(&nc->link);
104 	enabled = nc->monitor.enabled;
105 	monitor_state = nc->monitor.state;
106 	spin_unlock_irqrestore(&nc->lock, flags);
107 
108 	if (!enabled || chained) {
109 		ncsi_stop_channel_monitor(nc);
110 		return;
111 	}
112 	if (state != NCSI_CHANNEL_INACTIVE &&
113 	    state != NCSI_CHANNEL_ACTIVE) {
114 		ncsi_stop_channel_monitor(nc);
115 		return;
116 	}
117 
118 	switch (monitor_state) {
119 	case NCSI_CHANNEL_MONITOR_START:
120 	case NCSI_CHANNEL_MONITOR_RETRY:
121 		nca.ndp = ndp;
122 		nca.package = np->id;
123 		nca.channel = nc->id;
124 		nca.type = NCSI_PKT_CMD_GLS;
125 		nca.req_flags = 0;
126 		ret = ncsi_xmit_cmd(&nca);
127 		if (ret)
128 			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
129 				   ret);
130 		break;
131 	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
132 		break;
133 	default:
134 		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
135 			   nc->id);
136 		ncsi_report_link(ndp, true);
137 		ndp->flags |= NCSI_DEV_RESHUFFLE;
138 
139 		ncsi_stop_channel_monitor(nc);
140 
141 		ncm = &nc->modes[NCSI_MODE_LINK];
142 		spin_lock_irqsave(&nc->lock, flags);
143 		nc->state = NCSI_CHANNEL_INVISIBLE;
144 		ncm->data[2] &= ~0x1;
145 		spin_unlock_irqrestore(&nc->lock, flags);
146 
147 		spin_lock_irqsave(&ndp->lock, flags);
148 		nc->state = NCSI_CHANNEL_ACTIVE;
149 		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
150 		spin_unlock_irqrestore(&ndp->lock, flags);
151 		ncsi_process_next_channel(ndp);
152 		return;
153 	}
154 
155 	spin_lock_irqsave(&nc->lock, flags);
156 	nc->monitor.state++;
157 	spin_unlock_irqrestore(&nc->lock, flags);
158 	mod_timer(&nc->monitor.timer, jiffies + HZ);
159 }
160 
161 void ncsi_start_channel_monitor(struct ncsi_channel *nc)
162 {
163 	unsigned long flags;
164 
165 	spin_lock_irqsave(&nc->lock, flags);
166 	WARN_ON_ONCE(nc->monitor.enabled);
167 	nc->monitor.enabled = true;
168 	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
169 	spin_unlock_irqrestore(&nc->lock, flags);
170 
171 	mod_timer(&nc->monitor.timer, jiffies + HZ);
172 }
173 
174 void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
175 {
176 	unsigned long flags;
177 
178 	spin_lock_irqsave(&nc->lock, flags);
179 	if (!nc->monitor.enabled) {
180 		spin_unlock_irqrestore(&nc->lock, flags);
181 		return;
182 	}
183 	nc->monitor.enabled = false;
184 	spin_unlock_irqrestore(&nc->lock, flags);
185 
186 	del_timer_sync(&nc->monitor.timer);
187 }
188 
189 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
190 				       unsigned char id)
191 {
192 	struct ncsi_channel *nc;
193 
194 	NCSI_FOR_EACH_CHANNEL(np, nc) {
195 		if (nc->id == id)
196 			return nc;
197 	}
198 
199 	return NULL;
200 }
201 
202 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
203 {
204 	struct ncsi_channel *nc, *tmp;
205 	int index;
206 	unsigned long flags;
207 
208 	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
209 	if (!nc)
210 		return NULL;
211 
212 	nc->id = id;
213 	nc->package = np;
214 	nc->state = NCSI_CHANNEL_INACTIVE;
215 	nc->monitor.enabled = false;
216 	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
217 	spin_lock_init(&nc->lock);
218 	INIT_LIST_HEAD(&nc->link);
219 	for (index = 0; index < NCSI_CAP_MAX; index++)
220 		nc->caps[index].index = index;
221 	for (index = 0; index < NCSI_MODE_MAX; index++)
222 		nc->modes[index].index = index;
223 
224 	spin_lock_irqsave(&np->lock, flags);
225 	tmp = ncsi_find_channel(np, id);
226 	if (tmp) {
227 		spin_unlock_irqrestore(&np->lock, flags);
228 		kfree(nc);
229 		return tmp;
230 	}
231 
232 	list_add_tail_rcu(&nc->node, &np->channels);
233 	np->channel_num++;
234 	spin_unlock_irqrestore(&np->lock, flags);
235 
236 	return nc;
237 }
238 
239 static void ncsi_remove_channel(struct ncsi_channel *nc)
240 {
241 	struct ncsi_package *np = nc->package;
242 	unsigned long flags;
243 
244 	spin_lock_irqsave(&nc->lock, flags);
245 
246 	/* Release filters */
247 	kfree(nc->mac_filter.addrs);
248 	kfree(nc->vlan_filter.vids);
249 
250 	nc->state = NCSI_CHANNEL_INACTIVE;
251 	spin_unlock_irqrestore(&nc->lock, flags);
252 	ncsi_stop_channel_monitor(nc);
253 
254 	/* Remove and free channel */
255 	spin_lock_irqsave(&np->lock, flags);
256 	list_del_rcu(&nc->node);
257 	np->channel_num--;
258 	spin_unlock_irqrestore(&np->lock, flags);
259 
260 	kfree(nc);
261 }
262 
263 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
264 				       unsigned char id)
265 {
266 	struct ncsi_package *np;
267 
268 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
269 		if (np->id == id)
270 			return np;
271 	}
272 
273 	return NULL;
274 }
275 
276 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
277 				      unsigned char id)
278 {
279 	struct ncsi_package *np, *tmp;
280 	unsigned long flags;
281 
282 	np = kzalloc(sizeof(*np), GFP_ATOMIC);
283 	if (!np)
284 		return NULL;
285 
286 	np->id = id;
287 	np->ndp = ndp;
288 	spin_lock_init(&np->lock);
289 	INIT_LIST_HEAD(&np->channels);
290 	np->channel_whitelist = UINT_MAX;
291 
292 	spin_lock_irqsave(&ndp->lock, flags);
293 	tmp = ncsi_find_package(ndp, id);
294 	if (tmp) {
295 		spin_unlock_irqrestore(&ndp->lock, flags);
296 		kfree(np);
297 		return tmp;
298 	}
299 
300 	list_add_tail_rcu(&np->node, &ndp->packages);
301 	ndp->package_num++;
302 	spin_unlock_irqrestore(&ndp->lock, flags);
303 
304 	return np;
305 }
306 
307 void ncsi_remove_package(struct ncsi_package *np)
308 {
309 	struct ncsi_dev_priv *ndp = np->ndp;
310 	struct ncsi_channel *nc, *tmp;
311 	unsigned long flags;
312 
313 	/* Release all child channels */
314 	list_for_each_entry_safe(nc, tmp, &np->channels, node)
315 		ncsi_remove_channel(nc);
316 
317 	/* Remove and free package */
318 	spin_lock_irqsave(&ndp->lock, flags);
319 	list_del_rcu(&np->node);
320 	ndp->package_num--;
321 	spin_unlock_irqrestore(&ndp->lock, flags);
322 
323 	kfree(np);
324 }
325 
326 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
327 				   unsigned char id,
328 				   struct ncsi_package **np,
329 				   struct ncsi_channel **nc)
330 {
331 	struct ncsi_package *p;
332 	struct ncsi_channel *c;
333 
334 	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
335 	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
336 
337 	if (np)
338 		*np = p;
339 	if (nc)
340 		*nc = c;
341 }
342 
343 /* For two consecutive NCSI commands, the packet IDs shouldn't be the
344  * same; otherwise a stale response could be matched to the wrong
345  * command. So the available IDs are allocated in round-robin fashion.
346  */
347 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
348 					unsigned int req_flags)
349 {
350 	struct ncsi_request *nr = NULL;
351 	int i, limit = ARRAY_SIZE(ndp->requests);
352 	unsigned long flags;
353 
354 	/* Check for an available request slot from the cursor up to the ceiling */
355 	spin_lock_irqsave(&ndp->lock, flags);
356 	for (i = ndp->request_id; i < limit; i++) {
357 		if (ndp->requests[i].used)
358 			continue;
359 
360 		nr = &ndp->requests[i];
361 		nr->used = true;
362 		nr->flags = req_flags;
363 		ndp->request_id = i + 1;
364 		goto found;
365 	}
366 
367 	/* Fall back to searching from the starting index */
368 	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
369 		if (ndp->requests[i].used)
370 			continue;
371 
372 		nr = &ndp->requests[i];
373 		nr->used = true;
374 		nr->flags = req_flags;
375 		ndp->request_id = i + 1;
376 		goto found;
377 	}
378 
379 found:
380 	spin_unlock_irqrestore(&ndp->lock, flags);
381 	return nr;
382 }
383 
384 void ncsi_free_request(struct ncsi_request *nr)
385 {
386 	struct ncsi_dev_priv *ndp = nr->ndp;
387 	struct sk_buff *cmd, *rsp;
388 	unsigned long flags;
389 	bool driven;
390 
391 	if (nr->enabled) {
392 		nr->enabled = false;
393 		del_timer_sync(&nr->timer);
394 	}
395 
396 	spin_lock_irqsave(&ndp->lock, flags);
397 	cmd = nr->cmd;
398 	rsp = nr->rsp;
399 	nr->cmd = NULL;
400 	nr->rsp = NULL;
401 	nr->used = false;
402 	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
403 	spin_unlock_irqrestore(&ndp->lock, flags);
404 
405 	if (driven && cmd && --ndp->pending_req_num == 0)
406 		schedule_work(&ndp->work);
407 
408 	/* Release command and response */
409 	consume_skb(cmd);
410 	consume_skb(rsp);
411 }
412 
413 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
414 {
415 	struct ncsi_dev_priv *ndp;
416 
417 	NCSI_FOR_EACH_DEV(ndp) {
418 		if (ndp->ndev.dev == dev)
419 			return &ndp->ndev;
420 	}
421 
422 	return NULL;
423 }
424 
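/* Per-request timer: fires when a command gets no response in time. For
 * netlink-driven requests a timeout notification is sent to userspace;
 * the request is then released.
 */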
425 static void ncsi_request_timeout(struct timer_list *t)
426 {
427 	struct ncsi_request *nr = from_timer(nr, t, timer);
428 	struct ncsi_dev_priv *ndp = nr->ndp;
429 	struct ncsi_cmd_pkt *cmd;
430 	struct ncsi_package *np;
431 	struct ncsi_channel *nc;
432 	unsigned long flags;
433 
434 	/* If the request already has an associated response,
435 	 * let the response handler release it.
436 	 */
437 	spin_lock_irqsave(&ndp->lock, flags);
438 	nr->enabled = false;
439 	if (nr->rsp || !nr->cmd) {
440 		spin_unlock_irqrestore(&ndp->lock, flags);
441 		return;
442 	}
443 	spin_unlock_irqrestore(&ndp->lock, flags);
444 
445 	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
446 		if (nr->cmd) {
447 			/* Find the package */
448 			cmd = (struct ncsi_cmd_pkt *)
449 			      skb_network_header(nr->cmd);
450 			ncsi_find_package_and_channel(ndp,
451 						      cmd->cmd.common.channel,
452 						      &np, &nc);
453 			ncsi_send_netlink_timeout(nr, np, nc);
454 		}
455 	}
456 
457 	/* Release the request */
458 	ncsi_free_request(nr);
459 }
460 
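/* Suspend state machine for the active channel: select its package,
 * optionally refresh the link state of all channels in the package (for
 * failover), disable the channel's network Tx, disable the channel and
 * finally deselect the package unless another channel on it is active.
 */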
461 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
462 {
463 	struct ncsi_dev *nd = &ndp->ndev;
464 	struct ncsi_package *np;
465 	struct ncsi_channel *nc, *tmp;
466 	struct ncsi_cmd_arg nca;
467 	unsigned long flags;
468 	int ret;
469 
470 	np = ndp->active_package;
471 	nc = ndp->active_channel;
472 	nca.ndp = ndp;
473 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
474 	switch (nd->state) {
475 	case ncsi_dev_state_suspend:
476 		nd->state = ncsi_dev_state_suspend_select;
477 		fallthrough;
478 	case ncsi_dev_state_suspend_select:
479 		ndp->pending_req_num = 1;
480 
481 		nca.type = NCSI_PKT_CMD_SP;
482 		nca.package = np->id;
483 		nca.channel = NCSI_RESERVED_CHANNEL;
484 		if (ndp->flags & NCSI_DEV_HWA)
485 			nca.bytes[0] = 0;
486 		else
487 			nca.bytes[0] = 1;
488 
489 		/* Retrieve the latest link states of the channels in the
490 		 * current package when the active channel needs to fail over
491 		 * to another one, since we may select a different channel as
492 		 * the next active one. The channels' link states are the most
493 		 * important factor in that selection, so they need to be
494 		 * accurate. Unfortunately, the link states of inactive
495 		 * channels can't be updated with LSC AENs in time.
496 		 */
497 		if (ndp->flags & NCSI_DEV_RESHUFFLE)
498 			nd->state = ncsi_dev_state_suspend_gls;
499 		else
500 			nd->state = ncsi_dev_state_suspend_dcnt;
501 		ret = ncsi_xmit_cmd(&nca);
502 		if (ret)
503 			goto error;
504 
505 		break;
506 	case ncsi_dev_state_suspend_gls:
507 		ndp->pending_req_num = np->channel_num;
508 
509 		nca.type = NCSI_PKT_CMD_GLS;
510 		nca.package = np->id;
511 
512 		nd->state = ncsi_dev_state_suspend_dcnt;
513 		NCSI_FOR_EACH_CHANNEL(np, nc) {
514 			nca.channel = nc->id;
515 			ret = ncsi_xmit_cmd(&nca);
516 			if (ret)
517 				goto error;
518 		}
519 
520 		break;
521 	case ncsi_dev_state_suspend_dcnt:
522 		ndp->pending_req_num = 1;
523 
524 		nca.type = NCSI_PKT_CMD_DCNT;
525 		nca.package = np->id;
526 		nca.channel = nc->id;
527 
528 		nd->state = ncsi_dev_state_suspend_dc;
529 		ret = ncsi_xmit_cmd(&nca);
530 		if (ret)
531 			goto error;
532 
533 		break;
534 	case ncsi_dev_state_suspend_dc:
535 		ndp->pending_req_num = 1;
536 
537 		nca.type = NCSI_PKT_CMD_DC;
538 		nca.package = np->id;
539 		nca.channel = nc->id;
540 		nca.bytes[0] = 1;
541 
542 		nd->state = ncsi_dev_state_suspend_deselect;
543 		ret = ncsi_xmit_cmd(&nca);
544 		if (ret)
545 			goto error;
546 
547 		NCSI_FOR_EACH_CHANNEL(np, tmp) {
548 			/* If there is another channel active on this package
549 			 * do not deselect the package.
550 			 */
551 			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
552 				nd->state = ncsi_dev_state_suspend_done;
553 				break;
554 			}
555 		}
556 		break;
557 	case ncsi_dev_state_suspend_deselect:
558 		ndp->pending_req_num = 1;
559 
560 		nca.type = NCSI_PKT_CMD_DP;
561 		nca.package = np->id;
562 		nca.channel = NCSI_RESERVED_CHANNEL;
563 
564 		nd->state = ncsi_dev_state_suspend_done;
565 		ret = ncsi_xmit_cmd(&nca);
566 		if (ret)
567 			goto error;
568 
569 		break;
570 	case ncsi_dev_state_suspend_done:
571 		spin_lock_irqsave(&nc->lock, flags);
572 		nc->state = NCSI_CHANNEL_INACTIVE;
573 		spin_unlock_irqrestore(&nc->lock, flags);
574 		if (ndp->flags & NCSI_DEV_RESET)
575 			ncsi_reset_dev(nd);
576 		else
577 			ncsi_process_next_channel(ndp);
578 		break;
579 	default:
580 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
581 			    nd->state);
582 	}
583 
584 	return;
585 error:
586 	nd->state = ncsi_dev_state_functional;
587 }
588 
589 /* Check the VLAN filter bitmap for a set filter, and construct a
590  * "Set VLAN Filter - Disable" packet if found.
591  */
592 static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
593 			 struct ncsi_cmd_arg *nca)
594 {
595 	struct ncsi_channel_vlan_filter *ncf;
596 	unsigned long flags;
597 	void *bitmap;
598 	int index;
599 	u16 vid;
600 
601 	ncf = &nc->vlan_filter;
602 	bitmap = &ncf->bitmap;
603 
604 	spin_lock_irqsave(&nc->lock, flags);
605 	index = find_next_bit(bitmap, ncf->n_vids, 0);
606 	if (index >= ncf->n_vids) {
607 		spin_unlock_irqrestore(&nc->lock, flags);
608 		return -1;
609 	}
610 	vid = ncf->vids[index];
611 
612 	clear_bit(index, bitmap);
613 	ncf->vids[index] = 0;
614 	spin_unlock_irqrestore(&nc->lock, flags);
615 
616 	nca->type = NCSI_PKT_CMD_SVF;
617 	nca->words[1] = vid;
618 	/* HW filter index starts at 1 */
619 	nca->bytes[6] = index + 1;
620 	nca->bytes[7] = 0x00;
621 	return 0;
622 }
623 
624 /* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
625  * packet.
626  */
627 static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
628 		       struct ncsi_cmd_arg *nca)
629 {
630 	struct ncsi_channel_vlan_filter *ncf;
631 	struct vlan_vid *vlan = NULL;
632 	unsigned long flags;
633 	int i, index;
634 	void *bitmap;
635 	u16 vid;
636 
637 	if (list_empty(&ndp->vlan_vids))
638 		return -1;
639 
640 	ncf = &nc->vlan_filter;
641 	bitmap = &ncf->bitmap;
642 
643 	spin_lock_irqsave(&nc->lock, flags);
644 
645 	rcu_read_lock();
646 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
647 		vid = vlan->vid;
648 		for (i = 0; i < ncf->n_vids; i++)
649 			if (ncf->vids[i] == vid) {
650 				vid = 0;
651 				break;
652 			}
653 		if (vid)
654 			break;
655 	}
656 	rcu_read_unlock();
657 
658 	if (!vid) {
659 		/* Every VLAN ID on the list is already set in the filter */
660 		spin_unlock_irqrestore(&nc->lock, flags);
661 		return -1;
662 	}
663 
664 	index = find_next_zero_bit(bitmap, ncf->n_vids, 0);
665 	if (index < 0 || index >= ncf->n_vids) {
666 		netdev_err(ndp->ndev.dev,
667 			   "Channel %u already has all VLAN filters set\n",
668 			   nc->id);
669 		spin_unlock_irqrestore(&nc->lock, flags);
670 		return -1;
671 	}
672 
673 	ncf->vids[index] = vid;
674 	set_bit(index, bitmap);
675 	spin_unlock_irqrestore(&nc->lock, flags);
676 
677 	nca->type = NCSI_PKT_CMD_SVF;
678 	nca->words[1] = vid;
679 	/* HW filter index starts at 1 */
680 	nca->bytes[6] = index + 1;
681 	nca->bytes[7] = 0x01;
682 
683 	return 0;
684 }
685 
686 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
687 
688 /* NCSI OEM Command APIs */
689 static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
690 {
691 	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
692 	int ret = 0;
693 
694 	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
695 
696 	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
697 	*(unsigned int *)data = ntohl(NCSI_OEM_MFR_BCM_ID);
698 	data[5] = NCSI_OEM_BCM_CMD_GMA;
699 
700 	nca->data = data;
701 
702 	ret = ncsi_xmit_cmd(nca);
703 	if (ret)
704 		netdev_err(nca->ndp->ndev.dev,
705 			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
706 			   nca->type);
707 	return ret;
708 }
709 
710 static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
711 {
712 	union {
713 		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
714 		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
715 	} u;
716 	int ret = 0;
717 
718 	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
719 
720 	memset(&u, 0, sizeof(u));
721 	u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
722 	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
723 	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
724 
725 	nca->data = u.data_u8;
726 
727 	ret = ncsi_xmit_cmd(nca);
728 	if (ret)
729 		netdev_err(nca->ndp->ndev.dev,
730 			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
731 			   nca->type);
732 	return ret;
733 }
734 
735 static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
736 {
737 	union {
738 		u8 data_u8[NCSI_OEM_MLX_CMD_SMAF_LEN];
739 		u32 data_u32[NCSI_OEM_MLX_CMD_SMAF_LEN / sizeof(u32)];
740 	} u;
741 	int ret = 0;
742 
743 	memset(&u, 0, sizeof(u));
744 	u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
745 	u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
746 	u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
747 	memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
748 	       nca->ndp->ndev.dev->dev_addr, ETH_ALEN);
749 	u.data_u8[MLX_SMAF_MED_SUPPORT_OFFSET] =
750 		(MLX_MC_RBT_AVL | MLX_MC_RBT_SUPPORT);
751 
752 	nca->payload = NCSI_OEM_MLX_CMD_SMAF_LEN;
753 	nca->data = u.data_u8;
754 
755 	ret = ncsi_xmit_cmd(nca);
756 	if (ret)
757 		netdev_err(nca->ndp->ndev.dev,
758 			   "NCSI: Failed to transmit cmd 0x%x during probe\n",
759 			   nca->type);
760 	return ret;
761 }
762 
763 /* OEM Command handlers initialization */
764 static struct ncsi_oem_gma_handler {
765 	unsigned int	mfr_id;
766 	int		(*handler)(struct ncsi_cmd_arg *nca);
767 } ncsi_oem_gma_handlers[] = {
768 	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
769 	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx }
770 };
771 
772 static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
773 {
774 	struct ncsi_oem_gma_handler *nch = NULL;
775 	int i;
776 
777 	/* This function should only be called once, return if flag set */
778 	if (nca->ndp->gma_flag == 1)
779 		return -1;
780 
781 	/* Find gma handler for given manufacturer id */
782 	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
783 		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
784 			if (ncsi_oem_gma_handlers[i].handler)
785 				nch = &ncsi_oem_gma_handlers[i];
786 			break;
787 		}
788 	}
789 
790 	if (!nch) {
791 		netdev_err(nca->ndp->ndev.dev,
792 			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
793 			   mf_id);
794 		return -1;
795 	}
796 
797 	/* Get MAC address from the NCSI device */
798 	return nch->handler(nca);
799 }
800 
801 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
802 
803 /* Determine if a given channel from the channel_queue should be used for Tx */
804 static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
805 			       struct ncsi_channel *nc)
806 {
807 	struct ncsi_channel_mode *ncm;
808 	struct ncsi_channel *channel;
809 	struct ncsi_package *np;
810 
811 	/* Check if any other channel has Tx enabled; a channel may have already
812 	 * been configured and removed from the channel queue.
813 	 */
814 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
815 		if (!ndp->multi_package && np != nc->package)
816 			continue;
817 		NCSI_FOR_EACH_CHANNEL(np, channel) {
818 			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
819 			if (ncm->enable)
820 				return false;
821 		}
822 	}
823 
824 	/* This channel is the preferred channel and has link */
825 	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
826 		np = channel->package;
827 		if (np->preferred_channel &&
828 		    ncsi_channel_has_link(np->preferred_channel)) {
829 			return np->preferred_channel == nc;
830 		}
831 	}
832 
833 	/* This channel has link */
834 	if (ncsi_channel_has_link(nc))
835 		return true;
836 
837 	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
838 		if (ncsi_channel_has_link(channel))
839 			return false;
840 
841 	/* No other channel has link; default to this one */
842 	return true;
843 }
844 
845 /* Change the active Tx channel in a multi-channel setup */
846 int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
847 			   struct ncsi_package *package,
848 			   struct ncsi_channel *disable,
849 			   struct ncsi_channel *enable)
850 {
851 	struct ncsi_cmd_arg nca;
852 	struct ncsi_channel *nc;
853 	struct ncsi_package *np;
854 	int ret = 0;
855 
856 	if (!package->multi_channel && !ndp->multi_package)
857 		netdev_warn(ndp->ndev.dev,
858 			    "NCSI: Trying to update Tx channel in single-channel mode\n");
859 	nca.ndp = ndp;
860 	nca.req_flags = 0;
861 
862 	/* Find current channel with Tx enabled */
863 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
864 		if (disable)
865 			break;
866 		if (!ndp->multi_package && np != package)
867 			continue;
868 
869 		NCSI_FOR_EACH_CHANNEL(np, nc)
870 			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
871 				disable = nc;
872 				break;
873 			}
874 	}
875 
876 	/* Find a suitable channel for Tx */
877 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
878 		if (enable)
879 			break;
880 		if (!ndp->multi_package && np != package)
881 			continue;
882 		if (!(ndp->package_whitelist & (0x1 << np->id)))
883 			continue;
884 
885 		if (np->preferred_channel &&
886 		    ncsi_channel_has_link(np->preferred_channel)) {
887 			enable = np->preferred_channel;
888 			break;
889 		}
890 
891 		NCSI_FOR_EACH_CHANNEL(np, nc) {
892 			if (!(np->channel_whitelist & 0x1 << nc->id))
893 				continue;
894 			if (nc->state != NCSI_CHANNEL_ACTIVE)
895 				continue;
896 			if (ncsi_channel_has_link(nc)) {
897 				enable = nc;
898 				break;
899 			}
900 		}
901 	}
902 
903 	if (disable == enable)
904 		return -1;
905 
906 	if (!enable)
907 		return -1;
908 
909 	if (disable) {
910 		nca.channel = disable->id;
911 		nca.package = disable->package->id;
912 		nca.type = NCSI_PKT_CMD_DCNT;
913 		ret = ncsi_xmit_cmd(&nca);
914 		if (ret)
915 			netdev_err(ndp->ndev.dev,
916 				   "Error %d sending DCNT\n",
917 				   ret);
918 	}
919 
920 	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
921 
922 	nca.channel = enable->id;
923 	nca.package = enable->package->id;
924 	nca.type = NCSI_PKT_CMD_ECNT;
925 	ret = ncsi_xmit_cmd(&nca);
926 	if (ret)
927 		netdev_err(ndp->ndev.dev,
928 			   "Error %d sending ECNT\n",
929 			   ret);
930 
931 	return ret;
932 }
933 
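/* Configuration state machine for the active channel: select the package,
 * clear the channel's initial state, optionally fetch the MAC address via
 * an OEM command, program VLAN and MAC filters, enable broadcast filtering,
 * enable the channel (plus Tx and AENs where applicable) and query the
 * link status.
 */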
934 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
935 {
936 	struct ncsi_package *np = ndp->active_package;
937 	struct ncsi_channel *nc = ndp->active_channel;
938 	struct ncsi_channel *hot_nc = NULL;
939 	struct ncsi_dev *nd = &ndp->ndev;
940 	struct net_device *dev = nd->dev;
941 	struct ncsi_cmd_arg nca;
942 	unsigned char index;
943 	unsigned long flags;
944 	int ret;
945 
946 	nca.ndp = ndp;
947 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
948 	switch (nd->state) {
949 	case ncsi_dev_state_config:
950 	case ncsi_dev_state_config_sp:
951 		ndp->pending_req_num = 1;
952 
953 		/* Select the specific package */
954 		nca.type = NCSI_PKT_CMD_SP;
955 		if (ndp->flags & NCSI_DEV_HWA)
956 			nca.bytes[0] = 0;
957 		else
958 			nca.bytes[0] = 1;
959 		nca.package = np->id;
960 		nca.channel = NCSI_RESERVED_CHANNEL;
961 		ret = ncsi_xmit_cmd(&nca);
962 		if (ret) {
963 			netdev_err(ndp->ndev.dev,
964 				   "NCSI: Failed to transmit CMD_SP\n");
965 			goto error;
966 		}
967 
968 		nd->state = ncsi_dev_state_config_cis;
969 		break;
970 	case ncsi_dev_state_config_cis:
971 		ndp->pending_req_num = 1;
972 
973 		/* Clear initial state */
974 		nca.type = NCSI_PKT_CMD_CIS;
975 		nca.package = np->id;
976 		nca.channel = nc->id;
977 		ret = ncsi_xmit_cmd(&nca);
978 		if (ret) {
979 			netdev_err(ndp->ndev.dev,
980 				   "NCSI: Failed to transmit CMD_CIS\n");
981 			goto error;
982 		}
983 
984 		nd->state = ncsi_dev_state_config_oem_gma;
985 		break;
986 	case ncsi_dev_state_config_oem_gma:
987 		nd->state = ncsi_dev_state_config_clear_vids;
988 		ret = -1;
989 
990 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
991 		nca.type = NCSI_PKT_CMD_OEM;
992 		nca.package = np->id;
993 		nca.channel = nc->id;
994 		ndp->pending_req_num = 1;
995 		ret = ncsi_gma_handler(&nca, nc->version.mf_id);
996 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
997 
998 		if (ret < 0)
999 			schedule_work(&ndp->work);
1000 
1001 		break;
1002 	case ncsi_dev_state_config_clear_vids:
1003 	case ncsi_dev_state_config_svf:
1004 	case ncsi_dev_state_config_ev:
1005 	case ncsi_dev_state_config_sma:
1006 	case ncsi_dev_state_config_ebf:
1007 	case ncsi_dev_state_config_dgmf:
1008 	case ncsi_dev_state_config_ecnt:
1009 	case ncsi_dev_state_config_ec:
1010 	case ncsi_dev_state_config_ae:
1011 	case ncsi_dev_state_config_gls:
1012 		ndp->pending_req_num = 1;
1013 
1014 		nca.package = np->id;
1015 		nca.channel = nc->id;
1016 
1017 		/* Clear any active filters on the channel before setting */
1018 		if (nd->state == ncsi_dev_state_config_clear_vids) {
1019 			ret = clear_one_vid(ndp, nc, &nca);
1020 			if (ret) {
1021 				nd->state = ncsi_dev_state_config_svf;
1022 				schedule_work(&ndp->work);
1023 				break;
1024 			}
1025 			/* Repeat */
1026 			nd->state = ncsi_dev_state_config_clear_vids;
1027 		/* Add known VLAN tags to the filter */
1028 		} else if (nd->state == ncsi_dev_state_config_svf) {
1029 			ret = set_one_vid(ndp, nc, &nca);
1030 			if (ret) {
1031 				nd->state = ncsi_dev_state_config_ev;
1032 				schedule_work(&ndp->work);
1033 				break;
1034 			}
1035 			/* Repeat */
1036 			nd->state = ncsi_dev_state_config_svf;
1037 		/* Enable/Disable the VLAN filter */
1038 		} else if (nd->state == ncsi_dev_state_config_ev) {
1039 			if (list_empty(&ndp->vlan_vids)) {
1040 				nca.type = NCSI_PKT_CMD_DV;
1041 			} else {
1042 				nca.type = NCSI_PKT_CMD_EV;
1043 				nca.bytes[3] = NCSI_CAP_VLAN_NO;
1044 			}
1045 			nd->state = ncsi_dev_state_config_sma;
1046 		} else if (nd->state == ncsi_dev_state_config_sma) {
1047 		/* Use first entry in unicast filter table. Note that
1048 		 * the MAC filter table starts from entry 1 instead of
1049 		 * 0.
1050 		 */
1051 			nca.type = NCSI_PKT_CMD_SMA;
1052 			for (index = 0; index < 6; index++)
1053 				nca.bytes[index] = dev->dev_addr[index];
1054 			nca.bytes[6] = 0x1;
1055 			nca.bytes[7] = 0x1;
1056 			nd->state = ncsi_dev_state_config_ebf;
1057 		} else if (nd->state == ncsi_dev_state_config_ebf) {
1058 			nca.type = NCSI_PKT_CMD_EBF;
1059 			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
1060 			/* If multicast global filtering is supported then
1061 			 * disable it so that all multicast packets will be
1062 			 * forwarded to the management controller.
1063 			 */
1064 			if (nc->caps[NCSI_CAP_GENERIC].cap &
1065 			    NCSI_CAP_GENERIC_MC)
1066 				nd->state = ncsi_dev_state_config_dgmf;
1067 			else if (ncsi_channel_is_tx(ndp, nc))
1068 				nd->state = ncsi_dev_state_config_ecnt;
1069 			else
1070 				nd->state = ncsi_dev_state_config_ec;
1071 		} else if (nd->state == ncsi_dev_state_config_dgmf) {
1072 			nca.type = NCSI_PKT_CMD_DGMF;
1073 			if (ncsi_channel_is_tx(ndp, nc))
1074 				nd->state = ncsi_dev_state_config_ecnt;
1075 			else
1076 				nd->state = ncsi_dev_state_config_ec;
1077 		} else if (nd->state == ncsi_dev_state_config_ecnt) {
1078 			if (np->preferred_channel &&
1079 			    nc != np->preferred_channel)
1080 				netdev_info(ndp->ndev.dev,
1081 					    "NCSI: Tx failed over to channel %u\n",
1082 					    nc->id);
1083 			nca.type = NCSI_PKT_CMD_ECNT;
1084 			nd->state = ncsi_dev_state_config_ec;
1085 		} else if (nd->state == ncsi_dev_state_config_ec) {
1086 			/* Enable AEN if it's supported */
1087 			nca.type = NCSI_PKT_CMD_EC;
1088 			nd->state = ncsi_dev_state_config_ae;
1089 			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
1090 				nd->state = ncsi_dev_state_config_gls;
1091 		} else if (nd->state == ncsi_dev_state_config_ae) {
1092 			nca.type = NCSI_PKT_CMD_AE;
1093 			nca.bytes[0] = 0;
1094 			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
1095 			nd->state = ncsi_dev_state_config_gls;
1096 		} else if (nd->state == ncsi_dev_state_config_gls) {
1097 			nca.type = NCSI_PKT_CMD_GLS;
1098 			nd->state = ncsi_dev_state_config_done;
1099 		}
1100 
1101 		ret = ncsi_xmit_cmd(&nca);
1102 		if (ret) {
1103 			netdev_err(ndp->ndev.dev,
1104 				   "NCSI: Failed to transmit CMD %x\n",
1105 				   nca.type);
1106 			goto error;
1107 		}
1108 		break;
1109 	case ncsi_dev_state_config_done:
1110 		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
1111 			   nc->id);
1112 		spin_lock_irqsave(&nc->lock, flags);
1113 		nc->state = NCSI_CHANNEL_ACTIVE;
1114 
1115 		if (ndp->flags & NCSI_DEV_RESET) {
1116 			/* A reset event happened during config, start it now */
1117 			nc->reconfigure_needed = false;
1118 			spin_unlock_irqrestore(&nc->lock, flags);
1119 			ncsi_reset_dev(nd);
1120 			break;
1121 		}
1122 
1123 		if (nc->reconfigure_needed) {
1124 			/* This channel's configuration has been updated
1125 			 * part-way during the config state - start the
1126 			 * channel configuration over
1127 			 */
1128 			nc->reconfigure_needed = false;
1129 			nc->state = NCSI_CHANNEL_INACTIVE;
1130 			spin_unlock_irqrestore(&nc->lock, flags);
1131 
1132 			spin_lock_irqsave(&ndp->lock, flags);
1133 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1134 			spin_unlock_irqrestore(&ndp->lock, flags);
1135 
1136 			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
1137 			ncsi_process_next_channel(ndp);
1138 			break;
1139 		}
1140 
1141 		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
1142 			hot_nc = nc;
1143 		} else {
1144 			hot_nc = NULL;
1145 			netdev_dbg(ndp->ndev.dev,
1146 				   "NCSI: channel %u link down after config\n",
1147 				   nc->id);
1148 		}
1149 		spin_unlock_irqrestore(&nc->lock, flags);
1150 
1151 		/* Update the hot channel */
1152 		spin_lock_irqsave(&ndp->lock, flags);
1153 		ndp->hot_channel = hot_nc;
1154 		spin_unlock_irqrestore(&ndp->lock, flags);
1155 
1156 		ncsi_start_channel_monitor(nc);
1157 		ncsi_process_next_channel(ndp);
1158 		break;
1159 	default:
1160 		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
1161 			     nd->state);
1162 	}
1163 
1164 	return;
1165 
1166 error:
1167 	ncsi_report_link(ndp, true);
1168 }
1169 
1170 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
1171 {
1172 	struct ncsi_channel *nc, *found, *hot_nc;
1173 	struct ncsi_channel_mode *ncm;
1174 	unsigned long flags, cflags;
1175 	struct ncsi_package *np;
1176 	bool with_link;
1177 
1178 	spin_lock_irqsave(&ndp->lock, flags);
1179 	hot_nc = ndp->hot_channel;
1180 	spin_unlock_irqrestore(&ndp->lock, flags);
1181 
1182 	/* By default the search stops once an inactive channel with link
1183 	 * up is found, unless a preferred channel is set.
1184 	 * If multi_package or multi_channel are configured, all channels in
1185 	 * the whitelist are added to the channel queue.
1186 	 */
1187 	found = NULL;
1188 	with_link = false;
1189 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1190 		if (!(ndp->package_whitelist & (0x1 << np->id)))
1191 			continue;
1192 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1193 			if (!(np->channel_whitelist & (0x1 << nc->id)))
1194 				continue;
1195 
1196 			spin_lock_irqsave(&nc->lock, cflags);
1197 
1198 			if (!list_empty(&nc->link) ||
1199 			    nc->state != NCSI_CHANNEL_INACTIVE) {
1200 				spin_unlock_irqrestore(&nc->lock, cflags);
1201 				continue;
1202 			}
1203 
1204 			if (!found)
1205 				found = nc;
1206 
1207 			if (nc == hot_nc)
1208 				found = nc;
1209 
1210 			ncm = &nc->modes[NCSI_MODE_LINK];
1211 			if (ncm->data[2] & 0x1) {
1212 				found = nc;
1213 				with_link = true;
1214 			}
1215 
1216 			/* If multi_channel is enabled, configure all valid
1217 			 * channels, whether or not they currently have link,
1218 			 * so they will have AENs enabled.
1219 			 */
1220 			if (with_link || np->multi_channel) {
1221 				spin_lock_irqsave(&ndp->lock, flags);
1222 				list_add_tail_rcu(&nc->link,
1223 						  &ndp->channel_queue);
1224 				spin_unlock_irqrestore(&ndp->lock, flags);
1225 
1226 				netdev_dbg(ndp->ndev.dev,
1227 					   "NCSI: Channel %u added to queue (link %s)\n",
1228 					   nc->id,
1229 					   ncm->data[2] & 0x1 ? "up" : "down");
1230 			}
1231 
1232 			spin_unlock_irqrestore(&nc->lock, cflags);
1233 
1234 			if (with_link && !np->multi_channel)
1235 				break;
1236 		}
1237 		if (with_link && !ndp->multi_package)
1238 			break;
1239 	}
1240 
1241 	if (list_empty(&ndp->channel_queue) && found) {
1242 		netdev_info(ndp->ndev.dev,
1243 			    "NCSI: No channel with link found, configuring channel %u\n",
1244 			    found->id);
1245 		spin_lock_irqsave(&ndp->lock, flags);
1246 		list_add_tail_rcu(&found->link, &ndp->channel_queue);
1247 		spin_unlock_irqrestore(&ndp->lock, flags);
1248 	} else if (!found) {
1249 		netdev_warn(ndp->ndev.dev,
1250 			    "NCSI: No channel found to configure!\n");
1251 		ncsi_report_link(ndp, true);
1252 		return -ENODEV;
1253 	}
1254 
1255 	return ncsi_process_next_channel(ndp);
1256 }
1257 
1258 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1259 {
1260 	struct ncsi_package *np;
1261 	struct ncsi_channel *nc;
1262 	unsigned int cap;
1263 	bool has_channel = false;
1264 
1265 	/* Hardware arbitration is disabled if any one channel
1266 	 * doesn't explicitly support it.
1267 	 */
1268 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1269 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1270 			has_channel = true;
1271 
1272 			cap = nc->caps[NCSI_CAP_GENERIC].cap;
1273 			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1274 			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1275 			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
1276 				ndp->flags &= ~NCSI_DEV_HWA;
1277 				return false;
1278 			}
1279 		}
1280 	}
1281 
1282 	if (has_channel) {
1283 		ndp->flags |= NCSI_DEV_HWA;
1284 		return true;
1285 	}
1286 
1287 	ndp->flags &= ~NCSI_DEV_HWA;
1288 	return false;
1289 }
1290 
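/* Probe state machine: deselect all packages, then select each package in
 * turn, clear the initial state of its channels and retrieve their version,
 * capabilities and link status before deselecting it and moving on to the
 * next package.
 */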
1291 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1292 {
1293 	struct ncsi_dev *nd = &ndp->ndev;
1294 	struct ncsi_package *np;
1295 	struct ncsi_channel *nc;
1296 	struct ncsi_cmd_arg nca;
1297 	unsigned char index;
1298 	int ret;
1299 
1300 	nca.ndp = ndp;
1301 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1302 	switch (nd->state) {
1303 	case ncsi_dev_state_probe:
1304 		nd->state = ncsi_dev_state_probe_deselect;
1305 		fallthrough;
1306 	case ncsi_dev_state_probe_deselect:
1307 		ndp->pending_req_num = 8;
1308 
1309 		/* Deselect all possible packages */
1310 		nca.type = NCSI_PKT_CMD_DP;
1311 		nca.channel = NCSI_RESERVED_CHANNEL;
1312 		for (index = 0; index < 8; index++) {
1313 			nca.package = index;
1314 			ret = ncsi_xmit_cmd(&nca);
1315 			if (ret)
1316 				goto error;
1317 		}
1318 
1319 		nd->state = ncsi_dev_state_probe_package;
1320 		break;
1321 	case ncsi_dev_state_probe_package:
1322 		ndp->pending_req_num = 1;
1323 
1324 		nca.type = NCSI_PKT_CMD_SP;
1325 		nca.bytes[0] = 1;
1326 		nca.package = ndp->package_probe_id;
1327 		nca.channel = NCSI_RESERVED_CHANNEL;
1328 		ret = ncsi_xmit_cmd(&nca);
1329 		if (ret)
1330 			goto error;
1331 		nd->state = ncsi_dev_state_probe_channel;
1332 		break;
1333 	case ncsi_dev_state_probe_channel:
1334 		ndp->active_package = ncsi_find_package(ndp,
1335 							ndp->package_probe_id);
1336 		if (!ndp->active_package) {
1337 			/* No response */
1338 			nd->state = ncsi_dev_state_probe_dp;
1339 			schedule_work(&ndp->work);
1340 			break;
1341 		}
1342 		nd->state = ncsi_dev_state_probe_cis;
1343 		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) &&
1344 		    ndp->mlx_multi_host)
1345 			nd->state = ncsi_dev_state_probe_mlx_gma;
1346 
1347 		schedule_work(&ndp->work);
1348 		break;
1349 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
1350 	case ncsi_dev_state_probe_mlx_gma:
1351 		ndp->pending_req_num = 1;
1352 
1353 		nca.type = NCSI_PKT_CMD_OEM;
1354 		nca.package = ndp->active_package->id;
1355 		nca.channel = 0;
1356 		ret = ncsi_oem_gma_handler_mlx(&nca);
1357 		if (ret)
1358 			goto error;
1359 
1360 		nd->state = ncsi_dev_state_probe_mlx_smaf;
1361 		break;
1362 	case ncsi_dev_state_probe_mlx_smaf:
1363 		ndp->pending_req_num = 1;
1364 
1365 		nca.type = NCSI_PKT_CMD_OEM;
1366 		nca.package = ndp->active_package->id;
1367 		nca.channel = 0;
1368 		ret = ncsi_oem_smaf_mlx(&nca);
1369 		if (ret)
1370 			goto error;
1371 
1372 		nd->state = ncsi_dev_state_probe_cis;
1373 		break;
1374 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
1375 	case ncsi_dev_state_probe_cis:
1376 		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1377 
1378 		/* Clear initial state */
1379 		nca.type = NCSI_PKT_CMD_CIS;
1380 		nca.package = ndp->active_package->id;
1381 		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1382 			nca.channel = index;
1383 			ret = ncsi_xmit_cmd(&nca);
1384 			if (ret)
1385 				goto error;
1386 		}
1387 
1388 		nd->state = ncsi_dev_state_probe_gvi;
1389 		break;
1390 	case ncsi_dev_state_probe_gvi:
1391 	case ncsi_dev_state_probe_gc:
1392 	case ncsi_dev_state_probe_gls:
1393 		np = ndp->active_package;
1394 		ndp->pending_req_num = np->channel_num;
1395 
1396 		/* Retrieve version, capability or link status */
1397 		if (nd->state == ncsi_dev_state_probe_gvi)
1398 			nca.type = NCSI_PKT_CMD_GVI;
1399 		else if (nd->state == ncsi_dev_state_probe_gc)
1400 			nca.type = NCSI_PKT_CMD_GC;
1401 		else
1402 			nca.type = NCSI_PKT_CMD_GLS;
1403 
1404 		nca.package = np->id;
1405 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1406 			nca.channel = nc->id;
1407 			ret = ncsi_xmit_cmd(&nca);
1408 			if (ret)
1409 				goto error;
1410 		}
1411 
1412 		if (nd->state == ncsi_dev_state_probe_gvi)
1413 			nd->state = ncsi_dev_state_probe_gc;
1414 		else if (nd->state == ncsi_dev_state_probe_gc)
1415 			nd->state = ncsi_dev_state_probe_gls;
1416 		else
1417 			nd->state = ncsi_dev_state_probe_dp;
1418 		break;
1419 	case ncsi_dev_state_probe_dp:
1420 		ndp->pending_req_num = 1;
1421 
1422 		/* Deselect the current package */
1423 		nca.type = NCSI_PKT_CMD_DP;
1424 		nca.package = ndp->package_probe_id;
1425 		nca.channel = NCSI_RESERVED_CHANNEL;
1426 		ret = ncsi_xmit_cmd(&nca);
1427 		if (ret)
1428 			goto error;
1429 
1430 		/* Probe next package */
1431 		ndp->package_probe_id++;
1432 		if (ndp->package_probe_id >= 8) {
1433 			/* Probe finished */
1434 			ndp->flags |= NCSI_DEV_PROBED;
1435 			break;
1436 		}
1437 		nd->state = ncsi_dev_state_probe_package;
1438 		ndp->active_package = NULL;
1439 		break;
1440 	default:
1441 		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
1442 			    nd->state);
1443 	}
1444 
1445 	if (ndp->flags & NCSI_DEV_PROBED) {
1446 		/* Check if all packages have HWA support */
1447 		ncsi_check_hwa(ndp);
1448 		ncsi_choose_active_channel(ndp);
1449 	}
1450 
1451 	return;
1452 error:
1453 	netdev_err(ndp->ndev.dev,
1454 		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
1455 		   nca.type);
1456 	ncsi_report_link(ndp, true);
1457 }
1458 
1459 static void ncsi_dev_work(struct work_struct *work)
1460 {
1461 	struct ncsi_dev_priv *ndp = container_of(work,
1462 			struct ncsi_dev_priv, work);
1463 	struct ncsi_dev *nd = &ndp->ndev;
1464 
1465 	switch (nd->state & ncsi_dev_state_major) {
1466 	case ncsi_dev_state_probe:
1467 		ncsi_probe_channel(ndp);
1468 		break;
1469 	case ncsi_dev_state_suspend:
1470 		ncsi_suspend_channel(ndp);
1471 		break;
1472 	case ncsi_dev_state_config:
1473 		ncsi_configure_channel(ndp);
1474 		break;
1475 	default:
1476 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1477 			    nd->state);
1478 	}
1479 }
1480 
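/* Take the next channel off the channel queue and run the appropriate state
 * machine: configuration for an inactive channel, suspension for an active
 * one. With an empty queue, either reshuffle the active channel selection
 * or report the final link state.
 */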
1481 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1482 {
1483 	struct ncsi_channel *nc;
1484 	int old_state;
1485 	unsigned long flags;
1486 
1487 	spin_lock_irqsave(&ndp->lock, flags);
1488 	nc = list_first_or_null_rcu(&ndp->channel_queue,
1489 				    struct ncsi_channel, link);
1490 	if (!nc) {
1491 		spin_unlock_irqrestore(&ndp->lock, flags);
1492 		goto out;
1493 	}
1494 
1495 	list_del_init(&nc->link);
1496 	spin_unlock_irqrestore(&ndp->lock, flags);
1497 
1498 	spin_lock_irqsave(&nc->lock, flags);
1499 	old_state = nc->state;
1500 	nc->state = NCSI_CHANNEL_INVISIBLE;
1501 	spin_unlock_irqrestore(&nc->lock, flags);
1502 
1503 	ndp->active_channel = nc;
1504 	ndp->active_package = nc->package;
1505 
1506 	switch (old_state) {
1507 	case NCSI_CHANNEL_INACTIVE:
1508 		ndp->ndev.state = ncsi_dev_state_config;
1509 		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
1510 	                   nc->id);
1511 		ncsi_configure_channel(ndp);
1512 		break;
1513 	case NCSI_CHANNEL_ACTIVE:
1514 		ndp->ndev.state = ncsi_dev_state_suspend;
1515 		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1516 			   nc->id);
1517 		ncsi_suspend_channel(ndp);
1518 		break;
1519 	default:
1520 		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1521 			   old_state, nc->package->id, nc->id);
1522 		ncsi_report_link(ndp, false);
1523 		return -EINVAL;
1524 	}
1525 
1526 	return 0;
1527 
1528 out:
1529 	ndp->active_channel = NULL;
1530 	ndp->active_package = NULL;
1531 	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1532 		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1533 		return ncsi_choose_active_channel(ndp);
1534 	}
1535 
1536 	ncsi_report_link(ndp, false);
1537 	return -ENODEV;
1538 }
1539 
1540 static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1541 {
1542 	struct ncsi_dev *nd = &ndp->ndev;
1543 	struct ncsi_channel *nc;
1544 	struct ncsi_package *np;
1545 	unsigned long flags;
1546 	unsigned int n = 0;
1547 
1548 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1549 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1550 			spin_lock_irqsave(&nc->lock, flags);
1551 
1552 			/* Channels may be busy, so mark them dirty instead of
1553 			 * kicking them if:
1554 			 * a) not ACTIVE (configured)
1555 			 * b) in the channel_queue (to be configured)
1556 			 * c) their ndev is in the config state
1557 			 */
1558 			if (nc->state != NCSI_CHANNEL_ACTIVE) {
1559 				if ((ndp->ndev.state & 0xff00) ==
1560 						ncsi_dev_state_config ||
1561 						!list_empty(&nc->link)) {
1562 					netdev_dbg(nd->dev,
1563 						   "NCSI: channel %p marked dirty\n",
1564 						   nc);
1565 					nc->reconfigure_needed = true;
1566 				}
1567 				spin_unlock_irqrestore(&nc->lock, flags);
1568 				continue;
1569 			}
1570 
1571 			spin_unlock_irqrestore(&nc->lock, flags);
1572 
1573 			ncsi_stop_channel_monitor(nc);
1574 			spin_lock_irqsave(&nc->lock, flags);
1575 			nc->state = NCSI_CHANNEL_INACTIVE;
1576 			spin_unlock_irqrestore(&nc->lock, flags);
1577 
1578 			spin_lock_irqsave(&ndp->lock, flags);
1579 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1580 			spin_unlock_irqrestore(&ndp->lock, flags);
1581 
1582 			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
1583 			n++;
1584 		}
1585 	}
1586 
1587 	return n;
1588 }
1589 
1590 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1591 {
1592 	struct ncsi_dev_priv *ndp;
1593 	unsigned int n_vids = 0;
1594 	struct vlan_vid *vlan;
1595 	struct ncsi_dev *nd;
1596 	bool found = false;
1597 
1598 	if (vid == 0)
1599 		return 0;
1600 
1601 	nd = ncsi_find_dev(dev);
1602 	if (!nd) {
1603 		netdev_warn(dev, "NCSI: No net_device?\n");
1604 		return 0;
1605 	}
1606 
1607 	ndp = TO_NCSI_DEV_PRIV(nd);
1608 
1609 	/* Add the VLAN id to our internal list */
1610 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1611 		n_vids++;
1612 		if (vlan->vid == vid) {
1613 			netdev_dbg(dev, "NCSI: vid %u already registered\n",
1614 				   vid);
1615 			return 0;
1616 		}
1617 	}
1618 	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1619 		netdev_warn(dev,
1620 			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
1621 			    vid, NCSI_MAX_VLAN_VIDS);
1622 		return -ENOSPC;
1623 	}
1624 
1625 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1626 	if (!vlan)
1627 		return -ENOMEM;
1628 
1629 	vlan->proto = proto;
1630 	vlan->vid = vid;
1631 	list_add_rcu(&vlan->list, &ndp->vlan_vids);
1632 
1633 	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
1634 
1635 	found = ncsi_kick_channels(ndp) != 0;
1636 
1637 	return found ? ncsi_process_next_channel(ndp) : 0;
1638 }
1639 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1640 
1641 int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1642 {
1643 	struct vlan_vid *vlan, *tmp;
1644 	struct ncsi_dev_priv *ndp;
1645 	struct ncsi_dev *nd;
1646 	bool found = false;
1647 
1648 	if (vid == 0)
1649 		return 0;
1650 
1651 	nd = ncsi_find_dev(dev);
1652 	if (!nd) {
1653 		netdev_warn(dev, "NCSI: no net_device?\n");
1654 		return 0;
1655 	}
1656 
1657 	ndp = TO_NCSI_DEV_PRIV(nd);
1658 
1659 	/* Remove the VLAN id from our internal list */
1660 	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1661 		if (vlan->vid == vid) {
1662 			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
1663 			list_del_rcu(&vlan->list);
1664 			found = true;
1665 			kfree(vlan);
1666 		}
1667 
1668 	if (!found) {
1669 		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
1670 		return -EINVAL;
1671 	}
1672 
1673 	found = ncsi_kick_channels(ndp) != 0;
1674 
1675 	return found ? ncsi_process_next_channel(ndp) : 0;
1676 }
1677 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
1678 
1679 struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1680 				   void (*handler)(struct ncsi_dev *ndev))
1681 {
1682 	struct ncsi_dev_priv *ndp;
1683 	struct ncsi_dev *nd;
1684 	struct platform_device *pdev;
1685 	struct device_node *np;
1686 	unsigned long flags;
1687 	int i;
1688 
1689 	/* Check if the device has been registered or not */
1690 	nd = ncsi_find_dev(dev);
1691 	if (nd)
1692 		return nd;
1693 
1694 	/* Create NCSI device */
1695 	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1696 	if (!ndp)
1697 		return NULL;
1698 
1699 	nd = &ndp->ndev;
1700 	nd->state = ncsi_dev_state_registered;
1701 	nd->dev = dev;
1702 	nd->handler = handler;
1703 	ndp->pending_req_num = 0;
1704 	INIT_LIST_HEAD(&ndp->channel_queue);
1705 	INIT_LIST_HEAD(&ndp->vlan_vids);
1706 	INIT_WORK(&ndp->work, ncsi_dev_work);
1707 	ndp->package_whitelist = UINT_MAX;
1708 
1709 	/* Initialize private NCSI device */
1710 	spin_lock_init(&ndp->lock);
1711 	INIT_LIST_HEAD(&ndp->packages);
1712 	ndp->request_id = NCSI_REQ_START_IDX;
1713 	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1714 		ndp->requests[i].id = i;
1715 		ndp->requests[i].ndp = ndp;
1716 		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
1717 	}
1718 
1719 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1720 	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1721 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1722 
1723 	/* Register NCSI packet Rx handler */
1724 	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1725 	ndp->ptype.func = ncsi_rcv_rsp;
1726 	ndp->ptype.dev = dev;
1727 	dev_add_pack(&ndp->ptype);
1728 
1729 	pdev = to_platform_device(dev->dev.parent);
1730 	if (pdev) {
1731 		np = pdev->dev.of_node;
1732 		if (np && of_get_property(np, "mlx,multi-host", NULL))
1733 			ndp->mlx_multi_host = true;
1734 	}
1735 
1736 	return nd;
1737 }
1738 EXPORT_SYMBOL_GPL(ncsi_register_dev);
1739 
1740 int ncsi_start_dev(struct ncsi_dev *nd)
1741 {
1742 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1743 
1744 	if (nd->state != ncsi_dev_state_registered &&
1745 	    nd->state != ncsi_dev_state_functional)
1746 		return -ENOTTY;
1747 
1748 	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1749 		ndp->package_probe_id = 0;
1750 		nd->state = ncsi_dev_state_probe;
1751 		schedule_work(&ndp->work);
1752 		return 0;
1753 	}
1754 
1755 	return ncsi_reset_dev(nd);
1756 }
1757 EXPORT_SYMBOL_GPL(ncsi_start_dev);
1758 
1759 void ncsi_stop_dev(struct ncsi_dev *nd)
1760 {
1761 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1762 	struct ncsi_package *np;
1763 	struct ncsi_channel *nc;
1764 	bool chained;
1765 	int old_state;
1766 	unsigned long flags;
1767 
1768 	/* Stop the channel monitor on any active channels. Don't reset the
1769 	 * channel state so we know which were active when ncsi_start_dev()
1770 	 * is next called.
1771 	 */
1772 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1773 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1774 			ncsi_stop_channel_monitor(nc);
1775 
1776 			spin_lock_irqsave(&nc->lock, flags);
1777 			chained = !list_empty(&nc->link);
1778 			old_state = nc->state;
1779 			spin_unlock_irqrestore(&nc->lock, flags);
1780 
1781 			WARN_ON_ONCE(chained ||
1782 				     old_state == NCSI_CHANNEL_INVISIBLE);
1783 		}
1784 	}
1785 
1786 	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
1787 	ncsi_report_link(ndp, true);
1788 }
1789 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1790 
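/* Reset the NCSI device: suspend the currently active channel if there is
 * one, then clear the reset flag and choose a new channel to configure. If
 * a suspend or configuration is already in flight, only flag the reset and
 * let that operation complete first.
 */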
1791 int ncsi_reset_dev(struct ncsi_dev *nd)
1792 {
1793 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1794 	struct ncsi_channel *nc, *active, *tmp;
1795 	struct ncsi_package *np;
1796 	unsigned long flags;
1797 
1798 	spin_lock_irqsave(&ndp->lock, flags);
1799 
1800 	if (!(ndp->flags & NCSI_DEV_RESET)) {
1801 		/* A reset hasn't been requested yet, check the current state */
1802 		switch (nd->state & ncsi_dev_state_major) {
1803 		case ncsi_dev_state_registered:
1804 		case ncsi_dev_state_probe:
1805 			/* Not even probed yet - do nothing */
1806 			spin_unlock_irqrestore(&ndp->lock, flags);
1807 			return 0;
1808 		case ncsi_dev_state_suspend:
1809 		case ncsi_dev_state_config:
1810 			/* Wait for the channel to finish its suspend/config
1811 			 * operation; once it finishes it will check for
1812 			 * NCSI_DEV_RESET and reset the state.
1813 			 */
1814 			ndp->flags |= NCSI_DEV_RESET;
1815 			spin_unlock_irqrestore(&ndp->lock, flags);
1816 			return 0;
1817 		}
1818 	} else {
1819 		switch (nd->state) {
1820 		case ncsi_dev_state_suspend_done:
1821 		case ncsi_dev_state_config_done:
1822 		case ncsi_dev_state_functional:
1823 			/* Ok */
1824 			break;
1825 		default:
1826 			/* A reset operation is already in progress */
1827 			spin_unlock_irqrestore(&ndp->lock, flags);
1828 			return 0;
1829 		}
1830 	}
1831 
1832 	if (!list_empty(&ndp->channel_queue)) {
1833 		/* Clear any channel queue we may have interrupted */
1834 		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
1835 			list_del_init(&nc->link);
1836 	}
1837 	spin_unlock_irqrestore(&ndp->lock, flags);
1838 
1839 	active = NULL;
1840 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1841 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1842 			spin_lock_irqsave(&nc->lock, flags);
1843 
1844 			if (nc->state == NCSI_CHANNEL_ACTIVE) {
1845 				active = nc;
1846 				nc->state = NCSI_CHANNEL_INVISIBLE;
1847 				spin_unlock_irqrestore(&nc->lock, flags);
1848 				ncsi_stop_channel_monitor(nc);
1849 				break;
1850 			}
1851 
1852 			spin_unlock_irqrestore(&nc->lock, flags);
1853 		}
1854 		if (active)
1855 			break;
1856 	}
1857 
1858 	if (!active) {
1859 		/* Done */
1860 		spin_lock_irqsave(&ndp->lock, flags);
1861 		ndp->flags &= ~NCSI_DEV_RESET;
1862 		spin_unlock_irqrestore(&ndp->lock, flags);
1863 		return ncsi_choose_active_channel(ndp);
1864 	}
1865 
1866 	spin_lock_irqsave(&ndp->lock, flags);
1867 	ndp->flags |= NCSI_DEV_RESET;
1868 	ndp->active_channel = active;
1869 	ndp->active_package = active->package;
1870 	spin_unlock_irqrestore(&ndp->lock, flags);
1871 
1872 	nd->state = ncsi_dev_state_suspend;
1873 	schedule_work(&ndp->work);
1874 	return 0;
1875 }
1876 
1877 void ncsi_unregister_dev(struct ncsi_dev *nd)
1878 {
1879 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1880 	struct ncsi_package *np, *tmp;
1881 	unsigned long flags;
1882 
1883 	dev_remove_pack(&ndp->ptype);
1884 
1885 	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1886 		ncsi_remove_package(np);
1887 
1888 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1889 	list_del_rcu(&ndp->node);
1890 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1891 
1892 	kfree(ndp);
1893 }
1894 EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
1895