1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/firmware.h>
11 #include <linux/etherdevice.h>
12 #include <linux/vmalloc.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_wakeirq.h>
17 
18 #include "wlcore.h"
19 #include "debug.h"
20 #include "wl12xx_80211.h"
21 #include "io.h"
22 #include "tx.h"
23 #include "ps.h"
24 #include "init.h"
25 #include "debugfs.h"
26 #include "testmode.h"
27 #include "vendor_cmd.h"
28 #include "scan.h"
29 #include "hw_ops.h"
30 #include "sysfs.h"
31 
32 #define WL1271_BOOT_RETRIES 3
33 #define WL1271_WAKEUP_TIMEOUT 500
34 
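/*
 * Module parameters: fwlog_param selects the firmware logger mode
 * ("continuous", "dbgpins" or "disable"); for bug_on_recovery and
 * no_recovery, -1 means "keep the value from the default configuration"
 * (see wlcore_adjust_conf() below).
 */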
35 static char *fwlog_param;
36 static int fwlog_mem_blocks = -1;
37 static int bug_on_recovery = -1;
38 static int no_recovery     = -1;
39 
40 static void __wl1271_op_remove_interface(struct wl1271 *wl,
41 					 struct ieee80211_vif *vif,
42 					 bool reset_tx_queues);
43 static void wlcore_op_stop_locked(struct wl1271 *wl);
44 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
45 
46 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
47 {
48 	int ret;
49 
50 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
51 		return -EINVAL;
52 
53 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
54 		return 0;
55 
56 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
57 		return 0;
58 
59 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
60 	if (ret < 0)
61 		return ret;
62 
63 	wl1271_info("Association completed.");
64 	return 0;
65 }
66 
67 static void wl1271_reg_notify(struct wiphy *wiphy,
68 			      struct regulatory_request *request)
69 {
70 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
71 	struct wl1271 *wl = hw->priv;
72 
73 	/* copy the current dfs region */
74 	if (request)
75 		wl->dfs_region = request->dfs_region;
76 
77 	wlcore_regdomain_config(wl);
78 }
79 
80 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
81 				   bool enable)
82 {
83 	int ret = 0;
84 
85 	/* we should hold wl->mutex */
86 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
87 	if (ret < 0)
88 		goto out;
89 
90 	if (enable)
91 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
92 	else
93 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
94 out:
95 	return ret;
96 }
97 
98 /*
99  * this function is called when the rx_streaming interval
100  * has been changed or rx_streaming should be disabled
101  */
102 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
103 {
104 	int ret = 0;
105 	int period = wl->conf.rx_streaming.interval;
106 
107 	/* don't reconfigure if rx_streaming is disabled */
108 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
109 		goto out;
110 
111 	/* reconfigure/disable according to new streaming_period */
112 	if (period &&
113 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
114 	    (wl->conf.rx_streaming.always ||
115 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
116 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
117 	else {
118 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
119 		/* don't cancel_work_sync since we might deadlock */
120 		del_timer_sync(&wlvif->rx_streaming_timer);
121 	}
122 out:
123 	return ret;
124 }
125 
126 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
127 {
128 	int ret;
129 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
130 						rx_streaming_enable_work);
131 	struct wl1271 *wl = wlvif->wl;
132 
133 	mutex_lock(&wl->mutex);
134 
135 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
136 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
137 	    (!wl->conf.rx_streaming.always &&
138 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
139 		goto out;
140 
141 	if (!wl->conf.rx_streaming.interval)
142 		goto out;
143 
144 	ret = pm_runtime_get_sync(wl->dev);
145 	if (ret < 0) {
146 		pm_runtime_put_noidle(wl->dev);
147 		goto out;
148 	}
149 
150 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
151 	if (ret < 0)
152 		goto out_sleep;
153 
154 	/* stop it after some time of inactivity */
155 	mod_timer(&wlvif->rx_streaming_timer,
156 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
157 
158 out_sleep:
159 	pm_runtime_mark_last_busy(wl->dev);
160 	pm_runtime_put_autosuspend(wl->dev);
161 out:
162 	mutex_unlock(&wl->mutex);
163 }
164 
165 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
166 {
167 	int ret;
168 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
169 						rx_streaming_disable_work);
170 	struct wl1271 *wl = wlvif->wl;
171 
172 	mutex_lock(&wl->mutex);
173 
174 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
175 		goto out;
176 
177 	ret = pm_runtime_get_sync(wl->dev);
178 	if (ret < 0) {
179 		pm_runtime_put_noidle(wl->dev);
180 		goto out;
181 	}
182 
183 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
184 	if (ret)
185 		goto out_sleep;
186 
187 out_sleep:
188 	pm_runtime_mark_last_busy(wl->dev);
189 	pm_runtime_put_autosuspend(wl->dev);
190 out:
191 	mutex_unlock(&wl->mutex);
192 }
193 
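/* rx streaming inactivity timeout - queue the disable work defined above */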
194 static void wl1271_rx_streaming_timer(struct timer_list *t)
195 {
196 	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
197 	struct wl1271 *wl = wlvif->wl;
198 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
199 }
200 
201 /* wl->mutex must be taken */
202 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
203 {
204 	/* if the watchdog is not armed, don't do anything */
205 	if (wl->tx_allocated_blocks == 0)
206 		return;
207 
208 	cancel_delayed_work(&wl->tx_watchdog_work);
209 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
210 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
211 }
212 
213 static void wlcore_rc_update_work(struct work_struct *work)
214 {
215 	int ret;
216 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
217 						rc_update_work);
218 	struct wl1271 *wl = wlvif->wl;
219 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
220 
221 	mutex_lock(&wl->mutex);
222 
223 	if (unlikely(wl->state != WLCORE_STATE_ON))
224 		goto out;
225 
226 	ret = pm_runtime_get_sync(wl->dev);
227 	if (ret < 0) {
228 		pm_runtime_put_noidle(wl->dev);
229 		goto out;
230 	}
231 
232 	if (ieee80211_vif_is_mesh(vif)) {
233 		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
234 						     true, wlvif->sta.hlid);
235 		if (ret < 0)
236 			goto out_sleep;
237 	} else {
238 		wlcore_hw_sta_rc_update(wl, wlvif);
239 	}
240 
241 out_sleep:
242 	pm_runtime_mark_last_busy(wl->dev);
243 	pm_runtime_put_autosuspend(wl->dev);
244 out:
245 	mutex_unlock(&wl->mutex);
246 }
247 
248 static void wl12xx_tx_watchdog_work(struct work_struct *work)
249 {
250 	struct delayed_work *dwork;
251 	struct wl1271 *wl;
252 
253 	dwork = to_delayed_work(work);
254 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
255 
256 	mutex_lock(&wl->mutex);
257 
258 	if (unlikely(wl->state != WLCORE_STATE_ON))
259 		goto out;
260 
261 	/* Tx went out in the meantime - everything is ok */
262 	if (unlikely(wl->tx_allocated_blocks == 0))
263 		goto out;
264 
265 	/*
266 	 * if a ROC is in progress, we might not have any Tx for a long
267 	 * time (e.g. pending Tx on the non-ROC channels)
268 	 */
269 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
270 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
271 			     wl->conf.tx.tx_watchdog_timeout);
272 		wl12xx_rearm_tx_watchdog_locked(wl);
273 		goto out;
274 	}
275 
276 	/*
277 	 * if a scan is in progress, we might not have any Tx for a long
278 	 * time
279 	 */
280 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
281 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
282 			     wl->conf.tx.tx_watchdog_timeout);
283 		wl12xx_rearm_tx_watchdog_locked(wl);
284 		goto out;
285 	}
286 
287 	/*
288 	 * AP might cache a frame for a long time for a sleeping station,
289 	 * so rearm the timer if there's an AP interface with stations. If
290 	 * Tx is genuinely stuck we will hopefully discover it when all
291 	 * stations are removed due to inactivity.
292 	 */
293 	if (wl->active_sta_count) {
294 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
295 			     " %d stations",
296 			      wl->conf.tx.tx_watchdog_timeout,
297 			      wl->active_sta_count);
298 		wl12xx_rearm_tx_watchdog_locked(wl);
299 		goto out;
300 	}
301 
302 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
303 		     wl->conf.tx.tx_watchdog_timeout);
304 	wl12xx_queue_recovery_work(wl);
305 
306 out:
307 	mutex_unlock(&wl->mutex);
308 }
309 
310 static void wlcore_adjust_conf(struct wl1271 *wl)
311 {
312 
313 	if (fwlog_param) {
314 		if (!strcmp(fwlog_param, "continuous")) {
315 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
316 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
317 		} else if (!strcmp(fwlog_param, "dbgpins")) {
318 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
319 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
320 		} else if (!strcmp(fwlog_param, "disable")) {
321 			wl->conf.fwlog.mem_blocks = 0;
322 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
323 		} else {
324 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
325 		}
326 	}
327 
328 	if (bug_on_recovery != -1)
329 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
330 
331 	if (no_recovery != -1)
332 		wl->conf.recovery.no_recovery = (u8) no_recovery;
333 }
334 
335 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
336 					struct wl12xx_vif *wlvif,
337 					u8 hlid, u8 tx_pkts)
338 {
339 	bool fw_ps;
340 
341 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
342 
343 	/*
344 	 * Wake up from high-level PS if the STA is asleep with too few
345 	 * packets in FW or if the STA is awake.
346 	 */
347 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
348 		wl12xx_ps_link_end(wl, wlvif, hlid);
349 
350 	/*
351 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
352 	 * Make an exception if this is the only connected link. In this
353 	 * case FW-memory congestion is less of a problem.
354 	 * Note that a single connected STA means 2*ap_count + 1 active links,
355 	 * since we must account for the global and broadcast AP links
356 	 * for each AP. The "fw_ps" check assures us the other link is a STA
357 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
358 	 */
359 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
360 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
361 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
362 }
363 
364 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
365 					   struct wl12xx_vif *wlvif,
366 					   struct wl_fw_status *status)
367 {
368 	unsigned long cur_fw_ps_map;
369 	u8 hlid;
370 
371 	cur_fw_ps_map = status->link_ps_bitmap;
372 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
373 		wl1271_debug(DEBUG_PSM,
374 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
375 			     wl->ap_fw_ps_map, cur_fw_ps_map,
376 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
377 
378 		wl->ap_fw_ps_map = cur_fw_ps_map;
379 	}
380 
381 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
382 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
383 					    wl->links[hlid].allocated_pkts);
384 }
385 
386 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
387 {
388 	struct wl12xx_vif *wlvif;
389 	u32 old_tx_blk_count = wl->tx_blocks_available;
390 	int avail, freed_blocks;
391 	int i;
392 	int ret;
393 	struct wl1271_link *lnk;
394 
395 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
396 				   wl->raw_fw_status,
397 				   wl->fw_status_len, false);
398 	if (ret < 0)
399 		return ret;
400 
401 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
402 
403 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
404 		     "drv_rx_counter = %d, tx_results_counter = %d)",
405 		     status->intr,
406 		     status->fw_rx_counter,
407 		     status->drv_rx_counter,
408 		     status->tx_results_counter);
409 
410 	for (i = 0; i < NUM_TX_QUEUES; i++) {
411 		/* prevent wrap-around in freed-packets counter */
412 		wl->tx_allocated_pkts[i] -=
413 				(status->counters.tx_released_pkts[i] -
414 				wl->tx_pkts_freed[i]) & 0xff;
415 
416 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
417 	}
418 
419 
420 	for_each_set_bit(i, wl->links_map, wl->num_links) {
421 		u8 diff;
422 		lnk = &wl->links[i];
423 
424 		/* prevent wrap-around in freed-packets counter */
425 		diff = (status->counters.tx_lnk_free_pkts[i] -
426 		       lnk->prev_freed_pkts) & 0xff;
427 
428 		if (diff == 0)
429 			continue;
430 
431 		lnk->allocated_pkts -= diff;
432 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
433 
434 		/* accumulate the prev_freed_pkts counter */
435 		lnk->total_freed_pkts += diff;
436 	}
437 
438 	/* prevent wrap-around in total blocks counter */
439 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
440 		freed_blocks = status->total_released_blks -
441 			       wl->tx_blocks_freed;
442 	else
443 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
444 			       status->total_released_blks;
445 
446 	wl->tx_blocks_freed = status->total_released_blks;
447 
448 	wl->tx_allocated_blocks -= freed_blocks;
449 
450 	/*
451 	 * If the FW freed some blocks:
452 	 * If we still have allocated blocks - re-arm the timer, Tx is
453 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
454 	 */
455 	if (freed_blocks) {
456 		if (wl->tx_allocated_blocks)
457 			wl12xx_rearm_tx_watchdog_locked(wl);
458 		else
459 			cancel_delayed_work(&wl->tx_watchdog_work);
460 	}
461 
462 	avail = status->tx_total - wl->tx_allocated_blocks;
463 
464 	/*
465 	 * The FW might change the total number of TX memblocks before
466 	 * we get a notification about blocks being released. Thus, the
467 	 * available blocks calculation might yield a temporary result
468 	 * which is lower than the actual available blocks. Keeping in
469 	 * mind that only blocks that were allocated can be moved from
470 	 * TX to RX, tx_blocks_available should never decrease here.
471 	 */
472 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
473 				      avail);
474 
475 	/* if more blocks are available now, tx work can be scheduled */
476 	if (wl->tx_blocks_available > old_tx_blk_count)
477 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
478 
479 	/* for AP, update the number of allocated TX blocks per link and PS status */
480 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
481 		wl12xx_irq_update_links_status(wl, wlvif, status);
482 	}
483 
484 	/* update the host-chipset time offset */
485 	wl->time_offset = (ktime_get_boottime_ns() >> 10) -
486 		(s64)(status->fw_localtime);
487 
488 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
489 
490 	return 0;
491 }
492 
493 static void wl1271_flush_deferred_work(struct wl1271 *wl)
494 {
495 	struct sk_buff *skb;
496 
497 	/* Pass all received frames to the network stack */
498 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
499 		ieee80211_rx_ni(wl->hw, skb);
500 
501 	/* Return sent skbs to the network stack */
502 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
503 		ieee80211_tx_status_ni(wl->hw, skb);
504 }
505 
506 static void wl1271_netstack_work(struct work_struct *work)
507 {
508 	struct wl1271 *wl =
509 		container_of(work, struct wl1271, netstack_work);
510 
511 	do {
512 		wl1271_flush_deferred_work(wl);
513 	} while (skb_queue_len(&wl->deferred_rx_queue));
514 }
515 
516 #define WL1271_IRQ_MAX_LOOPS 256
517 
518 static int wlcore_irq_locked(struct wl1271 *wl)
519 {
520 	int ret = 0;
521 	u32 intr;
522 	int loopcount = WL1271_IRQ_MAX_LOOPS;
523 	bool run_tx_queue = true;
524 	bool done = false;
525 	unsigned int defer_count;
526 	unsigned long flags;
527 
528 	/*
529 	 * If an edge-triggered interrupt must be used, we cannot iterate
530 	 * more than once without introducing race conditions with the hardirq.
531 	 */
532 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
533 		loopcount = 1;
534 
535 	wl1271_debug(DEBUG_IRQ, "IRQ work");
536 
537 	if (unlikely(wl->state != WLCORE_STATE_ON))
538 		goto out;
539 
540 	ret = pm_runtime_get_sync(wl->dev);
541 	if (ret < 0) {
542 		pm_runtime_put_noidle(wl->dev);
543 		goto out;
544 	}
545 
546 	while (!done && loopcount--) {
547 		smp_mb__after_atomic();
548 
549 		ret = wlcore_fw_status(wl, wl->fw_status);
550 		if (ret < 0)
551 			goto err_ret;
552 
553 		wlcore_hw_tx_immediate_compl(wl);
554 
555 		intr = wl->fw_status->intr;
556 		intr &= WLCORE_ALL_INTR_MASK;
557 		if (!intr) {
558 			done = true;
559 			continue;
560 		}
561 
562 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
563 			wl1271_error("HW watchdog interrupt received! starting recovery.");
564 			wl->watchdog_recovery = true;
565 			ret = -EIO;
566 
567 			/* restarting the chip. ignore any other interrupt. */
568 			goto err_ret;
569 		}
570 
571 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
572 			wl1271_error("SW watchdog interrupt received! "
573 				     "starting recovery.");
574 			wl->watchdog_recovery = true;
575 			ret = -EIO;
576 
577 			/* restarting the chip. ignore any other interrupt. */
578 			goto err_ret;
579 		}
580 
581 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
582 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
583 
584 			ret = wlcore_rx(wl, wl->fw_status);
585 			if (ret < 0)
586 				goto err_ret;
587 
588 			/* Check if any tx blocks were freed */
589 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
590 				if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
591 					if (!wl1271_tx_total_queue_count(wl))
592 						run_tx_queue = false;
593 					spin_unlock_irqrestore(&wl->wl_lock, flags);
594 				}
595 
596 				/*
597 				 * In order to avoid starvation of the TX path,
598 				 * call the work function directly.
599 				 */
600 				if (run_tx_queue) {
601 					ret = wlcore_tx_work_locked(wl);
602 					if (ret < 0)
603 						goto err_ret;
604 				}
605 			}
606 
607 			/* check for tx results */
608 			ret = wlcore_hw_tx_delayed_compl(wl);
609 			if (ret < 0)
610 				goto err_ret;
611 
612 			/* Make sure the deferred queues don't get too long */
613 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
614 				      skb_queue_len(&wl->deferred_rx_queue);
615 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
616 				wl1271_flush_deferred_work(wl);
617 		}
618 
619 		if (intr & WL1271_ACX_INTR_EVENT_A) {
620 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
621 			ret = wl1271_event_handle(wl, 0);
622 			if (ret < 0)
623 				goto err_ret;
624 		}
625 
626 		if (intr & WL1271_ACX_INTR_EVENT_B) {
627 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
628 			ret = wl1271_event_handle(wl, 1);
629 			if (ret < 0)
630 				goto err_ret;
631 		}
632 
633 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
634 			wl1271_debug(DEBUG_IRQ,
635 				     "WL1271_ACX_INTR_INIT_COMPLETE");
636 
637 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
638 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
639 	}
640 
641 err_ret:
642 	pm_runtime_mark_last_busy(wl->dev);
643 	pm_runtime_put_autosuspend(wl->dev);
644 
645 out:
646 	return ret;
647 }
648 
649 static irqreturn_t wlcore_irq(int irq, void *cookie)
650 {
651 	int ret;
652 	unsigned long flags;
653 	struct wl1271 *wl = cookie;
654 	bool queue_tx_work = true;
655 
656 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
657 
658 	/* complete the ELP completion */
659 	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) {
660 		spin_lock_irqsave(&wl->wl_lock, flags);
661 		if (wl->elp_compl)
662 			complete(wl->elp_compl);
663 		spin_unlock_irqrestore(&wl->wl_lock, flags);
664 	}
665 
666 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
667 		/* don't enqueue a work right now. mark it as pending */
668 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
669 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
670 		spin_lock_irqsave(&wl->wl_lock, flags);
671 		disable_irq_nosync(wl->irq);
672 		pm_wakeup_event(wl->dev, 0);
673 		spin_unlock_irqrestore(&wl->wl_lock, flags);
674 		goto out_handled;
675 	}
676 
677 	/* TX might be handled here, avoid redundant work */
678 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
679 	cancel_work_sync(&wl->tx_work);
680 
681 	mutex_lock(&wl->mutex);
682 
683 	ret = wlcore_irq_locked(wl);
684 	if (ret)
685 		wl12xx_queue_recovery_work(wl);
686 
687 	/* In case TX was not handled in wlcore_irq_locked(), queue TX work */
688 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
689 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
690 		if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
691 			if (!wl1271_tx_total_queue_count(wl))
692 				queue_tx_work = false;
693 			spin_unlock_irqrestore(&wl->wl_lock, flags);
694 		}
695 		if (queue_tx_work)
696 			ieee80211_queue_work(wl->hw, &wl->tx_work);
697 	}
698 
699 	mutex_unlock(&wl->mutex);
700 
701 out_handled:
702 	clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
703 
704 	return IRQ_HANDLED;
705 }
706 
707 struct vif_counter_data {
708 	u8 counter;
709 
710 	struct ieee80211_vif *cur_vif;
711 	bool cur_vif_running;
712 };
713 
714 static void wl12xx_vif_count_iter(void *data, u8 *mac,
715 				  struct ieee80211_vif *vif)
716 {
717 	struct vif_counter_data *counter = data;
718 
719 	counter->counter++;
720 	if (counter->cur_vif == vif)
721 		counter->cur_vif_running = true;
722 }
723 
724 /* caller must not hold wl->mutex, as it might deadlock */
725 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
726 			       struct ieee80211_vif *cur_vif,
727 			       struct vif_counter_data *data)
728 {
729 	memset(data, 0, sizeof(*data));
730 	data->cur_vif = cur_vif;
731 
732 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
733 					    wl12xx_vif_count_iter, data);
734 }
735 
736 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
737 {
738 	const struct firmware *fw;
739 	const char *fw_name;
740 	enum wl12xx_fw_type fw_type;
741 	int ret;
742 
743 	if (plt) {
744 		fw_type = WL12XX_FW_TYPE_PLT;
745 		fw_name = wl->plt_fw_name;
746 	} else {
747 		/*
748 		 * we can't call wl12xx_get_vif_count() here because
749 		 * wl->mutex is taken, so use the cached last_vif_count value
750 		 */
751 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
752 			fw_type = WL12XX_FW_TYPE_MULTI;
753 			fw_name = wl->mr_fw_name;
754 		} else {
755 			fw_type = WL12XX_FW_TYPE_NORMAL;
756 			fw_name = wl->sr_fw_name;
757 		}
758 	}
759 
760 	if (wl->fw_type == fw_type)
761 		return 0;
762 
763 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
764 
765 	ret = request_firmware(&fw, fw_name, wl->dev);
766 
767 	if (ret < 0) {
768 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
769 		return ret;
770 	}
771 
772 	if (fw->size % 4) {
773 		wl1271_error("firmware size is not multiple of 32 bits: %zu",
774 			     fw->size);
775 		ret = -EILSEQ;
776 		goto out;
777 	}
778 
779 	vfree(wl->fw);
780 	wl->fw_type = WL12XX_FW_TYPE_NONE;
781 	wl->fw_len = fw->size;
782 	wl->fw = vmalloc(wl->fw_len);
783 
784 	if (!wl->fw) {
785 		wl1271_error("could not allocate memory for the firmware");
786 		ret = -ENOMEM;
787 		goto out;
788 	}
789 
790 	memcpy(wl->fw, fw->data, wl->fw_len);
791 	ret = 0;
792 	wl->fw_type = fw_type;
793 out:
794 	release_firmware(fw);
795 
796 	return ret;
797 }
798 
799 void wl12xx_queue_recovery_work(struct wl1271 *wl)
800 {
801 	/* Avoid a recursive recovery */
802 	if (wl->state == WLCORE_STATE_ON) {
803 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
804 				  &wl->flags));
805 
806 		wl->state = WLCORE_STATE_RESTARTING;
807 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
808 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
809 	}
810 }
811 
812 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
813 {
814 	size_t len;
815 
816 	/* Make sure we have enough room */
817 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
818 
819 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
820 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
821 	wl->fwlog_size += len;
822 
823 	return len;
824 }
825 
826 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
827 {
828 	u32 end_of_log = 0;
829 	int error;
830 
831 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
832 		return;
833 
834 	wl1271_info("Reading FW panic log");
835 
836 	/*
837 	 * Make sure the chip is awake and the logger isn't active.
838 	 * Do not send a stop fwlog command if the fw is hung or if
839 	 * dbgpins are used (due to some fw bug).
840 	 */
841 	error = pm_runtime_get_sync(wl->dev);
842 	if (error < 0) {
843 		pm_runtime_put_noidle(wl->dev);
844 		return;
845 	}
846 	if (!wl->watchdog_recovery &&
847 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
848 		wl12xx_cmd_stop_fwlog(wl);
849 
850 	/* Traverse the memory blocks linked list */
851 	do {
852 		end_of_log = wlcore_event_fw_logger(wl);
853 		if (end_of_log == 0) {
854 			msleep(100);
855 			end_of_log = wlcore_event_fw_logger(wl);
856 		}
857 	} while (end_of_log != 0);
858 }
859 
860 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
861 				   u8 hlid, struct ieee80211_sta *sta)
862 {
863 	struct wl1271_station *wl_sta;
864 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
865 
866 	wl_sta = (void *)sta->drv_priv;
867 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
868 
869 	/*
870 	 * increment the initial seq number on recovery to account for
871 	 * transmitted packets that we haven't yet got in the FW status
872 	 */
873 	if (wlvif->encryption_type == KEY_GEM)
874 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
875 
876 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
877 		wl_sta->total_freed_pkts += sqn_recovery_padding;
878 }
879 
880 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
881 					struct wl12xx_vif *wlvif,
882 					u8 hlid, const u8 *addr)
883 {
884 	struct ieee80211_sta *sta;
885 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
886 
887 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
888 		    is_zero_ether_addr(addr)))
889 		return;
890 
891 	rcu_read_lock();
892 	sta = ieee80211_find_sta(vif, addr);
893 	if (sta)
894 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
895 	rcu_read_unlock();
896 }
897 
898 static void wlcore_print_recovery(struct wl1271 *wl)
899 {
900 	u32 pc = 0;
901 	u32 hint_sts = 0;
902 	int ret;
903 
904 	wl1271_info("Hardware recovery in progress. FW ver: %s",
905 		    wl->chip.fw_ver_str);
906 
907 	/* change partitions momentarily so we can read the FW pc */
908 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
909 	if (ret < 0)
910 		return;
911 
912 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
913 	if (ret < 0)
914 		return;
915 
916 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
917 	if (ret < 0)
918 		return;
919 
920 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
921 				pc, hint_sts, ++wl->recovery_count);
922 
923 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
924 }
925 
926 
927 static void wl1271_recovery_work(struct work_struct *work)
928 {
929 	struct wl1271 *wl =
930 		container_of(work, struct wl1271, recovery_work);
931 	struct wl12xx_vif *wlvif;
932 	struct ieee80211_vif *vif;
933 	int error;
934 
935 	mutex_lock(&wl->mutex);
936 
937 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
938 		goto out_unlock;
939 
940 	error = pm_runtime_get_sync(wl->dev);
941 	if (error < 0) {
942 		wl1271_warning("Enable for recovery failed");
943 		pm_runtime_put_noidle(wl->dev);
944 	}
945 	wlcore_disable_interrupts_nosync(wl);
946 
947 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
948 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
949 			wl12xx_read_fwlog_panic(wl);
950 		wlcore_print_recovery(wl);
951 	}
952 
953 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
954 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
955 
956 	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
957 
958 	if (wl->conf.recovery.no_recovery) {
959 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
960 		goto out_unlock;
961 	}
962 
963 	/* Prevent spurious TX during FW restart */
964 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
965 
966 	/* reboot the chipset */
967 	while (!list_empty(&wl->wlvif_list)) {
968 		wlvif = list_first_entry(&wl->wlvif_list,
969 				       struct wl12xx_vif, list);
970 		vif = wl12xx_wlvif_to_vif(wlvif);
971 
972 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
973 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
974 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
975 						    vif->bss_conf.bssid);
976 		}
977 
978 		__wl1271_op_remove_interface(wl, vif, false);
979 	}
980 
981 	wlcore_op_stop_locked(wl);
982 	pm_runtime_mark_last_busy(wl->dev);
983 	pm_runtime_put_autosuspend(wl->dev);
984 
985 	ieee80211_restart_hw(wl->hw);
986 
987 	/*
988 	 * It's safe to enable TX now - the queues are stopped after a request
989 	 * to restart the HW.
990 	 */
991 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
992 
993 out_unlock:
994 	wl->watchdog_recovery = false;
995 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
996 	mutex_unlock(&wl->mutex);
997 }
998 
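/* request a wake-up from ELP by writing to the ELP control register */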
999 static int wlcore_fw_wakeup(struct wl1271 *wl)
1000 {
1001 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1002 }
1003 
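/* allocate the FW status and TX result buffers used during operation */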
1004 static int wl1271_setup(struct wl1271 *wl)
1005 {
1006 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1007 	if (!wl->raw_fw_status)
1008 		goto err;
1009 
1010 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1011 	if (!wl->fw_status)
1012 		goto err;
1013 
1014 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1015 	if (!wl->tx_res_if)
1016 		goto err;
1017 
1018 	return 0;
1019 err:
1020 	kfree(wl->fw_status);
1021 	kfree(wl->raw_fw_status);
1022 	return -ENOMEM;
1023 }
1024 
1025 static int wl12xx_set_power_on(struct wl1271 *wl)
1026 {
1027 	int ret;
1028 
1029 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1030 	ret = wl1271_power_on(wl);
1031 	if (ret < 0)
1032 		goto out;
1033 	msleep(WL1271_POWER_ON_SLEEP);
1034 	wl1271_io_reset(wl);
1035 	wl1271_io_init(wl);
1036 
1037 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1038 	if (ret < 0)
1039 		goto fail;
1040 
1041 	/* ELP module wake up */
1042 	ret = wlcore_fw_wakeup(wl);
1043 	if (ret < 0)
1044 		goto fail;
1045 
1046 out:
1047 	return ret;
1048 
1049 fail:
1050 	wl1271_power_off(wl);
1051 	return ret;
1052 }
1053 
1054 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1055 {
1056 	int ret = 0;
1057 
1058 	ret = wl12xx_set_power_on(wl);
1059 	if (ret < 0)
1060 		goto out;
1061 
1062 	/*
1063 	 * For wl127x based devices we could use the default block
1064 	 * size (512 bytes), but due to a bug in the sdio driver, we
1065 	 * need to set it explicitly after the chip is powered on.  To
1066 	 * simplify the code and since the performance impact is
1067 	 * negligible, we use the same block size for all different
1068 	 * chip types.
1069 	 *
1070 	 * Check if the bus supports blocksize alignment and, if it
1071 	 * doesn't, make sure we don't have the quirk.
1072 	 */
1073 	if (!wl1271_set_block_size(wl))
1074 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1075 
1076 	/* TODO: make sure the lower driver has set things up correctly */
1077 
1078 	ret = wl1271_setup(wl);
1079 	if (ret < 0)
1080 		goto out;
1081 
1082 	ret = wl12xx_fetch_firmware(wl, plt);
1083 	if (ret < 0) {
1084 		kfree(wl->fw_status);
1085 		kfree(wl->raw_fw_status);
1086 		kfree(wl->tx_res_if);
1087 	}
1088 
1089 out:
1090 	return ret;
1091 }
1092 
1093 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1094 {
1095 	int retries = WL1271_BOOT_RETRIES;
1096 	struct wiphy *wiphy = wl->hw->wiphy;
1097 
1098 	static const char* const PLT_MODE[] = {
1099 		"PLT_OFF",
1100 		"PLT_ON",
1101 		"PLT_FEM_DETECT",
1102 		"PLT_CHIP_AWAKE"
1103 	};
1104 
1105 	int ret;
1106 
1107 	mutex_lock(&wl->mutex);
1108 
1109 	wl1271_notice("power up");
1110 
1111 	if (wl->state != WLCORE_STATE_OFF) {
1112 		wl1271_error("cannot go into PLT state because not "
1113 			     "in off state: %d", wl->state);
1114 		ret = -EBUSY;
1115 		goto out;
1116 	}
1117 
1118 	/* Indicate to lower levels that we are now in PLT mode */
1119 	wl->plt = true;
1120 	wl->plt_mode = plt_mode;
1121 
1122 	while (retries) {
1123 		retries--;
1124 		ret = wl12xx_chip_wakeup(wl, true);
1125 		if (ret < 0)
1126 			goto power_off;
1127 
1128 		if (plt_mode != PLT_CHIP_AWAKE) {
1129 			ret = wl->ops->plt_init(wl);
1130 			if (ret < 0)
1131 				goto power_off;
1132 		}
1133 
1134 		wl->state = WLCORE_STATE_ON;
1135 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1136 			      PLT_MODE[plt_mode],
1137 			      wl->chip.fw_ver_str);
1138 
1139 		/* update hw/fw version info in wiphy struct */
1140 		wiphy->hw_version = wl->chip.id;
1141 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1142 			sizeof(wiphy->fw_version));
1143 
1144 		goto out;
1145 
1146 power_off:
1147 		wl1271_power_off(wl);
1148 	}
1149 
1150 	wl->plt = false;
1151 	wl->plt_mode = PLT_OFF;
1152 
1153 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1154 		     WL1271_BOOT_RETRIES);
1155 out:
1156 	mutex_unlock(&wl->mutex);
1157 
1158 	return ret;
1159 }
1160 
1161 int wl1271_plt_stop(struct wl1271 *wl)
1162 {
1163 	int ret = 0;
1164 
1165 	wl1271_notice("power down");
1166 
1167 	/*
1168 	 * Interrupts must be disabled before setting the state to OFF.
1169 	 * Otherwise, the interrupt handler might be called and exit without
1170 	 * reading the interrupt status.
1171 	 */
1172 	wlcore_disable_interrupts(wl);
1173 	mutex_lock(&wl->mutex);
1174 	if (!wl->plt) {
1175 		mutex_unlock(&wl->mutex);
1176 
1177 		/*
1178 		 * This will not necessarily enable interrupts as interrupts
1179 		 * may have been disabled when op_stop was called. It will,
1180 		 * however, balance the above call to disable_interrupts().
1181 		 */
1182 		wlcore_enable_interrupts(wl);
1183 
1184 		wl1271_error("cannot power down because not in PLT "
1185 			     "state: %d", wl->state);
1186 		ret = -EBUSY;
1187 		goto out;
1188 	}
1189 
1190 	mutex_unlock(&wl->mutex);
1191 
1192 	wl1271_flush_deferred_work(wl);
1193 	cancel_work_sync(&wl->netstack_work);
1194 	cancel_work_sync(&wl->recovery_work);
1195 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1196 
1197 	mutex_lock(&wl->mutex);
1198 	wl1271_power_off(wl);
1199 	wl->flags = 0;
1200 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1201 	wl->state = WLCORE_STATE_OFF;
1202 	wl->plt = false;
1203 	wl->plt_mode = PLT_OFF;
1204 	wl->rx_counter = 0;
1205 	mutex_unlock(&wl->mutex);
1206 
1207 out:
1208 	return ret;
1209 }
1210 
1211 static void wl1271_op_tx(struct ieee80211_hw *hw,
1212 			 struct ieee80211_tx_control *control,
1213 			 struct sk_buff *skb)
1214 {
1215 	struct wl1271 *wl = hw->priv;
1216 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1217 	struct ieee80211_vif *vif = info->control.vif;
1218 	struct wl12xx_vif *wlvif = NULL;
1219 	unsigned long flags;
1220 	int q, mapping;
1221 	u8 hlid;
1222 
1223 	if (!vif) {
1224 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1225 		ieee80211_free_txskb(hw, skb);
1226 		return;
1227 	}
1228 
1229 	wlvif = wl12xx_vif_to_data(vif);
1230 	mapping = skb_get_queue_mapping(skb);
1231 	q = wl1271_tx_get_queue(mapping);
1232 
1233 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1234 
1235 	spin_lock_irqsave(&wl->wl_lock, flags);
1236 
1237 	/*
1238 	 * drop the packet if the link is invalid or the queue is stopped
1239 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1240 	 * allow these packets through.
1241 	 */
1242 	if (hlid == WL12XX_INVALID_LINK_ID ||
1243 	    (!test_bit(hlid, wlvif->links_map)) ||
1244 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1245 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1246 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1247 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1248 		ieee80211_free_txskb(hw, skb);
1249 		goto out;
1250 	}
1251 
1252 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1253 		     hlid, q, skb->len);
1254 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1255 
1256 	wl->tx_queue_count[q]++;
1257 	wlvif->tx_queue_count[q]++;
1258 
1259 	/*
1260 	 * The workqueue is slow to process the tx_queue and we need to stop
1261 	 * the queue here, otherwise the queue will get too long.
1262 	 */
1263 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1264 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1265 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1266 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1267 		wlcore_stop_queue_locked(wl, wlvif, q,
1268 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1269 	}
1270 
1271 	/*
1272 	 * The chip specific setup must run before the first TX packet -
1273 	 * before that, the tx_work will not be initialized!
1274 	 */
1275 
1276 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1277 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1278 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1279 
1280 out:
1281 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1282 }
1283 
1284 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1285 {
1286 	unsigned long flags;
1287 	int q;
1288 
1289 	/* no need to queue a new dummy packet if one is already pending */
1290 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1291 		return 0;
1292 
1293 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1294 
1295 	spin_lock_irqsave(&wl->wl_lock, flags);
1296 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1297 	wl->tx_queue_count[q]++;
1298 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1299 
1300 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1301 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1302 		return wlcore_tx_work_locked(wl);
1303 
1304 	/*
1305 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1306 	 * interrupt handler function
1307 	 */
1308 	return 0;
1309 }
1310 
1311 /*
1312  * The size of the dummy packet should be at least 1400 bytes. However, in
1313  * order to minimize the number of bus transactions, aligning it to 512-byte
1314  * boundaries could be beneficial, performance-wise.
1315  */
1316 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1317 
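/* build a zeroed NULL-data (To DS) frame used as the firmware dummy packet */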
1318 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1319 {
1320 	struct sk_buff *skb;
1321 	struct ieee80211_hdr_3addr *hdr;
1322 	unsigned int dummy_packet_size;
1323 
1324 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1325 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1326 
1327 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1328 	if (!skb) {
1329 		wl1271_warning("Failed to allocate a dummy packet skb");
1330 		return NULL;
1331 	}
1332 
1333 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1334 
1335 	hdr = skb_put_zero(skb, sizeof(*hdr));
1336 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1337 					 IEEE80211_STYPE_NULLFUNC |
1338 					 IEEE80211_FCTL_TODS);
1339 
1340 	skb_put_zero(skb, dummy_packet_size);
1341 
1342 	/* Dummy packets require the TID to be management */
1343 	skb->priority = WL1271_TID_MGMT;
1344 
1345 	/* Initialize all fields that might be used */
1346 	skb_set_queue_mapping(skb, 0);
1347 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1348 
1349 	return skb;
1350 }
1351 
1352 
1353 static int
1354 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1355 {
1356 	int num_fields = 0, in_field = 0, fields_size = 0;
1357 	int i, pattern_len = 0;
1358 
1359 	if (!p->mask) {
1360 		wl1271_warning("No mask in WoWLAN pattern");
1361 		return -EINVAL;
1362 	}
1363 
1364 	/*
1365 	 * The pattern is broken up into segments of bytes at different offsets
1366 	 * that need to be checked by the FW filter. Each segment is called
1367 	 * a field in the FW API. We verify that the total number of fields
1368 	 * required for this pattern won't exceed the FW limit (8) and that
1369 	 * the total fields buffer won't exceed the FW limit.
1370 	 * Note that if there's a pattern which crosses Ethernet/IP header
1371 	 * boundary a new field is required.
1372 	 */
1373 	for (i = 0; i < p->pattern_len; i++) {
1374 		if (test_bit(i, (unsigned long *)p->mask)) {
1375 			if (!in_field) {
1376 				in_field = 1;
1377 				pattern_len = 1;
1378 			} else {
1379 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1380 					num_fields++;
1381 					fields_size += pattern_len +
1382 						RX_FILTER_FIELD_OVERHEAD;
1383 					pattern_len = 1;
1384 				} else
1385 					pattern_len++;
1386 			}
1387 		} else {
1388 			if (in_field) {
1389 				in_field = 0;
1390 				fields_size += pattern_len +
1391 					RX_FILTER_FIELD_OVERHEAD;
1392 				num_fields++;
1393 			}
1394 		}
1395 	}
1396 
1397 	if (in_field) {
1398 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1399 		num_fields++;
1400 	}
1401 
1402 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1403 		wl1271_warning("RX Filter too complex. Too many segments");
1404 		return -EINVAL;
1405 	}
1406 
1407 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1408 		wl1271_warning("RX filter pattern is too big");
1409 		return -E2BIG;
1410 	}
1411 
1412 	return 0;
1413 }
1414 
1415 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1416 {
1417 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1418 }
1419 
1420 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1421 {
1422 	int i;
1423 
1424 	if (filter == NULL)
1425 		return;
1426 
1427 	for (i = 0; i < filter->num_fields; i++)
1428 		kfree(filter->fields[i].pattern);
1429 
1430 	kfree(filter);
1431 }
1432 
1433 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1434 				 u16 offset, u8 flags,
1435 				 const u8 *pattern, u8 len)
1436 {
1437 	struct wl12xx_rx_filter_field *field;
1438 
1439 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1440 		wl1271_warning("Max fields per RX filter. can't alloc another");
1441 		return -EINVAL;
1442 	}
1443 
1444 	field = &filter->fields[filter->num_fields];
1445 
1446 	field->pattern = kmemdup(pattern, len, GFP_KERNEL);
1447 	if (!field->pattern) {
1448 		wl1271_warning("Failed to allocate RX filter pattern");
1449 		return -ENOMEM;
1450 	}
1451 
1452 	filter->num_fields++;
1453 
1454 	field->offset = cpu_to_le16(offset);
1455 	field->flags = flags;
1456 	field->len = len;
1457 
1458 	return 0;
1459 }
1460 
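/* total buffer size needed to flatten all fields: each field header minus the
 * pattern pointer, plus the length of its pattern bytes */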
1461 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1462 {
1463 	int i, fields_size = 0;
1464 
1465 	for (i = 0; i < filter->num_fields; i++)
1466 		fields_size += filter->fields[i].len +
1467 			sizeof(struct wl12xx_rx_filter_field) -
1468 			sizeof(u8 *);
1469 
1470 	return fields_size;
1471 }
1472 
1473 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1474 				    u8 *buf)
1475 {
1476 	int i;
1477 	struct wl12xx_rx_filter_field *field;
1478 
1479 	for (i = 0; i < filter->num_fields; i++) {
1480 		field = (struct wl12xx_rx_filter_field *)buf;
1481 
1482 		field->offset = filter->fields[i].offset;
1483 		field->flags = filter->fields[i].flags;
1484 		field->len = filter->fields[i].len;
1485 
1486 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1487 		buf += sizeof(struct wl12xx_rx_filter_field) -
1488 			sizeof(u8 *) + field->len;
1489 	}
1490 }
1491 
1492 /*
1493  * Allocates an RX filter returned through f
1494  * which needs to be freed using rx_filter_free()
1495  */
1496 static int
1497 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1498 					   struct wl12xx_rx_filter **f)
1499 {
1500 	int i, j, ret = 0;
1501 	struct wl12xx_rx_filter *filter;
1502 	u16 offset;
1503 	u8 flags, len;
1504 
1505 	filter = wl1271_rx_filter_alloc();
1506 	if (!filter) {
1507 		wl1271_warning("Failed to alloc rx filter");
1508 		ret = -ENOMEM;
1509 		goto err;
1510 	}
1511 
1512 	i = 0;
1513 	while (i < p->pattern_len) {
1514 		if (!test_bit(i, (unsigned long *)p->mask)) {
1515 			i++;
1516 			continue;
1517 		}
1518 
1519 		for (j = i; j < p->pattern_len; j++) {
1520 			if (!test_bit(j, (unsigned long *)p->mask))
1521 				break;
1522 
1523 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1524 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1525 				break;
1526 		}
1527 
1528 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1529 			offset = i;
1530 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1531 		} else {
1532 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1533 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1534 		}
1535 
1536 		len = j - i;
1537 
1538 		ret = wl1271_rx_filter_alloc_field(filter,
1539 						   offset,
1540 						   flags,
1541 						   &p->pattern[i], len);
1542 		if (ret)
1543 			goto err;
1544 
1545 		i = j;
1546 	}
1547 
1548 	filter->action = FILTER_SIGNAL;
1549 
1550 	*f = filter;
1551 	return 0;
1552 
1553 err:
1554 	wl1271_rx_filter_free(filter);
1555 	*f = NULL;
1556 
1557 	return ret;
1558 }
1559 
1560 static int wl1271_configure_wowlan(struct wl1271 *wl,
1561 				   struct cfg80211_wowlan *wow)
1562 {
1563 	int i, ret;
1564 
1565 	if (!wow || wow->any || !wow->n_patterns) {
1566 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1567 							  FILTER_SIGNAL);
1568 		if (ret)
1569 			goto out;
1570 
1571 		ret = wl1271_rx_filter_clear_all(wl);
1572 		if (ret)
1573 			goto out;
1574 
1575 		return 0;
1576 	}
1577 
1578 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1579 		return -EINVAL;
1580 
1581 	/* Validate all incoming patterns before clearing current FW state */
1582 	for (i = 0; i < wow->n_patterns; i++) {
1583 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1584 		if (ret) {
1585 			wl1271_warning("Bad wowlan pattern %d", i);
1586 			return ret;
1587 		}
1588 	}
1589 
1590 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1591 	if (ret)
1592 		goto out;
1593 
1594 	ret = wl1271_rx_filter_clear_all(wl);
1595 	if (ret)
1596 		goto out;
1597 
1598 	/* Translate WoWLAN patterns into filters */
1599 	for (i = 0; i < wow->n_patterns; i++) {
1600 		struct cfg80211_pkt_pattern *p;
1601 		struct wl12xx_rx_filter *filter = NULL;
1602 
1603 		p = &wow->patterns[i];
1604 
1605 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1606 		if (ret) {
1607 			wl1271_warning("Failed to create an RX filter from "
1608 				       "wowlan pattern %d", i);
1609 			goto out;
1610 		}
1611 
1612 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1613 
1614 		wl1271_rx_filter_free(filter);
1615 		if (ret)
1616 			goto out;
1617 	}
1618 
1619 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1620 
1621 out:
1622 	return ret;
1623 }
1624 
1625 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1626 					struct wl12xx_vif *wlvif,
1627 					struct cfg80211_wowlan *wow)
1628 {
1629 	int ret = 0;
1630 
1631 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1632 		goto out;
1633 
1634 	ret = wl1271_configure_wowlan(wl, wow);
1635 	if (ret < 0)
1636 		goto out;
1637 
1638 	if ((wl->conf.conn.suspend_wake_up_event ==
1639 	     wl->conf.conn.wake_up_event) &&
1640 	    (wl->conf.conn.suspend_listen_interval ==
1641 	     wl->conf.conn.listen_interval))
1642 		goto out;
1643 
1644 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1645 				    wl->conf.conn.suspend_wake_up_event,
1646 				    wl->conf.conn.suspend_listen_interval);
1647 
1648 	if (ret < 0)
1649 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1650 out:
1651 	return ret;
1652 
1653 }
1654 
1655 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1656 					struct wl12xx_vif *wlvif,
1657 					struct cfg80211_wowlan *wow)
1658 {
1659 	int ret = 0;
1660 
1661 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1662 		goto out;
1663 
1664 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1665 	if (ret < 0)
1666 		goto out;
1667 
1668 	ret = wl1271_configure_wowlan(wl, wow);
1669 	if (ret < 0)
1670 		goto out;
1671 
1672 out:
1673 	return ret;
1674 
1675 }
1676 
1677 static int wl1271_configure_suspend(struct wl1271 *wl,
1678 				    struct wl12xx_vif *wlvif,
1679 				    struct cfg80211_wowlan *wow)
1680 {
1681 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1682 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1683 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1684 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1685 	return 0;
1686 }
1687 
1688 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1689 {
1690 	int ret = 0;
1691 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1692 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1693 
1694 	if ((!is_ap) && (!is_sta))
1695 		return;
1696 
1697 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1698 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1699 		return;
1700 
1701 	wl1271_configure_wowlan(wl, NULL);
1702 
1703 	if (is_sta) {
1704 		if ((wl->conf.conn.suspend_wake_up_event ==
1705 		     wl->conf.conn.wake_up_event) &&
1706 		    (wl->conf.conn.suspend_listen_interval ==
1707 		     wl->conf.conn.listen_interval))
1708 			return;
1709 
1710 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1711 				    wl->conf.conn.wake_up_event,
1712 				    wl->conf.conn.listen_interval);
1713 
1714 		if (ret < 0)
1715 			wl1271_error("resume: wake up conditions failed: %d",
1716 				     ret);
1717 
1718 	} else if (is_ap) {
1719 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1720 	}
1721 }
1722 
1723 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1724 					    struct cfg80211_wowlan *wow)
1725 {
1726 	struct wl1271 *wl = hw->priv;
1727 	struct wl12xx_vif *wlvif;
1728 	unsigned long flags;
1729 	int ret;
1730 
1731 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1732 	WARN_ON(!wow);
1733 
1734 	/* we want to perform the recovery before suspending */
1735 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1736 		wl1271_warning("postponing suspend to perform recovery");
1737 		return -EBUSY;
1738 	}
1739 
1740 	wl1271_tx_flush(wl);
1741 
1742 	mutex_lock(&wl->mutex);
1743 
1744 	ret = pm_runtime_get_sync(wl->dev);
1745 	if (ret < 0) {
1746 		pm_runtime_put_noidle(wl->dev);
1747 		mutex_unlock(&wl->mutex);
1748 		return ret;
1749 	}
1750 
1751 	wl->wow_enabled = true;
1752 	wl12xx_for_each_wlvif(wl, wlvif) {
1753 		if (wlcore_is_p2p_mgmt(wlvif))
1754 			continue;
1755 
1756 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1757 		if (ret < 0) {
1758 			goto out_sleep;
1759 		}
1760 	}
1761 
1762 	/* disable fast link flow control notifications from FW */
1763 	ret = wlcore_hw_interrupt_notify(wl, false);
1764 	if (ret < 0)
1765 		goto out_sleep;
1766 
1767 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1768 	ret = wlcore_hw_rx_ba_filter(wl,
1769 				     !!wl->conf.conn.suspend_rx_ba_activity);
1770 	if (ret < 0)
1771 		goto out_sleep;
1772 
1773 out_sleep:
1774 	pm_runtime_put_noidle(wl->dev);
1775 	mutex_unlock(&wl->mutex);
1776 
1777 	if (ret < 0) {
1778 		wl1271_warning("couldn't prepare device to suspend");
1779 		return ret;
1780 	}
1781 
1782 	/* flush any remaining work */
1783 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1784 
1785 	flush_work(&wl->tx_work);
1786 
1787 	/*
1788 	 * Cancel the watchdog even if the above tx_flush failed. We will detect
1789 	 * it on resume anyway.
1790 	 */
1791 	cancel_delayed_work(&wl->tx_watchdog_work);
1792 
1793 	/*
1794 	 * set suspended flag to avoid triggering a new threaded_irq
1795 	 * work.
1796 	 */
1797 	spin_lock_irqsave(&wl->wl_lock, flags);
1798 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1799 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1800 
1801 	return pm_runtime_force_suspend(wl->dev);
1802 }
1803 
1804 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1805 {
1806 	struct wl1271 *wl = hw->priv;
1807 	struct wl12xx_vif *wlvif;
1808 	unsigned long flags;
1809 	bool run_irq_work = false, pending_recovery;
1810 	int ret;
1811 
1812 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1813 		     wl->wow_enabled);
1814 	WARN_ON(!wl->wow_enabled);
1815 
1816 	ret = pm_runtime_force_resume(wl->dev);
1817 	if (ret < 0) {
1818 		wl1271_error("ELP wakeup failure!");
1819 		goto out_sleep;
1820 	}
1821 
1822 	/*
1823 	 * re-enable irq_work enqueuing, and call irq_work directly if
1824 	 * there is a pending work.
1825 	 */
1826 	spin_lock_irqsave(&wl->wl_lock, flags);
1827 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1828 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1829 		run_irq_work = true;
1830 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1831 
1832 	mutex_lock(&wl->mutex);
1833 
1834 	/* test the recovery flag before calling any SDIO functions */
1835 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1836 				    &wl->flags);
1837 
1838 	if (run_irq_work) {
1839 		wl1271_debug(DEBUG_MAC80211,
1840 			     "run postponed irq_work directly");
1841 
1842 		/* don't talk to the HW if recovery is pending */
1843 		if (!pending_recovery) {
1844 			ret = wlcore_irq_locked(wl);
1845 			if (ret)
1846 				wl12xx_queue_recovery_work(wl);
1847 		}
1848 
1849 		wlcore_enable_interrupts(wl);
1850 	}
1851 
1852 	if (pending_recovery) {
1853 		wl1271_warning("queuing forgotten recovery on resume");
1854 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1855 		goto out_sleep;
1856 	}
1857 
1858 	ret = pm_runtime_get_sync(wl->dev);
1859 	if (ret < 0) {
1860 		pm_runtime_put_noidle(wl->dev);
1861 		goto out;
1862 	}
1863 
1864 	wl12xx_for_each_wlvif(wl, wlvif) {
1865 		if (wlcore_is_p2p_mgmt(wlvif))
1866 			continue;
1867 
1868 		wl1271_configure_resume(wl, wlvif);
1869 	}
1870 
1871 	ret = wlcore_hw_interrupt_notify(wl, true);
1872 	if (ret < 0)
1873 		goto out_sleep;
1874 
1875 	/* disable any RX BA filtering that was enabled for suspend */
1876 	ret = wlcore_hw_rx_ba_filter(wl, false);
1877 	if (ret < 0)
1878 		goto out_sleep;
1879 
1880 out_sleep:
1881 	pm_runtime_mark_last_busy(wl->dev);
1882 	pm_runtime_put_autosuspend(wl->dev);
1883 
1884 out:
1885 	wl->wow_enabled = false;
1886 
1887 	/*
1888 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1889 	 * That way we avoid possible conditions where Tx-complete interrupts
1890 	 * fail to arrive and we perform a spurious recovery.
1891 	 */
1892 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1893 	mutex_unlock(&wl->mutex);
1894 
1895 	return 0;
1896 }
1897 
1898 static int wl1271_op_start(struct ieee80211_hw *hw)
1899 {
1900 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1901 
1902 	/*
1903 	 * We have to delay the booting of the hardware because
1904 	 * we need to know the local MAC address before downloading and
1905 	 * initializing the firmware. The MAC address cannot be changed
1906 	 * after boot, and without the proper MAC address, the firmware
1907 	 * will not function properly.
1908 	 *
1909 	 * The MAC address is first known when the corresponding interface
1910 	 * is added. That is where we will initialize the hardware.
1911 	 */
1912 
1913 	return 0;
1914 }
1915 
1916 static void wlcore_op_stop_locked(struct wl1271 *wl)
1917 {
1918 	int i;
1919 
1920 	if (wl->state == WLCORE_STATE_OFF) {
1921 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1922 					&wl->flags))
1923 			wlcore_enable_interrupts(wl);
1924 
1925 		return;
1926 	}
1927 
1928 	/*
1929 	 * this must be before the cancel_work calls below, so that the work
1930 	 * functions don't perform further work.
1931 	 */
1932 	wl->state = WLCORE_STATE_OFF;
1933 
1934 	/*
1935 	 * Use the nosync variant to disable interrupts, so the mutex can be
1936 	 * held while doing so without deadlocking.
1937 	 */
1938 	wlcore_disable_interrupts_nosync(wl);
1939 
1940 	mutex_unlock(&wl->mutex);
1941 
1942 	wlcore_synchronize_interrupts(wl);
1943 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1944 		cancel_work_sync(&wl->recovery_work);
1945 	wl1271_flush_deferred_work(wl);
1946 	cancel_delayed_work_sync(&wl->scan_complete_work);
1947 	cancel_work_sync(&wl->netstack_work);
1948 	cancel_work_sync(&wl->tx_work);
1949 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1950 
1951 	/* let's notify MAC80211 about the remaining pending TX frames */
1952 	mutex_lock(&wl->mutex);
1953 	wl12xx_tx_reset(wl);
1954 
1955 	wl1271_power_off(wl);
1956 	/*
1957 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1958 	 * an interrupt storm. Now that the power is down, it is safe to
1959 	 * re-enable interrupts to balance the disable depth
1960 	 */
1961 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1962 		wlcore_enable_interrupts(wl);
1963 
1964 	wl->band = NL80211_BAND_2GHZ;
1965 
1966 	wl->rx_counter = 0;
1967 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1968 	wl->channel_type = NL80211_CHAN_NO_HT;
1969 	wl->tx_blocks_available = 0;
1970 	wl->tx_allocated_blocks = 0;
1971 	wl->tx_results_count = 0;
1972 	wl->tx_packets_count = 0;
1973 	wl->time_offset = 0;
1974 	wl->ap_fw_ps_map = 0;
1975 	wl->ap_ps_map = 0;
1976 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1977 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1978 	memset(wl->links_map, 0, sizeof(wl->links_map));
1979 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1980 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1981 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1982 	wl->active_sta_count = 0;
1983 	wl->active_link_count = 0;
1984 
1985 	/* The system link is always allocated */
1986 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1987 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1988 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1989 
1990 	/*
1991 	 * this is performed after the cancel_work calls and the associated
1992 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1993 	 * get executed before all these vars have been reset.
1994 	 */
1995 	wl->flags = 0;
1996 
1997 	wl->tx_blocks_freed = 0;
1998 
1999 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2000 		wl->tx_pkts_freed[i] = 0;
2001 		wl->tx_allocated_pkts[i] = 0;
2002 	}
2003 
2004 	wl1271_debugfs_reset(wl);
2005 
2006 	kfree(wl->raw_fw_status);
2007 	wl->raw_fw_status = NULL;
2008 	kfree(wl->fw_status);
2009 	wl->fw_status = NULL;
2010 	kfree(wl->tx_res_if);
2011 	wl->tx_res_if = NULL;
2012 	kfree(wl->target_mem_map);
2013 	wl->target_mem_map = NULL;
2014 
2015 	/*
2016 	 * FW channels must be re-calibrated after recovery;
2017 	 * save the current Reg-Domain channel configuration and clear it.
2018 	 */
2019 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2020 	       sizeof(wl->reg_ch_conf_pending));
2021 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2022 }
2023 
2024 static void wlcore_op_stop(struct ieee80211_hw *hw)
2025 {
2026 	struct wl1271 *wl = hw->priv;
2027 
2028 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2029 
2030 	mutex_lock(&wl->mutex);
2031 
2032 	wlcore_op_stop_locked(wl);
2033 
2034 	mutex_unlock(&wl->mutex);
2035 }
2036 
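/*
 * Delayed work acting as a channel-switch timeout: if the switch is still
 * marked as in progress, report the failure to mac80211 and tell the FW to
 * stop the channel switch.
 */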
2037 static void wlcore_channel_switch_work(struct work_struct *work)
2038 {
2039 	struct delayed_work *dwork;
2040 	struct wl1271 *wl;
2041 	struct ieee80211_vif *vif;
2042 	struct wl12xx_vif *wlvif;
2043 	int ret;
2044 
2045 	dwork = to_delayed_work(work);
2046 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2047 	wl = wlvif->wl;
2048 
2049 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2050 
2051 	mutex_lock(&wl->mutex);
2052 
2053 	if (unlikely(wl->state != WLCORE_STATE_ON))
2054 		goto out;
2055 
2056 	/* check the channel switch is still ongoing */
2057 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2058 		goto out;
2059 
2060 	vif = wl12xx_wlvif_to_vif(wlvif);
2061 	ieee80211_chswitch_done(vif, false);
2062 
2063 	ret = pm_runtime_get_sync(wl->dev);
2064 	if (ret < 0) {
2065 		pm_runtime_put_noidle(wl->dev);
2066 		goto out;
2067 	}
2068 
2069 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2070 
2071 	pm_runtime_mark_last_busy(wl->dev);
2072 	pm_runtime_put_autosuspend(wl->dev);
2073 out:
2074 	mutex_unlock(&wl->mutex);
2075 }
2076 
2077 static void wlcore_connection_loss_work(struct work_struct *work)
2078 {
2079 	struct delayed_work *dwork;
2080 	struct wl1271 *wl;
2081 	struct ieee80211_vif *vif;
2082 	struct wl12xx_vif *wlvif;
2083 
2084 	dwork = to_delayed_work(work);
2085 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2086 	wl = wlvif->wl;
2087 
2088 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2089 
2090 	mutex_lock(&wl->mutex);
2091 
2092 	if (unlikely(wl->state != WLCORE_STATE_ON))
2093 		goto out;
2094 
2095 	/* Call mac80211 connection loss */
2096 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2097 		goto out;
2098 
2099 	vif = wl12xx_wlvif_to_vif(wlvif);
2100 	ieee80211_connection_loss(vif);
2101 out:
2102 	mutex_unlock(&wl->mutex);
2103 }
2104 
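/*
 * Delayed work run after a pending-auth timeout: unless a newer auth reply
 * arrived while the work was queued, cancel the ROC held for the
 * authenticating station.
 */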
2105 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2106 {
2107 	struct delayed_work *dwork;
2108 	struct wl1271 *wl;
2109 	struct wl12xx_vif *wlvif;
2110 	unsigned long time_spare;
2111 	int ret;
2112 
2113 	dwork = to_delayed_work(work);
2114 	wlvif = container_of(dwork, struct wl12xx_vif,
2115 			     pending_auth_complete_work);
2116 	wl = wlvif->wl;
2117 
2118 	mutex_lock(&wl->mutex);
2119 
2120 	if (unlikely(wl->state != WLCORE_STATE_ON))
2121 		goto out;
2122 
2123 	/*
2124 	 * Make sure a second really passed since the last auth reply. Maybe
2125 	 * a second auth reply arrived while we were stuck on the mutex.
2126 	 * Check for a little less than the timeout to protect from scheduler
2127 	 * irregularities.
2128 	 */
2129 	time_spare = jiffies +
2130 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2131 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2132 		goto out;
2133 
2134 	ret = pm_runtime_get_sync(wl->dev);
2135 	if (ret < 0) {
2136 		pm_runtime_put_noidle(wl->dev);
2137 		goto out;
2138 	}
2139 
2140 	/* cancel the ROC if active */
2141 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2142 
2143 	pm_runtime_mark_last_busy(wl->dev);
2144 	pm_runtime_put_autosuspend(wl->dev);
2145 out:
2146 	mutex_unlock(&wl->mutex);
2147 }
2148 
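/*
 * Reserve the first free entry in the rate-policies bitmap and return its
 * index through @idx; fails with -EBUSY once all WL12XX_MAX_RATE_POLICIES
 * entries are in use. wl12xx_free_rate_policy() releases an entry and
 * invalidates the index.
 */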
2149 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2150 {
2151 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2152 					WL12XX_MAX_RATE_POLICIES);
2153 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2154 		return -EBUSY;
2155 
2156 	__set_bit(policy, wl->rate_policies_map);
2157 	*idx = policy;
2158 	return 0;
2159 }
2160 
2161 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2162 {
2163 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2164 		return;
2165 
2166 	__clear_bit(*idx, wl->rate_policies_map);
2167 	*idx = WL12XX_MAX_RATE_POLICIES;
2168 }
2169 
2170 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2171 {
2172 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2173 					WLCORE_MAX_KLV_TEMPLATES);
2174 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2175 		return -EBUSY;
2176 
2177 	__set_bit(policy, wl->klv_templates_map);
2178 	*idx = policy;
2179 	return 0;
2180 }
2181 
2182 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2183 {
2184 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2185 		return;
2186 
2187 	__clear_bit(*idx, wl->klv_templates_map);
2188 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2189 }
2190 
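/*
 * Map the vif's BSS type (and its p2p/mesh variants) to the matching FW
 * role; returns WL12XX_INVALID_ROLE_TYPE for an unknown bss_type.
 */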
2191 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2192 {
2193 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2194 
2195 	switch (wlvif->bss_type) {
2196 	case BSS_TYPE_AP_BSS:
2197 		if (wlvif->p2p)
2198 			return WL1271_ROLE_P2P_GO;
2199 		else if (ieee80211_vif_is_mesh(vif))
2200 			return WL1271_ROLE_MESH_POINT;
2201 		else
2202 			return WL1271_ROLE_AP;
2203 
2204 	case BSS_TYPE_STA_BSS:
2205 		if (wlvif->p2p)
2206 			return WL1271_ROLE_P2P_CL;
2207 		else
2208 			return WL1271_ROLE_STA;
2209 
2210 	case BSS_TYPE_IBSS:
2211 		return WL1271_ROLE_IBSS;
2212 
2213 	default:
2214 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2215 	}
2216 	return WL12XX_INVALID_ROLE_TYPE;
2217 }
2218 
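/*
 * Reset the non-persistent part of the per-vif data: derive the BSS type
 * from the mac80211 interface type, allocate rate policies (and a klv
 * template for STA/IBSS), copy the global band/channel/power settings and
 * initialize the per-vif work items and rx-streaming timer.
 */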
2219 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2220 {
2221 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2222 	int i;
2223 
2224 	/* clear everything but the persistent data */
2225 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2226 
2227 	switch (ieee80211_vif_type_p2p(vif)) {
2228 	case NL80211_IFTYPE_P2P_CLIENT:
2229 		wlvif->p2p = 1;
2230 		/* fall-through */
2231 	case NL80211_IFTYPE_STATION:
2232 	case NL80211_IFTYPE_P2P_DEVICE:
2233 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2234 		break;
2235 	case NL80211_IFTYPE_ADHOC:
2236 		wlvif->bss_type = BSS_TYPE_IBSS;
2237 		break;
2238 	case NL80211_IFTYPE_P2P_GO:
2239 		wlvif->p2p = 1;
2240 		/* fall-through */
2241 	case NL80211_IFTYPE_AP:
2242 	case NL80211_IFTYPE_MESH_POINT:
2243 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2244 		break;
2245 	default:
2246 		wlvif->bss_type = MAX_BSS_TYPE;
2247 		return -EOPNOTSUPP;
2248 	}
2249 
2250 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2251 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2252 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2253 
2254 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2255 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2256 		/* init sta/ibss data */
2257 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2258 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2259 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2260 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2261 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2262 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2263 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2264 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2265 	} else {
2266 		/* init ap data */
2267 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2268 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2269 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2270 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2271 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2272 			wl12xx_allocate_rate_policy(wl,
2273 						&wlvif->ap.ucast_rate_idx[i]);
2274 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2275 		/*
2276 		 * TODO: check if basic_rate shouldn't be
2277 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2278 		 * instead (the same thing for STA above).
2279 		 */
2280 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2281 		/* TODO: this seems to be used only for STA, check it */
2282 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2283 	}
2284 
2285 	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2286 	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2287 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2288 
2289 	/*
2290 	 * mac80211 configures some values globally, while we treat them
2291 	 * per-interface. Thus, on init, we have to copy them from wl.
2292 	 */
2293 	wlvif->band = wl->band;
2294 	wlvif->channel = wl->channel;
2295 	wlvif->power_level = wl->power_level;
2296 	wlvif->channel_type = wl->channel_type;
2297 
2298 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2299 		  wl1271_rx_streaming_enable_work);
2300 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2301 		  wl1271_rx_streaming_disable_work);
2302 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2303 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2304 			  wlcore_channel_switch_work);
2305 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2306 			  wlcore_connection_loss_work);
2307 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2308 			  wlcore_pending_auth_complete_work);
2309 	INIT_LIST_HEAD(&wlvif->list);
2310 
2311 	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2312 	return 0;
2313 }
2314 
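/*
 * Power on the chip and boot the firmware, retrying up to
 * WL1271_BOOT_RETRIES times. On success, publish the fw version through
 * wiphy, drop the 5GHz channels if 11a is unsupported and move the core
 * to WLCORE_STATE_ON.
 */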
2315 static int wl12xx_init_fw(struct wl1271 *wl)
2316 {
2317 	int retries = WL1271_BOOT_RETRIES;
2318 	bool booted = false;
2319 	struct wiphy *wiphy = wl->hw->wiphy;
2320 	int ret;
2321 
2322 	while (retries) {
2323 		retries--;
2324 		ret = wl12xx_chip_wakeup(wl, false);
2325 		if (ret < 0)
2326 			goto power_off;
2327 
2328 		ret = wl->ops->boot(wl);
2329 		if (ret < 0)
2330 			goto power_off;
2331 
2332 		ret = wl1271_hw_init(wl);
2333 		if (ret < 0)
2334 			goto irq_disable;
2335 
2336 		booted = true;
2337 		break;
2338 
2339 irq_disable:
2340 		mutex_unlock(&wl->mutex);
2341 		/* Unlocking the mutex in the middle of handling is
2342 		   inherently unsafe. In this case we deem it safe to do,
2343 		   because we need to let any possibly pending IRQ out of
2344 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2345 		   work function will not do anything.) Also, any other
2346 		   possible concurrent operations will fail due to the
2347 		   current state, hence the wl1271 struct should be safe. */
2348 		wlcore_disable_interrupts(wl);
2349 		wl1271_flush_deferred_work(wl);
2350 		cancel_work_sync(&wl->netstack_work);
2351 		mutex_lock(&wl->mutex);
2352 power_off:
2353 		wl1271_power_off(wl);
2354 	}
2355 
2356 	if (!booted) {
2357 		wl1271_error("firmware boot failed despite %d retries",
2358 			     WL1271_BOOT_RETRIES);
2359 		goto out;
2360 	}
2361 
2362 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2363 
2364 	/* update hw/fw version info in wiphy struct */
2365 	wiphy->hw_version = wl->chip.id;
2366 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2367 		sizeof(wiphy->fw_version));
2368 
2369 	/*
2370 	 * Now we know if 11a is supported (info from the NVS), so disable
2371 	 * 11a channels if not supported
2372 	 */
2373 	if (!wl->enable_11a)
2374 		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2375 
2376 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2377 		     wl->enable_11a ? "" : "not ");
2378 
2379 	wl->state = WLCORE_STATE_ON;
2380 out:
2381 	return ret;
2382 }
2383 
2384 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2385 {
2386 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2387 }
2388 
2389 /*
2390  * Check whether a fw switch (i.e. moving from one loaded
2391  * fw to another) is needed. This function is also responsible
2392  * for updating wl->last_vif_count, so it must be called before
2393  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2394  * will be used).
2395  */
2396 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2397 				  struct vif_counter_data vif_counter_data,
2398 				  bool add)
2399 {
2400 	enum wl12xx_fw_type current_fw = wl->fw_type;
2401 	u8 vif_count = vif_counter_data.counter;
2402 
2403 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2404 		return false;
2405 
2406 	/* increase the vif count if this is a new vif */
2407 	if (add && !vif_counter_data.cur_vif_running)
2408 		vif_count++;
2409 
2410 	wl->last_vif_count = vif_count;
2411 
2412 	/* no need for fw change if the device is OFF */
2413 	if (wl->state == WLCORE_STATE_OFF)
2414 		return false;
2415 
2416 	/* no need for fw change if a single fw is used */
2417 	if (!wl->mr_fw_name)
2418 		return false;
2419 
2420 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2421 		return true;
2422 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2423 		return true;
2424 
2425 	return false;
2426 }
2427 
2428 /*
2429  * Enter "forced psm". Make sure the sta is in psm against the ap,
2430  * so the connection is more likely to survive the fw switch.
2431  */
2432 static void wl12xx_force_active_psm(struct wl1271 *wl)
2433 {
2434 	struct wl12xx_vif *wlvif;
2435 
2436 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2437 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2438 	}
2439 }
2440 
2441 struct wlcore_hw_queue_iter_data {
2442 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2443 	/* current vif */
2444 	struct ieee80211_vif *vif;
2445 	/* is the current vif among those iterated */
2446 	bool cur_running;
2447 };
2448 
2449 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2450 				 struct ieee80211_vif *vif)
2451 {
2452 	struct wlcore_hw_queue_iter_data *iter_data = data;
2453 
2454 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2455 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2456 		return;
2457 
2458 	if (iter_data->cur_running || vif == iter_data->vif) {
2459 		iter_data->cur_running = true;
2460 		return;
2461 	}
2462 
2463 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2464 }
2465 
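/*
 * Give the vif a block of NUM_TX_QUEUES hardware queues. Queues already
 * assigned by mac80211 (resume/recovery) are reused; otherwise the first
 * free block is taken. AP vifs additionally get a cab queue from the range
 * reserved after the unicast blocks, while P2P device vifs get none.
 */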
2466 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2467 					 struct wl12xx_vif *wlvif)
2468 {
2469 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2470 	struct wlcore_hw_queue_iter_data iter_data = {};
2471 	int i, q_base;
2472 
2473 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2474 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2475 		return 0;
2476 	}
2477 
2478 	iter_data.vif = vif;
2479 
2480 	/* mark all bits taken by active interfaces */
2481 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2482 					IEEE80211_IFACE_ITER_RESUME_ALL,
2483 					wlcore_hw_queue_iter, &iter_data);
2484 
2485 	/* the current vif is already running in mac80211 (resume/recovery) */
2486 	if (iter_data.cur_running) {
2487 		wlvif->hw_queue_base = vif->hw_queue[0];
2488 		wl1271_debug(DEBUG_MAC80211,
2489 			     "using pre-allocated hw queue base %d",
2490 			     wlvif->hw_queue_base);
2491 
2492 		/* the interface might have changed type */
2493 		goto adjust_cab_queue;
2494 	}
2495 
2496 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2497 				     WLCORE_NUM_MAC_ADDRESSES);
2498 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2499 		return -EBUSY;
2500 
2501 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2502 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2503 		     wlvif->hw_queue_base);
2504 
2505 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2506 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2507 		/* register hw queues in mac80211 */
2508 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2509 	}
2510 
2511 adjust_cab_queue:
2512 	/* the last places are reserved for cab queues per interface */
2513 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2514 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2515 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2516 	else
2517 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2518 
2519 	return 0;
2520 }
2521 
2522 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2523 				   struct ieee80211_vif *vif)
2524 {
2525 	struct wl1271 *wl = hw->priv;
2526 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2527 	struct vif_counter_data vif_count;
2528 	int ret = 0;
2529 	u8 role_type;
2530 
2531 	if (wl->plt) {
2532 		wl1271_error("Adding Interface not allowed while in PLT mode");
2533 		return -EBUSY;
2534 	}
2535 
2536 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2537 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2538 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2539 
2540 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2541 		     ieee80211_vif_type_p2p(vif), vif->addr);
2542 
2543 	wl12xx_get_vif_count(hw, vif, &vif_count);
2544 
2545 	mutex_lock(&wl->mutex);
2546 
2547 	/*
2548 	 * in some corner-case HW recovery scenarios it's possible to
2549 	 * get here before __wl1271_op_remove_interface is complete, so
2550 	 * opt out if that is the case.
2551 	 */
2552 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2553 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2554 		ret = -EBUSY;
2555 		goto out;
2556 	}
2557 
2558 
2559 	ret = wl12xx_init_vif_data(wl, vif);
2560 	if (ret < 0)
2561 		goto out;
2562 
2563 	wlvif->wl = wl;
2564 	role_type = wl12xx_get_role_type(wl, wlvif);
2565 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2566 		ret = -EINVAL;
2567 		goto out;
2568 	}
2569 
2570 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2571 	if (ret < 0)
2572 		goto out;
2573 
2574 	/*
2575 	 * TODO: after the nvs issue is solved, move this block
2576 	 * to start(), and make sure the driver is ON here.
2577 	 */
2578 	if (wl->state == WLCORE_STATE_OFF) {
2579 		/*
2580 		 * we still need this in order to configure the fw
2581 		 * while uploading the nvs
2582 		 */
2583 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2584 
2585 		ret = wl12xx_init_fw(wl);
2586 		if (ret < 0)
2587 			goto out;
2588 	}
2589 
2590 	/*
2591 	 * Call runtime PM only after possible wl12xx_init_fw() above
2592 	 * is done. Otherwise we do not have interrupts enabled.
2593 	 */
2594 	ret = pm_runtime_get_sync(wl->dev);
2595 	if (ret < 0) {
2596 		pm_runtime_put_noidle(wl->dev);
2597 		goto out_unlock;
2598 	}
2599 
2600 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2601 		wl12xx_force_active_psm(wl);
2602 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2603 		mutex_unlock(&wl->mutex);
2604 		wl1271_recovery_work(&wl->recovery_work);
2605 		return 0;
2606 	}
2607 
2608 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2609 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2610 					     role_type, &wlvif->role_id);
2611 		if (ret < 0)
2612 			goto out;
2613 
2614 		ret = wl1271_init_vif_specific(wl, vif);
2615 		if (ret < 0)
2616 			goto out;
2617 
2618 	} else {
2619 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2620 					     &wlvif->dev_role_id);
2621 		if (ret < 0)
2622 			goto out;
2623 
2624 		/* needed mainly for configuring rate policies */
2625 		ret = wl1271_sta_hw_init(wl, wlvif);
2626 		if (ret < 0)
2627 			goto out;
2628 	}
2629 
2630 	list_add(&wlvif->list, &wl->wlvif_list);
2631 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2632 
2633 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2634 		wl->ap_count++;
2635 	else
2636 		wl->sta_count++;
2637 out:
2638 	pm_runtime_mark_last_busy(wl->dev);
2639 	pm_runtime_put_autosuspend(wl->dev);
2640 out_unlock:
2641 	mutex_unlock(&wl->mutex);
2642 
2643 	return ret;
2644 }
2645 
2646 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2647 					 struct ieee80211_vif *vif,
2648 					 bool reset_tx_queues)
2649 {
2650 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2651 	int i, ret;
2652 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2653 
2654 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2655 
2656 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2657 		return;
2658 
2659 	/* because of hardware recovery, we may get here twice */
2660 	if (wl->state == WLCORE_STATE_OFF)
2661 		return;
2662 
2663 	wl1271_info("down");
2664 
2665 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2666 	    wl->scan_wlvif == wlvif) {
2667 		struct cfg80211_scan_info info = {
2668 			.aborted = true,
2669 		};
2670 
2671 		/*
2672 		 * Rearm the tx watchdog just before idling scan. This
2673 		 * prevents just-finished scans from triggering the watchdog
2674 		 */
2675 		wl12xx_rearm_tx_watchdog_locked(wl);
2676 
2677 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2678 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2679 		wl->scan_wlvif = NULL;
2680 		wl->scan.req = NULL;
2681 		ieee80211_scan_completed(wl->hw, &info);
2682 	}
2683 
2684 	if (wl->sched_vif == wlvif)
2685 		wl->sched_vif = NULL;
2686 
2687 	if (wl->roc_vif == vif) {
2688 		wl->roc_vif = NULL;
2689 		ieee80211_remain_on_channel_expired(wl->hw);
2690 	}
2691 
2692 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2693 		/* disable active roles */
2694 		ret = pm_runtime_get_sync(wl->dev);
2695 		if (ret < 0) {
2696 			pm_runtime_put_noidle(wl->dev);
2697 			goto deinit;
2698 		}
2699 
2700 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2701 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2702 			if (wl12xx_dev_role_started(wlvif))
2703 				wl12xx_stop_dev(wl, wlvif);
2704 		}
2705 
2706 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2707 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2708 			if (ret < 0) {
2709 				pm_runtime_put_noidle(wl->dev);
2710 				goto deinit;
2711 			}
2712 		} else {
2713 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2714 			if (ret < 0) {
2715 				pm_runtime_put_noidle(wl->dev);
2716 				goto deinit;
2717 			}
2718 		}
2719 
2720 		pm_runtime_mark_last_busy(wl->dev);
2721 		pm_runtime_put_autosuspend(wl->dev);
2722 	}
2723 deinit:
2724 	wl12xx_tx_reset_wlvif(wl, wlvif);
2725 
2726 	/* clear all hlids (except system_hlid) */
2727 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2728 
2729 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2730 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2731 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2732 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2733 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2734 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2735 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2736 	} else {
2737 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2738 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2739 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2740 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2741 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2742 			wl12xx_free_rate_policy(wl,
2743 						&wlvif->ap.ucast_rate_idx[i]);
2744 		wl1271_free_ap_keys(wl, wlvif);
2745 	}
2746 
2747 	dev_kfree_skb(wlvif->probereq);
2748 	wlvif->probereq = NULL;
2749 	if (wl->last_wlvif == wlvif)
2750 		wl->last_wlvif = NULL;
2751 	list_del(&wlvif->list);
2752 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2753 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2754 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2755 
2756 	if (is_ap)
2757 		wl->ap_count--;
2758 	else
2759 		wl->sta_count--;
2760 
2761 	/*
2762 	 * Last AP went down but stations remain: configure sleep auth
2763 	 * according to STA. Don't do this on unintended recovery.
2764 	 */
2765 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2766 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2767 		goto unlock;
2768 
2769 	if (wl->ap_count == 0 && is_ap) {
2770 		/* mask ap events */
2771 		wl->event_mask &= ~wl->ap_event_mask;
2772 		wl1271_event_unmask(wl);
2773 	}
2774 
2775 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2776 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2777 		/* Configure for power according to debugfs */
2778 		if (sta_auth != WL1271_PSM_ILLEGAL)
2779 			wl1271_acx_sleep_auth(wl, sta_auth);
2780 		/* Configure for ELP power saving */
2781 		else
2782 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2783 	}
2784 
2785 unlock:
2786 	mutex_unlock(&wl->mutex);
2787 
2788 	del_timer_sync(&wlvif->rx_streaming_timer);
2789 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2790 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2791 	cancel_work_sync(&wlvif->rc_update_work);
2792 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2793 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2794 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2795 
2796 	mutex_lock(&wl->mutex);
2797 }
2798 
2799 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2800 				       struct ieee80211_vif *vif)
2801 {
2802 	struct wl1271 *wl = hw->priv;
2803 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2804 	struct wl12xx_vif *iter;
2805 	struct vif_counter_data vif_count;
2806 
2807 	wl12xx_get_vif_count(hw, vif, &vif_count);
2808 	mutex_lock(&wl->mutex);
2809 
2810 	if (wl->state == WLCORE_STATE_OFF ||
2811 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2812 		goto out;
2813 
2814 	/*
2815 	 * wl->vif can be null here if someone shuts down the interface
2816 	 * just when hardware recovery has been started.
2817 	 */
2818 	wl12xx_for_each_wlvif(wl, iter) {
2819 		if (iter != wlvif)
2820 			continue;
2821 
2822 		__wl1271_op_remove_interface(wl, vif, true);
2823 		break;
2824 	}
2825 	WARN_ON(iter != wlvif);
2826 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2827 		wl12xx_force_active_psm(wl);
2828 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2829 		wl12xx_queue_recovery_work(wl);
2830 	}
2831 out:
2832 	mutex_unlock(&wl->mutex);
2833 }
2834 
2835 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2836 				      struct ieee80211_vif *vif,
2837 				      enum nl80211_iftype new_type, bool p2p)
2838 {
2839 	struct wl1271 *wl = hw->priv;
2840 	int ret;
2841 
2842 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2843 	wl1271_op_remove_interface(hw, vif);
2844 
2845 	vif->type = new_type;
2846 	vif->p2p = p2p;
2847 	ret = wl1271_op_add_interface(hw, vif);
2848 
2849 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2850 	return ret;
2851 }
2852 
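/*
 * Issue the ROLE_START command for a STA or IBSS role. The recorded
 * encryption type is cleared first, since the JOIN wipes the keys in the
 * chipset (see the note below).
 */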
2853 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2854 {
2855 	int ret;
2856 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2857 
2858 	/*
2859 	 * One of the side effects of the JOIN command is that it clears
2860 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2861 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2862 	 * Currently the only valid scenario for JOIN during association
2863 	 * is on roaming, in which case we will also be given new keys.
2864 	 * Keep the below message for now, unless it starts bothering
2865 	 * users who really like to roam a lot :)
2866 	 */
2867 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2868 		wl1271_info("JOIN while associated.");
2869 
2870 	/* clear encryption type */
2871 	wlvif->encryption_type = KEY_NONE;
2872 
2873 	if (is_ibss)
2874 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2875 	else {
2876 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2877 			/*
2878 			 * TODO: this is an ugly workaround for wl12xx fw
2879 			 * bug - we are not able to tx/rx after the first
2880 			 * start_sta, so make dummy start+stop calls,
2881 			 * and then call start_sta again.
2882 			 * this should be fixed in the fw.
2883 			 */
2884 			wl12xx_cmd_role_start_sta(wl, wlvif);
2885 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2886 		}
2887 
2888 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2889 	}
2890 
2891 	return ret;
2892 }
2893 
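/*
 * Extract the SSID IE from the frame in @skb, starting at @offset, and copy
 * it into wlvif->ssid/ssid_len. Fails if the IE is missing or longer than
 * IEEE80211_MAX_SSID_LEN.
 */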
2894 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2895 			    int offset)
2896 {
2897 	u8 ssid_len;
2898 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2899 					 skb->len - offset);
2900 
2901 	if (!ptr) {
2902 		wl1271_error("No SSID in IEs!");
2903 		return -ENOENT;
2904 	}
2905 
2906 	ssid_len = ptr[1];
2907 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2908 		wl1271_error("SSID is too long!");
2909 		return -EINVAL;
2910 	}
2911 
2912 	wlvif->ssid_len = ssid_len;
2913 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2914 	return 0;
2915 }
2916 
2917 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2918 {
2919 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2920 	struct sk_buff *skb;
2921 	int ieoffset;
2922 
2923 	/* we currently only support setting the ssid from the ap probe req */
2924 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2925 		return -EINVAL;
2926 
2927 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2928 	if (!skb)
2929 		return -EINVAL;
2930 
2931 	ieoffset = offsetof(struct ieee80211_mgmt,
2932 			    u.probe_req.variable);
2933 	wl1271_ssid_set(wlvif, skb, ieoffset);
2934 	dev_kfree_skb(skb);
2935 
2936 	return 0;
2937 }
2938 
2939 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2940 			    struct ieee80211_bss_conf *bss_conf,
2941 			    u32 sta_rate_set)
2942 {
2943 	int ieoffset;
2944 	int ret;
2945 
2946 	wlvif->aid = bss_conf->aid;
2947 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2948 	wlvif->beacon_int = bss_conf->beacon_int;
2949 	wlvif->wmm_enabled = bss_conf->qos;
2950 
2951 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2952 
2953 	/*
2954 	 * with wl1271, we don't need to update the
2955 	 * beacon_int and dtim_period, because the firmware
2956 	 * updates them by itself when the first beacon is
2957 	 * received after a join.
2958 	 */
2959 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2960 	if (ret < 0)
2961 		return ret;
2962 
2963 	/*
2964 	 * Get a template for hardware connection maintenance
2965 	 */
2966 	dev_kfree_skb(wlvif->probereq);
2967 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2968 							wlvif,
2969 							NULL);
2970 	ieoffset = offsetof(struct ieee80211_mgmt,
2971 			    u.probe_req.variable);
2972 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2973 
2974 	/* enable the connection monitoring feature */
2975 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2976 	if (ret < 0)
2977 		return ret;
2978 
2979 	/*
2980 	 * The join command disables the keep-alive mode, shuts down its process,
2981 	 * and also clear the template config, so we need to reset it all after
2982 	 * the join. The acx_aid starts the keep-alive process, and the order
2983 	 * of the commands below is relevant.
2984 	 */
2985 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2986 	if (ret < 0)
2987 		return ret;
2988 
2989 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2990 	if (ret < 0)
2991 		return ret;
2992 
2993 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2994 	if (ret < 0)
2995 		return ret;
2996 
2997 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2998 					   wlvif->sta.klv_template_id,
2999 					   ACX_KEEP_ALIVE_TPL_VALID);
3000 	if (ret < 0)
3001 		return ret;
3002 
3003 	/*
3004 	 * The default fw psm configuration is AUTO, while mac80211 default
3005 	 * setting is off (ACTIVE), so sync the fw with the correct value.
3006 	 */
3007 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3008 	if (ret < 0)
3009 		return ret;
3010 
3011 	if (sta_rate_set) {
3012 		wlvif->rate_set =
3013 			wl1271_tx_enabled_rates_get(wl,
3014 						    sta_rate_set,
3015 						    wlvif->band);
3016 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3017 		if (ret < 0)
3018 			return ret;
3019 	}
3020 
3021 	return ret;
3022 }
3023 
3024 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3025 {
3026 	int ret;
3027 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3028 
3029 	/* make sure we were associated (sta) */
3030 	if (sta &&
3031 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3032 		return false;
3033 
3034 	/* make sure we are joined (ibss) */
3035 	if (!sta &&
3036 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3037 		return false;
3038 
3039 	if (sta) {
3040 		/* use defaults when not associated */
3041 		wlvif->aid = 0;
3042 
3043 		/* free probe-request template */
3044 		dev_kfree_skb(wlvif->probereq);
3045 		wlvif->probereq = NULL;
3046 
3047 		/* disable connection monitor features */
3048 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3049 		if (ret < 0)
3050 			return ret;
3051 
3052 		/* Disable the keep-alive feature */
3053 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3054 		if (ret < 0)
3055 			return ret;
3056 
3057 		/* disable beacon filtering */
3058 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3059 		if (ret < 0)
3060 			return ret;
3061 	}
3062 
3063 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3064 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3065 
3066 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3067 		ieee80211_chswitch_done(vif, false);
3068 		cancel_delayed_work(&wlvif->channel_switch_work);
3069 	}
3070 
3071 	/* invalidate keep-alive template */
3072 	wl1271_acx_keep_alive_config(wl, wlvif,
3073 				     wlvif->sta.klv_template_id,
3074 				     ACX_KEEP_ALIVE_TPL_INVALID);
3075 
3076 	return 0;
3077 }
3078 
3079 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3080 {
3081 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3082 	wlvif->rate_set = wlvif->basic_rate_set;
3083 }
3084 
3085 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3086 				   bool idle)
3087 {
3088 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3089 
3090 	if (idle == cur_idle)
3091 		return;
3092 
3093 	if (idle) {
3094 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3095 	} else {
3096 		/* The current firmware only supports sched_scan in idle */
3097 		if (wl->sched_vif == wlvif)
3098 			wl->ops->sched_scan_stop(wl, wlvif);
3099 
3100 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3101 	}
3102 }
3103 
3104 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3105 			     struct ieee80211_conf *conf, u32 changed)
3106 {
3107 	int ret;
3108 
3109 	if (wlcore_is_p2p_mgmt(wlvif))
3110 		return 0;
3111 
3112 	if (conf->power_level != wlvif->power_level) {
3113 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3114 		if (ret < 0)
3115 			return ret;
3116 
3117 		wlvif->power_level = conf->power_level;
3118 	}
3119 
3120 	return 0;
3121 }
3122 
3123 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3124 {
3125 	struct wl1271 *wl = hw->priv;
3126 	struct wl12xx_vif *wlvif;
3127 	struct ieee80211_conf *conf = &hw->conf;
3128 	int ret = 0;
3129 
3130 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3131 		     " changed 0x%x",
3132 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3133 		     conf->power_level,
3134 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3135 			 changed);
3136 
3137 	mutex_lock(&wl->mutex);
3138 
3139 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3140 		wl->power_level = conf->power_level;
3141 
3142 	if (unlikely(wl->state != WLCORE_STATE_ON))
3143 		goto out;
3144 
3145 	ret = pm_runtime_get_sync(wl->dev);
3146 	if (ret < 0) {
3147 		pm_runtime_put_noidle(wl->dev);
3148 		goto out;
3149 	}
3150 
3151 	/* configure each interface */
3152 	wl12xx_for_each_wlvif(wl, wlvif) {
3153 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3154 		if (ret < 0)
3155 			goto out_sleep;
3156 	}
3157 
3158 out_sleep:
3159 	pm_runtime_mark_last_busy(wl->dev);
3160 	pm_runtime_put_autosuspend(wl->dev);
3161 
3162 out:
3163 	mutex_unlock(&wl->mutex);
3164 
3165 	return ret;
3166 }
3167 
3168 struct wl1271_filter_params {
3169 	bool enabled;
3170 	int mc_list_length;
3171 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3172 };
3173 
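/*
 * Runs in atomic context, so the filter parameters are allocated with
 * GFP_ATOMIC. The pointer is returned to mac80211 as a u64 cookie and later
 * consumed (and freed) by wl1271_op_configure_filter().
 */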
3174 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3175 				       struct netdev_hw_addr_list *mc_list)
3176 {
3177 	struct wl1271_filter_params *fp;
3178 	struct netdev_hw_addr *ha;
3179 
3180 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3181 	if (!fp) {
3182 		wl1271_error("Out of memory setting filters.");
3183 		return 0;
3184 	}
3185 
3186 	/* update multicast filtering parameters */
3187 	fp->mc_list_length = 0;
3188 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3189 		fp->enabled = false;
3190 	} else {
3191 		fp->enabled = true;
3192 		netdev_hw_addr_list_for_each(ha, mc_list) {
3193 			memcpy(fp->mc_list[fp->mc_list_length],
3194 					ha->addr, ETH_ALEN);
3195 			fp->mc_list_length++;
3196 		}
3197 	}
3198 
3199 	return (u64)(unsigned long)fp;
3200 }
3201 
3202 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3203 				  FIF_FCSFAIL | \
3204 				  FIF_BCN_PRBRESP_PROMISC | \
3205 				  FIF_CONTROL | \
3206 				  FIF_OTHER_BSS)
3207 
3208 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3209 				       unsigned int changed,
3210 				       unsigned int *total, u64 multicast)
3211 {
3212 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3213 	struct wl1271 *wl = hw->priv;
3214 	struct wl12xx_vif *wlvif;
3215 
3216 	int ret;
3217 
3218 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3219 		     " total %x", changed, *total);
3220 
3221 	mutex_lock(&wl->mutex);
3222 
3223 	*total &= WL1271_SUPPORTED_FILTERS;
3224 	changed &= WL1271_SUPPORTED_FILTERS;
3225 
3226 	if (unlikely(wl->state != WLCORE_STATE_ON))
3227 		goto out;
3228 
3229 	ret = pm_runtime_get_sync(wl->dev);
3230 	if (ret < 0) {
3231 		pm_runtime_put_noidle(wl->dev);
3232 		goto out;
3233 	}
3234 
3235 	wl12xx_for_each_wlvif(wl, wlvif) {
3236 		if (wlcore_is_p2p_mgmt(wlvif))
3237 			continue;
3238 
3239 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3240 			if (*total & FIF_ALLMULTI)
3241 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3242 								   false,
3243 								   NULL, 0);
3244 			else if (fp)
3245 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3246 							fp->enabled,
3247 							fp->mc_list,
3248 							fp->mc_list_length);
3249 			if (ret < 0)
3250 				goto out_sleep;
3251 		}
3252 
3253 		/*
3254 		 * If the interface is in AP mode and was created with allmulticast,
3255 		 * disable the firmware filters so that all multicast packets are
3256 		 * passed. This is mandatory for mDNS-based discovery protocols.
3257 		 */
3258 		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3259 			if (*total & FIF_ALLMULTI) {
3260 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3261 							false,
3262 							NULL, 0);
3263 				if (ret < 0)
3264 					goto out_sleep;
3265 			}
3266 		}
3267 	}
3268 
3269 	/*
3270 	 * The fw doesn't provide an api to configure the filters. Instead,
3271 	 * the filter configuration is based on the active roles / ROC
3272 	 * state.
3273 	 */
3274 
3275 out_sleep:
3276 	pm_runtime_mark_last_busy(wl->dev);
3277 	pm_runtime_put_autosuspend(wl->dev);
3278 
3279 out:
3280 	mutex_unlock(&wl->mutex);
3281 	kfree(fp);
3282 }
3283 
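/*
 * Keys configured before the AP role has started cannot be programmed into
 * the FW yet, so record them in wlvif->ap.recorded_keys; they are replayed
 * by wl1271_ap_init_hwenc() once the AP is up.
 */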
3284 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3285 				u8 id, u8 key_type, u8 key_size,
3286 				const u8 *key, u8 hlid, u32 tx_seq_32,
3287 				u16 tx_seq_16, bool is_pairwise)
3288 {
3289 	struct wl1271_ap_key *ap_key;
3290 	int i;
3291 
3292 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3293 
3294 	if (key_size > MAX_KEY_SIZE)
3295 		return -EINVAL;
3296 
3297 	/*
3298 	 * Find next free entry in ap_keys. Also check we are not replacing
3299 	 * an existing key.
3300 	 */
3301 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3302 		if (wlvif->ap.recorded_keys[i] == NULL)
3303 			break;
3304 
3305 		if (wlvif->ap.recorded_keys[i]->id == id) {
3306 			wl1271_warning("trying to record key replacement");
3307 			return -EINVAL;
3308 		}
3309 	}
3310 
3311 	if (i == MAX_NUM_KEYS)
3312 		return -EBUSY;
3313 
3314 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3315 	if (!ap_key)
3316 		return -ENOMEM;
3317 
3318 	ap_key->id = id;
3319 	ap_key->key_type = key_type;
3320 	ap_key->key_size = key_size;
3321 	memcpy(ap_key->key, key, key_size);
3322 	ap_key->hlid = hlid;
3323 	ap_key->tx_seq_32 = tx_seq_32;
3324 	ap_key->tx_seq_16 = tx_seq_16;
3325 	ap_key->is_pairwise = is_pairwise;
3326 
3327 	wlvif->ap.recorded_keys[i] = ap_key;
3328 	return 0;
3329 }
3330 
3331 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3332 {
3333 	int i;
3334 
3335 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3336 		kfree(wlvif->ap.recorded_keys[i]);
3337 		wlvif->ap.recorded_keys[i] = NULL;
3338 	}
3339 }
3340 
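/*
 * Program all keys recorded before AP start into the FW, set the default
 * WEP key for the broadcast link if any WEP key was added, and free the
 * recorded list.
 */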
3341 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3342 {
3343 	int i, ret = 0;
3344 	struct wl1271_ap_key *key;
3345 	bool wep_key_added = false;
3346 
3347 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3348 		u8 hlid;
3349 		if (wlvif->ap.recorded_keys[i] == NULL)
3350 			break;
3351 
3352 		key = wlvif->ap.recorded_keys[i];
3353 		hlid = key->hlid;
3354 		if (hlid == WL12XX_INVALID_LINK_ID)
3355 			hlid = wlvif->ap.bcast_hlid;
3356 
3357 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3358 					    key->id, key->key_type,
3359 					    key->key_size, key->key,
3360 					    hlid, key->tx_seq_32,
3361 					    key->tx_seq_16, key->is_pairwise);
3362 		if (ret < 0)
3363 			goto out;
3364 
3365 		if (key->key_type == KEY_WEP)
3366 			wep_key_added = true;
3367 	}
3368 
3369 	if (wep_key_added) {
3370 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3371 						     wlvif->ap.bcast_hlid);
3372 		if (ret < 0)
3373 			goto out;
3374 	}
3375 
3376 out:
3377 	wl1271_free_ap_keys(wl, wlvif);
3378 	return ret;
3379 }
3380 
3381 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3382 		       u16 action, u8 id, u8 key_type,
3383 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3384 		       u16 tx_seq_16, struct ieee80211_sta *sta,
3385 		       bool is_pairwise)
3386 {
3387 	int ret;
3388 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3389 
3390 	if (is_ap) {
3391 		struct wl1271_station *wl_sta;
3392 		u8 hlid;
3393 
3394 		if (sta) {
3395 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3396 			hlid = wl_sta->hlid;
3397 		} else {
3398 			hlid = wlvif->ap.bcast_hlid;
3399 		}
3400 
3401 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3402 			/*
3403 			 * We do not support removing keys after AP shutdown.
3404 			 * Pretend we do to make mac80211 happy.
3405 			 */
3406 			if (action != KEY_ADD_OR_REPLACE)
3407 				return 0;
3408 
3409 			ret = wl1271_record_ap_key(wl, wlvif, id,
3410 					     key_type, key_size,
3411 					     key, hlid, tx_seq_32,
3412 					     tx_seq_16, is_pairwise);
3413 		} else {
3414 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3415 					     id, key_type, key_size,
3416 					     key, hlid, tx_seq_32,
3417 					     tx_seq_16, is_pairwise);
3418 		}
3419 
3420 		if (ret < 0)
3421 			return ret;
3422 	} else {
3423 		const u8 *addr;
3424 		static const u8 bcast_addr[ETH_ALEN] = {
3425 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3426 		};
3427 
3428 		addr = sta ? sta->addr : bcast_addr;
3429 
3430 		if (is_zero_ether_addr(addr)) {
3431 			/* We don't support TX-only encryption */
3432 			return -EOPNOTSUPP;
3433 		}
3434 
3435 		/* The wl1271 does not allow removing unicast keys - they
3436 		   will be cleared automatically on the next CMD_JOIN. Ignore
3437 		   the request silently, as we don't want mac80211 to emit
3438 		   an error message. */
3439 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3440 			return 0;
3441 
3442 		/* don't remove key if hlid was already deleted */
3443 		if (action == KEY_REMOVE &&
3444 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3445 			return 0;
3446 
3447 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3448 					     id, key_type, key_size,
3449 					     key, addr, tx_seq_32,
3450 					     tx_seq_16);
3451 		if (ret < 0)
3452 			return ret;
3453 
3454 	}
3455 
3456 	return 0;
3457 }
3458 
3459 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3460 			     struct ieee80211_vif *vif,
3461 			     struct ieee80211_sta *sta,
3462 			     struct ieee80211_key_conf *key_conf)
3463 {
3464 	struct wl1271 *wl = hw->priv;
3465 	int ret;
3466 	bool might_change_spare =
3467 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3468 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3469 
3470 	if (might_change_spare) {
3471 		/*
3472 		 * stop the queues and flush to ensure the next packets are
3473 		 * in sync with FW spare block accounting
3474 		 */
3475 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3476 		wl1271_tx_flush(wl);
3477 	}
3478 
3479 	mutex_lock(&wl->mutex);
3480 
3481 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3482 		ret = -EAGAIN;
3483 		goto out_wake_queues;
3484 	}
3485 
3486 	ret = pm_runtime_get_sync(wl->dev);
3487 	if (ret < 0) {
3488 		pm_runtime_put_noidle(wl->dev);
3489 		goto out_wake_queues;
3490 	}
3491 
3492 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3493 
3494 	pm_runtime_mark_last_busy(wl->dev);
3495 	pm_runtime_put_autosuspend(wl->dev);
3496 
3497 out_wake_queues:
3498 	if (might_change_spare)
3499 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3500 
3501 	mutex_unlock(&wl->mutex);
3502 
3503 	return ret;
3504 }
3505 
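/*
 * Common key-programming path used by the chip-specific set_key ops:
 * translate the mac80211 cipher to the FW key type and seed the TX security
 * sequence counter from the link's total_freed_pkts before adding or
 * removing the key.
 */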
3506 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3507 		   struct ieee80211_vif *vif,
3508 		   struct ieee80211_sta *sta,
3509 		   struct ieee80211_key_conf *key_conf)
3510 {
3511 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3512 	int ret;
3513 	u32 tx_seq_32 = 0;
3514 	u16 tx_seq_16 = 0;
3515 	u8 key_type;
3516 	u8 hlid;
3517 	bool is_pairwise;
3518 
3519 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3520 
3521 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3522 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3523 		     key_conf->cipher, key_conf->keyidx,
3524 		     key_conf->keylen, key_conf->flags);
3525 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3526 
3527 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3528 		if (sta) {
3529 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3530 			hlid = wl_sta->hlid;
3531 		} else {
3532 			hlid = wlvif->ap.bcast_hlid;
3533 		}
3534 	else
3535 		hlid = wlvif->sta.hlid;
3536 
3537 	if (hlid != WL12XX_INVALID_LINK_ID) {
3538 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3539 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3540 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3541 	}
3542 
3543 	switch (key_conf->cipher) {
3544 	case WLAN_CIPHER_SUITE_WEP40:
3545 	case WLAN_CIPHER_SUITE_WEP104:
3546 		key_type = KEY_WEP;
3547 
3548 		key_conf->hw_key_idx = key_conf->keyidx;
3549 		break;
3550 	case WLAN_CIPHER_SUITE_TKIP:
3551 		key_type = KEY_TKIP;
3552 		key_conf->hw_key_idx = key_conf->keyidx;
3553 		break;
3554 	case WLAN_CIPHER_SUITE_CCMP:
3555 		key_type = KEY_AES;
3556 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3557 		break;
3558 	case WL1271_CIPHER_SUITE_GEM:
3559 		key_type = KEY_GEM;
3560 		break;
3561 	default:
3562 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3563 
3564 		return -EOPNOTSUPP;
3565 	}
3566 
3567 	is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
3568 
3569 	switch (cmd) {
3570 	case SET_KEY:
3571 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3572 				 key_conf->keyidx, key_type,
3573 				 key_conf->keylen, key_conf->key,
3574 				 tx_seq_32, tx_seq_16, sta, is_pairwise);
3575 		if (ret < 0) {
3576 			wl1271_error("Could not add or replace key");
3577 			return ret;
3578 		}
3579 
3580 		/*
3581 		 * reconfiguring arp response if the unicast (or common)
3582 		 * encryption key type was changed
3583 		 */
3584 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3585 		    (sta || key_type == KEY_WEP) &&
3586 		    wlvif->encryption_type != key_type) {
3587 			wlvif->encryption_type = key_type;
3588 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3589 			if (ret < 0) {
3590 				wl1271_warning("build arp rsp failed: %d", ret);
3591 				return ret;
3592 			}
3593 		}
3594 		break;
3595 
3596 	case DISABLE_KEY:
3597 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3598 				     key_conf->keyidx, key_type,
3599 				     key_conf->keylen, key_conf->key,
3600 				     0, 0, sta, is_pairwise);
3601 		if (ret < 0) {
3602 			wl1271_error("Could not remove key");
3603 			return ret;
3604 		}
3605 		break;
3606 
3607 	default:
3608 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3609 		return -EOPNOTSUPP;
3610 	}
3611 
3612 	return ret;
3613 }
3614 EXPORT_SYMBOL_GPL(wlcore_set_key);
3615 
wl1271_op_set_default_key_idx(struct ieee80211_hw * hw,struct ieee80211_vif * vif,int key_idx)3616 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3617 					  struct ieee80211_vif *vif,
3618 					  int key_idx)
3619 {
3620 	struct wl1271 *wl = hw->priv;
3621 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3622 	int ret;
3623 
3624 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3625 		     key_idx);
3626 
3627 	/* we don't handle unsetting of default key */
3628 	if (key_idx == -1)
3629 		return;
3630 
3631 	mutex_lock(&wl->mutex);
3632 
3633 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3634 		ret = -EAGAIN;
3635 		goto out_unlock;
3636 	}
3637 
3638 	ret = pm_runtime_get_sync(wl->dev);
3639 	if (ret < 0) {
3640 		pm_runtime_put_noidle(wl->dev);
3641 		goto out_unlock;
3642 	}
3643 
3644 	wlvif->default_key = key_idx;
3645 
3646 	/* the default WEP key needs to be configured at least once */
3647 	if (wlvif->encryption_type == KEY_WEP) {
3648 		ret = wl12xx_cmd_set_default_wep_key(wl,
3649 				key_idx,
3650 				wlvif->sta.hlid);
3651 		if (ret < 0)
3652 			goto out_sleep;
3653 	}
3654 
3655 out_sleep:
3656 	pm_runtime_mark_last_busy(wl->dev);
3657 	pm_runtime_put_autosuspend(wl->dev);
3658 
3659 out_unlock:
3660 	mutex_unlock(&wl->mutex);
3661 }
3662 
wlcore_regdomain_config(struct wl1271 * wl)3663 void wlcore_regdomain_config(struct wl1271 *wl)
3664 {
3665 	int ret;
3666 
3667 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3668 		return;
3669 
3670 	mutex_lock(&wl->mutex);
3671 
3672 	if (unlikely(wl->state != WLCORE_STATE_ON))
3673 		goto out;
3674 
3675 	ret = pm_runtime_get_sync(wl->dev);
3676 	if (ret < 0) {
3677 		pm_runtime_put_autosuspend(wl->dev);
3678 		goto out;
3679 	}
3680 
3681 	ret = wlcore_cmd_regdomain_config_locked(wl);
3682 	if (ret < 0) {
3683 		wl12xx_queue_recovery_work(wl);
3684 		goto out;
3685 	}
3686 
3687 	pm_runtime_mark_last_busy(wl->dev);
3688 	pm_runtime_put_autosuspend(wl->dev);
3689 out:
3690 	mutex_unlock(&wl->mutex);
3691 }
3692 
wl1271_op_hw_scan(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_scan_request * hw_req)3693 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3694 			     struct ieee80211_vif *vif,
3695 			     struct ieee80211_scan_request *hw_req)
3696 {
3697 	struct cfg80211_scan_request *req = &hw_req->req;
3698 	struct wl1271 *wl = hw->priv;
3699 	int ret;
3700 	u8 *ssid = NULL;
3701 	size_t len = 0;
3702 
3703 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3704 
3705 	if (req->n_ssids) {
3706 		ssid = req->ssids[0].ssid;
3707 		len = req->ssids[0].ssid_len;
3708 	}
3709 
3710 	mutex_lock(&wl->mutex);
3711 
3712 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3713 		/*
3714 		 * We cannot return -EBUSY here because cfg80211 will expect
3715 		 * a call to ieee80211_scan_completed if we do - in this case
3716 		 * there won't be any call.
3717 		 */
3718 		ret = -EAGAIN;
3719 		goto out;
3720 	}
3721 
3722 	ret = pm_runtime_get_sync(wl->dev);
3723 	if (ret < 0) {
3724 		pm_runtime_put_noidle(wl->dev);
3725 		goto out;
3726 	}
3727 
3728 	/* fail if there is any role in ROC */
3729 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3730 		/* don't allow scanning right now */
3731 		ret = -EBUSY;
3732 		goto out_sleep;
3733 	}
3734 
3735 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3736 out_sleep:
3737 	pm_runtime_mark_last_busy(wl->dev);
3738 	pm_runtime_put_autosuspend(wl->dev);
3739 out:
3740 	mutex_unlock(&wl->mutex);
3741 
3742 	return ret;
3743 }
3744 
wl1271_op_cancel_hw_scan(struct ieee80211_hw * hw,struct ieee80211_vif * vif)3745 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3746 				     struct ieee80211_vif *vif)
3747 {
3748 	struct wl1271 *wl = hw->priv;
3749 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3750 	struct cfg80211_scan_info info = {
3751 		.aborted = true,
3752 	};
3753 	int ret;
3754 
3755 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3756 
3757 	mutex_lock(&wl->mutex);
3758 
3759 	if (unlikely(wl->state != WLCORE_STATE_ON))
3760 		goto out;
3761 
3762 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3763 		goto out;
3764 
3765 	ret = pm_runtime_get_sync(wl->dev);
3766 	if (ret < 0) {
3767 		pm_runtime_put_noidle(wl->dev);
3768 		goto out;
3769 	}
3770 
3771 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3772 		ret = wl->ops->scan_stop(wl, wlvif);
3773 		if (ret < 0)
3774 			goto out_sleep;
3775 	}
3776 
3777 	/*
3778 	 * Rearm the tx watchdog just before idling scan. This
3779 	 * prevents just-finished scans from triggering the watchdog
3780 	 */
3781 	wl12xx_rearm_tx_watchdog_locked(wl);
3782 
3783 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3784 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3785 	wl->scan_wlvif = NULL;
3786 	wl->scan.req = NULL;
3787 	ieee80211_scan_completed(wl->hw, &info);
3788 
3789 out_sleep:
3790 	pm_runtime_mark_last_busy(wl->dev);
3791 	pm_runtime_put_autosuspend(wl->dev);
3792 out:
3793 	mutex_unlock(&wl->mutex);
3794 
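	/*
	 * Cancel the scan completion work only after dropping the mutex,
	 * since the work takes wl->mutex itself.
	 */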
3795 	cancel_delayed_work_sync(&wl->scan_complete_work);
3796 }
3797 
wl1271_op_sched_scan_start(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct cfg80211_sched_scan_request * req,struct ieee80211_scan_ies * ies)3798 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3799 				      struct ieee80211_vif *vif,
3800 				      struct cfg80211_sched_scan_request *req,
3801 				      struct ieee80211_scan_ies *ies)
3802 {
3803 	struct wl1271 *wl = hw->priv;
3804 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3805 	int ret;
3806 
3807 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3808 
3809 	mutex_lock(&wl->mutex);
3810 
3811 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3812 		ret = -EAGAIN;
3813 		goto out;
3814 	}
3815 
3816 	ret = pm_runtime_get_sync(wl->dev);
3817 	if (ret < 0) {
3818 		pm_runtime_put_noidle(wl->dev);
3819 		goto out;
3820 	}
3821 
3822 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3823 	if (ret < 0)
3824 		goto out_sleep;
3825 
3826 	wl->sched_vif = wlvif;
3827 
3828 out_sleep:
3829 	pm_runtime_mark_last_busy(wl->dev);
3830 	pm_runtime_put_autosuspend(wl->dev);
3831 out:
3832 	mutex_unlock(&wl->mutex);
3833 	return ret;
3834 }
3835 
wl1271_op_sched_scan_stop(struct ieee80211_hw * hw,struct ieee80211_vif * vif)3836 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3837 				     struct ieee80211_vif *vif)
3838 {
3839 	struct wl1271 *wl = hw->priv;
3840 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3841 	int ret;
3842 
3843 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3844 
3845 	mutex_lock(&wl->mutex);
3846 
3847 	if (unlikely(wl->state != WLCORE_STATE_ON))
3848 		goto out;
3849 
3850 	ret = pm_runtime_get_sync(wl->dev);
3851 	if (ret < 0) {
3852 		pm_runtime_put_noidle(wl->dev);
3853 		goto out;
3854 	}
3855 
3856 	wl->ops->sched_scan_stop(wl, wlvif);
3857 
3858 	pm_runtime_mark_last_busy(wl->dev);
3859 	pm_runtime_put_autosuspend(wl->dev);
3860 out:
3861 	mutex_unlock(&wl->mutex);
3862 
3863 	return 0;
3864 }
3865 
wl1271_op_set_frag_threshold(struct ieee80211_hw * hw,u32 value)3866 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3867 {
3868 	struct wl1271 *wl = hw->priv;
3869 	int ret = 0;
3870 
3871 	mutex_lock(&wl->mutex);
3872 
3873 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3874 		ret = -EAGAIN;
3875 		goto out;
3876 	}
3877 
3878 	ret = pm_runtime_get_sync(wl->dev);
3879 	if (ret < 0) {
3880 		pm_runtime_put_noidle(wl->dev);
3881 		goto out;
3882 	}
3883 
3884 	ret = wl1271_acx_frag_threshold(wl, value);
3885 	if (ret < 0)
3886 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3887 
3888 	pm_runtime_mark_last_busy(wl->dev);
3889 	pm_runtime_put_autosuspend(wl->dev);
3890 
3891 out:
3892 	mutex_unlock(&wl->mutex);
3893 
3894 	return ret;
3895 }
3896 
wl1271_op_set_rts_threshold(struct ieee80211_hw * hw,u32 value)3897 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3898 {
3899 	struct wl1271 *wl = hw->priv;
3900 	struct wl12xx_vif *wlvif;
3901 	int ret = 0;
3902 
3903 	mutex_lock(&wl->mutex);
3904 
3905 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3906 		ret = -EAGAIN;
3907 		goto out;
3908 	}
3909 
3910 	ret = pm_runtime_get_sync(wl->dev);
3911 	if (ret < 0) {
3912 		pm_runtime_put_noidle(wl->dev);
3913 		goto out;
3914 	}
3915 
3916 	wl12xx_for_each_wlvif(wl, wlvif) {
3917 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3918 		if (ret < 0)
3919 			wl1271_warning("set rts threshold failed: %d", ret);
3920 	}
3921 	pm_runtime_mark_last_busy(wl->dev);
3922 	pm_runtime_put_autosuspend(wl->dev);
3923 
3924 out:
3925 	mutex_unlock(&wl->mutex);
3926 
3927 	return ret;
3928 }
3929 
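/*
 * Remove the first IE matching @eid from the frame in @skb (searching
 * from @ieoffset onwards) and trim the skb accordingly.
 */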
wl12xx_remove_ie(struct sk_buff * skb,u8 eid,int ieoffset)3930 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3931 {
3932 	int len;
3933 	const u8 *next, *end = skb->data + skb->len;
3934 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3935 					skb->len - ieoffset);
3936 	if (!ie)
3937 		return;
3938 	len = ie[1] + 2;
3939 	next = ie + len;
3940 	memmove(ie, next, end - next);
3941 	skb_trim(skb, skb->len - len);
3942 }
3943 
wl12xx_remove_vendor_ie(struct sk_buff * skb,unsigned int oui,u8 oui_type,int ieoffset)3944 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3945 					    unsigned int oui, u8 oui_type,
3946 					    int ieoffset)
3947 {
3948 	int len;
3949 	const u8 *next, *end = skb->data + skb->len;
3950 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3951 					       skb->data + ieoffset,
3952 					       skb->len - ieoffset);
3953 	if (!ie)
3954 		return;
3955 	len = ie[1] + 2;
3956 	next = ie + len;
3957 	memmove(ie, next, end - next);
3958 	skb_trim(skb, skb->len - len);
3959 }
3960 
wl1271_ap_set_probe_resp_tmpl(struct wl1271 * wl,u32 rates,struct ieee80211_vif * vif)3961 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3962 					 struct ieee80211_vif *vif)
3963 {
3964 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3965 	struct sk_buff *skb;
3966 	int ret;
3967 
3968 	skb = ieee80211_proberesp_get(wl->hw, vif);
3969 	if (!skb)
3970 		return -EOPNOTSUPP;
3971 
3972 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3973 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3974 				      skb->data,
3975 				      skb->len, 0,
3976 				      rates);
3977 	dev_kfree_skb(skb);
3978 
3979 	if (ret < 0)
3980 		goto out;
3981 
3982 	wl1271_debug(DEBUG_AP, "probe response updated");
3983 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3984 
3985 out:
3986 	return ret;
3987 }
3988 
wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 * wl,struct ieee80211_vif * vif,u8 * probe_rsp_data,size_t probe_rsp_len,u32 rates)3989 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3990 					     struct ieee80211_vif *vif,
3991 					     u8 *probe_rsp_data,
3992 					     size_t probe_rsp_len,
3993 					     u32 rates)
3994 {
3995 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3996 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3997 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3998 	int ssid_ie_offset, ie_offset, templ_len;
3999 	const u8 *ptr;
4000 
4001 	/* no need to change probe response if the SSID is set correctly */
4002 	if (wlvif->ssid_len > 0)
4003 		return wl1271_cmd_template_set(wl, wlvif->role_id,
4004 					       CMD_TEMPL_AP_PROBE_RESPONSE,
4005 					       probe_rsp_data,
4006 					       probe_rsp_len, 0,
4007 					       rates);
4008 
4009 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
4010 		wl1271_error("probe_rsp template too big");
4011 		return -EINVAL;
4012 	}
4013 
4014 	/* start searching from IE offset */
4015 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4016 
4017 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4018 			       probe_rsp_len - ie_offset);
4019 	if (!ptr) {
4020 		wl1271_error("No SSID in beacon!");
4021 		return -EINVAL;
4022 	}
4023 
4024 	ssid_ie_offset = ptr - probe_rsp_data;
4025 	ptr += (ptr[1] + 2);
4026 
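	/*
	 * Rebuild the template: everything up to the original SSID IE,
	 * then an SSID IE taken from bss_conf, then the rest of the
	 * original frame.
	 */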
4027 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4028 
4029 	/* insert SSID from bss_conf */
4030 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4031 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4032 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4033 	       bss_conf->ssid, bss_conf->ssid_len);
4034 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
4035 
4036 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4037 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
4038 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4039 
4040 	return wl1271_cmd_template_set(wl, wlvif->role_id,
4041 				       CMD_TEMPL_AP_PROBE_RESPONSE,
4042 				       probe_rsp_templ,
4043 				       templ_len, 0,
4044 				       rates);
4045 }
4046 
wl1271_bss_erp_info_changed(struct wl1271 * wl,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u32 changed)4047 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4048 				       struct ieee80211_vif *vif,
4049 				       struct ieee80211_bss_conf *bss_conf,
4050 				       u32 changed)
4051 {
4052 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4053 	int ret = 0;
4054 
4055 	if (changed & BSS_CHANGED_ERP_SLOT) {
4056 		if (bss_conf->use_short_slot)
4057 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4058 		else
4059 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4060 		if (ret < 0) {
4061 			wl1271_warning("Set slot time failed %d", ret);
4062 			goto out;
4063 		}
4064 	}
4065 
4066 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4067 		if (bss_conf->use_short_preamble)
4068 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4069 		else
4070 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4071 	}
4072 
4073 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4074 		if (bss_conf->use_cts_prot)
4075 			ret = wl1271_acx_cts_protect(wl, wlvif,
4076 						     CTSPROTECT_ENABLE);
4077 		else
4078 			ret = wl1271_acx_cts_protect(wl, wlvif,
4079 						     CTSPROTECT_DISABLE);
4080 		if (ret < 0) {
4081 			wl1271_warning("Set ctsprotect failed %d", ret);
4082 			goto out;
4083 		}
4084 	}
4085 
4086 out:
4087 	return ret;
4088 }
4089 
wlcore_set_beacon_template(struct wl1271 * wl,struct ieee80211_vif * vif,bool is_ap)4090 static int wlcore_set_beacon_template(struct wl1271 *wl,
4091 				      struct ieee80211_vif *vif,
4092 				      bool is_ap)
4093 {
4094 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4095 	struct ieee80211_hdr *hdr;
4096 	u32 min_rate;
4097 	int ret;
4098 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4099 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4100 	u16 tmpl_id;
4101 
4102 	if (!beacon) {
4103 		ret = -EINVAL;
4104 		goto out;
4105 	}
4106 
4107 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4108 
4109 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4110 	if (ret < 0) {
4111 		dev_kfree_skb(beacon);
4112 		goto out;
4113 	}
4114 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4115 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4116 		CMD_TEMPL_BEACON;
4117 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4118 				      beacon->data,
4119 				      beacon->len, 0,
4120 				      min_rate);
4121 	if (ret < 0) {
4122 		dev_kfree_skb(beacon);
4123 		goto out;
4124 	}
4125 
4126 	wlvif->wmm_enabled =
4127 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4128 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4129 					beacon->data + ieoffset,
4130 					beacon->len - ieoffset);
4131 
4132 	/*
4133 	 * If a probe-resp template was already set explicitly by
4134 	 * userspace, don't derive one from the beacon data.
4135 	 */
4136 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4137 		goto end_bcn;
4138 
4139 	/* remove TIM ie from probe response */
4140 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4141 
4142 	/*
4143 	 * remove p2p ie from probe response.
4144 	 * the fw responds to probe requests that don't include
4145 	 * the p2p ie. probe requests with a p2p ie will be passed up,
4146 	 * and will be answered by the supplicant (the spec
4147 	 * forbids including the p2p ie when responding to probe
4148 	 * requests that didn't include it).
4149 	 */
4150 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4151 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4152 
4153 	hdr = (struct ieee80211_hdr *) beacon->data;
4154 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4155 					 IEEE80211_STYPE_PROBE_RESP);
4156 	if (is_ap)
4157 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4158 							   beacon->data,
4159 							   beacon->len,
4160 							   min_rate);
4161 	else
4162 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4163 					      CMD_TEMPL_PROBE_RESPONSE,
4164 					      beacon->data,
4165 					      beacon->len, 0,
4166 					      min_rate);
4167 end_bcn:
4168 	dev_kfree_skb(beacon);
4169 	if (ret < 0)
4170 		goto out;
4171 
4172 out:
4173 	return ret;
4174 }
4175 
wl1271_bss_beacon_info_changed(struct wl1271 * wl,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u32 changed)4176 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4177 					  struct ieee80211_vif *vif,
4178 					  struct ieee80211_bss_conf *bss_conf,
4179 					  u32 changed)
4180 {
4181 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4182 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4183 	int ret = 0;
4184 
4185 	if (changed & BSS_CHANGED_BEACON_INT) {
4186 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4187 			bss_conf->beacon_int);
4188 
4189 		wlvif->beacon_int = bss_conf->beacon_int;
4190 	}
4191 
4192 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4193 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4194 
4195 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4196 	}
4197 
4198 	if (changed & BSS_CHANGED_BEACON) {
4199 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4200 		if (ret < 0)
4201 			goto out;
4202 
4203 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4204 				       &wlvif->flags)) {
4205 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4206 			if (ret < 0)
4207 				goto out;
4208 		}
4209 	}
4210 out:
4211 	if (ret != 0)
4212 		wl1271_error("beacon info change failed: %d", ret);
4213 	return ret;
4214 }
4215 
4216 /* AP mode changes */
wl1271_bss_info_changed_ap(struct wl1271 * wl,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u32 changed)4217 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4218 				       struct ieee80211_vif *vif,
4219 				       struct ieee80211_bss_conf *bss_conf,
4220 				       u32 changed)
4221 {
4222 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4223 	int ret = 0;
4224 
4225 	if (changed & BSS_CHANGED_BASIC_RATES) {
4226 		u32 rates = bss_conf->basic_rates;
4227 
4228 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4229 								 wlvif->band);
4230 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4231 							wlvif->basic_rate_set);
4232 
4233 		ret = wl1271_init_ap_rates(wl, wlvif);
4234 		if (ret < 0) {
4235 			wl1271_error("AP rate policy change failed %d", ret);
4236 			goto out;
4237 		}
4238 
4239 		ret = wl1271_ap_init_templates(wl, vif);
4240 		if (ret < 0)
4241 			goto out;
4242 
4243 		/* No need to set probe resp template for mesh */
4244 		if (!ieee80211_vif_is_mesh(vif)) {
4245 			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4246 							    wlvif->basic_rate,
4247 							    vif);
4248 			if (ret < 0)
4249 				goto out;
4250 		}
4251 
4252 		ret = wlcore_set_beacon_template(wl, vif, true);
4253 		if (ret < 0)
4254 			goto out;
4255 	}
4256 
4257 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4258 	if (ret < 0)
4259 		goto out;
4260 
4261 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4262 		if (bss_conf->enable_beacon) {
4263 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4264 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4265 				if (ret < 0)
4266 					goto out;
4267 
4268 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4269 				if (ret < 0)
4270 					goto out;
4271 
4272 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4273 				wl1271_debug(DEBUG_AP, "started AP");
4274 			}
4275 		} else {
4276 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4277 				/*
4278 				 * The AP might still be in ROC if we have
4279 				 * just sent an auth reply. Handle it.
4280 				 */
4281 				if (test_bit(wlvif->role_id, wl->roc_map))
4282 					wl12xx_croc(wl, wlvif->role_id);
4283 
4284 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4285 				if (ret < 0)
4286 					goto out;
4287 
4288 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4289 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4290 					  &wlvif->flags);
4291 				wl1271_debug(DEBUG_AP, "stopped AP");
4292 			}
4293 		}
4294 	}
4295 
4296 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4297 	if (ret < 0)
4298 		goto out;
4299 
4300 	/* Handle HT information change */
4301 	if ((changed & BSS_CHANGED_HT) &&
4302 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4303 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4304 					bss_conf->ht_operation_mode);
4305 		if (ret < 0) {
4306 			wl1271_warning("Set ht information failed %d", ret);
4307 			goto out;
4308 		}
4309 	}
4310 
4311 out:
4312 	return;
4313 }
4314 
wlcore_set_bssid(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_bss_conf * bss_conf,u32 sta_rate_set)4315 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4316 			    struct ieee80211_bss_conf *bss_conf,
4317 			    u32 sta_rate_set)
4318 {
4319 	u32 rates;
4320 	int ret;
4321 
4322 	wl1271_debug(DEBUG_MAC80211,
4323 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4324 	     bss_conf->bssid, bss_conf->aid,
4325 	     bss_conf->beacon_int,
4326 	     bss_conf->basic_rates, sta_rate_set);
4327 
4328 	wlvif->beacon_int = bss_conf->beacon_int;
4329 	rates = bss_conf->basic_rates;
4330 	wlvif->basic_rate_set =
4331 		wl1271_tx_enabled_rates_get(wl, rates,
4332 					    wlvif->band);
4333 	wlvif->basic_rate =
4334 		wl1271_tx_min_rate_get(wl,
4335 				       wlvif->basic_rate_set);
4336 
4337 	if (sta_rate_set)
4338 		wlvif->rate_set =
4339 			wl1271_tx_enabled_rates_get(wl,
4340 						sta_rate_set,
4341 						wlvif->band);
4342 
4343 	/* we only support sched_scan while not connected */
4344 	if (wl->sched_vif == wlvif)
4345 		wl->ops->sched_scan_stop(wl, wlvif);
4346 
4347 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4348 	if (ret < 0)
4349 		return ret;
4350 
4351 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4352 	if (ret < 0)
4353 		return ret;
4354 
4355 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4356 	if (ret < 0)
4357 		return ret;
4358 
4359 	wlcore_set_ssid(wl, wlvif);
4360 
4361 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4362 
4363 	return 0;
4364 }
4365 
wlcore_clear_bssid(struct wl1271 * wl,struct wl12xx_vif * wlvif)4366 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4367 {
4368 	int ret;
4369 
4370 	/* revert back to minimum rates for the current band */
4371 	wl1271_set_band_rate(wl, wlvif);
4372 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4373 
4374 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4375 	if (ret < 0)
4376 		return ret;
4377 
4378 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4379 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4380 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4381 		if (ret < 0)
4382 			return ret;
4383 	}
4384 
4385 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4386 	return 0;
4387 }
4388 /* STA/IBSS mode changes */
wl1271_bss_info_changed_sta(struct wl1271 * wl,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u32 changed)4389 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4390 					struct ieee80211_vif *vif,
4391 					struct ieee80211_bss_conf *bss_conf,
4392 					u32 changed)
4393 {
4394 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4395 	bool do_join = false;
4396 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4397 	bool ibss_joined = false;
4398 	u32 sta_rate_set = 0;
4399 	int ret;
4400 	struct ieee80211_sta *sta;
4401 	bool sta_exists = false;
4402 	struct ieee80211_sta_ht_cap sta_ht_cap;
4403 
4404 	if (is_ibss) {
4405 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4406 						     changed);
4407 		if (ret < 0)
4408 			goto out;
4409 	}
4410 
4411 	if (changed & BSS_CHANGED_IBSS) {
4412 		if (bss_conf->ibss_joined) {
4413 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4414 			ibss_joined = true;
4415 		} else {
4416 			wlcore_unset_assoc(wl, wlvif);
4417 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4418 		}
4419 	}
4420 
4421 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4422 		do_join = true;
4423 
4424 	/* Need to update the SSID (for filtering etc) */
4425 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4426 		do_join = true;
4427 
4428 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4429 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4430 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4431 
4432 		do_join = true;
4433 	}
4434 
4435 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4436 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4437 
4438 	if (changed & BSS_CHANGED_CQM) {
4439 		bool enable = false;
4440 		if (bss_conf->cqm_rssi_thold)
4441 			enable = true;
4442 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4443 						  bss_conf->cqm_rssi_thold,
4444 						  bss_conf->cqm_rssi_hyst);
4445 		if (ret < 0)
4446 			goto out;
4447 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4448 	}
4449 
4450 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4451 		       BSS_CHANGED_ASSOC)) {
4452 		rcu_read_lock();
4453 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4454 		if (sta) {
4455 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4456 
4457 			/* save the supp_rates of the ap */
4458 			sta_rate_set = sta->supp_rates[wlvif->band];
4459 			if (sta->ht_cap.ht_supported)
4460 				sta_rate_set |=
4461 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4462 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4463 			sta_ht_cap = sta->ht_cap;
4464 			sta_exists = true;
4465 		}
4466 
4467 		rcu_read_unlock();
4468 	}
4469 
4470 	if (changed & BSS_CHANGED_BSSID) {
4471 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4472 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4473 					       sta_rate_set);
4474 			if (ret < 0)
4475 				goto out;
4476 
4477 			/* Need to update the BSSID (for filtering etc) */
4478 			do_join = true;
4479 		} else {
4480 			ret = wlcore_clear_bssid(wl, wlvif);
4481 			if (ret < 0)
4482 				goto out;
4483 		}
4484 	}
4485 
4486 	if (changed & BSS_CHANGED_IBSS) {
4487 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4488 			     bss_conf->ibss_joined);
4489 
4490 		if (bss_conf->ibss_joined) {
4491 			u32 rates = bss_conf->basic_rates;
4492 			wlvif->basic_rate_set =
4493 				wl1271_tx_enabled_rates_get(wl, rates,
4494 							    wlvif->band);
4495 			wlvif->basic_rate =
4496 				wl1271_tx_min_rate_get(wl,
4497 						       wlvif->basic_rate_set);
4498 
4499 			/* by default, use 11b + OFDM rates */
4500 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4501 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4502 			if (ret < 0)
4503 				goto out;
4504 		}
4505 	}
4506 
4507 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4508 		/* enable beacon filtering */
4509 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4510 		if (ret < 0)
4511 			goto out;
4512 	}
4513 
4514 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4515 	if (ret < 0)
4516 		goto out;
4517 
4518 	if (do_join) {
4519 		ret = wlcore_join(wl, wlvif);
4520 		if (ret < 0) {
4521 			wl1271_warning("cmd join failed %d", ret);
4522 			goto out;
4523 		}
4524 	}
4525 
4526 	if (changed & BSS_CHANGED_ASSOC) {
4527 		if (bss_conf->assoc) {
4528 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4529 					       sta_rate_set);
4530 			if (ret < 0)
4531 				goto out;
4532 
4533 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4534 				wl12xx_set_authorized(wl, wlvif);
4535 		} else {
4536 			wlcore_unset_assoc(wl, wlvif);
4537 		}
4538 	}
4539 
4540 	if (changed & BSS_CHANGED_PS) {
4541 		if ((bss_conf->ps) &&
4542 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4543 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4544 			int ps_mode;
4545 			char *ps_mode_str;
4546 
4547 			if (wl->conf.conn.forced_ps) {
4548 				ps_mode = STATION_POWER_SAVE_MODE;
4549 				ps_mode_str = "forced";
4550 			} else {
4551 				ps_mode = STATION_AUTO_PS_MODE;
4552 				ps_mode_str = "auto";
4553 			}
4554 
4555 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4556 
4557 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4558 			if (ret < 0)
4559 				wl1271_warning("enter %s ps failed %d",
4560 					       ps_mode_str, ret);
4561 		} else if (!bss_conf->ps &&
4562 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4563 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4564 
4565 			ret = wl1271_ps_set_mode(wl, wlvif,
4566 						 STATION_ACTIVE_MODE);
4567 			if (ret < 0)
4568 				wl1271_warning("exit auto ps failed %d", ret);
4569 		}
4570 	}
4571 
4572 	/* Handle new association with HT. Do this after join. */
4573 	if (sta_exists) {
4574 		bool enabled =
4575 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4576 
4577 		ret = wlcore_hw_set_peer_cap(wl,
4578 					     &sta_ht_cap,
4579 					     enabled,
4580 					     wlvif->rate_set,
4581 					     wlvif->sta.hlid);
4582 		if (ret < 0) {
4583 			wl1271_warning("Set ht cap failed %d", ret);
4584 			goto out;
4586 		}
4587 
4588 		if (enabled) {
4589 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4590 						bss_conf->ht_operation_mode);
4591 			if (ret < 0) {
4592 				wl1271_warning("Set ht information failed %d",
4593 					       ret);
4594 				goto out;
4595 			}
4596 		}
4597 	}
4598 
4599 	/* Handle arp filtering. Done after join. */
4600 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4601 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4602 		__be32 addr = bss_conf->arp_addr_list[0];
4603 		wlvif->sta.qos = bss_conf->qos;
4604 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4605 
4606 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4607 			wlvif->ip_addr = addr;
4608 			/*
4609 			 * The template should have been configured only upon
4610 			 * association. However, it seems that the correct IP
4611 			 * isn't being set (when sending), so we have to
4612 			 * reconfigure the template upon every IP change.
4613 			 */
4614 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4615 			if (ret < 0) {
4616 				wl1271_warning("build arp rsp failed: %d", ret);
4617 				goto out;
4618 			}
4619 
4620 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4621 				(ACX_ARP_FILTER_ARP_FILTERING |
4622 				 ACX_ARP_FILTER_AUTO_ARP),
4623 				addr);
4624 		} else {
4625 			wlvif->ip_addr = 0;
4626 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4627 		}
4628 
4629 		if (ret < 0)
4630 			goto out;
4631 	}
4632 
4633 out:
4634 	return;
4635 }
4636 
wl1271_op_bss_info_changed(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_bss_conf * bss_conf,u32 changed)4637 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4638 				       struct ieee80211_vif *vif,
4639 				       struct ieee80211_bss_conf *bss_conf,
4640 				       u32 changed)
4641 {
4642 	struct wl1271 *wl = hw->priv;
4643 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4644 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4645 	int ret;
4646 
4647 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4648 		     wlvif->role_id, (int)changed);
4649 
4650 	/*
4651 	 * make sure to cancel pending disconnections if our association
4652 	 * state changed
4653 	 */
4654 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4655 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4656 
4657 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4658 	    !bss_conf->enable_beacon)
4659 		wl1271_tx_flush(wl);
4660 
4661 	mutex_lock(&wl->mutex);
4662 
4663 	if (unlikely(wl->state != WLCORE_STATE_ON))
4664 		goto out;
4665 
4666 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4667 		goto out;
4668 
4669 	ret = pm_runtime_get_sync(wl->dev);
4670 	if (ret < 0) {
4671 		pm_runtime_put_noidle(wl->dev);
4672 		goto out;
4673 	}
4674 
4675 	if ((changed & BSS_CHANGED_TXPOWER) &&
4676 	    bss_conf->txpower != wlvif->power_level) {
4677 
4678 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4679 		if (ret < 0)
4680 			goto out;
4681 
4682 		wlvif->power_level = bss_conf->txpower;
4683 	}
4684 
4685 	if (is_ap)
4686 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4687 	else
4688 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4689 
4690 	pm_runtime_mark_last_busy(wl->dev);
4691 	pm_runtime_put_autosuspend(wl->dev);
4692 
4693 out:
4694 	mutex_unlock(&wl->mutex);
4695 }
4696 
wlcore_op_add_chanctx(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * ctx)4697 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4698 				 struct ieee80211_chanctx_conf *ctx)
4699 {
4700 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4701 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4702 		     cfg80211_get_chandef_type(&ctx->def));
4703 	return 0;
4704 }
4705 
wlcore_op_remove_chanctx(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * ctx)4706 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4707 				     struct ieee80211_chanctx_conf *ctx)
4708 {
4709 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4710 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4711 		     cfg80211_get_chandef_type(&ctx->def));
4712 }
4713 
wlcore_op_change_chanctx(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * ctx,u32 changed)4714 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4715 				     struct ieee80211_chanctx_conf *ctx,
4716 				     u32 changed)
4717 {
4718 	struct wl1271 *wl = hw->priv;
4719 	struct wl12xx_vif *wlvif;
4720 	int ret;
4721 	int channel = ieee80211_frequency_to_channel(
4722 		ctx->def.chan->center_freq);
4723 
4724 	wl1271_debug(DEBUG_MAC80211,
4725 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4726 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4727 
4728 	mutex_lock(&wl->mutex);
4729 
4730 	ret = pm_runtime_get_sync(wl->dev);
4731 	if (ret < 0) {
4732 		pm_runtime_put_noidle(wl->dev);
4733 		goto out;
4734 	}
4735 
4736 	wl12xx_for_each_wlvif(wl, wlvif) {
4737 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4738 
4739 		rcu_read_lock();
4740 		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4741 			rcu_read_unlock();
4742 			continue;
4743 		}
4744 		rcu_read_unlock();
4745 
4746 		/* start radar if needed */
4747 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4748 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4749 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4750 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4751 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4752 			wlcore_hw_set_cac(wl, wlvif, true);
4753 			wlvif->radar_enabled = true;
4754 		}
4755 	}
4756 
4757 	pm_runtime_mark_last_busy(wl->dev);
4758 	pm_runtime_put_autosuspend(wl->dev);
4759 out:
4760 	mutex_unlock(&wl->mutex);
4761 }
4762 
wlcore_op_assign_vif_chanctx(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_chanctx_conf * ctx)4763 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4764 					struct ieee80211_vif *vif,
4765 					struct ieee80211_chanctx_conf *ctx)
4766 {
4767 	struct wl1271 *wl = hw->priv;
4768 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4769 	int channel = ieee80211_frequency_to_channel(
4770 		ctx->def.chan->center_freq);
4771 	int ret = -EINVAL;
4772 
4773 	wl1271_debug(DEBUG_MAC80211,
4774 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4775 		     wlvif->role_id, channel,
4776 		     cfg80211_get_chandef_type(&ctx->def),
4777 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4778 
4779 	mutex_lock(&wl->mutex);
4780 
4781 	if (unlikely(wl->state != WLCORE_STATE_ON))
4782 		goto out;
4783 
4784 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4785 		goto out;
4786 
4787 	ret = pm_runtime_get_sync(wl->dev);
4788 	if (ret < 0) {
4789 		pm_runtime_put_noidle(wl->dev);
4790 		goto out;
4791 	}
4792 
4793 	wlvif->band = ctx->def.chan->band;
4794 	wlvif->channel = channel;
4795 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4796 
4797 	/* update default rates according to the band */
4798 	wl1271_set_band_rate(wl, wlvif);
4799 
4800 	if (ctx->radar_enabled &&
4801 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4802 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4803 		wlcore_hw_set_cac(wl, wlvif, true);
4804 		wlvif->radar_enabled = true;
4805 	}
4806 
4807 	pm_runtime_mark_last_busy(wl->dev);
4808 	pm_runtime_put_autosuspend(wl->dev);
4809 out:
4810 	mutex_unlock(&wl->mutex);
4811 
4812 	return 0;
4813 }
4814 
wlcore_op_unassign_vif_chanctx(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_chanctx_conf * ctx)4815 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4816 					   struct ieee80211_vif *vif,
4817 					   struct ieee80211_chanctx_conf *ctx)
4818 {
4819 	struct wl1271 *wl = hw->priv;
4820 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4821 	int ret;
4822 
4823 	wl1271_debug(DEBUG_MAC80211,
4824 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4825 		     wlvif->role_id,
4826 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4827 		     cfg80211_get_chandef_type(&ctx->def));
4828 
4829 	wl1271_tx_flush(wl);
4830 
4831 	mutex_lock(&wl->mutex);
4832 
4833 	if (unlikely(wl->state != WLCORE_STATE_ON))
4834 		goto out;
4835 
4836 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4837 		goto out;
4838 
4839 	ret = pm_runtime_get_sync(wl->dev);
4840 	if (ret < 0) {
4841 		pm_runtime_put_noidle(wl->dev);
4842 		goto out;
4843 	}
4844 
4845 	if (wlvif->radar_enabled) {
4846 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4847 		wlcore_hw_set_cac(wl, wlvif, false);
4848 		wlvif->radar_enabled = false;
4849 	}
4850 
4851 	pm_runtime_mark_last_busy(wl->dev);
4852 	pm_runtime_put_autosuspend(wl->dev);
4853 out:
4854 	mutex_unlock(&wl->mutex);
4855 }
4856 
__wlcore_switch_vif_chan(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_chanctx_conf * new_ctx)4857 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4858 				    struct wl12xx_vif *wlvif,
4859 				    struct ieee80211_chanctx_conf *new_ctx)
4860 {
4861 	int channel = ieee80211_frequency_to_channel(
4862 		new_ctx->def.chan->center_freq);
4863 
4864 	wl1271_debug(DEBUG_MAC80211,
4865 		     "switch vif (role %d) %d -> %d chan_type: %d",
4866 		     wlvif->role_id, wlvif->channel, channel,
4867 		     cfg80211_get_chandef_type(&new_ctx->def));
4868 
4869 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4870 		return 0;
4871 
4872 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4873 
4874 	if (wlvif->radar_enabled) {
4875 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4876 		wlcore_hw_set_cac(wl, wlvif, false);
4877 		wlvif->radar_enabled = false;
4878 	}
4879 
4880 	wlvif->band = new_ctx->def.chan->band;
4881 	wlvif->channel = channel;
4882 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4883 
4884 	/* start radar if needed */
4885 	if (new_ctx->radar_enabled) {
4886 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4887 		wlcore_hw_set_cac(wl, wlvif, true);
4888 		wlvif->radar_enabled = true;
4889 	}
4890 
4891 	return 0;
4892 }
4893 
4894 static int
wlcore_op_switch_vif_chanctx(struct ieee80211_hw * hw,struct ieee80211_vif_chanctx_switch * vifs,int n_vifs,enum ieee80211_chanctx_switch_mode mode)4895 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4896 			     struct ieee80211_vif_chanctx_switch *vifs,
4897 			     int n_vifs,
4898 			     enum ieee80211_chanctx_switch_mode mode)
4899 {
4900 	struct wl1271 *wl = hw->priv;
4901 	int i, ret;
4902 
4903 	wl1271_debug(DEBUG_MAC80211,
4904 		     "mac80211 switch chanctx n_vifs %d mode %d",
4905 		     n_vifs, mode);
4906 
4907 	mutex_lock(&wl->mutex);
4908 
4909 	ret = pm_runtime_get_sync(wl->dev);
4910 	if (ret < 0) {
4911 		pm_runtime_put_noidle(wl->dev);
4912 		goto out;
4913 	}
4914 
4915 	for (i = 0; i < n_vifs; i++) {
4916 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4917 
4918 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4919 		if (ret)
4920 			goto out_sleep;
4921 	}
4922 out_sleep:
4923 	pm_runtime_mark_last_busy(wl->dev);
4924 	pm_runtime_put_autosuspend(wl->dev);
4925 out:
4926 	mutex_unlock(&wl->mutex);
4927 
4928 	return 0;
4929 }
4930 
wl1271_op_conf_tx(struct ieee80211_hw * hw,struct ieee80211_vif * vif,u16 queue,const struct ieee80211_tx_queue_params * params)4931 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4932 			     struct ieee80211_vif *vif, u16 queue,
4933 			     const struct ieee80211_tx_queue_params *params)
4934 {
4935 	struct wl1271 *wl = hw->priv;
4936 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4937 	u8 ps_scheme;
4938 	int ret = 0;
4939 
4940 	if (wlcore_is_p2p_mgmt(wlvif))
4941 		return 0;
4942 
4943 	mutex_lock(&wl->mutex);
4944 
4945 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4946 
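	/*
	 * Use U-APSD triggered delivery on ACs where mac80211 enabled it,
	 * and legacy PS delivery otherwise.
	 */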
4947 	if (params->uapsd)
4948 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4949 	else
4950 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4951 
4952 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4953 		goto out;
4954 
4955 	ret = pm_runtime_get_sync(wl->dev);
4956 	if (ret < 0) {
4957 		pm_runtime_put_noidle(wl->dev);
4958 		goto out;
4959 	}
4960 
4961 	/*
4962 	 * mac80211 configures the txop in units of 32us, but the
4963 	 * firmware expects microseconds, hence the << 5 below.
4964 	 */
4965 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4966 				params->cw_min, params->cw_max,
4967 				params->aifs, params->txop << 5);
4968 	if (ret < 0)
4969 		goto out_sleep;
4970 
4971 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4972 				 CONF_CHANNEL_TYPE_EDCF,
4973 				 wl1271_tx_get_queue(queue),
4974 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4975 				 0, 0);
4976 
4977 out_sleep:
4978 	pm_runtime_mark_last_busy(wl->dev);
4979 	pm_runtime_put_autosuspend(wl->dev);
4980 
4981 out:
4982 	mutex_unlock(&wl->mutex);
4983 
4984 	return ret;
4985 }
4986 
wl1271_op_get_tsf(struct ieee80211_hw * hw,struct ieee80211_vif * vif)4987 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4988 			     struct ieee80211_vif *vif)
4989 {
4991 	struct wl1271 *wl = hw->priv;
4992 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4993 	u64 mactime = ULLONG_MAX;
4994 	int ret;
4995 
4996 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4997 
4998 	mutex_lock(&wl->mutex);
4999 
5000 	if (unlikely(wl->state != WLCORE_STATE_ON))
5001 		goto out;
5002 
5003 	ret = pm_runtime_get_sync(wl->dev);
5004 	if (ret < 0) {
5005 		pm_runtime_put_noidle(wl->dev);
5006 		goto out;
5007 	}
5008 
5009 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
5010 	if (ret < 0)
5011 		goto out_sleep;
5012 
5013 out_sleep:
5014 	pm_runtime_mark_last_busy(wl->dev);
5015 	pm_runtime_put_autosuspend(wl->dev);
5016 
5017 out:
5018 	mutex_unlock(&wl->mutex);
5019 	return mactime;
5020 }
5021 
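/*
 * Report a single, minimal survey entry (idx 0 only): the current channel,
 * with no statistics filled in.
 */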
wl1271_op_get_survey(struct ieee80211_hw * hw,int idx,struct survey_info * survey)5022 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5023 				struct survey_info *survey)
5024 {
5025 	struct ieee80211_conf *conf = &hw->conf;
5026 
5027 	if (idx != 0)
5028 		return -ENOENT;
5029 
5030 	survey->channel = conf->chandef.chan;
5031 	survey->filled = 0;
5032 	return 0;
5033 }
5034 
wl1271_allocate_sta(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta)5035 static int wl1271_allocate_sta(struct wl1271 *wl,
5036 			     struct wl12xx_vif *wlvif,
5037 			     struct ieee80211_sta *sta)
5038 {
5039 	struct wl1271_station *wl_sta;
5040 	int ret;
5041 
5043 	if (wl->active_sta_count >= wl->max_ap_stations) {
5044 		wl1271_warning("could not allocate HLID - too many stations");
5045 		return -EBUSY;
5046 	}
5047 
5048 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5049 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5050 	if (ret < 0) {
5051 		wl1271_warning("could not allocate HLID - too many links");
5052 		return -EBUSY;
5053 	}
5054 
5055 	/* use the previous security seq, if this is a recovery/resume */
5056 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5057 
5058 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5059 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5060 	wl->active_sta_count++;
5061 	return 0;
5062 }
5063 
wl1271_free_sta(struct wl1271 * wl,struct wl12xx_vif * wlvif,u8 hlid)5064 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5065 {
5066 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5067 		return;
5068 
5069 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
5070 	__clear_bit(hlid, &wl->ap_ps_map);
5071 	__clear_bit(hlid, &wl->ap_fw_ps_map);
5072 
5073 	/*
5074 	 * save the last used PN in the private part of ieee80211_sta,
5075 	 * in case of recovery/suspend
5076 	 */
5077 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5078 
5079 	wl12xx_free_link(wl, wlvif, &hlid);
5080 	wl->active_sta_count--;
5081 
5082 	/*
5083 	 * rearm the tx watchdog when the last STA is freed - give the FW a
5084 	 * chance to return STA-buffered packets before complaining.
5085 	 */
5086 	if (wl->active_sta_count == 0)
5087 		wl12xx_rearm_tx_watchdog_locked(wl);
5088 }
5089 
wl12xx_sta_add(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta)5090 static int wl12xx_sta_add(struct wl1271 *wl,
5091 			  struct wl12xx_vif *wlvif,
5092 			  struct ieee80211_sta *sta)
5093 {
5094 	struct wl1271_station *wl_sta;
5095 	int ret = 0;
5096 	u8 hlid;
5097 
5098 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5099 
5100 	ret = wl1271_allocate_sta(wl, wlvif, sta);
5101 	if (ret < 0)
5102 		return ret;
5103 
5104 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5105 	hlid = wl_sta->hlid;
5106 
5107 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5108 	if (ret < 0)
5109 		wl1271_free_sta(wl, wlvif, hlid);
5110 
5111 	return ret;
5112 }
5113 
wl12xx_sta_remove(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta)5114 static int wl12xx_sta_remove(struct wl1271 *wl,
5115 			     struct wl12xx_vif *wlvif,
5116 			     struct ieee80211_sta *sta)
5117 {
5118 	struct wl1271_station *wl_sta;
5119 	int ret = 0, id;
5120 
5121 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5122 
5123 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5124 	id = wl_sta->hlid;
5125 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5126 		return -EINVAL;
5127 
5128 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5129 	if (ret < 0)
5130 		return ret;
5131 
5132 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5133 	return ret;
5134 }
5135 
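/*
 * Start a ROC on this vif's role, unless some role is already in ROC or
 * the role id is invalid.
 */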
wlcore_roc_if_possible(struct wl1271 * wl,struct wl12xx_vif * wlvif)5136 static void wlcore_roc_if_possible(struct wl1271 *wl,
5137 				   struct wl12xx_vif *wlvif)
5138 {
5139 	if (find_first_bit(wl->roc_map,
5140 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5141 		return;
5142 
5143 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5144 		return;
5145 
5146 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5147 }
5148 
5149 /*
5150  * when wl_sta is NULL, we treat this call as if coming from a
5151  * pending auth reply.
5152  * wl->mutex must be taken and the FW must be awake when the call
5153  * takes place.
5154  */
wlcore_update_inconn_sta(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct wl1271_station * wl_sta,bool in_conn)5155 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5156 			      struct wl1271_station *wl_sta, bool in_conn)
5157 {
5158 	if (in_conn) {
5159 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5160 			return;
5161 
5162 		if (!wlvif->ap_pending_auth_reply &&
5163 		    !wlvif->inconn_count)
5164 			wlcore_roc_if_possible(wl, wlvif);
5165 
5166 		if (wl_sta) {
5167 			wl_sta->in_connection = true;
5168 			wlvif->inconn_count++;
5169 		} else {
5170 			wlvif->ap_pending_auth_reply = true;
5171 		}
5172 	} else {
5173 		if (wl_sta && !wl_sta->in_connection)
5174 			return;
5175 
5176 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5177 			return;
5178 
5179 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5180 			return;
5181 
5182 		if (wl_sta) {
5183 			wl_sta->in_connection = false;
5184 			wlvif->inconn_count--;
5185 		} else {
5186 			wlvif->ap_pending_auth_reply = false;
5187 		}
5188 
5189 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5190 		    test_bit(wlvif->role_id, wl->roc_map))
5191 			wl12xx_croc(wl, wlvif->role_id);
5192 	}
5193 }
5194 
wl12xx_update_sta_state(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_sta * sta,enum ieee80211_sta_state old_state,enum ieee80211_sta_state new_state)5195 static int wl12xx_update_sta_state(struct wl1271 *wl,
5196 				   struct wl12xx_vif *wlvif,
5197 				   struct ieee80211_sta *sta,
5198 				   enum ieee80211_sta_state old_state,
5199 				   enum ieee80211_sta_state new_state)
5200 {
5201 	struct wl1271_station *wl_sta;
5202 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5203 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5204 	int ret;
5205 
5206 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5207 
5208 	/* Add station (AP mode) */
5209 	if (is_ap &&
5210 	    old_state == IEEE80211_STA_NOTEXIST &&
5211 	    new_state == IEEE80211_STA_NONE) {
5212 		ret = wl12xx_sta_add(wl, wlvif, sta);
5213 		if (ret)
5214 			return ret;
5215 
5216 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5217 	}
5218 
5219 	/* Remove station (AP mode) */
5220 	if (is_ap &&
5221 	    old_state == IEEE80211_STA_NONE &&
5222 	    new_state == IEEE80211_STA_NOTEXIST) {
5223 		/* must not fail */
5224 		wl12xx_sta_remove(wl, wlvif, sta);
5225 
5226 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5227 	}
5228 
5229 	/* Authorize station (AP mode) */
5230 	if (is_ap &&
5231 	    new_state == IEEE80211_STA_AUTHORIZED) {
5232 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5233 		if (ret < 0)
5234 			return ret;
5235 
5236 		/* reconfigure rates */
5237 		ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5238 		if (ret < 0)
5239 			return ret;
5240 
5241 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5242 						     wl_sta->hlid);
5243 		if (ret)
5244 			return ret;
5245 
5246 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5247 	}
5248 
5249 	/* Authorize station */
5250 	if (is_sta &&
5251 	    new_state == IEEE80211_STA_AUTHORIZED) {
5252 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5253 		ret = wl12xx_set_authorized(wl, wlvif);
5254 		if (ret)
5255 			return ret;
5256 	}
5257 
5258 	if (is_sta &&
5259 	    old_state == IEEE80211_STA_AUTHORIZED &&
5260 	    new_state == IEEE80211_STA_ASSOC) {
5261 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5262 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5263 	}
5264 
5265 	/* save seq number on disassoc (suspend) */
5266 	if (is_sta &&
5267 	    old_state == IEEE80211_STA_ASSOC &&
5268 	    new_state == IEEE80211_STA_AUTH) {
5269 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5270 		wlvif->total_freed_pkts = 0;
5271 	}
5272 
5273 	/* restore seq number on assoc (resume) */
5274 	if (is_sta &&
5275 	    old_state == IEEE80211_STA_AUTH &&
5276 	    new_state == IEEE80211_STA_ASSOC) {
5277 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5278 	}
5279 
5280 	/* clear ROCs on failure or authorization */
5281 	if (is_sta &&
5282 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5283 	     new_state == IEEE80211_STA_NOTEXIST)) {
5284 		if (test_bit(wlvif->role_id, wl->roc_map))
5285 			wl12xx_croc(wl, wlvif->role_id);
5286 	}
5287 
5288 	if (is_sta &&
5289 	    old_state == IEEE80211_STA_NOTEXIST &&
5290 	    new_state == IEEE80211_STA_NONE) {
5291 		if (find_first_bit(wl->roc_map,
5292 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5293 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5294 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5295 				   wlvif->band, wlvif->channel);
5296 		}
5297 	}
5298 	return 0;
5299 }
5300 
wl12xx_op_sta_state(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta,enum ieee80211_sta_state old_state,enum ieee80211_sta_state new_state)5301 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5302 			       struct ieee80211_vif *vif,
5303 			       struct ieee80211_sta *sta,
5304 			       enum ieee80211_sta_state old_state,
5305 			       enum ieee80211_sta_state new_state)
5306 {
5307 	struct wl1271 *wl = hw->priv;
5308 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5309 	int ret;
5310 
5311 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5312 		     sta->aid, old_state, new_state);
5313 
5314 	mutex_lock(&wl->mutex);
5315 
5316 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5317 		ret = -EBUSY;
5318 		goto out;
5319 	}
5320 
5321 	ret = pm_runtime_get_sync(wl->dev);
5322 	if (ret < 0) {
5323 		pm_runtime_put_noidle(wl->dev);
5324 		goto out;
5325 	}
5326 
5327 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5328 
5329 	pm_runtime_mark_last_busy(wl->dev);
5330 	pm_runtime_put_autosuspend(wl->dev);
5331 out:
5332 	mutex_unlock(&wl->mutex);
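	/* transitions to a lower state must not fail, so don't report errors on the way down */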
5333 	if (new_state < old_state)
5334 		return 0;
5335 	return ret;
5336 }
5337 
wl1271_op_ampdu_action(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_ampdu_params * params)5338 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5339 				  struct ieee80211_vif *vif,
5340 				  struct ieee80211_ampdu_params *params)
5341 {
5342 	struct wl1271 *wl = hw->priv;
5343 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5344 	int ret;
5345 	u8 hlid, *ba_bitmap;
5346 	struct ieee80211_sta *sta = params->sta;
5347 	enum ieee80211_ampdu_mlme_action action = params->action;
5348 	u16 tid = params->tid;
5349 	u16 *ssn = &params->ssn;
5350 
5351 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5352 		     tid);
5353 
5354 	/* sanity check - the fields in FW are only 8 bits wide */
5355 	if (WARN_ON(tid > 0xFF))
5356 		return -ENOTSUPP;
5357 
5358 	mutex_lock(&wl->mutex);
5359 
5360 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5361 		ret = -EAGAIN;
5362 		goto out;
5363 	}
5364 
5365 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5366 		hlid = wlvif->sta.hlid;
5367 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5368 		struct wl1271_station *wl_sta;
5369 
5370 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5371 		hlid = wl_sta->hlid;
5372 	} else {
5373 		ret = -EINVAL;
5374 		goto out;
5375 	}
5376 
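	/* per-link bitmap of TIDs that currently have an RX BA session */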
5377 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5378 
5379 	ret = pm_runtime_get_sync(wl->dev);
5380 	if (ret < 0) {
5381 		pm_runtime_put_noidle(wl->dev);
5382 		goto out;
5383 	}
5384 
5385 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5386 		     tid, action);
5387 
5388 	switch (action) {
5389 	case IEEE80211_AMPDU_RX_START:
5390 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5391 			ret = -ENOTSUPP;
5392 			break;
5393 		}
5394 
5395 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5396 			ret = -EBUSY;
5397 			wl1271_error("exceeded max RX BA sessions");
5398 			break;
5399 		}
5400 
5401 		if (*ba_bitmap & BIT(tid)) {
5402 			ret = -EINVAL;
5403 			wl1271_error("cannot enable RX BA session on active "
5404 				     "tid: %d", tid);
5405 			break;
5406 		}
5407 
5408 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5409 				hlid,
5410 				params->buf_size);
5411 
5412 		if (!ret) {
5413 			*ba_bitmap |= BIT(tid);
5414 			wl->ba_rx_session_count++;
5415 		}
5416 		break;
5417 
5418 	case IEEE80211_AMPDU_RX_STOP:
5419 		if (!(*ba_bitmap & BIT(tid))) {
5420 			/*
5421 			 * this happens on reconfig - so only output a debug
5422 			 * message for now, and don't fail the function.
5423 			 */
5424 			wl1271_debug(DEBUG_MAC80211,
5425 				     "no active RX BA session on tid: %d",
5426 				     tid);
5427 			ret = 0;
5428 			break;
5429 		}
5430 
5431 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5432 							 hlid, 0);
5433 		if (!ret) {
5434 			*ba_bitmap &= ~BIT(tid);
5435 			wl->ba_rx_session_count--;
5436 		}
5437 		break;
5438 
5439 	/*
5440 	 * BA initiator (TX) sessions are managed independently by the FW.
5441 	 * All TX AMPDU actions intentionally share the -EINVAL break below.
5442 	 */
5443 	case IEEE80211_AMPDU_TX_START:
5444 	case IEEE80211_AMPDU_TX_STOP_CONT:
5445 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5446 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5447 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5448 		ret = -EINVAL;
5449 		break;
5450 
5451 	default:
5452 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5453 		ret = -EINVAL;
5454 	}
5455 
5456 	pm_runtime_mark_last_busy(wl->dev);
5457 	pm_runtime_put_autosuspend(wl->dev);
5458 
5459 out:
5460 	mutex_unlock(&wl->mutex);
5461 
5462 	return ret;
5463 }
5464 
5465 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5466 				   struct ieee80211_vif *vif,
5467 				   const struct cfg80211_bitrate_mask *mask)
5468 {
5469 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5470 	struct wl1271 *wl = hw->priv;
5471 	int i, ret = 0;
5472 
5473 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5474 		mask->control[NL80211_BAND_2GHZ].legacy,
5475 		mask->control[NL80211_BAND_5GHZ].legacy);
5476 
5477 	mutex_lock(&wl->mutex);
5478 
5479 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5480 		wlvif->bitrate_masks[i] =
5481 			wl1271_tx_enabled_rates_get(wl,
5482 						    mask->control[i].legacy,
5483 						    i);
5484 
5485 	if (unlikely(wl->state != WLCORE_STATE_ON))
5486 		goto out;
5487 
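	/*
	 * When we are a station and not yet associated, the new mask can be
	 * pushed to the FW rate policies right away.
	 */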
5488 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5489 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5490 
5491 		ret = pm_runtime_get_sync(wl->dev);
5492 		if (ret < 0) {
5493 			pm_runtime_put_noidle(wl->dev);
5494 			goto out;
5495 		}
5496 
5497 		wl1271_set_band_rate(wl, wlvif);
5498 		wlvif->basic_rate =
5499 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5500 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5501 
5502 		pm_runtime_mark_last_busy(wl->dev);
5503 		pm_runtime_put_autosuspend(wl->dev);
5504 	}
5505 out:
5506 	mutex_unlock(&wl->mutex);
5507 
5508 	return ret;
5509 }
5510 
5511 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5512 				     struct ieee80211_vif *vif,
5513 				     struct ieee80211_channel_switch *ch_switch)
5514 {
5515 	struct wl1271 *wl = hw->priv;
5516 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5517 	int ret;
5518 
5519 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5520 
5521 	wl1271_tx_flush(wl);
5522 
5523 	mutex_lock(&wl->mutex);
5524 
5525 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5526 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5527 			ieee80211_chswitch_done(vif, false);
5528 		goto out;
5529 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5530 		goto out;
5531 	}
5532 
5533 	ret = pm_runtime_get_sync(wl->dev);
5534 	if (ret < 0) {
5535 		pm_runtime_put_noidle(wl->dev);
5536 		goto out;
5537 	}
5538 
5539 	/* TODO: change mac80211 to pass vif as param */
5540 
5541 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5542 		unsigned long delay_usec;
5543 
5544 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5545 		if (ret)
5546 			goto out_sleep;
5547 
5548 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5549 
5550 		/* indicate failure 5 seconds after channel switch time */
5551 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5552 			ch_switch->count;
5553 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5554 					     usecs_to_jiffies(delay_usec) +
5555 					     msecs_to_jiffies(5000));
5556 	}
5557 
5558 out_sleep:
5559 	pm_runtime_mark_last_busy(wl->dev);
5560 	pm_runtime_put_autosuspend(wl->dev);
5561 
5562 out:
5563 	mutex_unlock(&wl->mutex);
5564 }
5565 
5566 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5567 					struct wl12xx_vif *wlvif,
5568 					u8 eid)
5569 {
5570 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5571 	struct sk_buff *beacon =
5572 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5573 
5574 	if (!beacon)
5575 		return NULL;
5576 
5577 	return cfg80211_find_ie(eid,
5578 				beacon->data + ieoffset,
5579 				beacon->len - ieoffset);
5580 }
5581 
5582 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5583 				u8 *csa_count)
5584 {
5585 	const u8 *ie;
5586 	const struct ieee80211_channel_sw_ie *ie_csa;
5587 
5588 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5589 	if (!ie)
5590 		return -EINVAL;
5591 
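	/* skip the two-byte IE header (id, length) to reach the CSA body */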
5592 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5593 	*csa_count = ie_csa->count;
5594 
5595 	return 0;
5596 }
5597 
5598 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5599 					    struct ieee80211_vif *vif,
5600 					    struct cfg80211_chan_def *chandef)
5601 {
5602 	struct wl1271 *wl = hw->priv;
5603 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5604 	struct ieee80211_channel_switch ch_switch = {
5605 		.block_tx = true,
5606 		.chandef = *chandef,
5607 	};
5608 	int ret;
5609 
5610 	wl1271_debug(DEBUG_MAC80211,
5611 		     "mac80211 channel switch beacon (role %d)",
5612 		     wlvif->role_id);
5613 
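	/* read the CSA countdown from the CSA IE of our own beacon */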
5614 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5615 	if (ret < 0) {
5616 		wl1271_error("error getting beacon (for CSA counter)");
5617 		return;
5618 	}
5619 
5620 	mutex_lock(&wl->mutex);
5621 
5622 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5623 		ret = -EBUSY;
5624 		goto out;
5625 	}
5626 
5627 	ret = pm_runtime_get_sync(wl->dev);
5628 	if (ret < 0) {
5629 		pm_runtime_put_noidle(wl->dev);
5630 		goto out;
5631 	}
5632 
5633 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5634 	if (ret)
5635 		goto out_sleep;
5636 
5637 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5638 
5639 out_sleep:
5640 	pm_runtime_mark_last_busy(wl->dev);
5641 	pm_runtime_put_autosuspend(wl->dev);
5642 out:
5643 	mutex_unlock(&wl->mutex);
5644 }
5645 
5646 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5647 			    u32 queues, bool drop)
5648 {
5649 	struct wl1271 *wl = hw->priv;
5650 
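	/* queues and drop are ignored here; everything pending is flushed */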
5651 	wl1271_tx_flush(wl);
5652 }
5653 
5654 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5655 				       struct ieee80211_vif *vif,
5656 				       struct ieee80211_channel *chan,
5657 				       int duration,
5658 				       enum ieee80211_roc_type type)
5659 {
5660 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5661 	struct wl1271 *wl = hw->priv;
5662 	int channel, active_roc, ret = 0;
5663 
5664 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5665 
5666 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5667 		     channel, wlvif->role_id);
5668 
5669 	mutex_lock(&wl->mutex);
5670 
5671 	if (unlikely(wl->state != WLCORE_STATE_ON))
5672 		goto out;
5673 
5674 	/* return EBUSY if we can't ROC right now */
5675 	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5676 	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5677 		wl1271_warning("active roc on role %d", active_roc);
5678 		ret = -EBUSY;
5679 		goto out;
5680 	}
5681 
5682 	ret = pm_runtime_get_sync(wl->dev);
5683 	if (ret < 0) {
5684 		pm_runtime_put_noidle(wl->dev);
5685 		goto out;
5686 	}
5687 
5688 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5689 	if (ret < 0)
5690 		goto out_sleep;
5691 
5692 	wl->roc_vif = vif;
5693 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5694 				     msecs_to_jiffies(duration));
5695 out_sleep:
5696 	pm_runtime_mark_last_busy(wl->dev);
5697 	pm_runtime_put_autosuspend(wl->dev);
5698 out:
5699 	mutex_unlock(&wl->mutex);
5700 	return ret;
5701 }
5702 
5703 static int __wlcore_roc_completed(struct wl1271 *wl)
5704 {
5705 	struct wl12xx_vif *wlvif;
5706 	int ret;
5707 
5708 	/* already completed */
5709 	if (unlikely(!wl->roc_vif))
5710 		return 0;
5711 
5712 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5713 
5714 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5715 		return -EBUSY;
5716 
5717 	ret = wl12xx_stop_dev(wl, wlvif);
5718 	if (ret < 0)
5719 		return ret;
5720 
5721 	wl->roc_vif = NULL;
5722 
5723 	return 0;
5724 }
5725 
5726 static int wlcore_roc_completed(struct wl1271 *wl)
5727 {
5728 	int ret;
5729 
5730 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5731 
5732 	mutex_lock(&wl->mutex);
5733 
5734 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5735 		ret = -EBUSY;
5736 		goto out;
5737 	}
5738 
5739 	ret = pm_runtime_get_sync(wl->dev);
5740 	if (ret < 0) {
5741 		pm_runtime_put_noidle(wl->dev);
5742 		goto out;
5743 	}
5744 
5745 	ret = __wlcore_roc_completed(wl);
5746 
5747 	pm_runtime_mark_last_busy(wl->dev);
5748 	pm_runtime_put_autosuspend(wl->dev);
5749 out:
5750 	mutex_unlock(&wl->mutex);
5751 
5752 	return ret;
5753 }
5754 
5755 static void wlcore_roc_complete_work(struct work_struct *work)
5756 {
5757 	struct delayed_work *dwork;
5758 	struct wl1271 *wl;
5759 	int ret;
5760 
5761 	dwork = to_delayed_work(work);
5762 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5763 
5764 	ret = wlcore_roc_completed(wl);
5765 	if (!ret)
5766 		ieee80211_remain_on_channel_expired(wl->hw);
5767 }
5768 
5769 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
5770 					      struct ieee80211_vif *vif)
5771 {
5772 	struct wl1271 *wl = hw->priv;
5773 
5774 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5775 
5776 	/* TODO: per-vif */
5777 	wl1271_tx_flush(wl);
5778 
5779 	/*
5780 	 * we can't just flush_work here, because it might deadlock
5781 	 * (as we might get called from the same workqueue)
5782 	 */
5783 	cancel_delayed_work_sync(&wl->roc_complete_work);
5784 	wlcore_roc_completed(wl);
5785 
5786 	return 0;
5787 }
5788 
5789 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5790 				    struct ieee80211_vif *vif,
5791 				    struct ieee80211_sta *sta,
5792 				    u32 changed)
5793 {
5794 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5795 
5796 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5797 
5798 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5799 		return;
5800 
5801 	/* this callback is atomic, so defer the update to a work item */
5802 	wlvif->rc_update_bw = sta->bandwidth;
5803 	memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5804 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5805 }
5806 
5807 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5808 				     struct ieee80211_vif *vif,
5809 				     struct ieee80211_sta *sta,
5810 				     struct station_info *sinfo)
5811 {
5812 	struct wl1271 *wl = hw->priv;
5813 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5814 	s8 rssi_dbm;
5815 	int ret;
5816 
5817 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5818 
5819 	mutex_lock(&wl->mutex);
5820 
5821 	if (unlikely(wl->state != WLCORE_STATE_ON))
5822 		goto out;
5823 
5824 	ret = pm_runtime_get_sync(wl->dev);
5825 	if (ret < 0) {
5826 		pm_runtime_put_noidle(wl->dev);
5827 		goto out;
5828 	}
5829 
5830 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5831 	if (ret < 0)
5832 		goto out_sleep;
5833 
5834 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5835 	sinfo->signal = rssi_dbm;
5836 
5837 out_sleep:
5838 	pm_runtime_mark_last_busy(wl->dev);
5839 	pm_runtime_put_autosuspend(wl->dev);
5840 
5841 out:
5842 	mutex_unlock(&wl->mutex);
5843 }
5844 
5845 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5846 					     struct ieee80211_sta *sta)
5847 {
5848 	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5849 	struct wl1271 *wl = hw->priv;
5850 	u8 hlid = wl_sta->hlid;
5851 
5852 	/* return in units of Kbps */
5853 	return (wl->links[hlid].fw_rate_mbps * 1000);
5854 }
5855 
5856 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5857 {
5858 	struct wl1271 *wl = hw->priv;
5859 	bool ret = false;
5860 
5861 	mutex_lock(&wl->mutex);
5862 
5863 	if (unlikely(wl->state != WLCORE_STATE_ON))
5864 		goto out;
5865 
5866 	/* packets are considered pending if in the TX queue or the FW */
5867 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5868 out:
5869 	mutex_unlock(&wl->mutex);
5870 
5871 	return ret;
5872 }
5873 
5874 /* can't be const, mac80211 writes to this */
5875 static struct ieee80211_rate wl1271_rates[] = {
5876 	{ .bitrate = 10,
5877 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5878 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5879 	{ .bitrate = 20,
5880 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5881 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5882 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5883 	{ .bitrate = 55,
5884 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5885 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5886 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5887 	{ .bitrate = 110,
5888 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5889 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5890 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5891 	{ .bitrate = 60,
5892 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5893 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5894 	{ .bitrate = 90,
5895 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5896 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5897 	{ .bitrate = 120,
5898 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5899 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5900 	{ .bitrate = 180,
5901 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5902 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5903 	{ .bitrate = 240,
5904 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5905 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5906 	{ .bitrate = 360,
5907 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5908 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5909 	{ .bitrate = 480,
5910 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5911 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5912 	{ .bitrate = 540,
5913 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5914 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5915 };
5916 
5917 /* can't be const, mac80211 writes to this */
5918 static struct ieee80211_channel wl1271_channels[] = {
5919 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5920 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5921 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5922 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5923 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5924 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5925 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5926 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5927 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5928 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5929 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5930 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5931 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5932 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5933 };
5934 
5935 /* can't be const, mac80211 writes to this */
5936 static struct ieee80211_supported_band wl1271_band_2ghz = {
5937 	.channels = wl1271_channels,
5938 	.n_channels = ARRAY_SIZE(wl1271_channels),
5939 	.bitrates = wl1271_rates,
5940 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5941 };
5942 
5943 /* 5 GHz data rates for WL1273 */
5944 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5945 	{ .bitrate = 60,
5946 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5947 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5948 	{ .bitrate = 90,
5949 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5950 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5951 	{ .bitrate = 120,
5952 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5953 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5954 	{ .bitrate = 180,
5955 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5956 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5957 	{ .bitrate = 240,
5958 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5959 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5960 	{ .bitrate = 360,
5961 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5962 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5963 	{ .bitrate = 480,
5964 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5965 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5966 	{ .bitrate = 540,
5967 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5968 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5969 };
5970 
5971 /* 5 GHz band channels for WL1273 */
5972 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5973 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5974 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5975 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5976 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5977 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5978 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5979 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5980 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5981 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5982 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5983 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5984 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5985 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5986 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5987 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5988 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5989 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5990 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5991 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5992 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5993 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5994 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5995 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5996 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5997 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5998 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5999 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
6000 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
6001 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
6002 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
6003 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
6004 };
6005 
6006 static struct ieee80211_supported_band wl1271_band_5ghz = {
6007 	.channels = wl1271_channels_5ghz,
6008 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
6009 	.bitrates = wl1271_rates_5ghz,
6010 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
6011 };
6012 
6013 static const struct ieee80211_ops wl1271_ops = {
6014 	.start = wl1271_op_start,
6015 	.stop = wlcore_op_stop,
6016 	.add_interface = wl1271_op_add_interface,
6017 	.remove_interface = wl1271_op_remove_interface,
6018 	.change_interface = wl12xx_op_change_interface,
6019 #ifdef CONFIG_PM
6020 	.suspend = wl1271_op_suspend,
6021 	.resume = wl1271_op_resume,
6022 #endif
6023 	.config = wl1271_op_config,
6024 	.prepare_multicast = wl1271_op_prepare_multicast,
6025 	.configure_filter = wl1271_op_configure_filter,
6026 	.tx = wl1271_op_tx,
6027 	.set_key = wlcore_op_set_key,
6028 	.hw_scan = wl1271_op_hw_scan,
6029 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
6030 	.sched_scan_start = wl1271_op_sched_scan_start,
6031 	.sched_scan_stop = wl1271_op_sched_scan_stop,
6032 	.bss_info_changed = wl1271_op_bss_info_changed,
6033 	.set_frag_threshold = wl1271_op_set_frag_threshold,
6034 	.set_rts_threshold = wl1271_op_set_rts_threshold,
6035 	.conf_tx = wl1271_op_conf_tx,
6036 	.get_tsf = wl1271_op_get_tsf,
6037 	.get_survey = wl1271_op_get_survey,
6038 	.sta_state = wl12xx_op_sta_state,
6039 	.ampdu_action = wl1271_op_ampdu_action,
6040 	.tx_frames_pending = wl1271_tx_frames_pending,
6041 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
6042 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
6043 	.channel_switch = wl12xx_op_channel_switch,
6044 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
6045 	.flush = wlcore_op_flush,
6046 	.remain_on_channel = wlcore_op_remain_on_channel,
6047 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6048 	.add_chanctx = wlcore_op_add_chanctx,
6049 	.remove_chanctx = wlcore_op_remove_chanctx,
6050 	.change_chanctx = wlcore_op_change_chanctx,
6051 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6052 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6053 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6054 	.sta_rc_update = wlcore_op_sta_rc_update,
6055 	.sta_statistics = wlcore_op_sta_statistics,
6056 	.get_expected_throughput = wlcore_op_get_expected_throughput,
6057 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
6058 };
6059 
6060 
6061 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6062 {
6063 	u8 idx;
6064 
6065 	BUG_ON(band >= 2);
6066 
6067 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6068 		wl1271_error("Illegal RX rate from HW: %d", rate);
6069 		return 0;
6070 	}
6071 
6072 	idx = wl->band_rate_to_idx[band][rate];
6073 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6074 		wl1271_error("Unsupported RX rate from HW: %d", rate);
6075 		return 0;
6076 	}
6077 
6078 	return idx;
6079 }
6080 
6081 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6082 {
6083 	int i;
6084 
6085 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6086 		     oui, nic);
6087 
6088 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6089 		wl1271_warning("NIC part of the MAC address wraps around!");
6090 
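	/* build consecutive addresses by incrementing the 24-bit NIC part */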
6091 	for (i = 0; i < wl->num_mac_addr; i++) {
6092 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
6093 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
6094 		wl->addresses[i].addr[2] = (u8) oui;
6095 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
6096 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
6097 		wl->addresses[i].addr[5] = (u8) nic;
6098 		nic++;
6099 	}
6100 
6101 	/* at most, we may be one address short */
6102 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6103 
6104 	/*
6105 	 * turn on the LAA bit in the first address and use it as
6106 	 * the last address.
6107 	 */
6108 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6109 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6110 		memcpy(&wl->addresses[idx], &wl->addresses[0],
6111 		       sizeof(wl->addresses[0]));
6112 		/* LAA bit */
6113 		wl->addresses[idx].addr[0] |= BIT(1);
6114 	}
6115 
6116 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6117 	wl->hw->wiphy->addresses = wl->addresses;
6118 }
6119 
6120 static int wl12xx_get_hw_info(struct wl1271 *wl)
6121 {
6122 	int ret;
6123 
6124 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6125 	if (ret < 0)
6126 		goto out;
6127 
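	/* default to zero; the chip-specific get_mac op reads the fuse values */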
6128 	wl->fuse_oui_addr = 0;
6129 	wl->fuse_nic_addr = 0;
6130 
6131 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6132 	if (ret < 0)
6133 		goto out;
6134 
6135 	if (wl->ops->get_mac)
6136 		ret = wl->ops->get_mac(wl);
6137 
6138 out:
6139 	return ret;
6140 }
6141 
6142 static int wl1271_register_hw(struct wl1271 *wl)
6143 {
6144 	int ret;
6145 	u32 oui_addr = 0, nic_addr = 0;
6146 	struct platform_device *pdev = wl->pdev;
6147 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6148 
6149 	if (wl->mac80211_registered)
6150 		return 0;
6151 
6152 	if (wl->nvs_len >= 12) {
6153 		/* NOTE: the wl->nvs->nvs element must come first; to
6154 		 * simplify the casting we assume it sits at the very
6155 		 * beginning of the wl->nvs structure.
6156 		 */
6157 		u8 *nvs_ptr = (u8 *)wl->nvs;
6158 
6159 		oui_addr =
6160 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6161 		nic_addr =
6162 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6163 	}
6164 
6165 	/* if the MAC address is zeroed in the NVS derive from fuse */
6166 	if (oui_addr == 0 && nic_addr == 0) {
6167 		oui_addr = wl->fuse_oui_addr;
6168 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6169 		nic_addr = wl->fuse_nic_addr + 1;
6170 	}
6171 
6172 	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6173 		wl1271_warning("Detected unconfigured mac address in nvs, deriving from fuse instead.");
6174 		if (!strcmp(pdev_data->family->name, "wl18xx")) {
6175 			wl1271_warning("This default nvs file can be removed from the file system");
6176 		} else {
6177 			wl1271_warning("Your device performance is not optimized.");
6178 			wl1271_warning("Please use the calibrator tool to configure your device.");
6179 		}
6180 
6181 		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6182 			wl1271_warning("Fuse mac address is zero. using random mac");
6183 			/* Use TI oui and a random nic */
6184 			oui_addr = WLCORE_TI_OUI_ADDRESS;
6185 			nic_addr = get_random_int();
6186 		} else {
6187 			oui_addr = wl->fuse_oui_addr;
6188 			/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6189 			nic_addr = wl->fuse_nic_addr + 1;
6190 		}
6191 	}
6192 
6193 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6194 
6195 	ret = ieee80211_register_hw(wl->hw);
6196 	if (ret < 0) {
6197 		wl1271_error("unable to register mac80211 hw: %d", ret);
6198 		goto out;
6199 	}
6200 
6201 	wl->mac80211_registered = true;
6202 
6203 	wl1271_debugfs_init(wl);
6204 
6205 	wl1271_notice("loaded");
6206 
6207 out:
6208 	return ret;
6209 }
6210 
6211 static void wl1271_unregister_hw(struct wl1271 *wl)
6212 {
6213 	if (wl->plt)
6214 		wl1271_plt_stop(wl);
6215 
6216 	ieee80211_unregister_hw(wl->hw);
6217 	wl->mac80211_registered = false;
6219 }
6220 
6221 static int wl1271_init_ieee80211(struct wl1271 *wl)
6222 {
6223 	int i;
6224 	static const u32 cipher_suites[] = {
6225 		WLAN_CIPHER_SUITE_WEP40,
6226 		WLAN_CIPHER_SUITE_WEP104,
6227 		WLAN_CIPHER_SUITE_TKIP,
6228 		WLAN_CIPHER_SUITE_CCMP,
6229 		WL1271_CIPHER_SUITE_GEM,
6230 	};
6231 
6232 	/* The tx descriptor buffer */
6233 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6234 
6235 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6236 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6237 
6238 	/* unit: beacon intervals */
6239 	/* FIXME: find a proper value */
6240 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6241 
6242 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6243 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6244 	ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
6245 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6246 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6247 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6248 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6249 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6250 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6251 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6252 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6253 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6254 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6255 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6256 	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6257 
6258 	wl->hw->wiphy->cipher_suites = cipher_suites;
6259 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6260 
6261 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6262 					 BIT(NL80211_IFTYPE_AP) |
6263 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6264 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6265 #ifdef CONFIG_MAC80211_MESH
6266 					 BIT(NL80211_IFTYPE_MESH_POINT) |
6267 #endif
6268 					 BIT(NL80211_IFTYPE_P2P_GO);
6269 
6270 	wl->hw->wiphy->max_scan_ssids = 1;
6271 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6272 	wl->hw->wiphy->max_match_sets = 16;
6273 	/*
6274 	 * Maximum length of elements in scanning probe request templates
6275 	 * should be the maximum length possible for a template, without
6276 	 * the IEEE80211 header of the template
6277 	 */
6278 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6279 			sizeof(struct ieee80211_header);
6280 
6281 	wl->hw->wiphy->max_sched_scan_reqs = 1;
6282 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6283 		sizeof(struct ieee80211_header);
6284 
6285 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6286 
6287 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6288 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6289 				WIPHY_FLAG_HAS_CHANNEL_SWITCH |
6290 				WIPHY_FLAG_IBSS_RSN;
6291 
6292 	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6293 
6294 	/* make sure all our channels fit in the scanned_ch bitmask */
6295 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6296 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6297 		     WL1271_MAX_CHANNELS);
6298 	/*
6299 	 * clear channel flags from the previous usage
6300 	 * and restore max_power & max_antenna_gain values.
6301 	 */
6302 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6303 		wl1271_band_2ghz.channels[i].flags = 0;
6304 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6305 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6306 	}
6307 
6308 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6309 		wl1271_band_5ghz.channels[i].flags = 0;
6310 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6311 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6312 	}
6313 
6314 	/*
6315 	 * We keep local copies of the band structs because we need to
6316 	 * modify them on a per-device basis.
6317 	 */
6318 	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6319 	       sizeof(wl1271_band_2ghz));
6320 	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6321 	       &wl->ht_cap[NL80211_BAND_2GHZ],
6322 	       sizeof(*wl->ht_cap));
6323 	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6324 	       sizeof(wl1271_band_5ghz));
6325 	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6326 	       &wl->ht_cap[NL80211_BAND_5GHZ],
6327 	       sizeof(*wl->ht_cap));
6328 
6329 	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6330 		&wl->bands[NL80211_BAND_2GHZ];
6331 	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6332 		&wl->bands[NL80211_BAND_5GHZ];
6333 
6334 	/*
6335 	 * allow 4 queues per mac address we support +
6336 	 * 1 cab queue per mac + one global offchannel Tx queue
6337 	 */
6338 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6339 
6340 	/* the last queue is the offchannel queue */
6341 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6342 	wl->hw->max_rates = 1;
6343 
6344 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6345 
6346 	/* the FW answers probe-requests in AP-mode */
6347 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6348 	wl->hw->wiphy->probe_resp_offload =
6349 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6350 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6351 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6352 
6353 	/* allowed interface combinations */
6354 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6355 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6356 
6357 	/* register vendor commands */
6358 	wlcore_set_vendor_commands(wl->hw->wiphy);
6359 
6360 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6361 
6362 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6363 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6364 
6365 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6366 
6367 	return 0;
6368 }
6369 
6370 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6371 				     u32 mbox_size)
6372 {
6373 	struct ieee80211_hw *hw;
6374 	struct wl1271 *wl;
6375 	int i, j, ret;
6376 	unsigned int order;
6377 
6378 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6379 	if (!hw) {
6380 		wl1271_error("could not alloc ieee80211_hw");
6381 		ret = -ENOMEM;
6382 		goto err_hw_alloc;
6383 	}
6384 
6385 	wl = hw->priv;
6386 	memset(wl, 0, sizeof(*wl));
6387 
6388 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6389 	if (!wl->priv) {
6390 		wl1271_error("could not alloc wl priv");
6391 		ret = -ENOMEM;
6392 		goto err_priv_alloc;
6393 	}
6394 
6395 	INIT_LIST_HEAD(&wl->wlvif_list);
6396 
6397 	wl->hw = hw;
6398 
6399 	/*
6400 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6401 	 * we don't allocate any additional resource here, so that's fine.
6402 	 */
6403 	for (i = 0; i < NUM_TX_QUEUES; i++)
6404 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6405 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6406 
6407 	skb_queue_head_init(&wl->deferred_rx_queue);
6408 	skb_queue_head_init(&wl->deferred_tx_queue);
6409 
6410 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6411 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6412 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6413 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6414 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6415 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6416 
6417 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6418 	if (!wl->freezable_wq) {
6419 		ret = -ENOMEM;
6420 		goto err_hw;
6421 	}
6422 
6423 	wl->channel = 0;
6424 	wl->rx_counter = 0;
6425 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6426 	wl->band = NL80211_BAND_2GHZ;
6427 	wl->channel_type = NL80211_CHAN_NO_HT;
6428 	wl->flags = 0;
6429 	wl->sg_enabled = true;
6430 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6431 	wl->recovery_count = 0;
6432 	wl->hw_pg_ver = -1;
6433 	wl->ap_ps_map = 0;
6434 	wl->ap_fw_ps_map = 0;
6435 	wl->quirks = 0;
6436 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6437 	wl->active_sta_count = 0;
6438 	wl->active_link_count = 0;
6439 	wl->fwlog_size = 0;
6440 
6441 	/* The system link is always allocated */
6442 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6443 
6444 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6445 	for (i = 0; i < wl->num_tx_desc; i++)
6446 		wl->tx_frames[i] = NULL;
6447 
6448 	spin_lock_init(&wl->wl_lock);
6449 
6450 	wl->state = WLCORE_STATE_OFF;
6451 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6452 	mutex_init(&wl->mutex);
6453 	mutex_init(&wl->flush_mutex);
6454 	init_completion(&wl->nvs_loading_complete);
6455 
6456 	order = get_order(aggr_buf_size);
6457 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6458 	if (!wl->aggr_buf) {
6459 		ret = -ENOMEM;
6460 		goto err_wq;
6461 	}
6462 	wl->aggr_buf_size = aggr_buf_size;
6463 
6464 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6465 	if (!wl->dummy_packet) {
6466 		ret = -ENOMEM;
6467 		goto err_aggr;
6468 	}
6469 
6470 	/* Allocate one page for the FW log */
6471 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6472 	if (!wl->fwlog) {
6473 		ret = -ENOMEM;
6474 		goto err_dummy_packet;
6475 	}
6476 
6477 	wl->mbox_size = mbox_size;
6478 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6479 	if (!wl->mbox) {
6480 		ret = -ENOMEM;
6481 		goto err_fwlog;
6482 	}
6483 
6484 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6485 	if (!wl->buffer_32) {
6486 		ret = -ENOMEM;
6487 		goto err_mbox;
6488 	}
6489 
6490 	return hw;
6491 
6492 err_mbox:
6493 	kfree(wl->mbox);
6494 
6495 err_fwlog:
6496 	free_page((unsigned long)wl->fwlog);
6497 
6498 err_dummy_packet:
6499 	dev_kfree_skb(wl->dummy_packet);
6500 
6501 err_aggr:
6502 	free_pages((unsigned long)wl->aggr_buf, order);
6503 
6504 err_wq:
6505 	destroy_workqueue(wl->freezable_wq);
6506 
6507 err_hw:
6508 	wl1271_debugfs_exit(wl);
6509 	kfree(wl->priv);
6510 
6511 err_priv_alloc:
6512 	ieee80211_free_hw(hw);
6513 
6514 err_hw_alloc:
6515 
6516 	return ERR_PTR(ret);
6517 }
6518 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6519 
6520 int wlcore_free_hw(struct wl1271 *wl)
6521 {
6522 	/* Unblock any fwlog readers */
6523 	mutex_lock(&wl->mutex);
6524 	wl->fwlog_size = -1;
6525 	mutex_unlock(&wl->mutex);
6526 
6527 	wlcore_sysfs_free(wl);
6528 
6529 	kfree(wl->buffer_32);
6530 	kfree(wl->mbox);
6531 	free_page((unsigned long)wl->fwlog);
6532 	dev_kfree_skb(wl->dummy_packet);
6533 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6534 
6535 	wl1271_debugfs_exit(wl);
6536 
6537 	vfree(wl->fw);
6538 	wl->fw = NULL;
6539 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6540 	kfree(wl->nvs);
6541 	wl->nvs = NULL;
6542 
6543 	kfree(wl->raw_fw_status);
6544 	kfree(wl->fw_status);
6545 	kfree(wl->tx_res_if);
6546 	destroy_workqueue(wl->freezable_wq);
6547 
6548 	kfree(wl->priv);
6549 	ieee80211_free_hw(wl->hw);
6550 
6551 	return 0;
6552 }
6553 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6554 
6555 #ifdef CONFIG_PM
6556 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6557 	.flags = WIPHY_WOWLAN_ANY,
6558 	.n_patterns = WL1271_MAX_RX_FILTERS,
6559 	.pattern_min_len = 1,
6560 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6561 };
6562 #endif
6563 
6564 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6565 {
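	/* all interrupt handling is done in the threaded handler */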
6566 	return IRQ_WAKE_THREAD;
6567 }
6568 
6569 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6570 {
6571 	struct wl1271 *wl = context;
6572 	struct platform_device *pdev = wl->pdev;
6573 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6574 	struct resource *res;
6575 
6576 	int ret;
6577 	irq_handler_t hardirq_fn = NULL;
6578 
6579 	if (fw) {
6580 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6581 		if (!wl->nvs) {
6582 			wl1271_error("Could not allocate nvs data");
6583 			goto out;
6584 		}
6585 		wl->nvs_len = fw->size;
6586 	} else if (pdev_data->family->nvs_name) {
6587 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6588 			     pdev_data->family->nvs_name);
6589 		wl->nvs = NULL;
6590 		wl->nvs_len = 0;
6591 	} else {
6592 		wl->nvs = NULL;
6593 		wl->nvs_len = 0;
6594 	}
6595 
6596 	ret = wl->ops->setup(wl);
6597 	if (ret < 0)
6598 		goto out_free_nvs;
6599 
6600 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6601 
6602 	/* adjust some runtime configuration parameters */
6603 	wlcore_adjust_conf(wl);
6604 
6605 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6606 	if (!res) {
6607 		wl1271_error("Could not get IRQ resource");
6608 		goto out_free_nvs;
6609 	}
6610 
6611 	wl->irq = res->start;
6612 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6613 	wl->if_ops = pdev_data->if_ops;
6614 
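	/*
	 * Edge-triggered interrupts need a primary handler that wakes the
	 * thread; level-triggered ones use the default primary handler and
	 * therefore require IRQF_ONESHOT.
	 */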
6615 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6616 		hardirq_fn = wlcore_hardirq;
6617 	else
6618 		wl->irq_flags |= IRQF_ONESHOT;
6619 
6620 	ret = wl12xx_set_power_on(wl);
6621 	if (ret < 0)
6622 		goto out_free_nvs;
6623 
6624 	ret = wl12xx_get_hw_info(wl);
6625 	if (ret < 0) {
6626 		wl1271_error("couldn't get hw info");
6627 		wl1271_power_off(wl);
6628 		goto out_free_nvs;
6629 	}
6630 
6631 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6632 				   wl->irq_flags, pdev->name, wl);
6633 	if (ret < 0) {
6634 		wl1271_error("interrupt configuration failed");
6635 		wl1271_power_off(wl);
6636 		goto out_free_nvs;
6637 	}
6638 
6639 #ifdef CONFIG_PM
6640 	device_init_wakeup(wl->dev, true);
6641 
6642 	ret = enable_irq_wake(wl->irq);
6643 	if (!ret) {
6644 		wl->irq_wake_enabled = true;
6645 		if (pdev_data->pwr_in_suspend)
6646 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6647 	}
6648 
6649 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6650 	if (res) {
6651 		wl->wakeirq = res->start;
6652 		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6653 		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6654 		if (ret)
6655 			wl->wakeirq = -ENODEV;
6656 	} else {
6657 		wl->wakeirq = -ENODEV;
6658 	}
6659 #endif
6660 	disable_irq(wl->irq);
6661 	wl1271_power_off(wl);
6662 
6663 	ret = wl->ops->identify_chip(wl);
6664 	if (ret < 0)
6665 		goto out_irq;
6666 
6667 	ret = wl1271_init_ieee80211(wl);
6668 	if (ret)
6669 		goto out_irq;
6670 
6671 	ret = wl1271_register_hw(wl);
6672 	if (ret)
6673 		goto out_irq;
6674 
6675 	ret = wlcore_sysfs_init(wl);
6676 	if (ret)
6677 		goto out_unreg;
6678 
6679 	wl->initialized = true;
6680 	goto out;
6681 
6682 out_unreg:
6683 	wl1271_unregister_hw(wl);
6684 
6685 out_irq:
6686 	if (wl->wakeirq >= 0)
6687 		dev_pm_clear_wake_irq(wl->dev);
6688 	device_init_wakeup(wl->dev, false);
6689 	free_irq(wl->irq, wl);
6690 
6691 out_free_nvs:
6692 	kfree(wl->nvs);
6693 
6694 out:
6695 	release_firmware(fw);
6696 	complete_all(&wl->nvs_loading_complete);
6697 }
6698 
6699 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6700 {
6701 	struct wl1271 *wl = dev_get_drvdata(dev);
6702 	struct wl12xx_vif *wlvif;
6703 	int error;
6704 
6705 	/* We do not enter elp sleep in PLT mode */
6706 	if (wl->plt)
6707 		return 0;
6708 
6709 	/* Nothing to do if no ELP mode requested */
6710 	if (wl->sleep_auth != WL1271_PSM_ELP)
6711 		return 0;
6712 
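	/* do not enter ELP while any active interface is not yet in PS */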
6713 	wl12xx_for_each_wlvif(wl, wlvif) {
6714 		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6715 		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6716 			return -EBUSY;
6717 	}
6718 
6719 	wl1271_debug(DEBUG_PSM, "chip to elp");
6720 	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6721 	if (error < 0) {
6722 		wl12xx_queue_recovery_work(wl);
6723 
6724 		return error;
6725 	}
6726 
6727 	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6728 
6729 	return 0;
6730 }
6731 
6732 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6733 {
6734 	struct wl1271 *wl = dev_get_drvdata(dev);
6735 	DECLARE_COMPLETION_ONSTACK(compl);
6736 	unsigned long flags;
6737 	int ret;
6738 	unsigned long start_time = jiffies;
6739 	bool recovery = false;
6740 
6741 	/* Nothing to do if no ELP mode requested */
6742 	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6743 		return 0;
6744 
6745 	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6746 
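	/*
	 * Publish the completion first; the IRQ handler completes it when
	 * the chip signals that it is awake.
	 */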
6747 	spin_lock_irqsave(&wl->wl_lock, flags);
6748 	wl->elp_compl = &compl;
6749 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6750 
6751 	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6752 	if (ret < 0) {
6753 		recovery = true;
6754 	} else if (!test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) {
6755 		ret = wait_for_completion_timeout(&compl,
6756 			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6757 		if (ret == 0) {
6758 			wl1271_warning("ELP wakeup timeout!");
6759 			recovery = true;
6760 		}
6761 	}
6762 
6763 	spin_lock_irqsave(&wl->wl_lock, flags);
6764 	wl->elp_compl = NULL;
6765 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6766 	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6767 
6768 	if (recovery) {
6769 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6770 		wl12xx_queue_recovery_work(wl);
6771 	} else {
6772 		wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6773 			     jiffies_to_msecs(jiffies - start_time));
6774 	}
6775 
6776 	return 0;
6777 }
6778 
6779 static const struct dev_pm_ops wlcore_pm_ops = {
6780 	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6781 			   wlcore_runtime_resume,
6782 			   NULL)
6783 };
6784 
6785 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6786 {
6787 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6788 	const char *nvs_name;
6789 	int ret = 0;
6790 
6791 	if (!wl->ops || !wl->ptable || !pdev_data)
6792 		return -EINVAL;
6793 
6794 	wl->dev = &pdev->dev;
6795 	wl->pdev = pdev;
6796 	platform_set_drvdata(pdev, wl);
6797 
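	/* the remaining initialization continues asynchronously in wlcore_nvs_cb() */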
6798 	if (pdev_data->family && pdev_data->family->nvs_name) {
6799 		nvs_name = pdev_data->family->nvs_name;
6800 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6801 					      nvs_name, &pdev->dev, GFP_KERNEL,
6802 					      wl, wlcore_nvs_cb);
6803 		if (ret < 0) {
6804 			wl1271_error("request_firmware_nowait failed for %s: %d",
6805 				     nvs_name, ret);
6806 			complete_all(&wl->nvs_loading_complete);
6807 		}
6808 	} else {
6809 		wlcore_nvs_cb(NULL, wl);
6810 	}
6811 
6812 	wl->dev->driver->pm = &wlcore_pm_ops;
6813 	pm_runtime_set_autosuspend_delay(wl->dev, 50);
6814 	pm_runtime_use_autosuspend(wl->dev);
6815 	pm_runtime_enable(wl->dev);
6816 
6817 	return ret;
6818 }
6819 EXPORT_SYMBOL_GPL(wlcore_probe);
6820 
6821 int wlcore_remove(struct platform_device *pdev)
6822 {
6823 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6824 	struct wl1271 *wl = platform_get_drvdata(pdev);
6825 	int error;
6826 
6827 	error = pm_runtime_get_sync(wl->dev);
6828 	if (error < 0)
6829 		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6830 
6831 	wl->dev->driver->pm = NULL;
6832 
6833 	if (pdev_data->family && pdev_data->family->nvs_name)
6834 		wait_for_completion(&wl->nvs_loading_complete);
6835 	if (!wl->initialized)
6836 		return 0;
6837 
6838 	if (wl->wakeirq >= 0) {
6839 		dev_pm_clear_wake_irq(wl->dev);
6840 		wl->wakeirq = -ENODEV;
6841 	}
6842 
6843 	device_init_wakeup(wl->dev, false);
6844 
6845 	if (wl->irq_wake_enabled)
6846 		disable_irq_wake(wl->irq);
6847 
6848 	wl1271_unregister_hw(wl);
6849 
6850 	pm_runtime_put_sync(wl->dev);
6851 	pm_runtime_dont_use_autosuspend(wl->dev);
6852 	pm_runtime_disable(wl->dev);
6853 
6854 	free_irq(wl->irq, wl);
6855 	wlcore_free_hw(wl);
6856 
6857 	return 0;
6858 }
6859 EXPORT_SYMBOL_GPL(wlcore_remove);
6860 
6861 u32 wl12xx_debug_level = DEBUG_NONE;
6862 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6863 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6864 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6865 
6866 module_param_named(fwlog, fwlog_param, charp, 0);
6867 MODULE_PARM_DESC(fwlog,
6868 		 "FW logger options: continuous, dbgpins or disable");
6869 
6870 module_param(fwlog_mem_blocks, int, 0600);
6871 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6872 
6873 module_param(bug_on_recovery, int, 0600);
6874 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6875 
6876 module_param(no_recovery, int, 0600);
6877 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6878 
6879 MODULE_LICENSE("GPL");
6880 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6881 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6882