/*
 * Marvell Wireless LAN device driver: AP TX and RX data handling
 *
 * Copyright (C) 2012-2014, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License").  You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "main.h"
#include "wmm.h"
#include "11n_aggr.h"
#include "11n_rxreorder.h"

/* This function checks whether a particular RA list has packets queued above
 * the low bridge packet threshold and, if so, deletes bridged packets from
 * that RA list and returns true. If no such list is found, false is returned.
 */
static bool
mwifiex_uap_del_tx_pkts_in_ralist(struct mwifiex_private *priv,
				  struct list_head *ra_list_head,
				  int tid)
{
	struct mwifiex_ra_list_tbl *ra_list;
	struct sk_buff *skb, *tmp;
	bool pkt_deleted = false;
	struct mwifiex_txinfo *tx_info;
	struct mwifiex_adapter *adapter = priv->adapter;

	list_for_each_entry(ra_list, ra_list_head, list) {
		if (skb_queue_empty(&ra_list->skb_head))
			continue;

		skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
			tx_info = MWIFIEX_SKB_TXCB(skb);
			if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) {
				__skb_unlink(skb, &ra_list->skb_head);
				mwifiex_write_data_complete(adapter, skb, 0,
							    -1);
				if (ra_list->tx_paused)
					priv->wmm.pkts_paused[tid]--;
				else
					atomic_dec(&priv->wmm.tx_pkts_queued);
				pkt_deleted = true;
			}
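			/* Stop once the pending bridged packet count has
			 * dropped back to the low threshold.
			 */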
			if ((atomic_read(&adapter->pending_bridged_pkts) <=
					     MWIFIEX_BRIDGED_PKTS_THR_LOW))
				break;
		}
	}

	return pkt_deleted;
}

/* This function deletes packets from a particular RA list. The RA list index
 * from which packets are deleted is preserved so that packets from the next
 * RA list are deleted upon a subsequent call, thus maintaining fairness.
 */
static void mwifiex_uap_cleanup_tx_queues(struct mwifiex_private *priv)
{
	struct list_head *ra_list;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; i++, priv->del_list_idx++) {
		if (priv->del_list_idx == MAX_NUM_TID)
			priv->del_list_idx = 0;
		ra_list = &priv->wmm.tid_tbl_ptr[priv->del_list_idx].ra_list;
		if (mwifiex_uap_del_tx_pkts_in_ralist(priv, ra_list, i)) {
			priv->del_list_idx++;
			break;
		}
	}

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}


static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
					 struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	struct sk_buff *new_skb;
	struct mwifiex_txinfo *tx_info;
	int hdr_chop;
	struct ethhdr *p_ethhdr;
	struct mwifiex_sta_node *src_node;
	int index;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	if ((atomic_read(&adapter->pending_bridged_pkts) >=
					     MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "Tx: Bridge packet limit reached. Drop packet!\n");
		kfree_skb(skb);
		mwifiex_uap_cleanup_tx_queues(priv);
		return;
	}

	if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
		     sizeof(bridge_tunnel_header))) ||
	    (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
		     sizeof(rfc1042_header)) &&
	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
		/* Replace the 802.3 header and rfc1042 header (llc/snap) with
		 * an Ethernet II header, keeping the src/dst and snap_type
		 * (ethertype).
		 *
		 * The firmware only passes up SNAP frames, converting all RX
		 * data from 802.11 to 802.2/LLC/SNAP frames.
		 *
		 * To create the Ethernet II header, just move the src and dst
		 * addresses right before the snap_type.
		 */
		p_ethhdr = (struct ethhdr *)
			((u8 *)(&rx_pkt_hdr->eth803_hdr)
			 + sizeof(rx_pkt_hdr->eth803_hdr)
			 + sizeof(rx_pkt_hdr->rfc1042_hdr)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_dest)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_source)
			 - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));
		memcpy(p_ethhdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
		       sizeof(p_ethhdr->h_source));
		memcpy(p_ethhdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
		       sizeof(p_ethhdr->h_dest));
		/* Chop off the rxpd + the excess memory from the
		 * 802.2/llc/snap header that was removed.
		 */
		hdr_chop = (u8 *)p_ethhdr - (u8 *)uap_rx_pd;
	} else {
		/* Chop off the rxpd */
		hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
	}

	/* Chop off the leading header bytes so that skb->data points to
	 * the start of either the reconstructed EthII frame or the
	 * 802.2/llc/snap frame.
	 */
	skb_pull(skb, hdr_chop);

	if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "data: Tx: insufficient skb headroom %d\n",
			    skb_headroom(skb));
		/* Insufficient skb headroom - allocate a new skb */
		new_skb =
			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
		if (unlikely(!new_skb)) {
			mwifiex_dbg(priv->adapter, ERROR,
				    "Tx: cannot allocate new_skb\n");
			kfree_skb(skb);
			priv->stats.tx_dropped++;
			return;
		}

		kfree_skb(skb);
		skb = new_skb;
		mwifiex_dbg(priv->adapter, INFO,
			    "info: new skb headroom %d\n",
			    skb_headroom(skb));
	}

	tx_info = MWIFIEX_SKB_TXCB(skb);
	memset(tx_info, 0, sizeof(*tx_info));
	tx_info->bss_num = priv->bss_num;
	tx_info->bss_type = priv->bss_type;
	tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;

	src_node = mwifiex_get_sta_entry(priv, rx_pkt_hdr->eth803_hdr.h_source);
	if (src_node) {
		src_node->stats.last_rx = jiffies;
		src_node->stats.rx_bytes += skb->len;
		src_node->stats.rx_packets++;
		src_node->stats.last_tx_rate = uap_rx_pd->rx_rate;
		src_node->stats.last_tx_htinfo = uap_rx_pd->ht_info;
	}

	if (is_unicast_ether_addr(rx_pkt_hdr->eth803_hdr.h_dest)) {
		/* Update bridge packet statistics as the
		 * packet is not going to kernel/upper layer.
		 */
		priv->stats.rx_bytes += skb->len;
		priv->stats.rx_packets++;

		/* Sending bridge packet to TX queue, so save the packet
		 * length in TXCB to update statistics in TX complete.
		 */
		tx_info->pkt_len = skb->len;
	}

	__net_timestamp(skb);

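	/* Map the 802.1d priority to a WMM TX queue and account for the
	 * bridged packet before queueing it for transmission.
	 */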
	index = mwifiex_1d_to_wmm_queue[skb->priority];
	atomic_inc(&priv->wmm_tx_pending[index]);
	mwifiex_wmm_add_buf_txqueue(priv, skb);
	atomic_inc(&adapter->tx_pending);
	atomic_inc(&adapter->pending_bridged_pkts);

	mwifiex_queue_main_work(priv->adapter);

	return;
}

/*
 * This function contains the logic for AP packet forwarding.
 *
 * If a packet is multicast/broadcast, it is sent to the kernel/upper layer
 * as well as queued back to the AP TX queue so that it can be sent to other
 * associated stations.
 * If a packet is unicast and the RA is present in the associated station
 * list, it is again requeued into the AP TX queue.
 * If a packet is unicast and the RA is not in the associated station list,
 * the packet is forwarded to the kernel to handle the routing logic.
 */
int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
				  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	u8 ra[ETH_ALEN];
	struct sk_buff *skb_uap;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	/* don't do packet forwarding in disconnected state */
	if (!priv->media_connected) {
		mwifiex_dbg(adapter, ERROR,
			    "drop packet in disconnected state.\n");
		dev_kfree_skb_any(skb);
		return 0;
	}

	memcpy(ra, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN);

	if (is_multicast_ether_addr(ra)) {
		skb_uap = skb_copy(skb, GFP_ATOMIC);
		/* Guard against skb_copy() failure before queueing the copy */
		if (likely(skb_uap))
			mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
		else
			mwifiex_dbg(adapter, ERROR,
				    "failed to copy skb for uAP\n");
	} else {
		if (mwifiex_get_sta_entry(priv, ra)) {
			/* Requeue Intra-BSS packet */
			mwifiex_uap_queue_bridged_pkt(priv, skb);
			return 0;
		}
	}

	/* Forward unicast/Inter-BSS packets to the kernel. */
	return mwifiex_process_rx_packet(priv, skb);
}

int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_sta_node *src_node;
	struct ethhdr *p_ethhdr;
	struct sk_buff *skb_uap;
	struct mwifiex_txinfo *tx_info;

	if (!skb)
		return -1;

	p_ethhdr = (void *)skb->data;
	src_node = mwifiex_get_sta_entry(priv, p_ethhdr->h_source);
	if (src_node) {
		src_node->stats.last_rx = jiffies;
		src_node->stats.rx_bytes += skb->len;
		src_node->stats.rx_packets++;
	}

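	/* Bridge the packet back onto the AP TX queue if it is multicast or
	 * destined for another associated station (Intra-BSS traffic).
	 */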
	if (is_multicast_ether_addr(p_ethhdr->h_dest) ||
	    mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) {
		if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)
			skb_uap =
			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
		else
			skb_uap = skb_copy(skb, GFP_ATOMIC);

		if (likely(skb_uap)) {
			tx_info = MWIFIEX_SKB_TXCB(skb_uap);
			memset(tx_info, 0, sizeof(*tx_info));
			tx_info->bss_num = priv->bss_num;
			tx_info->bss_type = priv->bss_type;
			tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
			__net_timestamp(skb_uap);
			mwifiex_wmm_add_buf_txqueue(priv, skb_uap);
			atomic_inc(&adapter->tx_pending);
			atomic_inc(&adapter->pending_bridged_pkts);
			if ((atomic_read(&adapter->pending_bridged_pkts) >=
					MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
				mwifiex_dbg(adapter, ERROR,
					    "Tx: Bridge packet limit reached. Drop packet!\n");
				mwifiex_uap_cleanup_tx_queues(priv);
			}

		} else {
			mwifiex_dbg(adapter, ERROR, "failed to allocate skb_uap\n");
		}

		mwifiex_queue_main_work(adapter);
		/* Don't forward Intra-BSS unicast packets to the upper layer */
		if (mwifiex_get_sta_entry(priv, p_ethhdr->h_dest))
			return 0;
	}

	skb->dev = priv->netdev;
	skb->protocol = eth_type_trans(skb, priv->netdev);
	skb->ip_summed = CHECKSUM_NONE;

	/* This is required only in case of 11n and USB/PCIE as we allocate
	 * a buffer of 4K only if it's 11N (to be able to receive 4K
	 * AMSDU packets). In case of SD we allocate buffers based
	 * on the size of the packet and hence this is not needed.
	 *
	 * Modifying the truesize here as our allocation for each
	 * skb is 4K but we only receive 2K packets and this causes
	 * the kernel to start dropping packets in case where the
	 * application has allocated a buffer based on 2K size, i.e.
	 * if a 64K packet is received (in IP fragments) and the
	 * application allocates 64K to receive it, this packet
	 * would almost double up because we allocate each 1.5K
	 * fragment in 4K and pass it up. As soon as the 64K limit
	 * is hit the kernel will start to drop the rest of the
	 * fragments. Currently we fail the Filesndl-ht.scr script
	 * for UDP, hence this fix.
	 */
	if ((adapter->iface_type == MWIFIEX_USB ||
	     adapter->iface_type == MWIFIEX_PCIE) &&
	    skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)
		skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);

	/* Forward multicast/broadcast packet to upper layer */
	if (in_interrupt())
		netif_rx(skb);
	else
		netif_rx_ni(skb);

	return 0;
}

/*
 * This function processes the packet received on the AP interface.
 *
 * The function looks into the RxPD and performs sanity tests on the
 * received buffer to ensure it's a valid packet before processing it
 * further. If the packet is determined to be aggregated, it is
 * de-aggregated accordingly. Then the skb is passed to the AP packet
 * forwarding logic.
 *
 * The completion callback is called after processing is complete.
 */
int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
				  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	u16 rx_pkt_type;
	u8 ta[ETH_ALEN], pkt_type;
	struct mwifiex_sta_node *node;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	/* Make sure the buffer holds a complete packet header before the
	 * source address is read from it.
	 */
	if (le16_to_cpu(uap_rx_pd->rx_pkt_offset) + sizeof(*rx_pkt_hdr) >
	    skb->len) {
		mwifiex_dbg(adapter, ERROR,
			    "wrong rx packet offset: len=%d, offset=%d\n",
			    skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
		priv->stats.rx_dropped++;
		dev_kfree_skb_any(skb);
		return 0;
	}

	ether_addr_copy(ta, rx_pkt_hdr->eth803_hdr.h_source);

	if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
	     le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) {
		mwifiex_dbg(adapter, ERROR,
			    "wrong rx packet: len=%d, offset=%d, length=%d\n",
			    skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
			    le16_to_cpu(uap_rx_pd->rx_pkt_length));
		priv->stats.rx_dropped++;

		node = mwifiex_get_sta_entry(priv, ta);
		if (node)
			node->stats.tx_failed++;

		dev_kfree_skb_any(skb);
		return 0;
	}
	if (rx_pkt_type == PKT_TYPE_MGMT) {
		ret = mwifiex_process_mgmt_packet(priv, skb);
		if (ret)
			mwifiex_dbg(adapter, DATA, "Rx of mgmt packet failed");
		dev_kfree_skb_any(skb);
		return ret;
	}


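	/* Record the latest RX sequence number per TID for the transmitting
	 * station.
	 */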
	if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
		spin_lock_bh(&priv->sta_list_spinlock);
		node = mwifiex_get_sta_entry(priv, ta);
		if (node)
			node->rx_seq[uap_rx_pd->priority] =
						le16_to_cpu(uap_rx_pd->seq_num);
		spin_unlock_bh(&priv->sta_list_spinlock);
	}

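	/* Forward the packet directly if AP 11n is disabled, or if there is
	 * no RX reorder table for this TID/TA and the frame is not an AMSDU.
	 */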
	if (!priv->ap_11n_enabled ||
	    (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
	    (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
		ret = mwifiex_handle_uap_rx_forward(priv, skb);
		return ret;
	}

	/* Reorder and send to kernel */
	pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
	ret = mwifiex_11n_rx_reorder_pkt(priv, le16_to_cpu(uap_rx_pd->seq_num),
					 uap_rx_pd->priority, ta, pkt_type,
					 skb);

	if (ret || (rx_pkt_type == PKT_TYPE_BAR))
		dev_kfree_skb_any(skb);

	if (ret)
		priv->stats.rx_dropped++;

	return ret;
}

/*
 * This function fills the TxPD for AP tx packets.
 *
 * The Tx buffer received by this function should already have the
 * header space allocated for TxPD.
 *
 * This function inserts the TxPD in between interface header and actual
 * data and adjusts the buffer pointers accordingly.
 *
 * The following TxPD fields are set by this function, as required -
 *      - BSS number
 *      - Tx packet length and offset
 *      - Priority
 *      - Packet delay
 *      - Priority specific Tx control
 *      - Flags
 */
void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
			       struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_txpd *txpd;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
	int pad;
	u16 pkt_type, pkt_offset;
	int hroom = adapter->intf_hdr_len;

	if (!skb->len) {
		mwifiex_dbg(adapter, ERROR,
			    "Tx: bad packet length: %d\n", skb->len);
		tx_info->status_code = -1;
		return skb->data;
	}

	BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN);

	pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;

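	/* Compute the padding needed so that, once the TxPD and interface
	 * header are pushed in front of the payload, skb->data is aligned
	 * to MWIFIEX_DMA_ALIGN_SZ; subtracting NULL turns the pointer into
	 * an integer offset without an explicit cast.
	 */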
	pad = ((void *)skb->data - (sizeof(*txpd) + hroom) - NULL) &
			(MWIFIEX_DMA_ALIGN_SZ - 1);

	skb_push(skb, sizeof(*txpd) + pad);

	txpd = (struct uap_txpd *)skb->data;
	memset(txpd, 0, sizeof(*txpd));
	txpd->bss_num = priv->bss_num;
	txpd->bss_type = priv->bss_type;
	txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - (sizeof(*txpd) +
						pad)));
	txpd->priority = (u8)skb->priority;

	txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb);

	if (tx_info->flags & MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS ||
	    tx_info->flags & MWIFIEX_BUF_FLAG_ACTION_TX_STATUS) {
		txpd->tx_token_id = tx_info->ack_frame_id;
		txpd->flags |= MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS;
	}

	if (txpd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
		/*
		 * Set the priority-specific tx_control field; a value of 0
		 * causes the default value to be used later in this function.
		 */
		txpd->tx_control =
		    cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[txpd->priority]);

	/* Offset of actual data */
	pkt_offset = sizeof(*txpd) + pad;
	if (pkt_type == PKT_TYPE_MGMT) {
		/* Set the packet type and add header for management frame */
		txpd->tx_pkt_type = cpu_to_le16(pkt_type);
		pkt_offset += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
	}

	txpd->tx_pkt_offset = cpu_to_le16(pkt_offset);

	/* make space for adapter->intf_hdr_len */
	skb_push(skb, hroom);

	if (!txpd->tx_control)
		/* TxCtrl set by user or default */
		txpd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);

	return skb->data;
}