// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
#include "hinic_tx.h"

#define TX_IRQ_NO_PENDING               0
#define TX_IRQ_NO_COALESC               0
#define TX_IRQ_NO_LLI_TIMER             0
#define TX_IRQ_NO_CREDIT                0
#define TX_IRQ_NO_RESEND_TIMER          0

#define CI_UPDATE_NO_PENDING            0
#define CI_UPDATE_NO_COALESC            0

#define HW_CONS_IDX(sq)                 be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))

#define MIN_SKB_LEN			32

#define MAX_PAYLOAD_OFFSET	        221
#define TRANSPORT_OFFSET(l4_hdr, skb)	((u32)((l4_hdr) - (skb)->data))

union hinic_l3 {
	struct iphdr *v4;
	struct ipv6hdr *v6;
	unsigned char *hdr;
};

union hinic_l4 {
	struct tcphdr *tcp;
	struct udphdr *udp;
	unsigned char *hdr;
};

enum hinic_offload_type {
	TX_OFFLOAD_TSO     = BIT(0),
	TX_OFFLOAD_CSUM    = BIT(1),
	TX_OFFLOAD_VLAN    = BIT(2),
	TX_OFFLOAD_INVALID = BIT(3),
};

/**
 * hinic_txq_clean_stats - Clean the statistics of a specific queue
 * @txq: Logical Tx Queue
 **/
void hinic_txq_clean_stats(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_update_begin(&txq_stats->syncp);
	txq_stats->pkts    = 0;
	txq_stats->bytes   = 0;
	txq_stats->tx_busy = 0;
	txq_stats->tx_wake = 0;
	txq_stats->tx_dropped = 0;
	txq_stats->big_frags_pkts = 0;
	u64_stats_update_end(&txq_stats->syncp);
}

/**
 * hinic_txq_get_stats - get statistics of Tx Queue
 * @txq: Logical Tx Queue
 * @stats: return updated stats here
 **/
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;
	unsigned int start;

	u64_stats_update_begin(&stats->syncp);
	do {
		start = u64_stats_fetch_begin(&txq_stats->syncp);
		stats->pkts    = txq_stats->pkts;
		stats->bytes   = txq_stats->bytes;
		stats->tx_busy = txq_stats->tx_busy;
		stats->tx_wake = txq_stats->tx_wake;
		stats->tx_dropped = txq_stats->tx_dropped;
		stats->big_frags_pkts = txq_stats->big_frags_pkts;
	} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
	u64_stats_update_end(&stats->syncp);
}

/**
 * txq_stats_init - Initialize the statistics of a specific queue
 * @txq: Logical Tx Queue
 **/
static void txq_stats_init(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_init(&txq_stats->syncp);
	hinic_txq_clean_stats(txq);
}

/**
 * tx_map_skb - dma mapping for skb and return sges
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: returned sges
 *
 * Return 0 - Success, negative - Failure
 **/
static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
		      struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	skb_frag_t *frag;
	dma_addr_t dma_addr;
	int i, j;

	dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma_addr)) {
		dev_err(&pdev->dev, "Failed to map Tx skb data\n");
		return -EFAULT;
	}

	hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, dma_addr)) {
			dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
			goto err_tx_map;
		}

		hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
	}

	return 0;

err_tx_map:
	for (j = 0; j < i; j++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
			       sges[j + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
	return -EFAULT;
}

/**
 * tx_unmap_skb - unmap the dma address of the skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			 struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
			       sges[i + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
}

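/**
 * get_inner_l3_l4_type - get the (inner) l3 type and l4 protocol of the skb
 * @skb: the skb
 * @ip: l3 header pointers union
 * @l4: l4 header pointers union
 * @offload_type: the offload being set up (TX_OFFLOAD_TSO or TX_OFFLOAD_CSUM)
 * @l3_type: returned l3 offload type for the task descriptor
 * @l4_proto: returned l4 protocol number
 **/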
static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
				 union hinic_l4 *l4,
				 enum hinic_offload_type offload_type,
				 enum hinic_l3_offload_type *l3_type,
				 u8 *l4_proto)
{
	u8 *exthdr;

	if (ip->v4->version == 4) {
		*l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
			   IPV4_PKT_NO_CHKSUM_OFFLOAD :
			   IPV4_PKT_WITH_CHKSUM_OFFLOAD;
		*l4_proto = ip->v4->protocol;
	} else if (ip->v4->version == 6) {
		*l3_type = IPV6_PKT;
		exthdr = ip->hdr + sizeof(*ip->v6);
		*l4_proto = ip->v6->nexthdr;
		if (exthdr != l4->hdr) {
			int start = exthdr - skb->data;
			__be16 frag_off;

			ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
		}
	} else {
		*l3_type = L3TYPE_UNKNOWN;
		*l4_proto = 0;
	}
}

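/**
 * get_inner_l4_info - get the l4 offload type, header length and payload offset
 * @skb: the skb
 * @l4: l4 header pointers union
 * @offload_type: the offload being set up (TX_OFFLOAD_TSO or TX_OFFLOAD_CSUM)
 * @l4_proto: l4 protocol number
 * @l4_offload: returned l4 offload type for the task descriptor
 * @l4_len: returned l4 header length
 * @offset: returned payload offset from the start of the packet
 **/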
static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
			      enum hinic_offload_type offload_type, u8 l4_proto,
			      enum hinic_l4_offload_type *l4_offload,
			      u32 *l4_len, u32 *offset)
{
	*l4_offload = OFFLOAD_DISABLE;
	*offset = 0;
	*l4_len = 0;

	switch (l4_proto) {
	case IPPROTO_TCP:
		*l4_offload = TCP_OFFLOAD_ENABLE;
		/* doff is in units of 4B */
		*l4_len = l4->tcp->doff * 4;
		*offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
		break;

	case IPPROTO_UDP:
		*l4_offload = UDP_OFFLOAD_ENABLE;
		*l4_len = sizeof(struct udphdr);
		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
		break;

	case IPPROTO_SCTP:
		/* only csum offload supports sctp */
		if (offload_type != TX_OFFLOAD_CSUM)
			break;

		*l4_offload = SCTP_OFFLOAD_ENABLE;
		*l4_len = sizeof(struct sctphdr);
		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
		break;

	default:
		break;
	}
}

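/**
 * csum_magic - compute the pseudo-header checksum for the given l3 header
 * @ip: l3 header pointers union
 * @proto: l4 protocol number the pseudo-header is built for
 *
 * Return the pseudo-header checksum, used (complemented) to seed the
 * hardware checksum/TSO offload
 **/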
static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
{
	return (ip->v4->version == 4) ?
		csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
		csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
}

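/**
 * offload_tso - fill the task descriptor with TSO offload information
 * @task: the send queue task descriptor to fill
 * @queue_info: the send queue ctrl queue_info field to update
 * @skb: the skb
 *
 * Return 1 - TSO offload was set, 0 - not a GSO skb, negative - Failure
 **/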
static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
		       struct sk_buff *skb)
{
	u32 offset, l4_len, ip_identify, network_hdr_len;
	enum hinic_l3_offload_type l3_offload;
	enum hinic_l4_offload_type l4_offload;
	union hinic_l3 ip;
	union hinic_l4 l4;
	u8 l4_proto;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_cow_head(skb, 0) < 0)
		return -EPROTONOSUPPORT;

	if (skb->encapsulation) {
		u32 gso_type = skb_shinfo(skb)->gso_type;
		u32 tunnel_type = 0;
		u32 l4_tunnel_len;

		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_inner_network_header_len(skb);

		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
		} else if (ip.v4->version == 6) {
			l3_offload = IPV6_PKT;
		} else {
			l3_offload = 0;
		}

		hinic_task_set_outter_l3(task, l3_offload,
					 skb_network_header_len(skb));

		if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
			l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
			tunnel_type = TUNNEL_UDP_CSUM;
		} else if (gso_type & SKB_GSO_UDP_TUNNEL) {
			tunnel_type = TUNNEL_UDP_NO_CSUM;
		}

		l4_tunnel_len = skb_inner_network_offset(skb) -
				skb_transport_offset(skb);
		hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);

		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_network_header_len(skb);
	}

	/* initialize inner IP header fields */
	if (ip.v4->version == 4)
		ip.v4->tot_len = 0;
	else
		ip.v6->payload_len = 0;

	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
			     &l4_proto);

	hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);

	ip_identify = 0;
	if (l4_proto == IPPROTO_TCP)
		l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);

	get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
			  &l4_len, &offset);

	hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
			       ip_identify, skb_shinfo(skb)->gso_size);

	return 1;
}

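/**
 * offload_csum - fill the task descriptor with checksum offload information
 * @task: the send queue task descriptor to fill
 * @queue_info: the send queue ctrl queue_info field to update
 * @skb: the skb
 *
 * Return 1 - checksum offload was set, 0 - no checksum offload was needed
 * or the packet is not supported
 **/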
static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
			struct sk_buff *skb)
{
	enum hinic_l4_offload_type l4_offload;
	u32 offset, l4_len, network_hdr_len;
	enum hinic_l3_offload_type l3_type;
	u32 tunnel_type = NOT_TUNNEL;
	union hinic_l3 ip;
	union hinic_l4 l4;
	u8 l4_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->encapsulation) {
		u32 l4_tunnel_len;

		tunnel_type = TUNNEL_UDP_NO_CSUM;
		ip.hdr = skb_network_header(skb);

		if (ip.v4->version == 4) {
			l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
			l4_proto = ip.v4->protocol;
		} else if (ip.v4->version == 6) {
			unsigned char *exthdr;
			__be16 frag_off;

			l3_type = IPV6_PKT;
			tunnel_type = TUNNEL_UDP_CSUM;
			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			l4.hdr = skb_transport_header(skb);
			if (l4.hdr != exthdr)
				ipv6_skip_exthdr(skb, exthdr - skb->data,
						 &l4_proto, &frag_off);
		} else {
			l3_type = L3TYPE_UNKNOWN;
			l4_proto = IPPROTO_RAW;
		}

		hinic_task_set_outter_l3(task, l3_type,
					 skb_network_header_len(skb));

		switch (l4_proto) {
		case IPPROTO_UDP:
			l4_tunnel_len = skb_inner_network_offset(skb) -
					skb_transport_offset(skb);
			ip.hdr = skb_inner_network_header(skb);
			l4.hdr = skb_inner_transport_header(skb);
			network_hdr_len = skb_inner_network_header_len(skb);
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			tunnel_type = NOT_TUNNEL;
			l4_tunnel_len = 0;

			ip.hdr = skb_inner_network_header(skb);
			l4.hdr = skb_transport_header(skb);
			network_hdr_len = skb_network_header_len(skb);
			break;
		default:
			/* Unsupported tunnel packet, disable csum offload */
			skb_checksum_help(skb);
			return 0;
		}

		hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_network_header_len(skb);
	}

	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
			     &l4_proto);

	hinic_task_set_inner_l3(task, l3_type, network_hdr_len);

	get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
			  &l4_len, &offset);

	hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);

	return 1;
}

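/**
 * offload_vlan - fill the task descriptor with VLAN insertion information
 * @task: the send queue task descriptor to fill
 * @queue_info: the send queue ctrl queue_info field to update
 * @vlan_tag: VLAN tag to insert
 * @vlan_pri: VLAN priority
 **/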
static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info,
			 u16 vlan_tag, u16 vlan_pri)
{
	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
				HINIC_SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);

	*queue_info |= HINIC_SQ_CTRL_SET(vlan_pri, QUEUE_INFO_PRI);
}

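/**
 * hinic_tx_offload - set up Tx offloads (TSO, checksum, VLAN) for the skb
 * @skb: the skb
 * @task: the send queue task descriptor to fill
 * @queue_info: the send queue ctrl queue_info field to update
 *
 * Return 0 - Success, negative - Failure
 **/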
static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
			    u32 *queue_info)
{
	enum hinic_offload_type offload = 0;
	u16 vlan_tag;
	int enabled;

	enabled = offload_tso(task, queue_info, skb);
	if (enabled > 0) {
		offload |= TX_OFFLOAD_TSO;
	} else if (enabled == 0) {
		enabled = offload_csum(task, queue_info, skb);
		if (enabled)
			offload |= TX_OFFLOAD_CSUM;
	} else {
		return -EPROTONOSUPPORT;
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
		vlan_tag = skb_vlan_tag_get(skb);
		offload_vlan(task, queue_info, vlan_tag,
			     vlan_tag >> VLAN_PRIO_SHIFT);
		offload |= TX_OFFLOAD_VLAN;
	}

	if (offload)
		hinic_task_set_l2hdr(task, skb_network_offset(skb));

	/* payload offset should not be more than 221 */
	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
	    MAX_PAYLOAD_OFFSET) {
		return -EPROTONOSUPPORT;
	}

	/* mss should not be less than 80 */
	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
		*queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
		*queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
	}

	return 0;
}

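/**
 * hinic_lb_xmit_frame - transmit an skb on the loopback (self-test) Tx path
 * @skb: the skb
 * @netdev: network device
 *
 * Return NETDEV_TX_OK or NETDEV_TX_BUSY
 **/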
netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 prod_idx, q_id = skb->queue_mapping;
	struct netdev_queue *netdev_txq;
	int nr_sges, err = NETDEV_TX_OK;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct hinic_txq *txq;
	struct hinic_qp *qp;

	txq = &nic_dev->txqs[q_id];
	qp = container_of(txq->sq, struct hinic_qp, sq);
	nr_sges = skb_shinfo(skb)->nr_frags + 1;

	err = tx_map_skb(nic_dev, skb, txq->sges);
	if (err)
		goto skb_error;

	wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

	sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
	if (!sq_wqe) {
		netif_stop_subqueue(netdev, qp->q_id);

		sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
		if (sq_wqe) {
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);
			goto process_sq_wqe;
		}

		tx_unmap_skb(nic_dev, skb, txq->sges);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_busy++;
		u64_stats_update_end(&txq->txq_stats.syncp);
		err = NETDEV_TX_BUSY;
		wqe_size = 0;
		goto flush_skbs;
	}

process_sq_wqe:
	hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
	netdev_txq = netdev_get_tx_queue(netdev, q_id);
	if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

	return err;

skb_error:
	dev_kfree_skb_any(skb);
	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.tx_dropped++;
	u64_stats_update_end(&txq->txq_stats.syncp);

	return NETDEV_TX_OK;
}

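/**
 * hinic_xmit_frame - transmit an skb on the normal Tx path
 * @skb: the skb
 * @netdev: network device
 *
 * Return NETDEV_TX_OK or NETDEV_TX_BUSY
 **/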
netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 prod_idx, q_id = skb->queue_mapping;
	struct netdev_queue *netdev_txq;
	int nr_sges, err = NETDEV_TX_OK;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct hinic_txq *txq;
	struct hinic_qp *qp;

	txq = &nic_dev->txqs[q_id];
	qp = container_of(txq->sq, struct hinic_qp, sq);

	if (skb->len < MIN_SKB_LEN) {
		if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
			netdev_err(netdev, "Failed to pad skb\n");
			goto update_error_stats;
		}

		skb->len = MIN_SKB_LEN;
	}

	nr_sges = skb_shinfo(skb)->nr_frags + 1;
	if (nr_sges > 17) {
		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.big_frags_pkts++;
		u64_stats_update_end(&txq->txq_stats.syncp);
	}

	if (nr_sges > txq->max_sges) {
		netdev_err(netdev, "Too many Tx sges\n");
		goto skb_error;
	}

	err = tx_map_skb(nic_dev, skb, txq->sges);
	if (err)
		goto skb_error;

	wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

	sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
	if (!sq_wqe) {
		netif_stop_subqueue(netdev, qp->q_id);

		/* Check for the case where free_tx_poll is called on another
		 * cpu and we stopped the subqueue after the free_tx_poll
		 * check.
		 */
		sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
		if (sq_wqe) {
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);
			goto process_sq_wqe;
		}

		tx_unmap_skb(nic_dev, skb, txq->sges);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_busy++;
		u64_stats_update_end(&txq->txq_stats.syncp);
		err = NETDEV_TX_BUSY;
		wqe_size = 0;
		goto flush_skbs;
	}

process_sq_wqe:
	hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);

	err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
	if (err)
		goto offload_error;

	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
	netdev_txq = netdev_get_tx_queue(netdev, q_id);
	if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

	return err;

offload_error:
	hinic_sq_return_wqe(txq->sq, wqe_size);
	tx_unmap_skb(nic_dev, skb, txq->sges);

skb_error:
	dev_kfree_skb_any(skb);

update_error_stats:
	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.tx_dropped++;
	u64_stats_update_end(&txq->txq_stats.syncp);

	return NETDEV_TX_OK;
}

/**
 * tx_free_skb - unmap and free skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			struct hinic_sge *sges)
{
	tx_unmap_skb(nic_dev, skb, sges);

	dev_kfree_skb_any(skb);
}

/**
 * free_all_tx_skbs - free all skbs in tx queue
 * @txq: tx queue
 **/
static void free_all_tx_skbs(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_sq *sq = txq->sq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct sk_buff *skb;
	int nr_sges;
	u16 ci;

	while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
		sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
		if (!sq_wqe)
			break;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	}
}

/**
 * free_tx_poll - free finished tx skbs in tx queue that is connected to napi
 * @napi: napi
 * @budget: max number of tx completions to process in this poll
 *
 * Return number of processed packets, or budget if there is more work to do
 **/
static int free_tx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
	struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct netdev_queue *netdev_txq;
	struct hinic_sq *sq = txq->sq;
	struct hinic_wq *wq = sq->wq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	int nr_sges, pkts = 0;
	struct sk_buff *skb;
	u64 tx_bytes = 0;
	u16 hw_ci, sw_ci;

	do {
		hw_ci = HW_CONS_IDX(sq) & wq->mask;

		dma_rmb();

		/* Reading a WQEBB to get the real WQE size and consumer index. */
		sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
		if ((!sq_wqe) ||
		    (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
			break;

		/* If this WQE has multiple WQEBBs, we will read again to get
		 * the full size WQE.
		 */
		if (wqe_size > wq->wqebb_size) {
			sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
			if (unlikely(!sq_wqe))
				break;
		}

		tx_bytes += skb->len;
		pkts++;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	} while (pkts < budget);

	if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
	    hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
		netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);

		__netif_tx_lock(netdev_txq, smp_processor_id());
		if (!netif_testing(nic_dev->netdev))
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);

		__netif_tx_unlock(netdev_txq);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_wake++;
		u64_stats_update_end(&txq->txq_stats.syncp);
	}

	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.bytes += tx_bytes;
	txq->txq_stats.pkts += pkts;
	u64_stats_update_end(&txq->txq_stats.syncp);

	if (pkts < budget) {
		napi_complete(napi);
		if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
			hinic_hwdev_set_msix_state(nic_dev->hwdev,
						   sq->msix_entry,
						   HINIC_MSIX_ENABLE);

		return pkts;
	}

	return budget;
}

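/**
 * tx_irq - interrupt handler for Tx completions
 * @irq: the irq number
 * @data: the txq that registered the interrupt
 *
 * Return IRQ_HANDLED
 **/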
static irqreturn_t tx_irq(int irq, void *data)
{
	struct hinic_txq *txq = data;
	struct hinic_dev *nic_dev;

	nic_dev = netdev_priv(txq->netdev);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		/* Disable the interrupt until napi is complete */
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   txq->sq->msix_entry,
					   HINIC_MSIX_DISABLE);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);

	napi_schedule(&txq->napi);
	return IRQ_HANDLED;
}

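/**
 * tx_request_irq - add napi, configure interrupt coalescing and request the
 * Tx completion irq
 * @txq: Logical Tx Queue
 *
 * Return 0 - Success, negative - Failure
 **/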
static int tx_request_irq(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_msix_config interrupt_info = {0};
	struct hinic_intr_coal_info *intr_coal = NULL;
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_sq *sq = txq->sq;
	struct hinic_qp *qp;
	int err;

	qp = container_of(sq, struct hinic_qp, sq);

	netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, nic_dev->tx_weight);

	hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
			     TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
			     TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
			     TX_IRQ_NO_RESEND_TIMER);

	intr_coal = &nic_dev->tx_intr_coalesce[qp->q_id];
	interrupt_info.msix_index = sq->msix_entry;
	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
	interrupt_info.pending_cnt = intr_coal->pending_limt;
	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

	err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
	if (err) {
		netif_err(nic_dev, drv, txq->netdev,
			  "Failed to set TX interrupt coalescing attribute\n");
		netif_napi_del(&txq->napi);
		return err;
	}

	err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
	if (err) {
		dev_err(&pdev->dev, "Failed to request Tx irq\n");
		netif_napi_del(&txq->napi);
		return err;
	}

	return 0;
}

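/**
 * tx_free_irq - free the Tx completion irq and delete napi
 * @txq: Logical Tx Queue
 **/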
static void tx_free_irq(struct hinic_txq *txq)
{
	struct hinic_sq *sq = txq->sq;

	free_irq(sq->irq, txq);
	netif_napi_del(&txq->napi);
}

/**
 * hinic_init_txq - Initialize the Tx Queue
 * @txq: Logical Tx Queue
 * @sq: Hardware Tx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	int err, irqname_len;
	size_t sges_size;

	txq->netdev = netdev;
	txq->sq = sq;

	txq_stats_init(txq);

	txq->max_sges = HINIC_MAX_SQ_BUFDESCS;

	sges_size = txq->max_sges * sizeof(*txq->sges);
	txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->sges)
		return -ENOMEM;

	sges_size = txq->max_sges * sizeof(*txq->free_sges);
	txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->free_sges) {
		err = -ENOMEM;
		goto err_alloc_free_sges;
	}

	irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1;
	txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
	if (!txq->irq_name) {
		err = -ENOMEM;
		goto err_alloc_irqname;
	}

	sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id);

	err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
					 CI_UPDATE_NO_COALESC);
	if (err)
		goto err_hw_ci;

	err = tx_request_irq(txq);
	if (err) {
		netdev_err(netdev, "Failed to request Tx irq\n");
		goto err_req_tx_irq;
	}

	return 0;

err_req_tx_irq:
err_hw_ci:
	devm_kfree(&netdev->dev, txq->irq_name);

err_alloc_irqname:
	devm_kfree(&netdev->dev, txq->free_sges);

err_alloc_free_sges:
	devm_kfree(&netdev->dev, txq->sges);
	return err;
}

/**
 * hinic_clean_txq - Clean the Tx Queue
 * @txq: Logical Tx Queue
 **/
void hinic_clean_txq(struct hinic_txq *txq)
{
	struct net_device *netdev = txq->netdev;

	tx_free_irq(txq);

	free_all_tx_skbs(txq);

	devm_kfree(&netdev->dev, txq->irq_name);
	devm_kfree(&netdev->dev, txq->free_sges);
	devm_kfree(&netdev->dev, txq->sges);
}