/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
/*! \file octeon_main.h
 *  \brief Host Driver: This file is included by all host driver source files
 *  to include common definitions.
 */

#ifndef _OCTEON_MAIN_H_
#define  _OCTEON_MAIN_H_

#include <linux/sched/signal.h>

#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
#elif BITS_PER_LONG == 64
#define CVM_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif
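
/*
 * CVM_CAST64() yields a long long that is safe to print with a "%lld"
 * format on both 32-bit and 64-bit builds.  Illustrative use only
 * ("pkt_count" is a hypothetical counter):
 *
 *	dev_info(&oct->pci_dev->dev, "packets: %lld\n", CVM_CAST64(pkt_count));
 */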

#define DRV_NAME "LiquidIO"

struct octeon_device_priv {
	/** Tasklet structures for this device. */
	struct tasklet_struct droq_tasklet;
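	/** NAPI bookkeeping mask; set and tested by the host driver .c files. */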
	unsigned long napi_mask;
};

/** This structure is used by the NIC driver to store the information required
 * to free the sk_buff when the packet has been fetched by Octeon.
 * Byte offsets below assume the worst case of a 64-bit system.
 */
struct octnet_buf_free_info {
	/** Bytes 1-8.  Pointer to network device private structure. */
	struct lio *lio;

	/** Bytes 9-16.  Pointer to sk_buff. */
	struct sk_buff *skb;

	/** Bytes 17-24.  Pointer to gather list. */
	struct octnic_gather *g;

	/** Bytes 25-32. Physical address of skb->data or gather list. */
	u64 dptr;

	/** Bytes 33-40. Piggybacked soft command, if any. */
	struct octeon_soft_command *sc;
};

/* BQL-related functions */
int octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl);
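
/*
 * Illustrative pairing of the BQL helpers above in a transmit-completion
 * path (sketch only; the real call sites live in the driver .c files):
 *
 *	unsigned int pkts_compl = 0, bytes_compl = 0;
 *
 *	octeon_update_tx_completion_counters(buf, reqtype,
 *					     &pkts_compl, &bytes_compl);
 *	octeon_report_tx_completion_to_bql(txq, pkts_compl, bytes_compl);
 */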
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);

void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
				  struct octeon_droq *droq);

/** Swap 8B blocks */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
	while (blocks) {
		cpu_to_be64s(data);
		blocks--;
		data++;
	}
}
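
/*
 * Illustrative use ("buf" and "count" are hypothetical names): convert
 * "count" 8-byte words of a buffer to big-endian in place:
 *
 *	octeon_swap_8B_data(buf, count);
 */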

/**
 * \brief unmaps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
	dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
		baridx);

	if (oct->mmio[baridx].done)
		iounmap(oct->mmio[baridx].hw_addr);

	if (oct->mmio[baridx].start)
		pci_release_region(oct->pci_dev, baridx * 2);
}

/**
 * \brief maps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 * @param max_map_len maximum length of mapped memory
 */
static inline int octeon_map_pci_barx(struct octeon_device *oct,
				      int baridx, int max_map_len)
{
	u32 mapped_len = 0;

	if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
		dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
			baridx);
		return 1;
	}

	oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
	oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);

	mapped_len = oct->mmio[baridx].len;
	if (!mapped_len)
		goto err_release_region;

	if (max_map_len && (mapped_len > max_map_len))
		mapped_len = max_map_len;

	oct->mmio[baridx].hw_addr =
		ioremap(oct->mmio[baridx].start, mapped_len);
	oct->mmio[baridx].mapped_len = mapped_len;

	dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
		baridx, oct->mmio[baridx].start, mapped_len,
		oct->mmio[baridx].len);

	if (!oct->mmio[baridx].hw_addr) {
		dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
			baridx);
		goto err_release_region;
	}
	oct->mmio[baridx].done = 1;

	return 0;

err_release_region:
	pci_release_region(oct->pci_dev, baridx * 2);
	return 1;
}
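
/*
 * Illustrative pairing of the two BAR helpers above (sketch only; the real
 * calls are made from the per-chip setup code):
 *
 *	if (octeon_map_pci_barx(oct, 0, 0))
 *		return 1;
 *	...
 *	octeon_unmap_pci_barx(oct, 0);
 */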

/* Input parameters:
 * sc: pointer to a soft command request
 * timeout: time in milliseconds that the caller wants to wait for the
 *          response of the request.
 *          0: the request will wait until its response comes back from
 *             the firmware within LIO_SC_MAX_TMO_MS milliseconds.  If the
 *             response does not return within LIO_SC_MAX_TMO_MS
 *             milliseconds, lio_process_ordered_list() will move the
 *             request to the zombie response list.
 *
 * Return value:
 * 0: got the response from the firmware for the sc request.
 * -EINTR: the user aborted the command.
 * -ETIME: the user-specified timeout expired.
 * -EBUSY: the response of the request did not return in a reasonable
 *         time (LIO_SC_MAX_TMO_MS); the sc will be moved to the zombie
 *         response list by lio_process_ordered_list().
 *
 * For a request with a non-zero return value, sc->caller_is_done will
 * already have been marked 1.
 * For a request with a zero return value, the requestor should mark
 * sc->caller_is_done with 1 after examining the response of sc.
 * lio_process_ordered_list() will free the soft command on behalf of the
 * soft command requestor.
 * This is to fix the possible race condition where both the timeout path
 * and lio_process_ordered_list()/the callback function try to free the
 * same sc structure.
 */
static inline int
wait_for_sc_completion_timeout(struct octeon_device *oct_dev,
			       struct octeon_soft_command *sc,
			       unsigned long timeout)
{
	int errno = 0;
	long timeout_jiff;

	if (timeout)
		timeout_jiff = msecs_to_jiffies(timeout);
	else
		timeout_jiff = MAX_SCHEDULE_TIMEOUT;

	timeout_jiff =
		wait_for_completion_interruptible_timeout(&sc->complete,
							  timeout_jiff);
	if (timeout_jiff == 0) {
		dev_err(&oct_dev->pci_dev->dev, "%s: sc is timeout\n",
			__func__);
		WRITE_ONCE(sc->caller_is_done, true);
		errno = -ETIME;
	} else if (timeout_jiff == -ERESTARTSYS) {
		dev_err(&oct_dev->pci_dev->dev, "%s: sc is interrupted\n",
			__func__);
		WRITE_ONCE(sc->caller_is_done, true);
		errno = -EINTR;
	} else if (sc->sc_status == OCTEON_REQUEST_TIMEOUT) {
		dev_err(&oct_dev->pci_dev->dev, "%s: sc has fatal timeout\n",
			__func__);
		WRITE_ONCE(sc->caller_is_done, true);
		errno = -EBUSY;
	}

	return errno;
}
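
/*
 * Illustrative caller pattern (sketch only), following the contract
 * described above: on a non-zero return the helper has already marked
 * sc->caller_is_done, so the caller must not touch sc again; on a zero
 * return the caller examines the response and then marks caller_is_done
 * itself:
 *
 *	retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
 *	if (retval)
 *		return retval;
 *
 *	... examine the response of sc ...
 *	WRITE_ONCE(sc->caller_is_done, true);
 */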

#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
#endif

#ifndef ROUNDUP8
#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
#endif

#ifndef ROUNDUP16
#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
#endif

#ifndef ROUNDUP128
#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
#endif
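
/* For example, ROUNDUP8(13) == 16 and ROUNDUP8(16) == 16; the other
 * ROUNDUPx macros behave the same way for their respective alignments.
 */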

#endif /* _OCTEON_MAIN_H_ */