/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 */

#ifndef __EHEA_H__
#define __EHEA_H__

#include <linux/module.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>

#include <asm/ibmebus.h>
#include <asm/io.h>

#define DRV_NAME	"ehea"
#define DRV_VERSION	"EHEA_0107"

/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
#define DLPAR_MEM_ADD      2
#define DLPAR_MEM_REM      4
#define EHEA_CAPABILITIES  (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM)

#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EHEA_MAX_ENTRIES_RQ1 32767
#define EHEA_MAX_ENTRIES_RQ2 16383
#define EHEA_MAX_ENTRIES_RQ3 16383
#define EHEA_MAX_ENTRIES_SQ  32767
#define EHEA_MIN_ENTRIES_QP  127

#define EHEA_SMALL_QUEUES

#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT      1023
#define EHEA_DEF_ENTRIES_SQ     1023
#define EHEA_DEF_ENTRIES_RQ1    1023
#define EHEA_DEF_ENTRIES_RQ2    1023
#define EHEA_DEF_ENTRIES_RQ3    511
#else
#define EHEA_MAX_CQE_COUNT      4080
#define EHEA_DEF_ENTRIES_SQ     4080
#define EHEA_DEF_ENTRIES_RQ1    8160
#define EHEA_DEF_ENTRIES_RQ2    2040
#define EHEA_DEF_ENTRIES_RQ3    2040
#endif

#define EHEA_MAX_ENTRIES_EQ 20

#define EHEA_SG_SQ  2
#define EHEA_SG_RQ1 1
#define EHEA_SG_RQ2 0
#define EHEA_SG_RQ3 0

#define EHEA_MAX_PACKET_SIZE    9022	/* for jumbo frames */
#define EHEA_RQ2_PKT_SIZE       2048
#define EHEA_L_PKT_SIZE         256	/* low latency */

/* Send completion signaling */

/* Protection Domain Identifier */
#define EHEA_PD_ID        0xaabcdeff

#define EHEA_RQ2_THRESHOLD	1
#define EHEA_RQ3_THRESHOLD	4	/* use RQ3 threshold of 2048 bytes */

#define EHEA_SPEED_10G         10000
#define EHEA_SPEED_1G           1000
#define EHEA_SPEED_100M          100
#define EHEA_SPEED_10M            10
#define EHEA_SPEED_AUTONEG         0

/* Broadcast/Multicast registration types */
#define EHEA_BCMC_SCOPE_ALL	0x08
#define EHEA_BCMC_SCOPE_SINGLE	0x00
#define EHEA_BCMC_MULTICAST	0x04
#define EHEA_BCMC_BROADCAST	0x00
#define EHEA_BCMC_UNTAGGED	0x02
#define EHEA_BCMC_TAGGED	0x00
#define EHEA_BCMC_VLANID_ALL	0x01
#define EHEA_BCMC_VLANID_SINGLE	0x00
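
/*
 * Illustrative sketch only (compiled out, not used by the driver): the BCMC
 * values above are flag bits that are OR'd together into a single
 * registration type, e.g. for struct ehea_bcmc_reg_entry::reg_type below.
 * The particular combination shown here is hypothetical.
 */
#if 0
static inline u8 example_bcmc_reg_type(void)
{
	/* single-port scope, broadcast, untagged registration */
	return EHEA_BCMC_SCOPE_SINGLE | EHEA_BCMC_BROADCAST |
	       EHEA_BCMC_UNTAGGED;
}
#endif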

#define EHEA_CACHE_LINE          128

/* Memory Regions */
#define EHEA_MR_ACC_CTRL       0x00800000

#define EHEA_BUSMAP_START      0x8000000000000000ULL
#define EHEA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
#define EHEA_DIR_INDEX_SHIFT 13                   /* 8k Entries in 64k block */
#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2)
#define EHEA_MAP_ENTRIES (1 << EHEA_DIR_INDEX_SHIFT)
#define EHEA_MAP_SIZE (0x10000)                   /* currently fixed map size */
#define EHEA_INDEX_MASK (EHEA_MAP_ENTRIES - 1)

#define EHEA_WATCH_DOG_TIMEOUT (10 * HZ)

/* utility functions */

void ehea_dump(void *adr, int len, char *msg);

#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))

#define EHEA_BMASK_IBM(from, to) (((63 - (to)) << 16) + ((to) - (from) + 1))

#define EHEA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)

#define EHEA_BMASK_MASK(mask) \
	(0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))

#define EHEA_BMASK_SET(mask, value) \
	((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))

#define EHEA_BMASK_GET(mask, value) \
	(EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
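
/*
 * Illustrative sketch only (compiled out): how the EHEA_BMASK helpers are
 * typically combined.  EXAMPLE_FIELD is a hypothetical field covering IBM
 * bits 48..55 of a 64-bit word; it is not part of the hardware interface.
 */
#if 0
#define EXAMPLE_FIELD	EHEA_BMASK_IBM(48, 55)

static inline u64 example_bmask_roundtrip(u64 value)
{
	/* pack the low 8 bits of value into the field ... */
	u64 packed = EHEA_BMASK_SET(EXAMPLE_FIELD, value);

	/* ... and extract them again; this returns value & 0xff */
	return EHEA_BMASK_GET(EXAMPLE_FIELD, packed);
}
#endif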

/*
 * Generic ehea page
 */
struct ehea_page {
	u8 entries[PAGE_SIZE];
};

/*
 * Generic queue in linux kernel virtual memory
 */
struct hw_queue {
	u64 current_q_offset;		/* current queue entry */
	struct ehea_page **queue_pages;	/* array of pages belonging to queue */
	u32 qe_size;			/* queue entry size */
	u32 queue_length;		/* queue length allocated in bytes */
	u32 pagesize;
	u32 toggle_state;		/* toggle flag - per page */
	u32 reserved;			/* 64 bit alignment */
};
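
/*
 * Illustrative sketch only (compiled out): how a hw_queue offset might be
 * resolved to a queue entry.  This is not the driver's real accessor (that
 * lives in ehea_qmr.h); it only demonstrates the intended meaning of
 * current_q_offset, queue_length and pagesize above.
 */
#if 0
static inline void *example_hw_queue_entry(struct hw_queue *queue)
{
	u64 offset = queue->current_q_offset;

	/* the offset wraps at the end of the allocated queue memory */
	if (offset >= queue->queue_length)
		offset = 0;

	/* pick the page and then the byte offset within that page */
	return &queue->queue_pages[offset / queue->pagesize]
			->entries[offset % queue->pagesize];
}
#endif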

/*
 * For pSeries this is a 64bit memory address where
 * I/O memory is mapped into CPU address space
 */
struct h_epa {
	void __iomem *addr;
};

struct h_epa_user {
	u64 addr;
};

struct h_epas {
	struct h_epa kernel;	/* kernel space accessible resource,
				   set to 0 if unused */
	struct h_epa_user user;	/* user space accessible resource,
				   set to 0 if unused */
};

/*
 * Memory map data structures
 */
struct ehea_dir_bmap {
	u64 ent[EHEA_MAP_ENTRIES];
};
struct ehea_top_bmap {
	struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES];
};
struct ehea_bmap {
	struct ehea_top_bmap *top[EHEA_MAP_ENTRIES];
};
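
/*
 * Illustrative sketch only (compiled out): how a flat map index could be
 * split across the three bmap levels above using EHEA_TOP_INDEX_SHIFT,
 * EHEA_DIR_INDEX_SHIFT and EHEA_INDEX_MASK.  The real lookup lives in
 * ehea_qmr.c; this sketch assumes the caller has already derived a flat
 * index and only shows the intended indexing scheme.
 */
#if 0
static inline u64 example_bmap_lookup(struct ehea_bmap *bmap, u64 index)
{
	int top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
	int dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
	int idx = index & EHEA_INDEX_MASK;

	/* missing intermediate levels mean the address is not mapped */
	if (!bmap->top[top] || !bmap->top[top]->dir[dir])
		return EHEA_INVAL_ADDR;

	return bmap->top[top]->dir[dir]->ent[idx];
}
#endif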

struct ehea_qp;
struct ehea_cq;
struct ehea_eq;
struct ehea_port;
struct ehea_av;

/*
 * Queue attributes passed to ehea_create_qp()
 */
struct ehea_qp_init_attr {
	/* input parameter */
	u32 qp_token;           /* queue token */
	u8 low_lat_rq1;
	u8 signalingtype;       /* cqe generation flag */
	u8 rq_count;            /* num of receive queues */
	u8 eqe_gen;             /* eqe generation flag */
	u16 max_nr_send_wqes;   /* max number of send wqes */
	u16 max_nr_rwqes_rq1;   /* max number of receive wqes */
	u16 max_nr_rwqes_rq2;
	u16 max_nr_rwqes_rq3;
	u8 wqe_size_enc_sq;
	u8 wqe_size_enc_rq1;
	u8 wqe_size_enc_rq2;
	u8 wqe_size_enc_rq3;
	u8 swqe_imm_data_len;   /* immediate data length for swqes */
	u16 port_nr;
	u16 rq2_threshold;
	u16 rq3_threshold;
	u64 send_cq_handle;
	u64 recv_cq_handle;
	u64 aff_eq_handle;

	/* output parameter */
	u32 qp_nr;
	u16 act_nr_send_wqes;
	u16 act_nr_rwqes_rq1;
	u16 act_nr_rwqes_rq2;
	u16 act_nr_rwqes_rq3;
	u8 act_wqe_size_enc_sq;
	u8 act_wqe_size_enc_rq1;
	u8 act_wqe_size_enc_rq2;
	u8 act_wqe_size_enc_rq3;
	u32 nr_sq_pages;
	u32 nr_rq1_pages;
	u32 nr_rq2_pages;
	u32 nr_rq3_pages;
	u32 liobn_sq;
	u32 liobn_rq1;
	u32 liobn_rq2;
	u32 liobn_rq3;
};
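
/*
 * Illustrative sketch only (compiled out): filling the input half of
 * struct ehea_qp_init_attr before it is handed to the QP create path
 * (ehea_create_qp()).  The values shown are the defaults defined above and
 * the helper itself is hypothetical; the "act_*" output members are written
 * back by the create call, not by the caller.
 */
#if 0
static inline void example_fill_qp_init_attr(struct ehea_qp_init_attr *attr,
					     u16 port_nr)
{
	memset(attr, 0, sizeof(*attr));

	attr->rq_count = 3;			/* use RQ1, RQ2 and RQ3 */
	attr->max_nr_send_wqes = EHEA_DEF_ENTRIES_SQ;
	attr->max_nr_rwqes_rq1 = EHEA_DEF_ENTRIES_RQ1;
	attr->max_nr_rwqes_rq2 = EHEA_DEF_ENTRIES_RQ2;
	attr->max_nr_rwqes_rq3 = EHEA_DEF_ENTRIES_RQ3;
	attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	attr->port_nr = port_nr;
}
#endif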

/*
 * Event Queue attributes, passed as parameter
 */
struct ehea_eq_attr {
	u32 type;
	u32 max_nr_of_eqes;
	u8 eqe_gen;        /* generate eqe flag */
	u64 eq_handle;
	u32 act_nr_of_eqes;
	u32 nr_pages;
	u32 ist1;          /* Interrupt service token */
	u32 ist2;
	u32 ist3;
	u32 ist4;
};

/*
 * Event Queue
 */
struct ehea_eq {
	struct ehea_adapter *adapter;
	struct hw_queue hw_queue;
	u64 fw_handle;
	struct h_epas epas;
	spinlock_t spinlock;
	struct ehea_eq_attr attr;
};

/*
 * HEA Queues
 */
struct ehea_qp {
	struct ehea_adapter *adapter;
	u64 fw_handle;			/* QP handle for firmware calls */
	struct hw_queue hw_squeue;
	struct hw_queue hw_rqueue1;
	struct hw_queue hw_rqueue2;
	struct hw_queue hw_rqueue3;
	struct h_epas epas;
	struct ehea_qp_init_attr init_attr;
};

/*
 * Completion Queue attributes
 */
struct ehea_cq_attr {
	/* input parameter */
	u32 max_nr_of_cqes;
	u32 cq_token;
	u64 eq_handle;

	/* output parameter */
	u32 act_nr_of_cqes;
	u32 nr_pages;
};

/*
 * Completion Queue
 */
struct ehea_cq {
	struct ehea_adapter *adapter;
	u64 fw_handle;
	struct hw_queue hw_queue;
	struct h_epas epas;
	struct ehea_cq_attr attr;
};

/*
 * Memory Region
 */
struct ehea_mr {
	struct ehea_adapter *adapter;
	u64 handle;
	u64 vaddr;
	u32 lkey;
};

/*
 * Port state information
 */
struct port_stats {
	int poll_receive_errors;
	int queue_stopped;
	int err_tcp_cksum;
	int err_ip_cksum;
	int err_frame_crc;
};

#define EHEA_IRQ_NAME_SIZE 20

/*
 * Queue SKB Array
 */
struct ehea_q_skb_arr {
	struct sk_buff **arr;		/* skb array for queue */
	int len;			/* array length */
	int index;			/* array index */
	int os_skbs;			/* rq2/rq3 only: outstanding skbs */
};
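
/*
 * Illustrative sketch only (compiled out): the skb array is used as a ring,
 * with "index" advancing through "len" slots and wrapping back to zero.
 * This helper is hypothetical; the real producers and consumers live in
 * ehea_main.c.
 */
#if 0
static inline struct sk_buff *example_skb_arr_next(struct ehea_q_skb_arr *skba)
{
	struct sk_buff *skb = skba->arr[skba->index];

	/* advance the ring index, wrapping at the end of the array */
	if (++skba->index >= skba->len)
		skba->index = 0;

	return skb;
}
#endif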

/*
 * Port resources
 */
struct ehea_port_res {
	struct napi_struct napi;
	struct port_stats p_stats;
	struct ehea_mr send_mr;		/* send memory region */
	struct ehea_mr recv_mr;		/* receive memory region */
	struct ehea_port *port;
	char int_recv_name[EHEA_IRQ_NAME_SIZE];
	char int_send_name[EHEA_IRQ_NAME_SIZE];
	struct ehea_qp *qp;
	struct ehea_cq *send_cq;
	struct ehea_cq *recv_cq;
	struct ehea_eq *eq;
	struct ehea_q_skb_arr rq1_skba;
	struct ehea_q_skb_arr rq2_skba;
	struct ehea_q_skb_arr rq3_skba;
	struct ehea_q_skb_arr sq_skba;
	int sq_skba_size;
	int swqe_refill_th;
	atomic_t swqe_avail;
	int swqe_ll_count;
	u32 swqe_id_counter;
	u64 tx_packets;
	u64 tx_bytes;
	u64 rx_packets;
	u64 rx_bytes;
	int sq_restart_flag;
};

#define EHEA_MAX_PORTS 16

#define EHEA_NUM_PORTRES_FW_HANDLES    6  /* QP handle, SendCQ handle,
					     RecvCQ handle, EQ handle,
					     SendMR handle, RecvMR handle */
#define EHEA_NUM_PORT_FW_HANDLES       1  /* EQ handle */
#define EHEA_NUM_ADAPTER_FW_HANDLES    2  /* MR handle, NEQ handle */

struct ehea_adapter {
	u64 handle;
	struct platform_device *ofdev;
	struct ehea_port *port[EHEA_MAX_PORTS];
	struct ehea_eq *neq;       /* notification event queue */
	struct tasklet_struct neq_tasklet;
	struct ehea_mr mr;
	u32 pd;                    /* protection domain */
	u64 max_mc_mac;            /* max number of multicast mac addresses */
	int active_ports;
	struct list_head list;
};

struct ehea_mc_list {
	struct list_head list;
	u64 macaddr;
};

/* kdump support */
struct ehea_fw_handle_entry {
	u64 adh;               /* Adapter Handle */
	u64 fwh;               /* Firmware Handle */
};

struct ehea_fw_handle_array {
	struct ehea_fw_handle_entry *arr;
	int num_entries;
	struct mutex lock;
};

struct ehea_bcmc_reg_entry {
	u64 adh;               /* Adapter Handle */
	u32 port_id;           /* Logical Port Id */
	u8 reg_type;           /* Registration Type */
	u64 macaddr;
};

struct ehea_bcmc_reg_array {
	struct ehea_bcmc_reg_entry *arr;
	int num_entries;
	spinlock_t lock;
};

#define EHEA_PORT_UP 1
#define EHEA_PORT_DOWN 0
#define EHEA_PHY_LINK_UP 1
#define EHEA_PHY_LINK_DOWN 0
#define EHEA_MAX_PORT_RES 16

struct ehea_port {
	struct ehea_adapter *adapter;	 /* adapter that owns this port */
	struct net_device *netdev;
	struct rtnl_link_stats64 stats;
	struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
	struct platform_device ofdev;	 /* Open Firmware Device */
	struct ehea_mc_list *mc_list;	 /* Multicast MAC addresses */
	struct ehea_eq *qp_eq;
	struct work_struct reset_task;
	struct delayed_work stats_work;
	struct mutex port_lock;
	char int_aff_name[EHEA_IRQ_NAME_SIZE];
	int allmulti;			 /* Indicates IFF_ALLMULTI state */
	int promisc;			 /* Indicates IFF_PROMISC state */
	int num_mcs;
	int resets;
	unsigned long flags;
	u64 mac_addr;
	u32 logical_port_id;
	u32 port_speed;
	u32 msg_enable;
	u32 sig_comp_iv;
	u32 state;
	u8 phy_link;
	u8 full_duplex;
	u8 autoneg;
	u8 num_def_qps;
	wait_queue_head_t swqe_avail_wq;
	wait_queue_head_t restart_wq;
};

struct port_res_cfg {
	int max_entries_rcq;
	int max_entries_scq;
	int max_entries_sq;
	int max_entries_rq1;
	int max_entries_rq2;
	int max_entries_rq3;
};

enum ehea_flag_bits {
	__EHEA_STOP_XFER,
	__EHEA_DISABLE_PORT_RESET
};
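
/*
 * Illustrative sketch only (compiled out): these enum values are bit
 * numbers intended for the kernel bitops API, e.g. testing a flags word
 * such as ehea_port::flags.  The helper below is hypothetical.
 */
#if 0
static inline bool example_xfer_stopped(struct ehea_port *port)
{
	/* query the flag bit; set_bit()/clear_bit() toggle it elsewhere */
	return test_bit(__EHEA_STOP_XFER, &port->flags);
}
#endif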

void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);

#endif	/* __EHEA_H__ */