/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/mii.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
#include "selftest.h"
#include "mdio_10g.h"

/* Hardware control for SFC4000 (aka Falcon). */

/**************************************************************************
 *
 * NIC stats
 *
 **************************************************************************
 */

#define FALCON_MAC_STATS_SIZE 0x100

#define XgRxOctets_offset 0x0
#define XgRxOctets_WIDTH 48
#define XgRxOctetsOK_offset 0x8
#define XgRxOctetsOK_WIDTH 48
#define XgRxPkts_offset 0x10
#define XgRxPkts_WIDTH 32
#define XgRxPktsOK_offset 0x14
#define XgRxPktsOK_WIDTH 32
#define XgRxBroadcastPkts_offset 0x18
#define XgRxBroadcastPkts_WIDTH 32
#define XgRxMulticastPkts_offset 0x1C
#define XgRxMulticastPkts_WIDTH 32
#define XgRxUnicastPkts_offset 0x20
#define XgRxUnicastPkts_WIDTH 32
#define XgRxUndersizePkts_offset 0x24
#define XgRxUndersizePkts_WIDTH 32
#define XgRxOversizePkts_offset 0x28
#define XgRxOversizePkts_WIDTH 32
#define XgRxJabberPkts_offset 0x2C
#define XgRxJabberPkts_WIDTH 32
#define XgRxUndersizeFCSerrorPkts_offset 0x30
#define XgRxUndersizeFCSerrorPkts_WIDTH 32
#define XgRxDropEvents_offset 0x34
#define XgRxDropEvents_WIDTH 32
#define XgRxFCSerrorPkts_offset 0x38
#define XgRxFCSerrorPkts_WIDTH 32
#define XgRxAlignError_offset 0x3C
#define XgRxAlignError_WIDTH 32
#define XgRxSymbolError_offset 0x40
#define XgRxSymbolError_WIDTH 32
#define XgRxInternalMACError_offset 0x44
#define XgRxInternalMACError_WIDTH 32
#define XgRxControlPkts_offset 0x48
#define XgRxControlPkts_WIDTH 32
#define XgRxPausePkts_offset 0x4C
#define XgRxPausePkts_WIDTH 32
#define XgRxPkts64Octets_offset 0x50
#define XgRxPkts64Octets_WIDTH 32
#define XgRxPkts65to127Octets_offset 0x54
#define XgRxPkts65to127Octets_WIDTH 32
#define XgRxPkts128to255Octets_offset 0x58
#define XgRxPkts128to255Octets_WIDTH 32
#define XgRxPkts256to511Octets_offset 0x5C
#define XgRxPkts256to511Octets_WIDTH 32
#define XgRxPkts512to1023Octets_offset 0x60
#define XgRxPkts512to1023Octets_WIDTH 32
#define XgRxPkts1024to15xxOctets_offset 0x64
#define XgRxPkts1024to15xxOctets_WIDTH 32
#define XgRxPkts15xxtoMaxOctets_offset 0x68
#define XgRxPkts15xxtoMaxOctets_WIDTH 32
#define XgRxLengthError_offset 0x6C
#define XgRxLengthError_WIDTH 32
#define XgTxPkts_offset 0x80
#define XgTxPkts_WIDTH 32
#define XgTxOctets_offset 0x88
#define XgTxOctets_WIDTH 48
#define XgTxMulticastPkts_offset 0x90
#define XgTxMulticastPkts_WIDTH 32
#define XgTxBroadcastPkts_offset 0x94
#define XgTxBroadcastPkts_WIDTH 32
#define XgTxUnicastPkts_offset 0x98
#define XgTxUnicastPkts_WIDTH 32
#define XgTxControlPkts_offset 0x9C
#define XgTxControlPkts_WIDTH 32
#define XgTxPausePkts_offset 0xA0
#define XgTxPausePkts_WIDTH 32
#define XgTxPkts64Octets_offset 0xA4
#define XgTxPkts64Octets_WIDTH 32
#define XgTxPkts65to127Octets_offset 0xA8
#define XgTxPkts65to127Octets_WIDTH 32
#define XgTxPkts128to255Octets_offset 0xAC
#define XgTxPkts128to255Octets_WIDTH 32
#define XgTxPkts256to511Octets_offset 0xB0
#define XgTxPkts256to511Octets_WIDTH 32
#define XgTxPkts512to1023Octets_offset 0xB4
#define XgTxPkts512to1023Octets_WIDTH 32
#define XgTxPkts1024to15xxOctets_offset 0xB8
#define XgTxPkts1024to15xxOctets_WIDTH 32
#define XgTxPkts1519toMaxOctets_offset 0xBC
#define XgTxPkts1519toMaxOctets_WIDTH 32
#define XgTxUndersizePkts_offset 0xC0
#define XgTxUndersizePkts_WIDTH 32
#define XgTxOversizePkts_offset 0xC4
#define XgTxOversizePkts_WIDTH 32
#define XgTxNonTcpUdpPkt_offset 0xC8
#define XgTxNonTcpUdpPkt_WIDTH 16
#define XgTxMacSrcErrPkt_offset 0xCC
#define XgTxMacSrcErrPkt_WIDTH 16
#define XgTxIpSrcErrPkt_offset 0xD0
#define XgTxIpSrcErrPkt_WIDTH 16
#define XgDmaDone_offset 0xD4
#define XgDmaDone_WIDTH 32

#define FALCON_XMAC_STATS_DMA_FLAG(efx)				\
	(*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset))
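
/* Assuming the hardware writes the XgDmaDone word last (as the
 * request/complete handshake below relies on), a non-zero value here
 * means a requested statistics DMA has finished: falcon_stats_request()
 * clears the flag before starting a transfer and falcon_stats_complete()
 * tests it.
 */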

#define FALCON_DMA_STAT(ext_name, hw_name)				\
	[FALCON_STAT_ ## ext_name] =					\
	{ #ext_name,							\
	  /* 48-bit stats are zero-padded to 64 on DMA */		\
	  hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH,	\
	  hw_name ## _ ## offset }
#define FALCON_OTHER_STAT(ext_name)					\
	[FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
#define GENERIC_SW_STAT(ext_name)				\
	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
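
/* For illustration, FALCON_DMA_STAT(tx_bytes, XgTxOctets) expands to
 *   [FALCON_STAT_tx_bytes] = { "tx_bytes", 64, XgTxOctets_offset }
 * since XgTxOctets_WIDTH is 48 and 48-bit counters are DMAed as 64 bits.
 */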

static const struct ef4_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
	FALCON_DMA_STAT(tx_bytes, XgTxOctets),
	FALCON_DMA_STAT(tx_packets, XgTxPkts),
	FALCON_DMA_STAT(tx_pause, XgTxPausePkts),
	FALCON_DMA_STAT(tx_control, XgTxControlPkts),
	FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts),
	FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts),
	FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts),
	FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts),
	FALCON_DMA_STAT(tx_64, XgTxPkts64Octets),
	FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets),
	FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets),
	FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets),
	FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets),
	FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets),
	FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets),
	FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts),
	FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt),
	FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt),
	FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt),
	FALCON_DMA_STAT(rx_bytes, XgRxOctets),
	FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK),
	FALCON_OTHER_STAT(rx_bad_bytes),
	FALCON_DMA_STAT(rx_packets, XgRxPkts),
	FALCON_DMA_STAT(rx_good, XgRxPktsOK),
	FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts),
	FALCON_DMA_STAT(rx_pause, XgRxPausePkts),
	FALCON_DMA_STAT(rx_control, XgRxControlPkts),
	FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts),
	FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts),
	FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts),
	FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts),
	FALCON_DMA_STAT(rx_64, XgRxPkts64Octets),
	FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets),
	FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets),
	FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets),
	FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets),
	FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets),
	FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets),
	FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts),
	FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts),
	FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts),
	FALCON_DMA_STAT(rx_overflow, XgRxDropEvents),
	FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError),
	FALCON_DMA_STAT(rx_align_error, XgRxAlignError),
	FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
	FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
	FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
	GENERIC_SW_STAT(rx_nodesc_trunc),
	GENERIC_SW_STAT(rx_noskb_drops),
};
static const unsigned long falcon_stat_mask[] = {
	[0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
};
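
/* Every stat described above is valid on Falcon, so the mask is simply
 * an all-ones bitmap of FALCON_STAT_COUNT bits, rounded up to whole longs.
 */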

/**************************************************************************
 *
 * Basic SPI command set and bit definitions
 *
 *************************************************************************/

#define SPI_WRSR 0x01		/* Write status register */
#define SPI_WRITE 0x02		/* Write data to memory array */
#define SPI_READ 0x03		/* Read data from memory array */
#define SPI_WRDI 0x04		/* Reset write enable latch */
#define SPI_RDSR 0x05		/* Read status register */
#define SPI_WREN 0x06		/* Set write enable latch */
#define SPI_SST_EWSR 0x50	/* SST: Enable write to status register */

#define SPI_STATUS_WPEN 0x80	/* Write-protect pin enabled */
#define SPI_STATUS_BP2 0x10	/* Block protection bit 2 */
#define SPI_STATUS_BP1 0x08	/* Block protection bit 1 */
#define SPI_STATUS_BP0 0x04	/* Block protection bit 0 */
#define SPI_STATUS_WEN 0x02	/* State of the write enable latch */
#define SPI_STATUS_NRDY 0x01	/* Device busy flag */
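
/* For reference, a buffered write is composed from these opcodes roughly
 * as follows (a sketch of the sequence falcon_spi_write() implements
 * below):
 *
 *   SPI_WREN                     - set the write enable latch
 *   SPI_WRITE + address + data   - program at most one write block
 *   SPI_RDSR (repeated)          - poll until SPI_STATUS_NRDY clears
 */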

/**************************************************************************
 *
 * Non-volatile memory layout
 *
 **************************************************************************
 */

/* SFC4000 flash is partitioned into:
 *     0-0x400       chip and board config (see struct falcon_nvconfig)
 *     0x400-0x8000  unused (or may contain VPD if EEPROM not present)
 *     0x8000-end    boot code (mapped to PCI expansion ROM)
 * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
 * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
 *     0-0x400       chip and board config
 *     configurable  VPD
 *     0x800-0x1800  boot config
 * Aside from the chip and board config, all of these are optional and may
 * be absent or truncated depending on the devices used.
 */
#define FALCON_NVCONFIG_END 0x400U
#define FALCON_FLASH_BOOTCODE_START 0x8000U
#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U

/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
struct falcon_nvconfig_board_v2 {
	__le16 nports;
	u8 port0_phy_addr;
	u8 port0_phy_type;
	u8 port1_phy_addr;
	u8 port1_phy_type;
	__le16 asic_sub_revision;
	__le16 board_revision;
} __packed;

/* Board configuration v3 extra information */
struct falcon_nvconfig_board_v3 {
	__le32 spi_device_type[2];
} __packed;

/* Bit numbers for spi_device_type */
#define SPI_DEV_TYPE_SIZE_LBN 0
#define SPI_DEV_TYPE_SIZE_WIDTH 5
#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
#define SPI_DEV_TYPE_FIELD(type, field)					\
	(((type) >> EF4_LOW_BIT(field)) & EF4_MASK32(EF4_WIDTH(field)))
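
/* Example: with large_eeprom_type as defined below,
 *   SPI_DEV_TYPE_FIELD(large_eeprom_type, SPI_DEV_TYPE_SIZE)
 * evaluates to 13, i.e. a device of 1 << 13 = 8192 bytes.
 */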

#define FALCON_NVCONFIG_OFFSET 0x300

#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
struct falcon_nvconfig {
	ef4_oword_t ee_vpd_cfg_reg;			/* 0x300 */
	u8 mac_address[2][8];			/* 0x310 */
	ef4_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
	ef4_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
	ef4_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
	ef4_oword_t hw_init_reg;			/* 0x350 */
	ef4_oword_t nic_stat_reg;			/* 0x360 */
	ef4_oword_t glb_ctl_reg;			/* 0x370 */
	ef4_oword_t srm_cfg_reg;			/* 0x380 */
	ef4_oword_t spare_reg;				/* 0x390 */
	__le16 board_magic_num;			/* 0x3A0 */
	__le16 board_struct_ver;
	__le16 board_checksum;
	struct falcon_nvconfig_board_v2 board_v2;
	ef4_oword_t ee_base_page_reg;			/* 0x3B0 */
	struct falcon_nvconfig_board_v3 board_v3;	/* 0x3C0 */
} __packed;

/*************************************************************************/

static int falcon_reset_hw(struct ef4_nic *efx, enum reset_type method);
static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx);

static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
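
/* The size fields above are log2 values: 17 -> a 128 KB device with a
 * 32 KB (1 << 15) erase block and 256 B (1 << 8) write block; 0x52 is
 * presumably the AT25F1024's sector-erase opcode.
 */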

/**************************************************************************
 *
 * I2C bus - this is a bit-bashing interface using GPIO pins
 * Note that it uses the output enables to tristate the outputs
 * SDA is the data pin and SCL is the clock
 *
 **************************************************************************
 */
static void falcon_setsda(void *data, int state)
{
	struct ef4_nic *efx = (struct ef4_nic *)data;
	ef4_oword_t reg;

	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
	ef4_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static void falcon_setscl(void *data, int state)
{
	struct ef4_nic *efx = (struct ef4_nic *)data;
	ef4_oword_t reg;

	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
	ef4_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static int falcon_getsda(void *data)
{
	struct ef4_nic *efx = (struct ef4_nic *)data;
	ef4_oword_t reg;

	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EF4_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
}

static int falcon_getscl(void *data)
{
	struct ef4_nic *efx = (struct ef4_nic *)data;
	ef4_oword_t reg;

	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EF4_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
}

static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};
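
/* To use these operations, the probe path would take a per-NIC copy of
 * this structure, point its .data member at the ef4_nic, install the
 * copy as i2c_adapter.algo_data and register it with i2c_bit_add_bus()
 * (a hedged description - the registration happens outside this section).
 */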

static void falcon_push_irq_moderation(struct ef4_channel *channel)
{
	ef4_dword_t timer_cmd;
	struct ef4_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation_us) {
		unsigned int ticks;

		ticks = ef4_usecs_to_ticks(efx, channel->irq_moderation_us);
		EF4_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_INT_HLDOFF,
				     FRF_AB_TC_TIMER_VAL,
				     ticks - 1);
	} else {
		EF4_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_DIS,
				     FRF_AB_TC_TIMER_VAL, 0);
	}
	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
	ef4_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
}

static void falcon_deconfigure_mac_wrapper(struct ef4_nic *efx);

static void falcon_prepare_flush(struct ef4_nic *efx)
{
	falcon_deconfigure_mac_wrapper(efx);

	/* Wait for the TX and RX FIFOs to reach the next packet boundary
	 * (~1ms without back-pressure), then drain the remainder of the
	 * FIFOs at data path speeds (negligible), with a healthy margin. */
	msleep(10);
}

/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <= A1) reads can be duplicated in
 * the BIU. Interrupt acknowledgement is read-sensitive, so we must write
 * instead (then read back to ensure the BIU collector is flushed).
 *
 * NB most hardware supports MSI interrupts
 */
static inline void falcon_irq_ack_a1(struct ef4_nic *efx)
{
	ef4_dword_t reg;

	EF4_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	ef4_writed(efx, &reg, FR_AA_INT_ACK_KER);
	ef4_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
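
/* The value written above appears to be arbitrary: given the workaround
 * described in the preceding comment, it is the write to INT_ACK_KER
 * (plus the flushing read) that performs the acknowledgement, not the
 * data (an assumption, not documented here).
 */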

static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct ef4_nic *efx = dev_id;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EF4_OWORD_IS_ZERO(*int_ker))) {
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d not for me\n", irq,
			   raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));

	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Check to see if we have a serious error condition */
	syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return ef4_farch_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EF4_MAX_CHANNELS);
	queues = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
	EF4_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	if (queues & 1)
		ef4_schedule_channel_irq(ef4_get_channel(efx, 0));
	if (queues & 2)
		ef4_schedule_channel_irq(ef4_get_channel(efx, 1));
	return IRQ_HANDLED;
}

/**************************************************************************
 *
 * RSS
 *
 **************************************************************************
 */
static int dummy_rx_push_rss_config(struct ef4_nic *efx, bool user,
				    const u32 *rx_indir_table)
{
	(void) efx;
	(void) user;
	(void) rx_indir_table;
	return -ENOSYS;
}

static int falcon_b0_rx_push_rss_config(struct ef4_nic *efx, bool user,
					const u32 *rx_indir_table)
{
	ef4_oword_t temp;

	(void) user;
	/* Set hash key for IPv4 */
	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
	ef4_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);

	memcpy(efx->rx_indir_table, rx_indir_table,
	       sizeof(efx->rx_indir_table));
	ef4_farch_rx_push_indir_table(efx);
	return 0;
}

/**************************************************************************
 *
 * EEPROM/flash
 *
 **************************************************************************
 */

#define FALCON_SPI_MAX_LEN sizeof(ef4_oword_t)
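
/* i.e. one oword (nominally 16 bytes): a single transfer is bounded by
 * what falcon_spi_cmd() can stage in the FR_AB_EE_SPI_HDATA register.
 */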

static int falcon_spi_poll(struct ef4_nic *efx)
{
	ef4_oword_t reg;
	ef4_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
	return EF4_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
}

/* Wait for SPI command completion */
static int falcon_spi_wait(struct ef4_nic *efx)
{
	/* Most commands will finish quickly, so we start polling at
	 * very short intervals.  Sometimes the command may have to
	 * wait for VPD or expansion ROM access outside of our
	 * control, so we allow up to 100 ms. */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}

	for (;;) {
		if (!falcon_spi_poll(efx))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			netif_err(efx, hw, efx->net_dev,
				  "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

static int
falcon_spi_cmd(struct ef4_nic *efx, const struct falcon_spi_device *spi,
	       unsigned int command, int address,
	       const void *in, void *out, size_t len)
{
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	ef4_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EF4_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		ef4_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		ef4_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command */
	EF4_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	ef4_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		ef4_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}

static inline u8
falcon_spi_munge_command(const struct falcon_spi_device *spi,
			 const u8 command, const unsigned int address)
{
	return command | (((address >> 8) & spi->munge_address) << 3);
}
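
/* Example: for a small EEPROM with 9-bit addressing (munge_address == 1),
 * the high address bit travels in bit 3 of the command byte, so SPI_READ
 * (0x03) at address 0x100 is munged to 0x0B.  (A hedged example; which
 * devices need this is configured elsewhere in the driver.)
 */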

static int
falcon_spi_read(struct ef4_nic *efx, const struct falcon_spi_device *spi,
		loff_t start, size_t len, size_t *retlen, u8 *buffer)
{
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		block_len = min(len - pos, FALCON_SPI_MAX_LEN);

		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
				    buffer + pos, block_len);
		if (rc)
			break;
		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}

#ifdef CONFIG_SFC_FALCON_MTD

struct falcon_mtd_partition {
	struct ef4_mtd_partition common;
	const struct falcon_spi_device *spi;
	size_t offset;
};

#define to_falcon_mtd_partition(mtd)				\
	container_of(mtd, struct falcon_mtd_partition, common.mtd)

static size_t
falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
{
	return min(FALCON_SPI_MAX_LEN,
		   (spi->block_size - (start & (spi->block_size - 1))));
}
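
/* Example: with a 256-byte write block, a write starting at offset 250
 * is limited to 6 bytes so it does not cross the block boundary;
 * otherwise the limit is FALCON_SPI_MAX_LEN (16 bytes).
 */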

/* Wait up to 10 ms for buffered write completion */
static int
falcon_spi_wait_write(struct ef4_nic *efx, const struct falcon_spi_device *spi)
{
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	for (;;) {
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			netif_err(efx, hw, efx->net_dev,
				  "SPI write timeout on device %d"
				  " last status=0x%02x\n",
				  spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

static int
falcon_spi_write(struct ef4_nic *efx, const struct falcon_spi_device *spi,
		 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
{
	u8 verify_buffer[FALCON_SPI_MAX_LEN];
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
		if (rc)
			break;

		block_len = min(len - pos,
				falcon_spi_write_limit(spi, start + pos));
		command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos,
				    buffer + pos, NULL, block_len);
		if (rc)
			break;

		rc = falcon_spi_wait_write(efx, spi);
		if (rc)
			break;

		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos,
				    NULL, verify_buffer, block_len);
		if (memcmp(verify_buffer, buffer + pos, block_len)) {
			rc = -EIO;
			break;
		}

		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}

static int
falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
{
	const struct falcon_spi_device *spi = part->spi;
	struct ef4_nic *efx = part->common.mtd.priv;
	u8 status;
	int rc, i;

	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
	for (i = 0; i < 40; i++) {
		__set_current_state(uninterruptible ?
				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (signal_pending(current))
			return -EINTR;
	}
	pr_err("%s: timed out waiting for %s\n",
	       part->common.name, part->common.dev_type_name);
	return -ETIMEDOUT;
}

static int
falcon_spi_unlock(struct ef4_nic *efx, const struct falcon_spi_device *spi)
{
	const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
				SPI_STATUS_BP0);
	u8 status;
	int rc;

	rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
			    &status, sizeof(status));
	if (rc)
		return rc;

	if (!(status & unlock_mask))
		return 0; /* already unlocked */

	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
	if (rc)
		return rc;

	status &= ~unlock_mask;
	rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
			    NULL, sizeof(status));
	if (rc)
		return rc;
	rc = falcon_spi_wait_write(efx, spi);
	if (rc)
		return rc;

	return 0;
}

#define FALCON_SPI_VERIFY_BUF_LEN 16

static int
falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
{
	const struct falcon_spi_device *spi = part->spi;
	struct ef4_nic *efx = part->common.mtd.priv;
	unsigned pos, block_len;
	u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
	u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
	int rc;

	if (len != spi->erase_size)
		return -EINVAL;

	if (spi->erase_command == 0)
		return -EOPNOTSUPP;

	rc = falcon_spi_unlock(efx, spi);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
			    NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_slow_wait(part, false);

	/* Verify the entire region has been wiped */
	memset(empty, 0xff, sizeof(empty));
	for (pos = 0; pos < len; pos += block_len) {
		block_len = min(len - pos, sizeof(buffer));
		rc = falcon_spi_read(efx, spi, start + pos, block_len,
				     NULL, buffer);
		if (rc)
			return rc;
		if (memcmp(empty, buffer, block_len))
			return -EIO;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return rc;
}

static void falcon_mtd_rename(struct ef4_mtd_partition *part)
{
	struct ef4_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s",
		 efx->name, part->type_name);
}

static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
			   size_t len, size_t *retlen, u8 *buffer)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct ef4_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_read(efx, part->spi, part->offset + start,
			     len, retlen, buffer);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct ef4_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_erase(part, part->offset + start, len);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
			    size_t len, size_t *retlen, const u8 *buffer)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct ef4_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_write(efx, part->spi, part->offset + start,
			      len, retlen, buffer);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static int falcon_mtd_sync(struct mtd_info *mtd)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct ef4_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	mutex_lock(&nic_data->spi_lock);
	rc = falcon_spi_slow_wait(part, true);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static int falcon_mtd_probe(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_mtd_partition *parts;
	struct falcon_spi_device *spi;
	size_t n_parts;
	int rc = -ENODEV;

	ASSERT_RTNL();

	/* Allocate space for maximum number of partitions */
	parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;
	n_parts = 0;

	spi = &nic_data->spi_flash;
	if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
		parts[n_parts].spi = spi;
		parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].common.dev_type_name = "flash";
		parts[n_parts].common.type_name = "sfc_flash_bootrom";
		parts[n_parts].common.mtd.type = MTD_NORFLASH;
		parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH;
		parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].common.mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	spi = &nic_data->spi_eeprom;
	if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
		parts[n_parts].spi = spi;
		parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].common.dev_type_name = "EEPROM";
		parts[n_parts].common.type_name = "sfc_bootconfig";
		parts[n_parts].common.mtd.type = MTD_RAM;
		parts[n_parts].common.mtd.flags = MTD_CAP_RAM;
		parts[n_parts].common.mtd.size =
			min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
			FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].common.mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	rc = ef4_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
	if (rc)
		kfree(parts);
	return rc;
}

#endif /* CONFIG_SFC_FALCON_MTD */

/**************************************************************************
 *
 * XMAC operations
 *
 **************************************************************************
 */

/* Configure the XAUI driver that is an output from Falcon */
static void falcon_setup_xaui(struct ef4_nic *efx)
{
	ef4_oword_t sdctl, txdrv;

	/* Move the XAUI into low power, unless there is no PHY, in
	 * which case the XAUI will have to drive a cable. */
	if (efx->phy_type == PHY_TYPE_NONE)
		return;

	ef4_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	ef4_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);

	EF4_POPULATE_OWORD_8(txdrv,
			     FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
	ef4_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
}

int falcon_reset_xaui(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int count;

	/* Don't fetch MAC statistics over an XMAC reset */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Start reset sequence */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
	ef4_writeo(efx, &reg, FR_AB_XX_PWR_RST);

	/* Wait up to 10 ms for completion, then reinitialise */
	for (count = 0; count < 1000; count++) {
		ef4_reado(efx, &reg, FR_AB_XX_PWR_RST);
		if (EF4_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
		    EF4_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
			falcon_setup_xaui(efx);
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev,
		  "timed out waiting for XAUI/XGXS reset\n");
	return -ETIMEDOUT;
}

static void falcon_ack_status_intr(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;

	if ((ef4_nic_rev(efx) != EF4_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
		return;

	/* We expect xgmii faults if the wireside link is down */
	if (!efx->link_state.up)
		return;

	/* We can only use this interrupt to signal the negative edge of
	 * xaui_align [we have to poll the positive edge]. */
	if (nic_data->xmac_poll_required)
		return;

	ef4_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}

static bool falcon_xgxs_link_ok(struct ef4_nic *efx)
{
	ef4_oword_t reg;
	bool align_done, link_ok = false;
	int sync_status;

	/* Read link status */
	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);

	align_done = EF4_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
	sync_status = EF4_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
	if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
		link_ok = true;

	/* Clear link status ready for next read */
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
	ef4_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	return link_ok;
}

static bool falcon_xmac_link_ok(struct ef4_nic *efx)
{
	/*
	 * Check MAC's XGXS link status except when using XGMII loopback
	 * which bypasses the XGXS block.
	 * If possible, check PHY's XGXS link status except when using
	 * MAC loopback.
	 */
	return (efx->loopback_mode == LOOPBACK_XGMII ||
		falcon_xgxs_link_ok(efx)) &&
		(!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
		 LOOPBACK_INTERNAL(efx) ||
		 ef4_mdio_phyxgxs_lane_sync(efx));
}

static void falcon_reconfigure_xmac_core(struct ef4_nic *efx)
{
	unsigned int max_frame_len;
	ef4_oword_t reg;
	bool rx_fc = !!(efx->link_state.fc & EF4_FC_RX);
	bool tx_fc = !!(efx->link_state.fc & EF4_FC_TX);

	/* Configure MAC - cut-through mode is hard-wired on */
	EF4_POPULATE_OWORD_3(reg,
			     FRF_AB_XM_RX_JUMBO_MODE, 1,
			     FRF_AB_XM_TX_STAT_EN, 1,
			     FRF_AB_XM_RX_STAT_EN, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

	/* Configure TX */
	EF4_POPULATE_OWORD_6(reg,
			     FRF_AB_XM_TXEN, 1,
			     FRF_AB_XM_TX_PRMBL, 1,
			     FRF_AB_XM_AUTO_PAD, 1,
			     FRF_AB_XM_TXCRC, 1,
			     FRF_AB_XM_FCNTL, tx_fc,
			     FRF_AB_XM_IPG, 0x3);
	ef4_writeo(efx, &reg, FR_AB_XM_TX_CFG);

	/* Configure RX */
	EF4_POPULATE_OWORD_5(reg,
			     FRF_AB_XM_RXEN, 1,
			     FRF_AB_XM_AUTO_DEPAD, 0,
			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
			     FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
			     FRF_AB_XM_PASS_CRC_ERR, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_RX_CFG);

	/* Set frame length */
	max_frame_len = EF4_MAX_FRAME_LEN(efx->net_dev->mtu);
	EF4_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
	ef4_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
			     FRF_AB_XM_TX_JUMBO_MODE, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_TX_PARAM);

	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
			     FRF_AB_XM_DIS_FCNTL, !rx_fc);
	ef4_writeo(efx, &reg, FR_AB_XM_FC);

	/* Set MAC address */
	memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
	ef4_writeo(efx, &reg, FR_AB_XM_ADR_LO);
	memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
	ef4_writeo(efx, &reg, FR_AB_XM_ADR_HI);
}

static void falcon_reconfigure_xgxs_core(struct ef4_nic *efx)
{
	ef4_oword_t reg;
	bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
	bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
	bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
	bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;

	/* The XGXS block is flaky and will need to be reset if moving
	 * into or out of XGMII, XGXS or XAUI loopbacks. */
	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	old_xgxs_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
	old_xgmii_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);

	ef4_reado(efx, &reg, FR_AB_XX_SD_CTL);
	old_xaui_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);

	/* The PHY driver may have turned XAUI off */
	if ((xgxs_loopback != old_xgxs_loopback) ||
	    (xaui_loopback != old_xaui_loopback) ||
	    (xgmii_loopback != old_xgmii_loopback))
		falcon_reset_xaui(efx);

	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
			    (xgxs_loopback || xaui_loopback) ?
			    FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
	ef4_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	ef4_reado(efx, &reg, FR_AB_XX_SD_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
	ef4_writeo(efx, &reg, FR_AB_XX_SD_CTL);
}

/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
static bool falcon_xmac_link_ok_retry(struct ef4_nic *efx, int tries)
{
	bool mac_up = falcon_xmac_link_ok(efx);

	if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
	    ef4_phy_mode_disabled(efx->phy_mode))
		/* XAUI link is expected to be down */
		return mac_up;

	falcon_stop_nic_stats(efx);

	while (!mac_up && tries) {
		netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
		falcon_reset_xaui(efx);
		udelay(200);

		mac_up = falcon_xmac_link_ok(efx);
		--tries;
	}

	falcon_start_nic_stats(efx);

	return mac_up;
}

static bool falcon_xmac_check_fault(struct ef4_nic *efx)
{
	return !falcon_xmac_link_ok_retry(efx, 5);
}

static int falcon_reconfigure_xmac(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	ef4_farch_filter_sync_rx_mode(efx);

	falcon_reconfigure_xgxs_core(efx);
	falcon_reconfigure_xmac_core(efx);

	falcon_reconfigure_mac_wrapper(efx);

	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
	falcon_ack_status_intr(efx);

	return 0;
}

static void falcon_poll_xmac(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	/* We expect xgmii faults if the wireside link is down */
	if (!efx->link_state.up || !nic_data->xmac_poll_required)
		return;

	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
	falcon_ack_status_intr(efx);
}

/**************************************************************************
 *
 * MAC wrapper
 *
 **************************************************************************
 */

static void falcon_push_multicast_hash(struct ef4_nic *efx)
{
	union ef4_multicast_hash *mc_hash = &efx->multicast_hash;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	ef4_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
	ef4_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
}

static void falcon_reset_macs(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg, mac_ctrl;
	int count;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		EF4_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
		ef4_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

		for (count = 0; count < 10000; count++) {
			ef4_reado(efx, &reg, FR_AB_XM_GLB_CFG);
			if (EF4_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) == 0)
				return;
			udelay(10);
		}

		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for XMAC core reset\n");
	}

	/* MAC stats will fail whilst the TX FIFO is draining */
	WARN_ON(nic_data->stats_disable_count == 0);

	ef4_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
	EF4_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
	ef4_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	ef4_reado(efx, &reg, FR_AB_GLB_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	ef4_writeo(efx, &reg, FR_AB_GLB_CTL);

	count = 0;
	while (1) {
		ef4_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EF4_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EF4_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EF4_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "Completed MAC reset after %d loops\n",
				  count);
			break;
		}
		if (count > 20) {
			netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	/* Ensure the correct MAC is selected before statistics
	 * are re-enabled by the caller */
	ef4_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	falcon_setup_xaui(efx);
}

static void falcon_drain_tx_fifo(struct ef4_nic *efx)
{
	ef4_oword_t reg;

	if ((ef4_nic_rev(efx) < EF4_REV_FALCON_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	ef4_reado(efx, &reg, FR_AB_MAC_CTRL);
	/* There is no point in draining more than once */
	if (EF4_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
		return;

	falcon_reset_macs(efx);
}

static void falcon_deconfigure_mac_wrapper(struct ef4_nic *efx)
{
	ef4_oword_t reg;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0)
		return;

	/* Isolate the MAC -> RX */
	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);

	/* Isolate TX -> MAC */
	falcon_drain_tx_fifo(efx);
}

static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx)
{
	struct ef4_link_state *link_state = &efx->link_state;
	ef4_oword_t reg;
	int link_speed, isolate;

	isolate = !!READ_ONCE(efx->reset_pending);

	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}

	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EF4_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		EF4_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up || isolate);
	}

	ef4_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_push_multicast_hash(efx);

	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	/* Enable XOFF signal from RX FIFO (we enabled it during NIC
	 * initialisation but it may read back as 0) */
	EF4_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	/* Unisolate the MAC -> RX */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);
}

static void falcon_stats_request(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	FALCON_XMAC_STATS_DMA_FLAG(efx) = 0;
	nic_data->stats_pending = true;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	ef4_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}
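
/* The request/complete pair above and below form a simple handshake:
 * the done flag is cleared, the MAC is asked to DMA the statistics
 * block, and the timer armed above fires falcon_stats_timer_func(),
 * which calls falcon_stats_complete() to check the flag.
 */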

static void falcon_stats_complete(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	nic_data->stats_pending = false;
	if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
		rmb(); /* read the done flag before the stats */
		ef4_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
				     falcon_stat_mask, nic_data->stats,
				     efx->stats_buffer.addr, true);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for statistics\n");
	}
}

static void falcon_stats_timer_func(struct timer_list *t)
{
	struct falcon_nic_data *nic_data = from_timer(nic_data, t,
						      stats_timer);
	struct ef4_nic *efx = nic_data->efx;

	spin_lock(&efx->stats_lock);

	falcon_stats_complete(efx);
	if (nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);

	spin_unlock(&efx->stats_lock);
}

static bool falcon_loopback_link_poll(struct ef4_nic *efx)
{
	struct ef4_link_state old_state = efx->link_state;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	efx->link_state.fd = true;
	efx->link_state.fc = efx->wanted_fc;
	efx->link_state.up = true;
	efx->link_state.speed = 10000;

	return !ef4_link_state_equal(&efx->link_state, &old_state);
}

static int falcon_reconfigure_port(struct ef4_nic *efx)
{
	int rc;

	WARN_ON(ef4_nic_rev(efx) > EF4_REV_FALCON_B0);

	/* Poll the PHY link state *before* reconfiguring it. This means we
	 * will pick up the correct speed (in loopback) to select the correct
	 * MAC.
	 */
	if (LOOPBACK_INTERNAL(efx))
		falcon_loopback_link_poll(efx);
	else
		efx->phy_op->poll(efx);

	falcon_stop_nic_stats(efx);
	falcon_deconfigure_mac_wrapper(efx);

	falcon_reset_macs(efx);

	efx->phy_op->reconfigure(efx);
	rc = falcon_reconfigure_xmac(efx);
	BUG_ON(rc);

	falcon_start_nic_stats(efx);

	/* Synchronise efx->link_state with the kernel */
	ef4_link_status_changed(efx);

	return 0;
}

/* TX flow control may automatically turn itself off if the link
 * partner (intermittently) stops responding to pause frames. There
 * isn't any indication that this has happened, so the best we can do
 * is leave it up to the user to spot this and fix it by cycling
 * transmit flow control on this end.
 */

static void falcon_a1_prepare_enable_fc_tx(struct ef4_nic *efx)
{
	/* Schedule a reset to recover */
	ef4_schedule_reset(efx, RESET_TYPE_INVISIBLE);
}

static void falcon_b0_prepare_enable_fc_tx(struct ef4_nic *efx)
{
	/* Recover by resetting the EM block */
	falcon_stop_nic_stats(efx);
	falcon_drain_tx_fifo(efx);
	falcon_reconfigure_xmac(efx);
	falcon_start_nic_stats(efx);
}
1541  /**************************************************************************
1542   *
1543   * PHY access via GMII
1544   *
1545   **************************************************************************
1546   */
1547  
1548  /* Wait for GMII access to complete */
falcon_gmii_wait(struct ef4_nic * efx)1549  static int falcon_gmii_wait(struct ef4_nic *efx)
1550  {
1551  	ef4_oword_t md_stat;
1552  	int count;
1553  
1554  	/* wait up to 50ms - taken max from datasheet */
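	/* (5000 polls x 10us of udelay below == the 50ms budget) */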
	for (count = 0; count < 5000; count++) {
		ef4_reado(efx, &md_stat, FR_AB_MD_STAT);
		if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
			if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
			    EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
				netif_err(efx, hw, efx->net_dev,
					  "error from GMII access "
					  EF4_OWORD_FMT"\n",
					  EF4_OWORD_VAL(md_stat));
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}

/* Write an MDIO register of a PHY connected to Falcon. */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int rc;

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing MDIO %d register %d.%d with 0x%04x\n",
		   prtad, devad, addr, value);

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	ef4_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	ef4_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	ef4_writeo(efx, &reg, FR_AB_MD_TXD);

	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	ef4_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EF4_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		ef4_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}

/* Read an MDIO register of a PHY connected to Falcon. */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int rc;

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	ef4_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	ef4_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	ef4_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		ef4_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EF4_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		netif_vdbg(efx, hw, efx->net_dev,
			   "read from MDIO %d register %d.%d, got %04x\n",
			   prtad, devad, addr, rc);
	} else {
		/* Abort the read operation */
		EF4_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		ef4_writeo(efx, &reg, FR_AB_MD_CS);

		netif_dbg(efx, hw, efx->net_dev,
			  "read from MDIO %d register %d.%d, got error %d\n",
			  prtad, devad, addr, rc);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
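
/* These two routines back efx->mdio (struct mdio_if_info), so the
 * generic MDIO helpers can drive the PHY through them.  A minimal
 * sketch, assuming a Clause 45 PHY and constants from <linux/mdio.h>:
 *
 *	int id_hi = falcon_mdio_read(net_dev, efx->mdio.prtad,
 *				     MDIO_MMD_PMAPMD, MDIO_DEVID1);
 */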

/* This call is responsible for hooking in the MAC and PHY operations */
static int falcon_probe_port(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_qt202x_phy_ops;
		break;
	case PHY_TYPE_TXC43128:
		efx->phy_op = &falcon_txc_phy_ops;
		break;
	default:
		netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
			  efx->phy_type);
		return -ENODEV;
	}

	/* Fill out MDIO structure and loopback modes */
	mutex_init(&nic_data->mdio_lock);
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;
	rc = efx->phy_op->probe(efx);
	if (rc != 0)
		return rc;

	/* Initial assumption */
	efx->link_state.speed = 10000;
	efx->link_state.fd = true;

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
		efx->wanted_fc = EF4_FC_RX | EF4_FC_TX;
	else
		efx->wanted_fc = EF4_FC_RX;
	if (efx->mdio.mmds & MDIO_DEVS_AN)
		efx->wanted_fc |= EF4_FC_AUTO;

	/* Allocate buffer for stats */
	rc = ef4_nic_alloc_buffer(efx, &efx->stats_buffer,
				  FALCON_MAC_STATS_SIZE, GFP_KERNEL);
	if (rc)
		return rc;
	netif_dbg(efx, probe, efx->net_dev,
		  "stats buffer at %llx (virt %p phys %llx)\n",
		  (u64)efx->stats_buffer.dma_addr,
		  efx->stats_buffer.addr,
		  (u64)virt_to_phys(efx->stats_buffer.addr));

	return 0;
}

static void falcon_remove_port(struct ef4_nic *efx)
{
	efx->phy_op->remove(efx);
	ef4_nic_free_buffer(efx, &efx->stats_buffer);
}

/* Global events are basically PHY events */
static bool
falcon_handle_global_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	struct ef4_nic *efx = channel->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
		/* Ignored */
		return true;

	if ((ef4_nic_rev(efx) == EF4_REV_FALCON_B0) &&
	    EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		nic_data->xmac_poll_required = true;
		return true;
	}

	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1 ?
	    EF4_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen global RX_RESET event. Resetting.\n",
			  channel->channel);

		atomic_inc(&efx->rx_reset);
		ef4_schedule_reset(efx, EF4_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		return true;
	}

	return false;
}

/**************************************************************************
 *
 * Falcon test code
 *
 **************************************************************************/

static int
falcon_read_nvram(struct ef4_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	struct falcon_spi_device *spi;
	void *region;
	int rc, magic_num, struct_ver;
	__le16 *word, *limit;
	u32 csum;

	if (falcon_spi_present(&nic_data->spi_flash))
		spi = &nic_data->spi_flash;
	else if (falcon_spi_present(&nic_data->spi_eeprom))
		spi = &nic_data->spi_eeprom;
	else
		return -EINVAL;

	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	nvconfig = region + FALCON_NVCONFIG_OFFSET;

	mutex_lock(&nic_data->spi_lock);
	rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
	mutex_unlock(&nic_data->spi_lock);
	if (rc) {
		netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
			  falcon_spi_present(&nic_data->spi_flash) ?
			  "flash" : "EEPROM");
		rc = -EIO;
		goto out;
	}

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM bad magic 0x%x\n", magic_num);
		goto out;
	}
	if (struct_ver < 2) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has ancient version 0x%x\n", struct_ver);
		goto out;
	} else if (struct_ver < 4) {
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		word = region;
		limit = region + FALCON_NVCONFIG_END;
	}
	for (csum = 0; word < limit; ++word)
		csum += le16_to_cpu(*word);
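	/* The image is valid only if the low 16 bits of the plain word
	 * sum are all-ones (carries above bit 15 are not folded back).
	 */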
	if (~csum & 0xffff) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has incorrect checksum\n");
		goto out;
	}

	rc = 0;
	if (nvconfig_out)
		memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

 out:
	kfree(region);
	return rc;
}

static int falcon_test_nvram(struct ef4_nic *efx)
{
	return falcon_read_nvram(efx, NULL);
}

static const struct ef4_farch_register_test falcon_b0_register_tests[] = {
	{ FR_AZ_ADR_REGION,
	  EF4_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
	{ FR_AZ_RX_CFG,
	  EF4_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_CFG,
	  EF4_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_RESERVED,
	  EF4_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ FR_AB_MAC_CTRL,
	  EF4_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_SRM_TX_DC_CFG,
	  EF4_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_CFG,
	  EF4_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_PF_WM,
	  EF4_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_DP_CTRL,
	  EF4_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GM_CFG2,
	  EF4_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GMF_CFG0,
	  EF4_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_GLB_CFG,
	  EF4_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_TX_CFG,
	  EF4_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_CFG,
	  EF4_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_PARAM,
	  EF4_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_FC,
	  EF4_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_ADR_LO,
	  EF4_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XX_SD_CTL,
	  EF4_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};

static int
falcon_b0_test_chip(struct ef4_nic *efx, struct ef4_self_tests *tests)
{
	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
	int rc, rc2;

	mutex_lock(&efx->mac_lock);
	if (efx->loopback_modes) {
		/* We need the 312 clock from the PHY to test the XMAC
		 * registers, so move into XGMII loopback if available */
		if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
			efx->loopback_mode = LOOPBACK_XGMII;
		else
			efx->loopback_mode = __ffs(efx->loopback_modes);
	}
	__ef4_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	ef4_reset_down(efx, reset_method);

	tests->registers =
		ef4_farch_test_registers(efx, falcon_b0_register_tests,
					 ARRAY_SIZE(falcon_b0_register_tests))
		? -1 : 1;

	rc = falcon_reset_hw(efx, reset_method);
	rc2 = ef4_reset_up(efx, reset_method, rc == 0);
	return rc ? rc : rc2;
}

/**************************************************************************
 *
 * Device reset
 *
 **************************************************************************
 */

static enum reset_type falcon_map_reset_reason(enum reset_type reason)
{
	switch (reason) {
	case RESET_TYPE_RX_RECOVERY:
	case RESET_TYPE_DMA_ERROR:
	case RESET_TYPE_TX_SKIP:
		/* These can occasionally occur due to hardware bugs.
		 * We try to reset without disrupting the link.
		 */
		return RESET_TYPE_INVISIBLE;
	default:
		return RESET_TYPE_ALL;
	}
}

static int falcon_map_reset_flags(u32 *flags)
{
	enum {
		FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
					  ETH_RESET_OFFLOAD | ETH_RESET_MAC),
		FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
		FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
	};

	if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) {
		*flags &= ~FALCON_RESET_WORLD;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) {
		*flags &= ~FALCON_RESET_ALL;
		return RESET_TYPE_ALL;
	}

	if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) {
		*flags &= ~FALCON_RESET_INVISIBLE;
		return RESET_TYPE_INVISIBLE;
	}

	return -EINVAL;
}
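
/* A hedged userspace example: "ethtool --reset eth0 dma filter offload
 * mac phy" (interface name hypothetical) requests exactly the
 * FALCON_RESET_ALL bits above and so maps to RESET_TYPE_ALL.
 */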

/* Resets NIC to known state.  This routine must be called in process
 * context and is allowed to sleep. */
static int __falcon_reset_hw(struct ef4_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t glb_ctl_reg_ker;
	int rc;

	netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
		  RESET_TYPE(method));

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to backup PCI state of primary "
				  "function prior to hardware reset\n");
			goto fail1;
		}
		if (ef4_nic_is_dual_func(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				netif_err(efx, drv, efx->net_dev,
					  "failed to backup PCI state of "
					  "secondary function prior to "
					  "hardware reset\n");
				goto fail2;
			}
		}

		EF4_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	} else {
		EF4_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     /* exclude PHY from "invisible" reset */
				     FRF_AB_EXT_PHY_RST_CTL,
				     method == RESET_TYPE_INVISIBLE,
				     /* exclude EEPROM/flash and PCIe */
				     FRF_AB_PCIE_CORE_RST_CTL, 1,
				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
				     FRF_AB_PCIE_SD_RST_CTL, 1,
				     FRF_AB_EE_RST_CTL, 1,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	}
	ef4_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);

	netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);
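	/* (HZ / 20 jiffies == 50ms of settle time before GLB_CTL is re-read) */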

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (ef4_nic_is_dual_func(efx))
			pci_restore_state(nic_data->pci_dev2);
		pci_restore_state(efx->pci_dev);
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully restored PCI config\n");
	}

	/* Assert that reset complete */
	ef4_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
	if (EF4_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
		rc = -ETIMEDOUT;
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for hardware reset\n");
		goto fail3;
	}
	netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
	pci_restore_state(efx->pci_dev);
fail1:
fail3:
	return rc;
}

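/* Serialise resets against SPI traffic: taking spi_lock here means a
 * reset cannot cut an in-flight flash/EEPROM transfer short.
 */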
static int falcon_reset_hw(struct ef4_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	mutex_lock(&nic_data->spi_lock);
	rc = __falcon_reset_hw(efx, method);
	mutex_unlock(&nic_data->spi_lock);

	return rc;
}

static void falcon_monitor(struct ef4_nic *efx)
{
	bool link_changed;
	int rc;

	BUG_ON(!mutex_is_locked(&efx->mac_lock));

	rc = falcon_board(efx)->type->monitor(efx);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "Board sensor %s; shutting down PHY\n",
			  (rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		rc = __ef4_reconfigure_port(efx);
		WARN_ON(rc);
	}

	if (LOOPBACK_INTERNAL(efx))
		link_changed = falcon_loopback_link_poll(efx);
	else
		link_changed = efx->phy_op->poll(efx);

	if (link_changed) {
		falcon_stop_nic_stats(efx);
		falcon_deconfigure_mac_wrapper(efx);

		falcon_reset_macs(efx);
		rc = falcon_reconfigure_xmac(efx);
		BUG_ON(rc);

		falcon_start_nic_stats(efx);

		ef4_link_status_changed(efx);
	}

	falcon_poll_xmac(efx);
}

/* Zeroes out the SRAM contents.  This routine must be called in
 * process context and is allowed to sleep.
 */
static int falcon_reset_sram(struct ef4_nic *efx)
{
	ef4_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int count;

	/* Set the SRAM wake/sleep GPIO appropriately. */
	ef4_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
	EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
	EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
	ef4_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);

	/* Initiate SRAM reset */
	EF4_POPULATE_OWORD_2(srm_cfg_reg_ker,
			     FRF_AZ_SRM_INIT_EN, 1,
			     FRF_AZ_SRM_NB_SZ, 0);
	ef4_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);

	/* Wait for SRAM reset to complete */
	count = 0;
	do {
		netif_dbg(efx, hw, efx->net_dev,
			  "waiting for SRAM reset (attempt %d)...\n", count);

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete */
		ef4_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
		if (!EF4_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "SRAM reset complete\n");

			return 0;
		}
	} while (++count < 20);	/* wait up to 0.4 sec */

	netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
	return -ETIMEDOUT;
}

static void falcon_spi_device_init(struct ef4_nic *efx,
				   struct falcon_spi_device *spi_device,
				   unsigned int device_id, u32 device_type)
{
	if (device_type != 0) {
		spi_device->device_id = device_id;
		spi_device->size =
			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
		spi_device->addr_len =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
		spi_device->munge_address = (spi_device->size == 1 << 9 &&
					     spi_device->addr_len == 1);
		spi_device->erase_command =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
		spi_device->erase_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_ERASE_SIZE);
		spi_device->block_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_BLOCK_SIZE);
	} else {
		spi_device->size = 0;
	}
}
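
/* Worked example of the decoding above (values illustrative): a
 * device_type with SIZE=9 and ADDR_LEN=1 describes a 512-byte EEPROM
 * addressed by a single byte; that is exactly the combination which
 * sets munge_address, since the ninth address bit cannot fit in the
 * address byte and has to be carried elsewhere in the command.
 */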

/* Extract non-volatile configuration */
static int falcon_probe_nvconfig(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	int rc;

	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;

	rc = falcon_read_nvram(efx, nvconfig);
	if (rc)
		goto out;

	efx->phy_type = nvconfig->board_v2.port0_phy_type;
	efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;

	if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
		falcon_spi_device_init(
			efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
		falcon_spi_device_init(
			efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
	}

	/* Read the MAC addresses */
	ether_addr_copy(efx->net_dev->perm_addr, nvconfig->mac_address[0]);

	netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
		  efx->phy_type, efx->mdio.prtad);

	rc = falcon_probe_board(efx,
				le16_to_cpu(nvconfig->board_v2.board_revision));
out:
	kfree(nvconfig);
	return rc;
}

static int falcon_dimension_resources(struct ef4_nic *efx)
{
	efx->rx_dc_base = 0x20000;
	efx->tx_dc_base = 0x26000;
	return 0;
}

/* Probe all SPI devices on the NIC */
static void falcon_probe_spi_devices(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
	int boot_dev;

	ef4_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
	ef4_reado(efx, &nic_stat, FR_AB_NIC_STAT);
	ef4_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);

	if (EF4_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
		boot_dev = (EF4_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
		netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
			  boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
			  "flash" : "EEPROM");
	} else {
		/* Disable VPD and set clock dividers to safe
		 * values for initial programming. */
		boot_dev = -1;
		netif_dbg(efx, probe, efx->net_dev,
			  "Booted from internal ASIC settings;"
			  " setting SPI config\n");
		EF4_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
				     /* 125 MHz / 7 ~= 20 MHz */
				     FRF_AB_EE_SF_CLOCK_DIV, 7,
				     /* 125 MHz / 63 ~= 2 MHz */
				     FRF_AB_EE_EE_CLOCK_DIV, 63);
		ef4_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
	}

	mutex_init(&nic_data->spi_lock);

	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
		falcon_spi_device_init(efx, &nic_data->spi_flash,
				       FFE_AB_SPI_DEVICE_FLASH,
				       default_flash_type);
	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
		falcon_spi_device_init(efx, &nic_data->spi_eeprom,
				       FFE_AB_SPI_DEVICE_EEPROM,
				       large_eeprom_type);
}

static unsigned int falcon_a1_mem_map_size(struct ef4_nic *efx)
{
	return 0x20000;
}

static unsigned int falcon_b0_mem_map_size(struct ef4_nic *efx)
{
	/* Map everything up to and including the RSS indirection table.
	 * The PCI core takes care of mapping the MSI-X tables.
	 */
	return FR_BZ_RX_INDIRECTION_TBL +
		FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
}

static int falcon_probe_nic(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data;
	struct falcon_board *board;
	int rc;

	efx->primary = efx; /* only one usable function per controller */

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;
	nic_data->efx = efx;

	rc = -ENODEV;

	if (ef4_farch_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Falcon FPGA not supported\n");
		goto fail1;
	}

	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1) {
		ef4_oword_t nic_stat;
		struct pci_dev *dev;
		u8 pci_rev = efx->pci_dev->revision;

		if ((pci_rev == 0xff) || (pci_rev == 0)) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A0 not supported\n");
			goto fail1;
		}
		ef4_reado(efx, &nic_stat, FR_AB_NIC_STAT);
		if (EF4_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 1G not supported\n");
			goto fail1;
		}
		if (EF4_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 PCI-X not supported\n");
			goto fail1;
		}

		dev = pci_dev_get(efx->pci_dev);
		while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
					     PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = ef4_nic_alloc_buffer(efx, &efx->irq_status, sizeof(ef4_oword_t),
				  GFP_KERNEL);
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (u64)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc) {
		if (rc == -EINVAL)
			netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
		goto fail5;
	}

	efx->max_channels = (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1 ? 4 :
			     EF4_MAX_CHANNELS);
	efx->max_tx_channels = efx->max_channels;
	efx->timer_quantum_ns = 4968; /* 621 cycles */
	efx->timer_max_ns = efx->type->timer_period_max *
			    efx->timer_quantum_ns;

	/* Initialise I2C adapter */
	board = falcon_board(efx);
	board->i2c_adap.owner = THIS_MODULE;
	board->i2c_data = falcon_i2c_bit_operations;
	board->i2c_data.data = efx;
	board->i2c_adap.algo_data = &board->i2c_data;
	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
		sizeof(board->i2c_adap.name));
	rc = i2c_bit_add_bus(&board->i2c_adap);
	if (rc)
		goto fail5;

	rc = falcon_board(efx)->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise board\n");
		goto fail6;
	}

	nic_data->stats_disable_count = 1;
	timer_setup(&nic_data->stats_timer, falcon_stats_timer_func, 0);

	return 0;

 fail6:
	i2c_del_adapter(&board->i2c_adap);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 fail5:
	ef4_nic_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}

static void falcon_init_rx_cfg(struct ef4_nic *efx)
{
	/* RX control FIFO thresholds (32 entries) */
	const unsigned ctrl_xon_thr = 20;
	const unsigned ctrl_xoff_thr = 25;
	ef4_oword_t reg;

	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1) {
		/* Data FIFO size is 5.5K.  The RX DMA engine only
		 * supports scattering for user-mode queues, but will
		 * split DMA writes at intervals of RX_USR_BUF_SIZE
		 * (32-byte units) even for kernel-mode queues.  We
		 * set it to be so large that that never happens.
		 */
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
				    (3 * 4096) >> 5);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
	} else {
		/* Data FIFO size is 80K; register fields moved */
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
				    EF4_RX_USR_BUF_SIZE >> 5);
		/* Send XON and XOFF at ~3 * max MTU away from empty/full */
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
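		/* (80K FIFO: XOFF at 54272 bytes full leaves 81920 - 54272
		 * = 27648 bytes, i.e. three 9216-byte frames, of headroom;
		 * XON fires at the same margin above empty.)
		 */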
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);

		/* Enable hash insertion. This is broken for the
		 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
		 * IPv4 hashes. */
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
	}
	/* Always enable XOFF signal from RX FIFO.  We enable
	 * or disable transmission of pause frames at the MAC. */
	EF4_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);
}

/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 */
static int falcon_init_nic(struct ef4_nic *efx)
{
	ef4_oword_t temp;
	int rc;

	/* Use on-chip SRAM */
	ef4_reado(efx, &temp, FR_AB_NIC_STAT);
	EF4_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
	ef4_writeo(efx, &temp, FR_AB_NIC_STAT);

	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EF4_WORKAROUND_5129(efx)) {
		ef4_reado(efx, &temp, FR_AZ_CSR_SPARE);
		EF4_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
		ef4_writeo(efx, &temp, FR_AZ_CSR_SPARE);
	}

	if (EF4_WORKAROUND_7244(efx)) {
		ef4_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
		ef4_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
	}

	/* XXX This is documented only for Falcon A0/A1 */
	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
	ef4_reado(efx, &temp, FR_AA_RX_SELF_RST);
	EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
	EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
	if (EF4_WORKAROUND_5583(efx))
		EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
	ef4_writeo(efx, &temp, FR_AA_RX_SELF_RST);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	ef4_reado(efx, &temp, FR_AZ_TX_CFG);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	ef4_writeo(efx, &temp, FR_AZ_TX_CFG);

	falcon_init_rx_cfg(efx);

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		falcon_b0_rx_push_rss_config(efx, false, efx->rx_indir_table);

		/* Set destination of both TX and RX Flush events */
		EF4_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
		ef4_writeo(efx, &temp, FR_BZ_DP_CTRL);
	}

	ef4_farch_init_common(efx);

	return 0;
}

static void falcon_remove_nic(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_board *board = falcon_board(efx);

	board->type->fini(efx);

	/* Remove I2C adapter and clear it in preparation for a retry */
	i2c_del_adapter(&board->i2c_adap);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));

	ef4_nic_free_buffer(efx, &efx->irq_status);

	__falcon_reset_hw(efx, RESET_TYPE_ALL);

	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}

static size_t falcon_describe_nic_stats(struct ef4_nic *efx, u8 *names)
{
	return ef4_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
				      falcon_stat_mask, names);
}

static size_t falcon_update_nic_stats(struct ef4_nic *efx, u64 *full_stats,
				      struct rtnl_link_stats64 *core_stats)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	ef4_oword_t cnt;

	if (!nic_data->stats_disable_count) {
		ef4_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
		stats[FALCON_STAT_rx_nodesc_drop_cnt] +=
			EF4_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);

		if (nic_data->stats_pending &&
		    FALCON_XMAC_STATS_DMA_FLAG(efx)) {
			nic_data->stats_pending = false;
			rmb(); /* read the done flag before the stats */
			ef4_nic_update_stats(
				falcon_stat_desc, FALCON_STAT_COUNT,
				falcon_stat_mask,
				stats, efx->stats_buffer.addr, true);
		}

		/* Update derived statistic */
		ef4_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
				     stats[FALCON_STAT_rx_bytes] -
				     stats[FALCON_STAT_rx_good_bytes] -
				     stats[FALCON_STAT_rx_control] * 64);
		ef4_update_sw_stats(efx, stats);
	}

	if (full_stats)
		memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT);

	if (core_stats) {
		core_stats->rx_packets = stats[FALCON_STAT_rx_packets];
		core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
		core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
		core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
		core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt] +
					 stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[FALCON_STAT_rx_multicast];
		core_stats->rx_length_errors =
			stats[FALCON_STAT_rx_gtjumbo] +
			stats[FALCON_STAT_rx_length_error];
		core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad];
		core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error];
		core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow];

		core_stats->rx_errors = (core_stats->rx_length_errors +
					 core_stats->rx_crc_errors +
					 core_stats->rx_frame_errors +
					 stats[FALCON_STAT_rx_symbol_error]);
	}

	return FALCON_STAT_COUNT;
}

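/* Statistics collection is refcounted: probe leaves stats_disable_count
 * at 1, falcon_stop_nic_stats() increments it and
 * falcon_start_nic_stats() decrements it, so DMA requests run only once
 * every stop has been paired with a start.
 */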
void falcon_start_nic_stats(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock_bh(&efx->stats_lock);
	if (--nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);
	spin_unlock_bh(&efx->stats_lock);
}

/* We don't actually pull stats on Falcon.  Wait 10ms so that they
 * arrive when we call this just after start_stats.
 */
static void falcon_pull_nic_stats(struct ef4_nic *efx)
{
	msleep(10);
}

void falcon_stop_nic_stats(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int i;

	might_sleep();

	spin_lock_bh(&efx->stats_lock);
	++nic_data->stats_disable_count;
	spin_unlock_bh(&efx->stats_lock);

	del_timer_sync(&nic_data->stats_timer);

	/* Wait enough time for the most recent transfer to
	 * complete. */
	for (i = 0; i < 4 && nic_data->stats_pending; i++) {
		if (FALCON_XMAC_STATS_DMA_FLAG(efx))
			break;
		msleep(1);
	}

	spin_lock_bh(&efx->stats_lock);
	falcon_stats_complete(efx);
	spin_unlock_bh(&efx->stats_lock);
}

static void falcon_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
	falcon_board(efx)->type->set_id_led(efx, mode);
}

/**************************************************************************
 *
 * Wake on LAN
 *
 **************************************************************************
 */

static void falcon_get_wol(struct ef4_nic *efx, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int falcon_set_wol(struct ef4_nic *efx, u32 type)
{
	if (type != 0)
		return -EINVAL;
	return 0;
}

/**************************************************************************
 *
 * Revision-dependent attributes used by efx.c and nic.c
 *
 **************************************************************************
 */

const struct ef4_nic_type falcon_a1_nic_type = {
	.mem_bar = EF4_MEM_BAR,
	.mem_map_size = falcon_a1_mem_map_size,
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = falcon_irq_ack_a1,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.fini_dmaq = ef4_farch_fini_dmaq,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = ef4_port_dummy_op_void,
	.prepare_flr = ef4_port_dummy_op_void,
	.finish_flr = ef4_farch_finish_flr,
	.describe_stats = falcon_describe_nic_stats,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.pull_stats = falcon_pull_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.reconfigure_port = falcon_reconfigure_port,
	.prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = ef4_port_dummy_op_void,
	.test_nvram = falcon_test_nvram,
	.irq_enable_master = ef4_farch_irq_enable_master,
	.irq_test_generate = ef4_farch_irq_test_generate,
	.irq_disable_non_ev = ef4_farch_irq_disable_master,
	.irq_handle_msi = ef4_farch_msi_interrupt,
	.irq_handle_legacy = falcon_legacy_interrupt_a1,
	.tx_probe = ef4_farch_tx_probe,
	.tx_init = ef4_farch_tx_init,
	.tx_remove = ef4_farch_tx_remove,
	.tx_write = ef4_farch_tx_write,
	.tx_limit_len = ef4_farch_tx_limit_len,
	.rx_push_rss_config = dummy_rx_push_rss_config,
	.rx_probe = ef4_farch_rx_probe,
	.rx_init = ef4_farch_rx_init,
	.rx_remove = ef4_farch_rx_remove,
	.rx_write = ef4_farch_rx_write,
	.rx_defer_refill = ef4_farch_rx_defer_refill,
	.ev_probe = ef4_farch_ev_probe,
	.ev_init = ef4_farch_ev_init,
	.ev_fini = ef4_farch_ev_fini,
	.ev_remove = ef4_farch_ev_remove,
	.ev_process = ef4_farch_ev_process,
	.ev_read_ack = ef4_farch_ev_read_ack,
	.ev_test_generate = ef4_farch_ev_test_generate,

	/* We don't expose the filter table on Falcon A1 as it is not
	 * mapped into function 0, but these implementations still
	 * work with a degenerate case of all tables set to size 0.
	 */
	.filter_table_probe = ef4_farch_filter_table_probe,
	.filter_table_restore = ef4_farch_filter_table_restore,
	.filter_table_remove = ef4_farch_filter_table_remove,
	.filter_insert = ef4_farch_filter_insert,
	.filter_remove_safe = ef4_farch_filter_remove_safe,
	.filter_get_safe = ef4_farch_filter_get_safe,
	.filter_clear_rx = ef4_farch_filter_clear_rx,
	.filter_count_rx_used = ef4_farch_filter_count_rx_used,
	.filter_get_rx_id_limit = ef4_farch_filter_get_rx_id_limit,
	.filter_get_rx_ids = ef4_farch_filter_get_rx_ids,

#ifdef CONFIG_SFC_FALCON_MTD
	.mtd_probe = falcon_mtd_probe,
	.mtd_rename = falcon_mtd_rename,
	.mtd_read = falcon_mtd_read,
	.mtd_erase = falcon_mtd_erase,
	.mtd_write = falcon_mtd_write,
	.mtd_sync = falcon_mtd_sync,
#endif

	.revision = EF4_REV_FALCON_A1,
	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_padding = 0x24,
	.can_rx_scatter = false,
	.max_interrupt_mode = EF4_INT_MODE_MSI,
	.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM,
};

const struct ef4_nic_type falcon_b0_nic_type = {
	.mem_bar = EF4_MEM_BAR,
	.mem_map_size = falcon_b0_mem_map_size,
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = ef4_port_dummy_op_void,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.fini_dmaq = ef4_farch_fini_dmaq,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = ef4_port_dummy_op_void,
	.prepare_flr = ef4_port_dummy_op_void,
	.finish_flr = ef4_farch_finish_flr,
	.describe_stats = falcon_describe_nic_stats,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.pull_stats = falcon_pull_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.reconfigure_port = falcon_reconfigure_port,
	.prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = ef4_port_dummy_op_void,
	.test_chip = falcon_b0_test_chip,
	.test_nvram = falcon_test_nvram,
	.irq_enable_master = ef4_farch_irq_enable_master,
	.irq_test_generate = ef4_farch_irq_test_generate,
	.irq_disable_non_ev = ef4_farch_irq_disable_master,
	.irq_handle_msi = ef4_farch_msi_interrupt,
	.irq_handle_legacy = ef4_farch_legacy_interrupt,
	.tx_probe = ef4_farch_tx_probe,
	.tx_init = ef4_farch_tx_init,
	.tx_remove = ef4_farch_tx_remove,
	.tx_write = ef4_farch_tx_write,
	.tx_limit_len = ef4_farch_tx_limit_len,
	.rx_push_rss_config = falcon_b0_rx_push_rss_config,
	.rx_probe = ef4_farch_rx_probe,
	.rx_init = ef4_farch_rx_init,
	.rx_remove = ef4_farch_rx_remove,
	.rx_write = ef4_farch_rx_write,
	.rx_defer_refill = ef4_farch_rx_defer_refill,
	.ev_probe = ef4_farch_ev_probe,
	.ev_init = ef4_farch_ev_init,
	.ev_fini = ef4_farch_ev_fini,
	.ev_remove = ef4_farch_ev_remove,
	.ev_process = ef4_farch_ev_process,
	.ev_read_ack = ef4_farch_ev_read_ack,
	.ev_test_generate = ef4_farch_ev_test_generate,
	.filter_table_probe = ef4_farch_filter_table_probe,
	.filter_table_restore = ef4_farch_filter_table_restore,
	.filter_table_remove = ef4_farch_filter_table_remove,
	.filter_update_rx_scatter = ef4_farch_filter_update_rx_scatter,
	.filter_insert = ef4_farch_filter_insert,
	.filter_remove_safe = ef4_farch_filter_remove_safe,
	.filter_get_safe = ef4_farch_filter_get_safe,
	.filter_clear_rx = ef4_farch_filter_clear_rx,
	.filter_count_rx_used = ef4_farch_filter_count_rx_used,
	.filter_get_rx_id_limit = ef4_farch_filter_get_rx_id_limit,
	.filter_get_rx_ids = ef4_farch_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = ef4_farch_filter_rfs_insert,
	.filter_rfs_expire_one = ef4_farch_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_FALCON_MTD
	.mtd_probe = falcon_mtd_probe,
	.mtd_rename = falcon_mtd_rename,
	.mtd_read = falcon_mtd_read,
	.mtd_erase = falcon_mtd_erase,
	.mtd_write = falcon_mtd_write,
	.mtd_sync = falcon_mtd_sync,
#endif

	.revision = EF4_REV_FALCON_B0,
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
	.rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
	.rx_buffer_padding = 0,
	.can_rx_scatter = true,
	.max_interrupt_mode = EF4_INT_MODE_MSIX,
	.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
	.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};