1 /**
2  * drivers/net/ethernet/micrel/ks8851_mll.c
3  * Copyright (c) 2009 Micrel Inc.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17  */
18 
19 /* Supports:
20  * KS8851 16bit MLL chip from Micrel Inc.
21  */
22 
23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24 
25 #include <linux/interrupt.h>
26 #include <linux/module.h>
27 #include <linux/kernel.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/ethtool.h>
31 #include <linux/cache.h>
32 #include <linux/crc32.h>
33 #include <linux/crc32poly.h>
34 #include <linux/mii.h>
35 #include <linux/platform_device.h>
36 #include <linux/delay.h>
37 #include <linux/slab.h>
38 #include <linux/ks8851_mll.h>
39 #include <linux/of.h>
40 #include <linux/of_device.h>
41 #include <linux/of_net.h>
42 
43 #define	DRV_NAME	"ks8851_mll"
44 
45 static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
46 #define MAX_RECV_FRAMES			255
47 #define MAX_BUF_SIZE			2048
48 #define TX_BUF_SIZE			2000
49 #define RX_BUF_SIZE			2000
50 
51 #define KS_CCR				0x08
52 #define CCR_EEPROM			(1 << 9)
53 #define CCR_SPI				(1 << 8)
54 #define CCR_8BIT			(1 << 7)
55 #define CCR_16BIT			(1 << 6)
56 #define CCR_32BIT			(1 << 5)
57 #define CCR_SHARED			(1 << 4)
58 #define CCR_32PIN			(1 << 0)
59 
60 /* MAC address registers */
61 #define KS_MARL				0x10
62 #define KS_MARM				0x12
63 #define KS_MARH				0x14
64 
65 #define KS_OBCR				0x20
66 #define OBCR_ODS_16MA			(1 << 6)
67 
68 #define KS_EEPCR			0x22
69 #define EEPCR_EESA			(1 << 4)
70 #define EEPCR_EESB			(1 << 3)
71 #define EEPCR_EEDO			(1 << 2)
72 #define EEPCR_EESCK			(1 << 1)
73 #define EEPCR_EECS			(1 << 0)
74 
75 #define KS_MBIR				0x24
76 #define MBIR_TXMBF			(1 << 12)
77 #define MBIR_TXMBFA			(1 << 11)
78 #define MBIR_RXMBF			(1 << 4)
79 #define MBIR_RXMBFA			(1 << 3)
80 
81 #define KS_GRR				0x26
82 #define GRR_QMU				(1 << 1)
83 #define GRR_GSR				(1 << 0)
84 
85 #define KS_WFCR				0x2A
86 #define WFCR_MPRXE			(1 << 7)
87 #define WFCR_WF3E			(1 << 3)
88 #define WFCR_WF2E			(1 << 2)
89 #define WFCR_WF1E			(1 << 1)
90 #define WFCR_WF0E			(1 << 0)
91 
92 #define KS_WF0CRC0			0x30
93 #define KS_WF0CRC1			0x32
94 #define KS_WF0BM0			0x34
95 #define KS_WF0BM1			0x36
96 #define KS_WF0BM2			0x38
97 #define KS_WF0BM3			0x3A
98 
99 #define KS_WF1CRC0			0x40
100 #define KS_WF1CRC1			0x42
101 #define KS_WF1BM0			0x44
102 #define KS_WF1BM1			0x46
103 #define KS_WF1BM2			0x48
104 #define KS_WF1BM3			0x4A
105 
106 #define KS_WF2CRC0			0x50
107 #define KS_WF2CRC1			0x52
108 #define KS_WF2BM0			0x54
109 #define KS_WF2BM1			0x56
110 #define KS_WF2BM2			0x58
111 #define KS_WF2BM3			0x5A
112 
113 #define KS_WF3CRC0			0x60
114 #define KS_WF3CRC1			0x62
115 #define KS_WF3BM0			0x64
116 #define KS_WF3BM1			0x66
117 #define KS_WF3BM2			0x68
118 #define KS_WF3BM3			0x6A
119 
120 #define KS_TXCR				0x70
121 #define TXCR_TCGICMP			(1 << 8)
122 #define TXCR_TCGUDP			(1 << 7)
123 #define TXCR_TCGTCP			(1 << 6)
124 #define TXCR_TCGIP			(1 << 5)
125 #define TXCR_FTXQ			(1 << 4)
126 #define TXCR_TXFCE			(1 << 3)
127 #define TXCR_TXPE			(1 << 2)
128 #define TXCR_TXCRC			(1 << 1)
129 #define TXCR_TXE			(1 << 0)
130 
131 #define KS_TXSR				0x72
132 #define TXSR_TXLC			(1 << 13)
133 #define TXSR_TXMC			(1 << 12)
134 #define TXSR_TXFID_MASK			(0x3f << 0)
135 #define TXSR_TXFID_SHIFT		(0)
136 #define TXSR_TXFID_GET(_v)		(((_v) >> 0) & 0x3f)
137 
138 
139 #define KS_RXCR1			0x74
140 #define RXCR1_FRXQ			(1 << 15)
141 #define RXCR1_RXUDPFCC			(1 << 14)
142 #define RXCR1_RXTCPFCC			(1 << 13)
143 #define RXCR1_RXIPFCC			(1 << 12)
144 #define RXCR1_RXPAFMA			(1 << 11)
145 #define RXCR1_RXFCE			(1 << 10)
146 #define RXCR1_RXEFE			(1 << 9)
147 #define RXCR1_RXMAFMA			(1 << 8)
148 #define RXCR1_RXBE			(1 << 7)
149 #define RXCR1_RXME			(1 << 6)
150 #define RXCR1_RXUE			(1 << 5)
151 #define RXCR1_RXAE			(1 << 4)
152 #define RXCR1_RXINVF			(1 << 1)
153 #define RXCR1_RXE			(1 << 0)
154 #define RXCR1_FILTER_MASK    		(RXCR1_RXINVF | RXCR1_RXAE | \
155 					 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
156 
157 #define KS_RXCR2			0x76
158 #define RXCR2_SRDBL_MASK		(0x7 << 5)
159 #define RXCR2_SRDBL_SHIFT		(5)
160 #define RXCR2_SRDBL_4B			(0x0 << 5)
161 #define RXCR2_SRDBL_8B			(0x1 << 5)
162 #define RXCR2_SRDBL_16B			(0x2 << 5)
163 #define RXCR2_SRDBL_32B			(0x3 << 5)
164 /* #define RXCR2_SRDBL_FRAME		(0x4 << 5) */
165 #define RXCR2_IUFFP			(1 << 4)
166 #define RXCR2_RXIUFCEZ			(1 << 3)
167 #define RXCR2_UDPLFE			(1 << 2)
168 #define RXCR2_RXICMPFCC			(1 << 1)
169 #define RXCR2_RXSAF			(1 << 0)
170 
171 #define KS_TXMIR			0x78
172 
173 #define KS_RXFHSR			0x7C
174 #define RXFSHR_RXFV			(1 << 15)
175 #define RXFSHR_RXICMPFCS		(1 << 13)
176 #define RXFSHR_RXIPFCS			(1 << 12)
177 #define RXFSHR_RXTCPFCS			(1 << 11)
178 #define RXFSHR_RXUDPFCS			(1 << 10)
179 #define RXFSHR_RXBF			(1 << 7)
180 #define RXFSHR_RXMF			(1 << 6)
181 #define RXFSHR_RXUF			(1 << 5)
182 #define RXFSHR_RXMR			(1 << 4)
183 #define RXFSHR_RXFT			(1 << 3)
184 #define RXFSHR_RXFTL			(1 << 2)
185 #define RXFSHR_RXRF			(1 << 1)
186 #define RXFSHR_RXCE			(1 << 0)
187 #define	RXFSHR_ERR			(RXFSHR_RXCE | RXFSHR_RXRF |\
188 					RXFSHR_RXFTL | RXFSHR_RXMR |\
189 					RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
190 					RXFSHR_RXTCPFCS)
191 #define KS_RXFHBCR			0x7E
192 #define RXFHBCR_CNT_MASK		0x0FFF
193 
194 #define KS_TXQCR			0x80
195 #define TXQCR_AETFE			(1 << 2)
196 #define TXQCR_TXQMAM			(1 << 1)
197 #define TXQCR_METFE			(1 << 0)
198 
199 #define KS_RXQCR			0x82
200 #define RXQCR_RXDTTS			(1 << 12)
201 #define RXQCR_RXDBCTS			(1 << 11)
202 #define RXQCR_RXFCTS			(1 << 10)
203 #define RXQCR_RXIPHTOE			(1 << 9)
204 #define RXQCR_RXDTTE			(1 << 7)
205 #define RXQCR_RXDBCTE			(1 << 6)
206 #define RXQCR_RXFCTE			(1 << 5)
207 #define RXQCR_ADRFE			(1 << 4)
208 #define RXQCR_SDA			(1 << 3)
209 #define RXQCR_RRXEF			(1 << 0)
210 #define RXQCR_CMD_CNTL                	(RXQCR_RXFCTE|RXQCR_ADRFE)
211 
212 #define KS_TXFDPR			0x84
213 #define TXFDPR_TXFPAI			(1 << 14)
214 #define TXFDPR_TXFP_MASK		(0x7ff << 0)
215 #define TXFDPR_TXFP_SHIFT		(0)
216 
217 #define KS_RXFDPR			0x86
218 #define RXFDPR_RXFPAI			(1 << 14)
219 
220 #define KS_RXDTTR			0x8C
221 #define KS_RXDBCTR			0x8E
222 
223 #define KS_IER				0x90
224 #define KS_ISR				0x92
225 #define IRQ_LCI				(1 << 15)
226 #define IRQ_TXI				(1 << 14)
227 #define IRQ_RXI				(1 << 13)
228 #define IRQ_RXOI			(1 << 11)
229 #define IRQ_TXPSI			(1 << 9)
230 #define IRQ_RXPSI			(1 << 8)
231 #define IRQ_TXSAI			(1 << 6)
232 #define IRQ_RXWFDI			(1 << 5)
233 #define IRQ_RXMPDI			(1 << 4)
234 #define IRQ_LDI				(1 << 3)
235 #define IRQ_EDI				(1 << 2)
236 #define IRQ_SPIBEI			(1 << 1)
237 #define IRQ_DEDI			(1 << 0)
238 
239 #define KS_RXFCTR			0x9C
240 #define RXFCTR_THRESHOLD_MASK     	0x00FF
241 
242 #define KS_RXFC				0x9D
243 #define RXFCTR_RXFC_MASK		(0xff << 8)
244 #define RXFCTR_RXFC_SHIFT		(8)
245 #define RXFCTR_RXFC_GET(_v)		(((_v) >> 8) & 0xff)
246 #define RXFCTR_RXFCT_MASK		(0xff << 0)
247 #define RXFCTR_RXFCT_SHIFT		(0)
248 
249 #define KS_TXNTFSR			0x9E
250 
251 #define KS_MAHTR0			0xA0
252 #define KS_MAHTR1			0xA2
253 #define KS_MAHTR2			0xA4
254 #define KS_MAHTR3			0xA6
255 
256 #define KS_FCLWR			0xB0
257 #define KS_FCHWR			0xB2
258 #define KS_FCOWR			0xB4
259 
260 #define KS_CIDER			0xC0
261 #define CIDER_ID			0x8870
262 #define CIDER_REV_MASK			(0x7 << 1)
263 #define CIDER_REV_SHIFT			(1)
264 #define CIDER_REV_GET(_v)		(((_v) >> 1) & 0x7)
265 
266 #define KS_CGCR				0xC6
267 #define KS_IACR				0xC8
268 #define IACR_RDEN			(1 << 12)
269 #define IACR_TSEL_MASK			(0x3 << 10)
270 #define IACR_TSEL_SHIFT			(10)
271 #define IACR_TSEL_MIB			(0x3 << 10)
272 #define IACR_ADDR_MASK			(0x1f << 0)
273 #define IACR_ADDR_SHIFT			(0)
274 
275 #define KS_IADLR			0xD0
276 #define KS_IAHDR			0xD2
277 
278 #define KS_PMECR			0xD4
279 #define PMECR_PME_DELAY			(1 << 14)
280 #define PMECR_PME_POL			(1 << 12)
281 #define PMECR_WOL_WAKEUP		(1 << 11)
282 #define PMECR_WOL_MAGICPKT		(1 << 10)
283 #define PMECR_WOL_LINKUP		(1 << 9)
284 #define PMECR_WOL_ENERGY		(1 << 8)
285 #define PMECR_AUTO_WAKE_EN		(1 << 7)
286 #define PMECR_WAKEUP_NORMAL		(1 << 6)
287 #define PMECR_WKEVT_MASK		(0xf << 2)
288 #define PMECR_WKEVT_SHIFT		(2)
289 #define PMECR_WKEVT_GET(_v)		(((_v) >> 2) & 0xf)
290 #define PMECR_WKEVT_ENERGY		(0x1 << 2)
291 #define PMECR_WKEVT_LINK		(0x2 << 2)
292 #define PMECR_WKEVT_MAGICPKT		(0x4 << 2)
293 #define PMECR_WKEVT_FRAME		(0x8 << 2)
294 #define PMECR_PM_MASK			(0x3 << 0)
295 #define PMECR_PM_SHIFT			(0)
296 #define PMECR_PM_NORMAL			(0x0 << 0)
297 #define PMECR_PM_ENERGY			(0x1 << 0)
298 #define PMECR_PM_SOFTDOWN		(0x2 << 0)
299 #define PMECR_PM_POWERSAVE		(0x3 << 0)
300 
301 /* Standard MII PHY data */
302 #define KS_P1MBCR			0xE4
303 #define P1MBCR_FORCE_FDX		(1 << 8)
304 
305 #define KS_P1MBSR			0xE6
306 #define P1MBSR_AN_COMPLETE		(1 << 5)
307 #define P1MBSR_AN_CAPABLE		(1 << 3)
308 #define P1MBSR_LINK_UP			(1 << 2)
309 
310 #define KS_PHY1ILR			0xE8
311 #define KS_PHY1IHR			0xEA
312 #define KS_P1ANAR			0xEC
313 #define KS_P1ANLPR			0xEE
314 
315 #define KS_P1SCLMD			0xF4
316 #define P1SCLMD_LEDOFF			(1 << 15)
317 #define P1SCLMD_TXIDS			(1 << 14)
318 #define P1SCLMD_RESTARTAN		(1 << 13)
319 #define P1SCLMD_DISAUTOMDIX		(1 << 10)
320 #define P1SCLMD_FORCEMDIX		(1 << 9)
321 #define P1SCLMD_AUTONEGEN		(1 << 7)
322 #define P1SCLMD_FORCE100		(1 << 6)
323 #define P1SCLMD_FORCEFDX		(1 << 5)
324 #define P1SCLMD_ADV_FLOW		(1 << 4)
325 #define P1SCLMD_ADV_100BT_FDX		(1 << 3)
326 #define P1SCLMD_ADV_100BT_HDX		(1 << 2)
327 #define P1SCLMD_ADV_10BT_FDX		(1 << 1)
328 #define P1SCLMD_ADV_10BT_HDX		(1 << 0)
329 
330 #define KS_P1CR				0xF6
331 #define P1CR_HP_MDIX			(1 << 15)
332 #define P1CR_REV_POL			(1 << 13)
333 #define P1CR_OP_100M			(1 << 10)
334 #define P1CR_OP_FDX			(1 << 9)
335 #define P1CR_OP_MDI			(1 << 7)
336 #define P1CR_AN_DONE			(1 << 6)
337 #define P1CR_LINK_GOOD			(1 << 5)
338 #define P1CR_PNTR_FLOW			(1 << 4)
339 #define P1CR_PNTR_100BT_FDX		(1 << 3)
340 #define P1CR_PNTR_100BT_HDX		(1 << 2)
341 #define P1CR_PNTR_10BT_FDX		(1 << 1)
342 #define P1CR_PNTR_10BT_HDX		(1 << 0)
343 
344 /* TX Frame control */
345 
346 #define TXFR_TXIC			(1 << 15)
347 #define TXFR_TXFID_MASK			(0x3f << 0)
348 #define TXFR_TXFID_SHIFT		(0)
349 
350 #define KS_P1SR				0xF8
351 #define P1SR_HP_MDIX			(1 << 15)
352 #define P1SR_REV_POL			(1 << 13)
353 #define P1SR_OP_100M			(1 << 10)
354 #define P1SR_OP_FDX			(1 << 9)
355 #define P1SR_OP_MDI			(1 << 7)
356 #define P1SR_AN_DONE			(1 << 6)
357 #define P1SR_LINK_GOOD			(1 << 5)
358 #define P1SR_PNTR_FLOW			(1 << 4)
359 #define P1SR_PNTR_100BT_FDX		(1 << 3)
360 #define P1SR_PNTR_100BT_HDX		(1 << 2)
361 #define P1SR_PNTR_10BT_FDX		(1 << 1)
362 #define P1SR_PNTR_10BT_HDX		(1 << 0)
363 
364 #define	ENUM_BUS_NONE			0
365 #define	ENUM_BUS_8BIT			1
366 #define	ENUM_BUS_16BIT			2
367 #define	ENUM_BUS_32BIT			3
368 
369 #define MAX_MCAST_LST			32
370 #define HW_MCAST_SIZE			8
371 
372 /**
373  * union ks_tx_hdr - tx header data
374  * @txb: The header as bytes
375  * @txw: The header as 16bit, little-endian words
376  *
377  * A dual representation of the tx header data to allow
378  * access to individual bytes, and to allow 16bit accesses
379  * with 16bit alignment.
380  */
381 union ks_tx_hdr {
382 	u8      txb[4];
383 	__le16  txw[2];
384 };
385 
386 /**
387  * struct ks_net - KS8851 driver private data
388  * @netdev	: The network device we're bound to
389  * @hw_addr	: start address of data register.
390  * @hw_addr_cmd	: start address of command register.
391  * @txh    	: temporary buffer to save status/length.
392  * @lock	: Lock to ensure that the device is not accessed when busy.
393  * @pdev	: Pointer to platform device.
394  * @mii		: The MII state information for the mii calls.
395  * @frame_head_info   	: frame header information for multi-pkt rx.
396  * @statelock	: Lock on this structure for tx list.
397  * @msg_enable	: The message flags controlling driver output (see ethtool).
398  * @frame_cnt  	: number of frames received.
399  * @bus_width  	: i/o bus width.
400  * @rc_rxqcr	: Cached copy of KS_RXQCR.
401  * @rc_txcr	: Cached copy of KS_TXCR.
402  * @rc_ier	: Cached copy of KS_IER.
403  * @sharedbus  	: Multiplex (addr and data bus) mode indicator.
404  * @cmd_reg_cache	: Cached copy of the command register.
405  * @cmd_reg_cache_int	: Cached copy of the command register for the irq handler.
406  * @promiscuous	: promiscuous mode indicator.
407  * @all_mcast  	: all-multicast mode indicator.
408  * @mcast_lst_size   	: size of multicast list.
409  * @mcast_lst    	: multicast list.
410  * @mcast_bits    	: multicast hash table bits.
411  * @mac_addr   		: MAC address assigned to this device.
412  * @fid    		: frame id.
413  * @extra_byte    	: number of extra bytes prepended to the rx pkt.
414  * @enabled    		: indicates whether the device is enabled.
415  *
416  * The @lock ensures that the chip is protected when certain operations are
417  * in progress. When the read or write packet transfer is in progress, most
418  * of the chip registers are not accessible until the transfer is finished and
419  * the DMA has been de-asserted.
420  *
421  * The @statelock is used to protect information in the structure which may
422  * need to be accessed via several sources, such as the network driver layer
423  * or one of the work queues.
424  *
425  */
426 
427 /* Receive multiplex framer header info */
428 struct type_frame_head {
429 	u16	sts;         /* Frame status */
430 	u16	len;         /* Byte count */
431 };
432 
433 struct ks_net {
434 	struct net_device	*netdev;
435 	void __iomem    	*hw_addr;
436 	void __iomem    	*hw_addr_cmd;
437 	union ks_tx_hdr		txh ____cacheline_aligned;
438 	struct mutex      	lock; /* mutex protecting chip register access */
439 	struct platform_device *pdev;
440 	struct mii_if_info	mii;
441 	struct type_frame_head	*frame_head_info;
442 	spinlock_t		statelock;
443 	u32			msg_enable;
444 	u32			frame_cnt;
445 	int			bus_width;
446 
447 	u16			rc_rxqcr;
448 	u16			rc_txcr;
449 	u16			rc_ier;
450 	u16			sharedbus;
451 	u16			cmd_reg_cache;
452 	u16			cmd_reg_cache_int;
453 	u16			promiscuous;
454 	u16			all_mcast;
455 	u16			mcast_lst_size;
456 	u8			mcast_lst[MAX_MCAST_LST][ETH_ALEN];
457 	u8			mcast_bits[HW_MCAST_SIZE];
458 	u8			mac_addr[6];
459 	u8                      fid;
460 	u8			extra_byte;
461 	u8			enabled;
462 };
463 
464 static int msg_enable;
465 
466 #define BE3             0x8000      /* Byte Enable 3 */
467 #define BE2             0x4000      /* Byte Enable 2 */
468 #define BE1             0x2000      /* Byte Enable 1 */
469 #define BE0             0x1000      /* Byte Enable 0 */
470 
471 /* register read/write calls.
472  *
473  * All these calls issue transactions to access the chip's registers. They
474  * all require that the necessary lock is held to prevent accesses when the
475  * chip is busy transferring packet data (RX/TX FIFO accesses).
476  */
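
/* Access pattern, as implemented by ks_rdreg8/16 and ks_wrreg8/16 below:
 * write the register offset together with the byte-enable bits (BE0..BE3)
 * to the command register, then read or write the 16-bit data register.
 * The byte enables select which byte lane(s) of the addressed register
 * dword the data access refers to.
 */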
477 
478 /**
479  * ks_rdreg8 - read 8 bit register from device
480  * @ks	  : The chip information
481  * @offset: The register address
482  *
483  * Read an 8-bit register from the chip, returning the result
484  */
485 static u8 ks_rdreg8(struct ks_net *ks, int offset)
486 {
487 	u16 data;
488 	u8 shift_bit = offset & 0x03;
489 	u8 shift_data = (offset & 1) << 3;
490 	ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
491 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
492 	data  = ioread16(ks->hw_addr);
493 	return (u8)(data >> shift_data);
494 }
495 
496 /**
497  * ks_rdreg16 - read 16 bit register from device
498  * @ks	  : The chip information
499  * @offset: The register address
500  *
501  * Read a 16bit register from the chip, returning the result
502  */
503 
504 static u16 ks_rdreg16(struct ks_net *ks, int offset)
505 {
506 	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
507 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
508 	return ioread16(ks->hw_addr);
509 }
510 
511 /**
512  * ks_wrreg8 - write 8bit register value to chip
513  * @ks: The chip information
514  * @offset: The register address
515  * @value: The value to write
516  *
517  */
518 static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
519 {
520 	u8  shift_bit = (offset & 0x03);
521 	u16 value_write = (u16)(value << ((offset & 1) << 3));
522 	ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
523 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
524 	iowrite16(value_write, ks->hw_addr);
525 }
526 
527 /**
528  * ks_wrreg16 - write 16bit register value to chip
529  * @ks: The chip information
530  * @offset: The register address
531  * @value: The value to write
532  *
533  */
534 
535 static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
536 {
537 	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
538 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
539 	iowrite16(value, ks->hw_addr);
540 }
541 
542 /**
543  * ks_inblk - read a block of data from the QMU, called after pseudo-DMA mode is enabled.
544  * @ks: The chip state
545  * @wptr: buffer address to save data
546  * @len: length in byte to read
547  *
548  */
549 static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
550 {
551 	len >>= 1;
552 	while (len--)
553 		*wptr++ = (u16)ioread16(ks->hw_addr);
554 }
555 
556 /**
557  * ks_outblk - write data to the QMU, called after pseudo-DMA mode is enabled.
558  * @ks: The chip information
559  * @wptr: buffer address
560  * @len: length in byte to write
561  *
562  */
563 static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
564 {
565 	len >>= 1;
566 	while (len--)
567 		iowrite16(*wptr++, ks->hw_addr);
568 }
569 
570 static void ks_disable_int(struct ks_net *ks)
571 {
572 	ks_wrreg16(ks, KS_IER, 0x0000);
573 }  /* ks_disable_int */
574 
575 static void ks_enable_int(struct ks_net *ks)
576 {
577 	ks_wrreg16(ks, KS_IER, ks->rc_ier);
578 }  /* ks_enable_int */
579 
580 /**
581  * ks_tx_fifo_space - return the available hardware buffer size.
582  * @ks: The chip information
583  *
584  */
585 static inline u16 ks_tx_fifo_space(struct ks_net *ks)
586 {
587 	return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
588 }
589 
590 /**
591  * ks_save_cmd_reg - save the command register from the cache.
592  * @ks: The chip information
593  *
594  */
595 static inline void ks_save_cmd_reg(struct ks_net *ks)
596 {
597 	/* The ks8851 MLL has a bug reading back the command register,
598 	 * so rely on software to save the contents of the command register.
599 	 */
600 	ks->cmd_reg_cache_int = ks->cmd_reg_cache;
601 }
602 
603 /**
604  * ks_restore_cmd_reg - restore the command register from the cache and
605  * 	write to hardware register.
606  * @ks: The chip information
607  *
608  */
609 static inline void ks_restore_cmd_reg(struct ks_net *ks)
610 {
611 	ks->cmd_reg_cache = ks->cmd_reg_cache_int;
612 	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
613 }
614 
615 /**
616  * ks_set_powermode - set power mode of the device
617  * @ks: The chip information
618  * @pwrmode: The power mode value to write to KS_PMECR.
619  *
620  * Change the power mode of the chip.
621  */
622 static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
623 {
624 	unsigned pmecr;
625 
626 	netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
627 
628 	ks_rdreg16(ks, KS_GRR);
629 	pmecr = ks_rdreg16(ks, KS_PMECR);
630 	pmecr &= ~PMECR_PM_MASK;
631 	pmecr |= pwrmode;
632 
633 	ks_wrreg16(ks, KS_PMECR, pmecr);
634 }
635 
636 /**
637  * ks_read_config - read chip configuration of bus width.
638  * @ks: The chip information
639  *
640  */
641 static void ks_read_config(struct ks_net *ks)
642 {
643 	u16 reg_data = 0;
644 
645 	/* Regardless of bus width, 8 bit read should always work.*/
646 	reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
647 	reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
648 
649 	/* addr/data bus are multiplexed */
650 	ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
651 
652 	/* There is garbage data when reading from the QMU,
653 	 * depending on the bus width.
654 	 */
655 
656 	if (reg_data & CCR_8BIT) {
657 		ks->bus_width = ENUM_BUS_8BIT;
658 		ks->extra_byte = 1;
659 	} else if (reg_data & CCR_16BIT) {
660 		ks->bus_width = ENUM_BUS_16BIT;
661 		ks->extra_byte = 2;
662 	} else {
663 		ks->bus_width = ENUM_BUS_32BIT;
664 		ks->extra_byte = 4;
665 	}
666 }
667 
668 /**
669  * ks_soft_reset - issue one of the soft resets to the device
670  * @ks: The device state.
671  * @op: The bit(s) to set in the GRR
672  *
673  * Issue the relevant soft-reset command to the device's GRR register
674  * specified by @op.
675  *
676  * Note, the delays are in there as a caution to ensure that the reset
677  * has time to take effect and then complete. Since the datasheet does
678  * not currently specify the exact sequence, we have chosen something
679  * that seems to work with our device.
680  */
681 static void ks_soft_reset(struct ks_net *ks, unsigned op)
682 {
683 	/* Disable interrupt first */
684 	ks_wrreg16(ks, KS_IER, 0x0000);
685 	ks_wrreg16(ks, KS_GRR, op);
686 	mdelay(10);	/* wait a short time to effect reset */
687 	ks_wrreg16(ks, KS_GRR, 0);
688 	mdelay(1);	/* wait for condition to clear */
689 }
690 
691 
692 static void ks_enable_qmu(struct ks_net *ks)
693 {
694 	u16 w;
695 
696 	w = ks_rdreg16(ks, KS_TXCR);
697 	/* Enables QMU Transmit (TXCR). */
698 	ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
699 
700 	/*
701 	 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
702 	 * Enable
703 	 */
704 
705 	w = ks_rdreg16(ks, KS_RXQCR);
706 	ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
707 
708 	/* Enables QMU Receive (RXCR1). */
709 	w = ks_rdreg16(ks, KS_RXCR1);
710 	ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
711 	ks->enabled = true;
712 }  /* ks_enable_qmu */
713 
714 static void ks_disable_qmu(struct ks_net *ks)
715 {
716 	u16	w;
717 
718 	w = ks_rdreg16(ks, KS_TXCR);
719 
720 	/* Disables QMU Transmit (TXCR). */
721 	w  &= ~TXCR_TXE;
722 	ks_wrreg16(ks, KS_TXCR, w);
723 
724 	/* Disables QMU Receive (RXCR1). */
725 	w = ks_rdreg16(ks, KS_RXCR1);
726 	w &= ~RXCR1_RXE ;
727 	ks_wrreg16(ks, KS_RXCR1, w);
728 
729 	ks->enabled = false;
730 
731 }  /* ks_disable_qmu */
732 
733 /**
734  * ks_read_qmu - read 1 pkt data from the QMU.
735  * @ks: The chip information
736  * @buf: buffer address to save 1 pkt
737  * @len: Pkt length
738  * Here is the sequence to read 1 pkt:
739  *	1. set pseudo-DMA mode
740  *	2. read prepend data
741  *	3. read pkt data
742  *	4. reset pseudo-DMA mode
743  */
744 static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
745 {
746 	u32 r =  ks->extra_byte & 0x1 ;
747 	u32 w = ks->extra_byte - r;
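	/* The chip prepends ks->extra_byte bytes of dummy data to each frame;
	 * on the 8-bit bus the odd dummy byte (r) is drained separately below,
	 * while the remaining dummy bytes (w) are read together with the
	 * 2-byte status and 2-byte byte-count words.
	 */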
748 
749 	/* 1. set pseudo-DMA mode */
750 	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
751 	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
752 
753 	/* 2. read prepend data */
754 	/**
755 	 * read 4 + extra bytes and discard them.
756 	 * extra bytes for dummy, 2 for status, 2 for len
757 	 */
758 
759 	/* the dummy byte (r) is only prepended on the 8-bit bus */
760 	if (unlikely(r))
761 		ioread8(ks->hw_addr);
762 	ks_inblk(ks, buf, w + 2 + 2);
763 
764 	/* 3. read pkt data */
765 	ks_inblk(ks, buf, ALIGN(len, 4));
766 
767 	/* 4. reset pseudo-DMA mode */
768 	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
769 }
770 
771 /**
772  * ks_rcv - read multiple pkts data from the QMU.
773  * @ks: The chip information
774  * @netdev: The network device being opened.
775  *
776  * Read all of the header information before reading the pkt content.
777  * Reading only part of the pkts left in the QMU after issuing the
778  * interrupt ack is not allowed.
779  */
780 static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
781 {
782 	u32	i;
783 	struct type_frame_head *frame_hdr = ks->frame_head_info;
784 	struct sk_buff *skb;
785 
786 	ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
787 
788 	/* read all header information */
789 	for (i = 0; i < ks->frame_cnt; i++) {
790 		/* Checking Received packet status */
791 		frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
792 		/* Get packet len from hardware */
793 		frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
794 		frame_hdr++;
795 	}
796 
797 	frame_hdr = ks->frame_head_info;
798 	while (ks->frame_cnt--) {
799 		if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
800 			     frame_hdr->len >= RX_BUF_SIZE ||
801 			     frame_hdr->len <= 0)) {
802 
803 			/* discard an invalid packet */
804 			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
805 			netdev->stats.rx_dropped++;
806 			if (!(frame_hdr->sts & RXFSHR_RXFV))
807 				netdev->stats.rx_frame_errors++;
808 			else
809 				netdev->stats.rx_length_errors++;
810 			frame_hdr++;
811 			continue;
812 		}
813 
814 		skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
815 		if (likely(skb)) {
816 			skb_reserve(skb, 2);
817 			/* read data block including CRC 4 bytes */
818 			ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
819 			skb_put(skb, frame_hdr->len - 4);
820 			skb->protocol = eth_type_trans(skb, netdev);
821 			netif_rx(skb);
822 			/* exclude CRC size */
823 			netdev->stats.rx_bytes += frame_hdr->len - 4;
824 			netdev->stats.rx_packets++;
825 		} else {
826 			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
827 			netdev->stats.rx_dropped++;
828 		}
829 		frame_hdr++;
830 	}
831 }
832 
833 /**
834  * ks_update_link_status - link status update.
835  * @netdev: The network device being opened.
836  * @ks: The chip information
837  *
838  */
839 
840 static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
841 {
842 	/* check the status of the link */
843 	u32 link_up_status;
844 	if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
845 		netif_carrier_on(netdev);
846 		link_up_status = true;
847 	} else {
848 		netif_carrier_off(netdev);
849 		link_up_status = false;
850 	}
851 	netif_dbg(ks, link, ks->netdev,
852 		  "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
853 }
854 
855 /**
856  * ks_irq - device interrupt handler
857  * @irq: Interrupt number passed from the IRQ handler.
858  * @pw: The private word passed to request_irq(), our struct net_device.
859  *
860  * This is the handler invoked to find out what happened
861  *
862  * Read the interrupt status, work out what needs to be done and then clear
863  * any of the interrupts that are not needed.
864  */
865 
866 static irqreturn_t ks_irq(int irq, void *pw)
867 {
868 	struct net_device *netdev = pw;
869 	struct ks_net *ks = netdev_priv(netdev);
870 	u16 status;
871 
872 	/*this should be the first in IRQ handler */
873 	ks_save_cmd_reg(ks);
874 
875 	status = ks_rdreg16(ks, KS_ISR);
876 	if (unlikely(!status)) {
877 		ks_restore_cmd_reg(ks);
878 		return IRQ_NONE;
879 	}
880 
881 	ks_wrreg16(ks, KS_ISR, status);
882 
883 	if (likely(status & IRQ_RXI))
884 		ks_rcv(ks, netdev);
885 
886 	if (unlikely(status & IRQ_LCI))
887 		ks_update_link_status(netdev, ks);
888 
889 	if (unlikely(status & IRQ_TXI))
890 		netif_wake_queue(netdev);
891 
892 	if (unlikely(status & IRQ_LDI)) {
893 
894 		u16 pmecr = ks_rdreg16(ks, KS_PMECR);
895 		pmecr &= ~PMECR_WKEVT_MASK;
896 		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
897 	}
898 
899 	if (unlikely(status & IRQ_RXOI))
900 		ks->netdev->stats.rx_over_errors++;
901 	/* this should be the last in IRQ handler*/
902 	ks_restore_cmd_reg(ks);
903 	return IRQ_HANDLED;
904 }
905 
906 
907 /**
908  * ks_net_open - open network device
909  * @netdev: The network device being opened.
910  *
911  * Called when the network device is marked active, such as a user executing
912  * 'ifconfig up' on the device.
913  */
914 static int ks_net_open(struct net_device *netdev)
915 {
916 	struct ks_net *ks = netdev_priv(netdev);
917 	int err;
918 
919 #define	KS_INT_FLAGS	IRQF_TRIGGER_LOW
920 	/* lock the card, even if we may not actually do anything
921 	 * else at the moment.
922 	 */
923 
924 	netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);
925 
926 	/* request the IRQ line for the device */
927 	err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
928 
929 	if (err) {
930 		pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err);
931 		return err;
932 	}
933 
934 	/* wake up powermode to normal mode */
935 	ks_set_powermode(ks, PMECR_PM_NORMAL);
936 	mdelay(1);	/* wait for normal mode to take effect */
937 
938 	ks_wrreg16(ks, KS_ISR, 0xffff);
939 	ks_enable_int(ks);
940 	ks_enable_qmu(ks);
941 	netif_start_queue(ks->netdev);
942 
943 	netif_dbg(ks, ifup, ks->netdev, "network device up\n");
944 
945 	return 0;
946 }
947 
948 /**
949  * ks_net_stop - close network device
950  * @netdev: The device being closed.
951  *
952  * Called to close down a network device which has been active. Cancel any
953  * work, shut down the RX and TX processes and then place the chip into a low
954  * power state whilst it is not being used.
955  */
956 static int ks_net_stop(struct net_device *netdev)
957 {
958 	struct ks_net *ks = netdev_priv(netdev);
959 
960 	netif_info(ks, ifdown, netdev, "shutting down\n");
961 
962 	netif_stop_queue(netdev);
963 
964 	mutex_lock(&ks->lock);
965 
966 	/* turn off the IRQs and ack any outstanding */
967 	ks_wrreg16(ks, KS_IER, 0x0000);
968 	ks_wrreg16(ks, KS_ISR, 0xffff);
969 
970 	/* shutdown RX/TX QMU */
971 	ks_disable_qmu(ks);
972 
973 	/* set powermode to soft power down to save power */
974 	ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
975 	free_irq(netdev->irq, netdev);
976 	mutex_unlock(&ks->lock);
977 	return 0;
978 }
979 
980 
981 /**
982  * ks_write_qmu - write 1 pkt data to the QMU.
983  * @ks: The chip information
984  * @pdata: buffer address of the pkt to write
985  * @len: Pkt length in bytes
986  * Here is the sequence to write 1 pkt:
987  *	1. set pseudo-DMA mode
988  *	2. write status/length
989  *	3. write pkt data
990  *	4. reset pseudo-DMA mode
991  *	5. enqueue the pkt (move it from the TX buffer into the TXQ)
992  *	6. Wait until pkt is out
993  */
994 static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
995 {
996 	/* start header at txb[0] to align txw entries */
997 	ks->txh.txw[0] = 0;
998 	ks->txh.txw[1] = cpu_to_le16(len);
999 
1000 	/* 1. set pseudo-DMA mode */
1001 	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
1002 	/* 2. write status/length info */
1003 	ks_outblk(ks, ks->txh.txw, 4);
1004 	/* 3. write pkt data */
1005 	ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
1006 	/* 4. reset pseudo-DMA mode */
1007 	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
1008 	/* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
1009 	ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
1010 	/* 6. wait until TXQCR_METFE is auto-cleared */
1011 	while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
1012 		;
1013 }
1014 
1015 /**
1016  * ks_start_xmit - transmit packet
1017  * @skb		: The buffer to transmit
1018  * @netdev	: The device used to transmit the packet.
1019  *
1020  * Called by the network layer to transmit the @skb.
1021  * TX and RX must be mutually exclusive, so the chip interrupt is kept
1022  * disabled while a transmit is in progress.
1023  */
1024 static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1025 {
1026 	int retv = NETDEV_TX_OK;
1027 	struct ks_net *ks = netdev_priv(netdev);
1028 
1029 	disable_irq(netdev->irq);
1030 	ks_disable_int(ks);
1031 	spin_lock(&ks->statelock);
1032 
1033 	/* Extra space is required:
1034 	 * 4 bytes for alignment, 4 for status/length, 4 for CRC
1035 	 */
1036 
1037 	if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
1038 		ks_write_qmu(ks, skb->data, skb->len);
1039 		/* add tx statistics */
1040 		netdev->stats.tx_bytes += skb->len;
1041 		netdev->stats.tx_packets++;
1042 		dev_kfree_skb(skb);
1043 	} else
1044 		retv = NETDEV_TX_BUSY;
1045 	spin_unlock(&ks->statelock);
1046 	ks_enable_int(ks);
1047 	enable_irq(netdev->irq);
1048 	return retv;
1049 }
1050 
1051 /**
1052  * ks_start_rx - ready to serve pkts
1053  * @ks		: The chip information
1054  *
1055  */
1056 static void ks_start_rx(struct ks_net *ks)
1057 {
1058 	u16 cntl;
1059 
1060 	/* Enables QMU Receive (RXCR1). */
1061 	cntl = ks_rdreg16(ks, KS_RXCR1);
1062 	cntl |= RXCR1_RXE ;
1063 	ks_wrreg16(ks, KS_RXCR1, cntl);
1064 }  /* ks_start_rx */
1065 
1066 /**
1067  * ks_stop_rx - stop serving pkts
1068  * @ks		: The chip information
1069  *
1070  */
1071 static void ks_stop_rx(struct ks_net *ks)
1072 {
1073 	u16 cntl;
1074 
1075 	/* Disables QMU Receive (RXCR1). */
1076 	cntl = ks_rdreg16(ks, KS_RXCR1);
1077 	cntl &= ~RXCR1_RXE ;
1078 	ks_wrreg16(ks, KS_RXCR1, cntl);
1079 
1080 }  /* ks_stop_rx */
1081 
1082 static unsigned long const ethernet_polynomial = CRC32_POLY_BE;
1083 
1084 static unsigned long ether_gen_crc(int length, u8 *data)
1085 {
1086 	long crc = -1;
1087 	while (--length >= 0) {
1088 		u8 current_octet = *data++;
1089 		int bit;
1090 
1091 		for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
1092 			crc = (crc << 1) ^
1093 				((crc < 0) ^ (current_octet & 1) ?
1094 			ethernet_polynomial : 0);
1095 		}
1096 	}
1097 	return (unsigned long)crc;
1098 }  /* ether_gen_crc */
1099 
1100 /**
1101  * ks_set_grpaddr - set multicast information
1102  * @ks : The chip information
1103  */
1104 
1105 static void ks_set_grpaddr(struct ks_net *ks)
1106 {
1107 	u8	i;
1108 	u32	index, position, value;
1109 
1110 	memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
1111 
1112 	for (i = 0; i < ks->mcast_lst_size; i++) {
1113 		position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
1114 		index = position >> 3;
1115 		value = 1 << (position & 7);
1116 		ks->mcast_bits[index] |= (u8)value;
1117 	}
1118 
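	/* The top 6 bits of the CRC select one of 64 hash-table bits; the
	 * resulting bitmap is written out to MAHTR0..MAHTR3 as 16-bit words.
	 */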
1119 	for (i  = 0; i < HW_MCAST_SIZE; i++) {
1120 		if (i & 1) {
1121 			ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
1122 				(ks->mcast_bits[i] << 8) |
1123 				ks->mcast_bits[i - 1]);
1124 		}
1125 	}
1126 }  /* ks_set_grpaddr */
1127 
1128 /**
1129  * ks_clear_mcast - clear multicast information
1130  *
1131  * @ks : The chip information
1132  * This routine removes all mcast addresses set in the hardware.
1133  */
1134 
1135 static void ks_clear_mcast(struct ks_net *ks)
1136 {
1137 	u16	i, mcast_size;
1138 	for (i = 0; i < HW_MCAST_SIZE; i++)
1139 		ks->mcast_bits[i] = 0;
1140 
1141 	mcast_size = HW_MCAST_SIZE >> 2;
1142 	for (i = 0; i < mcast_size; i++)
1143 		ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
1144 }
1145 
1146 static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
1147 {
1148 	u16		cntl;
1149 	ks->promiscuous = promiscuous_mode;
1150 	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
1151 	cntl = ks_rdreg16(ks, KS_RXCR1);
1152 
1153 	cntl &= ~RXCR1_FILTER_MASK;
1154 	if (promiscuous_mode)
1155 		/* Enable Promiscuous mode */
1156 		cntl |= RXCR1_RXAE | RXCR1_RXINVF;
1157 	else
1158 		/* Disable Promiscuous mode (default normal mode) */
1159 		cntl |= RXCR1_RXPAFMA;
1160 
1161 	ks_wrreg16(ks, KS_RXCR1, cntl);
1162 
1163 	if (ks->enabled)
1164 		ks_start_rx(ks);
1165 
1166 }  /* ks_set_promis */
1167 
1168 static void ks_set_mcast(struct ks_net *ks, u16 mcast)
1169 {
1170 	u16	cntl;
1171 
1172 	ks->all_mcast = mcast;
1173 	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
1174 	cntl = ks_rdreg16(ks, KS_RXCR1);
1175 	cntl &= ~RXCR1_FILTER_MASK;
1176 	if (mcast)
1177 		/* Enable "Perfect with Multicast address passed mode" */
1178 		cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1179 	else
1180 		/**
1181 		 * Disable "Perfect with Multicast address passed
1182 		 * mode" (normal mode).
1183 		 */
1184 		cntl |= RXCR1_RXPAFMA;
1185 
1186 	ks_wrreg16(ks, KS_RXCR1, cntl);
1187 
1188 	if (ks->enabled)
1189 		ks_start_rx(ks);
1190 }  /* ks_set_mcast */
1191 
1192 static void ks_set_rx_mode(struct net_device *netdev)
1193 {
1194 	struct ks_net *ks = netdev_priv(netdev);
1195 	struct netdev_hw_addr *ha;
1196 
1197 	/* Turn on/off promiscuous mode. */
1198 	if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
1199 		ks_set_promis(ks,
1200 			(u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
1201 	/* Turn on/off all mcast mode. */
1202 	else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
1203 		ks_set_mcast(ks,
1204 			(u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
1205 	else
1206 		ks_set_promis(ks, false);
1207 
1208 	if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
1209 		if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
1210 			int i = 0;
1211 
1212 			netdev_for_each_mc_addr(ha, netdev) {
1213 				if (i >= MAX_MCAST_LST)
1214 					break;
1215 				memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
1216 			}
1217 			ks->mcast_lst_size = (u8)i;
1218 			ks_set_grpaddr(ks);
1219 		} else {
1220 			/**
1221 			 * List too big to support so
1222 			 * turn on all mcast mode.
1223 			 */
1224 			ks->mcast_lst_size = MAX_MCAST_LST;
1225 			ks_set_mcast(ks, true);
1226 		}
1227 	} else {
1228 		ks->mcast_lst_size = 0;
1229 		ks_clear_mcast(ks);
1230 	}
1231 } /* ks_set_rx_mode */
1232 
1233 static void ks_set_mac(struct ks_net *ks, u8 *data)
1234 {
1235 	u16 *pw = (u16 *)data;
1236 	u16 w, u;
1237 
1238 	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
1239 
1240 	u = *pw++;
1241 	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1242 	ks_wrreg16(ks, KS_MARH, w);
1243 
1244 	u = *pw++;
1245 	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1246 	ks_wrreg16(ks, KS_MARM, w);
1247 
1248 	u = *pw;
1249 	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1250 	ks_wrreg16(ks, KS_MARL, w);
1251 
1252 	memcpy(ks->mac_addr, data, ETH_ALEN);
1253 
1254 	if (ks->enabled)
1255 		ks_start_rx(ks);
1256 }
1257 
1258 static int ks_set_mac_address(struct net_device *netdev, void *paddr)
1259 {
1260 	struct ks_net *ks = netdev_priv(netdev);
1261 	struct sockaddr *addr = paddr;
1262 	u8 *da;
1263 
1264 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1265 
1266 	da = (u8 *)netdev->dev_addr;
1267 
1268 	ks_set_mac(ks, da);
1269 	return 0;
1270 }
1271 
1272 static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
1273 {
1274 	struct ks_net *ks = netdev_priv(netdev);
1275 
1276 	if (!netif_running(netdev))
1277 		return -EINVAL;
1278 
1279 	return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
1280 }
1281 
1282 static const struct net_device_ops ks_netdev_ops = {
1283 	.ndo_open		= ks_net_open,
1284 	.ndo_stop		= ks_net_stop,
1285 	.ndo_do_ioctl		= ks_net_ioctl,
1286 	.ndo_start_xmit		= ks_start_xmit,
1287 	.ndo_set_mac_address	= ks_set_mac_address,
1288 	.ndo_set_rx_mode	= ks_set_rx_mode,
1289 	.ndo_validate_addr	= eth_validate_addr,
1290 };
1291 
1292 /* ethtool support */
1293 
1294 static void ks_get_drvinfo(struct net_device *netdev,
1295 			       struct ethtool_drvinfo *di)
1296 {
1297 	strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
1298 	strlcpy(di->version, "1.00", sizeof(di->version));
1299 	strlcpy(di->bus_info, dev_name(netdev->dev.parent),
1300 		sizeof(di->bus_info));
1301 }
1302 
1303 static u32 ks_get_msglevel(struct net_device *netdev)
1304 {
1305 	struct ks_net *ks = netdev_priv(netdev);
1306 	return ks->msg_enable;
1307 }
1308 
1309 static void ks_set_msglevel(struct net_device *netdev, u32 to)
1310 {
1311 	struct ks_net *ks = netdev_priv(netdev);
1312 	ks->msg_enable = to;
1313 }
1314 
1315 static int ks_get_link_ksettings(struct net_device *netdev,
1316 				 struct ethtool_link_ksettings *cmd)
1317 {
1318 	struct ks_net *ks = netdev_priv(netdev);
1319 
1320 	mii_ethtool_get_link_ksettings(&ks->mii, cmd);
1321 
1322 	return 0;
1323 }
1324 
1325 static int ks_set_link_ksettings(struct net_device *netdev,
1326 				 const struct ethtool_link_ksettings *cmd)
1327 {
1328 	struct ks_net *ks = netdev_priv(netdev);
1329 	return mii_ethtool_set_link_ksettings(&ks->mii, cmd);
1330 }
1331 
1332 static u32 ks_get_link(struct net_device *netdev)
1333 {
1334 	struct ks_net *ks = netdev_priv(netdev);
1335 	return mii_link_ok(&ks->mii);
1336 }
1337 
1338 static int ks_nway_reset(struct net_device *netdev)
1339 {
1340 	struct ks_net *ks = netdev_priv(netdev);
1341 	return mii_nway_restart(&ks->mii);
1342 }
1343 
1344 static const struct ethtool_ops ks_ethtool_ops = {
1345 	.get_drvinfo	= ks_get_drvinfo,
1346 	.get_msglevel	= ks_get_msglevel,
1347 	.set_msglevel	= ks_set_msglevel,
1348 	.get_link	= ks_get_link,
1349 	.nway_reset	= ks_nway_reset,
1350 	.get_link_ksettings = ks_get_link_ksettings,
1351 	.set_link_ksettings = ks_set_link_ksettings,
1352 };
1353 
1354 /* MII interface controls */
1355 
1356 /**
1357  * ks_phy_reg - convert MII register into a KS8851 register
1358  * @reg: MII register number.
1359  *
1360  * Return the KS8851 register number for the corresponding MII PHY register
1361  * if possible. Return zero if the MII register has no direct mapping to the
1362  * KS8851 register set.
1363  */
1364 static int ks_phy_reg(int reg)
1365 {
1366 	switch (reg) {
1367 	case MII_BMCR:
1368 		return KS_P1MBCR;
1369 	case MII_BMSR:
1370 		return KS_P1MBSR;
1371 	case MII_PHYSID1:
1372 		return KS_PHY1ILR;
1373 	case MII_PHYSID2:
1374 		return KS_PHY1IHR;
1375 	case MII_ADVERTISE:
1376 		return KS_P1ANAR;
1377 	case MII_LPA:
1378 		return KS_P1ANLPR;
1379 	}
1380 
1381 	return 0x0;
1382 }
1383 
1384 /**
1385  * ks_phy_read - MII interface PHY register read.
1386  * @netdev: The network device the PHY is on.
1387  * @phy_addr: Address of PHY (ignored as we only have one)
1388  * @reg: The register to read.
1389  *
1390  * This call reads data from the PHY register specified in @reg. Since the
1391  * device does not support all the MII registers, the non-existent values
1392  * are always returned as zero.
1393  *
1394  * We return zero for unsupported registers as the MII code does not check
1395  * the value returned for any error status, and simply returns it to the
1396  * caller. The mii-tool that the driver was tested with takes any -ve error
1397  * as real PHY capabilities, thus displaying incorrect data to the user.
1398  */
1399 static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
1400 {
1401 	struct ks_net *ks = netdev_priv(netdev);
1402 	int ksreg;
1403 	int result;
1404 
1405 	ksreg = ks_phy_reg(reg);
1406 	if (!ksreg)
1407 		return 0x0;	/* no error return allowed, so use zero */
1408 
1409 	mutex_lock(&ks->lock);
1410 	result = ks_rdreg16(ks, ksreg);
1411 	mutex_unlock(&ks->lock);
1412 
1413 	return result;
1414 }
1415 
1416 static void ks_phy_write(struct net_device *netdev,
1417 			     int phy, int reg, int value)
1418 {
1419 	struct ks_net *ks = netdev_priv(netdev);
1420 	int ksreg;
1421 
1422 	ksreg = ks_phy_reg(reg);
1423 	if (ksreg) {
1424 		mutex_lock(&ks->lock);
1425 		ks_wrreg16(ks, ksreg, value);
1426 		mutex_unlock(&ks->lock);
1427 	}
1428 }
1429 
1430 /**
1431  * ks_read_selftest - read the selftest memory info.
1432  * @ks: The device state
1433  *
1434  * Read and check the TX/RX memory selftest information.
1435  */
1436 static int ks_read_selftest(struct ks_net *ks)
1437 {
1438 	unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1439 	int ret = 0;
1440 	unsigned rd;
1441 
1442 	rd = ks_rdreg16(ks, KS_MBIR);
1443 
1444 	if ((rd & both_done) != both_done) {
1445 		netdev_warn(ks->netdev, "Memory selftest not finished\n");
1446 		return 0;
1447 	}
1448 
1449 	if (rd & MBIR_TXMBFA) {
1450 		netdev_err(ks->netdev, "TX memory selftest fails\n");
1451 		ret |= 1;
1452 	}
1453 
1454 	if (rd & MBIR_RXMBFA) {
1455 		netdev_err(ks->netdev, "RX memory selftest fails\n");
1456 		ret |= 2;
1457 	}
1458 
1459 	if (!ret) netdev_info(ks->netdev, "the selftest passes\n");
1460 	return ret;
1461 }
1462 
1463 static void ks_setup(struct ks_net *ks)
1464 {
1465 	u16	w;
1466 
1467 	/**
1468 	 * Configure QMU Transmit
1469 	 */
1470 
1471 	/* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
1472 	ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
1473 
1474 	/* Setup Receive Frame Data Pointer Auto-Increment */
1475 	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
1476 
1477 	/* Setup Receive Frame Threshold - 1 frame (RXFCTR) */
1478 	ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
1479 
1480 	/* Setup RxQ Command Control (RXQCR) */
1481 	ks->rc_rxqcr = RXQCR_CMD_CNTL;
1482 	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
1483 
1484 	/*
1485 	 * Set the forced mode to half duplex; the default is full duplex,
1486 	 * because if auto-negotiation fails most switches fall back to
1487 	 * half-duplex.
1488 	 */
1489 
1490 	w = ks_rdreg16(ks, KS_P1MBCR);
1491 	w &= ~P1MBCR_FORCE_FDX;
1492 	ks_wrreg16(ks, KS_P1MBCR, w);
1493 
1494 	w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
1495 	ks_wrreg16(ks, KS_TXCR, w);
1496 
1497 	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;
1498 
1499 	if (ks->promiscuous)         /* bPromiscuous */
1500 		w |= (RXCR1_RXAE | RXCR1_RXINVF);
1501 	else if (ks->all_mcast) /* Multicast address passed mode */
1502 		w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1503 	else                                   /* Normal mode */
1504 		w |= RXCR1_RXPAFMA;
1505 
1506 	ks_wrreg16(ks, KS_RXCR1, w);
1507 }  /*ks_setup */
1508 
1509 
1510 static void ks_setup_int(struct ks_net *ks)
1511 {
1512 	ks->rc_ier = 0x00;
1513 	/* Clear the interrupts status of the hardware. */
1514 	ks_wrreg16(ks, KS_ISR, 0xffff);
1515 
1516 	/* Interrupts to be enabled when the interface is brought up. */
1517 	ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
1518 }  /* ks_setup_int */
1519 
1520 static int ks_hw_init(struct ks_net *ks)
1521 {
1522 #define	MHEADER_SIZE	(sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
1523 	ks->promiscuous = 0;
1524 	ks->all_mcast = 0;
1525 	ks->mcast_lst_size = 0;
1526 
1527 	ks->frame_head_info = devm_kmalloc(&ks->pdev->dev, MHEADER_SIZE,
1528 					   GFP_KERNEL);
1529 	if (!ks->frame_head_info)
1530 		return false;
1531 
1532 	ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
1533 	return true;
1534 }
1535 
1536 #if defined(CONFIG_OF)
1537 static const struct of_device_id ks8851_ml_dt_ids[] = {
1538 	{ .compatible = "micrel,ks8851-mll" },
1539 	{ /* sentinel */ }
1540 };
1541 MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids);
1542 #endif
1543 
1544 static int ks8851_probe(struct platform_device *pdev)
1545 {
1546 	int err;
1547 	struct resource *io_d, *io_c;
1548 	struct net_device *netdev;
1549 	struct ks_net *ks;
1550 	u16 id, data;
1551 	const char *mac;
1552 
1553 	netdev = alloc_etherdev(sizeof(struct ks_net));
1554 	if (!netdev)
1555 		return -ENOMEM;
1556 
1557 	SET_NETDEV_DEV(netdev, &pdev->dev);
1558 
1559 	ks = netdev_priv(netdev);
1560 	ks->netdev = netdev;
1561 
1562 	io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1563 	ks->hw_addr = devm_ioremap_resource(&pdev->dev, io_d);
1564 	if (IS_ERR(ks->hw_addr)) {
1565 		err = PTR_ERR(ks->hw_addr);
1566 		goto err_free;
1567 	}
1568 
1569 	io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1570 	ks->hw_addr_cmd = devm_ioremap_resource(&pdev->dev, io_c);
1571 	if (IS_ERR(ks->hw_addr_cmd)) {
1572 		err = PTR_ERR(ks->hw_addr_cmd);
1573 		goto err_free;
1574 	}
1575 
1576 	netdev->irq = platform_get_irq(pdev, 0);
1577 
1578 	if ((int)netdev->irq < 0) {
1579 		err = netdev->irq;
1580 		goto err_free;
1581 	}
1582 
1583 	ks->pdev = pdev;
1584 
1585 	mutex_init(&ks->lock);
1586 	spin_lock_init(&ks->statelock);
1587 
1588 	netdev->netdev_ops = &ks_netdev_ops;
1589 	netdev->ethtool_ops = &ks_ethtool_ops;
1590 
1591 	/* setup mii state */
1592 	ks->mii.dev             = netdev;
1593 	ks->mii.phy_id          = 1;
1594 	ks->mii.phy_id_mask     = 1;
1595 	ks->mii.reg_num_mask    = 0xf;
1596 	ks->mii.mdio_read       = ks_phy_read;
1597 	ks->mii.mdio_write      = ks_phy_write;
1598 
1599 	netdev_info(netdev, "message enable is %d\n", msg_enable);
1600 	/* set the default message enable */
1601 	ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1602 						     NETIF_MSG_PROBE |
1603 						     NETIF_MSG_LINK));
1604 	ks_read_config(ks);
1605 
1606 	/* simple check for a valid chip being connected to the bus */
1607 	if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1608 		netdev_err(netdev, "failed to read device ID\n");
1609 		err = -ENODEV;
1610 		goto err_free;
1611 	}
1612 
1613 	if (ks_read_selftest(ks)) {
1614 		netdev_err(netdev, "chip memory selftest failed\n");
1615 		err = -ENODEV;
1616 		goto err_free;
1617 	}
1618 
1619 	err = register_netdev(netdev);
1620 	if (err)
1621 		goto err_free;
1622 
1623 	platform_set_drvdata(pdev, netdev);
1624 
1625 	ks_soft_reset(ks, GRR_GSR);
1626 	ks_hw_init(ks);
1627 	ks_disable_qmu(ks);
1628 	ks_setup(ks);
1629 	ks_setup_int(ks);
1630 
1631 	data = ks_rdreg16(ks, KS_OBCR);
1632 	ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
1633 
1634 	/* overwriting the default MAC address */
1635 	if (pdev->dev.of_node) {
1636 		mac = of_get_mac_address(pdev->dev.of_node);
1637 		if (mac)
1638 			memcpy(ks->mac_addr, mac, ETH_ALEN);
1639 	} else {
1640 		struct ks8851_mll_platform_data *pdata;
1641 
1642 		pdata = dev_get_platdata(&pdev->dev);
1643 		if (!pdata) {
1644 			netdev_err(netdev, "No platform data\n");
1645 			err = -ENODEV;
1646 			goto err_pdata;
1647 		}
1648 		memcpy(ks->mac_addr, pdata->mac_addr, ETH_ALEN);
1649 	}
1650 	if (!is_valid_ether_addr(ks->mac_addr)) {
1651 		/* Use random MAC address if none passed */
1652 		eth_random_addr(ks->mac_addr);
1653 		netdev_info(netdev, "Using random mac address\n");
1654 	}
1655 	netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
1656 
1657 	memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
1658 
1659 	ks_set_mac(ks, netdev->dev_addr);
1660 
1661 	id = ks_rdreg16(ks, KS_CIDER);
1662 
1663 	netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1664 		    (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1665 	return 0;
1666 
1667 err_pdata:
1668 	unregister_netdev(netdev);
1669 err_free:
1670 	free_netdev(netdev);
1671 	return err;
1672 }
1673 
1674 static int ks8851_remove(struct platform_device *pdev)
1675 {
1676 	struct net_device *netdev = platform_get_drvdata(pdev);
1677 
1678 	unregister_netdev(netdev);
1679 	free_netdev(netdev);
1680 	return 0;
1681 
1682 }
1683 
1684 static struct platform_driver ks8851_platform_driver = {
1685 	.driver = {
1686 		.name = DRV_NAME,
1687 		.of_match_table	= of_match_ptr(ks8851_ml_dt_ids),
1688 	},
1689 	.probe = ks8851_probe,
1690 	.remove = ks8851_remove,
1691 };
1692 
1693 module_platform_driver(ks8851_platform_driver);
1694 
1695 MODULE_DESCRIPTION("KS8851 MLL Network driver");
1696 MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
1697 MODULE_LICENSE("GPL");
1698 module_param_named(message, msg_enable, int, 0);
1699 MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1700 
1701