1 /*
2  * B53 switch driver main logic
3  *
4  * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
5  * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
6  *
7  * Permission to use, copy, modify, and/or distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <linux/delay.h>
21 #include <linux/export.h>
22 #include <linux/gpio.h>
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/platform_data/b53.h>
26 #include <linux/phy.h>
27 #include <linux/phylink.h>
28 #include <linux/etherdevice.h>
29 #include <linux/if_bridge.h>
30 #include <net/dsa.h>
31 
32 #include "b53_regs.h"
33 #include "b53_priv.h"
34 
35 struct b53_mib_desc {
36 	u8 size;
37 	u8 offset;
38 	const char *name;
39 };
40 
41 /* BCM5365 MIB counters */
42 static const struct b53_mib_desc b53_mibs_65[] = {
43 	{ 8, 0x00, "TxOctets" },
44 	{ 4, 0x08, "TxDropPkts" },
45 	{ 4, 0x10, "TxBroadcastPkts" },
46 	{ 4, 0x14, "TxMulticastPkts" },
47 	{ 4, 0x18, "TxUnicastPkts" },
48 	{ 4, 0x1c, "TxCollisions" },
49 	{ 4, 0x20, "TxSingleCollision" },
50 	{ 4, 0x24, "TxMultipleCollision" },
51 	{ 4, 0x28, "TxDeferredTransmit" },
52 	{ 4, 0x2c, "TxLateCollision" },
53 	{ 4, 0x30, "TxExcessiveCollision" },
54 	{ 4, 0x38, "TxPausePkts" },
55 	{ 8, 0x44, "RxOctets" },
56 	{ 4, 0x4c, "RxUndersizePkts" },
57 	{ 4, 0x50, "RxPausePkts" },
58 	{ 4, 0x54, "Pkts64Octets" },
59 	{ 4, 0x58, "Pkts65to127Octets" },
60 	{ 4, 0x5c, "Pkts128to255Octets" },
61 	{ 4, 0x60, "Pkts256to511Octets" },
62 	{ 4, 0x64, "Pkts512to1023Octets" },
63 	{ 4, 0x68, "Pkts1024to1522Octets" },
64 	{ 4, 0x6c, "RxOversizePkts" },
65 	{ 4, 0x70, "RxJabbers" },
66 	{ 4, 0x74, "RxAlignmentErrors" },
67 	{ 4, 0x78, "RxFCSErrors" },
68 	{ 8, 0x7c, "RxGoodOctets" },
69 	{ 4, 0x84, "RxDropPkts" },
70 	{ 4, 0x88, "RxUnicastPkts" },
71 	{ 4, 0x8c, "RxMulticastPkts" },
72 	{ 4, 0x90, "RxBroadcastPkts" },
73 	{ 4, 0x94, "RxSAChanges" },
74 	{ 4, 0x98, "RxFragments" },
75 };
76 
77 #define B53_MIBS_65_SIZE	ARRAY_SIZE(b53_mibs_65)
78 
79 /* BCM63xx MIB counters */
80 static const struct b53_mib_desc b53_mibs_63xx[] = {
81 	{ 8, 0x00, "TxOctets" },
82 	{ 4, 0x08, "TxDropPkts" },
83 	{ 4, 0x0c, "TxQoSPkts" },
84 	{ 4, 0x10, "TxBroadcastPkts" },
85 	{ 4, 0x14, "TxMulticastPkts" },
86 	{ 4, 0x18, "TxUnicastPkts" },
87 	{ 4, 0x1c, "TxCollisions" },
88 	{ 4, 0x20, "TxSingleCollision" },
89 	{ 4, 0x24, "TxMultipleCollision" },
90 	{ 4, 0x28, "TxDeferredTransmit" },
91 	{ 4, 0x2c, "TxLateCollision" },
92 	{ 4, 0x30, "TxExcessiveCollision" },
93 	{ 4, 0x38, "TxPausePkts" },
94 	{ 8, 0x3c, "TxQoSOctets" },
95 	{ 8, 0x44, "RxOctets" },
96 	{ 4, 0x4c, "RxUndersizePkts" },
97 	{ 4, 0x50, "RxPausePkts" },
98 	{ 4, 0x54, "Pkts64Octets" },
99 	{ 4, 0x58, "Pkts65to127Octets" },
100 	{ 4, 0x5c, "Pkts128to255Octets" },
101 	{ 4, 0x60, "Pkts256to511Octets" },
102 	{ 4, 0x64, "Pkts512to1023Octets" },
103 	{ 4, 0x68, "Pkts1024to1522Octets" },
104 	{ 4, 0x6c, "RxOversizePkts" },
105 	{ 4, 0x70, "RxJabbers" },
106 	{ 4, 0x74, "RxAlignmentErrors" },
107 	{ 4, 0x78, "RxFCSErrors" },
108 	{ 8, 0x7c, "RxGoodOctets" },
109 	{ 4, 0x84, "RxDropPkts" },
110 	{ 4, 0x88, "RxUnicastPkts" },
111 	{ 4, 0x8c, "RxMulticastPkts" },
112 	{ 4, 0x90, "RxBroadcastPkts" },
113 	{ 4, 0x94, "RxSAChanges" },
114 	{ 4, 0x98, "RxFragments" },
115 	{ 4, 0xa0, "RxSymbolErrors" },
116 	{ 4, 0xa4, "RxQoSPkts" },
117 	{ 8, 0xa8, "RxQoSOctets" },
118 	{ 4, 0xb0, "Pkts1523to2047Octets" },
119 	{ 4, 0xb4, "Pkts2048to4095Octets" },
120 	{ 4, 0xb8, "Pkts4096to8191Octets" },
121 	{ 4, 0xbc, "Pkts8192to9728Octets" },
122 	{ 4, 0xc0, "RxDiscarded" },
123 };
124 
125 #define B53_MIBS_63XX_SIZE	ARRAY_SIZE(b53_mibs_63xx)
126 
127 /* MIB counters */
128 static const struct b53_mib_desc b53_mibs[] = {
129 	{ 8, 0x00, "TxOctets" },
130 	{ 4, 0x08, "TxDropPkts" },
131 	{ 4, 0x10, "TxBroadcastPkts" },
132 	{ 4, 0x14, "TxMulticastPkts" },
133 	{ 4, 0x18, "TxUnicastPkts" },
134 	{ 4, 0x1c, "TxCollisions" },
135 	{ 4, 0x20, "TxSingleCollision" },
136 	{ 4, 0x24, "TxMultipleCollision" },
137 	{ 4, 0x28, "TxDeferredTransmit" },
138 	{ 4, 0x2c, "TxLateCollision" },
139 	{ 4, 0x30, "TxExcessiveCollision" },
140 	{ 4, 0x38, "TxPausePkts" },
141 	{ 8, 0x50, "RxOctets" },
142 	{ 4, 0x58, "RxUndersizePkts" },
143 	{ 4, 0x5c, "RxPausePkts" },
144 	{ 4, 0x60, "Pkts64Octets" },
145 	{ 4, 0x64, "Pkts65to127Octets" },
146 	{ 4, 0x68, "Pkts128to255Octets" },
147 	{ 4, 0x6c, "Pkts256to511Octets" },
148 	{ 4, 0x70, "Pkts512to1023Octets" },
149 	{ 4, 0x74, "Pkts1024to1522Octets" },
150 	{ 4, 0x78, "RxOversizePkts" },
151 	{ 4, 0x7c, "RxJabbers" },
152 	{ 4, 0x80, "RxAlignmentErrors" },
153 	{ 4, 0x84, "RxFCSErrors" },
154 	{ 8, 0x88, "RxGoodOctets" },
155 	{ 4, 0x90, "RxDropPkts" },
156 	{ 4, 0x94, "RxUnicastPkts" },
157 	{ 4, 0x98, "RxMulticastPkts" },
158 	{ 4, 0x9c, "RxBroadcastPkts" },
159 	{ 4, 0xa0, "RxSAChanges" },
160 	{ 4, 0xa4, "RxFragments" },
161 	{ 4, 0xa8, "RxJumboPkts" },
162 	{ 4, 0xac, "RxSymbolErrors" },
163 	{ 4, 0xc0, "RxDiscarded" },
164 };
165 
166 #define B53_MIBS_SIZE	ARRAY_SIZE(b53_mibs)
167 
168 static const struct b53_mib_desc b53_mibs_58xx[] = {
169 	{ 8, 0x00, "TxOctets" },
170 	{ 4, 0x08, "TxDropPkts" },
171 	{ 4, 0x0c, "TxQPKTQ0" },
172 	{ 4, 0x10, "TxBroadcastPkts" },
173 	{ 4, 0x14, "TxMulticastPkts" },
174 	{ 4, 0x18, "TxUnicastPKts" },
175 	{ 4, 0x1c, "TxCollisions" },
176 	{ 4, 0x20, "TxSingleCollision" },
177 	{ 4, 0x24, "TxMultipleCollision" },
178 	{ 4, 0x28, "TxDeferredCollision" },
179 	{ 4, 0x2c, "TxLateCollision" },
180 	{ 4, 0x30, "TxExcessiveCollision" },
181 	{ 4, 0x34, "TxFrameInDisc" },
182 	{ 4, 0x38, "TxPausePkts" },
183 	{ 4, 0x3c, "TxQPKTQ1" },
184 	{ 4, 0x40, "TxQPKTQ2" },
185 	{ 4, 0x44, "TxQPKTQ3" },
186 	{ 4, 0x48, "TxQPKTQ4" },
187 	{ 4, 0x4c, "TxQPKTQ5" },
188 	{ 8, 0x50, "RxOctets" },
189 	{ 4, 0x58, "RxUndersizePkts" },
190 	{ 4, 0x5c, "RxPausePkts" },
191 	{ 4, 0x60, "RxPkts64Octets" },
192 	{ 4, 0x64, "RxPkts65to127Octets" },
193 	{ 4, 0x68, "RxPkts128to255Octets" },
194 	{ 4, 0x6c, "RxPkts256to511Octets" },
195 	{ 4, 0x70, "RxPkts512to1023Octets" },
196 	{ 4, 0x74, "RxPkts1024toMaxPktsOctets" },
197 	{ 4, 0x78, "RxOversizePkts" },
198 	{ 4, 0x7c, "RxJabbers" },
199 	{ 4, 0x80, "RxAlignmentErrors" },
200 	{ 4, 0x84, "RxFCSErrors" },
201 	{ 8, 0x88, "RxGoodOctets" },
202 	{ 4, 0x90, "RxDropPkts" },
203 	{ 4, 0x94, "RxUnicastPkts" },
204 	{ 4, 0x98, "RxMulticastPkts" },
205 	{ 4, 0x9c, "RxBroadcastPkts" },
206 	{ 4, 0xa0, "RxSAChanges" },
207 	{ 4, 0xa4, "RxFragments" },
208 	{ 4, 0xa8, "RxJumboPkt" },
209 	{ 4, 0xac, "RxSymblErr" },
210 	{ 4, 0xb0, "InRangeErrCount" },
211 	{ 4, 0xb4, "OutRangeErrCount" },
212 	{ 4, 0xb8, "EEELpiEvent" },
213 	{ 4, 0xbc, "EEELpiDuration" },
214 	{ 4, 0xc0, "RxDiscard" },
215 	{ 4, 0xc8, "TxQPKTQ6" },
216 	{ 4, 0xcc, "TxQPKTQ7" },
217 	{ 4, 0xd0, "TxPkts64Octets" },
218 	{ 4, 0xd4, "TxPkts65to127Octets" },
219 	{ 4, 0xd8, "TxPkts128to255Octets" },
220 	{ 4, 0xdc, "TxPkts256to511Ocets" },
221 	{ 4, 0xe0, "TxPkts512to1023Ocets" },
222 	{ 4, 0xe4, "TxPkts1024toMaxPktOcets" },
223 };
224 
225 #define B53_MIBS_58XX_SIZE	ARRAY_SIZE(b53_mibs_58xx)
226 
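/* Kick off a VLAN table operation and poll (up to ten times, sleeping
 * briefly in between) for the hardware to clear VTA_START_CMD.
 * Returns 0 on completion or -EIO if the switch never acknowledges.
 */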
227 static int b53_do_vlan_op(struct b53_device *dev, u8 op)
228 {
229 	unsigned int i;
230 
231 	b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);
232 
233 	for (i = 0; i < 10; i++) {
234 		u8 vta;
235 
236 		b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
237 		if (!(vta & VTA_START_CMD))
238 			return 0;
239 
240 		usleep_range(100, 200);
241 	}
242 
243 	return -EIO;
244 }
245 
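/* Program one VLAN table entry (member and untag port masks), using the
 * direct table-access registers on BCM5325/BCM5365 and the indexed VTA
 * registers on every other chip.
 */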
246 static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
247 			       struct b53_vlan *vlan)
248 {
249 	if (is5325(dev)) {
250 		u32 entry = 0;
251 
252 		if (vlan->members) {
253 			entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
254 				 VA_UNTAG_S_25) | vlan->members;
255 			if (dev->core_rev >= 3)
256 				entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
257 			else
258 				entry |= VA_VALID_25;
259 		}
260 
261 		b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
262 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
263 			    VTA_RW_STATE_WR | VTA_RW_OP_EN);
264 	} else if (is5365(dev)) {
265 		u16 entry = 0;
266 
267 		if (vlan->members)
268 			entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
269 				 VA_UNTAG_S_65) | vlan->members | VA_VALID_65;
270 
271 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
272 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
273 			    VTA_RW_STATE_WR | VTA_RW_OP_EN);
274 	} else {
275 		b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
276 		b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
277 			    (vlan->untag << VTE_UNTAG_S) | vlan->members);
278 
279 		b53_do_vlan_op(dev, VTA_CMD_WRITE);
280 	}
281 
282 	dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
283 		vid, vlan->members, vlan->untag);
284 }
285 
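/* Read one VLAN table entry back into @vlan, mirroring the chip-specific
 * access methods used by b53_set_vlan_entry().
 */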
286 static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
287 			       struct b53_vlan *vlan)
288 {
289 	if (is5325(dev)) {
290 		u32 entry = 0;
291 
292 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
293 			    VTA_RW_STATE_RD | VTA_RW_OP_EN);
294 		b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);
295 
296 		if (dev->core_rev >= 3)
297 			vlan->valid = !!(entry & VA_VALID_25_R4);
298 		else
299 			vlan->valid = !!(entry & VA_VALID_25);
300 		vlan->members = entry & VA_MEMBER_MASK;
301 		vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;
302 
303 	} else if (is5365(dev)) {
304 		u16 entry = 0;
305 
306 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
307 			    VTA_RW_STATE_WR | VTA_RW_OP_EN);
308 		b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);
309 
310 		vlan->valid = !!(entry & VA_VALID_65);
311 		vlan->members = entry & VA_MEMBER_MASK;
312 		vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
313 	} else {
314 		u32 entry = 0;
315 
316 		b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
317 		b53_do_vlan_op(dev, VTA_CMD_READ);
318 		b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
319 		vlan->members = entry & VTE_MEMBERS;
320 		vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
321 		vlan->valid = true;
322 	}
323 }
324 
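/* Globally enable or disable frame forwarding. The IMP port is always
 * included in dumb forwarding mode, and the unicast/multicast/IP
 * multicast forward-enable bits are set so lookup results decide
 * whether frames are flooded.
 */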
325 static void b53_set_forwarding(struct b53_device *dev, int enable)
326 {
327 	u8 mgmt;
328 
329 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
330 
331 	if (enable)
332 		mgmt |= SM_SW_FWD_EN;
333 	else
334 		mgmt &= ~SM_SW_FWD_EN;
335 
336 	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
337 
338 	/* Include IMP port in dumb forwarding mode
339 	 */
340 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
341 	mgmt |= B53_MII_DUMB_FWDG_EN;
342 	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
343 
344 	/* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
345 	 * frames should be flooded or not.
346 	 */
347 	b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
348 	mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
349 	b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
350 }
351 
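/* Turn 802.1Q (VLAN) mode on or off and select the ingress VID check
 * policy: with filtering enabled, VID violations and VLAN table misses
 * are dropped, otherwise they are forwarded.
 */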
352 static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
353 			    bool enable_filtering)
354 {
355 	u8 mgmt, vc0, vc1, vc4 = 0, vc5;
356 
357 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
358 	b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
359 	b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);
360 
361 	if (is5325(dev) || is5365(dev)) {
362 		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
363 		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
364 	} else if (is63xx(dev)) {
365 		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
366 		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
367 	} else {
368 		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
369 		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
370 	}
371 
372 	if (enable) {
373 		vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
374 		vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
375 		vc4 &= ~VC4_ING_VID_CHECK_MASK;
376 		if (enable_filtering) {
377 			vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
378 			vc5 |= VC5_DROP_VTABLE_MISS;
379 		} else {
380 			vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
381 			vc5 &= ~VC5_DROP_VTABLE_MISS;
382 		}
383 
384 		if (is5325(dev))
385 			vc0 &= ~VC0_RESERVED_1;
386 
387 		if (is5325(dev) || is5365(dev))
388 			vc1 |= VC1_RX_MCST_TAG_EN;
389 
390 	} else {
391 		vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
392 		vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
393 		vc4 &= ~VC4_ING_VID_CHECK_MASK;
394 		vc5 &= ~VC5_DROP_VTABLE_MISS;
395 
396 		if (is5325(dev) || is5365(dev))
397 			vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
398 		else
399 			vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;
400 
401 		if (is5325(dev) || is5365(dev))
402 			vc1 &= ~VC1_RX_MCST_TAG_EN;
403 	}
404 
405 	if (!is5325(dev) && !is5365(dev))
406 		vc5 &= ~VC5_VID_FFF_EN;
407 
408 	b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
409 	b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);
410 
411 	if (is5325(dev) || is5365(dev)) {
412 		/* enable the high 8 bit vid check on 5325 */
413 		if (is5325(dev) && enable)
414 			b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
415 				   VC3_HIGH_8BIT_EN);
416 		else
417 			b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
418 
419 		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
420 		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
421 	} else if (is63xx(dev)) {
422 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
423 		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
424 		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
425 	} else {
426 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
427 		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
428 		b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
429 	}
430 
431 	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
432 
433 	dev->vlan_enabled = enable;
434 
435 	dev_dbg(dev->dev, "Port %d VLAN enabled: %d, filtering: %d\n",
436 		port, enable, enable_filtering);
437 }
438 
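/* Enable or disable jumbo frames on all enabled ports, optionally
 * including 10/100 ports; not supported on BCM5325/BCM5365.
 */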
439 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
440 {
441 	u32 port_mask = 0;
442 	u16 max_size = JMS_MIN_SIZE;
443 
444 	if (is5325(dev) || is5365(dev))
445 		return -EINVAL;
446 
447 	if (enable) {
448 		port_mask = dev->enabled_ports;
449 		max_size = JMS_MAX_SIZE;
450 		if (allow_10_100)
451 			port_mask |= JPM_10_100_JUMBO_EN;
452 	}
453 
454 	b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask);
455 	return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
456 }
457 
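/* Trigger a fast-age cycle for dynamic ARL entries matching @mask, wait
 * for FAST_AGE_DONE to clear, and restore the default dynamic-only
 * aging control. Returns -ETIMEDOUT if the cycle never completes.
 */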
458 static int b53_flush_arl(struct b53_device *dev, u8 mask)
459 {
460 	unsigned int i;
461 
462 	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
463 		   FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
464 
465 	for (i = 0; i < 10; i++) {
466 		u8 fast_age_ctrl;
467 
468 		b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
469 			  &fast_age_ctrl);
470 
471 		if (!(fast_age_ctrl & FAST_AGE_DONE))
472 			goto out;
473 
474 		msleep(1);
475 	}
476 
477 	return -ETIMEDOUT;
478 out:
479 	/* Only age dynamic entries (default behavior) */
480 	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
481 	return 0;
482 }
483 
484 static int b53_fast_age_port(struct b53_device *dev, int port)
485 {
486 	b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
487 
488 	return b53_flush_arl(dev, FAST_AGE_PORT);
489 }
490 
491 static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
492 {
493 	b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
494 
495 	return b53_flush_arl(dev, FAST_AGE_VLAN);
496 }
497 
498 void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
499 {
500 	struct b53_device *dev = ds->priv;
501 	unsigned int i;
502 	u16 pvlan;
503 
504 	/* Enable the IMP port to be in the same VLAN as the other ports
505 	 * on a per-port basis such that we only have Port i and IMP in
506 	 * the same VLAN.
507 	 */
508 	b53_for_each_port(dev, i) {
509 		b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
510 		pvlan |= BIT(cpu_port);
511 		b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
512 	}
513 }
514 EXPORT_SYMBOL(b53_imp_vlan_setup);
515 
516 static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
517 				     bool unicast)
518 {
519 	u16 uc;
520 
521 	b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
522 	if (unicast)
523 		uc |= BIT(port);
524 	else
525 		uc &= ~BIT(port);
526 	b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
527 }
528 
529 static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
530 				     bool multicast)
531 {
532 	u16 mc;
533 
534 	b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
535 	if (multicast)
536 		mc |= BIT(port);
537 	else
538 		mc &= ~BIT(port);
539 	b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
540 
541 	b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
542 	if (multicast)
543 		mc |= BIT(port);
544 	else
545 		mc &= ~BIT(port);
546 	b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
547 }
548 
549 static void b53_port_set_learning(struct b53_device *dev, int port,
550 				  bool learning)
551 {
552 	u16 reg;
553 
554 	b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
555 	if (learning)
556 		reg &= ~BIT(port);
557 	else
558 		reg |= BIT(port);
559 	b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
560 }
561 
562 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
563 {
564 	struct b53_device *dev = ds->priv;
565 	unsigned int cpu_port;
566 	int ret = 0;
567 	u16 pvlan;
568 
569 	if (!dsa_is_user_port(ds, port))
570 		return 0;
571 
572 	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
573 
574 	b53_port_set_ucast_flood(dev, port, true);
575 	b53_port_set_mcast_flood(dev, port, true);
576 	b53_port_set_learning(dev, port, false);
577 
578 	if (dev->ops->irq_enable)
579 		ret = dev->ops->irq_enable(dev, port);
580 	if (ret)
581 		return ret;
582 
583 	/* Clear the Rx and Tx disable bits and set to no spanning tree */
584 	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
585 
586 	/* Set this port, and only this one, to be in the default VLAN;
587 	 * if it is a member of a bridge, restore the membership it had
588 	 * prior to bringing down this port.
589 	 */
590 	b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
591 	pvlan &= ~0x1ff;
592 	pvlan |= BIT(port);
593 	pvlan |= dev->ports[port].vlan_ctl_mask;
594 	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
595 
596 	b53_imp_vlan_setup(ds, cpu_port);
597 
598 	/* If EEE was enabled, restore it */
599 	if (dev->ports[port].eee.eee_enabled)
600 		b53_eee_enable_set(ds, port, true);
601 
602 	return 0;
603 }
604 EXPORT_SYMBOL(b53_enable_port);
605 
606 void b53_disable_port(struct dsa_switch *ds, int port)
607 {
608 	struct b53_device *dev = ds->priv;
609 	u8 reg;
610 
611 	/* Disable Tx/Rx for the port */
612 	b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
613 	reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
614 	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
615 
616 	if (dev->ops->irq_disable)
617 		dev->ops->irq_disable(dev, port);
618 }
619 EXPORT_SYMBOL(b53_disable_port);
620 
621 void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
622 {
623 	struct b53_device *dev = ds->priv;
624 	bool tag_en = !(dev->tag_protocol == DSA_TAG_PROTO_NONE);
625 	u8 hdr_ctl, val;
626 	u16 reg;
627 
628 	/* Resolve which bit controls the Broadcom tag */
629 	switch (port) {
630 	case 8:
631 		val = BRCM_HDR_P8_EN;
632 		break;
633 	case 7:
634 		val = BRCM_HDR_P7_EN;
635 		break;
636 	case 5:
637 		val = BRCM_HDR_P5_EN;
638 		break;
639 	default:
640 		val = 0;
641 		break;
642 	}
643 
644 	/* Enable management mode if tagging is requested */
645 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &hdr_ctl);
646 	if (tag_en)
647 		hdr_ctl |= SM_SW_FWD_MODE;
648 	else
649 		hdr_ctl &= ~SM_SW_FWD_MODE;
650 	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, hdr_ctl);
651 
652 	/* Configure the appropriate IMP port */
653 	b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &hdr_ctl);
654 	if (port == 8)
655 		hdr_ctl |= GC_FRM_MGMT_PORT_MII;
656 	else if (port == 5)
657 		hdr_ctl |= GC_FRM_MGMT_PORT_M;
658 	b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
659 
660 	/* Enable Broadcom tags for IMP port */
661 	b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
662 	if (tag_en)
663 		hdr_ctl |= val;
664 	else
665 		hdr_ctl &= ~val;
666 	b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl);
667 
668 	/* Registers below are only accessible on newer devices */
669 	if (!is58xx(dev))
670 		return;
671 
672 	/* Enable reception of Broadcom tags for CPU TX (switch RX) to
673 	 * allow us to tag outgoing frames
674 	 */
675 	b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, &reg);
676 	if (tag_en)
677 		reg &= ~BIT(port);
678 	else
679 		reg |= BIT(port);
680 	b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg);
681 
682 	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
683 	 * allow delivering frames to the per-port net_devices
684 	 */
685 	b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, &reg);
686 	if (tag_en)
687 		reg &= ~BIT(port);
688 	else
689 		reg |= BIT(port);
690 	b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg);
691 }
692 EXPORT_SYMBOL(b53_brcm_hdr_setup);
693 
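/* Bring up the IMP/CPU port: accept broadcast, multicast and unicast
 * frames, set up Broadcom tagging, enable flooding and keep hardware
 * learning disabled.
 */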
694 static void b53_enable_cpu_port(struct b53_device *dev, int port)
695 {
696 	u8 port_ctrl;
697 
698 	/* BCM5325 CPU port is at 8 */
699 	if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25)
700 		port = B53_CPU_PORT;
701 
702 	port_ctrl = PORT_CTRL_RX_BCST_EN |
703 		    PORT_CTRL_RX_MCST_EN |
704 		    PORT_CTRL_RX_UCST_EN;
705 	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
706 
707 	b53_brcm_hdr_setup(dev->ds, port);
708 
709 	b53_port_set_ucast_flood(dev, port, true);
710 	b53_port_set_mcast_flood(dev, port, true);
711 	b53_port_set_learning(dev, port, false);
712 }
713 
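/* Take the MIB counters out of reset and clear the autocast enable bit
 * so they can be read over the register interface.
 */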
714 static void b53_enable_mib(struct b53_device *dev)
715 {
716 	u8 gc;
717 
718 	b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
719 	gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
720 	b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
721 }
722 
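/* BCM5325 and BCM5365 use 1 as the default port VID, all other chips
 * use 0.
 */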
723 static u16 b53_default_pvid(struct b53_device *dev)
724 {
725 	if (is5325(dev) || is5365(dev))
726 		return 1;
727 	else
728 		return 0;
729 }
730 
731 static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
732 {
733 	struct b53_device *dev = ds->priv;
734 
735 	return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
736 }
737 
738 int b53_configure_vlan(struct dsa_switch *ds)
739 {
740 	struct b53_device *dev = ds->priv;
741 	struct b53_vlan vl = { 0 };
742 	struct b53_vlan *v;
743 	int i, def_vid;
744 	u16 vid;
745 
746 	def_vid = b53_default_pvid(dev);
747 
748 	/* clear all vlan entries */
749 	if (is5325(dev) || is5365(dev)) {
750 		for (i = def_vid; i < dev->num_vlans; i++)
751 			b53_set_vlan_entry(dev, i, &vl);
752 	} else {
753 		b53_do_vlan_op(dev, VTA_CMD_CLEAR);
754 	}
755 
756 	b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering);
757 
758 	/* Create an untagged VLAN entry for the default PVID in case
759 	 * CONFIG_VLAN_8021Q is disabled and there are no calls to
760 	 * dsa_slave_vlan_rx_add_vid() to create the default VLAN
761 	 * entry. Do this only when the tagging protocol is not
762 	 * DSA_TAG_PROTO_NONE
763 	 */
764 	b53_for_each_port(dev, i) {
765 		v = &dev->vlans[def_vid];
766 		v->members |= BIT(i);
767 		if (!b53_vlan_port_needs_forced_tagged(ds, i))
768 			v->untag = v->members;
769 		b53_write16(dev, B53_VLAN_PAGE,
770 			    B53_VLAN_PORT_DEF_TAG(i), def_vid);
771 	}
772 
773 	/* Upon initial call we have not set-up any VLANs, but upon
774 	 * system resume, we need to restore all VLAN entries.
775 	 */
776 	for (vid = def_vid; vid < dev->num_vlans; vid++) {
777 		v = &dev->vlans[vid];
778 
779 		if (!v->members)
780 			continue;
781 
782 		b53_set_vlan_entry(dev, vid, v);
783 		b53_fast_age_vlan(dev, vid);
784 	}
785 
786 	return 0;
787 }
788 EXPORT_SYMBOL(b53_configure_vlan);
789 
790 static void b53_switch_reset_gpio(struct b53_device *dev)
791 {
792 	int gpio = dev->reset_gpio;
793 
794 	if (gpio < 0)
795 		return;
796 
797 	/* Reset sequence: RESET low(50ms)->high(20ms)
798 	 */
799 	gpio_set_value(gpio, 0);
800 	mdelay(50);
801 
802 	gpio_set_value(gpio, 1);
803 	mdelay(20);
804 
805 	dev->current_page = 0xff;
806 }
807 
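/* Full switch reset: toggle the optional reset GPIO, issue the
 * chip-specific software reset, re-enable forwarding if it was left
 * disabled, re-enable the MIB engine and flush static ARL entries.
 */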
808 static int b53_switch_reset(struct b53_device *dev)
809 {
810 	unsigned int timeout = 1000;
811 	u8 mgmt, reg;
812 
813 	b53_switch_reset_gpio(dev);
814 
815 	if (is539x(dev)) {
816 		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
817 		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
818 	}
819 
820 	/* This is specific to 58xx devices here, do not use is58xx() which
821 	 * covers the larger Starfighter 2 family, including 7445/7278 which
822 	 * still use this driver as a library and need to perform the reset
823 	 * earlier.
824 	 */
825 	if (dev->chip_id == BCM58XX_DEVICE_ID ||
826 	    dev->chip_id == BCM583XX_DEVICE_ID) {
827 		b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
828 		reg |= SW_RST | EN_SW_RST | EN_CH_RST;
829 		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
830 
831 		do {
832 			b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
833 			if (!(reg & SW_RST))
834 				break;
835 
836 			usleep_range(1000, 2000);
837 		} while (timeout-- > 0);
838 
839 		if (timeout == 0) {
840 			dev_err(dev->dev,
841 				"Timeout waiting for SW_RST to clear!\n");
842 			return -ETIMEDOUT;
843 		}
844 	}
845 
846 	b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
847 
848 	if (!(mgmt & SM_SW_FWD_EN)) {
849 		mgmt &= ~SM_SW_FWD_MODE;
850 		mgmt |= SM_SW_FWD_EN;
851 
852 		b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
853 		b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
854 
855 		if (!(mgmt & SM_SW_FWD_EN)) {
856 			dev_err(dev->dev, "Failed to enable switch!\n");
857 			return -EINVAL;
858 		}
859 	}
860 
861 	b53_enable_mib(dev);
862 
863 	return b53_flush_arl(dev, FAST_AGE_STATIC);
864 }
865 
866 static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
867 {
868 	struct b53_device *priv = ds->priv;
869 	u16 value = 0;
870 	int ret;
871 
872 	if (priv->ops->phy_read16)
873 		ret = priv->ops->phy_read16(priv, addr, reg, &value);
874 	else
875 		ret = b53_read16(priv, B53_PORT_MII_PAGE(addr),
876 				 reg * 2, &value);
877 
878 	return ret ? ret : value;
879 }
880 
881 static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
882 {
883 	struct b53_device *priv = ds->priv;
884 
885 	if (priv->ops->phy_write16)
886 		return priv->ops->phy_write16(priv, addr, reg, val);
887 
888 	return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
889 }
890 
891 static int b53_reset_switch(struct b53_device *priv)
892 {
893 	/* reset vlans */
894 	memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
895 	memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
896 
897 	priv->serdes_lane = B53_INVALID_LANE;
898 
899 	return b53_switch_reset(priv);
900 }
901 
902 static int b53_apply_config(struct b53_device *priv)
903 {
904 	/* disable switching */
905 	b53_set_forwarding(priv, 0);
906 
907 	b53_configure_vlan(priv->ds);
908 
909 	/* enable switching */
910 	b53_set_forwarding(priv, 1);
911 
912 	return 0;
913 }
914 
915 static void b53_reset_mib(struct b53_device *priv)
916 {
917 	u8 gc;
918 
919 	b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
920 
921 	b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB);
922 	msleep(1);
923 	b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB);
924 	msleep(1);
925 }
926 
927 static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
928 {
929 	if (is5365(dev))
930 		return b53_mibs_65;
931 	else if (is63xx(dev))
932 		return b53_mibs_63xx;
933 	else if (is58xx(dev))
934 		return b53_mibs_58xx;
935 	else
936 		return b53_mibs;
937 }
938 
939 static unsigned int b53_get_mib_size(struct b53_device *dev)
940 {
941 	if (is5365(dev))
942 		return B53_MIBS_65_SIZE;
943 	else if (is63xx(dev))
944 		return B53_MIBS_63XX_SIZE;
945 	else if (is58xx(dev))
946 		return B53_MIBS_58XX_SIZE;
947 	else
948 		return B53_MIBS_SIZE;
949 }
950 
951 static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
952 {
953 	/* These ports typically do not have built-in PHYs */
954 	switch (port) {
955 	case B53_CPU_PORT_25:
956 	case 7:
957 	case B53_CPU_PORT:
958 		return NULL;
959 	}
960 
961 	return mdiobus_get_phy(ds->slave_mii_bus, port);
962 }
963 
964 void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
965 		     uint8_t *data)
966 {
967 	struct b53_device *dev = ds->priv;
968 	const struct b53_mib_desc *mibs = b53_get_mib(dev);
969 	unsigned int mib_size = b53_get_mib_size(dev);
970 	struct phy_device *phydev;
971 	unsigned int i;
972 
973 	if (stringset == ETH_SS_STATS) {
974 		for (i = 0; i < mib_size; i++)
975 			strlcpy(data + i * ETH_GSTRING_LEN,
976 				mibs[i].name, ETH_GSTRING_LEN);
977 	} else if (stringset == ETH_SS_PHY_STATS) {
978 		phydev = b53_get_phy_device(ds, port);
979 		if (!phydev)
980 			return;
981 
982 		phy_ethtool_get_strings(phydev, data);
983 	}
984 }
985 EXPORT_SYMBOL(b53_get_strings);
986 
987 void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
988 {
989 	struct b53_device *dev = ds->priv;
990 	const struct b53_mib_desc *mibs = b53_get_mib(dev);
991 	unsigned int mib_size = b53_get_mib_size(dev);
992 	const struct b53_mib_desc *s;
993 	unsigned int i;
994 	u64 val = 0;
995 
996 	if (is5365(dev) && port == 5)
997 		port = 8;
998 
999 	mutex_lock(&dev->stats_mutex);
1000 
1001 	for (i = 0; i < mib_size; i++) {
1002 		s = &mibs[i];
1003 
1004 		if (s->size == 8) {
1005 			b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val);
1006 		} else {
1007 			u32 val32;
1008 
1009 			b53_read32(dev, B53_MIB_PAGE(port), s->offset,
1010 				   &val32);
1011 			val = val32;
1012 		}
1013 		data[i] = (u64)val;
1014 	}
1015 
1016 	mutex_unlock(&dev->stats_mutex);
1017 }
1018 EXPORT_SYMBOL(b53_get_ethtool_stats);
1019 
1020 void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
1021 {
1022 	struct phy_device *phydev;
1023 
1024 	phydev = b53_get_phy_device(ds, port);
1025 	if (!phydev)
1026 		return;
1027 
1028 	phy_ethtool_get_stats(phydev, NULL, data);
1029 }
1030 EXPORT_SYMBOL(b53_get_ethtool_phy_stats);
1031 
1032 int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
1033 {
1034 	struct b53_device *dev = ds->priv;
1035 	struct phy_device *phydev;
1036 
1037 	if (sset == ETH_SS_STATS) {
1038 		return b53_get_mib_size(dev);
1039 	} else if (sset == ETH_SS_PHY_STATS) {
1040 		phydev = b53_get_phy_device(ds, port);
1041 		if (!phydev)
1042 			return 0;
1043 
1044 		return phy_ethtool_get_sset_count(phydev);
1045 	}
1046 
1047 	return 0;
1048 }
1049 EXPORT_SYMBOL(b53_get_sset_count);
1050 
1051 enum b53_devlink_resource_id {
1052 	B53_DEVLINK_PARAM_ID_VLAN_TABLE,
1053 };
1054 
1055 static u64 b53_devlink_vlan_table_get(void *priv)
1056 {
1057 	struct b53_device *dev = priv;
1058 	struct b53_vlan *vl;
1059 	unsigned int i;
1060 	u64 count = 0;
1061 
1062 	for (i = 0; i < dev->num_vlans; i++) {
1063 		vl = &dev->vlans[i];
1064 		if (vl->members)
1065 			count++;
1066 	}
1067 
1068 	return count;
1069 }
1070 
1071 int b53_setup_devlink_resources(struct dsa_switch *ds)
1072 {
1073 	struct devlink_resource_size_params size_params;
1074 	struct b53_device *dev = ds->priv;
1075 	int err;
1076 
1077 	devlink_resource_size_params_init(&size_params, dev->num_vlans,
1078 					  dev->num_vlans,
1079 					  1, DEVLINK_RESOURCE_UNIT_ENTRY);
1080 
1081 	err = dsa_devlink_resource_register(ds, "VLAN", dev->num_vlans,
1082 					    B53_DEVLINK_PARAM_ID_VLAN_TABLE,
1083 					    DEVLINK_RESOURCE_ID_PARENT_TOP,
1084 					    &size_params);
1085 	if (err)
1086 		goto out;
1087 
1088 	dsa_devlink_resource_occ_get_register(ds,
1089 					      B53_DEVLINK_PARAM_ID_VLAN_TABLE,
1090 					      b53_devlink_vlan_table_get, dev);
1091 
1092 	return 0;
1093 out:
1094 	dsa_devlink_resources_unregister(ds);
1095 	return err;
1096 }
1097 EXPORT_SYMBOL(b53_setup_devlink_resources);
1098 
1099 static int b53_setup(struct dsa_switch *ds)
1100 {
1101 	struct b53_device *dev = ds->priv;
1102 	unsigned int port;
1103 	int ret;
1104 
1105 	/* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set, which
1106 	 * forces the CPU port to be tagged in all VLANs.
1107 	 */
1108 	ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;
1109 
1110 	ret = b53_reset_switch(dev);
1111 	if (ret) {
1112 		dev_err(ds->dev, "failed to reset switch\n");
1113 		return ret;
1114 	}
1115 
1116 	b53_reset_mib(dev);
1117 
1118 	ret = b53_apply_config(dev);
1119 	if (ret) {
1120 		dev_err(ds->dev, "failed to apply configuration\n");
1121 		return ret;
1122 	}
1123 
1124 	/* Configure IMP/CPU port, disable all other ports. Enabled
1125 	 * ports will be configured with .port_enable
1126 	 */
1127 	for (port = 0; port < dev->num_ports; port++) {
1128 		if (dsa_is_cpu_port(ds, port))
1129 			b53_enable_cpu_port(dev, port);
1130 		else
1131 			b53_disable_port(ds, port);
1132 	}
1133 
1134 	return b53_setup_devlink_resources(ds);
1135 }
1136 
1137 static void b53_teardown(struct dsa_switch *ds)
1138 {
1139 	dsa_devlink_resources_unregister(ds);
1140 }
1141 
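/* Force the link up or down through the port override register, using
 * the IMP-specific override register when @port is the IMP port.
 */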
1142 static void b53_force_link(struct b53_device *dev, int port, int link)
1143 {
1144 	u8 reg, val, off;
1145 
1146 	/* Override the port settings */
1147 	if (port == dev->imp_port) {
1148 		off = B53_PORT_OVERRIDE_CTRL;
1149 		val = PORT_OVERRIDE_EN;
1150 	} else {
1151 		off = B53_GMII_PORT_OVERRIDE_CTRL(port);
1152 		val = GMII_PO_EN;
1153 	}
1154 
1155 	b53_read8(dev, B53_CTRL_PAGE, off, &reg);
1156 	reg |= val;
1157 	if (link)
1158 		reg |= PORT_OVERRIDE_LINK;
1159 	else
1160 		reg &= ~PORT_OVERRIDE_LINK;
1161 	b53_write8(dev, B53_CTRL_PAGE, off, reg);
1162 }
1163 
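/* Force speed, duplex and pause settings through the port override
 * register; 2 Gb/s is encoded by combining the 2000M and 1000M speed
 * override bits.
 */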
1164 static void b53_force_port_config(struct b53_device *dev, int port,
1165 				  int speed, int duplex,
1166 				  bool tx_pause, bool rx_pause)
1167 {
1168 	u8 reg, val, off;
1169 
1170 	/* Override the port settings */
1171 	if (port == dev->imp_port) {
1172 		off = B53_PORT_OVERRIDE_CTRL;
1173 		val = PORT_OVERRIDE_EN;
1174 	} else {
1175 		off = B53_GMII_PORT_OVERRIDE_CTRL(port);
1176 		val = GMII_PO_EN;
1177 	}
1178 
1179 	b53_read8(dev, B53_CTRL_PAGE, off, &reg);
1180 	reg |= val;
1181 	if (duplex == DUPLEX_FULL)
1182 		reg |= PORT_OVERRIDE_FULL_DUPLEX;
1183 	else
1184 		reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
1185 
1186 	switch (speed) {
1187 	case 2000:
1188 		reg |= PORT_OVERRIDE_SPEED_2000M;
1189 		fallthrough;
1190 	case SPEED_1000:
1191 		reg |= PORT_OVERRIDE_SPEED_1000M;
1192 		break;
1193 	case SPEED_100:
1194 		reg |= PORT_OVERRIDE_SPEED_100M;
1195 		break;
1196 	case SPEED_10:
1197 		reg |= PORT_OVERRIDE_SPEED_10M;
1198 		break;
1199 	default:
1200 		dev_err(dev->dev, "unknown speed: %d\n", speed);
1201 		return;
1202 	}
1203 
1204 	if (rx_pause)
1205 		reg |= PORT_OVERRIDE_RX_FLOW;
1206 	if (tx_pause)
1207 		reg |= PORT_OVERRIDE_TX_FLOW;
1208 
1209 	b53_write8(dev, B53_CTRL_PAGE, off, reg);
1210 }
1211 
1212 static void b53_adjust_link(struct dsa_switch *ds, int port,
1213 			    struct phy_device *phydev)
1214 {
1215 	struct b53_device *dev = ds->priv;
1216 	struct ethtool_eee *p = &dev->ports[port].eee;
1217 	u8 rgmii_ctrl = 0, reg = 0, off;
1218 	bool tx_pause = false;
1219 	bool rx_pause = false;
1220 
1221 	if (!phy_is_pseudo_fixed_link(phydev))
1222 		return;
1223 
1224 	/* Enable flow control on BCM5301x's CPU port */
1225 	if (is5301x(dev) && port == dev->cpu_port)
1226 		tx_pause = rx_pause = true;
1227 
1228 	if (phydev->pause) {
1229 		if (phydev->asym_pause)
1230 			tx_pause = true;
1231 		rx_pause = true;
1232 	}
1233 
1234 	b53_force_port_config(dev, port, phydev->speed, phydev->duplex,
1235 			      tx_pause, rx_pause);
1236 	b53_force_link(dev, port, phydev->link);
1237 
1238 	if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
1239 		if (port == dev->imp_port)
1240 			off = B53_RGMII_CTRL_IMP;
1241 		else
1242 			off = B53_RGMII_CTRL_P(port);
1243 
1244 		/* Configure the port RGMII clock delay with the DLLs disabled
1245 		 * and tx_clk aligned timing (restoring the reset defaults)
1246 		 */
1247 		b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
1248 		rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
1249 				RGMII_CTRL_TIMING_SEL);
1250 
1251 		/* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
1252 		 * sure that we enable the port TX clock internal delay to
1253 		 * account for this internal delay that is inserted, otherwise
1254 		 * the switch won't be able to receive correctly.
1255 		 *
1256 		 * PHY_INTERFACE_MODE_RGMII means that neither transmit nor
1257 		 * receive delay is introduced by the PHY, so the BCM53125
1258 		 * must be configured to insert the delays itself to account
1259 		 * for their absence.
1260 		 *
1261 		 * The BCM53125 switch has its RX clock and TX clock control
1262 		 * swapped, hence the reason why we modify the TX clock path in
1263 		 * the "RGMII" case
1264 		 */
1265 		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
1266 			rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
1267 		if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
1268 			rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
1269 		rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
1270 		b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
1271 
1272 		dev_info(ds->dev, "Configured port %d for %s\n", port,
1273 			 phy_modes(phydev->interface));
1274 	}
1275 
1276 	/* configure MII port if necessary */
1277 	if (is5325(dev)) {
1278 		b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1279 			  &reg);
1280 
1281 		/* reverse mii needs to be enabled */
1282 		if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
1283 			b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1284 				   reg | PORT_OVERRIDE_RV_MII_25);
1285 			b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
1286 				  &reg);
1287 
1288 			if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
1289 				dev_err(ds->dev,
1290 					"Failed to enable reverse MII mode\n");
1291 				return;
1292 			}
1293 		}
1294 	} else if (is5301x(dev)) {
1295 		if (port != dev->cpu_port) {
1296 			b53_force_port_config(dev, dev->cpu_port, 2000,
1297 					      DUPLEX_FULL, true, true);
1298 			b53_force_link(dev, dev->cpu_port, 1);
1299 		}
1300 	}
1301 
1302 	/* Re-negotiate EEE if it was enabled already */
1303 	p->eee_enabled = b53_eee_init(ds, port, phydev);
1304 }
1305 
1306 void b53_port_event(struct dsa_switch *ds, int port)
1307 {
1308 	struct b53_device *dev = ds->priv;
1309 	bool link;
1310 	u16 sts;
1311 
1312 	b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts);
1313 	link = !!(sts & BIT(port));
1314 	dsa_port_phylink_mac_change(ds, port, link);
1315 }
1316 EXPORT_SYMBOL(b53_port_event);
1317 
1318 void b53_phylink_validate(struct dsa_switch *ds, int port,
1319 			  unsigned long *supported,
1320 			  struct phylink_link_state *state)
1321 {
1322 	struct b53_device *dev = ds->priv;
1323 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1324 
1325 	if (dev->ops->serdes_phylink_validate)
1326 		dev->ops->serdes_phylink_validate(dev, port, mask, state);
1327 
1328 	/* Allow all the expected bits */
1329 	phylink_set(mask, Autoneg);
1330 	phylink_set_port_modes(mask);
1331 	phylink_set(mask, Pause);
1332 	phylink_set(mask, Asym_Pause);
1333 
1334 	/* Except on 5325/5365 and for MII, Reverse MII and 802.3z links, we
1335 	 * support Gigabit, including Half duplex.
1336 	 */
1337 	if (state->interface != PHY_INTERFACE_MODE_MII &&
1338 	    state->interface != PHY_INTERFACE_MODE_REVMII &&
1339 	    !phy_interface_mode_is_8023z(state->interface) &&
1340 	    !(is5325(dev) || is5365(dev))) {
1341 		phylink_set(mask, 1000baseT_Full);
1342 		phylink_set(mask, 1000baseT_Half);
1343 	}
1344 
1345 	if (!phy_interface_mode_is_8023z(state->interface)) {
1346 		phylink_set(mask, 10baseT_Half);
1347 		phylink_set(mask, 10baseT_Full);
1348 		phylink_set(mask, 100baseT_Half);
1349 		phylink_set(mask, 100baseT_Full);
1350 	}
1351 
1352 	bitmap_and(supported, supported, mask,
1353 		   __ETHTOOL_LINK_MODE_MASK_NBITS);
1354 	bitmap_and(state->advertising, state->advertising, mask,
1355 		   __ETHTOOL_LINK_MODE_MASK_NBITS);
1356 
1357 	phylink_helper_basex_speed(state);
1358 }
1359 EXPORT_SYMBOL(b53_phylink_validate);
1360 
1361 int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
1362 			       struct phylink_link_state *state)
1363 {
1364 	struct b53_device *dev = ds->priv;
1365 	int ret = -EOPNOTSUPP;
1366 
1367 	if ((phy_interface_mode_is_8023z(state->interface) ||
1368 	     state->interface == PHY_INTERFACE_MODE_SGMII) &&
1369 	     dev->ops->serdes_link_state)
1370 		ret = dev->ops->serdes_link_state(dev, port, state);
1371 
1372 	return ret;
1373 }
1374 EXPORT_SYMBOL(b53_phylink_mac_link_state);
1375 
1376 void b53_phylink_mac_config(struct dsa_switch *ds, int port,
1377 			    unsigned int mode,
1378 			    const struct phylink_link_state *state)
1379 {
1380 	struct b53_device *dev = ds->priv;
1381 
1382 	if (mode == MLO_AN_PHY || mode == MLO_AN_FIXED)
1383 		return;
1384 
1385 	if ((phy_interface_mode_is_8023z(state->interface) ||
1386 	     state->interface == PHY_INTERFACE_MODE_SGMII) &&
1387 	     dev->ops->serdes_config)
1388 		dev->ops->serdes_config(dev, port, mode, state);
1389 }
1390 EXPORT_SYMBOL(b53_phylink_mac_config);
1391 
1392 void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port)
1393 {
1394 	struct b53_device *dev = ds->priv;
1395 
1396 	if (dev->ops->serdes_an_restart)
1397 		dev->ops->serdes_an_restart(dev, port);
1398 }
1399 EXPORT_SYMBOL(b53_phylink_mac_an_restart);
1400 
1401 void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
1402 			       unsigned int mode,
1403 			       phy_interface_t interface)
1404 {
1405 	struct b53_device *dev = ds->priv;
1406 
1407 	if (mode == MLO_AN_PHY)
1408 		return;
1409 
1410 	if (mode == MLO_AN_FIXED) {
1411 		b53_force_link(dev, port, false);
1412 		return;
1413 	}
1414 
1415 	if (phy_interface_mode_is_8023z(interface) &&
1416 	    dev->ops->serdes_link_set)
1417 		dev->ops->serdes_link_set(dev, port, mode, interface, false);
1418 }
1419 EXPORT_SYMBOL(b53_phylink_mac_link_down);
1420 
1421 void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
1422 			     unsigned int mode,
1423 			     phy_interface_t interface,
1424 			     struct phy_device *phydev,
1425 			     int speed, int duplex,
1426 			     bool tx_pause, bool rx_pause)
1427 {
1428 	struct b53_device *dev = ds->priv;
1429 
1430 	if (mode == MLO_AN_PHY)
1431 		return;
1432 
1433 	if (mode == MLO_AN_FIXED) {
1434 		b53_force_port_config(dev, port, speed, duplex,
1435 				      tx_pause, rx_pause);
1436 		b53_force_link(dev, port, true);
1437 		return;
1438 	}
1439 
1440 	if (phy_interface_mode_is_8023z(interface) &&
1441 	    dev->ops->serdes_link_set)
1442 		dev->ops->serdes_link_set(dev, port, mode, interface, true);
1443 }
1444 EXPORT_SYMBOL(b53_phylink_mac_link_up);
1445 
1446 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
1447 		       struct netlink_ext_ack *extack)
1448 {
1449 	struct b53_device *dev = ds->priv;
1450 
1451 	b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering);
1452 
1453 	return 0;
1454 }
1455 EXPORT_SYMBOL(b53_vlan_filtering);
1456 
1457 static int b53_vlan_prepare(struct dsa_switch *ds, int port,
1458 			    const struct switchdev_obj_port_vlan *vlan)
1459 {
1460 	struct b53_device *dev = ds->priv;
1461 
1462 	if ((is5325(dev) || is5365(dev)) && vlan->vid == 0)
1463 		return -EOPNOTSUPP;
1464 
1465 	/* Port 7 on 7278 connects to the ASP's UniMAC, which is not capable
1466 	 * of receiving VLAN tagged frames at all; we can still allow the port
1467 	 * to be configured for egress untagged.
1468 	 */
1469 	if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 &&
1470 	    !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
1471 		return -EINVAL;
1472 
1473 	if (vlan->vid >= dev->num_vlans)
1474 		return -ERANGE;
1475 
1476 	b53_enable_vlan(dev, port, true, ds->vlan_filtering);
1477 
1478 	return 0;
1479 }
1480 
1481 int b53_vlan_add(struct dsa_switch *ds, int port,
1482 		 const struct switchdev_obj_port_vlan *vlan,
1483 		 struct netlink_ext_ack *extack)
1484 {
1485 	struct b53_device *dev = ds->priv;
1486 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1487 	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1488 	struct b53_vlan *vl;
1489 	int err;
1490 
1491 	err = b53_vlan_prepare(ds, port, vlan);
1492 	if (err)
1493 		return err;
1494 
1495 	vl = &dev->vlans[vlan->vid];
1496 
1497 	b53_get_vlan_entry(dev, vlan->vid, vl);
1498 
1499 	if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev))
1500 		untagged = true;
1501 
1502 	vl->members |= BIT(port);
1503 	if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
1504 		vl->untag |= BIT(port);
1505 	else
1506 		vl->untag &= ~BIT(port);
1507 
1508 	b53_set_vlan_entry(dev, vlan->vid, vl);
1509 	b53_fast_age_vlan(dev, vlan->vid);
1510 
1511 	if (pvid && !dsa_is_cpu_port(ds, port)) {
1512 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1513 			    vlan->vid);
1514 		b53_fast_age_vlan(dev, vlan->vid);
1515 	}
1516 
1517 	return 0;
1518 }
1519 EXPORT_SYMBOL(b53_vlan_add);
1520 
1521 int b53_vlan_del(struct dsa_switch *ds, int port,
1522 		 const struct switchdev_obj_port_vlan *vlan)
1523 {
1524 	struct b53_device *dev = ds->priv;
1525 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1526 	struct b53_vlan *vl;
1527 	u16 pvid;
1528 
1529 	b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
1530 
1531 	vl = &dev->vlans[vlan->vid];
1532 
1533 	b53_get_vlan_entry(dev, vlan->vid, vl);
1534 
1535 	vl->members &= ~BIT(port);
1536 
1537 	if (pvid == vlan->vid)
1538 		pvid = b53_default_pvid(dev);
1539 
1540 	if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
1541 		vl->untag &= ~(BIT(port));
1542 
1543 	b53_set_vlan_entry(dev, vlan->vid, vl);
1544 	b53_fast_age_vlan(dev, vlan->vid);
1545 
1546 	b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
1547 	b53_fast_age_vlan(dev, pvid);
1548 
1549 	return 0;
1550 }
1551 EXPORT_SYMBOL(b53_vlan_del);
1552 
1553 /* Address Resolution Logic routines */
1554 static int b53_arl_op_wait(struct b53_device *dev)
1555 {
1556 	unsigned int timeout = 10;
1557 	u8 reg;
1558 
1559 	do {
1560 		b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1561 		if (!(reg & ARLTBL_START_DONE))
1562 			return 0;
1563 
1564 		usleep_range(1000, 2000);
1565 	} while (timeout--);
1566 
1567 	dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);
1568 
1569 	return -ETIMEDOUT;
1570 }
1571 
1572 static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
1573 {
1574 	u8 reg;
1575 
1576 	if (op > ARLTBL_RW)
1577 		return -EINVAL;
1578 
1579 	b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
1580 	reg |= ARLTBL_START_DONE;
1581 	if (op)
1582 		reg |= ARLTBL_RW;
1583 	else
1584 		reg &= ~ARLTBL_RW;
1585 	if (dev->vlan_enabled)
1586 		reg &= ~ARLTBL_IVL_SVL_SELECT;
1587 	else
1588 		reg |= ARLTBL_IVL_SVL_SELECT;
1589 	b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
1590 
1591 	return b53_arl_op_wait(dev);
1592 }
1593 
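/* Scan the ARL bins loaded by the last read operation for @mac (and
 * @vid when VLANs are enabled). Returns 0 with *idx set on a match,
 * -ENOENT with *idx pointing at a free bin, or -ENOSPC when every bin
 * holds another valid entry.
 */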
1594 static int b53_arl_read(struct b53_device *dev, u64 mac,
1595 			u16 vid, struct b53_arl_entry *ent, u8 *idx)
1596 {
1597 	DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
1598 	unsigned int i;
1599 	int ret;
1600 
1601 	ret = b53_arl_op_wait(dev);
1602 	if (ret)
1603 		return ret;
1604 
1605 	bitmap_zero(free_bins, dev->num_arl_bins);
1606 
1607 	/* Read the bins */
1608 	for (i = 0; i < dev->num_arl_bins; i++) {
1609 		u64 mac_vid;
1610 		u32 fwd_entry;
1611 
1612 		b53_read64(dev, B53_ARLIO_PAGE,
1613 			   B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
1614 		b53_read32(dev, B53_ARLIO_PAGE,
1615 			   B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
1616 		b53_arl_to_entry(ent, mac_vid, fwd_entry);
1617 
1618 		if (!(fwd_entry & ARLTBL_VALID)) {
1619 			set_bit(i, free_bins);
1620 			continue;
1621 		}
1622 		if ((mac_vid & ARLTBL_MAC_MASK) != mac)
1623 			continue;
1624 		if (dev->vlan_enabled &&
1625 		    ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
1626 			continue;
1627 		*idx = i;
1628 		return 0;
1629 	}
1630 
1631 	if (bitmap_weight(free_bins, dev->num_arl_bins) == 0)
1632 		return -ENOSPC;
1633 
1634 	*idx = find_first_bit(free_bins, dev->num_arl_bins);
1635 
1636 	return -ENOENT;
1637 }
1638 
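/* Look up @addr/@vid in the ARL and, unless @op requests a read only,
 * insert, update or clear the matching entry. Multicast addresses keep
 * a port bitmask and remain valid while at least one port is set.
 */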
1639 static int b53_arl_op(struct b53_device *dev, int op, int port,
1640 		      const unsigned char *addr, u16 vid, bool is_valid)
1641 {
1642 	struct b53_arl_entry ent;
1643 	u32 fwd_entry;
1644 	u64 mac, mac_vid = 0;
1645 	u8 idx = 0;
1646 	int ret;
1647 
1648 	/* Convert the array into a 64-bit MAC */
1649 	mac = ether_addr_to_u64(addr);
1650 
1651 	/* Perform a read for the given MAC and VID */
1652 	b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
1653 	b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
1654 
1655 	/* Issue a read operation for this MAC */
1656 	ret = b53_arl_rw_op(dev, 1);
1657 	if (ret)
1658 		return ret;
1659 
1660 	ret = b53_arl_read(dev, mac, vid, &ent, &idx);
1661 
1662 	/* If this is a read, just finish now */
1663 	if (op)
1664 		return ret;
1665 
1666 	switch (ret) {
1667 	case -ETIMEDOUT:
1668 		return ret;
1669 	case -ENOSPC:
1670 		dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
1671 			addr, vid);
1672 		return is_valid ? ret : 0;
1673 	case -ENOENT:
1674 		/* We could not find a matching MAC, so reset to a new entry */
1675 		dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
1676 			addr, vid, idx);
1677 		fwd_entry = 0;
1678 		break;
1679 	default:
1680 		dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
1681 			addr, vid, idx);
1682 		break;
1683 	}
1684 
1685 	/* For a multicast address, the port is a bitmask and the entry stays
1686 	 * valid as long as at least one port is still active
1687 	 */
1688 	if (!is_multicast_ether_addr(addr)) {
1689 		ent.port = port;
1690 		ent.is_valid = is_valid;
1691 	} else {
1692 		if (is_valid)
1693 			ent.port |= BIT(port);
1694 		else
1695 			ent.port &= ~BIT(port);
1696 
1697 		ent.is_valid = !!(ent.port);
1698 	}
1699 
1700 	ent.vid = vid;
1701 	ent.is_static = true;
1702 	ent.is_age = false;
1703 	memcpy(ent.mac, addr, ETH_ALEN);
1704 	b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
1705 
1706 	b53_write64(dev, B53_ARLIO_PAGE,
1707 		    B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
1708 	b53_write32(dev, B53_ARLIO_PAGE,
1709 		    B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
1710 
1711 	return b53_arl_rw_op(dev, 0);
1712 }
1713 
1714 int b53_fdb_add(struct dsa_switch *ds, int port,
1715 		const unsigned char *addr, u16 vid)
1716 {
1717 	struct b53_device *priv = ds->priv;
1718 
1719 	/* 5325 and 5365 require some more massaging, but could
1720 	 * be supported eventually
1721 	 */
1722 	if (is5325(priv) || is5365(priv))
1723 		return -EOPNOTSUPP;
1724 
1725 	return b53_arl_op(priv, 0, port, addr, vid, true);
1726 }
1727 EXPORT_SYMBOL(b53_fdb_add);
1728 
1729 int b53_fdb_del(struct dsa_switch *ds, int port,
1730 		const unsigned char *addr, u16 vid)
1731 {
1732 	struct b53_device *priv = ds->priv;
1733 
1734 	return b53_arl_op(priv, 0, port, addr, vid, false);
1735 }
1736 EXPORT_SYMBOL(b53_fdb_del);
1737 
1738 static int b53_arl_search_wait(struct b53_device *dev)
1739 {
1740 	unsigned int timeout = 1000;
1741 	u8 reg;
1742 
1743 	do {
1744 		b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
1745 		if (!(reg & ARL_SRCH_STDN))
1746 			return 0;
1747 
1748 		if (reg & ARL_SRCH_VLID)
1749 			return 0;
1750 
1751 		usleep_range(1000, 2000);
1752 	} while (timeout--);
1753 
1754 	return -ETIMEDOUT;
1755 }
1756 
1757 static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
1758 			      struct b53_arl_entry *ent)
1759 {
1760 	u64 mac_vid;
1761 	u32 fwd_entry;
1762 
1763 	b53_read64(dev, B53_ARLIO_PAGE,
1764 		   B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
1765 	b53_read32(dev, B53_ARLIO_PAGE,
1766 		   B53_ARL_SRCH_RSTL(idx), &fwd_entry);
1767 	b53_arl_to_entry(ent, mac_vid, fwd_entry);
1768 }
1769 
1770 static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
1771 			dsa_fdb_dump_cb_t *cb, void *data)
1772 {
1773 	if (!ent->is_valid)
1774 		return 0;
1775 
1776 	if (port != ent->port)
1777 		return 0;
1778 
1779 	return cb(ent->mac, ent->vid, ent->is_static, data);
1780 }
1781 
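/* Walk the whole ARL using the hardware search engine and report every valid
 * entry that belongs to @port back to the DSA core. Each search step yields
 * one result, or two on switches with more than two bins per bucket, so the
 * loop is bounded by half the maximum number of ARL entries.
 */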
1782 int b53_fdb_dump(struct dsa_switch *ds, int port,
1783 		 dsa_fdb_dump_cb_t *cb, void *data)
1784 {
1785 	struct b53_device *priv = ds->priv;
1786 	struct b53_arl_entry results[2];
1787 	unsigned int count = 0;
1788 	int ret;
1789 	u8 reg;
1790 
1791 	/* Start search operation */
1792 	reg = ARL_SRCH_STDN;
1793 	b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
1794 
1795 	do {
1796 		ret = b53_arl_search_wait(priv);
1797 		if (ret)
1798 			return ret;
1799 
1800 		b53_arl_search_rd(priv, 0, &results[0]);
1801 		ret = b53_fdb_copy(port, &results[0], cb, data);
1802 		if (ret)
1803 			return ret;
1804 
1805 		if (priv->num_arl_bins > 2) {
1806 			b53_arl_search_rd(priv, 1, &results[1]);
1807 			ret = b53_fdb_copy(port, &results[1], cb, data);
1808 			if (ret)
1809 				return ret;
1810 
1811 			if (!results[0].is_valid && !results[1].is_valid)
1812 				break;
1813 		}
1814 
1815 	} while (count++ < b53_max_arl_entries(priv) / 2);
1816 
1817 	return 0;
1818 }
1819 EXPORT_SYMBOL(b53_fdb_dump);
1820 
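/* Multicast database entries live in the same ARL as unicast entries;
 * b53_arl_op() treats the port field of a multicast entry as a bitmask, so
 * adding or deleting an MDB entry simply sets or clears this port's bit and
 * keeps the entry valid while at least one port remains.
 */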
1821 int b53_mdb_add(struct dsa_switch *ds, int port,
1822 		const struct switchdev_obj_port_mdb *mdb)
1823 {
1824 	struct b53_device *priv = ds->priv;
1825 
1826 	/* 5325 and 5365 require some more massaging, but could
1827 	 * be supported eventually
1828 	 */
1829 	if (is5325(priv) || is5365(priv))
1830 		return -EOPNOTSUPP;
1831 
1832 	return b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
1833 }
1834 EXPORT_SYMBOL(b53_mdb_add);
1835 
1836 int b53_mdb_del(struct dsa_switch *ds, int port,
1837 		const struct switchdev_obj_port_mdb *mdb)
1838 {
1839 	struct b53_device *priv = ds->priv;
1840 	int ret;
1841 
1842 	ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
1843 	if (ret)
1844 		dev_err(ds->dev, "failed to delete MDB entry\n");
1845 
1846 	return ret;
1847 }
1848 EXPORT_SYMBOL(b53_mdb_del);
1849 
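/* Bridging is implemented with port-based VLANs: every port has a port mask
 * register (B53_PVLAN_PORT_MASK) listing the ports it may forward to. Joining
 * a bridge adds this port to the masks of all existing members and all
 * members to this port's own mask.
 */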
1850 int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
1851 {
1852 	struct b53_device *dev = ds->priv;
1853 	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
1854 	u16 pvlan, reg;
1855 	unsigned int i;
1856 
1857 	/* On 7278, port 7 which connects to the ASP should only receive
1858 	 * traffic from matching CFP rules.
1859 	 */
1860 	if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
1861 		return -EINVAL;
1862 
1863 	/* Make this port leave the "join all VLANs" group since we will have
1864 	 * proper VLAN entries from now on
1865 	 */
1866 	if (is58xx(dev)) {
1867 		b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
1868 		reg &= ~BIT(port);
1869 		if ((reg & BIT(cpu_port)) == BIT(cpu_port))
1870 			reg &= ~BIT(cpu_port);
1871 		b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
1872 	}
1873 
1874 	b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
1875 
1876 	b53_for_each_port(dev, i) {
1877 		if (dsa_to_port(ds, i)->bridge_dev != br)
1878 			continue;
1879 
1880 		/* Add this local port to the remote port VLAN control
1881 		 * membership and update the remote port bitmask
1882 		 */
1883 		b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
1884 		reg |= BIT(port);
1885 		b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
1886 		dev->ports[i].vlan_ctl_mask = reg;
1887 
1888 		pvlan |= BIT(i);
1889 	}
1890 
1891 	/* Configure the local port VLAN control membership to include
1892 	 * remote ports and update the local port bitmask
1893 	 */
1894 	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
1895 	dev->ports[port].vlan_ctl_mask = pvlan;
1896 
1897 	return 0;
1898 }
1899 EXPORT_SYMBOL(b53_br_join);
1900 
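/* Undo b53_br_join(): drop this port from the other members' port masks and
 * the members from its own mask, then let standalone traffic flow again by
 * rejoining either the "join all VLANs" group (58xx) or the default PVID
 * VLAN together with the CPU port.
 */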
1901 void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
1902 {
1903 	struct b53_device *dev = ds->priv;
1904 	struct b53_vlan *vl = &dev->vlans[0];
1905 	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
1906 	unsigned int i;
1907 	u16 pvlan, reg, pvid;
1908 
1909 	b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
1910 
1911 	b53_for_each_port(dev, i) {
1912 		/* Don't touch the remaining ports */
1913 		if (dsa_to_port(ds, i)->bridge_dev != br)
1914 			continue;
1915 
1916 		b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
1917 		reg &= ~BIT(port);
1918 		b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
1919 		dev->ports[i].vlan_ctl_mask = reg;
1920 
1921 		/* Prevent self removal to preserve isolation */
1922 		if (port != i)
1923 			pvlan &= ~BIT(i);
1924 	}
1925 
1926 	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
1927 	dev->ports[port].vlan_ctl_mask = pvlan;
1928 
1929 	pvid = b53_default_pvid(dev);
1930 
1931 	/* Make this port join all VLANs without VLAN entries */
1932 	if (is58xx(dev)) {
1933 		b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
1934 		reg |= BIT(port);
1935 		if (!(reg & BIT(cpu_port)))
1936 			reg |= BIT(cpu_port);
1937 		b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
1938 	} else {
1939 		b53_get_vlan_entry(dev, pvid, vl);
1940 		vl->members |= BIT(port) | BIT(cpu_port);
1941 		vl->untag |= BIT(port) | BIT(cpu_port);
1942 		b53_set_vlan_entry(dev, pvid, vl);
1943 	}
1944 }
1945 EXPORT_SYMBOL(b53_br_leave);
1946 
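/* Translate the bridge STP state into the per-port state field of the port
 * control register; unknown states are logged and leave the hardware
 * untouched.
 */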
1947 void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
1948 {
1949 	struct b53_device *dev = ds->priv;
1950 	u8 hw_state;
1951 	u8 reg;
1952 
1953 	switch (state) {
1954 	case BR_STATE_DISABLED:
1955 		hw_state = PORT_CTRL_DIS_STATE;
1956 		break;
1957 	case BR_STATE_LISTENING:
1958 		hw_state = PORT_CTRL_LISTEN_STATE;
1959 		break;
1960 	case BR_STATE_LEARNING:
1961 		hw_state = PORT_CTRL_LEARN_STATE;
1962 		break;
1963 	case BR_STATE_FORWARDING:
1964 		hw_state = PORT_CTRL_FWD_STATE;
1965 		break;
1966 	case BR_STATE_BLOCKING:
1967 		hw_state = PORT_CTRL_BLOCK_STATE;
1968 		break;
1969 	default:
1970 		dev_err(ds->dev, "invalid STP state: %d\n", state);
1971 		return;
1972 	}
1973 
1974 	b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
1975 	reg &= ~PORT_CTRL_STP_STATE_MASK;
1976 	reg |= hw_state;
1977 	b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
1978 }
1979 EXPORT_SYMBOL(b53_br_set_stp_state);
1980 
1981 void b53_br_fast_age(struct dsa_switch *ds, int port)
1982 {
1983 	struct b53_device *dev = ds->priv;
1984 
1985 	if (b53_fast_age_port(dev, port))
1986 		dev_err(ds->dev, "fast ageing failed\n");
1987 }
1988 EXPORT_SYMBOL(b53_br_fast_age);
1989 
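/* Only unicast flooding, multicast flooding and learning can be offloaded;
 * any other requested bridge port flag is rejected here so the core never
 * asks b53_br_flags() to apply it.
 */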
1990 int b53_br_flags_pre(struct dsa_switch *ds, int port,
1991 		     struct switchdev_brport_flags flags,
1992 		     struct netlink_ext_ack *extack)
1993 {
1994 	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING))
1995 		return -EINVAL;
1996 
1997 	return 0;
1998 }
1999 EXPORT_SYMBOL(b53_br_flags_pre);
2000 
2001 int b53_br_flags(struct dsa_switch *ds, int port,
2002 		 struct switchdev_brport_flags flags,
2003 		 struct netlink_ext_ack *extack)
2004 {
2005 	if (flags.mask & BR_FLOOD)
2006 		b53_port_set_ucast_flood(ds->priv, port,
2007 					 !!(flags.val & BR_FLOOD));
2008 	if (flags.mask & BR_MCAST_FLOOD)
2009 		b53_port_set_mcast_flood(ds->priv, port,
2010 					 !!(flags.val & BR_MCAST_FLOOD));
2011 	if (flags.mask & BR_LEARNING)
2012 		b53_port_set_learning(ds->priv, port,
2013 				      !!(flags.val & BR_LEARNING));
2014 
2015 	return 0;
2016 }
2017 EXPORT_SYMBOL(b53_br_flags);
2018 
2019 static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
2020 {
2021 	/* Broadcom switches will accept enabling Broadcom tags on the
2022 	 * following ports: 5, 7 and 8; any other port is not supported
2023 	 */
2024 	switch (port) {
2025 	case B53_CPU_PORT_25:
2026 	case 7:
2027 	case B53_CPU_PORT:
2028 		return true;
2029 	}
2030 
2031 	return false;
2032 }
2033 
2034 static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port,
2035 				     enum dsa_tag_protocol tag_protocol)
2036 {
2037 	bool ret = b53_possible_cpu_port(ds, port);
2038 
2039 	if (!ret) {
2040 		dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
2041 			 port);
2042 		return ret;
2043 	}
2044 
2045 	switch (tag_protocol) {
2046 	case DSA_TAG_PROTO_BRCM:
2047 	case DSA_TAG_PROTO_BRCM_PREPEND:
2048 		dev_warn(ds->dev,
2049 			 "Port %d is stacked to Broadcom tag switch\n", port);
2050 		ret = false;
2051 		break;
2052 	default:
2053 		ret = true;
2054 		break;
2055 	}
2056 
2057 	return ret;
2058 }
2059 
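/* Pick the tagging protocol for this CPU port: none if the port cannot take
 * Broadcom tags (or faces another tag-capable switch), the legacy 6-byte tag
 * on 5325/5365/63xx, the prepended tag on the BCM58xx flow accelerator port,
 * and the regular Broadcom tag everywhere else.
 */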
2060 enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
2061 					   enum dsa_tag_protocol mprot)
2062 {
2063 	struct b53_device *dev = ds->priv;
2064 
2065 	if (!b53_can_enable_brcm_tags(ds, port, mprot)) {
2066 		dev->tag_protocol = DSA_TAG_PROTO_NONE;
2067 		goto out;
2068 	}
2069 
2070 	/* Older models require a different 6 byte tag */
2071 	if (is5325(dev) || is5365(dev) || is63xx(dev)) {
2072 		dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY;
2073 		goto out;
2074 	}
2075 
2076 	/* Broadcom BCM58xx chips have a flow accelerator on Port 8
2077 	 * which requires us to use the prepended Broadcom tag type
2078 	 */
2079 	if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT) {
2080 		dev->tag_protocol = DSA_TAG_PROTO_BRCM_PREPEND;
2081 		goto out;
2082 	}
2083 
2084 	dev->tag_protocol = DSA_TAG_PROTO_BRCM;
2085 out:
2086 	return dev->tag_protocol;
2087 }
2088 EXPORT_SYMBOL(b53_get_tag_protocol);
2089 
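/* Port mirroring is spread over three registers: B53_IG_MIR_CTL and
 * B53_EG_MIR_CTL hold the bitmasks of ports whose ingress/egress traffic is
 * mirrored, while B53_MIR_CAP_CTL selects the capture port and carries the
 * global mirror enable bit.
 */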
2090 int b53_mirror_add(struct dsa_switch *ds, int port,
2091 		   struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
2092 {
2093 	struct b53_device *dev = ds->priv;
2094 	u16 reg, loc;
2095 
2096 	if (ingress)
2097 		loc = B53_IG_MIR_CTL;
2098 	else
2099 		loc = B53_EG_MIR_CTL;
2100 
2101 	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
2102 	reg |= BIT(port);
2103 	b53_write16(dev, B53_MGMT_PAGE, loc, reg);
2104 
2105 	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
2106 	reg &= ~CAP_PORT_MASK;
2107 	reg |= mirror->to_local_port;
2108 	reg |= MIRROR_EN;
2109 	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
2110 
2111 	return 0;
2112 }
2113 EXPORT_SYMBOL(b53_mirror_add);
2114 
2115 void b53_mirror_del(struct dsa_switch *ds, int port,
2116 		    struct dsa_mall_mirror_tc_entry *mirror)
2117 {
2118 	struct b53_device *dev = ds->priv;
2119 	bool loc_disable = false, other_loc_disable = false;
2120 	u16 reg, loc;
2121 
2122 	if (mirror->ingress)
2123 		loc = B53_IG_MIR_CTL;
2124 	else
2125 		loc = B53_EG_MIR_CTL;
2126 
2127 	/* Update the desired ingress/egress register */
2128 	b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
2129 	reg &= ~BIT(port);
2130 	if (!(reg & MIRROR_MASK))
2131 		loc_disable = true;
2132 	b53_write16(dev, B53_MGMT_PAGE, loc, reg);
2133 
2134 	/* Now look at the other one to know if we can disable mirroring
2135 	 * entirely
2136 	 */
2137 	if (mirror->ingress)
2138 		b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
2139 	else
2140 		b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
2141 	if (!(reg & MIRROR_MASK))
2142 		other_loc_disable = true;
2143 
2144 	b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
2145 	/* Both no longer have ports, let's disable mirroring */
2146 	if (loc_disable && other_loc_disable) {
2147 		reg &= ~MIRROR_EN;
2148 		reg &= ~mirror->to_local_port;
2149 	}
2150 	b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
2151 }
2152 EXPORT_SYMBOL(b53_mirror_del);
2153 
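/* EEE (Energy Efficient Ethernet) is a simple per-port enable bit in
 * B53_EEE_EN_CTRL; b53_eee_init() below only sets it once the PHY reports
 * that EEE was successfully negotiated.
 */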
2154 void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
2155 {
2156 	struct b53_device *dev = ds->priv;
2157 	u16 reg;
2158 
2159 	b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
2160 	if (enable)
2161 		reg |= BIT(port);
2162 	else
2163 		reg &= ~BIT(port);
2164 	b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
2165 }
2166 EXPORT_SYMBOL(b53_eee_enable_set);
2167 
2168 
2169 /* Returns 0 if EEE was not enabled, or 1 otherwise
2170  */
2171 int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
2172 {
2173 	int ret;
2174 
2175 	ret = phy_init_eee(phy, 0);
2176 	if (ret)
2177 		return 0;
2178 
2179 	b53_eee_enable_set(ds, port, true);
2180 
2181 	return 1;
2182 }
2183 EXPORT_SYMBOL(b53_eee_init);
2184 
2185 int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
2186 {
2187 	struct b53_device *dev = ds->priv;
2188 	struct ethtool_eee *p = &dev->ports[port].eee;
2189 	u16 reg;
2190 
2191 	if (is5325(dev) || is5365(dev))
2192 		return -EOPNOTSUPP;
2193 
2194 	b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
2195 	e->eee_enabled = p->eee_enabled;
2196 	e->eee_active = !!(reg & BIT(port));
2197 
2198 	return 0;
2199 }
2200 EXPORT_SYMBOL(b53_get_mac_eee);
2201 
2202 int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
2203 {
2204 	struct b53_device *dev = ds->priv;
2205 	struct ethtool_eee *p = &dev->ports[port].eee;
2206 
2207 	if (is5325(dev) || is5365(dev))
2208 		return -EOPNOTSUPP;
2209 
2210 	p->eee_enabled = e->eee_enabled;
2211 	b53_eee_enable_set(ds, port, e->eee_enabled);
2212 
2213 	return 0;
2214 }
2215 EXPORT_SYMBOL(b53_set_mac_eee);
2216 
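/* MTU changes map onto the jumbo frame configuration: any MTU of at least
 * JMS_MIN_SIZE turns on jumbo frames, and on BCM583xx they are additionally
 * allowed at 10/100 link speeds.
 */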
2217 static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
2218 {
2219 	struct b53_device *dev = ds->priv;
2220 	bool enable_jumbo;
2221 	bool allow_10_100;
2222 
2223 	if (is5325(dev) || is5365(dev))
2224 		return -EOPNOTSUPP;
2225 
2226 	enable_jumbo = (mtu >= JMS_MIN_SIZE);
2227 	allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
2228 
2229 	return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
2230 }
2231 
2232 static int b53_get_max_mtu(struct dsa_switch *ds, int port)
2233 {
2234 	return JMS_MAX_SIZE;
2235 }
2236 
2237 static const struct dsa_switch_ops b53_switch_ops = {
2238 	.get_tag_protocol	= b53_get_tag_protocol,
2239 	.setup			= b53_setup,
2240 	.teardown		= b53_teardown,
2241 	.get_strings		= b53_get_strings,
2242 	.get_ethtool_stats	= b53_get_ethtool_stats,
2243 	.get_sset_count		= b53_get_sset_count,
2244 	.get_ethtool_phy_stats	= b53_get_ethtool_phy_stats,
2245 	.phy_read		= b53_phy_read16,
2246 	.phy_write		= b53_phy_write16,
2247 	.adjust_link		= b53_adjust_link,
2248 	.phylink_validate	= b53_phylink_validate,
2249 	.phylink_mac_link_state	= b53_phylink_mac_link_state,
2250 	.phylink_mac_config	= b53_phylink_mac_config,
2251 	.phylink_mac_an_restart	= b53_phylink_mac_an_restart,
2252 	.phylink_mac_link_down	= b53_phylink_mac_link_down,
2253 	.phylink_mac_link_up	= b53_phylink_mac_link_up,
2254 	.port_enable		= b53_enable_port,
2255 	.port_disable		= b53_disable_port,
2256 	.get_mac_eee		= b53_get_mac_eee,
2257 	.set_mac_eee		= b53_set_mac_eee,
2258 	.port_bridge_join	= b53_br_join,
2259 	.port_bridge_leave	= b53_br_leave,
2260 	.port_pre_bridge_flags	= b53_br_flags_pre,
2261 	.port_bridge_flags	= b53_br_flags,
2262 	.port_stp_state_set	= b53_br_set_stp_state,
2263 	.port_fast_age		= b53_br_fast_age,
2264 	.port_vlan_filtering	= b53_vlan_filtering,
2265 	.port_vlan_add		= b53_vlan_add,
2266 	.port_vlan_del		= b53_vlan_del,
2267 	.port_fdb_dump		= b53_fdb_dump,
2268 	.port_fdb_add		= b53_fdb_add,
2269 	.port_fdb_del		= b53_fdb_del,
2270 	.port_mirror_add	= b53_mirror_add,
2271 	.port_mirror_del	= b53_mirror_del,
2272 	.port_mdb_add		= b53_mdb_add,
2273 	.port_mdb_del		= b53_mdb_del,
2274 	.port_max_mtu		= b53_get_max_mtu,
2275 	.port_change_mtu	= b53_change_mtu,
2276 };
2277 
2278 struct b53_chip_data {
2279 	u32 chip_id;
2280 	const char *dev_name;
2281 	u16 vlans;
2282 	u16 enabled_ports;
2283 	u8 imp_port;
2284 	u8 cpu_port;
2285 	u8 vta_regs[3];
2286 	u8 arl_bins;
2287 	u16 arl_buckets;
2288 	u8 duplex_reg;
2289 	u8 jumbo_pm_reg;
2290 	u8 jumbo_size_reg;
2291 };
2292 
2293 #define B53_VTA_REGS	\
2294 	{ B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
2295 #define B53_VTA_REGS_9798 \
2296 	{ B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
2297 #define B53_VTA_REGS_63XX \
2298 	{ B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }
2299 
2300 static const struct b53_chip_data b53_switch_chips[] = {
2301 	{
2302 		.chip_id = BCM5325_DEVICE_ID,
2303 		.dev_name = "BCM5325",
2304 		.vlans = 16,
2305 		.enabled_ports = 0x1f,
2306 		.arl_bins = 2,
2307 		.arl_buckets = 1024,
2308 		.imp_port = 5,
2309 		.cpu_port = B53_CPU_PORT_25,
2310 		.duplex_reg = B53_DUPLEX_STAT_FE,
2311 	},
2312 	{
2313 		.chip_id = BCM5365_DEVICE_ID,
2314 		.dev_name = "BCM5365",
2315 		.vlans = 256,
2316 		.enabled_ports = 0x1f,
2317 		.arl_bins = 2,
2318 		.arl_buckets = 1024,
2319 		.imp_port = 5,
2320 		.cpu_port = B53_CPU_PORT_25,
2321 		.duplex_reg = B53_DUPLEX_STAT_FE,
2322 	},
2323 	{
2324 		.chip_id = BCM5389_DEVICE_ID,
2325 		.dev_name = "BCM5389",
2326 		.vlans = 4096,
2327 		.enabled_ports = 0x1f,
2328 		.arl_bins = 4,
2329 		.arl_buckets = 1024,
2330 		.imp_port = 8,
2331 		.cpu_port = B53_CPU_PORT,
2332 		.vta_regs = B53_VTA_REGS,
2333 		.duplex_reg = B53_DUPLEX_STAT_GE,
2334 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2335 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2336 	},
2337 	{
2338 		.chip_id = BCM5395_DEVICE_ID,
2339 		.dev_name = "BCM5395",
2340 		.vlans = 4096,
2341 		.enabled_ports = 0x1f,
2342 		.arl_bins = 4,
2343 		.arl_buckets = 1024,
2344 		.imp_port = 8,
2345 		.cpu_port = B53_CPU_PORT,
2346 		.vta_regs = B53_VTA_REGS,
2347 		.duplex_reg = B53_DUPLEX_STAT_GE,
2348 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2349 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2350 	},
2351 	{
2352 		.chip_id = BCM5397_DEVICE_ID,
2353 		.dev_name = "BCM5397",
2354 		.vlans = 4096,
2355 		.enabled_ports = 0x1f,
2356 		.arl_bins = 4,
2357 		.arl_buckets = 1024,
2358 		.imp_port = 8,
2359 		.cpu_port = B53_CPU_PORT,
2360 		.vta_regs = B53_VTA_REGS_9798,
2361 		.duplex_reg = B53_DUPLEX_STAT_GE,
2362 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2363 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2364 	},
2365 	{
2366 		.chip_id = BCM5398_DEVICE_ID,
2367 		.dev_name = "BCM5398",
2368 		.vlans = 4096,
2369 		.enabled_ports = 0x7f,
2370 		.arl_bins = 4,
2371 		.arl_buckets = 1024,
2372 		.imp_port = 8,
2373 		.cpu_port = B53_CPU_PORT,
2374 		.vta_regs = B53_VTA_REGS_9798,
2375 		.duplex_reg = B53_DUPLEX_STAT_GE,
2376 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2377 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2378 	},
2379 	{
2380 		.chip_id = BCM53115_DEVICE_ID,
2381 		.dev_name = "BCM53115",
2382 		.vlans = 4096,
2383 		.enabled_ports = 0x1f,
2384 		.arl_bins = 4,
2385 		.arl_buckets = 1024,
2386 		.vta_regs = B53_VTA_REGS,
2387 		.imp_port = 8,
2388 		.cpu_port = B53_CPU_PORT,
2389 		.duplex_reg = B53_DUPLEX_STAT_GE,
2390 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2391 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2392 	},
2393 	{
2394 		.chip_id = BCM53125_DEVICE_ID,
2395 		.dev_name = "BCM53125",
2396 		.vlans = 4096,
2397 		.enabled_ports = 0xff,
2398 		.arl_bins = 4,
2399 		.arl_buckets = 1024,
2400 		.imp_port = 8,
2401 		.cpu_port = B53_CPU_PORT,
2402 		.vta_regs = B53_VTA_REGS,
2403 		.duplex_reg = B53_DUPLEX_STAT_GE,
2404 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2405 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2406 	},
2407 	{
2408 		.chip_id = BCM53128_DEVICE_ID,
2409 		.dev_name = "BCM53128",
2410 		.vlans = 4096,
2411 		.enabled_ports = 0x1ff,
2412 		.arl_bins = 4,
2413 		.arl_buckets = 1024,
2414 		.imp_port = 8,
2415 		.cpu_port = B53_CPU_PORT,
2416 		.vta_regs = B53_VTA_REGS,
2417 		.duplex_reg = B53_DUPLEX_STAT_GE,
2418 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2419 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2420 	},
2421 	{
2422 		.chip_id = BCM63XX_DEVICE_ID,
2423 		.dev_name = "BCM63xx",
2424 		.vlans = 4096,
2425 		.enabled_ports = 0, /* pdata must provide them */
2426 		.arl_bins = 4,
2427 		.arl_buckets = 1024,
2428 		.imp_port = 8,
2429 		.cpu_port = B53_CPU_PORT,
2430 		.vta_regs = B53_VTA_REGS_63XX,
2431 		.duplex_reg = B53_DUPLEX_STAT_63XX,
2432 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
2433 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
2434 	},
2435 	{
2436 		.chip_id = BCM53010_DEVICE_ID,
2437 		.dev_name = "BCM53010",
2438 		.vlans = 4096,
2439 		.enabled_ports = 0x1f,
2440 		.arl_bins = 4,
2441 		.arl_buckets = 1024,
2442 		.imp_port = 8,
2443 		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2444 		.vta_regs = B53_VTA_REGS,
2445 		.duplex_reg = B53_DUPLEX_STAT_GE,
2446 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2447 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2448 	},
2449 	{
2450 		.chip_id = BCM53011_DEVICE_ID,
2451 		.dev_name = "BCM53011",
2452 		.vlans = 4096,
2453 		.enabled_ports = 0x1bf,
2454 		.arl_bins = 4,
2455 		.arl_buckets = 1024,
2456 		.imp_port = 8,
2457 		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2458 		.vta_regs = B53_VTA_REGS,
2459 		.duplex_reg = B53_DUPLEX_STAT_GE,
2460 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2461 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2462 	},
2463 	{
2464 		.chip_id = BCM53012_DEVICE_ID,
2465 		.dev_name = "BCM53012",
2466 		.vlans = 4096,
2467 		.enabled_ports = 0x1bf,
2468 		.arl_bins = 4,
2469 		.arl_buckets = 1024,
2470 		.imp_port = 8,
2471 		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2472 		.vta_regs = B53_VTA_REGS,
2473 		.duplex_reg = B53_DUPLEX_STAT_GE,
2474 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2475 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2476 	},
2477 	{
2478 		.chip_id = BCM53018_DEVICE_ID,
2479 		.dev_name = "BCM53018",
2480 		.vlans = 4096,
2481 		.enabled_ports = 0x1f,
2482 		.arl_bins = 4,
2483 		.arl_buckets = 1024,
2484 		.imp_port = 8,
2485 		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2486 		.vta_regs = B53_VTA_REGS,
2487 		.duplex_reg = B53_DUPLEX_STAT_GE,
2488 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2489 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2490 	},
2491 	{
2492 		.chip_id = BCM53019_DEVICE_ID,
2493 		.dev_name = "BCM53019",
2494 		.vlans = 4096,
2495 		.enabled_ports = 0x1f,
2496 		.arl_bins = 4,
2497 		.arl_buckets = 1024,
2498 		.imp_port = 8,
2499 		.cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
2500 		.vta_regs = B53_VTA_REGS,
2501 		.duplex_reg = B53_DUPLEX_STAT_GE,
2502 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2503 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2504 	},
2505 	{
2506 		.chip_id = BCM58XX_DEVICE_ID,
2507 		.dev_name = "BCM585xx/586xx/88312",
2508 		.vlans	= 4096,
2509 		.enabled_ports = 0x1ff,
2510 		.arl_bins = 4,
2511 		.arl_buckets = 1024,
2512 		.imp_port = 8,
2513 		.cpu_port = B53_CPU_PORT,
2514 		.vta_regs = B53_VTA_REGS,
2515 		.duplex_reg = B53_DUPLEX_STAT_GE,
2516 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2517 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2518 	},
2519 	{
2520 		.chip_id = BCM583XX_DEVICE_ID,
2521 		.dev_name = "BCM583xx/11360",
2522 		.vlans = 4096,
2523 		.enabled_ports = 0x103,
2524 		.arl_bins = 4,
2525 		.arl_buckets = 1024,
2526 		.imp_port = 8,
2527 		.cpu_port = B53_CPU_PORT,
2528 		.vta_regs = B53_VTA_REGS,
2529 		.duplex_reg = B53_DUPLEX_STAT_GE,
2530 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2531 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2532 	},
2533 	/* Starfighter 2 */
2534 	{
2535 		.chip_id = BCM4908_DEVICE_ID,
2536 		.dev_name = "BCM4908",
2537 		.vlans = 4096,
2538 		.enabled_ports = 0x1bf,
2539 		.arl_bins = 4,
2540 		.arl_buckets = 256,
2541 		.imp_port = 8,
2542 		.cpu_port = 8, /* TODO: ports 4, 5, 8 */
2543 		.vta_regs = B53_VTA_REGS,
2544 		.duplex_reg = B53_DUPLEX_STAT_GE,
2545 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2546 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2547 	},
2548 	{
2549 		.chip_id = BCM7445_DEVICE_ID,
2550 		.dev_name = "BCM7445",
2551 		.vlans	= 4096,
2552 		.enabled_ports = 0x1ff,
2553 		.arl_bins = 4,
2554 		.arl_buckets = 1024,
2555 		.imp_port = 8,
2556 		.cpu_port = B53_CPU_PORT,
2557 		.vta_regs = B53_VTA_REGS,
2558 		.duplex_reg = B53_DUPLEX_STAT_GE,
2559 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2560 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2561 	},
2562 	{
2563 		.chip_id = BCM7278_DEVICE_ID,
2564 		.dev_name = "BCM7278",
2565 		.vlans = 4096,
2566 		.enabled_ports = 0x1ff,
2567 		.arl_bins = 4,
2568 		.arl_buckets = 256,
2569 		.imp_port = 8,
2570 		.cpu_port = B53_CPU_PORT,
2571 		.vta_regs = B53_VTA_REGS,
2572 		.duplex_reg = B53_DUPLEX_STAT_GE,
2573 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
2574 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
2575 	},
2576 };
2577 
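/* Copy the matching entry of b53_switch_chips[] into the runtime device
 * structure, then handle what the table cannot express: BCM5325 variant
 * detection, the second IMP port on BCM53115 when the GMII strap is set,
 * internal PHY probing and the optional reset GPIO.
 */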
2578 static int b53_switch_init(struct b53_device *dev)
2579 {
2580 	unsigned int i;
2581 	int ret;
2582 
2583 	for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
2584 		const struct b53_chip_data *chip = &b53_switch_chips[i];
2585 
2586 		if (chip->chip_id == dev->chip_id) {
2587 			if (!dev->enabled_ports)
2588 				dev->enabled_ports = chip->enabled_ports;
2589 			dev->name = chip->dev_name;
2590 			dev->duplex_reg = chip->duplex_reg;
2591 			dev->vta_regs[0] = chip->vta_regs[0];
2592 			dev->vta_regs[1] = chip->vta_regs[1];
2593 			dev->vta_regs[2] = chip->vta_regs[2];
2594 			dev->jumbo_pm_reg = chip->jumbo_pm_reg;
2595 			dev->imp_port = chip->imp_port;
2596 			dev->cpu_port = chip->cpu_port;
2597 			dev->num_vlans = chip->vlans;
2598 			dev->num_arl_bins = chip->arl_bins;
2599 			dev->num_arl_buckets = chip->arl_buckets;
2600 			break;
2601 		}
2602 	}
2603 
2604 	/* check which BCM5325x version we have */
2605 	if (is5325(dev)) {
2606 		u8 vc4;
2607 
2608 		b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
2609 
2610 		/* check reserved bits */
2611 		switch (vc4 & 3) {
2612 		case 1:
2613 			/* BCM5325E */
2614 			break;
2615 		case 3:
2616 			/* BCM5325F - do not use port 4 */
2617 			dev->enabled_ports &= ~BIT(4);
2618 			break;
2619 		default:
2620 /* On the BCM47XX SoCs this is the supported internal switch. */
2621 #ifndef CONFIG_BCM47XX
2622 			/* BCM5325M */
2623 			return -EINVAL;
2624 #else
2625 			break;
2626 #endif
2627 		}
2628 	} else if (dev->chip_id == BCM53115_DEVICE_ID) {
2629 		u64 strap_value;
2630 
2631 		b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
2632 		/* use second IMP port if GMII is enabled */
2633 		if (strap_value & SV_GMII_CTRL_115)
2634 			dev->cpu_port = 5;
2635 	}
2636 
2637 	dev->enabled_ports |= BIT(dev->cpu_port);
2638 	dev->num_ports = fls(dev->enabled_ports);
2639 
2640 	dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
2641 
2642 	/* Include non-standard CPU port built-in PHYs to be probed */
2643 	if (is539x(dev) || is531x5(dev)) {
2644 		for (i = 0; i < dev->num_ports; i++) {
2645 			if (!(dev->ds->phys_mii_mask & BIT(i)) &&
2646 			    !b53_possible_cpu_port(dev->ds, i))
2647 				dev->ds->phys_mii_mask |= BIT(i);
2648 		}
2649 	}
2650 
2651 	dev->ports = devm_kcalloc(dev->dev,
2652 				  dev->num_ports, sizeof(struct b53_port),
2653 				  GFP_KERNEL);
2654 	if (!dev->ports)
2655 		return -ENOMEM;
2656 
2657 	dev->vlans = devm_kcalloc(dev->dev,
2658 				  dev->num_vlans, sizeof(struct b53_vlan),
2659 				  GFP_KERNEL);
2660 	if (!dev->vlans)
2661 		return -ENOMEM;
2662 
2663 	dev->reset_gpio = b53_switch_get_reset_gpio(dev);
2664 	if (dev->reset_gpio >= 0) {
2665 		ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
2666 					    GPIOF_OUT_INIT_HIGH, "robo_reset");
2667 		if (ret)
2668 			return ret;
2669 	}
2670 
2671 	return 0;
2672 }
2673 
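/* Allocate the DSA switch and the b53 private structure in one go. The bus
 * glue drivers (MDIO, SPI, SRAB, memory-mapped, ...) supply their register
 * accessors through @ops and a bus private pointer through @priv.
 */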
2674 struct b53_device *b53_switch_alloc(struct device *base,
2675 				    const struct b53_io_ops *ops,
2676 				    void *priv)
2677 {
2678 	struct dsa_switch *ds;
2679 	struct b53_device *dev;
2680 
2681 	ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
2682 	if (!ds)
2683 		return NULL;
2684 
2685 	ds->dev = base;
2686 
2687 	dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
2688 	if (!dev)
2689 		return NULL;
2690 
2691 	ds->priv = dev;
2692 	dev->dev = base;
2693 
2694 	dev->ds = ds;
2695 	dev->priv = priv;
2696 	dev->ops = ops;
2697 	ds->ops = &b53_switch_ops;
2698 	dev->vlan_enabled = true;
2699 	/* Let DSA handle the case where multiple bridges span the same switch
2700 	 * device and different VLAN awareness settings are requested, which
2701 	 * would break filtering semantics for any of the other bridge
2702 	 * devices (not supported by the hardware).
2703 	 */
2704 	ds->vlan_filtering_is_global = true;
2705 
2706 	mutex_init(&dev->reg_mutex);
2707 	mutex_init(&dev->stats_mutex);
2708 
2709 	return dev;
2710 }
2711 EXPORT_SYMBOL(b53_switch_alloc);
2712 
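/* Identify the switch model. Newer devices expose an 8-bit or 32-bit device
 * ID register; BCM5325 and BCM5365 predate it and are told apart by checking
 * whether the 5325-only VLAN table access register is writable.
 */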
2713 int b53_switch_detect(struct b53_device *dev)
2714 {
2715 	u32 id32;
2716 	u16 tmp;
2717 	u8 id8;
2718 	int ret;
2719 
2720 	ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
2721 	if (ret)
2722 		return ret;
2723 
2724 	switch (id8) {
2725 	case 0:
2726 		/* BCM5325 and BCM5365 do not have this register so reads
2727 		 * return 0. But the read operation did succeed, so assume this
2728 		 * is one of them.
2729 		 *
2730 		 * Next check if we can write to the 5325's VTA register; for
2731 		 * 5365 it is read only.
2732 		 */
2733 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
2734 		b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
2735 
2736 		if (tmp == 0xf)
2737 			dev->chip_id = BCM5325_DEVICE_ID;
2738 		else
2739 			dev->chip_id = BCM5365_DEVICE_ID;
2740 		break;
2741 	case BCM5389_DEVICE_ID:
2742 	case BCM5395_DEVICE_ID:
2743 	case BCM5397_DEVICE_ID:
2744 	case BCM5398_DEVICE_ID:
2745 		dev->chip_id = id8;
2746 		break;
2747 	default:
2748 		ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
2749 		if (ret)
2750 			return ret;
2751 
2752 		switch (id32) {
2753 		case BCM53115_DEVICE_ID:
2754 		case BCM53125_DEVICE_ID:
2755 		case BCM53128_DEVICE_ID:
2756 		case BCM53010_DEVICE_ID:
2757 		case BCM53011_DEVICE_ID:
2758 		case BCM53012_DEVICE_ID:
2759 		case BCM53018_DEVICE_ID:
2760 		case BCM53019_DEVICE_ID:
2761 			dev->chip_id = id32;
2762 			break;
2763 		default:
2764 			dev_err(dev->dev,
2765 				"unsupported switch detected (BCM53%02x/BCM%x)\n",
2766 				id8, id32);
2767 			return -ENODEV;
2768 		}
2769 	}
2770 
2771 	if (dev->chip_id == BCM5325_DEVICE_ID)
2772 		return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
2773 				 &dev->core_rev);
2774 	else
2775 		return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
2776 				 &dev->core_rev);
2777 }
2778 EXPORT_SYMBOL(b53_switch_detect);
2779 
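/* Entry point for the bus glue drivers: take the chip ID from platform data
 * or auto-detection, initialise the per-chip parameters and register the
 * switch with the DSA core.
 */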
2780 int b53_switch_register(struct b53_device *dev)
2781 {
2782 	int ret;
2783 
2784 	if (dev->pdata) {
2785 		dev->chip_id = dev->pdata->chip_id;
2786 		dev->enabled_ports = dev->pdata->enabled_ports;
2787 	}
2788 
2789 	if (!dev->chip_id && b53_switch_detect(dev))
2790 		return -EINVAL;
2791 
2792 	ret = b53_switch_init(dev);
2793 	if (ret)
2794 		return ret;
2795 
2796 	dev_info(dev->dev, "found switch: %s, rev %i\n",
2797 		 dev->name, dev->core_rev);
2798 
2799 	return dsa_register_switch(dev->ds);
2800 }
2801 EXPORT_SYMBOL(b53_switch_register);
2802 
2803 MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
2804 MODULE_DESCRIPTION("B53 switch library");
2805 MODULE_LICENSE("Dual BSD/GPL");
2806