1 /* de4x5.c: A DIGITAL DC21x4x DECchip and DE425/DE434/DE435/DE450/DE500
2 ethernet driver for Linux.
3
4 Copyright 1994, 1995 Digital Equipment Corporation.
5
6 Testing resources for this driver have been made available
7 in part by NASA Ames Research Center (mjacob@nas.nasa.gov).
8
9 The author may be reached at davies@maniac.ultranet.com.
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of the GNU General Public License as published by the
13 Free Software Foundation; either version 2 of the License, or (at your
14 option) any later version.
15
16 THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
17 WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
18 MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
19 NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
22 USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
23 ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27 You should have received a copy of the GNU General Public License along
28 with this program; if not, write to the Free Software Foundation, Inc.,
29 675 Mass Ave, Cambridge, MA 02139, USA.
30
31 Originally, this driver was written for the Digital Equipment
32 Corporation series of EtherWORKS ethernet cards:
33
34 DE425 TP/COAX EISA
35 DE434 TP PCI
36 DE435 TP/COAX/AUI PCI
37 DE450 TP/COAX/AUI PCI
38 DE500 10/100 PCI Fasternet
39
40 but it will now attempt to support all cards which conform to the
41 Digital Semiconductor SROM Specification. The driver currently
42 recognises the following chips:
43
44 DC21040 (no SROM)
45 DC21041[A]
46 DC21140[A]
47 DC21142
48 DC21143
49
50 So far the driver is known to work with the following cards:
51
52 KINGSTON
53 Linksys
54 ZNYX342
55 SMC8432
56 SMC9332 (w/new SROM)
57 ZNYX31[45]
58 ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
59
60 The driver has been tested on a relatively busy network using the DE425,
61 DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
62 16M of data to a DECstation 5000/200 as follows:
63
64 TCP UDP
65 TX RX TX RX
66 DE425 1030k 997k 1170k 1128k
67 DE434 1063k 995k 1170k 1125k
68 DE435 1063k 995k 1170k 1125k
69 DE500 1063k 998k 1170k 1125k in 10Mb/s mode
70
71 All values are typical (in kBytes/sec) from a sample of 4 for each
72 measurement. Their error is +/-20k on a quiet (private) network and also
73 depends on the CPU load.
74
75 =========================================================================
76 This driver has been written substantially from scratch, although its
77 inheritance of style and stack interface from 'ewrk3.c' and in turn from
78 Donald Becker's 'lance.c' should be obvious. With the module autoload of
79 every usable DECchip board, I pinched Donald's 'next_module' field to
80 link my modules together.
81
82 Up to 15 EISA cards can be supported under this driver, limited primarily
83 by the available IRQ lines. I have checked different configurations of
84 multiple depca, EtherWORKS 3 cards and de4x5 cards and have not found a
85 problem yet (provided you have at least depca.c v0.38) ...
86
87 PCI support has been added to allow the driver to work with the DE434,
88 DE435, DE450 and DE500 cards. The I/O accesses are a bit of a kludge due
89 to the differences in the EISA and PCI CSR address offsets from the base
90 address.
91
92 The ability to load this driver as a loadable module has been included
93 and used extensively during the driver development (to save those long
94 reboot sequences). Loadable module support under PCI and EISA has been
95 achieved by letting the driver autoprobe as if it were compiled into the
96 kernel. Do make sure you're not sharing interrupts with anything that
97 cannot accommodate interrupt sharing!
98
99 To utilise this ability, you have to do 8 things:
100
101 0) have a copy of the loadable modules code installed on your system.
102 1) copy de4x5.c from the /linux/drivers/net directory to your favourite
103 temporary directory.
104 2) for fixed autoprobes (not recommended), edit the source code near
105 line 5594 to reflect the I/O address you're using, or assign these when
106 loading by:
107
108 insmod de4x5 io=0xghh where g = bus number
109 hh = device number
110
111 NB: autoprobing for modules is now supported by default. You may just
112 use:
113
114 insmod de4x5
115
116 to load all available boards. For a specific board, still use
117 the 'io=?' above.
118 3) compile de4x5.c, but include -DMODULE in the command line to ensure
119 that the correct bits are compiled (see end of source code).
120 4) if you want to add a new card, go to 5. Otherwise, recompile a kernel
121 with the de4x5 configuration turned off and reboot.
122 5) insmod de4x5 [io=0xghh]
123 6) run the net startup bits for your new eth?? interface(s) manually
124 (usually /etc/rc.inet[12] at boot time).
125 7) enjoy!
126
127 To unload a module, turn off the associated interface(s)
128 'ifconfig eth?? down' then 'rmmod de4x5'.
129
130 Automedia detection is included so that in principle you can disconnect
131 from, e.g. TP, reconnect to BNC and things will still work (after a
132 pause whilst the driver figures out where its media went). My tests
133 using ping showed that it appears to work....
134
135 By default, the driver will now autodetect any DECchip based card.
136 Should you have a need to restrict the driver to DIGITAL only cards, you
137 can compile with a DEC_ONLY define, or if loading as a module, use the
138 'dec_only=1' parameter.
139
140 I've changed the timing routines to use the kernel timer and scheduling
141 functions so that the hangs and other assorted problems that occurred
142 while autosensing the media should be gone. A bonus is that the DC21040
143 auto media sense algorithm can now be brought into line with the other
144 chips' (the DC21040 chip doesn't have a hardware timer).
145 The downside is the 1 'jiffies' (10ms) resolution.
146
147 IEEE 802.3u MII interface code has been added in anticipation that some
148 products may use it in the future.
149
150 The SMC9332 card has a non-compliant SROM which needs fixing - I have
151 patched this driver to detect it because the SROM format used complies
152 with a previous DEC-STD format.
153
154 I have removed the buffer copies needed for receive on Intels. I cannot
155 remove them for Alphas since the Tulip hardware only does longword
156 aligned DMA transfers and the Alphas get alignment traps with non
157 longword aligned data copies (which makes them really slow). No comment.
158
159 I have added SROM decoding routines to make this driver work with any
160 card that supports the Digital Semiconductor SROM spec. This will help
161 all cards running the dc2114x series chips in particular. Cards using
162 the dc2104x chips should run correctly with the basic driver. I'm in
163 debt to <mjacob@feral.com> for the testing and feedback that helped get
164 this feature working. So far we have tested KINGSTON, SMC8432, SMC9332
165 (with the latest SROM complying with the SROM spec V3: their first was
166 broken), ZNYX342 and LinkSys. ZNYX314 (dual 21041 MAC) and ZNYX315
167 (quad 21041 MAC) cards also appear to work despite their incorrectly
168 wired IRQs.
169
170 I have added a temporary fix for interrupt problems when some SCSI cards
171 share the same interrupt as the DECchip based cards. The problem occurs
172 because the SCSI card wants to grab the interrupt as a fast interrupt
173 (runs the service routine with interrupts turned off) vs. this card
174 which really needs to run the service routine with interrupts turned on.
175 This driver will now add the interrupt service routine as a fast
176 interrupt if it is bounced from the slow interrupt. THIS IS NOT A
177 RECOMMENDED WAY TO RUN THE DRIVER and has been done for a limited time
178 until people sort out their compatibility issues and the kernel
179 interrupt service code is fixed. YOU SHOULD SEPARATE OUT THE FAST
180 INTERRUPT CARDS FROM THE SLOW INTERRUPT CARDS to ensure that they do not
181 run on the same interrupt. PCMCIA/CardBus is another can of worms...
182
183 Finally, I think I have really fixed the module loading problem with
184 more than one DECchip based card. As a side effect, I don't mess with
185 the device structure any more which means that if more than 1 card in
186 2.0.x is installed (4 in 2.1.x), the user will have to edit
187 linux/drivers/net/Space.c to make room for them. Hence, module loading
188 is the preferred way to use this driver, since it doesn't have this
189 limitation.
190
191 Where SROM media detection is used and full duplex is specified in the
192 SROM, the feature is ignored unless lp->params.fdx is set at compile
193 time OR during a module load (insmod de4x5 args='eth??:fdx' [see
194 below]). This is because there is no way to automatically detect full
195 duplex links except through autonegotiation. When I include the
196 autonegotiation feature in the SROM autoconf code, this detection will
197 occur automatically for that case.
198
199 Command line arguments are now allowed, similar to passing arguments
200 through LILO. This will allow a per adapter board set up of full duplex
201 and media. The only lexical constraints are: the board name (dev->name)
202 appears in the list before its parameters. The list of parameters ends
203 either at the end of the parameter list or with another board name. The
204 following parameters are allowed:
205
206 fdx for full duplex
207 autosense to set the media/speed; with the following
208 sub-parameters:
209 TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO
210
211 Case sensitivity is important for the sub-parameters. They *must* be
212 upper case. Examples:
213
214 insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
215
216 For a compiled in driver, at or above line 548, place e.g.
217 #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
218
219 Yes, I know full duplex isn't permissible on BNC or AUI; they're just
220 examples. By default, full duplex is turned off and AUTO is the default
221 autosense setting. In reality, I expect only the full duplex option to
222 be used. Note the use of single quotes in the two examples above and the
223 lack of commas to separate items. ALSO, you must get the requested media
224 correct in relation to what the adapter SROM says it has. There's no way
225 to determine this in advance other than by trial and error and common
226 sense, e.g. call a BNC connectored port 'BNC', not '10Mb'.
227
228 Changed the bus probing. EISA used to be done first, followed by PCI.
229 Most people probably don't even know what a de425 is today and the EISA
230 probe has messed up some SCSI cards in the past, so now PCI is always
231 probed first followed by EISA if a) the architecture allows EISA and
232 either b) there have been no PCI cards detected or c) an EISA probe is
233 forced by the user. To force a probe include "force_eisa" in your
234 insmod "args" line; for built-in kernels either change the driver to do
235 this automatically or include #define DE4X5_FORCE_EISA on or before
236 line 1040 in the driver.
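    For example (illustrative only, using the "force_eisa" token described
    above):

        insmod de4x5 args='force_eisa'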
237
238 TO DO:
239 ------
240
241 Revision History
242 ----------------
243
244 Version Date Description
245
246 0.1 17-Nov-94 Initial writing. ALPHA code release.
247 0.2 13-Jan-95 Added PCI support for DE435's.
248 0.21 19-Jan-95 Added auto media detection.
249 0.22 10-Feb-95 Fix interrupt handler call <chris@cosy.sbg.ac.at>.
250 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
251 Add request/release_region code.
252 Add loadable modules support for PCI.
253 Clean up loadable modules support.
254 0.23 28-Feb-95 Added DC21041 and DC21140 support.
255 Fix missed frame counter value and initialisation.
256 Fixed EISA probe.
257 0.24 11-Apr-95 Change delay routine to use <linux/udelay>.
258 Change TX_BUFFS_AVAIL macro.
259 Change media autodetection to allow manual setting.
260 Completed DE500 (DC21140) support.
261 0.241 18-Apr-95 Interim release without DE500 Autosense Algorithm.
262 0.242 10-May-95 Minor changes.
263 0.30 12-Jun-95 Timer fix for DC21140.
264 Portability changes.
265 Add ALPHA changes from <jestabro@ant.tay1.dec.com>.
266 Add DE500 semi automatic autosense.
267 Add Link Fail interrupt TP failure detection.
268 Add timer based link change detection.
269 Plugged a memory leak in de4x5_queue_pkt().
270 0.31 13-Jun-95 Fixed PCI stuff for 1.3.1.
271 0.32 26-Jun-95 Added verify_area() calls in de4x5_ioctl() from a
272 suggestion by <heiko@colossus.escape.de>.
273 0.33 8-Aug-95 Add shared interrupt support (not released yet).
274 0.331 21-Aug-95 Fix de4x5_open() with fast CPUs.
275 Fix de4x5_interrupt().
276 Fix dc21140_autoconf() mess.
277 No shared interrupt support.
278 0.332 11-Sep-95 Added MII management interface routines.
279 0.40 5-Mar-96 Fix setup frame timeout <maartenb@hpkuipc.cern.ch>.
280 Add kernel timer code (h/w is too flaky).
281 Add MII based PHY autosense.
282 Add new multicasting code.
283 Add new autosense algorithms for media/mode
284 selection using kernel scheduling/timing.
285 Re-formatted.
286 Made changes suggested by <jeff@router.patch.net>:
287 Change driver to detect all DECchip based cards
288 with DEC_ONLY restriction a special case.
289 Changed driver to autoprobe as a module. No irq
290 checking is done now - assume BIOS is good!
291 Added SMC9332 detection <manabe@Roy.dsl.tutics.ac.jp>
292 0.41 21-Mar-96 Don't check for get_hw_addr checksum unless DEC card
293 only <niles@axp745gsfc.nasa.gov>
294 Fix for multiple PCI cards reported by <jos@xos.nl>
295 Duh, put the IRQF_SHARED flag into request_interrupt().
296 Fix SMC ethernet address in enet_det[].
297 Print chip name instead of "UNKNOWN" during boot.
298 0.42 26-Apr-96 Fix MII write TA bit error.
299 Fix bug in dc21040 and dc21041 autosense code.
300 Remove buffer copies on receive for Intels.
301 Change sk_buff handling during media disconnects to
302 eliminate DUP packets.
303 Add dynamic TX thresholding.
304 Change all chips to use perfect multicast filtering.
305 Fix alloc_device() bug <jari@markkus2.fimr.fi>
306 0.43 21-Jun-96 Fix unconnected media TX retry bug.
307 Add Accton to the list of broken cards.
308 Fix TX under-run bug for non DC21140 chips.
309 Fix boot command probe bug in alloc_device() as
310 reported by <koen.gadeyne@barco.com> and
311 <orava@nether.tky.hut.fi>.
312 Add cache locks to prevent a race condition as
313 reported by <csd@microplex.com> and
314 <baba@beckman.uiuc.edu>.
315 Upgraded alloc_device() code.
316 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion
317 with <csd@microplex.com>
318 0.44 13-Aug-96 Fix RX overflow bug in 2114[023] chips.
319 Fix EISA probe bugs reported by <os2@kpi.kharkov.ua>
320 and <michael@compurex.com>.
321 0.441 9-Sep-96 Change dc21041_autoconf() to probe quiet BNC media
322 with a loopback packet.
323 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported
324 by <bhat@mundook.cs.mu.OZ.AU>
325 0.45 8-Dec-96 Include endian functions for PPC use, from work
326 by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>.
327 0.451 28-Dec-96 Added fix to allow autoprobe for modules after
328 suggestion from <mjacob@feral.com>.
329 0.5 30-Jan-97 Added SROM decoding functions.
330 Updated debug flags.
331 Fix sleep/wakeup calls for PCI cards, bug reported
332 by <cross@gweep.lkg.dec.com>.
333 Added multi-MAC, one SROM feature from discussion
334 with <mjacob@feral.com>.
335 Added full module autoprobe capability.
336 Added attempt to use an SMC9332 with broken SROM.
337 Added fix for ZYNX multi-mac cards that didn't
338 get their IRQs wired correctly.
339 0.51 13-Feb-97 Added endian fixes for the SROM accesses from
340 <paubert@iram.es>
341 Fix init_connection() to remove extra device reset.
342 Fix MAC/PHY reset ordering in dc21140m_autoconf().
343 Fix initialisation problem with lp->timeout in
344 typeX_infoblock() from <paubert@iram.es>.
345 Fix MII PHY reset problem from work done by
346 <paubert@iram.es>.
347 0.52 26-Apr-97 Some changes may not credit the right people -
348 a disk crash meant I lost some mail.
349 Change RX interrupt routine to drop rather than
350 defer packets to avoid hang reported by
351 <g.thomas@opengroup.org>.
352 Fix srom_exec() to return for COMPACT and type 1
353 infoblocks.
354 Added DC21142 and DC21143 functions.
355 Added byte counters from <phil@tazenda.demon.co.uk>
356 Added IRQF_DISABLED temporary fix from
357 <mjacob@feral.com>.
358 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during
359 module load: bug reported by
360 <Piete.Brooks@cl.cam.ac.uk>
361 Fix multi-MAC, one SROM, to work with 2114x chips:
362 bug reported by <cmetz@inner.net>.
363 Make above search independent of BIOS device scan
364 direction.
365 Completed DC2114[23] autosense functions.
366 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by
367 <robin@intercore.com>
368 Fix type1_infoblock() bug introduced in 0.53, from
369 problem reports by
370 <parmee@postecss.ncrfran.france.ncr.com> and
371 <jo@ice.dillingen.baynet.de>.
372 Added argument list to set up each board from either
373 a module's command line or a compiled in #define.
374 Added generic MII PHY functionality to deal with
375 newer PHY chips.
376 Fix the mess in 2.1.67.
377 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by
378 <redhat@cococo.net>.
379 Fix bug in pci_probe() for 64 bit systems reported
380 by <belliott@accessone.com>.
381 0.533 9-Jan-98 Fix more 64 bit bugs reported by <jal@cs.brown.edu>.
382 0.534 24-Jan-98 Fix last (?) endian bug from <geert@linux-m68k.org>
383 0.535 21-Feb-98 Fix Ethernet Address PROM reset bug for DC21040.
384 0.536 21-Mar-98 Change pci_probe() to use the pci_dev structure.
385 **Incompatible with 2.0.x from here.**
386 0.540 5-Jul-98 Atomicize assertion of dev->interrupt for SMP
387 from <lma@varesearch.com>
388 Add TP, AUI and BNC cases to 21140m_autoconf() for
389 case where a 21140 under SROM control uses, e.g. AUI
390 from problem report by <delchini@lpnp09.in2p3.fr>
391 Add MII parallel detection to 2114x_autoconf() for
392 case where no autonegotiation partner exists from
393 problem report by <mlapsley@ndirect.co.uk>.
394 Add ability to force connection type directly even
395 when using SROM control from problem report by
396 <earl@exis.net>.
397 Updated the PCI interface to conform with the latest
398 version. I hope nothing is broken...
399 Add TX done interrupt modification from suggestion
400 by <Austin.Donnelly@cl.cam.ac.uk>.
401 Fix is_anc_capable() bug reported by
402 <Austin.Donnelly@cl.cam.ac.uk>.
403 Fix type[13]_infoblock() bug: during MII search, PHY
404 lp->rst not run because lp->ibn not initialised -
405 from report & fix by <paubert@iram.es>.
406 Fix probe bug with EISA & PCI cards present from
407 report by <eirik@netcom.com>.
408 0.541 24-Aug-98 Fix compiler problems associated with i386-string
409 ops from multiple bug reports and temporary fix
410 from <paubert@iram.es>.
411 Fix pci_probe() to correctly emulate the old
412 pcibios_find_class() function.
413 Add an_exception() for old ZNYX346 and fix compile
414 warning on PPC & SPARC, from <ecd@skynet.be>.
415 Fix lastPCI to correctly work with compiled in
416 kernels and modules from bug report by
417 <Zlatko.Calusic@CARNet.hr> et al.
418 0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages
419 when media is unconnected.
420 Change dev->interrupt to lp->interrupt to ensure
421 alignment for Alpha's and avoid their unaligned
422 access traps. This flag is merely for log messages:
423 should do something more definitive though...
424 0.543 30-Dec-98 Add SMP spin locking.
425 0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using
426 a 21143 by <mmporter@home.com>.
427 Change PCI/EISA bus probing order.
428 0.545 28-Nov-99 Further Moto SROM bug fix from
429 <mporter@eng.mcd.mot.com>
430 Remove double checking for DEBUG_RX in de4x5_dbg_rx()
431 from report by <geert@linux-m68k.org>
432 0.546 22-Feb-01 Fixes Alpha XP1000 oops. The srom_search function
433 was causing a page fault when initializing the
434 variable 'pb', on a non de4x5 PCI device, in this
435 case a PCI bridge (DEC chip 21152). The value of
436 'pb' is now only initialized if a de4x5 chip is
437 present.
438 <france@handhelds.org>
439 0.547 08-Nov-01 Use library crc32 functions by <Matt_Domsch@dell.com>
440 0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and
441 generic DMA APIs. Fixed DE425 support on Alpha.
442 <maz@wild-wind.fr.eu.org>
443 =========================================================================
444 */
445
446 #include <linux/module.h>
447 #include <linux/kernel.h>
448 #include <linux/string.h>
449 #include <linux/interrupt.h>
450 #include <linux/ptrace.h>
451 #include <linux/errno.h>
452 #include <linux/ioport.h>
453 #include <linux/pci.h>
454 #include <linux/eisa.h>
455 #include <linux/delay.h>
456 #include <linux/init.h>
457 #include <linux/spinlock.h>
458 #include <linux/crc32.h>
459 #include <linux/netdevice.h>
460 #include <linux/etherdevice.h>
461 #include <linux/skbuff.h>
462 #include <linux/time.h>
463 #include <linux/types.h>
464 #include <linux/unistd.h>
465 #include <linux/ctype.h>
466 #include <linux/dma-mapping.h>
467 #include <linux/moduleparam.h>
468 #include <linux/bitops.h>
469 #include <linux/gfp.h>
470
471 #include <asm/io.h>
472 #include <asm/dma.h>
473 #include <asm/byteorder.h>
474 #include <asm/unaligned.h>
475 #include <linux/uaccess.h>
476 #ifdef CONFIG_PPC_PMAC
477 #include <asm/machdep.h>
478 #endif /* CONFIG_PPC_PMAC */
479
480 #include "de4x5.h"
481
482 static const char version[] =
483 KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
484
485 #define c_char const char
486
487 /*
488 ** MII Information
489 */
490 struct phy_table {
491 int reset; /* Hard reset required? */
492 int id; /* IEEE OUI */
493 int ta; /* One cycle TA time - 802.3u is confusing here */
494 struct { /* Non autonegotiation (parallel) speed det. */
495 int reg;
496 int mask;
497 int value;
498 } spd;
499 };
500
501 struct mii_phy {
502 int reset; /* Hard reset required? */
503 int id; /* IEEE OUI */
504 int ta; /* One cycle TA time */
505 struct { /* Non autonegotiation (parallel) speed det. */
506 int reg;
507 int mask;
508 int value;
509 } spd;
510 int addr; /* MII address for the PHY */
511 u_char *gep; /* Start of GEP sequence block in SROM */
512 u_char *rst; /* Start of reset sequence in SROM */
513 u_int mc; /* Media Capabilities */
514 u_int ana; /* NWay Advertisement */
515 u_int fdx; /* Full DupleX capabilities for each media */
516 u_int ttm; /* Transmit Threshold Mode for each media */
517 u_int mci; /* 21142 MII Connector Interrupt info */
518 };
519
520 #define DE4X5_MAX_PHY 8 /* Allow up to 8 attached PHY devices per board */
521
522 struct sia_phy {
523 u_char mc; /* Media Code */
524 u_char ext; /* csr13-15 valid when set */
525 int csr13; /* SIA Connectivity Register */
526 int csr14; /* SIA TX/RX Register */
527 int csr15; /* SIA General Register */
528 int gepc; /* SIA GEP Control Information */
529 int gep; /* SIA GEP Data */
530 };
531
532 /*
533 ** Define the known universe of PHY devices that can be
534 ** recognised by this driver.
535 */
536 static struct phy_table phy_info[] = {
537 {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}}, /* National TX */
538 {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}}, /* Broadcom T4 */
539 {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}}, /* SEEQ T4 */
540 {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}, /* Cypress T4 */
541 {0, 0x7810 , 1, {0x14, 0x0800, 0x0800}} /* Level One LTX970 */
542 };
543
544 /*
545 ** These GENERIC values assume that the PHY devices follow 802.3u and
546 ** allow parallel detection to set the link partner ability register.
547 ** Detection of 100Base-TX [H/F Duplex] and 100Base-T4 is supported.
548 */
549 #define GENERIC_REG 0x05 /* Autoneg. Link Partner Advertisement Reg. */
550 #define GENERIC_MASK MII_ANLPA_100M /* All 100Mb/s Technologies */
551 #define GENERIC_VALUE MII_ANLPA_100M /* 100B-TX, 100B-TX FDX, 100B-T4 */
552
553 /*
554 ** Define special SROM detection cases
555 */
556 static c_char enet_det[][ETH_ALEN] = {
557 {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
558 {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
559 };
560
561 #define SMC 1
562 #define ACCTON 2
563
564 /*
565 ** SROM Repair definitions. If a broken SROM is detected a card may
566 ** use this information to help figure out what to do. This is a
567 ** "stab in the dark" and so far for SMC9332's only.
568 */
569 static c_char srom_repair_info[][100] = {
570 {0x00,0x1e,0x00,0x00,0x00,0x08, /* SMC9332 */
571 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
572 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
573 0x00,0x18,}
574 };
575
576
577 #ifdef DE4X5_DEBUG
578 static int de4x5_debug = DE4X5_DEBUG;
579 #else
580 /*static int de4x5_debug = (DEBUG_MII | DEBUG_SROM | DEBUG_PCICFG | DEBUG_MEDIA | DEBUG_VERSION);*/
581 static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
582 #endif
583
584 /*
585 ** Allow per adapter set up. For modules this is simply a command line
586 ** parameter, e.g.:
587 ** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
588 **
589 ** For a compiled in driver, place e.g.
590 ** #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
591 ** here
592 */
593 #ifdef DE4X5_PARM
594 static char *args = DE4X5_PARM;
595 #else
596 static char *args;
597 #endif
598
599 struct parameters {
600 bool fdx;
601 int autosense;
602 };
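/*
** Illustrative note (not in the original): loading with
** args='eth0:fdx autosense=TP' should leave that board's params.fdx == true
** and params.autosense == TP once de4x5_parse_params() has run.
*/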
603
604 #define DE4X5_AUTOSENSE_MS 250 /* msec autosense tick (DE500) */
605
606 #define DE4X5_NDA 0xffe0 /* No Device (I/O) Address */
607
608 /*
609 ** Ethernet PROM defines
610 */
611 #define PROBE_LENGTH 32
612 #define ETH_PROM_SIG 0xAA5500FFUL
613
614 /*
615 ** Ethernet Info
616 */
617 #define PKT_BUF_SZ 1536 /* Buffer size for each Tx/Rx buffer */
618 #define IEEE802_3_SZ 1518 /* Packet + CRC */
619 #define MAX_PKT_SZ 1514 /* Maximum ethernet packet length */
620 #define MAX_DAT_SZ 1500 /* Maximum ethernet data length */
621 #define MIN_DAT_SZ 1 /* Minimum ethernet data length */
622 #define PKT_HDR_LEN 14 /* Addresses and data length info */
623 #define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
624 #define QUEUE_PKT_TIMEOUT (3*HZ) /* 3 second timeout */
625
626
627 /*
628 ** EISA bus defines
629 */
630 #define DE4X5_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
631 #define DE4X5_EISA_TOTAL_SIZE 0x100 /* I/O address extent */
632
633 #define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}
634
635 #define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
636 #define DE4X5_NAME_LENGTH 8
637
638 static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
645
646 /*
647 ** PCI Bus defines
648 */
649 #define PCI_MAX_BUS_NUM 8
650 #define DE4X5_PCI_TOTAL_SIZE 0x80 /* I/O address extent */
651 #define DE4X5_CLASS_CODE 0x00020000 /* Network controller, Ethernet */
652
653 /*
654 ** Memory Alignment. Each descriptor is 4 longwords long. To force a
655 ** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
656 ** DESC_ALIGN. ALIGN aligns the start address of the private memory area
657 ** and hence the RX descriptor ring's first entry.
658 */
659 #define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
660 #define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */
661 #define DE4X5_ALIGN16 ((u_long)16 - 1) /* 4 longword align */
662 #define DE4X5_ALIGN32 ((u_long)32 - 1) /* 8 longword align */
663 #define DE4X5_ALIGN64 ((u_long)64 - 1) /* 16 longword align */
664 #define DE4X5_ALIGN128 ((u_long)128 - 1) /* 32 longword align */
665
666 #define DE4X5_ALIGN DE4X5_ALIGN32 /* Keep the DC21040 happy... */
667 #define DE4X5_CACHE_ALIGN CAL_16LONG
668 #define DESC_SKIP_LEN DSL_0 /* Must agree with DESC_ALIGN */
669 /*#define DESC_ALIGN u32 dummy[4]; / * Must agree with DESC_SKIP_LEN */
670 #define DESC_ALIGN
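/*
** Illustrative note (not in the original): the masks above are used to round
** addresses up, e.g. later in de4x5_hw_init():
**
**     dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
**
** which aligns the Rx buffer area to the next 32 byte (8 longword) boundary.
*/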
671
672 #ifndef DEC_ONLY /* See README.de4x5 for using this */
673 static int dec_only;
674 #else
675 static int dec_only = 1;
676 #endif
677
678 /*
679 ** DE4X5 IRQ ENABLE/DISABLE
680 */
681 #define ENABLE_IRQs { \
682 imr |= lp->irq_en;\
683 outl(imr, DE4X5_IMR); /* Enable the IRQs */\
684 }
685
686 #define DISABLE_IRQs {\
687 imr = inl(DE4X5_IMR);\
688 imr &= ~lp->irq_en;\
689 outl(imr, DE4X5_IMR); /* Disable the IRQs */\
690 }
691
692 #define UNMASK_IRQs {\
693 imr |= lp->irq_mask;\
694 outl(imr, DE4X5_IMR); /* Unmask the IRQs */\
695 }
696
697 #define MASK_IRQs {\
698 imr = inl(DE4X5_IMR);\
699 imr &= ~lp->irq_mask;\
700 outl(imr, DE4X5_IMR); /* Mask the IRQs */\
701 }
702
703 /*
704 ** DE4X5 START/STOP
705 */
706 #define START_DE4X5 {\
707 omr = inl(DE4X5_OMR);\
708 omr |= OMR_ST | OMR_SR;\
709 outl(omr, DE4X5_OMR); /* Enable the TX and/or RX */\
710 }
711
712 #define STOP_DE4X5 {\
713 omr = inl(DE4X5_OMR);\
714 omr &= ~(OMR_ST|OMR_SR);\
715 outl(omr, DE4X5_OMR); /* Disable the TX and/or RX */ \
716 }
717
718 /*
719 ** DE4X5 SIA RESET
720 */
721 #define RESET_SIA outl(0, DE4X5_SICR); /* Reset SIA connectivity regs */
727
728 /*
729 ** SROM Structure
730 */
731 struct de4x5_srom {
732 char sub_vendor_id[2];
733 char sub_system_id[2];
734 char reserved[12];
735 char id_block_crc;
736 char reserved2;
737 char version;
738 char num_controllers;
739 char ieee_addr[6];
740 char info[100];
741 short chksum;
742 };
743 #define SUB_VENDOR_ID 0x500a
744
745 /*
746 ** DE4X5 Descriptors. Make sure that all the RX buffers are contiguous
747 ** and have sizes of both a power of 2 and a multiple of 4.
748 ** A size of 256 bytes for each buffer could be chosen because over 90% of
749 ** all packets in our network are <256 bytes long and 64 longword alignment
750 ** is possible. 1536 showed better 'ttcp' performance. Take your pick. 32 TX
751 ** descriptors are needed for machines with an ALPHA CPU.
752 */
753 #define NUM_RX_DESC 8 /* Number of RX descriptors */
754 #define NUM_TX_DESC 32 /* Number of TX descriptors */
755 #define RX_BUFF_SZ 1536 /* Power of 2 for kmalloc and */
756 /* Multiple of 4 for DC21040 */
757 /* Allows 512 byte alignment */
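/*
** Illustrative sizing note (not in the original): with the values above, the
** coherent area requested in de4x5_hw_init() is (8 + 32) * 16 = 640 bytes of
** descriptors, plus 8 * 1536 bytes of Rx buffers (and alignment slack) on the
** memcpy architectures (Alpha, PPC, SPARC).
*/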
758 struct de4x5_desc {
759 volatile __le32 status;
760 __le32 des1;
761 __le32 buf;
762 __le32 next;
763 DESC_ALIGN
764 };
765
766 /*
767 ** The DE4X5 private structure
768 */
769 #define DE4X5_PKT_STAT_SZ 16
770 #define DE4X5_PKT_BIN_SZ 128 /* Should be >=100 unless you
771 increase DE4X5_PKT_STAT_SZ */
772
773 struct pkt_stats {
774 u_int bins[DE4X5_PKT_STAT_SZ]; /* Private stats counters */
775 u_int unicast;
776 u_int multicast;
777 u_int broadcast;
778 u_int excessive_collisions;
779 u_int tx_underruns;
780 u_int excessive_underruns;
781 u_int rx_runt_frames;
782 u_int rx_collision;
783 u_int rx_dribble;
784 u_int rx_overflow;
785 };
786
787 struct de4x5_private {
788 char adapter_name[80]; /* Adapter name */
789 u_long interrupt; /* Aligned ISR flag */
790 struct de4x5_desc *rx_ring; /* RX descriptor ring */
791 struct de4x5_desc *tx_ring; /* TX descriptor ring */
792 struct sk_buff *tx_skb[NUM_TX_DESC]; /* TX skb for freeing when sent */
793 struct sk_buff *rx_skb[NUM_RX_DESC]; /* RX skb's */
794 int rx_new, rx_old; /* RX descriptor ring pointers */
795 int tx_new, tx_old; /* TX descriptor ring pointers */
796 char setup_frame[SETUP_FRAME_LEN]; /* Holds MCA and PA info. */
797 char frame[64]; /* Min sized packet for loopback*/
798 spinlock_t lock; /* Adapter specific spinlock */
799 struct net_device_stats stats; /* Public stats */
800 struct pkt_stats pktStats; /* Private stats counters */
801 char rxRingSize;
802 char txRingSize;
803 int bus; /* EISA or PCI */
804 int bus_num; /* PCI Bus number */
805 int device; /* Device number on PCI bus */
806 int state; /* Adapter OPENED or CLOSED */
807 int chipset; /* DC21040, DC21041 or DC21140 */
808 s32 irq_mask; /* Interrupt Mask (Enable) bits */
809 s32 irq_en; /* Summary interrupt bits */
810 int media; /* Media (eg TP), mode (eg 100B)*/
811 int c_media; /* Remember the last media conn */
812 bool fdx; /* media full duplex flag */
813 int linkOK; /* Link is OK */
814 int autosense; /* Allow/disallow autosensing */
815 bool tx_enable; /* Enable descriptor polling */
816 int setup_f; /* Setup frame filtering type */
817 int local_state; /* State within a 'media' state */
818 struct mii_phy phy[DE4X5_MAX_PHY]; /* List of attached PHY devices */
819 struct sia_phy sia; /* SIA PHY Information */
820 int active; /* Index to active PHY device */
821 int mii_cnt; /* Number of attached PHY's */
822 int timeout; /* Scheduling counter */
823 struct timer_list timer; /* Timer info for kernel */
824 int tmp; /* Temporary global per card */
825 struct {
826 u_long lock; /* Lock the cache accesses */
827 s32 csr0; /* Saved Bus Mode Register */
828 s32 csr6; /* Saved Operating Mode Reg. */
829 s32 csr7; /* Saved IRQ Mask Register */
830 s32 gep; /* Saved General Purpose Reg. */
831 s32 gepc; /* Control info for GEP */
832 s32 csr13; /* Saved SIA Connectivity Reg. */
833 s32 csr14; /* Saved SIA TX/RX Register */
834 s32 csr15; /* Saved SIA General Register */
835 int save_cnt; /* Flag if state already saved */
836 struct sk_buff_head queue; /* Save the (re-ordered) skb's */
837 } cache;
838 struct de4x5_srom srom; /* A copy of the SROM */
839 int cfrv; /* Card CFRV copy */
840 int rx_ovf; /* Check for 'RX overflow' tag */
841 bool useSROM; /* For non-DEC card use SROM */
842 bool useMII; /* Infoblock using the MII */
843 int asBitValid; /* Autosense bits in GEP? */
844 int asPolarity; /* 0 => asserted high */
845 int asBit; /* Autosense bit number in GEP */
846 int defMedium; /* SROM default medium */
847 int tcount; /* Last infoblock number */
848 int infoblock_init; /* Initialised this infoblock? */
849 int infoleaf_offset; /* SROM infoleaf for controller */
850 s32 infoblock_csr6; /* csr6 value in SROM infoblock */
851 int infoblock_media; /* infoblock media */
852 int (*infoleaf_fn)(struct net_device *); /* Pointer to infoleaf function */
853 u_char *rst; /* Pointer to Type 5 reset info */
854 u_char ibn; /* Infoblock number */
855 struct parameters params; /* Command line/ #defined params */
856 struct device *gendev; /* Generic device */
857 dma_addr_t dma_rings; /* DMA handle for rings */
858 int dma_size; /* Size of the DMA area */
859 char *rx_bufs; /* rx bufs on alpha, sparc, ... */
860 };
861
862 /*
863 ** To get around certain poxy cards that don't provide an SROM
864 ** for the second and subsequent DECchips, I have to key off the first
865 ** chip's address. I'll assume there's not a bad SROM iff:
866 **
867 ** o the chipset is the same
868 ** o the bus number is the same and > 0
869 ** o the sum of all the returned hw address bytes is 0 or 0x5fa
870 **
871 ** Also have to save the irq for those cards whose hardware designers
872 ** can't follow the PCI to PCI Bridge Architecture spec.
873 */
874 static struct {
875 int chipset;
876 int bus;
877 int irq;
878 u_char addr[ETH_ALEN];
879 } last = {0,};
880
881 /*
882 ** The transmit ring full condition is described by the tx_old and tx_new
883 ** pointers by:
884 ** tx_old = tx_new Empty ring
885 ** tx_old = tx_new+1 Full ring
886 ** tx_old+txRingSize = tx_new+1 Full ring (wrapped condition)
887 */
888 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
889 lp->tx_old+lp->txRingSize-lp->tx_new-1:\
890 lp->tx_old -lp->tx_new-1)
891
892 #define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
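/*
** Worked example (illustrative): with txRingSize = 32, tx_old == tx_new == 4
** gives TX_BUFFS_AVAIL = 31 (empty ring - one slot is always kept free);
** tx_old = 5, tx_new = 4 gives 0 (full ring); the wrapped case tx_old = 0,
** tx_new = 31 also gives 0.
*/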
893
894 /*
895 ** Public Functions
896 */
897 static int de4x5_open(struct net_device *dev);
898 static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb,
899 struct net_device *dev);
900 static irqreturn_t de4x5_interrupt(int irq, void *dev_id);
901 static int de4x5_close(struct net_device *dev);
902 static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
903 static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
904 static void set_multicast_list(struct net_device *dev);
905 static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
906
907 /*
908 ** Private functions
909 */
910 static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev);
911 static int de4x5_init(struct net_device *dev);
912 static int de4x5_sw_reset(struct net_device *dev);
913 static int de4x5_rx(struct net_device *dev);
914 static int de4x5_tx(struct net_device *dev);
915 static void de4x5_ast(struct timer_list *t);
916 static int de4x5_txur(struct net_device *dev);
917 static int de4x5_rx_ovfc(struct net_device *dev);
918
919 static int autoconf_media(struct net_device *dev);
920 static void create_packet(struct net_device *dev, char *frame, int len);
921 static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
922 static int dc21040_autoconf(struct net_device *dev);
923 static int dc21041_autoconf(struct net_device *dev);
924 static int dc21140m_autoconf(struct net_device *dev);
925 static int dc2114x_autoconf(struct net_device *dev);
926 static int srom_autoconf(struct net_device *dev);
927 static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *));
928 static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int));
929 static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
930 static int test_for_100Mb(struct net_device *dev, int msec);
931 static int wait_for_link(struct net_device *dev);
932 static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
933 static int is_spd_100(struct net_device *dev);
934 static int is_100_up(struct net_device *dev);
935 static int is_10_up(struct net_device *dev);
936 static int is_anc_capable(struct net_device *dev);
937 static int ping_media(struct net_device *dev, int msec);
938 static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len);
939 static void de4x5_free_rx_buffs(struct net_device *dev);
940 static void de4x5_free_tx_buffs(struct net_device *dev);
941 static void de4x5_save_skbs(struct net_device *dev);
942 static void de4x5_rst_desc_ring(struct net_device *dev);
943 static void de4x5_cache_state(struct net_device *dev, int flag);
944 static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
945 static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
946 static struct sk_buff *de4x5_get_cache(struct net_device *dev);
947 static void de4x5_setup_intr(struct net_device *dev);
948 static void de4x5_init_connection(struct net_device *dev);
949 static int de4x5_reset_phy(struct net_device *dev);
950 static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr);
951 static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
952 static int test_tp(struct net_device *dev, s32 msec);
953 static int EISA_signature(char *name, struct device *device);
954 static int PCI_signature(char *name, struct de4x5_private *lp);
955 static void DevicePresent(struct net_device *dev, u_long iobase);
956 static void enet_addr_rst(u_long aprom_addr);
957 static int de4x5_bad_srom(struct de4x5_private *lp);
958 static short srom_rd(u_long address, u_char offset);
959 static void srom_latch(u_int command, u_long address);
960 static void srom_command(u_int command, u_long address);
961 static void srom_address(u_int command, u_long address, u_char offset);
962 static short srom_data(u_int command, u_long address);
963 /*static void srom_busy(u_int command, u_long address);*/
964 static void sendto_srom(u_int command, u_long addr);
965 static int getfrom_srom(u_long addr);
966 static int srom_map_media(struct net_device *dev);
967 static int srom_infoleaf_info(struct net_device *dev);
968 static void srom_init(struct net_device *dev);
969 static void srom_exec(struct net_device *dev, u_char *p);
970 static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
971 static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
972 static int mii_rdata(u_long ioaddr);
973 static void mii_wdata(int data, int len, u_long ioaddr);
974 static void mii_ta(u_long rw, u_long ioaddr);
975 static int mii_swap(int data, int len);
976 static void mii_address(u_char addr, u_long ioaddr);
977 static void sendto_mii(u32 command, int data, u_long ioaddr);
978 static int getfrom_mii(u32 command, u_long ioaddr);
979 static int mii_get_oui(u_char phyaddr, u_long ioaddr);
980 static int mii_get_phy(struct net_device *dev);
981 static void SetMulticastFilter(struct net_device *dev);
982 static int get_hw_addr(struct net_device *dev);
983 static void srom_repair(struct net_device *dev, int card);
984 static int test_bad_enet(struct net_device *dev, int status);
985 static int an_exception(struct de4x5_private *lp);
986 static char *build_setup_frame(struct net_device *dev, int mode);
987 static void disable_ast(struct net_device *dev);
988 static long de4x5_switch_mac_port(struct net_device *dev);
989 static int gep_rd(struct net_device *dev);
990 static void gep_wr(s32 data, struct net_device *dev);
991 static void yawn(struct net_device *dev, int state);
992 static void de4x5_parse_params(struct net_device *dev);
993 static void de4x5_dbg_open(struct net_device *dev);
994 static void de4x5_dbg_mii(struct net_device *dev, int k);
995 static void de4x5_dbg_media(struct net_device *dev);
996 static void de4x5_dbg_srom(struct de4x5_srom *p);
997 static void de4x5_dbg_rx(struct sk_buff *skb, int len);
998 static int dc21041_infoleaf(struct net_device *dev);
999 static int dc21140_infoleaf(struct net_device *dev);
1000 static int dc21142_infoleaf(struct net_device *dev);
1001 static int dc21143_infoleaf(struct net_device *dev);
1002 static int type0_infoblock(struct net_device *dev, u_char count, u_char *p);
1003 static int type1_infoblock(struct net_device *dev, u_char count, u_char *p);
1004 static int type2_infoblock(struct net_device *dev, u_char count, u_char *p);
1005 static int type3_infoblock(struct net_device *dev, u_char count, u_char *p);
1006 static int type4_infoblock(struct net_device *dev, u_char count, u_char *p);
1007 static int type5_infoblock(struct net_device *dev, u_char count, u_char *p);
1008 static int compact_infoblock(struct net_device *dev, u_char count, u_char *p);
1009
1010 /*
1011 ** Note now that module autoprobing is allowed under EISA and PCI. The
1012 ** IRQ lines will not be auto-detected; instead I'll rely on the BIOSes
1013 ** to "do the right thing".
1014 */
1015
1016 static int io=0x0;/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED */
1017
1018 module_param_hw(io, int, ioport, 0);
1019 module_param(de4x5_debug, int, 0);
1020 module_param(dec_only, int, 0);
1021 module_param(args, charp, 0);
1022
1023 MODULE_PARM_DESC(io, "de4x5 I/O base address");
1024 MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
1025 MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
1026 MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
1027 MODULE_LICENSE("GPL");
1028
1029 /*
1030 ** List the SROM infoleaf functions and chipsets
1031 */
1032 struct InfoLeaf {
1033 int chipset;
1034 int (*fn)(struct net_device *);
1035 };
1036 static struct InfoLeaf infoleaf_array[] = {
1037 {DC21041, dc21041_infoleaf},
1038 {DC21140, dc21140_infoleaf},
1039 {DC21142, dc21142_infoleaf},
1040 {DC21143, dc21143_infoleaf}
1041 };
1042 #define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array)
1043
1044 /*
1045 ** List the SROM info block functions
1046 */
1047 static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
1048 type0_infoblock,
1049 type1_infoblock,
1050 type2_infoblock,
1051 type3_infoblock,
1052 type4_infoblock,
1053 type5_infoblock,
1054 compact_infoblock
1055 };
1056
1057 #define COMPACT (ARRAY_SIZE(dc_infoblock) - 1)
1058
1059 /*
1060 ** Miscellaneous defines...
1061 */
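/*
** Descriptive note (not in the original): RESET_DE4X5 pulses the software
** reset bit (BMR_SWR) in the Bus Mode Register, restores the original BMR
** value and then does a few dummy reads to let the reset settle.
*/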
1062 #define RESET_DE4X5 {\
1063 int i;\
1064 i=inl(DE4X5_BMR);\
1065 mdelay(1);\
1066 outl(i | BMR_SWR, DE4X5_BMR);\
1067 mdelay(1);\
1068 outl(i, DE4X5_BMR);\
1069 mdelay(1);\
1070 for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
1071 mdelay(1);\
1072 }
1073
1074 #define PHY_HARD_RESET {\
1075 outl(GEP_HRST, DE4X5_GEP); /* Hard RESET the PHY dev. */\
1076 mdelay(1); /* Assert for 1ms */\
1077 outl(0x00, DE4X5_GEP);\
1078 mdelay(2); /* Wait for 2ms */\
1079 }
1080
1081 static const struct net_device_ops de4x5_netdev_ops = {
1082 .ndo_open = de4x5_open,
1083 .ndo_stop = de4x5_close,
1084 .ndo_start_xmit = de4x5_queue_pkt,
1085 .ndo_get_stats = de4x5_get_stats,
1086 .ndo_set_rx_mode = set_multicast_list,
1087 .ndo_do_ioctl = de4x5_ioctl,
1088 .ndo_set_mac_address= eth_mac_addr,
1089 .ndo_validate_addr = eth_validate_addr,
1090 };
1091
1092
1093 static int
1094 de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1095 {
1096 char name[DE4X5_NAME_LENGTH + 1];
1097 struct de4x5_private *lp = netdev_priv(dev);
1098 struct pci_dev *pdev = NULL;
1099 int i, status=0;
1100
1101 dev_set_drvdata(gendev, dev);
1102
1103 /* Ensure we're not sleeping */
1104 if (lp->bus == EISA) {
1105 outb(WAKEUP, PCI_CFPM);
1106 } else {
1107 pdev = to_pci_dev (gendev);
1108 pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
1109 }
1110 mdelay(10);
1111
1112 RESET_DE4X5;
1113
1114 if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
1115 return -ENXIO; /* Hardware could not reset */
1116 }
1117
1118 /*
1119 ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
1120 */
1121 lp->useSROM = false;
1122 if (lp->bus == PCI) {
1123 PCI_signature(name, lp);
1124 } else {
1125 EISA_signature(name, gendev);
1126 }
1127
1128 if (*name == '\0') { /* Not found a board signature */
1129 return -ENXIO;
1130 }
1131
1132 dev->base_addr = iobase;
1133 printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);
1134
1135 status = get_hw_addr(dev);
1136 printk(", h/w address %pM\n", dev->dev_addr);
1137
1138 if (status != 0) {
1139 printk(" which has an Ethernet PROM CRC error.\n");
1140 return -ENXIO;
1141 } else {
1142 skb_queue_head_init(&lp->cache.queue);
1143 lp->cache.gepc = GEP_INIT;
1144 lp->asBit = GEP_SLNK;
1145 lp->asPolarity = GEP_SLNK;
1146 lp->asBitValid = ~0;
1147 lp->timeout = -1;
1148 lp->gendev = gendev;
1149 spin_lock_init(&lp->lock);
1150 timer_setup(&lp->timer, de4x5_ast, 0);
1151 de4x5_parse_params(dev);
1152
1153 /*
1154 ** Choose correct autosensing in case someone messed up
1155 */
1156 lp->autosense = lp->params.autosense;
1157 if (lp->chipset != DC21140) {
1158 if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
1159 lp->params.autosense = TP;
1160 }
1161 if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
1162 lp->params.autosense = BNC;
1163 }
1164 }
1165 lp->fdx = lp->params.fdx;
1166 sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));
1167
1168 lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
1169 #if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
1170 lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
1171 #endif
1172 lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
1173 &lp->dma_rings, GFP_ATOMIC);
1174 if (lp->rx_ring == NULL) {
1175 return -ENOMEM;
1176 }
1177
1178 lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
1179
1180 /*
1181 ** Set up the RX descriptor ring (Intels)
1182 ** Allocate contiguous receive buffers, long word aligned (Alphas)
1183 */
1184 #if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
1185 for (i=0; i<NUM_RX_DESC; i++) {
1186 lp->rx_ring[i].status = 0;
1187 lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
1188 lp->rx_ring[i].buf = 0;
1189 lp->rx_ring[i].next = 0;
1190 lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
1191 }
1192
1193 #else
1194 {
1195 dma_addr_t dma_rx_bufs;
1196
1197 dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC)
1198 * sizeof(struct de4x5_desc);
1199 dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
1200 lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC
1201 + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN);
1202 for (i=0; i<NUM_RX_DESC; i++) {
1203 lp->rx_ring[i].status = 0;
1204 lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
1205 lp->rx_ring[i].buf =
1206 cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ);
1207 lp->rx_ring[i].next = 0;
1208 lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
1209 }
1210
1211 }
1212 #endif
1213
1214 barrier();
1215
1216 lp->rxRingSize = NUM_RX_DESC;
1217 lp->txRingSize = NUM_TX_DESC;
1218
1219 /* Write the end of list marker to the descriptor lists */
1220 lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
1221 lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
1222
1223 /* Tell the adapter where the TX/RX rings are located. */
1224 outl(lp->dma_rings, DE4X5_RRBA);
1225 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
1226 DE4X5_TRBA);
1227
1228 /* Initialise the IRQ mask and Enable/Disable */
1229 lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
1230 lp->irq_en = IMR_NIM | IMR_AIM;
1231
1232 /* Create a loopback packet frame for later media probing */
1233 create_packet(dev, lp->frame, sizeof(lp->frame));
1234
1235 /* Check if the RX overflow bug needs testing for */
1236 i = lp->cfrv & 0x000000fe;
1237 if ((lp->chipset == DC21140) && (i == 0x20)) {
1238 lp->rx_ovf = 1;
1239 }
1240
1241 /* Initialise the SROM pointers if possible */
1242 if (lp->useSROM) {
1243 lp->state = INITIALISED;
1244 if (srom_infoleaf_info(dev)) {
1245 dma_free_coherent (gendev, lp->dma_size,
1246 lp->rx_ring, lp->dma_rings);
1247 return -ENXIO;
1248 }
1249 srom_init(dev);
1250 }
1251
1252 lp->state = CLOSED;
1253
1254 /*
1255 ** Check for an MII interface
1256 */
1257 if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
1258 mii_get_phy(dev);
1259 }
1260
1261 printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
1262 ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
1263 }
1264
1265 if (de4x5_debug & DEBUG_VERSION) {
1266 printk(version);
1267 }
1268
1269 /* The DE4X5-specific entries in the device structure. */
1270 SET_NETDEV_DEV(dev, gendev);
1271 dev->netdev_ops = &de4x5_netdev_ops;
1272 dev->mem_start = 0;
1273
1274 /* Fill in the generic fields of the device structure. */
1275 if ((status = register_netdev (dev))) {
1276 dma_free_coherent (gendev, lp->dma_size,
1277 lp->rx_ring, lp->dma_rings);
1278 return status;
1279 }
1280
1281 /* Let the adapter sleep to save power */
1282 yawn(dev, SLEEP);
1283
1284 return status;
1285 }
1286
1287
1288 static int
1289 de4x5_open(struct net_device *dev)
1290 {
1291 struct de4x5_private *lp = netdev_priv(dev);
1292 u_long iobase = dev->base_addr;
1293 int i, status = 0;
1294 s32 omr;
1295
1296 /* Allocate the RX buffers */
1297 for (i=0; i<lp->rxRingSize; i++) {
1298 if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
1299 de4x5_free_rx_buffs(dev);
1300 return -EAGAIN;
1301 }
1302 }
1303
1304 /*
1305 ** Wake up the adapter
1306 */
1307 yawn(dev, WAKEUP);
1308
1309 /*
1310 ** Re-initialize the DE4X5...
1311 */
1312 status = de4x5_init(dev);
1313 spin_lock_init(&lp->lock);
1314 lp->state = OPEN;
1315 de4x5_dbg_open(dev);
1316
1317 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1318 lp->adapter_name, dev)) {
1319 printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq);
1320 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1321 lp->adapter_name, dev)) {
1322 printk("\n Cannot get IRQ- reconfigure your hardware.\n");
1323 disable_ast(dev);
1324 de4x5_free_rx_buffs(dev);
1325 de4x5_free_tx_buffs(dev);
1326 yawn(dev, SLEEP);
1327 lp->state = CLOSED;
1328 return -EAGAIN;
1329 } else {
1330 printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
1331 printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
1332 }
1333 }
1334
1335 lp->interrupt = UNMASK_INTERRUPTS;
1336 netif_trans_update(dev); /* prevent tx timeout */
1337
1338 START_DE4X5;
1339
1340 de4x5_setup_intr(dev);
1341
1342 if (de4x5_debug & DEBUG_OPEN) {
1343 printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
1344 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
1345 printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
1346 printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
1347 printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
1348 printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
1349 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
1350 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
1351 }
1352
1353 return status;
1354 }
1355
1356 /*
1357 ** Initialize the DE4X5 operating conditions. NB: a chip problem with the
1358 ** DC21140 requires using perfect filtering mode for that chip. Since I can't
1359 ** see why I'd want > 14 multicast addresses, I have changed all chips to use
1360 ** the perfect filtering mode. Keep the DMA burst length at 8: there seems
1361 ** to be data corruption problems if it is larger (UDP errors seen from a
1362 ** ttcp source).
1363 */
1364 static int
1365 de4x5_init(struct net_device *dev)
1366 {
1367 /* Lock out other processes whilst setting up the hardware */
1368 netif_stop_queue(dev);
1369
1370 de4x5_sw_reset(dev);
1371
1372 /* Autoconfigure the connected port */
1373 autoconf_media(dev);
1374
1375 return 0;
1376 }
1377
1378 static int
1379 de4x5_sw_reset(struct net_device *dev)
1380 {
1381 struct de4x5_private *lp = netdev_priv(dev);
1382 u_long iobase = dev->base_addr;
1383 int i, j, status = 0;
1384 s32 bmr, omr;
1385
1386 /* Select the MII or SRL port now and RESET the MAC */
1387 if (!lp->useSROM) {
1388 if (lp->phy[lp->active].id != 0) {
1389 lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
1390 } else {
1391 lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
1392 }
1393 de4x5_switch_mac_port(dev);
1394 }
1395
1396 /*
1397 ** Set the programmable burst length to 8 longwords for all the DC21140
1398 ** Fasternet chips and 4 longwords for all others: DMA errors result
1399 ** without these values. Cache align 16 long.
1400 */
1401 bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
1402 bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
1403 outl(bmr, DE4X5_BMR);
1404
1405 omr = inl(DE4X5_OMR) & ~OMR_PR; /* Turn off promiscuous mode */
1406 if (lp->chipset == DC21140) {
1407 omr |= (OMR_SDP | OMR_SB);
1408 }
1409 lp->setup_f = PERFECT;
1410 outl(lp->dma_rings, DE4X5_RRBA);
1411 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
1412 DE4X5_TRBA);
1413
1414 lp->rx_new = lp->rx_old = 0;
1415 lp->tx_new = lp->tx_old = 0;
1416
1417 for (i = 0; i < lp->rxRingSize; i++) {
1418 lp->rx_ring[i].status = cpu_to_le32(R_OWN);
1419 }
1420
1421 for (i = 0; i < lp->txRingSize; i++) {
1422 lp->tx_ring[i].status = cpu_to_le32(0);
1423 }
1424
1425 barrier();
1426
1427 /* Build the setup frame depending on filtering mode */
1428 SetMulticastFilter(dev);
1429
1430 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
1431 outl(omr|OMR_ST, DE4X5_OMR);
1432
1433 /* Poll for setup frame completion (adapter interrupts are disabled now) */
1434
1435 for (j=0, i=0;(i<500) && (j==0);i++) { /* Up to 500ms delay */
1436 mdelay(1);
1437 if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
1438 }
1439 outl(omr, DE4X5_OMR); /* Stop everything! */
1440
1441 if (j == 0) {
1442 printk("%s: Setup frame timed out, status %08x\n", dev->name,
1443 inl(DE4X5_STS));
1444 status = -EIO;
1445 }
1446
1447 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1448 lp->tx_old = lp->tx_new;
1449
1450 return status;
1451 }
1452
1453 /*
1454 ** Writes a socket buffer address to the next available transmit descriptor.
1455 */
1456 static netdev_tx_t
1457 de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
1458 {
1459 struct de4x5_private *lp = netdev_priv(dev);
1460 u_long iobase = dev->base_addr;
1461 u_long flags = 0;
1462
1463 netif_stop_queue(dev);
1464 if (!lp->tx_enable) /* Cannot send for now */
1465 goto tx_err;
1466
1467 /*
1468 ** Clean out the TX ring asynchronously to interrupts - sometimes the
1469 ** interrupts are lost by delayed descriptor status updates relative to
1470 ** the irq assertion, especially with a busy PCI bus.
1471 */
1472 spin_lock_irqsave(&lp->lock, flags);
1473 de4x5_tx(dev);
1474 spin_unlock_irqrestore(&lp->lock, flags);
1475
1476 /* Test if cache is already locked - requeue skb if so */
1477 if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
1478 goto tx_err;
1479
1480 /* Transmit descriptor ring full or stale skb */
1481 if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
1482 if (lp->interrupt) {
1483 de4x5_putb_cache(dev, skb); /* Requeue the buffer */
1484 } else {
1485 de4x5_put_cache(dev, skb);
1486 }
1487 if (de4x5_debug & DEBUG_TX) {
1488 printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
1489 }
1490 } else if (skb->len > 0) {
1491 /* If we already have stuff queued locally, use that first */
1492 if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
1493 de4x5_put_cache(dev, skb);
1494 skb = de4x5_get_cache(dev);
1495 }
1496
1497 while (skb && !netif_queue_stopped(dev) &&
1498 (u_long) lp->tx_skb[lp->tx_new] <= 1) {
1499 spin_lock_irqsave(&lp->lock, flags);
1500 netif_stop_queue(dev);
1501 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
1502 lp->stats.tx_bytes += skb->len;
1503 outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
1504
1505 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1506
1507 if (TX_BUFFS_AVAIL) {
1508 netif_start_queue(dev); /* Another pkt may be queued */
1509 }
1510 skb = de4x5_get_cache(dev);
1511 spin_unlock_irqrestore(&lp->lock, flags);
1512 }
1513 if (skb) de4x5_putb_cache(dev, skb);
1514 }
1515
1516 lp->cache.lock = 0;
1517
1518 return NETDEV_TX_OK;
1519 tx_err:
1520 dev_kfree_skb_any(skb);
1521 return NETDEV_TX_OK;
1522 }
1523
1524 /*
1525 ** The DE4X5 interrupt handler.
1526 **
1527 ** I/O Read/Writes through intermediate PCI bridges are never 'posted',
1528 ** so that the asserted interrupt always has some real data to work with -
1529 ** if these I/O accesses are ever changed to memory accesses, ensure the
1530 ** STS write is read immediately to complete the transaction if the adapter
1531 ** is not on bus 0. Lost interrupts can still occur when the PCI bus load
1532 ** is high and descriptor status bits cannot be set before the associated
1533 ** interrupt is asserted and this routine entered.
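**
** A minimal sketch of that read-back (illustrative only - the driver as
** written uses port I/O, where it is not needed):
**
**     outl(sts, DE4X5_STS);       /* acknowledge the interrupt sources    */
**     (void)inl(DE4X5_STS);       /* read back to force the write through */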
1534 */
1535 static irqreturn_t
1536 de4x5_interrupt(int irq, void *dev_id)
1537 {
1538 struct net_device *dev = dev_id;
1539 struct de4x5_private *lp;
1540 s32 imr, omr, sts, limit;
1541 u_long iobase;
1542 unsigned int handled = 0;
1543
1544 lp = netdev_priv(dev);
1545 spin_lock(&lp->lock);
1546 iobase = dev->base_addr;
1547
1548 DISABLE_IRQs; /* Ensure non re-entrancy */
1549
1550 if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
1551 printk("%s: Re-entering the interrupt handler.\n", dev->name);
1552
1553 synchronize_irq(dev->irq);
1554
1555 for (limit=0; limit<8; limit++) {
1556 sts = inl(DE4X5_STS); /* Read IRQ status */
1557 outl(sts, DE4X5_STS); /* Reset the board interrupts */
1558
1559 if (!(sts & lp->irq_mask)) break;/* All done */
1560 handled = 1;
1561
1562 if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */
1563 de4x5_rx(dev);
1564
1565 if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */
1566 de4x5_tx(dev);
1567
1568 if (sts & STS_LNF) { /* TP Link has failed */
1569 lp->irq_mask &= ~IMR_LFM;
1570 }
1571
1572 if (sts & STS_UNF) { /* Transmit underrun */
1573 de4x5_txur(dev);
1574 }
1575
1576 if (sts & STS_SE) { /* Bus Error */
1577 STOP_DE4X5;
1578 printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
1579 dev->name, sts);
1580 spin_unlock(&lp->lock);
1581 return IRQ_HANDLED;
1582 }
1583 }
1584
1585 /* Load the TX ring with any locally stored packets */
1586 if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
1587 while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
1588 de4x5_queue_pkt(de4x5_get_cache(dev), dev);
1589 }
1590 lp->cache.lock = 0;
1591 }
1592
1593 lp->interrupt = UNMASK_INTERRUPTS;
1594 ENABLE_IRQs;
1595 spin_unlock(&lp->lock);
1596
1597 return IRQ_RETVAL(handled);
1598 }
1599
1600 static int
1601 de4x5_rx(struct net_device *dev)
1602 {
1603 struct de4x5_private *lp = netdev_priv(dev);
1604 u_long iobase = dev->base_addr;
1605 int entry;
1606 s32 status;
1607
1608 for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
1609 entry=lp->rx_new) {
1610 status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
1611
1612 if (lp->rx_ovf) {
1613 if (inl(DE4X5_MFC) & MFC_FOCM) {
1614 de4x5_rx_ovfc(dev);
1615 break;
1616 }
1617 }
1618
1619 if (status & RD_FS) { /* Remember the start of frame */
1620 lp->rx_old = entry;
1621 }
1622
1623 if (status & RD_LS) { /* Valid frame status */
1624 if (lp->tx_enable) lp->linkOK++;
1625 if (status & RD_ES) { /* There was an error. */
1626 lp->stats.rx_errors++; /* Update the error stats. */
1627 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
1628 if (status & RD_CE) lp->stats.rx_crc_errors++;
1629 if (status & RD_OF) lp->stats.rx_fifo_errors++;
1630 if (status & RD_TL) lp->stats.rx_length_errors++;
1631 if (status & RD_RF) lp->pktStats.rx_runt_frames++;
1632 if (status & RD_CS) lp->pktStats.rx_collision++;
1633 if (status & RD_DB) lp->pktStats.rx_dribble++;
1634 if (status & RD_OF) lp->pktStats.rx_overflow++;
1635 } else { /* A valid frame received */
1636 struct sk_buff *skb;
1637 short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
1638 >> 16) - 4;
1639
1640 if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
1641 printk("%s: Insufficient memory; nuking packet.\n",
1642 dev->name);
1643 lp->stats.rx_dropped++;
1644 } else {
1645 de4x5_dbg_rx(skb, pkt_len);
1646
1647 /* Push up the protocol stack */
1648 skb->protocol=eth_type_trans(skb,dev);
1649 de4x5_local_stats(dev, skb->data, pkt_len);
1650 netif_rx(skb);
1651
1652 /* Update stats */
1653 lp->stats.rx_packets++;
1654 lp->stats.rx_bytes += pkt_len;
1655 }
1656 }
1657
1658 /* Change buffer ownership for this frame, back to the adapter */
1659 for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
1660 lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
1661 barrier();
1662 }
1663 lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
1664 barrier();
1665 }
1666
1667 /*
1668 ** Update entry information
1669 */
1670 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1671 }
1672
1673 return 0;
1674 }
1675
1676 static inline void
1677 de4x5_free_tx_buff(struct de4x5_private *lp, int entry)
1678 {
1679 dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
1680 le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
1681 DMA_TO_DEVICE);
1682 if ((u_long) lp->tx_skb[entry] > 1)
1683 dev_kfree_skb_irq(lp->tx_skb[entry]);
1684 lp->tx_skb[entry] = NULL;
1685 }
1686
1687 /*
1688 ** Buffer sent - check for TX buffer errors.
1689 */
1690 static int
1691 de4x5_tx(struct net_device *dev)
1692 {
1693 struct de4x5_private *lp = netdev_priv(dev);
1694 u_long iobase = dev->base_addr;
1695 int entry;
1696 s32 status;
1697
1698 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1699 status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
1700 if (status < 0) { /* Buffer not sent yet */
1701 break;
1702 } else if (status != 0x7fffffff) { /* Not setup frame */
1703 if (status & TD_ES) { /* An error happened */
1704 lp->stats.tx_errors++;
1705 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1706 if (status & TD_LC) lp->stats.tx_window_errors++;
1707 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1708 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1709 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1710
1711 if (TX_PKT_PENDING) {
1712 outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */
1713 }
1714 } else { /* Packet sent */
1715 lp->stats.tx_packets++;
1716 if (lp->tx_enable) lp->linkOK++;
1717 }
1718 /* Update the collision counter */
1719 lp->stats.collisions += ((status & TD_EC) ? 16 :
1720 ((status & TD_CC) >> 3));
1721
1722 /* Free the buffer. */
1723 if (lp->tx_skb[entry] != NULL)
1724 de4x5_free_tx_buff(lp, entry);
1725 }
1726
1727 /* Update all the pointers */
1728 lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
1729 }
1730
1731 /* Any resources available? */
1732 if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
1733 if (lp->interrupt)
1734 netif_wake_queue(dev);
1735 else
1736 netif_start_queue(dev);
1737 }
1738
1739 return 0;
1740 }
1741
1742 static void
1743 de4x5_ast(struct timer_list *t)
1744 {
1745 struct de4x5_private *lp = from_timer(lp, t, timer);
1746 struct net_device *dev = dev_get_drvdata(lp->gendev);
1747 int next_tick = DE4X5_AUTOSENSE_MS;
1748 int dt;
1749
1750 if (lp->useSROM)
1751 next_tick = srom_autoconf(dev);
1752 else if (lp->chipset == DC21140)
1753 next_tick = dc21140m_autoconf(dev);
1754 else if (lp->chipset == DC21041)
1755 next_tick = dc21041_autoconf(dev);
1756 else if (lp->chipset == DC21040)
1757 next_tick = dc21040_autoconf(dev);
1758 lp->linkOK = 0;
1759
1760 dt = (next_tick * HZ) / 1000;
1761
1762 if (!dt)
1763 dt = 1;
1764
1765 mod_timer(&lp->timer, jiffies + dt);
1766 }
1767
1768 static int
1769 de4x5_txur(struct net_device *dev)
1770 {
1771 struct de4x5_private *lp = netdev_priv(dev);
1772 u_long iobase = dev->base_addr;
1773 int omr;
1774
1775 omr = inl(DE4X5_OMR);
1776 if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
1777 omr &= ~(OMR_ST|OMR_SR);
1778 outl(omr, DE4X5_OMR);
1779 while (inl(DE4X5_STS) & STS_TS);
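/*
** Bump the TX threshold (the OMR_TR field) up one step; once it has
** saturated, fall back to store-and-forward operation instead.
*/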
1780 if ((omr & OMR_TR) < OMR_TR) {
1781 omr += 0x4000;
1782 } else {
1783 omr |= OMR_SF;
1784 }
1785 outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
1786 }
1787
1788 return 0;
1789 }
1790
1791 static int
1792 de4x5_rx_ovfc(struct net_device *dev)
1793 {
1794 struct de4x5_private *lp = netdev_priv(dev);
1795 u_long iobase = dev->base_addr;
1796 int omr;
1797
1798 omr = inl(DE4X5_OMR);
1799 outl(omr & ~OMR_SR, DE4X5_OMR);
1800 while (inl(DE4X5_STS) & STS_RS);
1801
1802 for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
1803 lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
1804 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1805 }
1806
1807 outl(omr, DE4X5_OMR);
1808
1809 return 0;
1810 }
1811
1812 static int
1813 de4x5_close(struct net_device *dev)
1814 {
1815 struct de4x5_private *lp = netdev_priv(dev);
1816 u_long iobase = dev->base_addr;
1817 s32 imr, omr;
1818
1819 disable_ast(dev);
1820
1821 netif_stop_queue(dev);
1822
1823 if (de4x5_debug & DEBUG_CLOSE) {
1824 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1825 dev->name, inl(DE4X5_STS));
1826 }
1827
1828 /*
1829 ** We stop the DE4X5 here... mask interrupts and stop TX & RX
1830 */
1831 DISABLE_IRQs;
1832 STOP_DE4X5;
1833
1834 /* Free the associated irq */
1835 free_irq(dev->irq, dev);
1836 lp->state = CLOSED;
1837
1838 /* Free any socket buffers */
1839 de4x5_free_rx_buffs(dev);
1840 de4x5_free_tx_buffs(dev);
1841
1842 /* Put the adapter to sleep to save power */
1843 yawn(dev, SLEEP);
1844
1845 return 0;
1846 }
1847
1848 static struct net_device_stats *
1849 de4x5_get_stats(struct net_device *dev)
1850 {
1851 struct de4x5_private *lp = netdev_priv(dev);
1852 u_long iobase = dev->base_addr;
1853
1854 lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1855
1856 return &lp->stats;
1857 }
1858
1859 static void
1860 de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
1861 {
1862 struct de4x5_private *lp = netdev_priv(dev);
1863 int i;
1864
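/* Bin the packet length; setting i = DE4X5_PKT_STAT_SZ below ends the
   search as soon as the right bin has been found. */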
1865 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1866 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1867 lp->pktStats.bins[i]++;
1868 i = DE4X5_PKT_STAT_SZ;
1869 }
1870 }
1871 if (is_multicast_ether_addr(buf)) {
1872 if (is_broadcast_ether_addr(buf)) {
1873 lp->pktStats.broadcast++;
1874 } else {
1875 lp->pktStats.multicast++;
1876 }
1877 } else if (ether_addr_equal(buf, dev->dev_addr)) {
1878 lp->pktStats.unicast++;
1879 }
1880
1881 lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
1882 if (lp->pktStats.bins[0] == 0) { /* Reset counters */
1883 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1884 }
1885 }
1886
1887 /*
1888 ** Removes the TD_IC flag from previous descriptor to improve TX performance.
1889 ** If the flag is changed on a descriptor that is being read by the hardware,
1890 ** I assume PCI transaction ordering will mean you are either successful or
1891 ** just miss asserting the change to the hardware. Anyway you're messing with
1892 ** a descriptor you don't own, but this shouldn't kill the chip provided
1893 ** the descriptor register is read only to the hardware.
1894 */
1895 static void
1896 load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
1897 {
1898 struct de4x5_private *lp = netdev_priv(dev);
1899 int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1);
1900 dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE);
1901
1902 lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
1903 lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
1904 lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
1905 lp->tx_skb[lp->tx_new] = skb;
1906 lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
1907 barrier();
1908
1909 lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
1910 barrier();
1911 }
1912
1913 /*
1914 ** Set or clear the multicast filter for this adaptor.
1915 */
1916 static void
1917 set_multicast_list(struct net_device *dev)
1918 {
1919 struct de4x5_private *lp = netdev_priv(dev);
1920 u_long iobase = dev->base_addr;
1921
1922 /* First, double check that the adapter is open */
1923 if (lp->state == OPEN) {
1924 if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
1925 u32 omr;
1926 omr = inl(DE4X5_OMR);
1927 omr |= OMR_PR;
1928 outl(omr, DE4X5_OMR);
1929 } else {
1930 SetMulticastFilter(dev);
1931 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1932 SETUP_FRAME_LEN, (struct sk_buff *)1);
1933
1934 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1935 outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
1936 netif_trans_update(dev); /* prevent tx timeout */
1937 }
1938 }
1939 }
1940
1941 /*
1942 ** Calculate the hash code and update the logical address filter
1943 ** from a list of ethernet multicast addresses.
1944 ** Little endian crc one liner from Matt Thomas, DEC.
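**
** A worked example with a made-up hash value (the real value depends on the
** address): if the 9 LSBs of the CRC come out as 0x00d, then
**
**     byte = 0x00d >> 3 = 1;          bit = 1 << (0x00d & 0x07) = 0x20;
**     byte <<= 1;                     /* -> 2 (frame holds 16 bits/longword) */
**     byte -= 1;                      /* odd word, so step back -> 1         */
**     lp->setup_frame[1] |= 0x20;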
1945 */
1946 static void
1947 SetMulticastFilter(struct net_device *dev)
1948 {
1949 struct de4x5_private *lp = netdev_priv(dev);
1950 struct netdev_hw_addr *ha;
1951 u_long iobase = dev->base_addr;
1952 int i, bit, byte;
1953 u16 hashcode;
1954 u32 omr, crc;
1955 char *pa;
1956 unsigned char *addrs;
1957
1958 omr = inl(DE4X5_OMR);
1959 omr &= ~(OMR_PR | OMR_PM);
1960 pa = build_setup_frame(dev, ALL); /* Build the basic frame */
1961
1962 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
1963 omr |= OMR_PM; /* Pass all multicasts */
1964 } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
1965 netdev_for_each_mc_addr(ha, dev) {
1966 crc = ether_crc_le(ETH_ALEN, ha->addr);
1967 hashcode = crc & DE4X5_HASH_BITS; /* hashcode is 9 LSb of CRC */
1968
1969 byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
1970 bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */
1971
1972 byte <<= 1; /* calc offset into setup frame */
1973 if (byte & 0x02) {
1974 byte -= 1;
1975 }
1976 lp->setup_frame[byte] |= bit;
1977 }
1978 } else { /* Perfect filtering */
1979 netdev_for_each_mc_addr(ha, dev) {
1980 addrs = ha->addr;
1981 for (i=0; i<ETH_ALEN; i++) {
1982 *(pa + (i&1)) = *addrs++;
1983 if (i & 0x01) pa += 4;
1984 }
1985 }
1986 }
1987 outl(omr, DE4X5_OMR);
1988 }
1989
1990 #ifdef CONFIG_EISA
1991
1992 static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
1993
1994 static int de4x5_eisa_probe(struct device *gendev)
1995 {
1996 struct eisa_device *edev;
1997 u_long iobase;
1998 u_char irq, regval;
1999 u_short vendor;
2000 u32 cfid;
2001 int status, device;
2002 struct net_device *dev;
2003 struct de4x5_private *lp;
2004
2005 edev = to_eisa_device (gendev);
2006 iobase = edev->base_addr;
2007
2008 if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5"))
2009 return -EBUSY;
2010
2011 if (!request_region (iobase + DE4X5_EISA_IO_PORTS,
2012 DE4X5_EISA_TOTAL_SIZE, "de4x5")) {
2013 status = -EBUSY;
2014 goto release_reg_1;
2015 }
2016
2017 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
2018 status = -ENOMEM;
2019 goto release_reg_2;
2020 }
2021 lp = netdev_priv(dev);
2022
2023 cfid = (u32) inl(PCI_CFID);
2024 lp->cfrv = (u_short) inl(PCI_CFRV);
2025 device = (cfid >> 8) & 0x00ffff00;
2026 vendor = (u_short) cfid;
2027
2028 /* Read the EISA Configuration Registers */
2029 regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
2030 #ifdef CONFIG_ALPHA
2031 /* Looks like the Jensen firmware (rev 2.2) doesn't really
2032 * care about the EISA configuration, and thus doesn't
2033 * configure the PLX bridge properly. Oh well... Simply mimic
2034 * the EISA config file to sort it out. */
2035
2036 /* EISA REG1: Assert DecChip 21040 HW Reset */
2037 outb (ER1_IAM | 1, EISA_REG1);
2038 mdelay (1);
2039
2040 /* EISA REG1: Deassert DecChip 21040 HW Reset */
2041 outb (ER1_IAM, EISA_REG1);
2042 mdelay (1);
2043
2044 /* EISA REG3: R/W Burst Transfer Enable */
2045 outb (ER3_BWE | ER3_BRE, EISA_REG3);
2046
2047 /* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */
2048 outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
2049 #endif
2050 irq = de4x5_irq[(regval >> 1) & 0x03];
2051
2052 if (is_DC2114x) {
2053 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
2054 }
2055 lp->chipset = device;
2056 lp->bus = EISA;
2057
2058 /* Write the PCI Configuration Registers */
2059 outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
2060 outl(0x00006000, PCI_CFLT);
2061 outl(iobase, PCI_CBIO);
2062
2063 DevicePresent(dev, EISA_APROM);
2064
2065 dev->irq = irq;
2066
2067 if (!(status = de4x5_hw_init (dev, iobase, gendev))) {
2068 return 0;
2069 }
2070
2071 free_netdev (dev);
2072 release_reg_2:
2073 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
2074 release_reg_1:
2075 release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
2076
2077 return status;
2078 }
2079
2080 static int de4x5_eisa_remove(struct device *device)
2081 {
2082 struct net_device *dev;
2083 u_long iobase;
2084
2085 dev = dev_get_drvdata(device);
2086 iobase = dev->base_addr;
2087
2088 unregister_netdev (dev);
2089 free_netdev (dev);
2090 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
2091 release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
2092
2093 return 0;
2094 }
2095
2096 static const struct eisa_device_id de4x5_eisa_ids[] = {
2097 { "DEC4250", 0 }, /* 0 is the board name index... */
2098 { "" }
2099 };
2100 MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
2101
2102 static struct eisa_driver de4x5_eisa_driver = {
2103 .id_table = de4x5_eisa_ids,
2104 .driver = {
2105 .name = "de4x5",
2106 .probe = de4x5_eisa_probe,
2107 .remove = de4x5_eisa_remove,
2108 }
2109 };
2110 MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
2111 #endif
2112
2113 #ifdef CONFIG_PCI
2114
2115 /*
2116 ** This function searches the current bus (which is >0) for a DECchip with an
2117 ** SROM, so that in multiport cards that have one SROM shared between multiple
2118 ** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
2119 ** For single port cards this is a time waster...
2120 */
2121 static void
2122 srom_search(struct net_device *dev, struct pci_dev *pdev)
2123 {
2124 u_char pb;
2125 u_short vendor, status;
2126 u_int irq = 0, device;
2127 u_long iobase = 0; /* Clear upper 32 bits in Alphas */
2128 int i, j;
2129 struct de4x5_private *lp = netdev_priv(dev);
2130 struct pci_dev *this_dev;
2131
2132 list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
2133 vendor = this_dev->vendor;
2134 device = this_dev->device << 8;
2135 if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
2136
2137 /* Get the chip configuration revision register */
2138 pb = this_dev->bus->number;
2139
2140 /* Set the device number information */
2141 lp->device = PCI_SLOT(this_dev->devfn);
2142 lp->bus_num = pb;
2143
2144 /* Set the chipset information */
2145 if (is_DC2114x) {
2146 device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
2147 ? DC21142 : DC21143);
2148 }
2149 lp->chipset = device;
2150
2151 /* Get the board I/O address (64 bits on sparc64) */
2152 iobase = pci_resource_start(this_dev, 0);
2153
2154 /* Fetch the IRQ to be used */
2155 irq = this_dev->irq;
2156 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
2157
2158 /* Check if I/O accesses are enabled */
2159 pci_read_config_word(this_dev, PCI_COMMAND, &status);
2160 if (!(status & PCI_COMMAND_IO)) continue;
2161
2162 /* Search for a valid SROM attached to this DECchip */
2163 DevicePresent(dev, DE4X5_APROM);
2164 for (j=0, i=0; i<ETH_ALEN; i++) {
2165 j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
2166 }
2167 if (j != 0 && j != 6 * 0xff) {
2168 last.chipset = device;
2169 last.bus = pb;
2170 last.irq = irq;
2171 for (i=0; i<ETH_ALEN; i++) {
2172 last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
2173 }
2174 return;
2175 }
2176 }
2177 }
2178
2179 /*
2180 ** PCI bus I/O device probe
2181 ** NB: PCI I/O accesses and Bus Mastering are enabled by the PCI BIOS, not
2182 ** the driver. Some PCI BIOS's, pre V2.1, need the slot + features to be
2183 ** enabled by the user first in the set up utility. Hence we just check for
2184 ** enabled features and silently ignore the card if they're not.
2185 **
2186 ** STOP PRESS: Some BIOS's __require__ the driver to enable the bus mastering
2187 ** bit. Here, check for I/O accesses and then set BM. If you put the card in
2188 ** a non BM slot, you're on your own (and complain to the PC vendor that your
2189 ** PC doesn't conform to the PCI standard)!
2190 **
2191 ** This function is only compatible with the *latest* 2.1.x kernels. For 2.0.x
2192 ** kernels use the V0.535[n] drivers.
2193 */
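/*
** For comparison, a sketch of the same enable + bus-master step using the
** current PCI core helpers (illustrative only; the probe below checks and
** sets the PCI_COMMAND bits by hand):
**
**     if (pci_enable_device(pdev))
**             return -ENODEV;
**     pci_set_master(pdev);           /* sets PCI_COMMAND_MASTER for us */
*/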
2194
2195 static int de4x5_pci_probe(struct pci_dev *pdev,
2196 const struct pci_device_id *ent)
2197 {
2198 u_char pb, pbus = 0, dev_num, dnum = 0, timer;
2199 u_short vendor, status;
2200 u_int irq = 0, device;
2201 u_long iobase = 0; /* Clear upper 32 bits in Alphas */
2202 int error;
2203 struct net_device *dev;
2204 struct de4x5_private *lp;
2205
2206 dev_num = PCI_SLOT(pdev->devfn);
2207 pb = pdev->bus->number;
2208
2209 if (io) { /* probe a single PCI device */
2210 pbus = (u_short)(io >> 8);
2211 dnum = (u_short)(io & 0xff);
2212 if ((pbus != pb) || (dnum != dev_num))
2213 return -ENODEV;
2214 }
2215
2216 vendor = pdev->vendor;
2217 device = pdev->device << 8;
2218 if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x))
2219 return -ENODEV;
2220
2221 /* Ok, the device seems to be for us. */
2222 if ((error = pci_enable_device (pdev)))
2223 return error;
2224
2225 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
2226 error = -ENOMEM;
2227 goto disable_dev;
2228 }
2229
2230 lp = netdev_priv(dev);
2231 lp->bus = PCI;
2232 lp->bus_num = 0;
2233
2234 /* Search for an SROM on this bus */
2235 if (lp->bus_num != pb) {
2236 lp->bus_num = pb;
2237 srom_search(dev, pdev);
2238 }
2239
2240 /* Get the chip configuration revision register */
2241 lp->cfrv = pdev->revision;
2242
2243 /* Set the device number information */
2244 lp->device = dev_num;
2245 lp->bus_num = pb;
2246
2247 /* Set the chipset information */
2248 if (is_DC2114x) {
2249 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
2250 }
2251 lp->chipset = device;
2252
2253 /* Get the board I/O address (64 bits on sparc64) */
2254 iobase = pci_resource_start(pdev, 0);
2255
2256 /* Fetch the IRQ to be used */
2257 irq = pdev->irq;
2258 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
2259 error = -ENODEV;
2260 goto free_dev;
2261 }
2262
2263 /* Check if I/O accesses and Bus Mastering are enabled */
2264 pci_read_config_word(pdev, PCI_COMMAND, &status);
2265 #ifdef __powerpc__
2266 if (!(status & PCI_COMMAND_IO)) {
2267 status |= PCI_COMMAND_IO;
2268 pci_write_config_word(pdev, PCI_COMMAND, status);
2269 pci_read_config_word(pdev, PCI_COMMAND, &status);
2270 }
2271 #endif /* __powerpc__ */
2272 if (!(status & PCI_COMMAND_IO)) {
2273 error = -ENODEV;
2274 goto free_dev;
2275 }
2276
2277 if (!(status & PCI_COMMAND_MASTER)) {
2278 status |= PCI_COMMAND_MASTER;
2279 pci_write_config_word(pdev, PCI_COMMAND, status);
2280 pci_read_config_word(pdev, PCI_COMMAND, &status);
2281 }
2282 if (!(status & PCI_COMMAND_MASTER)) {
2283 error = -ENODEV;
2284 goto free_dev;
2285 }
2286
2287 /* Check the latency timer for values >= 0x60 */
2288 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer);
2289 if (timer < 0x60) {
2290 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60);
2291 }
2292
2293 DevicePresent(dev, DE4X5_APROM);
2294
2295 if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) {
2296 error = -EBUSY;
2297 goto free_dev;
2298 }
2299
2300 dev->irq = irq;
2301
2302 if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
2303 goto release;
2304 }
2305
2306 return 0;
2307
2308 release:
2309 release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
2310 free_dev:
2311 free_netdev (dev);
2312 disable_dev:
2313 pci_disable_device (pdev);
2314 return error;
2315 }
2316
2317 static void de4x5_pci_remove(struct pci_dev *pdev)
2318 {
2319 struct net_device *dev;
2320 u_long iobase;
2321
2322 dev = pci_get_drvdata(pdev);
2323 iobase = dev->base_addr;
2324
2325 unregister_netdev (dev);
2326 free_netdev (dev);
2327 release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
2328 pci_disable_device (pdev);
2329 }
2330
2331 static const struct pci_device_id de4x5_pci_tbl[] = {
2332 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
2333 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
2334 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
2335 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
2336 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
2337 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
2338 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142,
2339 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
2340 { },
2341 };
2342
2343 static struct pci_driver de4x5_pci_driver = {
2344 .name = "de4x5",
2345 .id_table = de4x5_pci_tbl,
2346 .probe = de4x5_pci_probe,
2347 .remove = de4x5_pci_remove,
2348 };
2349
2350 #endif
2351
2352 /*
2353 ** Auto configure the media here rather than setting the port at compile
2354 ** time. This routine is called by de4x5_init() and when a loss of media is
2355 ** detected (excessive collisions, loss of carrier, no carrier or link fail
2356 ** [TP] or no recent receive activity) to check whether the user has been
2357 ** sneaky and changed the port on us.
2358 */
2359 static int
2360 autoconf_media(struct net_device *dev)
2361 {
2362 struct de4x5_private *lp = netdev_priv(dev);
2363 u_long iobase = dev->base_addr;
2364
2365 disable_ast(dev);
2366
2367 lp->c_media = AUTO; /* Bogus last media */
2368 inl(DE4X5_MFC); /* Zero the lost frames counter */
2369 lp->media = INIT;
2370 lp->tcount = 0;
2371
2372 de4x5_ast(&lp->timer);
2373
2374 return lp->media;
2375 }
2376
2377 /*
2378 ** Autoconfigure the media when using the DC21040. AUI cannot be distinguished
2379 ** from BNC as the port has a jumper to set thick or thin wire. When set for
2380 ** BNC, the BNC port will indicate activity if it's not terminated correctly.
2381 ** The only way to test for that is to place a loopback packet onto the
2382 ** network and watch for errors. Since we're messing with the interrupt mask
2383 ** register, disable the board interrupts and do not allow any more packets to
2384 ** be queued to the hardware. Re-enable everything only when the media is
2385 ** found.
2386 ** I may have to "age out" locally queued packets so that the higher layer
2387 ** timeouts don't effectively duplicate packets on the network.
2388 */
2389 static int
2390 dc21040_autoconf(struct net_device *dev)
2391 {
2392 struct de4x5_private *lp = netdev_priv(dev);
2393 u_long iobase = dev->base_addr;
2394 int next_tick = DE4X5_AUTOSENSE_MS;
2395 s32 imr;
2396
2397 switch (lp->media) {
2398 case INIT:
2399 DISABLE_IRQs;
2400 lp->tx_enable = false;
2401 lp->timeout = -1;
2402 de4x5_save_skbs(dev);
2403 if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
2404 lp->media = TP;
2405 } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
2406 lp->media = BNC_AUI;
2407 } else if (lp->autosense == EXT_SIA) {
2408 lp->media = EXT_SIA;
2409 } else {
2410 lp->media = NC;
2411 }
2412 lp->local_state = 0;
2413 next_tick = dc21040_autoconf(dev);
2414 break;
2415
2416 case TP:
2417 next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
2418 TP_SUSPECT, test_tp);
2419 break;
2420
2421 case TP_SUSPECT:
2422 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
2423 break;
2424
2425 case BNC:
2426 case AUI:
2427 case BNC_AUI:
2428 next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
2429 BNC_AUI_SUSPECT, ping_media);
2430 break;
2431
2432 case BNC_AUI_SUSPECT:
2433 next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
2434 break;
2435
2436 case EXT_SIA:
2437 next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
2438 NC, EXT_SIA_SUSPECT, ping_media);
2439 break;
2440
2441 case EXT_SIA_SUSPECT:
2442 next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
2443 break;
2444
2445 case NC:
2446 /* default to TP for all */
2447 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
2448 if (lp->media != lp->c_media) {
2449 de4x5_dbg_media(dev);
2450 lp->c_media = lp->media;
2451 }
2452 lp->media = INIT;
2453 lp->tx_enable = false;
2454 break;
2455 }
2456
2457 return next_tick;
2458 }
2459
2460 static int
2461 dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
2462 int next_state, int suspect_state,
2463 int (*fn)(struct net_device *, int))
2464 {
2465 struct de4x5_private *lp = netdev_priv(dev);
2466 int next_tick = DE4X5_AUTOSENSE_MS;
2467 int linkBad;
2468
2469 switch (lp->local_state) {
2470 case 0:
2471 reset_init_sia(dev, csr13, csr14, csr15);
2472 lp->local_state++;
2473 next_tick = 500;
2474 break;
2475
2476 case 1:
2477 if (!lp->tx_enable) {
2478 linkBad = fn(dev, timeout);
2479 if (linkBad < 0) {
2480 next_tick = linkBad & ~TIMER_CB;
2481 } else {
2482 if (linkBad && (lp->autosense == AUTO)) {
2483 lp->local_state = 0;
2484 lp->media = next_state;
2485 } else {
2486 de4x5_init_connection(dev);
2487 }
2488 }
2489 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2490 lp->media = suspect_state;
2491 next_tick = 3000;
2492 }
2493 break;
2494 }
2495
2496 return next_tick;
2497 }
2498
2499 static int
2500 de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state,
2501 int (*fn)(struct net_device *, int),
2502 int (*asfn)(struct net_device *))
2503 {
2504 struct de4x5_private *lp = netdev_priv(dev);
2505 int next_tick = DE4X5_AUTOSENSE_MS;
2506 int linkBad;
2507
2508 switch (lp->local_state) {
2509 case 1:
2510 if (lp->linkOK) {
2511 lp->media = prev_state;
2512 } else {
2513 lp->local_state++;
2514 next_tick = asfn(dev);
2515 }
2516 break;
2517
2518 case 2:
2519 linkBad = fn(dev, timeout);
2520 if (linkBad < 0) {
2521 next_tick = linkBad & ~TIMER_CB;
2522 } else if (!linkBad) {
2523 lp->local_state--;
2524 lp->media = prev_state;
2525 } else {
2526 lp->media = INIT;
2527 lp->tcount++;
2528 }
2529 }
2530
2531 return next_tick;
2532 }
2533
2534 /*
2535 ** Autoconfigure the media when using the DC21041. AUI needs to be tested
2536 ** before BNC, because the BNC port will indicate activity if it's not
2537 ** terminated correctly. The only way to test for that is to place a loopback
2538 ** packet onto the network and watch for errors. Since we're messing with
2539 ** the interrupt mask register, disable the board interrupts and do not allow
2540 ** any more packets to be queued to the hardware. Re-enable everything only
2541 ** when the media is found.
2542 */
2543 static int
2544 dc21041_autoconf(struct net_device *dev)
2545 {
2546 struct de4x5_private *lp = netdev_priv(dev);
2547 u_long iobase = dev->base_addr;
2548 s32 sts, irqs, irq_mask, imr, omr;
2549 int next_tick = DE4X5_AUTOSENSE_MS;
2550
2551 switch (lp->media) {
2552 case INIT:
2553 DISABLE_IRQs;
2554 lp->tx_enable = false;
2555 lp->timeout = -1;
2556 de4x5_save_skbs(dev); /* Save non transmitted skb's */
2557 if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
2558 lp->media = TP; /* On chip auto negotiation is broken */
2559 } else if (lp->autosense == TP) {
2560 lp->media = TP;
2561 } else if (lp->autosense == BNC) {
2562 lp->media = BNC;
2563 } else if (lp->autosense == AUI) {
2564 lp->media = AUI;
2565 } else {
2566 lp->media = NC;
2567 }
2568 lp->local_state = 0;
2569 next_tick = dc21041_autoconf(dev);
2570 break;
2571
2572 case TP_NW:
2573 if (lp->timeout < 0) {
2574 omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */
2575 outl(omr | OMR_FDX, DE4X5_OMR);
2576 }
2577 irqs = STS_LNF | STS_LNP;
2578 irq_mask = IMR_LFM | IMR_LPM;
2579 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
2580 if (sts < 0) {
2581 next_tick = sts & ~TIMER_CB;
2582 } else {
2583 if (sts & STS_LNP) {
2584 lp->media = ANS;
2585 } else {
2586 lp->media = AUI;
2587 }
2588 next_tick = dc21041_autoconf(dev);
2589 }
2590 break;
2591
2592 case ANS:
2593 if (!lp->tx_enable) {
2594 irqs = STS_LNP;
2595 irq_mask = IMR_LPM;
2596 sts = test_ans(dev, irqs, irq_mask, 3000);
2597 if (sts < 0) {
2598 next_tick = sts & ~TIMER_CB;
2599 } else {
2600 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2601 lp->media = TP;
2602 next_tick = dc21041_autoconf(dev);
2603 } else {
2604 lp->local_state = 1;
2605 de4x5_init_connection(dev);
2606 }
2607 }
2608 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2609 lp->media = ANS_SUSPECT;
2610 next_tick = 3000;
2611 }
2612 break;
2613
2614 case ANS_SUSPECT:
2615 next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
2616 break;
2617
2618 case TP:
2619 if (!lp->tx_enable) {
2620 if (lp->timeout < 0) {
2621 omr = inl(DE4X5_OMR); /* Set up half duplex for TP */
2622 outl(omr & ~OMR_FDX, DE4X5_OMR);
2623 }
2624 irqs = STS_LNF | STS_LNP;
2625 irq_mask = IMR_LFM | IMR_LPM;
2626 sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
2627 if (sts < 0) {
2628 next_tick = sts & ~TIMER_CB;
2629 } else {
2630 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2631 if (inl(DE4X5_SISR) & SISR_NRA) {
2632 lp->media = AUI; /* Non selected port activity */
2633 } else {
2634 lp->media = BNC;
2635 }
2636 next_tick = dc21041_autoconf(dev);
2637 } else {
2638 lp->local_state = 1;
2639 de4x5_init_connection(dev);
2640 }
2641 }
2642 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2643 lp->media = TP_SUSPECT;
2644 next_tick = 3000;
2645 }
2646 break;
2647
2648 case TP_SUSPECT:
2649 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
2650 break;
2651
2652 case AUI:
2653 if (!lp->tx_enable) {
2654 if (lp->timeout < 0) {
2655 omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
2656 outl(omr & ~OMR_FDX, DE4X5_OMR);
2657 }
2658 irqs = 0;
2659 irq_mask = 0;
2660 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
2661 if (sts < 0) {
2662 next_tick = sts & ~TIMER_CB;
2663 } else {
2664 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
2665 lp->media = BNC;
2666 next_tick = dc21041_autoconf(dev);
2667 } else {
2668 lp->local_state = 1;
2669 de4x5_init_connection(dev);
2670 }
2671 }
2672 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2673 lp->media = AUI_SUSPECT;
2674 next_tick = 3000;
2675 }
2676 break;
2677
2678 case AUI_SUSPECT:
2679 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
2680 break;
2681
2682 case BNC:
2683 switch (lp->local_state) {
2684 case 0:
2685 if (lp->timeout < 0) {
2686 omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
2687 outl(omr & ~OMR_FDX, DE4X5_OMR);
2688 }
2689 irqs = 0;
2690 irq_mask = 0;
2691 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
2692 if (sts < 0) {
2693 next_tick = sts & ~TIMER_CB;
2694 } else {
2695 lp->local_state++; /* Ensure media connected */
2696 next_tick = dc21041_autoconf(dev);
2697 }
2698 break;
2699
2700 case 1:
2701 if (!lp->tx_enable) {
2702 if ((sts = ping_media(dev, 3000)) < 0) {
2703 next_tick = sts & ~TIMER_CB;
2704 } else {
2705 if (sts) {
2706 lp->local_state = 0;
2707 lp->media = NC;
2708 } else {
2709 de4x5_init_connection(dev);
2710 }
2711 }
2712 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2713 lp->media = BNC_SUSPECT;
2714 next_tick = 3000;
2715 }
2716 break;
2717 }
2718 break;
2719
2720 case BNC_SUSPECT:
2721 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
2722 break;
2723
2724 case NC:
2725 omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */
2726 outl(omr | OMR_FDX, DE4X5_OMR);
2727 reset_init_sia(dev, 0xef01, 0xffff, 0x0008);/* Initialise the SIA */
2728 if (lp->media != lp->c_media) {
2729 de4x5_dbg_media(dev);
2730 lp->c_media = lp->media;
2731 }
2732 lp->media = INIT;
2733 lp->tx_enable = false;
2734 break;
2735 }
2736
2737 return next_tick;
2738 }
2739
2740 /*
2741 ** Some autonegotiation chips are broken in that they do not return the
2742 ** acknowledge bit (anlpa & MII_ANLPA_ACK) in the link partner advertisement
2743 ** register, except at the first power up negotiation.
2744 */
2745 static int
2746 dc21140m_autoconf(struct net_device *dev)
2747 {
2748 struct de4x5_private *lp = netdev_priv(dev);
2749 int ana, anlpa, cap, cr, slnk, sr;
2750 int next_tick = DE4X5_AUTOSENSE_MS;
2751 u_long imr, omr, iobase = dev->base_addr;
2752
2753 switch(lp->media) {
2754 case INIT:
2755 if (lp->timeout < 0) {
2756 DISABLE_IRQs;
2757 lp->tx_enable = false;
2758 lp->linkOK = 0;
2759 de4x5_save_skbs(dev); /* Save non transmitted skb's */
2760 }
2761 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2762 next_tick &= ~TIMER_CB;
2763 } else {
2764 if (lp->useSROM) {
2765 if (srom_map_media(dev) < 0) {
2766 lp->tcount++;
2767 return next_tick;
2768 }
2769 srom_exec(dev, lp->phy[lp->active].gep);
2770 if (lp->infoblock_media == ANS) {
2771 ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
2772 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2773 }
2774 } else {
2775 lp->tmp = MII_SR_ASSC; /* Fake out the MII speed set */
2776 SET_10Mb;
2777 if (lp->autosense == _100Mb) {
2778 lp->media = _100Mb;
2779 } else if (lp->autosense == _10Mb) {
2780 lp->media = _10Mb;
2781 } else if ((lp->autosense == AUTO) &&
2782 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2783 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2784 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2785 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2786 lp->media = ANS;
2787 } else if (lp->autosense == AUTO) {
2788 lp->media = SPD_DET;
2789 } else if (is_spd_100(dev) && is_100_up(dev)) {
2790 lp->media = _100Mb;
2791 } else {
2792 lp->media = NC;
2793 }
2794 }
2795 lp->local_state = 0;
2796 next_tick = dc21140m_autoconf(dev);
2797 }
2798 break;
2799
2800 case ANS:
2801 switch (lp->local_state) {
2802 case 0:
2803 if (lp->timeout < 0) {
2804 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2805 }
2806 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2807 if (cr < 0) {
2808 next_tick = cr & ~TIMER_CB;
2809 } else {
2810 if (cr) {
2811 lp->local_state = 0;
2812 lp->media = SPD_DET;
2813 } else {
2814 lp->local_state++;
2815 }
2816 next_tick = dc21140m_autoconf(dev);
2817 }
2818 break;
2819
2820 case 1:
2821 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
2822 next_tick = sr & ~TIMER_CB;
2823 } else {
2824 lp->media = SPD_DET;
2825 lp->local_state = 0;
2826 if (sr) { /* Success! */
2827 lp->tmp = MII_SR_ASSC;
2828 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
2829 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2830 if (!(anlpa & MII_ANLPA_RF) &&
2831 (cap = anlpa & MII_ANLPA_TAF & ana)) {
2832 if (cap & MII_ANA_100M) {
2833 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
2834 lp->media = _100Mb;
2835 } else if (cap & MII_ANA_10M) {
2836 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
2837
2838 lp->media = _10Mb;
2839 }
2840 }
2841 } /* Auto Negotiation failed to finish */
2842 next_tick = dc21140m_autoconf(dev);
2843 } /* Auto Negotiation failed to start */
2844 break;
2845 }
2846 break;
2847
2848 case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
2849 if (lp->timeout < 0) {
2850 lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
2851 (~gep_rd(dev) & GEP_LNP));
2852 SET_100Mb_PDET;
2853 }
2854 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
2855 next_tick = slnk & ~TIMER_CB;
2856 } else {
2857 if (is_spd_100(dev) && is_100_up(dev)) {
2858 lp->media = _100Mb;
2859 } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
2860 lp->media = _10Mb;
2861 } else {
2862 lp->media = NC;
2863 }
2864 next_tick = dc21140m_autoconf(dev);
2865 }
2866 break;
2867
2868 case _100Mb: /* Set 100Mb/s */
2869 next_tick = 3000;
2870 if (!lp->tx_enable) {
2871 SET_100Mb;
2872 de4x5_init_connection(dev);
2873 } else {
2874 if (!lp->linkOK && (lp->autosense == AUTO)) {
2875 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
2876 lp->media = INIT;
2877 lp->tcount++;
2878 next_tick = DE4X5_AUTOSENSE_MS;
2879 }
2880 }
2881 }
2882 break;
2883
2884 case BNC:
2885 case AUI:
2886 case _10Mb: /* Set 10Mb/s */
2887 next_tick = 3000;
2888 if (!lp->tx_enable) {
2889 SET_10Mb;
2890 de4x5_init_connection(dev);
2891 } else {
2892 if (!lp->linkOK && (lp->autosense == AUTO)) {
2893 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
2894 lp->media = INIT;
2895 lp->tcount++;
2896 next_tick = DE4X5_AUTOSENSE_MS;
2897 }
2898 }
2899 }
2900 break;
2901
2902 case NC:
2903 if (lp->media != lp->c_media) {
2904 de4x5_dbg_media(dev);
2905 lp->c_media = lp->media;
2906 }
2907 lp->media = INIT;
2908 lp->tx_enable = false;
2909 break;
2910 }
2911
2912 return next_tick;
2913 }
2914
2915 /*
2916 ** This routine may be merged into dc21140m_autoconf() sometime as I'm
2917 ** changing how I figure out the media - but trying to keep it backwards
2918 ** compatible with the de500-xa and de500-aa.
2919 ** Whether it's BNC, AUI, SYM or MII is sorted out in the infoblock
2920 ** functions and set during de4x5_mac_port() and/or de4x5_reset_phy().
2921 ** This routine just has to figure out whether 10Mb/s or 100Mb/s is
2922 ** active.
2923 ** When autonegotiation is working, the ANS part searches the SROM for
2924 ** the highest common speed (TP) link that both can run and if that can
2925 ** be full duplex. That infoblock is executed and then the link speed set.
2926 **
2927 ** Only _10Mb and _100Mb are tested here.
2928 */
2929 static int
2930 dc2114x_autoconf(struct net_device *dev)
2931 {
2932 struct de4x5_private *lp = netdev_priv(dev);
2933 u_long iobase = dev->base_addr;
2934 s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
2935 int next_tick = DE4X5_AUTOSENSE_MS;
2936
2937 switch (lp->media) {
2938 case INIT:
2939 if (lp->timeout < 0) {
2940 DISABLE_IRQs;
2941 lp->tx_enable = false;
2942 lp->linkOK = 0;
2943 lp->timeout = -1;
2944 de4x5_save_skbs(dev); /* Save non transmitted skb's */
2945 if (lp->params.autosense & ~AUTO) {
2946 srom_map_media(dev); /* Fixed media requested */
2947 if (lp->media != lp->params.autosense) {
2948 lp->tcount++;
2949 lp->media = INIT;
2950 return next_tick;
2951 }
2952 lp->media = INIT;
2953 }
2954 }
2955 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2956 next_tick &= ~TIMER_CB;
2957 } else {
2958 if (lp->autosense == _100Mb) {
2959 lp->media = _100Mb;
2960 } else if (lp->autosense == _10Mb) {
2961 lp->media = _10Mb;
2962 } else if (lp->autosense == TP) {
2963 lp->media = TP;
2964 } else if (lp->autosense == BNC) {
2965 lp->media = BNC;
2966 } else if (lp->autosense == AUI) {
2967 lp->media = AUI;
2968 } else {
2969 lp->media = SPD_DET;
2970 if ((lp->infoblock_media == ANS) &&
2971 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2972 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2973 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2974 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2975 lp->media = ANS;
2976 }
2977 }
2978 lp->local_state = 0;
2979 next_tick = dc2114x_autoconf(dev);
2980 }
2981 break;
2982
2983 case ANS:
2984 switch (lp->local_state) {
2985 case 0:
2986 if (lp->timeout < 0) {
2987 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2988 }
2989 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2990 if (cr < 0) {
2991 next_tick = cr & ~TIMER_CB;
2992 } else {
2993 if (cr) {
2994 lp->local_state = 0;
2995 lp->media = SPD_DET;
2996 } else {
2997 lp->local_state++;
2998 }
2999 next_tick = dc2114x_autoconf(dev);
3000 }
3001 break;
3002
3003 case 1:
3004 sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
3005 if (sr < 0) {
3006 next_tick = sr & ~TIMER_CB;
3007 } else {
3008 lp->media = SPD_DET;
3009 lp->local_state = 0;
3010 if (sr) { /* Success! */
3011 lp->tmp = MII_SR_ASSC;
3012 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
3013 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
3014 if (!(anlpa & MII_ANLPA_RF) &&
3015 (cap = anlpa & MII_ANLPA_TAF & ana)) {
3016 if (cap & MII_ANA_100M) {
3017 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
3018 lp->media = _100Mb;
3019 } else if (cap & MII_ANA_10M) {
3020 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
3021 lp->media = _10Mb;
3022 }
3023 }
3024 } /* Auto Negotiation failed to finish */
3025 next_tick = dc2114x_autoconf(dev);
3026 } /* Auto Negotiation failed to start */
3027 break;
3028 }
3029 break;
3030
3031 case AUI:
3032 if (!lp->tx_enable) {
3033 if (lp->timeout < 0) {
3034 omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
3035 outl(omr & ~OMR_FDX, DE4X5_OMR);
3036 }
3037 irqs = 0;
3038 irq_mask = 0;
3039 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3040 if (sts < 0) {
3041 next_tick = sts & ~TIMER_CB;
3042 } else {
3043 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
3044 lp->media = BNC;
3045 next_tick = dc2114x_autoconf(dev);
3046 } else {
3047 lp->local_state = 1;
3048 de4x5_init_connection(dev);
3049 }
3050 }
3051 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3052 lp->media = AUI_SUSPECT;
3053 next_tick = 3000;
3054 }
3055 break;
3056
3057 case AUI_SUSPECT:
3058 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
3059 break;
3060
3061 case BNC:
3062 switch (lp->local_state) {
3063 case 0:
3064 if (lp->timeout < 0) {
3065 omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
3066 outl(omr & ~OMR_FDX, DE4X5_OMR);
3067 }
3068 irqs = 0;
3069 irq_mask = 0;
3070 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3071 if (sts < 0) {
3072 next_tick = sts & ~TIMER_CB;
3073 } else {
3074 lp->local_state++; /* Ensure media connected */
3075 next_tick = dc2114x_autoconf(dev);
3076 }
3077 break;
3078
3079 case 1:
3080 if (!lp->tx_enable) {
3081 if ((sts = ping_media(dev, 3000)) < 0) {
3082 next_tick = sts & ~TIMER_CB;
3083 } else {
3084 if (sts) {
3085 lp->local_state = 0;
3086 lp->tcount++;
3087 lp->media = INIT;
3088 } else {
3089 de4x5_init_connection(dev);
3090 }
3091 }
3092 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3093 lp->media = BNC_SUSPECT;
3094 next_tick = 3000;
3095 }
3096 break;
3097 }
3098 break;
3099
3100 case BNC_SUSPECT:
3101 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
3102 break;
3103
3104 case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
3105 if (srom_map_media(dev) < 0) {
3106 lp->tcount++;
3107 lp->media = INIT;
3108 return next_tick;
3109 }
3110 if (lp->media == _100Mb) {
3111 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
3112 lp->media = SPD_DET;
3113 return slnk & ~TIMER_CB;
3114 }
3115 } else {
3116 if (wait_for_link(dev) < 0) {
3117 lp->media = SPD_DET;
3118 return PDET_LINK_WAIT;
3119 }
3120 }
3121 if (lp->media == ANS) { /* Do MII parallel detection */
3122 if (is_spd_100(dev)) {
3123 lp->media = _100Mb;
3124 } else {
3125 lp->media = _10Mb;
3126 }
3127 next_tick = dc2114x_autoconf(dev);
3128 } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
3129 (((lp->media == _10Mb) || (lp->media == TP) ||
3130 (lp->media == BNC) || (lp->media == AUI)) &&
3131 is_10_up(dev))) {
3132 next_tick = dc2114x_autoconf(dev);
3133 } else {
3134 lp->tcount++;
3135 lp->media = INIT;
3136 }
3137 break;
3138
3139 case _10Mb:
3140 next_tick = 3000;
3141 if (!lp->tx_enable) {
3142 SET_10Mb;
3143 de4x5_init_connection(dev);
3144 } else {
3145 if (!lp->linkOK && (lp->autosense == AUTO)) {
3146 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
3147 lp->media = INIT;
3148 lp->tcount++;
3149 next_tick = DE4X5_AUTOSENSE_MS;
3150 }
3151 }
3152 }
3153 break;
3154
3155 case _100Mb:
3156 next_tick = 3000;
3157 if (!lp->tx_enable) {
3158 SET_100Mb;
3159 de4x5_init_connection(dev);
3160 } else {
3161 if (!lp->linkOK && (lp->autosense == AUTO)) {
3162 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
3163 lp->media = INIT;
3164 lp->tcount++;
3165 next_tick = DE4X5_AUTOSENSE_MS;
3166 }
3167 }
3168 }
3169 break;
3170
3171 default:
3172 lp->tcount++;
3173 printk("Huh?: media:%02x\n", lp->media);
3174 lp->media = INIT;
3175 break;
3176 }
3177
3178 return next_tick;
3179 }
3180
3181 static int
3182 srom_autoconf(struct net_device *dev)
3183 {
3184 struct de4x5_private *lp = netdev_priv(dev);
3185
3186 return lp->infoleaf_fn(dev);
3187 }
3188
3189 /*
3190 ** This mapping keeps the original media codes and FDX flag unchanged.
3191 ** While it isn't strictly necessary, it helps me for the moment...
3192 ** The early return avoids a media state / SROM media space clash.
3193 */
3194 static int
3195 srom_map_media(struct net_device *dev)
3196 {
3197 struct de4x5_private *lp = netdev_priv(dev);
3198
3199 lp->fdx = false;
3200 if (lp->infoblock_media == lp->media)
3201 return 0;
3202
3203 switch(lp->infoblock_media) {
3204 case SROM_10BASETF:
3205 if (!lp->params.fdx) return -1;
3206 lp->fdx = true;
3207 /* fall through */
3208
3209 case SROM_10BASET:
3210 if (lp->params.fdx && !lp->fdx) return -1;
3211 if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
3212 lp->media = _10Mb;
3213 } else {
3214 lp->media = TP;
3215 }
3216 break;
3217
3218 case SROM_10BASE2:
3219 lp->media = BNC;
3220 break;
3221
3222 case SROM_10BASE5:
3223 lp->media = AUI;
3224 break;
3225
3226 case SROM_100BASETF:
3227 if (!lp->params.fdx) return -1;
3228 lp->fdx = true;
3229 /* fall through */
3230
3231 case SROM_100BASET:
3232 if (lp->params.fdx && !lp->fdx) return -1;
3233 lp->media = _100Mb;
3234 break;
3235
3236 case SROM_100BASET4:
3237 lp->media = _100Mb;
3238 break;
3239
3240 case SROM_100BASEFF:
3241 if (!lp->params.fdx) return -1;
3242 lp->fdx = true;
3243 /* fall through */
3244
3245 case SROM_100BASEF:
3246 if (lp->params.fdx && !lp->fdx) return -1;
3247 lp->media = _100Mb;
3248 break;
3249
3250 case ANS:
3251 lp->media = ANS;
3252 lp->fdx = lp->params.fdx;
3253 break;
3254
3255 default:
3256 printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
3257 lp->infoblock_media);
3258 return -1;
3259 }
3260
3261 return 0;
3262 }
3263
3264 static void
3265 de4x5_init_connection(struct net_device *dev)
3266 {
3267 struct de4x5_private *lp = netdev_priv(dev);
3268 u_long iobase = dev->base_addr;
3269 u_long flags = 0;
3270
3271 if (lp->media != lp->c_media) {
3272 de4x5_dbg_media(dev);
3273 lp->c_media = lp->media; /* Stop scrolling media messages */
3274 }
3275
3276 spin_lock_irqsave(&lp->lock, flags);
3277 de4x5_rst_desc_ring(dev);
3278 de4x5_setup_intr(dev);
3279 lp->tx_enable = true;
3280 spin_unlock_irqrestore(&lp->lock, flags);
3281 outl(POLL_DEMAND, DE4X5_TPD);
3282
3283 netif_wake_queue(dev);
3284 }
3285
3286 /*
3287 ** General PHY reset function. Some MII devices don't reset correctly
3288 ** since their MII address pins can float at voltages that are dependent
3289 ** on the signal pin use. Do a double reset to ensure a reset.
3290 */
3291 static int
3292 de4x5_reset_phy(struct net_device *dev)
3293 {
3294 struct de4x5_private *lp = netdev_priv(dev);
3295 u_long iobase = dev->base_addr;
3296 int next_tick = 0;
3297
3298 if ((lp->useSROM) || (lp->phy[lp->active].id)) {
3299 if (lp->timeout < 0) {
3300 if (lp->useSROM) {
3301 if (lp->phy[lp->active].rst) {
3302 srom_exec(dev, lp->phy[lp->active].rst);
3303 srom_exec(dev, lp->phy[lp->active].rst);
3304 } else if (lp->rst) { /* Type 5 infoblock reset */
3305 srom_exec(dev, lp->rst);
3306 srom_exec(dev, lp->rst);
3307 }
3308 } else {
3309 PHY_HARD_RESET;
3310 }
3311 if (lp->useMII) {
3312 mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
3313 }
3314 }
3315 if (lp->useMII) {
3316 next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
3317 }
3318 } else if (lp->chipset == DC21140) {
3319 PHY_HARD_RESET;
3320 }
3321
3322 return next_tick;
3323 }
3324
3325 static int
3326 test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
3327 {
3328 struct de4x5_private *lp = netdev_priv(dev);
3329 u_long iobase = dev->base_addr;
3330 s32 sts, csr12;
3331
3332 if (lp->timeout < 0) {
3333 lp->timeout = msec/100;
3334 if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */
3335 reset_init_sia(dev, csr13, csr14, csr15);
3336 }
3337
3338 /* set up the interrupt mask */
3339 outl(irq_mask, DE4X5_IMR);
3340
3341 /* clear all pending interrupts */
3342 sts = inl(DE4X5_STS);
3343 outl(sts, DE4X5_STS);
3344
3345 /* clear csr12 NRA and SRA bits */
3346 if ((lp->chipset == DC21041) || lp->useSROM) {
3347 csr12 = inl(DE4X5_SISR);
3348 outl(csr12, DE4X5_SISR);
3349 }
3350 }
3351
3352 sts = inl(DE4X5_STS) & ~TIMER_CB;
3353
3354 if (!(sts & irqs) && --lp->timeout) {
3355 sts = 100 | TIMER_CB;
3356 } else {
3357 lp->timeout = -1;
3358 }
3359
3360 return sts;
3361 }
3362
3363 static int
3364 test_tp(struct net_device *dev, s32 msec)
3365 {
3366 struct de4x5_private *lp = netdev_priv(dev);
3367 u_long iobase = dev->base_addr;
3368 int sisr;
3369
3370 if (lp->timeout < 0) {
3371 lp->timeout = msec/100;
3372 }
3373
3374 sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
3375
3376 if (sisr && --lp->timeout) {
3377 sisr = 100 | TIMER_CB;
3378 } else {
3379 lp->timeout = -1;
3380 }
3381
3382 return sisr;
3383 }
3384
3385 /*
3386 ** Samples the 100Mb Link State Signal. The sample interval is important
3387 ** because too fast a rate can give erroneous results and confuse the
3388 ** speed sense algorithm.
3389 */
3390 #define SAMPLE_INTERVAL 500 /* ms */
3391 #define SAMPLE_DELAY 2000 /* ms */
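/*
** Example: with the 6500ms budget passed in by the autoconf routines, the
** first sample is scheduled after SAMPLE_DELAY (2000ms) and at most
** (6500 - 2000) / 500 = 9 further samples follow at SAMPLE_INTERVAL spacing.
*/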
3392 static int
3393 test_for_100Mb(struct net_device *dev, int msec)
3394 {
3395 struct de4x5_private *lp = netdev_priv(dev);
3396 int gep = 0, ret = ((lp->chipset & ~0x00ff)==DC2114x? -1 :GEP_SLNK);
3397
3398 if (lp->timeout < 0) {
3399 if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
3400 if (msec > SAMPLE_DELAY) {
3401 lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
3402 gep = SAMPLE_DELAY | TIMER_CB;
3403 return gep;
3404 } else {
3405 lp->timeout = msec/SAMPLE_INTERVAL;
3406 }
3407 }
3408
3409 if (lp->phy[lp->active].id || lp->useSROM) {
3410 gep = is_100_up(dev) | is_spd_100(dev);
3411 } else {
3412 gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
3413 }
3414 if (!(gep & ret) && --lp->timeout) {
3415 gep = SAMPLE_INTERVAL | TIMER_CB;
3416 } else {
3417 lp->timeout = -1;
3418 }
3419
3420 return gep;
3421 }
3422
3423 static int
3424 wait_for_link(struct net_device *dev)
3425 {
3426 struct de4x5_private *lp = netdev_priv(dev);
3427
3428 if (lp->timeout < 0) {
3429 lp->timeout = 1;
3430 }
3431
3432 if (lp->timeout--) {
3433 return TIMER_CB;
3434 } else {
3435 lp->timeout = -1;
3436 }
3437
3438 return 0;
3439 }
3440
3441 /*
3442 ** Wait, with timeout, for the bits selected by 'mask' in an MII register
3443 ** to reach the polarity given by 'pol' (returns 100|TIMER_CB while polling).
3444 */
3445 static int
3446 test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
3447 {
3448 struct de4x5_private *lp = netdev_priv(dev);
3449 int test;
3450 u_long iobase = dev->base_addr;
3451
3452 if (lp->timeout < 0) {
3453 lp->timeout = msec/100;
3454 }
3455
3456 reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
3457 test = (reg ^ (pol ? ~0 : 0)) & mask;
3458
3459 if (test && --lp->timeout) {
3460 reg = 100 | TIMER_CB;
3461 } else {
3462 lp->timeout = -1;
3463 }
3464
3465 return reg;
3466 }
3467
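/*
** Return non-zero when the current medium is running at 100Mb/s.  The test
** uses the PHY speed register when an MII device is active, the GEP SLNK pin
** on non-SROM DC21140 boards (de500-xa), otherwise the SIA status register or
** the SROM infoblock activity/polarity bits.
*/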
3468 static int
3469 is_spd_100(struct net_device *dev)
3470 {
3471 struct de4x5_private *lp = netdev_priv(dev);
3472 u_long iobase = dev->base_addr;
3473 int spd;
3474
3475 if (lp->useMII) {
3476 spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
3477 spd = ~(spd ^ lp->phy[lp->active].spd.value);
3478 spd &= lp->phy[lp->active].spd.mask;
3479 } else if (!lp->useSROM) { /* de500-xa */
3480 spd = ((~gep_rd(dev)) & GEP_SLNK);
3481 } else {
3482 if ((lp->ibn == 2) || !lp->asBitValid)
3483 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3484
3485 spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
3486 (lp->linkOK & ~lp->asBitValid);
3487 }
3488
3489 return spd;
3490 }
3491
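/*
** is_100_up() and is_10_up() report the 100Mb/s and 10Mb/s link states
** respectively, from the MII status register (read twice to clear the sticky
** link bit), the GEP pins, or the SIA/infoblock bits as appropriate.
*/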
3492 static int
3493 is_100_up(struct net_device *dev)
3494 {
3495 struct de4x5_private *lp = netdev_priv(dev);
3496 u_long iobase = dev->base_addr;
3497
3498 if (lp->useMII) {
3499 /* Double read for sticky bits & temporary drops */
3500 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3501 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3502 } else if (!lp->useSROM) { /* de500-xa */
3503 return (~gep_rd(dev)) & GEP_SLNK;
3504 } else {
3505 if ((lp->ibn == 2) || !lp->asBitValid)
3506 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3507
3508 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3509 (lp->linkOK & ~lp->asBitValid);
3510 }
3511 }
3512
3513 static int
3514 is_10_up(struct net_device *dev)
3515 {
3516 struct de4x5_private *lp = netdev_priv(dev);
3517 u_long iobase = dev->base_addr;
3518
3519 if (lp->useMII) {
3520 /* Double read for sticky bits & temporary drops */
3521 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3522 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3523 } else if (!lp->useSROM) { /* de500-xa */
3524 return (~gep_rd(dev)) & GEP_LNP;
3525 } else {
3526 if ((lp->ibn == 2) || !lp->asBitValid)
3527 return ((lp->chipset & ~0x00ff) == DC2114x) ?
3528 (~inl(DE4X5_SISR)&SISR_LS10):
3529 0;
3530
3531 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3532 (lp->linkOK & ~lp->asBitValid);
3533 }
3534 }
3535
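/*
** Report whether auto-negotiation can be used: the MII status register when a
** PHY is in use, or the SIA 'link partner negotiable' status on DC2114x parts.
*/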
3536 static int
3537 is_anc_capable(struct net_device *dev)
3538 {
3539 struct de4x5_private *lp = netdev_priv(dev);
3540 u_long iobase = dev->base_addr;
3541
3542 if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
3543 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3544 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3545 return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
3546 } else {
3547 return 0;
3548 }
3549 }
3550
3551 /*
3552 ** Send a packet onto the media and watch for send errors that indicate the
3553 ** media is bad or unconnected.
3554 */
3555 static int
3556 ping_media(struct net_device *dev, int msec)
3557 {
3558 struct de4x5_private *lp = netdev_priv(dev);
3559 u_long iobase = dev->base_addr;
3560 int sisr;
3561
3562 if (lp->timeout < 0) {
3563 lp->timeout = msec/100;
3564
3565 lp->tmp = lp->tx_new; /* Remember the ring position */
3566 load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
3567 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
3568 outl(POLL_DEMAND, DE4X5_TPD);
3569 }
3570
3571 sisr = inl(DE4X5_SISR);
3572
3573 if ((!(sisr & SISR_NCR)) &&
3574 ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
3575 (--lp->timeout)) {
3576 sisr = 100 | TIMER_CB;
3577 } else {
3578 if ((!(sisr & SISR_NCR)) &&
3579 !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
3580 lp->timeout) {
3581 sisr = 0;
3582 } else {
3583 sisr = 1;
3584 }
3585 lp->timeout = -1;
3586 }
3587
3588 return sisr;
3589 }
3590
3591 /*
3592 ** This function does 2 things: on Intel-class platforms it allocates another
3593 ** skb to replace the one about to be passed up. On Alpha (and the other
3594 ** platforms that must copy) it allocates an skb into which the packet is copied.
3595 */
3596 static struct sk_buff *
3597 de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
3598 {
3599 struct de4x5_private *lp = netdev_priv(dev);
3600 struct sk_buff *p;
3601
3602 #if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
3603 struct sk_buff *ret;
3604 u_long i=0, tmp;
3605
3606 p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
3607 if (!p) return NULL;
3608
3609 tmp = virt_to_bus(p->data);
3610 i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
3611 skb_reserve(p, i);
3612 lp->rx_ring[index].buf = cpu_to_le32(tmp + i);
3613
3614 ret = lp->rx_skb[index];
3615 lp->rx_skb[index] = p;
3616
3617 if ((u_long) ret > 1) {
3618 skb_put(ret, len);
3619 }
3620
3621 return ret;
3622
3623 #else
3624 if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */
3625
3626 p = netdev_alloc_skb(dev, len + 2);
3627 if (!p) return NULL;
3628
3629 skb_reserve(p, 2); /* Align */
3630 if (index < lp->rx_old) { /* Wrapped buffer */
3631 short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
3632 skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, tlen);
3633 skb_put_data(p, lp->rx_bufs, len - tlen);
3634 } else { /* Linear buffer */
3635 skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, len);
3636 }
3637
3638 return p;
3639 #endif
3640 }
3641
3642 static void
3643 de4x5_free_rx_buffs(struct net_device *dev)
3644 {
3645 struct de4x5_private *lp = netdev_priv(dev);
3646 int i;
3647
3648 for (i=0; i<lp->rxRingSize; i++) {
3649 if ((u_long) lp->rx_skb[i] > 1) {
3650 dev_kfree_skb(lp->rx_skb[i]);
3651 }
3652 lp->rx_ring[i].status = 0;
3653 lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */
3654 }
3655 }
3656
3657 static void
3658 de4x5_free_tx_buffs(struct net_device *dev)
3659 {
3660 struct de4x5_private *lp = netdev_priv(dev);
3661 int i;
3662
3663 for (i=0; i<lp->txRingSize; i++) {
3664 if (lp->tx_skb[i])
3665 de4x5_free_tx_buff(lp, i);
3666 lp->tx_ring[i].status = 0;
3667 }
3668
3669 /* Unload the locally queued packets */
3670 __skb_queue_purge(&lp->cache.queue);
3671 }
3672
3673 /*
3674 ** When a user pulls a connection, the DECchip can end up in a
3675 ** 'running - waiting for end of transmission' state. This means that we
3676 ** have to perform a chip soft reset to ensure that we can synchronize
3677 ** the hardware and software and make any media probes using a loopback
3678 ** packet meaningful.
3679 */
3680 static void
3681 de4x5_save_skbs(struct net_device *dev)
3682 {
3683 struct de4x5_private *lp = netdev_priv(dev);
3684 u_long iobase = dev->base_addr;
3685 s32 omr;
3686
3687 if (!lp->cache.save_cnt) {
3688 STOP_DE4X5;
3689 de4x5_tx(dev); /* Flush any sent skb's */
3690 de4x5_free_tx_buffs(dev);
3691 de4x5_cache_state(dev, DE4X5_SAVE_STATE);
3692 de4x5_sw_reset(dev);
3693 de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
3694 lp->cache.save_cnt++;
3695 START_DE4X5;
3696 }
3697 }
3698
3699 static void
3700 de4x5_rst_desc_ring(struct net_device *dev)
3701 {
3702 struct de4x5_private *lp = netdev_priv(dev);
3703 u_long iobase = dev->base_addr;
3704 int i;
3705 s32 omr;
3706
3707 if (lp->cache.save_cnt) {
3708 STOP_DE4X5;
3709 outl(lp->dma_rings, DE4X5_RRBA);
3710 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
3711 DE4X5_TRBA);
3712
3713 lp->rx_new = lp->rx_old = 0;
3714 lp->tx_new = lp->tx_old = 0;
3715
3716 for (i = 0; i < lp->rxRingSize; i++) {
3717 lp->rx_ring[i].status = cpu_to_le32(R_OWN);
3718 }
3719
3720 for (i = 0; i < lp->txRingSize; i++) {
3721 lp->tx_ring[i].status = cpu_to_le32(0);
3722 }
3723
3724 barrier();
3725 lp->cache.save_cnt--;
3726 START_DE4X5;
3727 }
3728 }
3729
3730 static void
3731 de4x5_cache_state(struct net_device *dev, int flag)
3732 {
3733 struct de4x5_private *lp = netdev_priv(dev);
3734 u_long iobase = dev->base_addr;
3735
3736 switch(flag) {
3737 case DE4X5_SAVE_STATE:
3738 lp->cache.csr0 = inl(DE4X5_BMR);
3739 lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
3740 lp->cache.csr7 = inl(DE4X5_IMR);
3741 break;
3742
3743 case DE4X5_RESTORE_STATE:
3744 outl(lp->cache.csr0, DE4X5_BMR);
3745 outl(lp->cache.csr6, DE4X5_OMR);
3746 outl(lp->cache.csr7, DE4X5_IMR);
3747 if (lp->chipset == DC21140) {
3748 gep_wr(lp->cache.gepc, dev);
3749 gep_wr(lp->cache.gep, dev);
3750 } else {
3751 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
3752 lp->cache.csr15);
3753 }
3754 break;
3755 }
3756 }
3757
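/*
** The three helpers below manage the private skb cache queue which holds
** outgoing packets while the chip is being reset or the media probed:
** de4x5_put_cache() appends to the tail, de4x5_putb_cache() pushes back onto
** the head and de4x5_get_cache() dequeues the next packet for transmission.
*/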
3758 static void
3759 de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
3760 {
3761 struct de4x5_private *lp = netdev_priv(dev);
3762
3763 __skb_queue_tail(&lp->cache.queue, skb);
3764 }
3765
3766 static void
3767 de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
3768 {
3769 struct de4x5_private *lp = netdev_priv(dev);
3770
3771 __skb_queue_head(&lp->cache.queue, skb);
3772 }
3773
3774 static struct sk_buff *
3775 de4x5_get_cache(struct net_device *dev)
3776 {
3777 struct de4x5_private *lp = netdev_priv(dev);
3778
3779 return __skb_dequeue(&lp->cache.queue);
3780 }
3781
3782 /*
3783 ** Check the Auto Negotiation State. Return OK when a link pass interrupt
3784 ** is received and the auto-negotiation status is NWAY OK.
3785 */
3786 static int
3787 test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
3788 {
3789 struct de4x5_private *lp = netdev_priv(dev);
3790 u_long iobase = dev->base_addr;
3791 s32 sts, ans;
3792
3793 if (lp->timeout < 0) {
3794 lp->timeout = msec/100;
3795 outl(irq_mask, DE4X5_IMR);
3796
3797 /* clear all pending interrupts */
3798 sts = inl(DE4X5_STS);
3799 outl(sts, DE4X5_STS);
3800 }
3801
3802 ans = inl(DE4X5_SISR) & SISR_ANS;
3803 sts = inl(DE4X5_STS) & ~TIMER_CB;
3804
3805 if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
3806 sts = 100 | TIMER_CB;
3807 } else {
3808 lp->timeout = -1;
3809 }
3810
3811 return sts;
3812 }
3813
3814 static void
3815 de4x5_setup_intr(struct net_device *dev)
3816 {
3817 struct de4x5_private *lp = netdev_priv(dev);
3818 u_long iobase = dev->base_addr;
3819 s32 imr, sts;
3820
3821 if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */
3822 imr = 0;
3823 UNMASK_IRQs;
3824 sts = inl(DE4X5_STS); /* Reset any pending (stale) interrupts */
3825 outl(sts, DE4X5_STS);
3826 ENABLE_IRQs;
3827 }
3828 }
3829
3830 /*
3831 ** Reset the SIA then reload CSR13-15, from the SROM data or the values passed in.
3832 */
3833 static void
3834 reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
3835 {
3836 struct de4x5_private *lp = netdev_priv(dev);
3837 u_long iobase = dev->base_addr;
3838
3839 RESET_SIA;
3840 if (lp->useSROM) {
3841 if (lp->ibn == 3) {
3842 srom_exec(dev, lp->phy[lp->active].rst);
3843 srom_exec(dev, lp->phy[lp->active].gep);
3844 outl(1, DE4X5_SICR);
3845 return;
3846 } else {
3847 csr15 = lp->cache.csr15;
3848 csr14 = lp->cache.csr14;
3849 csr13 = lp->cache.csr13;
3850 outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
3851 outl(csr15 | lp->cache.gep, DE4X5_SIGR);
3852 }
3853 } else {
3854 outl(csr15, DE4X5_SIGR);
3855 }
3856 outl(csr14, DE4X5_STRR);
3857 outl(csr13, DE4X5_SICR);
3858
3859 mdelay(10);
3860 }
3861
3862 /*
3863 ** Create a loopback ethernet packet
3864 */
3865 static void
3866 create_packet(struct net_device *dev, char *frame, int len)
3867 {
3868 int i;
3869 char *buf = frame;
3870
3871 for (i=0; i<ETH_ALEN; i++) { /* Use this source address */
3872 *buf++ = dev->dev_addr[i];
3873 }
3874 for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */
3875 *buf++ = dev->dev_addr[i];
3876 }
3877
3878 *buf++ = 0; /* Packet length (2 bytes) */
3879 *buf++ = 1;
3880 }
3881
3882 /*
3883 ** Look for a particular board name in the EISA configuration space
3884 */
3885 static int
3886 EISA_signature(char *name, struct device *device)
3887 {
3888 int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
3889 struct eisa_device *edev;
3890
3891 *name = '\0';
3892 edev = to_eisa_device (device);
3893 i = edev->id.driver_data;
3894
3895 if (i >= 0 && i < siglen) {
3896 strcpy (name, de4x5_signatures[i]);
3897 status = 1;
3898 }
3899
3900 return status; /* return the device name string */
3901 }
3902
3903 /*
3904 ** Look for a particular board name in the PCI configuration space
3905 */
3906 static int
3907 PCI_signature(char *name, struct de4x5_private *lp)
3908 {
3909 int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
3910
3911 if (lp->chipset == DC21040) {
3912 strcpy(name, "DE434/5");
3913 return status;
3914 } else { /* Search for a DEC name in the SROM */
3915 int tmp = *((char *)&lp->srom + 19) * 3;
3916 strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
3917 }
3918 name[8] = '\0';
3919 for (i=0; i<siglen; i++) {
3920 if (strstr(name,de4x5_signatures[i])!=NULL) break;
3921 }
3922 if (i == siglen) {
3923 if (dec_only) {
3924 *name = '\0';
3925 } else { /* Use chip name to avoid confusion */
3926 strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
3927 ((lp->chipset == DC21041) ? "DC21041" :
3928 ((lp->chipset == DC21140) ? "DC21140" :
3929 ((lp->chipset == DC21142) ? "DC21142" :
3930 ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
3931 )))))));
3932 }
3933 if (lp->chipset != DC21041) {
3934 lp->useSROM = true; /* card is not recognisably DEC */
3935 }
3936 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3937 lp->useSROM = true;
3938 }
3939
3940 return status;
3941 }
3942
3943 /*
3944 ** Set up the Ethernet PROM counter to the start of the Ethernet address on
3945 ** the DC21040, else read the SROM for the other chips.
3946 ** The SROM may not be present in a multi-MAC card, so first read the
3947 ** MAC address and check for a bad address. If there is a bad one then exit
3948 ** immediately with the prior srom contents intact (the h/w address will
3949 ** be fixed up later).
3950 */
3951 static void
3952 DevicePresent(struct net_device *dev, u_long aprom_addr)
3953 {
3954 int i, j=0;
3955 struct de4x5_private *lp = netdev_priv(dev);
3956
3957 if (lp->chipset == DC21040) {
3958 if (lp->bus == EISA) {
3959 enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */
3960 } else {
3961 outl(0, aprom_addr); /* Reset Ethernet Address ROM Pointer */
3962 }
3963 } else { /* Read new srom */
3964 u_short tmp;
3965 __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD);
3966 for (i=0; i<(ETH_ALEN>>1); i++) {
3967 tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
3968 j += tmp; /* for check for 0:0:0:0:0:0 or ff:ff:ff:ff:ff:ff */
3969 *p = cpu_to_le16(tmp);
3970 }
3971 if (j == 0 || j == 3 * 0xffff) {
3972 /* could get 0 only from all-0 and 3 * 0xffff only from all-1 */
3973 return;
3974 }
3975
3976 p = (__le16 *)&lp->srom;
3977 for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
3978 tmp = srom_rd(aprom_addr, i);
3979 *p++ = cpu_to_le16(tmp);
3980 }
3981 de4x5_dbg_srom(&lp->srom);
3982 }
3983 }
3984
3985 /*
3986 ** Since the write on the Enet PROM register doesn't seem to reset the PROM
3987 ** pointer correctly (at least on my DE425 EISA card), this routine should do
3988 ** it...from depca.c.
3989 */
3990 static void
3991 enet_addr_rst(u_long aprom_addr)
3992 {
3993 union {
3994 struct {
3995 u32 a;
3996 u32 b;
3997 } llsig;
3998 char Sig[sizeof(u32) << 1];
3999 } dev;
4000 short sigLength=0;
4001 s8 data;
4002 int i, j;
4003
4004 dev.llsig.a = ETH_PROM_SIG;
4005 dev.llsig.b = ETH_PROM_SIG;
4006 sigLength = sizeof(u32) << 1;
4007
4008 for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
4009 data = inb(aprom_addr);
4010 if (dev.Sig[j] == data) { /* track signature */
4011 j++;
4012 } else { /* lost signature; begin search again */
4013 if (data == dev.Sig[0]) { /* rare case.... */
4014 j=1;
4015 } else {
4016 j=0;
4017 }
4018 }
4019 }
4020 }
4021
4022 /*
4023 ** For the bad status case with no SROM, add one to the previous
4024 ** address. However, the carry must propagate backwards in case one or
4025 ** more of the bytes is 0xff. Only the last 3 bytes should be adjusted
4026 ** as the first three are invariant - assigned to an organisation.
4027 */
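/*
** For example (illustrative only): if the previous card was assigned
** xx:xx:xx:00:12:ff, the increment starts at the last byte, which wraps to
** 0x00, and the carry moves left to give xx:xx:xx:00:13:00.  The loop in
** test_bad_enet() stops at the first byte that does not wrap.
*/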
4028 static int
4029 get_hw_addr(struct net_device *dev)
4030 {
4031 u_long iobase = dev->base_addr;
4032 int broken, i, k, tmp, status = 0;
4033 u_short j,chksum;
4034 struct de4x5_private *lp = netdev_priv(dev);
4035
4036 broken = de4x5_bad_srom(lp);
4037
4038 for (i=0,k=0,j=0;j<3;j++) {
4039 k <<= 1;
4040 if (k > 0xffff) k-=0xffff;
4041
4042 if (lp->bus == PCI) {
4043 if (lp->chipset == DC21040) {
4044 while ((tmp = inl(DE4X5_APROM)) < 0);
4045 k += (u_char) tmp;
4046 dev->dev_addr[i++] = (u_char) tmp;
4047 while ((tmp = inl(DE4X5_APROM)) < 0);
4048 k += (u_short) (tmp << 8);
4049 dev->dev_addr[i++] = (u_char) tmp;
4050 } else if (!broken) {
4051 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
4052 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
4053 } else if ((broken == SMC) || (broken == ACCTON)) {
4054 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
4055 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
4056 }
4057 } else {
4058 k += (u_char) (tmp = inb(EISA_APROM));
4059 dev->dev_addr[i++] = (u_char) tmp;
4060 k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
4061 dev->dev_addr[i++] = (u_char) tmp;
4062 }
4063
4064 if (k > 0xffff) k-=0xffff;
4065 }
4066 if (k == 0xffff) k=0;
4067
4068 if (lp->bus == PCI) {
4069 if (lp->chipset == DC21040) {
4070 while ((tmp = inl(DE4X5_APROM)) < 0);
4071 chksum = (u_char) tmp;
4072 while ((tmp = inl(DE4X5_APROM)) < 0);
4073 chksum |= (u_short) (tmp << 8);
4074 if ((k != chksum) && (dec_only)) status = -1;
4075 }
4076 } else {
4077 chksum = (u_char) inb(EISA_APROM);
4078 chksum |= (u_short) (inb(EISA_APROM) << 8);
4079 if ((k != chksum) && (dec_only)) status = -1;
4080 }
4081
4082 /* If possible, try to fix a broken card - SMC only so far */
4083 srom_repair(dev, broken);
4084
4085 #ifdef CONFIG_PPC_PMAC
4086 /*
4087 ** If the address starts with 00 a0, we have to bit-reverse
4088 ** each byte of the address.
4089 */
4090 if ( machine_is(powermac) &&
4091 (dev->dev_addr[0] == 0) &&
4092 (dev->dev_addr[1] == 0xa0) )
4093 {
4094 for (i = 0; i < ETH_ALEN; ++i)
4095 {
4096 int x = dev->dev_addr[i];
4097 x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
4098 x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
4099 dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
4100 }
4101 }
4102 #endif /* CONFIG_PPC_PMAC */
4103
4104 /* Test for a bad enet address */
4105 status = test_bad_enet(dev, status);
4106
4107 return status;
4108 }
4109
4110 /*
4111 ** Test for enet addresses in the first 32 bytes.
4112 */
4113 static int
4114 de4x5_bad_srom(struct de4x5_private *lp)
4115 {
4116 int i, status = 0;
4117
4118 for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
4119 if (!memcmp(&lp->srom, &enet_det[i], 3) &&
4120 !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) {
4121 if (i == 0) {
4122 status = SMC;
4123 } else if (i == 1) {
4124 status = ACCTON;
4125 }
4126 break;
4127 }
4128 }
4129
4130 return status;
4131 }
4132
4133 static void
4134 srom_repair(struct net_device *dev, int card)
4135 {
4136 struct de4x5_private *lp = netdev_priv(dev);
4137
4138 switch(card) {
4139 case SMC:
4140 memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
4141 memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
4142 memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
4143 lp->useSROM = true;
4144 break;
4145 }
4146 }
4147
4148 /*
4149 ** Assume that the irq's do not follow the PCI spec - this seems
4150 ** to be true so far (2 for 2).
4151 */
4152 static int
4153 test_bad_enet(struct net_device *dev, int status)
4154 {
4155 struct de4x5_private *lp = netdev_priv(dev);
4156 int i, tmp;
4157
4158 for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
4159 if ((tmp == 0) || (tmp == 0x5fa)) {
4160 if ((lp->chipset == last.chipset) &&
4161 (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
4162 for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
4163 for (i=ETH_ALEN-1; i>2; --i) {
4164 dev->dev_addr[i] += 1;
4165 if (dev->dev_addr[i] != 0) break;
4166 }
4167 for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
4168 if (!an_exception(lp)) {
4169 dev->irq = last.irq;
4170 }
4171
4172 status = 0;
4173 }
4174 } else if (!status) {
4175 last.chipset = lp->chipset;
4176 last.bus = lp->bus_num;
4177 last.irq = dev->irq;
4178 for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
4179 }
4180
4181 return status;
4182 }
4183
4184 /*
4185 ** List of board exceptions with correctly wired IRQs
4186 */
4187 static int
4188 an_exception(struct de4x5_private *lp)
4189 {
4190 if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
4191 (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
4192 return -1;
4193 }
4194
4195 return 0;
4196 }
4197
4198 /*
4199 ** SROM Read
4200 */
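/*
** The helpers below bit-bang the serial SROM protocol through the address
** PROM register: srom_rd() asserts chip select, shifts out the read command
** and a 6-bit word offset (MSB first, via srom_address()), then clocks in the
** 16 data bits (srom_data()).  Typical use, as in DevicePresent() above:
**
**     u_short w = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
**
** which fetches the i'th 16-bit word of the hardware address.
*/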
4201 static short
4202 srom_rd(u_long addr, u_char offset)
4203 {
4204 sendto_srom(SROM_RD | SROM_SR, addr);
4205
4206 srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
4207 srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
4208 srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
4209
4210 return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
4211 }
4212
4213 static void
4214 srom_latch(u_int command, u_long addr)
4215 {
4216 sendto_srom(command, addr);
4217 sendto_srom(command | DT_CLK, addr);
4218 sendto_srom(command, addr);
4219 }
4220
4221 static void
4222 srom_command(u_int command, u_long addr)
4223 {
4224 srom_latch(command, addr);
4225 srom_latch(command, addr);
4226 srom_latch((command & 0x0000ff00) | DT_CS, addr);
4227 }
4228
4229 static void
4230 srom_address(u_int command, u_long addr, u_char offset)
4231 {
4232 int i, a;
4233
4234 a = offset << 2;
4235 for (i=0; i<6; i++, a <<= 1) {
4236 srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
4237 }
4238 udelay(1);
4239
4240 i = (getfrom_srom(addr) >> 3) & 0x01;
4241 }
4242
4243 static short
4244 srom_data(u_int command, u_long addr)
4245 {
4246 int i;
4247 short word = 0;
4248 s32 tmp;
4249
4250 for (i=0; i<16; i++) {
4251 sendto_srom(command | DT_CLK, addr);
4252 tmp = getfrom_srom(addr);
4253 sendto_srom(command, addr);
4254
4255 word = (word << 1) | ((tmp >> 3) & 0x01);
4256 }
4257
4258 sendto_srom(command & 0x0000ff00, addr);
4259
4260 return word;
4261 }
4262
4263 /*
4264 static void
4265 srom_busy(u_int command, u_long addr)
4266 {
4267 sendto_srom((command & 0x0000ff00) | DT_CS, addr);
4268
4269 while (!((getfrom_srom(addr) >> 3) & 0x01)) {
4270 mdelay(1);
4271 }
4272
4273 sendto_srom(command & 0x0000ff00, addr);
4274 }
4275 */
4276
4277 static void
4278 sendto_srom(u_int command, u_long addr)
4279 {
4280 outl(command, addr);
4281 udelay(1);
4282 }
4283
4284 static int
4285 getfrom_srom(u_long addr)
4286 {
4287 s32 tmp;
4288
4289 tmp = inl(addr);
4290 udelay(1);
4291
4292 return tmp;
4293 }
4294
4295 static int
4296 srom_infoleaf_info(struct net_device *dev)
4297 {
4298 struct de4x5_private *lp = netdev_priv(dev);
4299 int i, count;
4300 u_char *p;
4301
4302 /* Find the infoleaf decoder function that matches this chipset */
4303 for (i=0; i<INFOLEAF_SIZE; i++) {
4304 if (lp->chipset == infoleaf_array[i].chipset) break;
4305 }
4306 if (i == INFOLEAF_SIZE) {
4307 lp->useSROM = false;
4308 printk("%s: Cannot find correct chipset for SROM decoding!\n",
4309 dev->name);
4310 return -ENXIO;
4311 }
4312
4313 lp->infoleaf_fn = infoleaf_array[i].fn;
4314
4315 /* Find the information offset that this function should use */
4316 count = *((u_char *)&lp->srom + 19);
4317 p = (u_char *)&lp->srom + 26;
4318
4319 if (count > 1) {
4320 for (i=count; i; --i, p+=3) {
4321 if (lp->device == *p) break;
4322 }
4323 if (i == 0) {
4324 lp->useSROM = false;
4325 printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
4326 dev->name, lp->device);
4327 return -ENXIO;
4328 }
4329 }
4330
4331 lp->infoleaf_offset = get_unaligned_le16(p + 1);
4332
4333 return 0;
4334 }
4335
4336 /*
4337 ** This routine loads any type 1 or 3 MII info into the mii device
4338 ** struct and executes any type 5 code to reset PHY devices for this
4339 ** controller.
4340 ** The info for the MII devices will be valid since the index used
4341 ** will follow the discovery process from MII address 1-31 then 0.
4342 */
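/*
** SROM infoblock layout (summary added for clarity): a first byte below 128
** denotes the older compact format of COMPACT_LEN bytes; otherwise the low
** seven bits (BLOCK_LEN) give the block length - 1 and the byte at p+1 gives
** the block type.  srom_init() walks the list, executing only the type 1, 3
** and 5 blocks needed to set up MII devices and reset sequences.
*/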
4343 static void
4344 srom_init(struct net_device *dev)
4345 {
4346 struct de4x5_private *lp = netdev_priv(dev);
4347 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4348 u_char count;
4349
4350 p+=2;
4351 if (lp->chipset == DC21140) {
4352 lp->cache.gepc = (*p++ | GEP_CTRL);
4353 gep_wr(lp->cache.gepc, dev);
4354 }
4355
4356 /* Block count */
4357 count = *p++;
4358
4359 /* Jump the infoblocks to find types */
4360 for (;count; --count) {
4361 if (*p < 128) {
4362 p += COMPACT_LEN;
4363 } else if (*(p+1) == 5) {
4364 type5_infoblock(dev, 1, p);
4365 p += ((*p & BLOCK_LEN) + 1);
4366 } else if (*(p+1) == 4) {
4367 p += ((*p & BLOCK_LEN) + 1);
4368 } else if (*(p+1) == 3) {
4369 type3_infoblock(dev, 1, p);
4370 p += ((*p & BLOCK_LEN) + 1);
4371 } else if (*(p+1) == 2) {
4372 p += ((*p & BLOCK_LEN) + 1);
4373 } else if (*(p+1) == 1) {
4374 type1_infoblock(dev, 1, p);
4375 p += ((*p & BLOCK_LEN) + 1);
4376 } else {
4377 p += ((*p & BLOCK_LEN) + 1);
4378 }
4379 }
4380 }
4381
4382 /*
4383 ** A generic routine that writes GEP control, data and reset information
4384 ** to the GEP register (21140) or csr15 GEP portion (2114[23]).
4385 */
4386 static void
4387 srom_exec(struct net_device *dev, u_char *p)
4388 {
4389 struct de4x5_private *lp = netdev_priv(dev);
4390 u_long iobase = dev->base_addr;
4391 u_char count = (p ? *p++ : 0);
4392 u_short *w = (u_short *)p;
4393
4394 if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
4395
4396 if (lp->chipset != DC21140) RESET_SIA;
4397
4398 while (count--) {
4399 gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
4400 *p++ : get_unaligned_le16(w++)), dev);
4401 mdelay(2); /* 2ms per action */
4402 }
4403
4404 if (lp->chipset != DC21140) {
4405 outl(lp->cache.csr14, DE4X5_STRR);
4406 outl(lp->cache.csr13, DE4X5_SICR);
4407 }
4408 }
4409
4410 /*
4411 ** Basically this function is a NOP since it will never be called,
4412 ** unless I implement the DC21041 SROM functions. There's no need
4413 ** since the existing code will be satisfactory for all boards.
4414 */
4415 static int
4416 dc21041_infoleaf(struct net_device *dev)
4417 {
4418 return DE4X5_AUTOSENSE_MS;
4419 }
4420
4421 static int
4422 dc21140_infoleaf(struct net_device *dev)
4423 {
4424 struct de4x5_private *lp = netdev_priv(dev);
4425 u_char count = 0;
4426 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4427 int next_tick = DE4X5_AUTOSENSE_MS;
4428
4429 /* Read the connection type */
4430 p+=2;
4431
4432 /* GEP control */
4433 lp->cache.gepc = (*p++ | GEP_CTRL);
4434
4435 /* Block count */
4436 count = *p++;
4437
4438 /* Recursively figure out the info blocks */
4439 if (*p < 128) {
4440 next_tick = dc_infoblock[COMPACT](dev, count, p);
4441 } else {
4442 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4443 }
4444
4445 if (lp->tcount == count) {
4446 lp->media = NC;
4447 if (lp->media != lp->c_media) {
4448 de4x5_dbg_media(dev);
4449 lp->c_media = lp->media;
4450 }
4451 lp->media = INIT;
4452 lp->tcount = 0;
4453 lp->tx_enable = false;
4454 }
4455
4456 return next_tick & ~TIMER_CB;
4457 }
4458
4459 static int
4460 dc21142_infoleaf(struct net_device *dev)
4461 {
4462 struct de4x5_private *lp = netdev_priv(dev);
4463 u_char count = 0;
4464 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4465 int next_tick = DE4X5_AUTOSENSE_MS;
4466
4467 /* Read the connection type */
4468 p+=2;
4469
4470 /* Block count */
4471 count = *p++;
4472
4473 /* Recursively figure out the info blocks */
4474 if (*p < 128) {
4475 next_tick = dc_infoblock[COMPACT](dev, count, p);
4476 } else {
4477 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4478 }
4479
4480 if (lp->tcount == count) {
4481 lp->media = NC;
4482 if (lp->media != lp->c_media) {
4483 de4x5_dbg_media(dev);
4484 lp->c_media = lp->media;
4485 }
4486 lp->media = INIT;
4487 lp->tcount = 0;
4488 lp->tx_enable = false;
4489 }
4490
4491 return next_tick & ~TIMER_CB;
4492 }
4493
4494 static int
4495 dc21143_infoleaf(struct net_device *dev)
4496 {
4497 struct de4x5_private *lp = netdev_priv(dev);
4498 u_char count = 0;
4499 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4500 int next_tick = DE4X5_AUTOSENSE_MS;
4501
4502 /* Read the connection type */
4503 p+=2;
4504
4505 /* Block count */
4506 count = *p++;
4507
4508 /* Recursively figure out the info blocks */
4509 if (*p < 128) {
4510 next_tick = dc_infoblock[COMPACT](dev, count, p);
4511 } else {
4512 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4513 }
4514 if (lp->tcount == count) {
4515 lp->media = NC;
4516 if (lp->media != lp->c_media) {
4517 de4x5_dbg_media(dev);
4518 lp->c_media = lp->media;
4519 }
4520 lp->media = INIT;
4521 lp->tcount = 0;
4522 lp->tx_enable = false;
4523 }
4524
4525 return next_tick & ~TIMER_CB;
4526 }
4527
4528 /*
4529 ** The compact infoblock is only designed for DC21140[A] chips, so
4530 ** we'll reuse the dc21140m_autoconf function. Non MII media only.
4531 */
4532 static int
4533 compact_infoblock(struct net_device *dev, u_char count, u_char *p)
4534 {
4535 struct de4x5_private *lp = netdev_priv(dev);
4536 u_char flags, csr6;
4537
4538 /* Recursively figure out the info blocks */
4539 if (--count > lp->tcount) {
4540 if (*(p+COMPACT_LEN) < 128) {
4541 return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
4542 } else {
4543 return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
4544 }
4545 }
4546
4547 if ((lp->media == INIT) && (lp->timeout < 0)) {
4548 lp->ibn = COMPACT;
4549 lp->active = 0;
4550 gep_wr(lp->cache.gepc, dev);
4551 lp->infoblock_media = (*p++) & COMPACT_MC;
4552 lp->cache.gep = *p++;
4553 csr6 = *p++;
4554 flags = *p++;
4555
4556 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4557 lp->defMedium = (flags & 0x40) ? -1 : 0;
4558 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4559 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4560 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4561 lp->useMII = false;
4562
4563 de4x5_switch_mac_port(dev);
4564 }
4565
4566 return dc21140m_autoconf(dev);
4567 }
4568
4569 /*
4570 ** This block describes non MII media for the DC21140[A] only.
4571 */
4572 static int
4573 type0_infoblock(struct net_device *dev, u_char count, u_char *p)
4574 {
4575 struct de4x5_private *lp = netdev_priv(dev);
4576 u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
4577
4578 /* Recursively figure out the info blocks */
4579 if (--count > lp->tcount) {
4580 if (*(p+len) < 128) {
4581 return dc_infoblock[COMPACT](dev, count, p+len);
4582 } else {
4583 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4584 }
4585 }
4586
4587 if ((lp->media == INIT) && (lp->timeout < 0)) {
4588 lp->ibn = 0;
4589 lp->active = 0;
4590 gep_wr(lp->cache.gepc, dev);
4591 p+=2;
4592 lp->infoblock_media = (*p++) & BLOCK0_MC;
4593 lp->cache.gep = *p++;
4594 csr6 = *p++;
4595 flags = *p++;
4596
4597 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4598 lp->defMedium = (flags & 0x40) ? -1 : 0;
4599 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4600 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4601 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4602 lp->useMII = false;
4603
4604 de4x5_switch_mac_port(dev);
4605 }
4606
4607 return dc21140m_autoconf(dev);
4608 }
4609
4610 /* These functions are under construction! */
4611
4612 static int
4613 type1_infoblock(struct net_device *dev, u_char count, u_char *p)
4614 {
4615 struct de4x5_private *lp = netdev_priv(dev);
4616 u_char len = (*p & BLOCK_LEN)+1;
4617
4618 /* Recursively figure out the info blocks */
4619 if (--count > lp->tcount) {
4620 if (*(p+len) < 128) {
4621 return dc_infoblock[COMPACT](dev, count, p+len);
4622 } else {
4623 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4624 }
4625 }
4626
4627 p += 2;
4628 if (lp->state == INITIALISED) {
4629 lp->ibn = 1;
4630 lp->active = *p++;
4631 lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
4632 lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
4633 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4634 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4635 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4636 lp->phy[lp->active].ttm = get_unaligned_le16(p);
4637 return 0;
4638 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
4639 lp->ibn = 1;
4640 lp->active = *p;
4641 lp->infoblock_csr6 = OMR_MII_100;
4642 lp->useMII = true;
4643 lp->infoblock_media = ANS;
4644
4645 de4x5_switch_mac_port(dev);
4646 }
4647
4648 return dc21140m_autoconf(dev);
4649 }
4650
4651 static int
4652 type2_infoblock(struct net_device *dev, u_char count, u_char *p)
4653 {
4654 struct de4x5_private *lp = netdev_priv(dev);
4655 u_char len = (*p & BLOCK_LEN)+1;
4656
4657 /* Recursively figure out the info blocks */
4658 if (--count > lp->tcount) {
4659 if (*(p+len) < 128) {
4660 return dc_infoblock[COMPACT](dev, count, p+len);
4661 } else {
4662 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4663 }
4664 }
4665
4666 if ((lp->media == INIT) && (lp->timeout < 0)) {
4667 lp->ibn = 2;
4668 lp->active = 0;
4669 p += 2;
4670 lp->infoblock_media = (*p) & MEDIA_CODE;
4671
4672 if ((*p++) & EXT_FIELD) {
4673 lp->cache.csr13 = get_unaligned_le16(p); p += 2;
4674 lp->cache.csr14 = get_unaligned_le16(p); p += 2;
4675 lp->cache.csr15 = get_unaligned_le16(p); p += 2;
4676 } else {
4677 lp->cache.csr13 = CSR13;
4678 lp->cache.csr14 = CSR14;
4679 lp->cache.csr15 = CSR15;
4680 }
4681 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4682 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16);
4683 lp->infoblock_csr6 = OMR_SIA;
4684 lp->useMII = false;
4685
4686 de4x5_switch_mac_port(dev);
4687 }
4688
4689 return dc2114x_autoconf(dev);
4690 }
4691
4692 static int
4693 type3_infoblock(struct net_device *dev, u_char count, u_char *p)
4694 {
4695 struct de4x5_private *lp = netdev_priv(dev);
4696 u_char len = (*p & BLOCK_LEN)+1;
4697
4698 /* Recursively figure out the info blocks */
4699 if (--count > lp->tcount) {
4700 if (*(p+len) < 128) {
4701 return dc_infoblock[COMPACT](dev, count, p+len);
4702 } else {
4703 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4704 }
4705 }
4706
4707 p += 2;
4708 if (lp->state == INITIALISED) {
4709 lp->ibn = 3;
4710 lp->active = *p++;
4711 if (MOTO_SROM_BUG) lp->active = 0;
4712 lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
4713 lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
4714 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4715 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4716 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4717 lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
4718 lp->phy[lp->active].mci = *p;
4719 return 0;
4720 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
4721 lp->ibn = 3;
4722 lp->active = *p;
4723 if (MOTO_SROM_BUG) lp->active = 0;
4724 lp->infoblock_csr6 = OMR_MII_100;
4725 lp->useMII = true;
4726 lp->infoblock_media = ANS;
4727
4728 de4x5_switch_mac_port(dev);
4729 }
4730
4731 return dc2114x_autoconf(dev);
4732 }
4733
4734 static int
4735 type4_infoblock(struct net_device *dev, u_char count, u_char *p)
4736 {
4737 struct de4x5_private *lp = netdev_priv(dev);
4738 u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
4739
4740 /* Recursively figure out the info blocks */
4741 if (--count > lp->tcount) {
4742 if (*(p+len) < 128) {
4743 return dc_infoblock[COMPACT](dev, count, p+len);
4744 } else {
4745 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4746 }
4747 }
4748
4749 if ((lp->media == INIT) && (lp->timeout < 0)) {
4750 lp->ibn = 4;
4751 lp->active = 0;
4752 p+=2;
4753 lp->infoblock_media = (*p++) & MEDIA_CODE;
4754 lp->cache.csr13 = CSR13; /* Hard coded defaults */
4755 lp->cache.csr14 = CSR14;
4756 lp->cache.csr15 = CSR15;
4757 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4758 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4759 csr6 = *p++;
4760 flags = *p++;
4761
4762 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4763 lp->defMedium = (flags & 0x40) ? -1 : 0;
4764 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4765 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4766 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4767 lp->useMII = false;
4768
4769 de4x5_switch_mac_port(dev);
4770 }
4771
4772 return dc2114x_autoconf(dev);
4773 }
4774
4775 /*
4776 ** This block type provides information for resetting external devices
4777 ** (chips) through the General Purpose Register.
4778 */
4779 static int
4780 type5_infoblock(struct net_device *dev, u_char count, u_char *p)
4781 {
4782 struct de4x5_private *lp = netdev_priv(dev);
4783 u_char len = (*p & BLOCK_LEN)+1;
4784
4785 /* Recursively figure out the info blocks */
4786 if (--count > lp->tcount) {
4787 if (*(p+len) < 128) {
4788 return dc_infoblock[COMPACT](dev, count, p+len);
4789 } else {
4790 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4791 }
4792 }
4793
4794 /* Must be initializing to run this code */
4795 if ((lp->state == INITIALISED) || (lp->media == INIT)) {
4796 p+=2;
4797 lp->rst = p;
4798 srom_exec(dev, lp->rst);
4799 }
4800
4801 return DE4X5_AUTOSENSE_MS;
4802 }
4803
4804 /*
4805 ** MII Read/Write
4806 */
4807
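/*
** These routines clock the IEEE 802.3 MII management (MDIO) frame out through
** the DE4X5_MII register: a 2+32 bit preamble, start-of-frame and opcode bits,
** the 5-bit PHY address, the 5-bit register number, a turnaround, then 16 data
** bits.  A read looks like (as used in is_100_up(), for example):
**
**     int sr = mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
*/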
4808 static int
4809 mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
4810 {
4811 mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
4812 mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
4813 mii_wdata(MII_STRD, 4, ioaddr); /* SFD and Read operation */
4814 mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
4815 mii_address(phyreg, ioaddr); /* PHY Register to read */
4816 mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */
4817
4818 return mii_rdata(ioaddr); /* Read data */
4819 }
4820
4821 static void
4822 mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
4823 {
4824 mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
4825 mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
4826 mii_wdata(MII_STWR, 4, ioaddr); /* SFD and Write operation */
4827 mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
4828 mii_address(phyreg, ioaddr); /* PHY Register to write */
4829 mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
4830 data = mii_swap(data, 16); /* Swap data bit ordering */
4831 mii_wdata(data, 16, ioaddr); /* Write data */
4832 }
4833
4834 static int
4835 mii_rdata(u_long ioaddr)
4836 {
4837 int i;
4838 s32 tmp = 0;
4839
4840 for (i=0; i<16; i++) {
4841 tmp <<= 1;
4842 tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
4843 }
4844
4845 return tmp;
4846 }
4847
4848 static void
4849 mii_wdata(int data, int len, u_long ioaddr)
4850 {
4851 int i;
4852
4853 for (i=0; i<len; i++) {
4854 sendto_mii(MII_MWR | MII_WR, data, ioaddr);
4855 data >>= 1;
4856 }
4857 }
4858
4859 static void
4860 mii_address(u_char addr, u_long ioaddr)
4861 {
4862 int i;
4863
4864 addr = mii_swap(addr, 5);
4865 for (i=0; i<5; i++) {
4866 sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
4867 addr >>= 1;
4868 }
4869 }
4870
4871 static void
4872 mii_ta(u_long rw, u_long ioaddr)
4873 {
4874 if (rw == MII_STWR) {
4875 sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
4876 sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
4877 } else {
4878 getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
4879 }
4880 }
4881
4882 static int
4883 mii_swap(int data, int len)
4884 {
4885 int i, tmp = 0;
4886
4887 for (i=0; i<len; i++) {
4888 tmp <<= 1;
4889 tmp |= (data & 1);
4890 data >>= 1;
4891 }
4892
4893 return tmp;
4894 }
4895
4896 static void
4897 sendto_mii(u32 command, int data, u_long ioaddr)
4898 {
4899 u32 j;
4900
4901 j = (data & 1) << 17;
4902 outl(command | j, ioaddr);
4903 udelay(1);
4904 outl(command | MII_MDC | j, ioaddr);
4905 udelay(1);
4906 }
4907
4908 static int
4909 getfrom_mii(u32 command, u_long ioaddr)
4910 {
4911 outl(command, ioaddr);
4912 udelay(1);
4913 outl(command | MII_MDC, ioaddr);
4914 udelay(1);
4915
4916 return (inl(ioaddr) >> 19) & 1;
4917 }
4918
4919 /*
4920 ** Here are 3 ways to calculate the OUI from the ID registers.
4921 */
4922 static int
4923 mii_get_oui(u_char phyaddr, u_long ioaddr)
4924 {
4925 /*
4926 union {
4927 u_short reg;
4928 u_char breg[2];
4929 } a;
4930 int i, r2, r3, ret=0;*/
4931 int r2, r3;
4932
4933 /* Read r2 and r3 */
4934 r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
4935 r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
4936 /* SEEQ and Cypress way * /
4937 / * Shuffle r2 and r3 * /
4938 a.reg=0;
4939 r3 = ((r3>>10)|(r2<<6))&0x0ff;
4940 r2 = ((r2>>2)&0x3fff);
4941
4942 / * Bit reverse r3 * /
4943 for (i=0;i<8;i++) {
4944 ret<<=1;
4945 ret |= (r3&1);
4946 r3>>=1;
4947 }
4948
4949 / * Bit reverse r2 * /
4950 for (i=0;i<16;i++) {
4951 a.reg<<=1;
4952 a.reg |= (r2&1);
4953 r2>>=1;
4954 }
4955
4956 / * Swap r2 bytes * /
4957 i=a.breg[0];
4958 a.breg[0]=a.breg[1];
4959 a.breg[1]=i;
4960
4961 return (a.reg<<8)|ret; */ /* SEEQ and Cypress way */
4962 /* return (r2<<6)|(u_int)(r3>>10); */ /* NATIONAL and BROADCOM way */
4963 return r2; /* (I did it) My way */
4964 }
4965
4966 /*
4967 ** The SROM spec forces us to search addresses [1-31 0]. Bummer.
4968 */
4969 static int
4970 mii_get_phy(struct net_device *dev)
4971 {
4972 struct de4x5_private *lp = netdev_priv(dev);
4973 u_long iobase = dev->base_addr;
4974 int i, j, k, n, limit=ARRAY_SIZE(phy_info);
4975 int id;
4976
4977 lp->active = 0;
4978 lp->useMII = true;
4979
4980 /* Search the MII address space for possible PHY devices */
4981 for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
4982 lp->phy[lp->active].addr = i;
4983 if (i==0) n++; /* Count cycles */
4984 while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
4985 id = mii_get_oui(i, DE4X5_MII);
4986 if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
4987 for (j=0; j<limit; j++) { /* Search PHY table */
4988 if (id != phy_info[j].id) continue; /* ID match? */
4989 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
4990 if (k < DE4X5_MAX_PHY) {
4991 memcpy((char *)&lp->phy[k],
4992 (char *)&phy_info[j], sizeof(struct phy_table));
4993 lp->phy[k].addr = i;
4994 lp->mii_cnt++;
4995 lp->active++;
4996 } else {
4997 goto purgatory; /* Stop the search */
4998 }
4999 break;
5000 }
5001 if ((j == limit) && (i < DE4X5_MAX_MII)) {
5002 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
5003 lp->phy[k].addr = i;
5004 lp->phy[k].id = id;
5005 lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
5006 lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
5007 lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
5008 lp->mii_cnt++;
5009 lp->active++;
5010 printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
5011 j = de4x5_debug;
5012 de4x5_debug |= DEBUG_MII;
5013 de4x5_dbg_mii(dev, k);
5014 de4x5_debug = j;
5015 printk("\n");
5016 }
5017 }
5018 purgatory:
5019 lp->active = 0;
5020 if (lp->phy[0].id) { /* Reset the PHY devices */
5021 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/
5022 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
5023 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
5024
5025 de4x5_dbg_mii(dev, k);
5026 }
5027 }
5028 if (!lp->mii_cnt) lp->useMII = false;
5029
5030 return lp->mii_cnt;
5031 }
5032
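/*
** Build the setup frame used to load the station address into the chip's
** address filter.  In hash/perfect mode the host address is written into the
** perfect address slot and one hash-table bit is set; in perfect filtering
** mode the host and broadcast addresses are written as 16-bit entries.  The
** return value points at the next free entry so callers can append multicast
** addresses.
*/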
5033 static char *
5034 build_setup_frame(struct net_device *dev, int mode)
5035 {
5036 struct de4x5_private *lp = netdev_priv(dev);
5037 int i;
5038 char *pa = lp->setup_frame;
5039
5040 /* Initialise the setup frame */
5041 if (mode == ALL) {
5042 memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
5043 }
5044
5045 if (lp->setup_f == HASH_PERF) {
5046 for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
5047 *(pa + i) = dev->dev_addr[i]; /* Host address */
5048 if (i & 0x01) pa += 2;
5049 }
5050 *(lp->setup_frame + (DE4X5_HASH_TABLE_LEN >> 3) - 3) = 0x80;
5051 } else {
5052 for (i=0; i<ETH_ALEN; i++) { /* Host address */
5053 *(pa + (i&1)) = dev->dev_addr[i];
5054 if (i & 0x01) pa += 4;
5055 }
5056 for (i=0; i<ETH_ALEN; i++) { /* Broadcast address */
5057 *(pa + (i&1)) = (char) 0xff;
5058 if (i & 0x01) pa += 4;
5059 }
5060 }
5061
5062 return pa; /* Points to the next entry */
5063 }
5064
5065 static void
5066 disable_ast(struct net_device *dev)
5067 {
5068 struct de4x5_private *lp = netdev_priv(dev);
5069 del_timer_sync(&lp->timer);
5070 }
5071
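/*
** Switch the MAC port: stop the chip, merge the infoblock CSR6 value into the
** operating mode register, soft reset, restore the GEP (DC21140) or SIA CSRs
** (DC2114x) and CSR6, then clear the missed frame counter (CSR8) by reading it.
*/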
5072 static long
5073 de4x5_switch_mac_port(struct net_device *dev)
5074 {
5075 struct de4x5_private *lp = netdev_priv(dev);
5076 u_long iobase = dev->base_addr;
5077 s32 omr;
5078
5079 STOP_DE4X5;
5080
5081 /* Assert the OMR_PS bit in CSR6 */
5082 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
5083 OMR_FDX));
5084 omr |= lp->infoblock_csr6;
5085 if (omr & OMR_PS) omr |= OMR_HBD;
5086 outl(omr, DE4X5_OMR);
5087
5088 /* Soft Reset */
5089 RESET_DE4X5;
5090
5091 /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */
5092 if (lp->chipset == DC21140) {
5093 gep_wr(lp->cache.gepc, dev);
5094 gep_wr(lp->cache.gep, dev);
5095 } else if ((lp->chipset & ~0x0ff) == DC2114x) {
5096 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
5097 }
5098
5099 /* Restore CSR6 */
5100 outl(omr, DE4X5_OMR);
5101
5102 /* Reset CSR8 */
5103 inl(DE4X5_MFC);
5104
5105 return omr;
5106 }
5107
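/*
** General purpose port accessors: the DC21140 has a dedicated GEP register,
** while on DC2114x parts the general purpose bits occupy the top 16 bits of
** CSR15 (SIGR), so writes are merged with the cached CSR15 value.
*/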
5108 static void
5109 gep_wr(s32 data, struct net_device *dev)
5110 {
5111 struct de4x5_private *lp = netdev_priv(dev);
5112 u_long iobase = dev->base_addr;
5113
5114 if (lp->chipset == DC21140) {
5115 outl(data, DE4X5_GEP);
5116 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5117 outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
5118 }
5119 }
5120
5121 static int
5122 gep_rd(struct net_device *dev)
5123 {
5124 struct de4x5_private *lp = netdev_priv(dev);
5125 u_long iobase = dev->base_addr;
5126
5127 if (lp->chipset == DC21140) {
5128 return inl(DE4X5_GEP);
5129 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5130 return inl(DE4X5_SIGR) & 0x000fffff;
5131 }
5132
5133 return 0;
5134 }
5135
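/*
** Move the chip between the WAKEUP, SNOOZE and SLEEP power saving states via
** the configuration/power management register (CFPM for EISA, CFDA for PCI).
** The DC21040 and DC21140 have no power saving support, so this is a no-op
** for them.
*/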
5136 static void
5137 yawn(struct net_device *dev, int state)
5138 {
5139 struct de4x5_private *lp = netdev_priv(dev);
5140 u_long iobase = dev->base_addr;
5141
5142 if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;
5143
5144 if(lp->bus == EISA) {
5145 switch(state) {
5146 case WAKEUP:
5147 outb(WAKEUP, PCI_CFPM);
5148 mdelay(10);
5149 break;
5150
5151 case SNOOZE:
5152 outb(SNOOZE, PCI_CFPM);
5153 break;
5154
5155 case SLEEP:
5156 outl(0, DE4X5_SICR);
5157 outb(SLEEP, PCI_CFPM);
5158 break;
5159 }
5160 } else {
5161 struct pci_dev *pdev = to_pci_dev (lp->gendev);
5162 switch(state) {
5163 case WAKEUP:
5164 pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
5165 mdelay(10);
5166 break;
5167
5168 case SNOOZE:
5169 pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE);
5170 break;
5171
5172 case SLEEP:
5173 outl(0, DE4X5_SICR);
5174 pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP);
5175 break;
5176 }
5177 }
5178 }
5179
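/*
** Parse the module/command line 'args' string for this interface: the section
** between this device's name and the next "eth" substring may contain "fdx"
** to force full duplex and an "autosense" medium keyword (TP, TP_NW, BNC,
** AUI, BNC_AUI, 10Mb, 100Mb or AUTO).  For example (illustrative only, any
** string containing the keywords is matched), something like:
**
**     args="eth0:fdx autosense=TP"
**
** would select full duplex and TP autosensing for eth0.
*/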
5180 static void
5181 de4x5_parse_params(struct net_device *dev)
5182 {
5183 struct de4x5_private *lp = netdev_priv(dev);
5184 char *p, *q, t;
5185
5186 lp->params.fdx = false;
5187 lp->params.autosense = AUTO;
5188
5189 if (args == NULL) return;
5190
5191 if ((p = strstr(args, dev->name))) {
5192 if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
5193 t = *q;
5194 *q = '\0';
5195
5196 if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
5197
5198 if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
5199 if (strstr(p, "TP_NW")) {
5200 lp->params.autosense = TP_NW;
5201 } else if (strstr(p, "TP")) {
5202 lp->params.autosense = TP;
5203 } else if (strstr(p, "BNC_AUI")) {
5204 lp->params.autosense = BNC;
5205 } else if (strstr(p, "BNC")) {
5206 lp->params.autosense = BNC;
5207 } else if (strstr(p, "AUI")) {
5208 lp->params.autosense = AUI;
5209 } else if (strstr(p, "10Mb")) {
5210 lp->params.autosense = _10Mb;
5211 } else if (strstr(p, "100Mb")) {
5212 lp->params.autosense = _100Mb;
5213 } else if (strstr(p, "AUTO")) {
5214 lp->params.autosense = AUTO;
5215 }
5216 }
5217 *q = t;
5218 }
5219 }
5220
5221 static void
5222 de4x5_dbg_open(struct net_device *dev)
5223 {
5224 struct de4x5_private *lp = netdev_priv(dev);
5225 int i;
5226
5227 if (de4x5_debug & DEBUG_OPEN) {
5228 printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
5229 printk("\tphysical address: %pM\n", dev->dev_addr);
5230 printk("Descriptor head addresses:\n");
5231 printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
5232 printk("Descriptor addresses:\nRX: ");
5233 for (i=0;i<lp->rxRingSize-1;i++){
5234 if (i < 3) {
5235 printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
5236 }
5237 }
5238 printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
5239 printk("TX: ");
5240 for (i=0;i<lp->txRingSize-1;i++){
5241 if (i < 3) {
5242 printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
5243 }
5244 }
5245 printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
5246 printk("Descriptor buffers:\nRX: ");
5247 for (i=0;i<lp->rxRingSize-1;i++){
5248 if (i < 3) {
5249 printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf));
5250 }
5251 }
5252 printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf));
5253 printk("TX: ");
5254 for (i=0;i<lp->txRingSize-1;i++){
5255 if (i < 3) {
5256 printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
5257 }
5258 }
5259 printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
5260 printk("Ring size:\nRX: %d\nTX: %d\n",
5261 (short)lp->rxRingSize,
5262 (short)lp->txRingSize);
5263 }
5264 }
5265
5266 static void
5267 de4x5_dbg_mii(struct net_device *dev, int k)
5268 {
5269 struct de4x5_private *lp = netdev_priv(dev);
5270 u_long iobase = dev->base_addr;
5271
5272 if (de4x5_debug & DEBUG_MII) {
5273 printk("\nMII device address: %d\n", lp->phy[k].addr);
5274 printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
5275 printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
5276 printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
5277 printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
5278 if (lp->phy[k].id != BROADCOM_T4) {
5279 printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
5280 printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
5281 }
5282 printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
5283 if (lp->phy[k].id != BROADCOM_T4) {
5284 printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
5285 printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
5286 } else {
5287 printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
5288 }
5289 }
5290 }
5291
5292 static void
5293 de4x5_dbg_media(struct net_device *dev)
5294 {
5295 struct de4x5_private *lp = netdev_priv(dev);
5296
5297 if (lp->media != lp->c_media) {
5298 if (de4x5_debug & DEBUG_MEDIA) {
5299 printk("%s: media is %s%s\n", dev->name,
5300 (lp->media == NC ? "unconnected, link down or incompatible connection" :
5301 (lp->media == TP ? "TP" :
5302 (lp->media == ANS ? "TP/Nway" :
5303 (lp->media == BNC ? "BNC" :
5304 (lp->media == AUI ? "AUI" :
5305 (lp->media == BNC_AUI ? "BNC/AUI" :
5306 (lp->media == EXT_SIA ? "EXT SIA" :
5307 (lp->media == _100Mb ? "100Mb/s" :
5308 (lp->media == _10Mb ? "10Mb/s" :
5309 "???"
5310 ))))))))), (lp->fdx?" full duplex.":"."));
5311 }
5312 lp->c_media = lp->media;
5313 }
5314 }
5315
5316 static void
5317 de4x5_dbg_srom(struct de4x5_srom *p)
5318 {
5319 int i;
5320
5321 if (de4x5_debug & DEBUG_SROM) {
5322 printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
5323 printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id));
5324 printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
5325 printk("SROM version: %02x\n", (u_char)(p->version));
5326 printk("# controllers: %02x\n", (u_char)(p->num_controllers));
5327
5328 printk("Hardware Address: %pM\n", p->ieee_addr);
5329 printk("CRC checksum: %04x\n", (u_short)(p->chksum));
5330 for (i=0; i<64; i++) {
5331 printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
5332 }
5333 }
5334 }
5335
5336 static void
5337 de4x5_dbg_rx(struct sk_buff *skb, int len)
5338 {
5339 int i, j;
5340
5341 if (de4x5_debug & DEBUG_RX) {
5342 printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n",
5343 skb->data, &skb->data[6],
5344 (u_char)skb->data[12],
5345 (u_char)skb->data[13],
5346 len);
5347 for (j=0; len>0;j+=16, len-=16) {
5348 printk(" %03x: ",j);
5349 for (i=0; i<16 && i<len; i++) {
5350 printk("%02x ",(u_char)skb->data[i+j]);
5351 }
5352 printk("\n");
5353 }
5354 }
5355 }
5356
/*
** Perform IOCTL call functions here. Some are privileged operations and
** CAP_NET_ADMIN is checked in those cases. In the normal course of events
** this function is only used for my testing.
*/
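/*
** A minimal usage sketch: these commands arrive through the device ioctl
** path with the 'struct de4x5_ioctl' request (its cmd/len/data members are
** used below) overlaid on ifr_ifru.  A userspace caller might therefore
** look roughly like the fragment here, assuming SIOCDEVPRIVATE is the
** routing number and "eth0" is the interface of interest (both are
** illustrative assumptions, not guarantees):
**
**     int fd = socket(AF_INET, SOCK_DGRAM, 0);
**     struct ifreq ifr;
**     struct de4x5_ioctl *ioc = (struct de4x5_ioctl *)&ifr.ifr_ifru;
**     unsigned char hwaddr[ETH_ALEN];
**
**     memset(&ifr, 0, sizeof(ifr));
**     strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
**     ioc->cmd  = DE4X5_GET_HWADDR;
**     ioc->len  = ETH_ALEN;
**     ioc->data = hwaddr;
**     if (ioctl(fd, SIOCDEVPRIVATE, &ifr) != 0)
**         perror("DE4X5_GET_HWADDR");
**
** On success 'hwaddr' holds the station address.  DE4X5_SET_HWADDR and the
** other privileged commands additionally require CAP_NET_ADMIN.
*/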
static int
de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct de4x5_private *lp = netdev_priv(dev);
    struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
    u_long iobase = dev->base_addr;
    int i, j, status = 0;
    s32 omr;
    union {
        u8  addr[144];
        u16 sval[72];
        u32 lval[36];
    } tmp;
    u_long flags = 0;

    switch(ioc->cmd) {
    case DE4X5_GET_HWADDR:           /* Get the hardware address */
        ioc->len = ETH_ALEN;
        for (i = 0; i < ETH_ALEN; i++) {
            tmp.addr[i] = dev->dev_addr[i];
        }
        if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
        break;

    case DE4X5_SET_HWADDR:           /* Set the hardware address */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
        if (netif_queue_stopped(dev))
            return -EBUSY;
        netif_stop_queue(dev);
        for (i = 0; i < ETH_ALEN; i++) {
            dev->dev_addr[i] = tmp.addr[i];
        }
        build_setup_frame(dev, PHYS_ADDR_ONLY);
        /* Set up the descriptor and give ownership to the card */
        load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
                    SETUP_FRAME_LEN, (struct sk_buff *)1);
        lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
        outl(POLL_DEMAND, DE4X5_TPD);            /* Start the TX */
        netif_wake_queue(dev);                   /* Unlock the TX ring */
        break;

    case DE4X5_SAY_BOO:              /* Say "Boo!" to the kernel log file */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        printk("%s: Boo!\n", dev->name);
        break;

    case DE4X5_MCA_EN:               /* Enable pass all multicast addressing */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        omr = inl(DE4X5_OMR);
        omr |= OMR_PM;
        outl(omr, DE4X5_OMR);
        break;

    case DE4X5_GET_STATS:            /* Get the driver statistics */
    {
        struct pkt_stats statbuf;
        ioc->len = sizeof(statbuf);
        spin_lock_irqsave(&lp->lock, flags);
        memcpy(&statbuf, &lp->pktStats, ioc->len);
        spin_unlock_irqrestore(&lp->lock, flags);
        if (copy_to_user(ioc->data, &statbuf, ioc->len))
            return -EFAULT;
        break;
    }
    case DE4X5_CLR_STATS:            /* Zero out the driver statistics */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        spin_lock_irqsave(&lp->lock, flags);
        memset(&lp->pktStats, 0, sizeof(lp->pktStats));
        spin_unlock_irqrestore(&lp->lock, flags);
        break;

    case DE4X5_GET_OMR:              /* Get the OMR Register contents */
        tmp.addr[0] = inl(DE4X5_OMR);
        if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT;
        break;

    case DE4X5_SET_OMR:              /* Set the OMR Register contents */
        if (!capable(CAP_NET_ADMIN)) return -EPERM;
        if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT;
        outl(tmp.addr[0], DE4X5_OMR);
        break;

    case DE4X5_GET_REG:              /* Get the DE4X5 Registers */
        j = 0;
        tmp.lval[0] = inl(DE4X5_STS);  j += 4;
        tmp.lval[1] = inl(DE4X5_BMR);  j += 4;
        tmp.lval[2] = inl(DE4X5_IMR);  j += 4;
        tmp.lval[3] = inl(DE4X5_OMR);  j += 4;
        tmp.lval[4] = inl(DE4X5_SISR); j += 4;
        tmp.lval[5] = inl(DE4X5_SICR); j += 4;
        tmp.lval[6] = inl(DE4X5_STRR); j += 4;
        tmp.lval[7] = inl(DE4X5_SIGR); j += 4;
        ioc->len = j;
        if (copy_to_user(ioc->data, tmp.lval, ioc->len))
            return -EFAULT;
        break;

#define DE4X5_DUMP 0x0f              /* Dump the DE4X5 Status */
/*
    case DE4X5_DUMP:
        j = 0;
        tmp.addr[j++] = dev->irq;
        for (i = 0; i < ETH_ALEN; i++) {
            tmp.addr[j++] = dev->dev_addr[i];
        }
        tmp.addr[j++] = lp->rxRingSize;
        tmp.lval[j>>2] = (long)lp->rx_ring; j += 4;
        tmp.lval[j>>2] = (long)lp->tx_ring; j += 4;

        for (i = 0; i < lp->rxRingSize - 1; i++) {
            if (i < 3) {
                tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j += 4;
            }
        }
        tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j += 4;
        for (i = 0; i < lp->txRingSize - 1; i++) {
            if (i < 3) {
                tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j += 4;
            }
        }
        tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j += 4;

        for (i = 0; i < lp->rxRingSize - 1; i++) {
            if (i < 3) {
                tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j += 4;
            }
        }
        tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j += 4;
        for (i = 0; i < lp->txRingSize - 1; i++) {
            if (i < 3) {
                tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j += 4;
            }
        }
        tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j += 4;

        for (i = 0; i < lp->rxRingSize; i++) {
            tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j += 4;
        }
        for (i = 0; i < lp->txRingSize; i++) {
            tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j += 4;
        }

        tmp.lval[j>>2] = inl(DE4X5_BMR);  j += 4;
        tmp.lval[j>>2] = inl(DE4X5_TPD);  j += 4;
        tmp.lval[j>>2] = inl(DE4X5_RPD);  j += 4;
        tmp.lval[j>>2] = inl(DE4X5_RRBA); j += 4;
        tmp.lval[j>>2] = inl(DE4X5_TRBA); j += 4;
        tmp.lval[j>>2] = inl(DE4X5_STS);  j += 4;
        tmp.lval[j>>2] = inl(DE4X5_OMR);  j += 4;
        tmp.lval[j>>2] = inl(DE4X5_IMR);  j += 4;
        tmp.lval[j>>2] = lp->chipset; j += 4;
        if (lp->chipset == DC21140) {
            tmp.lval[j>>2] = gep_rd(dev); j += 4;
        } else {
            tmp.lval[j>>2] = inl(DE4X5_SISR); j += 4;
            tmp.lval[j>>2] = inl(DE4X5_SICR); j += 4;
            tmp.lval[j>>2] = inl(DE4X5_STRR); j += 4;
            tmp.lval[j>>2] = inl(DE4X5_SIGR); j += 4;
        }
        tmp.lval[j>>2] = lp->phy[lp->active].id; j += 4;
        if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
            tmp.lval[j>>2] = lp->active; j += 4;
            tmp.lval[j>>2] = mii_rd(MII_CR, lp->phy[lp->active].addr, DE4X5_MII); j += 4;
            tmp.lval[j>>2] = mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); j += 4;
            tmp.lval[j>>2] = mii_rd(MII_ID0, lp->phy[lp->active].addr, DE4X5_MII); j += 4;
            tmp.lval[j>>2] = mii_rd(MII_ID1, lp->phy[lp->active].addr, DE4X5_MII); j += 4;
            if (lp->phy[lp->active].id != BROADCOM_T4) {
                tmp.lval[j>>2] = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); j += 4;
                tmp.lval[j>>2] = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII); j += 4;
            }
            tmp.lval[j>>2] = mii_rd(0x10, lp->phy[lp->active].addr, DE4X5_MII); j += 4;
            if (lp->phy[lp->active].id != BROADCOM_T4) {
                tmp.lval[j>>2] = mii_rd(0x11, lp->phy[lp->active].addr, DE4X5_MII); j += 4;
                tmp.lval[j>>2] = mii_rd(0x12, lp->phy[lp->active].addr, DE4X5_MII); j += 4;
            } else {
                tmp.lval[j>>2] = mii_rd(0x14, lp->phy[lp->active].addr, DE4X5_MII); j += 4;
            }
        }

        tmp.addr[j++] = lp->txRingSize;
        tmp.addr[j++] = netif_queue_stopped(dev);

        ioc->len = j;
        if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
        break;

*/
    default:
        return -EOPNOTSUPP;
    }

    return status;
}

static int __init de4x5_module_init (void)
{
    int err = 0;

#ifdef CONFIG_PCI
    err = pci_register_driver(&de4x5_pci_driver);
#endif
#ifdef CONFIG_EISA
    err |= eisa_driver_register (&de4x5_eisa_driver);
#endif

    return err;
}

static void __exit de4x5_module_exit (void)
{
#ifdef CONFIG_PCI
    pci_unregister_driver (&de4x5_pci_driver);
#endif
#ifdef CONFIG_EISA
    eisa_driver_unregister (&de4x5_eisa_driver);
#endif
}

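/*
** Loading note: registration is attempted on the PCI and EISA buses,
** depending on the kernel configuration.  As a hedged example, if this
** build exposes the driver's 'args' module parameter (an assumption; check
** the parameter declarations earlier in this file), per-interface media and
** duplex settings could be supplied at load time along the lines of:
**
**     modprobe de4x5 args='eth0:fdx autosense=AUI'
*/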
module_init (de4x5_module_init);
module_exit (de4x5_module_exit);