1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25 /*
26 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
27 */
28
29 /*
30 * SunOS MT STREAMS ERI(PCI) 10/100 Mb Ethernet Device Driver
31 */
32
33 #include <sys/types.h>
34 #include <sys/debug.h>
35 #include <sys/stropts.h>
36 #include <sys/stream.h>
37 #include <sys/strsubr.h>
38 #include <sys/kmem.h>
39 #include <sys/crc32.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/strsun.h>
43 #include <sys/stat.h>
44 #include <sys/cpu.h>
45 #include <sys/kstat.h>
46 #include <inet/common.h>
47 #include <sys/pattr.h>
48 #include <inet/mi.h>
49 #include <inet/nd.h>
50 #include <sys/ethernet.h>
51 #include <sys/vlan.h>
52 #include <sys/policy.h>
53 #include <sys/mac_provider.h>
54 #include <sys/mac_ether.h>
55 #include <sys/dlpi.h>
56
57 #include <sys/pci.h>
58
59 #include "eri_phy.h"
60 #include "eri_mac.h"
61 #include "eri.h"
62 #include "eri_common.h"
63
64 #include "eri_msg.h"
65
66 /*
67 * **** Function Prototypes *****
68 */
69 /*
70 * Entry points (man9e)
71 */
72 static int eri_attach(dev_info_t *, ddi_attach_cmd_t);
73 static int eri_detach(dev_info_t *, ddi_detach_cmd_t);
74 static uint_t eri_intr(caddr_t);
75
76 /*
77 * I/O (Input/Output) Functions
78 */
79 static boolean_t eri_send_msg(struct eri *, mblk_t *);
80 static mblk_t *eri_read_dma(struct eri *, volatile struct rmd *,
81 volatile int, uint64_t flags);
82
83 /*
84 * Initialization Functions
85 */
86 static boolean_t eri_init(struct eri *);
87 static int eri_allocthings(struct eri *);
88 static int eri_init_xfer_params(struct eri *);
89 static void eri_statinit(struct eri *);
90 static int eri_burstsize(struct eri *);
91
92 static void eri_setup_mac_address(struct eri *, dev_info_t *);
93
94 static uint32_t eri_init_rx_channel(struct eri *);
95 static void eri_init_rx(struct eri *);
96 static void eri_init_txmac(struct eri *);
97
98 /*
99 * Un-init Functions
100 */
101 static uint32_t eri_txmac_disable(struct eri *);
102 static uint32_t eri_rxmac_disable(struct eri *);
103 static int eri_stop(struct eri *);
104 static void eri_uninit(struct eri *erip);
105 static int eri_freebufs(struct eri *);
106 static boolean_t eri_reclaim(struct eri *, uint32_t);
107
108 /*
109 * Transceiver (xcvr) Functions
110 */
111 static int eri_new_xcvr(struct eri *); /* Initializes & detects xcvrs */
112 static int eri_reset_xcvr(struct eri *);
113
114 #ifdef ERI_10_10_FORCE_SPEED_WORKAROUND
115 static void eri_xcvr_force_mode(struct eri *, uint32_t *);
116 #endif
117
118 static void eri_mif_poll(struct eri *, soft_mif_enable_t);
119 static void eri_check_link(struct eri *);
120 static uint32_t eri_check_link_noind(struct eri *);
121 static link_state_t eri_mif_check(struct eri *, uint16_t, uint16_t);
122 static void eri_mii_write(struct eri *, uint8_t, uint16_t);
123 static uint32_t eri_mii_read(struct eri *, uint8_t, uint16_t *);
124
125 /*
126 * Reset Functions
127 */
128 static uint32_t eri_etx_reset(struct eri *);
129 static uint32_t eri_erx_reset(struct eri *);
130
131 /*
132 * Error Functions
133 */
134 static void eri_fatal_err(struct eri *, uint32_t);
135 static void eri_nonfatal_err(struct eri *, uint32_t);
136
137 #ifdef ERI_TX_HUNG
138 static int eri_check_txhung(struct eri *);
139 #endif
140
141 /*
142 * Hardening Functions
143 */
144 static void eri_fault_msg(struct eri *, uint_t, msg_t, const char *, ...);
145
146 /*
147 * Misc Functions
148 */
149 static void eri_savecntrs(struct eri *);
150
151 static void eri_stop_timer(struct eri *erip);
152 static void eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec);
153
154 static void eri_bb_force_idle(struct eri *);
155
156 /*
157 * Utility Functions
158 */
159 static mblk_t *eri_allocb(size_t size);
160 static mblk_t *eri_allocb_sp(size_t size);
161 static int eri_param_get(queue_t *q, mblk_t *mp, caddr_t cp);
162 static int eri_param_set(queue_t *, mblk_t *, char *, caddr_t);
163
164 /*
165 * Functions to support ndd
166 */
167 static void eri_nd_free(caddr_t *nd_pparam);
168
169 static boolean_t eri_nd_load(caddr_t *nd_pparam, char *name,
170 pfi_t get_pfi, pfi_t set_pfi, caddr_t data);
171
172 static int eri_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp);
173 static void eri_param_cleanup(struct eri *);
174 static int eri_param_register(struct eri *, param_t *, int);
175 static void eri_process_ndd_ioctl(struct eri *, queue_t *, mblk_t *, int);
176 static int eri_mk_mblk_tail_space(mblk_t *, mblk_t **, size_t);
177
178
179 static void eri_loopback(struct eri *, queue_t *, mblk_t *);
180
181 static uint32_t eri_ladrf_bit(const uint8_t *);
182
183
184 /*
185 * Nemo (GLDv3) Functions.
186 */
187 static int eri_m_stat(void *, uint_t, uint64_t *);
188 static int eri_m_start(void *);
189 static void eri_m_stop(void *);
190 static int eri_m_promisc(void *, boolean_t);
191 static int eri_m_multicst(void *, boolean_t, const uint8_t *);
192 static int eri_m_unicst(void *, const uint8_t *);
193 static void eri_m_ioctl(void *, queue_t *, mblk_t *);
194 static boolean_t eri_m_getcapab(void *, mac_capab_t, void *);
195 static mblk_t *eri_m_tx(void *, mblk_t *);
196
197 static mac_callbacks_t eri_m_callbacks = {
198 MC_IOCTL | MC_GETCAPAB,
199 eri_m_stat,
200 eri_m_start,
201 eri_m_stop,
202 eri_m_promisc,
203 eri_m_multicst,
204 eri_m_unicst,
205 eri_m_tx,
206 NULL,
207 eri_m_ioctl,
208 eri_m_getcapab
209 };
210
211 /*
212 * Define PHY Vendors: Matches to IEEE
213 * Organizationally Unique Identifier (OUI)
214 */
215 /*
216 * The first two are supported as Internal XCVRs
217 */
218 #define PHY_VENDOR_LUCENT 0x601d
219
220 #define PHY_LINK_NONE 0 /* Not attempted yet or retry */
221 #define PHY_LINK_DOWN 1 /* Not being used */
222 #define PHY_LINK_UP 2 /* Not being used */
223
224 #define AUTO_SPEED 0
225 #define FORCE_SPEED 1
226
227 /*
228 * MIB II broadcast/multicast packets
229 */
230
231 #define IS_BROADCAST(pkt) (bcmp(pkt, ðerbroadcastaddr, ETHERADDRL) == 0)
232 #define IS_MULTICAST(pkt) ((pkt[0] & 01) == 1)
233
234 #define BUMP_InNUcast(erip, pkt) \
235 if (IS_BROADCAST(pkt)) { \
236 HSTAT(erip, brdcstrcv); \
237 } else if (IS_MULTICAST(pkt)) { \
238 HSTAT(erip, multircv); \
239 }
240
241 #define BUMP_OutNUcast(erip, pkt) \
242 if (IS_BROADCAST(pkt)) { \
243 HSTAT(erip, brdcstxmt); \
244 } else if (IS_MULTICAST(pkt)) { \
245 HSTAT(erip, multixmt); \
246 }
247
248 #define NEXTTMDP(tbasep, tmdlimp, tmdp) (((tmdp) + 1) == tmdlimp \
249 ? tbasep : ((tmdp) + 1))
250
251 #define ETHERHEADER_SIZE (sizeof (struct ether_header))
252
253 #ifdef ERI_RCV_CKSUM
254 #define ERI_PROCESS_READ(erip, bp, sum) \
255 { \
256 t_uscalar_t type; \
257 uint_t start_offset, end_offset; \
258 \
259 *(bp->b_wptr) = 0; /* pad byte */ \
260 \
261 /* \
262 * update MIB II statistics \
263 */ \
264 HSTAT(erip, ipackets64); \
265 HSTATN(erip, rbytes64, len); \
266 BUMP_InNUcast(erip, bp->b_rptr); \
267 type = get_ether_type(bp->b_rptr); \
268 if (type == ETHERTYPE_IP || type == ETHERTYPE_IPV6) { \
269 start_offset = 0; \
270 end_offset = MBLKL(bp) - ETHERHEADER_SIZE; \
271 mac_hcksum_set(bp, \
272 start_offset, 0, end_offset, sum, \
273 HCK_PARTIALCKSUM); \
274 } else { \
275 /* \
276 * Strip the PADS for 802.3 \
277 */ \
278 if (type <= ETHERMTU) \
279 bp->b_wptr = bp->b_rptr + \
280 ETHERHEADER_SIZE + type; \
281 } \
282 }
283 #else
284
285 #define ERI_PROCESS_READ(erip, bp) \
286 { \
287 t_uscalar_t type; \
288 type = get_ether_type(bp->b_rptr); \
289 \
290 /* \
291 * update MIB II statistics \
292 */ \
293 HSTAT(erip, ipackets64); \
294 HSTATN(erip, rbytes64, len); \
295 BUMP_InNUcast(erip, bp->b_rptr); \
296 /* \
297 * Strip the PADS for 802.3 \
298 */ \
299 if (type <= ETHERMTU) \
300 bp->b_wptr = bp->b_rptr + ETHERHEADER_SIZE + \
301 type; \
302 }
303 #endif /* ERI_RCV_CKSUM */
304
305 /*
306 * TX Interrupt Rate
307 */
308 static int tx_interrupt_rate = 16;
309
310 /*
311 * Ethernet broadcast address definition.
312 */
313 static uint8_t etherbroadcastaddr[] = {
314 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
315 };
316
317 /*
318 * The following variables are used for configuring various features
319 */
320 #define ERI_DESC_HANDLE_ALLOC 0x0001
321 #define ERI_DESC_MEM_ALLOC 0x0002
322 #define ERI_DESC_MEM_MAP 0x0004
323 #define ERI_RCV_HANDLE_ALLOC 0x0020
324 #define ERI_RCV_HANDLE_BIND 0x0040
325 #define ERI_XMIT_DVMA_ALLOC 0x0100
326 #define ERI_RCV_DVMA_ALLOC 0x0200
327 #define ERI_XBUFS_HANDLE_ALLOC 0x0400
328 #define ERI_XBUFS_KMEM_ALLOC 0x0800
329 #define ERI_XBUFS_KMEM_DMABIND 0x1000
330
331
332 #define ERI_DONT_STRIP_CRC
333 /*
334 * Translate a kernel virtual address to i/o address.
335 */
336 #define ERI_IOPBIOADDR(erip, a) \
337 ((erip)->iopbiobase + ((uintptr_t)a - (erip)->iopbkbase))
338
339 /*
340 * ERI Configuration Register Value
341 * Used to configure parameters that define DMA burst
342 * and internal arbitration behavior.
343 * for equal TX and RX bursts, set the following in global
344 * configuration register.
345 * static int global_config = 0x42;
346 */
347
348 /*
349 * ERI ERX Interrupt Blanking Time
350 * Each count is about 16 us (2048 clocks) for 66 MHz PCI.
351 */
352 volatile int intr_blank_time = 6; /* for about 96 us */
353 volatile int intr_blank_packets = 8; /* */
354
355 /*
356 * ERX PAUSE Threshold Register value
357 * The following value is for an OFF Threshold of about 15.5 Kbytes
358 * and an ON Threshold of 4K bytes.
359 */
360 static int rx_pause_threshold = 0xf8 | (0x40 << 12);
361 static int eri_reinit_fatal = 0;
362 #ifdef DEBUG
363 static int noteri = 0;
364 #endif
365
366 #ifdef ERI_TX_HUNG
367 static int eri_reinit_txhung = 0;
368 #endif
369
370 #ifdef ERI_HDX_BUG_WORKAROUND
371 /*
372 * By default enable padding in hdx mode to 97 bytes.
373 * To disabled, in /etc/system:
374 * set eri:eri_hdx_pad_enable=0
375 */
376 static uchar_t eri_hdx_pad_enable = 1;
377 #endif
378
379 /*
380 * Default values to initialize the cache line size and latency timer
381 * registers in the PCI configuration space.
382 * ERI_G_CACHE_LINE_SIZE_16 is defined as 16 since RIO expects in units
383 * of 4 bytes.
384 */
385 #ifdef ERI_PM_WORKAROUND_PCI
386 static int eri_pci_cache_line = ERI_G_CACHE_LINE_SIZE_32; /* 128 bytes */
387 static int eri_pci_latency_timer = 0xff; /* 255 PCI cycles */
388 #else
389 static int eri_pci_cache_line = ERI_G_CACHE_LINE_SIZE_16; /* 64 bytes */
390 static int eri_pci_latency_timer = 0x40; /* 64 PCI cycles */
391 #endif
392 #define ERI_CACHE_LINE_SIZE (eri_pci_cache_line << ERI_G_CACHE_BIT)
393
394 /*
395 * Claim the device is ultra-capable of burst in the beginning. Use
396 * the value returned by ddi_dma_burstsizes() to actually set the ERI
397 * global configuration register later.
398 *
399 * PCI_ERI supports Infinite burst or 64-byte-multiple bursts.
400 */
401 #define ERI_LIMADDRLO ((uint64_t)0x00000000)
402 #define ERI_LIMADDRHI ((uint64_t)0xffffffff)
403
404 static ddi_dma_attr_t dma_attr = {
405 DMA_ATTR_V0, /* version number. */
406 (uint64_t)ERI_LIMADDRLO, /* low address */
407 (uint64_t)ERI_LIMADDRHI, /* high address */
408 (uint64_t)0x00ffffff, /* address counter max */
409 (uint64_t)1, /* alignment */
410 (uint_t)0xe000e0, /* dlim_burstsizes for 32 4 bit xfers */
411 (uint32_t)0x1, /* minimum transfer size */
412 (uint64_t)0x7fffffff, /* maximum transfer size */
413 (uint64_t)0x00ffffff, /* maximum segment size */
414 1, /* scatter/gather list length */
415 (uint32_t)1, /* granularity */
416 (uint_t)0 /* attribute flags */
417 };
418
419 static ddi_dma_attr_t desc_dma_attr = {
420 DMA_ATTR_V0, /* version number. */
421 (uint64_t)ERI_LIMADDRLO, /* low address */
422 (uint64_t)ERI_LIMADDRHI, /* high address */
423 (uint64_t)0x00ffffff, /* address counter max */
424 (uint64_t)8, /* alignment */
425 (uint_t)0xe000e0, /* dlim_burstsizes for 32 4 bit xfers */
426 (uint32_t)0x1, /* minimum transfer size */
427 (uint64_t)0x7fffffff, /* maximum transfer size */
428 (uint64_t)0x00ffffff, /* maximum segment size */
429 1, /* scatter/gather list length */
430 16, /* granularity */
431 0 /* attribute flags */
432 };
433
434 static ddi_device_acc_attr_t buf_attr = {
435 DDI_DEVICE_ATTR_V0, /* devacc_attr_version */
436 DDI_NEVERSWAP_ACC, /* devacc_attr_endian_flags */
437 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */
438 DDI_DEFAULT_ACC, /* devacc_attr_access */
439 };
440
441 ddi_dma_lim_t eri_dma_limits = {
442 (uint64_t)ERI_LIMADDRLO, /* dlim_addr_lo */
443 (uint64_t)ERI_LIMADDRHI, /* dlim_addr_hi */
444 (uint64_t)ERI_LIMADDRHI, /* dlim_cntr_max */
445 (uint_t)0x00e000e0, /* dlim_burstsizes for 32 and 64 bit xfers */
446 (uint32_t)0x1, /* dlim_minxfer */
447 1024 /* dlim_speed */
448 };
449
450 /*
451 * Link Configuration variables
452 *
453 * On Motherboard implementations, 10/100 Mbps speeds may be supported
454 * by using both the Serial Link and the MII on Non-serial-link interface.
455 * When both links are present, the driver automatically tries to bring up
456 * both. If both are up, the Gigabit Serial Link is selected for use, by
457 * default. The following configuration variable is used to force the selection
458 * of one of the links when both are up.
459 * To change the default selection to the MII link when both the Serial
460 * Link and the MII link are up, change eri_default_link to 1.
461 *
462 * Once a link is in use, the driver will continue to use that link till it
463 * goes down. When it goes down, the driver will look at the status of both the
464 * links again for link selection.
465 *
466 * Currently the standard is not stable w.r.t. gigabit link configuration
467 * using auto-negotiation procedures. Meanwhile, the link may be configured
468 * in "forced" mode using the "autonegotiation enable" bit (bit-12) in the
469 * PCS MII Command Register. In this mode the PCS sends "idles" until sees
470 * "idles" as initialization instead of the Link Configuration protocol
471 * where a Config register is exchanged. In this mode, the ERI is programmed
472 * for full-duplex operation with both pauseTX and pauseRX (for flow control)
473 * enabled.
474 */
475
476 static int select_link = 0; /* automatic selection */
477 static int default_link = 0; /* Select Serial link if both are up */
478
479 /*
480 * The following variables are used for configuring link-operation
481 * for all the "eri" interfaces in the system.
482 * Later these parameters may be changed per interface using "ndd" command
483 * These parameters may also be specified as properties using the .conf
484 * file mechanism for each interface.
485 */
486
487 /*
488 * The following variable value will be overridden by "link-pulse-disabled"
489 * property which may be created by OBP or eri.conf file. This property is
490 * applicable only for 10 Mbps links.
491 */
492 static int link_pulse_disabled = 0; /* link pulse disabled */
493
494 /* For MII-based FastEthernet links */
495 static int adv_autoneg_cap = 1;
496 static int adv_100T4_cap = 0;
497 static int adv_100fdx_cap = 1;
498 static int adv_100hdx_cap = 1;
499 static int adv_10fdx_cap = 1;
500 static int adv_10hdx_cap = 1;
501 static int adv_pauseTX_cap = 0;
502 static int adv_pauseRX_cap = 0;
503
504 /*
505 * The following gap parameters are in terms of byte times.
506 */
507 static int ipg0 = 8;
508 static int ipg1 = 8;
509 static int ipg2 = 4;
510
511 static int lance_mode = 1; /* to enable LANCE mode */
512 static int mifpoll_enable = 0; /* to enable mif poll */
513 static int ngu_enable = 0; /* to enable Never Give Up mode */
514
515 static int eri_force_mlf = 0; /* to enable mif poll */
516 static int eri_phy_mintrans = 1; /* Lu3X31T mintrans algorithm */
517 /*
518 * For the MII interface, the External Transceiver is selected when present.
519 * The following variable is used to select the Internal Transceiver even
520 * when the External Transceiver is present.
521 */
522 static int use_int_xcvr = 0;
523 static int pace_size = 0; /* Do not use pacing for now */
524
525 static int eri_use_dvma_rx = 0; /* =1:use dvma */
526 static int eri_rx_bcopy_max = RX_BCOPY_MAX; /* =1:use bcopy() */
527 static int eri_overflow_reset = 1; /* global reset if rx_fifo_overflow */
528 static int eri_tx_ring_size = 2048; /* number of entries in tx ring */
529 static int eri_rx_ring_size = 1024; /* number of entries in rx ring */
530 /*
531 * The following parameters may be configured by the user. If they are not
532 * configured by the user, the values will be based on the capabilities of
533 * the transceiver.
534 * The value "ERI_NOTUSR" is ORed with the parameter value to indicate values
535 * which are NOT configured by the user.
536 */
537
538 #define ERI_NOTUSR 0x0f000000
539 #define ERI_MASK_1BIT 0x1
540 #define ERI_MASK_2BIT 0x3
541 #define ERI_MASK_8BIT 0xff
542
543
544 /*
545 * Note:
546 * ERI has all of the above capabilities.
547 * Only when an External Transceiver is selected for MII-based FastEthernet
548 * link operation, the capabilities depend upon the capabilities of the
549 * External Transceiver.
550 */
551
552 /* ------------------------------------------------------------------------- */
553
554 static param_t param_arr[] = {
555 /* min max value r/w/hidden+name */
556 { 0, 2, 2, "-transceiver_inuse"},
557 { 0, 1, 0, "-link_status"},
558 { 0, 1, 0, "-link_speed"},
559 { 0, 1, 0, "-link_mode"},
560 { 0, 255, 8, "+ipg1"},
561 { 0, 255, 4, "+ipg2"},
562 { 0, 1, 0, "+use_int_xcvr"},
563 { 0, 255, 0, "+pace_size"},
564 { 0, 1, 1, "+adv_autoneg_cap"},
565 { 0, 1, 1, "+adv_100T4_cap"},
566 { 0, 1, 1, "+adv_100fdx_cap"},
567 { 0, 1, 1, "+adv_100hdx_cap"},
568 { 0, 1, 1, "+adv_10fdx_cap"},
569 { 0, 1, 1, "+adv_10hdx_cap"},
570 { 0, 1, 1, "-autoneg_cap"},
571 { 0, 1, 1, "-100T4_cap"},
572 { 0, 1, 1, "-100fdx_cap"},
573 { 0, 1, 1, "-100hdx_cap"},
574 { 0, 1, 1, "-10fdx_cap"},
575 { 0, 1, 1, "-10hdx_cap"},
576 { 0, 1, 0, "-lp_autoneg_cap"},
577 { 0, 1, 0, "-lp_100T4_cap"},
578 { 0, 1, 0, "-lp_100fdx_cap"},
579 { 0, 1, 0, "-lp_100hdx_cap"},
580 { 0, 1, 0, "-lp_10fdx_cap"},
581 { 0, 1, 0, "-lp_10hdx_cap"},
582 { 0, 1, 1, "+lance_mode"},
583 { 0, 31, 8, "+ipg0"},
584 { 0, 127, 6, "+intr_blank_time"},
585 { 0, 255, 8, "+intr_blank_packets"},
586 { 0, 1, 1, "!serial-link"},
587 { 0, 2, 1, "!non-serial-link"},
588 { 0, 1, 0, "%select-link"},
589 { 0, 1, 0, "%default-link"},
590 { 0, 2, 0, "!link-in-use"},
591 { 0, 1, 1, "%adv_asm_dir_cap"},
592 { 0, 1, 1, "%adv_pause_cap"},
593 { 0, 1, 0, "!asm_dir_cap"},
594 { 0, 1, 0, "!pause_cap"},
595 { 0, 1, 0, "!lp_asm_dir_cap"},
596 { 0, 1, 0, "!lp_pause_cap"},
597 };
598
599 DDI_DEFINE_STREAM_OPS(eri_dev_ops, nulldev, nulldev, eri_attach, eri_detach,
600 nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);
601
602 /*
603 * This is the loadable module wrapper.
604 */
605 #include <sys/modctl.h>
606
607 /*
608 * Module linkage information for the kernel.
609 */
610 static struct modldrv modldrv = {
611 &mod_driverops, /* Type of module. This one is a driver */
612 "Sun RIO 10/100 Mb Ethernet",
613 &eri_dev_ops, /* driver ops */
614 };
615
616 static struct modlinkage modlinkage = {
617 MODREV_1, &modldrv, NULL
618 };
619
620 /*
621 * Hardware Independent Functions
622 * New Section
623 */
624
625 int
626 _init(void)
627 {
628 int status;
629
630 mac_init_ops(&eri_dev_ops, "eri");
631 if ((status = mod_install(&modlinkage)) != 0) {
632 mac_fini_ops(&eri_dev_ops);
633 }
634 return (status);
635 }
636
637 int
638 _fini(void)
639 {
640 int status;
641
642 status = mod_remove(&modlinkage);
643 if (status == 0) {
644 mac_fini_ops(&eri_dev_ops);
645 }
646 return (status);
647 }
648
649 int
650 _info(struct modinfo *modinfop)
651 {
652 return (mod_info(&modlinkage, modinfop));
653 }
654
655
656 /*
657 * Interface exists: make available by filling in network interface
658 * record. System will initialize the interface when it is ready
659 * to accept packets.
660 */
661 static int
662 eri_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
663 {
664 struct eri *erip = NULL;
665 mac_register_t *macp = NULL;
666 int regno;
667 boolean_t doinit;
668 boolean_t mutex_inited = B_FALSE;
669 boolean_t intr_add = B_FALSE;
670
671 switch (cmd) {
672 case DDI_ATTACH:
673 break;
674
675 case DDI_RESUME:
676 if ((erip = ddi_get_driver_private(dip)) == NULL)
677 return (DDI_FAILURE);
678
679 mutex_enter(&erip->intrlock);
680 erip->flags &= ~ERI_SUSPENDED;
681 erip->init_macregs = 1;
682 param_linkup = 0;
683 erip->stats.link_up = LINK_STATE_DOWN;
684 erip->linkcheck = 0;
685
686 doinit = (erip->flags & ERI_STARTED) ? B_TRUE : B_FALSE;
687 mutex_exit(&erip->intrlock);
688
689 if (doinit && !eri_init(erip)) {
690 return (DDI_FAILURE);
691 }
692 return (DDI_SUCCESS);
693
694 default:
695 return (DDI_FAILURE);
696 }
697
698 /*
699 * Allocate soft device data structure
700 */
701 erip = kmem_zalloc(sizeof (struct eri), KM_SLEEP);
702
703 /*
704 * Initialize as many elements as possible.
705 */
706 ddi_set_driver_private(dip, erip);
707 erip->dip = dip; /* dip */
708 erip->instance = ddi_get_instance(dip); /* instance */
709 erip->flags = 0;
710 erip->multi_refcnt = 0;
711 erip->promisc = B_FALSE;
712
713 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
714 ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
715 "mac_alloc failed");
716 goto attach_fail;
717 }
718 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
719 macp->m_driver = erip;
720 macp->m_dip = dip;
721 macp->m_src_addr = erip->ouraddr;
722 macp->m_callbacks = &eri_m_callbacks;
723 macp->m_min_sdu = 0;
724 macp->m_max_sdu = ETHERMTU;
725 macp->m_margin = VLAN_TAGSZ;
726
727 /*
728 * Map in the device registers.
729 * Separate pointers will be set up for the following
730 * register groups within the GEM Register Space:
731 * Global register set
732 * ETX register set
733 * ERX register set
734 * BigMAC register set.
735 * MIF register set
736 */
737
738 if (ddi_dev_nregs(dip, ®no) != (DDI_SUCCESS)) {
739 ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
740 "ddi_dev_nregs failed, returned %d", regno);
741 goto attach_fail;
742 }
743
744 /*
745 * Map the PCI config space
746 */
747 if (pci_config_setup(dip, &erip->pci_config_handle) != DDI_SUCCESS) {
748 ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
749 "%s pci_config_setup()", config_space_fatal_msg);
750 goto attach_fail;
751 }
752
753 /*
754 * Initialize device attributes structure
755 */
756 erip->dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
757 erip->dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
758 erip->dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
759
760 if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->globregp), 0, 0,
761 &erip->dev_attr, &erip->globregh)) {
762 goto attach_fail;
763 }
764 erip->etxregh = erip->globregh;
765 erip->erxregh = erip->globregh;
766 erip->bmacregh = erip->globregh;
767 erip->mifregh = erip->globregh;
768
769 erip->etxregp = (void *)(((caddr_t)erip->globregp) + 0x2000);
770 erip->erxregp = (void *)(((caddr_t)erip->globregp) + 0x4000);
771 erip->bmacregp = (void *)(((caddr_t)erip->globregp) + 0x6000);
772 erip->mifregp = (void *)(((caddr_t)erip->globregp) + 0x6200);
773
774 /*
775 * Map the software reset register.
776 */
777 if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->sw_reset_reg),
778 0x1010, 4, &erip->dev_attr, &erip->sw_reset_regh)) {
779 ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
780 mregs_4soft_reset_fail_msg);
781 goto attach_fail;
782 }
783
784 /*
785 * Try and stop the device.
786 * This is done until we want to handle interrupts.
787 */
788 if (eri_stop(erip))
789 goto attach_fail;
790
791 /*
792 * set PCI latency timer register.
793 */
794 pci_config_put8(erip->pci_config_handle, PCI_CONF_LATENCY_TIMER,
795 (uchar_t)eri_pci_latency_timer);
796
797 if (ddi_intr_hilevel(dip, 0)) {
798 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
799 " high-level interrupts are not supported");
800 goto attach_fail;
801 }
802
803 /*
804 * Get the interrupt cookie so the mutexes can be
805 * Initialized.
806 */
807 if (ddi_get_iblock_cookie(dip, 0, &erip->cookie) != DDI_SUCCESS)
808 goto attach_fail;
809
810 /*
811 * Initialize mutex's for this device.
812 */
813 mutex_init(&erip->xmitlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
814 mutex_init(&erip->intrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
815 mutex_init(&erip->linklock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
816 mutex_init(&erip->xcvrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
817
818 mutex_inited = B_TRUE;
819
820 /*
821 * Add interrupt to system
822 */
823 if (ddi_add_intr(dip, 0, &erip->cookie, 0, eri_intr, (caddr_t)erip) ==
824 DDI_SUCCESS)
825 intr_add = B_TRUE;
826 else {
827 goto attach_fail;
828 }
829
830 /*
831 * Set up the ethernet mac address.
832 */
833 (void) eri_setup_mac_address(erip, dip);
834
835 if (eri_init_xfer_params(erip))
836 goto attach_fail;
837
838 if (eri_burstsize(erip) == DDI_FAILURE) {
839 goto attach_fail;
840 }
841
842 /*
843 * Setup fewer receive bufers.
844 */
845 ERI_RPENDING = eri_rx_ring_size;
846 ERI_TPENDING = eri_tx_ring_size;
847
848 erip->rpending_mask = ERI_RPENDING - 1;
849 erip->rmdmax_mask = ERI_RPENDING - 1;
850 erip->mif_config = (ERI_PHY_BMSR << ERI_MIF_CFGPR_SHIFT);
851
852 erip->stats.pmcap = ERI_PMCAP_NONE;
853 if (pci_report_pmcap(dip, PCI_PM_IDLESPEED, (void *)4000) ==
854 DDI_SUCCESS)
855 erip->stats.pmcap = ERI_PMCAP_4MHZ;
856
857 if (mac_register(macp, &erip->mh) != 0)
858 goto attach_fail;
859
860 mac_free(macp);
861
862 return (DDI_SUCCESS);
863
864 attach_fail:
865 if (erip->pci_config_handle)
866 (void) pci_config_teardown(&erip->pci_config_handle);
867
868 if (mutex_inited) {
869 mutex_destroy(&erip->xmitlock);
870 mutex_destroy(&erip->intrlock);
871 mutex_destroy(&erip->linklock);
872 mutex_destroy(&erip->xcvrlock);
873 }
874
875 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, attach_fail_msg);
876
877 if (intr_add)
878 ddi_remove_intr(dip, 0, erip->cookie);
879
880 if (erip->globregh)
881 ddi_regs_map_free(&erip->globregh);
882
883 if (macp != NULL)
884 mac_free(macp);
885 if (erip != NULL)
886 kmem_free(erip, sizeof (*erip));
887
888 return (DDI_FAILURE);
889 }
890
891 static int
892 eri_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
893 {
894 struct eri *erip;
895 int i;
896
897 if ((erip = ddi_get_driver_private(dip)) == NULL) {
898 /*
899 * No resources allocated.
900 */
901 return (DDI_FAILURE);
902 }
903
904 switch (cmd) {
905 case DDI_DETACH:
906 break;
907
908 case DDI_SUSPEND:
909 erip->flags |= ERI_SUSPENDED;
910 eri_uninit(erip);
911 return (DDI_SUCCESS);
912
913 default:
914 return (DDI_FAILURE);
915 }
916
917 if (erip->flags & (ERI_RUNNING | ERI_SUSPENDED)) {
918 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, busy_msg);
919 return (DDI_FAILURE);
920 }
921
922 if (mac_unregister(erip->mh) != 0) {
923 return (DDI_FAILURE);
924 }
925
926 /*
927 * Make the device quiescent
928 */
929 (void) eri_stop(erip);
930
931 /*
932 * Remove instance of the intr
933 */
934 ddi_remove_intr(dip, 0, erip->cookie);
935
936 if (erip->pci_config_handle)
937 (void) pci_config_teardown(&erip->pci_config_handle);
938
939 /*
940 * Destroy all mutexes and data structures allocated during
941 * attach time.
942 */
943
944 if (erip->globregh)
945 ddi_regs_map_free(&erip->globregh);
946
947 erip->etxregh = NULL;
948 erip->erxregh = NULL;
949 erip->bmacregh = NULL;
950 erip->mifregh = NULL;
951 erip->globregh = NULL;
952
953 if (erip->sw_reset_regh)
954 ddi_regs_map_free(&erip->sw_reset_regh);
955
956 if (erip->ksp)
957 kstat_delete(erip->ksp);
958
959 eri_stop_timer(erip); /* acquire linklock */
960 eri_start_timer(erip, eri_check_link, 0);
961 mutex_destroy(&erip->xmitlock);
962 mutex_destroy(&erip->intrlock);
963 mutex_destroy(&erip->linklock);
964 mutex_destroy(&erip->xcvrlock);
965
966 if (erip->md_h) {
967 if (ddi_dma_unbind_handle(erip->md_h) ==
968 DDI_FAILURE)
969 return (DDI_FAILURE);
970 ddi_dma_mem_free(&erip->mdm_h);
971 ddi_dma_free_handle(&erip->md_h);
972 }
973
974 if (eri_freebufs(erip))
975 return (DDI_FAILURE);
976
977 /* dvma handle case */
978
979 if (erip->eri_dvmarh) {
980 (void) dvma_release(erip->eri_dvmarh);
981 erip->eri_dvmarh = NULL;
982 }
983 /*
984 * xmit_dma_mode, erip->ndmaxh[i]=NULL for dvma
985 */
986 else {
987 for (i = 0; i < ERI_RPENDING; i++)
988 if (erip->ndmarh[i])
989 ddi_dma_free_handle(&erip->ndmarh[i]);
990 }
991 /*
992 * Release TX buffer
993 */
994 if (erip->tbuf_ioaddr != 0) {
995 (void) ddi_dma_unbind_handle(erip->tbuf_handle);
996 erip->tbuf_ioaddr = 0;
997 }
998 if (erip->tbuf_kaddr != NULL) {
999 ddi_dma_mem_free(&erip->tbuf_acch);
1000 erip->tbuf_kaddr = NULL;
1001 }
1002 if (erip->tbuf_handle != NULL) {
1003 ddi_dma_free_handle(&erip->tbuf_handle);
1004 erip->tbuf_handle = NULL;
1005 }
1006
1007 eri_param_cleanup(erip);
1008
1009 ddi_set_driver_private(dip, NULL);
1010 kmem_free((caddr_t)erip, sizeof (struct eri));
1011
1012 return (DDI_SUCCESS);
1013 }
1014
1015 /*
1016 * To set up the mac address for the network interface:
1017 * The adapter card may support a local mac address which is published
1018 * in a device node property "local-mac-address". This mac address is
1019 * treated as the factory-installed mac address for DLPI interface.
1020 * If the adapter firmware has used the device for diskless boot
1021 * operation it publishes a property called "mac-address" for use by
1022 * inetboot and the device driver.
1023 * If "mac-address" is not found, the system options property
1024 * "local-mac-address" is used to select the mac-address. If this option
1025 * is set to "true", and "local-mac-address" has been found, then
1026 * local-mac-address is used; otherwise the system mac address is used
1027 * by calling the "localetheraddr()" function.
1028 */
1029
1030 static void
1031 eri_setup_mac_address(struct eri *erip, dev_info_t *dip)
1032 {
1033 uchar_t *prop;
1034 char *uselocal;
1035 unsigned prop_len;
1036 uint32_t addrflags = 0;
1037 struct ether_addr factaddr;
1038
1039 /*
1040 * Check if it is an adapter with its own local mac address
1041 * If it is present, save it as the "factory-address"
1042 * for this adapter.
1043 */
1044 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1045 "local-mac-address", &prop, &prop_len) == DDI_PROP_SUCCESS) {
1046 if (prop_len == ETHERADDRL) {
1047 addrflags = ERI_FACTADDR_PRESENT;
1048 bcopy(prop, &factaddr, ETHERADDRL);
1049 ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
1050 lether_addr_msg, ether_sprintf(&factaddr));
1051 }
1052 ddi_prop_free(prop);
1053 }
1054 /*
1055 * Check if the adapter has published "mac-address" property.
1056 * If it is present, use it as the mac address for this device.
1057 */
1058 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1059 "mac-address", &prop, &prop_len) == DDI_PROP_SUCCESS) {
1060 if (prop_len >= ETHERADDRL) {
1061 bcopy(prop, erip->ouraddr, ETHERADDRL);
1062 ddi_prop_free(prop);
1063 return;
1064 }
1065 ddi_prop_free(prop);
1066 }
1067
1068 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, "local-mac-address?",
1069 &uselocal) == DDI_PROP_SUCCESS) {
1070 if ((strcmp("true", uselocal) == 0) &&
1071 (addrflags & ERI_FACTADDR_PRESENT)) {
1072 addrflags |= ERI_FACTADDR_USE;
1073 bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
1074 ddi_prop_free(uselocal);
1075 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
1076 lmac_addr_msg);
1077 return;
1078 }
1079 ddi_prop_free(uselocal);
1080 }
1081
1082 /*
1083 * Get the system ethernet address.
1084 */
1085 (void) localetheraddr(NULL, &factaddr);
1086 bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
1087 }
1088
1089
1090 /*
1091 * Calculate the bit in the multicast address filter that selects the given
1092 * address.
1093 * Note: For ERI, the last 8-bits are used.
1094 */
1095
1096 static uint32_t
1097 eri_ladrf_bit(const uint8_t *addr)
1098 {
1099 uint32_t crc;
1100
1101 CRC32(crc, addr, ETHERADDRL, -1U, crc32_table);
1102
1103 /*
1104 * Just want the 8 most significant bits.
1105 */
1106 return ((~crc) >> 24);
1107 }
1108
1109 static void
1110 eri_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1111 {
1112 struct eri *erip = arg;
1113 struct iocblk *iocp = (void *)mp->b_rptr;
1114 int err;
1115
1116 ASSERT(erip != NULL);
1117
1118 /*
1119 * Privilege checks.
1120 */
1121 switch (iocp->ioc_cmd) {
1122 case ERI_SET_LOOP_MODE:
1123 case ERI_ND_SET:
1124 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1125 if (err != 0) {
1126 miocnak(wq, mp, 0, err);
1127 return;
1128 }
1129 break;
1130 default:
1131 break;
1132 }
1133
1134 switch (iocp->ioc_cmd) {
1135 case ERI_ND_GET:
1136 case ERI_ND_SET:
1137 eri_process_ndd_ioctl(erip, wq, mp, iocp->ioc_cmd);
1138 break;
1139
1140 case ERI_SET_LOOP_MODE:
1141 case ERI_GET_LOOP_MODE:
1142 /*
1143 * XXX: Consider updating this to the new netlb ioctls.
1144 */
1145 eri_loopback(erip, wq, mp);
1146 break;
1147
1148 default:
1149 miocnak(wq, mp, 0, EINVAL);
1150 break;
1151 }
1152
1153 ASSERT(!MUTEX_HELD(&erip->linklock));
1154 }
1155
1156 static void
1157 eri_loopback(struct eri *erip, queue_t *wq, mblk_t *mp)
1158 {
1159 struct iocblk *iocp = (void *)mp->b_rptr;
1160 loopback_t *al;
1161
1162 if (mp->b_cont == NULL || MBLKL(mp->b_cont) < sizeof (loopback_t)) {
1163 miocnak(wq, mp, 0, EINVAL);
1164 return;
1165 }
1166
1167 al = (void *)mp->b_cont->b_rptr;
1168
1169 switch (iocp->ioc_cmd) {
1170 case ERI_SET_LOOP_MODE:
1171 switch (al->loopback) {
1172 case ERI_LOOPBACK_OFF:
1173 erip->flags &= (~ERI_MACLOOPBACK & ~ERI_SERLOOPBACK);
1174 /* force link status to go down */
1175 param_linkup = 0;
1176 erip->stats.link_up = LINK_STATE_DOWN;
1177 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1178 (void) eri_init(erip);
1179 break;
1180
1181 case ERI_MAC_LOOPBACK_ON:
1182 erip->flags |= ERI_MACLOOPBACK;
1183 erip->flags &= ~ERI_SERLOOPBACK;
1184 param_linkup = 0;
1185 erip->stats.link_up = LINK_STATE_DOWN;
1186 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1187 (void) eri_init(erip);
1188 break;
1189
1190 case ERI_PCS_LOOPBACK_ON:
1191 break;
1192
1193 case ERI_SER_LOOPBACK_ON:
1194 erip->flags |= ERI_SERLOOPBACK;
1195 erip->flags &= ~ERI_MACLOOPBACK;
1196 /* force link status to go down */
1197 param_linkup = 0;
1198 erip->stats.link_up = LINK_STATE_DOWN;
1199 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1200 (void) eri_init(erip);
1201 break;
1202
1203 default:
1204 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
1205 loopback_val_default);
1206 miocnak(wq, mp, 0, EINVAL);
1207 return;
1208 }
1209 miocnak(wq, mp, 0, 0);
1210 break;
1211
1212 case ERI_GET_LOOP_MODE:
1213 al->loopback = ERI_MAC_LOOPBACK_ON | ERI_PCS_LOOPBACK_ON |
1214 ERI_SER_LOOPBACK_ON;
1215 miocack(wq, mp, sizeof (loopback_t), 0);
1216 break;
1217
1218 default:
1219 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1220 loopback_cmd_default);
1221 }
1222 }
1223
1224 static int
1225 eri_m_promisc(void *arg, boolean_t on)
1226 {
1227 struct eri *erip = arg;
1228
1229 mutex_enter(&erip->intrlock);
1230 erip->promisc = on;
1231 eri_init_rx(erip);
1232 mutex_exit(&erip->intrlock);
1233 return (0);
1234 }
1235
1236 /*
1237 * This is to support unlimited number of members
1238 * in Multicast.
1239 */
1240 static int
1241 eri_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1242 {
1243 struct eri *erip = arg;
1244 uint32_t ladrf_bit;
1245
1246 /*
1247 * If this address's bit was not already set in the local address
1248 * filter, add it and re-initialize the Hardware.
1249 */
1250 ladrf_bit = eri_ladrf_bit(mca);
1251
1252 mutex_enter(&erip->intrlock);
1253 if (add) {
1254 erip->ladrf_refcnt[ladrf_bit]++;
1255 if (erip->ladrf_refcnt[ladrf_bit] == 1) {
1256 LADRF_SET(erip, ladrf_bit);
1257 erip->multi_refcnt++;
1258 eri_init_rx(erip);
1259 }
1260 } else {
1261 erip->ladrf_refcnt[ladrf_bit]--;
1262 if (erip->ladrf_refcnt[ladrf_bit] == 0) {
1263 LADRF_CLR(erip, ladrf_bit);
1264 erip->multi_refcnt--;
1265 eri_init_rx(erip);
1266 }
1267 }
1268 mutex_exit(&erip->intrlock);
1269 return (0);
1270 }
1271
1272 static int
1273 eri_m_unicst(void *arg, const uint8_t *macaddr)
1274 {
1275 struct eri *erip = arg;
1276
1277 /*
1278 * Set new interface local address and re-init device.
1279 * This is destructive to any other streams attached
1280 * to this device.
1281 */
1282 mutex_enter(&erip->intrlock);
1283 bcopy(macaddr, &erip->ouraddr, ETHERADDRL);
1284 eri_init_rx(erip);
1285 mutex_exit(&erip->intrlock);
1286 return (0);
1287 }
1288
1289 /*ARGSUSED*/
1290 static boolean_t
1291 eri_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1292 {
1293 switch (cap) {
1294 case MAC_CAPAB_HCKSUM: {
1295 uint32_t *hcksum_txflags = cap_data;
1296 *hcksum_txflags = HCKSUM_INET_PARTIAL;
1297 return (B_TRUE);
1298 }
1299 default:
1300 return (B_FALSE);
1301 }
1302 }
1303
1304 static int
1305 eri_m_start(void *arg)
1306 {
1307 struct eri *erip = arg;
1308
1309 mutex_enter(&erip->intrlock);
1310 erip->flags |= ERI_STARTED;
1311 mutex_exit(&erip->intrlock);
1312
1313 if (!eri_init(erip)) {
1314 mutex_enter(&erip->intrlock);
1315 erip->flags &= ~ERI_STARTED;
1316 mutex_exit(&erip->intrlock);
1317 return (EIO);
1318 }
1319 return (0);
1320 }
1321
1322 static void
1323 eri_m_stop(void *arg)
1324 {
1325 struct eri *erip = arg;
1326
1327 mutex_enter(&erip->intrlock);
1328 erip->flags &= ~ERI_STARTED;
1329 mutex_exit(&erip->intrlock);
1330 eri_uninit(erip);
1331 }
1332
1333 static int
1334 eri_m_stat(void *arg, uint_t stat, uint64_t *val)
1335 {
1336 struct eri *erip = arg;
1337 struct stats *esp;
1338 boolean_t macupdate = B_FALSE;
1339
1340 esp = &erip->stats;
1341
1342 mutex_enter(&erip->xmitlock);
1343 if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
1344 erip->tx_completion =
1345 GET_ETXREG(tx_completion) & ETX_COMPLETION_MASK;
1346 macupdate |= eri_reclaim(erip, erip->tx_completion);
1347 }
1348 mutex_exit(&erip->xmitlock);
1349 if (macupdate)
1350 mac_tx_update(erip->mh);
1351
1352 eri_savecntrs(erip);
1353
1354 switch (stat) {
1355 case MAC_STAT_IFSPEED:
1356 *val = esp->ifspeed * 1000000ULL;
1357 break;
1358 case MAC_STAT_MULTIRCV:
1359 *val = esp->multircv;
1360 break;
1361 case MAC_STAT_BRDCSTRCV:
1362 *val = esp->brdcstrcv;
1363 break;
1364 case MAC_STAT_IPACKETS:
1365 *val = esp->ipackets64;
1366 break;
1367 case MAC_STAT_RBYTES:
1368 *val = esp->rbytes64;
1369 break;
1370 case MAC_STAT_OBYTES:
1371 *val = esp->obytes64;
1372 break;
1373 case MAC_STAT_OPACKETS:
1374 *val = esp->opackets64;
1375 break;
1376 case MAC_STAT_IERRORS:
1377 *val = esp->ierrors;
1378 break;
1379 case MAC_STAT_OERRORS:
1380 *val = esp->oerrors;
1381 break;
1382 case MAC_STAT_MULTIXMT:
1383 *val = esp->multixmt;
1384 break;
1385 case MAC_STAT_BRDCSTXMT:
1386 *val = esp->brdcstxmt;
1387 break;
1388 case MAC_STAT_NORCVBUF:
1389 *val = esp->norcvbuf;
1390 break;
1391 case MAC_STAT_NOXMTBUF:
1392 *val = esp->noxmtbuf;
1393 break;
1394 case MAC_STAT_UNDERFLOWS:
1395 *val = esp->txmac_urun;
1396 break;
1397 case MAC_STAT_OVERFLOWS:
1398 *val = esp->rx_overflow;
1399 break;
1400 case MAC_STAT_COLLISIONS:
1401 *val = esp->collisions;
1402 break;
1403 case ETHER_STAT_ALIGN_ERRORS:
1404 *val = esp->rx_align_err;
1405 break;
1406 case ETHER_STAT_FCS_ERRORS:
1407 *val = esp->rx_crc_err;
1408 break;
1409 case ETHER_STAT_EX_COLLISIONS:
1410 *val = esp->excessive_coll;
1411 break;
1412 case ETHER_STAT_TX_LATE_COLLISIONS:
1413 *val = esp->late_coll;
1414 break;
1415 case ETHER_STAT_FIRST_COLLISIONS:
1416 *val = esp->first_coll;
1417 break;
1418 case ETHER_STAT_LINK_DUPLEX:
1419 *val = esp->link_duplex;
1420 break;
1421 case ETHER_STAT_TOOLONG_ERRORS:
1422 *val = esp->rx_toolong_pkts;
1423 break;
1424 case ETHER_STAT_TOOSHORT_ERRORS:
1425 *val = esp->rx_runt;
1426 break;
1427
1428 case ETHER_STAT_XCVR_ADDR:
1429 *val = erip->phyad;
1430 break;
1431
1432 case ETHER_STAT_XCVR_INUSE:
1433 *val = XCVR_100X; /* should always be 100X for now */
1434 break;
1435
1436 case ETHER_STAT_CAP_100FDX:
1437 *val = param_bmsr_100fdx;
1438 break;
1439 case ETHER_STAT_CAP_100HDX:
1440 *val = param_bmsr_100hdx;
1441 break;
1442 case ETHER_STAT_CAP_10FDX:
1443 *val = param_bmsr_10fdx;
1444 break;
1445 case ETHER_STAT_CAP_10HDX:
1446 *val = param_bmsr_10hdx;
1447 break;
1448 case ETHER_STAT_CAP_AUTONEG:
1449 *val = param_bmsr_ancap;
1450 break;
1451 case ETHER_STAT_CAP_ASMPAUSE:
1452 *val = param_bmsr_asm_dir;
1453 break;
1454 case ETHER_STAT_CAP_PAUSE:
1455 *val = param_bmsr_pause;
1456 break;
1457 case ETHER_STAT_ADV_CAP_100FDX:
1458 *val = param_anar_100fdx;
1459 break;
1460 case ETHER_STAT_ADV_CAP_100HDX:
1461 *val = param_anar_100hdx;
1462 break;
1463 case ETHER_STAT_ADV_CAP_10FDX:
1464 *val = param_anar_10fdx;
1465 break;
1466 case ETHER_STAT_ADV_CAP_10HDX:
1467 *val = param_anar_10hdx;
1468 break;
1469 case ETHER_STAT_ADV_CAP_AUTONEG:
1470 *val = param_autoneg;
1471 break;
1472 case ETHER_STAT_ADV_CAP_ASMPAUSE:
1473 *val = param_anar_asm_dir;
1474 break;
1475 case ETHER_STAT_ADV_CAP_PAUSE:
1476 *val = param_anar_pause;
1477 break;
1478 case ETHER_STAT_LP_CAP_100FDX:
1479 *val = param_anlpar_100fdx;
1480 break;
1481 case ETHER_STAT_LP_CAP_100HDX:
1482 *val = param_anlpar_100hdx;
1483 break;
1484 case ETHER_STAT_LP_CAP_10FDX:
1485 *val = param_anlpar_10fdx;
1486 break;
1487 case ETHER_STAT_LP_CAP_10HDX:
1488 *val = param_anlpar_10hdx;
1489 break;
1490 case ETHER_STAT_LP_CAP_AUTONEG:
1491 *val = param_aner_lpancap;
1492 break;
1493 case ETHER_STAT_LP_CAP_ASMPAUSE:
1494 *val = param_anlpar_pauseTX;
1495 break;
1496 case ETHER_STAT_LP_CAP_PAUSE:
1497 *val = param_anlpar_pauseRX;
1498 break;
1499 case ETHER_STAT_LINK_PAUSE:
1500 *val = esp->pausing;
1501 break;
1502 case ETHER_STAT_LINK_ASMPAUSE:
1503 *val = param_anar_asm_dir &&
1504 param_anlpar_pauseTX &&
1505 (param_anar_pause != param_anlpar_pauseRX);
1506 break;
1507 case ETHER_STAT_LINK_AUTONEG:
1508 *val = param_autoneg && param_aner_lpancap;
1509 break;
1510 }
1511 return (0);
1512 }
1513
1514 /*
1515 * Hardware Functions
1516 * New Section
1517 */
1518
1519 /*
1520 * Initialize the MAC registers. Some of of the MAC registers are initialized
1521 * just once since Global Reset or MAC reset doesn't clear them. Others (like
1522 * Host MAC Address Registers) are cleared on every reset and have to be
1523 * reinitialized.
1524 */
1525 static void
1526 eri_init_macregs_generic(struct eri *erip)
1527 {
1528 /*
1529 * set up the MAC parameter registers once
1530 * after power cycle. SUSPEND/RESUME also requires
1531 * setting these registers.
1532 */
1533 if ((erip->stats.inits == 1) || (erip->init_macregs)) {
1534 erip->init_macregs = 0;
1535 PUT_MACREG(ipg0, param_ipg0);
1536 PUT_MACREG(ipg1, param_ipg1);
1537 PUT_MACREG(ipg2, param_ipg2);
1538 PUT_MACREG(macmin, BMAC_MIN_FRAME_SIZE);
1539 #ifdef ERI_RX_TAG_ERROR_WORKAROUND
1540 PUT_MACREG(macmax, BMAC_MAX_FRAME_SIZE_TAG | BMAC_MAX_BURST);
1541 #else
1542 PUT_MACREG(macmax, BMAC_MAX_FRAME_SIZE | BMAC_MAX_BURST);
1543 #endif
1544 PUT_MACREG(palen, BMAC_PREAMBLE_SIZE);
1545 PUT_MACREG(jam, BMAC_JAM_SIZE);
1546 PUT_MACREG(alimit, BMAC_ATTEMPT_LIMIT);
1547 PUT_MACREG(macctl_type, BMAC_CONTROL_TYPE);
1548 PUT_MACREG(rseed,
1549 ((erip->ouraddr[0] & 0x3) << 8) | erip->ouraddr[1]);
1550
1551 PUT_MACREG(madd3, BMAC_ADDRESS_3);
1552 PUT_MACREG(madd4, BMAC_ADDRESS_4);
1553 PUT_MACREG(madd5, BMAC_ADDRESS_5);
1554
1555 /* Program MAC Control address */
1556 PUT_MACREG(madd6, BMAC_ADDRESS_6);
1557 PUT_MACREG(madd7, BMAC_ADDRESS_7);
1558 PUT_MACREG(madd8, BMAC_ADDRESS_8);
1559
1560 PUT_MACREG(afr0, BMAC_AF_0);
1561 PUT_MACREG(afr1, BMAC_AF_1);
1562 PUT_MACREG(afr2, BMAC_AF_2);
1563 PUT_MACREG(afmr1_2, BMAC_AF21_MASK);
1564 PUT_MACREG(afmr0, BMAC_AF0_MASK);
1565 }
1566
1567 /* The counters need to be zeroed */
1568 PUT_MACREG(nccnt, 0);
1569 PUT_MACREG(fccnt, 0);
1570 PUT_MACREG(excnt, 0);
1571 PUT_MACREG(ltcnt, 0);
1572 PUT_MACREG(dcnt, 0);
1573 PUT_MACREG(frcnt, 0);
1574 PUT_MACREG(lecnt, 0);
1575 PUT_MACREG(aecnt, 0);
1576 PUT_MACREG(fecnt, 0);
1577 PUT_MACREG(rxcv, 0);
1578
1579 if (erip->pauseTX)
1580 PUT_MACREG(spcmd, BMAC_SEND_PAUSE_CMD);
1581 else
1582 PUT_MACREG(spcmd, 0);
1583
1584 /*
1585 * Program BigMAC with local individual ethernet address.
1586 */
1587
1588 PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]);
1589 PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]);
1590 PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]);
1591
1592 /*
1593 * Install multicast address filter.
1594 */
1595
1596 PUT_MACREG(hash0, erip->ladrf[0]);
1597 PUT_MACREG(hash1, erip->ladrf[1]);
1598 PUT_MACREG(hash2, erip->ladrf[2]);
1599 PUT_MACREG(hash3, erip->ladrf[3]);
1600 PUT_MACREG(hash4, erip->ladrf[4]);
1601 PUT_MACREG(hash5, erip->ladrf[5]);
1602 PUT_MACREG(hash6, erip->ladrf[6]);
1603 PUT_MACREG(hash7, erip->ladrf[7]);
1604 PUT_MACREG(hash8, erip->ladrf[8]);
1605 PUT_MACREG(hash9, erip->ladrf[9]);
1606 PUT_MACREG(hash10, erip->ladrf[10]);
1607 PUT_MACREG(hash11, erip->ladrf[11]);
1608 PUT_MACREG(hash12, erip->ladrf[12]);
1609 PUT_MACREG(hash13, erip->ladrf[13]);
1610 PUT_MACREG(hash14, erip->ladrf[14]);
1611 }
1612
1613 static int
1614 eri_flush_rxbufs(struct eri *erip)
1615 {
1616 uint_t i;
1617 int status = 0;
1618 /*
1619 * Free and dvma_unload pending recv buffers.
1620 * Maintaining the 1-to-1 ordered sequence of
1621 * dvma_load() followed by dvma_unload() is critical.
1622 * Always unload anything before loading it again.
1623 * Never unload anything twice. Always unload
1624 * before freeing the buffer. We satisfy these
1625 * requirements by unloading only those descriptors
1626 * which currently have an mblk associated with them.
1627 */
1628 for (i = 0; i < ERI_RPENDING; i++) {
1629 if (erip->rmblkp[i]) {
1630 if (erip->eri_dvmarh)
1631 dvma_unload(erip->eri_dvmarh, 2 * i,
1632 DDI_DMA_SYNC_FORCPU);
1633 else if ((ddi_dma_unbind_handle(erip->ndmarh[i]) ==
1634 DDI_FAILURE))
1635 status = -1;
1636 freeb(erip->rmblkp[i]);
1637 erip->rmblkp[i] = NULL;
1638 }
1639 }
1640 return (status);
1641 }
1642
1643 static void
1644 eri_init_txbufs(struct eri *erip)
1645 {
1646 /*
1647 * Clear TX descriptors.
1648 */
1649 bzero((caddr_t)erip->eri_tmdp, ERI_TPENDING * sizeof (struct eri_tmd));
1650
1651 /*
1652 * sync TXDMA descriptors.
1653 */
1654 ERI_SYNCIOPB(erip, erip->eri_tmdp,
1655 (ERI_TPENDING * sizeof (struct eri_tmd)), DDI_DMA_SYNC_FORDEV);
1656 /*
1657 * Reset TMD 'walking' pointers.
1658 */
1659 erip->tcurp = erip->eri_tmdp;
1660 erip->tnextp = erip->eri_tmdp;
1661 erip->tx_cur_cnt = 0;
1662 erip->tx_kick = 0;
1663 erip->tx_completion = 0;
1664 }
1665
1666 static int
1667 eri_init_rxbufs(struct eri *erip)
1668 {
1669
1670 ddi_dma_cookie_t dma_cookie;
1671 mblk_t *bp;
1672 int i, status = 0;
1673 uint32_t ccnt;
1674
1675 /*
1676 * clear rcv descriptors
1677 */
1678 bzero((caddr_t)erip->rmdp, ERI_RPENDING * sizeof (struct rmd));
1679
1680 for (i = 0; i < ERI_RPENDING; i++) {
1681 if ((bp = eri_allocb(ERI_BUFSIZE)) == NULL) {
1682 status = -1;
1683 continue;
1684 }
1685 /* Load data buffer to DVMA space */
1686 if (erip->eri_dvmarh)
1687 dvma_kaddr_load(erip->eri_dvmarh,
1688 (caddr_t)bp->b_rptr, ERI_BUFSIZE,
1689 2 * i, &dma_cookie);
1690 /*
1691 * Bind data buffer to DMA handle
1692 */
1693 else if (ddi_dma_addr_bind_handle(erip->ndmarh[i], NULL,
1694 (caddr_t)bp->b_rptr, ERI_BUFSIZE,
1695 DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1696 &dma_cookie, &ccnt) != DDI_DMA_MAPPED)
1697 status = -1;
1698
1699 PUT_RMD((&erip->rmdp[i]), dma_cookie);
1700 erip->rmblkp[i] = bp; /* save for later use */
1701 }
1702
1703 /*
1704 * sync RXDMA descriptors.
1705 */
1706 ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
1707 DDI_DMA_SYNC_FORDEV);
1708 /*
1709 * Reset RMD 'walking' pointers.
1710 */
1711 erip->rnextp = erip->rmdp;
1712 erip->rx_completion = 0;
1713 erip->rx_kick = ERI_RPENDING - 4;
1714 return (status);
1715 }
1716
1717 static uint32_t
1718 eri_txmac_disable(struct eri *erip)
1719 {
1720 int n;
1721
1722 PUT_MACREG(txcfg, GET_MACREG(txcfg) & ~BMAC_TXCFG_ENAB);
1723 n = (BMACTXRSTDELAY * 10) / ERI_WAITPERIOD;
1724
1725 while (--n > 0) {
1726 drv_usecwait(ERI_WAITPERIOD);
1727 if ((GET_MACREG(txcfg) & 1) == 0)
1728 return (0);
1729 }
1730 return (1);
1731 }
1732
1733 static uint32_t
1734 eri_rxmac_disable(struct eri *erip)
1735 {
1736 int n;
1737 PUT_MACREG(rxcfg, GET_MACREG(rxcfg) & ~BMAC_RXCFG_ENAB);
1738 n = BMACRXRSTDELAY / ERI_WAITPERIOD;
1739
1740 while (--n > 0) {
1741 drv_usecwait(ERI_WAITPERIOD);
1742 if ((GET_MACREG(rxcfg) & 1) == 0)
1743 return (0);
1744 }
1745 return (1);
1746 }
1747
1748 /*
1749 * Return 0 upon success, 1 on failure.
1750 */
1751 static int
1752 eri_stop(struct eri *erip)
1753 {
1754 (void) eri_erx_reset(erip);
1755 (void) eri_etx_reset(erip);
1756
1757 /*
1758 * set up cache line to 16 for 64 bytes of pci burst size
1759 */
1760 PUT_SWRSTREG(reset, ERI_G_RESET_GLOBAL | ERI_CACHE_LINE_SIZE);
1761
1762 if (erip->linkcheck) {
1763 erip->linkcheck = 0;
1764 erip->global_reset_issued = 2;
1765 } else {
1766 param_linkup = 0;
1767 erip->stats.link_up = LINK_STATE_DOWN;
1768 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1769 erip->global_reset_issued = -1;
1770 }
1771
1772 ERI_DELAY((GET_SWRSTREG(reset) == ERI_CACHE_LINE_SIZE),
1773 ERI_MAX_RST_DELAY);
1774 erip->rx_reset_issued = -1;
1775 erip->tx_reset_issued = -1;
1776
1777 /*
1778 * workaround for RIO not resetting the interrupt mask
1779 * register to default value 0xffffffff.
1780 */
1781 PUT_GLOBREG(intmask, ERI_G_MASK_ALL);
1782
1783 if (GET_SWRSTREG(reset) == ERI_CACHE_LINE_SIZE) {
1784 return (0);
1785 } else {
1786 return (1);
1787 }
1788 }
1789
1790 /*
1791 * Reset Just the RX Portion
1792 * Return 0 upon success, 1 on failure.
1793 *
1794 * Resetting the rxdma while there is a rx dma transaction going on the
1795 * bus, will cause bus hang or parity errors. To avoid this, we would first
1796 * disable the rxdma by clearing the ENABLE bit (bit 0). To make sure it is
1797 * disabled, we will poll it until it realy clears. Furthermore, to verify
1798 * any RX DMA activity is subsided, we delay for 5 msec.
1799 */
1800 static uint32_t
1801 eri_erx_reset(struct eri *erip)
1802 {
1803 (void) eri_rxmac_disable(erip); /* Disable the RX MAC */
1804
1805 /* Disable the RX DMA */
1806 PUT_ERXREG(config, GET_ERXREG(config) & ~GET_CONFIG_RXDMA_EN);
1807 ERI_DELAY(((GET_ERXREG(config) & 1) == 0), ERI_MAX_RST_DELAY);
1808 if ((GET_ERXREG(config) & 1) != 0)
1809 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1810 disable_erx_msg);
1811
1812 drv_usecwait(5000); /* Delay to insure no RX DMA activity */
1813
1814 PUT_SWRSTREG(reset, ERI_G_RESET_ERX | ERI_CACHE_LINE_SIZE);
1815 /*
1816 * Wait until the reset is completed which is indicated by
1817 * the reset bit cleared or time out..
1818 */
1819 ERI_DELAY(((GET_SWRSTREG(reset) & (ERI_G_RESET_ERX)) ==
1820 ERI_CACHE_LINE_SIZE), ERI_MAX_RST_DELAY);
1821 erip->rx_reset_issued = -1;
1822
1823 return ((GET_SWRSTREG(reset) & (ERI_G_RESET_ERX)) ? 1 : 0);
1824 }
1825
1826 /*
1827 * Reset Just the TX Portion
1828 * Return 0 upon success, 1 on failure.
1829 * Resetting the txdma while there is a tx dma transaction on the bus, may cause
1830 * bus hang or parity errors. To avoid this we would first disable the txdma by
1831 * clearing the ENABLE bit (bit 0). To make sure it is disabled, we will poll
1832 * it until it realy clears. Furthermore, to any TX DMA activity is subsided,
1833 * we delay for 1 msec.
1834 */
1835 static uint32_t
1836 eri_etx_reset(struct eri *erip)
1837 {
1838 (void) eri_txmac_disable(erip);
1839
1840 /* Disable the TX DMA */
1841 PUT_ETXREG(config, GET_ETXREG(config) & ~GET_CONFIG_TXDMA_EN);
1842 #ifdef ORIG
1843 ERI_DELAY(((GET_ETXREG(config) & 1) == 0), ERI_MAX_RST_DELAY);
1844 if ((GET_ETXREG(config) & 1) != 0)
1845 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1846 disable_etx_msg);
1847 drv_usecwait(5000); /* Delay to ensure DMA completed (if any). */
1848 #endif
1849 drv_usecwait(5000); /* Delay to ensure DMA completed (if any). */
1850 ERI_DELAY(((GET_ETXREG(config) & 1) == 0), ERI_MAX_RST_DELAY);
1851 if ((GET_ETXREG(config) & 1) != 0)
1852 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1853 disable_etx_msg);
1854
1855 PUT_SWRSTREG(reset, ERI_G_RESET_ETX | ERI_CACHE_LINE_SIZE);
1856
1857 /*
1858 * Wait until the reset is completed which is indicated by the reset bit
1859 * cleared or time out..
1860 */
1861 ERI_DELAY(((GET_SWRSTREG(reset) & (ERI_G_RESET_ETX)) ==
1862 ERI_CACHE_LINE_SIZE), ERI_MAX_RST_DELAY);
1863 erip->tx_reset_issued = -1;
1864
1865 if (GET_SWRSTREG(reset) & (ERI_G_RESET_ETX)) {
1866 return (1);
1867 } else
1868 return (0);
1869 }
1870
1871
1872 /*
1873 * Initialize the TX DMA registers and Enable the TX DMA.
1874 */
1875 static uint32_t
1876 eri_init_txregs(struct eri *erip)
1877 {
1878
1879 uint32_t i;
1880 uint64_t tx_ring;
1881
1882 /*
1883 * Initialize ETX Registers:
1884 * config, txring_lo, txring_hi
1885 */
1886 tx_ring = ERI_IOPBIOADDR(erip, erip->eri_tmdp);
1887 PUT_ETXREG(txring_lo, (uint32_t)(tx_ring));
1888 PUT_ETXREG(txring_hi, (uint32_t)(tx_ring >> 32));
1889
1890 /*
1891 * Get TX Ring Size Masks.
1892 * The ring size ERI_TPENDING is defined in eri_mac.h.
1893 */
1894 switch (ERI_TPENDING) {
1895 case 32: i = ETX_RINGSZ_32;
1896 break;
1897 case 64: i = ETX_RINGSZ_64;
1898 break;
1899 case 128: i = ETX_RINGSZ_128;
1900 break;
1901 case 256: i = ETX_RINGSZ_256;
1902 break;
1903 case 512: i = ETX_RINGSZ_512;
1904 break;
1905 case 1024: i = ETX_RINGSZ_1024;
1906 break;
1907 case 2048: i = ETX_RINGSZ_2048;
1908 break;
1909 case 4096: i = ETX_RINGSZ_4096;
1910 break;
1911 default:
1912 ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
1913 unk_tx_descr_sze_msg, ERI_TPENDING);
1914 return (1);
1915 }
1916
1917 i <<= ERI_TX_RINGSZ_SHIFT;
1918 PUT_ETXREG(config, ETX_CONFIG_THRESHOLD | i);
1919 ENABLE_TXDMA(erip);
1920 ENABLE_MAC(erip);
1921 return (0);
1922 }
1923
1924
1925 /*
1926 * Initialize the RX DMA registers and Enable the RX DMA.
1927 */
1928 static uint32_t
1929 eri_init_rxregs(struct eri *erip)
1930 {
1931 int i;
1932 uint64_t rx_ring;
1933
1934 /*
1935 * Initialize ERX Registers:
1936 * rxring_lo, rxring_hi, config, rx_blanking, rx_pause_threshold.
1937 * Also, rx_kick
1938 * Read and save rxfifo_size.
1939 * XXX: Use this to properly configure PAUSE threshold values.
1940 */
1941 rx_ring = ERI_IOPBIOADDR(erip, erip->rmdp);
1942 PUT_ERXREG(rxring_lo, (uint32_t)(rx_ring));
1943 PUT_ERXREG(rxring_hi, (uint32_t)(rx_ring >> 32));
1944 PUT_ERXREG(rx_kick, erip->rx_kick);
1945
1946 /*
1947 * The Max ring size, ERI_RMDMAX is defined in eri_mac.h.
1948 * More ERI_RPENDING will provide better performance but requires more
1949 * system DVMA memory.
1950 * eri_rx_ring_size can be used to tune this value from /etc/system
1951 * eri_rx_ring_size cannot be NDD'able due to non-recoverable errors
1952 * which cannot be detected from NDD operations
1953 */
1954
1955 /*
1956 * get the rxring size bits
1957 */
1958 switch (ERI_RPENDING) {
1959 case 32: i = ERX_RINGSZ_32;
1960 break;
1961 case 64: i = ERX_RINGSZ_64;
1962 break;
1963 case 128: i = ERX_RINGSZ_128;
1964 break;
1965 case 256: i = ERX_RINGSZ_256;
1966 break;
1967 case 512: i = ERX_RINGSZ_512;
1968 break;
1969 case 1024: i = ERX_RINGSZ_1024;
1970 break;
1971 case 2048: i = ERX_RINGSZ_2048;
1972 break;
1973 case 4096: i = ERX_RINGSZ_4096;
1974 break;
1975 default:
1976 ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
1977 unk_rx_descr_sze_msg, ERI_RPENDING);
1978 return (1);
1979 }
1980
1981 i <<= ERI_RX_RINGSZ_SHIFT;
1982 i |= (ERI_FSTBYTE_OFFSET << ERI_RX_CONFIG_FBO_SHIFT) |
1983 (ETHERHEADER_SIZE << ERI_RX_CONFIG_RX_CSSTART_SHIFT) |
1984 (ERI_RX_FIFOTH_1024 << ERI_RX_CONFIG_RXFIFOTH_SHIFT);
1985
1986 PUT_ERXREG(config, i);
1987 PUT_ERXREG(rx_blanking,
1988 (param_intr_blank_time << ERI_RX_BLNK_INTR_TIME_SHIFT) |
1989 param_intr_blank_packets);
1990
1991 PUT_ERXREG(rx_pause_threshold, rx_pause_threshold);
1992 erip->rxfifo_size = GET_ERXREG(rxfifo_size);
1993 ENABLE_RXDMA(erip);
1994 return (0);
1995 }
1996
1997 static int
1998 eri_freebufs(struct eri *erip)
1999 {
2000 int status = 0;
2001
2002 status = eri_flush_rxbufs(erip);
2003 return (status);
2004 }
2005
2006 static void
2007 eri_update_rxbufs(struct eri *erip)
2008 {
2009 int i;
2010 volatile struct rmd *rmdp, *rmdpbase;
2011
2012 /*
2013 * Hang out receive buffers.
2014 */
2015 rmdpbase = erip->rmdp;
2016 for (i = 0; i < ERI_RPENDING; i++) {
2017 rmdp = rmdpbase + i;
2018 UPDATE_RMD(rmdp);
2019 }
2020
2021 /*
2022 * sync RXDMA descriptors.
2023 */
2024 ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
2025 DDI_DMA_SYNC_FORDEV);
2026 /*
2027 * Reset RMD 'walking' pointers.
2028 */
2029 erip->rnextp = erip->rmdp;
2030 erip->rx_completion = 0;
2031 erip->rx_kick = ERI_RPENDING - 4;
2032 }
2033
2034 /*
2035 * This routine is used to reset the RX DMA only. In the case of RX
2036 * failures such as RX Tag Error, RX hang etc... we don't want to
2037 * do global reset which takes down the link and clears the FIFO's
2038 * By doing RX only reset, we leave the TX and the link intact.
2039 */
2040 static uint32_t
2041 eri_init_rx_channel(struct eri *erip)
2042 {
2043 erip->flags &= ~ERI_RXINIT;
2044 (void) eri_erx_reset(erip);
2045 eri_update_rxbufs(erip);
2046 if (eri_init_rxregs(erip))
2047 return (1);
2048 PUT_MACREG(rxmask, BMAC_RXINTR_MASK);
2049 PUT_MACREG(rxcfg, GET_MACREG(rxcfg) | BMAC_RXCFG_ENAB);
2050 erip->rx_reset_issued = 0;
2051 HSTAT(erip, rx_inits);
2052 erip->flags |= ERI_RXINIT;
2053 return (0);
2054 }
2055
2056 static void
2057 eri_init_rx(struct eri *erip)
2058 {
2059 uint16_t *ladrf;
2060
2061 /*
2062 * First of all make sure the Receive MAC is stop.
2063 */
2064 (void) eri_rxmac_disable(erip); /* Disable the RX MAC */
2065
2066 /*
2067 * Program BigMAC with local individual ethernet address.
2068 */
2069
2070 PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]);
2071 PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]);
2072 PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]);
2073
2074 /*
2075 * Set up multicast address filter by passing all multicast
2076 * addresses through a crc generator, and then using the
2077 * low order 8 bits as a index into the 256 bit logical
2078 * address filter. The high order four bits select the word,
2079 * while the rest of the bits select the bit within the word.
2080 */
2081
2082 ladrf = erip->ladrf;
2083
2084 PUT_MACREG(hash0, ladrf[0]);
2085 PUT_MACREG(hash1, ladrf[1]);
2086 PUT_MACREG(hash2, ladrf[2]);
2087 PUT_MACREG(hash3, ladrf[3]);
2088 PUT_MACREG(hash4, ladrf[4]);
2089 PUT_MACREG(hash5, ladrf[5]);
2090 PUT_MACREG(hash6, ladrf[6]);
2091 PUT_MACREG(hash7, ladrf[7]);
2092 PUT_MACREG(hash8, ladrf[8]);
2093 PUT_MACREG(hash9, ladrf[9]);
2094 PUT_MACREG(hash10, ladrf[10]);
2095 PUT_MACREG(hash11, ladrf[11]);
2096 PUT_MACREG(hash12, ladrf[12]);
2097 PUT_MACREG(hash13, ladrf[13]);
2098 PUT_MACREG(hash14, ladrf[14]);
2099 PUT_MACREG(hash15, ladrf[15]);
2100
2101 #ifdef ERI_DONT_STRIP_CRC
2102 PUT_MACREG(rxcfg,
2103 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2104 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2105 BMAC_RXCFG_ENAB));
2106 #else
2107 PUT_MACREG(rxcfg,
2108 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2109 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2110 BMAC_RXCFG_ENAB | BMAC_RXCFG_STRIP_CRC));
2111 #endif
2112 /* wait after setting Hash Enable bit */
2113 /* drv_usecwait(10); */
2114
2115 HSTAT(erip, rx_inits);
2116 }
2117
2118 /*
2119 * This routine is used to init the TX MAC only.
2120 * &erip->xmitlock is held before calling this routine.
2121 */
2122 void
2123 eri_init_txmac(struct eri *erip)
2124 {
2125 uint32_t carrier_ext = 0;
2126
2127 erip->flags &= ~ERI_TXINIT;
2128 /*
2129 * Stop the Transmit MAC.
2130 */
2131 (void) eri_txmac_disable(erip);
2132
2133 /*
2134 * Must be Internal Transceiver
2135 */
2136 if (param_mode)
2137 PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2138 BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE));
2139 else
2140 PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2141 BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE |
2142 BMAC_XIFC_DIS_ECHO));
2143
2144 /*
2145 * Initialize the interpacket gap registers
2146 */
2147 PUT_MACREG(ipg1, param_ipg1);
2148 PUT_MACREG(ipg2, param_ipg2);
2149
2150 if (erip->ngu_enable)
2151 PUT_MACREG(txcfg, ((param_mode ? BMAC_TXCFG_FDX: 0) |
2152 ((param_lance_mode && (erip->lance_mode_enable)) ?
2153 BMAC_TXCFG_ENIPG0 : 0) |
2154 (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) |
2155 BMAC_TXCFG_NGU));
2156 else
2157 PUT_MACREG(txcfg, ((param_mode ? BMAC_TXCFG_FDX: 0) |
2158 ((param_lance_mode && (erip->lance_mode_enable)) ?
2159 BMAC_TXCFG_ENIPG0 : 0) |
2160 (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0)));
2161
2162 ENABLE_TXDMA(erip);
2163 ENABLE_TXMAC(erip);
2164
2165 HSTAT(erip, tx_inits);
2166 erip->flags |= ERI_TXINIT;
2167 }
2168
2169 static void
2170 eri_unallocthings(struct eri *erip)
2171 {
2172 uint32_t flag;
2173 uint32_t i;
2174
2175 flag = erip->alloc_flag;
2176
2177 if (flag & ERI_DESC_MEM_MAP)
2178 (void) ddi_dma_unbind_handle(erip->md_h);
2179
2180 if (flag & ERI_DESC_MEM_ALLOC) {
2181 ddi_dma_mem_free(&erip->mdm_h);
2182 erip->rmdp = NULL;
2183 erip->eri_tmdp = NULL;
2184 }
2185
2186 if (flag & ERI_DESC_HANDLE_ALLOC)
2187 ddi_dma_free_handle(&erip->md_h);
2188
2189 (void) eri_freebufs(erip);
2190
2191 if (flag & ERI_RCV_HANDLE_ALLOC)
2192 for (i = 0; i < erip->rcv_handle_cnt; i++)
2193 ddi_dma_free_handle(&erip->ndmarh[i]);
2194
2195 if (flag & ERI_RCV_DVMA_ALLOC) {
2196 (void) dvma_release(erip->eri_dvmarh);
2197 erip->eri_dvmarh = NULL;
2198 }
2199
2200 if (flag & ERI_XBUFS_KMEM_DMABIND) {
2201 (void) ddi_dma_unbind_handle(erip->tbuf_handle);
2202 erip->tbuf_ioaddr = 0;
2203 }
2204
2205 if (flag & ERI_XBUFS_KMEM_ALLOC) {
2206 ddi_dma_mem_free(&erip->tbuf_acch);
2207 erip->tbuf_kaddr = NULL;
2208 }
2209
2210 if (flag & ERI_XBUFS_HANDLE_ALLOC) {
2211 ddi_dma_free_handle(&erip->tbuf_handle);
2212 erip->tbuf_handle = NULL;
2213 }
2214
2215 }
2216
2217 /*
2218 * Initialize channel.
2219 * Return true on success, false on error.
2220 *
2221 * The recommended sequence for initialization is:
2222 * 1. Issue a Global Reset command to the Ethernet Channel.
2223 * 2. Poll the Global_Reset bits until the execution of the reset has been
2224 * completed.
2225 * 2(a). Use the MIF Frame/Output register to reset the transceiver.
2226 * Poll Register 0 to till the Resetbit is 0.
2227 * 2(b). Use the MIF Frame/Output register to set the PHY in in Normal-Op,
2228 * 100Mbps and Non-Isolated mode. The main point here is to bring the
2229 * PHY out of Isolate mode so that it can generate the rx_clk and tx_clk
2230 * to the MII interface so that the Bigmac core can correctly reset
2231 * upon a software reset.
2232 * 2(c). Issue another Global Reset command to the Ethernet Channel and poll
2233 * the Global_Reset bits till completion.
2234 * 3. Set up all the data structures in the host memory.
2235 * 4. Program the TX_MAC registers/counters (excluding the TX_MAC Configuration
2236 * Register).
2237 * 5. Program the RX_MAC registers/counters (excluding the RX_MAC Configuration
2238 * Register).
2239 * 6. Program the Transmit Descriptor Ring Base Address in the ETX.
2240 * 7. Program the Receive Descriptor Ring Base Address in the ERX.
2241 * 8. Program the Global Configuration and the Global Interrupt Mask Registers.
2242 * 9. Program the ETX Configuration register (enable the Transmit DMA channel).
2243 * 10. Program the ERX Configuration register (enable the Receive DMA channel).
2244 * 11. Program the XIF Configuration Register (enable the XIF).
2245 * 12. Program the RX_MAC Configuration Register (Enable the RX_MAC).
2246 * 13. Program the TX_MAC Configuration Register (Enable the TX_MAC).
2247 */
2248 /*
2249 * lock order:
2250 * intrlock->linklock->xmitlock->xcvrlock
2251 */
2252 static boolean_t
2253 eri_init(struct eri *erip)
2254 {
2255 uint32_t init_stat = 0;
2256 uint32_t partial_init = 0;
2257 uint32_t carrier_ext = 0;
2258 uint32_t mac_ctl = 0;
2259 boolean_t ret;
2260 uint32_t link_timeout = ERI_LINKCHECK_TIMER;
2261 link_state_t linkupdate = LINK_STATE_UNKNOWN;
2262
2263 /*
2264 * Just return successfully if device is suspended.
2265 * eri_init() will be called again from resume.
2266 */
2267 ASSERT(erip != NULL);
2268
2269 if (erip->flags & ERI_SUSPENDED) {
2270 ret = B_TRUE;
2271 goto init_exit;
2272 }
2273
2274 mutex_enter(&erip->intrlock);
2275 eri_stop_timer(erip); /* acquire linklock */
2276 mutex_enter(&erip->xmitlock);
2277 erip->flags &= (ERI_DLPI_LINKUP | ERI_STARTED);
2278 erip->wantw = B_FALSE;
2279 HSTAT(erip, inits);
2280 erip->txhung = 0;
2281
2282 if ((erip->stats.inits > 1) && (erip->init_macregs == 0))
2283 eri_savecntrs(erip);
2284
2285 mutex_enter(&erip->xcvrlock);
2286 if (!param_linkup || erip->linkcheck) {
2287 if (!erip->linkcheck)
2288 linkupdate = LINK_STATE_DOWN;
2289 (void) eri_stop(erip);
2290 }
2291 if (!(erip->flags & ERI_DLPI_LINKUP) || !param_linkup) {
2292 erip->flags |= ERI_DLPI_LINKUP;
2293 eri_mif_poll(erip, MIF_POLL_STOP);
2294 (void) eri_new_xcvr(erip);
2295 ERI_DEBUG_MSG1(erip, XCVR_MSG, "New transceiver detected.");
2296 if (param_transceiver != NO_XCVR) {
2297 /*
2298 * Reset the new PHY and bring up the
2299 * link
2300 */
2301 if (eri_reset_xcvr(erip)) {
2302 ERI_FAULT_MSG1(erip, SEVERITY_NONE,
2303 ERI_VERB_MSG, "In Init after reset");
2304 mutex_exit(&erip->xcvrlock);
2305 link_timeout = 0;
2306 goto done;
2307 }
2308 if (erip->stats.link_up == LINK_STATE_UP)
2309 linkupdate = LINK_STATE_UP;
2310 } else {
2311 erip->flags |= (ERI_RUNNING | ERI_INITIALIZED);
2312 param_linkup = 0;
2313 erip->stats.link_up = LINK_STATE_DOWN;
2314 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2315 linkupdate = LINK_STATE_DOWN;
2316 /*
2317 * Still go on and complete the MAC initialization as
2318 * xcvr might show up later.
2319 * you must return to their mutex ordering.
2320 */
2321 }
2322 eri_mif_poll(erip, MIF_POLL_START);
2323 }
2324
2325 mutex_exit(&erip->xcvrlock);
2326
2327 /*
2328 * Allocate data structures.
2329 */
2330 if (erip->global_reset_issued) {
2331 if (erip->global_reset_issued == 2) { /* fast path */
2332
2333 /*
2334 * Hang out/Initialize descriptors and buffers.
2335 */
2336 eri_init_txbufs(erip);
2337
2338 eri_update_rxbufs(erip);
2339 } else {
2340 init_stat = eri_allocthings(erip);
2341 if (init_stat)
2342 goto done;
2343
2344 if (eri_freebufs(erip))
2345 goto done;
2346 /*
2347 * Hang out/Initialize descriptors and buffers.
2348 */
2349 eri_init_txbufs(erip);
2350 if (eri_init_rxbufs(erip))
2351 goto done;
2352 }
2353 }
2354
2355 /*
2356 * BigMAC requires that we confirm that tx, rx and hash are in
2357 * quiescent state.
2358 * MAC will not reset successfully if the transceiver is not reset and
2359 * brought out of Isolate mode correctly. TXMAC reset may fail if the
2360 * ext. transceiver is just disconnected. If it fails, try again by
2361 * checking the transceiver.
2362 */
2363 if (eri_txmac_disable(erip)) {
2364 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
2365 disable_txmac_msg);
2366 param_linkup = 0; /* force init again */
2367 erip->stats.link_up = LINK_STATE_DOWN;
2368 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2369 linkupdate = LINK_STATE_DOWN;
2370 goto done;
2371 }
2372
2373 if (eri_rxmac_disable(erip)) {
2374 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
2375 disable_rxmac_msg);
2376 param_linkup = 0; /* force init again */
2377 erip->stats.link_up = LINK_STATE_DOWN;
2378 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2379 linkupdate = LINK_STATE_DOWN;
2380 goto done;
2381 }
2382
2383 eri_init_macregs_generic(erip);
2384
2385 /*
2386 * Initialize ERI Global registers :
2387 * config
2388 * For PCI : err_mask, bif_cfg
2389 *
2390 * Use user-configurable parameter for enabling 64-bit transfers.
2391 * Note:For PCI, burst sizes are in multiples of 64-bytes.
2392 */
2393
2394 /*
2395 * Significant performance improvements can be achieved by
2396 * disabling transmit interrupt. Thus TMD's are reclaimed
2397 * only very infrequently.
2398 * The PCS Interrupt is masked here. It is enabled only when
2399 * a PCS link is brought up because there is no second level
2400 * mask for this interrupt..
2401 * Init GLOBAL, TXMAC, RXMAC and MACCTL interrupt masks here.
2402 */
2403 if (! partial_init) {
2404 PUT_GLOBREG(intmask, ERI_G_MASK_INTR);
2405 erip->tx_int_me = 0;
2406 PUT_MACREG(txmask, BMAC_TXINTR_MASK);
2407 PUT_MACREG(rxmask, BMAC_RXINTR_MASK);
2408 PUT_MACREG(macctl_mask, ERI_MACCTL_INTR_MASK);
2409 }
2410
2411 if (erip->global_reset_issued) {
2412 /*
2413 * Initialize ETX Registers:
2414 * config, txring_lo, txring_hi
2415 */
2416 if (eri_init_txregs(erip))
2417 goto done;
2418 /*
2419 * Initialize ERX Registers:
2420 * rxring_lo, rxring_hi, config, rx_blanking,
2421 * rx_pause_threshold. Also, rx_kick
2422 * Read and save rxfifo_size.
2423 */
2424 if (eri_init_rxregs(erip))
2425 goto done;
2426 }
2427
2428 PUT_MACREG(macctl_mask, ERI_MACCTL_INTR_MASK);
2429
2430 /*
2431 * Set up the slottime,and rxconfig, txconfig without enabling
2432 * the latter two at this time
2433 */
2434 PUT_MACREG(slot, BMAC_SLOT_TIME);
2435 carrier_ext = 0;
2436
2437 #ifdef ERI_DONT_STRIP_CRC
2438 PUT_MACREG(rxcfg,
2439 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2440 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2441 (carrier_ext ? BMAC_RXCFG_CARR_EXT : 0)));
2442 #else
2443 PUT_MACREG(rxcfg,
2444 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2445 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2446 BMAC_RXCFG_STRIP_CRC |
2447 (carrier_ext ? BMAC_RXCFG_CARR_EXT : 0)));
2448 #endif
2449 drv_usecwait(10); /* wait after setting Hash Enable bit */
2450
2451 if (erip->ngu_enable)
2452 PUT_MACREG(txcfg,
2453 ((param_mode ? BMAC_TXCFG_FDX: 0) |
2454 ((param_lance_mode && (erip->lance_mode_enable)) ?
2455 BMAC_TXCFG_ENIPG0 : 0) |
2456 (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) |
2457 BMAC_TXCFG_NGU));
2458 else
2459 PUT_MACREG(txcfg,
2460 ((param_mode ? BMAC_TXCFG_FDX: 0) |
2461 ((param_lance_mode && (erip->lance_mode_enable)) ?
2462 BMAC_TXCFG_ENIPG0 : 0) |
2463 (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0)));
2464
2465 if (erip->pauseRX)
2466 mac_ctl = ERI_MCTLCFG_RXPAUSE;
2467 if (erip->pauseTX)
2468 mac_ctl |= ERI_MCTLCFG_TXPAUSE;
2469
2470 PUT_MACREG(macctl_cfg, mac_ctl);
2471
2472 /*
2473 * Must be Internal Transceiver
2474 */
2475 if (param_mode)
2476 PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2477 BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE));
2478 else {
2479 PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2480 BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE |
2481 BMAC_XIFC_DIS_ECHO));
2482
2483 link_timeout = ERI_CHECK_HANG_TIMER;
2484 }
2485
2486 /*
2487 * if MAC int loopback flag is set, put xifc reg in mii loopback
2488 * mode {DIAG}
2489 */
2490 if (erip->flags & ERI_MACLOOPBACK) {
2491 PUT_MACREG(xifc, GET_MACREG(xifc) | BMAC_XIFC_MIILPBK);
2492 }
2493
2494 /*
2495 * Enable TX and RX MACs.
2496 */
2497 ENABLE_MAC(erip);
2498 erip->flags |= (ERI_RUNNING | ERI_INITIALIZED |
2499 ERI_TXINIT | ERI_RXINIT);
2500 mac_tx_update(erip->mh);
2501 erip->global_reset_issued = 0;
2502
2503 #ifdef ERI_10_10_FORCE_SPEED_WORKAROUND
2504 eri_xcvr_force_mode(erip, &link_timeout);
2505 #endif
2506
2507 done:
2508 if (init_stat)
2509 eri_unallocthings(erip);
2510
2511 mutex_exit(&erip->xmitlock);
2512 eri_start_timer(erip, eri_check_link, link_timeout);
2513 mutex_exit(&erip->intrlock);
2514
2515 if (linkupdate != LINK_STATE_UNKNOWN)
2516 mac_link_update(erip->mh, linkupdate);
2517
2518 ret = (erip->flags & ERI_RUNNING) ? B_TRUE : B_FALSE;
2519 if (!ret) {
2520 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
2521 "eri_init failed");
2522 }
2523
2524 init_exit:
2525 ASSERT(!MUTEX_HELD(&erip->linklock));
2526 return (ret);
2527 }
2528
2529 /*
2530 * 0 as burstsize upon failure as it signifies no burst size.
2531 */
2532 static int
2533 eri_burstsize(struct eri *erip)
2534 {
2535 ddi_dma_handle_t handle;
2536
2537 if (ddi_dma_alloc_handle(erip->dip, &dma_attr, DDI_DMA_DONTWAIT,
2538 NULL, &handle))
2539 return (DDI_FAILURE);
2540
2541 erip->burstsizes = ddi_dma_burstsizes(handle);
2542 ddi_dma_free_handle(&handle);
2543
2544 if (erip->burstsizes)
2545 return (DDI_SUCCESS);
2546
2547 return (DDI_FAILURE);
2548 }
2549
2550 /*
2551 * Un-initialize (STOP) ERI channel.
2552 */
2553 static void
2554 eri_uninit(struct eri *erip)
2555 {
2556 boolean_t needind;
2557
2558 /*
2559 * Allow up to 'ERI_DRAINTIME' for pending xmit's to complete.
2560 */
2561 ERI_DELAY((erip->tcurp == erip->tnextp), ERI_DRAINTIME);
2562
2563 mutex_enter(&erip->intrlock);
2564 eri_stop_timer(erip); /* acquire linklock */
2565 mutex_enter(&erip->xmitlock);
2566 mutex_enter(&erip->xcvrlock);
2567 eri_mif_poll(erip, MIF_POLL_STOP);
2568 erip->flags &= ~ERI_DLPI_LINKUP;
2569 mutex_exit(&erip->xcvrlock);
2570
2571 needind = !erip->linkcheck;
2572 (void) eri_stop(erip);
2573 erip->flags &= ~ERI_RUNNING;
2574
2575 mutex_exit(&erip->xmitlock);
2576 eri_start_timer(erip, eri_check_link, 0);
2577 mutex_exit(&erip->intrlock);
2578
2579 if (needind)
2580 mac_link_update(erip->mh, LINK_STATE_DOWN);
2581 }
2582
2583 /*
2584 * Allocate CONSISTENT memory for rmds and tmds with appropriate alignment and
2585 * map it in IO space.
2586 *
2587 * The driver allocates STREAMS buffers which will be mapped in DVMA
2588 * space using DDI DMA resources.
2589 *
2590 */
2591 static int
2592 eri_allocthings(struct eri *erip)
2593 {
2594
2595 uintptr_t a;
2596 int size;
2597 uint32_t rval;
2598 int i;
2599 size_t real_len;
2600 uint32_t cookiec;
2601 int alloc_stat = 0;
2602 ddi_dma_cookie_t dma_cookie;
2603
2604 /*
2605 * Return if resources are already allocated.
2606 */
2607 if (erip->rmdp)
2608 return (alloc_stat);
2609
2610 erip->alloc_flag = 0;
2611
2612 /*
2613 * Allocate the TMD and RMD descriptors and extra for alignments.
2614 */
2615 size = (ERI_RPENDING * sizeof (struct rmd) +
2616 ERI_TPENDING * sizeof (struct eri_tmd)) + ERI_GMDALIGN;
2617
2618 rval = ddi_dma_alloc_handle(erip->dip, &desc_dma_attr,
2619 DDI_DMA_DONTWAIT, 0, &erip->md_h);
2620 if (rval != DDI_SUCCESS) {
2621 return (++alloc_stat);
2622 }
2623 erip->alloc_flag |= ERI_DESC_HANDLE_ALLOC;
2624
2625 rval = ddi_dma_mem_alloc(erip->md_h, size, &erip->dev_attr,
2626 DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
2627 (caddr_t *)&erip->iopbkbase, &real_len, &erip->mdm_h);
2628 if (rval != DDI_SUCCESS) {
2629 return (++alloc_stat);
2630 }
2631 erip->alloc_flag |= ERI_DESC_MEM_ALLOC;
2632
2633 rval = ddi_dma_addr_bind_handle(erip->md_h, NULL,
2634 (caddr_t)erip->iopbkbase, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2635 DDI_DMA_DONTWAIT, 0, &erip->md_c, &cookiec);
2636
2637 if (rval != DDI_DMA_MAPPED)
2638 return (++alloc_stat);
2639
2640 erip->alloc_flag |= ERI_DESC_MEM_MAP;
2641
2642 if (cookiec != 1)
2643 return (++alloc_stat);
2644
2645 erip->iopbiobase = erip->md_c.dmac_address;
2646
2647 a = erip->iopbkbase;
2648 a = ROUNDUP(a, ERI_GMDALIGN);
2649 erip->rmdp = (struct rmd *)a;
2650 a += ERI_RPENDING * sizeof (struct rmd);
2651 erip->eri_tmdp = (struct eri_tmd *)a;
2652 /*
2653 * Specifically we reserve n (ERI_TPENDING + ERI_RPENDING)
2654 * pagetable entries. Therefore we have 2 ptes for each
2655 * descriptor. Since the ethernet buffers are 1518 bytes
2656 * so they can at most use 2 ptes.
2657 * Will do a ddi_dma_addr_setup for each bufer
2658 */
2659 /*
2660 * In the current implementation, we use the ddi compliant
2661 * dma interface. We allocate ERI_RPENDING dma handles for receive
2662 * activity. The actual dma mapping is done in the io function
2663 * eri_read_dma(), by calling the ddi_dma_addr_bind_handle.
2664 * Dma resources are deallocated by calling ddi_dma_unbind_handle
2665 * in eri_reclaim() for transmit and eri_read_dma(), for receive io.
2666 */
2667
2668 if (eri_use_dvma_rx &&
2669 (dvma_reserve(erip->dip, &eri_dma_limits, (ERI_RPENDING * 2),
2670 &erip->eri_dvmarh)) == DDI_SUCCESS) {
2671 erip->alloc_flag |= ERI_RCV_DVMA_ALLOC;
2672 } else {
2673 erip->eri_dvmarh = NULL;
2674
2675 for (i = 0; i < ERI_RPENDING; i++) {
2676 rval = ddi_dma_alloc_handle(erip->dip,
2677 &dma_attr, DDI_DMA_DONTWAIT,
2678 0, &erip->ndmarh[i]);
2679
2680 if (rval != DDI_SUCCESS) {
2681 ERI_FAULT_MSG1(erip, SEVERITY_HIGH,
2682 ERI_VERB_MSG, alloc_rx_dmah_msg);
2683 alloc_stat++;
2684 break;
2685 }
2686 }
2687
2688 erip->rcv_handle_cnt = i;
2689
2690 if (i)
2691 erip->alloc_flag |= ERI_RCV_HANDLE_ALLOC;
2692
2693 if (alloc_stat)
2694 return (alloc_stat);
2695
2696 }
2697
2698 /*
2699 * Allocate TX buffer
2700 * Note: buffers must always be allocated in the native
2701 * ordering of the CPU (always big-endian for Sparc).
2702 * ddi_dma_mem_alloc returns memory in the native ordering
2703 * of the bus (big endian for SBus, little endian for PCI).
2704 * So we cannot use ddi_dma_mem_alloc(, &erip->ge_dev_attr)
2705 * because we'll get little endian memory on PCI.
2706 */
2707 if (ddi_dma_alloc_handle(erip->dip, &desc_dma_attr, DDI_DMA_DONTWAIT,
2708 0, &erip->tbuf_handle) != DDI_SUCCESS) {
2709 ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2710 alloc_tx_dmah_msg);
2711 return (++alloc_stat);
2712 }
2713 erip->alloc_flag |= ERI_XBUFS_HANDLE_ALLOC;
2714 size = ERI_TPENDING * ERI_BUFSIZE;
2715 if (ddi_dma_mem_alloc(erip->tbuf_handle, size, &buf_attr,
2716 DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, &erip->tbuf_kaddr,
2717 &real_len, &erip->tbuf_acch) != DDI_SUCCESS) {
2718 ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2719 alloc_tx_dmah_msg);
2720 return (++alloc_stat);
2721 }
2722 erip->alloc_flag |= ERI_XBUFS_KMEM_ALLOC;
2723 if (ddi_dma_addr_bind_handle(erip->tbuf_handle, NULL,
2724 erip->tbuf_kaddr, size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2725 DDI_DMA_DONTWAIT, 0, &dma_cookie, &cookiec) != DDI_DMA_MAPPED) {
2726 return (++alloc_stat);
2727 }
2728 erip->tbuf_ioaddr = dma_cookie.dmac_address;
2729 erip->alloc_flag |= ERI_XBUFS_KMEM_DMABIND;
2730 if (cookiec != 1)
2731 return (++alloc_stat);
2732
2733 /*
2734 * Keep handy limit values for RMD, TMD, and Buffers.
2735 */
2736 erip->rmdlimp = &((erip->rmdp)[ERI_RPENDING]);
2737 erip->eri_tmdlimp = &((erip->eri_tmdp)[ERI_TPENDING]);
2738
2739 /*
2740 * Zero out RCV holders.
2741 */
2742 bzero((caddr_t)erip->rmblkp, sizeof (erip->rmblkp));
2743 return (alloc_stat);
2744 }
2745
2746 /* <<<<<<<<<<<<<<<<< INTERRUPT HANDLING FUNCTION >>>>>>>>>>>>>>>>>>>> */
2747 /*
2748 * First check to see if it is our device interrupting.
2749 */
2750 static uint_t
2751 eri_intr(caddr_t arg)
2752 {
2753 struct eri *erip = (void *)arg;
2754 uint32_t erisbits;
2755 uint32_t mif_status;
2756 uint32_t serviced = DDI_INTR_UNCLAIMED;
2757 link_state_t linkupdate = LINK_STATE_UNKNOWN;
2758 boolean_t macupdate = B_FALSE;
2759 mblk_t *mp;
2760 mblk_t *head;
2761 mblk_t **tail;
2762
2763 head = NULL;
2764 tail = &head;
2765
2766 mutex_enter(&erip->intrlock);
2767
2768 erisbits = GET_GLOBREG(status);
2769
2770 /*
2771 * Check if it is only the RX_DONE interrupt, which is
2772 * the most frequent one.
2773 */
2774 if (((erisbits & ERI_G_STATUS_RX_INT) == ERI_G_STATUS_RX_DONE) &&
2775 (erip->flags & ERI_RUNNING)) {
2776 serviced = DDI_INTR_CLAIMED;
2777 goto rx_done_int;
2778 }
2779
2780 /* Claim the first interrupt after initialization */
2781 if (erip->flags & ERI_INITIALIZED) {
2782 erip->flags &= ~ERI_INITIALIZED;
2783 serviced = DDI_INTR_CLAIMED;
2784 }
2785
2786 /* Check for interesting events */
2787 if ((erisbits & ERI_G_STATUS_INTR) == 0) {
2788 #ifdef ESTAR_WORKAROUND
2789 uint32_t linkupdate;
2790 #endif
2791
2792 ERI_DEBUG_MSG2(erip, DIAG_MSG,
2793 "eri_intr: Interrupt Not Claimed gsbits %X", erisbits);
2794 #ifdef DEBUG
2795 noteri++;
2796 #endif
2797 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF Config = 0x%X",
2798 GET_MIFREG(mif_cfg));
2799 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF imask = 0x%X",
2800 GET_MIFREG(mif_imask));
2801 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:INT imask = 0x%X",
2802 GET_GLOBREG(intmask));
2803 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:alias %X",
2804 GET_GLOBREG(status_alias));
2805 #ifdef ESTAR_WORKAROUND
2806 linkupdate = eri_check_link_noind(erip);
2807 #endif
2808 mutex_exit(&erip->intrlock);
2809 #ifdef ESTAR_WORKAROUND
2810 if (linkupdate != LINK_STATE_UNKNOWN)
2811 mac_link_update(erip->mh, linkupdate);
2812 #endif
2813 return (serviced);
2814 }
2815 serviced = DDI_INTR_CLAIMED;
2816
2817 if (!(erip->flags & ERI_RUNNING)) {
2818 mutex_exit(&erip->intrlock);
2819 eri_uninit(erip);
2820 return (serviced);
2821 }
2822
2823 if (erisbits & ERI_G_STATUS_FATAL_ERR) {
2824 ERI_DEBUG_MSG2(erip, INTR_MSG,
2825 "eri_intr: fatal error: erisbits = %X", erisbits);
2826 (void) eri_fatal_err(erip, erisbits);
2827 eri_reinit_fatal++;
2828
2829 if (erip->rx_reset_issued) {
2830 erip->rx_reset_issued = 0;
2831 (void) eri_init_rx_channel(erip);
2832 mutex_exit(&erip->intrlock);
2833 } else {
2834 param_linkup = 0;
2835 erip->stats.link_up = LINK_STATE_DOWN;
2836 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2837 DISABLE_MAC(erip);
2838 mutex_exit(&erip->intrlock);
2839 (void) eri_init(erip);
2840 }
2841 return (serviced);
2842 }
2843
2844 if (erisbits & ERI_G_STATUS_NONFATAL_ERR) {
2845 ERI_DEBUG_MSG2(erip, INTR_MSG,
2846 "eri_intr: non-fatal error: erisbits = %X", erisbits);
2847 (void) eri_nonfatal_err(erip, erisbits);
2848 if (erip->linkcheck) {
2849 mutex_exit(&erip->intrlock);
2850 (void) eri_init(erip);
2851 return (serviced);
2852 }
2853 }
2854
2855 if (erisbits & ERI_G_STATUS_MIF_INT) {
2856 uint16_t stat;
2857 ERI_DEBUG_MSG2(erip, XCVR_MSG,
2858 "eri_intr:MIF Interrupt:mii_status %X", erip->mii_status);
2859 eri_stop_timer(erip); /* acquire linklock */
2860
2861 mutex_enter(&erip->xmitlock);
2862 mutex_enter(&erip->xcvrlock);
2863 #ifdef ERI_MIF_POLL_STATUS_WORKAROUND
2864 mif_status = GET_MIFREG(mif_bsts);
2865 eri_mif_poll(erip, MIF_POLL_STOP);
2866 ERI_DEBUG_MSG3(erip, XCVR_MSG,
2867 "eri_intr: new MIF interrupt status %X XCVR status %X",
2868 mif_status, erip->mii_status);
2869 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
2870 linkupdate = eri_mif_check(erip, stat, stat);
2871
2872 #else
2873 mif_status = GET_MIFREG(mif_bsts);
2874 eri_mif_poll(erip, MIF_POLL_STOP);
2875 linkupdate = eri_mif_check(erip, (uint16_t)mif_status,
2876 (uint16_t)(mif_status >> 16));
2877 #endif
2878 eri_mif_poll(erip, MIF_POLL_START);
2879 mutex_exit(&erip->xcvrlock);
2880 mutex_exit(&erip->xmitlock);
2881
2882 if (!erip->openloop_autoneg)
2883 eri_start_timer(erip, eri_check_link,
2884 ERI_LINKCHECK_TIMER);
2885 else
2886 eri_start_timer(erip, eri_check_link,
2887 ERI_P_FAULT_TIMER);
2888 }
2889
2890 ERI_DEBUG_MSG2(erip, INTR_MSG,
2891 "eri_intr:May have Read Interrupt status:status %X", erisbits);
2892
2893 rx_done_int:
2894 if ((erisbits & (ERI_G_STATUS_TX_INT_ME)) ||
2895 (erip->tx_cur_cnt >= tx_interrupt_rate)) {
2896 mutex_enter(&erip->xmitlock);
2897 erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
2898 ETX_COMPLETION_MASK);
2899
2900 macupdate |= eri_reclaim(erip, erip->tx_completion);
2901 if (macupdate)
2902 erip->wantw = B_FALSE;
2903
2904 mutex_exit(&erip->xmitlock);
2905 }
2906
2907 if (erisbits & ERI_G_STATUS_RX_DONE) {
2908 volatile struct rmd *rmdp, *rmdpbase;
2909 volatile uint32_t rmdi;
2910 uint8_t loop_limit = 0x20;
2911 uint64_t flags;
2912 uint32_t rmdmax_mask = erip->rmdmax_mask;
2913
2914 rmdpbase = erip->rmdp;
2915 rmdi = erip->rx_completion;
2916 rmdp = rmdpbase + rmdi;
2917
2918 /*
2919 * Sync RMD before looking at it.
2920 */
2921 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
2922 DDI_DMA_SYNC_FORCPU);
2923 /*
2924 * Loop through each RMD.
2925 */
2926
2927 flags = GET_RMD_FLAGS(rmdp);
2928 while (((flags & ERI_RMD_OWN) == 0) && (loop_limit)) {
2929 /* process one packet */
2930 mp = eri_read_dma(erip, rmdp, rmdi, flags);
2931 rmdi = (rmdi + 1) & rmdmax_mask;
2932 rmdp = rmdpbase + rmdi;
2933
2934 if (mp != NULL) {
2935 *tail = mp;
2936 tail = &mp->b_next;
2937 }
2938
2939 /*
2940 * ERI RCV DMA fetches or updates four descriptors
2941 * a time. Also we don't want to update the desc.
2942 * batch we just received packet on. So we update
2943 * descriptors for every 4 packets and we update
2944 * the group of 4 after the current batch.
2945 */
2946
2947 if (!(rmdi % 4)) {
2948 if (eri_overflow_reset &&
2949 (GET_GLOBREG(status_alias) &
2950 ERI_G_STATUS_NONFATAL_ERR)) {
2951 loop_limit = 1;
2952 } else {
2953 erip->rx_kick =
2954 (rmdi + ERI_RPENDING - 4) &
2955 rmdmax_mask;
2956 PUT_ERXREG(rx_kick, erip->rx_kick);
2957 }
2958 }
2959
2960 /*
2961 * Sync the next RMD before looking at it.
2962 */
2963 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
2964 DDI_DMA_SYNC_FORCPU);
2965 flags = GET_RMD_FLAGS(rmdp);
2966 loop_limit--;
2967 }
2968 erip->rx_completion = rmdi;
2969 }
2970
2971 mutex_exit(&erip->intrlock);
2972
2973 if (head)
2974 mac_rx(erip->mh, NULL, head);
2975
2976 if (macupdate)
2977 mac_tx_update(erip->mh);
2978
2979 if (linkupdate != LINK_STATE_UNKNOWN)
2980 mac_link_update(erip->mh, linkupdate);
2981
2982 return (serviced);
2983 }
2984
2985 /*
2986 * Handle interrupts for fatal errors
2987 * Need reinitialization.
2988 */
2989 #define PCI_DATA_PARITY_REP (1 << 8)
2990 #define PCI_SING_TARGET_ABORT (1 << 11)
2991 #define PCI_RCV_TARGET_ABORT (1 << 12)
2992 #define PCI_RCV_MASTER_ABORT (1 << 13)
2993 #define PCI_SING_SYSTEM_ERR (1 << 14)
2994 #define PCI_DATA_PARITY_ERR (1 << 15)
2995
2996 /* called with intrlock held */
2997 static void
2998 eri_fatal_err(struct eri *erip, uint32_t erisbits)
2999 {
3000 uint16_t pci_status;
3001 uint32_t pci_error_int = 0;
3002
3003 if (erisbits & ERI_G_STATUS_RX_TAG_ERR) {
3004 erip->rx_reset_issued = 1;
3005 HSTAT(erip, rxtag_err);
3006 } else {
3007 erip->global_reset_issued = 1;
3008 if (erisbits & ERI_G_STATUS_BUS_ERR_INT) {
3009 pci_error_int = 1;
3010 HSTAT(erip, pci_error_int);
3011 } else if (erisbits & ERI_G_STATUS_PERR_INT) {
3012 HSTAT(erip, parity_error);
3013 } else {
3014 HSTAT(erip, unknown_fatal);
3015 }
3016 }
3017
3018 /*
3019 * PCI bus error
3020 */
3021 if (pci_error_int && erip->pci_config_handle) {
3022 pci_status = pci_config_get16(erip->pci_config_handle,
3023 PCI_CONF_STAT);
3024 ERI_DEBUG_MSG2(erip, FATAL_ERR_MSG, "Bus Error Status %x",
3025 pci_status);
3026 if (pci_status & PCI_DATA_PARITY_REP)
3027 HSTAT(erip, pci_data_parity_err);
3028 if (pci_status & PCI_SING_TARGET_ABORT)
3029 HSTAT(erip, pci_signal_target_abort);
3030 if (pci_status & PCI_RCV_TARGET_ABORT)
3031 HSTAT(erip, pci_rcvd_target_abort);
3032 if (pci_status & PCI_RCV_MASTER_ABORT)
3033 HSTAT(erip, pci_rcvd_master_abort);
3034 if (pci_status & PCI_SING_SYSTEM_ERR)
3035 HSTAT(erip, pci_signal_system_err);
3036 if (pci_status & PCI_DATA_PARITY_ERR)
3037 HSTAT(erip, pci_signal_system_err);
3038 /*
3039 * clear it by writing the value that was read back.
3040 */
3041 pci_config_put16(erip->pci_config_handle, PCI_CONF_STAT,
3042 pci_status);
3043 }
3044 }
3045
3046 /*
3047 * Handle interrupts regarding non-fatal events.
3048 * TXMAC, RXMAC and MACCTL events
3049 */
3050 static void
3051 eri_nonfatal_err(struct eri *erip, uint32_t erisbits)
3052 {
3053
3054 uint32_t txmac_sts, rxmac_sts, macctl_sts, pause_time;
3055
3056 #ifdef ERI_PM_WORKAROUND
3057 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
3058 PCI_PM_IDLESPEED_NONE) == DDI_SUCCESS)
3059 erip->stats.pmcap = ERI_PMCAP_NONE;
3060 #endif
3061
3062 if (erisbits & ERI_G_STATUS_TX_MAC_INT) {
3063 txmac_sts = GET_MACREG(txsts);
3064 if (txmac_sts & BMAC_TXSTS_TX_URUN) {
3065 erip->linkcheck = 1;
3066 HSTAT(erip, txmac_urun);
3067 HSTAT(erip, oerrors);
3068 }
3069
3070 if (txmac_sts & BMAC_TXSTS_MAXPKT_ERR) {
3071 erip->linkcheck = 1;
3072 HSTAT(erip, txmac_maxpkt_err);
3073 HSTAT(erip, oerrors);
3074 }
3075 if (txmac_sts & BMAC_TXSTS_NCC_EXP) {
3076 erip->stats.collisions += 0x10000;
3077 }
3078
3079 if (txmac_sts & BMAC_TXSTS_ECC_EXP) {
3080 erip->stats.excessive_coll += 0x10000;
3081 }
3082
3083 if (txmac_sts & BMAC_TXSTS_LCC_EXP) {
3084 erip->stats.late_coll += 0x10000;
3085 }
3086
3087 if (txmac_sts & BMAC_TXSTS_FCC_EXP) {
3088 erip->stats.first_coll += 0x10000;
3089 }
3090
3091 if (txmac_sts & BMAC_TXSTS_DEFER_EXP) {
3092 HSTAT(erip, defer_timer_exp);
3093 }
3094
3095 if (txmac_sts & BMAC_TXSTS_PEAK_EXP) {
3096 erip->stats.peak_attempt_cnt += 0x100;
3097 }
3098 }
3099
3100 if (erisbits & ERI_G_STATUS_RX_NO_BUF) {
3101 ERI_DEBUG_MSG1(erip, NONFATAL_MSG, "rx dropped/no free desc");
3102
3103 if (eri_overflow_reset)
3104 erip->linkcheck = 1;
3105
3106 HSTAT(erip, no_free_rx_desc);
3107 HSTAT(erip, ierrors);
3108 }
3109 if (erisbits & ERI_G_STATUS_RX_MAC_INT) {
3110 rxmac_sts = GET_MACREG(rxsts);
3111 if (rxmac_sts & BMAC_RXSTS_RX_OVF) {
3112 #ifndef ERI_RMAC_HANG_WORKAROUND
3113 eri_stop_timer(erip); /* acquire linklock */
3114 erip->check_rmac_hang ++;
3115 erip->check2_rmac_hang = 0;
3116 erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr);
3117 erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr);
3118
3119 ERI_DEBUG_MSG5(erip, NONFATAL_MSG,
3120 "overflow intr %d: %8x wr:%2x rd:%2x",
3121 erip->check_rmac_hang,
3122 GET_MACREG(macsm),
3123 GET_ERXREG(rxfifo_wr_ptr),
3124 GET_ERXREG(rxfifo_rd_ptr));
3125
3126 eri_start_timer(erip, eri_check_link,
3127 ERI_CHECK_HANG_TIMER);
3128 #endif
3129 if (eri_overflow_reset)
3130 erip->linkcheck = 1;
3131
3132 HSTAT(erip, rx_overflow);
3133 HSTAT(erip, ierrors);
3134 }
3135
3136 if (rxmac_sts & BMAC_RXSTS_ALE_EXP) {
3137 erip->stats.rx_align_err += 0x10000;
3138 erip->stats.ierrors += 0x10000;
3139 }
3140
3141 if (rxmac_sts & BMAC_RXSTS_CRC_EXP) {
3142 erip->stats.rx_crc_err += 0x10000;
3143 erip->stats.ierrors += 0x10000;
3144 }
3145
3146 if (rxmac_sts & BMAC_RXSTS_LEN_EXP) {
3147 erip->stats.rx_length_err += 0x10000;
3148 erip->stats.ierrors += 0x10000;
3149 }
3150
3151 if (rxmac_sts & BMAC_RXSTS_CVI_EXP) {
3152 erip->stats.rx_code_viol_err += 0x10000;
3153 erip->stats.ierrors += 0x10000;
3154 }
3155 }
3156
3157 if (erisbits & ERI_G_STATUS_MAC_CTRL_INT) {
3158
3159 macctl_sts = GET_MACREG(macctl_sts);
3160 if (macctl_sts & ERI_MCTLSTS_PAUSE_RCVD) {
3161 pause_time = ((macctl_sts &
3162 ERI_MCTLSTS_PAUSE_TIME) >> 16);
3163 ERI_DEBUG_MSG2(erip, NONFATAL_MSG,
3164 "PAUSE Received. pause time = %X slot_times",
3165 pause_time);
3166 HSTAT(erip, pause_rxcount);
3167 erip->stats.pause_time_count += pause_time;
3168 }
3169
3170 if (macctl_sts & ERI_MCTLSTS_PAUSE_STATE) {
3171 HSTAT(erip, pause_oncount);
3172 erip->stats.pausing = 1;
3173 }
3174
3175 if (macctl_sts & ERI_MCTLSTS_NONPAUSE) {
3176 HSTAT(erip, pause_offcount);
3177 erip->stats.pausing = 0;
3178 }
3179 }
3180
3181 }
3182
3183 /*
3184 * if this is the first init do not bother to save the
3185 * counters.
3186 */
3187 static void
3188 eri_savecntrs(struct eri *erip)
3189 {
3190 uint32_t fecnt, aecnt, lecnt, rxcv;
3191 uint32_t ltcnt, excnt, fccnt;
3192
3193 /* XXX What all gets added in ierrors and oerrors? */
3194 fecnt = GET_MACREG(fecnt);
3195 HSTATN(erip, rx_crc_err, fecnt);
3196 PUT_MACREG(fecnt, 0);
3197
3198 aecnt = GET_MACREG(aecnt);
3199 HSTATN(erip, rx_align_err, aecnt);
3200 PUT_MACREG(aecnt, 0);
3201
3202 lecnt = GET_MACREG(lecnt);
3203 HSTATN(erip, rx_length_err, lecnt);
3204 PUT_MACREG(lecnt, 0);
3205
3206 rxcv = GET_MACREG(rxcv);
3207 HSTATN(erip, rx_code_viol_err, rxcv);
3208 PUT_MACREG(rxcv, 0);
3209
3210 ltcnt = GET_MACREG(ltcnt);
3211 HSTATN(erip, late_coll, ltcnt);
3212 PUT_MACREG(ltcnt, 0);
3213
3214 erip->stats.collisions += (GET_MACREG(nccnt) + ltcnt);
3215 PUT_MACREG(nccnt, 0);
3216
3217 excnt = GET_MACREG(excnt);
3218 HSTATN(erip, excessive_coll, excnt);
3219 PUT_MACREG(excnt, 0);
3220
3221 fccnt = GET_MACREG(fccnt);
3222 HSTATN(erip, first_coll, fccnt);
3223 PUT_MACREG(fccnt, 0);
3224
3225 /*
3226 * Do not add code violations to input errors.
3227 * They are already counted in CRC errors
3228 */
3229 HSTATN(erip, ierrors, (fecnt + aecnt + lecnt));
3230 HSTATN(erip, oerrors, (ltcnt + excnt));
3231 }
3232
3233 mblk_t *
3234 eri_allocb_sp(size_t size)
3235 {
3236 mblk_t *mp;
3237
3238 size += 128;
3239 if ((mp = allocb(size + 3 * ERI_BURSTSIZE, BPRI_HI)) == NULL) {
3240 return (NULL);
3241 }
3242 mp->b_wptr += 128;
3243 mp->b_wptr = (uint8_t *)ROUNDUP2(mp->b_wptr, ERI_BURSTSIZE);
3244 mp->b_rptr = mp->b_wptr;
3245
3246 return (mp);
3247 }
3248
3249 mblk_t *
3250 eri_allocb(size_t size)
3251 {
3252 mblk_t *mp;
3253
3254 if ((mp = allocb(size + 3 * ERI_BURSTSIZE, BPRI_HI)) == NULL) {
3255 return (NULL);
3256 }
3257 mp->b_wptr = (uint8_t *)ROUNDUP2(mp->b_wptr, ERI_BURSTSIZE);
3258 mp->b_rptr = mp->b_wptr;
3259
3260 return (mp);
3261 }
3262
3263 /*
3264 * Hardware Dependent Functions
3265 * New Section.
3266 */
3267
3268 /* <<<<<<<<<<<<<<<< Fast Ethernet PHY Bit Bang Operations >>>>>>>>>>>>>>>>>> */
3269
3270 static void
3271 send_bit(struct eri *erip, uint32_t x)
3272 {
3273 PUT_MIFREG(mif_bbdata, x);
3274 PUT_MIFREG(mif_bbclk, ERI_BBCLK_LOW);
3275 PUT_MIFREG(mif_bbclk, ERI_BBCLK_HIGH);
3276 }
3277
3278 /*
3279 * To read the MII register bits according to the IEEE Standard
3280 */
3281 static uint32_t
3282 get_bit_std(struct eri *erip)
3283 {
3284 uint32_t x;
3285
3286 PUT_MIFREG(mif_bbclk, ERI_BBCLK_LOW);
3287 drv_usecwait(1); /* wait for >330 ns for stable data */
3288 if (param_transceiver == INTERNAL_XCVR)
3289 x = (GET_MIFREG(mif_cfg) & ERI_MIF_CFGM0) ? 1 : 0;
3290 else
3291 x = (GET_MIFREG(mif_cfg) & ERI_MIF_CFGM1) ? 1 : 0;
3292 PUT_MIFREG(mif_bbclk, ERI_BBCLK_HIGH);
3293 return (x);
3294 }
3295
3296 #define SEND_BIT(x) send_bit(erip, x)
3297 #define GET_BIT_STD(x) x = get_bit_std(erip)
3298
3299
3300 static void
3301 eri_bb_mii_write(struct eri *erip, uint8_t regad, uint16_t data)
3302 {
3303 uint8_t phyad;
3304 int i;
3305
3306 PUT_MIFREG(mif_bbopenb, 1); /* Enable the MII driver */
3307 phyad = erip->phyad;
3308 (void) eri_bb_force_idle(erip);
3309 SEND_BIT(0); SEND_BIT(1); /* <ST> */
3310 SEND_BIT(0); SEND_BIT(1); /* <OP> */
3311 for (i = 4; i >= 0; i--) { /* <AAAAA> */
3312 SEND_BIT((phyad >> i) & 1);
3313 }
3314 for (i = 4; i >= 0; i--) { /* <RRRRR> */
3315 SEND_BIT((regad >> i) & 1);
3316 }
3317 SEND_BIT(1); SEND_BIT(0); /* <TA> */
3318 for (i = 0xf; i >= 0; i--) { /* <DDDDDDDDDDDDDDDD> */
3319 SEND_BIT((data >> i) & 1);
3320 }
3321 PUT_MIFREG(mif_bbopenb, 0); /* Disable the MII driver */
3322 }
3323
3324 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
3325 static uint32_t
3326 eri_bb_mii_read(struct eri *erip, uint8_t regad, uint16_t *datap)
3327 {
3328 uint8_t phyad;
3329 int i;
3330 uint32_t x;
3331 uint32_t y;
3332
3333 *datap = 0;
3334
3335 PUT_MIFREG(mif_bbopenb, 1); /* Enable the MII driver */
3336 phyad = erip->phyad;
3337 (void) eri_bb_force_idle(erip);
3338 SEND_BIT(0); SEND_BIT(1); /* <ST> */
3339 SEND_BIT(1); SEND_BIT(0); /* <OP> */
3340 for (i = 4; i >= 0; i--) { /* <AAAAA> */
3341 SEND_BIT((phyad >> i) & 1);
3342 }
3343 for (i = 4; i >= 0; i--) { /* <RRRRR> */
3344 SEND_BIT((regad >> i) & 1);
3345 }
3346
3347 PUT_MIFREG(mif_bbopenb, 0); /* Disable the MII driver */
3348
3349 GET_BIT_STD(x);
3350 GET_BIT_STD(y); /* <TA> */
3351 for (i = 0xf; i >= 0; i--) { /* <DDDDDDDDDDDDDDDD> */
3352 GET_BIT_STD(x);
3353 *datap += (x << i);
3354 }
3355 /* Kludge to get the Transceiver out of hung mode */
3356 /* XXX: Test if this is still needed */
3357 GET_BIT_STD(x);
3358 GET_BIT_STD(x);
3359 GET_BIT_STD(x);
3360
3361 return (y);
3362 }
3363
3364 static void
3365 eri_bb_force_idle(struct eri *erip)
3366 {
3367 int i;
3368
3369 for (i = 0; i < 33; i++) {
3370 SEND_BIT(1);
3371 }
3372 }
3373
3374 /* <<<<<<<<<<<<<<<<<<<<End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
3375
3376
3377 /* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */
3378
3379 #ifdef ERI_FRM_DEBUG
3380 int frame_flag = 0;
3381 #endif
3382
3383 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
3384 static uint32_t
3385 eri_mii_read(struct eri *erip, uint8_t regad, uint16_t *datap)
3386 {
3387 uint32_t frame;
3388 uint8_t phyad;
3389
3390 if (param_transceiver == NO_XCVR)
3391 return (1); /* No xcvr present */
3392
3393 if (!erip->frame_enable)
3394 return (eri_bb_mii_read(erip, regad, datap));
3395
3396 phyad = erip->phyad;
3397 #ifdef ERI_FRM_DEBUG
3398 if (!frame_flag) {
3399 eri_errror(erip->dip, "Frame Register used for MII");
3400 frame_flag = 1;
3401 }
3402 #endif
3403 ERI_DEBUG_MSG3(erip, FRM_MSG,
3404 "Frame Reg :mii_read: phyad = %X reg = %X ", phyad, regad);
3405
3406 PUT_MIFREG(mif_frame, ERI_MIF_FRREAD |
3407 (phyad << ERI_MIF_FRPHYAD_SHIFT) |
3408 (regad << ERI_MIF_FRREGAD_SHIFT));
3409 MIF_ERIDELAY(300, phyad, regad);
3410 frame = GET_MIFREG(mif_frame);
3411 if ((frame & ERI_MIF_FRTA0) == 0) {
3412 return (1);
3413 } else {
3414 *datap = (uint16_t)(frame & ERI_MIF_FRDATA);
3415 return (0);
3416 }
3417
3418 }
3419
3420 static void
3421 eri_mii_write(struct eri *erip, uint8_t regad, uint16_t data)
3422 {
3423 uint8_t phyad;
3424
3425 if (!erip->frame_enable) {
3426 eri_bb_mii_write(erip, regad, data);
3427 return;
3428 }
3429
3430 phyad = erip->phyad;
3431
3432 PUT_MIFREG(mif_frame, (ERI_MIF_FRWRITE |
3433 (phyad << ERI_MIF_FRPHYAD_SHIFT) |
3434 (regad << ERI_MIF_FRREGAD_SHIFT) | data));
3435 MIF_ERIDELAY(300, phyad, regad);
3436 (void) GET_MIFREG(mif_frame);
3437 }
3438
3439
3440 /* <<<<<<<<<<<<<<<<< PACKET TRANSMIT FUNCTIONS >>>>>>>>>>>>>>>>>>>> */
3441
3442 #define ERI_CROSS_PAGE_BOUNDRY(i, size, pagesize) \
3443 ((i & pagesize) != ((i + size) & pagesize))
3444
3445 /*
3446 * Send a single mblk. Returns B_TRUE if the packet is sent, or disposed of
3447 * by freemsg. Returns B_FALSE if the packet was not sent or queued, and
3448 * should be retried later (due to tx resource exhaustion.)
3449 */
3450 static boolean_t
3451 eri_send_msg(struct eri *erip, mblk_t *mp)
3452 {
3453 volatile struct eri_tmd *tmdp = NULL;
3454 volatile struct eri_tmd *tbasep = NULL;
3455 uint32_t len_msg = 0;
3456 uint32_t i;
3457 uint64_t int_me = 0;
3458 uint_t tmdcsum = 0;
3459 uint_t start_offset = 0;
3460 uint_t stuff_offset = 0;
3461 uint_t flags = 0;
3462
3463 caddr_t ptr;
3464 uint32_t offset;
3465 uint64_t ctrl;
3466 ddi_dma_cookie_t c;
3467
3468 if (!param_linkup) {
3469 freemsg(mp);
3470 HSTAT(erip, tnocar);
3471 HSTAT(erip, oerrors);
3472 return (B_TRUE);
3473 }
3474
3475 #ifdef ERI_HWCSUM
3476 mac_hcksum_get(mp, &start_offset, &stuff_offset, NULL, NULL, &flags);
3477
3478 if (flags & HCK_PARTIALCKSUM) {
3479 if (get_ether_type(mp->b_rptr) == ETHERTYPE_VLAN) {
3480 start_offset += ETHERHEADER_SIZE + 4;
3481 stuff_offset += ETHERHEADER_SIZE + 4;
3482 } else {
3483 start_offset += ETHERHEADER_SIZE;
3484 stuff_offset += ETHERHEADER_SIZE;
3485 }
3486 tmdcsum = ERI_TMD_CSENABL;
3487 }
3488 #endif /* ERI_HWCSUM */
3489
3490 if ((len_msg = msgsize(mp)) > ERI_BUFSIZE) {
3491 /*
3492 * This sholdn't ever occur, as GLD should not send us
3493 * packets that are too big.
3494 */
3495 HSTAT(erip, oerrors);
3496 freemsg(mp);
3497 return (B_TRUE);
3498 }
3499
3500 /*
3501 * update MIB II statistics
3502 */
3503 BUMP_OutNUcast(erip, mp->b_rptr);
3504
3505 mutex_enter(&erip->xmitlock);
3506
3507 tbasep = erip->eri_tmdp;
3508
3509 /* Check if there are enough descriptors for this packet */
3510 tmdp = erip->tnextp;
3511
3512 if (tmdp >= erip->tcurp) /* check notmds */
3513 i = tmdp - erip->tcurp;
3514 else
3515 i = tmdp + ERI_TPENDING - erip->tcurp;
3516
3517 if (i > (ERI_TPENDING - 4))
3518 goto notmds;
3519
3520 if (i >= (ERI_TPENDING >> 1) && !(erip->starts & 0x7)) {
3521 int_me = ERI_TMD_INTME;
3522
3523 if (!erip->tx_int_me) {
3524 PUT_GLOBREG(intmask, GET_GLOBREG(intmask) &
3525 ~(ERI_G_MASK_TX_INT_ME));
3526 erip->tx_int_me = 1;
3527 }
3528 }
3529
3530 i = tmdp - tbasep; /* index */
3531
3532 offset = (i * ERI_BUFSIZE);
3533 ptr = erip->tbuf_kaddr + offset;
3534
3535 mcopymsg(mp, ptr);
3536
3537 #ifdef ERI_HDX_BUG_WORKAROUND
3538 if ((param_mode) || (eri_hdx_pad_enable == 0)) {
3539 if (len_msg < ETHERMIN) {
3540 bzero((ptr + len_msg), (ETHERMIN - len_msg));
3541 len_msg = ETHERMIN;
3542 }
3543 } else {
3544 if (len_msg < 97) {
3545 bzero((ptr + len_msg), (97 - len_msg));
3546 len_msg = 97;
3547 }
3548 }
3549 #endif
3550 c.dmac_address = erip->tbuf_ioaddr + offset;
3551 (void) ddi_dma_sync(erip->tbuf_handle,
3552 (off_t)offset, len_msg, DDI_DMA_SYNC_FORDEV);
3553
3554 /* first and last (and only!) descr of packet */
3555 ctrl = ERI_TMD_SOP | ERI_TMD_EOP | int_me | tmdcsum |
3556 (start_offset << ERI_TMD_CSSTART_SHIFT) |
3557 (stuff_offset << ERI_TMD_CSSTUFF_SHIFT);
3558
3559 PUT_TMD(tmdp, c, len_msg, ctrl);
3560 ERI_SYNCIOPB(erip, tmdp, sizeof (struct eri_tmd),
3561 DDI_DMA_SYNC_FORDEV);
3562
3563 tmdp = NEXTTMD(erip, tmdp);
3564 erip->tx_cur_cnt++;
3565
3566 erip->tx_kick = tmdp - tbasep;
3567 PUT_ETXREG(tx_kick, erip->tx_kick);
3568 erip->tnextp = tmdp;
3569
3570 erip->starts++;
3571
3572 if (erip->tx_cur_cnt >= tx_interrupt_rate) {
3573 erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
3574 ETX_COMPLETION_MASK);
3575 (void) eri_reclaim(erip, erip->tx_completion);
3576 }
3577 mutex_exit(&erip->xmitlock);
3578
3579 return (B_TRUE);
3580
3581 notmds:
3582 HSTAT(erip, notmds);
3583 erip->wantw = B_TRUE;
3584
3585 mutex_exit(&erip->xmitlock);
3586
3587 return (B_FALSE);
3588 }
3589
3590 static mblk_t *
3591 eri_m_tx(void *arg, mblk_t *mp)
3592 {
3593 struct eri *erip = arg;
3594 mblk_t *next;
3595
3596 while (mp != NULL) {
3597 next = mp->b_next;
3598 mp->b_next = NULL;
3599 if (!eri_send_msg(erip, mp)) {
3600 mp->b_next = next;
3601 break;
3602 }
3603 mp = next;
3604 }
3605
3606 return (mp);
3607 }
3608
3609 /*
3610 * Transmit completion reclaiming.
3611 */
3612 static boolean_t
3613 eri_reclaim(struct eri *erip, uint32_t tx_completion)
3614 {
3615 volatile struct eri_tmd *tmdp;
3616 struct eri_tmd *tcomp;
3617 struct eri_tmd *tbasep;
3618 struct eri_tmd *tlimp;
3619 uint64_t flags;
3620 uint_t reclaimed = 0;
3621
3622 tbasep = erip->eri_tmdp;
3623 tlimp = erip->eri_tmdlimp;
3624
3625 tmdp = erip->tcurp;
3626 tcomp = tbasep + tx_completion; /* pointer to completion tmd */
3627
3628 /*
3629 * Loop through each TMD starting from tcurp and upto tcomp.
3630 */
3631 while (tmdp != tcomp) {
3632 flags = GET_TMD_FLAGS(tmdp);
3633 if (flags & (ERI_TMD_SOP))
3634 HSTAT(erip, opackets64);
3635
3636 HSTATN(erip, obytes64, (flags & ERI_TMD_BUFSIZE));
3637
3638 tmdp = NEXTTMDP(tbasep, tlimp, tmdp);
3639 reclaimed++;
3640 }
3641
3642 erip->tcurp = tmdp;
3643 erip->tx_cur_cnt -= reclaimed;
3644
3645 return (erip->wantw && reclaimed ? B_TRUE : B_FALSE);
3646 }
3647
3648
3649 /* <<<<<<<<<<<<<<<<<<< PACKET RECEIVE FUNCTIONS >>>>>>>>>>>>>>>>>>> */
3650 static mblk_t *
3651 eri_read_dma(struct eri *erip, volatile struct rmd *rmdp,
3652 int rmdi, uint64_t flags)
3653 {
3654 mblk_t *bp, *nbp;
3655 int len;
3656 uint_t ccnt;
3657 ddi_dma_cookie_t c;
3658 #ifdef ERI_RCV_CKSUM
3659 ushort_t sum;
3660 #endif /* ERI_RCV_CKSUM */
3661 mblk_t *retmp = NULL;
3662
3663 bp = erip->rmblkp[rmdi];
3664 len = (flags & ERI_RMD_BUFSIZE) >> ERI_RMD_BUFSIZE_SHIFT;
3665 #ifdef ERI_DONT_STRIP_CRC
3666 len -= 4;
3667 #endif
3668 /*
3669 * In the event of RX FIFO overflow error, ERI REV 1.0 ASIC can
3670 * corrupt packets following the descriptor corresponding the
3671 * overflow. To detect the corrupted packets, we disable the
3672 * dropping of the "bad" packets at the MAC. The descriptor
3673 * then would have the "BAD" bit set. We drop the overflowing
3674 * packet and the packet following it. We could have done some sort
3675 * of checking to determine if the second packet was indeed bad
3676 * (using CRC or checksum) but it would be expensive in this
3677 * routine, since it is run in interrupt context.
3678 */
3679 if ((flags & ERI_RMD_BAD) || (len < ETHERMIN) || (len > ETHERMAX+4)) {
3680
3681 HSTAT(erip, rx_bad_pkts);
3682 if ((flags & ERI_RMD_BAD) == 0)
3683 HSTAT(erip, ierrors);
3684 if (len < ETHERMIN) {
3685 HSTAT(erip, rx_runt);
3686 } else if (len > ETHERMAX+4) {
3687 HSTAT(erip, rx_toolong_pkts);
3688 }
3689 HSTAT(erip, drop);
3690 UPDATE_RMD(rmdp);
3691
3692 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3693 DDI_DMA_SYNC_FORDEV);
3694 return (NULL);
3695 }
3696 #ifdef ERI_DONT_STRIP_CRC
3697 {
3698 uint32_t hw_fcs, tail_fcs;
3699 /*
3700 * since we don't let the hardware strip the CRC in hdx
3701 * then the driver needs to do it.
3702 * this is to workaround a hardware bug
3703 */
3704 bp->b_wptr = bp->b_rptr + ERI_FSTBYTE_OFFSET + len;
3705 /*
3706 * Get the Checksum calculated by the hardware.
3707 */
3708 hw_fcs = flags & ERI_RMD_CKSUM;
3709 /*
3710 * Catch the case when the CRC starts on an odd
3711 * boundary.
3712 */
3713 tail_fcs = bp->b_wptr[0] << 8 | bp->b_wptr[1];
3714 tail_fcs += bp->b_wptr[2] << 8 | bp->b_wptr[3];
3715 tail_fcs = (tail_fcs & 0xffff) + (tail_fcs >> 16);
3716 if ((uintptr_t)(bp->b_wptr) & 1) {
3717 tail_fcs = (tail_fcs << 8) & 0xffff | (tail_fcs >> 8);
3718 }
3719 hw_fcs += tail_fcs;
3720 hw_fcs = (hw_fcs & 0xffff) + (hw_fcs >> 16);
3721 hw_fcs &= 0xffff;
3722 /*
3723 * Now we can replace what the hardware wrote, make believe
3724 * it got it right in the first place.
3725 */
3726 flags = (flags & ~(uint64_t)ERI_RMD_CKSUM) | hw_fcs;
3727 }
3728 #endif
3729 /*
3730 * Packet Processing
3731 * Once we get a packet bp, we try allocate a new mblk, nbp
3732 * to replace this one. If we succeed, we map it to the current
3733 * dma handle and update the descriptor with the new cookie. We
3734 * then put bp in our read service queue erip->ipq, if it exists
3735 * or we just bp to the streams expecting it.
3736 * If allocation of the new mblk fails, we implicitly drop the
3737 * current packet, i.e do not pass up the mblk and re-use it.
3738 * Re-mapping is not required.
3739 */
3740
3741 if (len < eri_rx_bcopy_max) {
3742 if ((nbp = eri_allocb_sp(len + ERI_FSTBYTE_OFFSET))) {
3743 (void) ddi_dma_sync(erip->ndmarh[rmdi], 0,
3744 len + ERI_FSTBYTE_OFFSET, DDI_DMA_SYNC_FORCPU);
3745 DB_TYPE(nbp) = M_DATA;
3746 bcopy(bp->b_rptr, nbp->b_rptr,
3747 len + ERI_FSTBYTE_OFFSET);
3748 UPDATE_RMD(rmdp);
3749 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3750 DDI_DMA_SYNC_FORDEV);
3751
3752 /* Add the First Byte offset to the b_rptr */
3753 nbp->b_rptr += ERI_FSTBYTE_OFFSET;
3754 nbp->b_wptr = nbp->b_rptr + len;
3755
3756 #ifdef ERI_RCV_CKSUM
3757 sum = ~(uint16_t)(flags & ERI_RMD_CKSUM);
3758 ERI_PROCESS_READ(erip, nbp, sum);
3759 #else
3760 ERI_PROCESS_READ(erip, nbp);
3761 #endif
3762 retmp = nbp;
3763 } else {
3764
3765 /*
3766 * mblk allocation has failed. Re-use the old mblk for
3767 * the next packet. Re-mapping is not required since
3768 * the same mblk and dma cookie is to be used again.
3769 */
3770 HSTAT(erip, ierrors);
3771 HSTAT(erip, allocbfail);
3772 HSTAT(erip, norcvbuf);
3773
3774 UPDATE_RMD(rmdp);
3775 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3776 DDI_DMA_SYNC_FORDEV);
3777 ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail");
3778 }
3779 } else {
3780 /* Use dma unmap/map */
3781 if ((nbp = eri_allocb_sp(ERI_BUFSIZE))) {
3782 /*
3783 * How do we harden this, specially if unbind
3784 * succeeds and then bind fails?
3785 * If Unbind fails, we can leave without updating
3786 * the descriptor but would it continue to work on
3787 * next round?
3788 */
3789 (void) ddi_dma_unbind_handle(erip->ndmarh[rmdi]);
3790 (void) ddi_dma_addr_bind_handle(erip->ndmarh[rmdi],
3791 NULL, (caddr_t)nbp->b_rptr, ERI_BUFSIZE,
3792 DDI_DMA_READ | DDI_DMA_CONSISTENT,
3793 DDI_DMA_DONTWAIT, 0, &c, &ccnt);
3794
3795 erip->rmblkp[rmdi] = nbp;
3796 PUT_RMD(rmdp, c);
3797 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3798 DDI_DMA_SYNC_FORDEV);
3799
3800 /* Add the First Byte offset to the b_rptr */
3801
3802 bp->b_rptr += ERI_FSTBYTE_OFFSET;
3803 bp->b_wptr = bp->b_rptr + len;
3804
3805 #ifdef ERI_RCV_CKSUM
3806 sum = ~(uint16_t)(flags & ERI_RMD_CKSUM);
3807 ERI_PROCESS_READ(erip, bp, sum);
3808 #else
3809 ERI_PROCESS_READ(erip, bp);
3810 #endif
3811 retmp = bp;
3812 } else {
3813
3814 /*
3815 * mblk allocation has failed. Re-use the old mblk for
3816 * the next packet. Re-mapping is not required since
3817 * the same mblk and dma cookie is to be used again.
3818 */
3819 HSTAT(erip, ierrors);
3820 HSTAT(erip, allocbfail);
3821 HSTAT(erip, norcvbuf);
3822
3823 UPDATE_RMD(rmdp);
3824 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3825 DDI_DMA_SYNC_FORDEV);
3826 ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail");
3827 }
3828 }
3829
3830 return (retmp);
3831 }
3832
3833 #define LINK_STAT_DISPLAY_TIME 20
3834
3835 static int
3836 eri_init_xfer_params(struct eri *erip)
3837 {
3838 int i;
3839 dev_info_t *dip;
3840
3841 dip = erip->dip;
3842
3843 for (i = 0; i < A_CNT(param_arr); i++)
3844 erip->param_arr[i] = param_arr[i];
3845
3846 erip->xmit_dma_mode = 0;
3847 erip->rcv_dma_mode = 0;
3848 erip->mifpoll_enable = mifpoll_enable;
3849 erip->lance_mode_enable = lance_mode;
3850 erip->frame_enable = 1;
3851 erip->ngu_enable = ngu_enable;
3852
3853 if (!erip->g_nd && !eri_param_register(erip,
3854 erip->param_arr, A_CNT(param_arr))) {
3855 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
3856 param_reg_fail_msg);
3857 return (-1);
3858 }
3859
3860 /*
3861 * Set up the start-up values for user-configurable parameters
3862 * Get the values from the global variables first.
3863 * Use the MASK to limit the value to allowed maximum.
3864 */
3865
3866 param_transceiver = NO_XCVR;
3867
3868 /*
3869 * The link speed may be forced to either 10 Mbps or 100 Mbps using the
3870 * property "transfer-speed". This may be done in OBP by using the command
3871 * "apply transfer-speed=<speed> <device>". The speed may be either 10 or 100.
3872 */
3873 i = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "transfer-speed", 0);
3874 if (i != 0) {
3875 param_autoneg = 0; /* force speed */
3876 param_anar_100T4 = 0;
3877 param_anar_10fdx = 0;
3878 param_anar_10hdx = 0;
3879 param_anar_100fdx = 0;
3880 param_anar_100hdx = 0;
3881 param_anar_asm_dir = 0;
3882 param_anar_pause = 0;
3883
3884 if (i == 10)
3885 param_anar_10hdx = 1;
3886 else if (i == 100)
3887 param_anar_100hdx = 1;
3888 }
3889
3890 /*
3891 * Get the parameter values configured in .conf file.
3892 */
3893 param_ipg1 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg1", ipg1) &
3894 ERI_MASK_8BIT;
3895
3896 param_ipg2 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg2", ipg2) &
3897 ERI_MASK_8BIT;
3898
3899 param_use_intphy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3900 "use_int_xcvr", use_int_xcvr) & ERI_MASK_1BIT;
3901
3902 param_use_intphy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3903 "pace_size", pace_size) & ERI_MASK_8BIT;
3904
3905 param_autoneg = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3906 "adv_autoneg_cap", adv_autoneg_cap) & ERI_MASK_1BIT;
3907
3908 param_autoneg = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3909 "adv_autoneg_cap", adv_autoneg_cap) & ERI_MASK_1BIT;
3910
3911 param_anar_100T4 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3912 "adv_100T4_cap", adv_100T4_cap) & ERI_MASK_1BIT;
3913
3914 param_anar_100fdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3915 "adv_100fdx_cap", adv_100fdx_cap) & ERI_MASK_1BIT;
3916
3917 param_anar_100hdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3918 "adv_100hdx_cap", adv_100hdx_cap) & ERI_MASK_1BIT;
3919
3920 param_anar_10fdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3921 "adv_10fdx_cap", adv_10fdx_cap) & ERI_MASK_1BIT;
3922
3923 param_anar_10hdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3924 "adv_10hdx_cap", adv_10hdx_cap) & ERI_MASK_1BIT;
3925
3926 param_ipg0 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg0", ipg0) &
3927 ERI_MASK_8BIT;
3928
3929 param_intr_blank_time = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3930 "intr_blank_time", intr_blank_time) & ERI_MASK_8BIT;
3931
3932 param_intr_blank_packets = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3933 "intr_blank_packets", intr_blank_packets) & ERI_MASK_8BIT;
3934
3935 param_lance_mode = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3936 "lance_mode", lance_mode) & ERI_MASK_1BIT;
3937
3938 param_select_link = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3939 "select_link", select_link) & ERI_MASK_1BIT;
3940
3941 param_default_link = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3942 "default_link", default_link) & ERI_MASK_1BIT;
3943
3944 param_anar_asm_dir = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3945 "adv_asm_dir_cap", adv_pauseTX_cap) & ERI_MASK_1BIT;
3946
3947 param_anar_pause = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3948 "adv_pause_cap", adv_pauseRX_cap) & ERI_MASK_1BIT;
3949
3950 if (link_pulse_disabled)
3951 erip->link_pulse_disabled = 1;
3952 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, 0, "link-pulse-disabled"))
3953 erip->link_pulse_disabled = 1;
3954
3955 eri_statinit(erip);
3956 return (0);
3957
3958 }
3959
3960 static void
3961 eri_process_ndd_ioctl(struct eri *erip, queue_t *wq, mblk_t *mp, int cmd)
3962 {
3963
3964 uint32_t old_ipg1, old_ipg2, old_use_int_xcvr, old_autoneg;
3965 uint32_t old_100T4;
3966 uint32_t old_100fdx, old_100hdx, old_10fdx, old_10hdx;
3967 uint32_t old_ipg0, old_lance_mode;
3968 uint32_t old_intr_blank_time, old_intr_blank_packets;
3969 uint32_t old_asm_dir, old_pause;
3970 uint32_t old_select_link, old_default_link;
3971
3972 switch (cmd) {
3973 case ERI_ND_GET:
3974
3975 old_autoneg = param_autoneg;
3976 old_100T4 = param_anar_100T4;
3977 old_100fdx = param_anar_100fdx;
3978 old_100hdx = param_anar_100hdx;
3979 old_10fdx = param_anar_10fdx;
3980 old_10hdx = param_anar_10hdx;
3981 old_asm_dir = param_anar_asm_dir;
3982 old_pause = param_anar_pause;
3983
3984 param_autoneg = old_autoneg & ~ERI_NOTUSR;
3985 param_anar_100T4 = old_100T4 & ~ERI_NOTUSR;
3986 param_anar_100fdx = old_100fdx & ~ERI_NOTUSR;
3987 param_anar_100hdx = old_100hdx & ~ERI_NOTUSR;
3988 param_anar_10fdx = old_10fdx & ~ERI_NOTUSR;
3989 param_anar_10hdx = old_10hdx & ~ERI_NOTUSR;
3990 param_anar_asm_dir = old_asm_dir & ~ERI_NOTUSR;
3991 param_anar_pause = old_pause & ~ERI_NOTUSR;
3992
3993 if (!eri_nd_getset(wq, erip->g_nd, mp)) {
3994 param_autoneg = old_autoneg;
3995 param_anar_100T4 = old_100T4;
3996 param_anar_100fdx = old_100fdx;
3997 param_anar_100hdx = old_100hdx;
3998 param_anar_10fdx = old_10fdx;
3999 param_anar_10hdx = old_10hdx;
4000 param_anar_asm_dir = old_asm_dir;
4001 param_anar_pause = old_pause;
4002 miocnak(wq, mp, 0, EINVAL);
4003 return;
4004 }
4005 param_autoneg = old_autoneg;
4006 param_anar_100T4 = old_100T4;
4007 param_anar_100fdx = old_100fdx;
4008 param_anar_100hdx = old_100hdx;
4009 param_anar_10fdx = old_10fdx;
4010 param_anar_10hdx = old_10hdx;
4011 param_anar_asm_dir = old_asm_dir;
4012 param_anar_pause = old_pause;
4013
4014 qreply(wq, mp);
4015 break;
4016
4017 case ERI_ND_SET:
4018 old_ipg0 = param_ipg0;
4019 old_intr_blank_time = param_intr_blank_time;
4020 old_intr_blank_packets = param_intr_blank_packets;
4021 old_lance_mode = param_lance_mode;
4022 old_ipg1 = param_ipg1;
4023 old_ipg2 = param_ipg2;
4024 old_use_int_xcvr = param_use_intphy;
4025 old_autoneg = param_autoneg;
4026 old_100T4 = param_anar_100T4;
4027 old_100fdx = param_anar_100fdx;
4028 old_100hdx = param_anar_100hdx;
4029 old_10fdx = param_anar_10fdx;
4030 old_10hdx = param_anar_10hdx;
4031 param_autoneg = 0xff;
4032 old_asm_dir = param_anar_asm_dir;
4033 param_anar_asm_dir = 0xff;
4034 old_pause = param_anar_pause;
4035 param_anar_pause = 0xff;
4036 old_select_link = param_select_link;
4037 old_default_link = param_default_link;
4038
4039 if (!eri_nd_getset(wq, erip->g_nd, mp)) {
4040 param_autoneg = old_autoneg;
4041 miocnak(wq, mp, 0, EINVAL);
4042 return;
4043 }
4044
4045 qreply(wq, mp);
4046
4047 if (param_autoneg != 0xff) {
4048 ERI_DEBUG_MSG2(erip, NDD_MSG,
4049 "ndd_ioctl: new param_autoneg %d", param_autoneg);
4050 param_linkup = 0;
4051 erip->stats.link_up = LINK_STATE_DOWN;
4052 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4053 (void) eri_init(erip);
4054 } else {
4055 param_autoneg = old_autoneg;
4056 if ((old_use_int_xcvr != param_use_intphy) ||
4057 (old_default_link != param_default_link) ||
4058 (old_select_link != param_select_link)) {
4059 param_linkup = 0;
4060 erip->stats.link_up = LINK_STATE_DOWN;
4061 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4062 (void) eri_init(erip);
4063 } else if ((old_ipg1 != param_ipg1) ||
4064 (old_ipg2 != param_ipg2) ||
4065 (old_ipg0 != param_ipg0) ||
4066 (old_intr_blank_time != param_intr_blank_time) ||
4067 (old_intr_blank_packets !=
4068 param_intr_blank_packets) ||
4069 (old_lance_mode != param_lance_mode)) {
4070 param_linkup = 0;
4071 erip->stats.link_up = LINK_STATE_DOWN;
4072 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4073 (void) eri_init(erip);
4074 }
4075 }
4076 break;
4077 }
4078 }
4079
4080
4081 static int
4082 eri_stat_kstat_update(kstat_t *ksp, int rw)
4083 {
4084 struct eri *erip;
4085 struct erikstat *erikp;
4086 struct stats *esp;
4087 boolean_t macupdate = B_FALSE;
4088
4089 erip = (struct eri *)ksp->ks_private;
4090 erikp = (struct erikstat *)ksp->ks_data;
4091
4092 if (rw != KSTAT_READ)
4093 return (EACCES);
4094 /*
4095 * Update all the stats by reading all the counter registers.
4096 * Counter register stats are not updated till they overflow
4097 * and interrupt.
4098 */
4099
4100 mutex_enter(&erip->xmitlock);
4101 if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
4102 erip->tx_completion =
4103 GET_ETXREG(tx_completion) & ETX_COMPLETION_MASK;
4104 macupdate |= eri_reclaim(erip, erip->tx_completion);
4105 }
4106 mutex_exit(&erip->xmitlock);
4107 if (macupdate)
4108 mac_tx_update(erip->mh);
4109
4110 eri_savecntrs(erip);
4111
4112 esp = &erip->stats;
4113
4114 erikp->erik_txmac_maxpkt_err.value.ul = esp->txmac_maxpkt_err;
4115 erikp->erik_defer_timer_exp.value.ul = esp->defer_timer_exp;
4116 erikp->erik_peak_attempt_cnt.value.ul = esp->peak_attempt_cnt;
4117 erikp->erik_tx_hang.value.ul = esp->tx_hang;
4118
4119 erikp->erik_no_free_rx_desc.value.ul = esp->no_free_rx_desc;
4120
4121 erikp->erik_rx_hang.value.ul = esp->rx_hang;
4122 erikp->erik_rx_length_err.value.ul = esp->rx_length_err;
4123 erikp->erik_rx_code_viol_err.value.ul = esp->rx_code_viol_err;
4124 erikp->erik_pause_rxcount.value.ul = esp->pause_rxcount;
4125 erikp->erik_pause_oncount.value.ul = esp->pause_oncount;
4126 erikp->erik_pause_offcount.value.ul = esp->pause_offcount;
4127 erikp->erik_pause_time_count.value.ul = esp->pause_time_count;
4128
4129 erikp->erik_inits.value.ul = esp->inits;
4130 erikp->erik_jab.value.ul = esp->jab;
4131 erikp->erik_notmds.value.ul = esp->notmds;
4132 erikp->erik_allocbfail.value.ul = esp->allocbfail;
4133 erikp->erik_drop.value.ul = esp->drop;
4134 erikp->erik_rx_bad_pkts.value.ul = esp->rx_bad_pkts;
4135 erikp->erik_rx_inits.value.ul = esp->rx_inits;
4136 erikp->erik_tx_inits.value.ul = esp->tx_inits;
4137 erikp->erik_rxtag_err.value.ul = esp->rxtag_err;
4138 erikp->erik_parity_error.value.ul = esp->parity_error;
4139 erikp->erik_pci_error_int.value.ul = esp->pci_error_int;
4140 erikp->erik_unknown_fatal.value.ul = esp->unknown_fatal;
4141 erikp->erik_pci_data_parity_err.value.ul = esp->pci_data_parity_err;
4142 erikp->erik_pci_signal_target_abort.value.ul =
4143 esp->pci_signal_target_abort;
4144 erikp->erik_pci_rcvd_target_abort.value.ul =
4145 esp->pci_rcvd_target_abort;
4146 erikp->erik_pci_rcvd_master_abort.value.ul =
4147 esp->pci_rcvd_master_abort;
4148 erikp->erik_pci_signal_system_err.value.ul =
4149 esp->pci_signal_system_err;
4150 erikp->erik_pci_det_parity_err.value.ul = esp->pci_det_parity_err;
4151
4152 erikp->erik_pmcap.value.ul = esp->pmcap;
4153
4154 return (0);
4155 }
4156
4157 static void
4158 eri_statinit(struct eri *erip)
4159 {
4160 struct kstat *ksp;
4161 struct erikstat *erikp;
4162
4163 if ((ksp = kstat_create("eri", erip->instance, "driver_info", "net",
4164 KSTAT_TYPE_NAMED,
4165 sizeof (struct erikstat) / sizeof (kstat_named_t), 0)) == NULL) {
4166 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
4167 kstat_create_fail_msg);
4168 return;
4169 }
4170
4171 erip->ksp = ksp;
4172 erikp = (struct erikstat *)(ksp->ks_data);
4173 /*
4174 * MIB II kstat variables
4175 */
4176
4177 kstat_named_init(&erikp->erik_inits, "inits", KSTAT_DATA_ULONG);
4178
4179 kstat_named_init(&erikp->erik_txmac_maxpkt_err, "txmac_maxpkt_err",
4180 KSTAT_DATA_ULONG);
4181 kstat_named_init(&erikp->erik_defer_timer_exp, "defer_timer_exp",
4182 KSTAT_DATA_ULONG);
4183 kstat_named_init(&erikp->erik_peak_attempt_cnt, "peak_attempt_cnt",
4184 KSTAT_DATA_ULONG);
4185 kstat_named_init(&erikp->erik_tx_hang, "tx_hang", KSTAT_DATA_ULONG);
4186
4187 kstat_named_init(&erikp->erik_no_free_rx_desc, "no_free_rx_desc",
4188 KSTAT_DATA_ULONG);
4189 kstat_named_init(&erikp->erik_rx_hang, "rx_hang", KSTAT_DATA_ULONG);
4190 kstat_named_init(&erikp->erik_rx_length_err, "rx_length_err",
4191 KSTAT_DATA_ULONG);
4192 kstat_named_init(&erikp->erik_rx_code_viol_err, "rx_code_viol_err",
4193 KSTAT_DATA_ULONG);
4194
4195 kstat_named_init(&erikp->erik_pause_rxcount, "pause_rcv_cnt",
4196 KSTAT_DATA_ULONG);
4197
4198 kstat_named_init(&erikp->erik_pause_oncount, "pause_on_cnt",
4199 KSTAT_DATA_ULONG);
4200
4201 kstat_named_init(&erikp->erik_pause_offcount, "pause_off_cnt",
4202 KSTAT_DATA_ULONG);
4203 kstat_named_init(&erikp->erik_pause_time_count, "pause_time_cnt",
4204 KSTAT_DATA_ULONG);
4205
4206 kstat_named_init(&erikp->erik_jab, "jabber", KSTAT_DATA_ULONG);
4207 kstat_named_init(&erikp->erik_notmds, "no_tmds", KSTAT_DATA_ULONG);
4208 kstat_named_init(&erikp->erik_allocbfail, "allocbfail",
4209 KSTAT_DATA_ULONG);
4210
4211 kstat_named_init(&erikp->erik_drop, "drop", KSTAT_DATA_ULONG);
4212
4213 kstat_named_init(&erikp->erik_rx_bad_pkts, "bad_pkts",
4214 KSTAT_DATA_ULONG);
4215
4216 kstat_named_init(&erikp->erik_rx_inits, "rx_inits", KSTAT_DATA_ULONG);
4217
4218 kstat_named_init(&erikp->erik_tx_inits, "tx_inits", KSTAT_DATA_ULONG);
4219
4220 kstat_named_init(&erikp->erik_rxtag_err, "rxtag_error",
4221 KSTAT_DATA_ULONG);
4222
4223 kstat_named_init(&erikp->erik_parity_error, "parity_error",
4224 KSTAT_DATA_ULONG);
4225
4226 kstat_named_init(&erikp->erik_pci_error_int, "pci_error_interrupt",
4227 KSTAT_DATA_ULONG);
4228 kstat_named_init(&erikp->erik_unknown_fatal, "unknown_fatal",
4229 KSTAT_DATA_ULONG);
4230 kstat_named_init(&erikp->erik_pci_data_parity_err,
4231 "pci_data_parity_err", KSTAT_DATA_ULONG);
4232 kstat_named_init(&erikp->erik_pci_signal_target_abort,
4233 "pci_signal_target_abort", KSTAT_DATA_ULONG);
4234 kstat_named_init(&erikp->erik_pci_rcvd_target_abort,
4235 "pci_rcvd_target_abort", KSTAT_DATA_ULONG);
4236 kstat_named_init(&erikp->erik_pci_rcvd_master_abort,
4237 "pci_rcvd_master_abort", KSTAT_DATA_ULONG);
4238 kstat_named_init(&erikp->erik_pci_signal_system_err,
4239 "pci_signal_system_err", KSTAT_DATA_ULONG);
4240 kstat_named_init(&erikp->erik_pci_det_parity_err,
4241 "pci_det_parity_err", KSTAT_DATA_ULONG);
4242
4243 kstat_named_init(&erikp->erik_pmcap, "pmcap", KSTAT_DATA_ULONG);
4244
4245
4246 ksp->ks_update = eri_stat_kstat_update;
4247 ksp->ks_private = (void *) erip;
4248 kstat_install(ksp);
4249 }
4250
4251
4252 /* <<<<<<<<<<<<<<<<<<<<<<< NDD SUPPORT FUNCTIONS >>>>>>>>>>>>>>>>>>> */
4253 /*
4254 * ndd support functions to get/set parameters
4255 */
4256 /* Free the Named Dispatch Table by calling eri_nd_free */
4257 static void
4258 eri_param_cleanup(struct eri *erip)
4259 {
4260 if (erip->g_nd)
4261 (void) eri_nd_free(&erip->g_nd);
4262 }
4263
4264 /*
4265 * Extracts the value from the eri parameter array and prints the
4266 * parameter value. cp points to the required parameter.
4267 */
4268 /* ARGSUSED */
4269 static int
4270 eri_param_get(queue_t *q, mblk_t *mp, caddr_t cp)
4271 {
4272 param_t *eripa = (void *)cp;
4273 int param_len = 1;
4274 uint32_t param_val;
4275 mblk_t *nmp;
4276 int ok;
4277
4278 param_val = eripa->param_val;
4279 /*
4280 * Calculate space required in mblk.
4281 * Remember to include NULL terminator.
4282 */
4283 do {
4284 param_len++;
4285 param_val /= 10;
4286 } while (param_val);
4287
4288 ok = eri_mk_mblk_tail_space(mp, &nmp, param_len);
4289 if (ok == 0) {
4290 (void) sprintf((char *)nmp->b_wptr, "%d", eripa->param_val);
4291 nmp->b_wptr += param_len;
4292 }
4293
4294 return (ok);
4295 }
4296
4297 /*
4298 * Check if there is space for p_val at the end if mblk.
4299 * If not, allocate new 1k mblk.
4300 */
4301 static int
4302 eri_mk_mblk_tail_space(mblk_t *mp, mblk_t **nmp, size_t sz)
4303 {
4304 mblk_t *tmp = mp;
4305
4306 while (tmp->b_cont)
4307 tmp = tmp->b_cont;
4308
4309 if (MBLKTAIL(tmp) < sz) {
4310 if ((tmp->b_cont = allocb(1024, BPRI_HI)) == NULL)
4311 return (ENOMEM);
4312 tmp = tmp->b_cont;
4313 }
4314 *nmp = tmp;
4315 return (0);
4316 }
4317
4318 /*
4319 * Register each element of the parameter array with the
4320 * named dispatch handler. Each element is loaded using
4321 * eri_nd_load()
4322 */
4323 static int
4324 eri_param_register(struct eri *erip, param_t *eripa, int cnt)
4325 {
4326 /* cnt gives the count of the number of */
4327 /* elements present in the parameter array */
4328
4329 int i;
4330
4331 for (i = 0; i < cnt; i++, eripa++) {
4332 pfi_t setter = (pfi_t)eri_param_set;
4333
4334 switch (eripa->param_name[0]) {
4335 case '+': /* read-write */
4336 setter = (pfi_t)eri_param_set;
4337 break;
4338
4339 case '-': /* read-only */
4340 setter = NULL;
4341 break;
4342
4343 case '!': /* read-only, not displayed */
4344 case '%': /* read-write, not displayed */
4345 continue;
4346 }
4347
4348 if (!eri_nd_load(&erip->g_nd, eripa->param_name + 1,
4349 (pfi_t)eri_param_get, setter, (caddr_t)eripa)) {
4350 (void) eri_nd_free(&erip->g_nd);
4351 return (B_FALSE);
4352 }
4353 }
4354
4355 return (B_TRUE);
4356 }
4357
4358 /*
4359 * Sets the eri parameter to the value in the param_register using
4360 * eri_nd_load().
4361 */
4362 /* ARGSUSED */
4363 static int
4364 eri_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp)
4365 {
4366 char *end;
4367 long new_value;
4368 param_t *eripa = (void *)cp;
4369
4370 if (ddi_strtol(value, &end, 10, &new_value) != 0)
4371 return (EINVAL);
4372 if (end == value || new_value < eripa->param_min ||
4373 new_value > eripa->param_max) {
4374 return (EINVAL);
4375 }
4376 eripa->param_val = (uint32_t)new_value;
4377 return (0);
4378
4379 }
4380
4381 /* Free the table pointed to by 'ndp' */
4382 static void
4383 eri_nd_free(caddr_t *nd_pparam)
4384 {
4385 ND *nd;
4386
4387 if ((nd = (void *)(*nd_pparam)) != NULL) {
4388 if (nd->nd_tbl)
4389 kmem_free(nd->nd_tbl, nd->nd_size);
4390 kmem_free(nd, sizeof (ND));
4391 *nd_pparam = NULL;
4392 }
4393 }
4394
4395 static int
4396 eri_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp)
4397 {
4398 int err;
4399 IOCP iocp;
4400 MBLKP mp1;
4401 ND *nd;
4402 NDE *nde;
4403 char *valp;
4404 size_t avail;
4405 mblk_t *nmp;
4406
4407 if (!nd_param)
4408 return (B_FALSE);
4409
4410 nd = (void *)nd_param;
4411 iocp = (void *)mp->b_rptr;
4412 if ((iocp->ioc_count == 0) || !(mp1 = mp->b_cont)) {
4413 mp->b_datap->db_type = M_IOCACK;
4414 iocp->ioc_count = 0;
4415 iocp->ioc_error = EINVAL;
4416 return (B_TRUE);
4417 }
4418 /*
4419 * NOTE - logic throughout nd_xxx assumes single data block for ioctl.
4420 * However, existing code sends in some big buffers.
4421 */
4422 avail = iocp->ioc_count;
4423 if (mp1->b_cont) {
4424 freemsg(mp1->b_cont);
4425 mp1->b_cont = NULL;
4426 }
4427
4428 mp1->b_datap->db_lim[-1] = '\0'; /* Force null termination */
4429 valp = (char *)mp1->b_rptr;
4430
4431 for (nde = nd->nd_tbl; /* */; nde++) {
4432 if (!nde->nde_name)
4433 return (B_FALSE);
4434 if (strcmp(nde->nde_name, valp) == 0)
4435 break;
4436 }
4437 err = EINVAL;
4438
4439 while (*valp++)
4440 ;
4441
4442 if (!*valp || valp >= (char *)mp1->b_wptr)
4443 valp = NULL;
4444
4445 switch (iocp->ioc_cmd) {
4446 case ND_GET:
4447 /*
4448 * (XXX) hack: "*valp" is size of user buffer for copyout. If result
4449 * of action routine is too big, free excess and return ioc_rval as buf
4450 * size needed. Return as many mblocks as will fit, free the rest. For
4451 * backward compatibility, assume size of orig ioctl buffer if "*valp"
4452 * bad or not given.
4453 */
4454 if (valp)
4455 (void) ddi_strtol(valp, NULL, 10, (long *)&avail);
4456 /* We overwrite the name/value with the reply data */
4457 {
4458 mblk_t *mp2 = mp1;
4459
4460 while (mp2) {
4461 mp2->b_wptr = mp2->b_rptr;
4462 mp2 = mp2->b_cont;
4463 }
4464 }
4465 err = (*nde->nde_get_pfi)(q, mp1, nde->nde_data, iocp->ioc_cr);
4466 if (!err) {
4467 size_t size_out;
4468 ssize_t excess;
4469
4470 iocp->ioc_rval = 0;
4471
4472 /* Tack on the null */
4473 err = eri_mk_mblk_tail_space(mp1, &nmp, 1);
4474 if (!err) {
4475 *nmp->b_wptr++ = '\0';
4476 size_out = msgdsize(mp1);
4477 excess = size_out - avail;
4478 if (excess > 0) {
4479 iocp->ioc_rval = (unsigned)size_out;
4480 size_out -= excess;
4481 (void) adjmsg(mp1, -(excess + 1));
4482 err = eri_mk_mblk_tail_space(mp1,
4483 &nmp, 1);
4484 if (!err)
4485 *nmp->b_wptr++ = '\0';
4486 else
4487 size_out = 0;
4488 }
4489
4490 } else
4491 size_out = 0;
4492
4493 iocp->ioc_count = size_out;
4494 }
4495 break;
4496
4497 case ND_SET:
4498 if (valp) {
4499 err = (*nde->nde_set_pfi)(q, mp1, valp,
4500 nde->nde_data, iocp->ioc_cr);
4501 iocp->ioc_count = 0;
4502 freemsg(mp1);
4503 mp->b_cont = NULL;
4504 }
4505 break;
4506 }
4507
4508 iocp->ioc_error = err;
4509 mp->b_datap->db_type = M_IOCACK;
4510 return (B_TRUE);
4511 }
4512
4513 /*
4514 * Load 'name' into the named dispatch table pointed to by 'ndp'.
4515 * 'ndp' should be the address of a char pointer cell. If the table
4516 * does not exist (*ndp == 0), a new table is allocated and 'ndp'
4517 * is stuffed. If there is not enough space in the table for a new
4518 * entry, more space is allocated.
4519 */
4520 static boolean_t
4521 eri_nd_load(caddr_t *nd_pparam, char *name, pfi_t get_pfi,
4522 pfi_t set_pfi, caddr_t data)
4523 {
4524 ND *nd;
4525 NDE *nde;
4526
4527 if (!nd_pparam)
4528 return (B_FALSE);
4529
4530 if ((nd = (void *)(*nd_pparam)) == NULL) {
4531 if ((nd = (ND *)kmem_zalloc(sizeof (ND), KM_NOSLEEP))
4532 == NULL)
4533 return (B_FALSE);
4534 *nd_pparam = (caddr_t)nd;
4535 }
4536 if (nd->nd_tbl) {
4537 for (nde = nd->nd_tbl; nde->nde_name; nde++) {
4538 if (strcmp(name, nde->nde_name) == 0)
4539 goto fill_it;
4540 }
4541 }
4542 if (nd->nd_free_count <= 1) {
4543 if ((nde = (NDE *)kmem_zalloc(nd->nd_size +
4544 NDE_ALLOC_SIZE, KM_NOSLEEP)) == NULL)
4545 return (B_FALSE);
4546
4547 nd->nd_free_count += NDE_ALLOC_COUNT;
4548 if (nd->nd_tbl) {
4549 bcopy((char *)nd->nd_tbl, (char *)nde, nd->nd_size);
4550 kmem_free((char *)nd->nd_tbl, nd->nd_size);
4551 } else {
4552 nd->nd_free_count--;
4553 nde->nde_name = "?";
4554 nde->nde_get_pfi = nd_get_names;
4555 nde->nde_set_pfi = nd_set_default;
4556 }
4557 nde->nde_data = (caddr_t)nd;
4558 nd->nd_tbl = nde;
4559 nd->nd_size += NDE_ALLOC_SIZE;
4560 }
4561 for (nde = nd->nd_tbl; nde->nde_name; nde++)
4562 ;
4563 nd->nd_free_count--;
4564 fill_it:
4565 nde->nde_name = name;
4566 nde->nde_get_pfi = get_pfi ? get_pfi : nd_get_default;
4567 nde->nde_set_pfi = set_pfi ? set_pfi : nd_set_default;
4568 nde->nde_data = data;
4569 return (B_TRUE);
4570 }
4571
4572 /*
4573 * Hardening Functions
4574 * New Section
4575 */
4576 #ifdef DEBUG
4577 /*PRINTFLIKE5*/
4578 static void
4579 eri_debug_msg(const char *file, int line, struct eri *erip,
4580 debug_msg_t type, const char *fmt, ...)
4581 {
4582 char msg_buffer[255];
4583 va_list ap;
4584
4585 va_start(ap, fmt);
4586 (void) vsprintf(msg_buffer, fmt, ap);
4587 va_end(ap);
4588
4589 if (eri_msg_out & ERI_CON_MSG) {
4590 if (((type <= eri_debug_level) && eri_debug_all) ||
4591 ((type == eri_debug_level) && !eri_debug_all)) {
4592 if (erip)
4593 cmn_err(CE_CONT, "D: %s %s%d:(%s%d) %s\n",
4594 debug_msg_string[type], file, line,
4595 ddi_driver_name(erip->dip), erip->instance,
4596 msg_buffer);
4597 else
4598 cmn_err(CE_CONT, "D: %s %s(%d): %s\n",
4599 debug_msg_string[type], file,
4600 line, msg_buffer);
4601 }
4602 }
4603 }
4604 #endif
4605
4606
4607 /*PRINTFLIKE4*/
4608 static void
4609 eri_fault_msg(struct eri *erip, uint_t severity, msg_t type,
4610 const char *fmt, ...)
4611 {
4612 char msg_buffer[255];
4613 va_list ap;
4614
4615 va_start(ap, fmt);
4616 (void) vsprintf(msg_buffer, fmt, ap);
4617 va_end(ap);
4618
4619 if (erip == NULL) {
4620 cmn_err(CE_NOTE, "eri : %s", msg_buffer);
4621 return;
4622 }
4623
4624 if (severity == SEVERITY_HIGH) {
4625 cmn_err(CE_WARN, "%s%d : %s", ddi_driver_name(erip->dip),
4626 erip->instance, msg_buffer);
4627 } else switch (type) {
4628 case ERI_VERB_MSG:
4629 cmn_err(CE_CONT, "?%s%d : %s", ddi_driver_name(erip->dip),
4630 erip->instance, msg_buffer);
4631 break;
4632 case ERI_LOG_MSG:
4633 cmn_err(CE_NOTE, "^%s%d : %s", ddi_driver_name(erip->dip),
4634 erip->instance, msg_buffer);
4635 break;
4636 case ERI_BUF_MSG:
4637 cmn_err(CE_NOTE, "!%s%d : %s", ddi_driver_name(erip->dip),
4638 erip->instance, msg_buffer);
4639 break;
4640 case ERI_CON_MSG:
4641 cmn_err(CE_CONT, "%s%d : %s", ddi_driver_name(erip->dip),
4642 erip->instance, msg_buffer);
4643 default:
4644 break;
4645 }
4646 }
4647
4648 /*
4649 * Transceiver (xcvr) Functions
4650 * New Section
4651 */
4652 /*
4653 * eri_stop_timer function is used by a function before doing link-related
4654 * processing. It locks the "linklock" to protect the link-related data
4655 * structures. This lock will be subsequently released in eri_start_timer().
4656 */
4657 static void
4658 eri_stop_timer(struct eri *erip)
4659 {
4660 timeout_id_t id;
4661 mutex_enter(&erip->linklock);
4662 if (erip->timerid) {
4663 erip->flags |= ERI_NOTIMEOUTS; /* prevent multiple timeout */
4664 id = erip->timerid;
4665 erip->timerid = 0; /* prevent other thread do untimeout */
4666 mutex_exit(&erip->linklock); /* no mutex across untimeout() */
4667
4668 (void) untimeout(id);
4669 mutex_enter(&erip->linklock); /* acquire mutex again */
4670 erip->flags &= ~ERI_NOTIMEOUTS;
4671 }
4672 }
4673
4674 /*
4675 * If msec parameter is zero, just release "linklock".
4676 */
4677 static void
4678 eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec)
4679 {
4680 if (msec) {
4681 if (!(erip->flags & ERI_NOTIMEOUTS) &&
4682 (erip->flags & ERI_RUNNING)) {
4683 erip->timerid = timeout(func, (caddr_t)erip,
4684 drv_usectohz(1000*msec));
4685 }
4686 }
4687
4688 mutex_exit(&erip->linklock);
4689 }
4690
4691 static int
4692 eri_new_xcvr(struct eri *erip)
4693 {
4694 int status;
4695 uint32_t cfg;
4696 int old_transceiver;
4697
4698 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4699 PCI_PM_IDLESPEED_NONE) == DDI_SUCCESS)
4700 erip->stats.pmcap = ERI_PMCAP_NONE;
4701
4702 status = B_FALSE; /* no change */
4703 cfg = GET_MIFREG(mif_cfg);
4704 ERI_DEBUG_MSG2(erip, MIF_MSG, "cfg value = %X", cfg);
4705 old_transceiver = param_transceiver;
4706
4707 if ((cfg & ERI_MIF_CFGM1) && !use_int_xcvr) {
4708 ERI_DEBUG_MSG1(erip, PHY_MSG, "Found External XCVR");
4709 /*
4710 * An External Transceiver was found and it takes priority
4711 * over an internal, given the use_int_xcvr flag
4712 * is false.
4713 */
4714 if (old_transceiver != EXTERNAL_XCVR) {
4715 /*
4716 * External transceiver has just been plugged
4717 * in. Isolate the internal Transceiver.
4718 */
4719 if (old_transceiver == INTERNAL_XCVR) {
4720 eri_mii_write(erip, ERI_PHY_BMCR,
4721 (PHY_BMCR_ISOLATE | PHY_BMCR_PWRDN |
4722 PHY_BMCR_LPBK));
4723 }
4724 status = B_TRUE;
4725 }
4726 /*
4727 * Select the external Transceiver.
4728 */
4729 erip->phyad = ERI_EXTERNAL_PHYAD;
4730 param_transceiver = EXTERNAL_XCVR;
4731 erip->mif_config &= ~ERI_MIF_CFGPD;
4732 erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4733 erip->mif_config |= ERI_MIF_CFGPS;
4734 PUT_MIFREG(mif_cfg, erip->mif_config);
4735
4736 PUT_MACREG(xifc, GET_MACREG(xifc) | BMAC_XIFC_MIIBUF_OE);
4737 drv_usecwait(ERI_MIF_POLL_DELAY);
4738 } else if (cfg & ERI_MIF_CFGM0) {
4739 ERI_DEBUG_MSG1(erip, PHY_MSG, "Found Internal XCVR");
4740 /*
4741 * An Internal Transceiver was found or the
4742 * use_int_xcvr flag is true.
4743 */
4744 if (old_transceiver != INTERNAL_XCVR) {
4745 /*
4746 * The external transceiver has just been
4747 * disconnected or we're moving from a no
4748 * transceiver state.
4749 */
4750 if ((old_transceiver == EXTERNAL_XCVR) &&
4751 (cfg & ERI_MIF_CFGM0)) {
4752 eri_mii_write(erip, ERI_PHY_BMCR,
4753 (PHY_BMCR_ISOLATE | PHY_BMCR_PWRDN |
4754 PHY_BMCR_LPBK));
4755 }
4756 status = B_TRUE;
4757 }
4758 /*
4759 * Select the internal transceiver.
4760 */
4761 erip->phyad = ERI_INTERNAL_PHYAD;
4762 param_transceiver = INTERNAL_XCVR;
4763 erip->mif_config &= ~ERI_MIF_CFGPD;
4764 erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4765 erip->mif_config &= ~ERI_MIF_CFGPS;
4766 PUT_MIFREG(mif_cfg, erip->mif_config);
4767
4768 PUT_MACREG(xifc, GET_MACREG(xifc) & ~ BMAC_XIFC_MIIBUF_OE);
4769 drv_usecwait(ERI_MIF_POLL_DELAY);
4770 } else {
4771 /*
4772 * Did not find a valid xcvr.
4773 */
4774 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
4775 "Eri_new_xcvr : Select None");
4776 param_transceiver = NO_XCVR;
4777 erip->xcvr_status = PHY_LINK_DOWN;
4778 }
4779
4780 if (erip->stats.pmcap == ERI_PMCAP_NONE) {
4781 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4782 (void *)4000) == DDI_SUCCESS)
4783 erip->stats.pmcap = ERI_PMCAP_4MHZ;
4784 }
4785
4786 return (status);
4787 }
4788
4789 /*
4790 * This function is used for timers. No locks are held on timer expiry.
4791 */
4792 static void
4793 eri_check_link(struct eri *erip)
4794 {
4795 link_state_t linkupdate = eri_check_link_noind(erip);
4796
4797 if (linkupdate != LINK_STATE_UNKNOWN)
4798 mac_link_update(erip->mh, linkupdate);
4799 }
4800
4801 /*
4802 * Compare our xcvr in our structure to the xcvr that we get from
4803 * eri_check_mii_xcvr(). If they are different then mark the
4804 * link down, reset xcvr, and return.
4805 *
4806 * Note without the MII connector, conditions can not change that
4807 * will then use a external phy, thus this code has been cleaned
4808 * to not even call the function or to possibly change the xcvr.
4809 */
4810 static uint32_t
4811 eri_check_link_noind(struct eri *erip)
4812 {
4813 uint16_t stat, control, mif_ints;
4814 uint32_t link_timeout = ERI_LINKCHECK_TIMER;
4815 uint32_t linkupdate = 0;
4816
4817 eri_stop_timer(erip); /* acquire linklock */
4818
4819 mutex_enter(&erip->xmitlock);
4820 mutex_enter(&erip->xcvrlock);
4821 eri_mif_poll(erip, MIF_POLL_STOP);
4822
4823 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
4824 mif_ints = erip->mii_status ^ stat;
4825
4826 if (erip->openloop_autoneg) {
4827 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
4828 ERI_DEBUG_MSG3(erip, XCVR_MSG,
4829 "eri_check_link:openloop stat %X mii_status %X",
4830 stat, erip->mii_status);
4831 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
4832 if (!(stat & PHY_BMSR_LNKSTS) &&
4833 (erip->openloop_autoneg < 2)) {
4834 if (param_speed) {
4835 control &= ~PHY_BMCR_100M;
4836 param_anlpar_100hdx = 0;
4837 param_anlpar_10hdx = 1;
4838 param_speed = 0;
4839 erip->stats.ifspeed = 10;
4840
4841 } else {
4842 control |= PHY_BMCR_100M;
4843 param_anlpar_100hdx = 1;
4844 param_anlpar_10hdx = 0;
4845 param_speed = 1;
4846 erip->stats.ifspeed = 100;
4847 }
4848 ERI_DEBUG_MSG3(erip, XCVR_MSG,
4849 "eri_check_link: trying speed %X stat %X",
4850 param_speed, stat);
4851
4852 erip->openloop_autoneg ++;
4853 eri_mii_write(erip, ERI_PHY_BMCR, control);
4854 link_timeout = ERI_P_FAULT_TIMER;
4855 } else {
4856 erip->openloop_autoneg = 0;
4857 linkupdate = eri_mif_check(erip, stat, stat);
4858 if (erip->openloop_autoneg)
4859 link_timeout = ERI_P_FAULT_TIMER;
4860 }
4861 eri_mif_poll(erip, MIF_POLL_START);
4862 mutex_exit(&erip->xcvrlock);
4863 mutex_exit(&erip->xmitlock);
4864
4865 eri_start_timer(erip, eri_check_link, link_timeout);
4866 return (linkupdate);
4867 }
4868
4869 linkupdate = eri_mif_check(erip, mif_ints, stat);
4870 eri_mif_poll(erip, MIF_POLL_START);
4871 mutex_exit(&erip->xcvrlock);
4872 mutex_exit(&erip->xmitlock);
4873
4874 #ifdef ERI_RMAC_HANG_WORKAROUND
4875 /*
4876 * Check if rx hung.
4877 */
4878 if ((erip->flags & ERI_RUNNING) && param_linkup) {
4879 if (erip->check_rmac_hang) {
4880 ERI_DEBUG_MSG5(erip,
4881 NONFATAL_MSG,
4882 "check1 %d: macsm:%8x wr:%2x rd:%2x",
4883 erip->check_rmac_hang,
4884 GET_MACREG(macsm),
4885 GET_ERXREG(rxfifo_wr_ptr),
4886 GET_ERXREG(rxfifo_rd_ptr));
4887
4888 erip->check_rmac_hang = 0;
4889 erip->check2_rmac_hang ++;
4890
4891 erip->rxfifo_wr_ptr_c = GET_ERXREG(rxfifo_wr_ptr);
4892 erip->rxfifo_rd_ptr_c = GET_ERXREG(rxfifo_rd_ptr);
4893
4894 eri_start_timer(erip, eri_check_link,
4895 ERI_CHECK_HANG_TIMER);
4896 return (linkupdate);
4897 }
4898
4899 if (erip->check2_rmac_hang) {
4900 ERI_DEBUG_MSG5(erip,
4901 NONFATAL_MSG,
4902 "check2 %d: macsm:%8x wr:%2x rd:%2x",
4903 erip->check2_rmac_hang,
4904 GET_MACREG(macsm),
4905 GET_ERXREG(rxfifo_wr_ptr),
4906 GET_ERXREG(rxfifo_rd_ptr));
4907
4908 erip->check2_rmac_hang = 0;
4909
4910 erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr);
4911 erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr);
4912
4913 if (((GET_MACREG(macsm) & BMAC_OVERFLOW_STATE) ==
4914 BMAC_OVERFLOW_STATE) &&
4915 ((erip->rxfifo_wr_ptr_c == erip->rxfifo_rd_ptr_c) ||
4916 ((erip->rxfifo_rd_ptr == erip->rxfifo_rd_ptr_c) &&
4917 (erip->rxfifo_wr_ptr == erip->rxfifo_wr_ptr_c)))) {
4918 ERI_DEBUG_MSG1(erip,
4919 NONFATAL_MSG,
4920 "RX hang: Reset mac");
4921
4922 HSTAT(erip, rx_hang);
4923 erip->linkcheck = 1;
4924
4925 eri_start_timer(erip, eri_check_link,
4926 ERI_LINKCHECK_TIMER);
4927 (void) eri_init(erip);
4928 return (linkupdate);
4929 }
4930 }
4931 }
4932 #endif
4933
4934 /*
4935 * Check if tx hung.
4936 */
4937 #ifdef ERI_TX_HUNG
4938 if ((erip->flags & ERI_RUNNING) && param_linkup &&
4939 (eri_check_txhung(erip))) {
4940 HSTAT(erip, tx_hang);
4941 eri_reinit_txhung++;
4942 erip->linkcheck = 1;
4943 eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER);
4944 (void) eri_init(erip);
4945 return (linkupdate);
4946 }
4947 #endif
4948
4949 #ifdef ERI_PM_WORKAROUND
4950 if (erip->stats.pmcap == ERI_PMCAP_NONE) {
4951 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4952 (void *)4000) == DDI_SUCCESS)
4953 erip->stats.pmcap = ERI_PMCAP_4MHZ;
4954
4955 ERI_DEBUG_MSG2(erip, NONFATAL_MSG,
4956 "eri_check_link: PMCAP %d", erip->stats.pmcap);
4957 }
4958 #endif
4959 if ((!param_mode) && (param_transceiver != NO_XCVR))
4960 eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER);
4961 else
4962 eri_start_timer(erip, eri_check_link, ERI_LINKCHECK_TIMER);
4963 return (linkupdate);
4964 }
4965
4966 static link_state_t
4967 eri_mif_check(struct eri *erip, uint16_t mif_ints, uint16_t mif_data)
4968 {
4969 uint16_t control, aner, anlpar, anar, an_common;
4970 uint16_t old_mintrans;
4971 int restart_autoneg = 0;
4972 link_state_t retv;
4973
4974 ERI_DEBUG_MSG4(erip, XCVR_MSG, "eri_mif_check: mif_mask: %X, %X, %X",
4975 erip->mif_mask, mif_ints, mif_data);
4976
4977 mif_ints &= ~erip->mif_mask;
4978 erip->mii_status = mif_data;
4979 /*
4980 * Now check if someone has pulled the xcvr or
4981 * a new xcvr has shown up
4982 * If so try to find out what the new xcvr setup is.
4983 */
4984 if (((mif_ints & PHY_BMSR_RES1) && (mif_data == 0xFFFF)) ||
4985 (param_transceiver == NO_XCVR)) {
4986 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
4987 "No status transceiver gone");
4988 if (eri_new_xcvr(erip)) {
4989 if (param_transceiver != NO_XCVR) {
4990 /*
4991 * Reset the new PHY and bring up the link
4992 */
4993 (void) eri_reset_xcvr(erip);
4994 }
4995 }
4996 return (LINK_STATE_UNKNOWN);
4997 }
4998
4999 if (param_autoneg && (mif_ints & PHY_BMSR_LNKSTS) &&
5000 (mif_data & PHY_BMSR_LNKSTS) && (mif_data & PHY_BMSR_ANC)) {
5001 mif_ints |= PHY_BMSR_ANC;
5002 ERI_DEBUG_MSG3(erip, PHY_MSG,
5003 "eri_mif_check: Set ANC bit mif_data %X mig_ints %X",
5004 mif_data, mif_ints);
5005 }
5006
5007 if ((mif_ints & PHY_BMSR_ANC) && (mif_data & PHY_BMSR_ANC)) {
5008 ERI_DEBUG_MSG1(erip, PHY_MSG, "Auto-negotiation interrupt.");
5009
5010 /*
5011 * Switch off Auto-negotiation interrupts and switch on
5012 * Link ststus interrupts.
5013 */
5014 erip->mif_mask |= PHY_BMSR_ANC;
5015 erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5016 (void) eri_mii_read(erip, ERI_PHY_ANER, &aner);
5017 param_aner_lpancap = 1 && (aner & PHY_ANER_LPNW);
5018 if ((aner & PHY_ANER_MLF) || (eri_force_mlf)) {
5019 ERI_DEBUG_MSG1(erip, XCVR_MSG,
5020 "parallel detection fault");
5021 /*
5022 * Consider doing open loop auto-negotiation.
5023 */
5024 ERI_DEBUG_MSG1(erip, XCVR_MSG,
5025 "Going into Open loop Auto-neg");
5026 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5027
5028 control &= ~(PHY_BMCR_ANE | PHY_BMCR_RAN |
5029 PHY_BMCR_FDX);
5030 if (param_anar_100fdx || param_anar_100hdx) {
5031 control |= PHY_BMCR_100M;
5032 param_anlpar_100hdx = 1;
5033 param_anlpar_10hdx = 0;
5034 param_speed = 1;
5035 erip->stats.ifspeed = 100;
5036
5037 } else if (param_anar_10fdx || param_anar_10hdx) {
5038 control &= ~PHY_BMCR_100M;
5039 param_anlpar_100hdx = 0;
5040 param_anlpar_10hdx = 1;
5041 param_speed = 0;
5042 erip->stats.ifspeed = 10;
5043 } else {
5044 ERI_FAULT_MSG1(erip, SEVERITY_NONE,
5045 ERI_VERB_MSG,
5046 "Transceiver speed set incorrectly.");
5047 return (0);
5048 }
5049
5050 (void) eri_mii_write(erip, ERI_PHY_BMCR, control);
5051 param_anlpar_100fdx = 0;
5052 param_anlpar_10fdx = 0;
5053 param_mode = 0;
5054 erip->openloop_autoneg = 1;
5055 return (0);
5056 }
5057 (void) eri_mii_read(erip, ERI_PHY_ANLPAR, &anlpar);
5058 (void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5059 an_common = anar & anlpar;
5060
5061 ERI_DEBUG_MSG2(erip, XCVR_MSG, "an_common = 0x%X", an_common);
5062
5063 if (an_common & (PHY_ANLPAR_TXFDX | PHY_ANLPAR_TX)) {
5064 param_speed = 1;
5065 erip->stats.ifspeed = 100;
5066 param_mode = 1 && (an_common & PHY_ANLPAR_TXFDX);
5067
5068 } else if (an_common & (PHY_ANLPAR_10FDX | PHY_ANLPAR_10)) {
5069 param_speed = 0;
5070 erip->stats.ifspeed = 10;
5071 param_mode = 1 && (an_common & PHY_ANLPAR_10FDX);
5072
5073 } else an_common = 0x0;
5074
5075 if (!an_common) {
5076 ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
5077 "Transceiver: anar not set with speed selection");
5078 }
5079 param_anlpar_100T4 = 1 && (anlpar & PHY_ANLPAR_T4);
5080 param_anlpar_100fdx = 1 && (anlpar & PHY_ANLPAR_TXFDX);
5081 param_anlpar_100hdx = 1 && (anlpar & PHY_ANLPAR_TX);
5082 param_anlpar_10fdx = 1 && (anlpar & PHY_ANLPAR_10FDX);
5083 param_anlpar_10hdx = 1 && (anlpar & PHY_ANLPAR_10);
5084
5085 ERI_DEBUG_MSG2(erip, PHY_MSG,
5086 "Link duplex = 0x%X", param_mode);
5087 ERI_DEBUG_MSG2(erip, PHY_MSG,
5088 "Link speed = 0x%X", param_speed);
5089 /* mif_ints |= PHY_BMSR_LNKSTS; prevent double msg */
5090 /* mif_data |= PHY_BMSR_LNKSTS; prevent double msg */
5091 }
5092 retv = LINK_STATE_UNKNOWN;
5093 if (mif_ints & PHY_BMSR_LNKSTS) {
5094 if (mif_data & PHY_BMSR_LNKSTS) {
5095 ERI_DEBUG_MSG1(erip, PHY_MSG, "Link Up");
5096 /*
5097 * Program Lu3X31T for mininum transition
5098 */
5099 if (eri_phy_mintrans) {
5100 eri_mii_write(erip, 31, 0x8000);
5101 (void) eri_mii_read(erip, 0, &old_mintrans);
5102 eri_mii_write(erip, 0, 0x00F1);
5103 eri_mii_write(erip, 31, 0x0000);
5104 }
5105 /*
5106 * The link is up.
5107 */
5108 eri_init_txmac(erip);
5109 param_linkup = 1;
5110 erip->stats.link_up = LINK_STATE_UP;
5111 if (param_mode)
5112 erip->stats.link_duplex = LINK_DUPLEX_FULL;
5113 else
5114 erip->stats.link_duplex = LINK_DUPLEX_HALF;
5115
5116 retv = LINK_STATE_UP;
5117 } else {
5118 ERI_DEBUG_MSG1(erip, PHY_MSG, "Link down.");
5119 param_linkup = 0;
5120 erip->stats.link_up = LINK_STATE_DOWN;
5121 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5122 retv = LINK_STATE_DOWN;
5123 if (param_autoneg) {
5124 restart_autoneg = 1;
5125 }
5126 }
5127 } else {
5128 if (mif_data & PHY_BMSR_LNKSTS) {
5129 if (!param_linkup) {
5130 ERI_DEBUG_MSG1(erip, PHY_MSG,
5131 "eri_mif_check: MIF data link up");
5132 /*
5133 * Program Lu3X31T for minimum transition
5134 */
5135 if (eri_phy_mintrans) {
5136 eri_mii_write(erip, 31, 0x8000);
5137 (void) eri_mii_read(erip, 0,
5138 &old_mintrans);
5139 eri_mii_write(erip, 0, 0x00F1);
5140 eri_mii_write(erip, 31, 0x0000);
5141 }
5142 /*
5143 * The link is up.
5144 */
5145 eri_init_txmac(erip);
5146
5147 param_linkup = 1;
5148 erip->stats.link_up = LINK_STATE_UP;
5149 if (param_mode)
5150 erip->stats.link_duplex =
5151 LINK_DUPLEX_FULL;
5152 else
5153 erip->stats.link_duplex =
5154 LINK_DUPLEX_HALF;
5155
5156 retv = LINK_STATE_UP;
5157 }
5158 } else if (param_linkup) {
5159 /*
5160 * The link is down now.
5161 */
5162 ERI_DEBUG_MSG1(erip, PHY_MSG,
5163 "eri_mif_check:Link was up and went down");
5164 param_linkup = 0;
5165 erip->stats.link_up = LINK_STATE_DOWN;
5166 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5167 retv = LINK_STATE_DOWN;
5168 if (param_autoneg)
5169 restart_autoneg = 1;
5170 }
5171 }
5172 if (restart_autoneg) {
5173 /*
5174 * Restart normal auto-negotiation.
5175 */
5176 ERI_DEBUG_MSG1(erip, PHY_MSG,
5177 "eri_mif_check:Restart AUto Negotiation");
5178 erip->openloop_autoneg = 0;
5179 param_mode = 0;
5180 param_speed = 0;
5181 param_anlpar_100T4 = 0;
5182 param_anlpar_100fdx = 0;
5183 param_anlpar_100hdx = 0;
5184 param_anlpar_10fdx = 0;
5185 param_anlpar_10hdx = 0;
5186 param_aner_lpancap = 0;
5187 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5188 control |= (PHY_BMCR_ANE | PHY_BMCR_RAN);
5189 eri_mii_write(erip, ERI_PHY_BMCR, control);
5190 }
5191 if (mif_ints & PHY_BMSR_JABDET) {
5192 if (mif_data & PHY_BMSR_JABDET) {
5193 ERI_DEBUG_MSG1(erip, PHY_MSG, "Jabber detected.");
5194 HSTAT(erip, jab);
5195 /*
5196 * Reset the new PHY and bring up the link
5197 * (Check for failure?)
5198 */
5199 (void) eri_reset_xcvr(erip);
5200 }
5201 }
5202 return (retv);
5203 }
5204
5205 #define PHYRST_PERIOD 500
5206 static int
5207 eri_reset_xcvr(struct eri *erip)
5208 {
5209 uint16_t stat;
5210 uint16_t anar;
5211 uint16_t control;
5212 uint16_t idr1;
5213 uint16_t idr2;
5214 uint16_t nicr;
5215 uint32_t speed_100;
5216 uint32_t speed_10;
5217 int n;
5218
5219 #ifdef ERI_10_10_FORCE_SPEED_WORKAROUND
5220 erip->ifspeed_old = erip->stats.ifspeed;
5221 #endif
5222 /*
5223 * Reset Open loop auto-negotiation this means you can try
5224 * Normal auto-negotiation, until you get a Multiple Link fault
5225 * at which point you try 100M half duplex then 10M half duplex
5226 * until you get a Link up.
5227 */
5228 erip->openloop_autoneg = 0;
5229
5230 /*
5231 * Reset the xcvr.
5232 */
5233 eri_mii_write(erip, ERI_PHY_BMCR, PHY_BMCR_RESET);
5234
5235 /* Check for transceiver reset completion */
5236
5237 n = 1000;
5238 while (--n > 0) {
5239 drv_usecwait((clock_t)PHYRST_PERIOD);
5240 if (eri_mii_read(erip, ERI_PHY_BMCR, &control) == 1) {
5241 /* Transceiver does not talk MII */
5242 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5243 "eri_reset_xcvr: no mii");
5244 }
5245 if ((control & PHY_BMCR_RESET) == 0)
5246 goto reset_done;
5247 }
5248 ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
5249 "eri_reset_xcvr:reset_failed n == 0, control %x", control);
5250 goto eri_reset_xcvr_failed;
5251
5252 reset_done:
5253
5254 ERI_DEBUG_MSG2(erip, AUTOCONFIG_MSG,
5255 "eri_reset_xcvr: reset complete in %d us",
5256 (1000 - n) * PHYRST_PERIOD);
5257
5258 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5259 (void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5260 (void) eri_mii_read(erip, ERI_PHY_IDR1, &idr1);
5261 (void) eri_mii_read(erip, ERI_PHY_IDR2, &idr2);
5262
5263 ERI_DEBUG_MSG4(erip, XCVR_MSG,
5264 "eri_reset_xcvr: control %x stat %x anar %x", control, stat, anar);
5265
5266 /*
5267 * Initialize the read only transceiver ndd information
5268 * the values are either 0 or 1.
5269 */
5270 param_bmsr_ancap = 1 && (stat & PHY_BMSR_ACFG);
5271 param_bmsr_100T4 = 1 && (stat & PHY_BMSR_100T4);
5272 param_bmsr_100fdx = 1 && (stat & PHY_BMSR_100FDX);
5273 param_bmsr_100hdx = 1 && (stat & PHY_BMSR_100HDX);
5274 param_bmsr_10fdx = 1 && (stat & PHY_BMSR_10FDX);
5275 param_bmsr_10hdx = 1 && (stat & PHY_BMSR_10HDX);
5276
5277 /*
5278 * Match up the ndd capabilities with the transceiver.
5279 */
5280 param_autoneg &= param_bmsr_ancap;
5281 param_anar_100fdx &= param_bmsr_100fdx;
5282 param_anar_100hdx &= param_bmsr_100hdx;
5283 param_anar_10fdx &= param_bmsr_10fdx;
5284 param_anar_10hdx &= param_bmsr_10hdx;
5285
5286 /*
5287 * Select the operation mode of the transceiver.
5288 */
5289 if (param_autoneg) {
5290 /*
5291 * Initialize our auto-negotiation capabilities.
5292 */
5293 anar = PHY_SELECTOR;
5294 if (param_anar_100T4)
5295 anar |= PHY_ANAR_T4;
5296 if (param_anar_100fdx)
5297 anar |= PHY_ANAR_TXFDX;
5298 if (param_anar_100hdx)
5299 anar |= PHY_ANAR_TX;
5300 if (param_anar_10fdx)
5301 anar |= PHY_ANAR_10FDX;
5302 if (param_anar_10hdx)
5303 anar |= PHY_ANAR_10;
5304 ERI_DEBUG_MSG2(erip, XCVR_MSG, "anar = %x", anar);
5305 eri_mii_write(erip, ERI_PHY_ANAR, anar);
5306 }
5307
5308 /* Place the Transceiver in normal operation mode */
5309 if ((control & PHY_BMCR_ISOLATE) || (control & PHY_BMCR_LPBK)) {
5310 control &= ~(PHY_BMCR_ISOLATE | PHY_BMCR_LPBK);
5311 eri_mii_write(erip, ERI_PHY_BMCR,
5312 (control & ~PHY_BMCR_ISOLATE));
5313 }
5314
5315 /*
5316 * If Lu3X31T then allow nonzero eri_phy_mintrans
5317 */
5318 if (eri_phy_mintrans &&
5319 (idr1 != 0x43 || (idr2 & 0xFFF0) != 0x7420)) {
5320 eri_phy_mintrans = 0;
5321 }
5322 /*
5323 * Initialize the mif interrupt mask.
5324 */
5325 erip->mif_mask = (uint16_t)(~PHY_BMSR_RES1);
5326
5327 /*
5328 * Establish link speeds and do necessary special stuff based
5329 * in the speed.
5330 */
5331 speed_100 = param_anar_100fdx | param_anar_100hdx;
5332 speed_10 = param_anar_10fdx | param_anar_10hdx;
5333
5334 ERI_DEBUG_MSG5(erip, XCVR_MSG, "eri_reset_xcvr: %d %d %d %d",
5335 param_anar_100fdx, param_anar_100hdx, param_anar_10fdx,
5336 param_anar_10hdx);
5337
5338 ERI_DEBUG_MSG3(erip, XCVR_MSG,
5339 "eri_reset_xcvr: speed_100 %d speed_10 %d", speed_100, speed_10);
5340
5341 if ((!speed_100) && (speed_10)) {
5342 erip->mif_mask &= ~PHY_BMSR_JABDET;
5343 if (!(param_anar_10fdx) &&
5344 (param_anar_10hdx) &&
5345 (erip->link_pulse_disabled)) {
5346 param_speed = 0;
5347 param_mode = 0;
5348 (void) eri_mii_read(erip, ERI_PHY_NICR, &nicr);
5349 nicr &= ~PHY_NICR_LD;
5350 eri_mii_write(erip, ERI_PHY_NICR, nicr);
5351 param_linkup = 1;
5352 erip->stats.link_up = LINK_STATE_UP;
5353 if (param_mode)
5354 erip->stats.link_duplex = LINK_DUPLEX_FULL;
5355 else
5356 erip->stats.link_duplex = LINK_DUPLEX_HALF;
5357 }
5358 }
5359
5360 /*
5361 * Clear the autonegotitation before re-starting
5362 */
5363 control = PHY_BMCR_100M | PHY_BMCR_FDX;
5364 /* eri_mii_write(erip, ERI_PHY_BMCR, control); */
5365 if (param_autoneg) {
5366 /*
5367 * Setup the transceiver for autonegotiation.
5368 */
5369 erip->mif_mask &= ~PHY_BMSR_ANC;
5370
5371 /*
5372 * Clear the Auto-negotiation before re-starting
5373 */
5374 eri_mii_write(erip, ERI_PHY_BMCR, control & ~PHY_BMCR_ANE);
5375
5376 /*
5377 * Switch on auto-negotiation.
5378 */
5379 control |= (PHY_BMCR_ANE | PHY_BMCR_RAN);
5380
5381 eri_mii_write(erip, ERI_PHY_BMCR, control);
5382 } else {
5383 /*
5384 * Force the transceiver.
5385 */
5386 erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5387
5388 /*
5389 * Switch off auto-negotiation.
5390 */
5391 control &= ~(PHY_BMCR_FDX | PHY_BMCR_ANE | PHY_BMCR_RAN);
5392
5393 if (speed_100) {
5394 control |= PHY_BMCR_100M;
5395 param_aner_lpancap = 0; /* Clear LP nway */
5396 param_anlpar_10fdx = 0;
5397 param_anlpar_10hdx = 0;
5398 param_anlpar_100T4 = param_anar_100T4;
5399 param_anlpar_100fdx = param_anar_100fdx;
5400 param_anlpar_100hdx = param_anar_100hdx;
5401 param_speed = 1;
5402 erip->stats.ifspeed = 100;
5403 param_mode = param_anar_100fdx;
5404 if (param_mode) {
5405 param_anlpar_100hdx = 0;
5406 erip->stats.link_duplex = LINK_DUPLEX_FULL;
5407 } else {
5408 erip->stats.link_duplex = LINK_DUPLEX_HALF;
5409 }
5410 } else if (speed_10) {
5411 control &= ~PHY_BMCR_100M;
5412 param_aner_lpancap = 0; /* Clear LP nway */
5413 param_anlpar_100fdx = 0;
5414 param_anlpar_100hdx = 0;
5415 param_anlpar_100T4 = 0;
5416 param_anlpar_10fdx = param_anar_10fdx;
5417 param_anlpar_10hdx = param_anar_10hdx;
5418 param_speed = 0;
5419 erip->stats.ifspeed = 10;
5420 param_mode = param_anar_10fdx;
5421 if (param_mode) {
5422 param_anlpar_10hdx = 0;
5423 erip->stats.link_duplex = LINK_DUPLEX_FULL;
5424 } else {
5425 erip->stats.link_duplex = LINK_DUPLEX_HALF;
5426 }
5427 } else {
5428 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5429 "Transceiver speed set incorrectly.");
5430 }
5431
5432 if (param_mode) {
5433 control |= PHY_BMCR_FDX;
5434 }
5435
5436 ERI_DEBUG_MSG4(erip, PHY_MSG,
5437 "control = %x status = %x param_mode %d",
5438 control, stat, param_mode);
5439
5440 eri_mii_write(erip, ERI_PHY_BMCR, control);
5441 /*
5442 * if (param_mode) {
5443 * control |= PHY_BMCR_FDX;
5444 * }
5445 * control &= ~(PHY_BMCR_FDX | PHY_BMCR_ANE | PHY_BMCR_RAN);
5446 * eri_mii_write(erip, ERI_PHY_BMCR, control);
5447 */
5448 }
5449
5450 #ifdef DEBUG
5451 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5452 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5453 (void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5454 #endif
5455 ERI_DEBUG_MSG4(erip, PHY_MSG,
5456 "control %X status %X anar %X", control, stat, anar);
5457
5458 eri_reset_xcvr_exit:
5459 return (0);
5460
5461 eri_reset_xcvr_failed:
5462 return (1);
5463 }
5464
5465 #ifdef ERI_10_10_FORCE_SPEED_WORKAROUND
5466
5467 static void
5468 eri_xcvr_force_mode(struct eri *erip, uint32_t *link_timeout)
5469 {
5470
5471 if (!param_autoneg && !param_linkup && (erip->stats.ifspeed == 10) &&
5472 (param_anar_10fdx | param_anar_10hdx)) {
5473 *link_timeout = SECOND(1);
5474 return;
5475 }
5476
5477 if (!param_autoneg && !param_linkup && (erip->ifspeed_old == 10) &&
5478 (param_anar_100fdx | param_anar_100hdx)) {
5479 /*
5480 * May have to set link partner's speed and mode.
5481 */
5482 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_LOG_MSG,
5483 "May have to set link partner's speed and duplex mode.");
5484 }
5485 }
5486 #endif
5487
5488 static void
5489 eri_mif_poll(struct eri *erip, soft_mif_enable_t enable)
5490 {
5491 if (enable == MIF_POLL_START) {
5492 if (erip->mifpoll_enable && !erip->openloop_autoneg) {
5493 erip->mif_config |= ERI_MIF_CFGPE;
5494 PUT_MIFREG(mif_cfg, erip->mif_config);
5495 drv_usecwait(ERI_MIF_POLL_DELAY);
5496 PUT_GLOBREG(intmask, GET_GLOBREG(intmask) &
5497 ~ERI_G_MASK_MIF_INT);
5498 PUT_MIFREG(mif_imask, erip->mif_mask);
5499 }
5500 } else if (enable == MIF_POLL_STOP) {
5501 erip->mif_config &= ~ERI_MIF_CFGPE;
5502 PUT_MIFREG(mif_cfg, erip->mif_config);
5503 drv_usecwait(ERI_MIF_POLL_DELAY);
5504 PUT_GLOBREG(intmask, GET_GLOBREG(intmask) |
5505 ERI_G_MASK_MIF_INT);
5506 PUT_MIFREG(mif_imask, ERI_MIF_INTMASK);
5507 }
5508 ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF Config = 0x%X",
5509 GET_MIFREG(mif_cfg));
5510 ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF imask = 0x%X",
5511 GET_MIFREG(mif_imask));
5512 ERI_DEBUG_MSG2(erip, XCVR_MSG, "INT imask = 0x%X",
5513 GET_GLOBREG(intmask));
5514 ERI_DEBUG_MSG1(erip, XCVR_MSG, "<== mif_poll");
5515 }
5516
5517 /* Decide if transmitter went dead and reinitialize everything */
5518 #ifdef ERI_TX_HUNG
5519 static int eri_txhung_limit = 2;
5520 static int
5521 eri_check_txhung(struct eri *erip)
5522 {
5523 boolean_t macupdate = B_FALSE;
5524
5525 mutex_enter(&erip->xmitlock);
5526 if (erip->flags & ERI_RUNNING)
5527 erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
5528 ETX_COMPLETION_MASK);
5529 macupdate |= eri_reclaim(erip, erip->tx_completion);
5530
5531 /* Something needs to be sent out but it is not going out */
5532 if ((erip->tcurp != erip->tnextp) &&
5533 (erip->stats.opackets64 == erip->erisave.reclaim_opackets) &&
5534 (erip->stats.collisions == erip->erisave.starts))
5535 erip->txhung++;
5536 else
5537 erip->txhung = 0;
5538
5539 erip->erisave.reclaim_opackets = erip->stats.opackets64;
5540 erip->erisave.starts = erip->stats.collisions;
5541 mutex_exit(&erip->xmitlock);
5542
5543 if (macupdate)
5544 mac_tx_update(erip->mh);
5545
5546 return (erip->txhung >= eri_txhung_limit);
5547 }
5548 #endif