re #13613 rb4516 Tunables needs volatile keyword
--- old/usr/src/uts/sun/io/eri/eri.c
+++ new/usr/src/uts/sun/io/eri/eri.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 +/*
26 + * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
27 + */
25 28
26 29 /*
27 30 * SunOS MT STREAMS ERI(PCI) 10/100 Mb Ethernet Device Driver
28 31 */
29 32
30 33 #include <sys/types.h>
31 34 #include <sys/debug.h>
32 35 #include <sys/stropts.h>
33 36 #include <sys/stream.h>
34 37 #include <sys/strsubr.h>
35 38 #include <sys/kmem.h>
36 39 #include <sys/crc32.h>
37 40 #include <sys/ddi.h>
38 41 #include <sys/sunddi.h>
39 42 #include <sys/strsun.h>
40 43 #include <sys/stat.h>
41 44 #include <sys/cpu.h>
42 45 #include <sys/kstat.h>
43 46 #include <inet/common.h>
44 47 #include <sys/pattr.h>
45 48 #include <inet/mi.h>
46 49 #include <inet/nd.h>
47 50 #include <sys/ethernet.h>
48 51 #include <sys/vlan.h>
49 52 #include <sys/policy.h>
50 53 #include <sys/mac_provider.h>
51 54 #include <sys/mac_ether.h>
52 55 #include <sys/dlpi.h>
53 56
54 57 #include <sys/pci.h>
55 58
56 59 #include "eri_phy.h"
57 60 #include "eri_mac.h"
58 61 #include "eri.h"
59 62 #include "eri_common.h"
60 63
61 64 #include "eri_msg.h"
62 65
63 66 /*
64 67 * **** Function Prototypes *****
65 68 */
66 69 /*
67 70 * Entry points (man9e)
68 71 */
69 72 static int eri_attach(dev_info_t *, ddi_attach_cmd_t);
70 73 static int eri_detach(dev_info_t *, ddi_detach_cmd_t);
71 74 static uint_t eri_intr(caddr_t);
72 75
73 76 /*
74 77 * I/O (Input/Output) Functions
75 78 */
76 79 static boolean_t eri_send_msg(struct eri *, mblk_t *);
77 80 static mblk_t *eri_read_dma(struct eri *, volatile struct rmd *,
78 81 volatile int, uint64_t flags);
79 82
80 83 /*
81 84 * Initialization Functions
82 85 */
83 86 static boolean_t eri_init(struct eri *);
84 87 static int eri_allocthings(struct eri *);
85 88 static int eri_init_xfer_params(struct eri *);
86 89 static void eri_statinit(struct eri *);
87 90 static int eri_burstsize(struct eri *);
88 91
89 92 static void eri_setup_mac_address(struct eri *, dev_info_t *);
90 93
91 94 static uint32_t eri_init_rx_channel(struct eri *);
92 95 static void eri_init_rx(struct eri *);
93 96 static void eri_init_txmac(struct eri *);
94 97
95 98 /*
96 99 * Un-init Functions
97 100 */
98 101 static uint32_t eri_txmac_disable(struct eri *);
99 102 static uint32_t eri_rxmac_disable(struct eri *);
100 103 static int eri_stop(struct eri *);
101 104 static void eri_uninit(struct eri *erip);
102 105 static int eri_freebufs(struct eri *);
103 106 static boolean_t eri_reclaim(struct eri *, uint32_t);
104 107
105 108 /*
106 109 * Transceiver (xcvr) Functions
107 110 */
108 111 static int eri_new_xcvr(struct eri *); /* Initializes & detects xcvrs */
109 112 static int eri_reset_xcvr(struct eri *);
110 113
111 114 #ifdef ERI_10_10_FORCE_SPEED_WORKAROUND
112 115 static void eri_xcvr_force_mode(struct eri *, uint32_t *);
113 116 #endif
114 117
115 118 static void eri_mif_poll(struct eri *, soft_mif_enable_t);
116 119 static void eri_check_link(struct eri *);
117 120 static uint32_t eri_check_link_noind(struct eri *);
118 121 static link_state_t eri_mif_check(struct eri *, uint16_t, uint16_t);
119 122 static void eri_mii_write(struct eri *, uint8_t, uint16_t);
120 123 static uint32_t eri_mii_read(struct eri *, uint8_t, uint16_t *);
121 124
122 125 /*
123 126 * Reset Functions
124 127 */
125 128 static uint32_t eri_etx_reset(struct eri *);
126 129 static uint32_t eri_erx_reset(struct eri *);
127 130
128 131 /*
129 132 * Error Functions
130 133 */
131 134 static void eri_fatal_err(struct eri *, uint32_t);
132 135 static void eri_nonfatal_err(struct eri *, uint32_t);
133 136
134 137 #ifdef ERI_TX_HUNG
135 138 static int eri_check_txhung(struct eri *);
136 139 #endif
137 140
138 141 /*
139 142 * Hardening Functions
140 143 */
141 144 static void eri_fault_msg(struct eri *, uint_t, msg_t, const char *, ...);
142 145
143 146 /*
144 147 * Misc Functions
145 148 */
146 149 static void eri_savecntrs(struct eri *);
147 150
148 151 static void eri_stop_timer(struct eri *erip);
149 152 static void eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec);
150 153
151 154 static void eri_bb_force_idle(struct eri *);
152 155
153 156 /*
154 157 * Utility Functions
155 158 */
156 159 static mblk_t *eri_allocb(size_t size);
157 160 static mblk_t *eri_allocb_sp(size_t size);
158 161 static int eri_param_get(queue_t *q, mblk_t *mp, caddr_t cp);
159 162 static int eri_param_set(queue_t *, mblk_t *, char *, caddr_t);
160 163
161 164 /*
162 165 * Functions to support ndd
163 166 */
164 167 static void eri_nd_free(caddr_t *nd_pparam);
165 168
166 169 static boolean_t eri_nd_load(caddr_t *nd_pparam, char *name,
167 170 pfi_t get_pfi, pfi_t set_pfi, caddr_t data);
168 171
169 172 static int eri_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp);
170 173 static void eri_param_cleanup(struct eri *);
171 174 static int eri_param_register(struct eri *, param_t *, int);
172 175 static void eri_process_ndd_ioctl(struct eri *, queue_t *, mblk_t *, int);
173 176 static int eri_mk_mblk_tail_space(mblk_t *, mblk_t **, size_t);
174 177
175 178
176 179 static void eri_loopback(struct eri *, queue_t *, mblk_t *);
177 180
178 181 static uint32_t eri_ladrf_bit(const uint8_t *);
179 182
180 183
181 184 /*
182 185 * Nemo (GLDv3) Functions.
183 186 */
184 187 static int eri_m_stat(void *, uint_t, uint64_t *);
185 188 static int eri_m_start(void *);
186 189 static void eri_m_stop(void *);
187 190 static int eri_m_promisc(void *, boolean_t);
188 191 static int eri_m_multicst(void *, boolean_t, const uint8_t *);
189 192 static int eri_m_unicst(void *, const uint8_t *);
190 193 static void eri_m_ioctl(void *, queue_t *, mblk_t *);
191 194 static boolean_t eri_m_getcapab(void *, mac_capab_t, void *);
192 195 static mblk_t *eri_m_tx(void *, mblk_t *);
193 196
194 197 static mac_callbacks_t eri_m_callbacks = {
195 198 MC_IOCTL | MC_GETCAPAB,
196 199 eri_m_stat,
197 200 eri_m_start,
198 201 eri_m_stop,
199 202 eri_m_promisc,
200 203 eri_m_multicst,
201 204 eri_m_unicst,
202 205 eri_m_tx,
203 206 NULL,
204 207 eri_m_ioctl,
205 208 eri_m_getcapab
206 209 };
207 210
208 211 /*
209 212 * Define PHY Vendors: Matches to IEEE
210 213 * Organizationally Unique Identifier (OUI)
211 214 */
212 215 /*
213 216 * The first two are supported as Internal XCVRs
214 217 */
215 218 #define PHY_VENDOR_LUCENT 0x601d
216 219
217 220 #define PHY_LINK_NONE 0 /* Not attempted yet or retry */
218 221 #define PHY_LINK_DOWN 1 /* Not being used */
219 222 #define PHY_LINK_UP 2 /* Not being used */
220 223
221 224 #define AUTO_SPEED 0
222 225 #define FORCE_SPEED 1
223 226
224 227 /*
225 228 * MIB II broadcast/multicast packets
226 229 */
227 230
228 231 #define IS_BROADCAST(pkt) (bcmp(pkt, &etherbroadcastaddr, ETHERADDRL) == 0)
229 232 #define IS_MULTICAST(pkt) ((pkt[0] & 01) == 1)
230 233
231 234 #define BUMP_InNUcast(erip, pkt) \
232 235 if (IS_BROADCAST(pkt)) { \
233 236 HSTAT(erip, brdcstrcv); \
234 237 } else if (IS_MULTICAST(pkt)) { \
235 238 HSTAT(erip, multircv); \
236 239 }
237 240
238 241 #define BUMP_OutNUcast(erip, pkt) \
239 242 if (IS_BROADCAST(pkt)) { \
240 243 HSTAT(erip, brdcstxmt); \
241 244 } else if (IS_MULTICAST(pkt)) { \
242 245 HSTAT(erip, multixmt); \
243 246 }
244 247
245 248 #define NEXTTMDP(tbasep, tmdlimp, tmdp) (((tmdp) + 1) == tmdlimp \
246 249 ? tbasep : ((tmdp) + 1))
247 250
248 251 #define ETHERHEADER_SIZE (sizeof (struct ether_header))
249 252
250 253 #ifdef ERI_RCV_CKSUM
251 254 #define ERI_PROCESS_READ(erip, bp, sum) \
252 255 { \
253 256 t_uscalar_t type; \
254 257 uint_t start_offset, end_offset; \
255 258 \
256 259 *(bp->b_wptr) = 0; /* pad byte */ \
257 260 \
258 261 /* \
259 262 * update MIB II statistics \
260 263 */ \
261 264 HSTAT(erip, ipackets64); \
262 265 HSTATN(erip, rbytes64, len); \
263 266 BUMP_InNUcast(erip, bp->b_rptr); \
264 267 type = get_ether_type(bp->b_rptr); \
265 268 if (type == ETHERTYPE_IP || type == ETHERTYPE_IPV6) { \
266 269 start_offset = 0; \
267 270 end_offset = MBLKL(bp) - ETHERHEADER_SIZE; \
268 271 mac_hcksum_set(bp, \
269 272 start_offset, 0, end_offset, sum, \
270 273 HCK_PARTIALCKSUM); \
271 274 } else { \
272 275 /* \
273 276 * Strip the PADS for 802.3 \
274 277 */ \
275 278 if (type <= ETHERMTU) \
276 279 bp->b_wptr = bp->b_rptr + \
277 280 ETHERHEADER_SIZE + type; \
278 281 } \
279 282 }
280 283 #else
281 284
282 285 #define ERI_PROCESS_READ(erip, bp) \
283 286 { \
284 287 t_uscalar_t type; \
285 288 type = get_ether_type(bp->b_rptr); \
286 289 \
287 290 /* \
288 291 * update MIB II statistics \
289 292 */ \
290 293 HSTAT(erip, ipackets64); \
291 294 HSTATN(erip, rbytes64, len); \
292 295 BUMP_InNUcast(erip, bp->b_rptr); \
293 296 /* \
294 297 * Strip the PADS for 802.3 \
295 298 */ \
296 299 if (type <= ETHERMTU) \
297 300 bp->b_wptr = bp->b_rptr + ETHERHEADER_SIZE + \
298 301 type; \
299 302 }
300 303 #endif /* ERI_RCV_CKSUM */
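
For context: with HCK_PARTIALCKSUM the hardware has already produced a 16-bit
one's-complement sum over bytes start_offset..end_offset of the frame past the
Ethernet header, and mac_hcksum_set() hands that value up for the stack to
finish against the pseudo-header. A minimal sketch of the standard fold a
consumer performs (illustration only, not driver code):

	/*
	 * Fold a 32-bit partial one's-complement sum down to 16 bits
	 * and complement it, as an IP stack consuming HCK_PARTIALCKSUM
	 * would before comparing against the packet's checksum field.
	 */
	static uint16_t
	cksum_fold(uint32_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return ((uint16_t)~sum);
	}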
301 304
302 305 /*
303 306 * TX Interrupt Rate
304 307 */
305 308 static int tx_interrupt_rate = 16;
306 309
307 310 /*
308 311 * Ethernet broadcast address definition.
309 312 */
310 313 static uint8_t etherbroadcastaddr[] = {
311 314 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
312 315 };
313 316
314 317 /*
315 318 * The following variables are used for configuring various features
316 319 */
317 320 #define ERI_DESC_HANDLE_ALLOC 0x0001
318 321 #define ERI_DESC_MEM_ALLOC 0x0002
319 322 #define ERI_DESC_MEM_MAP 0x0004
320 323 #define ERI_RCV_HANDLE_ALLOC 0x0020
321 324 #define ERI_RCV_HANDLE_BIND 0x0040
322 325 #define ERI_XMIT_DVMA_ALLOC 0x0100
323 326 #define ERI_RCV_DVMA_ALLOC 0x0200
324 327 #define ERI_XBUFS_HANDLE_ALLOC 0x0400
325 328 #define ERI_XBUFS_KMEM_ALLOC 0x0800
326 329 #define ERI_XBUFS_KMEM_DMABIND 0x1000
327 330
328 331
329 332 #define ERI_DONT_STRIP_CRC
330 333 /*
331 334 * Translate a kernel virtual address to i/o address.
332 335 */
333 336 #define ERI_IOPBIOADDR(erip, a) \
334 337 ((erip)->iopbiobase + ((uintptr_t)a - (erip)->iopbkbase))
335 338
336 339 /*
337 340 * ERI Configuration Register Value
338 341 * Used to configure parameters that define DMA burst
339 342 * and internal arbitration behavior.
340 343 * For equal TX and RX bursts, set the following in the global
341 344 * configuration register:
342 345 * static int global_config = 0x42;
343 346 */
344 347
345 348 /*
346 349 * ERI ERX Interrupt Blanking Time
347 350 * Each count is about 16 us (2048 clocks) for 66 MHz PCI.
348 351 */
349 -static int intr_blank_time = 6; /* for about 96 us */
350 -static int intr_blank_packets = 8; /* */
352 +volatile int intr_blank_time = 6; /* for about 96 us */
353 +volatile int intr_blank_packets = 8; /* */
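
These two interrupt-blanking values are run-time tunables: they are exported
through ndd (note the "+intr_blank_time" and "+intr_blank_packets" entries in
param_arr below) and may be patched live with mdb -kw or set in /etc/system.
For a plain static int whose address never escapes, the compiler may legally
constant-fold the initial value into every use, so a live patch would be
ignored; volatile forces a real load on each access. A minimal sketch of the
failure mode (hypothetical names, not from this file):

	static int tunable = 6;		/* non-volatile tunable */

	int
	blank_ticks(void)
	{
		/* may be compiled as "return (96)"; patching */
		/* 'tunable' at run time would then have no effect */
		return (tunable * 16);
	}

	volatile int tunable_v = 6;	/* reloaded on every access */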
351 354
352 355 /*
353 356 * ERX PAUSE Threshold Register value
354 357 * The following value is for an OFF Threshold of about 15.5 Kbytes
355 358 * and an ON Threshold of 4K bytes.
356 359 */
357 360 static int rx_pause_threshold = 0xf8 | (0x40 << 12);
358 361 static int eri_reinit_fatal = 0;
359 362 #ifdef DEBUG
360 363 static int noteri = 0;
361 364 #endif
362 365
363 366 #ifdef ERI_TX_HUNG
364 367 static int eri_reinit_txhung = 0;
365 368 #endif
366 369
367 370 #ifdef ERI_HDX_BUG_WORKAROUND
368 371 /*
369 372 * By default enable padding in hdx mode to 97 bytes.
370 373 * To disabled, in /etc/system:
371 374 * set eri:eri_hdx_pad_enable=0
372 375 */
373 376 static uchar_t eri_hdx_pad_enable = 1;
374 377 #endif
375 378
376 379 /*
377 380 * Default values to initialize the cache line size and latency timer
378 381 * registers in the PCI configuration space.
379 382 * ERI_G_CACHE_LINE_SIZE_16 is defined as 16 since RIO expects it in units
380 383 * of 4 bytes.
381 384 */
382 385 #ifdef ERI_PM_WORKAROUND_PCI
383 386 static int eri_pci_cache_line = ERI_G_CACHE_LINE_SIZE_32; /* 128 bytes */
384 387 static int eri_pci_latency_timer = 0xff; /* 255 PCI cycles */
385 388 #else
386 389 static int eri_pci_cache_line = ERI_G_CACHE_LINE_SIZE_16; /* 64 bytes */
387 390 static int eri_pci_latency_timer = 0x40; /* 64 PCI cycles */
388 391 #endif
389 392 #define ERI_CACHE_LINE_SIZE (eri_pci_cache_line << ERI_G_CACHE_BIT)
390 393
391 394 /*
392 395 * Claim the device is ultra-capable of burst in the beginning. Use
393 396 * the value returned by ddi_dma_burstsizes() to actually set the ERI
394 397 * global configuration register later.
395 398 *
396 399 * PCI_ERI supports Infinite burst or 64-byte-multiple bursts.
397 400 */
398 401 #define ERI_LIMADDRLO ((uint64_t)0x00000000)
399 402 #define ERI_LIMADDRHI ((uint64_t)0xffffffff)
400 403
401 404 static ddi_dma_attr_t dma_attr = {
402 405 DMA_ATTR_V0, /* version number. */
403 406 (uint64_t)ERI_LIMADDRLO, /* low address */
404 407 (uint64_t)ERI_LIMADDRHI, /* high address */
405 408 (uint64_t)0x00ffffff, /* address counter max */
406 409 (uint64_t)1, /* alignment */
407 410 (uint_t)0xe000e0, /* dlim_burstsizes for 32 4 bit xfers */
408 411 (uint32_t)0x1, /* minimum transfer size */
409 412 (uint64_t)0x7fffffff, /* maximum transfer size */
410 413 (uint64_t)0x00ffffff, /* maximum segment size */
411 414 1, /* scatter/gather list length */
412 415 (uint32_t)1, /* granularity */
413 416 (uint_t)0 /* attribute flags */
414 417 };
415 418
416 419 static ddi_dma_attr_t desc_dma_attr = {
417 420 DMA_ATTR_V0, /* version number. */
418 421 (uint64_t)ERI_LIMADDRLO, /* low address */
419 422 (uint64_t)ERI_LIMADDRHI, /* high address */
420 423 (uint64_t)0x00ffffff, /* address counter max */
421 424 (uint64_t)8, /* alignment */
422 425 (uint_t)0xe000e0, /* dlim_burstsizes for 32 4 bit xfers */
423 426 (uint32_t)0x1, /* minimum transfer size */
424 427 (uint64_t)0x7fffffff, /* maximum transfer size */
425 428 (uint64_t)0x00ffffff, /* maximum segment size */
426 429 1, /* scatter/gather list length */
427 430 16, /* granularity */
428 431 0 /* attribute flags */
429 432 };
430 433
431 434 static ddi_device_acc_attr_t buf_attr = {
432 435 DDI_DEVICE_ATTR_V0, /* devacc_attr_version */
433 436 DDI_NEVERSWAP_ACC, /* devacc_attr_endian_flags */
434 437 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */
435 438 DDI_DEFAULT_ACC, /* devacc_attr_access */
436 439 };
437 440
438 441 ddi_dma_lim_t eri_dma_limits = {
439 442 (uint64_t)ERI_LIMADDRLO, /* dlim_addr_lo */
440 443 (uint64_t)ERI_LIMADDRHI, /* dlim_addr_hi */
441 444 (uint64_t)ERI_LIMADDRHI, /* dlim_cntr_max */
442 445 (uint_t)0x00e000e0, /* dlim_burstsizes for 32 and 64 bit xfers */
443 446 (uint32_t)0x1, /* dlim_minxfer */
444 447 1024 /* dlim_speed */
445 448 };
446 449
447 450 /*
448 451 * Link Configuration variables
449 452 *
450 453 * On Motherboard implementations, 10/100 Mbps speeds may be supported
451 454 * by using both the Serial Link and the MII on Non-serial-link interface.
452 455 * When both links are present, the driver automatically tries to bring up
453 456 * both. If both are up, the Gigabit Serial Link is selected for use, by
454 457 * default. The following configuration variable is used to force the selection
455 458 * of one of the links when both are up.
456 459 * To change the default selection to the MII link when both the Serial
457 460 * Link and the MII link are up, change eri_default_link to 1.
458 461 *
459 462 * Once a link is in use, the driver will continue to use that link till it
460 463 * goes down. When it goes down, the driver will look at the status of both the
461 464 * links again for link selection.
462 465 *
463 466 * Currently the standard is not stable w.r.t. gigabit link configuration
464 467 * using auto-negotiation procedures. Meanwhile, the link may be configured
465 468 * in "forced" mode using the "autonegotiation enable" bit (bit-12) in the
466 469 * PCS MII Command Register. In this mode the PCS sends "idles" until it sees
467 470 * "idles" as initialization instead of the Link Configuration protocol
468 471 * where a Config register is exchanged. In this mode, the ERI is programmed
469 472 * for full-duplex operation with both pauseTX and pauseRX (for flow control)
470 473 * enabled.
471 474 */
472 475
473 476 static int select_link = 0; /* automatic selection */
474 477 static int default_link = 0; /* Select Serial link if both are up */
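
As an illustration, forcing the MII link as the default selection when both
links are up could be done at boot via /etc/system, using the same
"set eri:<var>=<val>" syntax shown for eri_hdx_pad_enable above (variable
name and semantics assumed from the comment block and the static above):

	set eri:default_link = 1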
475 478
476 479 /*
477 480 * The following variables are used for configuring link-operation
478 481 * for all the "eri" interfaces in the system.
479 482 * Later, these parameters may be changed per interface using the "ndd" command.
480 483 * These parameters may also be specified as properties using the .conf
481 484 * file mechanism for each interface.
482 485 */
483 486
484 487 /*
485 488 * The following variable value will be overridden by "link-pulse-disabled"
486 489 * property which may be created by OBP or eri.conf file. This property is
487 490 * applicable only for 10 Mbps links.
488 491 */
489 492 static int link_pulse_disabled = 0; /* link pulse disabled */
490 493
491 494 /* For MII-based FastEthernet links */
492 495 static int adv_autoneg_cap = 1;
493 496 static int adv_100T4_cap = 0;
494 497 static int adv_100fdx_cap = 1;
495 498 static int adv_100hdx_cap = 1;
496 499 static int adv_10fdx_cap = 1;
497 500 static int adv_10hdx_cap = 1;
498 501 static int adv_pauseTX_cap = 0;
499 502 static int adv_pauseRX_cap = 0;
500 503
501 504 /*
502 505 * The following gap parameters are in terms of byte times.
503 506 */
504 507 static int ipg0 = 8;
505 508 static int ipg1 = 8;
506 509 static int ipg2 = 4;
507 510
508 511 static int lance_mode = 1; /* to enable LANCE mode */
509 512 static int mifpoll_enable = 0; /* to enable mif poll */
510 513 static int ngu_enable = 0; /* to enable Never Give Up mode */
511 514
512 515 static int eri_force_mlf = 0; /* to enable mif poll */
513 516 static int eri_phy_mintrans = 1; /* Lu3X31T mintrans algorithm */
514 517 /*
515 518 * For the MII interface, the External Transceiver is selected when present.
516 519 * The following variable is used to select the Internal Transceiver even
517 520 * when the External Transceiver is present.
518 521 */
519 522 static int use_int_xcvr = 0;
520 523 static int pace_size = 0; /* Do not use pacing for now */
521 524
522 525 static int eri_use_dvma_rx = 0; /* =1:use dvma */
523 526 static int eri_rx_bcopy_max = RX_BCOPY_MAX; /* =1:use bcopy() */
524 527 static int eri_overflow_reset = 1; /* global reset if rx_fifo_overflow */
525 528 static int eri_tx_ring_size = 2048; /* number of entries in tx ring */
526 529 static int eri_rx_ring_size = 1024; /* number of entries in rx ring */
527 530 /*
528 531 * The following parameters may be configured by the user. If they are not
529 532 * configured by the user, the values will be based on the capabilities of
530 533 * the transceiver.
531 534 * The value "ERI_NOTUSR" is ORed with the parameter value to indicate values
532 535 * which are NOT configured by the user.
533 536 */
534 537
535 538 #define ERI_NOTUSR 0x0f000000
536 539 #define ERI_MASK_1BIT 0x1
537 540 #define ERI_MASK_2BIT 0x3
538 541 #define ERI_MASK_8BIT 0xff
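
A sketch of how the ERI_NOTUSR convention would be tested and stripped when
reading a parameter back (hypothetical helpers for illustration; the driver
performs these checks inline elsewhere):

	/* A value still carrying the tag was not set by the user. */
	static int
	param_is_user_set(uint32_t v)
	{
		return ((v & ERI_NOTUSR) != ERI_NOTUSR);
	}

	/* Strip the tag to recover the usable parameter value. */
	static uint32_t
	param_value(uint32_t v)
	{
		return (v & ~ERI_NOTUSR);
	}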
539 542
540 543
541 544 /*
542 545 * Note:
543 546 * ERI has all of the above capabilities.
544 547 * Only when an External Transceiver is selected for MII-based FastEthernet
545 548 * link operation, the capabilities depend upon the capabilities of the
546 549 * External Transceiver.
547 550 */
548 551
549 552 /* ------------------------------------------------------------------------- */
550 553
551 554 static param_t param_arr[] = {
552 555 /* min max value r/w/hidden+name */
553 556 { 0, 2, 2, "-transceiver_inuse"},
554 557 { 0, 1, 0, "-link_status"},
555 558 { 0, 1, 0, "-link_speed"},
556 559 { 0, 1, 0, "-link_mode"},
557 560 { 0, 255, 8, "+ipg1"},
558 561 { 0, 255, 4, "+ipg2"},
559 562 { 0, 1, 0, "+use_int_xcvr"},
560 563 { 0, 255, 0, "+pace_size"},
561 564 { 0, 1, 1, "+adv_autoneg_cap"},
562 565 { 0, 1, 1, "+adv_100T4_cap"},
563 566 { 0, 1, 1, "+adv_100fdx_cap"},
564 567 { 0, 1, 1, "+adv_100hdx_cap"},
565 568 { 0, 1, 1, "+adv_10fdx_cap"},
566 569 { 0, 1, 1, "+adv_10hdx_cap"},
567 570 { 0, 1, 1, "-autoneg_cap"},
568 571 { 0, 1, 1, "-100T4_cap"},
569 572 { 0, 1, 1, "-100fdx_cap"},
570 573 { 0, 1, 1, "-100hdx_cap"},
571 574 { 0, 1, 1, "-10fdx_cap"},
572 575 { 0, 1, 1, "-10hdx_cap"},
573 576 { 0, 1, 0, "-lp_autoneg_cap"},
574 577 { 0, 1, 0, "-lp_100T4_cap"},
575 578 { 0, 1, 0, "-lp_100fdx_cap"},
576 579 { 0, 1, 0, "-lp_100hdx_cap"},
577 580 { 0, 1, 0, "-lp_10fdx_cap"},
578 581 { 0, 1, 0, "-lp_10hdx_cap"},
579 582 { 0, 1, 1, "+lance_mode"},
580 583 { 0, 31, 8, "+ipg0"},
581 584 { 0, 127, 6, "+intr_blank_time"},
582 585 { 0, 255, 8, "+intr_blank_packets"},
583 586 { 0, 1, 1, "!serial-link"},
584 587 { 0, 2, 1, "!non-serial-link"},
585 588 { 0, 1, 0, "%select-link"},
586 589 { 0, 1, 0, "%default-link"},
587 590 { 0, 2, 0, "!link-in-use"},
588 591 { 0, 1, 1, "%adv_asm_dir_cap"},
589 592 { 0, 1, 1, "%adv_pause_cap"},
590 593 { 0, 1, 0, "!asm_dir_cap"},
591 594 { 0, 1, 0, "!pause_cap"},
592 595 { 0, 1, 0, "!lp_asm_dir_cap"},
593 596 { 0, 1, 0, "!lp_pause_cap"},
594 597 };
595 598
596 599 DDI_DEFINE_STREAM_OPS(eri_dev_ops, nulldev, nulldev, eri_attach, eri_detach,
597 600 nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);
598 601
599 602 /*
600 603 * This is the loadable module wrapper.
601 604 */
602 605 #include <sys/modctl.h>
603 606
604 607 /*
605 608 * Module linkage information for the kernel.
606 609 */
607 610 static struct modldrv modldrv = {
608 611 &mod_driverops, /* Type of module. This one is a driver */
609 612 "Sun RIO 10/100 Mb Ethernet",
610 613 &eri_dev_ops, /* driver ops */
611 614 };
612 615
613 616 static struct modlinkage modlinkage = {
614 617 MODREV_1, &modldrv, NULL
615 618 };
616 619
617 620 /*
618 621 * Hardware Independent Functions
619 622 * New Section
620 623 */
621 624
622 625 int
623 626 _init(void)
624 627 {
625 628 int status;
626 629
627 630 mac_init_ops(&eri_dev_ops, "eri");
628 631 if ((status = mod_install(&modlinkage)) != 0) {
629 632 mac_fini_ops(&eri_dev_ops);
630 633 }
631 634 return (status);
632 635 }
633 636
634 637 int
635 638 _fini(void)
636 639 {
637 640 int status;
638 641
639 642 status = mod_remove(&modlinkage);
640 643 if (status == 0) {
641 644 mac_fini_ops(&eri_dev_ops);
642 645 }
643 646 return (status);
644 647 }
645 648
646 649 int
647 650 _info(struct modinfo *modinfop)
648 651 {
649 652 return (mod_info(&modlinkage, modinfop));
650 653 }
651 654
652 655
653 656 /*
654 657 * Interface exists: make available by filling in network interface
655 658 * record. System will initialize the interface when it is ready
656 659 * to accept packets.
657 660 */
658 661 static int
659 662 eri_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
660 663 {
661 664 struct eri *erip = NULL;
662 665 mac_register_t *macp = NULL;
663 666 int regno;
664 667 boolean_t doinit;
665 668 boolean_t mutex_inited = B_FALSE;
666 669 boolean_t intr_add = B_FALSE;
667 670
668 671 switch (cmd) {
669 672 case DDI_ATTACH:
670 673 break;
671 674
672 675 case DDI_RESUME:
673 676 if ((erip = ddi_get_driver_private(dip)) == NULL)
674 677 return (DDI_FAILURE);
675 678
676 679 mutex_enter(&erip->intrlock);
677 680 erip->flags &= ~ERI_SUSPENDED;
678 681 erip->init_macregs = 1;
679 682 param_linkup = 0;
680 683 erip->stats.link_up = LINK_STATE_DOWN;
681 684 erip->linkcheck = 0;
682 685
683 686 doinit = (erip->flags & ERI_STARTED) ? B_TRUE : B_FALSE;
684 687 mutex_exit(&erip->intrlock);
685 688
686 689 if (doinit && !eri_init(erip)) {
687 690 return (DDI_FAILURE);
688 691 }
689 692 return (DDI_SUCCESS);
690 693
691 694 default:
692 695 return (DDI_FAILURE);
693 696 }
694 697
695 698 /*
696 699 * Allocate soft device data structure
697 700 */
698 701 erip = kmem_zalloc(sizeof (struct eri), KM_SLEEP);
699 702
700 703 /*
701 704 * Initialize as many elements as possible.
702 705 */
703 706 ddi_set_driver_private(dip, erip);
704 707 erip->dip = dip; /* dip */
705 708 erip->instance = ddi_get_instance(dip); /* instance */
706 709 erip->flags = 0;
707 710 erip->multi_refcnt = 0;
708 711 erip->promisc = B_FALSE;
709 712
710 713 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
711 714 ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
712 715 "mac_alloc failed");
713 716 goto attach_fail;
714 717 }
715 718 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
716 719 macp->m_driver = erip;
717 720 macp->m_dip = dip;
718 721 macp->m_src_addr = erip->ouraddr;
719 722 macp->m_callbacks = &eri_m_callbacks;
720 723 macp->m_min_sdu = 0;
721 724 macp->m_max_sdu = ETHERMTU;
722 725 macp->m_margin = VLAN_TAGSZ;
723 726
724 727 /*
725 728 * Map in the device registers.
726 729 * Separate pointers will be set up for the following
727 730 * register groups within the GEM Register Space:
728 731 * Global register set
729 732 * ETX register set
730 733 * ERX register set
731 734 * BigMAC register set.
732 735 * MIF register set
733 736 */
734 737
735 738 if (ddi_dev_nregs(dip, &regno) != (DDI_SUCCESS)) {
736 739 ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
737 740 "ddi_dev_nregs failed, returned %d", regno);
738 741 goto attach_fail;
739 742 }
740 743
741 744 /*
742 745 * Map the PCI config space
743 746 */
744 747 if (pci_config_setup(dip, &erip->pci_config_handle) != DDI_SUCCESS) {
745 748 ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
746 749 "%s pci_config_setup()", config_space_fatal_msg);
747 750 goto attach_fail;
748 751 }
749 752
750 753 /*
751 754 * Initialize device attributes structure
752 755 */
753 756 erip->dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
754 757 erip->dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
755 758 erip->dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
756 759
757 760 if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->globregp), 0, 0,
758 761 &erip->dev_attr, &erip->globregh)) {
759 762 goto attach_fail;
760 763 }
761 764 erip->etxregh = erip->globregh;
762 765 erip->erxregh = erip->globregh;
763 766 erip->bmacregh = erip->globregh;
764 767 erip->mifregh = erip->globregh;
765 768
766 769 erip->etxregp = (void *)(((caddr_t)erip->globregp) + 0x2000);
767 770 erip->erxregp = (void *)(((caddr_t)erip->globregp) + 0x4000);
768 771 erip->bmacregp = (void *)(((caddr_t)erip->globregp) + 0x6000);
769 772 erip->mifregp = (void *)(((caddr_t)erip->globregp) + 0x6200);
770 773
771 774 /*
772 775 * Map the software reset register.
773 776 */
774 777 if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->sw_reset_reg),
775 778 0x1010, 4, &erip->dev_attr, &erip->sw_reset_regh)) {
776 779 ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
777 780 mregs_4soft_reset_fail_msg);
778 781 goto attach_fail;
779 782 }
780 783
781 784 /*
782 785 * Try and stop the device.
783 786 * This is done until we want to handle interrupts.
784 787 */
785 788 if (eri_stop(erip))
786 789 goto attach_fail;
787 790
788 791 /*
789 792 * set PCI latency timer register.
790 793 */
791 794 pci_config_put8(erip->pci_config_handle, PCI_CONF_LATENCY_TIMER,
792 795 (uchar_t)eri_pci_latency_timer);
793 796
794 797 if (ddi_intr_hilevel(dip, 0)) {
795 798 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
796 799 " high-level interrupts are not supported");
797 800 goto attach_fail;
798 801 }
799 802
800 803 /*
801 804 * Get the interrupt cookie so the mutexes can be
802 805 * Initialized.
803 806 */
804 807 if (ddi_get_iblock_cookie(dip, 0, &erip->cookie) != DDI_SUCCESS)
805 808 goto attach_fail;
806 809
807 810 /*
808 811 * Initialize mutex's for this device.
809 812 */
810 813 mutex_init(&erip->xmitlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
811 814 mutex_init(&erip->intrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
812 815 mutex_init(&erip->linklock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
813 816 mutex_init(&erip->xcvrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
814 817
815 818 mutex_inited = B_TRUE;
816 819
817 820 /*
818 821 * Add interrupt to system
819 822 */
820 823 if (ddi_add_intr(dip, 0, &erip->cookie, 0, eri_intr, (caddr_t)erip) ==
821 824 DDI_SUCCESS)
822 825 intr_add = B_TRUE;
823 826 else {
824 827 goto attach_fail;
825 828 }
826 829
827 830 /*
828 831 * Set up the ethernet mac address.
829 832 */
830 833 (void) eri_setup_mac_address(erip, dip);
831 834
832 835 if (eri_init_xfer_params(erip))
833 836 goto attach_fail;
834 837
835 838 if (eri_burstsize(erip) == DDI_FAILURE) {
836 839 goto attach_fail;
837 840 }
838 841
839 842 /*
840 843 * Set up fewer receive buffers.
841 844 */
842 845 ERI_RPENDING = eri_rx_ring_size;
843 846 ERI_TPENDING = eri_tx_ring_size;
844 847
845 848 erip->rpending_mask = ERI_RPENDING - 1;
846 849 erip->rmdmax_mask = ERI_RPENDING - 1;
847 850 erip->mif_config = (ERI_PHY_BMSR << ERI_MIF_CFGPR_SHIFT);
848 851
849 852 erip->stats.pmcap = ERI_PMCAP_NONE;
850 853 if (pci_report_pmcap(dip, PCI_PM_IDLESPEED, (void *)4000) ==
851 854 DDI_SUCCESS)
852 855 erip->stats.pmcap = ERI_PMCAP_4MHZ;
853 856
854 857 if (mac_register(macp, &erip->mh) != 0)
855 858 goto attach_fail;
856 859
857 860 mac_free(macp);
858 861
859 862 return (DDI_SUCCESS);
860 863
861 864 attach_fail:
862 865 if (erip->pci_config_handle)
863 866 (void) pci_config_teardown(&erip->pci_config_handle);
864 867
865 868 if (mutex_inited) {
866 869 mutex_destroy(&erip->xmitlock);
867 870 mutex_destroy(&erip->intrlock);
868 871 mutex_destroy(&erip->linklock);
869 872 mutex_destroy(&erip->xcvrlock);
870 873 }
871 874
872 875 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, attach_fail_msg);
873 876
874 877 if (intr_add)
875 878 ddi_remove_intr(dip, 0, erip->cookie);
876 879
877 880 if (erip->globregh)
878 881 ddi_regs_map_free(&erip->globregh);
879 882
880 883 if (macp != NULL)
881 884 mac_free(macp);
882 885 if (erip != NULL)
883 886 kmem_free(erip, sizeof (*erip));
884 887
885 888 return (DDI_FAILURE);
886 889 }
887 890
888 891 static int
889 892 eri_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
890 893 {
891 894 struct eri *erip;
892 895 int i;
893 896
894 897 if ((erip = ddi_get_driver_private(dip)) == NULL) {
895 898 /*
896 899 * No resources allocated.
897 900 */
898 901 return (DDI_FAILURE);
899 902 }
900 903
901 904 switch (cmd) {
902 905 case DDI_DETACH:
903 906 break;
904 907
905 908 case DDI_SUSPEND:
906 909 erip->flags |= ERI_SUSPENDED;
907 910 eri_uninit(erip);
908 911 return (DDI_SUCCESS);
909 912
910 913 default:
911 914 return (DDI_FAILURE);
912 915 }
913 916
914 917 if (erip->flags & (ERI_RUNNING | ERI_SUSPENDED)) {
915 918 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, busy_msg);
916 919 return (DDI_FAILURE);
917 920 }
918 921
919 922 if (mac_unregister(erip->mh) != 0) {
920 923 return (DDI_FAILURE);
921 924 }
922 925
923 926 /*
924 927 * Make the device quiescent
925 928 */
926 929 (void) eri_stop(erip);
927 930
928 931 /*
929 932 * Remove instance of the intr
930 933 */
931 934 ddi_remove_intr(dip, 0, erip->cookie);
932 935
933 936 if (erip->pci_config_handle)
934 937 (void) pci_config_teardown(&erip->pci_config_handle);
935 938
936 939 /*
937 940 * Destroy all mutexes and data structures allocated during
938 941 * attach time.
939 942 */
940 943
941 944 if (erip->globregh)
942 945 ddi_regs_map_free(&erip->globregh);
943 946
944 947 erip->etxregh = NULL;
945 948 erip->erxregh = NULL;
946 949 erip->bmacregh = NULL;
947 950 erip->mifregh = NULL;
948 951 erip->globregh = NULL;
949 952
950 953 if (erip->sw_reset_regh)
951 954 ddi_regs_map_free(&erip->sw_reset_regh);
952 955
953 956 if (erip->ksp)
954 957 kstat_delete(erip->ksp);
955 958
956 959 eri_stop_timer(erip); /* acquire linklock */
957 960 eri_start_timer(erip, eri_check_link, 0);
958 961 mutex_destroy(&erip->xmitlock);
959 962 mutex_destroy(&erip->intrlock);
960 963 mutex_destroy(&erip->linklock);
961 964 mutex_destroy(&erip->xcvrlock);
962 965
963 966 if (erip->md_h) {
964 967 if (ddi_dma_unbind_handle(erip->md_h) ==
965 968 DDI_FAILURE)
966 969 return (DDI_FAILURE);
967 970 ddi_dma_mem_free(&erip->mdm_h);
968 971 ddi_dma_free_handle(&erip->md_h);
969 972 }
970 973
971 974 if (eri_freebufs(erip))
972 975 return (DDI_FAILURE);
973 976
974 977 /* dvma handle case */
975 978
976 979 if (erip->eri_dvmarh) {
977 980 (void) dvma_release(erip->eri_dvmarh);
978 981 erip->eri_dvmarh = NULL;
979 982 }
980 983 /*
981 984 * xmit_dma_mode, erip->ndmaxh[i]=NULL for dvma
982 985 */
983 986 else {
984 987 for (i = 0; i < ERI_RPENDING; i++)
985 988 if (erip->ndmarh[i])
986 989 ddi_dma_free_handle(&erip->ndmarh[i]);
987 990 }
988 991 /*
989 992 * Release TX buffer
990 993 */
991 994 if (erip->tbuf_ioaddr != 0) {
992 995 (void) ddi_dma_unbind_handle(erip->tbuf_handle);
993 996 erip->tbuf_ioaddr = 0;
994 997 }
995 998 if (erip->tbuf_kaddr != NULL) {
996 999 ddi_dma_mem_free(&erip->tbuf_acch);
997 1000 erip->tbuf_kaddr = NULL;
998 1001 }
999 1002 if (erip->tbuf_handle != NULL) {
1000 1003 ddi_dma_free_handle(&erip->tbuf_handle);
1001 1004 erip->tbuf_handle = NULL;
1002 1005 }
1003 1006
1004 1007 eri_param_cleanup(erip);
1005 1008
1006 1009 ddi_set_driver_private(dip, NULL);
1007 1010 kmem_free((caddr_t)erip, sizeof (struct eri));
1008 1011
1009 1012 return (DDI_SUCCESS);
1010 1013 }
1011 1014
1012 1015 /*
1013 1016 * To set up the mac address for the network interface:
1014 1017 * The adapter card may support a local mac address which is published
1015 1018 * in a device node property "local-mac-address". This mac address is
1016 1019 * treated as the factory-installed mac address for DLPI interface.
1017 1020 * If the adapter firmware has used the device for diskless boot
1018 1021 * operation it publishes a property called "mac-address" for use by
1019 1022 * inetboot and the device driver.
1020 1023 * If "mac-address" is not found, the system options property
1021 1024 * "local-mac-address" is used to select the mac-address. If this option
1022 1025 * is set to "true", and "local-mac-address" has been found, then
1023 1026 * local-mac-address is used; otherwise the system mac address is used
1024 1027 * by calling the "localetheraddr()" function.
1025 1028 */
1026 1029
1027 1030 static void
1028 1031 eri_setup_mac_address(struct eri *erip, dev_info_t *dip)
1029 1032 {
1030 1033 uchar_t *prop;
1031 1034 char *uselocal;
1032 1035 unsigned prop_len;
1033 1036 uint32_t addrflags = 0;
1034 1037 struct ether_addr factaddr;
1035 1038
1036 1039 /*
1037 1040 * Check if it is an adapter with its own local mac address
1038 1041 * If it is present, save it as the "factory-address"
1039 1042 * for this adapter.
1040 1043 */
1041 1044 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1042 1045 "local-mac-address", &prop, &prop_len) == DDI_PROP_SUCCESS) {
1043 1046 if (prop_len == ETHERADDRL) {
1044 1047 addrflags = ERI_FACTADDR_PRESENT;
1045 1048 bcopy(prop, &factaddr, ETHERADDRL);
1046 1049 ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
1047 1050 lether_addr_msg, ether_sprintf(&factaddr));
1048 1051 }
1049 1052 ddi_prop_free(prop);
1050 1053 }
1051 1054 /*
1052 1055 * Check if the adapter has published "mac-address" property.
1053 1056 * If it is present, use it as the mac address for this device.
1054 1057 */
1055 1058 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1056 1059 "mac-address", &prop, &prop_len) == DDI_PROP_SUCCESS) {
1057 1060 if (prop_len >= ETHERADDRL) {
1058 1061 bcopy(prop, erip->ouraddr, ETHERADDRL);
1059 1062 ddi_prop_free(prop);
1060 1063 return;
1061 1064 }
1062 1065 ddi_prop_free(prop);
1063 1066 }
1064 1067
1065 1068 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, "local-mac-address?",
1066 1069 &uselocal) == DDI_PROP_SUCCESS) {
1067 1070 if ((strcmp("true", uselocal) == 0) &&
1068 1071 (addrflags & ERI_FACTADDR_PRESENT)) {
1069 1072 addrflags |= ERI_FACTADDR_USE;
1070 1073 bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
1071 1074 ddi_prop_free(uselocal);
1072 1075 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
1073 1076 lmac_addr_msg);
1074 1077 return;
1075 1078 }
1076 1079 ddi_prop_free(uselocal);
1077 1080 }
1078 1081
1079 1082 /*
1080 1083 * Get the system ethernet address.
1081 1084 */
1082 1085 (void) localetheraddr(NULL, &factaddr);
1083 1086 bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
1084 1087 }
1085 1088
1086 1089
1087 1090 /*
1088 1091 * Calculate the bit in the multicast address filter that selects the given
1089 1092 * address.
1090 1093 * Note: For ERI, the last 8-bits are used.
1091 1094 */
1092 1095
1093 1096 static uint32_t
1094 1097 eri_ladrf_bit(const uint8_t *addr)
1095 1098 {
1096 1099 uint32_t crc;
1097 1100
1098 1101 CRC32(crc, addr, ETHERADDRL, -1U, crc32_table);
1099 1102
1100 1103 /*
1101 1104 * Just want the 8 most significant bits.
1102 1105 */
1103 1106 return ((~crc) >> 24);
1104 1107 }
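
The returned index selects one of 256 bits in the logical address filter,
which the chip exposes as a bank of 16-bit hash registers (hash0, hash1, ...,
programmed in eri_init_macregs_generic below). A sketch of the index-to-
register mapping (hypothetical helper; the driver's actual layout is hidden
behind its LADRF_SET/LADRF_CLR macros):

	/* Hypothetical: set bit 'b' (0..255) in a 16 x 16-bit filter. */
	static void
	ladrf_set(uint16_t ladrf[16], uint32_t b)
	{
		ladrf[b >> 4] |= (uint16_t)(1 << (b & 0xf));
	}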
1105 1108
1106 1109 static void
1107 1110 eri_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1108 1111 {
1109 1112 struct eri *erip = arg;
1110 1113 struct iocblk *iocp = (void *)mp->b_rptr;
1111 1114 int err;
1112 1115
1113 1116 ASSERT(erip != NULL);
1114 1117
1115 1118 /*
1116 1119 * Privilege checks.
1117 1120 */
1118 1121 switch (iocp->ioc_cmd) {
1119 1122 case ERI_SET_LOOP_MODE:
1120 1123 case ERI_ND_SET:
1121 1124 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1122 1125 if (err != 0) {
1123 1126 miocnak(wq, mp, 0, err);
1124 1127 return;
1125 1128 }
1126 1129 break;
1127 1130 default:
1128 1131 break;
1129 1132 }
1130 1133
1131 1134 switch (iocp->ioc_cmd) {
1132 1135 case ERI_ND_GET:
1133 1136 case ERI_ND_SET:
1134 1137 eri_process_ndd_ioctl(erip, wq, mp, iocp->ioc_cmd);
1135 1138 break;
1136 1139
1137 1140 case ERI_SET_LOOP_MODE:
1138 1141 case ERI_GET_LOOP_MODE:
1139 1142 /*
1140 1143 * XXX: Consider updating this to the new netlb ioctls.
1141 1144 */
1142 1145 eri_loopback(erip, wq, mp);
1143 1146 break;
1144 1147
1145 1148 default:
1146 1149 miocnak(wq, mp, 0, EINVAL);
1147 1150 break;
1148 1151 }
1149 1152
1150 1153 ASSERT(!MUTEX_HELD(&erip->linklock));
1151 1154 }
1152 1155
1153 1156 static void
1154 1157 eri_loopback(struct eri *erip, queue_t *wq, mblk_t *mp)
1155 1158 {
1156 1159 struct iocblk *iocp = (void *)mp->b_rptr;
1157 1160 loopback_t *al;
1158 1161
1159 1162 if (mp->b_cont == NULL || MBLKL(mp->b_cont) < sizeof (loopback_t)) {
1160 1163 miocnak(wq, mp, 0, EINVAL);
1161 1164 return;
1162 1165 }
1163 1166
1164 1167 al = (void *)mp->b_cont->b_rptr;
1165 1168
1166 1169 switch (iocp->ioc_cmd) {
1167 1170 case ERI_SET_LOOP_MODE:
1168 1171 switch (al->loopback) {
1169 1172 case ERI_LOOPBACK_OFF:
1170 1173 erip->flags &= (~ERI_MACLOOPBACK & ~ERI_SERLOOPBACK);
1171 1174 /* force link status to go down */
1172 1175 param_linkup = 0;
1173 1176 erip->stats.link_up = LINK_STATE_DOWN;
1174 1177 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1175 1178 (void) eri_init(erip);
1176 1179 break;
1177 1180
1178 1181 case ERI_MAC_LOOPBACK_ON:
1179 1182 erip->flags |= ERI_MACLOOPBACK;
1180 1183 erip->flags &= ~ERI_SERLOOPBACK;
1181 1184 param_linkup = 0;
1182 1185 erip->stats.link_up = LINK_STATE_DOWN;
1183 1186 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1184 1187 (void) eri_init(erip);
1185 1188 break;
1186 1189
1187 1190 case ERI_PCS_LOOPBACK_ON:
1188 1191 break;
1189 1192
1190 1193 case ERI_SER_LOOPBACK_ON:
1191 1194 erip->flags |= ERI_SERLOOPBACK;
1192 1195 erip->flags &= ~ERI_MACLOOPBACK;
1193 1196 /* force link status to go down */
1194 1197 param_linkup = 0;
1195 1198 erip->stats.link_up = LINK_STATE_DOWN;
1196 1199 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1197 1200 (void) eri_init(erip);
1198 1201 break;
1199 1202
1200 1203 default:
1201 1204 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
1202 1205 loopback_val_default);
1203 1206 miocnak(wq, mp, 0, EINVAL);
1204 1207 return;
1205 1208 }
1206 1209 miocnak(wq, mp, 0, 0);
1207 1210 break;
1208 1211
1209 1212 case ERI_GET_LOOP_MODE:
1210 1213 al->loopback = ERI_MAC_LOOPBACK_ON | ERI_PCS_LOOPBACK_ON |
1211 1214 ERI_SER_LOOPBACK_ON;
1212 1215 miocack(wq, mp, sizeof (loopback_t), 0);
1213 1216 break;
1214 1217
1215 1218 default:
1216 1219 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1217 1220 loopback_cmd_default);
1218 1221 }
1219 1222 }
1220 1223
1221 1224 static int
1222 1225 eri_m_promisc(void *arg, boolean_t on)
1223 1226 {
1224 1227 struct eri *erip = arg;
1225 1228
1226 1229 mutex_enter(&erip->intrlock);
1227 1230 erip->promisc = on;
1228 1231 eri_init_rx(erip);
1229 1232 mutex_exit(&erip->intrlock);
1230 1233 return (0);
1231 1234 }
1232 1235
1233 1236 /*
1234 1237 * This is to support an unlimited number of
1235 1238 * multicast group members.
1236 1239 */
1237 1240 static int
1238 1241 eri_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1239 1242 {
1240 1243 struct eri *erip = arg;
1241 1244 uint32_t ladrf_bit;
1242 1245
1243 1246 /*
1244 1247 * If this address's bit was not already set in the local address
1245 1248 * filter, add it and re-initialize the Hardware.
1246 1249 */
1247 1250 ladrf_bit = eri_ladrf_bit(mca);
1248 1251
1249 1252 mutex_enter(&erip->intrlock);
1250 1253 if (add) {
1251 1254 erip->ladrf_refcnt[ladrf_bit]++;
1252 1255 if (erip->ladrf_refcnt[ladrf_bit] == 1) {
1253 1256 LADRF_SET(erip, ladrf_bit);
1254 1257 erip->multi_refcnt++;
1255 1258 eri_init_rx(erip);
1256 1259 }
1257 1260 } else {
1258 1261 erip->ladrf_refcnt[ladrf_bit]--;
1259 1262 if (erip->ladrf_refcnt[ladrf_bit] == 0) {
1260 1263 LADRF_CLR(erip, ladrf_bit);
1261 1264 erip->multi_refcnt--;
1262 1265 eri_init_rx(erip);
1263 1266 }
1264 1267 }
1265 1268 mutex_exit(&erip->intrlock);
1266 1269 return (0);
1267 1270 }
1268 1271
1269 1272 static int
1270 1273 eri_m_unicst(void *arg, const uint8_t *macaddr)
1271 1274 {
1272 1275 struct eri *erip = arg;
1273 1276
1274 1277 /*
1275 1278 * Set new interface local address and re-init device.
1276 1279 * This is destructive to any other streams attached
1277 1280 * to this device.
1278 1281 */
1279 1282 mutex_enter(&erip->intrlock);
1280 1283 bcopy(macaddr, &erip->ouraddr, ETHERADDRL);
1281 1284 eri_init_rx(erip);
1282 1285 mutex_exit(&erip->intrlock);
1283 1286 return (0);
1284 1287 }
1285 1288
1286 1289 /*ARGSUSED*/
1287 1290 static boolean_t
1288 1291 eri_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1289 1292 {
1290 1293 switch (cap) {
1291 1294 case MAC_CAPAB_HCKSUM: {
1292 1295 uint32_t *hcksum_txflags = cap_data;
1293 1296 *hcksum_txflags = HCKSUM_INET_PARTIAL;
1294 1297 return (B_TRUE);
1295 1298 }
1296 1299 default:
1297 1300 return (B_FALSE);
1298 1301 }
1299 1302 }
1300 1303
1301 1304 static int
1302 1305 eri_m_start(void *arg)
1303 1306 {
1304 1307 struct eri *erip = arg;
1305 1308
1306 1309 mutex_enter(&erip->intrlock);
1307 1310 erip->flags |= ERI_STARTED;
1308 1311 mutex_exit(&erip->intrlock);
1309 1312
1310 1313 if (!eri_init(erip)) {
1311 1314 mutex_enter(&erip->intrlock);
1312 1315 erip->flags &= ~ERI_STARTED;
1313 1316 mutex_exit(&erip->intrlock);
1314 1317 return (EIO);
1315 1318 }
1316 1319 return (0);
1317 1320 }
1318 1321
1319 1322 static void
1320 1323 eri_m_stop(void *arg)
1321 1324 {
1322 1325 struct eri *erip = arg;
1323 1326
1324 1327 mutex_enter(&erip->intrlock);
1325 1328 erip->flags &= ~ERI_STARTED;
1326 1329 mutex_exit(&erip->intrlock);
1327 1330 eri_uninit(erip);
1328 1331 }
1329 1332
1330 1333 static int
1331 1334 eri_m_stat(void *arg, uint_t stat, uint64_t *val)
1332 1335 {
1333 1336 struct eri *erip = arg;
1334 1337 struct stats *esp;
1335 1338 boolean_t macupdate = B_FALSE;
1336 1339
1337 1340 esp = &erip->stats;
1338 1341
1339 1342 mutex_enter(&erip->xmitlock);
1340 1343 if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
1341 1344 erip->tx_completion =
1342 1345 GET_ETXREG(tx_completion) & ETX_COMPLETION_MASK;
1343 1346 macupdate |= eri_reclaim(erip, erip->tx_completion);
1344 1347 }
1345 1348 mutex_exit(&erip->xmitlock);
1346 1349 if (macupdate)
1347 1350 mac_tx_update(erip->mh);
1348 1351
1349 1352 eri_savecntrs(erip);
1350 1353
1351 1354 switch (stat) {
1352 1355 case MAC_STAT_IFSPEED:
1353 1356 *val = esp->ifspeed * 1000000ULL;
1354 1357 break;
1355 1358 case MAC_STAT_MULTIRCV:
1356 1359 *val = esp->multircv;
1357 1360 break;
1358 1361 case MAC_STAT_BRDCSTRCV:
1359 1362 *val = esp->brdcstrcv;
1360 1363 break;
1361 1364 case MAC_STAT_IPACKETS:
1362 1365 *val = esp->ipackets64;
1363 1366 break;
1364 1367 case MAC_STAT_RBYTES:
1365 1368 *val = esp->rbytes64;
1366 1369 break;
1367 1370 case MAC_STAT_OBYTES:
1368 1371 *val = esp->obytes64;
1369 1372 break;
1370 1373 case MAC_STAT_OPACKETS:
1371 1374 *val = esp->opackets64;
1372 1375 break;
1373 1376 case MAC_STAT_IERRORS:
1374 1377 *val = esp->ierrors;
1375 1378 break;
1376 1379 case MAC_STAT_OERRORS:
1377 1380 *val = esp->oerrors;
1378 1381 break;
1379 1382 case MAC_STAT_MULTIXMT:
1380 1383 *val = esp->multixmt;
1381 1384 break;
1382 1385 case MAC_STAT_BRDCSTXMT:
1383 1386 *val = esp->brdcstxmt;
1384 1387 break;
1385 1388 case MAC_STAT_NORCVBUF:
1386 1389 *val = esp->norcvbuf;
1387 1390 break;
1388 1391 case MAC_STAT_NOXMTBUF:
1389 1392 *val = esp->noxmtbuf;
1390 1393 break;
1391 1394 case MAC_STAT_UNDERFLOWS:
1392 1395 *val = esp->txmac_urun;
1393 1396 break;
1394 1397 case MAC_STAT_OVERFLOWS:
1395 1398 *val = esp->rx_overflow;
1396 1399 break;
1397 1400 case MAC_STAT_COLLISIONS:
1398 1401 *val = esp->collisions;
1399 1402 break;
1400 1403 case ETHER_STAT_ALIGN_ERRORS:
1401 1404 *val = esp->rx_align_err;
1402 1405 break;
1403 1406 case ETHER_STAT_FCS_ERRORS:
1404 1407 *val = esp->rx_crc_err;
1405 1408 break;
1406 1409 case ETHER_STAT_EX_COLLISIONS:
1407 1410 *val = esp->excessive_coll;
1408 1411 break;
1409 1412 case ETHER_STAT_TX_LATE_COLLISIONS:
1410 1413 *val = esp->late_coll;
1411 1414 break;
1412 1415 case ETHER_STAT_FIRST_COLLISIONS:
1413 1416 *val = esp->first_coll;
1414 1417 break;
1415 1418 case ETHER_STAT_LINK_DUPLEX:
1416 1419 *val = esp->link_duplex;
1417 1420 break;
1418 1421 case ETHER_STAT_TOOLONG_ERRORS:
1419 1422 *val = esp->rx_toolong_pkts;
1420 1423 break;
1421 1424 case ETHER_STAT_TOOSHORT_ERRORS:
1422 1425 *val = esp->rx_runt;
1423 1426 break;
1424 1427
1425 1428 case ETHER_STAT_XCVR_ADDR:
1426 1429 *val = erip->phyad;
1427 1430 break;
1428 1431
1429 1432 case ETHER_STAT_XCVR_INUSE:
1430 1433 *val = XCVR_100X; /* should always be 100X for now */
1431 1434 break;
1432 1435
1433 1436 case ETHER_STAT_CAP_100FDX:
1434 1437 *val = param_bmsr_100fdx;
1435 1438 break;
1436 1439 case ETHER_STAT_CAP_100HDX:
1437 1440 *val = param_bmsr_100hdx;
1438 1441 break;
1439 1442 case ETHER_STAT_CAP_10FDX:
1440 1443 *val = param_bmsr_10fdx;
1441 1444 break;
1442 1445 case ETHER_STAT_CAP_10HDX:
1443 1446 *val = param_bmsr_10hdx;
1444 1447 break;
1445 1448 case ETHER_STAT_CAP_AUTONEG:
1446 1449 *val = param_bmsr_ancap;
1447 1450 break;
1448 1451 case ETHER_STAT_CAP_ASMPAUSE:
1449 1452 *val = param_bmsr_asm_dir;
1450 1453 break;
1451 1454 case ETHER_STAT_CAP_PAUSE:
1452 1455 *val = param_bmsr_pause;
1453 1456 break;
1454 1457 case ETHER_STAT_ADV_CAP_100FDX:
1455 1458 *val = param_anar_100fdx;
1456 1459 break;
1457 1460 case ETHER_STAT_ADV_CAP_100HDX:
1458 1461 *val = param_anar_100hdx;
1459 1462 break;
1460 1463 case ETHER_STAT_ADV_CAP_10FDX:
1461 1464 *val = param_anar_10fdx;
1462 1465 break;
1463 1466 case ETHER_STAT_ADV_CAP_10HDX:
1464 1467 *val = param_anar_10hdx;
1465 1468 break;
1466 1469 case ETHER_STAT_ADV_CAP_AUTONEG:
1467 1470 *val = param_autoneg;
1468 1471 break;
1469 1472 case ETHER_STAT_ADV_CAP_ASMPAUSE:
1470 1473 *val = param_anar_asm_dir;
1471 1474 break;
1472 1475 case ETHER_STAT_ADV_CAP_PAUSE:
1473 1476 *val = param_anar_pause;
1474 1477 break;
1475 1478 case ETHER_STAT_LP_CAP_100FDX:
1476 1479 *val = param_anlpar_100fdx;
1477 1480 break;
1478 1481 case ETHER_STAT_LP_CAP_100HDX:
1479 1482 *val = param_anlpar_100hdx;
1480 1483 break;
1481 1484 case ETHER_STAT_LP_CAP_10FDX:
1482 1485 *val = param_anlpar_10fdx;
1483 1486 break;
1484 1487 case ETHER_STAT_LP_CAP_10HDX:
1485 1488 *val = param_anlpar_10hdx;
1486 1489 break;
1487 1490 case ETHER_STAT_LP_CAP_AUTONEG:
1488 1491 *val = param_aner_lpancap;
1489 1492 break;
1490 1493 case ETHER_STAT_LP_CAP_ASMPAUSE:
1491 1494 *val = param_anlpar_pauseTX;
1492 1495 break;
1493 1496 case ETHER_STAT_LP_CAP_PAUSE:
1494 1497 *val = param_anlpar_pauseRX;
1495 1498 break;
1496 1499 case ETHER_STAT_LINK_PAUSE:
1497 1500 *val = esp->pausing;
1498 1501 break;
1499 1502 case ETHER_STAT_LINK_ASMPAUSE:
1500 1503 *val = param_anar_asm_dir &&
1501 1504 param_anlpar_pauseTX &&
1502 1505 (param_anar_pause != param_anlpar_pauseRX);
1503 1506 break;
1504 1507 case ETHER_STAT_LINK_AUTONEG:
1505 1508 *val = param_autoneg && param_aner_lpancap;
1506 1509 break;
1507 1510 }
1508 1511 return (0);
1509 1512 }
1510 1513
1511 1514 /*
1512 1515 * Hardware Functions
1513 1516 * New Section
1514 1517 */
1515 1518
1516 1519 /*
1517 1520 * Initialize the MAC registers. Some of the MAC registers are initialized
1518 1521 * just once since Global Reset or MAC reset doesn't clear them. Others (like
1519 1522 * Host MAC Address Registers) are cleared on every reset and have to be
1520 1523 * reinitialized.
1521 1524 */
1522 1525 static void
1523 1526 eri_init_macregs_generic(struct eri *erip)
1524 1527 {
1525 1528 /*
1526 1529 * set up the MAC parameter registers once
1527 1530 * after power cycle. SUSPEND/RESUME also requires
1528 1531 * setting these registers.
1529 1532 */
1530 1533 if ((erip->stats.inits == 1) || (erip->init_macregs)) {
1531 1534 erip->init_macregs = 0;
1532 1535 PUT_MACREG(ipg0, param_ipg0);
1533 1536 PUT_MACREG(ipg1, param_ipg1);
1534 1537 PUT_MACREG(ipg2, param_ipg2);
1535 1538 PUT_MACREG(macmin, BMAC_MIN_FRAME_SIZE);
1536 1539 #ifdef ERI_RX_TAG_ERROR_WORKAROUND
1537 1540 PUT_MACREG(macmax, BMAC_MAX_FRAME_SIZE_TAG | BMAC_MAX_BURST);
1538 1541 #else
1539 1542 PUT_MACREG(macmax, BMAC_MAX_FRAME_SIZE | BMAC_MAX_BURST);
1540 1543 #endif
1541 1544 PUT_MACREG(palen, BMAC_PREAMBLE_SIZE);
1542 1545 PUT_MACREG(jam, BMAC_JAM_SIZE);
1543 1546 PUT_MACREG(alimit, BMAC_ATTEMPT_LIMIT);
1544 1547 PUT_MACREG(macctl_type, BMAC_CONTROL_TYPE);
1545 1548 PUT_MACREG(rseed,
1546 1549 ((erip->ouraddr[0] & 0x3) << 8) | erip->ouraddr[1]);
1547 1550
1548 1551 PUT_MACREG(madd3, BMAC_ADDRESS_3);
1549 1552 PUT_MACREG(madd4, BMAC_ADDRESS_4);
1550 1553 PUT_MACREG(madd5, BMAC_ADDRESS_5);
1551 1554
1552 1555 /* Program MAC Control address */
1553 1556 PUT_MACREG(madd6, BMAC_ADDRESS_6);
1554 1557 PUT_MACREG(madd7, BMAC_ADDRESS_7);
1555 1558 PUT_MACREG(madd8, BMAC_ADDRESS_8);
1556 1559
1557 1560 PUT_MACREG(afr0, BMAC_AF_0);
1558 1561 PUT_MACREG(afr1, BMAC_AF_1);
1559 1562 PUT_MACREG(afr2, BMAC_AF_2);
1560 1563 PUT_MACREG(afmr1_2, BMAC_AF21_MASK);
1561 1564 PUT_MACREG(afmr0, BMAC_AF0_MASK);
1562 1565 }
1563 1566
1564 1567 /* The counters need to be zeroed */
1565 1568 PUT_MACREG(nccnt, 0);
1566 1569 PUT_MACREG(fccnt, 0);
1567 1570 PUT_MACREG(excnt, 0);
1568 1571 PUT_MACREG(ltcnt, 0);
1569 1572 PUT_MACREG(dcnt, 0);
1570 1573 PUT_MACREG(frcnt, 0);
1571 1574 PUT_MACREG(lecnt, 0);
1572 1575 PUT_MACREG(aecnt, 0);
1573 1576 PUT_MACREG(fecnt, 0);
1574 1577 PUT_MACREG(rxcv, 0);
1575 1578
1576 1579 if (erip->pauseTX)
1577 1580 PUT_MACREG(spcmd, BMAC_SEND_PAUSE_CMD);
1578 1581 else
1579 1582 PUT_MACREG(spcmd, 0);
1580 1583
1581 1584 /*
1582 1585 * Program BigMAC with local individual ethernet address.
1583 1586 */
1584 1587
1585 1588 PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]);
1586 1589 PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]);
1587 1590 PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]);
1588 1591
1589 1592 /*
1590 1593 * Install multicast address filter.
1591 1594 */
1592 1595
1593 1596 PUT_MACREG(hash0, erip->ladrf[0]);
1594 1597 PUT_MACREG(hash1, erip->ladrf[1]);
1595 1598 PUT_MACREG(hash2, erip->ladrf[2]);
1596 1599 PUT_MACREG(hash3, erip->ladrf[3]);
1597 1600 PUT_MACREG(hash4, erip->ladrf[4]);
1598 1601 PUT_MACREG(hash5, erip->ladrf[5]);
1599 1602 PUT_MACREG(hash6, erip->ladrf[6]);
1600 1603 PUT_MACREG(hash7, erip->ladrf[7]);
1601 1604 PUT_MACREG(hash8, erip->ladrf[8]);
1602 1605 PUT_MACREG(hash9, erip->ladrf[9]);
1603 1606 PUT_MACREG(hash10, erip->ladrf[10]);
1604 1607 PUT_MACREG(hash11, erip->ladrf[11]);
1605 1608 PUT_MACREG(hash12, erip->ladrf[12]);
1606 1609 PUT_MACREG(hash13, erip->ladrf[13]);
1607 1610 PUT_MACREG(hash14, erip->ladrf[14]);
1608 1611 }
1609 1612
1610 1613 static int
1611 1614 eri_flush_rxbufs(struct eri *erip)
1612 1615 {
1613 1616 uint_t i;
1614 1617 int status = 0;
1615 1618 /*
1616 1619 * Free and dvma_unload pending recv buffers.
1617 1620 * Maintaining the 1-to-1 ordered sequence of
1618 1621 * dvma_load() followed by dvma_unload() is critical.
1619 1622 * Always unload anything before loading it again.
1620 1623 * Never unload anything twice. Always unload
1621 1624 * before freeing the buffer. We satisfy these
1622 1625 * requirements by unloading only those descriptors
1623 1626 * which currently have an mblk associated with them.
1624 1627 */
1625 1628 for (i = 0; i < ERI_RPENDING; i++) {
1626 1629 if (erip->rmblkp[i]) {
1627 1630 if (erip->eri_dvmarh)
1628 1631 dvma_unload(erip->eri_dvmarh, 2 * i,
1629 1632 DDI_DMA_SYNC_FORCPU);
1630 1633 else if ((ddi_dma_unbind_handle(erip->ndmarh[i]) ==
1631 1634 DDI_FAILURE))
1632 1635 status = -1;
1633 1636 freeb(erip->rmblkp[i]);
1634 1637 erip->rmblkp[i] = NULL;
1635 1638 }
1636 1639 }
1637 1640 return (status);
1638 1641 }
1639 1642
1640 1643 static void
1641 1644 eri_init_txbufs(struct eri *erip)
1642 1645 {
1643 1646 /*
1644 1647 * Clear TX descriptors.
1645 1648 */
1646 1649 bzero((caddr_t)erip->eri_tmdp, ERI_TPENDING * sizeof (struct eri_tmd));
1647 1650
1648 1651 /*
1649 1652 * sync TXDMA descriptors.
1650 1653 */
1651 1654 ERI_SYNCIOPB(erip, erip->eri_tmdp,
1652 1655 (ERI_TPENDING * sizeof (struct eri_tmd)), DDI_DMA_SYNC_FORDEV);
1653 1656 /*
1654 1657 * Reset TMD 'walking' pointers.
1655 1658 */
1656 1659 erip->tcurp = erip->eri_tmdp;
1657 1660 erip->tnextp = erip->eri_tmdp;
1658 1661 erip->tx_cur_cnt = 0;
1659 1662 erip->tx_kick = 0;
1660 1663 erip->tx_completion = 0;
1661 1664 }
1662 1665
1663 1666 static int
1664 1667 eri_init_rxbufs(struct eri *erip)
1665 1668 {
1666 1669
1667 1670 ddi_dma_cookie_t dma_cookie;
1668 1671 mblk_t *bp;
1669 1672 int i, status = 0;
1670 1673 uint32_t ccnt;
1671 1674
1672 1675 /*
1673 1676 * clear rcv descriptors
1674 1677 */
1675 1678 bzero((caddr_t)erip->rmdp, ERI_RPENDING * sizeof (struct rmd));
1676 1679
1677 1680 for (i = 0; i < ERI_RPENDING; i++) {
1678 1681 if ((bp = eri_allocb(ERI_BUFSIZE)) == NULL) {
1679 1682 status = -1;
1680 1683 continue;
1681 1684 }
1682 1685 /* Load data buffer to DVMA space */
1683 1686 if (erip->eri_dvmarh)
1684 1687 dvma_kaddr_load(erip->eri_dvmarh,
1685 1688 (caddr_t)bp->b_rptr, ERI_BUFSIZE,
1686 1689 2 * i, &dma_cookie);
1687 1690 /*
1688 1691 * Bind data buffer to DMA handle
1689 1692 */
1690 1693 else if (ddi_dma_addr_bind_handle(erip->ndmarh[i], NULL,
1691 1694 (caddr_t)bp->b_rptr, ERI_BUFSIZE,
1692 1695 DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1693 1696 &dma_cookie, &ccnt) != DDI_DMA_MAPPED)
1694 1697 status = -1;
1695 1698
1696 1699 PUT_RMD((&erip->rmdp[i]), dma_cookie);
1697 1700 erip->rmblkp[i] = bp; /* save for later use */
1698 1701 }
1699 1702
1700 1703 /*
1701 1704 * sync RXDMA descriptors.
1702 1705 */
1703 1706 ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
1704 1707 DDI_DMA_SYNC_FORDEV);
1705 1708 /*
1706 1709 * Reset RMD 'walking' pointers.
1707 1710 */
1708 1711 erip->rnextp = erip->rmdp;
1709 1712 erip->rx_completion = 0;
1710 1713 erip->rx_kick = ERI_RPENDING - 4;
1711 1714 return (status);
1712 1715 }
1713 1716
1714 1717 static uint32_t
1715 1718 eri_txmac_disable(struct eri *erip)
1716 1719 {
1717 1720 int n;
1718 1721
1719 1722 PUT_MACREG(txcfg, GET_MACREG(txcfg) & ~BMAC_TXCFG_ENAB);
1720 1723 n = (BMACTXRSTDELAY * 10) / ERI_WAITPERIOD;
1721 1724
1722 1725 while (--n > 0) {
1723 1726 drv_usecwait(ERI_WAITPERIOD);
1724 1727 if ((GET_MACREG(txcfg) & 1) == 0)
1725 1728 return (0);
1726 1729 }
1727 1730 return (1);
1728 1731 }
1729 1732
1730 1733 static uint32_t
1731 1734 eri_rxmac_disable(struct eri *erip)
1732 1735 {
1733 1736 int n;
1734 1737 PUT_MACREG(rxcfg, GET_MACREG(rxcfg) & ~BMAC_RXCFG_ENAB);
1735 1738 n = BMACRXRSTDELAY / ERI_WAITPERIOD;
1736 1739
1737 1740 while (--n > 0) {
1738 1741 drv_usecwait(ERI_WAITPERIOD);
1739 1742 if ((GET_MACREG(rxcfg) & 1) == 0)
1740 1743 return (0);
1741 1744 }
1742 1745 return (1);
1743 1746 }
1744 1747
1745 1748 /*
1746 1749 * Return 0 upon success, 1 on failure.
1747 1750 */
1748 1751 static int
1749 1752 eri_stop(struct eri *erip)
1750 1753 {
1751 1754 (void) eri_erx_reset(erip);
1752 1755 (void) eri_etx_reset(erip);
1753 1756
1754 1757 /*
1755 1758 	 * Set the cache line size to 16 (64 bytes of PCI burst size).
1756 1759 */
1757 1760 PUT_SWRSTREG(reset, ERI_G_RESET_GLOBAL | ERI_CACHE_LINE_SIZE);
1758 1761
1759 1762 if (erip->linkcheck) {
1760 1763 erip->linkcheck = 0;
1761 1764 erip->global_reset_issued = 2;
1762 1765 } else {
1763 1766 param_linkup = 0;
1764 1767 erip->stats.link_up = LINK_STATE_DOWN;
1765 1768 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1766 1769 erip->global_reset_issued = -1;
1767 1770 }
1768 1771
1769 1772 ERI_DELAY((GET_SWRSTREG(reset) == ERI_CACHE_LINE_SIZE),
1770 1773 ERI_MAX_RST_DELAY);
1771 1774 erip->rx_reset_issued = -1;
1772 1775 erip->tx_reset_issued = -1;
1773 1776
1774 1777 /*
1775 1778 * workaround for RIO not resetting the interrupt mask
1776 1779 * register to default value 0xffffffff.
1777 1780 */
1778 1781 PUT_GLOBREG(intmask, ERI_G_MASK_ALL);
1779 1782
1780 1783 if (GET_SWRSTREG(reset) == ERI_CACHE_LINE_SIZE) {
1781 1784 return (0);
1782 1785 } else {
1783 1786 return (1);
1784 1787 }
1785 1788 }
1786 1789
1787 1790 /*
1788 1791 * Reset Just the RX Portion
1789 1792 * Return 0 upon success, 1 on failure.
1790 1793 *
1791 1794  * Resetting the rxdma while there is an rx dma transaction on the bus
1792 1795  * may cause a bus hang or parity errors. To avoid this, we first
1793 1796  * disable the rxdma by clearing the ENABLE bit (bit 0). To make sure it
1794 1797  * is disabled, we poll it until it really clears. Furthermore, to verify
1795 1798  * that any RX DMA activity has subsided, we delay for 5 msec.
1796 1799 */
1797 1800 static uint32_t
1798 1801 eri_erx_reset(struct eri *erip)
1799 1802 {
1800 1803 (void) eri_rxmac_disable(erip); /* Disable the RX MAC */
1801 1804
1802 1805 /* Disable the RX DMA */
1803 1806 PUT_ERXREG(config, GET_ERXREG(config) & ~GET_CONFIG_RXDMA_EN);
1804 1807 ERI_DELAY(((GET_ERXREG(config) & 1) == 0), ERI_MAX_RST_DELAY);
1805 1808 if ((GET_ERXREG(config) & 1) != 0)
1806 1809 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1807 1810 disable_erx_msg);
1808 1811
1809 1812 	drv_usecwait(5000); /* Delay to ensure no RX DMA activity */
1810 1813
1811 1814 PUT_SWRSTREG(reset, ERI_G_RESET_ERX | ERI_CACHE_LINE_SIZE);
1812 1815 /*
1813 1816 	 * Wait until the reset completes, which is indicated by
1814 1817 	 * the reset bit clearing, or we time out.
1815 1818 */
1816 1819 ERI_DELAY(((GET_SWRSTREG(reset) & (ERI_G_RESET_ERX)) ==
1817 1820 ERI_CACHE_LINE_SIZE), ERI_MAX_RST_DELAY);
1818 1821 erip->rx_reset_issued = -1;
1819 1822
1820 1823 return ((GET_SWRSTREG(reset) & (ERI_G_RESET_ERX)) ? 1 : 0);
1821 1824 }
1822 1825
1823 1826 /*
1824 1827 * Reset Just the TX Portion
1825 1828 * Return 0 upon success, 1 on failure.
1826 1829  * Resetting the txdma while there is a tx dma transaction on the bus may
1827 1830  * cause a bus hang or parity errors. To avoid this we first disable the
1828 1831  * txdma by clearing the ENABLE bit (bit 0). To make sure it is disabled,
1829 1832  * we poll it until it really clears. Furthermore, to ensure any TX DMA
1830 1833  * activity has subsided, we delay for 5 msec.
1831 1834 */
1832 1835 static uint32_t
1833 1836 eri_etx_reset(struct eri *erip)
1834 1837 {
1835 1838 (void) eri_txmac_disable(erip);
1836 1839
1837 1840 /* Disable the TX DMA */
1838 1841 PUT_ETXREG(config, GET_ETXREG(config) & ~GET_CONFIG_TXDMA_EN);
1839 1842 #ifdef ORIG
1840 1843 ERI_DELAY(((GET_ETXREG(config) & 1) == 0), ERI_MAX_RST_DELAY);
1841 1844 if ((GET_ETXREG(config) & 1) != 0)
1842 1845 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1843 1846 disable_etx_msg);
1844 1847 drv_usecwait(5000); /* Delay to ensure DMA completed (if any). */
1845 1848 #endif
1846 1849 drv_usecwait(5000); /* Delay to ensure DMA completed (if any). */
1847 1850 ERI_DELAY(((GET_ETXREG(config) & 1) == 0), ERI_MAX_RST_DELAY);
1848 1851 if ((GET_ETXREG(config) & 1) != 0)
1849 1852 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1850 1853 disable_etx_msg);
1851 1854
1852 1855 PUT_SWRSTREG(reset, ERI_G_RESET_ETX | ERI_CACHE_LINE_SIZE);
1853 1856
1854 1857 /*
1855 1858 	 * Wait until the reset completes, which is indicated by the reset bit
1856 1859 	 * clearing, or we time out.
1857 1860 */
1858 1861 ERI_DELAY(((GET_SWRSTREG(reset) & (ERI_G_RESET_ETX)) ==
1859 1862 ERI_CACHE_LINE_SIZE), ERI_MAX_RST_DELAY);
1860 1863 erip->tx_reset_issued = -1;
1861 1864
1862 1865 if (GET_SWRSTREG(reset) & (ERI_G_RESET_ETX)) {
1863 1866 return (1);
1864 1867 } else
1865 1868 return (0);
1866 1869 }
1867 1870
1868 1871
1869 1872 /*
1870 1873 * Initialize the TX DMA registers and Enable the TX DMA.
1871 1874 */
1872 1875 static uint32_t
1873 1876 eri_init_txregs(struct eri *erip)
1874 1877 {
1875 1878
1876 1879 uint32_t i;
1877 1880 uint64_t tx_ring;
1878 1881
1879 1882 /*
1880 1883 * Initialize ETX Registers:
1881 1884 * config, txring_lo, txring_hi
1882 1885 */
1883 1886 tx_ring = ERI_IOPBIOADDR(erip, erip->eri_tmdp);
1884 1887 PUT_ETXREG(txring_lo, (uint32_t)(tx_ring));
1885 1888 PUT_ETXREG(txring_hi, (uint32_t)(tx_ring >> 32));
1886 1889
1887 1890 /*
1888 1891 * Get TX Ring Size Masks.
1889 1892 * The ring size ERI_TPENDING is defined in eri_mac.h.
1890 1893 */
1891 1894 switch (ERI_TPENDING) {
1892 1895 case 32: i = ETX_RINGSZ_32;
1893 1896 break;
1894 1897 case 64: i = ETX_RINGSZ_64;
1895 1898 break;
1896 1899 case 128: i = ETX_RINGSZ_128;
1897 1900 break;
1898 1901 case 256: i = ETX_RINGSZ_256;
1899 1902 break;
1900 1903 case 512: i = ETX_RINGSZ_512;
1901 1904 break;
1902 1905 case 1024: i = ETX_RINGSZ_1024;
1903 1906 break;
1904 1907 case 2048: i = ETX_RINGSZ_2048;
1905 1908 break;
1906 1909 case 4096: i = ETX_RINGSZ_4096;
1907 1910 break;
1908 1911 default:
1909 1912 ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
1910 1913 unk_tx_descr_sze_msg, ERI_TPENDING);
1911 1914 return (1);
1912 1915 }
1913 1916
1914 1917 i <<= ERI_TX_RINGSZ_SHIFT;
1915 1918 PUT_ETXREG(config, ETX_CONFIG_THRESHOLD | i);
1916 1919 ENABLE_TXDMA(erip);
1917 1920 ENABLE_MAC(erip);
1918 1921 return (0);
1919 1922 }
1920 1923
1921 1924
1922 1925 /*
1923 1926 * Initialize the RX DMA registers and Enable the RX DMA.
1924 1927 */
1925 1928 static uint32_t
1926 1929 eri_init_rxregs(struct eri *erip)
1927 1930 {
1928 1931 int i;
1929 1932 uint64_t rx_ring;
1930 1933
1931 1934 /*
1932 1935 * Initialize ERX Registers:
1933 1936 * rxring_lo, rxring_hi, config, rx_blanking, rx_pause_threshold.
1934 1937 * Also, rx_kick
1935 1938 * Read and save rxfifo_size.
1936 1939 * XXX: Use this to properly configure PAUSE threshold values.
1937 1940 */
1938 1941 rx_ring = ERI_IOPBIOADDR(erip, erip->rmdp);
1939 1942 PUT_ERXREG(rxring_lo, (uint32_t)(rx_ring));
1940 1943 PUT_ERXREG(rxring_hi, (uint32_t)(rx_ring >> 32));
1941 1944 PUT_ERXREG(rx_kick, erip->rx_kick);
1942 1945
1943 1946 /*
1944 1947 	 * The max ring size, ERI_RMDMAX, is defined in eri_mac.h.
1945 1948 	 * A larger ERI_RPENDING provides better performance but requires
1946 1949 	 * more system DVMA memory.
1947 1950 	 * eri_rx_ring_size can be used to tune this value from /etc/system.
1948 1951 	 * eri_rx_ring_size cannot be made NDD-tunable because of
1949 1952 	 * non-recoverable errors which cannot be detected from NDD operations.
1950 1953 */
1951 1954
1952 1955 /*
1953 1956 * get the rxring size bits
1954 1957 */
1955 1958 switch (ERI_RPENDING) {
1956 1959 case 32: i = ERX_RINGSZ_32;
1957 1960 break;
1958 1961 case 64: i = ERX_RINGSZ_64;
1959 1962 break;
1960 1963 case 128: i = ERX_RINGSZ_128;
1961 1964 break;
1962 1965 case 256: i = ERX_RINGSZ_256;
1963 1966 break;
1964 1967 case 512: i = ERX_RINGSZ_512;
1965 1968 break;
1966 1969 case 1024: i = ERX_RINGSZ_1024;
1967 1970 break;
1968 1971 case 2048: i = ERX_RINGSZ_2048;
1969 1972 break;
1970 1973 case 4096: i = ERX_RINGSZ_4096;
1971 1974 break;
1972 1975 default:
1973 1976 ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
1974 1977 unk_rx_descr_sze_msg, ERI_RPENDING);
1975 1978 return (1);
1976 1979 }
1977 1980
1978 1981 i <<= ERI_RX_RINGSZ_SHIFT;
1979 1982 i |= (ERI_FSTBYTE_OFFSET << ERI_RX_CONFIG_FBO_SHIFT) |
1980 1983 (ETHERHEADER_SIZE << ERI_RX_CONFIG_RX_CSSTART_SHIFT) |
1981 1984 (ERI_RX_FIFOTH_1024 << ERI_RX_CONFIG_RXFIFOTH_SHIFT);
1982 1985
1983 1986 PUT_ERXREG(config, i);
1984 1987 PUT_ERXREG(rx_blanking,
1985 1988 (param_intr_blank_time << ERI_RX_BLNK_INTR_TIME_SHIFT) |
1986 1989 param_intr_blank_packets);
1987 1990
1988 1991 PUT_ERXREG(rx_pause_threshold, rx_pause_threshold);
1989 1992 erip->rxfifo_size = GET_ERXREG(rxfifo_size);
1990 1993 ENABLE_RXDMA(erip);
1991 1994 return (0);
1992 1995 }
1993 1996
1994 1997 static int
1995 1998 eri_freebufs(struct eri *erip)
1996 1999 {
1997 2000 int status = 0;
1998 2001
1999 2002 status = eri_flush_rxbufs(erip);
2000 2003 return (status);
2001 2004 }
2002 2005
2003 2006 static void
2004 2007 eri_update_rxbufs(struct eri *erip)
2005 2008 {
2006 2009 int i;
2007 2010 volatile struct rmd *rmdp, *rmdpbase;
2008 2011
2009 2012 /*
2010 2013 * Hang out receive buffers.
2011 2014 */
2012 2015 rmdpbase = erip->rmdp;
2013 2016 for (i = 0; i < ERI_RPENDING; i++) {
2014 2017 rmdp = rmdpbase + i;
2015 2018 UPDATE_RMD(rmdp);
2016 2019 }
2017 2020
2018 2021 /*
2019 2022 * sync RXDMA descriptors.
2020 2023 */
2021 2024 ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
2022 2025 DDI_DMA_SYNC_FORDEV);
2023 2026 /*
2024 2027 * Reset RMD 'walking' pointers.
2025 2028 */
2026 2029 erip->rnextp = erip->rmdp;
2027 2030 erip->rx_completion = 0;
2028 2031 erip->rx_kick = ERI_RPENDING - 4;
2029 2032 }
2030 2033
2031 2034 /*
2032 2035 * This routine is used to reset the RX DMA only. In the case of RX
2033 2036  * failures such as an RX tag error, RX hang, etc., we don't want to
2034 2037  * do a global reset, which takes down the link and clears the FIFOs.
2035 2038  * By doing an RX-only reset, we leave the TX side and the link intact.
2036 2039 */
2037 2040 static uint32_t
2038 2041 eri_init_rx_channel(struct eri *erip)
2039 2042 {
2040 2043 erip->flags &= ~ERI_RXINIT;
2041 2044 (void) eri_erx_reset(erip);
2042 2045 eri_update_rxbufs(erip);
2043 2046 if (eri_init_rxregs(erip))
2044 2047 return (1);
2045 2048 PUT_MACREG(rxmask, BMAC_RXINTR_MASK);
2046 2049 PUT_MACREG(rxcfg, GET_MACREG(rxcfg) | BMAC_RXCFG_ENAB);
2047 2050 erip->rx_reset_issued = 0;
2048 2051 HSTAT(erip, rx_inits);
2049 2052 erip->flags |= ERI_RXINIT;
2050 2053 return (0);
2051 2054 }
2052 2055
2053 2056 static void
2054 2057 eri_init_rx(struct eri *erip)
2055 2058 {
2056 2059 uint16_t *ladrf;
2057 2060
2058 2061 /*
2059 2062 	 * First of all, make sure the Receive MAC is stopped.
2060 2063 */
2061 2064 (void) eri_rxmac_disable(erip); /* Disable the RX MAC */
2062 2065
2063 2066 /*
2064 2067 * Program BigMAC with local individual ethernet address.
2065 2068 */
2066 2069
2067 2070 PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]);
2068 2071 PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]);
2069 2072 PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]);
2070 2073
2071 2074 /*
2072 2075 * Set up multicast address filter by passing all multicast
2073 2076 * addresses through a crc generator, and then using the
2074 2077 	 * low order 8 bits as an index into the 256-bit logical
2075 2078 * address filter. The high order four bits select the word,
2076 2079 * while the rest of the bits select the bit within the word.
2077 2080 */
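	/*
	 * A sketch of how one filter bit gets set, assuming the hash
	 * is the low-order 8 bits of the Ethernet CRC of the address;
	 * ladrf_set_bit() is a hypothetical helper, not a driver
	 * function:
	 *
	 *	static void
	 *	ladrf_set_bit(uint16_t *ladrf, uint32_t crc)
	 *	{
	 *		uint8_t hash = crc & 0xff;
	 *
	 *		ladrf[hash >> 4] |= 1 << (hash & 0xf);
	 *	}
	 */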
2078 2081
2079 2082 ladrf = erip->ladrf;
2080 2083
2081 2084 PUT_MACREG(hash0, ladrf[0]);
2082 2085 PUT_MACREG(hash1, ladrf[1]);
2083 2086 PUT_MACREG(hash2, ladrf[2]);
2084 2087 PUT_MACREG(hash3, ladrf[3]);
2085 2088 PUT_MACREG(hash4, ladrf[4]);
2086 2089 PUT_MACREG(hash5, ladrf[5]);
2087 2090 PUT_MACREG(hash6, ladrf[6]);
2088 2091 PUT_MACREG(hash7, ladrf[7]);
2089 2092 PUT_MACREG(hash8, ladrf[8]);
2090 2093 PUT_MACREG(hash9, ladrf[9]);
2091 2094 PUT_MACREG(hash10, ladrf[10]);
2092 2095 PUT_MACREG(hash11, ladrf[11]);
2093 2096 PUT_MACREG(hash12, ladrf[12]);
2094 2097 PUT_MACREG(hash13, ladrf[13]);
2095 2098 PUT_MACREG(hash14, ladrf[14]);
2096 2099 PUT_MACREG(hash15, ladrf[15]);
2097 2100
2098 2101 #ifdef ERI_DONT_STRIP_CRC
2099 2102 PUT_MACREG(rxcfg,
2100 2103 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2101 2104 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2102 2105 BMAC_RXCFG_ENAB));
2103 2106 #else
2104 2107 PUT_MACREG(rxcfg,
2105 2108 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2106 2109 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2107 2110 BMAC_RXCFG_ENAB | BMAC_RXCFG_STRIP_CRC));
2108 2111 #endif
2109 2112 /* wait after setting Hash Enable bit */
2110 2113 /* drv_usecwait(10); */
2111 2114
2112 2115 HSTAT(erip, rx_inits);
2113 2116 }
2114 2117
2115 2118 /*
2116 2119 * This routine is used to init the TX MAC only.
2117 2120 * &erip->xmitlock is held before calling this routine.
2118 2121 */
2119 2122 void
2120 2123 eri_init_txmac(struct eri *erip)
2121 2124 {
2122 2125 uint32_t carrier_ext = 0;
2123 2126
2124 2127 erip->flags &= ~ERI_TXINIT;
2125 2128 /*
2126 2129 * Stop the Transmit MAC.
2127 2130 */
2128 2131 (void) eri_txmac_disable(erip);
2129 2132
2130 2133 /*
2131 2134 * Must be Internal Transceiver
2132 2135 */
2133 2136 if (param_mode)
2134 2137 PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2135 2138 BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE));
2136 2139 else
2137 2140 PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2138 2141 BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE |
2139 2142 BMAC_XIFC_DIS_ECHO));
2140 2143
2141 2144 /*
2142 2145 * Initialize the interpacket gap registers
2143 2146 */
2144 2147 PUT_MACREG(ipg1, param_ipg1);
2145 2148 PUT_MACREG(ipg2, param_ipg2);
2146 2149
2147 2150 if (erip->ngu_enable)
2148 2151 PUT_MACREG(txcfg, ((param_mode ? BMAC_TXCFG_FDX: 0) |
2149 2152 ((param_lance_mode && (erip->lance_mode_enable)) ?
2150 2153 BMAC_TXCFG_ENIPG0 : 0) |
2151 2154 (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) |
2152 2155 BMAC_TXCFG_NGU));
2153 2156 else
2154 2157 PUT_MACREG(txcfg, ((param_mode ? BMAC_TXCFG_FDX: 0) |
2155 2158 ((param_lance_mode && (erip->lance_mode_enable)) ?
2156 2159 BMAC_TXCFG_ENIPG0 : 0) |
2157 2160 (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0)));
2158 2161
2159 2162 ENABLE_TXDMA(erip);
2160 2163 ENABLE_TXMAC(erip);
2161 2164
2162 2165 HSTAT(erip, tx_inits);
2163 2166 erip->flags |= ERI_TXINIT;
2164 2167 }
2165 2168
2166 2169 static void
2167 2170 eri_unallocthings(struct eri *erip)
2168 2171 {
2169 2172 uint32_t flag;
2170 2173 uint32_t i;
2171 2174
2172 2175 flag = erip->alloc_flag;
2173 2176
2174 2177 if (flag & ERI_DESC_MEM_MAP)
2175 2178 (void) ddi_dma_unbind_handle(erip->md_h);
2176 2179
2177 2180 if (flag & ERI_DESC_MEM_ALLOC) {
2178 2181 ddi_dma_mem_free(&erip->mdm_h);
2179 2182 erip->rmdp = NULL;
2180 2183 erip->eri_tmdp = NULL;
2181 2184 }
2182 2185
2183 2186 if (flag & ERI_DESC_HANDLE_ALLOC)
2184 2187 ddi_dma_free_handle(&erip->md_h);
2185 2188
2186 2189 (void) eri_freebufs(erip);
2187 2190
2188 2191 if (flag & ERI_RCV_HANDLE_ALLOC)
2189 2192 for (i = 0; i < erip->rcv_handle_cnt; i++)
2190 2193 ddi_dma_free_handle(&erip->ndmarh[i]);
2191 2194
2192 2195 if (flag & ERI_RCV_DVMA_ALLOC) {
2193 2196 (void) dvma_release(erip->eri_dvmarh);
2194 2197 erip->eri_dvmarh = NULL;
2195 2198 }
2196 2199
2197 2200 if (flag & ERI_XBUFS_KMEM_DMABIND) {
2198 2201 (void) ddi_dma_unbind_handle(erip->tbuf_handle);
2199 2202 erip->tbuf_ioaddr = 0;
2200 2203 }
2201 2204
2202 2205 if (flag & ERI_XBUFS_KMEM_ALLOC) {
2203 2206 ddi_dma_mem_free(&erip->tbuf_acch);
2204 2207 erip->tbuf_kaddr = NULL;
2205 2208 }
2206 2209
2207 2210 if (flag & ERI_XBUFS_HANDLE_ALLOC) {
2208 2211 ddi_dma_free_handle(&erip->tbuf_handle);
2209 2212 erip->tbuf_handle = NULL;
2210 2213 }
2211 2214
2212 2215 }
2213 2216
2214 2217 /*
2215 2218 * Initialize channel.
2216 2219 * Return true on success, false on error.
2217 2220 *
2218 2221 * The recommended sequence for initialization is:
2219 2222 * 1. Issue a Global Reset command to the Ethernet Channel.
2220 2223 * 2. Poll the Global_Reset bits until the execution of the reset has been
2221 2224 * completed.
2222 2225 * 2(a). Use the MIF Frame/Output register to reset the transceiver.
2223 2226  *	 Poll Register 0 until the Reset bit is 0.
2224 2227  * 2(b). Use the MIF Frame/Output register to set the PHY in Normal-Op,
2225 2228 * 100Mbps and Non-Isolated mode. The main point here is to bring the
2226 2229 * PHY out of Isolate mode so that it can generate the rx_clk and tx_clk
2227 2230 * to the MII interface so that the Bigmac core can correctly reset
2228 2231 * upon a software reset.
2229 2232 * 2(c). Issue another Global Reset command to the Ethernet Channel and poll
2230 2233 * the Global_Reset bits till completion.
2231 2234 * 3. Set up all the data structures in the host memory.
2232 2235 * 4. Program the TX_MAC registers/counters (excluding the TX_MAC Configuration
2233 2236 * Register).
2234 2237 * 5. Program the RX_MAC registers/counters (excluding the RX_MAC Configuration
2235 2238 * Register).
2236 2239 * 6. Program the Transmit Descriptor Ring Base Address in the ETX.
2237 2240 * 7. Program the Receive Descriptor Ring Base Address in the ERX.
2238 2241 * 8. Program the Global Configuration and the Global Interrupt Mask Registers.
2239 2242 * 9. Program the ETX Configuration register (enable the Transmit DMA channel).
2240 2243 * 10. Program the ERX Configuration register (enable the Receive DMA channel).
2241 2244 * 11. Program the XIF Configuration Register (enable the XIF).
2242 2245 * 12. Program the RX_MAC Configuration Register (Enable the RX_MAC).
2243 2246 * 13. Program the TX_MAC Configuration Register (Enable the TX_MAC).
2244 2247 */
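/*
 * A sketch of the poll-until-clear idiom used for steps 2 and 2(c),
 * mirroring what eri_stop() does elsewhere in this file:
 *
 *	PUT_SWRSTREG(reset, ERI_G_RESET_GLOBAL | ERI_CACHE_LINE_SIZE);
 *	ERI_DELAY((GET_SWRSTREG(reset) == ERI_CACHE_LINE_SIZE),
 *	    ERI_MAX_RST_DELAY);
 */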
2245 2248 /*
2246 2249 * lock order:
2247 2250 * intrlock->linklock->xmitlock->xcvrlock
2248 2251 */
2249 2252 static boolean_t
2250 2253 eri_init(struct eri *erip)
2251 2254 {
2252 2255 uint32_t init_stat = 0;
2253 2256 uint32_t partial_init = 0;
2254 2257 uint32_t carrier_ext = 0;
2255 2258 uint32_t mac_ctl = 0;
2256 2259 boolean_t ret;
2257 2260 uint32_t link_timeout = ERI_LINKCHECK_TIMER;
2258 2261 link_state_t linkupdate = LINK_STATE_UNKNOWN;
2259 2262
2260 2263 /*
2261 2264 * Just return successfully if device is suspended.
2262 2265 * eri_init() will be called again from resume.
2263 2266 */
2264 2267 ASSERT(erip != NULL);
2265 2268
2266 2269 if (erip->flags & ERI_SUSPENDED) {
2267 2270 ret = B_TRUE;
2268 2271 goto init_exit;
2269 2272 }
2270 2273
2271 2274 mutex_enter(&erip->intrlock);
2272 2275 eri_stop_timer(erip); /* acquire linklock */
2273 2276 mutex_enter(&erip->xmitlock);
2274 2277 erip->flags &= (ERI_DLPI_LINKUP | ERI_STARTED);
2275 2278 erip->wantw = B_FALSE;
2276 2279 HSTAT(erip, inits);
2277 2280 erip->txhung = 0;
2278 2281
2279 2282 if ((erip->stats.inits > 1) && (erip->init_macregs == 0))
2280 2283 eri_savecntrs(erip);
2281 2284
2282 2285 mutex_enter(&erip->xcvrlock);
2283 2286 if (!param_linkup || erip->linkcheck) {
2284 2287 if (!erip->linkcheck)
2285 2288 linkupdate = LINK_STATE_DOWN;
2286 2289 (void) eri_stop(erip);
2287 2290 }
2288 2291 if (!(erip->flags & ERI_DLPI_LINKUP) || !param_linkup) {
2289 2292 erip->flags |= ERI_DLPI_LINKUP;
2290 2293 eri_mif_poll(erip, MIF_POLL_STOP);
2291 2294 (void) eri_new_xcvr(erip);
2292 2295 ERI_DEBUG_MSG1(erip, XCVR_MSG, "New transceiver detected.");
2293 2296 if (param_transceiver != NO_XCVR) {
2294 2297 /*
2295 2298 * Reset the new PHY and bring up the
2296 2299 * link
2297 2300 */
2298 2301 if (eri_reset_xcvr(erip)) {
2299 2302 ERI_FAULT_MSG1(erip, SEVERITY_NONE,
2300 2303 ERI_VERB_MSG, "In Init after reset");
2301 2304 mutex_exit(&erip->xcvrlock);
2302 2305 link_timeout = 0;
2303 2306 goto done;
2304 2307 }
2305 2308 if (erip->stats.link_up == LINK_STATE_UP)
2306 2309 linkupdate = LINK_STATE_UP;
2307 2310 } else {
2308 2311 erip->flags |= (ERI_RUNNING | ERI_INITIALIZED);
2309 2312 param_linkup = 0;
2310 2313 erip->stats.link_up = LINK_STATE_DOWN;
2311 2314 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2312 2315 linkupdate = LINK_STATE_DOWN;
2313 2316 /*
2314 2317 			 * Still go on and complete the MAC initialization, as
2315 2318 			 * the xcvr might show up later.
2316 2319 			 * The established mutex ordering must still be observed.
2317 2320 */
2318 2321 }
2319 2322 eri_mif_poll(erip, MIF_POLL_START);
2320 2323 }
2321 2324
2322 2325 mutex_exit(&erip->xcvrlock);
2323 2326
2324 2327 /*
2325 2328 * Allocate data structures.
2326 2329 */
2327 2330 if (erip->global_reset_issued) {
2328 2331 if (erip->global_reset_issued == 2) { /* fast path */
2329 2332
2330 2333 /*
2331 2334 * Hang out/Initialize descriptors and buffers.
2332 2335 */
2333 2336 eri_init_txbufs(erip);
2334 2337
2335 2338 eri_update_rxbufs(erip);
2336 2339 } else {
2337 2340 init_stat = eri_allocthings(erip);
2338 2341 if (init_stat)
2339 2342 goto done;
2340 2343
2341 2344 if (eri_freebufs(erip))
2342 2345 goto done;
2343 2346 /*
2344 2347 * Hang out/Initialize descriptors and buffers.
2345 2348 */
2346 2349 eri_init_txbufs(erip);
2347 2350 if (eri_init_rxbufs(erip))
2348 2351 goto done;
2349 2352 }
2350 2353 }
2351 2354
2352 2355 /*
2353 2356 	 * BigMAC requires that we confirm that tx, rx and hash are in
2354 2357 	 * a quiescent state.
2355 2358 * MAC will not reset successfully if the transceiver is not reset and
2356 2359 * brought out of Isolate mode correctly. TXMAC reset may fail if the
2357 2360 * ext. transceiver is just disconnected. If it fails, try again by
2358 2361 * checking the transceiver.
2359 2362 */
2360 2363 if (eri_txmac_disable(erip)) {
2361 2364 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
2362 2365 disable_txmac_msg);
2363 2366 param_linkup = 0; /* force init again */
2364 2367 erip->stats.link_up = LINK_STATE_DOWN;
2365 2368 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2366 2369 linkupdate = LINK_STATE_DOWN;
2367 2370 goto done;
2368 2371 }
2369 2372
2370 2373 if (eri_rxmac_disable(erip)) {
2371 2374 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
2372 2375 disable_rxmac_msg);
2373 2376 param_linkup = 0; /* force init again */
2374 2377 erip->stats.link_up = LINK_STATE_DOWN;
2375 2378 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2376 2379 linkupdate = LINK_STATE_DOWN;
2377 2380 goto done;
2378 2381 }
2379 2382
2380 2383 eri_init_macregs_generic(erip);
2381 2384
2382 2385 /*
2383 2386 * Initialize ERI Global registers :
2384 2387 * config
2385 2388 * For PCI : err_mask, bif_cfg
2386 2389 *
2387 2390 * Use user-configurable parameter for enabling 64-bit transfers.
2388 2391 	 * Note: for PCI, burst sizes are in multiples of 64 bytes.
2389 2392 */
2390 2393
2391 2394 /*
2392 2395 * Significant performance improvements can be achieved by
2393 2396 	 * disabling the transmit interrupt. Thus TMDs are reclaimed
2394 2397 * only very infrequently.
2395 2398 * The PCS Interrupt is masked here. It is enabled only when
2396 2399 * a PCS link is brought up because there is no second level
2397 2400 	 * mask for this interrupt.
2398 2401 * Init GLOBAL, TXMAC, RXMAC and MACCTL interrupt masks here.
2399 2402 */
2400 2403 if (! partial_init) {
2401 2404 PUT_GLOBREG(intmask, ERI_G_MASK_INTR);
2402 2405 erip->tx_int_me = 0;
2403 2406 PUT_MACREG(txmask, BMAC_TXINTR_MASK);
2404 2407 PUT_MACREG(rxmask, BMAC_RXINTR_MASK);
2405 2408 PUT_MACREG(macctl_mask, ERI_MACCTL_INTR_MASK);
2406 2409 }
2407 2410
2408 2411 if (erip->global_reset_issued) {
2409 2412 /*
2410 2413 * Initialize ETX Registers:
2411 2414 * config, txring_lo, txring_hi
2412 2415 */
2413 2416 if (eri_init_txregs(erip))
2414 2417 goto done;
2415 2418 /*
2416 2419 * Initialize ERX Registers:
2417 2420 * rxring_lo, rxring_hi, config, rx_blanking,
2418 2421 * rx_pause_threshold. Also, rx_kick
2419 2422 * Read and save rxfifo_size.
2420 2423 */
2421 2424 if (eri_init_rxregs(erip))
2422 2425 goto done;
2423 2426 }
2424 2427
2425 2428 PUT_MACREG(macctl_mask, ERI_MACCTL_INTR_MASK);
2426 2429
2427 2430 /*
2428 2431 	 * Set up the slottime, rxconfig and txconfig registers, without
2429 2432 	 * enabling the latter two at this time.
2430 2433 */
2431 2434 PUT_MACREG(slot, BMAC_SLOT_TIME);
2432 2435 carrier_ext = 0;
2433 2436
2434 2437 #ifdef ERI_DONT_STRIP_CRC
2435 2438 PUT_MACREG(rxcfg,
2436 2439 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2437 2440 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2438 2441 (carrier_ext ? BMAC_RXCFG_CARR_EXT : 0)));
2439 2442 #else
2440 2443 PUT_MACREG(rxcfg,
2441 2444 ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2442 2445 (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2443 2446 BMAC_RXCFG_STRIP_CRC |
2444 2447 (carrier_ext ? BMAC_RXCFG_CARR_EXT : 0)));
2445 2448 #endif
2446 2449 drv_usecwait(10); /* wait after setting Hash Enable bit */
2447 2450
2448 2451 if (erip->ngu_enable)
2449 2452 PUT_MACREG(txcfg,
2450 2453 ((param_mode ? BMAC_TXCFG_FDX: 0) |
2451 2454 ((param_lance_mode && (erip->lance_mode_enable)) ?
2452 2455 BMAC_TXCFG_ENIPG0 : 0) |
2453 2456 (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) |
2454 2457 BMAC_TXCFG_NGU));
2455 2458 else
2456 2459 PUT_MACREG(txcfg,
2457 2460 ((param_mode ? BMAC_TXCFG_FDX: 0) |
2458 2461 ((param_lance_mode && (erip->lance_mode_enable)) ?
2459 2462 BMAC_TXCFG_ENIPG0 : 0) |
2460 2463 (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0)));
2461 2464
2462 2465 if (erip->pauseRX)
2463 2466 mac_ctl = ERI_MCTLCFG_RXPAUSE;
2464 2467 if (erip->pauseTX)
2465 2468 mac_ctl |= ERI_MCTLCFG_TXPAUSE;
2466 2469
2467 2470 PUT_MACREG(macctl_cfg, mac_ctl);
2468 2471
2469 2472 /*
2470 2473 * Must be Internal Transceiver
2471 2474 */
2472 2475 if (param_mode)
2473 2476 PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2474 2477 BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE));
2475 2478 else {
2476 2479 PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2477 2480 BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE |
2478 2481 BMAC_XIFC_DIS_ECHO));
2479 2482
2480 2483 link_timeout = ERI_CHECK_HANG_TIMER;
2481 2484 }
2482 2485
2483 2486 /*
2484 2487 * if MAC int loopback flag is set, put xifc reg in mii loopback
2485 2488 * mode {DIAG}
2486 2489 */
2487 2490 if (erip->flags & ERI_MACLOOPBACK) {
2488 2491 PUT_MACREG(xifc, GET_MACREG(xifc) | BMAC_XIFC_MIILPBK);
2489 2492 }
2490 2493
2491 2494 /*
2492 2495 * Enable TX and RX MACs.
2493 2496 */
2494 2497 ENABLE_MAC(erip);
2495 2498 erip->flags |= (ERI_RUNNING | ERI_INITIALIZED |
2496 2499 ERI_TXINIT | ERI_RXINIT);
2497 2500 mac_tx_update(erip->mh);
2498 2501 erip->global_reset_issued = 0;
2499 2502
2500 2503 #ifdef ERI_10_10_FORCE_SPEED_WORKAROUND
2501 2504 eri_xcvr_force_mode(erip, &link_timeout);
2502 2505 #endif
2503 2506
2504 2507 done:
2505 2508 if (init_stat)
2506 2509 eri_unallocthings(erip);
2507 2510
2508 2511 mutex_exit(&erip->xmitlock);
2509 2512 eri_start_timer(erip, eri_check_link, link_timeout);
2510 2513 mutex_exit(&erip->intrlock);
2511 2514
2512 2515 if (linkupdate != LINK_STATE_UNKNOWN)
2513 2516 mac_link_update(erip->mh, linkupdate);
2514 2517
2515 2518 ret = (erip->flags & ERI_RUNNING) ? B_TRUE : B_FALSE;
2516 2519 if (!ret) {
2517 2520 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
2518 2521 "eri_init failed");
2519 2522 }
2520 2523
2521 2524 init_exit:
2522 2525 ASSERT(!MUTEX_HELD(&erip->linklock));
2523 2526 return (ret);
2524 2527 }
2525 2528
2526 2529 /*
2527 2530  * A burstsizes value of 0 signifies no burst size, so it is treated as failure.
2528 2531 */
2529 2532 static int
2530 2533 eri_burstsize(struct eri *erip)
2531 2534 {
2532 2535 ddi_dma_handle_t handle;
2533 2536
2534 2537 if (ddi_dma_alloc_handle(erip->dip, &dma_attr, DDI_DMA_DONTWAIT,
2535 2538 NULL, &handle))
2536 2539 return (DDI_FAILURE);
2537 2540
2538 2541 erip->burstsizes = ddi_dma_burstsizes(handle);
2539 2542 ddi_dma_free_handle(&handle);
2540 2543
2541 2544 if (erip->burstsizes)
2542 2545 return (DDI_SUCCESS);
2543 2546
2544 2547 return (DDI_FAILURE);
2545 2548 }
2546 2549
2547 2550 /*
2548 2551 * Un-initialize (STOP) ERI channel.
2549 2552 */
2550 2553 static void
2551 2554 eri_uninit(struct eri *erip)
2552 2555 {
2553 2556 boolean_t needind;
2554 2557
2555 2558 /*
2556 2559 	 * Allow up to 'ERI_DRAINTIME' for pending xmits to complete.
2557 2560 */
2558 2561 ERI_DELAY((erip->tcurp == erip->tnextp), ERI_DRAINTIME);
2559 2562
2560 2563 mutex_enter(&erip->intrlock);
2561 2564 eri_stop_timer(erip); /* acquire linklock */
2562 2565 mutex_enter(&erip->xmitlock);
2563 2566 mutex_enter(&erip->xcvrlock);
2564 2567 eri_mif_poll(erip, MIF_POLL_STOP);
2565 2568 erip->flags &= ~ERI_DLPI_LINKUP;
2566 2569 mutex_exit(&erip->xcvrlock);
2567 2570
2568 2571 needind = !erip->linkcheck;
2569 2572 (void) eri_stop(erip);
2570 2573 erip->flags &= ~ERI_RUNNING;
2571 2574
2572 2575 mutex_exit(&erip->xmitlock);
2573 2576 eri_start_timer(erip, eri_check_link, 0);
2574 2577 mutex_exit(&erip->intrlock);
2575 2578
2576 2579 if (needind)
2577 2580 mac_link_update(erip->mh, LINK_STATE_DOWN);
2578 2581 }
2579 2582
2580 2583 /*
2581 2584 * Allocate CONSISTENT memory for rmds and tmds with appropriate alignment and
2582 2585 * map it in IO space.
2583 2586 *
2584 2587 * The driver allocates STREAMS buffers which will be mapped in DVMA
2585 2588 * space using DDI DMA resources.
2586 2589 *
2587 2590 */
2588 2591 static int
2589 2592 eri_allocthings(struct eri *erip)
2590 2593 {
2591 2594
2592 2595 uintptr_t a;
2593 2596 int size;
2594 2597 uint32_t rval;
2595 2598 int i;
2596 2599 size_t real_len;
2597 2600 uint32_t cookiec;
2598 2601 int alloc_stat = 0;
2599 2602 ddi_dma_cookie_t dma_cookie;
2600 2603
2601 2604 /*
2602 2605 * Return if resources are already allocated.
2603 2606 */
2604 2607 if (erip->rmdp)
2605 2608 return (alloc_stat);
2606 2609
2607 2610 erip->alloc_flag = 0;
2608 2611
2609 2612 /*
2610 2613 * Allocate the TMD and RMD descriptors and extra for alignments.
2611 2614 */
2612 2615 size = (ERI_RPENDING * sizeof (struct rmd) +
2613 2616 ERI_TPENDING * sizeof (struct eri_tmd)) + ERI_GMDALIGN;
2614 2617
2615 2618 rval = ddi_dma_alloc_handle(erip->dip, &desc_dma_attr,
2616 2619 DDI_DMA_DONTWAIT, 0, &erip->md_h);
2617 2620 if (rval != DDI_SUCCESS) {
2618 2621 return (++alloc_stat);
2619 2622 }
2620 2623 erip->alloc_flag |= ERI_DESC_HANDLE_ALLOC;
2621 2624
2622 2625 rval = ddi_dma_mem_alloc(erip->md_h, size, &erip->dev_attr,
2623 2626 DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
2624 2627 (caddr_t *)&erip->iopbkbase, &real_len, &erip->mdm_h);
2625 2628 if (rval != DDI_SUCCESS) {
2626 2629 return (++alloc_stat);
2627 2630 }
2628 2631 erip->alloc_flag |= ERI_DESC_MEM_ALLOC;
2629 2632
2630 2633 rval = ddi_dma_addr_bind_handle(erip->md_h, NULL,
2631 2634 (caddr_t)erip->iopbkbase, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2632 2635 DDI_DMA_DONTWAIT, 0, &erip->md_c, &cookiec);
2633 2636
2634 2637 if (rval != DDI_DMA_MAPPED)
2635 2638 return (++alloc_stat);
2636 2639
2637 2640 erip->alloc_flag |= ERI_DESC_MEM_MAP;
2638 2641
2639 2642 if (cookiec != 1)
2640 2643 return (++alloc_stat);
2641 2644
2642 2645 erip->iopbiobase = erip->md_c.dmac_address;
2643 2646
2644 2647 a = erip->iopbkbase;
2645 2648 a = ROUNDUP(a, ERI_GMDALIGN);
2646 2649 erip->rmdp = (struct rmd *)a;
2647 2650 a += ERI_RPENDING * sizeof (struct rmd);
2648 2651 erip->eri_tmdp = (struct eri_tmd *)a;
2649 2652 /*
2650 2653 	 * Specifically, we reserve n (ERI_TPENDING + ERI_RPENDING)
2651 2654 	 * pagetable entries; therefore we have 2 ptes for each
2652 2655 	 * descriptor. Since the ethernet buffers are 1518 bytes,
2653 2656 	 * they can use at most 2 ptes.
2654 2657 	 * We will do a ddi_dma_addr_setup for each buffer.
2655 2658 */
2656 2659 /*
2657 2660 * In the current implementation, we use the ddi compliant
2658 2661 * dma interface. We allocate ERI_RPENDING dma handles for receive
2659 2662 * activity. The actual dma mapping is done in the io function
2660 2663 * eri_read_dma(), by calling the ddi_dma_addr_bind_handle.
2661 2664 * Dma resources are deallocated by calling ddi_dma_unbind_handle
2662 2665 	 * in eri_reclaim() for transmit and in eri_read_dma() for receive io.
2663 2666 */
2664 2667
2665 2668 if (eri_use_dvma_rx &&
2666 2669 (dvma_reserve(erip->dip, &eri_dma_limits, (ERI_RPENDING * 2),
2667 2670 &erip->eri_dvmarh)) == DDI_SUCCESS) {
2668 2671 erip->alloc_flag |= ERI_RCV_DVMA_ALLOC;
2669 2672 } else {
2670 2673 erip->eri_dvmarh = NULL;
2671 2674
2672 2675 for (i = 0; i < ERI_RPENDING; i++) {
2673 2676 rval = ddi_dma_alloc_handle(erip->dip,
2674 2677 &dma_attr, DDI_DMA_DONTWAIT,
2675 2678 0, &erip->ndmarh[i]);
2676 2679
2677 2680 if (rval != DDI_SUCCESS) {
2678 2681 ERI_FAULT_MSG1(erip, SEVERITY_HIGH,
2679 2682 ERI_VERB_MSG, alloc_rx_dmah_msg);
2680 2683 alloc_stat++;
2681 2684 break;
2682 2685 }
2683 2686 }
2684 2687
2685 2688 erip->rcv_handle_cnt = i;
2686 2689
2687 2690 if (i)
2688 2691 erip->alloc_flag |= ERI_RCV_HANDLE_ALLOC;
2689 2692
2690 2693 if (alloc_stat)
2691 2694 return (alloc_stat);
2692 2695
2693 2696 }
2694 2697
2695 2698 /*
2696 2699 * Allocate TX buffer
2697 2700 * Note: buffers must always be allocated in the native
2698 2701 * ordering of the CPU (always big-endian for Sparc).
2699 2702 * ddi_dma_mem_alloc returns memory in the native ordering
2700 2703 * of the bus (big endian for SBus, little endian for PCI).
2701 2704 * So we cannot use ddi_dma_mem_alloc(, &erip->ge_dev_attr)
2702 2705 * because we'll get little endian memory on PCI.
2703 2706 */
2704 2707 if (ddi_dma_alloc_handle(erip->dip, &desc_dma_attr, DDI_DMA_DONTWAIT,
2705 2708 0, &erip->tbuf_handle) != DDI_SUCCESS) {
2706 2709 ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2707 2710 alloc_tx_dmah_msg);
2708 2711 return (++alloc_stat);
2709 2712 }
2710 2713 erip->alloc_flag |= ERI_XBUFS_HANDLE_ALLOC;
2711 2714 size = ERI_TPENDING * ERI_BUFSIZE;
2712 2715 if (ddi_dma_mem_alloc(erip->tbuf_handle, size, &buf_attr,
2713 2716 DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, &erip->tbuf_kaddr,
2714 2717 &real_len, &erip->tbuf_acch) != DDI_SUCCESS) {
2715 2718 ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2716 2719 alloc_tx_dmah_msg);
2717 2720 return (++alloc_stat);
2718 2721 }
2719 2722 erip->alloc_flag |= ERI_XBUFS_KMEM_ALLOC;
2720 2723 if (ddi_dma_addr_bind_handle(erip->tbuf_handle, NULL,
2721 2724 erip->tbuf_kaddr, size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2722 2725 DDI_DMA_DONTWAIT, 0, &dma_cookie, &cookiec) != DDI_DMA_MAPPED) {
2723 2726 return (++alloc_stat);
2724 2727 }
2725 2728 erip->tbuf_ioaddr = dma_cookie.dmac_address;
2726 2729 erip->alloc_flag |= ERI_XBUFS_KMEM_DMABIND;
2727 2730 if (cookiec != 1)
2728 2731 return (++alloc_stat);
2729 2732
2730 2733 /*
2731 2734 * Keep handy limit values for RMD, TMD, and Buffers.
2732 2735 */
2733 2736 erip->rmdlimp = &((erip->rmdp)[ERI_RPENDING]);
2734 2737 erip->eri_tmdlimp = &((erip->eri_tmdp)[ERI_TPENDING]);
2735 2738
2736 2739 /*
2737 2740 * Zero out RCV holders.
2738 2741 */
2739 2742 bzero((caddr_t)erip->rmblkp, sizeof (erip->rmblkp));
2740 2743 return (alloc_stat);
2741 2744 }
2742 2745
2743 2746 /* <<<<<<<<<<<<<<<<< INTERRUPT HANDLING FUNCTION >>>>>>>>>>>>>>>>>>>> */
2744 2747 /*
2745 2748 * First check to see if it is our device interrupting.
2746 2749 */
2747 2750 static uint_t
2748 2751 eri_intr(caddr_t arg)
2749 2752 {
2750 2753 struct eri *erip = (void *)arg;
2751 2754 uint32_t erisbits;
2752 2755 uint32_t mif_status;
2753 2756 uint32_t serviced = DDI_INTR_UNCLAIMED;
2754 2757 link_state_t linkupdate = LINK_STATE_UNKNOWN;
2755 2758 boolean_t macupdate = B_FALSE;
2756 2759 mblk_t *mp;
2757 2760 mblk_t *head;
2758 2761 mblk_t **tail;
2759 2762
2760 2763 head = NULL;
2761 2764 tail = &head;
2762 2765
2763 2766 mutex_enter(&erip->intrlock);
2764 2767
2765 2768 erisbits = GET_GLOBREG(status);
2766 2769
2767 2770 /*
2768 2771 * Check if it is only the RX_DONE interrupt, which is
2769 2772 * the most frequent one.
2770 2773 */
2771 2774 if (((erisbits & ERI_G_STATUS_RX_INT) == ERI_G_STATUS_RX_DONE) &&
2772 2775 (erip->flags & ERI_RUNNING)) {
2773 2776 serviced = DDI_INTR_CLAIMED;
2774 2777 goto rx_done_int;
2775 2778 }
2776 2779
2777 2780 /* Claim the first interrupt after initialization */
2778 2781 if (erip->flags & ERI_INITIALIZED) {
2779 2782 erip->flags &= ~ERI_INITIALIZED;
2780 2783 serviced = DDI_INTR_CLAIMED;
2781 2784 }
2782 2785
2783 2786 /* Check for interesting events */
2784 2787 if ((erisbits & ERI_G_STATUS_INTR) == 0) {
2785 2788 #ifdef ESTAR_WORKAROUND
2786 2789 uint32_t linkupdate;
2787 2790 #endif
2788 2791
2789 2792 ERI_DEBUG_MSG2(erip, DIAG_MSG,
2790 2793 "eri_intr: Interrupt Not Claimed gsbits %X", erisbits);
2791 2794 #ifdef DEBUG
2792 2795 noteri++;
2793 2796 #endif
2794 2797 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF Config = 0x%X",
2795 2798 GET_MIFREG(mif_cfg));
2796 2799 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF imask = 0x%X",
2797 2800 GET_MIFREG(mif_imask));
2798 2801 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:INT imask = 0x%X",
2799 2802 GET_GLOBREG(intmask));
2800 2803 ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:alias %X",
2801 2804 GET_GLOBREG(status_alias));
2802 2805 #ifdef ESTAR_WORKAROUND
2803 2806 linkupdate = eri_check_link_noind(erip);
2804 2807 #endif
2805 2808 mutex_exit(&erip->intrlock);
2806 2809 #ifdef ESTAR_WORKAROUND
2807 2810 if (linkupdate != LINK_STATE_UNKNOWN)
2808 2811 mac_link_update(erip->mh, linkupdate);
2809 2812 #endif
2810 2813 return (serviced);
2811 2814 }
2812 2815 serviced = DDI_INTR_CLAIMED;
2813 2816
2814 2817 if (!(erip->flags & ERI_RUNNING)) {
2815 2818 mutex_exit(&erip->intrlock);
2816 2819 eri_uninit(erip);
2817 2820 return (serviced);
2818 2821 }
2819 2822
2820 2823 if (erisbits & ERI_G_STATUS_FATAL_ERR) {
2821 2824 ERI_DEBUG_MSG2(erip, INTR_MSG,
2822 2825 "eri_intr: fatal error: erisbits = %X", erisbits);
2823 2826 (void) eri_fatal_err(erip, erisbits);
2824 2827 eri_reinit_fatal++;
2825 2828
2826 2829 if (erip->rx_reset_issued) {
2827 2830 erip->rx_reset_issued = 0;
2828 2831 (void) eri_init_rx_channel(erip);
2829 2832 mutex_exit(&erip->intrlock);
2830 2833 } else {
2831 2834 param_linkup = 0;
2832 2835 erip->stats.link_up = LINK_STATE_DOWN;
2833 2836 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2834 2837 DISABLE_MAC(erip);
2835 2838 mutex_exit(&erip->intrlock);
2836 2839 (void) eri_init(erip);
2837 2840 }
2838 2841 return (serviced);
2839 2842 }
2840 2843
2841 2844 if (erisbits & ERI_G_STATUS_NONFATAL_ERR) {
2842 2845 ERI_DEBUG_MSG2(erip, INTR_MSG,
2843 2846 "eri_intr: non-fatal error: erisbits = %X", erisbits);
2844 2847 (void) eri_nonfatal_err(erip, erisbits);
2845 2848 if (erip->linkcheck) {
2846 2849 mutex_exit(&erip->intrlock);
2847 2850 (void) eri_init(erip);
2848 2851 return (serviced);
2849 2852 }
2850 2853 }
2851 2854
2852 2855 if (erisbits & ERI_G_STATUS_MIF_INT) {
2853 2856 uint16_t stat;
2854 2857 ERI_DEBUG_MSG2(erip, XCVR_MSG,
2855 2858 "eri_intr:MIF Interrupt:mii_status %X", erip->mii_status);
2856 2859 eri_stop_timer(erip); /* acquire linklock */
2857 2860
2858 2861 mutex_enter(&erip->xmitlock);
2859 2862 mutex_enter(&erip->xcvrlock);
2860 2863 #ifdef ERI_MIF_POLL_STATUS_WORKAROUND
2861 2864 mif_status = GET_MIFREG(mif_bsts);
2862 2865 eri_mif_poll(erip, MIF_POLL_STOP);
2863 2866 ERI_DEBUG_MSG3(erip, XCVR_MSG,
2864 2867 "eri_intr: new MIF interrupt status %X XCVR status %X",
2865 2868 mif_status, erip->mii_status);
2866 2869 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
2867 2870 linkupdate = eri_mif_check(erip, stat, stat);
2868 2871
2869 2872 #else
2870 2873 mif_status = GET_MIFREG(mif_bsts);
2871 2874 eri_mif_poll(erip, MIF_POLL_STOP);
2872 2875 linkupdate = eri_mif_check(erip, (uint16_t)mif_status,
2873 2876 (uint16_t)(mif_status >> 16));
2874 2877 #endif
2875 2878 eri_mif_poll(erip, MIF_POLL_START);
2876 2879 mutex_exit(&erip->xcvrlock);
2877 2880 mutex_exit(&erip->xmitlock);
2878 2881
2879 2882 if (!erip->openloop_autoneg)
2880 2883 eri_start_timer(erip, eri_check_link,
2881 2884 ERI_LINKCHECK_TIMER);
2882 2885 else
2883 2886 eri_start_timer(erip, eri_check_link,
2884 2887 ERI_P_FAULT_TIMER);
2885 2888 }
2886 2889
2887 2890 ERI_DEBUG_MSG2(erip, INTR_MSG,
2888 2891 "eri_intr:May have Read Interrupt status:status %X", erisbits);
2889 2892
2890 2893 rx_done_int:
2891 2894 if ((erisbits & (ERI_G_STATUS_TX_INT_ME)) ||
2892 2895 (erip->tx_cur_cnt >= tx_interrupt_rate)) {
2893 2896 mutex_enter(&erip->xmitlock);
2894 2897 erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
2895 2898 ETX_COMPLETION_MASK);
2896 2899
2897 2900 macupdate |= eri_reclaim(erip, erip->tx_completion);
2898 2901 if (macupdate)
2899 2902 erip->wantw = B_FALSE;
2900 2903
2901 2904 mutex_exit(&erip->xmitlock);
2902 2905 }
2903 2906
2904 2907 if (erisbits & ERI_G_STATUS_RX_DONE) {
2905 2908 volatile struct rmd *rmdp, *rmdpbase;
2906 2909 volatile uint32_t rmdi;
2907 2910 uint8_t loop_limit = 0x20;
2908 2911 uint64_t flags;
2909 2912 uint32_t rmdmax_mask = erip->rmdmax_mask;
2910 2913
2911 2914 rmdpbase = erip->rmdp;
2912 2915 rmdi = erip->rx_completion;
2913 2916 rmdp = rmdpbase + rmdi;
2914 2917
2915 2918 /*
2916 2919 * Sync RMD before looking at it.
2917 2920 */
2918 2921 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
2919 2922 DDI_DMA_SYNC_FORCPU);
2920 2923 /*
2921 2924 * Loop through each RMD.
2922 2925 */
2923 2926
2924 2927 flags = GET_RMD_FLAGS(rmdp);
2925 2928 while (((flags & ERI_RMD_OWN) == 0) && (loop_limit)) {
2926 2929 /* process one packet */
2927 2930 mp = eri_read_dma(erip, rmdp, rmdi, flags);
2928 2931 rmdi = (rmdi + 1) & rmdmax_mask;
2929 2932 rmdp = rmdpbase + rmdi;
2930 2933
2931 2934 if (mp != NULL) {
2932 2935 *tail = mp;
2933 2936 tail = &mp->b_next;
2934 2937 }
2935 2938
2936 2939 /*
2937 2940 			 * ERI RCV DMA fetches or updates four descriptors
2938 2941 			 * at a time. Also, we don't want to update the
2939 2942 			 * descriptor batch we just received a packet on.
2940 2943 			 * So we update descriptors every 4 packets, and we
2941 2944 			 * update the group of 4 after the current batch.
2942 2945 */
2943 2946
2944 2947 if (!(rmdi % 4)) {
2945 2948 if (eri_overflow_reset &&
2946 2949 (GET_GLOBREG(status_alias) &
2947 2950 ERI_G_STATUS_NONFATAL_ERR)) {
2948 2951 loop_limit = 1;
2949 2952 } else {
2950 2953 erip->rx_kick =
2951 2954 (rmdi + ERI_RPENDING - 4) &
2952 2955 rmdmax_mask;
2953 2956 PUT_ERXREG(rx_kick, erip->rx_kick);
2954 2957 }
2955 2958 }
2956 2959
2957 2960 /*
2958 2961 * Sync the next RMD before looking at it.
2959 2962 */
2960 2963 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
2961 2964 DDI_DMA_SYNC_FORCPU);
2962 2965 flags = GET_RMD_FLAGS(rmdp);
2963 2966 loop_limit--;
2964 2967 }
2965 2968 erip->rx_completion = rmdi;
2966 2969 }
2967 2970
2968 2971 mutex_exit(&erip->intrlock);
2969 2972
2970 2973 if (head)
2971 2974 mac_rx(erip->mh, NULL, head);
2972 2975
2973 2976 if (macupdate)
2974 2977 mac_tx_update(erip->mh);
2975 2978
2976 2979 if (linkupdate != LINK_STATE_UNKNOWN)
2977 2980 mac_link_update(erip->mh, linkupdate);
2978 2981
2979 2982 return (serviced);
2980 2983 }
2981 2984
2982 2985 /*
2983 2986  * Handle interrupts for fatal errors,
2984 2987  * which require reinitialization.
2985 2988 */
2986 2989 #define PCI_DATA_PARITY_REP (1 << 8)
2987 2990 #define PCI_SING_TARGET_ABORT (1 << 11)
2988 2991 #define PCI_RCV_TARGET_ABORT (1 << 12)
2989 2992 #define PCI_RCV_MASTER_ABORT (1 << 13)
2990 2993 #define PCI_SING_SYSTEM_ERR (1 << 14)
2991 2994 #define PCI_DATA_PARITY_ERR (1 << 15)
2992 2995
2993 2996 /* called with intrlock held */
2994 2997 static void
2995 2998 eri_fatal_err(struct eri *erip, uint32_t erisbits)
2996 2999 {
2997 3000 uint16_t pci_status;
2998 3001 uint32_t pci_error_int = 0;
2999 3002
3000 3003 if (erisbits & ERI_G_STATUS_RX_TAG_ERR) {
3001 3004 erip->rx_reset_issued = 1;
3002 3005 HSTAT(erip, rxtag_err);
3003 3006 } else {
3004 3007 erip->global_reset_issued = 1;
3005 3008 if (erisbits & ERI_G_STATUS_BUS_ERR_INT) {
3006 3009 pci_error_int = 1;
3007 3010 HSTAT(erip, pci_error_int);
3008 3011 } else if (erisbits & ERI_G_STATUS_PERR_INT) {
3009 3012 HSTAT(erip, parity_error);
3010 3013 } else {
3011 3014 HSTAT(erip, unknown_fatal);
3012 3015 }
3013 3016 }
3014 3017
3015 3018 /*
3016 3019 * PCI bus error
3017 3020 */
3018 3021 if (pci_error_int && erip->pci_config_handle) {
3019 3022 pci_status = pci_config_get16(erip->pci_config_handle,
3020 3023 PCI_CONF_STAT);
3021 3024 ERI_DEBUG_MSG2(erip, FATAL_ERR_MSG, "Bus Error Status %x",
3022 3025 pci_status);
3023 3026 if (pci_status & PCI_DATA_PARITY_REP)
3024 3027 HSTAT(erip, pci_data_parity_err);
3025 3028 if (pci_status & PCI_SING_TARGET_ABORT)
3026 3029 HSTAT(erip, pci_signal_target_abort);
3027 3030 if (pci_status & PCI_RCV_TARGET_ABORT)
3028 3031 HSTAT(erip, pci_rcvd_target_abort);
3029 3032 if (pci_status & PCI_RCV_MASTER_ABORT)
3030 3033 HSTAT(erip, pci_rcvd_master_abort);
3031 3034 if (pci_status & PCI_SING_SYSTEM_ERR)
3032 3035 HSTAT(erip, pci_signal_system_err);
3033 3036 if (pci_status & PCI_DATA_PARITY_ERR)
3034 3037 			HSTAT(erip, pci_data_parity_err);
3035 3038 /*
3036 3039 * clear it by writing the value that was read back.
3037 3040 */
3038 3041 pci_config_put16(erip->pci_config_handle, PCI_CONF_STAT,
3039 3042 pci_status);
3040 3043 }
3041 3044 }
3042 3045
3043 3046 /*
3044 3047 * Handle interrupts regarding non-fatal events.
3045 3048 * TXMAC, RXMAC and MACCTL events
3046 3049 */
3047 3050 static void
3048 3051 eri_nonfatal_err(struct eri *erip, uint32_t erisbits)
3049 3052 {
3050 3053
3051 3054 uint32_t txmac_sts, rxmac_sts, macctl_sts, pause_time;
3052 3055
3053 3056 #ifdef ERI_PM_WORKAROUND
3054 3057 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
3055 3058 PCI_PM_IDLESPEED_NONE) == DDI_SUCCESS)
3056 3059 erip->stats.pmcap = ERI_PMCAP_NONE;
3057 3060 #endif
3058 3061
3059 3062 if (erisbits & ERI_G_STATUS_TX_MAC_INT) {
3060 3063 txmac_sts = GET_MACREG(txsts);
3061 3064 if (txmac_sts & BMAC_TXSTS_TX_URUN) {
3062 3065 erip->linkcheck = 1;
3063 3066 HSTAT(erip, txmac_urun);
3064 3067 HSTAT(erip, oerrors);
3065 3068 }
3066 3069
3067 3070 if (txmac_sts & BMAC_TXSTS_MAXPKT_ERR) {
3068 3071 erip->linkcheck = 1;
3069 3072 HSTAT(erip, txmac_maxpkt_err);
3070 3073 HSTAT(erip, oerrors);
3071 3074 }
3072 3075 if (txmac_sts & BMAC_TXSTS_NCC_EXP) {
3073 3076 erip->stats.collisions += 0x10000;
3074 3077 }
3075 3078
3076 3079 if (txmac_sts & BMAC_TXSTS_ECC_EXP) {
3077 3080 erip->stats.excessive_coll += 0x10000;
3078 3081 }
3079 3082
3080 3083 if (txmac_sts & BMAC_TXSTS_LCC_EXP) {
3081 3084 erip->stats.late_coll += 0x10000;
3082 3085 }
3083 3086
3084 3087 if (txmac_sts & BMAC_TXSTS_FCC_EXP) {
3085 3088 erip->stats.first_coll += 0x10000;
3086 3089 }
3087 3090
3088 3091 if (txmac_sts & BMAC_TXSTS_DEFER_EXP) {
3089 3092 HSTAT(erip, defer_timer_exp);
3090 3093 }
3091 3094
3092 3095 if (txmac_sts & BMAC_TXSTS_PEAK_EXP) {
3093 3096 erip->stats.peak_attempt_cnt += 0x100;
3094 3097 }
3095 3098 }
3096 3099
3097 3100 if (erisbits & ERI_G_STATUS_RX_NO_BUF) {
3098 3101 ERI_DEBUG_MSG1(erip, NONFATAL_MSG, "rx dropped/no free desc");
3099 3102
3100 3103 if (eri_overflow_reset)
3101 3104 erip->linkcheck = 1;
3102 3105
3103 3106 HSTAT(erip, no_free_rx_desc);
3104 3107 HSTAT(erip, ierrors);
3105 3108 }
3106 3109 if (erisbits & ERI_G_STATUS_RX_MAC_INT) {
3107 3110 rxmac_sts = GET_MACREG(rxsts);
3108 3111 if (rxmac_sts & BMAC_RXSTS_RX_OVF) {
3109 3112 #ifndef ERI_RMAC_HANG_WORKAROUND
3110 3113 eri_stop_timer(erip); /* acquire linklock */
3111 3114 erip->check_rmac_hang ++;
3112 3115 erip->check2_rmac_hang = 0;
3113 3116 erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr);
3114 3117 erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr);
3115 3118
3116 3119 ERI_DEBUG_MSG5(erip, NONFATAL_MSG,
3117 3120 "overflow intr %d: %8x wr:%2x rd:%2x",
3118 3121 erip->check_rmac_hang,
3119 3122 GET_MACREG(macsm),
3120 3123 GET_ERXREG(rxfifo_wr_ptr),
3121 3124 GET_ERXREG(rxfifo_rd_ptr));
3122 3125
3123 3126 eri_start_timer(erip, eri_check_link,
3124 3127 ERI_CHECK_HANG_TIMER);
3125 3128 #endif
3126 3129 if (eri_overflow_reset)
3127 3130 erip->linkcheck = 1;
3128 3131
3129 3132 HSTAT(erip, rx_overflow);
3130 3133 HSTAT(erip, ierrors);
3131 3134 }
3132 3135
3133 3136 if (rxmac_sts & BMAC_RXSTS_ALE_EXP) {
3134 3137 erip->stats.rx_align_err += 0x10000;
3135 3138 erip->stats.ierrors += 0x10000;
3136 3139 }
3137 3140
3138 3141 if (rxmac_sts & BMAC_RXSTS_CRC_EXP) {
3139 3142 erip->stats.rx_crc_err += 0x10000;
3140 3143 erip->stats.ierrors += 0x10000;
3141 3144 }
3142 3145
3143 3146 if (rxmac_sts & BMAC_RXSTS_LEN_EXP) {
3144 3147 erip->stats.rx_length_err += 0x10000;
3145 3148 erip->stats.ierrors += 0x10000;
3146 3149 }
3147 3150
3148 3151 if (rxmac_sts & BMAC_RXSTS_CVI_EXP) {
3149 3152 erip->stats.rx_code_viol_err += 0x10000;
3150 3153 erip->stats.ierrors += 0x10000;
3151 3154 }
3152 3155 }
3153 3156
3154 3157 if (erisbits & ERI_G_STATUS_MAC_CTRL_INT) {
3155 3158
3156 3159 macctl_sts = GET_MACREG(macctl_sts);
3157 3160 if (macctl_sts & ERI_MCTLSTS_PAUSE_RCVD) {
3158 3161 pause_time = ((macctl_sts &
3159 3162 ERI_MCTLSTS_PAUSE_TIME) >> 16);
3160 3163 ERI_DEBUG_MSG2(erip, NONFATAL_MSG,
3161 3164 "PAUSE Received. pause time = %X slot_times",
3162 3165 pause_time);
3163 3166 HSTAT(erip, pause_rxcount);
3164 3167 erip->stats.pause_time_count += pause_time;
3165 3168 }
3166 3169
3167 3170 if (macctl_sts & ERI_MCTLSTS_PAUSE_STATE) {
3168 3171 HSTAT(erip, pause_oncount);
3169 3172 erip->stats.pausing = 1;
3170 3173 }
3171 3174
3172 3175 if (macctl_sts & ERI_MCTLSTS_NONPAUSE) {
3173 3176 HSTAT(erip, pause_offcount);
3174 3177 erip->stats.pausing = 0;
3175 3178 }
3176 3179 }
3177 3180
3178 3181 }
3179 3182
3180 3183 /*
3181 3184 * if this is the first init do not bother to save the
3182 3185 * counters.
3183 3186 */
3184 3187 static void
3185 3188 eri_savecntrs(struct eri *erip)
3186 3189 {
3187 3190 uint32_t fecnt, aecnt, lecnt, rxcv;
3188 3191 uint32_t ltcnt, excnt, fccnt;
3189 3192
3190 3193 /* XXX What all gets added in ierrors and oerrors? */
3191 3194 fecnt = GET_MACREG(fecnt);
3192 3195 HSTATN(erip, rx_crc_err, fecnt);
3193 3196 PUT_MACREG(fecnt, 0);
3194 3197
3195 3198 aecnt = GET_MACREG(aecnt);
3196 3199 HSTATN(erip, rx_align_err, aecnt);
3197 3200 PUT_MACREG(aecnt, 0);
3198 3201
3199 3202 lecnt = GET_MACREG(lecnt);
3200 3203 HSTATN(erip, rx_length_err, lecnt);
3201 3204 PUT_MACREG(lecnt, 0);
3202 3205
3203 3206 rxcv = GET_MACREG(rxcv);
3204 3207 HSTATN(erip, rx_code_viol_err, rxcv);
3205 3208 PUT_MACREG(rxcv, 0);
3206 3209
3207 3210 ltcnt = GET_MACREG(ltcnt);
3208 3211 HSTATN(erip, late_coll, ltcnt);
3209 3212 PUT_MACREG(ltcnt, 0);
3210 3213
3211 3214 erip->stats.collisions += (GET_MACREG(nccnt) + ltcnt);
3212 3215 PUT_MACREG(nccnt, 0);
3213 3216
3214 3217 excnt = GET_MACREG(excnt);
3215 3218 HSTATN(erip, excessive_coll, excnt);
3216 3219 PUT_MACREG(excnt, 0);
3217 3220
3218 3221 fccnt = GET_MACREG(fccnt);
3219 3222 HSTATN(erip, first_coll, fccnt);
3220 3223 PUT_MACREG(fccnt, 0);
3221 3224
3222 3225 /*
3223 3226 * Do not add code violations to input errors.
3224 3227 * They are already counted in CRC errors
3225 3228 */
3226 3229 HSTATN(erip, ierrors, (fecnt + aecnt + lecnt));
3227 3230 HSTATN(erip, oerrors, (ltcnt + excnt));
3228 3231 }
3229 3232
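/*
 * Both allocators below over-allocate by 3 * ERI_BURSTSIZE so that the
 * read/write pointers can be rounded up to an ERI_BURSTSIZE boundary;
 * eri_allocb_sp() additionally reserves 128 bytes of headroom ahead of
 * the aligned data.
 */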
3230 3233 mblk_t *
3231 3234 eri_allocb_sp(size_t size)
3232 3235 {
3233 3236 mblk_t *mp;
3234 3237
3235 3238 size += 128;
3236 3239 if ((mp = allocb(size + 3 * ERI_BURSTSIZE, BPRI_HI)) == NULL) {
3237 3240 return (NULL);
3238 3241 }
3239 3242 mp->b_wptr += 128;
3240 3243 mp->b_wptr = (uint8_t *)ROUNDUP2(mp->b_wptr, ERI_BURSTSIZE);
3241 3244 mp->b_rptr = mp->b_wptr;
3242 3245
3243 3246 return (mp);
3244 3247 }
3245 3248
3246 3249 mblk_t *
3247 3250 eri_allocb(size_t size)
3248 3251 {
3249 3252 mblk_t *mp;
3250 3253
3251 3254 if ((mp = allocb(size + 3 * ERI_BURSTSIZE, BPRI_HI)) == NULL) {
3252 3255 return (NULL);
3253 3256 }
3254 3257 mp->b_wptr = (uint8_t *)ROUNDUP2(mp->b_wptr, ERI_BURSTSIZE);
3255 3258 mp->b_rptr = mp->b_wptr;
3256 3259
3257 3260 return (mp);
3258 3261 }
3259 3262
3260 3263 /*
3261 3264 * Hardware Dependent Functions
3262 3265 * New Section.
3263 3266 */
3264 3267
3265 3268 /* <<<<<<<<<<<<<<<< Fast Ethernet PHY Bit Bang Operations >>>>>>>>>>>>>>>>>> */
3266 3269
3267 3270 static void
3268 3271 send_bit(struct eri *erip, uint32_t x)
3269 3272 {
3270 3273 PUT_MIFREG(mif_bbdata, x);
3271 3274 PUT_MIFREG(mif_bbclk, ERI_BBCLK_LOW);
3272 3275 PUT_MIFREG(mif_bbclk, ERI_BBCLK_HIGH);
3273 3276 }
3274 3277
3275 3278 /*
3276 3279 * To read the MII register bits according to the IEEE Standard
3277 3280 */
3278 3281 static uint32_t
3279 3282 get_bit_std(struct eri *erip)
3280 3283 {
3281 3284 uint32_t x;
3282 3285
3283 3286 PUT_MIFREG(mif_bbclk, ERI_BBCLK_LOW);
3284 3287 drv_usecwait(1); /* wait for >330 ns for stable data */
3285 3288 if (param_transceiver == INTERNAL_XCVR)
3286 3289 x = (GET_MIFREG(mif_cfg) & ERI_MIF_CFGM0) ? 1 : 0;
3287 3290 else
3288 3291 x = (GET_MIFREG(mif_cfg) & ERI_MIF_CFGM1) ? 1 : 0;
3289 3292 PUT_MIFREG(mif_bbclk, ERI_BBCLK_HIGH);
3290 3293 return (x);
3291 3294 }
3292 3295
3293 3296 #define SEND_BIT(x) send_bit(erip, x)
3294 3297 #define GET_BIT_STD(x) x = get_bit_std(erip)
3295 3298
3296 3299
3297 3300 static void
3298 3301 eri_bb_mii_write(struct eri *erip, uint8_t regad, uint16_t data)
3299 3302 {
3300 3303 uint8_t phyad;
3301 3304 int i;
3302 3305
3303 3306 PUT_MIFREG(mif_bbopenb, 1); /* Enable the MII driver */
3304 3307 phyad = erip->phyad;
3305 3308 (void) eri_bb_force_idle(erip);
3306 3309 SEND_BIT(0); SEND_BIT(1); /* <ST> */
3307 3310 SEND_BIT(0); SEND_BIT(1); /* <OP> */
3308 3311 for (i = 4; i >= 0; i--) { /* <AAAAA> */
3309 3312 SEND_BIT((phyad >> i) & 1);
3310 3313 }
3311 3314 for (i = 4; i >= 0; i--) { /* <RRRRR> */
3312 3315 SEND_BIT((regad >> i) & 1);
3313 3316 }
3314 3317 SEND_BIT(1); SEND_BIT(0); /* <TA> */
3315 3318 for (i = 0xf; i >= 0; i--) { /* <DDDDDDDDDDDDDDDD> */
3316 3319 SEND_BIT((data >> i) & 1);
3317 3320 }
3318 3321 PUT_MIFREG(mif_bbopenb, 0); /* Disable the MII driver */
3319 3322 }
3320 3323
3321 3324 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
3322 3325 static uint32_t
3323 3326 eri_bb_mii_read(struct eri *erip, uint8_t regad, uint16_t *datap)
3324 3327 {
3325 3328 uint8_t phyad;
3326 3329 int i;
3327 3330 uint32_t x;
3328 3331 uint32_t y;
3329 3332
3330 3333 *datap = 0;
3331 3334
3332 3335 PUT_MIFREG(mif_bbopenb, 1); /* Enable the MII driver */
3333 3336 phyad = erip->phyad;
3334 3337 (void) eri_bb_force_idle(erip);
3335 3338 SEND_BIT(0); SEND_BIT(1); /* <ST> */
3336 3339 SEND_BIT(1); SEND_BIT(0); /* <OP> */
3337 3340 for (i = 4; i >= 0; i--) { /* <AAAAA> */
3338 3341 SEND_BIT((phyad >> i) & 1);
3339 3342 }
3340 3343 for (i = 4; i >= 0; i--) { /* <RRRRR> */
3341 3344 SEND_BIT((regad >> i) & 1);
3342 3345 }
3343 3346
3344 3347 PUT_MIFREG(mif_bbopenb, 0); /* Disable the MII driver */
3345 3348
3346 3349 GET_BIT_STD(x);
3347 3350 GET_BIT_STD(y); /* <TA> */
3348 3351 for (i = 0xf; i >= 0; i--) { /* <DDDDDDDDDDDDDDDD> */
3349 3352 GET_BIT_STD(x);
3350 3353 *datap += (x << i);
3351 3354 }
3352 3355 /* Kludge to get the Transceiver out of hung mode */
3353 3356 /* XXX: Test if this is still needed */
3354 3357 GET_BIT_STD(x);
3355 3358 GET_BIT_STD(x);
3356 3359 GET_BIT_STD(x);
3357 3360
3358 3361 return (y);
3359 3362 }
3360 3363
3361 3364 static void
3362 3365 eri_bb_force_idle(struct eri *erip)
3363 3366 {
3364 3367 int i;
3365 3368
3366 3369 for (i = 0; i < 33; i++) {
3367 3370 SEND_BIT(1);
3368 3371 }
3369 3372 }
3370 3373
3371 3374 /* <<<<<<<<<<<<<<<<<<<<End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
3372 3375
3373 3376
3374 3377 /* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */
3375 3378
3376 3379 #ifdef ERI_FRM_DEBUG
3377 3380 int frame_flag = 0;
3378 3381 #endif
3379 3382
3380 3383 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
3381 3384 static uint32_t
3382 3385 eri_mii_read(struct eri *erip, uint8_t regad, uint16_t *datap)
3383 3386 {
3384 3387 uint32_t frame;
3385 3388 uint8_t phyad;
3386 3389
3387 3390 if (param_transceiver == NO_XCVR)
3388 3391 return (1); /* No xcvr present */
3389 3392
3390 3393 if (!erip->frame_enable)
3391 3394 return (eri_bb_mii_read(erip, regad, datap));
3392 3395
3393 3396 phyad = erip->phyad;
3394 3397 #ifdef ERI_FRM_DEBUG
3395 3398 if (!frame_flag) {
3396 3399 eri_errror(erip->dip, "Frame Register used for MII");
3397 3400 frame_flag = 1;
3398 3401 }
3399 3402 #endif
3400 3403 ERI_DEBUG_MSG3(erip, FRM_MSG,
3401 3404 "Frame Reg :mii_read: phyad = %X reg = %X ", phyad, regad);
3402 3405
3403 3406 PUT_MIFREG(mif_frame, ERI_MIF_FRREAD |
3404 3407 (phyad << ERI_MIF_FRPHYAD_SHIFT) |
3405 3408 (regad << ERI_MIF_FRREGAD_SHIFT));
3406 3409 MIF_ERIDELAY(300, phyad, regad);
3407 3410 frame = GET_MIFREG(mif_frame);
3408 3411 if ((frame & ERI_MIF_FRTA0) == 0) {
3409 3412 return (1);
3410 3413 } else {
3411 3414 *datap = (uint16_t)(frame & ERI_MIF_FRDATA);
3412 3415 return (0);
3413 3416 }
3415 3418 }
3416 3419
3417 3420 static void
3418 3421 eri_mii_write(struct eri *erip, uint8_t regad, uint16_t data)
3419 3422 {
3420 3423 uint8_t phyad;
3421 3424
3422 3425 if (!erip->frame_enable) {
3423 3426 eri_bb_mii_write(erip, regad, data);
3424 3427 return;
3425 3428 }
3426 3429
3427 3430 phyad = erip->phyad;
3428 3431
3429 3432 PUT_MIFREG(mif_frame, (ERI_MIF_FRWRITE |
3430 3433 (phyad << ERI_MIF_FRPHYAD_SHIFT) |
3431 3434 (regad << ERI_MIF_FRREGAD_SHIFT) | data));
3432 3435 MIF_ERIDELAY(300, phyad, regad);
3433 3436 (void) GET_MIFREG(mif_frame);
3434 3437 }
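/*
 * In frame mode the MIF serializes the clause 22 frame in hardware:
 * the driver writes the opcode, phyad and regad (plus data for a
 * write) into mif_frame, waits (MIF_ERIDELAY) for the shift to
 * complete, and reads the register back.  A turnaround bit
 * (ERI_MIF_FRTA0) that is still clear indicates the PHY never drove
 * the bus, i.e. no transceiver answered.
 */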
3435 3438
3436 3439
3437 3440 /* <<<<<<<<<<<<<<<<< PACKET TRANSMIT FUNCTIONS >>>>>>>>>>>>>>>>>>>> */
3438 3441
3439 3442 #define ERI_CROSS_PAGE_BOUNDRY(i, size, pagesize) \
3440 3443 ((i & pagesize) != ((i + size) & pagesize))
3441 3444
3442 3445 /*
3443 3446 * Send a single mblk. Returns B_TRUE if the packet is sent, or disposed of
3444 3447 * by freemsg. Returns B_FALSE if the packet was not sent or queued, and
3445 3448  * should be retried later (due to tx resource exhaustion).
3446 3449 */
3447 3450 static boolean_t
3448 3451 eri_send_msg(struct eri *erip, mblk_t *mp)
3449 3452 {
3450 3453 volatile struct eri_tmd *tmdp = NULL;
3451 3454 volatile struct eri_tmd *tbasep = NULL;
3452 3455 uint32_t len_msg = 0;
3453 3456 uint32_t i;
3454 3457 uint64_t int_me = 0;
3455 3458 uint_t tmdcsum = 0;
3456 3459 uint_t start_offset = 0;
3457 3460 uint_t stuff_offset = 0;
3458 3461 uint_t flags = 0;
3459 3462
3460 3463 caddr_t ptr;
3461 3464 uint32_t offset;
3462 3465 uint64_t ctrl;
3463 3466 ddi_dma_cookie_t c;
3464 3467
3465 3468 if (!param_linkup) {
3466 3469 freemsg(mp);
3467 3470 HSTAT(erip, tnocar);
3468 3471 HSTAT(erip, oerrors);
3469 3472 return (B_TRUE);
3470 3473 }
3471 3474
3472 3475 #ifdef ERI_HWCSUM
3473 3476 mac_hcksum_get(mp, &start_offset, &stuff_offset, NULL, NULL, &flags);
3474 3477
3475 3478 if (flags & HCK_PARTIALCKSUM) {
3476 3479 if (get_ether_type(mp->b_rptr) == ETHERTYPE_VLAN) {
3477 3480 start_offset += ETHERHEADER_SIZE + 4;
3478 3481 stuff_offset += ETHERHEADER_SIZE + 4;
3479 3482 } else {
3480 3483 start_offset += ETHERHEADER_SIZE;
3481 3484 stuff_offset += ETHERHEADER_SIZE;
3482 3485 }
3483 3486 tmdcsum = ERI_TMD_CSENABL;
3484 3487 }
3485 3488 #endif /* ERI_HWCSUM */
3486 3489
3487 3490 if ((len_msg = msgsize(mp)) > ERI_BUFSIZE) {
3488 3491 /*
3489 3492 		 * This shouldn't ever occur, as GLD should not send us
3490 3493 * packets that are too big.
3491 3494 */
3492 3495 HSTAT(erip, oerrors);
3493 3496 freemsg(mp);
3494 3497 return (B_TRUE);
3495 3498 }
3496 3499
3497 3500 /*
3498 3501 * update MIB II statistics
3499 3502 */
3500 3503 BUMP_OutNUcast(erip, mp->b_rptr);
3501 3504
3502 3505 mutex_enter(&erip->xmitlock);
3503 3506
3504 3507 tbasep = erip->eri_tmdp;
3505 3508
3506 3509 /* Check if there are enough descriptors for this packet */
3507 3510 tmdp = erip->tnextp;
3508 3511
3509 3512 if (tmdp >= erip->tcurp) /* check notmds */
3510 3513 i = tmdp - erip->tcurp;
3511 3514 else
3512 3515 i = tmdp + ERI_TPENDING - erip->tcurp;
3513 3516
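	/*
	 * Illustration (assuming ERI_TPENDING is 64): with tcurp at
	 * index 10 and tnextp at index 60, i == 50 and the send may
	 * proceed; once i exceeds ERI_TPENDING - 4 == 60 we bail out
	 * to notmds below, leaving a few descriptors of slack between
	 * producer and consumer.
	 */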
3514 3517 if (i > (ERI_TPENDING - 4))
3515 3518 goto notmds;
3516 3519
3517 3520 if (i >= (ERI_TPENDING >> 1) && !(erip->starts & 0x7)) {
3518 3521 int_me = ERI_TMD_INTME;
3519 3522
3520 3523 if (!erip->tx_int_me) {
3521 3524 PUT_GLOBREG(intmask, GET_GLOBREG(intmask) &
3522 3525 ~(ERI_G_MASK_TX_INT_ME));
3523 3526 erip->tx_int_me = 1;
3524 3527 }
3525 3528 }
3526 3529
3527 3530 i = tmdp - tbasep; /* index */
3528 3531
3529 3532 offset = (i * ERI_BUFSIZE);
3530 3533 ptr = erip->tbuf_kaddr + offset;
3531 3534
3532 3535 mcopymsg(mp, ptr);
3533 3536
3534 3537 #ifdef ERI_HDX_BUG_WORKAROUND
3535 3538 if ((param_mode) || (eri_hdx_pad_enable == 0)) {
3536 3539 if (len_msg < ETHERMIN) {
3537 3540 bzero((ptr + len_msg), (ETHERMIN - len_msg));
3538 3541 len_msg = ETHERMIN;
3539 3542 }
3540 3543 } else {
3541 3544 if (len_msg < 97) {
3542 3545 bzero((ptr + len_msg), (97 - len_msg));
3543 3546 len_msg = 97;
3544 3547 }
3545 3548 }
3546 3549 #endif
3547 3550 c.dmac_address = erip->tbuf_ioaddr + offset;
3548 3551 (void) ddi_dma_sync(erip->tbuf_handle,
3549 3552 (off_t)offset, len_msg, DDI_DMA_SYNC_FORDEV);
3550 3553
3551 3554 /* first and last (and only!) descr of packet */
3552 3555 ctrl = ERI_TMD_SOP | ERI_TMD_EOP | int_me | tmdcsum |
3553 3556 (start_offset << ERI_TMD_CSSTART_SHIFT) |
3554 3557 (stuff_offset << ERI_TMD_CSSTUFF_SHIFT);
3555 3558
3556 3559 PUT_TMD(tmdp, c, len_msg, ctrl);
3557 3560 ERI_SYNCIOPB(erip, tmdp, sizeof (struct eri_tmd),
3558 3561 DDI_DMA_SYNC_FORDEV);
3559 3562
3560 3563 tmdp = NEXTTMD(erip, tmdp);
3561 3564 erip->tx_cur_cnt++;
3562 3565
3563 3566 erip->tx_kick = tmdp - tbasep;
3564 3567 PUT_ETXREG(tx_kick, erip->tx_kick);
3565 3568 erip->tnextp = tmdp;
3566 3569
3567 3570 erip->starts++;
3568 3571
3569 3572 if (erip->tx_cur_cnt >= tx_interrupt_rate) {
3570 3573 erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
3571 3574 ETX_COMPLETION_MASK);
3572 3575 (void) eri_reclaim(erip, erip->tx_completion);
3573 3576 }
3574 3577 mutex_exit(&erip->xmitlock);
3575 3578
3576 3579 return (B_TRUE);
3577 3580
3578 3581 notmds:
3579 3582 HSTAT(erip, notmds);
3580 3583 erip->wantw = B_TRUE;
3581 3584
3582 3585 mutex_exit(&erip->xmitlock);
3583 3586
3584 3587 return (B_FALSE);
3585 3588 }
3586 3589
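/*
 * GLDv3 mc_tx(9E) entry point.  Packets are consumed from the chain
 * until one cannot be sent; the unsent remainder is returned to the
 * MAC layer, which requeues it until the driver reports free
 * descriptors via mac_tx_update() from the reclaim path.
 */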
3587 3590 static mblk_t *
3588 3591 eri_m_tx(void *arg, mblk_t *mp)
3589 3592 {
3590 3593 struct eri *erip = arg;
3591 3594 mblk_t *next;
3592 3595
3593 3596 while (mp != NULL) {
3594 3597 next = mp->b_next;
3595 3598 mp->b_next = NULL;
3596 3599 if (!eri_send_msg(erip, mp)) {
3597 3600 mp->b_next = next;
3598 3601 break;
3599 3602 }
3600 3603 mp = next;
3601 3604 }
3602 3605
3603 3606 return (mp);
3604 3607 }
3605 3608
3606 3609 /*
3607 3610  * Transmit completion reclaiming.  Walk the descriptors from tcurp up
3608 3611  * to the hardware completion index, updating the output statistics.
 * Return B_TRUE if descriptors were reclaimed while the transmitter
 * was blocked (wantw), in which case the caller should invoke
 * mac_tx_update().
 */
3609 3612 static boolean_t
3610 3613 eri_reclaim(struct eri *erip, uint32_t tx_completion)
3611 3614 {
3612 3615 volatile struct eri_tmd *tmdp;
3613 3616 struct eri_tmd *tcomp;
3614 3617 struct eri_tmd *tbasep;
3615 3618 struct eri_tmd *tlimp;
3616 3619 uint64_t flags;
3617 3620 uint_t reclaimed = 0;
3618 3621
3619 3622 tbasep = erip->eri_tmdp;
3620 3623 tlimp = erip->eri_tmdlimp;
3621 3624
3622 3625 tmdp = erip->tcurp;
3623 3626 tcomp = tbasep + tx_completion; /* pointer to completion tmd */
3624 3627
3625 3628 /*
3626 3629 	 * Loop through each TMD starting from tcurp and up to tcomp.
3627 3630 */
3628 3631 while (tmdp != tcomp) {
3629 3632 flags = GET_TMD_FLAGS(tmdp);
3630 3633 if (flags & (ERI_TMD_SOP))
3631 3634 HSTAT(erip, opackets64);
3632 3635
3633 3636 HSTATN(erip, obytes64, (flags & ERI_TMD_BUFSIZE));
3634 3637
3635 3638 tmdp = NEXTTMDP(tbasep, tlimp, tmdp);
3636 3639 reclaimed++;
3637 3640 }
3638 3641
3639 3642 erip->tcurp = tmdp;
3640 3643 erip->tx_cur_cnt -= reclaimed;
3641 3644
3642 3645 return (erip->wantw && reclaimed ? B_TRUE : B_FALSE);
3643 3646 }
3644 3647
3645 3648
3646 3649 /* <<<<<<<<<<<<<<<<<<< PACKET RECEIVE FUNCTIONS >>>>>>>>>>>>>>>>>>> */
3647 3650 static mblk_t *
3648 3651 eri_read_dma(struct eri *erip, volatile struct rmd *rmdp,
3649 3652 int rmdi, uint64_t flags)
3650 3653 {
3651 3654 mblk_t *bp, *nbp;
3652 3655 int len;
3653 3656 uint_t ccnt;
3654 3657 ddi_dma_cookie_t c;
3655 3658 #ifdef ERI_RCV_CKSUM
3656 3659 ushort_t sum;
3657 3660 #endif /* ERI_RCV_CKSUM */
3658 3661 mblk_t *retmp = NULL;
3659 3662
3660 3663 bp = erip->rmblkp[rmdi];
3661 3664 len = (flags & ERI_RMD_BUFSIZE) >> ERI_RMD_BUFSIZE_SHIFT;
3662 3665 #ifdef ERI_DONT_STRIP_CRC
3663 3666 len -= 4;
3664 3667 #endif
3665 3668 /*
3666 3669 * In the event of RX FIFO overflow error, ERI REV 1.0 ASIC can
3667 3670 	 * corrupt packets following the descriptor corresponding to the
3668 3671 * overflow. To detect the corrupted packets, we disable the
3669 3672 * dropping of the "bad" packets at the MAC. The descriptor
3670 3673 * then would have the "BAD" bit set. We drop the overflowing
3671 3674 * packet and the packet following it. We could have done some sort
3672 3675 * of checking to determine if the second packet was indeed bad
3673 3676 * (using CRC or checksum) but it would be expensive in this
3674 3677 * routine, since it is run in interrupt context.
3675 3678 */
3676 3679 if ((flags & ERI_RMD_BAD) || (len < ETHERMIN) || (len > ETHERMAX+4)) {
3677 3680
3678 3681 HSTAT(erip, rx_bad_pkts);
3679 3682 if ((flags & ERI_RMD_BAD) == 0)
3680 3683 HSTAT(erip, ierrors);
3681 3684 if (len < ETHERMIN) {
3682 3685 HSTAT(erip, rx_runt);
3683 3686 } else if (len > ETHERMAX+4) {
3684 3687 HSTAT(erip, rx_toolong_pkts);
3685 3688 }
3686 3689 HSTAT(erip, drop);
3687 3690 UPDATE_RMD(rmdp);
3688 3691
3689 3692 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3690 3693 DDI_DMA_SYNC_FORDEV);
3691 3694 return (NULL);
3692 3695 }
3693 3696 #ifdef ERI_DONT_STRIP_CRC
3694 3697 {
3695 3698 uint32_t hw_fcs, tail_fcs;
3696 3699 /*
3697 3700 * since we don't let the hardware strip the CRC in hdx
3698 3701 * then the driver needs to do it.
3699 3702 * this is to workaround a hardware bug
3700 3703 */
3701 3704 bp->b_wptr = bp->b_rptr + ERI_FSTBYTE_OFFSET + len;
3702 3705 /*
3703 3706 * Get the Checksum calculated by the hardware.
3704 3707 */
3705 3708 hw_fcs = flags & ERI_RMD_CKSUM;
3706 3709 /*
3707 3710 * Catch the case when the CRC starts on an odd
3708 3711 * boundary.
3709 3712 */
3710 3713 tail_fcs = bp->b_wptr[0] << 8 | bp->b_wptr[1];
3711 3714 tail_fcs += bp->b_wptr[2] << 8 | bp->b_wptr[3];
3712 3715 tail_fcs = (tail_fcs & 0xffff) + (tail_fcs >> 16);
3713 3716 if ((uintptr_t)(bp->b_wptr) & 1) {
3714 3717 tail_fcs = (tail_fcs << 8) & 0xffff | (tail_fcs >> 8);
3715 3718 }
3716 3719 hw_fcs += tail_fcs;
3717 3720 hw_fcs = (hw_fcs & 0xffff) + (hw_fcs >> 16);
3718 3721 hw_fcs &= 0xffff;
3719 3722 /*
3720 3723 * Now we can replace what the hardware wrote, make believe
3721 3724 * it got it right in the first place.
3722 3725 */
3723 3726 flags = (flags & ~(uint64_t)ERI_RMD_CKSUM) | hw_fcs;
3724 3727 }
3725 3728 #endif
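/*
 * The folding above splices the four FCS bytes into the 16-bit
 * one's-complement sum computed by the hardware over the stripped
 * payload.  Carries are folded back as in ordinary Internet
 * checksumming, e.g. 0x1fffe folds to 0xfffe + 0x1 == 0xffff.  The
 * byte swap handles an FCS starting on an odd address, where the
 * hardware summed the bytes with opposite significance.
 */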
3726 3729 /*
3727 3730 * Packet Processing
3728 3731  * Once we get a packet bp, we try to allocate a new mblk, nbp,
3729 3732  * to replace this one. If we succeed, we map it to the current
3730 3733  * dma handle and update the descriptor with the new cookie. We
3731 3734  * then put bp in our read service queue erip->ipq, if it exists,
3732 3735  * or we just pass bp up to the streams expecting it.
3733 3736  * If allocation of the new mblk fails, we implicitly drop the
3734 3737  * current packet, i.e. do not pass up the mblk, and re-use it.
3735 3738 * Re-mapping is not required.
3736 3739 */
3737 3740
3738 3741 if (len < eri_rx_bcopy_max) {
3739 3742 if ((nbp = eri_allocb_sp(len + ERI_FSTBYTE_OFFSET))) {
3740 3743 (void) ddi_dma_sync(erip->ndmarh[rmdi], 0,
3741 3744 len + ERI_FSTBYTE_OFFSET, DDI_DMA_SYNC_FORCPU);
3742 3745 DB_TYPE(nbp) = M_DATA;
3743 3746 bcopy(bp->b_rptr, nbp->b_rptr,
3744 3747 len + ERI_FSTBYTE_OFFSET);
3745 3748 UPDATE_RMD(rmdp);
3746 3749 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3747 3750 DDI_DMA_SYNC_FORDEV);
3748 3751
3749 3752 /* Add the First Byte offset to the b_rptr */
3750 3753 nbp->b_rptr += ERI_FSTBYTE_OFFSET;
3751 3754 nbp->b_wptr = nbp->b_rptr + len;
3752 3755
3753 3756 #ifdef ERI_RCV_CKSUM
3754 3757 sum = ~(uint16_t)(flags & ERI_RMD_CKSUM);
3755 3758 ERI_PROCESS_READ(erip, nbp, sum);
3756 3759 #else
3757 3760 ERI_PROCESS_READ(erip, nbp);
3758 3761 #endif
3759 3762 retmp = nbp;
3760 3763 } else {
3761 3764
3762 3765 /*
3763 3766 * mblk allocation has failed. Re-use the old mblk for
3764 3767 * the next packet. Re-mapping is not required since
3765 3768 * the same mblk and dma cookie is to be used again.
3766 3769 */
3767 3770 HSTAT(erip, ierrors);
3768 3771 HSTAT(erip, allocbfail);
3769 3772 HSTAT(erip, norcvbuf);
3770 3773
3771 3774 UPDATE_RMD(rmdp);
3772 3775 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3773 3776 DDI_DMA_SYNC_FORDEV);
3774 3777 ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail");
3775 3778 }
3776 3779 } else {
3777 3780 /* Use dma unmap/map */
3778 3781 if ((nbp = eri_allocb_sp(ERI_BUFSIZE))) {
3779 3782 /*
3780 3783 			 * How do we harden this, especially if unbind
3781 3784 * succeeds and then bind fails?
3782 3785 * If Unbind fails, we can leave without updating
3783 3786 * the descriptor but would it continue to work on
3784 3787 * next round?
3785 3788 */
3786 3789 (void) ddi_dma_unbind_handle(erip->ndmarh[rmdi]);
3787 3790 (void) ddi_dma_addr_bind_handle(erip->ndmarh[rmdi],
3788 3791 NULL, (caddr_t)nbp->b_rptr, ERI_BUFSIZE,
3789 3792 DDI_DMA_READ | DDI_DMA_CONSISTENT,
3790 3793 DDI_DMA_DONTWAIT, 0, &c, &ccnt);
3791 3794
3792 3795 erip->rmblkp[rmdi] = nbp;
3793 3796 PUT_RMD(rmdp, c);
3794 3797 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3795 3798 DDI_DMA_SYNC_FORDEV);
3796 3799
3797 3800 /* Add the First Byte offset to the b_rptr */
3798 3801
3799 3802 bp->b_rptr += ERI_FSTBYTE_OFFSET;
3800 3803 bp->b_wptr = bp->b_rptr + len;
3801 3804
3802 3805 #ifdef ERI_RCV_CKSUM
3803 3806 sum = ~(uint16_t)(flags & ERI_RMD_CKSUM);
3804 3807 ERI_PROCESS_READ(erip, bp, sum);
3805 3808 #else
3806 3809 ERI_PROCESS_READ(erip, bp);
3807 3810 #endif
3808 3811 retmp = bp;
3809 3812 } else {
3810 3813
3811 3814 /*
3812 3815 * mblk allocation has failed. Re-use the old mblk for
3813 3816 * the next packet. Re-mapping is not required since
3814 3817 * the same mblk and dma cookie is to be used again.
3815 3818 */
3816 3819 HSTAT(erip, ierrors);
3817 3820 HSTAT(erip, allocbfail);
3818 3821 HSTAT(erip, norcvbuf);
3819 3822
3820 3823 UPDATE_RMD(rmdp);
3821 3824 ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3822 3825 DDI_DMA_SYNC_FORDEV);
3823 3826 ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail");
3824 3827 }
3825 3828 }
3826 3829
3827 3830 return (retmp);
3828 3831 }
3829 3832
3830 3833 #define LINK_STAT_DISPLAY_TIME 20
3831 3834
3832 3835 static int
3833 3836 eri_init_xfer_params(struct eri *erip)
3834 3837 {
3835 3838 int i;
3836 3839 dev_info_t *dip;
3837 3840
3838 3841 dip = erip->dip;
3839 3842
3840 3843 for (i = 0; i < A_CNT(param_arr); i++)
3841 3844 erip->param_arr[i] = param_arr[i];
3842 3845
3843 3846 erip->xmit_dma_mode = 0;
3844 3847 erip->rcv_dma_mode = 0;
3845 3848 erip->mifpoll_enable = mifpoll_enable;
3846 3849 erip->lance_mode_enable = lance_mode;
3847 3850 erip->frame_enable = 1;
3848 3851 erip->ngu_enable = ngu_enable;
3849 3852
3850 3853 if (!erip->g_nd && !eri_param_register(erip,
3851 3854 erip->param_arr, A_CNT(param_arr))) {
3852 3855 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
3853 3856 param_reg_fail_msg);
3854 3857 return (-1);
3855 3858 }
3856 3859
3857 3860 /*
3858 3861 * Set up the start-up values for user-configurable parameters
3859 3862 * Get the values from the global variables first.
3860 3863 * Use the MASK to limit the value to allowed maximum.
3861 3864 */
3862 3865
3863 3866 param_transceiver = NO_XCVR;
3864 3867
3865 3868 	/*
3866 3869 	 * The link speed may be forced to either 10 Mbps or 100 Mbps
3867 3870 	 * using the property "transfer-speed". This may be done in OBP
3868 3871 	 * with the command "apply transfer-speed=<speed> <device>".
	 * The speed may be either 10 or 100.
3869 3872 	 */
3870 3873 i = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "transfer-speed", 0);
3871 3874 if (i != 0) {
3872 3875 param_autoneg = 0; /* force speed */
3873 3876 param_anar_100T4 = 0;
3874 3877 param_anar_10fdx = 0;
3875 3878 param_anar_10hdx = 0;
3876 3879 param_anar_100fdx = 0;
3877 3880 param_anar_100hdx = 0;
3878 3881 param_anar_asm_dir = 0;
3879 3882 param_anar_pause = 0;
3880 3883
3881 3884 if (i == 10)
3882 3885 param_anar_10hdx = 1;
3883 3886 else if (i == 100)
3884 3887 param_anar_100hdx = 1;
3885 3888 }
3886 3889
3887 3890 /*
3888 3891 * Get the parameter values configured in .conf file.
3889 3892 */
3890 3893 param_ipg1 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg1", ipg1) &
3891 3894 ERI_MASK_8BIT;
3892 3895
3893 3896 param_ipg2 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg2", ipg2) &
3894 3897 ERI_MASK_8BIT;
3895 3898
3896 3899 param_use_intphy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3897 3900 "use_int_xcvr", use_int_xcvr) & ERI_MASK_1BIT;
3898 3901
3899 3902 	param_pace_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3900 3903 "pace_size", pace_size) & ERI_MASK_8BIT;
3901 3904
3902 3905 param_autoneg = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3903 3906 "adv_autoneg_cap", adv_autoneg_cap) & ERI_MASK_1BIT;
3904 3907
3908 3911 param_anar_100T4 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3909 3912 "adv_100T4_cap", adv_100T4_cap) & ERI_MASK_1BIT;
3910 3913
3911 3914 param_anar_100fdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3912 3915 "adv_100fdx_cap", adv_100fdx_cap) & ERI_MASK_1BIT;
3913 3916
3914 3917 param_anar_100hdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3915 3918 "adv_100hdx_cap", adv_100hdx_cap) & ERI_MASK_1BIT;
3916 3919
3917 3920 param_anar_10fdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3918 3921 "adv_10fdx_cap", adv_10fdx_cap) & ERI_MASK_1BIT;
3919 3922
3920 3923 param_anar_10hdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3921 3924 "adv_10hdx_cap", adv_10hdx_cap) & ERI_MASK_1BIT;
3922 3925
3923 3926 param_ipg0 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg0", ipg0) &
3924 3927 ERI_MASK_8BIT;
3925 3928
3926 3929 param_intr_blank_time = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3927 3930 "intr_blank_time", intr_blank_time) & ERI_MASK_8BIT;
3928 3931
3929 3932 param_intr_blank_packets = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3930 3933 "intr_blank_packets", intr_blank_packets) & ERI_MASK_8BIT;
3931 3934
3932 3935 param_lance_mode = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3933 3936 "lance_mode", lance_mode) & ERI_MASK_1BIT;
3934 3937
3935 3938 param_select_link = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3936 3939 "select_link", select_link) & ERI_MASK_1BIT;
3937 3940
3938 3941 param_default_link = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3939 3942 "default_link", default_link) & ERI_MASK_1BIT;
3940 3943
3941 3944 param_anar_asm_dir = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3942 3945 "adv_asm_dir_cap", adv_pauseTX_cap) & ERI_MASK_1BIT;
3943 3946
3944 3947 param_anar_pause = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3945 3948 "adv_pause_cap", adv_pauseRX_cap) & ERI_MASK_1BIT;
3946 3949
3947 3950 if (link_pulse_disabled)
3948 3951 erip->link_pulse_disabled = 1;
3949 3952 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, 0, "link-pulse-disabled"))
3950 3953 erip->link_pulse_disabled = 1;
3951 3954
3952 3955 eri_statinit(erip);
3953 3956 return (0);
3955 3958 }
3956 3959
3957 3960 static void
3958 3961 eri_process_ndd_ioctl(struct eri *erip, queue_t *wq, mblk_t *mp, int cmd)
3959 3962 {
3961 3964 uint32_t old_ipg1, old_ipg2, old_use_int_xcvr, old_autoneg;
3962 3965 uint32_t old_100T4;
3963 3966 uint32_t old_100fdx, old_100hdx, old_10fdx, old_10hdx;
3964 3967 uint32_t old_ipg0, old_lance_mode;
3965 3968 uint32_t old_intr_blank_time, old_intr_blank_packets;
3966 3969 uint32_t old_asm_dir, old_pause;
3967 3970 uint32_t old_select_link, old_default_link;
3968 3971
3969 3972 switch (cmd) {
3970 3973 case ERI_ND_GET:
3971 3974
3972 3975 old_autoneg = param_autoneg;
3973 3976 old_100T4 = param_anar_100T4;
3974 3977 old_100fdx = param_anar_100fdx;
3975 3978 old_100hdx = param_anar_100hdx;
3976 3979 old_10fdx = param_anar_10fdx;
3977 3980 old_10hdx = param_anar_10hdx;
3978 3981 old_asm_dir = param_anar_asm_dir;
3979 3982 old_pause = param_anar_pause;
3980 3983
3981 3984 param_autoneg = old_autoneg & ~ERI_NOTUSR;
3982 3985 param_anar_100T4 = old_100T4 & ~ERI_NOTUSR;
3983 3986 param_anar_100fdx = old_100fdx & ~ERI_NOTUSR;
3984 3987 param_anar_100hdx = old_100hdx & ~ERI_NOTUSR;
3985 3988 param_anar_10fdx = old_10fdx & ~ERI_NOTUSR;
3986 3989 param_anar_10hdx = old_10hdx & ~ERI_NOTUSR;
3987 3990 param_anar_asm_dir = old_asm_dir & ~ERI_NOTUSR;
3988 3991 param_anar_pause = old_pause & ~ERI_NOTUSR;
3989 3992
3990 3993 if (!eri_nd_getset(wq, erip->g_nd, mp)) {
3991 3994 param_autoneg = old_autoneg;
3992 3995 param_anar_100T4 = old_100T4;
3993 3996 param_anar_100fdx = old_100fdx;
3994 3997 param_anar_100hdx = old_100hdx;
3995 3998 param_anar_10fdx = old_10fdx;
3996 3999 param_anar_10hdx = old_10hdx;
3997 4000 param_anar_asm_dir = old_asm_dir;
3998 4001 param_anar_pause = old_pause;
3999 4002 miocnak(wq, mp, 0, EINVAL);
4000 4003 return;
4001 4004 }
4002 4005 param_autoneg = old_autoneg;
4003 4006 param_anar_100T4 = old_100T4;
4004 4007 param_anar_100fdx = old_100fdx;
4005 4008 param_anar_100hdx = old_100hdx;
4006 4009 param_anar_10fdx = old_10fdx;
4007 4010 param_anar_10hdx = old_10hdx;
4008 4011 param_anar_asm_dir = old_asm_dir;
4009 4012 param_anar_pause = old_pause;
4010 4013
4011 4014 qreply(wq, mp);
4012 4015 break;
4013 4016
4014 4017 case ERI_ND_SET:
4015 4018 old_ipg0 = param_ipg0;
4016 4019 old_intr_blank_time = param_intr_blank_time;
4017 4020 old_intr_blank_packets = param_intr_blank_packets;
4018 4021 old_lance_mode = param_lance_mode;
4019 4022 old_ipg1 = param_ipg1;
4020 4023 old_ipg2 = param_ipg2;
4021 4024 old_use_int_xcvr = param_use_intphy;
4022 4025 old_autoneg = param_autoneg;
4023 4026 old_100T4 = param_anar_100T4;
4024 4027 old_100fdx = param_anar_100fdx;
4025 4028 old_100hdx = param_anar_100hdx;
4026 4029 old_10fdx = param_anar_10fdx;
4027 4030 old_10hdx = param_anar_10hdx;
4028 4031 param_autoneg = 0xff;
4029 4032 old_asm_dir = param_anar_asm_dir;
4030 4033 param_anar_asm_dir = 0xff;
4031 4034 old_pause = param_anar_pause;
4032 4035 param_anar_pause = 0xff;
4033 4036 old_select_link = param_select_link;
4034 4037 old_default_link = param_default_link;
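		/*
		 * 0xff acts as a sentinel: eri_nd_getset() stores the
		 * user's value on a successful set, so any value other
		 * than 0xff afterwards means the parameter was changed
		 * and the link must be renegotiated.
		 */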
4035 4038
4036 4039 if (!eri_nd_getset(wq, erip->g_nd, mp)) {
4037 4040 param_autoneg = old_autoneg;
4038 4041 miocnak(wq, mp, 0, EINVAL);
4039 4042 return;
4040 4043 }
4041 4044
4042 4045 qreply(wq, mp);
4043 4046
4044 4047 if (param_autoneg != 0xff) {
4045 4048 ERI_DEBUG_MSG2(erip, NDD_MSG,
4046 4049 "ndd_ioctl: new param_autoneg %d", param_autoneg);
4047 4050 param_linkup = 0;
4048 4051 erip->stats.link_up = LINK_STATE_DOWN;
4049 4052 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4050 4053 (void) eri_init(erip);
4051 4054 } else {
4052 4055 param_autoneg = old_autoneg;
4053 4056 if ((old_use_int_xcvr != param_use_intphy) ||
4054 4057 (old_default_link != param_default_link) ||
4055 4058 (old_select_link != param_select_link)) {
4056 4059 param_linkup = 0;
4057 4060 erip->stats.link_up = LINK_STATE_DOWN;
4058 4061 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4059 4062 (void) eri_init(erip);
4060 4063 } else if ((old_ipg1 != param_ipg1) ||
4061 4064 (old_ipg2 != param_ipg2) ||
4062 4065 (old_ipg0 != param_ipg0) ||
4063 4066 (old_intr_blank_time != param_intr_blank_time) ||
4064 4067 (old_intr_blank_packets !=
4065 4068 param_intr_blank_packets) ||
4066 4069 (old_lance_mode != param_lance_mode)) {
4067 4070 param_linkup = 0;
4068 4071 erip->stats.link_up = LINK_STATE_DOWN;
4069 4072 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4070 4073 (void) eri_init(erip);
4071 4074 }
4072 4075 }
4073 4076 break;
4074 4077 }
4075 4078 }
4076 4079
4077 4080
4078 4081 static int
4079 4082 eri_stat_kstat_update(kstat_t *ksp, int rw)
4080 4083 {
4081 4084 struct eri *erip;
4082 4085 struct erikstat *erikp;
4083 4086 struct stats *esp;
4084 4087 boolean_t macupdate = B_FALSE;
4085 4088
4086 4089 erip = (struct eri *)ksp->ks_private;
4087 4090 erikp = (struct erikstat *)ksp->ks_data;
4088 4091
4089 4092 if (rw != KSTAT_READ)
4090 4093 return (EACCES);
4091 4094 /*
4092 4095 * Update all the stats by reading all the counter registers.
4093 4096 * Counter register stats are not updated till they overflow
4094 4097 * and interrupt.
4095 4098 */
4096 4099
4097 4100 mutex_enter(&erip->xmitlock);
4098 4101 if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
4099 4102 erip->tx_completion =
4100 4103 GET_ETXREG(tx_completion) & ETX_COMPLETION_MASK;
4101 4104 macupdate |= eri_reclaim(erip, erip->tx_completion);
4102 4105 }
4103 4106 mutex_exit(&erip->xmitlock);
4104 4107 if (macupdate)
4105 4108 mac_tx_update(erip->mh);
4106 4109
4107 4110 eri_savecntrs(erip);
4108 4111
4109 4112 esp = &erip->stats;
4110 4113
4111 4114 erikp->erik_txmac_maxpkt_err.value.ul = esp->txmac_maxpkt_err;
4112 4115 erikp->erik_defer_timer_exp.value.ul = esp->defer_timer_exp;
4113 4116 erikp->erik_peak_attempt_cnt.value.ul = esp->peak_attempt_cnt;
4114 4117 erikp->erik_tx_hang.value.ul = esp->tx_hang;
4115 4118
4116 4119 erikp->erik_no_free_rx_desc.value.ul = esp->no_free_rx_desc;
4117 4120
4118 4121 erikp->erik_rx_hang.value.ul = esp->rx_hang;
4119 4122 erikp->erik_rx_length_err.value.ul = esp->rx_length_err;
4120 4123 erikp->erik_rx_code_viol_err.value.ul = esp->rx_code_viol_err;
4121 4124 erikp->erik_pause_rxcount.value.ul = esp->pause_rxcount;
4122 4125 erikp->erik_pause_oncount.value.ul = esp->pause_oncount;
4123 4126 erikp->erik_pause_offcount.value.ul = esp->pause_offcount;
4124 4127 erikp->erik_pause_time_count.value.ul = esp->pause_time_count;
4125 4128
4126 4129 erikp->erik_inits.value.ul = esp->inits;
4127 4130 erikp->erik_jab.value.ul = esp->jab;
4128 4131 erikp->erik_notmds.value.ul = esp->notmds;
4129 4132 erikp->erik_allocbfail.value.ul = esp->allocbfail;
4130 4133 erikp->erik_drop.value.ul = esp->drop;
4131 4134 erikp->erik_rx_bad_pkts.value.ul = esp->rx_bad_pkts;
4132 4135 erikp->erik_rx_inits.value.ul = esp->rx_inits;
4133 4136 erikp->erik_tx_inits.value.ul = esp->tx_inits;
4134 4137 erikp->erik_rxtag_err.value.ul = esp->rxtag_err;
4135 4138 erikp->erik_parity_error.value.ul = esp->parity_error;
4136 4139 erikp->erik_pci_error_int.value.ul = esp->pci_error_int;
4137 4140 erikp->erik_unknown_fatal.value.ul = esp->unknown_fatal;
4138 4141 erikp->erik_pci_data_parity_err.value.ul = esp->pci_data_parity_err;
4139 4142 erikp->erik_pci_signal_target_abort.value.ul =
4140 4143 esp->pci_signal_target_abort;
4141 4144 erikp->erik_pci_rcvd_target_abort.value.ul =
4142 4145 esp->pci_rcvd_target_abort;
4143 4146 erikp->erik_pci_rcvd_master_abort.value.ul =
4144 4147 esp->pci_rcvd_master_abort;
4145 4148 erikp->erik_pci_signal_system_err.value.ul =
4146 4149 esp->pci_signal_system_err;
4147 4150 erikp->erik_pci_det_parity_err.value.ul = esp->pci_det_parity_err;
4148 4151
4149 4152 erikp->erik_pmcap.value.ul = esp->pmcap;
4150 4153
4151 4154 return (0);
4152 4155 }
4153 4156
4154 4157 static void
4155 4158 eri_statinit(struct eri *erip)
4156 4159 {
4157 4160 struct kstat *ksp;
4158 4161 struct erikstat *erikp;
4159 4162
4160 4163 if ((ksp = kstat_create("eri", erip->instance, "driver_info", "net",
4161 4164 KSTAT_TYPE_NAMED,
4162 4165 sizeof (struct erikstat) / sizeof (kstat_named_t), 0)) == NULL) {
4163 4166 ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
4164 4167 kstat_create_fail_msg);
4165 4168 return;
4166 4169 }
4167 4170
4168 4171 erip->ksp = ksp;
4169 4172 erikp = (struct erikstat *)(ksp->ks_data);
4170 4173 /*
4171 4174 * MIB II kstat variables
4172 4175 */
4173 4176
4174 4177 kstat_named_init(&erikp->erik_inits, "inits", KSTAT_DATA_ULONG);
4175 4178
4176 4179 kstat_named_init(&erikp->erik_txmac_maxpkt_err, "txmac_maxpkt_err",
4177 4180 KSTAT_DATA_ULONG);
4178 4181 kstat_named_init(&erikp->erik_defer_timer_exp, "defer_timer_exp",
4179 4182 KSTAT_DATA_ULONG);
4180 4183 kstat_named_init(&erikp->erik_peak_attempt_cnt, "peak_attempt_cnt",
4181 4184 KSTAT_DATA_ULONG);
4182 4185 kstat_named_init(&erikp->erik_tx_hang, "tx_hang", KSTAT_DATA_ULONG);
4183 4186
4184 4187 kstat_named_init(&erikp->erik_no_free_rx_desc, "no_free_rx_desc",
4185 4188 KSTAT_DATA_ULONG);
4186 4189 kstat_named_init(&erikp->erik_rx_hang, "rx_hang", KSTAT_DATA_ULONG);
4187 4190 kstat_named_init(&erikp->erik_rx_length_err, "rx_length_err",
4188 4191 KSTAT_DATA_ULONG);
4189 4192 kstat_named_init(&erikp->erik_rx_code_viol_err, "rx_code_viol_err",
4190 4193 KSTAT_DATA_ULONG);
4191 4194
4192 4195 kstat_named_init(&erikp->erik_pause_rxcount, "pause_rcv_cnt",
4193 4196 KSTAT_DATA_ULONG);
4194 4197
4195 4198 kstat_named_init(&erikp->erik_pause_oncount, "pause_on_cnt",
4196 4199 KSTAT_DATA_ULONG);
4197 4200
4198 4201 kstat_named_init(&erikp->erik_pause_offcount, "pause_off_cnt",
4199 4202 KSTAT_DATA_ULONG);
4200 4203 kstat_named_init(&erikp->erik_pause_time_count, "pause_time_cnt",
4201 4204 KSTAT_DATA_ULONG);
4202 4205
4203 4206 kstat_named_init(&erikp->erik_jab, "jabber", KSTAT_DATA_ULONG);
4204 4207 kstat_named_init(&erikp->erik_notmds, "no_tmds", KSTAT_DATA_ULONG);
4205 4208 kstat_named_init(&erikp->erik_allocbfail, "allocbfail",
4206 4209 KSTAT_DATA_ULONG);
4207 4210
4208 4211 kstat_named_init(&erikp->erik_drop, "drop", KSTAT_DATA_ULONG);
4209 4212
4210 4213 kstat_named_init(&erikp->erik_rx_bad_pkts, "bad_pkts",
4211 4214 KSTAT_DATA_ULONG);
4212 4215
4213 4216 kstat_named_init(&erikp->erik_rx_inits, "rx_inits", KSTAT_DATA_ULONG);
4214 4217
4215 4218 kstat_named_init(&erikp->erik_tx_inits, "tx_inits", KSTAT_DATA_ULONG);
4216 4219
4217 4220 kstat_named_init(&erikp->erik_rxtag_err, "rxtag_error",
4218 4221 KSTAT_DATA_ULONG);
4219 4222
4220 4223 kstat_named_init(&erikp->erik_parity_error, "parity_error",
4221 4224 KSTAT_DATA_ULONG);
4222 4225
4223 4226 kstat_named_init(&erikp->erik_pci_error_int, "pci_error_interrupt",
4224 4227 KSTAT_DATA_ULONG);
4225 4228 kstat_named_init(&erikp->erik_unknown_fatal, "unknown_fatal",
4226 4229 KSTAT_DATA_ULONG);
4227 4230 kstat_named_init(&erikp->erik_pci_data_parity_err,
4228 4231 "pci_data_parity_err", KSTAT_DATA_ULONG);
4229 4232 kstat_named_init(&erikp->erik_pci_signal_target_abort,
4230 4233 "pci_signal_target_abort", KSTAT_DATA_ULONG);
4231 4234 kstat_named_init(&erikp->erik_pci_rcvd_target_abort,
4232 4235 "pci_rcvd_target_abort", KSTAT_DATA_ULONG);
4233 4236 kstat_named_init(&erikp->erik_pci_rcvd_master_abort,
4234 4237 "pci_rcvd_master_abort", KSTAT_DATA_ULONG);
4235 4238 kstat_named_init(&erikp->erik_pci_signal_system_err,
4236 4239 "pci_signal_system_err", KSTAT_DATA_ULONG);
4237 4240 kstat_named_init(&erikp->erik_pci_det_parity_err,
4238 4241 "pci_det_parity_err", KSTAT_DATA_ULONG);
4239 4242
4240 4243 kstat_named_init(&erikp->erik_pmcap, "pmcap", KSTAT_DATA_ULONG);
4241 4244
4242 4245
4243 4246 ksp->ks_update = eri_stat_kstat_update;
4244 4247 ksp->ks_private = (void *) erip;
4245 4248 kstat_install(ksp);
4246 4249 }
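/*
 * Once installed, these counters can be inspected from userland with,
 * for example, "kstat -m eri -n driver_info".
 */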
4247 4250
4248 4251
4249 4252 /* <<<<<<<<<<<<<<<<<<<<<<< NDD SUPPORT FUNCTIONS >>>>>>>>>>>>>>>>>>> */
4250 4253 /*
4251 4254 * ndd support functions to get/set parameters
4252 4255 */
4253 4256 /* Free the Named Dispatch Table by calling eri_nd_free */
4254 4257 static void
4255 4258 eri_param_cleanup(struct eri *erip)
4256 4259 {
4257 4260 if (erip->g_nd)
4258 4261 (void) eri_nd_free(&erip->g_nd);
4259 4262 }
4260 4263
4261 4264 /*
4262 4265 * Extracts the value from the eri parameter array and prints the
4263 4266 * parameter value. cp points to the required parameter.
4264 4267 */
4265 4268 /* ARGSUSED */
4266 4269 static int
4267 4270 eri_param_get(queue_t *q, mblk_t *mp, caddr_t cp)
4268 4271 {
4269 4272 param_t *eripa = (void *)cp;
4270 4273 int param_len = 1;
4271 4274 uint32_t param_val;
4272 4275 mblk_t *nmp;
4273 4276 int ok;
4274 4277
4275 4278 param_val = eripa->param_val;
4276 4279 /*
4277 4280 * Calculate space required in mblk.
4278 4281 * Remember to include NULL terminator.
4279 4282 */
4280 4283 do {
4281 4284 param_len++;
4282 4285 param_val /= 10;
4283 4286 } while (param_val);
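	/*
	 * For example, param_val == 0 yields param_len == 2 (one digit
	 * plus the NULL terminator), and param_val == 255 yields
	 * param_len == 4.
	 */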
4284 4287
4285 4288 ok = eri_mk_mblk_tail_space(mp, &nmp, param_len);
4286 4289 if (ok == 0) {
4287 4290 (void) sprintf((char *)nmp->b_wptr, "%d", eripa->param_val);
4288 4291 nmp->b_wptr += param_len;
4289 4292 }
4290 4293
4291 4294 return (ok);
4292 4295 }
4293 4296
4294 4297 /*
4295 4298  * Check if there is space for p_val at the end of mblk.
4296 4299 * If not, allocate new 1k mblk.
4297 4300 */
4298 4301 static int
4299 4302 eri_mk_mblk_tail_space(mblk_t *mp, mblk_t **nmp, size_t sz)
4300 4303 {
4301 4304 mblk_t *tmp = mp;
4302 4305
4303 4306 while (tmp->b_cont)
4304 4307 tmp = tmp->b_cont;
4305 4308
4306 4309 if (MBLKTAIL(tmp) < sz) {
4307 4310 if ((tmp->b_cont = allocb(1024, BPRI_HI)) == NULL)
4308 4311 return (ENOMEM);
4309 4312 tmp = tmp->b_cont;
4310 4313 }
4311 4314 *nmp = tmp;
4312 4315 return (0);
4313 4316 }
4314 4317
4315 4318 /*
4316 4319 * Register each element of the parameter array with the
4317 4320 * named dispatch handler. Each element is loaded using
4318 4321 * eri_nd_load()
4319 4322 */
4320 4323 static int
4321 4324 eri_param_register(struct eri *erip, param_t *eripa, int cnt)
4322 4325 {
4323 4326 /* cnt gives the count of the number of */
4324 4327 /* elements present in the parameter array */
4325 4328
4326 4329 int i;
4327 4330
4328 4331 for (i = 0; i < cnt; i++, eripa++) {
4329 4332 pfi_t setter = (pfi_t)eri_param_set;
4330 4333
4331 4334 switch (eripa->param_name[0]) {
4332 4335 case '+': /* read-write */
4333 4336 setter = (pfi_t)eri_param_set;
4334 4337 break;
4335 4338
4336 4339 case '-': /* read-only */
4337 4340 setter = NULL;
4338 4341 break;
4339 4342
4340 4343 case '!': /* read-only, not displayed */
4341 4344 case '%': /* read-write, not displayed */
4342 4345 continue;
4343 4346 }
4344 4347
4345 4348 if (!eri_nd_load(&erip->g_nd, eripa->param_name + 1,
4346 4349 (pfi_t)eri_param_get, setter, (caddr_t)eripa)) {
4347 4350 (void) eri_nd_free(&erip->g_nd);
4348 4351 return (B_FALSE);
4349 4352 }
4350 4353 }
4351 4354
4352 4355 return (B_TRUE);
4353 4356 }
4354 4357
4355 4358 /*
4356 4359 * Sets the eri parameter to the value in the param_register using
4357 4360 * eri_nd_load().
4358 4361 */
4359 4362 /* ARGSUSED */
4360 4363 static int
4361 4364 eri_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp)
4362 4365 {
4363 4366 char *end;
4364 4367 long new_value;
4365 4368 param_t *eripa = (void *)cp;
4366 4369
4367 4370 if (ddi_strtol(value, &end, 10, &new_value) != 0)
4368 4371 return (EINVAL);
4369 4372 if (end == value || new_value < eripa->param_min ||
4370 4373 new_value > eripa->param_max) {
4371 4374 return (EINVAL);
4372 4375 }
4373 4376 eripa->param_val = (uint32_t)new_value;
4374 4377 return (0);
4376 4379 }
4377 4380
4378 4381 /* Free the table pointed to by 'ndp' */
4379 4382 static void
4380 4383 eri_nd_free(caddr_t *nd_pparam)
4381 4384 {
4382 4385 ND *nd;
4383 4386
4384 4387 if ((nd = (void *)(*nd_pparam)) != NULL) {
4385 4388 if (nd->nd_tbl)
4386 4389 kmem_free(nd->nd_tbl, nd->nd_size);
4387 4390 kmem_free(nd, sizeof (ND));
4388 4391 *nd_pparam = NULL;
4389 4392 }
4390 4393 }
4391 4394
4392 4395 static int
4393 4396 eri_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp)
4394 4397 {
4395 4398 int err;
4396 4399 IOCP iocp;
4397 4400 MBLKP mp1;
4398 4401 ND *nd;
4399 4402 NDE *nde;
4400 4403 char *valp;
4401 4404 size_t avail;
4402 4405 mblk_t *nmp;
4403 4406
4404 4407 if (!nd_param)
4405 4408 return (B_FALSE);
4406 4409
4407 4410 nd = (void *)nd_param;
4408 4411 iocp = (void *)mp->b_rptr;
4409 4412 if ((iocp->ioc_count == 0) || !(mp1 = mp->b_cont)) {
4410 4413 mp->b_datap->db_type = M_IOCACK;
4411 4414 iocp->ioc_count = 0;
4412 4415 iocp->ioc_error = EINVAL;
4413 4416 return (B_TRUE);
4414 4417 }
4415 4418 /*
4416 4419 * NOTE - logic throughout nd_xxx assumes single data block for ioctl.
4417 4420 * However, existing code sends in some big buffers.
4418 4421 */
4419 4422 avail = iocp->ioc_count;
4420 4423 if (mp1->b_cont) {
4421 4424 freemsg(mp1->b_cont);
4422 4425 mp1->b_cont = NULL;
4423 4426 }
4424 4427
4425 4428 mp1->b_datap->db_lim[-1] = '\0'; /* Force null termination */
4426 4429 valp = (char *)mp1->b_rptr;
4427 4430
4428 4431 for (nde = nd->nd_tbl; /* */; nde++) {
4429 4432 if (!nde->nde_name)
4430 4433 return (B_FALSE);
4431 4434 if (strcmp(nde->nde_name, valp) == 0)
4432 4435 break;
4433 4436 }
4434 4437 err = EINVAL;
4435 4438
4436 4439 while (*valp++)
4437 4440 ;
4438 4441
4439 4442 if (!*valp || valp >= (char *)mp1->b_wptr)
4440 4443 valp = NULL;
4441 4444
4442 4445 switch (iocp->ioc_cmd) {
4443 4446 case ND_GET:
4444 4447 /*
4445 4448 * (XXX) hack: "*valp" is size of user buffer for copyout. If result
4446 4449 * of action routine is too big, free excess and return ioc_rval as buf
4447 4450 * size needed. Return as many mblocks as will fit, free the rest. For
4448 4451 * backward compatibility, assume size of orig ioctl buffer if "*valp"
4449 4452 * bad or not given.
4450 4453 */
4451 4454 if (valp)
4452 4455 (void) ddi_strtol(valp, NULL, 10, (long *)&avail);
4453 4456 /* We overwrite the name/value with the reply data */
4454 4457 {
4455 4458 mblk_t *mp2 = mp1;
4456 4459
4457 4460 while (mp2) {
4458 4461 mp2->b_wptr = mp2->b_rptr;
4459 4462 mp2 = mp2->b_cont;
4460 4463 }
4461 4464 }
4462 4465 err = (*nde->nde_get_pfi)(q, mp1, nde->nde_data, iocp->ioc_cr);
4463 4466 if (!err) {
4464 4467 size_t size_out;
4465 4468 ssize_t excess;
4466 4469
4467 4470 iocp->ioc_rval = 0;
4468 4471
4469 4472 /* Tack on the null */
4470 4473 err = eri_mk_mblk_tail_space(mp1, &nmp, 1);
4471 4474 if (!err) {
4472 4475 *nmp->b_wptr++ = '\0';
4473 4476 size_out = msgdsize(mp1);
4474 4477 excess = size_out - avail;
4475 4478 if (excess > 0) {
4476 4479 iocp->ioc_rval = (unsigned)size_out;
4477 4480 size_out -= excess;
4478 4481 (void) adjmsg(mp1, -(excess + 1));
4479 4482 err = eri_mk_mblk_tail_space(mp1,
4480 4483 &nmp, 1);
4481 4484 if (!err)
4482 4485 *nmp->b_wptr++ = '\0';
4483 4486 else
4484 4487 size_out = 0;
4485 4488 }
4486 4489
4487 4490 } else
4488 4491 size_out = 0;
4489 4492
4490 4493 iocp->ioc_count = size_out;
4491 4494 }
4492 4495 break;
4493 4496
4494 4497 case ND_SET:
4495 4498 if (valp) {
4496 4499 err = (*nde->nde_set_pfi)(q, mp1, valp,
4497 4500 nde->nde_data, iocp->ioc_cr);
4498 4501 iocp->ioc_count = 0;
4499 4502 freemsg(mp1);
4500 4503 mp->b_cont = NULL;
4501 4504 }
4502 4505 break;
4503 4506 }
4504 4507
4505 4508 iocp->ioc_error = err;
4506 4509 mp->b_datap->db_type = M_IOCACK;
4507 4510 return (B_TRUE);
4508 4511 }
4509 4512
4510 4513 /*
4511 4514 * Load 'name' into the named dispatch table pointed to by 'ndp'.
4512 4515 * 'ndp' should be the address of a char pointer cell. If the table
4513 4516 * does not exist (*ndp == 0), a new table is allocated and 'ndp'
4514 4517 * is stuffed. If there is not enough space in the table for a new
4515 4518 * entry, more space is allocated.
4516 4519 */
4517 4520 static boolean_t
4518 4521 eri_nd_load(caddr_t *nd_pparam, char *name, pfi_t get_pfi,
4519 4522 pfi_t set_pfi, caddr_t data)
4520 4523 {
4521 4524 ND *nd;
4522 4525 NDE *nde;
4523 4526
4524 4527 if (!nd_pparam)
4525 4528 return (B_FALSE);
4526 4529
4527 4530 if ((nd = (void *)(*nd_pparam)) == NULL) {
4528 4531 if ((nd = (ND *)kmem_zalloc(sizeof (ND), KM_NOSLEEP))
4529 4532 == NULL)
4530 4533 return (B_FALSE);
4531 4534 *nd_pparam = (caddr_t)nd;
4532 4535 }
4533 4536 if (nd->nd_tbl) {
4534 4537 for (nde = nd->nd_tbl; nde->nde_name; nde++) {
4535 4538 if (strcmp(name, nde->nde_name) == 0)
4536 4539 goto fill_it;
4537 4540 }
4538 4541 }
4539 4542 if (nd->nd_free_count <= 1) {
4540 4543 if ((nde = (NDE *)kmem_zalloc(nd->nd_size +
4541 4544 NDE_ALLOC_SIZE, KM_NOSLEEP)) == NULL)
4542 4545 return (B_FALSE);
4543 4546
4544 4547 nd->nd_free_count += NDE_ALLOC_COUNT;
4545 4548 if (nd->nd_tbl) {
4546 4549 bcopy((char *)nd->nd_tbl, (char *)nde, nd->nd_size);
4547 4550 kmem_free((char *)nd->nd_tbl, nd->nd_size);
4548 4551 } else {
4549 4552 nd->nd_free_count--;
4550 4553 nde->nde_name = "?";
4551 4554 nde->nde_get_pfi = nd_get_names;
4552 4555 nde->nde_set_pfi = nd_set_default;
4553 4556 }
4554 4557 nde->nde_data = (caddr_t)nd;
4555 4558 nd->nd_tbl = nde;
4556 4559 nd->nd_size += NDE_ALLOC_SIZE;
4557 4560 }
4558 4561 for (nde = nd->nd_tbl; nde->nde_name; nde++)
4559 4562 ;
4560 4563 nd->nd_free_count--;
4561 4564 fill_it:
4562 4565 nde->nde_name = name;
4563 4566 nde->nde_get_pfi = get_pfi ? get_pfi : nd_get_default;
4564 4567 nde->nde_set_pfi = set_pfi ? set_pfi : nd_set_default;
4565 4568 nde->nde_data = data;
4566 4569 return (B_TRUE);
4567 4570 }
4568 4571
4569 4572 /*
4570 4573 * Hardening Functions
4571 4574 * New Section
4572 4575 */
4573 4576 #ifdef DEBUG
4574 4577 /*PRINTFLIKE5*/
4575 4578 static void
4576 4579 eri_debug_msg(const char *file, int line, struct eri *erip,
4577 4580 debug_msg_t type, const char *fmt, ...)
4578 4581 {
4579 4582 char msg_buffer[255];
4580 4583 va_list ap;
4581 4584
4582 4585 va_start(ap, fmt);
4583 4586 (void) vsprintf(msg_buffer, fmt, ap);
4584 4587 va_end(ap);
4585 4588
4586 4589 if (eri_msg_out & ERI_CON_MSG) {
4587 4590 if (((type <= eri_debug_level) && eri_debug_all) ||
4588 4591 ((type == eri_debug_level) && !eri_debug_all)) {
4589 4592 if (erip)
4590 4593 cmn_err(CE_CONT, "D: %s %s%d:(%s%d) %s\n",
4591 4594 debug_msg_string[type], file, line,
4592 4595 ddi_driver_name(erip->dip), erip->instance,
4593 4596 msg_buffer);
4594 4597 else
4595 4598 cmn_err(CE_CONT, "D: %s %s(%d): %s\n",
4596 4599 debug_msg_string[type], file,
4597 4600 line, msg_buffer);
4598 4601 }
4599 4602 }
4600 4603 }
4601 4604 #endif
4602 4605
4603 4606
4604 4607 /*PRINTFLIKE4*/
4605 4608 static void
4606 4609 eri_fault_msg(struct eri *erip, uint_t severity, msg_t type,
4607 4610 const char *fmt, ...)
4608 4611 {
4609 4612 char msg_buffer[255];
4610 4613 va_list ap;
4611 4614
4612 4615 va_start(ap, fmt);
4613 4616 (void) vsprintf(msg_buffer, fmt, ap);
4614 4617 va_end(ap);
4615 4618
4616 4619 if (erip == NULL) {
4617 4620 cmn_err(CE_NOTE, "eri : %s", msg_buffer);
4618 4621 return;
4619 4622 }
4620 4623
4621 4624 if (severity == SEVERITY_HIGH) {
4622 4625 cmn_err(CE_WARN, "%s%d : %s", ddi_driver_name(erip->dip),
4623 4626 erip->instance, msg_buffer);
4624 4627 } else switch (type) {
4625 4628 case ERI_VERB_MSG:
4626 4629 cmn_err(CE_CONT, "?%s%d : %s", ddi_driver_name(erip->dip),
4627 4630 erip->instance, msg_buffer);
4628 4631 break;
4629 4632 case ERI_LOG_MSG:
4630 4633 cmn_err(CE_NOTE, "^%s%d : %s", ddi_driver_name(erip->dip),
4631 4634 erip->instance, msg_buffer);
4632 4635 break;
4633 4636 case ERI_BUF_MSG:
4634 4637 cmn_err(CE_NOTE, "!%s%d : %s", ddi_driver_name(erip->dip),
4635 4638 erip->instance, msg_buffer);
4636 4639 break;
4637 4640 case ERI_CON_MSG:
4638 4641 cmn_err(CE_CONT, "%s%d : %s", ddi_driver_name(erip->dip),
4639 4642 		    erip->instance, msg_buffer);
		break;
4640 4643 default:
4641 4644 break;
4642 4645 }
4643 4646 }
4644 4647
4645 4648 /*
4646 4649 * Transceiver (xcvr) Functions
4647 4650 * New Section
4648 4651 */
4649 4652 /*
4650 4653  * eri_stop_timer is called before doing any link-related processing.
4651 4654  * It acquires "linklock" to protect the link-related data structures.
4652 4655  * This lock is subsequently released in eri_start_timer().
4653 4656 */
4654 4657 static void
4655 4658 eri_stop_timer(struct eri *erip)
4656 4659 {
4657 4660 timeout_id_t id;
4658 4661 mutex_enter(&erip->linklock);
4659 4662 if (erip->timerid) {
4660 4663 		erip->flags |= ERI_NOTIMEOUTS; /* prevent multiple timeouts */
4661 4664 id = erip->timerid;
4662 4665 		erip->timerid = 0; /* keep other threads from untimeout */
4663 4666 mutex_exit(&erip->linklock); /* no mutex across untimeout() */
4664 4667
4665 4668 (void) untimeout(id);
4666 4669 mutex_enter(&erip->linklock); /* acquire mutex again */
4667 4670 erip->flags &= ~ERI_NOTIMEOUTS;
4668 4671 }
4669 4672 }
4670 4673
4671 4674 /*
4672 4675 * If msec parameter is zero, just release "linklock".
4673 4676 */
4674 4677 static void
4675 4678 eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec)
4676 4679 {
4677 4680 if (msec) {
4678 4681 if (!(erip->flags & ERI_NOTIMEOUTS) &&
4679 4682 (erip->flags & ERI_RUNNING)) {
4680 4683 erip->timerid = timeout(func, (caddr_t)erip,
4681 4684 drv_usectohz(1000*msec));
4682 4685 }
4683 4686 }
4684 4687
4685 4688 mutex_exit(&erip->linklock);
4686 4689 }
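/*
 * eri_stop_timer()/eri_start_timer() bracket all link state handling;
 * the usual pattern, as in eri_check_link_noind(), is:
 *
 *	eri_stop_timer(erip);		<- acquires linklock
 *	... examine and update link state ...
 *	eri_start_timer(erip, eri_check_link, ERI_LINKCHECK_TIMER);
 *					<- re-arms timer, drops linklock
 */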
4687 4690
4688 4691 static int
4689 4692 eri_new_xcvr(struct eri *erip)
4690 4693 {
4691 4694 int status;
4692 4695 uint32_t cfg;
4693 4696 int old_transceiver;
4694 4697
4695 4698 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4696 4699 PCI_PM_IDLESPEED_NONE) == DDI_SUCCESS)
4697 4700 erip->stats.pmcap = ERI_PMCAP_NONE;
4698 4701
4699 4702 status = B_FALSE; /* no change */
4700 4703 cfg = GET_MIFREG(mif_cfg);
4701 4704 ERI_DEBUG_MSG2(erip, MIF_MSG, "cfg value = %X", cfg);
4702 4705 old_transceiver = param_transceiver;
4703 4706
4704 4707 if ((cfg & ERI_MIF_CFGM1) && !use_int_xcvr) {
4705 4708 ERI_DEBUG_MSG1(erip, PHY_MSG, "Found External XCVR");
4706 4709 /*
4707 4710 * An External Transceiver was found and it takes priority
4708 4711 * over an internal, given the use_int_xcvr flag
4709 4712 * is false.
4710 4713 */
4711 4714 if (old_transceiver != EXTERNAL_XCVR) {
4712 4715 /*
4713 4716 * External transceiver has just been plugged
4714 4717 * in. Isolate the internal Transceiver.
4715 4718 */
4716 4719 if (old_transceiver == INTERNAL_XCVR) {
4717 4720 eri_mii_write(erip, ERI_PHY_BMCR,
4718 4721 (PHY_BMCR_ISOLATE | PHY_BMCR_PWRDN |
4719 4722 PHY_BMCR_LPBK));
4720 4723 }
4721 4724 status = B_TRUE;
4722 4725 }
4723 4726 /*
4724 4727 * Select the external Transceiver.
4725 4728 */
4726 4729 erip->phyad = ERI_EXTERNAL_PHYAD;
4727 4730 param_transceiver = EXTERNAL_XCVR;
4728 4731 erip->mif_config &= ~ERI_MIF_CFGPD;
4729 4732 erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4730 4733 erip->mif_config |= ERI_MIF_CFGPS;
4731 4734 PUT_MIFREG(mif_cfg, erip->mif_config);
4732 4735
4733 4736 PUT_MACREG(xifc, GET_MACREG(xifc) | BMAC_XIFC_MIIBUF_OE);
4734 4737 drv_usecwait(ERI_MIF_POLL_DELAY);
4735 4738 } else if (cfg & ERI_MIF_CFGM0) {
4736 4739 ERI_DEBUG_MSG1(erip, PHY_MSG, "Found Internal XCVR");
4737 4740 /*
4738 4741 * An Internal Transceiver was found or the
4739 4742 * use_int_xcvr flag is true.
4740 4743 */
4741 4744 if (old_transceiver != INTERNAL_XCVR) {
4742 4745 /*
4743 4746 * The external transceiver has just been
4744 4747 * disconnected or we're moving from a no
4745 4748 * transceiver state.
4746 4749 */
4747 4750 if ((old_transceiver == EXTERNAL_XCVR) &&
4748 4751 (cfg & ERI_MIF_CFGM0)) {
4749 4752 eri_mii_write(erip, ERI_PHY_BMCR,
4750 4753 (PHY_BMCR_ISOLATE | PHY_BMCR_PWRDN |
4751 4754 PHY_BMCR_LPBK));
4752 4755 }
4753 4756 status = B_TRUE;
4754 4757 }
4755 4758 /*
4756 4759 * Select the internal transceiver.
4757 4760 */
4758 4761 erip->phyad = ERI_INTERNAL_PHYAD;
4759 4762 param_transceiver = INTERNAL_XCVR;
4760 4763 erip->mif_config &= ~ERI_MIF_CFGPD;
4761 4764 erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4762 4765 erip->mif_config &= ~ERI_MIF_CFGPS;
4763 4766 PUT_MIFREG(mif_cfg, erip->mif_config);
4764 4767
4765 4768 PUT_MACREG(xifc, GET_MACREG(xifc) & ~ BMAC_XIFC_MIIBUF_OE);
4766 4769 drv_usecwait(ERI_MIF_POLL_DELAY);
4767 4770 } else {
4768 4771 /*
4769 4772 * Did not find a valid xcvr.
4770 4773 */
4771 4774 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
4772 4775 "Eri_new_xcvr : Select None");
4773 4776 param_transceiver = NO_XCVR;
4774 4777 erip->xcvr_status = PHY_LINK_DOWN;
4775 4778 }
4776 4779
4777 4780 if (erip->stats.pmcap == ERI_PMCAP_NONE) {
4778 4781 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4779 4782 (void *)4000) == DDI_SUCCESS)
4780 4783 erip->stats.pmcap = ERI_PMCAP_4MHZ;
4781 4784 }
4782 4785
4783 4786 return (status);
4784 4787 }
4785 4788
4786 4789 /*
4787 4790 * This function is used for timers. No locks are held on timer expiry.
4788 4791 */
4789 4792 static void
4790 4793 eri_check_link(struct eri *erip)
4791 4794 {
4792 4795 link_state_t linkupdate = eri_check_link_noind(erip);
4793 4796
4794 4797 if (linkupdate != LINK_STATE_UNKNOWN)
4795 4798 mac_link_update(erip->mh, linkupdate);
4796 4799 }
4797 4800
4798 4801 /*
4799 4802 * Compare our xcvr in our structure to the xcvr that we get from
4800 4803 * eri_check_mii_xcvr(). If they are different then mark the
4801 4804 * link down, reset xcvr, and return.
4802 4805 *
4803 4806  * Note that without the MII connector, conditions cannot change in
4804 4807  * a way that would select an external phy, so this code has been
4805 4808  * simplified to not call the function or change the xcvr at all.
4806 4809 */
4807 4810 static uint32_t
4808 4811 eri_check_link_noind(struct eri *erip)
4809 4812 {
4810 4813 uint16_t stat, control, mif_ints;
4811 4814 uint32_t link_timeout = ERI_LINKCHECK_TIMER;
4812 4815 uint32_t linkupdate = 0;
4813 4816
4814 4817 eri_stop_timer(erip); /* acquire linklock */
4815 4818
4816 4819 mutex_enter(&erip->xmitlock);
4817 4820 mutex_enter(&erip->xcvrlock);
4818 4821 eri_mif_poll(erip, MIF_POLL_STOP);
4819 4822
4820 4823 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
4821 4824 mif_ints = erip->mii_status ^ stat;
4822 4825
4823 4826 if (erip->openloop_autoneg) {
4824 4827 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
4825 4828 ERI_DEBUG_MSG3(erip, XCVR_MSG,
4826 4829 "eri_check_link:openloop stat %X mii_status %X",
4827 4830 stat, erip->mii_status);
4828 4831 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
4829 4832 if (!(stat & PHY_BMSR_LNKSTS) &&
4830 4833 (erip->openloop_autoneg < 2)) {
4831 4834 if (param_speed) {
4832 4835 control &= ~PHY_BMCR_100M;
4833 4836 param_anlpar_100hdx = 0;
4834 4837 param_anlpar_10hdx = 1;
4835 4838 param_speed = 0;
4836 4839 erip->stats.ifspeed = 10;
4837 4840
4838 4841 } else {
4839 4842 control |= PHY_BMCR_100M;
4840 4843 param_anlpar_100hdx = 1;
4841 4844 param_anlpar_10hdx = 0;
4842 4845 param_speed = 1;
4843 4846 erip->stats.ifspeed = 100;
4844 4847 }
4845 4848 ERI_DEBUG_MSG3(erip, XCVR_MSG,
4846 4849 "eri_check_link: trying speed %X stat %X",
4847 4850 param_speed, stat);
4848 4851
4849 4852 erip->openloop_autoneg ++;
4850 4853 eri_mii_write(erip, ERI_PHY_BMCR, control);
4851 4854 link_timeout = ERI_P_FAULT_TIMER;
4852 4855 } else {
4853 4856 erip->openloop_autoneg = 0;
4854 4857 linkupdate = eri_mif_check(erip, stat, stat);
4855 4858 if (erip->openloop_autoneg)
4856 4859 link_timeout = ERI_P_FAULT_TIMER;
4857 4860 }
4858 4861 eri_mif_poll(erip, MIF_POLL_START);
4859 4862 mutex_exit(&erip->xcvrlock);
4860 4863 mutex_exit(&erip->xmitlock);
4861 4864
4862 4865 eri_start_timer(erip, eri_check_link, link_timeout);
4863 4866 return (linkupdate);
4864 4867 }
4865 4868
4866 4869 linkupdate = eri_mif_check(erip, mif_ints, stat);
4867 4870 eri_mif_poll(erip, MIF_POLL_START);
4868 4871 mutex_exit(&erip->xcvrlock);
4869 4872 mutex_exit(&erip->xmitlock);
4870 4873
4871 4874 #ifdef ERI_RMAC_HANG_WORKAROUND
4872 4875 /*
4873 4876 * Check if rx hung.
4874 4877 */
4875 4878 if ((erip->flags & ERI_RUNNING) && param_linkup) {
4876 4879 if (erip->check_rmac_hang) {
4877 4880 ERI_DEBUG_MSG5(erip,
4878 4881 NONFATAL_MSG,
4879 4882 "check1 %d: macsm:%8x wr:%2x rd:%2x",
4880 4883 erip->check_rmac_hang,
4881 4884 GET_MACREG(macsm),
4882 4885 GET_ERXREG(rxfifo_wr_ptr),
4883 4886 GET_ERXREG(rxfifo_rd_ptr));
4884 4887
4885 4888 erip->check_rmac_hang = 0;
4886 4889 erip->check2_rmac_hang ++;
4887 4890
4888 4891 erip->rxfifo_wr_ptr_c = GET_ERXREG(rxfifo_wr_ptr);
4889 4892 erip->rxfifo_rd_ptr_c = GET_ERXREG(rxfifo_rd_ptr);
4890 4893
4891 4894 eri_start_timer(erip, eri_check_link,
4892 4895 ERI_CHECK_HANG_TIMER);
4893 4896 return (linkupdate);
4894 4897 }
4895 4898
4896 4899 if (erip->check2_rmac_hang) {
4897 4900 ERI_DEBUG_MSG5(erip,
4898 4901 NONFATAL_MSG,
4899 4902 "check2 %d: macsm:%8x wr:%2x rd:%2x",
4900 4903 erip->check2_rmac_hang,
4901 4904 GET_MACREG(macsm),
4902 4905 GET_ERXREG(rxfifo_wr_ptr),
4903 4906 GET_ERXREG(rxfifo_rd_ptr));
4904 4907
4905 4908 erip->check2_rmac_hang = 0;
4906 4909
4907 4910 erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr);
4908 4911 erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr);
4909 4912
4910 4913 if (((GET_MACREG(macsm) & BMAC_OVERFLOW_STATE) ==
4911 4914 BMAC_OVERFLOW_STATE) &&
4912 4915 ((erip->rxfifo_wr_ptr_c == erip->rxfifo_rd_ptr_c) ||
4913 4916 ((erip->rxfifo_rd_ptr == erip->rxfifo_rd_ptr_c) &&
4914 4917 (erip->rxfifo_wr_ptr == erip->rxfifo_wr_ptr_c)))) {
4915 4918 ERI_DEBUG_MSG1(erip,
4916 4919 NONFATAL_MSG,
4917 4920 "RX hang: Reset mac");
4918 4921
4919 4922 HSTAT(erip, rx_hang);
4920 4923 erip->linkcheck = 1;
4921 4924
4922 4925 eri_start_timer(erip, eri_check_link,
4923 4926 ERI_LINKCHECK_TIMER);
4924 4927 (void) eri_init(erip);
4925 4928 return (linkupdate);
4926 4929 }
4927 4930 }
4928 4931 }
4929 4932 #endif
4930 4933
4931 4934 /*
4932 4935 * Check if tx hung.
4933 4936 */
4934 4937 #ifdef ERI_TX_HUNG
4935 4938 if ((erip->flags & ERI_RUNNING) && param_linkup &&
4936 4939 (eri_check_txhung(erip))) {
4937 4940 HSTAT(erip, tx_hang);
4938 4941 eri_reinit_txhung++;
4939 4942 erip->linkcheck = 1;
4940 4943 eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER);
4941 4944 (void) eri_init(erip);
4942 4945 return (linkupdate);
4943 4946 }
4944 4947 #endif
4945 4948
4946 4949 #ifdef ERI_PM_WORKAROUND
4947 4950 if (erip->stats.pmcap == ERI_PMCAP_NONE) {
4948 4951 if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4949 4952 (void *)4000) == DDI_SUCCESS)
4950 4953 erip->stats.pmcap = ERI_PMCAP_4MHZ;
4951 4954
4952 4955 ERI_DEBUG_MSG2(erip, NONFATAL_MSG,
4953 4956 "eri_check_link: PMCAP %d", erip->stats.pmcap);
4954 4957 }
4955 4958 #endif
4956 4959 if ((!param_mode) && (param_transceiver != NO_XCVR))
4957 4960 eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER);
4958 4961 else
4959 4962 eri_start_timer(erip, eri_check_link, ERI_LINKCHECK_TIMER);
4960 4963 return (linkupdate);
4961 4964 }
4962 4965
4963 4966 static link_state_t
4964 4967 eri_mif_check(struct eri *erip, uint16_t mif_ints, uint16_t mif_data)
4965 4968 {
4966 4969 uint16_t control, aner, anlpar, anar, an_common;
4967 4970 uint16_t old_mintrans;
4968 4971 int restart_autoneg = 0;
4969 4972 link_state_t retv;
4970 4973
4971 4974 ERI_DEBUG_MSG4(erip, XCVR_MSG, "eri_mif_check: mif_mask: %X, %X, %X",
4972 4975 erip->mif_mask, mif_ints, mif_data);
4973 4976
4974 4977 mif_ints &= ~erip->mif_mask;
4975 4978 erip->mii_status = mif_data;
4976 4979 /*
4977 4980 * Now check if someone has pulled the xcvr or
4978 4981 * a new xcvr has shown up
4979 4982 * If so try to find out what the new xcvr setup is.
4980 4983 */
4981 4984 if (((mif_ints & PHY_BMSR_RES1) && (mif_data == 0xFFFF)) ||
4982 4985 (param_transceiver == NO_XCVR)) {
4983 4986 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
4984 4987 "No status transceiver gone");
4985 4988 if (eri_new_xcvr(erip)) {
4986 4989 if (param_transceiver != NO_XCVR) {
4987 4990 /*
4988 4991 * Reset the new PHY and bring up the link
4989 4992 */
4990 4993 (void) eri_reset_xcvr(erip);
4991 4994 }
4992 4995 }
4993 4996 return (LINK_STATE_UNKNOWN);
4994 4997 }
4995 4998
4996 4999 if (param_autoneg && (mif_ints & PHY_BMSR_LNKSTS) &&
4997 5000 (mif_data & PHY_BMSR_LNKSTS) && (mif_data & PHY_BMSR_ANC)) {
4998 5001 mif_ints |= PHY_BMSR_ANC;
4999 5002 ERI_DEBUG_MSG3(erip, PHY_MSG,
5000 5003 		    "eri_mif_check: Set ANC bit mif_data %X mif_ints %X",
5001 5004 mif_data, mif_ints);
5002 5005 }
5003 5006
5004 5007 if ((mif_ints & PHY_BMSR_ANC) && (mif_data & PHY_BMSR_ANC)) {
5005 5008 ERI_DEBUG_MSG1(erip, PHY_MSG, "Auto-negotiation interrupt.");
5006 5009
5007 5010 /*
5008 5011 * Switch off Auto-negotiation interrupts and switch on
5009 5012 		 * Link status interrupts.
5010 5013 */
5011 5014 erip->mif_mask |= PHY_BMSR_ANC;
5012 5015 erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5013 5016 (void) eri_mii_read(erip, ERI_PHY_ANER, &aner);
5014 5017 param_aner_lpancap = 1 && (aner & PHY_ANER_LPNW);
5015 5018 if ((aner & PHY_ANER_MLF) || (eri_force_mlf)) {
5016 5019 ERI_DEBUG_MSG1(erip, XCVR_MSG,
5017 5020 "parallel detection fault");
5018 5021 /*
5019 5022 * Consider doing open loop auto-negotiation.
5020 5023 */
5021 5024 ERI_DEBUG_MSG1(erip, XCVR_MSG,
5022 5025 "Going into Open loop Auto-neg");
5023 5026 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5024 5027
5025 5028 control &= ~(PHY_BMCR_ANE | PHY_BMCR_RAN |
5026 5029 PHY_BMCR_FDX);
5027 5030 if (param_anar_100fdx || param_anar_100hdx) {
5028 5031 control |= PHY_BMCR_100M;
5029 5032 param_anlpar_100hdx = 1;
5030 5033 param_anlpar_10hdx = 0;
5031 5034 param_speed = 1;
5032 5035 erip->stats.ifspeed = 100;
5033 5036
5034 5037 } else if (param_anar_10fdx || param_anar_10hdx) {
5035 5038 control &= ~PHY_BMCR_100M;
5036 5039 param_anlpar_100hdx = 0;
5037 5040 param_anlpar_10hdx = 1;
5038 5041 param_speed = 0;
5039 5042 erip->stats.ifspeed = 10;
5040 5043 } else {
5041 5044 ERI_FAULT_MSG1(erip, SEVERITY_NONE,
5042 5045 ERI_VERB_MSG,
5043 5046 "Transceiver speed set incorrectly.");
5044 5047 return (0);
5045 5048 }
5046 5049
5047 5050 (void) eri_mii_write(erip, ERI_PHY_BMCR, control);
5048 5051 param_anlpar_100fdx = 0;
5049 5052 param_anlpar_10fdx = 0;
5050 5053 param_mode = 0;
5051 5054 erip->openloop_autoneg = 1;
5052 5055 return (0);
5053 5056 }
5054 5057 (void) eri_mii_read(erip, ERI_PHY_ANLPAR, &anlpar);
5055 5058 (void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5056 5059 an_common = anar & anlpar;
5057 5060
5058 5061 ERI_DEBUG_MSG2(erip, XCVR_MSG, "an_common = 0x%X", an_common);
5059 5062
5060 5063 if (an_common & (PHY_ANLPAR_TXFDX | PHY_ANLPAR_TX)) {
5061 5064 param_speed = 1;
5062 5065 erip->stats.ifspeed = 100;
5063 5066 param_mode = 1 && (an_common & PHY_ANLPAR_TXFDX);
5064 5067
5065 5068 } else if (an_common & (PHY_ANLPAR_10FDX | PHY_ANLPAR_10)) {
5066 5069 param_speed = 0;
5067 5070 erip->stats.ifspeed = 10;
5068 5071 param_mode = 1 && (an_common & PHY_ANLPAR_10FDX);
5069 5072
5070 5073 } else an_common = 0x0;
5071 5074
5072 5075 if (!an_common) {
5073 5076 ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
5074 5077 "Transceiver: anar not set with speed selection");
5075 5078 }
5076 5079 param_anlpar_100T4 = 1 && (anlpar & PHY_ANLPAR_T4);
5077 5080 param_anlpar_100fdx = 1 && (anlpar & PHY_ANLPAR_TXFDX);
5078 5081 param_anlpar_100hdx = 1 && (anlpar & PHY_ANLPAR_TX);
5079 5082 param_anlpar_10fdx = 1 && (anlpar & PHY_ANLPAR_10FDX);
5080 5083 param_anlpar_10hdx = 1 && (anlpar & PHY_ANLPAR_10);
5081 5084
5082 5085 ERI_DEBUG_MSG2(erip, PHY_MSG,
5083 5086 "Link duplex = 0x%X", param_mode);
5084 5087 ERI_DEBUG_MSG2(erip, PHY_MSG,
5085 5088 "Link speed = 0x%X", param_speed);
5086 5089 /* mif_ints |= PHY_BMSR_LNKSTS; prevent double msg */
5087 5090 /* mif_data |= PHY_BMSR_LNKSTS; prevent double msg */
5088 5091 }
5089 5092 retv = LINK_STATE_UNKNOWN;
5090 5093 if (mif_ints & PHY_BMSR_LNKSTS) {
5091 5094 if (mif_data & PHY_BMSR_LNKSTS) {
5092 5095 ERI_DEBUG_MSG1(erip, PHY_MSG, "Link Up");
5093 5096 /*
5094 5097	 * Program Lu3X31T for minimum transition
5095 5098 */
5096 5099 if (eri_phy_mintrans) {
5097 5100 eri_mii_write(erip, 31, 0x8000);
5098 5101 (void) eri_mii_read(erip, 0, &old_mintrans);
5099 5102 eri_mii_write(erip, 0, 0x00F1);
5100 5103 eri_mii_write(erip, 31, 0x0000);
5101 5104 }
5102 5105 /*
5103 5106 * The link is up.
5104 5107 */
5105 5108 eri_init_txmac(erip);
5106 5109 param_linkup = 1;
5107 5110 erip->stats.link_up = LINK_STATE_UP;
5108 5111 if (param_mode)
5109 5112 erip->stats.link_duplex = LINK_DUPLEX_FULL;
5110 5113 else
5111 5114 erip->stats.link_duplex = LINK_DUPLEX_HALF;
5112 5115
5113 5116 retv = LINK_STATE_UP;
5114 5117 } else {
5115 5118 ERI_DEBUG_MSG1(erip, PHY_MSG, "Link down.");
5116 5119 param_linkup = 0;
5117 5120 erip->stats.link_up = LINK_STATE_DOWN;
5118 5121 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5119 5122 retv = LINK_STATE_DOWN;
5120 5123 if (param_autoneg) {
5121 5124 restart_autoneg = 1;
5122 5125 }
5123 5126 }
5124 5127 } else {
5125 5128 if (mif_data & PHY_BMSR_LNKSTS) {
5126 5129 if (!param_linkup) {
5127 5130 ERI_DEBUG_MSG1(erip, PHY_MSG,
5128 5131 "eri_mif_check: MIF data link up");
5129 5132 /*
5130 5133 * Program Lu3X31T for minimum transition
5131 5134 */
5132 5135 if (eri_phy_mintrans) {
5133 5136 eri_mii_write(erip, 31, 0x8000);
5134 5137 (void) eri_mii_read(erip, 0,
5135 5138 &old_mintrans);
5136 5139 eri_mii_write(erip, 0, 0x00F1);
5137 5140 eri_mii_write(erip, 31, 0x0000);
5138 5141 }
5139 5142 /*
5140 5143 * The link is up.
5141 5144 */
5142 5145 eri_init_txmac(erip);
5143 5146
5144 5147 param_linkup = 1;
5145 5148 erip->stats.link_up = LINK_STATE_UP;
5146 5149 if (param_mode)
5147 5150 erip->stats.link_duplex =
5148 5151 LINK_DUPLEX_FULL;
5149 5152 else
5150 5153 erip->stats.link_duplex =
5151 5154 LINK_DUPLEX_HALF;
5152 5155
5153 5156 retv = LINK_STATE_UP;
5154 5157 }
5155 5158 } else if (param_linkup) {
5156 5159 /*
5157 5160 * The link is down now.
5158 5161 */
5159 5162 ERI_DEBUG_MSG1(erip, PHY_MSG,
5160 5163	    "eri_mif_check: Link was up and went down");
5161 5164 param_linkup = 0;
5162 5165 erip->stats.link_up = LINK_STATE_DOWN;
5163 5166 erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5164 5167 retv = LINK_STATE_DOWN;
5165 5168 if (param_autoneg)
5166 5169 restart_autoneg = 1;
5167 5170 }
5168 5171 }
5169 5172 if (restart_autoneg) {
5170 5173 /*
5171 5174 * Restart normal auto-negotiation.
5172 5175 */
5173 5176 ERI_DEBUG_MSG1(erip, PHY_MSG,
5174 5177	    "eri_mif_check: Restart Auto Negotiation");
5175 5178 erip->openloop_autoneg = 0;
5176 5179 param_mode = 0;
5177 5180 param_speed = 0;
5178 5181 param_anlpar_100T4 = 0;
5179 5182 param_anlpar_100fdx = 0;
5180 5183 param_anlpar_100hdx = 0;
5181 5184 param_anlpar_10fdx = 0;
5182 5185 param_anlpar_10hdx = 0;
5183 5186 param_aner_lpancap = 0;
5184 5187 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5185 5188 control |= (PHY_BMCR_ANE | PHY_BMCR_RAN);
5186 5189 eri_mii_write(erip, ERI_PHY_BMCR, control);
5187 5190 }
5188 5191 if (mif_ints & PHY_BMSR_JABDET) {
5189 5192 if (mif_data & PHY_BMSR_JABDET) {
5190 5193 ERI_DEBUG_MSG1(erip, PHY_MSG, "Jabber detected.");
5191 5194 HSTAT(erip, jab);
5192 5195 /*
5193 5196 * Reset the new PHY and bring up the link
5194 5197 * (Check for failure?)
5195 5198 */
5196 5199 (void) eri_reset_xcvr(erip);
5197 5200 }
5198 5201 }
5199 5202 return (retv);
5200 5203 }
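
The auto-negotiation outcome above follows the usual MII priority walk:
AND the local advertisement (ANAR) against the link partner's (ANLPAR)
and take the best common ability, 100 Mb/s ahead of 10 Mb/s and full
duplex ahead of half.  A minimal standalone sketch of that resolution,
using illustrative masks that mirror the IEEE 802.3 ANAR/ANLPAR bit
layout rather than the driver's PHY_ANLPAR_* constants from eri_phy.h:

#include <stdio.h>
#include <stdint.h>

#define	AN_10		0x0020	/* 10BASE-T half duplex */
#define	AN_10FDX	0x0040	/* 10BASE-T full duplex */
#define	AN_TX		0x0080	/* 100BASE-TX half duplex */
#define	AN_TXFDX	0x0100	/* 100BASE-TX full duplex */

static void
resolve_autoneg(uint16_t anar, uint16_t anlpar, int *speed, int *fdx)
{
	uint16_t common = anar & anlpar;

	if (common & (AN_TXFDX | AN_TX)) {
		*speed = 100;
		*fdx = (common & AN_TXFDX) != 0;
	} else if (common & (AN_10FDX | AN_10)) {
		*speed = 10;
		*fdx = (common & AN_10FDX) != 0;
	} else {
		*speed = 0;	/* no common ability */
		*fdx = 0;
	}
}

int
main(void)
{
	int speed, fdx;

	/* Local side advertises everything; partner is 100HDX-only. */
	resolve_autoneg(AN_TXFDX | AN_TX | AN_10FDX | AN_10, AN_TX,
	    &speed, &fdx);
	(void) printf("%d Mb/s, %s duplex\n", speed, fdx ? "full" : "half");
	return (0);
}

When no common ability survives the AND, the driver logs a fault
instead, since a completed negotiation with no shared mode points at a
misconfigured advertisement.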
5201 5204
5202 5205 #define PHYRST_PERIOD 500
5203 5206 static int
5204 5207 eri_reset_xcvr(struct eri *erip)
5205 5208 {
5206 5209 uint16_t stat;
5207 5210 uint16_t anar;
5208 5211 uint16_t control;
5209 5212 uint16_t idr1;
5210 5213 uint16_t idr2;
5211 5214 uint16_t nicr;
5212 5215 uint32_t speed_100;
5213 5216 uint32_t speed_10;
5214 5217 int n;
5215 5218
5216 5219 #ifdef ERI_10_10_FORCE_SPEED_WORKAROUND
5217 5220 erip->ifspeed_old = erip->stats.ifspeed;
5218 5221 #endif
5219 5222 /*
5220 5223	 * Reset open-loop auto-negotiation.  This means normal
5221 5224	 * auto-negotiation is tried until a Multiple Link Fault occurs,
5222 5225	 * at which point 100M half duplex and then 10M half duplex are
5223 5226	 * tried until the link comes up.
5224 5227 */
5225 5228 erip->openloop_autoneg = 0;
5226 5229
5227 5230 /*
5228 5231 * Reset the xcvr.
5229 5232 */
5230 5233 eri_mii_write(erip, ERI_PHY_BMCR, PHY_BMCR_RESET);
5231 5234
5232 5235 /* Check for transceiver reset completion */
5233 5236
5234 5237 n = 1000;
5235 5238 while (--n > 0) {
5236 5239 drv_usecwait((clock_t)PHYRST_PERIOD);
5237 5240 if (eri_mii_read(erip, ERI_PHY_BMCR, &control) == 1) {
5238 5241 /* Transceiver does not talk MII */
5239 5242 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5240 5243 "eri_reset_xcvr: no mii");
5241 5244 }
5242 5245 if ((control & PHY_BMCR_RESET) == 0)
5243 5246 goto reset_done;
5244 5247 }
5245 5248 ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
5246 5249 "eri_reset_xcvr:reset_failed n == 0, control %x", control);
5247 5250 goto eri_reset_xcvr_failed;
5248 5251
5249 5252 reset_done:
5250 5253
5251 5254 ERI_DEBUG_MSG2(erip, AUTOCONFIG_MSG,
5252 5255 "eri_reset_xcvr: reset complete in %d us",
5253 5256 (1000 - n) * PHYRST_PERIOD);
5254 5257
5255 5258 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5256 5259 (void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5257 5260 (void) eri_mii_read(erip, ERI_PHY_IDR1, &idr1);
5258 5261 (void) eri_mii_read(erip, ERI_PHY_IDR2, &idr2);
5259 5262
5260 5263 ERI_DEBUG_MSG4(erip, XCVR_MSG,
5261 5264 "eri_reset_xcvr: control %x stat %x anar %x", control, stat, anar);
5262 5265
5263 5266 /*
5264 5267	 * Initialize the read-only transceiver ndd information;
5265 5268	 * the values are either 0 or 1.
5266 5269 */
5267 5270 param_bmsr_ancap = 1 && (stat & PHY_BMSR_ACFG);
5268 5271 param_bmsr_100T4 = 1 && (stat & PHY_BMSR_100T4);
5269 5272 param_bmsr_100fdx = 1 && (stat & PHY_BMSR_100FDX);
5270 5273 param_bmsr_100hdx = 1 && (stat & PHY_BMSR_100HDX);
5271 5274 param_bmsr_10fdx = 1 && (stat & PHY_BMSR_10FDX);
5272 5275 param_bmsr_10hdx = 1 && (stat & PHY_BMSR_10HDX);
5273 5276
5274 5277 /*
5275 5278 * Match up the ndd capabilities with the transceiver.
5276 5279 */
5277 5280 param_autoneg &= param_bmsr_ancap;
5278 5281 param_anar_100fdx &= param_bmsr_100fdx;
5279 5282 param_anar_100hdx &= param_bmsr_100hdx;
5280 5283 param_anar_10fdx &= param_bmsr_10fdx;
5281 5284 param_anar_10hdx &= param_bmsr_10hdx;
5282 5285
5283 5286 /*
5284 5287 * Select the operation mode of the transceiver.
5285 5288 */
5286 5289 if (param_autoneg) {
5287 5290 /*
5288 5291 * Initialize our auto-negotiation capabilities.
5289 5292 */
5290 5293 anar = PHY_SELECTOR;
5291 5294 if (param_anar_100T4)
5292 5295 anar |= PHY_ANAR_T4;
5293 5296 if (param_anar_100fdx)
5294 5297 anar |= PHY_ANAR_TXFDX;
5295 5298 if (param_anar_100hdx)
5296 5299 anar |= PHY_ANAR_TX;
5297 5300 if (param_anar_10fdx)
5298 5301 anar |= PHY_ANAR_10FDX;
5299 5302 if (param_anar_10hdx)
5300 5303 anar |= PHY_ANAR_10;
5301 5304 ERI_DEBUG_MSG2(erip, XCVR_MSG, "anar = %x", anar);
5302 5305 eri_mii_write(erip, ERI_PHY_ANAR, anar);
5303 5306 }
5304 5307
5305 5308 /* Place the Transceiver in normal operation mode */
5306 5309 if ((control & PHY_BMCR_ISOLATE) || (control & PHY_BMCR_LPBK)) {
5307 5310 control &= ~(PHY_BMCR_ISOLATE | PHY_BMCR_LPBK);
5308 5311 eri_mii_write(erip, ERI_PHY_BMCR,
5309 5312 (control & ~PHY_BMCR_ISOLATE));
5310 5313 }
5311 5314
5312 5315 /*
5313 5316 * If Lu3X31T then allow nonzero eri_phy_mintrans
5314 5317 */
5315 5318 if (eri_phy_mintrans &&
5316 5319 (idr1 != 0x43 || (idr2 & 0xFFF0) != 0x7420)) {
5317 5320 eri_phy_mintrans = 0;
5318 5321 }
5319 5322 /*
5320 5323 * Initialize the mif interrupt mask.
5321 5324 */
5322 5325 erip->mif_mask = (uint16_t)(~PHY_BMSR_RES1);
5323 5326
5324 5327 /*
5325 5328	 * Establish link speeds and perform any speed-specific
5326 5329	 * special handling.
5327 5330 */
5328 5331 speed_100 = param_anar_100fdx | param_anar_100hdx;
5329 5332 speed_10 = param_anar_10fdx | param_anar_10hdx;
5330 5333
5331 5334 ERI_DEBUG_MSG5(erip, XCVR_MSG, "eri_reset_xcvr: %d %d %d %d",
5332 5335 param_anar_100fdx, param_anar_100hdx, param_anar_10fdx,
5333 5336 param_anar_10hdx);
5334 5337
5335 5338 ERI_DEBUG_MSG3(erip, XCVR_MSG,
5336 5339 "eri_reset_xcvr: speed_100 %d speed_10 %d", speed_100, speed_10);
5337 5340
5338 5341 if ((!speed_100) && (speed_10)) {
5339 5342 erip->mif_mask &= ~PHY_BMSR_JABDET;
5340 5343 if (!(param_anar_10fdx) &&
5341 5344 (param_anar_10hdx) &&
5342 5345 (erip->link_pulse_disabled)) {
5343 5346 param_speed = 0;
5344 5347 param_mode = 0;
5345 5348 (void) eri_mii_read(erip, ERI_PHY_NICR, &nicr);
5346 5349 nicr &= ~PHY_NICR_LD;
5347 5350 eri_mii_write(erip, ERI_PHY_NICR, nicr);
5348 5351 param_linkup = 1;
5349 5352 erip->stats.link_up = LINK_STATE_UP;
5350 5353 if (param_mode)
5351 5354 erip->stats.link_duplex = LINK_DUPLEX_FULL;
5352 5355 else
5353 5356 erip->stats.link_duplex = LINK_DUPLEX_HALF;
5354 5357 }
5355 5358 }
5356 5359
5357 5360 /*
5358 5361	 * Clear auto-negotiation before restarting.
5359 5362 */
5360 5363 control = PHY_BMCR_100M | PHY_BMCR_FDX;
5361 5364 /* eri_mii_write(erip, ERI_PHY_BMCR, control); */
5362 5365 if (param_autoneg) {
5363 5366 /*
5364 5367 * Setup the transceiver for autonegotiation.
5365 5368 */
5366 5369 erip->mif_mask &= ~PHY_BMSR_ANC;
5367 5370
5368 5371 /*
5369 5372 * Clear the Auto-negotiation before re-starting
5370 5373 */
5371 5374 eri_mii_write(erip, ERI_PHY_BMCR, control & ~PHY_BMCR_ANE);
5372 5375
5373 5376 /*
5374 5377 * Switch on auto-negotiation.
5375 5378 */
5376 5379 control |= (PHY_BMCR_ANE | PHY_BMCR_RAN);
5377 5380
5378 5381 eri_mii_write(erip, ERI_PHY_BMCR, control);
5379 5382 } else {
5380 5383 /*
5381 5384 * Force the transceiver.
5382 5385 */
5383 5386 erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5384 5387
5385 5388 /*
5386 5389 * Switch off auto-negotiation.
5387 5390 */
5388 5391 control &= ~(PHY_BMCR_FDX | PHY_BMCR_ANE | PHY_BMCR_RAN);
5389 5392
5390 5393 if (speed_100) {
5391 5394 control |= PHY_BMCR_100M;
5392 5395 param_aner_lpancap = 0; /* Clear LP nway */
5393 5396 param_anlpar_10fdx = 0;
5394 5397 param_anlpar_10hdx = 0;
5395 5398 param_anlpar_100T4 = param_anar_100T4;
5396 5399 param_anlpar_100fdx = param_anar_100fdx;
5397 5400 param_anlpar_100hdx = param_anar_100hdx;
5398 5401 param_speed = 1;
5399 5402 erip->stats.ifspeed = 100;
5400 5403 param_mode = param_anar_100fdx;
5401 5404 if (param_mode) {
5402 5405 param_anlpar_100hdx = 0;
5403 5406 erip->stats.link_duplex = LINK_DUPLEX_FULL;
5404 5407 } else {
5405 5408 erip->stats.link_duplex = LINK_DUPLEX_HALF;
5406 5409 }
5407 5410 } else if (speed_10) {
5408 5411 control &= ~PHY_BMCR_100M;
5409 5412 param_aner_lpancap = 0; /* Clear LP nway */
5410 5413 param_anlpar_100fdx = 0;
5411 5414 param_anlpar_100hdx = 0;
5412 5415 param_anlpar_100T4 = 0;
5413 5416 param_anlpar_10fdx = param_anar_10fdx;
5414 5417 param_anlpar_10hdx = param_anar_10hdx;
5415 5418 param_speed = 0;
5416 5419 erip->stats.ifspeed = 10;
5417 5420 param_mode = param_anar_10fdx;
5418 5421 if (param_mode) {
5419 5422 param_anlpar_10hdx = 0;
5420 5423 erip->stats.link_duplex = LINK_DUPLEX_FULL;
5421 5424 } else {
5422 5425 erip->stats.link_duplex = LINK_DUPLEX_HALF;
5423 5426 }
5424 5427 } else {
5425 5428 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5426 5429 "Transceiver speed set incorrectly.");
5427 5430 }
5428 5431
5429 5432 if (param_mode) {
5430 5433 control |= PHY_BMCR_FDX;
5431 5434 }
5432 5435
5433 5436 ERI_DEBUG_MSG4(erip, PHY_MSG,
5434 5437 "control = %x status = %x param_mode %d",
5435 5438 control, stat, param_mode);
5436 5439
5437 5440 eri_mii_write(erip, ERI_PHY_BMCR, control);
5438 5441 /*
5439 5442 * if (param_mode) {
5440 5443 * control |= PHY_BMCR_FDX;
5441 5444 * }
5442 5445 * control &= ~(PHY_BMCR_FDX | PHY_BMCR_ANE | PHY_BMCR_RAN);
5443 5446 * eri_mii_write(erip, ERI_PHY_BMCR, control);
5444 5447 */
5445 5448 }
5446 5449
5447 5450 #ifdef DEBUG
5448 5451 (void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5449 5452 (void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5450 5453 (void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5451 5454 #endif
5452 5455 ERI_DEBUG_MSG4(erip, PHY_MSG,
5453 5456 "control %X status %X anar %X", control, stat, anar);
5454 5457
5455 5458 eri_reset_xcvr_exit:
5456 5459 return (0);
5457 5460
5458 5461 eri_reset_xcvr_failed:
5459 5462 return (1);
5460 5463 }
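
The reset loop above illustrates a common MII pattern: BMCR_RESET is
self-clearing, so the driver writes it and then polls, with a bounded
spin, until the PHY drops the bit.  A minimal user-space sketch of the
same bounded poll, assuming hypothetical mii_read()/mii_write() stubs
in place of eri's MDIO accessors and usleep() in place of
drv_usecwait():

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

#define	MII_BMCR	0	/* basic mode control register */
#define	BMCR_RESET	0x8000	/* self-clearing reset bit */
#define	RESET_POLL_US	500	/* delay per poll, like PHYRST_PERIOD */
#define	RESET_POLL_MAX	1000	/* bound on the number of polls */

/* Stand-in register image; a real driver reads the MDIO bus. */
static uint16_t fake_bmcr;

static uint16_t
mii_read(int reg)
{
	(void) reg;
	fake_bmcr &= ~BMCR_RESET;	/* pretend the reset completed */
	return (fake_bmcr);
}

static void
mii_write(int reg, uint16_t val)
{
	(void) reg;
	fake_bmcr = val;
}

/* Returns 0 once the PHY clears its reset bit, -1 on timeout. */
static int
phy_reset(void)
{
	int n;

	mii_write(MII_BMCR, BMCR_RESET);
	for (n = RESET_POLL_MAX; n > 0; n--) {
		usleep(RESET_POLL_US);
		if ((mii_read(MII_BMCR) & BMCR_RESET) == 0)
			return (0);
	}
	return (-1);
}

int
main(void)
{
	(void) printf("phy_reset: %s\n",
	    phy_reset() == 0 ? "ok" : "timed out");
	return (0);
}

Bounding the loop matters: a pulled transceiver never clears the bit,
which is exactly the eri_reset_xcvr_failed path above.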
5461 5464
5462 5465 #ifdef ERI_10_10_FORCE_SPEED_WORKAROUND
5463 5466
5464 5467 static void
5465 5468 eri_xcvr_force_mode(struct eri *erip, uint32_t *link_timeout)
5466 5469 {
5467 5470
5468 5471 if (!param_autoneg && !param_linkup && (erip->stats.ifspeed == 10) &&
5469 5472 (param_anar_10fdx | param_anar_10hdx)) {
5470 5473 *link_timeout = SECOND(1);
5471 5474 return;
5472 5475 }
5473 5476
5474 5477 if (!param_autoneg && !param_linkup && (erip->ifspeed_old == 10) &&
5475 5478 (param_anar_100fdx | param_anar_100hdx)) {
5476 5479 /*
5477 5480 * May have to set link partner's speed and mode.
5478 5481 */
5479 5482 ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_LOG_MSG,
5480 5483 "May have to set link partner's speed and duplex mode.");
5481 5484 }
5482 5485 }
5483 5486 #endif
5484 5487
5485 5488 static void
5486 5489 eri_mif_poll(struct eri *erip, soft_mif_enable_t enable)
5487 5490 {
5488 5491 if (enable == MIF_POLL_START) {
5489 5492 if (erip->mifpoll_enable && !erip->openloop_autoneg) {
5490 5493 erip->mif_config |= ERI_MIF_CFGPE;
5491 5494 PUT_MIFREG(mif_cfg, erip->mif_config);
5492 5495 drv_usecwait(ERI_MIF_POLL_DELAY);
5493 5496 PUT_GLOBREG(intmask, GET_GLOBREG(intmask) &
5494 5497 ~ERI_G_MASK_MIF_INT);
5495 5498 PUT_MIFREG(mif_imask, erip->mif_mask);
5496 5499 }
5497 5500 } else if (enable == MIF_POLL_STOP) {
5498 5501 erip->mif_config &= ~ERI_MIF_CFGPE;
5499 5502 PUT_MIFREG(mif_cfg, erip->mif_config);
5500 5503 drv_usecwait(ERI_MIF_POLL_DELAY);
5501 5504 PUT_GLOBREG(intmask, GET_GLOBREG(intmask) |
5502 5505 ERI_G_MASK_MIF_INT);
5503 5506 PUT_MIFREG(mif_imask, ERI_MIF_INTMASK);
5504 5507 }
5505 5508 ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF Config = 0x%X",
5506 5509 GET_MIFREG(mif_cfg));
5507 5510 ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF imask = 0x%X",
5508 5511 GET_MIFREG(mif_imask));
5509 5512 ERI_DEBUG_MSG2(erip, XCVR_MSG, "INT imask = 0x%X",
5510 5513 GET_GLOBREG(intmask));
5511 5514 ERI_DEBUG_MSG1(erip, XCVR_MSG, "<== mif_poll");
5512 5515 }
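
eri_mif_poll toggles between two regimes: when polling starts, the MIF
hardware itself polls the PHY status register and interrupts on any
unmasked status change, so the global MIF interrupt is unmasked and the
per-bit mask loaded; when polling stops, the global MIF interrupt is
masked again so stale status changes cannot fire.  A minimal sketch of
that toggle over a hypothetical register image (eri uses its
PUT_MIFREG()/PUT_GLOBREG() accessors and hardware-defined bit
positions):

#include <stdio.h>
#include <stdint.h>

#define	MIF_CFG_POLL	0x0008		/* illustrative poll-enable bit */
#define	GMASK_MIF_INT	0x0010		/* illustrative global mask bit */

struct regs {
	uint32_t mif_cfg;	/* MIF configuration */
	uint32_t intmask;	/* global interrupt mask; 1 = masked */
	uint16_t mif_imask;	/* MIF status mask; 1 = ignore bit */
};

static void
mif_poll_start(struct regs *r, uint16_t ignore)
{
	r->mif_cfg |= MIF_CFG_POLL;	/* hardware polls the PHY */
	r->intmask &= ~GMASK_MIF_INT;	/* deliver MIF interrupts */
	r->mif_imask = ignore;		/* ignore uninteresting bits */
}

static void
mif_poll_stop(struct regs *r)
{
	r->mif_cfg &= ~MIF_CFG_POLL;
	r->intmask |= GMASK_MIF_INT;	/* block MIF interrupts */
	r->mif_imask = 0xFFFF;		/* ignore every status bit */
}

int
main(void)
{
	struct regs r = { 0, 0xFFFFFFFF, 0xFFFF };

	mif_poll_start(&r, 0x0004);	/* e.g. ignore one status bit */
	(void) printf("cfg=%x intmask=%x imask=%x\n",
	    (unsigned)r.mif_cfg, (unsigned)r.intmask, r.mif_imask);
	mif_poll_stop(&r);
	return (0);
}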
5513 5516
5514 5517 /* Decide if transmitter went dead and reinitialize everything */
5515 5518 #ifdef ERI_TX_HUNG
5516 5519 static int eri_txhung_limit = 2;
5517 5520 static int
5518 5521 eri_check_txhung(struct eri *erip)
5519 5522 {
5520 5523 boolean_t macupdate = B_FALSE;
5521 5524
5522 5525 mutex_enter(&erip->xmitlock);
5523 5526 if (erip->flags & ERI_RUNNING)
5524 5527 erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
5525 5528 ETX_COMPLETION_MASK);
5526 5529 macupdate |= eri_reclaim(erip, erip->tx_completion);
5527 5530
5528 5531 /* Something needs to be sent out but it is not going out */
5529 5532 if ((erip->tcurp != erip->tnextp) &&
5530 5533 (erip->stats.opackets64 == erip->erisave.reclaim_opackets) &&
5531 5534 (erip->stats.collisions == erip->erisave.starts))
5532 5535 erip->txhung++;
5533 5536 else
5534 5537 erip->txhung = 0;
5535 5538
5536 5539 erip->erisave.reclaim_opackets = erip->stats.opackets64;
5537 5540 erip->erisave.starts = erip->stats.collisions;
5538 5541 mutex_exit(&erip->xmitlock);
5539 5542
5540 5543 if (macupdate)
5541 5544 mac_tx_update(erip->mh);
5542 5545
5543 5546 return (erip->txhung >= eri_txhung_limit);
5544 5547 }
5545 5548 #endif
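
The hang test above is a progress heuristic rather than a hardware
check: on each watchdog pass it compares the completion counters
against a snapshot taken on the previous pass, and only declares a
hang after eri_txhung_limit consecutive passes with work queued but
nothing completing.  A minimal sketch of the same idea with
illustrative names (eri's snapshot lives in erip->erisave):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct txwatch {
	uint64_t last_opackets;	/* completions seen last pass */
	int	strikes;	/* consecutive no-progress passes */
	int	limit;		/* strikes before declaring a hang */
};

static bool
tx_hung(struct txwatch *w, bool work_pending, uint64_t opackets)
{
	if (work_pending && opackets == w->last_opackets)
		w->strikes++;
	else
		w->strikes = 0;
	w->last_opackets = opackets;
	return (w->strikes >= w->limit);
}

int
main(void)
{
	struct txwatch w = { 0, 0, 2 };

	(void) tx_hung(&w, true, 10);	/* establishes the snapshot */
	(void) tx_hung(&w, true, 10);	/* first no-progress strike */
	/* Second consecutive no-progress strike => hung. */
	(void) printf("hung: %d\n", tx_hung(&w, true, 10));
	return (0);
}

Requiring consecutive strikes keeps a single slow interval (for
instance, a burst of collisions on a half-duplex link) from triggering
a needless reset.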
|
↓ open down ↓ |
5185 lines elided |
↑ open up ↑ |