Print this page
Just the 5719/5720 changes
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/bge/bge_chip2.c
+++ new/usr/src/uts/common/io/bge/bge_chip2.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
|
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 - * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
27 + * Copyright 2011, 2012 Nexenta Systems, Inc. All rights reserved.
28 28 */
29 29
30 30 #include "bge_impl.h"
31 31
32 32 #define PIO_ADDR(bgep, offset) ((void *)((caddr_t)(bgep)->io_regs+(offset)))
33 33
34 34 /*
35 35 * Future features ... ?
36 36 */
37 37 #define BGE_CFG_IO8 1 /* 8/16-bit cfg space BIS/BIC */
38 38 #define BGE_IND_IO32 1 /* indirect access code */
39 39 #define BGE_SEE_IO32 1 /* SEEPROM access code */
40 40 #define BGE_FLASH_IO32 1 /* FLASH access code */
41 41
42 42 /*
43 43 * BGE MSI tunable:
44 44 *
45 45 * By default MSI is enabled on all supported platforms but it is disabled
46 46 * for some Broadcom chips due to known MSI hardware issues. Currently MSI
47 47 * is enabled only for 5714C A2 and 5715C A2 broadcom chips.
48 48 */
49 49 boolean_t bge_enable_msi = B_TRUE;
50 50
51 51 /*
52 52 * PCI-X/PCI-E relaxed ordering tunable for OS/Nexus driver
53 53 */
54 54 boolean_t bge_relaxed_ordering = B_TRUE;
55 55
56 56 /*
57 57 * Property names
58 58 */
59 59 static char knownids_propname[] = "bge-known-subsystems";
60 60
61 61 /*
62 62 * Patchable globals:
63 63 *
64 64 * bge_autorecover
65 65 * Enables/disables automatic recovery after fault detection
66 66 *
67 67 * bge_mlcr_default
68 68 * Value to program into the MLCR; controls the chip's GPIO pins
69 69 *
70 70 * bge_dma_{rd,wr}prio
71 71 * Relative priorities of DMA reads & DMA writes respectively.
72 72 * These may each be patched to any value 0-3. Equal values
73 73 * will give "fair" (round-robin) arbitration for PCI access.
74 74 * Unequal values will give one or the other function priority.
75 75 *
76 76 * bge_dma_rwctrl
77 77 * Value to put in the Read/Write DMA control register. See
78 78 * the Broadcom PRM for things you can fiddle with in this
79 79 * register ...
80 80 *
81 81 * bge_{tx,rx}_{count,ticks}_{norm,intr}
82 82 * Send/receive interrupt coalescing parameters. Counts are
83 83 * #s of descriptors, ticks are in microseconds. *norm* values
84 84 * apply between status updates/interrupts; the *intr* values
85 85 * refer to the 'during-interrupt' versions - see the PRM.
86 86 *
87 87 * NOTE: these values have been determined by measurement. They
88 88 * differ significantly from the values recommended in the PRM.
89 89 */
90 90 static uint32_t bge_autorecover = 1;
91 91 static uint32_t bge_mlcr_default_5714 = MLCR_DEFAULT_5714;
92 92
93 93 static uint32_t bge_dma_rdprio = 1;
94 94 static uint32_t bge_dma_wrprio = 0;
95 95 static uint32_t bge_dma_rwctrl = PDRWCR_VAR_DEFAULT;
96 96 static uint32_t bge_dma_rwctrl_5721 = PDRWCR_VAR_5721;
97 97 static uint32_t bge_dma_rwctrl_5714 = PDRWCR_VAR_5714;
98 98 static uint32_t bge_dma_rwctrl_5715 = PDRWCR_VAR_5715;
99 99
100 100 uint32_t bge_rx_ticks_norm = 128;
101 101 uint32_t bge_tx_ticks_norm = 2048; /* 8 for FJ2+ !?!? */
102 102 uint32_t bge_rx_count_norm = 8;
103 103 uint32_t bge_tx_count_norm = 128;
104 104
105 105 static uint32_t bge_rx_ticks_intr = 128;
106 106 static uint32_t bge_tx_ticks_intr = 0; /* 8 for FJ2+ !?!? */
107 107 static uint32_t bge_rx_count_intr = 2;
108 108 static uint32_t bge_tx_count_intr = 0;
109 109
110 110 /*
111 111 * Memory pool configuration parameters.
112 112 *
113 113 * These are generally specific to each member of the chip family, since
114 114 * each one may have a different memory size/configuration.
115 115 *
116 116 * Setting the mbuf pool length for a specific type of chip to 0 inhibits
117 117 * the driver from programming the various registers; instead they are left
118 118 * at their hardware defaults. This is the preferred option for later chips
119 119 * (5705+), whereas the older chips *required* these registers to be set,
120 120 * since the h/w default was 0 ;-(
121 121 */
122 122 static uint32_t bge_mbuf_pool_base = MBUF_POOL_BASE_DEFAULT;
123 123 static uint32_t bge_mbuf_pool_base_5704 = MBUF_POOL_BASE_5704;
124 124 static uint32_t bge_mbuf_pool_base_5705 = MBUF_POOL_BASE_5705;
125 125 static uint32_t bge_mbuf_pool_base_5721 = MBUF_POOL_BASE_5721;
126 126 static uint32_t bge_mbuf_pool_len = MBUF_POOL_LENGTH_DEFAULT;
127 127 static uint32_t bge_mbuf_pool_len_5704 = MBUF_POOL_LENGTH_5704;
128 128 static uint32_t bge_mbuf_pool_len_5705 = 0; /* use h/w default */
129 129 static uint32_t bge_mbuf_pool_len_5721 = 0;
130 130
131 131 /*
132 132 * Various high and low water marks, thresholds, etc ...
133 133 *
134 134 * Note: these are taken from revision 7 of the PRM, and some are different
135 135 * from both the values in earlier PRMs *and* those determined experimentally
136 136 * and used in earlier versions of this driver ...
137 137 */
138 138 static uint32_t bge_mbuf_hi_water = MBUF_HIWAT_DEFAULT;
139 139 static uint32_t bge_mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_DEFAULT;
140 140 static uint32_t bge_mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_DEFAULT;
141 141
142 142 static uint32_t bge_dmad_lo_water = DMAD_POOL_LOWAT_DEFAULT;
143 143 static uint32_t bge_dmad_hi_water = DMAD_POOL_HIWAT_DEFAULT;
144 144 static uint32_t bge_lowat_recv_frames = LOWAT_MAX_RECV_FRAMES_DEFAULT;
145 145
146 146 static uint32_t bge_replenish_std = STD_RCV_BD_REPLENISH_DEFAULT;
147 147 static uint32_t bge_replenish_mini = MINI_RCV_BD_REPLENISH_DEFAULT;
148 148 static uint32_t bge_replenish_jumbo = JUMBO_RCV_BD_REPLENISH_DEFAULT;
149 149
150 150 static uint32_t bge_watchdog_count = 1 << 16;
151 151 static uint16_t bge_dma_miss_limit = 20;
152 152
153 153 static uint32_t bge_stop_start_on_sync = 0;
154 154
155 155 /*
156 156 * bge_intr_max_loop controls the maximum loop number within bge_intr.
157 157 * When loading NIC with heavy network traffic, it is useful.
158 158 * Increasing this value could have positive effect to throughput,
159 159 * but it might also increase ticks of a bge ISR stick on CPU, which might
160 160 * lead to bad UI interactive experience. So tune this with caution.
161 161 */
162 162 static int bge_intr_max_loop = 1;
163 163
164 164 /*
165 165 * ========== Low-level chip & ring buffer manipulation ==========
166 166 */
167 167
168 168 #define BGE_DBG BGE_DBG_REGS /* debug flag for this code */
169 169
170 170
171 171 /*
172 172 * Config space read-modify-write routines
173 173 */
174 174
#if	BGE_CFG_IO8

/*
 * Read-modify-write: clear <bits> in the 16-bit PCI config-space
 * register <regno>, leaving all other bits untouched.
 */
static void bge_cfg_clr16(bge_t *bgep, bge_regno_t regno, uint16_t bits);
#pragma	inline(bge_cfg_clr16)

static void
bge_cfg_clr16(bge_t *bgep, bge_regno_t regno, uint16_t bits)
{
	uint16_t val;

	BGE_TRACE(("bge_cfg_clr16($%p, 0x%lx, 0x%x)",
	    (void *)bgep, regno, bits));

	val = pci_config_get16(bgep->cfg_handle, regno);

	BGE_DEBUG(("bge_cfg_clr16($%p, 0x%lx, 0x%x): 0x%x => 0x%x",
	    (void *)bgep, regno, bits, val, val & ~bits));

	pci_config_put16(bgep->cfg_handle, regno, val & ~bits);
}

#endif	/* BGE_CFG_IO8 */
198 198
199 199 static void bge_cfg_clr32(bge_t *bgep, bge_regno_t regno, uint32_t bits);
200 200 #pragma inline(bge_cfg_clr32)
201 201
202 202 static void
203 203 bge_cfg_clr32(bge_t *bgep, bge_regno_t regno, uint32_t bits)
204 204 {
205 205 uint32_t regval;
206 206
207 207 BGE_TRACE(("bge_cfg_clr32($%p, 0x%lx, 0x%x)",
208 208 (void *)bgep, regno, bits));
209 209
210 210 regval = pci_config_get32(bgep->cfg_handle, regno);
211 211
212 212 BGE_DEBUG(("bge_cfg_clr32($%p, 0x%lx, 0x%x): 0x%x => 0x%x",
213 213 (void *)bgep, regno, bits, regval, regval & ~bits));
214 214
215 215 regval &= ~bits;
216 216 pci_config_put32(bgep->cfg_handle, regno, regval);
217 217 }
218 218
219 219 #if BGE_IND_IO32
220 220
221 221 /*
222 222 * Indirect access to registers & RISC scratchpads, using config space
223 223 * accesses only.
224 224 *
225 225 * This isn't currently used, but someday we might want to use it for
226 226 * restoring the Subsystem Device/Vendor registers (which aren't directly
227 227 * writable in Config Space), or for downloading firmware into the RISCs
228 228 *
229 229 * In any case there are endian issues to be resolved before this code is
230 230 * enabled; the bizarre way that bytes get twisted by this chip AND by
231 231 * the PCI bridge in SPARC systems mean that we shouldn't enable it until
232 232 * it's been thoroughly tested for all access sizes on all supported
233 233 * architectures (SPARC *and* x86!).
234 234 */
235 235 uint32_t bge_ind_get32(bge_t *bgep, bge_regno_t regno);
236 236 #pragma inline(bge_ind_get32)
237 237
238 238 uint32_t
239 239 bge_ind_get32(bge_t *bgep, bge_regno_t regno)
240 240 {
241 241 uint32_t val;
242 242
243 243 BGE_TRACE(("bge_ind_get32($%p, 0x%lx)", (void *)bgep, regno));
244 244
245 245 #ifdef __sparc
246 246 if (DEVICE_5717_SERIES_CHIPSETS(bgep))
247 247 regno = LE_32(regno);
248 248 #endif
249 249 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_RIAAR, regno);
250 250 val = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_RIADR);
251 251
252 252 BGE_DEBUG(("bge_ind_get32($%p, 0x%lx) => 0x%x",
253 253 (void *)bgep, regno, val));
254 254
255 255 val = LE_32(val);
256 256
257 257 return (val);
258 258 }
259 259
260 260 void bge_ind_put32(bge_t *bgep, bge_regno_t regno, uint32_t val);
261 261 #pragma inline(bge_ind_put32)
262 262
263 263 void
264 264 bge_ind_put32(bge_t *bgep, bge_regno_t regno, uint32_t val)
265 265 {
266 266 BGE_TRACE(("bge_ind_put32($%p, 0x%lx, 0x%x)",
267 267 (void *)bgep, regno, val));
268 268
269 269 val = LE_32(val);
270 270 #ifdef __sparc
271 271 if (DEVICE_5717_SERIES_CHIPSETS(bgep))
272 272 regno = LE_32(regno);
273 273 #endif
274 274 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_RIAAR, regno);
275 275 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_RIADR, val);
276 276 }
277 277
278 278 #endif /* BGE_IND_IO32 */
279 279
#if	BGE_DEBUGGING

/*
 * Debug-only sanity check: report any master/target aborts latched
 * in the PCI status register.
 */
static void bge_pci_check(bge_t *bgep);
#pragma	no_inline(bge_pci_check)

static void
bge_pci_check(bge_t *bgep)
{
	uint16_t pcistatus;

	pcistatus = pci_config_get16(bgep->cfg_handle, PCI_CONF_STAT);
	if ((pcistatus & (PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB)) != 0)
		BGE_DEBUG(("bge_pci_check($%p): PCI status 0x%x",
		    (void *)bgep, pcistatus));
}

#endif	/* BGE_DEBUGGING */
297 297
298 298 /*
299 299 * Perform first-stage chip (re-)initialisation, using only config-space
300 300 * accesses:
301 301 *
302 302 * + Read the vendor/device/revision/subsystem/cache-line-size registers,
303 303 * returning the data in the structure pointed to by <idp>.
304 304 * + Configure the target-mode endianness (swap) options.
305 305 * + Disable interrupts and enable Memory Space accesses.
306 306 * + Enable or disable Bus Mastering according to the <enable_dma> flag.
307 307 *
308 308 * This sequence is adapted from Broadcom document 570X-PG102-R,
309 309 * page 102, steps 1-3, 6-8 and 11-13. The omitted parts of the sequence
310 310 * are 4 and 5 (Reset Core and wait) which are handled elsewhere.
311 311 *
312 312 * This function MUST be called before any non-config-space accesses
313 313 * are made; on this first call <enable_dma> is B_FALSE, and it
314 314 * effectively performs steps 3-1(!) of the initialisation sequence
315 315 * (the rest are not required but should be harmless).
316 316 *
317 317 * It MUST also be called after a chip reset, as this disables
318 318 * Memory Space cycles! In this case, <enable_dma> is B_TRUE, and
319 319 * it is effectively performing steps 6-8.
320 320 */
321 321 void bge_chip_cfg_init(bge_t *bgep, chip_id_t *cidp, boolean_t enable_dma);
322 322 #pragma no_inline(bge_chip_cfg_init)
323 323
324 324 void
325 325 bge_chip_cfg_init(bge_t *bgep, chip_id_t *cidp, boolean_t enable_dma)
326 326 {
327 327 ddi_acc_handle_t handle;
328 328 uint16_t command;
329 329 uint32_t mhcr;
330 330 uint16_t value16;
331 331 int i;
332 332
333 333 BGE_TRACE(("bge_chip_cfg_init($%p, $%p, %d)",
334 334 (void *)bgep, (void *)cidp, enable_dma));
335 335
336 336 /*
337 337 * Step 3: save PCI cache line size and subsystem vendor ID
338 338 *
339 339 * Read all the config-space registers that characterise the
340 340 * chip, specifically vendor/device/revision/subsystem vendor
341 341 * and subsystem device id. We expect (but don't check) that
342 342 * (vendor == VENDOR_ID_BROADCOM) && (device == DEVICE_ID_5704)
343 343 *
344 344 * Also save all bus-transaction related registers (cache-line
345 345 * size, bus-grant/latency parameters, etc). Some of these are
346 346 * cleared by reset, so we'll have to restore them later. This
347 347 * comes from the Broadcom document 570X-PG102-R ...
348 348 *
349 349 * Note: Broadcom document 570X-PG102-R seems to be in error
350 350 * here w.r.t. the offsets of the Subsystem Vendor ID and
351 351 * Subsystem (Device) ID registers, which are the opposite way
352 352 * round according to the PCI standard. For good measure, we
353 353 * save/restore both anyway.
354 354 */
355 355 handle = bgep->cfg_handle;
|
↓ open down ↓ |
318 lines elided |
↑ open up ↑ |
356 356
357 357 /*
358 358 * For some chipsets (e.g., BCM5718), if MHCR_ENABLE_ENDIAN_BYTE_SWAP
359 359 * has been set in PCI_CONF_COMM already, we need to write the
360 360 * byte-swapped value to it. So we just write zero first for simplicity.
361 361 */
362 362 cidp->device = pci_config_get16(handle, PCI_CONF_DEVID);
363 363 if (DEVICE_5717_SERIES_CHIPSETS(bgep))
364 364 pci_config_put32(handle, PCI_CONF_BGE_MHCR, 0);
365 365 mhcr = pci_config_get32(handle, PCI_CONF_BGE_MHCR);
366 - cidp->asic_rev = mhcr & MHCR_CHIP_REV_MASK;
366 + cidp->asic_rev = (mhcr & MHCR_CHIP_REV_MASK) >> MHCR_CHIP_REV_SHIFT;
367 + if (MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_PRODID) {
368 + uint32_t reg;
369 + switch (cidp->device) {
370 + case DEVICE_ID_5717:
371 + case DEVICE_ID_5718:
372 + case DEVICE_ID_5719:
373 + case DEVICE_ID_5720:
374 + reg = PCI_CONF_GEN2_PRODID_ASICREV;
375 + break;
376 + case DEVICE_ID_57781:
377 + case DEVICE_ID_57785:
378 + case DEVICE_ID_57761:
379 + case DEVICE_ID_57765:
380 + case DEVICE_ID_57791:
381 + case DEVICE_ID_57795:
382 + case DEVICE_ID_57762:
383 + case DEVICE_ID_57766:
384 + case DEVICE_ID_57782:
385 + case DEVICE_ID_57786:
386 + reg = PCI_CONF_GEN15_PRODID_ASICREV;
387 + break;
388 + default:
389 + reg = PCI_CONF_PRODID_ASICREV;
390 + break;
391 + }
392 + cidp->asic_rev = pci_config_get32(handle, reg);
393 + }
367 394 cidp->businfo = pci_config_get32(handle, PCI_CONF_BGE_PCISTATE);
368 395 cidp->command = pci_config_get16(handle, PCI_CONF_COMM);
369 396
370 397 cidp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
371 398 cidp->subven = pci_config_get16(handle, PCI_CONF_SUBVENID);
372 399 cidp->subdev = pci_config_get16(handle, PCI_CONF_SUBSYSID);
373 400 cidp->revision = pci_config_get8(handle, PCI_CONF_REVID);
374 401 cidp->clsize = pci_config_get8(handle, PCI_CONF_CACHE_LINESZ);
375 402 cidp->latency = pci_config_get8(handle, PCI_CONF_LATENCY_TIMER);
376 403
377 404 BGE_DEBUG(("bge_chip_cfg_init: %s bus is %s and %s; #INTA is %s",
378 405 cidp->businfo & PCISTATE_BUS_IS_PCI ? "PCI" : "PCI-X",
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
379 406 cidp->businfo & PCISTATE_BUS_IS_FAST ? "fast" : "slow",
380 407 cidp->businfo & PCISTATE_BUS_IS_32_BIT ? "narrow" : "wide",
381 408 cidp->businfo & PCISTATE_INTA_STATE ? "high" : "low"));
382 409 BGE_DEBUG(("bge_chip_cfg_init: vendor 0x%x device 0x%x revision 0x%x",
383 410 cidp->vendor, cidp->device, cidp->revision));
384 411 BGE_DEBUG(("bge_chip_cfg_init: subven 0x%x subdev 0x%x asic_rev 0x%x",
385 412 cidp->subven, cidp->subdev, cidp->asic_rev));
386 413 BGE_DEBUG(("bge_chip_cfg_init: clsize %d latency %d command 0x%x",
387 414 cidp->clsize, cidp->latency, cidp->command));
388 415
416 + cidp->chip_type = 0;
417 + if (MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5717 ||
418 + MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5719 ||
419 + MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5720)
420 + cidp->chip_type |= CHIP_TYPE_5717_PLUS;
421 +
422 + if (MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_57765 ||
423 + MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_57766)
424 + cidp->chip_type |= CHIP_TYPE_57765_CLASS;
425 +
426 + if (cidp->chip_type & CHIP_TYPE_57765_CLASS ||
427 + cidp->chip_type & CHIP_TYPE_5717_PLUS)
428 + cidp->chip_type |= CHIP_TYPE_57765_PLUS;
429 +
430 + /* Intentionally exclude ASIC_REV_5906 */
431 + if (MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5755 ||
432 + MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5787 ||
433 + MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5784 ||
434 + MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5761 ||
435 + MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5785 ||
436 + MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_57780 ||
437 + cidp->chip_type & CHIP_TYPE_57765_PLUS)
438 + cidp->chip_type |= CHIP_TYPE_5755_PLUS;
439 +
440 + if (MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5780 ||
441 + MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5714)
442 + cidp->chip_type |= CHIP_TYPE_5780_CLASS;
443 +
444 + if (MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5750 ||
445 + MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5752 ||
446 + MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5906 ||
447 + cidp->chip_type & CHIP_TYPE_5755_PLUS ||
448 + cidp->chip_type & CHIP_TYPE_5780_CLASS)
449 + cidp->chip_type |= CHIP_TYPE_5750_PLUS;
450 +
451 + if (MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5705 ||
452 + cidp->chip_type & CHIP_TYPE_5750_PLUS)
453 + cidp->chip_type |= CHIP_TYPE_5705_PLUS;
454 +
389 455 /*
390 456 * Step 2 (also step 6): disable and clear interrupts.
391 457 * Steps 11-13: configure PIO endianness options, and enable
392 458 * indirect register access. We'll also select any other
393 459 * options controlled by the MHCR (e.g. tagged status, mask
394 460 * interrupt mode) at this stage ...
395 461 *
396 462 * Note: internally, the chip is 64-bit and BIG-endian, but
397 463 * since it talks to the host over a (LITTLE-endian) PCI bus,
398 464 * it normally swaps bytes around at the PCI interface.
399 465 * However, the PCI host bridge on SPARC systems normally
400 466 * swaps the byte lanes around too, since SPARCs are also
401 467 * BIG-endian. So it turns out that on SPARC, the right
402 468 * option is to tell the chip to swap (and the host bridge
403 469 * will swap back again), whereas on x86 we ask the chip
404 470 * NOT to swap, so the natural little-endianness of the
405 471 * PCI bus is assumed. Then the only thing that doesn't
406 472 * automatically work right is access to an 8-byte register
407 473 * by a little-endian host; but we don't want to set the
408 474 * MHCR_ENABLE_REGISTER_WORD_SWAP bit because then 4-byte
409 475 * accesses don't go where expected ;-( So we live with
410 476 * that, and perform word-swaps in software in the few cases
411 477 * where a chip register is defined as an 8-byte value --
412 478 * see the code below for details ...
413 479 *
414 480 * Note: the meaning of the 'MASK_INTERRUPT_MODE' bit isn't
415 481 * very clear in the register description in the PRM, but
416 482 * Broadcom document 570X-PG104-R page 248 explains a little
417 483 * more (under "Broadcom Mask Mode"). The bit changes the way
418 484 * the MASK_PCI_INT_OUTPUT bit works: with MASK_INTERRUPT_MODE
419 485 * clear, the chip interprets MASK_PCI_INT_OUTPUT in the same
420 486 * way as the 5700 did, which isn't very convenient. Setting
421 487 * the MASK_INTERRUPT_MODE bit makes the MASK_PCI_INT_OUTPUT
422 488 * bit do just what its name says -- MASK the PCI #INTA output
423 489 * (i.e. deassert the signal at the pin) leaving all internal
424 490 * state unchanged. This is much more convenient for our
425 491 * interrupt handler, so we set MASK_INTERRUPT_MODE here.
426 492 *
427 493 * Note: the inconvenient semantics of the interrupt mailbox
428 494 * (nonzero disables and acknowledges/clears the interrupt,
429 495 * zero enables AND CLEARS it) would make race conditions
430 496 * likely in the interrupt handler:
431 497 *
432 498 * (1) acknowledge & disable interrupts
433 499 * (2) while (more to do)
434 500 * process packets
435 501 * (3) enable interrupts -- also clears pending
436 502 *
437 503 * If the chip received more packets and internally generated
|
↓ open down ↓ |
39 lines elided |
↑ open up ↑ |
438 504 * an interrupt between the check at (2) and the mbox write
439 505 * at (3), this interrupt would be lost :-(
440 506 *
441 507 * The best way to avoid this is to use TAGGED STATUS mode,
442 508 * where the chip includes a unique tag in each status block
443 509 * update, and the host, when re-enabling interrupts, passes
444 510 * the last tag it saw back to the chip; then the chip can
445 511 * see whether the host is truly up to date, and regenerate
446 512 * its interrupt if not.
447 513 */
448 - mhcr = MHCR_ENABLE_INDIRECT_ACCESS |
514 + mhcr = MHCR_ENABLE_INDIRECT_ACCESS |
449 515 MHCR_ENABLE_TAGGED_STATUS_MODE |
516 + MHCR_ENABLE_PCI_STATE_WRITE |
450 517 MHCR_MASK_INTERRUPT_MODE |
451 518 MHCR_CLEAR_INTERRUPT_INTA;
452 519
453 520 if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
454 521 mhcr |= MHCR_MASK_PCI_INT_OUTPUT;
455 522
456 523 #ifdef _BIG_ENDIAN
457 524 mhcr |= MHCR_ENABLE_ENDIAN_WORD_SWAP | MHCR_ENABLE_ENDIAN_BYTE_SWAP;
458 525 #endif /* _BIG_ENDIAN */
459 526
460 527 if (DEVICE_5717_SERIES_CHIPSETS(bgep))
461 528 pci_config_put32(handle, PCI_CONF_BGE_MHCR, 0);
462 529 pci_config_put32(handle, PCI_CONF_BGE_MHCR, mhcr);
463 530
464 531 #ifdef BGE_IPMI_ASF
465 532 bgep->asf_wordswapped = B_FALSE;
466 533 #endif
467 534 /*
468 535 * Step 1 (also step 7): Enable PCI Memory Space accesses
469 536 * Disable Memory Write/Invalidate
470 537 * Enable or disable Bus Mastering
471 538 *
472 539 * Note that all other bits are taken from the original value saved
473 540 * the first time through here, rather than from the current register
474 541 * value, 'cos that will have been cleared by a soft RESET since.
475 542 * In this way we preserve the OBP/nexus-parent's preferred settings
476 543 * of the parity-error and system-error enable bits across multiple
477 544 * chip RESETs.
478 545 */
479 546 command = bgep->chipid.command | PCI_COMM_MAE;
480 547 command &= ~(PCI_COMM_ME|PCI_COMM_MEMWR_INVAL);
481 548 if (enable_dma)
482 549 command |= PCI_COMM_ME;
483 550 /*
484 551 * on BCM5714 revision A0, false parity error gets generated
485 552 * due to a logic bug. Provide a workaround by disabling parity
486 553 * error.
487 554 */
488 555 if (((cidp->device == DEVICE_ID_5714C) ||
489 556 (cidp->device == DEVICE_ID_5714S)) &&
490 557 (cidp->revision == REVISION_ID_5714_A0)) {
491 558 command &= ~PCI_COMM_PARITY_DETECT;
492 559 }
493 560 pci_config_put16(handle, PCI_CONF_COMM, command);
494 561
495 562 /*
496 563 * On some PCI-E device, there were instances when
497 564 * the device was still link training.
498 565 */
499 566 if (bgep->chipid.pci_type == BGE_PCI_E) {
500 567 i = 0;
501 568 value16 = pci_config_get16(handle, PCI_CONF_COMM);
502 569 while ((value16 != command) && (i < 100)) {
503 570 drv_usecwait(200);
504 571 value16 = pci_config_get16(handle, PCI_CONF_COMM);
505 572 ++i;
506 573 }
507 574 }
508 575
509 576 /*
510 577 * Clear any remaining error status bits
511 578 */
512 579 pci_config_put16(handle, PCI_CONF_STAT, ~0);
513 580
514 581 /*
515 582 * Do following if and only if the device is NOT BCM5714C OR
516 583 * BCM5715C
517 584 */
518 585 if (!((cidp->device == DEVICE_ID_5714C) ||
519 586 (cidp->device == DEVICE_ID_5715C))) {
520 587 /*
521 588 * Make sure these indirect-access registers are sane
522 589 * rather than random after power-up or reset
523 590 */
524 591 pci_config_put32(handle, PCI_CONF_BGE_RIAAR, 0);
525 592 pci_config_put32(handle, PCI_CONF_BGE_MWBAR, 0);
526 593 }
527 594 /*
528 595 * Step 8: Disable PCI-X/PCI-E Relaxed Ordering
529 596 */
530 597 bge_cfg_clr16(bgep, PCIX_CONF_COMM, PCIX_COMM_RELAXED);
531 598
532 599 if (cidp->pci_type == BGE_PCI_E) {
533 600 if (DEVICE_5723_SERIES_CHIPSETS(bgep)) {
534 601 bge_cfg_clr16(bgep, PCI_CONF_DEV_CTRL_5723,
535 602 DEV_CTRL_NO_SNOOP | DEV_CTRL_RELAXED);
536 603 } else
537 604 bge_cfg_clr16(bgep, PCI_CONF_DEV_CTRL,
538 605 DEV_CTRL_NO_SNOOP | DEV_CTRL_RELAXED);
539 606 }
540 607 }
541 608
542 609 #ifdef __amd64
543 610 /*
544 611 * Distinguish CPU types
545 612 *
546 613 * These use to distinguish AMD64 or Intel EM64T of CPU running mode.
547 614 * If CPU runs on Intel EM64T mode,the 64bit operation cannot works fine
548 615 * for PCI-Express based network interface card. This is the work-around
549 616 * for those nics.
550 617 */
551 618 static boolean_t bge_get_em64t_type(void);
552 619 #pragma inline(bge_get_em64t_type)
553 620
554 621 static boolean_t
555 622 bge_get_em64t_type(void)
556 623 {
557 624
558 625 return (x86_vendor == X86_VENDOR_Intel);
559 626 }
560 627 #endif
561 628
562 629 /*
563 630 * Operating register get/set access routines
564 631 */
565 632
566 633 uint32_t bge_reg_get32(bge_t *bgep, bge_regno_t regno);
567 634 #pragma inline(bge_reg_get32)
568 635
569 636 uint32_t
570 637 bge_reg_get32(bge_t *bgep, bge_regno_t regno)
571 638 {
572 639 BGE_TRACE(("bge_reg_get32($%p, 0x%lx)",
573 640 (void *)bgep, regno));
574 641
575 642 return (ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno)));
576 643 }
577 644
578 645 void bge_reg_put32(bge_t *bgep, bge_regno_t regno, uint32_t data);
579 646 #pragma inline(bge_reg_put32)
580 647
581 648 void
582 649 bge_reg_put32(bge_t *bgep, bge_regno_t regno, uint32_t data)
583 650 {
584 651 BGE_TRACE(("bge_reg_put32($%p, 0x%lx, 0x%x)",
585 652 (void *)bgep, regno, data));
586 653
587 654 ddi_put32(bgep->io_handle, PIO_ADDR(bgep, regno), data);
588 655 BGE_PCICHK(bgep);
589 656 }
590 657
591 658 void bge_reg_set32(bge_t *bgep, bge_regno_t regno, uint32_t bits);
592 659 #pragma inline(bge_reg_set32)
593 660
594 661 void
595 662 bge_reg_set32(bge_t *bgep, bge_regno_t regno, uint32_t bits)
596 663 {
597 664 uint32_t regval;
598 665
599 666 BGE_TRACE(("bge_reg_set32($%p, 0x%lx, 0x%x)",
600 667 (void *)bgep, regno, bits));
601 668
602 669 regval = bge_reg_get32(bgep, regno);
603 670 regval |= bits;
604 671 bge_reg_put32(bgep, regno, regval);
605 672 }
606 673
607 674 void bge_reg_clr32(bge_t *bgep, bge_regno_t regno, uint32_t bits);
608 675 #pragma inline(bge_reg_clr32)
609 676
610 677 void
611 678 bge_reg_clr32(bge_t *bgep, bge_regno_t regno, uint32_t bits)
612 679 {
613 680 uint32_t regval;
614 681
615 682 BGE_TRACE(("bge_reg_clr32($%p, 0x%lx, 0x%x)",
616 683 (void *)bgep, regno, bits));
617 684
618 685 regval = bge_reg_get32(bgep, regno);
619 686 regval &= ~bits;
620 687 bge_reg_put32(bgep, regno, regval);
621 688 }
622 689
623 690 static uint64_t bge_reg_get64(bge_t *bgep, bge_regno_t regno);
624 691 #pragma inline(bge_reg_get64)
625 692
626 693 static uint64_t
627 694 bge_reg_get64(bge_t *bgep, bge_regno_t regno)
628 695 {
629 696 uint64_t regval;
630 697
631 698 #ifdef __amd64
632 699 if (DEVICE_5723_SERIES_CHIPSETS(bgep) || bge_get_em64t_type() ||
633 700 DEVICE_5717_SERIES_CHIPSETS(bgep)) {
634 701 regval = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno + 4));
635 702 regval <<= 32;
636 703 regval |= ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno));
637 704 } else {
638 705 regval = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, regno));
639 706 }
640 707 #elif defined(__sparc)
641 708 if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
642 709 DEVICE_5717_SERIES_CHIPSETS(bgep)) {
643 710 regval = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno));
644 711 regval <<= 32;
645 712 regval |= ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno + 4));
646 713 } else {
647 714 regval = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, regno));
648 715 }
649 716 #else
650 717 regval = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, regno));
651 718 #endif
652 719
653 720 #ifdef _LITTLE_ENDIAN
654 721 regval = (regval >> 32) | (regval << 32);
655 722 #endif /* _LITTLE_ENDIAN */
656 723
657 724 BGE_TRACE(("bge_reg_get64($%p, 0x%lx) = 0x%016llx",
658 725 (void *)bgep, regno, regval));
659 726
660 727 return (regval);
661 728 }
662 729
663 730 static void bge_reg_put64(bge_t *bgep, bge_regno_t regno, uint64_t data);
664 731 #pragma inline(bge_reg_put64)
665 732
666 733 static void
667 734 bge_reg_put64(bge_t *bgep, bge_regno_t regno, uint64_t data)
668 735 {
669 736 BGE_TRACE(("bge_reg_put64($%p, 0x%lx, 0x%016llx)",
670 737 (void *)bgep, regno, data));
671 738
672 739 #ifdef _LITTLE_ENDIAN
673 740 data = ((data >> 32) | (data << 32));
674 741 #endif /* _LITTLE_ENDIAN */
675 742
676 743 #ifdef __amd64
677 744 if (DEVICE_5723_SERIES_CHIPSETS(bgep) || bge_get_em64t_type() ||
678 745 DEVICE_5717_SERIES_CHIPSETS(bgep)) {
679 746 ddi_put32(bgep->io_handle,
680 747 PIO_ADDR(bgep, regno), (uint32_t)data);
681 748 BGE_PCICHK(bgep);
682 749 ddi_put32(bgep->io_handle,
683 750 PIO_ADDR(bgep, regno + 4), (uint32_t)(data >> 32));
684 751
685 752 } else {
686 753 ddi_put64(bgep->io_handle, PIO_ADDR(bgep, regno), data);
687 754 }
688 755 #elif defined(__sparc)
689 756 if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
690 757 DEVICE_5717_SERIES_CHIPSETS(bgep)) {
691 758 ddi_put32(bgep->io_handle,
692 759 PIO_ADDR(bgep, regno + 4), (uint32_t)data);
693 760 BGE_PCICHK(bgep);
694 761 ddi_put32(bgep->io_handle,
695 762 PIO_ADDR(bgep, regno), (uint32_t)(data >> 32));
696 763 } else {
697 764 ddi_put64(bgep->io_handle, PIO_ADDR(bgep, regno), data);
698 765 }
699 766 #else
700 767 ddi_put64(bgep->io_handle, PIO_ADDR(bgep, regno), data);
701 768 #endif
702 769
703 770 BGE_PCICHK(bgep);
704 771 }
705 772
706 773 /*
707 774 * The DDI doesn't provide get/put functions for 128 bit data
708 775 * so we put RCBs out as two 64-bit chunks instead.
709 776 */
710 777 static void bge_reg_putrcb(bge_t *bgep, bge_regno_t addr, bge_rcb_t *rcbp);
711 778 #pragma inline(bge_reg_putrcb)
712 779
713 780 static void
714 781 bge_reg_putrcb(bge_t *bgep, bge_regno_t addr, bge_rcb_t *rcbp)
715 782 {
716 783 uint64_t *p;
717 784
718 785 BGE_TRACE(("bge_reg_putrcb($%p, 0x%lx, 0x%016llx:%04x:%04x:%08x)",
719 786 (void *)bgep, addr, rcbp->host_ring_addr,
720 787 rcbp->max_len, rcbp->flags, rcbp->nic_ring_addr));
721 788
722 789 ASSERT((addr % sizeof (*rcbp)) == 0);
723 790
724 791 p = (void *)rcbp;
725 792 bge_reg_put64(bgep, addr, *p++);
726 793 bge_reg_put64(bgep, addr+8, *p);
727 794 }
728 795
void bge_mbx_put(bge_t *bgep, bge_regno_t regno, uint64_t data);
#pragma	inline(bge_mbx_put)

/*
 * Write the low 32 bits of <data> to a mailbox register.  On 5906
 * chips the access is redirected to the corresponding low-priority
 * mailbox region (note the macro-name-based offset adjustment).
 */
void
bge_mbx_put(bge_t *bgep, bge_regno_t regno, uint64_t data)
{
	if (DEVICE_5906_SERIES_CHIPSETS(bgep))
		regno += INTERRUPT_LP_MBOX_0_REG - INTERRUPT_MBOX_0_REG + 4;

	BGE_TRACE(("bge_mbx_put($%p, 0x%lx, 0x%016llx)",
	    (void *)bgep, regno, data));

	/*
	 * Mailbox registers are nominally 64 bits on the 5701, but
	 * the MSW isn't used. On the 5703, they're only 32 bits
	 * anyway. So here we just write the lower(!) 32 bits -
	 * remembering that the chip is big-endian, even though the
	 * PCI bus is little-endian ...
	 */
#ifdef	_BIG_ENDIAN
	ddi_put32(bgep->io_handle, PIO_ADDR(bgep, regno+4), (uint32_t)data);
#else
	ddi_put32(bgep->io_handle, PIO_ADDR(bgep, regno), (uint32_t)data);
#endif	/* _BIG_ENDIAN */
	BGE_PCICHK(bgep);
}
755 822
uint32_t bge_mbx_get(bge_t *bgep, bge_regno_t regno);
#pragma inline(bge_mbx_get)

/*
 * Read the (useful) low 32 bits of a mailbox register, with the same
 * 5906 low-priority-mailbox redirection and endian handling as
 * bge_mbx_put() above.
 */
uint32_t
bge_mbx_get(bge_t *bgep, bge_regno_t regno)
{
	uint32_t val32;

	if (DEVICE_5906_SERIES_CHIPSETS(bgep))
		regno += INTERRUPT_LP_MBOX_0_REG - INTERRUPT_MBOX_0_REG + 4;

	BGE_TRACE(("bge_mbx_get($%p, 0x%lx)",
	    (void *)bgep, regno));

#ifdef	_BIG_ENDIAN
	val32 = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno+4));
#else
	val32 = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno));
#endif	/* _BIG_ENDIAN */
	BGE_PCICHK(bgep);

	BGE_DEBUG(("bge_mbx_get($%p, 0x%lx) => 0x%08x",
	    (void *)bgep, regno, val32));

	return (val32);
}
782 849
783 850
#if	BGE_DEBUGGING

void bge_led_mark(bge_t *bgep);
#pragma	no_inline(bge_led_mark)

/*
 * Debug-build only: pulse the LINK LEDs as a visible/scope-triggerable
 * marker.  The three writes below are deliberately sequenced: all LEDs
 * forced on, then forced off, then returned to hardware control.
 */
void
bge_led_mark(bge_t *bgep)
{
	uint32_t led_ctrl = LED_CONTROL_OVERRIDE_LINK |
	    LED_CONTROL_1000MBPS_LED |
	    LED_CONTROL_100MBPS_LED |
	    LED_CONTROL_10MBPS_LED;

	/*
	 * Blink all three LINK LEDs on simultaneously, then all off,
	 * then restore to automatic hardware control. This is used
	 * in laboratory testing to trigger a logic analyser or scope.
	 */
	bge_reg_set32(bgep, ETHERNET_MAC_LED_CONTROL_REG, led_ctrl);
	led_ctrl ^= LED_CONTROL_OVERRIDE_LINK;
	bge_reg_clr32(bgep, ETHERNET_MAC_LED_CONTROL_REG, led_ctrl);
	led_ctrl = LED_CONTROL_OVERRIDE_LINK;
	bge_reg_clr32(bgep, ETHERNET_MAC_LED_CONTROL_REG, led_ctrl);
}

#endif	/* BGE_DEBUGGING */
810 877
/*
 * NIC on-chip memory access routines
 *
 * Only 32K of NIC memory is visible at a time, controlled by the
 * Memory Window Base Address Register (in PCI config space). Once
 * this is set, the 32K region of NIC-local memory that it refers
 * to can be directly addressed in the upper 32K of the 64K of PCI
 * memory space used for the device.
 */

static void bge_nic_setwin(bge_t *bgep, bge_regno_t base);
#pragma	inline(bge_nic_setwin)

/*
 * Point the memory window at NIC-local address <base>, which must be
 * aligned to the window granularity.
 */
static void
bge_nic_setwin(bge_t *bgep, bge_regno_t base)
{
	chip_id_t *cidp;

	BGE_TRACE(("bge_nic_setwin($%p, 0x%lx)",
	    (void *)bgep, base));

	ASSERT((base & MWBAR_GRANULE_MASK) == 0);

	/*
	 * Don't do repeated zero data writes,
	 * if the device is BCM5714C/15C.
	 */
	cidp = &bgep->chipid;
	if ((cidp->device == DEVICE_ID_5714C) ||
	    (cidp->device == DEVICE_ID_5715C)) {
		/* Skip the write if the window is already at zero */
		if (bgep->lastWriteZeroData && (base == (bge_regno_t)0))
			return;
		/* Adjust lastWriteZeroData */
		bgep->lastWriteZeroData = ((base == (bge_regno_t)0) ?
		    B_TRUE : B_FALSE);
	}
#ifdef __sparc
	/* 5717-series wants the window base byte-swapped on SPARC */
	if (DEVICE_5717_SERIES_CHIPSETS(bgep))
		base = LE_32(base);
#endif
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, base);
}
853 920
static uint32_t bge_nic_get32(bge_t *bgep, bge_regno_t addr);
#pragma	inline(bge_nic_get32)

/*
 * Read one 32-bit word of NIC on-chip memory at NIC-local <addr>.
 */
static uint32_t
bge_nic_get32(bge_t *bgep, bge_regno_t addr)
{
	uint32_t data;

#if defined(BGE_IPMI_ASF) && !defined(__sparc)
	if (bgep->asf_enabled && !bgep->asf_wordswapped) {
		/* workaround for word swap error */
		if (addr & 4)
			addr = addr - 4;
		else
			addr = addr + 4;
	}
#endif

#ifdef __sparc
	/* SPARC uses the indirect-access helper rather than the window */
	data = bge_nic_read32(bgep, addr);
#else
	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;

	data = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, addr));
#endif

	/* NOTE(review): on non-SPARC this prints the window-relative addr */
	BGE_TRACE(("bge_nic_get32($%p, 0x%lx) = 0x%08x",
	    (void *)bgep, addr, data));

	return (data);
}
887 954
void bge_nic_put32(bge_t *bgep, bge_regno_t addr, uint32_t data);
#pragma inline(bge_nic_put32)

/*
 * Write one 32-bit word of NIC on-chip memory at NIC-local <addr>.
 * On SPARC the write goes through the config-space MWBAR/MWDAR pair
 * (and the window is closed again afterwards); elsewhere it goes
 * through the memory window.
 */
void
bge_nic_put32(bge_t *bgep, bge_regno_t addr, uint32_t data)
{
	BGE_TRACE(("bge_nic_put32($%p, 0x%lx, 0x%08x)",
	    (void *)bgep, addr, data));

#if defined(BGE_IPMI_ASF) && !defined(__sparc)
	if (bgep->asf_enabled && !bgep->asf_wordswapped) {
		/* workaround for word swap error */
		if (addr & 4)
			addr = addr - 4;
		else
			addr = addr + 4;
	}
#endif

#ifdef __sparc
	if (DEVICE_5717_SERIES_CHIPSETS(bgep))
		addr = LE_32(addr);
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, addr);
	data = LE_32(data);
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWDAR, data);
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, 0);
#else
	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;
	ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr), data);
	BGE_PCICHK(bgep);
#endif
}
922 989
static uint64_t bge_nic_get64(bge_t *bgep, bge_regno_t addr);
#pragma	inline(bge_nic_get64)

/*
 * Read one 64-bit word of NIC on-chip memory.  Chips that cannot take
 * a single 64-bit access (5723/5717 series, em64t chipsets) are read
 * as two 32-bit halves; note the halves are fetched in opposite order
 * on amd64 vs sparc to assemble the same 64-bit value.
 */
static uint64_t
bge_nic_get64(bge_t *bgep, bge_regno_t addr)
{
	uint64_t data;

	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;

#ifdef	__amd64
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) || bge_get_em64t_type() ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		data = ddi_get32(bgep->io_handle,
		    PIO_ADDR(bgep, addr + 4));
		data <<= 32;
		data |= ddi_get32(bgep->io_handle, PIO_ADDR(bgep, addr));
	} else {
		data = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, addr));
	}
#elif defined(__sparc)
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		data = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, addr));
		data <<= 32;
		data |= ddi_get32(bgep->io_handle,
		    PIO_ADDR(bgep, addr + 4));
	} else {
		data = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, addr));
	}
#else
	data = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, addr));
#endif

	BGE_TRACE(("bge_nic_get64($%p, 0x%lx) = 0x%016llx",
	    (void *)bgep, addr, data));

	return (data);
}
964 1031
static void bge_nic_put64(bge_t *bgep, bge_regno_t addr, uint64_t data);
#pragma	inline(bge_nic_put64)

/*
 * Write one 64-bit word of NIC on-chip memory; the split-into-halves
 * cases mirror bge_nic_get64() above, with a PCI check flushing the
 * first half before the second is written.
 */
static void
bge_nic_put64(bge_t *bgep, bge_regno_t addr, uint64_t data)
{
	BGE_TRACE(("bge_nic_put64($%p, 0x%lx, 0x%016llx)",
	    (void *)bgep, addr, data));

	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;

#ifdef	__amd64
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) || bge_get_em64t_type() ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, addr + 4), (uint32_t)data);
		BGE_PCICHK(bgep);
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, addr), (uint32_t)(data >> 32));
	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), data);
	}
#elif defined(__sparc)
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, addr + 4), (uint32_t)data);
		BGE_PCICHK(bgep);
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, addr), (uint32_t)(data >> 32));
	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), data);
	}
#else
	ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), data);
#endif

	BGE_PCICHK(bgep);
}
1006 1073
/*
 * The DDI doesn't provide get/put functions for 128 bit data
 * so we put RCBs out as two 64-bit chunks instead.
 */
static void bge_nic_putrcb(bge_t *bgep, bge_regno_t addr, bge_rcb_t *rcbp);
#pragma	inline(bge_nic_putrcb)

/*
 * Write a Ring Control Block into NIC on-chip memory.  On chips that
 * cannot take 64-bit accesses it is written as four 32-bit words; the
 * word order differs between amd64 and sparc (see bge_nic_put64()).
 */
static void
bge_nic_putrcb(bge_t *bgep, bge_regno_t addr, bge_rcb_t *rcbp)
{
	uint64_t *p;

	BGE_TRACE(("bge_nic_putrcb($%p, 0x%lx, 0x%016llx:%04x:%04x:%08x)",
	    (void *)bgep, addr, rcbp->host_ring_addr,
	    rcbp->max_len, rcbp->flags, rcbp->nic_ring_addr));

	/* RCBs must be stored at 16-byte-aligned NIC addresses */
	ASSERT((addr % sizeof (*rcbp)) == 0);

	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;

	p = (void *)rcbp;
#ifdef	__amd64
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) || bge_get_em64t_type() ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr),
		    (uint32_t)(*p));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 4),
		    (uint32_t)(*p++ >> 32));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 8),
		    (uint32_t)(*p));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 12),
		    (uint32_t)(*p >> 32));

	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), *p++);
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr+8), *p);
	}
#elif defined(__sparc)
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 4),
		    (uint32_t)(*p));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr),
		    (uint32_t)(*p++ >> 32));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 12),
		    (uint32_t)(*p));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 8),
		    (uint32_t)(*p >> 32));
	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), *p++);
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr + 8), *p);
	}
#else
	ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), *p++);
	ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr + 8), *p);
#endif

	BGE_PCICHK(bgep);
}
1068 1135
static void bge_nic_zero(bge_t *bgep, bge_regno_t addr, uint32_t nbytes);
#pragma	inline(bge_nic_zero)

/*
 * Zero <nbytes> of NIC on-chip memory starting at <addr>.  The range
 * must lie entirely within one memory-window granule (asserted below),
 * since the window is positioned only once.
 */
static void
bge_nic_zero(bge_t *bgep, bge_regno_t addr, uint32_t nbytes)
{
	BGE_TRACE(("bge_nic_zero($%p, 0x%lx, 0x%x)",
	    (void *)bgep, addr, nbytes));

	ASSERT((addr & ~MWBAR_GRANULE_MASK) ==
	    ((addr+nbytes) & ~MWBAR_GRANULE_MASK));

	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;

	(void) ddi_device_zero(bgep->io_handle, PIO_ADDR(bgep, addr),
	    nbytes, 1, DDI_DATA_SZ08_ACC);
	BGE_PCICHK(bgep);
}
1089 1156
1090 1157 /*
1091 1158 * MII (PHY) register get/set access routines
1092 1159 *
1093 1160 * These use the chip's MII auto-access method, controlled by the
1094 1161 * MII Communication register at 0x044c, so the CPU doesn't have
1095 1162 * to fiddle with the individual bits.
1096 1163 */
1097 1164
1098 1165 #undef BGE_DBG
1099 1166 #define BGE_DBG BGE_DBG_MII /* debug flag for this code */
1100 1167
static uint16_t bge_mii_access(bge_t *bgep, bge_regno_t regno,
	uint16_t data, uint32_t cmd);
#pragma	no_inline(bge_mii_access)

/*
 * Perform one MII (PHY) transaction via the MI Communication register.
 * <cmd> selects read or write; <data> is the value for a write.
 * Returns the 16-bit data field of the completed transaction, or
 * 0xffff on timeout or READ FAILED.  Caller must hold <genlock>.
 */
static uint16_t
bge_mii_access(bge_t *bgep, bge_regno_t regno, uint16_t data, uint32_t cmd)
{
	uint32_t timeout;
	uint32_t regval1;
	uint32_t regval2;

	BGE_TRACE(("bge_mii_access($%p, 0x%lx, 0x%x, 0x%x)",
	    (void *)bgep, regno, data, cmd));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Assemble the command ...
	 */
	cmd |= data << MI_COMMS_DATA_SHIFT;
	cmd |= regno << MI_COMMS_REGISTER_SHIFT;
	cmd |= bgep->phy_mii_addr << MI_COMMS_ADDRESS_SHIFT;
	cmd |= MI_COMMS_START;

	/*
	 * Wait for any command already in progress ...
	 *
	 * Note: this *shouldn't* ever find that there is a command
	 * in progress, because we already hold the <genlock> mutex.
	 * Nonetheless, we have sometimes seen the MI_COMMS_START
	 * bit set here -- it seems that the chip can initiate MII
	 * accesses internally, even with polling OFF.
	 */
	regval1 = regval2 = bge_reg_get32(bgep, MI_COMMS_REG);
	for (timeout = 100; ; ) {
		if ((regval2 & MI_COMMS_START) == 0) {
			bge_reg_put32(bgep, MI_COMMS_REG, cmd);
			break;
		}
		if (--timeout == 0)
			break;
		drv_usecwait(10);
		regval2 = bge_reg_get32(bgep, MI_COMMS_REG);
	}

	/* Previous command never cleared: give up without issuing ours */
	if (timeout == 0)
		return ((uint16_t)~0u);

	if (timeout != 100)
		BGE_REPORT((bgep, "bge_mii_access: cmd 0x%x -- "
		    "MI_COMMS_START set for %d us; 0x%x->0x%x",
		    cmd, 10*(100-timeout), regval1, regval2));

	/*
	 * Now poll (up to ~10ms) for our own command to complete
	 */
	regval1 = bge_reg_get32(bgep, MI_COMMS_REG);
	for (timeout = 1000; ; ) {
		if ((regval1 & MI_COMMS_START) == 0)
			break;
		if (--timeout == 0)
			break;
		drv_usecwait(10);
		regval1 = bge_reg_get32(bgep, MI_COMMS_REG);
	}

	/*
	 * Drop out early if the READ FAILED bit is set -- this chip
	 * could be a 5703/4S, with a SerDes instead of a PHY!
	 */
	if (regval2 & MI_COMMS_READ_FAILED)
		return ((uint16_t)~0u);

	if (timeout == 0)
		return ((uint16_t)~0u);

	/*
	 * The PRM says to wait 5us after seeing the START bit clear
	 * and then re-read the register to get the final value of the
	 * data field, in order to avoid a race condition where the
	 * START bit is clear but the data field isn't yet valid.
	 *
	 * Note: we don't actually seem to encounter this race;
	 * except when the START bit is seen set again (see below),
	 * the data field doesn't change during this 5us interval.
	 */
	drv_usecwait(5);
	regval2 = bge_reg_get32(bgep, MI_COMMS_REG);

	/*
	 * Unfortunately, when following the PRM's instructions above,
	 * we have occasionally seen the START bit set again(!) in the
	 * value read after the 5us delay. This seems to be due to the
	 * chip autonomously starting another MII access internally.
	 * In such cases, the command/data/etc fields relate to the
	 * internal command, rather than the one that we thought had
	 * just finished. So in this case, we fall back to returning
	 * the data from the original read that showed START clear.
	 */
	if (regval2 & MI_COMMS_START) {
		BGE_REPORT((bgep, "bge_mii_access: cmd 0x%x -- "
		    "MI_COMMS_START set after transaction; 0x%x->0x%x",
		    cmd, regval1, regval2));
		regval2 = regval1;
	}

	if (regval2 & MI_COMMS_START)
		return ((uint16_t)~0u);

	if (regval2 & MI_COMMS_READ_FAILED)
		return ((uint16_t)~0u);

	return ((regval2 & MI_COMMS_DATA_MASK) >> MI_COMMS_DATA_SHIFT);
}
1212 1279
1213 1280 uint16_t bge_mii_get16(bge_t *bgep, bge_regno_t regno);
1214 1281 #pragma no_inline(bge_mii_get16)
1215 1282
1216 1283 uint16_t
1217 1284 bge_mii_get16(bge_t *bgep, bge_regno_t regno)
1218 1285 {
1219 1286 BGE_TRACE(("bge_mii_get16($%p, 0x%lx)",
1220 1287 (void *)bgep, regno));
1221 1288
1222 1289 ASSERT(mutex_owned(bgep->genlock));
1223 1290
1224 1291 if (DEVICE_5906_SERIES_CHIPSETS(bgep) && ((regno == MII_AUX_CONTROL) ||
1225 1292 (regno == MII_MSCONTROL)))
1226 1293 return (0);
1227 1294
1228 1295 return (bge_mii_access(bgep, regno, 0, MI_COMMS_COMMAND_READ));
1229 1296 }
1230 1297
1231 1298 void bge_mii_put16(bge_t *bgep, bge_regno_t regno, uint16_t data);
1232 1299 #pragma no_inline(bge_mii_put16)
1233 1300
1234 1301 void
1235 1302 bge_mii_put16(bge_t *bgep, bge_regno_t regno, uint16_t data)
1236 1303 {
1237 1304 BGE_TRACE(("bge_mii_put16($%p, 0x%lx, 0x%x)",
1238 1305 (void *)bgep, regno, data));
1239 1306
1240 1307 ASSERT(mutex_owned(bgep->genlock));
1241 1308
1242 1309 if (DEVICE_5906_SERIES_CHIPSETS(bgep) && ((regno == MII_AUX_CONTROL) ||
1243 1310 (regno == MII_MSCONTROL)))
1244 1311 return;
1245 1312
1246 1313 (void) bge_mii_access(bgep, regno, data, MI_COMMS_COMMAND_WRITE);
1247 1314 }
1248 1315
1249 1316 #undef BGE_DBG
1250 1317 #define BGE_DBG BGE_DBG_SEEPROM /* debug flag for this code */
1251 1318
1252 1319 #if BGE_SEE_IO32 || BGE_FLASH_IO32
1253 1320
/*
 * Basic SEEPROM get/set access routine
 *
 * This uses the chip's SEEPROM auto-access method, controlled by the
 * Serial EEPROM Address/Data Registers at 0x6838/683c, so the CPU
 * doesn't have to fiddle with the individual bits.
 *
 * The caller should hold <genlock> and *also* have already acquired
 * the right to access the SEEPROM, via bge_nvmem_acquire() above.
 *
 * Return value:
 *	0 on success,
 *	ENODATA on access timeout (maybe retryable: device may just be busy)
 *	EPROTO on other h/w or s/w errors.
 *
 * <*dp> is an input to a SEEPROM_ACCESS_WRITE operation, or an output
 * from a (successful) SEEPROM_ACCESS_READ.
 */
static int bge_seeprom_access(bge_t *bgep, uint32_t cmd, bge_regno_t addr,
		uint32_t *dp);
#pragma	no_inline(bge_seeprom_access)

static int
bge_seeprom_access(bge_t *bgep, uint32_t cmd, bge_regno_t addr, uint32_t *dp)
{
	uint32_t tries;
	uint32_t regval;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * On the newer chips that support both SEEPROM & Flash, we need
	 * to specifically enable SEEPROM access (Flash is the default).
	 * On older chips, we don't; SEEPROM is the only NVtype supported,
	 * and the NVM control registers don't exist ...
	 */
	switch (bgep->chipid.nvtype) {
	case BGE_NVTYPE_NONE:
	case BGE_NVTYPE_UNKNOWN:
		_NOTE(NOTREACHED)
	case BGE_NVTYPE_SEEPROM:
		break;

	case BGE_NVTYPE_LEGACY_SEEPROM:
	case BGE_NVTYPE_UNBUFFERED_FLASH:
	case BGE_NVTYPE_BUFFERED_FLASH:
	default:
		bge_reg_set32(bgep, NVM_CONFIG1_REG,
		    NVM_CFG1_LEGACY_SEEPROM_MODE);
		break;
	}

	/*
	 * Check there's no command in progress.
	 *
	 * Note: this *shouldn't* ever find that there is a command
	 * in progress, because we already hold the <genlock> mutex.
	 * Also, to ensure we don't have a conflict with the chip's
	 * internal firmware or a process accessing the same (shared)
	 * SEEPROM through the other port of a 5704, we've already
	 * been through the "software arbitration" protocol.
	 * So this is just a final consistency check: we shouldn't
	 * see EITHER the START bit (command started but not complete)
	 * OR the COMPLETE bit (command completed but not cleared).
	 */
	regval = bge_reg_get32(bgep, SERIAL_EEPROM_ADDRESS_REG);
	if (regval & SEEPROM_ACCESS_START)
		return (EPROTO);
	if (regval & SEEPROM_ACCESS_COMPLETE)
		return (EPROTO);

	/*
	 * Assemble the command ...
	 */
	cmd |= addr & SEEPROM_ACCESS_ADDRESS_MASK;
	addr >>= SEEPROM_ACCESS_ADDRESS_SIZE;
	addr <<= SEEPROM_ACCESS_DEVID_SHIFT;
	cmd |= addr & SEEPROM_ACCESS_DEVID_MASK;
	cmd |= SEEPROM_ACCESS_START;
	cmd |= SEEPROM_ACCESS_COMPLETE;
	/* Preserve the chip's current half-clock divider setting */
	cmd |= regval & SEEPROM_ACCESS_HALFCLOCK_MASK;

	bge_reg_put32(bgep, SERIAL_EEPROM_DATA_REG, *dp);
	bge_reg_put32(bgep, SERIAL_EEPROM_ADDRESS_REG, cmd);

	/*
	 * By observation, a successful access takes ~20us on a 5703/4,
	 * but apparently much longer (up to 1000us) on the obsolescent
	 * BCM5700/BCM5701.  We want to be sure we don't get any false
	 * timeouts here; but OTOH, we don't want a bogus access to lock
	 * out interrupts for longer than necessary. So we'll allow up
	 * to 1000us ...
	 */
	for (tries = 0; tries < 1000; ++tries) {
		regval = bge_reg_get32(bgep, SERIAL_EEPROM_ADDRESS_REG);
		if (regval & SEEPROM_ACCESS_COMPLETE)
			break;
		drv_usecwait(1);
	}

	if (regval & SEEPROM_ACCESS_COMPLETE) {
		/*
		 * All OK; read the SEEPROM data register, then write back
		 * the value read from the address register in order to
		 * clear the <complete> bit and leave the SEEPROM access
		 * state machine idle, ready for the next access ...
		 */
		BGE_DEBUG(("bge_seeprom_access: complete after %d us", tries));
		*dp = bge_reg_get32(bgep, SERIAL_EEPROM_DATA_REG);
		bge_reg_put32(bgep, SERIAL_EEPROM_ADDRESS_REG, regval);
		return (0);
	}

	/*
	 * Hmm ... what happened here?
	 *
	 * Most likely, the user addressed a non-existent SEEPROM. Or
	 * maybe the SEEPROM was busy internally (e.g. processing a write)
	 * and didn't respond to being addressed. Either way, it's left
	 * the SEEPROM access state machine wedged. So we'll reset it
	 * before we leave, so it's ready for next time ...
	 */
	BGE_DEBUG(("bge_seeprom_access: timed out after %d us", tries));
	bge_reg_set32(bgep, SERIAL_EEPROM_ADDRESS_REG, SEEPROM_ACCESS_INIT);
	return (ENODATA);
}
1380 1447
/*
 * Basic Flash get/set access routine
 *
 * These use the chip's Flash auto-access method, controlled by the
 * Flash Access Registers at 0x7000-701c, so the CPU doesn't have to
 * fiddle with the individual bits.
 *
 * The caller should hold <genlock> and *also* have already acquired
 * the right to access the Flash, via bge_nvmem_acquire() above.
 *
 * Return value:
 *	0 on success,
 *	ENODATA on access timeout (maybe retryable: device may just be busy)
 *	ENODEV if the NVmem device is missing or otherwise unusable
 *
 * <*dp> is an input to a NVM_FLASH_CMD_WR operation, or an output
 * from a (successful) NVM_FLASH_CMD_RD.
 */
static int bge_flash_access(bge_t *bgep, uint32_t cmd, bge_regno_t addr,
		uint32_t *dp);
#pragma	no_inline(bge_flash_access)

static int
bge_flash_access(bge_t *bgep, uint32_t cmd, bge_regno_t addr, uint32_t *dp)
{
	uint32_t tries;
	uint32_t regval;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * On the newer chips that support both SEEPROM & Flash, we need
	 * to specifically disable SEEPROM access while accessing Flash.
	 * The older chips don't support Flash, and the NVM registers don't
	 * exist, so we shouldn't be here at all!
	 */
	switch (bgep->chipid.nvtype) {
	case BGE_NVTYPE_NONE:
	case BGE_NVTYPE_UNKNOWN:
		_NOTE(NOTREACHED)
	case BGE_NVTYPE_SEEPROM:
		return (ENODEV);

	case BGE_NVTYPE_LEGACY_SEEPROM:
	case BGE_NVTYPE_UNBUFFERED_FLASH:
	case BGE_NVTYPE_BUFFERED_FLASH:
	default:
		bge_reg_clr32(bgep, NVM_CONFIG1_REG,
		    NVM_CFG1_LEGACY_SEEPROM_MODE);
		break;
	}

	/*
	 * Assemble the command ...
	 */
	addr &= NVM_FLASH_ADDR_MASK;
	cmd |= NVM_FLASH_CMD_DOIT;
	cmd |= NVM_FLASH_CMD_FIRST;
	cmd |= NVM_FLASH_CMD_LAST;
	cmd |= NVM_FLASH_CMD_DONE;

	bge_reg_put32(bgep, NVM_FLASH_WRITE_REG, *dp);
	bge_reg_put32(bgep, NVM_FLASH_ADDR_REG, addr);
	bge_reg_put32(bgep, NVM_FLASH_CMD_REG, cmd);

	/*
	 * Allow up to ~1000us (the loop below polls 1000 times with
	 * a 1us wait between tries) ...
	 */
	for (tries = 0; tries < 1000; ++tries) {
		regval = bge_reg_get32(bgep, NVM_FLASH_CMD_REG);
		if (regval & NVM_FLASH_CMD_DONE)
			break;
		drv_usecwait(1);
	}

	if (regval & NVM_FLASH_CMD_DONE) {
		/*
		 * All OK; read the data from the Flash read register
		 */
		BGE_DEBUG(("bge_flash_access: complete after %d us", tries));
		*dp = bge_reg_get32(bgep, NVM_FLASH_READ_REG);
		return (0);
	}

	/*
	 * Hmm ... what happened here?
	 *
	 * Most likely, the user addressed a non-existent Flash. Or
	 * maybe the Flash was busy internally (e.g. processing a write)
	 * and didn't respond to being addressed. Either way, there's
	 * nothing we can do here ...
	 */
	BGE_DEBUG(("bge_flash_access: timed out after %d us", tries));
	return (ENODATA);
}
1476 1543
1477 1544 /*
1478 1545 * The next two functions regulate access to the NVram (if fitted).
1479 1546 *
1480 1547 * On a 5704 (dual core) chip, there's only one SEEPROM and one Flash
1481 1548 * (SPI) interface, but they can be accessed through either port. These
1482 1549 * are managed by different instance of this driver and have no software
1483 1550 * state in common.
1484 1551 *
1485 1552 * In addition (and even on a single core chip) the chip's internal
1486 1553 * firmware can access the SEEPROM/Flash, most notably after a RESET
1487 1554 * when it may download code to run internally.
1488 1555 *
1489 1556 * So we need to arbitrate between these various software agents. For
1490 1557 * this purpose, the chip provides the Software Arbitration Register,
1491 1558 * which implements hardware(!) arbitration.
1492 1559 *
1493 1560 * This functionality didn't exist on older (5700/5701) chips, so there's
1494 1561 * nothing we can do by way of arbitration on those; also, if there's no
1495 1562 * SEEPROM/Flash fitted (or we couldn't determine what type), there's also
1496 1563 * nothing to do.
1497 1564 *
1498 1565 * The internal firmware appears to use Request 0, which is the highest
1499 1566 * priority. So we'd like to use Request 2, leaving one higher and one
1500 1567 * lower for any future developments ... but apparently this doesn't
1501 1568 * always work. So for now, the code uses Request 1 ;-(
1502 1569 */
1503 1570
1504 1571 #define NVM_READ_REQ NVM_READ_REQ1
1505 1572 #define NVM_RESET_REQ NVM_RESET_REQ1
1506 1573 #define NVM_SET_REQ NVM_SET_REQ1
1507 1574
static void bge_nvmem_relinquish(bge_t *bgep);
#pragma	no_inline(bge_nvmem_relinquish)

/*
 * Release our claim on the NVmem, undoing a successful (or pending)
 * bge_nvmem_acquire().  A no-op for NVtypes that need no arbitration.
 * Caller must hold <genlock>.
 */
static void
bge_nvmem_relinquish(bge_t *bgep)
{
	ASSERT(mutex_owned(bgep->genlock));

	switch (bgep->chipid.nvtype) {
	case BGE_NVTYPE_NONE:
	case BGE_NVTYPE_UNKNOWN:
		_NOTE(NOTREACHED)
		return;

	case BGE_NVTYPE_SEEPROM:
		/*
		 * No arbitration performed, no release needed
		 */
		return;

	case BGE_NVTYPE_LEGACY_SEEPROM:
	case BGE_NVTYPE_UNBUFFERED_FLASH:
	case BGE_NVTYPE_BUFFERED_FLASH:
	default:
		break;
	}

	/*
	 * Our own request should be present (whether or not granted) ...
	 */
	(void) bge_reg_get32(bgep, NVM_SW_ARBITRATION_REG);

	/*
	 * ... this will make it go away.
	 */
	bge_reg_put32(bgep, NVM_SW_ARBITRATION_REG, NVM_RESET_REQ);
	(void) bge_reg_get32(bgep, NVM_SW_ARBITRATION_REG);
}
1546 1613
/*
 * Arbitrate for access to the NVmem, if necessary
 *
 * Return value:
 *	0 on success
 *	EAGAIN if the device is in use (retryable)
 *	ENODEV if the NVmem device is missing or otherwise unusable
 */
static int bge_nvmem_acquire(bge_t *bgep);
#pragma	no_inline(bge_nvmem_acquire)

static int
bge_nvmem_acquire(bge_t *bgep)
{
	uint32_t regval;
	uint32_t tries;

	ASSERT(mutex_owned(bgep->genlock));

	switch (bgep->chipid.nvtype) {
	case BGE_NVTYPE_NONE:
	case BGE_NVTYPE_UNKNOWN:
		/*
		 * Access denied: no (recognisable) device fitted
		 */
		return (ENODEV);

	case BGE_NVTYPE_SEEPROM:
		/*
		 * Access granted: no arbitration needed (or possible)
		 */
		return (0);

	case BGE_NVTYPE_LEGACY_SEEPROM:
	case BGE_NVTYPE_UNBUFFERED_FLASH:
	case BGE_NVTYPE_BUFFERED_FLASH:
	default:
		/*
		 * Access conditional: conduct arbitration protocol
		 */
		break;
	}

	/*
	 * We're holding the per-port mutex <genlock>, so no-one other
	 * thread can be attempting to access the NVmem through *this*
	 * port. But it could be in use by the *other* port (of a 5704),
	 * or by the chip's internal firmware, so we have to go through
	 * the full (hardware) arbitration protocol ...
	 *
	 * Note that *because* we're holding <genlock>, the interrupt handler
	 * won't be able to progress.  So we're only willing to spin for a
	 * fairly short time.  Specifically:
	 *
	 *	We *must* wait long enough for the hardware to resolve all
	 *	requests and determine the winner.  Fortunately, this is
	 *	"almost instantaneous", even as observed by GHz CPUs.
	 *
	 *	A successful access by another Solaris thread (via either
	 *	port) typically takes ~20us.  So waiting a bit longer than
	 *	that will give a good chance of success, if the other user
	 *	*is* another thread on the other port.
	 *
	 *	However, the internal firmware can hold on to the NVmem
	 *	for *much* longer: at least 10 milliseconds just after a
	 *	RESET, and maybe even longer if the NVmem actually contains
	 *	code to download and run on the internal CPUs.
	 *
	 * So, we'll allow 50us; if that's not enough then it's up to the
	 * caller to retry later (hence the choice of return code EAGAIN).
	 */
	regval = bge_reg_get32(bgep, NVM_SW_ARBITRATION_REG);
	bge_reg_put32(bgep, NVM_SW_ARBITRATION_REG, NVM_SET_REQ);

	for (tries = 0; tries < 50; ++tries) {
		regval = bge_reg_get32(bgep, NVM_SW_ARBITRATION_REG);
		if (regval & NVM_WON_REQ1)
			break;
		drv_usecwait(1);
	}

	if (regval & NVM_WON_REQ1) {
		BGE_DEBUG(("bge_nvmem_acquire: won after %d us", tries));
		return (0);
	}

	/*
	 * Somebody else must be accessing the NVmem, so abandon our
	 * attempt to take control of it.  The caller can try again later ...
	 */
	BGE_DEBUG(("bge_nvmem_acquire: lost after %d us", tries));
	bge_nvmem_relinquish(bgep);
	return (EAGAIN);
}
1641 1708
1642 1709 /*
1643 1710 * This code assumes that the GPIO1 bit has been wired up to the NVmem
1644 1711 * write protect line in such a way that the NVmem is protected when
1645 1712 * GPIO1 is an input, or is an output but driven high. Thus, to make the
1646 1713 * NVmem writable we have to change GPIO1 to an output AND drive it low.
1647 1714 *
1648 1715 * Note: there's only one set of GPIO pins on a 5704, even though they
1649 1716 * can be accessed through either port. So the chip has to resolve what
1650 1717 * happens if the two ports program a single pin differently ... the rule
1651 1718 * it uses is that if the ports disagree about the *direction* of a pin,
1652 1719 * "output" wins over "input", but if they disagree about its *value* as
1653 1720 * an output, then the pin is TRISTATED instead! In such a case, no-one
1654 1721 * wins, and the external signal does whatever the external circuitry
1655 1722 * defines as the default -- which we've assumed is the PROTECTED state.
1656 1723 * So, we always change GPIO1 back to being an *input* whenever we're not
1657 1724 * specifically using it to unprotect the NVmem. This allows either port
1658 1725 * to update the NVmem, although obviously only one at a time!
1659 1726 *
1660 1727 * The caller should hold <genlock> and *also* have already acquired the
1661 1728 * right to access the NVmem, via bge_nvmem_acquire() above.
1662 1729 */
1663 1730 static void bge_nvmem_protect(bge_t *bgep, boolean_t protect);
1664 1731 #pragma inline(bge_nvmem_protect)
1665 1732
1666 1733 static void
1667 1734 bge_nvmem_protect(bge_t *bgep, boolean_t protect)
1668 1735 {
1669 1736 uint32_t regval;
1670 1737
1671 1738 ASSERT(mutex_owned(bgep->genlock));
1672 1739
1673 1740 regval = bge_reg_get32(bgep, MISC_LOCAL_CONTROL_REG);
1674 1741 if (protect) {
1675 1742 regval |= MLCR_MISC_PINS_OUTPUT_1;
1676 1743 regval &= ~MLCR_MISC_PINS_OUTPUT_ENABLE_1;
1677 1744 } else {
1678 1745 regval &= ~MLCR_MISC_PINS_OUTPUT_1;
1679 1746 regval |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
1680 1747 }
1681 1748 bge_reg_put32(bgep, MISC_LOCAL_CONTROL_REG, regval);
1682 1749 }
1683 1750
1684 1751 /*
1685 1752 * Now put it all together ...
1686 1753 *
1687 1754 * Try to acquire control of the NVmem; if successful, then:
1688 1755 * unprotect it (if we want to write to it)
1689 1756 * perform the requested access
1690 1757 * reprotect it (after a write)
1691 1758 * relinquish control
1692 1759 *
1693 1760 * Return value:
1694 1761 * 0 on success,
1695 1762 * EAGAIN if the device is in use (retryable)
1696 1763 * ENODATA on access timeout (maybe retryable: device may just be busy)
1697 1764 * ENODEV if the NVmem device is missing or otherwise unusable
1698 1765 * EPROTO on other h/w or s/w errors.
1699 1766 */
1700 1767 static int
1701 1768 bge_nvmem_rw32(bge_t *bgep, uint32_t cmd, bge_regno_t addr, uint32_t *dp)
1702 1769 {
1703 1770 int err;
1704 1771
1705 1772 if ((err = bge_nvmem_acquire(bgep)) == 0) {
1706 1773 switch (cmd) {
1707 1774 case BGE_SEE_READ:
1708 1775 err = bge_seeprom_access(bgep,
1709 1776 SEEPROM_ACCESS_READ, addr, dp);
1710 1777 break;
1711 1778
1712 1779 case BGE_SEE_WRITE:
1713 1780 bge_nvmem_protect(bgep, B_FALSE);
1714 1781 err = bge_seeprom_access(bgep,
1715 1782 SEEPROM_ACCESS_WRITE, addr, dp);
1716 1783 bge_nvmem_protect(bgep, B_TRUE);
1717 1784 break;
1718 1785
1719 1786 case BGE_FLASH_READ:
1720 1787 if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
1721 1788 DEVICE_5723_SERIES_CHIPSETS(bgep) ||
1722 1789 DEVICE_5717_SERIES_CHIPSETS(bgep) ||
1723 1790 DEVICE_5714_SERIES_CHIPSETS(bgep)) {
1724 1791 bge_reg_set32(bgep, NVM_ACCESS_REG,
1725 1792 NVM_ACCESS_ENABLE);
1726 1793 }
1727 1794 err = bge_flash_access(bgep,
1728 1795 NVM_FLASH_CMD_RD, addr, dp);
1729 1796 if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
1730 1797 DEVICE_5723_SERIES_CHIPSETS(bgep) ||
1731 1798 DEVICE_5717_SERIES_CHIPSETS(bgep) ||
1732 1799 DEVICE_5714_SERIES_CHIPSETS(bgep)) {
1733 1800 bge_reg_clr32(bgep, NVM_ACCESS_REG,
1734 1801 NVM_ACCESS_ENABLE);
1735 1802 }
1736 1803 break;
1737 1804
1738 1805 case BGE_FLASH_WRITE:
1739 1806 if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
1740 1807 DEVICE_5723_SERIES_CHIPSETS(bgep) ||
1741 1808 DEVICE_5717_SERIES_CHIPSETS(bgep) ||
1742 1809 DEVICE_5714_SERIES_CHIPSETS(bgep)) {
1743 1810 bge_reg_set32(bgep, NVM_ACCESS_REG,
1744 1811 NVM_WRITE_ENABLE|NVM_ACCESS_ENABLE);
1745 1812 }
1746 1813 bge_nvmem_protect(bgep, B_FALSE);
1747 1814 err = bge_flash_access(bgep,
1748 1815 NVM_FLASH_CMD_WR, addr, dp);
1749 1816 bge_nvmem_protect(bgep, B_TRUE);
1750 1817 if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
1751 1818 DEVICE_5723_SERIES_CHIPSETS(bgep) ||
1752 1819 DEVICE_5717_SERIES_CHIPSETS(bgep) ||
1753 1820 DEVICE_5714_SERIES_CHIPSETS(bgep)) {
1754 1821 bge_reg_clr32(bgep, NVM_ACCESS_REG,
1755 1822 NVM_WRITE_ENABLE|NVM_ACCESS_ENABLE);
1756 1823 }
1757 1824
1758 1825 break;
1759 1826
1760 1827 default:
1761 1828 _NOTE(NOTREACHED)
1762 1829 break;
1763 1830 }
1764 1831 bge_nvmem_relinquish(bgep);
1765 1832 }
1766 1833
1767 1834 BGE_DEBUG(("bge_nvmem_rw32: err %d", err));
1768 1835 return (err);
1769 1836 }
1770 1837
1771 1838 /*
1772 1839 * Attempt to get a MAC address from the SEEPROM or Flash, if any
1773 1840 */
1774 1841 static uint64_t bge_get_nvmac(bge_t *bgep);
1775 1842 #pragma no_inline(bge_get_nvmac)
1776 1843
1777 1844 static uint64_t
1778 1845 bge_get_nvmac(bge_t *bgep)
1779 1846 {
1780 1847 uint32_t mac_high;
1781 1848 uint32_t mac_low;
1782 1849 uint32_t addr;
1783 1850 uint32_t cmd;
1784 1851 uint64_t mac;
1785 1852
1786 1853 BGE_TRACE(("bge_get_nvmac($%p)",
1787 1854 (void *)bgep));
1788 1855
1789 1856 switch (bgep->chipid.nvtype) {
1790 1857 case BGE_NVTYPE_NONE:
1791 1858 case BGE_NVTYPE_UNKNOWN:
1792 1859 default:
1793 1860 return (0ULL);
1794 1861
1795 1862 case BGE_NVTYPE_SEEPROM:
1796 1863 case BGE_NVTYPE_LEGACY_SEEPROM:
1797 1864 cmd = BGE_SEE_READ;
1798 1865 break;
1799 1866
1800 1867 case BGE_NVTYPE_UNBUFFERED_FLASH:
1801 1868 case BGE_NVTYPE_BUFFERED_FLASH:
1802 1869 cmd = BGE_FLASH_READ;
1803 1870 break;
1804 1871 }
1805 1872
1806 1873 if (DEVICE_5906_SERIES_CHIPSETS(bgep))
1807 1874 addr = NVMEM_DATA_MAC_ADDRESS_5906;
1808 1875 else
1809 1876 addr = NVMEM_DATA_MAC_ADDRESS;
1810 1877
1811 1878 if (bge_nvmem_rw32(bgep, cmd, addr, &mac_high))
1812 1879 return (0ULL);
1813 1880 addr += 4;
1814 1881 if (bge_nvmem_rw32(bgep, cmd, addr, &mac_low))
1815 1882 return (0ULL);
1816 1883
1817 1884 /*
1818 1885 * The Broadcom chip is natively BIG-endian, so that's how the
1819 1886 * MAC address is represented in NVmem. We may need to swap it
1820 1887 * around on a little-endian host ...
1821 1888 */
1822 1889 #ifdef _BIG_ENDIAN
1823 1890 mac = mac_high;
1824 1891 mac = mac << 32;
1825 1892 mac |= mac_low;
1826 1893 #else
1827 1894 mac = BGE_BSWAP_32(mac_high);
1828 1895 mac = mac << 32;
1829 1896 mac |= BGE_BSWAP_32(mac_low);
1830 1897 #endif /* _BIG_ENDIAN */
1831 1898
1832 1899 return (mac);
1833 1900 }
1834 1901
1835 1902 #else /* BGE_SEE_IO32 || BGE_FLASH_IO32 */
1836 1903
1837 1904 /*
1838 1905 * Dummy version for when we're not supporting NVmem access
1839 1906 */
1840 1907 static uint64_t bge_get_nvmac(bge_t *bgep);
1841 1908 #pragma inline(bge_get_nvmac)
1842 1909
1843 1910 static uint64_t
1844 1911 bge_get_nvmac(bge_t *bgep)
1845 1912 {
1846 1913 _NOTE(ARGUNUSED(bgep))
1847 1914 return (0ULL);
1848 1915 }
1849 1916
1850 1917 #endif /* BGE_SEE_IO32 || BGE_FLASH_IO32 */
1851 1918
1852 1919 /*
1853 1920 * Determine the type of NVmem that is (or may be) attached to this chip,
1854 1921 */
1855 1922 static enum bge_nvmem_type bge_nvmem_id(bge_t *bgep);
1856 1923 #pragma no_inline(bge_nvmem_id)
1857 1924
1858 1925 static enum bge_nvmem_type
1859 1926 bge_nvmem_id(bge_t *bgep)
1860 1927 {
1861 1928 enum bge_nvmem_type nvtype;
1862 1929 uint32_t config1;
1863 1930
1864 1931 BGE_TRACE(("bge_nvmem_id($%p)",
1865 1932 (void *)bgep));
1866 1933
1867 1934 switch (bgep->chipid.device) {
1868 1935 default:
1869 1936 /*
1870 1937 * We shouldn't get here; it means we don't recognise
1871 1938 * the chip, which means we don't know how to determine
1872 1939 * what sort of NVmem (if any) it has. So we'll say
1873 1940 * NONE, to disable the NVmem access code ...
1874 1941 */
1875 1942 nvtype = BGE_NVTYPE_NONE;
1876 1943 break;
1877 1944
1878 1945 case DEVICE_ID_5700:
1879 1946 case DEVICE_ID_5700x:
1880 1947 case DEVICE_ID_5701:
1881 1948 /*
1882 1949 * These devices support *only* SEEPROMs
1883 1950 */
1884 1951 nvtype = BGE_NVTYPE_SEEPROM;
1885 1952 break;
1886 1953
1887 1954 case DEVICE_ID_5702:
1888 1955 case DEVICE_ID_5702fe:
|
↓ open down ↓ |
1429 lines elided |
↑ open up ↑ |
1889 1956 case DEVICE_ID_5703C:
1890 1957 case DEVICE_ID_5703S:
1891 1958 case DEVICE_ID_5704C:
1892 1959 case DEVICE_ID_5704S:
1893 1960 case DEVICE_ID_5704:
1894 1961 case DEVICE_ID_5705M:
1895 1962 case DEVICE_ID_5705C:
1896 1963 case DEVICE_ID_5705_2:
1897 1964 case DEVICE_ID_5717:
1898 1965 case DEVICE_ID_5718:
1966 + case DEVICE_ID_5719:
1967 + case DEVICE_ID_5720:
1899 1968 case DEVICE_ID_5724:
1900 1969 case DEVICE_ID_57760:
1901 1970 case DEVICE_ID_57780:
1902 1971 case DEVICE_ID_57788:
1903 1972 case DEVICE_ID_57790:
1904 1973 case DEVICE_ID_5780:
1905 1974 case DEVICE_ID_5782:
1906 1975 case DEVICE_ID_5784M:
1907 1976 case DEVICE_ID_5785:
1908 1977 case DEVICE_ID_5787:
1909 1978 case DEVICE_ID_5787M:
1910 1979 case DEVICE_ID_5788:
1911 1980 case DEVICE_ID_5789:
1912 1981 case DEVICE_ID_5751:
1913 1982 case DEVICE_ID_5751M:
1914 1983 case DEVICE_ID_5752:
1915 1984 case DEVICE_ID_5752M:
1916 1985 case DEVICE_ID_5754:
1917 1986 case DEVICE_ID_5755:
1918 1987 case DEVICE_ID_5755M:
1919 1988 case DEVICE_ID_5756M:
1920 1989 case DEVICE_ID_5721:
1921 1990 case DEVICE_ID_5722:
1922 1991 case DEVICE_ID_5723:
1923 1992 case DEVICE_ID_5761:
1924 1993 case DEVICE_ID_5761E:
1925 1994 case DEVICE_ID_5761S:
1926 1995 case DEVICE_ID_5761SE:
1927 1996 case DEVICE_ID_5764:
1928 1997 case DEVICE_ID_5714C:
1929 1998 case DEVICE_ID_5714S:
1930 1999 case DEVICE_ID_5715C:
1931 2000 case DEVICE_ID_5715S:
1932 2001 config1 = bge_reg_get32(bgep, NVM_CONFIG1_REG);
1933 2002 if (config1 & NVM_CFG1_FLASH_MODE)
1934 2003 if (config1 & NVM_CFG1_BUFFERED_MODE)
1935 2004 nvtype = BGE_NVTYPE_BUFFERED_FLASH;
1936 2005 else
1937 2006 nvtype = BGE_NVTYPE_UNBUFFERED_FLASH;
1938 2007 else
1939 2008 nvtype = BGE_NVTYPE_LEGACY_SEEPROM;
1940 2009 break;
1941 2010 case DEVICE_ID_5906:
1942 2011 case DEVICE_ID_5906M:
1943 2012 nvtype = BGE_NVTYPE_BUFFERED_FLASH;
1944 2013 break;
1945 2014 }
1946 2015
1947 2016 return (nvtype);
1948 2017 }
1949 2018
1950 2019 #undef BGE_DBG
1951 2020 #define BGE_DBG BGE_DBG_CHIP /* debug flag for this code */
1952 2021
1953 2022 static void
1954 2023 bge_init_recv_rule(bge_t *bgep)
1955 2024 {
1956 2025 bge_recv_rule_t *rulep = bgep->recv_rules;
1957 2026 uint32_t i;
1958 2027
1959 2028 /*
1960 2029 * Initialize receive rule registers.
1961 2030 * Note that rules may persist across each bge_m_start/stop() call.
1962 2031 */
1963 2032 for (i = 0; i < RECV_RULES_NUM_MAX; i++, rulep++) {
1964 2033 bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep->mask_value);
1965 2034 bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep->control);
1966 2035 }
1967 2036 }
1968 2037
1969 2038 /*
1970 2039 * Using the values captured by bge_chip_cfg_init(), and additional probes
1971 2040 * as required, characterise the chip fully: determine the label by which
1972 2041 * to refer to this chip, the correct settings for various registers, and
1973 2042 * of course whether the device and/or subsystem are supported!
1974 2043 */
1975 2044 int bge_chip_id_init(bge_t *bgep);
1976 2045 #pragma no_inline(bge_chip_id_init)
1977 2046
1978 2047 int
1979 2048 bge_chip_id_init(bge_t *bgep)
1980 2049 {
1981 2050 char buf[MAXPATHLEN]; /* any risk of stack overflow? */
1982 2051 boolean_t sys_ok;
1983 2052 boolean_t dev_ok;
1984 2053 chip_id_t *cidp;
1985 2054 uint32_t subid;
1986 2055 char *devname;
1987 2056 char *sysname;
1988 2057 int *ids;
1989 2058 int err;
1990 2059 uint_t i;
1991 2060
1992 2061 sys_ok = dev_ok = B_FALSE;
1993 2062 cidp = &bgep->chipid;
1994 2063
1995 2064 /*
1996 2065 * Check the PCI device ID to determine the generic chip type and
1997 2066 * select parameters that depend on this.
1998 2067 *
1999 2068 * Note: because the SPARC platforms in general don't fit the
2000 2069 * SEEPROM 'behind' the chip, the PCI revision ID register reads
2001 2070 * as zero - which is why we use <asic_rev> rather than <revision>
2002 2071 * below ...
2003 2072 *
2004 2073 * Note: in general we can't distinguish between the Copper/SerDes
2005 2074 * versions by ID alone, as some Copper devices (e.g. some but not
2006 2075 * all 5703Cs) have the same ID as the SerDes equivalents. So we
2007 2076 * treat them the same here, and the MII code works out the media
2008 2077 * type later on ...
2009 2078 */
2010 2079 cidp->mbuf_base = bge_mbuf_pool_base;
2011 2080 cidp->mbuf_length = bge_mbuf_pool_len;
2012 2081 cidp->recv_slots = BGE_RECV_SLOTS_USED;
2013 2082 cidp->bge_dma_rwctrl = bge_dma_rwctrl;
2014 2083 cidp->pci_type = BGE_PCI_X;
2015 2084 cidp->statistic_type = BGE_STAT_BLK;
2016 2085 cidp->mbuf_lo_water_rdma = bge_mbuf_lo_water_rdma;
2017 2086 cidp->mbuf_lo_water_rmac = bge_mbuf_lo_water_rmac;
2018 2087 cidp->mbuf_hi_water = bge_mbuf_hi_water;
2019 2088 cidp->rx_ticks_norm = bge_rx_ticks_norm;
2020 2089 cidp->rx_count_norm = bge_rx_count_norm;
2021 2090 cidp->tx_ticks_norm = bge_tx_ticks_norm;
|
↓ open down ↓ |
113 lines elided |
↑ open up ↑ |
2022 2091 cidp->tx_count_norm = bge_tx_count_norm;
2023 2092 cidp->mask_pci_int = MHCR_MASK_PCI_INT_OUTPUT;
2024 2093
2025 2094 if (cidp->rx_rings == 0 || cidp->rx_rings > BGE_RECV_RINGS_MAX)
2026 2095 cidp->rx_rings = BGE_RECV_RINGS_DEFAULT;
2027 2096 if (cidp->tx_rings == 0 || cidp->tx_rings > BGE_SEND_RINGS_MAX)
2028 2097 cidp->tx_rings = BGE_SEND_RINGS_DEFAULT;
2029 2098
2030 2099 cidp->msi_enabled = B_FALSE;
2031 2100
2101 + if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) >
2102 + MHCR_CHIP_ASIC_REV_PRODID ||
2103 + MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
2104 + MHCR_CHIP_ASIC_REV_5906 ||
2105 + MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
2106 + MHCR_CHIP_ASIC_REV_5700 ||
2107 + MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
2108 + MHCR_CHIP_ASIC_REV_5701 ||
2109 + MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
2110 + MHCR_CHIP_ASIC_REV_5750)
2111 + /*
2112 + * Just a plain reset; the "check" code breaks these chips
2113 + */
2114 + cidp->flags |= CHIP_FLAG_NO_CHECK_RESET;
2115 +
2032 2116 switch (cidp->device) {
2033 2117 case DEVICE_ID_5717:
2034 2118 case DEVICE_ID_5718:
2119 + case DEVICE_ID_5719:
2120 + case DEVICE_ID_5720:
2035 2121 case DEVICE_ID_5724:
2036 2122 if (cidp->device == DEVICE_ID_5717)
2037 2123 cidp->chip_label = 5717;
2038 2124 else if (cidp->device == DEVICE_ID_5718)
2039 2125 cidp->chip_label = 5718;
2126 + else if (cidp->device == DEVICE_ID_5719)
2127 + cidp->chip_label = 5719;
2128 + else if (cidp->device == DEVICE_ID_5720)
2129 + cidp->chip_label = 5720;
2040 2130 else
2041 2131 cidp->chip_label = 5724;
2042 2132 cidp->msi_enabled = bge_enable_msi;
2043 2133 #ifdef __sparc
2044 2134 cidp->mask_pci_int = LE_32(MHCR_MASK_PCI_INT_OUTPUT);
2045 2135 #endif
2046 2136 cidp->bge_dma_rwctrl = LE_32(PDRWCR_VAR_5717);
2047 2137 cidp->pci_type = BGE_PCI_E;
2048 2138 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2049 2139 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5717;
2050 2140 cidp->mbuf_hi_water = MBUF_HIWAT_5717;
2051 2141 cidp->mbuf_base = bge_mbuf_pool_base_5705;
2052 2142 cidp->mbuf_length = bge_mbuf_pool_len_5705;
2053 - cidp->recv_slots = BGE_RECV_SLOTS_5705;
2143 + cidp->recv_slots = BGE_RECV_SLOTS_5717;
2054 2144 cidp->bge_mlcr_default = MLCR_DEFAULT_5717;
2055 2145 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2056 2146 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2057 2147 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2058 2148 cidp->statistic_type = BGE_STAT_REG;
2059 2149 dev_ok = B_TRUE;
2060 2150 break;
2061 2151
2062 2152 case DEVICE_ID_5700:
2063 2153 case DEVICE_ID_5700x:
2064 2154 cidp->chip_label = 5700;
2065 2155 cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2066 2156 break;
2067 2157
2068 2158 case DEVICE_ID_5701:
2069 2159 cidp->chip_label = 5701;
2070 2160 dev_ok = B_TRUE;
2071 2161 cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2072 2162 break;
2073 2163
2074 2164 case DEVICE_ID_5702:
2075 2165 case DEVICE_ID_5702fe:
2076 2166 cidp->chip_label = 5702;
2077 2167 dev_ok = B_TRUE;
2078 2168 cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2079 2169 cidp->pci_type = BGE_PCI;
2080 2170 break;
2081 2171
2082 2172 case DEVICE_ID_5703C:
2083 2173 case DEVICE_ID_5703S:
2084 2174 case DEVICE_ID_5703:
2085 2175 /*
2086 2176 * Revision A0 of the 5703/5793 had various errata
2087 2177 * that we can't or don't work around, so it's not
2088 2178 * supported, but all later versions are
2089 2179 */
2090 2180 cidp->chip_label = cidp->subven == VENDOR_ID_SUN ? 5793 : 5703;
2091 2181 if (bgep->chipid.asic_rev != MHCR_CHIP_REV_5703_A0)
2092 2182 dev_ok = B_TRUE;
2093 2183 cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2094 2184 break;
2095 2185
2096 2186 case DEVICE_ID_5704C:
2097 2187 case DEVICE_ID_5704S:
2098 2188 case DEVICE_ID_5704:
2099 2189 cidp->chip_label = cidp->subven == VENDOR_ID_SUN ? 5794 : 5704;
2100 2190 cidp->mbuf_base = bge_mbuf_pool_base_5704;
2101 2191 cidp->mbuf_length = bge_mbuf_pool_len_5704;
2102 2192 dev_ok = B_TRUE;
2103 2193 cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2104 2194 break;
2105 2195
2106 2196 case DEVICE_ID_5705C:
2107 2197 case DEVICE_ID_5705M:
2108 2198 case DEVICE_ID_5705MA3:
2109 2199 case DEVICE_ID_5705F:
2110 2200 case DEVICE_ID_5705_2:
2111 2201 case DEVICE_ID_5754:
2112 2202 if (cidp->device == DEVICE_ID_5754) {
2113 2203 cidp->chip_label = 5754;
2114 2204 cidp->pci_type = BGE_PCI_E;
2115 2205 } else {
2116 2206 cidp->chip_label = 5705;
2117 2207 cidp->pci_type = BGE_PCI;
2118 2208 cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2119 2209 }
2120 2210 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2121 2211 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2122 2212 cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2123 2213 cidp->mbuf_base = bge_mbuf_pool_base_5705;
2124 2214 cidp->mbuf_length = bge_mbuf_pool_len_5705;
2125 2215 cidp->recv_slots = BGE_RECV_SLOTS_5705;
2126 2216 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2127 2217 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2128 2218 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2129 2219 cidp->statistic_type = BGE_STAT_REG;
2130 2220 dev_ok = B_TRUE;
2131 2221 break;
2132 2222
2133 2223 case DEVICE_ID_5906:
2134 2224 case DEVICE_ID_5906M:
2135 2225 cidp->chip_label = 5906;
2136 2226 cidp->pci_type = BGE_PCI_E;
2137 2227 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5906;
2138 2228 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5906;
2139 2229 cidp->mbuf_hi_water = MBUF_HIWAT_5906;
2140 2230 cidp->mbuf_base = bge_mbuf_pool_base;
2141 2231 cidp->mbuf_length = bge_mbuf_pool_len;
2142 2232 cidp->recv_slots = BGE_RECV_SLOTS_5705;
2143 2233 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2144 2234 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2145 2235 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2146 2236 cidp->statistic_type = BGE_STAT_REG;
2147 2237 dev_ok = B_TRUE;
2148 2238 break;
2149 2239
2150 2240 case DEVICE_ID_5753:
2151 2241 cidp->chip_label = 5753;
2152 2242 cidp->pci_type = BGE_PCI_E;
2153 2243 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2154 2244 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2155 2245 cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2156 2246 cidp->mbuf_base = bge_mbuf_pool_base_5705;
2157 2247 cidp->mbuf_length = bge_mbuf_pool_len_5705;
2158 2248 cidp->recv_slots = BGE_RECV_SLOTS_5705;
2159 2249 cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2160 2250 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2161 2251 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2162 2252 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2163 2253 cidp->statistic_type = BGE_STAT_REG;
2164 2254 dev_ok = B_TRUE;
2165 2255 break;
2166 2256
2167 2257 case DEVICE_ID_5755:
2168 2258 case DEVICE_ID_5755M:
2169 2259 cidp->chip_label = 5755;
2170 2260 cidp->pci_type = BGE_PCI_E;
2171 2261 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2172 2262 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2173 2263 cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2174 2264 cidp->mbuf_base = bge_mbuf_pool_base_5705;
2175 2265 cidp->mbuf_length = bge_mbuf_pool_len_5705;
2176 2266 cidp->recv_slots = BGE_RECV_SLOTS_5705;
2177 2267 cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2178 2268 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2179 2269 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2180 2270 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2181 2271 if (cidp->device == DEVICE_ID_5755M)
2182 2272 cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2183 2273 cidp->statistic_type = BGE_STAT_REG;
2184 2274 dev_ok = B_TRUE;
2185 2275 break;
2186 2276
2187 2277 case DEVICE_ID_5756M:
2188 2278 /*
2189 2279 * This is nearly identical to the 5755M.
2190 2280 * (Actually reports the 5755 chip ID.)
2191 2281 */
2192 2282 cidp->chip_label = 5756;
2193 2283 cidp->pci_type = BGE_PCI_E;
2194 2284 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2195 2285 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2196 2286 cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2197 2287 cidp->mbuf_base = bge_mbuf_pool_base_5705;
2198 2288 cidp->mbuf_length = bge_mbuf_pool_len_5705;
2199 2289 cidp->recv_slots = BGE_RECV_SLOTS_5705;
2200 2290 cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2201 2291 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2202 2292 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2203 2293 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2204 2294 cidp->statistic_type = BGE_STAT_REG;
2205 2295 dev_ok = B_TRUE;
2206 2296 break;
2207 2297
2208 2298 case DEVICE_ID_5787:
2209 2299 case DEVICE_ID_5787M:
2210 2300 cidp->chip_label = 5787;
2211 2301 cidp->pci_type = BGE_PCI_E;
2212 2302 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2213 2303 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2214 2304 cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2215 2305 cidp->mbuf_base = bge_mbuf_pool_base_5705;
2216 2306 cidp->mbuf_length = bge_mbuf_pool_len_5705;
2217 2307 cidp->recv_slots = BGE_RECV_SLOTS_5705;
2218 2308 cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2219 2309 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2220 2310 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2221 2311 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2222 2312 cidp->statistic_type = BGE_STAT_REG;
2223 2313 dev_ok = B_TRUE;
2224 2314 break;
2225 2315
2226 2316 case DEVICE_ID_5723:
2227 2317 case DEVICE_ID_5761:
2228 2318 case DEVICE_ID_5761E:
2229 2319 case DEVICE_ID_5761S:
2230 2320 case DEVICE_ID_5761SE:
2231 2321 case DEVICE_ID_5784M:
2232 2322 case DEVICE_ID_57760:
2233 2323 case DEVICE_ID_57780:
2234 2324 case DEVICE_ID_57788:
2235 2325 case DEVICE_ID_57790:
2236 2326 cidp->msi_enabled = bge_enable_msi;
2237 2327 /*
2238 2328 * We don't use MSI for BCM5764 and BCM5785, as the
2239 2329 * status block may fail to update when the network
2240 2330 * traffic is heavy.
2241 2331 */
2242 2332 /* FALLTHRU */
2243 2333 case DEVICE_ID_5785:
2244 2334 case DEVICE_ID_5764:
2245 2335 if (cidp->device == DEVICE_ID_5723)
2246 2336 cidp->chip_label = 5723;
2247 2337 else if (cidp->device == DEVICE_ID_5764)
2248 2338 cidp->chip_label = 5764;
2249 2339 else if (cidp->device == DEVICE_ID_5784M)
2250 2340 cidp->chip_label = 5784;
2251 2341 else if (cidp->device == DEVICE_ID_5785)
2252 2342 cidp->chip_label = 5785;
2253 2343 else if (cidp->device == DEVICE_ID_57760)
2254 2344 cidp->chip_label = 57760;
2255 2345 else if (cidp->device == DEVICE_ID_57780)
2256 2346 cidp->chip_label = 57780;
2257 2347 else if (cidp->device == DEVICE_ID_57788)
2258 2348 cidp->chip_label = 57788;
2259 2349 else if (cidp->device == DEVICE_ID_57790)
2260 2350 cidp->chip_label = 57790;
2261 2351 else
2262 2352 cidp->chip_label = 5761;
2263 2353 cidp->bge_dma_rwctrl = bge_dma_rwctrl_5721;
2264 2354 cidp->pci_type = BGE_PCI_E;
2265 2355 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2266 2356 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2267 2357 cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2268 2358 cidp->mbuf_base = bge_mbuf_pool_base_5705;
2269 2359 cidp->mbuf_length = bge_mbuf_pool_len_5705;
2270 2360 cidp->recv_slots = BGE_RECV_SLOTS_5705;
2271 2361 cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2272 2362 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2273 2363 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2274 2364 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2275 2365 cidp->statistic_type = BGE_STAT_REG;
2276 2366 dev_ok = B_TRUE;
2277 2367 break;
2278 2368
2279 2369 /* PCI-X device, identical to 5714 */
2280 2370 case DEVICE_ID_5780:
2281 2371 cidp->chip_label = 5780;
2282 2372 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2283 2373 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2284 2374 cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2285 2375 cidp->mbuf_base = bge_mbuf_pool_base_5721;
2286 2376 cidp->mbuf_length = bge_mbuf_pool_len_5721;
2287 2377 cidp->recv_slots = BGE_RECV_SLOTS_5721;
2288 2378 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2289 2379 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2290 2380 cidp->statistic_type = BGE_STAT_REG;
2291 2381 dev_ok = B_TRUE;
2292 2382 break;
2293 2383
2294 2384 case DEVICE_ID_5782:
2295 2385 /*
2296 2386 * Apart from the label, we treat this as a 5705(?)
2297 2387 */
2298 2388 cidp->chip_label = 5782;
2299 2389 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2300 2390 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2301 2391 cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2302 2392 cidp->mbuf_base = bge_mbuf_pool_base_5705;
2303 2393 cidp->mbuf_length = bge_mbuf_pool_len_5705;
2304 2394 cidp->recv_slots = BGE_RECV_SLOTS_5705;
2305 2395 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2306 2396 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2307 2397 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2308 2398 cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2309 2399 cidp->statistic_type = BGE_STAT_REG;
2310 2400 dev_ok = B_TRUE;
2311 2401 break;
2312 2402
2313 2403 case DEVICE_ID_5788:
2314 2404 /*
2315 2405 * Apart from the label, we treat this as a 5705(?)
2316 2406 */
2317 2407 cidp->chip_label = 5788;
2318 2408 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2319 2409 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2320 2410 cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2321 2411 cidp->mbuf_base = bge_mbuf_pool_base_5705;
2322 2412 cidp->mbuf_length = bge_mbuf_pool_len_5705;
2323 2413 cidp->recv_slots = BGE_RECV_SLOTS_5705;
2324 2414 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2325 2415 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2326 2416 cidp->statistic_type = BGE_STAT_REG;
2327 2417 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2328 2418 dev_ok = B_TRUE;
2329 2419 break;
2330 2420
2331 2421 case DEVICE_ID_5714C:
2332 2422 if (cidp->revision >= REVISION_ID_5714_A2)
2333 2423 cidp->msi_enabled = bge_enable_msi;
2334 2424 /* FALLTHRU */
2335 2425 case DEVICE_ID_5714S:
2336 2426 cidp->chip_label = 5714;
2337 2427 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2338 2428 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2339 2429 cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2340 2430 cidp->mbuf_base = bge_mbuf_pool_base_5721;
2341 2431 cidp->mbuf_length = bge_mbuf_pool_len_5721;
2342 2432 cidp->recv_slots = BGE_RECV_SLOTS_5721;
2343 2433 cidp->bge_dma_rwctrl = bge_dma_rwctrl_5714;
2344 2434 cidp->bge_mlcr_default = bge_mlcr_default_5714;
2345 2435 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2346 2436 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2347 2437 cidp->pci_type = BGE_PCI_E;
2348 2438 cidp->statistic_type = BGE_STAT_REG;
2349 2439 dev_ok = B_TRUE;
2350 2440 break;
2351 2441
2352 2442 case DEVICE_ID_5715C:
2353 2443 case DEVICE_ID_5715S:
2354 2444 cidp->chip_label = 5715;
2355 2445 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2356 2446 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2357 2447 cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2358 2448 cidp->mbuf_base = bge_mbuf_pool_base_5721;
2359 2449 cidp->mbuf_length = bge_mbuf_pool_len_5721;
2360 2450 cidp->recv_slots = BGE_RECV_SLOTS_5721;
2361 2451 cidp->bge_dma_rwctrl = bge_dma_rwctrl_5715;
2362 2452 cidp->bge_mlcr_default = bge_mlcr_default_5714;
2363 2453 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2364 2454 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2365 2455 cidp->pci_type = BGE_PCI_E;
2366 2456 cidp->statistic_type = BGE_STAT_REG;
2367 2457 if (cidp->revision >= REVISION_ID_5715_A2)
2368 2458 cidp->msi_enabled = bge_enable_msi;
2369 2459 dev_ok = B_TRUE;
2370 2460 break;
2371 2461
2372 2462 case DEVICE_ID_5721:
2373 2463 cidp->chip_label = 5721;
2374 2464 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2375 2465 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2376 2466 cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2377 2467 cidp->mbuf_base = bge_mbuf_pool_base_5721;
2378 2468 cidp->mbuf_length = bge_mbuf_pool_len_5721;
2379 2469 cidp->recv_slots = BGE_RECV_SLOTS_5721;
2380 2470 cidp->bge_dma_rwctrl = bge_dma_rwctrl_5721;
2381 2471 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2382 2472 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2383 2473 cidp->pci_type = BGE_PCI_E;
2384 2474 cidp->statistic_type = BGE_STAT_REG;
2385 2475 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2386 2476 dev_ok = B_TRUE;
2387 2477 break;
2388 2478
2389 2479 case DEVICE_ID_5722:
2390 2480 cidp->chip_label = 5722;
2391 2481 cidp->pci_type = BGE_PCI_E;
2392 2482 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2393 2483 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2394 2484 cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2395 2485 cidp->mbuf_base = bge_mbuf_pool_base_5705;
2396 2486 cidp->mbuf_length = bge_mbuf_pool_len_5705;
2397 2487 cidp->recv_slots = BGE_RECV_SLOTS_5705;
2398 2488 cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2399 2489 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2400 2490 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2401 2491 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2402 2492 cidp->statistic_type = BGE_STAT_REG;
2403 2493 dev_ok = B_TRUE;
2404 2494 break;
2405 2495
2406 2496 case DEVICE_ID_5751:
2407 2497 case DEVICE_ID_5751M:
2408 2498 cidp->chip_label = 5751;
2409 2499 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2410 2500 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2411 2501 cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2412 2502 cidp->mbuf_base = bge_mbuf_pool_base_5721;
2413 2503 cidp->mbuf_length = bge_mbuf_pool_len_5721;
2414 2504 cidp->recv_slots = BGE_RECV_SLOTS_5721;
2415 2505 cidp->bge_dma_rwctrl = bge_dma_rwctrl_5721;
2416 2506 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2417 2507 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2418 2508 cidp->pci_type = BGE_PCI_E;
2419 2509 cidp->statistic_type = BGE_STAT_REG;
2420 2510 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2421 2511 dev_ok = B_TRUE;
2422 2512 break;
2423 2513
2424 2514 case DEVICE_ID_5752:
2425 2515 case DEVICE_ID_5752M:
2426 2516 cidp->chip_label = 5752;
2427 2517 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2428 2518 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2429 2519 cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2430 2520 cidp->mbuf_base = bge_mbuf_pool_base_5721;
2431 2521 cidp->mbuf_length = bge_mbuf_pool_len_5721;
2432 2522 cidp->recv_slots = BGE_RECV_SLOTS_5721;
2433 2523 cidp->bge_dma_rwctrl = bge_dma_rwctrl_5721;
2434 2524 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2435 2525 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2436 2526 cidp->pci_type = BGE_PCI_E;
2437 2527 cidp->statistic_type = BGE_STAT_REG;
2438 2528 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2439 2529 dev_ok = B_TRUE;
2440 2530 break;
2441 2531
2442 2532 case DEVICE_ID_5789:
2443 2533 cidp->chip_label = 5789;
2444 2534 cidp->mbuf_base = bge_mbuf_pool_base_5721;
2445 2535 cidp->mbuf_length = bge_mbuf_pool_len_5721;
2446 2536 cidp->recv_slots = BGE_RECV_SLOTS_5721;
2447 2537 cidp->bge_dma_rwctrl = bge_dma_rwctrl_5721;
2448 2538 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2449 2539 cidp->tx_rings = BGE_RECV_RINGS_MAX_5705;
2450 2540 cidp->pci_type = BGE_PCI_E;
2451 2541 cidp->statistic_type = BGE_STAT_REG;
2452 2542 cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2453 2543 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2454 2544 cidp->msi_enabled = B_TRUE;
2455 2545 dev_ok = B_TRUE;
2456 2546 break;
2457 2547
2458 2548 }
2459 2549
2460 2550 /*
2461 2551 * Setup the default jumbo parameter.
2462 2552 */
2463 2553 cidp->ethmax_size = ETHERMAX;
2464 2554 cidp->snd_buff_size = BGE_SEND_BUFF_SIZE_DEFAULT;
2465 2555 cidp->std_buf_size = BGE_STD_BUFF_SIZE;
2466 2556
2467 2557 /*
2468 2558 * If jumbo is enabled and this kind of chipset supports jumbo feature,
2469 2559 * setup below jumbo specific parameters.
2470 2560 *
2471 2561 * For BCM5714/5715, there is only one standard receive ring. So the
2472 2562 * std buffer size should be set to BGE_JUMBO_BUFF_SIZE when jumbo
2473 2563 * feature is enabled.
2474 2564 */
2475 2565 if (!(cidp->flags & CHIP_FLAG_NO_JUMBO) &&
2476 2566 (cidp->default_mtu > BGE_DEFAULT_MTU)) {
2477 2567 if (DEVICE_5714_SERIES_CHIPSETS(bgep)) {
2478 2568 cidp->mbuf_lo_water_rdma =
2479 2569 RDMA_MBUF_LOWAT_5714_JUMBO;
2480 2570 cidp->mbuf_lo_water_rmac =
2481 2571 MAC_RX_MBUF_LOWAT_5714_JUMBO;
2482 2572 cidp->mbuf_hi_water = MBUF_HIWAT_5714_JUMBO;
2483 2573 cidp->jumbo_slots = 0;
2484 2574 cidp->std_buf_size = BGE_JUMBO_BUFF_SIZE;
2485 2575 } else {
2486 2576 cidp->mbuf_lo_water_rdma =
2487 2577 RDMA_MBUF_LOWAT_JUMBO;
2488 2578 cidp->mbuf_lo_water_rmac =
2489 2579 MAC_RX_MBUF_LOWAT_JUMBO;
2490 2580 cidp->mbuf_hi_water = MBUF_HIWAT_JUMBO;
2491 2581 cidp->jumbo_slots = BGE_JUMBO_SLOTS_USED;
2492 2582 }
2493 2583 cidp->recv_jumbo_size = BGE_JUMBO_BUFF_SIZE;
2494 2584 cidp->snd_buff_size = BGE_SEND_BUFF_SIZE_JUMBO;
2495 2585 cidp->ethmax_size = cidp->default_mtu +
2496 2586 sizeof (struct ether_header);
2497 2587 }
2498 2588
2499 2589 /*
2500 2590 * Identify the NV memory type: SEEPROM or Flash?
2501 2591 */
2502 2592 cidp->nvtype = bge_nvmem_id(bgep);
2503 2593
2504 2594 /*
2505 2595 * Now, we want to check whether this device is part of a
2506 2596 * supported subsystem (e.g., on the motherboard of a Sun
2507 2597 * branded platform).
2508 2598 *
2509 2599 * Rule 1: If the Subsystem Vendor ID is "Sun", then it's OK ;-)
2510 2600 */
2511 2601 if (cidp->subven == VENDOR_ID_SUN)
2512 2602 sys_ok = B_TRUE;
2513 2603
2514 2604 /*
2515 2605 * Rule 2: If it's on the list on known subsystems, then it's OK.
2516 2606 * Note: 0x14e41647 should *not* appear in the list, but the code
2517 2607 * doesn't enforce that.
2518 2608 */
2519 2609 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo,
2520 2610 DDI_PROP_DONTPASS, knownids_propname, &ids, &i);
2521 2611 if (err == DDI_PROP_SUCCESS) {
2522 2612 /*
2523 2613 * Got the list; scan for a matching subsystem vendor/device
2524 2614 */
2525 2615 subid = (cidp->subven << 16) | cidp->subdev;
2526 2616 while (i--)
2527 2617 if (ids[i] == subid)
2528 2618 sys_ok = B_TRUE;
2529 2619 ddi_prop_free(ids);
2530 2620 }
2531 2621
2532 2622 /*
2533 2623 * Rule 3: If it's a Taco/ENWS motherboard device, then it's OK
2534 2624 *
2535 2625 * Unfortunately, early SunBlade 1500s and 2500s didn't reprogram
2536 2626 * the Subsystem Vendor ID, so it defaults to Broadcom. Therefore,
2537 2627 * we have to check specially for the exact device paths to the
2538 2628 * motherboard devices on those platforms ;-(
2539 2629 *
2540 2630 * Note: we can't just use the "supported-subsystems" mechanism
2541 2631 * above, because the entry would have to be 0x14e41647 -- which
2542 2632 * would then accept *any* plugin card that *didn't* contain a
2543 2633 * (valid) SEEPROM ;-(
2544 2634 */
2545 2635 sysname = ddi_node_name(ddi_root_node());
2546 2636 devname = ddi_pathname(bgep->devinfo, buf);
2547 2637 ASSERT(strlen(devname) > 0);
2548 2638 if (strcmp(sysname, "SUNW,Sun-Blade-1500") == 0) /* Taco */
2549 2639 if (strcmp(devname, "/pci@1f,700000/network@2") == 0)
2550 2640 sys_ok = B_TRUE;
2551 2641 if (strcmp(sysname, "SUNW,Sun-Blade-2500") == 0) /* ENWS */
2552 2642 if (strcmp(devname, "/pci@1c,600000/network@3") == 0)
2553 2643 sys_ok = B_TRUE;
2554 2644
2555 2645 /*
2556 2646 * Now check what we've discovered: is this truly a supported
2557 2647 * chip on (the motherboard of) a supported platform?
2558 2648 *
2559 2649 * Possible problems here:
2560 2650 * 1) it's a completely unheard-of chip
2561 2651 * 2) it's a recognised but unsupported chip (e.g. 5701, 5703C-A0)
2562 2652 * 3) it's a chip we would support if it were on the motherboard
2563 2653 * of a Sun platform, but this one isn't ;-(
2564 2654 */
2565 2655 if (cidp->chip_label == 0)
2566 2656 bge_problem(bgep,
2567 2657 "Device 'pci%04x,%04x' not recognized (%d?)",
2568 2658 cidp->vendor, cidp->device, cidp->device);
2569 2659 else if (!dev_ok)
2570 2660 bge_problem(bgep,
2571 2661 "Device 'pci%04x,%04x' (%d) revision %d not supported",
2572 2662 cidp->vendor, cidp->device, cidp->chip_label,
2573 2663 cidp->revision);
2574 2664 #if BGE_DEBUGGING
2575 2665 else if (!sys_ok)
2576 2666 bge_problem(bgep,
2577 2667 "%d-based subsystem 'pci%04x,%04x' not validated",
2578 2668 cidp->chip_label, cidp->subven, cidp->subdev);
2579 2669 #endif
2580 2670 else
2581 2671 cidp->flags |= CHIP_FLAG_SUPPORTED;
2582 2672 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
2583 2673 return (EIO);
2584 2674 return (0);
2585 2675 }
2586 2676
2587 2677 void
2588 2678 bge_chip_msi_trig(bge_t *bgep)
2589 2679 {
2590 2680 uint32_t regval;
2591 2681
2592 2682 regval = bgep->param_msi_cnt<<4;
2593 2683 bge_reg_set32(bgep, HOST_COALESCE_MODE_REG, regval);
2594 2684 BGE_DEBUG(("bge_chip_msi_trig:data = %d", regval));
2595 2685 }
2596 2686
2597 2687 /*
2598 2688 * Various registers that control the chip's internal engines (state
 * machines) have <reset> and <enable> bits (fortunately, in the
2600 2690 * same place in each such register :-).
2601 2691 *
2602 2692 * To reset the state machine, the <reset> bit must be written with 1;
2603 2693 * it will then read back as 1 while the reset is in progress, but
2604 2694 * self-clear to 0 when the reset completes.
2605 2695 *
2606 2696 * To enable a state machine, one must set the <enable> bit, which
2607 2697 * will continue to read back as 0 until the state machine is running.
2608 2698 *
2609 2699 * To disable a state machine, the <enable> bit must be cleared, but
2610 2700 * it will continue to read back as 1 until the state machine actually
2611 2701 * stops.
2612 2702 *
2613 2703 * This routine implements polling for completion of a reset, enable
2614 2704 * or disable operation, returning B_TRUE on success (bit reached the
2615 2705 * required state) or B_FALSE on timeout (200*100us == 20ms).
2616 2706 */
2617 2707 static boolean_t bge_chip_poll_engine(bge_t *bgep, bge_regno_t regno,
2618 2708 uint32_t mask, uint32_t val);
2619 2709 #pragma no_inline(bge_chip_poll_engine)
2620 2710
2621 2711 static boolean_t
2622 2712 bge_chip_poll_engine(bge_t *bgep, bge_regno_t regno,
2623 2713 uint32_t mask, uint32_t val)
2624 2714 {
2625 2715 uint32_t regval;
2626 2716 uint32_t n;
2627 2717
2628 2718 BGE_TRACE(("bge_chip_poll_engine($%p, 0x%lx, 0x%x, 0x%x)",
2629 2719 (void *)bgep, regno, mask, val));
2630 2720
2631 2721 for (n = 200; n; --n) {
2632 2722 regval = bge_reg_get32(bgep, regno);
2633 2723 if ((regval & mask) == val)
2634 2724 return (B_TRUE);
2635 2725 drv_usecwait(100);
2636 2726 }
2637 2727
2638 2728 bge_problem(bgep, "bge_chip_poll_engine failed: regno = 0x%lx", regno);
2639 2729 bge_fm_ereport(bgep, DDI_FM_DEVICE_NO_RESPONSE);
2640 2730 return (B_FALSE);
2641 2731 }
2642 2732
2643 2733 /*
2644 2734 * Various registers that control the chip's internal engines (state
2645 2735 * machines) have a <reset> bit (fortunately, in the same place in
2646 2736 * each such register :-). To reset the state machine, this bit must
2647 2737 * be written with 1; it will then read back as 1 while the reset is
2648 2738 * in progress, but self-clear to 0 when the reset completes.
2649 2739 *
2650 2740 * This code sets the bit, then polls for it to read back as zero.
2651 2741 * The return value is B_TRUE on success (reset bit cleared itself),
2652 2742 * or B_FALSE if the state machine didn't recover :(
2653 2743 *
2654 2744 * NOTE: the Core reset is similar to other resets, except that we
2655 2745 * can't poll for completion, since the Core reset disables memory
2656 2746 * access! So we just have to assume that it will all complete in
2657 2747 * 100us. See Broadcom document 570X-PG102-R, p102, steps 4-5.
2658 2748 */
static boolean_t bge_chip_reset_engine(bge_t *bgep, bge_regno_t regno);
#pragma	no_inline(bge_chip_reset_engine)

/*
 * Reset the state machine controlled by <regno>.  Returns B_TRUE on
 * success (reset bit self-cleared, or - for the Core reset - after the
 * fixed delays, since completion cannot be polled in that case).
 */
static boolean_t
bge_chip_reset_engine(bge_t *bgep, bge_regno_t regno)
{
	uint32_t regval;
	uint32_t val32;

	/* Read-modify-write: preserve the other bits in the register */
	regval = bge_reg_get32(bgep, regno);

	BGE_TRACE(("bge_chip_reset_engine($%p, 0x%lx)",
	    (void *)bgep, regno));
	BGE_DEBUG(("bge_chip_reset_engine: 0x%lx before reset = 0x%08x",
	    regno, regval));

	regval |= STATE_MACHINE_RESET_BIT;

	switch (regno) {
	case MISC_CONFIG_REG:
		/*
		 * BCM5714/5721/5751 pcie chip special case. In order to avoid
		 * resetting PCIE block and bringing PCIE link down, bit 29
		 * in the register needs to be set first, and then set it again
		 * while the reset bit is written.
		 * See:P500 of 57xx-PG102-RDS.pdf.
		 */
		if (DEVICE_5705_SERIES_CHIPSETS(bgep)||
		    DEVICE_5717_SERIES_CHIPSETS(bgep)||
		    DEVICE_5721_SERIES_CHIPSETS(bgep)||
		    DEVICE_5723_SERIES_CHIPSETS(bgep)||
		    DEVICE_5714_SERIES_CHIPSETS(bgep)||
		    DEVICE_5906_SERIES_CHIPSETS(bgep)) {
			regval |= MISC_CONFIG_GPHY_POWERDOWN_OVERRIDE;
			if (bgep->chipid.pci_type == BGE_PCI_E) {
				/*
				 * A0-revision silicon needs two extra
				 * workarounds: drop LTASS mode in the PHY
				 * test control register, and apply the
				 * clock-control fix in PCI config space.
				 */
				if (bgep->chipid.asic_rev ==
				    MHCR_CHIP_REV_5751_A0 ||
				    bgep->chipid.asic_rev ==
				    MHCR_CHIP_REV_5721_A0 ||
				    bgep->chipid.asic_rev ==
				    MHCR_CHIP_REV_5755_A0) {
					val32 = bge_reg_get32(bgep,
					    PHY_TEST_CTRL_REG);
					if (val32 == (PHY_PCIE_SCRAM_MODE |
					    PHY_PCIE_LTASS_MODE))
						bge_reg_put32(bgep,
						    PHY_TEST_CTRL_REG,
						    PHY_PCIE_SCRAM_MODE);
					val32 = pci_config_get32
					    (bgep->cfg_handle,
					    PCI_CONF_BGE_CLKCTL);
					val32 |= CLKCTL_PCIE_A0_FIX;
					pci_config_put32(bgep->cfg_handle,
					    PCI_CONF_BGE_CLKCTL, val32);
				}
				/* Set bit 29 first (see note above) ... */
				bge_reg_set32(bgep, regno,
				    MISC_CONFIG_GRC_RESET_DISABLE);
				/* ... then again along with the reset bit */
				regval |= MISC_CONFIG_GRC_RESET_DISABLE;
			}
		}

		/*
		 * Special case - causes Core reset
		 *
		 * On SPARC v9 we want to ensure that we don't start
		 * timing until the I/O access has actually reached
		 * the chip, otherwise we might make the next access
		 * too early. And we can't just force the write out
		 * by following it with a read (even to config space)
		 * because that would cause the fault we're trying
		 * to avoid. Hence the need for membar_sync() here.
		 */
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, regno), regval);
#ifdef __sparcv9
		membar_sync();
#endif	/* __sparcv9 */
		/*
		 * On some platforms the system needs about 300us for
		 * link setup.
		 */
		drv_usecwait(300);
		/*
		 * 5906-series parts run an internal VCPU: signal a
		 * driver-initiated reset and take it out of halt.
		 */
		if (DEVICE_5906_SERIES_CHIPSETS(bgep)) {
			bge_reg_set32(bgep, VCPU_STATUS_REG, VCPU_DRV_RESET);
			bge_reg_clr32(
			    bgep, VCPU_EXT_CTL, VCPU_EXT_CTL_HALF);
		}

		if (bgep->chipid.pci_type == BGE_PCI_E) {
			/* PCI-E device need more reset time */
			drv_usecwait(120000);

			/* Set PCIE max payload size and clear error status. */
			if ((bgep->chipid.chip_label == 5721) ||
			    (bgep->chipid.chip_label == 5751) ||
			    (bgep->chipid.chip_label == 5752) ||
			    (bgep->chipid.chip_label == 5789) ||
			    (bgep->chipid.chip_label == 5906)) {
				pci_config_put16(bgep->cfg_handle,
				    PCI_CONF_DEV_CTRL, READ_REQ_SIZE_MAX);
				pci_config_put16(bgep->cfg_handle,
				    PCI_CONF_DEV_STUS, DEVICE_ERROR_STUS);
			}

			/* 5723/5761 use a different config-space offset */
			if ((bgep->chipid.chip_label == 5723) ||
			    (bgep->chipid.chip_label == 5761)) {
				pci_config_put16(bgep->cfg_handle,
				    PCI_CONF_DEV_CTRL_5723, READ_REQ_SIZE_MAX);
				pci_config_put16(bgep->cfg_handle,
				    PCI_CONF_DEV_STUS_5723, DEVICE_ERROR_STUS);
			}
		}

		BGE_PCICHK(bgep);
		return (B_TRUE);

	default:
		/* Ordinary engine: write the reset bit and poll for clear */
		bge_reg_put32(bgep, regno, regval);
		return (bge_chip_poll_engine(bgep, regno,
		    STATE_MACHINE_RESET_BIT, 0));
	}
}
2780 2870
2781 2871 /*
2782 2872 * Various registers that control the chip's internal engines (state
2783 2873 * machines) have an <enable> bit (fortunately, in the same place in
2784 2874 * each such register :-). To stop the state machine, this bit must
2785 2875 * be written with 0, then polled to see when the state machine has
2786 2876 * actually stopped.
2787 2877 *
2788 2878 * The return value is B_TRUE on success (enable bit cleared), or
2789 2879 * B_FALSE if the state machine didn't stop :(
2790 2880 */
2791 2881 static boolean_t bge_chip_disable_engine(bge_t *bgep, bge_regno_t regno,
2792 2882 uint32_t morebits);
2793 2883 #pragma no_inline(bge_chip_disable_engine)
2794 2884
2795 2885 static boolean_t
2796 2886 bge_chip_disable_engine(bge_t *bgep, bge_regno_t regno, uint32_t morebits)
2797 2887 {
2798 2888 uint32_t regval;
2799 2889
2800 2890 BGE_TRACE(("bge_chip_disable_engine($%p, 0x%lx, 0x%x)",
2801 2891 (void *)bgep, regno, morebits));
2802 2892
2803 2893 switch (regno) {
2804 2894 case FTQ_RESET_REG:
2805 2895 /*
2806 2896 * For Schumacher's bugfix CR6490108
2807 2897 */
2808 2898 #ifdef BGE_IPMI_ASF
2809 2899 #ifdef BGE_NETCONSOLE
2810 2900 if (bgep->asf_enabled)
2811 2901 return (B_TRUE);
2812 2902 #endif
2813 2903 #endif
2814 2904 /*
2815 2905 * Not quite like the others; it doesn't
2816 2906 * have an <enable> bit, but instead we
2817 2907 * have to set and then clear all the bits
2818 2908 */
2819 2909 bge_reg_put32(bgep, regno, ~(uint32_t)0);
2820 2910 drv_usecwait(100);
2821 2911 bge_reg_put32(bgep, regno, 0);
2822 2912 return (B_TRUE);
2823 2913
2824 2914 default:
2825 2915 regval = bge_reg_get32(bgep, regno);
2826 2916 regval &= ~STATE_MACHINE_ENABLE_BIT;
2827 2917 regval &= ~morebits;
2828 2918 bge_reg_put32(bgep, regno, regval);
2829 2919 return (bge_chip_poll_engine(bgep, regno,
2830 2920 STATE_MACHINE_ENABLE_BIT, 0));
2831 2921 }
2832 2922 }
2833 2923
2834 2924 /*
2835 2925 * Various registers that control the chip's internal engines (state
2836 2926 * machines) have an <enable> bit (fortunately, in the same place in
2837 2927 * each such register :-). To start the state machine, this bit must
2838 2928 * be written with 1, then polled to see when the state machine has
2839 2929 * actually started.
2840 2930 *
2841 2931 * The return value is B_TRUE on success (enable bit set), or
2842 2932 * B_FALSE if the state machine didn't start :(
2843 2933 */
2844 2934 static boolean_t bge_chip_enable_engine(bge_t *bgep, bge_regno_t regno,
2845 2935 uint32_t morebits);
2846 2936 #pragma no_inline(bge_chip_enable_engine)
2847 2937
2848 2938 static boolean_t
2849 2939 bge_chip_enable_engine(bge_t *bgep, bge_regno_t regno, uint32_t morebits)
2850 2940 {
2851 2941 uint32_t regval;
2852 2942
2853 2943 BGE_TRACE(("bge_chip_enable_engine($%p, 0x%lx, 0x%x)",
2854 2944 (void *)bgep, regno, morebits));
2855 2945
2856 2946 switch (regno) {
2857 2947 case FTQ_RESET_REG:
2858 2948 #ifdef BGE_IPMI_ASF
2859 2949 #ifdef BGE_NETCONSOLE
2860 2950 if (bgep->asf_enabled)
2861 2951 return (B_TRUE);
2862 2952 #endif
2863 2953 #endif
2864 2954 /*
2865 2955 * Not quite like the others; it doesn't
2866 2956 * have an <enable> bit, but instead we
2867 2957 * have to set and then clear all the bits
2868 2958 */
2869 2959 bge_reg_put32(bgep, regno, ~(uint32_t)0);
2870 2960 drv_usecwait(100);
2871 2961 bge_reg_put32(bgep, regno, 0);
2872 2962 return (B_TRUE);
2873 2963
2874 2964 default:
2875 2965 regval = bge_reg_get32(bgep, regno);
2876 2966 regval |= STATE_MACHINE_ENABLE_BIT;
2877 2967 regval |= morebits;
2878 2968 bge_reg_put32(bgep, regno, regval);
2879 2969 return (bge_chip_poll_engine(bgep, regno,
2880 2970 STATE_MACHINE_ENABLE_BIT, STATE_MACHINE_ENABLE_BIT));
2881 2971 }
2882 2972 }
2883 2973
2884 2974 /*
2885 2975 * Reprogram the Ethernet, Transmit, and Receive MAC
2886 2976 * modes to match the param_* variables
2887 2977 */
2888 2978 void bge_sync_mac_modes(bge_t *bgep);
2889 2979 #pragma no_inline(bge_sync_mac_modes)
2890 2980
2891 2981 void
2892 2982 bge_sync_mac_modes(bge_t *bgep)
2893 2983 {
2894 2984 uint32_t macmode;
2895 2985 uint32_t regval;
2896 2986
2897 2987 ASSERT(mutex_owned(bgep->genlock));
2898 2988
2899 2989 /*
2900 2990 * Reprogram the Ethernet MAC mode ...
2901 2991 */
2902 2992 macmode = regval = bge_reg_get32(bgep, ETHERNET_MAC_MODE_REG);
2903 2993 if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
2904 2994 (bgep->param_loop_mode != BGE_LOOP_INTERNAL_MAC))
2905 2995 if (DEVICE_5714_SERIES_CHIPSETS(bgep))
2906 2996 macmode |= ETHERNET_MODE_LINK_POLARITY;
2907 2997 else
2908 2998 macmode &= ~ETHERNET_MODE_LINK_POLARITY;
2909 2999 else
2910 3000 macmode |= ETHERNET_MODE_LINK_POLARITY;
2911 3001 macmode &= ~ETHERNET_MODE_PORTMODE_MASK;
2912 3002 if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
2913 3003 (bgep->param_loop_mode != BGE_LOOP_INTERNAL_MAC)) {
2914 3004 if (DEVICE_5714_SERIES_CHIPSETS(bgep))
2915 3005 macmode |= ETHERNET_MODE_PORTMODE_GMII;
2916 3006 else
2917 3007 macmode |= ETHERNET_MODE_PORTMODE_TBI;
2918 3008 } else if (bgep->param_link_speed == 10 ||
2919 3009 bgep->param_link_speed == 100)
2920 3010 macmode |= ETHERNET_MODE_PORTMODE_MII;
2921 3011 else
2922 3012 macmode |= ETHERNET_MODE_PORTMODE_GMII;
2923 3013 if (bgep->param_link_duplex == LINK_DUPLEX_HALF)
2924 3014 macmode |= ETHERNET_MODE_HALF_DUPLEX;
2925 3015 else
2926 3016 macmode &= ~ETHERNET_MODE_HALF_DUPLEX;
2927 3017 if (bgep->param_loop_mode == BGE_LOOP_INTERNAL_MAC)
2928 3018 macmode |= ETHERNET_MODE_MAC_LOOPBACK;
2929 3019 else
2930 3020 macmode &= ~ETHERNET_MODE_MAC_LOOPBACK;
2931 3021 bge_reg_put32(bgep, ETHERNET_MAC_MODE_REG, macmode);
2932 3022 BGE_DEBUG(("bge_sync_mac_modes($%p) Ethernet MAC mode 0x%x => 0x%x",
2933 3023 (void *)bgep, regval, macmode));
2934 3024
2935 3025 /*
2936 3026 * ... the Transmit MAC mode ...
2937 3027 */
2938 3028 macmode = regval = bge_reg_get32(bgep, TRANSMIT_MAC_MODE_REG);
2939 3029 if (bgep->param_link_tx_pause)
2940 3030 macmode |= TRANSMIT_MODE_FLOW_CONTROL;
2941 3031 else
2942 3032 macmode &= ~TRANSMIT_MODE_FLOW_CONTROL;
2943 3033 bge_reg_put32(bgep, TRANSMIT_MAC_MODE_REG, macmode);
2944 3034 BGE_DEBUG(("bge_sync_mac_modes($%p) Transmit MAC mode 0x%x => 0x%x",
2945 3035 (void *)bgep, regval, macmode));
2946 3036
2947 3037 /*
2948 3038 * ... and the Receive MAC mode
2949 3039 */
2950 3040 macmode = regval = bge_reg_get32(bgep, RECEIVE_MAC_MODE_REG);
2951 3041 if (bgep->param_link_rx_pause)
2952 3042 macmode |= RECEIVE_MODE_FLOW_CONTROL;
2953 3043 else
2954 3044 macmode &= ~RECEIVE_MODE_FLOW_CONTROL;
2955 3045 bge_reg_put32(bgep, RECEIVE_MAC_MODE_REG, macmode);
2956 3046 BGE_DEBUG(("bge_sync_mac_modes($%p) Receive MAC mode 0x%x => 0x%x",
2957 3047 (void *)bgep, regval, macmode));
2958 3048
2959 3049 /*
2960 3050 * For BCM5785, we need to configure the link status in the MI Status
2961 3051 * register with a write command when auto-polling is disabled.
2962 3052 */
2963 3053 if (bgep->chipid.device == DEVICE_ID_5785)
2964 3054 if (bgep->param_link_speed == 10)
2965 3055 bge_reg_put32(bgep, MI_STATUS_REG, MI_STATUS_LINK
2966 3056 | MI_STATUS_10MBPS);
2967 3057 else
2968 3058 bge_reg_put32(bgep, MI_STATUS_REG, MI_STATUS_LINK);
2969 3059 }
2970 3060
2971 3061 /*
2972 3062 * bge_chip_sync() -- program the chip with the unicast MAC address,
2973 3063 * the multicast hash table, the required level of promiscuity, and
2974 3064 * the current loopback mode ...
2975 3065 */
#ifdef BGE_IPMI_ASF
int bge_chip_sync(bge_t *bgep, boolean_t asf_keeplive);
#else
int bge_chip_sync(bge_t *bgep);
#endif
#pragma	no_inline(bge_chip_sync)

/*
 * Program the chip with the unicast MAC address(es), the multicast
 * hash table, the promiscuity setting, and the current MAC modes.
 * Returns DDI_SUCCESS, or DDI_FAILURE if any engine stop/start failed.
 */
int
#ifdef BGE_IPMI_ASF
bge_chip_sync(bge_t *bgep, boolean_t asf_keeplive)
#else
bge_chip_sync(bge_t *bgep)
#endif
{
	void (*opfn)(bge_t *bgep, bge_regno_t reg, uint32_t bits);
	boolean_t promisc;
	uint64_t macaddr;
	uint32_t fill = 0;
	int i, j;
	int retval = DDI_SUCCESS;

	BGE_TRACE(("bge_chip_sync($%p)",
	    (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * In promiscuous mode <fill> stays all-ones, so every bit of the
	 * multicast hash table gets set below; otherwise it is zero and
	 * only the configured hash bits are programmed.
	 */
	promisc = B_FALSE;
	fill = ~(uint32_t)0;

	if (bgep->promisc)
		promisc = B_TRUE;
	else
		fill = (uint32_t)0;

	/*
	 * If the TX/RX MAC engines are already running, we should stop
	 * them (and reset the RX engine) before changing the parameters.
	 * If they're not running, this will have no effect ...
	 *
	 * NOTE: this is currently disabled by default because stopping
	 * and restarting the Tx engine may cause an outgoing packet in
	 * transit to be truncated. Also, stopping and restarting the
	 * Rx engine seems to not work correctly on the 5705. Testing
	 * has not (yet!) revealed any problems with NOT stopping and
	 * restarting these engines (and Broadcom say their drivers don't
	 * do this), but if it is found to cause problems, this variable
	 * can be patched to re-enable the old behaviour ...
	 */
	if (bge_stop_start_on_sync) {
#ifdef BGE_IPMI_ASF
		if (!bgep->asf_enabled) {
			if (!bge_chip_disable_engine(bgep,
			    RECEIVE_MAC_MODE_REG, RECEIVE_MODE_KEEP_VLAN_TAG))
				retval = DDI_FAILURE;
		} else {
			if (!bge_chip_disable_engine(bgep,
			    RECEIVE_MAC_MODE_REG, 0))
				retval = DDI_FAILURE;
		}
#else
		if (!bge_chip_disable_engine(bgep, RECEIVE_MAC_MODE_REG,
		    RECEIVE_MODE_KEEP_VLAN_TAG))
			retval = DDI_FAILURE;
#endif
		if (!bge_chip_disable_engine(bgep, TRANSMIT_MAC_MODE_REG, 0))
			retval = DDI_FAILURE;
		if (!bge_chip_reset_engine(bgep, RECEIVE_MAC_MODE_REG))
			retval = DDI_FAILURE;
	}

	/*
	 * Reprogram the hashed multicast address table ...
	 * (cleared first, then rewritten; <fill> ORs in all-ones when
	 * promiscuous - see above)
	 */
	for (i = 0; i < BGE_HASH_TABLE_SIZE/32; ++i)
		bge_reg_put32(bgep, MAC_HASH_REG(i), 0);

	for (i = 0; i < BGE_HASH_TABLE_SIZE/32; ++i)
		bge_reg_put32(bgep, MAC_HASH_REG(i),
		    bgep->mcast_hash[i] | fill);

#ifdef BGE_IPMI_ASF
	if (!bgep->asf_enabled || !asf_keeplive) {
#endif
		/*
		 * Transform the MAC address(es) from host to chip format, then
		 * reprogram the transmit random backoff seed and the unicast
		 * MAC address(es) ...
		 */
		for (j = 0; j < MAC_ADDRESS_REGS_MAX; j++) {
			for (i = 0, macaddr = 0ull;
			    i < ETHERADDRL; ++i) {
				macaddr <<= 8;
				macaddr |= bgep->curr_addr[j].addr[i];
			}
			/* accumulate address bytes into the seed value */
			fill += (macaddr >> 16) + (macaddr & 0xffffffff);
			bge_reg_put64(bgep, MAC_ADDRESS_REG(j), macaddr);

			BGE_DEBUG(("bge_chip_sync($%p) "
			    "setting MAC address %012llx",
			    (void *)bgep, macaddr));
		}
#ifdef BGE_IPMI_ASF
	}
#endif
	/*
	 * Set random seed of backoff interval
	 * - Writing zero means no backoff interval
	 * (note: <fill> is reused here as the seed accumulator, so its
	 * earlier promisc value also feeds into the seed)
	 */
	fill = ((fill >> 20) + (fill >> 10) + fill) & 0x3ff;
	if (fill == 0)
		fill = 1;
	bge_reg_put32(bgep, MAC_TX_RANDOM_BACKOFF_REG, fill);

	/*
	 * Set or clear the PROMISCUOUS mode bit
	 */
	opfn = promisc ? bge_reg_set32 : bge_reg_clr32;
	(*opfn)(bgep, RECEIVE_MAC_MODE_REG, RECEIVE_MODE_PROMISCUOUS);

	/*
	 * Sync the rest of the MAC modes too ...
	 */
	bge_sync_mac_modes(bgep);

	/*
	 * Restart RX/TX MAC engines if required ...
	 */
	if (bgep->bge_chip_state == BGE_CHIP_RUNNING) {
		if (!bge_chip_enable_engine(bgep, TRANSMIT_MAC_MODE_REG, 0))
			retval = DDI_FAILURE;
#ifdef BGE_IPMI_ASF
		if (!bgep->asf_enabled) {
			if (!bge_chip_enable_engine(bgep,
			    RECEIVE_MAC_MODE_REG, RECEIVE_MODE_KEEP_VLAN_TAG))
				retval = DDI_FAILURE;
		} else {
			if (!bge_chip_enable_engine(bgep,
			    RECEIVE_MAC_MODE_REG, 0))
				retval = DDI_FAILURE;
		}
#else
		if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG,
		    RECEIVE_MODE_KEEP_VLAN_TAG))
			retval = DDI_FAILURE;
#endif
	}
	return (retval);
}
3124 3214
3125 3215 /*
3126 3216 * This array defines the sequence of state machine control registers
3127 3217 * in which the <enable> bit must be cleared to bring the chip to a
3128 3218 * clean stop. Taken from Broadcom document 570X-PG102-R, p116.
3129 3219 */
static bge_regno_t shutdown_engine_regs[] = {
	/* receive-path engines first ... */
	RECEIVE_MAC_MODE_REG,
	RCV_BD_INITIATOR_MODE_REG,
	RCV_LIST_PLACEMENT_MODE_REG,
	RCV_LIST_SELECTOR_MODE_REG,		/* BCM5704 series only	*/
	RCV_DATA_BD_INITIATOR_MODE_REG,
	RCV_DATA_COMPLETION_MODE_REG,
	RCV_BD_COMPLETION_MODE_REG,

	/* ... then the send-path engines ... */
	SEND_BD_SELECTOR_MODE_REG,
	SEND_BD_INITIATOR_MODE_REG,
	SEND_DATA_INITIATOR_MODE_REG,
	READ_DMA_MODE_REG,
	SEND_DATA_COMPLETION_MODE_REG,
	DMA_COMPLETION_MODE_REG,		/* BCM5704 series only	*/
	SEND_BD_COMPLETION_MODE_REG,
	TRANSMIT_MAC_MODE_REG,

	/* ... and finally the host-side/housekeeping engines */
	HOST_COALESCE_MODE_REG,
	WRITE_DMA_MODE_REG,
	MBUF_CLUSTER_FREE_MODE_REG,		/* BCM5704 series only	*/
	FTQ_RESET_REG,				/* special - see code	*/
	BUFFER_MANAGER_MODE_REG,		/* BCM5704 series only	*/
	MEMORY_ARBITER_MODE_REG,		/* BCM5704 series only	*/
	BGE_REGNO_NONE				/* terminator		*/
};
3156 3246
3157 3247 #ifndef __sparc
3158 3248 static bge_regno_t quiesce_regs[] = {
3159 3249 READ_DMA_MODE_REG,
3160 3250 DMA_COMPLETION_MODE_REG,
3161 3251 WRITE_DMA_MODE_REG,
3162 3252 BGE_REGNO_NONE
3163 3253 };
3164 3254
3165 3255 void bge_chip_stop_nonblocking(bge_t *bgep);
3166 3256 #pragma no_inline(bge_chip_stop_nonblocking)
3167 3257
3168 3258 /*
3169 3259 * This function is called by bge_quiesce(). We
3170 3260 * turn off all the DMA engines here.
3171 3261 */
3172 3262 void
3173 3263 bge_chip_stop_nonblocking(bge_t *bgep)
3174 3264 {
3175 3265 bge_regno_t *rbp;
3176 3266
3177 3267 /*
3178 3268 * Flag that no more activity may be initiated
3179 3269 */
3180 3270 bgep->progress &= ~PROGRESS_READY;
3181 3271
3182 3272 rbp = quiesce_regs;
3183 3273 while (*rbp != BGE_REGNO_NONE) {
3184 3274 (void) bge_chip_disable_engine(bgep, *rbp, 0);
3185 3275 ++rbp;
3186 3276 }
3187 3277
3188 3278 bgep->bge_chip_state = BGE_CHIP_STOPPED;
3189 3279 }
3190 3280
3191 3281 #endif
3192 3282
3193 3283 /*
3194 3284 * bge_chip_stop() -- stop all chip processing
3195 3285 *
3196 3286 * If the <fault> parameter is B_TRUE, we're stopping the chip because
3197 3287 * we've detected a problem internally; otherwise, this is a normal
3198 3288 * (clean) stop (at user request i.e. the last STREAM has been closed).
3199 3289 */
3200 3290 void bge_chip_stop(bge_t *bgep, boolean_t fault);
3201 3291 #pragma no_inline(bge_chip_stop)
3202 3292
3203 3293 void
3204 3294 bge_chip_stop(bge_t *bgep, boolean_t fault)
3205 3295 {
3206 3296 bge_regno_t regno;
3207 3297 bge_regno_t *rbp;
3208 3298 boolean_t ok;
3209 3299
3210 3300 BGE_TRACE(("bge_chip_stop($%p)",
3211 3301 (void *)bgep));
3212 3302
3213 3303 ASSERT(mutex_owned(bgep->genlock));
3214 3304
3215 3305 rbp = shutdown_engine_regs;
3216 3306 /*
3217 3307 * When driver try to shutdown the BCM5705/5788/5721/5751/
3218 3308 * 5752/5714 and 5715 chipsets,the buffer manager and the mem
3219 3309 * -ory arbiter should not be disabled.
3220 3310 */
3221 3311 for (ok = B_TRUE; (regno = *rbp) != BGE_REGNO_NONE; ++rbp) {
3222 3312 if (DEVICE_5704_SERIES_CHIPSETS(bgep))
3223 3313 ok &= bge_chip_disable_engine(bgep, regno, 0);
3224 3314 else if ((regno != RCV_LIST_SELECTOR_MODE_REG) &&
3225 3315 (regno != DMA_COMPLETION_MODE_REG) &&
3226 3316 (regno != MBUF_CLUSTER_FREE_MODE_REG)&&
3227 3317 (regno != BUFFER_MANAGER_MODE_REG) &&
3228 3318 (regno != MEMORY_ARBITER_MODE_REG))
3229 3319 ok &= bge_chip_disable_engine(bgep,
3230 3320 regno, 0);
3231 3321 }
3232 3322
3233 3323 if (!ok && !fault)
3234 3324 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
3235 3325
3236 3326 /*
3237 3327 * Finally, disable (all) MAC events & clear the MAC status
3238 3328 */
3239 3329 bge_reg_put32(bgep, ETHERNET_MAC_EVENT_ENABLE_REG, 0);
3240 3330 bge_reg_put32(bgep, ETHERNET_MAC_STATUS_REG, ~0);
3241 3331
3242 3332 /*
3243 3333 * if we're stopping the chip because of a detected fault then do
3244 3334 * appropriate actions
3245 3335 */
3246 3336 if (fault) {
3247 3337 if (bgep->bge_chip_state != BGE_CHIP_FAULT) {
3248 3338 bgep->bge_chip_state = BGE_CHIP_FAULT;
3249 3339 if (!bgep->manual_reset)
3250 3340 ddi_fm_service_impact(bgep->devinfo,
3251 3341 DDI_SERVICE_LOST);
3252 3342 if (bgep->bge_dma_error) {
3253 3343 /*
3254 3344 * need to free buffers in case the fault was
3255 3345 * due to a memory error in a buffer - got to
3256 3346 * do a fair bit of tidying first
3257 3347 */
3258 3348 if (bgep->progress & PROGRESS_KSTATS) {
3259 3349 bge_fini_kstats(bgep);
3260 3350 bgep->progress &= ~PROGRESS_KSTATS;
3261 3351 }
3262 3352 if (bgep->progress & PROGRESS_INTR) {
3263 3353 bge_intr_disable(bgep);
3264 3354 rw_enter(bgep->errlock, RW_WRITER);
3265 3355 bge_fini_rings(bgep);
3266 3356 rw_exit(bgep->errlock);
3267 3357 bgep->progress &= ~PROGRESS_INTR;
3268 3358 }
3269 3359 if (bgep->progress & PROGRESS_BUFS) {
3270 3360 bge_free_bufs(bgep);
3271 3361 bgep->progress &= ~PROGRESS_BUFS;
3272 3362 }
3273 3363 bgep->bge_dma_error = B_FALSE;
3274 3364 }
3275 3365 }
3276 3366 } else
3277 3367 bgep->bge_chip_state = BGE_CHIP_STOPPED;
3278 3368 }
3279 3369
3280 3370 /*
3281 3371 * Poll for completion of chip's ROM firmware; also, at least on the
3282 3372 * first time through, find and return the hardware MAC address, if any.
3283 3373 */
3284 3374 static uint64_t bge_poll_firmware(bge_t *bgep);
3285 3375 #pragma no_inline(bge_poll_firmware)
3286 3376
3287 3377 static uint64_t
3288 3378 bge_poll_firmware(bge_t *bgep)
3289 3379 {
3290 3380 uint64_t magic;
3291 3381 uint64_t mac;
3292 3382 uint32_t gen, val;
3293 3383 uint32_t i;
3294 3384
3295 3385 /*
3296 3386 * Step 19: poll for firmware completion (GENCOMM port set
3297 3387 * to the ones complement of T3_MAGIC_NUMBER).
3298 3388 *
3299 3389 * While we're at it, we also read the MAC address register;
3300 3390 * at some stage the firmware will load this with the
3301 3391 * factory-set value.
3302 3392 *
3303 3393 * When both the magic number and the MAC address are set,
3304 3394 * we're done; but we impose a time limit of one second
3305 3395 * (1000*1000us) in case the firmware fails in some fashion
3306 3396 * or the SEEPROM that provides that MAC address isn't fitted.
3307 3397 *
3308 3398 * After the first time through (chip state != INITIAL), we
3309 3399 * don't need the MAC address to be set (we've already got it
3310 3400 * or not, from the first time), so we don't wait for it, but
3311 3401 * we still have to wait for the T3_MAGIC_NUMBER.
3312 3402 *
3313 3403 * Note: the magic number is only a 32-bit quantity, but the NIC
3314 3404 * memory is 64-bit (and big-endian) internally. Addressing the
3315 3405 * GENCOMM word as "the upper half of a 64-bit quantity" makes
3316 3406 * it work correctly on both big- and little-endian hosts.
3317 3407 */
3318 3408 if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
3319 3409 MHCR_CHIP_ASIC_REV_5906) {
3320 3410 for (i = 0; i < 1000; ++i) {
3321 3411 drv_usecwait(1000);
3322 3412 val = bge_reg_get32(bgep, VCPU_STATUS_REG);
3323 3413 if (val & VCPU_INIT_DONE)
3324 3414 break;
3325 3415 }
3326 3416 BGE_DEBUG(("bge_poll_firmware($%p): return after %d loops",
3327 3417 (void *)bgep, i));
3328 3418 mac = bge_reg_get64(bgep, MAC_ADDRESS_REG(0));
3329 3419 } else {
3330 3420 for (i = 0; i < 1000; ++i) {
3331 3421 drv_usecwait(1000);
3332 3422 gen = bge_nic_get64(bgep, NIC_MEM_GENCOMM) >> 32;
3333 3423 #ifdef BGE_IPMI_ASF
3334 3424 if (!bgep->asf_enabled) {
3335 3425 #endif
3336 3426 if (gen != ~T3_MAGIC_NUMBER)
3337 3427 continue;
3338 3428 #ifdef BGE_IPMI_ASF
3339 3429 }
3340 3430 #endif
3341 3431 mac = bge_reg_get64(bgep, MAC_ADDRESS_REG(0));
3342 3432 if (mac != 0ULL)
3343 3433 break;
3344 3434 if (bgep->bge_chip_state != BGE_CHIP_INITIAL)
3345 3435 break;
3346 3436 }
3347 3437 }
3348 3438
3349 3439 magic = bge_nic_get64(bgep, NIC_MEM_GENCOMM);
3350 3440 BGE_DEBUG(("bge_poll_firmware($%p): PXE magic 0x%x after %d loops",
3351 3441 (void *)bgep, gen, i));
3352 3442 BGE_DEBUG(("bge_poll_firmware: MAC %016llx, GENCOMM %016llx",
3353 3443 mac, magic));
3354 3444
3355 3445 return (mac);
3356 3446 }
3357 3447
3358 3448 /*
3359 3449 * Maximum times of trying to get the NVRAM access lock
3360 3450 * by calling bge_nvmem_acquire()
3361 3451 */
3362 3452 #define MAX_TRY_NVMEM_ACQUIRE 10000
3363 3453
3364 3454 #ifdef BGE_IPMI_ASF
3365 3455 int bge_chip_reset(bge_t *bgep, boolean_t enable_dma, uint_t asf_mode);
3366 3456 #else
3367 3457 int bge_chip_reset(bge_t *bgep, boolean_t enable_dma);
3368 3458 #endif
3369 3459 #pragma no_inline(bge_chip_reset)
3370 3460
/*
 * bge_chip_reset() -- fully reset the chip and leave it in the
 * BGE_CHIP_RESET state, following the initialisation sequence from
 * Broadcom documents 570X-PG102-R / 570X-PG104-R (roughly steps 1-21;
 * bge_chip_start() continues from step 22).
 *
 * Caller must hold bgep->genlock.  Returns DDI_SUCCESS, or DDI_FAILURE
 * if any engine fails to enable/reset or the SEND INDEX register was
 * not cleared by the reset.
 */
int
#ifdef BGE_IPMI_ASF
bge_chip_reset(bge_t *bgep, boolean_t enable_dma, uint_t asf_mode)
#else
bge_chip_reset(bge_t *bgep, boolean_t enable_dma)
#endif
{
	chip_id_t chipid;
	uint64_t mac;
	uint64_t magic;
	uint32_t modeflags;
	uint32_t mhcr;
	uint32_t sx0;
	uint32_t i, tries;
#ifdef BGE_IPMI_ASF
	uint32_t mailbox;
#endif
	int retval = DDI_SUCCESS;

	BGE_TRACE(("bge_chip_reset($%p, %d)",
	    (void *)bgep, enable_dma));

	ASSERT(mutex_owned(bgep->genlock));

	BGE_DEBUG(("bge_chip_reset($%p, %d): current state is %d",
	    (void *)bgep, enable_dma, bgep->bge_chip_state));

	/*
	 * Do we need to stop the chip cleanly before resetting?
	 */
	switch (bgep->bge_chip_state) {
	default:
		_NOTE(NOTREACHED)
		return (DDI_FAILURE);

	case BGE_CHIP_INITIAL:
	case BGE_CHIP_STOPPED:
	case BGE_CHIP_RESET:
		break;

	case BGE_CHIP_RUNNING:
	case BGE_CHIP_ERROR:
	case BGE_CHIP_FAULT:
		bge_chip_stop(bgep, B_FALSE);
		break;
	}

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		/*
		 * ASF: program the MHCR and enable the memory arbiter
		 * before telling the ASF firmware about the impending
		 * reset.  The sparc MHCR additionally sets the endian
		 * word/byte swap bits.
		 */
#ifdef __sparc
		mhcr = MHCR_ENABLE_INDIRECT_ACCESS |
		    MHCR_ENABLE_TAGGED_STATUS_MODE |
		    MHCR_MASK_INTERRUPT_MODE |
		    MHCR_CLEAR_INTERRUPT_INTA |
		    MHCR_ENABLE_ENDIAN_WORD_SWAP |
		    MHCR_ENABLE_ENDIAN_BYTE_SWAP;

		if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
			mhcr |= MHCR_MASK_PCI_INT_OUTPUT;

		/* 5717 family: write 0 first (why: unclear — per vendor) */
		if (DEVICE_5717_SERIES_CHIPSETS(bgep))
			pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR,
			    0);
#else
		mhcr = MHCR_ENABLE_INDIRECT_ACCESS |
		    MHCR_ENABLE_TAGGED_STATUS_MODE |
		    MHCR_MASK_INTERRUPT_MODE |
		    MHCR_MASK_PCI_INT_OUTPUT |
		    MHCR_CLEAR_INTERRUPT_INTA;
#endif
		pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcr);
		bge_reg_put32(bgep, MEMORY_ARBITER_MODE_REG,
		    bge_reg_get32(bgep, MEMORY_ARBITER_MODE_REG) |
		    MEMORY_ARBITER_ENABLE);
		if (asf_mode == ASF_MODE_INIT) {
			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		} else if (asf_mode == ASF_MODE_SHUTDOWN) {
			bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);
		}
	}
#endif
	/*
	 * Adapted from Broadcom document 570X-PG102-R, pp 102-116.
	 * Updated to reflect Broadcom document 570X-PG104-R, pp 146-159.
	 *
	 * Before reset Core clock,it is
	 * also required to initialize the Memory Arbiter as specified in step9
	 * and Misc Host Control Register as specified in step-13
	 * Step 4-5: reset Core clock & wait for completion
	 * Steps 6-8: are done by bge_chip_cfg_init()
	 * put the T3_MAGIC_NUMBER into the GENCOMM port before reset
	 */
	if (!bge_chip_enable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0))
		retval = DDI_FAILURE;

	mhcr = MHCR_ENABLE_INDIRECT_ACCESS |
	    MHCR_ENABLE_TAGGED_STATUS_MODE |
	    MHCR_ENABLE_PCI_STATE_WRITE |
	    MHCR_MASK_INTERRUPT_MODE |
	    MHCR_CLEAR_INTERRUPT_INTA;

	if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
		mhcr |= MHCR_MASK_PCI_INT_OUTPUT;

#ifdef _BIG_ENDIAN
	mhcr |= MHCR_ENABLE_ENDIAN_WORD_SWAP | MHCR_ENABLE_ENDIAN_BYTE_SWAP;
#endif /* _BIG_ENDIAN */
	if (DEVICE_5717_SERIES_CHIPSETS(bgep))
		pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, 0);
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcr);
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled)
		bgep->asf_wordswapped = B_FALSE;
#endif

	/*
	 * 5755-and-later and 5752 chips: zero the fastboot program
	 * counter.  NOTE(review): presumably this forces the full boot
	 * code to run after the coming reset — confirm against the PRM.
	 */
	if (DEVICE_IS_5755_PLUS(bgep) ||
	    MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
	    MHCR_CHIP_ASIC_REV_5752)
		bge_reg_put32(bgep, GRC_FASTBOOT_PC, 0);

	/*
	 * NVRAM Corruption Workaround
	 */
	for (tries = 0; tries < MAX_TRY_NVMEM_ACQUIRE; tries++)
		if (bge_nvmem_acquire(bgep) != EAGAIN)
			break;
	if (tries >= MAX_TRY_NVMEM_ACQUIRE)
		BGE_DEBUG(("%s: fail to acquire nvram lock",
		    bgep->ifname));

#ifdef BGE_IPMI_ASF
	if (!bgep->asf_enabled) {
#endif
		/*
		 * Seed GENCOMM with T3_MAGIC_NUMBER; the firmware writes
		 * back its ones-complement when boot completes (see
		 * bge_poll_firmware()).
		 */
		magic = (uint64_t)T3_MAGIC_NUMBER << 32;
		bge_nic_put64(bgep, NIC_MEM_GENCOMM, magic);
#ifdef BGE_IPMI_ASF
	}
#endif

	if (!bge_chip_reset_engine(bgep, MISC_CONFIG_REG))
		retval = DDI_FAILURE;
	bge_chip_cfg_init(bgep, &chipid, enable_dma);

	/*
	 * Step 8a: This may belong elsewhere, but BCM5721 needs
	 * a bit set to avoid a fifo overflow/underflow bug.
	 */
	if ((bgep->chipid.chip_label == 5721) ||
	    (bgep->chipid.chip_label == 5751) ||
	    (bgep->chipid.chip_label == 5752) ||
	    (bgep->chipid.chip_label == 5755) ||
	    (bgep->chipid.chip_label == 5756) ||
	    (bgep->chipid.chip_label == 5789) ||
	    (bgep->chipid.chip_label == 5906))
		bge_reg_set32(bgep, TLP_CONTROL_REG, TLP_DATA_FIFO_PROTECT);


	/*
	 * Step 9: enable MAC memory arbiter,bit30 and bit31 of 5714/5715 should
	 * not be changed.
	 */
	if (!bge_chip_enable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0))
		retval = DDI_FAILURE;

	/*
	 * Steps 10-11: configure PIO endianness options and
	 * enable indirect register access -- already done
	 * Steps 12-13: enable writing to the PCI state & clock
	 * control registers -- not required; we aren't going to
	 * use those features.
	 * Steps 14-15: Configure DMA endianness options.  See
	 * the comments on the setting of the MHCR above.
	 */
#ifdef _BIG_ENDIAN
	modeflags = MODE_WORD_SWAP_FRAME | MODE_BYTE_SWAP_FRAME |
	    MODE_WORD_SWAP_NONFRAME | MODE_BYTE_SWAP_NONFRAME;
#else
	modeflags = MODE_WORD_SWAP_FRAME | MODE_BYTE_SWAP_FRAME;
#endif /* _BIG_ENDIAN */
	/* 5720 needs extra buffer-to-host-rx swap/enable mode bits */
	if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
	    MHCR_CHIP_ASIC_REV_5720)
		modeflags |=
		    MODE_BYTE_SWAP_B2HRX_DATA | MODE_WORD_SWAP_B2HRX_DATA |
		    MODE_B2HRX_ENABLE | MODE_HTX2B_ENABLE;
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled)
		modeflags |= MODE_HOST_STACK_UP;
#endif
	bge_reg_put32(bgep, MODE_CONTROL_REG, modeflags);

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
#ifdef __sparc
		bge_reg_put32(bgep, MEMORY_ARBITER_MODE_REG,
		    MEMORY_ARBITER_ENABLE |
		    bge_reg_get32(bgep, MEMORY_ARBITER_MODE_REG));
#endif

#ifdef BGE_NETCONSOLE
		/* old-handshake post-reset notify BEFORE the NVRAM wait */
		if (!bgep->asf_newhandshake) {
			if ((asf_mode == ASF_MODE_INIT) ||
			    (asf_mode == ASF_MODE_POST_INIT)) {
				bge_asf_post_reset_old_mode(bgep,
				    BGE_INIT_RESET);
			} else {
				bge_asf_post_reset_old_mode(bgep,
				    BGE_SHUTDOWN_RESET);
			}
		}
#endif

		/* Wait for NVRAM init */
		i = 0;
		drv_usecwait(5000);
		mailbox = bge_nic_get32(bgep, BGE_FIRMWARE_MAILBOX);

		while ((mailbox != (uint32_t)
		    ~BGE_MAGIC_NUM_FIRMWARE_INIT_DONE) &&
		    (i < 10000)) {
			drv_usecwait(100);
			mailbox = bge_nic_get32(bgep,
			    BGE_FIRMWARE_MAILBOX);
			i++;
		}

#ifndef BGE_NETCONSOLE
		/* old-handshake post-reset notify AFTER the NVRAM wait */
		if (!bgep->asf_newhandshake) {
			if ((asf_mode == ASF_MODE_INIT) ||
			    (asf_mode == ASF_MODE_POST_INIT)) {

				bge_asf_post_reset_old_mode(bgep,
				    BGE_INIT_RESET);
			} else {
				bge_asf_post_reset_old_mode(bgep,
				    BGE_SHUTDOWN_RESET);
			}
		}
#endif
	}
#endif
	/*
	 * Steps 16-17: poll for firmware completion
	 */
	mac = bge_poll_firmware(bgep);

	/*
	 * Step 18: enable external memory -- doesn't apply.
	 *
	 * However we take the opportunity to set the MLCR anyway, as
	 * this register also controls the SEEPROM auto-access method
	 * which we may want to use later ...
	 *
	 * The proper value here depends on the way the chip is wired
	 * into the circuit board, as this register *also* controls which
	 * of the "Miscellaneous I/O" pins are driven as outputs and the
	 * values driven onto those pins!
	 *
	 * See also step 74 in the PRM ...
	 */
	bge_reg_put32(bgep, MISC_LOCAL_CONTROL_REG,
	    bgep->chipid.bge_mlcr_default);
	bge_reg_set32(bgep, SERIAL_EEPROM_ADDRESS_REG, SEEPROM_ACCESS_INIT);

	/*
	 * Step 20: clear the Ethernet MAC mode register
	 */
	bge_reg_put32(bgep, ETHERNET_MAC_MODE_REG, 0);

	/*
	 * 5720 only: clear the MAC clock override enable bit in the
	 * CPMU clock override register.  NOTE(review): presumably this
	 * hands MAC clock control back to the CPMU — confirm in PRM.
	 */
	if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
	    MHCR_CHIP_ASIC_REV_5720) {
		uint32_t regval = bge_reg_get32(bgep, CPMU_CLCK_ORIDE_REG);
		bge_reg_put32(bgep, CPMU_CLCK_ORIDE_REG,
		    regval & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/*
	 * Step 21: restore cache-line-size, latency timer, and
	 * subsystem ID registers to their original values (not
	 * those read into the local structure <chipid>, 'cos
	 * that was after they were cleared by the RESET).
	 *
	 * Note: the Subsystem Vendor/Device ID registers are not
	 * directly writable in config space, so we use the shadow
	 * copy in "Page Zero" of register space to restore them
	 * both in one go ...
	 */
	pci_config_put8(bgep->cfg_handle, PCI_CONF_CACHE_LINESZ,
	    bgep->chipid.clsize);
	pci_config_put8(bgep->cfg_handle, PCI_CONF_LATENCY_TIMER,
	    bgep->chipid.latency);
	bge_reg_put32(bgep, PCI_CONF_SUBVENID,
	    (bgep->chipid.subdev << 16) | bgep->chipid.subven);

	/*
	 * The SEND INDEX registers should be reset to zero by the
	 * global chip reset; if they're not, there'll be trouble
	 * later on.
	 */
	sx0 = bge_reg_get32(bgep, NIC_DIAG_SEND_INDEX_REG(0));
	if (sx0 != 0) {
		BGE_REPORT((bgep, "SEND INDEX - device didn't RESET"));
		bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
		retval = DDI_FAILURE;
	}

	/* Enable MSI code */
	if (bgep->intr_type == DDI_INTR_TYPE_MSI)
		bge_reg_set32(bgep, MSI_MODE_REG,
		    MSI_PRI_HIGHEST|MSI_MSI_ENABLE|MSI_ERROR_ATTENTION);

	/*
	 * On the first time through, save the factory-set MAC address
	 * (if any).  If bge_poll_firmware() above didn't return one
	 * (from a chip register) consider looking in the attached NV
	 * memory device, if any.  Once we have it, we save it in both
	 * register-image (64-bit) and byte-array forms.  All-zero and
	 * all-one addresses are not valid, and we refuse to stash those.
	 */
	if (bgep->bge_chip_state == BGE_CHIP_INITIAL) {
		if (mac == 0ULL)
			mac = bge_get_nvmac(bgep);
		if (mac != 0ULL && mac != ~0ULL) {
			bgep->chipid.hw_mac_addr = mac;
			/* unpack low-order bytes into the byte array */
			for (i = ETHERADDRL; i-- != 0; ) {
				bgep->chipid.vendor_addr.addr[i] = (uchar_t)mac;
				mac >>= 8;
			}
			bgep->chipid.vendor_addr.set = B_TRUE;
		}
	}

#ifdef BGE_IPMI_ASF
	/* new-handshake ASF firmware gets its post-reset notification here */
	if (bgep->asf_enabled && bgep->asf_newhandshake) {
		if (asf_mode != ASF_MODE_NONE) {
			if ((asf_mode == ASF_MODE_INIT) ||
			    (asf_mode == ASF_MODE_POST_INIT)) {

				bge_asf_post_reset_new_mode(bgep,
				    BGE_INIT_RESET);
			} else {
				bge_asf_post_reset_new_mode(bgep,
				    BGE_SHUTDOWN_RESET);
			}
		}
	}
#endif

	/*
	 * Record the new state
	 */
	bgep->chip_resets += 1;
	bgep->bge_chip_state = BGE_CHIP_RESET;
	return (retval);
}
3700 3815
3701 3816 /*
3702 3817 * bge_chip_start() -- start the chip transmitting and/or receiving,
3703 3818 * including enabling interrupts
3704 3819 */
3705 3820 int bge_chip_start(bge_t *bgep, boolean_t reset_phys);
3706 3821 #pragma no_inline(bge_chip_start)
3707 3822
3708 3823 void
3709 3824 bge_chip_coalesce_update(bge_t *bgep)
3710 3825 {
3711 3826 bge_reg_put32(bgep, SEND_COALESCE_MAX_BD_REG,
3712 3827 bgep->chipid.tx_count_norm);
3713 3828 bge_reg_put32(bgep, SEND_COALESCE_TICKS_REG,
3714 3829 bgep->chipid.tx_ticks_norm);
3715 3830 bge_reg_put32(bgep, RCV_COALESCE_MAX_BD_REG,
3716 3831 bgep->chipid.rx_count_norm);
3717 3832 bge_reg_put32(bgep, RCV_COALESCE_TICKS_REG,
3718 3833 bgep->chipid.rx_ticks_norm);
3719 3834 }
3720 3835
3721 3836 int
3722 3837 bge_chip_start(bge_t *bgep, boolean_t reset_phys)
3723 3838 {
3724 3839 uint32_t coalmode;
3725 3840 uint32_t ledctl;
3726 3841 uint32_t mtu;
3727 3842 uint32_t maxring;
3728 3843 uint32_t stats_mask;
3729 3844 uint32_t dma_wrprio;
3730 3845 uint64_t ring;
3731 3846 uint32_t regval;
3732 3847 int retval = DDI_SUCCESS;
3733 3848
3734 3849 BGE_TRACE(("bge_chip_start($%p)",
3735 3850 (void *)bgep));
3736 3851
3737 3852 ASSERT(mutex_owned(bgep->genlock));
3738 3853 ASSERT(bgep->bge_chip_state == BGE_CHIP_RESET);
3739 3854
3740 3855 /*
3741 3856 * Taken from Broadcom document 570X-PG102-R, pp 102-116.
3742 3857 * The document specifies 95 separate steps to fully
3743 3858 * initialise the chip!!!!
3744 3859 *
3745 3860 * The reset code above has already got us as far as step
3746 3861 * 21, so we continue with ...
3747 3862 *
3748 3863 * Step 22: clear the MAC statistics block
3749 3864 * (0x0300-0x0aff in NIC-local memory)
3750 3865 */
3751 3866 if (bgep->chipid.statistic_type == BGE_STAT_BLK)
3752 3867 bge_nic_zero(bgep, NIC_MEM_STATISTICS,
3753 3868 NIC_MEM_STATISTICS_SIZE);
3754 3869
3755 3870 /*
3756 3871 * Step 23: clear the status block (in host memory)
3757 3872 */
3758 3873 DMA_ZERO(bgep->status_block);
3759 3874
3760 3875 /*
3761 3876 * Step 24: set DMA read/write control register
3762 3877 */
3763 3878 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_PDRWCR,
3764 3879 bgep->chipid.bge_dma_rwctrl);
3765 3880
3766 3881 /*
3767 3882 * Step 25: Configure DMA endianness -- already done (16/17)
3768 3883 * Step 26: Configure Host-Based Send Rings
3769 3884 * Step 27: Indicate Host Stack Up
3770 3885 */
3771 3886 bge_reg_set32(bgep, MODE_CONTROL_REG,
3772 3887 MODE_HOST_SEND_BDS |
3773 3888 MODE_HOST_STACK_UP);
3774 3889
3775 3890 /*
3776 3891 * Step 28: Configure checksum options:
3777 3892 * Solaris supports the hardware default checksum options.
3778 3893 *
3779 3894 * Workaround for Incorrect pseudo-header checksum calculation.
3780 3895 */
3781 3896 if (bgep->chipid.flags & CHIP_FLAG_PARTIAL_CSUM)
3782 3897 bge_reg_set32(bgep, MODE_CONTROL_REG,
3783 3898 MODE_SEND_NO_PSEUDO_HDR_CSUM);
3784 3899
3785 3900 /*
3786 3901 * Step 29: configure Timer Prescaler. The value is always the
3787 3902 * same: the Core Clock frequency in MHz (66), minus 1, shifted
3788 3903 * into bits 7-1. Don't set bit 0, 'cos that's the RESET bit
3789 3904 * for the whole chip!
3790 3905 */
3791 3906 regval = bge_reg_get32(bgep, MISC_CONFIG_REG);
3792 3907 regval = (regval & 0xffffff00) | MISC_CONFIG_DEFAULT;
3793 3908 bge_reg_put32(bgep, MISC_CONFIG_REG, regval);
3794 3909
3795 3910 if (DEVICE_5906_SERIES_CHIPSETS(bgep)) {
3796 3911 drv_usecwait(40);
3797 3912 /* put PHY into ready state */
3798 3913 bge_reg_clr32(bgep, MISC_CONFIG_REG, MISC_CONFIG_EPHY_IDDQ);
3799 3914 (void) bge_reg_get32(bgep, MISC_CONFIG_REG); /* flush */
3800 3915 drv_usecwait(40);
3801 3916 }
3802 3917
3803 3918 /*
3804 3919 * Steps 30-31: Configure MAC local memory pool & DMA pool registers
3805 3920 *
3806 3921 * If the mbuf_length is specified as 0, we just leave these at
3807 3922 * their hardware defaults, rather than explicitly setting them.
3808 3923 * As the Broadcom HRM,driver better not change the parameters
3809 3924 * when the chipsets is 5705/5788/5721/5751/5714 and 5715.
3810 3925 */
3811 3926 if ((bgep->chipid.mbuf_length != 0) &&
3812 3927 (DEVICE_5704_SERIES_CHIPSETS(bgep))) {
3813 3928 bge_reg_put32(bgep, MBUF_POOL_BASE_REG,
3814 3929 bgep->chipid.mbuf_base);
3815 3930 bge_reg_put32(bgep, MBUF_POOL_LENGTH_REG,
3816 3931 bgep->chipid.mbuf_length);
3817 3932 bge_reg_put32(bgep, DMAD_POOL_BASE_REG,
3818 3933 DMAD_POOL_BASE_DEFAULT);
3819 3934 bge_reg_put32(bgep, DMAD_POOL_LENGTH_REG,
3820 3935 DMAD_POOL_LENGTH_DEFAULT);
3821 3936 }
3822 3937
3823 3938 /*
3824 3939 * Step 32: configure MAC memory pool watermarks
3825 3940 */
3826 3941 bge_reg_put32(bgep, RDMA_MBUF_LOWAT_REG,
3827 3942 bgep->chipid.mbuf_lo_water_rdma);
3828 3943 bge_reg_put32(bgep, MAC_RX_MBUF_LOWAT_REG,
3829 3944 bgep->chipid.mbuf_lo_water_rmac);
3830 3945 bge_reg_put32(bgep, MBUF_HIWAT_REG,
3831 3946 bgep->chipid.mbuf_hi_water);
3832 3947
3833 3948 /*
3834 3949 * Step 33: configure DMA resource watermarks
3835 3950 */
3836 3951 if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
|
↓ open down ↓ |
206 lines elided |
↑ open up ↑ |
3837 3952 bge_reg_put32(bgep, DMAD_POOL_LOWAT_REG,
3838 3953 bge_dmad_lo_water);
3839 3954 bge_reg_put32(bgep, DMAD_POOL_HIWAT_REG,
3840 3955 bge_dmad_hi_water);
3841 3956 }
3842 3957 bge_reg_put32(bgep, LOWAT_MAX_RECV_FRAMES_REG, bge_lowat_recv_frames);
3843 3958
3844 3959 /*
3845 3960 * Steps 34-36: enable buffer manager & internal h/w queues
3846 3961 */
3847 - if (!bge_chip_enable_engine(bgep, BUFFER_MANAGER_MODE_REG,
3848 - STATE_MACHINE_ATTN_ENABLE_BIT))
3962 +
3963 + regval = STATE_MACHINE_ATTN_ENABLE_BIT;
3964 + if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
3965 + MHCR_CHIP_ASIC_REV_5719)
3966 + regval |= BUFF_MGR_NO_TX_UNDERRUN;
3967 + if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
3968 + MHCR_CHIP_ASIC_REV_5717 ||
3969 + bgep->chipid.asic_rev == MHCR_CHIP_REV_5719_A0 ||
3970 + bgep->chipid.asic_rev == MHCR_CHIP_REV_5720_A0)
3971 + regval |= BUFF_MGR_MBUF_LOW_ATTN_ENABLE;
3972 + if (!bge_chip_enable_engine(bgep, BUFFER_MANAGER_MODE_REG, regval))
3849 3973 retval = DDI_FAILURE;
3850 3974 if (!bge_chip_enable_engine(bgep, FTQ_RESET_REG, 0))
3851 3975 retval = DDI_FAILURE;
3852 3976
3853 3977 /*
3854 3978 * Steps 37-39: initialise Receive Buffer (Producer) RCBs
3855 3979 */
3856 3980 if (DEVICE_5717_SERIES_CHIPSETS(bgep)) {
3857 3981 buff_ring_t *brp = &bgep->buff[BGE_STD_BUFF_RING];
3858 3982 bge_reg_put64(bgep, STD_RCV_BD_RING_RCB_REG,
3859 3983 brp->desc.cookie.dmac_laddress);
3860 3984 bge_reg_put32(bgep, STD_RCV_BD_RING_RCB_REG + 8,
3861 3985 (brp->desc.nslots) << 16 | brp->buf[0].size << 2);
3862 3986 bge_reg_put32(bgep, STD_RCV_BD_RING_RCB_REG + 0xc,
3863 3987 NIC_MEM_SHADOW_BUFF_STD_5717);
3864 3988 } else
3865 3989 bge_reg_putrcb(bgep, STD_RCV_BD_RING_RCB_REG,
3866 3990 &bgep->buff[BGE_STD_BUFF_RING].hw_rcb);
3867 3991
3868 3992 if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
3869 3993 bge_reg_putrcb(bgep, JUMBO_RCV_BD_RING_RCB_REG,
3870 3994 &bgep->buff[BGE_JUMBO_BUFF_RING].hw_rcb);
3871 3995 bge_reg_putrcb(bgep, MINI_RCV_BD_RING_RCB_REG,
3872 3996 &bgep->buff[BGE_MINI_BUFF_RING].hw_rcb);
3873 3997 }
3874 3998
3875 3999 /*
3876 4000 * Step 40: set Receive Buffer Descriptor Ring replenish thresholds
3877 4001 */
3878 4002 bge_reg_put32(bgep, STD_RCV_BD_REPLENISH_REG, bge_replenish_std);
3879 4003 if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
3880 4004 bge_reg_put32(bgep, JUMBO_RCV_BD_REPLENISH_REG,
3881 4005 bge_replenish_jumbo);
3882 4006 bge_reg_put32(bgep, MINI_RCV_BD_REPLENISH_REG,
3883 4007 bge_replenish_mini);
3884 4008 }
3885 4009
3886 4010 /*
3887 4011 * Steps 41-43: clear Send Ring Producer Indices and initialise
3888 4012 * Send Producer Rings (0x0100-0x01ff in NIC-local memory)
3889 4013 */
3890 4014 if (DEVICE_5704_SERIES_CHIPSETS(bgep))
3891 4015 maxring = BGE_SEND_RINGS_MAX;
3892 4016 else
3893 4017 maxring = BGE_SEND_RINGS_MAX_5705;
3894 4018 for (ring = 0; ring < maxring; ++ring) {
3895 4019 bge_mbx_put(bgep, SEND_RING_HOST_INDEX_REG(ring), 0);
3896 4020 bge_mbx_put(bgep, SEND_RING_NIC_INDEX_REG(ring), 0);
3897 4021 bge_nic_putrcb(bgep, NIC_MEM_SEND_RING(ring),
3898 4022 &bgep->send[ring].hw_rcb);
3899 4023 }
3900 4024
3901 4025 /*
3902 4026 * Steps 44-45: initialise Receive Return Rings
3903 4027 * (0x0200-0x02ff in NIC-local memory)
3904 4028 */
3905 4029 if (DEVICE_5704_SERIES_CHIPSETS(bgep))
3906 4030 maxring = BGE_RECV_RINGS_MAX;
3907 4031 else
3908 4032 maxring = BGE_RECV_RINGS_MAX_5705;
3909 4033 for (ring = 0; ring < maxring; ++ring)
3910 4034 bge_nic_putrcb(bgep, NIC_MEM_RECV_RING(ring),
3911 4035 &bgep->recv[ring].hw_rcb);
3912 4036
3913 4037 /*
3914 4038 * Step 46: initialise Receive Buffer (Producer) Ring indexes
3915 4039 */
3916 4040 bge_mbx_put(bgep, RECV_STD_PROD_INDEX_REG, 0);
3917 4041 if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
3918 4042 bge_mbx_put(bgep, RECV_JUMBO_PROD_INDEX_REG, 0);
3919 4043 bge_mbx_put(bgep, RECV_MINI_PROD_INDEX_REG, 0);
3920 4044 }
3921 4045 /*
3922 4046 * Step 47: configure the MAC unicast address
3923 4047 * Step 48: configure the random backoff seed
3924 4048 * Step 96: set up multicast filters
3925 4049 */
3926 4050 #ifdef BGE_IPMI_ASF
3927 4051 if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE)
3928 4052 #else
3929 4053 if (bge_chip_sync(bgep) == DDI_FAILURE)
3930 4054 #endif
3931 4055 retval = DDI_FAILURE;
|
↓ open down ↓ |
73 lines elided |
↑ open up ↑ |
3932 4056
3933 4057 /*
3934 4058 * Step 49: configure the MTU
3935 4059 */
3936 4060 mtu = bgep->chipid.ethmax_size+ETHERFCSL+VLAN_TAGSZ;
3937 4061 bge_reg_put32(bgep, MAC_RX_MTU_SIZE_REG, mtu);
3938 4062
3939 4063 /*
3940 4064 * Step 50: configure the IPG et al
3941 4065 */
3942 - bge_reg_put32(bgep, MAC_TX_LENGTHS_REG, MAC_TX_LENGTHS_DEFAULT);
4066 + regval = MAC_TX_LENGTHS_DEFAULT;
4067 + if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev)
4068 + == MHCR_CHIP_ASIC_REV_5720)
4069 + regval |= bge_reg_get32(bgep, MAC_TX_LENGTHS_REG) &
4070 + (MAC_TX_LENGTHS_JMB_FRM_LEN_MSK |
4071 + MAC_TX_LENGTHS_CNT_DWN_VAL_MSK);
4072 + bge_reg_put32(bgep, MAC_TX_LENGTHS_REG, regval);
3943 4073
3944 4074 /*
3945 4075 * Step 51: configure the default Rx Return Ring
3946 4076 */
3947 4077 bge_reg_put32(bgep, RCV_RULES_CONFIG_REG, RCV_RULES_CONFIG_DEFAULT);
3948 4078
3949 4079 /*
3950 4080 * Steps 52-54: configure Receive List Placement,
3951 4081 * and enable Receive List Placement Statistics
3952 4082 */
3953 4083 bge_reg_put32(bgep, RCV_LP_CONFIG_REG,
3954 4084 RCV_LP_CONFIG(bgep->chipid.rx_rings));
3955 4085 switch (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev)) {
3956 4086 case MHCR_CHIP_ASIC_REV_5700:
3957 4087 case MHCR_CHIP_ASIC_REV_5701:
3958 4088 case MHCR_CHIP_ASIC_REV_5703:
3959 4089 case MHCR_CHIP_ASIC_REV_5704:
3960 4090 bge_reg_put32(bgep, RCV_LP_STATS_ENABLE_MASK_REG, ~0);
3961 4091 break;
3962 4092 case MHCR_CHIP_ASIC_REV_5705:
3963 4093 break;
3964 4094 default:
3965 4095 stats_mask = bge_reg_get32(bgep, RCV_LP_STATS_ENABLE_MASK_REG);
3966 4096 stats_mask &= ~RCV_LP_STATS_DISABLE_MACTQ;
3967 4097 bge_reg_put32(bgep, RCV_LP_STATS_ENABLE_MASK_REG, stats_mask);
3968 4098 break;
3969 4099 }
3970 4100 bge_reg_set32(bgep, RCV_LP_STATS_CONTROL_REG, RCV_LP_STATS_ENABLE);
3971 4101
3972 4102 if (bgep->chipid.rx_rings > 1)
3973 4103 bge_init_recv_rule(bgep);
3974 4104
3975 4105 /*
3976 4106 * Steps 55-56: enable Send Data Initiator Statistics
3977 4107 */
3978 4108 bge_reg_put32(bgep, SEND_INIT_STATS_ENABLE_MASK_REG, ~0);
3979 4109 if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
3980 4110 bge_reg_put32(bgep, SEND_INIT_STATS_CONTROL_REG,
3981 4111 SEND_INIT_STATS_ENABLE | SEND_INIT_STATS_FASTER);
3982 4112 } else {
3983 4113 bge_reg_put32(bgep, SEND_INIT_STATS_CONTROL_REG,
3984 4114 SEND_INIT_STATS_ENABLE);
3985 4115 }
3986 4116 /*
3987 4117 * Steps 57-58: stop (?) the Host Coalescing Engine
3988 4118 */
3989 4119 if (!bge_chip_disable_engine(bgep, HOST_COALESCE_MODE_REG, ~0))
3990 4120 retval = DDI_FAILURE;
3991 4121
3992 4122 /*
3993 4123 * Steps 59-62: initialise Host Coalescing parameters
3994 4124 */
3995 4125 bge_chip_coalesce_update(bgep);
3996 4126 if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
3997 4127 bge_reg_put32(bgep, SEND_COALESCE_INT_BD_REG,
3998 4128 bge_tx_count_intr);
3999 4129 bge_reg_put32(bgep, SEND_COALESCE_INT_TICKS_REG,
4000 4130 bge_tx_ticks_intr);
4001 4131 bge_reg_put32(bgep, RCV_COALESCE_INT_BD_REG,
4002 4132 bge_rx_count_intr);
4003 4133 bge_reg_put32(bgep, RCV_COALESCE_INT_TICKS_REG,
4004 4134 bge_rx_ticks_intr);
4005 4135 }
4006 4136
4007 4137 /*
4008 4138 * Steps 63-64: initialise status block & statistics
4009 4139 * host memory addresses
4010 4140 * The statistic block does not exist in some chipsets
4011 4141 * Step 65: initialise Statistics Coalescing Tick Counter
4012 4142 */
4013 4143 bge_reg_put64(bgep, STATUS_BLOCK_HOST_ADDR_REG,
4014 4144 bgep->status_block.cookie.dmac_laddress);
4015 4145
4016 4146 /*
4017 4147 * Steps 66-67: initialise status block & statistics
4018 4148 * NIC-local memory addresses
4019 4149 */
4020 4150 if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
4021 4151 bge_reg_put64(bgep, STATISTICS_HOST_ADDR_REG,
4022 4152 bgep->statistics.cookie.dmac_laddress);
4023 4153 bge_reg_put32(bgep, STATISTICS_TICKS_REG,
4024 4154 STATISTICS_TICKS_DEFAULT);
4025 4155 bge_reg_put32(bgep, STATUS_BLOCK_BASE_ADDR_REG,
4026 4156 NIC_MEM_STATUS_BLOCK);
4027 4157 bge_reg_put32(bgep, STATISTICS_BASE_ADDR_REG,
4028 4158 NIC_MEM_STATISTICS);
4029 4159 }
4030 4160
4031 4161 /*
4032 4162 * Steps 68-71: start the Host Coalescing Engine, the Receive BD
4033 4163 * Completion Engine, the Receive List Placement Engine, and the
4034 4164 * Receive List selector.Pay attention:0x3400 is not exist in BCM5714
4035 4165 * and BCM5715.
4036 4166 */
4037 4167 if (bgep->chipid.tx_rings <= COALESCE_64_BYTE_RINGS &&
4038 4168 bgep->chipid.rx_rings <= COALESCE_64_BYTE_RINGS)
4039 4169 coalmode = COALESCE_64_BYTE_STATUS;
4040 4170 else
4041 4171 coalmode = 0;
4042 4172 if (DEVICE_5717_SERIES_CHIPSETS(bgep))
4043 4173 coalmode = COALESCE_CLR_TICKS_RX;
4044 4174 if (!bge_chip_enable_engine(bgep, HOST_COALESCE_MODE_REG, coalmode))
4045 4175 retval = DDI_FAILURE;
4046 4176 if (!bge_chip_enable_engine(bgep, RCV_BD_COMPLETION_MODE_REG,
4047 4177 STATE_MACHINE_ATTN_ENABLE_BIT))
4048 4178 retval = DDI_FAILURE;
4049 4179 if (!bge_chip_enable_engine(bgep, RCV_LIST_PLACEMENT_MODE_REG, 0))
4050 4180 retval = DDI_FAILURE;
4051 4181
4052 4182 if (DEVICE_5704_SERIES_CHIPSETS(bgep))
4053 4183 if (!bge_chip_enable_engine(bgep, RCV_LIST_SELECTOR_MODE_REG,
4054 4184 STATE_MACHINE_ATTN_ENABLE_BIT))
4055 4185 retval = DDI_FAILURE;
4056 4186
4057 4187 /*
4058 4188 * Step 72: Enable MAC DMA engines
4059 4189 * Step 73: Clear & enable MAC statistics
4060 4190 */
4061 4191 bge_reg_set32(bgep, ETHERNET_MAC_MODE_REG,
4062 4192 ETHERNET_MODE_ENABLE_FHDE |
4063 4193 ETHERNET_MODE_ENABLE_RDE |
4064 4194 ETHERNET_MODE_ENABLE_TDE);
4065 4195 bge_reg_set32(bgep, ETHERNET_MAC_MODE_REG,
4066 4196 ETHERNET_MODE_ENABLE_TX_STATS |
4067 4197 ETHERNET_MODE_ENABLE_RX_STATS |
4068 4198 ETHERNET_MODE_CLEAR_TX_STATS |
4069 4199 ETHERNET_MODE_CLEAR_RX_STATS);
4070 4200
4071 4201 /*
4072 4202 * Step 74: configure the MLCR (Miscellaneous Local Control
4073 4203 * Register); not required, as we set up the MLCR in step 10
4074 4204 * (part of the reset code) above.
4075 4205 *
4076 4206 * Step 75: clear Interrupt Mailbox 0
4077 4207 */
4078 4208 bge_mbx_put(bgep, INTERRUPT_MBOX_0_REG, 0);
4079 4209
4080 4210 /*
4081 4211 * Steps 76-87: Gentlemen, start your engines ...
4082 4212 *
4083 4213 * Enable the DMA Completion Engine, the Write DMA Engine,
4084 4214 * the Read DMA Engine, Receive Data Completion Engine,
4085 4215 * the MBuf Cluster Free Engine, the Send Data Completion Engine,
4086 4216 * the Send BD Completion Engine, the Receive BD Initiator Engine,
|
↓ open down ↓ |
134 lines elided |
↑ open up ↑ |
4087 4217 * the Receive Data Initiator Engine, the Send Data Initiator Engine,
4088 4218 * the Send BD Initiator Engine, and the Send BD Selector Engine.
4089 4219 *
4090 4220 * Beware exhaust fumes?
4091 4221 */
4092 4222 if (DEVICE_5704_SERIES_CHIPSETS(bgep))
4093 4223 if (!bge_chip_enable_engine(bgep, DMA_COMPLETION_MODE_REG, 0))
4094 4224 retval = DDI_FAILURE;
4095 4225 dma_wrprio = (bge_dma_wrprio << DMA_PRIORITY_SHIFT) |
4096 4226 ALL_DMA_ATTN_BITS;
4097 - if ((MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4098 - MHCR_CHIP_ASIC_REV_5755) ||
4099 - DEVICE_5723_SERIES_CHIPSETS(bgep) ||
4100 - DEVICE_5906_SERIES_CHIPSETS(bgep)) {
4227 + if (DEVICE_IS_5755_PLUS(bgep))
4101 4228 dma_wrprio |= DMA_STATUS_TAG_FIX_CQ12384;
4102 - }
4103 4229 if (!bge_chip_enable_engine(bgep, WRITE_DMA_MODE_REG,
4104 4230 dma_wrprio))
4105 4231 retval = DDI_FAILURE;
4232 + if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4233 + MHCR_CHIP_ASIC_REV_5761 ||
4234 + MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4235 + MHCR_CHIP_ASIC_REV_5784 ||
4236 + MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4237 + MHCR_CHIP_ASIC_REV_5785 ||
4238 + MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4239 + MHCR_CHIP_ASIC_REV_57780 ||
4240 + DEVICE_IS_57765_PLUS(bgep)) {
4241 + regval = bge_reg_get32(bgep, READ_DMA_RESERVED_CONTROL_REG);
4242 + if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4243 + MHCR_CHIP_ASIC_REV_5719 ||
4244 + MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4245 + MHCR_CHIP_ASIC_REV_5720) {
4246 + regval &= ~(RDMA_RSRVCTRL_TXMRGN_MASK |
4247 + RDMA_RSRVCTRL_FIFO_LWM_MASK |
4248 + RDMA_RSRVCTRL_FIFO_HWM_MASK);
4249 + regval |= RDMA_RSRVCTRL_TXMRGN_320B |
4250 + RDMA_RSRVCTRL_FIFO_LWM_1_5K |
4251 + RDMA_RSRVCTRL_FIFO_HWM_1_5K;
4252 + }
4253 + bge_reg_put32(bgep, READ_DMA_RESERVED_CONTROL_REG,
4254 + regval | RDMA_RSRVCTRL_FIFO_OFLW_FIX);
4255 + }
4106 4256 if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
4107 4257 DEVICE_5717_SERIES_CHIPSETS(bgep))
4108 4258 bge_dma_rdprio = 0;
4259 + regval = bge_dma_rdprio << DMA_PRIORITY_SHIFT;
4260 + if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4261 + MHCR_CHIP_ASIC_REV_5720)
4262 + regval |= bge_reg_get32(bgep, READ_DMA_MODE_REG) &
4263 + DMA_H2BNC_VLAN_DET;
4109 4264 if (!bge_chip_enable_engine(bgep, READ_DMA_MODE_REG,
4110 - (bge_dma_rdprio << DMA_PRIORITY_SHIFT) | ALL_DMA_ATTN_BITS))
4265 + regval | ALL_DMA_ATTN_BITS))
4111 4266 retval = DDI_FAILURE;
4112 4267 if (!bge_chip_enable_engine(bgep, RCV_DATA_COMPLETION_MODE_REG,
4113 4268 STATE_MACHINE_ATTN_ENABLE_BIT))
4114 4269 retval = DDI_FAILURE;
4115 4270 if (DEVICE_5704_SERIES_CHIPSETS(bgep))
4116 4271 if (!bge_chip_enable_engine(bgep,
4117 4272 MBUF_CLUSTER_FREE_MODE_REG, 0))
4118 4273 retval = DDI_FAILURE;
4119 4274 if (!bge_chip_enable_engine(bgep, SEND_DATA_COMPLETION_MODE_REG, 0))
4120 4275 retval = DDI_FAILURE;
4121 4276 if (!bge_chip_enable_engine(bgep, SEND_BD_COMPLETION_MODE_REG,
4122 4277 STATE_MACHINE_ATTN_ENABLE_BIT))
4123 4278 retval = DDI_FAILURE;
4124 4279 if (!bge_chip_enable_engine(bgep, RCV_BD_INITIATOR_MODE_REG,
4125 4280 RCV_BD_DISABLED_RING_ATTN))
4126 4281 retval = DDI_FAILURE;
4127 4282 if (!bge_chip_enable_engine(bgep, RCV_DATA_BD_INITIATOR_MODE_REG,
4128 4283 RCV_DATA_BD_ILL_RING_ATTN))
4129 4284 retval = DDI_FAILURE;
4130 4285 if (!bge_chip_enable_engine(bgep, SEND_DATA_INITIATOR_MODE_REG, 0))
4131 4286 retval = DDI_FAILURE;
4132 4287 if (!bge_chip_enable_engine(bgep, SEND_BD_INITIATOR_MODE_REG,
|
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
4133 4288 STATE_MACHINE_ATTN_ENABLE_BIT))
4134 4289 retval = DDI_FAILURE;
4135 4290 if (!bge_chip_enable_engine(bgep, SEND_BD_SELECTOR_MODE_REG,
4136 4291 STATE_MACHINE_ATTN_ENABLE_BIT))
4137 4292 retval = DDI_FAILURE;
4138 4293
4139 4294 /*
4140 4295 * Step 88: download firmware -- doesn't apply
4141 4296 * Steps 89-90: enable Transmit & Receive MAC Engines
4142 4297 */
4143 - if (!bge_chip_enable_engine(bgep, TRANSMIT_MAC_MODE_REG, 0))
4298 + if (DEVICE_IS_5755_PLUS(bgep) ||
4299 + MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4300 + MHCR_CHIP_ASIC_REV_5906) {
4301 + regval = bge_reg_get32(bgep, TRANSMIT_MAC_MODE_REG);
4302 + regval |= TRANSMIT_MODE_MBUF_LOCKUP_FIX;
4303 + } else {
4304 + regval = 0;
4305 + }
4306 + if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4307 + MHCR_CHIP_ASIC_REV_5720) {
4308 + regval &= ~(TRANSMIT_MODE_HTX2B_JMB_FRM_LEN |
4309 + TRANSMIT_MODE_HTX2B_CNT_DN_MODE);
4310 + regval |= bge_reg_get32(bgep, TRANSMIT_MAC_MODE_REG) &
4311 + (TRANSMIT_MODE_HTX2B_JMB_FRM_LEN |
4312 + TRANSMIT_MODE_HTX2B_CNT_DN_MODE);
4313 + }
4314 + if (!bge_chip_enable_engine(bgep, TRANSMIT_MAC_MODE_REG, regval))
4144 4315 retval = DDI_FAILURE;
4145 4316 #ifdef BGE_IPMI_ASF
4146 4317 if (!bgep->asf_enabled) {
4147 4318 if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG,
4148 4319 RECEIVE_MODE_KEEP_VLAN_TAG))
4149 4320 retval = DDI_FAILURE;
4150 4321 } else {
4151 4322 if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG, 0))
4152 4323 retval = DDI_FAILURE;
4153 4324 }
4154 4325 #else
4155 4326 if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG,
4156 4327 RECEIVE_MODE_KEEP_VLAN_TAG))
4157 4328 retval = DDI_FAILURE;
4158 4329 #endif
4159 4330
4160 4331 /*
4161 4332 * Step 91: disable auto-polling of PHY status
4162 4333 */
4163 4334 bge_reg_put32(bgep, MI_MODE_REG, MI_MODE_DEFAULT);
4164 4335
4165 4336 /*
4166 4337 * Step 92: configure D0 power state (not required)
4167 4338 * Step 93: initialise LED control register ()
4168 4339 */
4169 4340 ledctl = LED_CONTROL_DEFAULT;
4170 4341 switch (bgep->chipid.device) {
4171 4342 case DEVICE_ID_5700:
4172 4343 case DEVICE_ID_5700x:
4173 4344 case DEVICE_ID_5701:
4174 4345 /*
4175 4346 * Switch to 5700 (MAC) mode on these older chips
4176 4347 */
4177 4348 ledctl &= ~LED_CONTROL_LED_MODE_MASK;
4178 4349 ledctl |= LED_CONTROL_LED_MODE_5700;
4179 4350 break;
4180 4351
4181 4352 default:
4182 4353 break;
4183 4354 }
4184 4355 bge_reg_put32(bgep, ETHERNET_MAC_LED_CONTROL_REG, ledctl);
4185 4356
4186 4357 /*
4187 4358 * Step 94: activate link
4188 4359 */
4189 4360 bge_reg_put32(bgep, MI_STATUS_REG, MI_STATUS_LINK);
4190 4361
4191 4362 /*
4192 4363 * Step 95: set up physical layer (PHY/SerDes)
4193 4364 * restart autoneg (if required)
4194 4365 */
4195 4366 if (reset_phys)
4196 4367 if (bge_phys_update(bgep) == DDI_FAILURE)
4197 4368 retval = DDI_FAILURE;
4198 4369
4199 4370 /*
4200 4371 * Extra step (DSG): hand over all the Receive Buffers to the chip
4201 4372 */
4202 4373 for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
4203 4374 bge_mbx_put(bgep, bgep->buff[ring].chip_mbx_reg,
4204 4375 bgep->buff[ring].rf_next);
4205 4376
4206 4377 /*
4207 4378 * MSI bits:The least significant MSI 16-bit word.
4208 4379 * ISR will be triggered different.
4209 4380 */
4210 4381 if (bgep->intr_type == DDI_INTR_TYPE_MSI)
4211 4382 bge_reg_set32(bgep, HOST_COALESCE_MODE_REG, 0x70);
4212 4383
4213 4384 /*
4214 4385 * Extra step (DSG): select which interrupts are enabled
4215 4386 *
4216 4387 * Program the Ethernet MAC engine to signal attention on
4217 4388 * Link Change events, then enable interrupts on MAC, DMA,
4218 4389 * and FLOW attention signals.
4219 4390 */
4220 4391 bge_reg_set32(bgep, ETHERNET_MAC_EVENT_ENABLE_REG,
4221 4392 ETHERNET_EVENT_LINK_INT |
4222 4393 ETHERNET_STATUS_PCS_ERROR_INT);
4223 4394 #ifdef BGE_IPMI_ASF
4224 4395 if (bgep->asf_enabled) {
4225 4396 bge_reg_set32(bgep, MODE_CONTROL_REG,
4226 4397 MODE_INT_ON_FLOW_ATTN |
4227 4398 MODE_INT_ON_DMA_ATTN |
4228 4399 MODE_HOST_STACK_UP|
4229 4400 MODE_INT_ON_MAC_ATTN);
4230 4401 } else {
4231 4402 #endif
4232 4403 bge_reg_set32(bgep, MODE_CONTROL_REG,
4233 4404 MODE_INT_ON_FLOW_ATTN |
4234 4405 MODE_INT_ON_DMA_ATTN |
4235 4406 MODE_INT_ON_MAC_ATTN);
|
↓ open down ↓ |
82 lines elided |
↑ open up ↑ |
4236 4407 #ifdef BGE_IPMI_ASF
4237 4408 }
4238 4409 #endif
4239 4410
4240 4411 /*
4241 4412 * Step 97: enable PCI interrupts!!!
4242 4413 */
4243 4414 if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
4244 4415 bge_cfg_clr32(bgep, PCI_CONF_BGE_MHCR,
4245 4416 bgep->chipid.mask_pci_int);
4246 -
4247 4417 /*
4248 4418 * All done!
4249 4419 */
4250 4420 bgep->bge_chip_state = BGE_CHIP_RUNNING;
4251 4421 return (retval);
4252 4422 }
4253 4423
4254 4424
4255 4425 /*
4256 4426 * ========== Hardware interrupt handler ==========
4257 4427 */
4258 4428
4259 4429 #undef BGE_DBG
4260 4430 #define BGE_DBG BGE_DBG_INT /* debug flag for this code */
4261 4431
/*
 * Sync the status block, then atomically clear the specified bits in
 * the <flags-and-tag> field of the status block, returning the value
 * of the <tag> and the <flags> before the bits were cleared.
 */
4268 4438 static int bge_status_sync(bge_t *bgep, uint64_t bits, uint64_t *flags);
4269 4439 #pragma inline(bge_status_sync)
4270 4440
4271 4441 static int
4272 4442 bge_status_sync(bge_t *bgep, uint64_t bits, uint64_t *flags)
4273 4443 {
4274 4444 bge_status_t *bsp;
4275 4445 int retval;
4276 4446
4277 4447 BGE_TRACE(("bge_status_sync($%p, 0x%llx)",
4278 4448 (void *)bgep, bits));
4279 4449
4280 4450 ASSERT(bgep->bge_guard == BGE_GUARD);
4281 4451
4282 4452 DMA_SYNC(bgep->status_block, DDI_DMA_SYNC_FORKERNEL);
4283 4453 retval = bge_check_dma_handle(bgep, bgep->status_block.dma_hdl);
4284 4454 if (retval != DDI_FM_OK)
4285 4455 return (retval);
4286 4456
4287 4457 bsp = DMA_VPTR(bgep->status_block);
4288 4458 *flags = bge_atomic_clr64(&bsp->flags_n_tag, bits);
4289 4459
4290 4460 BGE_DEBUG(("bge_status_sync($%p, 0x%llx) returning 0x%llx",
4291 4461 (void *)bgep, bits, *flags));
4292 4462
4293 4463 return (retval);
4294 4464 }
4295 4465
4296 4466 void bge_wake_factotum(bge_t *bgep);
4297 4467 #pragma inline(bge_wake_factotum)
4298 4468
4299 4469 void
4300 4470 bge_wake_factotum(bge_t *bgep)
4301 4471 {
4302 4472 mutex_enter(bgep->softintrlock);
4303 4473 if (bgep->factotum_flag == 0) {
4304 4474 bgep->factotum_flag = 1;
4305 4475 ddi_trigger_softintr(bgep->factotum_id);
4306 4476 }
4307 4477 mutex_exit(bgep->softintrlock);
4308 4478 }
4309 4479
4310 4480 /*
4311 4481 * bge_intr() -- handle chip interrupts
4312 4482 */
4313 4483 uint_t bge_intr(caddr_t arg1, caddr_t arg2);
4314 4484 #pragma no_inline(bge_intr)
4315 4485
uint_t
bge_intr(caddr_t arg1, caddr_t arg2)
{
	bge_t *bgep = (void *)arg1;	/* private device info */
	bge_status_t *bsp;
	uint64_t flags;
	uint32_t regval;
	uint_t result;
	int retval, loop_cnt = 0;

	BGE_TRACE(("bge_intr($%p) ($%p)", arg1, arg2));

	/*
	 * GLD v2 checks that s/w setup is complete before passing
	 * interrupts to this routine, thus eliminating the old
	 * (and well-known) race condition around ddi_add_intr()
	 */
	ASSERT(bgep->progress & PROGRESS_HWINT);

	result = DDI_INTR_UNCLAIMED;
	mutex_enter(bgep->genlock);

	if (bgep->intr_type == DDI_INTR_TYPE_FIXED) {
		/*
		 * Check whether chip's says it's asserting #INTA;
		 * if not, don't process or claim the interrupt.
		 *
		 * Note that the PCI signal is active low, so the
		 * bit is *zero* when the interrupt is asserted.
		 */
		regval = bge_reg_get32(bgep, MISC_LOCAL_CONTROL_REG);
		if (!(DEVICE_5717_SERIES_CHIPSETS(bgep)) &&
		    (regval & MLCR_INTA_STATE)) {
			/*
			 * Not ours; verify the access handle is still
			 * healthy before bailing out unclaimed.
			 */
			if (bge_check_acc_handle(bgep, bgep->io_handle)
			    != DDI_FM_OK)
				goto chip_stop;
			mutex_exit(bgep->genlock);
			return (result);
		}

		/*
		 * Block further PCI interrupts ...
		 */
		bge_reg_set32(bgep, PCI_CONF_BGE_MHCR,
		    bgep->chipid.mask_pci_int);

	} else {
		/*
		 * Check MSI status
		 */
		regval = bge_reg_get32(bgep, MSI_STATUS_REG);
		if (regval & MSI_ERROR_ATTENTION) {
			BGE_REPORT((bgep, "msi error attention,"
			    " status=0x%x", regval));
			bge_reg_put32(bgep, MSI_STATUS_REG, regval);
		}
	}

	/* From this point on the interrupt is ours to handle */
	result = DDI_INTR_CLAIMED;

	BGE_DEBUG(("bge_intr($%p) ($%p) regval 0x%08x", arg1, arg2, regval));

	/*
	 * Sync the status block and grab the flags-n-tag from it.
	 * We count the number of interrupts where there doesn't
	 * seem to have been a DMA update of the status block; if
	 * it *has* been updated, the counter will be cleared in
	 * the while() loop below ...
	 */
	bgep->missed_dmas += 1;
	bsp = DMA_VPTR(bgep->status_block);
	for (loop_cnt = 0; loop_cnt < bge_intr_max_loop; loop_cnt++) {
		if (bgep->bge_chip_state != BGE_CHIP_RUNNING) {
			/*
			 * bge_chip_stop() may have freed dma area etc
			 * while we were in this interrupt handler -
			 * better not call bge_status_sync()
			 */
			(void) bge_check_acc_handle(bgep,
			    bgep->io_handle);
			mutex_exit(bgep->genlock);
			return (DDI_INTR_CLAIMED);
		}
		retval = bge_status_sync(bgep, STATUS_FLAG_UPDATED,
		    &flags);
		if (retval != DDI_FM_OK) {
			bgep->bge_dma_error = B_TRUE;
			goto chip_stop;
		}

		/* No fresh status-block update: nothing more to do */
		if (!(flags & STATUS_FLAG_UPDATED))
			break;

		/*
		 * Tell the chip that we're processing the interrupt
		 */
		bge_mbx_put(bgep, INTERRUPT_MBOX_0_REG,
		    INTERRUPT_MBOX_DISABLE(flags));
		if (bge_check_acc_handle(bgep, bgep->io_handle) !=
		    DDI_FM_OK)
			goto chip_stop;

		/*
		 * Drop the mutex while we:
		 * 	Receive any newly-arrived packets
		 *	Recycle any newly-finished send buffers
		 */
		bgep->bge_intr_running = B_TRUE;
		mutex_exit(bgep->genlock);
		bge_receive(bgep, bsp);
		(void) bge_recycle(bgep, bsp);
		mutex_enter(bgep->genlock);
		bgep->bge_intr_running = B_FALSE;

		/*
		 * Tell the chip we've finished processing, and
		 * give it the tag that we got from the status
		 * block earlier, so that it knows just how far
		 * we've gone. If it's got more for us to do,
		 * it will now update the status block and try
		 * to assert an interrupt (but we've got the
		 * #INTA blocked at present). If we see the
		 * update, we'll loop around to do some more.
		 * Eventually we'll get out of here ...
		 */
		bge_mbx_put(bgep, INTERRUPT_MBOX_0_REG,
		    INTERRUPT_MBOX_ENABLE(flags));
		/*
		 * NOTE(review): on PCI-E the mailbox is read back,
		 * presumably to flush the posted write — confirm
		 * against the chip errata before changing.
		 */
		if (bgep->chipid.pci_type == BGE_PCI_E)
			(void) bge_mbx_get(bgep, INTERRUPT_MBOX_0_REG);
		bgep->missed_dmas = 0;
	}

	/*
	 * Check for exceptional conditions that we need to handle
	 *
	 * Link status changed
	 * Status block not updated
	 */
	if (flags & STATUS_FLAG_LINK_CHANGED)
		bge_wake_factotum(bgep);

	if (bgep->missed_dmas) {
		/*
		 * Probably due to the internal status tag not
		 * being reset. Force a status block update now;
		 * this should ensure that we get an update and
		 * a new interrupt. After that, we should be in
		 * sync again ...
		 */
		BGE_REPORT((bgep, "interrupt: flags 0x%llx - "
		    "not updated?", flags));
		bgep->missed_updates++;
		bge_reg_set32(bgep, HOST_COALESCE_MODE_REG,
		    COALESCE_NOW);

		if (bgep->missed_dmas >= bge_dma_miss_limit) {
			/*
			 * If this happens multiple times in a row,
			 * it means DMA is just not working. Maybe
			 * the chip's failed, or maybe there's a
			 * problem on the PCI bus or in the host-PCI
			 * bridge (Tomatillo).
			 *
			 * At all events, we want to stop further
			 * interrupts and let the recovery code take
			 * over to see whether anything can be done
			 * about it ...
			 */
			bge_fm_ereport(bgep,
			    DDI_FM_DEVICE_BADINT_LIMIT);
			goto chip_stop;
		}
	}

	/*
	 * Reenable assertion of #INTA, unless there's a DMA fault
	 */
	if (bgep->intr_type == DDI_INTR_TYPE_FIXED) {
		bge_reg_clr32(bgep, PCI_CONF_BGE_MHCR,
		    bgep->chipid.mask_pci_int);
		if (bge_check_acc_handle(bgep, bgep->cfg_handle) !=
		    DDI_FM_OK)
			goto chip_stop;
	}

	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		goto chip_stop;

	mutex_exit(bgep->genlock);
	return (result);

	/*
	 * Fatal-error path: stop the chip (it will be reset by the
	 * factotum on a later pass) and still claim the interrupt.
	 */
chip_stop:
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && bgep->asf_status == ASF_STAT_RUN) {
		/*
		 * We must stop ASF heart beat before
		 * bge_chip_stop(), otherwise some
		 * computers (ex. IBM HS20 blade
		 * server) may crash.
		 */
		bge_asf_update_status(bgep);
		bge_asf_stop_timer(bgep);
		bgep->asf_status = ASF_STAT_STOP;

		bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
	}
#endif
	bge_chip_stop(bgep, B_TRUE);
	(void) bge_check_acc_handle(bgep, bgep->io_handle);
	mutex_exit(bgep->genlock);
	return (result);
}
4529 4699
4530 4700 /*
4531 4701 * ========== Factotum, implemented as a softint handler ==========
4532 4702 */
4533 4703
4534 4704 #undef BGE_DBG
4535 4705 #define BGE_DBG BGE_DBG_FACT /* debug flag for this code */
4536 4706
4537 4707 static void bge_factotum_error_handler(bge_t *bgep);
4538 4708 #pragma no_inline(bge_factotum_error_handler)
4539 4709
/*
 * Gather (for debug) and then clear every error-status register that
 * can cause the ERROR bit in the status block to be asserted.
 * Called from the factotum with <genlock> held.
 */
static void
bge_factotum_error_handler(bge_t *bgep)
{
	uint32_t flow;
	uint32_t rdma;
	uint32_t wdma;
	uint32_t tmac;
	uint32_t rmac;
	uint32_t rxrs;
	uint32_t txrs = 0;	/* TX RISC only exists on 5704-series */

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Read all the registers that show the possible
	 * reasons for the ERROR bit to be asserted
	 */
	flow = bge_reg_get32(bgep, FLOW_ATTN_REG);
	rdma = bge_reg_get32(bgep, READ_DMA_STATUS_REG);
	wdma = bge_reg_get32(bgep, WRITE_DMA_STATUS_REG);
	tmac = bge_reg_get32(bgep, TRANSMIT_MAC_STATUS_REG);
	rmac = bge_reg_get32(bgep, RECEIVE_MAC_STATUS_REG);
	rxrs = bge_reg_get32(bgep, RX_RISC_STATE_REG);
	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
		txrs = bge_reg_get32(bgep, TX_RISC_STATE_REG);

	BGE_DEBUG(("factotum($%p) flow 0x%x rdma 0x%x wdma 0x%x",
	    (void *)bgep, flow, rdma, wdma));
	BGE_DEBUG(("factotum($%p) tmac 0x%x rmac 0x%x rxrs 0x%08x txrs 0x%08x",
	    (void *)bgep, tmac, rmac, rxrs, txrs));

	/*
	 * For now, just clear all the errors ...
	 */
	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
		bge_reg_put32(bgep, TX_RISC_STATE_REG, ~0);
	bge_reg_put32(bgep, RX_RISC_STATE_REG, ~0);
	bge_reg_put32(bgep, RECEIVE_MAC_STATUS_REG, ~0);
	bge_reg_put32(bgep, WRITE_DMA_STATUS_REG, ~0);
	bge_reg_put32(bgep, READ_DMA_STATUS_REG, ~0);
	bge_reg_put32(bgep, FLOW_ATTN_REG, ~0);
}
4582 4752
4583 4753 /*
4584 4754 * Handler for hardware link state change.
4585 4755 *
4586 4756 * When this routine is called, the hardware link state has changed
4587 4757 * and the new state is reflected in the param_* variables. Here
4588 4758 * we must update the softstate and reprogram the MAC to match.
4589 4759 */
4590 4760 static void bge_factotum_link_handler(bge_t *bgep);
4591 4761 #pragma no_inline(bge_factotum_link_handler)
4592 4762
4593 4763 static void
4594 4764 bge_factotum_link_handler(bge_t *bgep)
4595 4765 {
4596 4766 ASSERT(mutex_owned(bgep->genlock));
4597 4767
4598 4768 /*
4599 4769 * Update the s/w link_state
4600 4770 */
4601 4771 if (bgep->param_link_up)
4602 4772 bgep->link_state = LINK_STATE_UP;
4603 4773 else
4604 4774 bgep->link_state = LINK_STATE_DOWN;
4605 4775
4606 4776 /*
4607 4777 * Reprogram the MAC modes to match
4608 4778 */
4609 4779 bge_sync_mac_modes(bgep);
4610 4780 }
4611 4781
4612 4782 static boolean_t bge_factotum_link_check(bge_t *bgep, int *dma_state);
4613 4783 #pragma no_inline(bge_factotum_link_check)
4614 4784
4615 4785 static boolean_t
4616 4786 bge_factotum_link_check(bge_t *bgep, int *dma_state)
4617 4787 {
4618 4788 boolean_t check;
4619 4789 uint64_t flags;
4620 4790 uint32_t tmac_status;
4621 4791
4622 4792 ASSERT(mutex_owned(bgep->genlock));
4623 4793
4624 4794 /*
4625 4795 * Get & clear the writable status bits in the Tx status register
4626 4796 * (some bits are write-1-to-clear, others are just readonly).
4627 4797 */
4628 4798 tmac_status = bge_reg_get32(bgep, TRANSMIT_MAC_STATUS_REG);
4629 4799 bge_reg_put32(bgep, TRANSMIT_MAC_STATUS_REG, tmac_status);
4630 4800
4631 4801 /*
4632 4802 * Get & clear the ERROR and LINK_CHANGED bits from the status block
4633 4803 */
4634 4804 *dma_state = bge_status_sync(bgep, STATUS_FLAG_ERROR |
4635 4805 STATUS_FLAG_LINK_CHANGED, &flags);
4636 4806 if (*dma_state != DDI_FM_OK)
4637 4807 return (B_FALSE);
4638 4808
4639 4809 /*
4640 4810 * Clear any errors flagged in the status block ...
4641 4811 */
4642 4812 if (flags & STATUS_FLAG_ERROR)
4643 4813 bge_factotum_error_handler(bgep);
4644 4814
4645 4815 /*
4646 4816 * We need to check the link status if:
4647 4817 * the status block says there's been a link change
4648 4818 * or there's any discrepancy between the various
4649 4819 * flags indicating the link state (link_state,
4650 4820 * param_link_up, and the LINK STATE bit in the
4651 4821 * Transmit MAC status register).
4652 4822 */
4653 4823 check = (flags & STATUS_FLAG_LINK_CHANGED) != 0;
4654 4824 switch (bgep->link_state) {
4655 4825 case LINK_STATE_UP:
4656 4826 check |= (bgep->param_link_up == B_FALSE);
4657 4827 check |= ((tmac_status & TRANSMIT_STATUS_LINK_UP) == 0);
4658 4828 break;
4659 4829
4660 4830 case LINK_STATE_DOWN:
4661 4831 check |= (bgep->param_link_up != B_FALSE);
4662 4832 check |= ((tmac_status & TRANSMIT_STATUS_LINK_UP) != 0);
4663 4833 break;
4664 4834
4665 4835 default:
4666 4836 check = B_TRUE;
4667 4837 break;
4668 4838 }
4669 4839
4670 4840 /*
4671 4841 * If <check> is false, we're sure the link hasn't changed.
4672 4842 * If true, however, it's not yet definitive; we have to call
4673 4843 * bge_phys_check() to determine whether the link has settled
4674 4844 * into a new state yet ... and if it has, then call the link
4675 4845 * state change handler.But when the chip is 5700 in Dell 6650
4676 4846 * ,even if check is false, the link may have changed.So we
4677 4847 * have to call bge_phys_check() to determine the link state.
4678 4848 */
4679 4849 if (check || bgep->chipid.device == DEVICE_ID_5700) {
4680 4850 check = bge_phys_check(bgep);
4681 4851 if (check)
4682 4852 bge_factotum_link_handler(bgep);
4683 4853 }
4684 4854
4685 4855 return (check);
4686 4856 }
4687 4857
4688 4858 /*
4689 4859 * Factotum routine to check for Tx stall, using the 'watchdog' counter
4690 4860 */
4691 4861 static boolean_t bge_factotum_stall_check(bge_t *bgep);
4692 4862 #pragma no_inline(bge_factotum_stall_check)
4693 4863
4694 4864 static boolean_t
4695 4865 bge_factotum_stall_check(bge_t *bgep)
4696 4866 {
4697 4867 uint32_t dogval;
4698 4868 bge_status_t *bsp;
4699 4869 uint64_t now = gethrtime();
4700 4870
4701 4871 if ((now - bgep->timestamp) < BGE_CYCLIC_PERIOD)
4702 4872 return (B_FALSE);
4703 4873
4704 4874 bgep->timestamp = now;
4705 4875
4706 4876 ASSERT(mutex_owned(bgep->genlock));
4707 4877
4708 4878 /*
4709 4879 * Specific check for Tx stall ...
4710 4880 *
4711 4881 * The 'watchdog' counter is incremented whenever a packet
4712 4882 * is queued, reset to 1 when some (but not all) buffers
4713 4883 * are reclaimed, reset to 0 (disabled) when all buffers
4714 4884 * are reclaimed, and shifted left here. If it exceeds the
4715 4885 * threshold value, the chip is assumed to have stalled and
4716 4886 * is put into the ERROR state. The factotum will then reset
4717 4887 * it on the next pass.
4718 4888 *
4719 4889 * All of which should ensure that we don't get into a state
4720 4890 * where packets are left pending indefinitely!
4721 4891 */
4722 4892 dogval = bge_atomic_shl32(&bgep->watchdog, 1);
4723 4893 bsp = DMA_VPTR(bgep->status_block);
4724 4894 if (dogval < bge_watchdog_count || bge_recycle(bgep, bsp))
4725 4895 return (B_FALSE);
4726 4896
4727 4897 #if !defined(BGE_NETCONSOLE)
4728 4898 BGE_REPORT((bgep, "Tx stall detected, watchdog code 0x%x", dogval));
4729 4899 #endif
4730 4900 bge_fm_ereport(bgep, DDI_FM_DEVICE_STALL);
4731 4901 return (B_TRUE);
4732 4902 }
4733 4903
4734 4904 /*
4735 4905 * The factotum is woken up when there's something to do that we'd rather
4736 4906 * not do from inside a hardware interrupt handler or high-level cyclic.
4737 4907 * Its two main tasks are:
4738 4908 * reset & restart the chip after an error
4739 4909 * check the link status whenever necessary
4740 4910 */
4741 4911 uint_t bge_chip_factotum(caddr_t arg);
4742 4912 #pragma no_inline(bge_chip_factotum)
4743 4913
uint_t
bge_chip_factotum(caddr_t arg)
{
	bge_t *bgep;
	uint_t result;
	boolean_t error;
	boolean_t linkchg;
	int dma_state;

	bgep = (void *)arg;

	BGE_TRACE(("bge_chip_factotum($%p)", (void *)bgep));

	/*
	 * Consume the one-shot wakeup latch set by bge_wake_factotum();
	 * if it isn't set, this softint wasn't for us.
	 */
	mutex_enter(bgep->softintrlock);
	if (bgep->factotum_flag == 0) {
		mutex_exit(bgep->softintrlock);
		return (DDI_INTR_UNCLAIMED);
	}
	bgep->factotum_flag = 0;
	mutex_exit(bgep->softintrlock);

	result = DDI_INTR_CLAIMED;
	error = B_FALSE;
	linkchg = B_FALSE;

	mutex_enter(bgep->genlock);
	switch (bgep->bge_chip_state) {
	default:
		break;

	case BGE_CHIP_RUNNING:
		/*
		 * Normal operation: check link state and Tx stall,
		 * accumulating any failures into <error>.
		 * Note <dma_state> is only set (and read) on this path.
		 */
		linkchg = bge_factotum_link_check(bgep, &dma_state);
		error = bge_factotum_stall_check(bgep);
		if (dma_state != DDI_FM_OK) {
			bgep->bge_dma_error = B_TRUE;
			error = B_TRUE;
		}
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
			error = B_TRUE;
		if (error)
			bgep->bge_chip_state = BGE_CHIP_ERROR;
		break;

	case BGE_CHIP_ERROR:
		error = B_TRUE;
		break;

	case BGE_CHIP_FAULT:
		/*
		 * Fault detected, time to reset ...
		 */
		if (bge_autorecover) {
			if (!(bgep->progress & PROGRESS_BUFS)) {
				/*
				 * if we can't allocate the ring buffers,
				 * try later
				 */
				if (bge_alloc_bufs(bgep) != DDI_SUCCESS) {
					mutex_exit(bgep->genlock);
					return (result);
				}
				bgep->progress |= PROGRESS_BUFS;
			}
			if (!(bgep->progress & PROGRESS_INTR)) {
				bge_init_rings(bgep);
				bge_intr_enable(bgep);
				bgep->progress |= PROGRESS_INTR;
			}
			if (!(bgep->progress & PROGRESS_KSTATS)) {
				bge_init_kstats(bgep,
				    ddi_get_instance(bgep->devinfo));
				bgep->progress |= PROGRESS_KSTATS;
			}

			BGE_REPORT((bgep, "automatic recovery activated"));

			/*
			 * Restart the chip; any failure below demotes
			 * the state to ERROR for the next pass.
			 */
			if (bge_restart(bgep, B_FALSE) != DDI_SUCCESS) {
				bgep->bge_chip_state = BGE_CHIP_ERROR;
				error = B_TRUE;
			}
			if (bge_check_acc_handle(bgep, bgep->cfg_handle) !=
			    DDI_FM_OK) {
				bgep->bge_chip_state = BGE_CHIP_ERROR;
				error = B_TRUE;
			}
			if (bge_check_acc_handle(bgep, bgep->io_handle) !=
			    DDI_FM_OK) {
				bgep->bge_chip_state = BGE_CHIP_ERROR;
				error = B_TRUE;
			}
			if (error == B_FALSE) {
#ifdef BGE_IPMI_ASF
				if (bgep->asf_enabled &&
				    bgep->asf_status != ASF_STAT_RUN) {
					bgep->asf_timeout_id = timeout(
					    bge_asf_heartbeat, (void *)bgep,
					    drv_usectohz(
					    BGE_ASF_HEARTBEAT_INTERVAL));
					bgep->asf_status = ASF_STAT_RUN;
				}
#endif
				if (!bgep->manual_reset) {
					ddi_fm_service_impact(bgep->devinfo,
					    DDI_SERVICE_RESTORED);
				}
			}
		}
		break;
	}


	/*
	 * If an error is detected, stop the chip now, marking it as
	 * faulty, so that it will be reset next time through ...
	 *
	 * Note that if intr_running is set, then bge_intr() has dropped
	 * genlock to call bge_receive/bge_recycle. Can't stop the chip at
	 * this point so have to wait until the next time the factotum runs.
	 */
	if (error && !bgep->bge_intr_running) {
#ifdef BGE_IPMI_ASF
		if (bgep->asf_enabled && (bgep->asf_status == ASF_STAT_RUN)) {
			/*
			 * We must stop ASF heart beat before bge_chip_stop(),
			 * otherwise some computers (ex. IBM HS20 blade server)
			 * may crash.
			 */
			bge_asf_update_status(bgep);
			bge_asf_stop_timer(bgep);
			bgep->asf_status = ASF_STAT_STOP;

			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		}
#endif
		bge_chip_stop(bgep, B_TRUE);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
	}
	mutex_exit(bgep->genlock);

	/*
	 * If the link state changed, tell the world about it.
	 * Note: can't do this while still holding the mutex.
	 */
	if (bgep->link_update_timer == BGE_LINK_UPDATE_TIMEOUT &&
	    bgep->link_state != LINK_STATE_UNKNOWN)
		linkchg = B_TRUE;
	else if (bgep->link_update_timer < BGE_LINK_UPDATE_TIMEOUT &&
	    bgep->link_state == LINK_STATE_DOWN)
		linkchg = B_FALSE;

	if (linkchg) {
		mac_link_update(bgep->mh, bgep->link_state);
		bgep->link_update_timer = BGE_LINK_UPDATE_DONE;
	}
	if (bgep->manual_reset) {
		bgep->manual_reset = B_FALSE;
	}

	return (result);
}
4905 5075
4906 5076 /*
4907 5077 * High-level cyclic handler
4908 5078 *
4909 5079 * This routine schedules a (low-level) softint callback to the
4910 5080 * factotum, and prods the chip to update the status block (which
4911 5081 * will cause a hardware interrupt when complete).
4912 5082 */
4913 5083 void bge_chip_cyclic(void *arg);
4914 5084 #pragma no_inline(bge_chip_cyclic)
4915 5085
4916 5086 void
4917 5087 bge_chip_cyclic(void *arg)
4918 5088 {
4919 5089 bge_t *bgep;
4920 5090
4921 5091 bgep = arg;
4922 5092
4923 5093 switch (bgep->bge_chip_state) {
4924 5094 default:
4925 5095 return;
4926 5096
4927 5097 case BGE_CHIP_RUNNING:
4928 5098 bge_reg_set32(bgep, HOST_COALESCE_MODE_REG, COALESCE_NOW);
4929 5099 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
4930 5100 ddi_fm_service_impact(bgep->devinfo,
4931 5101 DDI_SERVICE_UNAFFECTED);
4932 5102
4933 5103 if (bgep->link_update_timer < BGE_LINK_UPDATE_TIMEOUT)
4934 5104 bgep->link_update_timer++;
4935 5105
4936 5106 break;
4937 5107
4938 5108 case BGE_CHIP_FAULT:
4939 5109 case BGE_CHIP_ERROR:
4940 5110 break;
4941 5111 }
4942 5112
4943 5113 bge_wake_factotum(bgep);
4944 5114 }
4945 5115
4946 5116
4947 5117 /*
4948 5118 * ========== Ioctl subfunctions ==========
4949 5119 */
4950 5120
4951 5121 #undef BGE_DBG
4952 5122 #define BGE_DBG BGE_DBG_PPIO /* debug flag for this code */
4953 5123
4954 5124 #if BGE_DEBUGGING || BGE_DO_PPIO
4955 5125
4956 5126 static void bge_chip_peek_cfg(bge_t *bgep, bge_peekpoke_t *ppd);
4957 5127 #pragma no_inline(bge_chip_peek_cfg)
4958 5128
4959 5129 static void
4960 5130 bge_chip_peek_cfg(bge_t *bgep, bge_peekpoke_t *ppd)
4961 5131 {
4962 5132 uint64_t regval;
4963 5133 uint64_t regno;
4964 5134
4965 5135 BGE_TRACE(("bge_chip_peek_cfg($%p, $%p)",
4966 5136 (void *)bgep, (void *)ppd));
4967 5137
4968 5138 regno = ppd->pp_acc_offset;
4969 5139
4970 5140 switch (ppd->pp_acc_size) {
4971 5141 case 1:
4972 5142 regval = pci_config_get8(bgep->cfg_handle, regno);
4973 5143 break;
4974 5144
4975 5145 case 2:
4976 5146 regval = pci_config_get16(bgep->cfg_handle, regno);
4977 5147 break;
4978 5148
4979 5149 case 4:
4980 5150 regval = pci_config_get32(bgep->cfg_handle, regno);
4981 5151 break;
4982 5152
4983 5153 case 8:
4984 5154 regval = pci_config_get64(bgep->cfg_handle, regno);
4985 5155 break;
4986 5156 }
4987 5157
4988 5158 ppd->pp_acc_data = regval;
4989 5159 }
4990 5160
4991 5161 static void bge_chip_poke_cfg(bge_t *bgep, bge_peekpoke_t *ppd);
4992 5162 #pragma no_inline(bge_chip_poke_cfg)
4993 5163
4994 5164 static void
4995 5165 bge_chip_poke_cfg(bge_t *bgep, bge_peekpoke_t *ppd)
4996 5166 {
4997 5167 uint64_t regval;
4998 5168 uint64_t regno;
4999 5169
5000 5170 BGE_TRACE(("bge_chip_poke_cfg($%p, $%p)",
5001 5171 (void *)bgep, (void *)ppd));
5002 5172
5003 5173 regno = ppd->pp_acc_offset;
5004 5174 regval = ppd->pp_acc_data;
5005 5175
5006 5176 switch (ppd->pp_acc_size) {
5007 5177 case 1:
5008 5178 pci_config_put8(bgep->cfg_handle, regno, regval);
5009 5179 break;
5010 5180
5011 5181 case 2:
5012 5182 pci_config_put16(bgep->cfg_handle, regno, regval);
5013 5183 break;
5014 5184
5015 5185 case 4:
5016 5186 pci_config_put32(bgep->cfg_handle, regno, regval);
5017 5187 break;
5018 5188
5019 5189 case 8:
5020 5190 pci_config_put64(bgep->cfg_handle, regno, regval);
5021 5191 break;
5022 5192 }
5023 5193 }
5024 5194
5025 5195 static void bge_chip_peek_reg(bge_t *bgep, bge_peekpoke_t *ppd);
5026 5196 #pragma no_inline(bge_chip_peek_reg)
5027 5197
5028 5198 static void
5029 5199 bge_chip_peek_reg(bge_t *bgep, bge_peekpoke_t *ppd)
5030 5200 {
5031 5201 uint64_t regval;
5032 5202 void *regaddr;
5033 5203
5034 5204 BGE_TRACE(("bge_chip_peek_reg($%p, $%p)",
5035 5205 (void *)bgep, (void *)ppd));
5036 5206
5037 5207 regaddr = PIO_ADDR(bgep, ppd->pp_acc_offset);
5038 5208
5039 5209 switch (ppd->pp_acc_size) {
5040 5210 case 1:
5041 5211 regval = ddi_get8(bgep->io_handle, regaddr);
5042 5212 break;
5043 5213
5044 5214 case 2:
5045 5215 regval = ddi_get16(bgep->io_handle, regaddr);
5046 5216 break;
5047 5217
5048 5218 case 4:
5049 5219 regval = ddi_get32(bgep->io_handle, regaddr);
5050 5220 break;
5051 5221
5052 5222 case 8:
5053 5223 regval = ddi_get64(bgep->io_handle, regaddr);
5054 5224 break;
5055 5225 }
5056 5226
5057 5227 ppd->pp_acc_data = regval;
5058 5228 }
5059 5229
5060 5230 static void bge_chip_poke_reg(bge_t *bgep, bge_peekpoke_t *ppd);
5061 5231 #pragma no_inline(bge_chip_peek_reg)
5062 5232
5063 5233 static void
5064 5234 bge_chip_poke_reg(bge_t *bgep, bge_peekpoke_t *ppd)
5065 5235 {
5066 5236 uint64_t regval;
5067 5237 void *regaddr;
5068 5238
5069 5239 BGE_TRACE(("bge_chip_poke_reg($%p, $%p)",
5070 5240 (void *)bgep, (void *)ppd));
5071 5241
5072 5242 regaddr = PIO_ADDR(bgep, ppd->pp_acc_offset);
5073 5243 regval = ppd->pp_acc_data;
5074 5244
5075 5245 switch (ppd->pp_acc_size) {
5076 5246 case 1:
5077 5247 ddi_put8(bgep->io_handle, regaddr, regval);
5078 5248 break;
5079 5249
5080 5250 case 2:
5081 5251 ddi_put16(bgep->io_handle, regaddr, regval);
5082 5252 break;
5083 5253
5084 5254 case 4:
5085 5255 ddi_put32(bgep->io_handle, regaddr, regval);
5086 5256 break;
5087 5257
5088 5258 case 8:
5089 5259 ddi_put64(bgep->io_handle, regaddr, regval);
5090 5260 break;
5091 5261 }
5092 5262 BGE_PCICHK(bgep);
5093 5263 }
5094 5264
5095 5265 static void bge_chip_peek_nic(bge_t *bgep, bge_peekpoke_t *ppd);
5096 5266 #pragma no_inline(bge_chip_peek_nic)
5097 5267
5098 5268 static void
5099 5269 bge_chip_peek_nic(bge_t *bgep, bge_peekpoke_t *ppd)
5100 5270 {
5101 5271 uint64_t regoff;
5102 5272 uint64_t regval;
5103 5273 void *regaddr;
5104 5274
5105 5275 BGE_TRACE(("bge_chip_peek_nic($%p, $%p)",
5106 5276 (void *)bgep, (void *)ppd));
5107 5277
5108 5278 regoff = ppd->pp_acc_offset;
5109 5279 bge_nic_setwin(bgep, regoff & ~MWBAR_GRANULE_MASK);
5110 5280 regoff &= MWBAR_GRANULE_MASK;
5111 5281 regoff += NIC_MEM_WINDOW_OFFSET;
5112 5282 regaddr = PIO_ADDR(bgep, regoff);
5113 5283
5114 5284 switch (ppd->pp_acc_size) {
5115 5285 case 1:
5116 5286 regval = ddi_get8(bgep->io_handle, regaddr);
5117 5287 break;
5118 5288
5119 5289 case 2:
5120 5290 regval = ddi_get16(bgep->io_handle, regaddr);
5121 5291 break;
5122 5292
5123 5293 case 4:
5124 5294 regval = ddi_get32(bgep->io_handle, regaddr);
5125 5295 break;
5126 5296
5127 5297 case 8:
5128 5298 regval = ddi_get64(bgep->io_handle, regaddr);
5129 5299 break;
5130 5300 }
5131 5301
5132 5302 ppd->pp_acc_data = regval;
5133 5303 }
5134 5304
5135 5305 static void bge_chip_poke_nic(bge_t *bgep, bge_peekpoke_t *ppd);
5136 5306 #pragma no_inline(bge_chip_poke_nic)
5137 5307
5138 5308 static void
5139 5309 bge_chip_poke_nic(bge_t *bgep, bge_peekpoke_t *ppd)
5140 5310 {
5141 5311 uint64_t regoff;
5142 5312 uint64_t regval;
5143 5313 void *regaddr;
5144 5314
5145 5315 BGE_TRACE(("bge_chip_poke_nic($%p, $%p)",
5146 5316 (void *)bgep, (void *)ppd));
5147 5317
5148 5318 regoff = ppd->pp_acc_offset;
5149 5319 bge_nic_setwin(bgep, regoff & ~MWBAR_GRANULE_MASK);
5150 5320 regoff &= MWBAR_GRANULE_MASK;
5151 5321 regoff += NIC_MEM_WINDOW_OFFSET;
5152 5322 regaddr = PIO_ADDR(bgep, regoff);
5153 5323 regval = ppd->pp_acc_data;
5154 5324
5155 5325 switch (ppd->pp_acc_size) {
5156 5326 case 1:
5157 5327 ddi_put8(bgep->io_handle, regaddr, regval);
5158 5328 break;
5159 5329
5160 5330 case 2:
5161 5331 ddi_put16(bgep->io_handle, regaddr, regval);
5162 5332 break;
5163 5333
5164 5334 case 4:
5165 5335 ddi_put32(bgep->io_handle, regaddr, regval);
5166 5336 break;
5167 5337
5168 5338 case 8:
5169 5339 ddi_put64(bgep->io_handle, regaddr, regval);
5170 5340 break;
5171 5341 }
5172 5342 BGE_PCICHK(bgep);
5173 5343 }
5174 5344
5175 5345 static void bge_chip_peek_mii(bge_t *bgep, bge_peekpoke_t *ppd);
5176 5346 #pragma no_inline(bge_chip_peek_mii)
5177 5347
5178 5348 static void
5179 5349 bge_chip_peek_mii(bge_t *bgep, bge_peekpoke_t *ppd)
5180 5350 {
5181 5351 BGE_TRACE(("bge_chip_peek_mii($%p, $%p)",
5182 5352 (void *)bgep, (void *)ppd));
5183 5353
5184 5354 ppd->pp_acc_data = bge_mii_get16(bgep, ppd->pp_acc_offset/2);
5185 5355 }
5186 5356
5187 5357 static void bge_chip_poke_mii(bge_t *bgep, bge_peekpoke_t *ppd);
5188 5358 #pragma no_inline(bge_chip_poke_mii)
5189 5359
5190 5360 static void
5191 5361 bge_chip_poke_mii(bge_t *bgep, bge_peekpoke_t *ppd)
5192 5362 {
5193 5363 BGE_TRACE(("bge_chip_poke_mii($%p, $%p)",
5194 5364 (void *)bgep, (void *)ppd));
5195 5365
5196 5366 bge_mii_put16(bgep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
5197 5367 }
5198 5368
5199 5369 #if BGE_SEE_IO32
5200 5370
5201 5371 static void bge_chip_peek_seeprom(bge_t *bgep, bge_peekpoke_t *ppd);
5202 5372 #pragma no_inline(bge_chip_peek_seeprom)
5203 5373
5204 5374 static void
5205 5375 bge_chip_peek_seeprom(bge_t *bgep, bge_peekpoke_t *ppd)
5206 5376 {
5207 5377 uint32_t data;
5208 5378 int err;
5209 5379
5210 5380 BGE_TRACE(("bge_chip_peek_seeprom($%p, $%p)",
5211 5381 (void *)bgep, (void *)ppd));
5212 5382
5213 5383 err = bge_nvmem_rw32(bgep, BGE_SEE_READ, ppd->pp_acc_offset, &data);
5214 5384 ppd->pp_acc_data = err ? ~0ull : data;
5215 5385 }
5216 5386
5217 5387 static void bge_chip_poke_seeprom(bge_t *bgep, bge_peekpoke_t *ppd);
5218 5388 #pragma no_inline(bge_chip_poke_seeprom)
5219 5389
5220 5390 static void
5221 5391 bge_chip_poke_seeprom(bge_t *bgep, bge_peekpoke_t *ppd)
5222 5392 {
5223 5393 uint32_t data;
5224 5394
5225 5395 BGE_TRACE(("bge_chip_poke_seeprom($%p, $%p)",
5226 5396 (void *)bgep, (void *)ppd));
5227 5397
5228 5398 data = ppd->pp_acc_data;
5229 5399 (void) bge_nvmem_rw32(bgep, BGE_SEE_WRITE, ppd->pp_acc_offset, &data);
5230 5400 }
5231 5401 #endif /* BGE_SEE_IO32 */
5232 5402
5233 5403 #if BGE_FLASH_IO32
5234 5404
5235 5405 static void bge_chip_peek_flash(bge_t *bgep, bge_peekpoke_t *ppd);
5236 5406 #pragma no_inline(bge_chip_peek_flash)
5237 5407
5238 5408 static void
5239 5409 bge_chip_peek_flash(bge_t *bgep, bge_peekpoke_t *ppd)
5240 5410 {
5241 5411 uint32_t data;
5242 5412 int err;
5243 5413
5244 5414 BGE_TRACE(("bge_chip_peek_flash($%p, $%p)",
5245 5415 (void *)bgep, (void *)ppd));
5246 5416
5247 5417 err = bge_nvmem_rw32(bgep, BGE_FLASH_READ, ppd->pp_acc_offset, &data);
5248 5418 ppd->pp_acc_data = err ? ~0ull : data;
5249 5419 }
5250 5420
5251 5421 static void bge_chip_poke_flash(bge_t *bgep, bge_peekpoke_t *ppd);
5252 5422 #pragma no_inline(bge_chip_poke_flash)
5253 5423
5254 5424 static void
5255 5425 bge_chip_poke_flash(bge_t *bgep, bge_peekpoke_t *ppd)
5256 5426 {
5257 5427 uint32_t data;
5258 5428
5259 5429 BGE_TRACE(("bge_chip_poke_flash($%p, $%p)",
5260 5430 (void *)bgep, (void *)ppd));
5261 5431
5262 5432 data = ppd->pp_acc_data;
5263 5433 (void) bge_nvmem_rw32(bgep, BGE_FLASH_WRITE,
5264 5434 ppd->pp_acc_offset, &data);
5265 5435 }
5266 5436 #endif /* BGE_FLASH_IO32 */
5267 5437
5268 5438 static void bge_chip_peek_mem(bge_t *bgep, bge_peekpoke_t *ppd);
5269 5439 #pragma no_inline(bge_chip_peek_mem)
5270 5440
5271 5441 static void
5272 5442 bge_chip_peek_mem(bge_t *bgep, bge_peekpoke_t *ppd)
5273 5443 {
5274 5444 uint64_t regval;
5275 5445 void *vaddr;
5276 5446
5277 5447 BGE_TRACE(("bge_chip_peek_bge($%p, $%p)",
5278 5448 (void *)bgep, (void *)ppd));
5279 5449
5280 5450 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5281 5451
5282 5452 switch (ppd->pp_acc_size) {
5283 5453 case 1:
5284 5454 regval = *(uint8_t *)vaddr;
5285 5455 break;
5286 5456
5287 5457 case 2:
5288 5458 regval = *(uint16_t *)vaddr;
5289 5459 break;
5290 5460
5291 5461 case 4:
5292 5462 regval = *(uint32_t *)vaddr;
5293 5463 break;
5294 5464
5295 5465 case 8:
5296 5466 regval = *(uint64_t *)vaddr;
5297 5467 break;
5298 5468 }
5299 5469
5300 5470 BGE_DEBUG(("bge_chip_peek_mem($%p, $%p) peeked 0x%llx from $%p",
5301 5471 (void *)bgep, (void *)ppd, regval, vaddr));
5302 5472
5303 5473 ppd->pp_acc_data = regval;
5304 5474 }
5305 5475
5306 5476 static void bge_chip_poke_mem(bge_t *bgep, bge_peekpoke_t *ppd);
5307 5477 #pragma no_inline(bge_chip_poke_mem)
5308 5478
5309 5479 static void
5310 5480 bge_chip_poke_mem(bge_t *bgep, bge_peekpoke_t *ppd)
5311 5481 {
5312 5482 uint64_t regval;
5313 5483 void *vaddr;
5314 5484
5315 5485 BGE_TRACE(("bge_chip_poke_mem($%p, $%p)",
5316 5486 (void *)bgep, (void *)ppd));
5317 5487
5318 5488 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5319 5489 regval = ppd->pp_acc_data;
5320 5490
5321 5491 BGE_DEBUG(("bge_chip_poke_mem($%p, $%p) poking 0x%llx at $%p",
5322 5492 (void *)bgep, (void *)ppd, regval, vaddr));
5323 5493
5324 5494 switch (ppd->pp_acc_size) {
5325 5495 case 1:
5326 5496 *(uint8_t *)vaddr = (uint8_t)regval;
5327 5497 break;
5328 5498
5329 5499 case 2:
5330 5500 *(uint16_t *)vaddr = (uint16_t)regval;
5331 5501 break;
5332 5502
5333 5503 case 4:
5334 5504 *(uint32_t *)vaddr = (uint32_t)regval;
5335 5505 break;
5336 5506
5337 5507 case 8:
5338 5508 *(uint64_t *)vaddr = (uint64_t)regval;
5339 5509 break;
5340 5510 }
5341 5511 }
5342 5512
5343 5513 static enum ioc_reply bge_pp_ioctl(bge_t *bgep, int cmd, mblk_t *mp,
5344 5514 struct iocblk *iocp);
5345 5515 #pragma no_inline(bge_pp_ioctl)
5346 5516
5347 5517 static enum ioc_reply
5348 5518 bge_pp_ioctl(bge_t *bgep, int cmd, mblk_t *mp, struct iocblk *iocp)
5349 5519 {
5350 5520 void (*ppfn)(bge_t *bgep, bge_peekpoke_t *ppd);
5351 5521 bge_peekpoke_t *ppd;
5352 5522 dma_area_t *areap;
5353 5523 uint64_t sizemask;
5354 5524 uint64_t mem_va;
5355 5525 uint64_t maxoff;
5356 5526 boolean_t peek;
5357 5527
5358 5528 switch (cmd) {
5359 5529 default:
5360 5530 /* NOTREACHED */
5361 5531 bge_error(bgep, "bge_pp_ioctl: invalid cmd 0x%x", cmd);
5362 5532 return (IOC_INVAL);
5363 5533
5364 5534 case BGE_PEEK:
5365 5535 peek = B_TRUE;
5366 5536 break;
5367 5537
5368 5538 case BGE_POKE:
5369 5539 peek = B_FALSE;
5370 5540 break;
5371 5541 }
5372 5542
5373 5543 /*
5374 5544 * Validate format of ioctl
5375 5545 */
5376 5546 if (iocp->ioc_count != sizeof (bge_peekpoke_t))
5377 5547 return (IOC_INVAL);
5378 5548 if (mp->b_cont == NULL)
5379 5549 return (IOC_INVAL);
5380 5550 ppd = (void *)mp->b_cont->b_rptr;
5381 5551
5382 5552 /*
5383 5553 * Validate request parameters
5384 5554 */
5385 5555 switch (ppd->pp_acc_space) {
5386 5556 default:
5387 5557 return (IOC_INVAL);
5388 5558
5389 5559 case BGE_PP_SPACE_CFG:
5390 5560 /*
5391 5561 * Config space
5392 5562 */
5393 5563 sizemask = 8|4|2|1;
5394 5564 mem_va = 0;
5395 5565 maxoff = PCI_CONF_HDR_SIZE;
5396 5566 ppfn = peek ? bge_chip_peek_cfg : bge_chip_poke_cfg;
5397 5567 break;
5398 5568
5399 5569 case BGE_PP_SPACE_REG:
5400 5570 /*
5401 5571 * Memory-mapped I/O space
5402 5572 */
5403 5573 sizemask = 8|4|2|1;
5404 5574 mem_va = 0;
5405 5575 maxoff = RIAAR_REGISTER_MAX;
5406 5576 ppfn = peek ? bge_chip_peek_reg : bge_chip_poke_reg;
5407 5577 break;
5408 5578
5409 5579 case BGE_PP_SPACE_NIC:
5410 5580 /*
5411 5581 * NIC on-chip memory
5412 5582 */
5413 5583 sizemask = 8|4|2|1;
5414 5584 mem_va = 0;
5415 5585 maxoff = MWBAR_ONCHIP_MAX;
5416 5586 ppfn = peek ? bge_chip_peek_nic : bge_chip_poke_nic;
5417 5587 break;
5418 5588
5419 5589 case BGE_PP_SPACE_MII:
5420 5590 /*
5421 5591 * PHY's MII registers
5422 5592 * NB: all PHY registers are two bytes, but the
5423 5593 * addresses increment in ones (word addressing).
5424 5594 * So we scale the address here, then undo the
5425 5595 * transformation inside the peek/poke functions.
5426 5596 */
5427 5597 ppd->pp_acc_offset *= 2;
5428 5598 sizemask = 2;
5429 5599 mem_va = 0;
5430 5600 maxoff = (MII_MAXREG+1)*2;
5431 5601 ppfn = peek ? bge_chip_peek_mii : bge_chip_poke_mii;
5432 5602 break;
5433 5603
5434 5604 #if BGE_SEE_IO32
5435 5605 case BGE_PP_SPACE_SEEPROM:
5436 5606 /*
5437 5607 * Attached SEEPROM(s), if any.
5438 5608 * NB: we use the high-order bits of the 'address' as
5439 5609 * a device select to accommodate multiple SEEPROMS,
5440 5610 * If each one is the maximum size (64kbytes), this
5441 5611 * makes them appear contiguous. Otherwise, there may
5442 5612 * be holes in the mapping. ENxS doesn't have any
5443 5613 * SEEPROMs anyway ...
5444 5614 */
5445 5615 sizemask = 4;
5446 5616 mem_va = 0;
5447 5617 maxoff = SEEPROM_DEV_AND_ADDR_MASK;
5448 5618 ppfn = peek ? bge_chip_peek_seeprom : bge_chip_poke_seeprom;
5449 5619 break;
5450 5620 #endif /* BGE_SEE_IO32 */
5451 5621
5452 5622 #if BGE_FLASH_IO32
5453 5623 case BGE_PP_SPACE_FLASH:
5454 5624 /*
5455 5625 * Attached Flash device (if any); a maximum of one device
5456 5626 * is currently supported. But it can be up to 1MB (unlike
5457 5627 * the 64k limit on SEEPROMs) so why would you need more ;-)
5458 5628 */
5459 5629 sizemask = 4;
5460 5630 mem_va = 0;
5461 5631 maxoff = NVM_FLASH_ADDR_MASK;
5462 5632 ppfn = peek ? bge_chip_peek_flash : bge_chip_poke_flash;
5463 5633 break;
5464 5634 #endif /* BGE_FLASH_IO32 */
5465 5635
5466 5636 case BGE_PP_SPACE_BGE:
5467 5637 /*
5468 5638 * BGE data structure!
5469 5639 */
5470 5640 sizemask = 8|4|2|1;
5471 5641 mem_va = (uintptr_t)bgep;
5472 5642 maxoff = sizeof (*bgep);
5473 5643 ppfn = peek ? bge_chip_peek_mem : bge_chip_poke_mem;
5474 5644 break;
5475 5645
5476 5646 case BGE_PP_SPACE_STATUS:
5477 5647 case BGE_PP_SPACE_STATISTICS:
5478 5648 case BGE_PP_SPACE_TXDESC:
5479 5649 case BGE_PP_SPACE_TXBUFF:
5480 5650 case BGE_PP_SPACE_RXDESC:
5481 5651 case BGE_PP_SPACE_RXBUFF:
5482 5652 /*
5483 5653 * Various DMA_AREAs
5484 5654 */
5485 5655 switch (ppd->pp_acc_space) {
5486 5656 case BGE_PP_SPACE_TXDESC:
5487 5657 areap = &bgep->tx_desc;
5488 5658 break;
5489 5659 case BGE_PP_SPACE_TXBUFF:
5490 5660 areap = &bgep->tx_buff[0];
5491 5661 break;
5492 5662 case BGE_PP_SPACE_RXDESC:
5493 5663 areap = &bgep->rx_desc[0];
5494 5664 break;
5495 5665 case BGE_PP_SPACE_RXBUFF:
5496 5666 areap = &bgep->rx_buff[0];
5497 5667 break;
5498 5668 case BGE_PP_SPACE_STATUS:
5499 5669 areap = &bgep->status_block;
5500 5670 break;
5501 5671 case BGE_PP_SPACE_STATISTICS:
5502 5672 if (bgep->chipid.statistic_type == BGE_STAT_BLK)
5503 5673 areap = &bgep->statistics;
5504 5674 break;
5505 5675 }
5506 5676
5507 5677 sizemask = 8|4|2|1;
5508 5678 mem_va = (uintptr_t)areap->mem_va;
5509 5679 maxoff = areap->alength;
5510 5680 ppfn = peek ? bge_chip_peek_mem : bge_chip_poke_mem;
5511 5681 break;
5512 5682 }
5513 5683
5514 5684 switch (ppd->pp_acc_size) {
5515 5685 default:
5516 5686 return (IOC_INVAL);
5517 5687
5518 5688 case 8:
5519 5689 case 4:
5520 5690 case 2:
5521 5691 case 1:
5522 5692 if ((ppd->pp_acc_size & sizemask) == 0)
5523 5693 return (IOC_INVAL);
5524 5694 break;
5525 5695 }
5526 5696
5527 5697 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
5528 5698 return (IOC_INVAL);
5529 5699
5530 5700 if (ppd->pp_acc_offset >= maxoff)
5531 5701 return (IOC_INVAL);
5532 5702
5533 5703 if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
5534 5704 return (IOC_INVAL);
5535 5705
5536 5706 /*
5537 5707 * All OK - go do it!
5538 5708 */
5539 5709 ppd->pp_acc_offset += mem_va;
5540 5710 (*ppfn)(bgep, ppd);
5541 5711 return (peek ? IOC_REPLY : IOC_ACK);
5542 5712 }
5543 5713
5544 5714 static enum ioc_reply bge_diag_ioctl(bge_t *bgep, int cmd, mblk_t *mp,
5545 5715 struct iocblk *iocp);
5546 5716 #pragma no_inline(bge_diag_ioctl)
5547 5717
5548 5718 static enum ioc_reply
5549 5719 bge_diag_ioctl(bge_t *bgep, int cmd, mblk_t *mp, struct iocblk *iocp)
5550 5720 {
5551 5721 ASSERT(mutex_owned(bgep->genlock));
5552 5722
5553 5723 switch (cmd) {
5554 5724 default:
5555 5725 /* NOTREACHED */
5556 5726 bge_error(bgep, "bge_diag_ioctl: invalid cmd 0x%x", cmd);
5557 5727 return (IOC_INVAL);
5558 5728
5559 5729 case BGE_DIAG:
5560 5730 /*
5561 5731 * Currently a no-op
5562 5732 */
5563 5733 return (IOC_ACK);
5564 5734
5565 5735 case BGE_PEEK:
5566 5736 case BGE_POKE:
5567 5737 return (bge_pp_ioctl(bgep, cmd, mp, iocp));
5568 5738
5569 5739 case BGE_PHY_RESET:
5570 5740 return (IOC_RESTART_ACK);
5571 5741
5572 5742 case BGE_SOFT_RESET:
5573 5743 case BGE_HARD_RESET:
5574 5744 /*
5575 5745 * Reset and reinitialise the 570x hardware
5576 5746 */
5577 5747 bgep->bge_chip_state = BGE_CHIP_FAULT;
5578 5748 ddi_trigger_softintr(bgep->factotum_id);
5579 5749 (void) bge_restart(bgep, cmd == BGE_HARD_RESET);
5580 5750 return (IOC_ACK);
5581 5751 }
5582 5752
5583 5753 /* NOTREACHED */
5584 5754 }
5585 5755
5586 5756 #endif /* BGE_DEBUGGING || BGE_DO_PPIO */
5587 5757
5588 5758 static enum ioc_reply bge_mii_ioctl(bge_t *bgep, int cmd, mblk_t *mp,
5589 5759 struct iocblk *iocp);
5590 5760 #pragma no_inline(bge_mii_ioctl)
5591 5761
5592 5762 static enum ioc_reply
5593 5763 bge_mii_ioctl(bge_t *bgep, int cmd, mblk_t *mp, struct iocblk *iocp)
5594 5764 {
5595 5765 struct bge_mii_rw *miirwp;
5596 5766
5597 5767 /*
5598 5768 * Validate format of ioctl
5599 5769 */
5600 5770 if (iocp->ioc_count != sizeof (struct bge_mii_rw))
5601 5771 return (IOC_INVAL);
5602 5772 if (mp->b_cont == NULL)
5603 5773 return (IOC_INVAL);
5604 5774 miirwp = (void *)mp->b_cont->b_rptr;
5605 5775
5606 5776 /*
5607 5777 * Validate request parameters ...
5608 5778 */
5609 5779 if (miirwp->mii_reg > MII_MAXREG)
5610 5780 return (IOC_INVAL);
5611 5781
5612 5782 switch (cmd) {
5613 5783 default:
5614 5784 /* NOTREACHED */
5615 5785 bge_error(bgep, "bge_mii_ioctl: invalid cmd 0x%x", cmd);
5616 5786 return (IOC_INVAL);
5617 5787
5618 5788 case BGE_MII_READ:
5619 5789 miirwp->mii_data = bge_mii_get16(bgep, miirwp->mii_reg);
5620 5790 return (IOC_REPLY);
5621 5791
5622 5792 case BGE_MII_WRITE:
5623 5793 bge_mii_put16(bgep, miirwp->mii_reg, miirwp->mii_data);
5624 5794 return (IOC_ACK);
5625 5795 }
5626 5796
5627 5797 /* NOTREACHED */
5628 5798 }
5629 5799
5630 5800 #if BGE_SEE_IO32
5631 5801
5632 5802 static enum ioc_reply bge_see_ioctl(bge_t *bgep, int cmd, mblk_t *mp,
5633 5803 struct iocblk *iocp);
5634 5804 #pragma no_inline(bge_see_ioctl)
5635 5805
5636 5806 static enum ioc_reply
5637 5807 bge_see_ioctl(bge_t *bgep, int cmd, mblk_t *mp, struct iocblk *iocp)
5638 5808 {
5639 5809 struct bge_see_rw *seerwp;
5640 5810
5641 5811 /*
5642 5812 * Validate format of ioctl
5643 5813 */
5644 5814 if (iocp->ioc_count != sizeof (struct bge_see_rw))
5645 5815 return (IOC_INVAL);
5646 5816 if (mp->b_cont == NULL)
5647 5817 return (IOC_INVAL);
5648 5818 seerwp = (void *)mp->b_cont->b_rptr;
5649 5819
5650 5820 /*
5651 5821 * Validate request parameters ...
5652 5822 */
5653 5823 if (seerwp->see_addr & ~SEEPROM_DEV_AND_ADDR_MASK)
5654 5824 return (IOC_INVAL);
5655 5825
5656 5826 switch (cmd) {
5657 5827 default:
5658 5828 /* NOTREACHED */
5659 5829 bge_error(bgep, "bge_see_ioctl: invalid cmd 0x%x", cmd);
5660 5830 return (IOC_INVAL);
5661 5831
5662 5832 case BGE_SEE_READ:
5663 5833 case BGE_SEE_WRITE:
5664 5834 iocp->ioc_error = bge_nvmem_rw32(bgep, cmd,
5665 5835 seerwp->see_addr, &seerwp->see_data);
5666 5836 return (IOC_REPLY);
5667 5837 }
5668 5838
5669 5839 /* NOTREACHED */
5670 5840 }
5671 5841
5672 5842 #endif /* BGE_SEE_IO32 */
5673 5843
5674 5844 #if BGE_FLASH_IO32
5675 5845
5676 5846 static enum ioc_reply bge_flash_ioctl(bge_t *bgep, int cmd, mblk_t *mp,
5677 5847 struct iocblk *iocp);
5678 5848 #pragma no_inline(bge_flash_ioctl)
5679 5849
5680 5850 static enum ioc_reply
5681 5851 bge_flash_ioctl(bge_t *bgep, int cmd, mblk_t *mp, struct iocblk *iocp)
5682 5852 {
5683 5853 struct bge_flash_rw *flashrwp;
5684 5854
5685 5855 /*
5686 5856 * Validate format of ioctl
5687 5857 */
5688 5858 if (iocp->ioc_count != sizeof (struct bge_flash_rw))
5689 5859 return (IOC_INVAL);
5690 5860 if (mp->b_cont == NULL)
5691 5861 return (IOC_INVAL);
5692 5862 flashrwp = (void *)mp->b_cont->b_rptr;
5693 5863
5694 5864 /*
5695 5865 * Validate request parameters ...
5696 5866 */
5697 5867 if (flashrwp->flash_addr & ~NVM_FLASH_ADDR_MASK)
5698 5868 return (IOC_INVAL);
5699 5869
5700 5870 switch (cmd) {
5701 5871 default:
5702 5872 /* NOTREACHED */
5703 5873 bge_error(bgep, "bge_flash_ioctl: invalid cmd 0x%x", cmd);
5704 5874 return (IOC_INVAL);
5705 5875
5706 5876 case BGE_FLASH_READ:
5707 5877 case BGE_FLASH_WRITE:
5708 5878 iocp->ioc_error = bge_nvmem_rw32(bgep, cmd,
5709 5879 flashrwp->flash_addr, &flashrwp->flash_data);
5710 5880 return (IOC_REPLY);
5711 5881 }
5712 5882
5713 5883 /* NOTREACHED */
5714 5884 }
5715 5885
5716 5886 #endif /* BGE_FLASH_IO32 */
5717 5887
5718 5888 enum ioc_reply bge_chip_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp,
5719 5889 struct iocblk *iocp);
5720 5890 #pragma no_inline(bge_chip_ioctl)
5721 5891
5722 5892 enum ioc_reply
5723 5893 bge_chip_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
5724 5894 {
5725 5895 int cmd;
5726 5896
5727 5897 BGE_TRACE(("bge_chip_ioctl($%p, $%p, $%p, $%p)",
5728 5898 (void *)bgep, (void *)wq, (void *)mp, (void *)iocp));
5729 5899
5730 5900 ASSERT(mutex_owned(bgep->genlock));
5731 5901
5732 5902 cmd = iocp->ioc_cmd;
5733 5903 switch (cmd) {
5734 5904 default:
5735 5905 /* NOTREACHED */
5736 5906 bge_error(bgep, "bge_chip_ioctl: invalid cmd 0x%x", cmd);
5737 5907 return (IOC_INVAL);
5738 5908
5739 5909 case BGE_DIAG:
5740 5910 case BGE_PEEK:
5741 5911 case BGE_POKE:
5742 5912 case BGE_PHY_RESET:
5743 5913 case BGE_SOFT_RESET:
5744 5914 case BGE_HARD_RESET:
5745 5915 #if BGE_DEBUGGING || BGE_DO_PPIO
5746 5916 return (bge_diag_ioctl(bgep, cmd, mp, iocp));
5747 5917 #else
5748 5918 return (IOC_INVAL);
5749 5919 #endif /* BGE_DEBUGGING || BGE_DO_PPIO */
5750 5920
5751 5921 case BGE_MII_READ:
5752 5922 case BGE_MII_WRITE:
5753 5923 return (bge_mii_ioctl(bgep, cmd, mp, iocp));
5754 5924
5755 5925 #if BGE_SEE_IO32
5756 5926 case BGE_SEE_READ:
5757 5927 case BGE_SEE_WRITE:
5758 5928 return (bge_see_ioctl(bgep, cmd, mp, iocp));
5759 5929 #endif /* BGE_SEE_IO32 */
5760 5930
5761 5931 #if BGE_FLASH_IO32
5762 5932 case BGE_FLASH_READ:
5763 5933 case BGE_FLASH_WRITE:
5764 5934 return (bge_flash_ioctl(bgep, cmd, mp, iocp));
5765 5935 #endif /* BGE_FLASH_IO32 */
5766 5936 }
5767 5937
5768 5938 /* NOTREACHED */
5769 5939 }
5770 5940
5771 5941 /* ARGSUSED */
5772 5942 void
5773 5943 bge_chip_blank(void *arg, time_t ticks, uint_t count, int flag)
5774 5944 {
5775 5945 recv_ring_t *rrp = arg;
5776 5946 bge_t *bgep = rrp->bgep;
5777 5947
5778 5948 mutex_enter(bgep->genlock);
5779 5949 rrp->poll_flag = flag;
5780 5950 #ifdef NOT_YET
5781 5951 /*
5782 5952 * XXX-Sunay: Since most broadcom cards support only one
5783 5953 * interrupt but multiple rx rings, we can't disable the
5784 5954 * physical interrupt. This need to be done via capability
5785 5955 * negotiation depending on the NIC.
5786 5956 */
5787 5957 bge_reg_put32(bgep, RCV_COALESCE_TICKS_REG, ticks);
5788 5958 bge_reg_put32(bgep, RCV_COALESCE_MAX_BD_REG, count);
5789 5959 #endif
5790 5960 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
5791 5961 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
5792 5962 mutex_exit(bgep->genlock);
5793 5963 }
5794 5964
5795 5965 #ifdef BGE_IPMI_ASF
5796 5966
5797 5967 uint32_t
5798 5968 bge_nic_read32(bge_t *bgep, bge_regno_t addr)
5799 5969 {
5800 5970 uint32_t data;
5801 5971
5802 5972 #ifndef __sparc
5803 5973 if (!bgep->asf_wordswapped) {
5804 5974 /* a workaround word swap error */
5805 5975 if (addr & 4)
5806 5976 addr = addr - 4;
5807 5977 else
5808 5978 addr = addr + 4;
5809 5979 }
5810 5980 #else
5811 5981 if (DEVICE_5717_SERIES_CHIPSETS(bgep))
5812 5982 addr = LE_32(addr);
5813 5983 #endif
5814 5984
5815 5985 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, addr);
5816 5986 data = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MWDAR);
5817 5987 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, 0);
5818 5988
5819 5989 data = LE_32(data);
5820 5990
5821 5991 BGE_DEBUG(("bge_nic_read32($%p, 0x%x) => 0x%x",
5822 5992 (void *)bgep, addr, data));
5823 5993
5824 5994 return (data);
5825 5995 }
5826 5996
5827 5997 void
5828 5998 bge_asf_update_status(bge_t *bgep)
5829 5999 {
5830 6000 uint32_t event;
5831 6001
5832 6002 bge_nic_put32(bgep, BGE_CMD_MAILBOX, BGE_CMD_NICDRV_ALIVE);
5833 6003 bge_nic_put32(bgep, BGE_CMD_LENGTH_MAILBOX, 4);
5834 6004 bge_nic_put32(bgep, BGE_CMD_DATA_MAILBOX, 3);
5835 6005
5836 6006 event = bge_reg_get32(bgep, RX_RISC_EVENT_REG);
5837 6007 bge_reg_put32(bgep, RX_RISC_EVENT_REG, event | RRER_ASF_EVENT);
5838 6008 }
5839 6009
5840 6010
5841 6011 /*
5842 6012 * The driver is supposed to notify ASF that the OS is still running
5843 6013 * every three seconds, otherwise the management server may attempt
5844 6014 * to reboot the machine. If it hasn't actually failed, this is
5845 6015 * not a desirable result. However, this isn't running as a real-time
5846 6016 * thread, and even if it were, it might not be able to generate the
5847 6017 * heartbeat in a timely manner due to system load. As it isn't a
5848 6018 * significant strain on the machine, we will set the interval to half
5849 6019 * of the required value.
5850 6020 */
5851 6021 void
5852 6022 bge_asf_heartbeat(void *arg)
5853 6023 {
5854 6024 bge_t *bgep = (bge_t *)arg;
5855 6025
5856 6026 mutex_enter(bgep->genlock);
5857 6027 bge_asf_update_status((bge_t *)bgep);
5858 6028 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
5859 6029 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
5860 6030 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
5861 6031 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
5862 6032 mutex_exit(bgep->genlock);
5863 6033 ((bge_t *)bgep)->asf_timeout_id = timeout(bge_asf_heartbeat, bgep,
5864 6034 drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
5865 6035 }
5866 6036
5867 6037
5868 6038 void
5869 6039 bge_asf_stop_timer(bge_t *bgep)
5870 6040 {
5871 6041 timeout_id_t tmp_id = 0;
5872 6042
5873 6043 while ((bgep->asf_timeout_id != 0) &&
5874 6044 (tmp_id != bgep->asf_timeout_id)) {
5875 6045 tmp_id = bgep->asf_timeout_id;
5876 6046 (void) untimeout(tmp_id);
5877 6047 }
5878 6048 bgep->asf_timeout_id = 0;
5879 6049 }
5880 6050
5881 6051
5882 6052
5883 6053 /*
5884 6054 * This function should be placed at the earliest position of bge_attach().
5885 6055 */
5886 6056 void
5887 6057 bge_asf_get_config(bge_t *bgep)
5888 6058 {
5889 6059 uint32_t nicsig;
5890 6060 uint32_t niccfg;
5891 6061
5892 6062 bgep->asf_enabled = B_FALSE;
5893 6063 nicsig = bge_nic_read32(bgep, BGE_NIC_DATA_SIG_ADDR);
5894 6064 if (nicsig == BGE_NIC_DATA_SIG) {
5895 6065 niccfg = bge_nic_read32(bgep, BGE_NIC_DATA_NIC_CFG_ADDR);
5896 6066 if (niccfg & BGE_NIC_CFG_ENABLE_ASF)
5897 6067 /*
5898 6068 * Here, we don't consider BAXTER, because BGE haven't
5899 6069 * supported BAXTER (that is 5752). Also, as I know,
5900 6070 * BAXTER doesn't support ASF feature.
5901 6071 */
5902 6072 bgep->asf_enabled = B_TRUE;
5903 6073 else
5904 6074 bgep->asf_enabled = B_FALSE;
5905 6075 } else
5906 6076 bgep->asf_enabled = B_FALSE;
5907 6077 }
5908 6078
5909 6079
5910 6080 void
5911 6081 bge_asf_pre_reset_operations(bge_t *bgep, uint32_t mode)
5912 6082 {
5913 6083 uint32_t tries;
5914 6084 uint32_t event;
5915 6085
5916 6086 ASSERT(bgep->asf_enabled);
5917 6087
5918 6088 /* Issues "pause firmware" command and wait for ACK */
5919 6089 bge_nic_put32(bgep, BGE_CMD_MAILBOX, BGE_CMD_NICDRV_PAUSE_FW);
5920 6090 event = bge_reg_get32(bgep, RX_RISC_EVENT_REG);
5921 6091 bge_reg_put32(bgep, RX_RISC_EVENT_REG, event | RRER_ASF_EVENT);
5922 6092
5923 6093 event = bge_reg_get32(bgep, RX_RISC_EVENT_REG);
5924 6094 tries = 0;
5925 6095 while ((event & RRER_ASF_EVENT) && (tries < 100)) {
5926 6096 drv_usecwait(1);
5927 6097 tries ++;
5928 6098 event = bge_reg_get32(bgep, RX_RISC_EVENT_REG);
5929 6099 }
5930 6100
5931 6101 bge_nic_put32(bgep, BGE_FIRMWARE_MAILBOX,
5932 6102 BGE_MAGIC_NUM_FIRMWARE_INIT_DONE);
5933 6103
5934 6104 if (bgep->asf_newhandshake) {
5935 6105 switch (mode) {
5936 6106 case BGE_INIT_RESET:
5937 6107 bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
5938 6108 BGE_DRV_STATE_START);
5939 6109 break;
5940 6110 case BGE_SHUTDOWN_RESET:
5941 6111 bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
5942 6112 BGE_DRV_STATE_UNLOAD);
5943 6113 break;
5944 6114 case BGE_SUSPEND_RESET:
5945 6115 bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
5946 6116 BGE_DRV_STATE_SUSPEND);
5947 6117 break;
5948 6118 default:
5949 6119 break;
5950 6120 }
5951 6121 }
5952 6122 }
5953 6123
5954 6124
5955 6125 void
5956 6126 bge_asf_post_reset_old_mode(bge_t *bgep, uint32_t mode)
5957 6127 {
5958 6128 switch (mode) {
5959 6129 case BGE_INIT_RESET:
5960 6130 bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
5961 6131 BGE_DRV_STATE_START);
5962 6132 break;
5963 6133 case BGE_SHUTDOWN_RESET:
5964 6134 bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
5965 6135 BGE_DRV_STATE_UNLOAD);
5966 6136 break;
5967 6137 case BGE_SUSPEND_RESET:
5968 6138 bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
5969 6139 BGE_DRV_STATE_SUSPEND);
5970 6140 break;
5971 6141 default:
5972 6142 break;
5973 6143 }
5974 6144 }
5975 6145
5976 6146
5977 6147 void
5978 6148 bge_asf_post_reset_new_mode(bge_t *bgep, uint32_t mode)
5979 6149 {
5980 6150 switch (mode) {
5981 6151 case BGE_INIT_RESET:
5982 6152 bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
5983 6153 BGE_DRV_STATE_START_DONE);
5984 6154 break;
5985 6155 case BGE_SHUTDOWN_RESET:
5986 6156 bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
5987 6157 BGE_DRV_STATE_UNLOAD_DONE);
5988 6158 break;
5989 6159 default:
5990 6160 break;
5991 6161 }
5992 6162 }
5993 6163
5994 6164 #endif /* BGE_IPMI_ASF */
|
↓ open down ↓ |
1738 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX