7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
28 */
29
30 #include "bge_impl.h"
31
32 #define PIO_ADDR(bgep, offset) ((void *)((caddr_t)(bgep)->io_regs+(offset)))
33
34 /*
35 * Future features ... ?
36 */
37 #define BGE_CFG_IO8 1 /* 8/16-bit cfg space BIS/BIC */
38 #define BGE_IND_IO32 1 /* indirect access code */
39 #define BGE_SEE_IO32 1 /* SEEPROM access code */
40 #define BGE_FLASH_IO32 1 /* FLASH access code */
41
42 /*
43 * BGE MSI tunable:
44 *
45 * By default MSI is enabled on all supported platforms but it is disabled
46 * for some Broadcom chips due to known MSI hardware issues. Currently MSI
47 * is enabled only for 5714C A2 and 5715C A2 Broadcom chips.
346 * cleared by reset, so we'll have to restore them later. This
347 * comes from the Broadcom document 570X-PG102-R ...
348 *
349 * Note: Broadcom document 570X-PG102-R seems to be in error
350 * here w.r.t. the offsets of the Subsystem Vendor ID and
351 * Subsystem (Device) ID registers, which are the opposite way
352 * round according to the PCI standard. For good measure, we
353 * save/restore both anyway.
354 */
355 handle = bgep->cfg_handle;
356
357 /*
358 * For some chipsets (e.g., BCM5718), if MHCR_ENABLE_ENDIAN_BYTE_SWAP
359 * has been set in PCI_CONF_COMM already, we need to write the
360 * byte-swapped value to it. So we just write zero first for simplicity.
361 */
362 cidp->device = pci_config_get16(handle, PCI_CONF_DEVID);
363 if (DEVICE_5717_SERIES_CHIPSETS(bgep))
364 pci_config_put32(handle, PCI_CONF_BGE_MHCR, 0);
365 mhcr = pci_config_get32(handle, PCI_CONF_BGE_MHCR);
366 cidp->asic_rev = mhcr & MHCR_CHIP_REV_MASK;
367 cidp->businfo = pci_config_get32(handle, PCI_CONF_BGE_PCISTATE);
368 cidp->command = pci_config_get16(handle, PCI_CONF_COMM);
369
370 cidp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
371 cidp->subven = pci_config_get16(handle, PCI_CONF_SUBVENID);
372 cidp->subdev = pci_config_get16(handle, PCI_CONF_SUBSYSID);
373 cidp->revision = pci_config_get8(handle, PCI_CONF_REVID);
374 cidp->clsize = pci_config_get8(handle, PCI_CONF_CACHE_LINESZ);
375 cidp->latency = pci_config_get8(handle, PCI_CONF_LATENCY_TIMER);
376
377 BGE_DEBUG(("bge_chip_cfg_init: %s bus is %s and %s; #INTA is %s",
378 cidp->businfo & PCISTATE_BUS_IS_PCI ? "PCI" : "PCI-X",
379 cidp->businfo & PCISTATE_BUS_IS_FAST ? "fast" : "slow",
380 cidp->businfo & PCISTATE_BUS_IS_32_BIT ? "narrow" : "wide",
381 cidp->businfo & PCISTATE_INTA_STATE ? "high" : "low"));
382 BGE_DEBUG(("bge_chip_cfg_init: vendor 0x%x device 0x%x revision 0x%x",
383 cidp->vendor, cidp->device, cidp->revision));
384 BGE_DEBUG(("bge_chip_cfg_init: subven 0x%x subdev 0x%x asic_rev 0x%x",
385 cidp->subven, cidp->subdev, cidp->asic_rev));
386 BGE_DEBUG(("bge_chip_cfg_init: clsize %d latency %d command 0x%x",
387 cidp->clsize, cidp->latency, cidp->command));
388
389 /*
390 * Step 2 (also step 6): disable and clear interrupts.
391 * Steps 11-13: configure PIO endianness options, and enable
392 * indirect register access. We'll also select any other
393 * options controlled by the MHCR (e.g. tagged status, mask
394 * interrupt mode) at this stage ...
395 *
396 * Note: internally, the chip is 64-bit and BIG-endian, but
397 * since it talks to the host over a (LITTLE-endian) PCI bus,
398 * it normally swaps bytes around at the PCI interface.
399 * However, the PCI host bridge on SPARC systems normally
400 * swaps the byte lanes around too, since SPARCs are also
401 * BIG-endian. So it turns out that on SPARC, the right
402 * option is to tell the chip to swap (and the host bridge
403 * will swap back again), whereas on x86 we ask the chip
404 * NOT to swap, so the natural little-endianness of the
405 * PCI bus is assumed. Then the only thing that doesn't
406 * automatically work right is access to an 8-byte register
407 * by a little-endian host; but we don't want to set the
408 * MHCR_ENABLE_REGISTER_WORD_SWAP bit because then 4-byte
430 * likely in the interrupt handler:
431 *
432 * (1) acknowledge & disable interrupts
433 * (2) while (more to do)
434 * process packets
435 * (3) enable interrupts -- also clears pending
436 *
437 * If the chip received more packets and internally generated
438 * an interrupt between the check at (2) and the mbox write
439 * at (3), this interrupt would be lost :-(
440 *
441 * The best way to avoid this is to use TAGGED STATUS mode,
442 * where the chip includes a unique tag in each status block
443 * update, and the host, when re-enabling interrupts, passes
444 * the last tag it saw back to the chip; then the chip can
445 * see whether the host is truly up to date, and regenerate
446 * its interrupt if not.
447 */
448 mhcr = MHCR_ENABLE_INDIRECT_ACCESS |
449 MHCR_ENABLE_TAGGED_STATUS_MODE |
450 MHCR_MASK_INTERRUPT_MODE |
451 MHCR_CLEAR_INTERRUPT_INTA;
452
453 if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
454 mhcr |= MHCR_MASK_PCI_INT_OUTPUT;
455
456 #ifdef _BIG_ENDIAN
457 mhcr |= MHCR_ENABLE_ENDIAN_WORD_SWAP | MHCR_ENABLE_ENDIAN_BYTE_SWAP;
458 #endif /* _BIG_ENDIAN */
459
460 if (DEVICE_5717_SERIES_CHIPSETS(bgep))
461 pci_config_put32(handle, PCI_CONF_BGE_MHCR, 0);
462 pci_config_put32(handle, PCI_CONF_BGE_MHCR, mhcr);
463
464 #ifdef BGE_IPMI_ASF
465 bgep->asf_wordswapped = B_FALSE;
466 #endif
467 /*
468 * Step 1 (also step 7): Enable PCI Memory Space accesses
469 * Disable Memory Write/Invalidate
1879 case DEVICE_ID_5700x:
1880 case DEVICE_ID_5701:
1881 /*
1882 * These devices support *only* SEEPROMs
1883 */
1884 nvtype = BGE_NVTYPE_SEEPROM;
1885 break;
1886
1887 case DEVICE_ID_5702:
1888 case DEVICE_ID_5702fe:
1889 case DEVICE_ID_5703C:
1890 case DEVICE_ID_5703S:
1891 case DEVICE_ID_5704C:
1892 case DEVICE_ID_5704S:
1893 case DEVICE_ID_5704:
1894 case DEVICE_ID_5705M:
1895 case DEVICE_ID_5705C:
1896 case DEVICE_ID_5705_2:
1897 case DEVICE_ID_5717:
1898 case DEVICE_ID_5718:
1899 case DEVICE_ID_5724:
1900 case DEVICE_ID_57760:
1901 case DEVICE_ID_57780:
1902 case DEVICE_ID_57788:
1903 case DEVICE_ID_57790:
1904 case DEVICE_ID_5780:
1905 case DEVICE_ID_5782:
1906 case DEVICE_ID_5784M:
1907 case DEVICE_ID_5785:
1908 case DEVICE_ID_5787:
1909 case DEVICE_ID_5787M:
1910 case DEVICE_ID_5788:
1911 case DEVICE_ID_5789:
1912 case DEVICE_ID_5751:
1913 case DEVICE_ID_5751M:
1914 case DEVICE_ID_5752:
1915 case DEVICE_ID_5752M:
1916 case DEVICE_ID_5754:
1917 case DEVICE_ID_5755:
1918 case DEVICE_ID_5755M:
2012 cidp->recv_slots = BGE_RECV_SLOTS_USED;
2013 cidp->bge_dma_rwctrl = bge_dma_rwctrl;
2014 cidp->pci_type = BGE_PCI_X;
2015 cidp->statistic_type = BGE_STAT_BLK;
2016 cidp->mbuf_lo_water_rdma = bge_mbuf_lo_water_rdma;
2017 cidp->mbuf_lo_water_rmac = bge_mbuf_lo_water_rmac;
2018 cidp->mbuf_hi_water = bge_mbuf_hi_water;
2019 cidp->rx_ticks_norm = bge_rx_ticks_norm;
2020 cidp->rx_count_norm = bge_rx_count_norm;
2021 cidp->tx_ticks_norm = bge_tx_ticks_norm;
2022 cidp->tx_count_norm = bge_tx_count_norm;
2023 cidp->mask_pci_int = MHCR_MASK_PCI_INT_OUTPUT;
2024
2025 if (cidp->rx_rings == 0 || cidp->rx_rings > BGE_RECV_RINGS_MAX)
2026 cidp->rx_rings = BGE_RECV_RINGS_DEFAULT;
2027 if (cidp->tx_rings == 0 || cidp->tx_rings > BGE_SEND_RINGS_MAX)
2028 cidp->tx_rings = BGE_SEND_RINGS_DEFAULT;
2029
2030 cidp->msi_enabled = B_FALSE;
2031
2032 switch (cidp->device) {
2033 case DEVICE_ID_5717:
2034 case DEVICE_ID_5718:
2035 case DEVICE_ID_5724:
2036 if (cidp->device == DEVICE_ID_5717)
2037 cidp->chip_label = 5717;
2038 else if (cidp->device == DEVICE_ID_5718)
2039 cidp->chip_label = 5718;
2040 else
2041 cidp->chip_label = 5724;
2042 cidp->msi_enabled = bge_enable_msi;
2043 #ifdef __sparc
2044 cidp->mask_pci_int = LE_32(MHCR_MASK_PCI_INT_OUTPUT);
2045 #endif
2046 cidp->bge_dma_rwctrl = LE_32(PDRWCR_VAR_5717);
2047 cidp->pci_type = BGE_PCI_E;
2048 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2049 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5717;
2050 cidp->mbuf_hi_water = MBUF_HIWAT_5717;
2051 cidp->mbuf_base = bge_mbuf_pool_base_5705;
2052 cidp->mbuf_length = bge_mbuf_pool_len_5705;
2053 cidp->recv_slots = BGE_RECV_SLOTS_5705;
2054 cidp->bge_mlcr_default = MLCR_DEFAULT_5717;
2055 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2056 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2057 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2058 cidp->statistic_type = BGE_STAT_REG;
2059 dev_ok = B_TRUE;
2060 break;
2061
2062 case DEVICE_ID_5700:
2063 case DEVICE_ID_5700x:
2064 cidp->chip_label = 5700;
2065 cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2066 break;
2067
2068 case DEVICE_ID_5701:
2069 cidp->chip_label = 5701;
2070 dev_ok = B_TRUE;
2071 cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2072 break;
2073
3414 bge_chip_stop(bgep, B_FALSE);
3415 break;
3416 }
3417
3418 #ifdef BGE_IPMI_ASF
3419 if (bgep->asf_enabled) {
3420 #ifdef __sparc
3421 mhcr = MHCR_ENABLE_INDIRECT_ACCESS |
3422 MHCR_ENABLE_TAGGED_STATUS_MODE |
3423 MHCR_MASK_INTERRUPT_MODE |
3424 MHCR_CLEAR_INTERRUPT_INTA |
3425 MHCR_ENABLE_ENDIAN_WORD_SWAP |
3426 MHCR_ENABLE_ENDIAN_BYTE_SWAP;
3427
3428 if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
3429 mhcr |= MHCR_MASK_PCI_INT_OUTPUT;
3430
3431 if (DEVICE_5717_SERIES_CHIPSETS(bgep))
3432 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR,
3433 0);
3434 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcr);
3435 bge_reg_put32(bgep, MEMORY_ARBITER_MODE_REG,
3436 bge_reg_get32(bgep, MEMORY_ARBITER_MODE_REG) |
3437 MEMORY_ARBITER_ENABLE);
3438 #endif
3439 if (asf_mode == ASF_MODE_INIT) {
3440 bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
3441 } else if (asf_mode == ASF_MODE_SHUTDOWN) {
3442 bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);
3443 }
3444 }
3445 #endif
3446 /*
3447 * Adapted from Broadcom document 570X-PG102-R, pp 102-116.
3448 * Updated to reflect Broadcom document 570X-PG104-R, pp 146-159.
3449 *
3450 * Before resetting the core clock, it is also required to
3451 * initialize the Memory Arbiter as specified in step 9 and the
3452 * Misc Host Control Register as specified in step 13.
3453 * Step 4-5: reset Core clock & wait for completion
3454 * Steps 6-8: are done by bge_chip_cfg_init()
3455 * put the T3_MAGIC_NUMBER into the GENCOMM port before reset
3456 */
3457 if (!bge_chip_enable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0))
3458 retval = DDI_FAILURE;
3459
3460 mhcr = MHCR_ENABLE_INDIRECT_ACCESS |
3461 MHCR_ENABLE_TAGGED_STATUS_MODE |
3462 MHCR_MASK_INTERRUPT_MODE |
3463 MHCR_CLEAR_INTERRUPT_INTA;
3464
3465 if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
3466 mhcr |= MHCR_MASK_PCI_INT_OUTPUT;
3467
3468 #ifdef _BIG_ENDIAN
3469 mhcr |= MHCR_ENABLE_ENDIAN_WORD_SWAP | MHCR_ENABLE_ENDIAN_BYTE_SWAP;
3470 #endif /* _BIG_ENDIAN */
3471 if (DEVICE_5717_SERIES_CHIPSETS(bgep))
3472 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, 0);
3473 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcr);
3474 #ifdef BGE_IPMI_ASF
3475 if (bgep->asf_enabled)
3476 bgep->asf_wordswapped = B_FALSE;
3477 #endif
3478 /*
3479 * NVRAM Corruption Workaround
3480 */
3481 for (tries = 0; tries < MAX_TRY_NVMEM_ACQUIRE; tries++)
3482 if (bge_nvmem_acquire(bgep) != EAGAIN)
3483 break;
3484 if (tries >= MAX_TRY_NVMEM_ACQUIRE)
3485 BGE_DEBUG(("%s: fail to acquire nvram lock",
3486 bgep->ifname));
3487
3488 #ifdef BGE_IPMI_ASF
3489 if (!bgep->asf_enabled) {
3490 #endif
3491 magic = (uint64_t)T3_MAGIC_NUMBER << 32;
3492 bge_nic_put64(bgep, NIC_MEM_GENCOMM, magic);
3493 #ifdef BGE_IPMI_ASF
3494 }
3495 #endif
3496
3497 if (!bge_chip_reset_engine(bgep, MISC_CONFIG_REG))
3517 * not be changed.
3518 */
3519 if (!bge_chip_enable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0))
3520 retval = DDI_FAILURE;
3521
3522 /*
3523 * Steps 10-11: configure PIO endianness options and
3524 * enable indirect register access -- already done
3525 * Steps 12-13: enable writing to the PCI state & clock
3526 * control registers -- not required; we aren't going to
3527 * use those features.
3528 * Steps 14-15: Configure DMA endianness options. See
3529 * the comments on the setting of the MHCR above.
3530 */
3531 #ifdef _BIG_ENDIAN
3532 modeflags = MODE_WORD_SWAP_FRAME | MODE_BYTE_SWAP_FRAME |
3533 MODE_WORD_SWAP_NONFRAME | MODE_BYTE_SWAP_NONFRAME;
3534 #else
3535 modeflags = MODE_WORD_SWAP_FRAME | MODE_BYTE_SWAP_FRAME;
3536 #endif /* _BIG_ENDIAN */
3537 #ifdef BGE_IPMI_ASF
3538 if (bgep->asf_enabled)
3539 modeflags |= MODE_HOST_STACK_UP;
3540 #endif
3541 bge_reg_put32(bgep, MODE_CONTROL_REG, modeflags);
3542
3543 #ifdef BGE_IPMI_ASF
3544 if (bgep->asf_enabled) {
3545 #ifdef __sparc
3546 bge_reg_put32(bgep, MEMORY_ARBITER_MODE_REG,
3547 MEMORY_ARBITER_ENABLE |
3548 bge_reg_get32(bgep, MEMORY_ARBITER_MODE_REG));
3549 #endif
3550
3551 #ifdef BGE_NETCONSOLE
3552 if (!bgep->asf_newhandshake) {
3553 if ((asf_mode == ASF_MODE_INIT) ||
3554 (asf_mode == ASF_MODE_POST_INIT)) {
3555 bge_asf_post_reset_old_mode(bgep,
3556 BGE_INIT_RESET);
3601 * However we take the opportunity to set the MLCR anyway, as
3602 * this register also controls the SEEPROM auto-access method
3603 * which we may want to use later ...
3604 *
3605 * The proper value here depends on the way the chip is wired
3606 * into the circuit board, as this register *also* controls which
3607 * of the "Miscellaneous I/O" pins are driven as outputs and the
3608 * values driven onto those pins!
3609 *
3610 * See also step 74 in the PRM ...
3611 */
3612 bge_reg_put32(bgep, MISC_LOCAL_CONTROL_REG,
3613 bgep->chipid.bge_mlcr_default);
3614 bge_reg_set32(bgep, SERIAL_EEPROM_ADDRESS_REG, SEEPROM_ACCESS_INIT);
3615
3616 /*
3617 * Step 20: clear the Ethernet MAC mode register
3618 */
3619 bge_reg_put32(bgep, ETHERNET_MAC_MODE_REG, 0);
3620
3621 /*
3622 * Step 21: restore cache-line-size, latency timer, and
3623 * subsystem ID registers to their original values (not
3624 * those read into the local structure <chipid>, 'cos
3625 * that was after they were cleared by the RESET).
3626 *
3627 * Note: the Subsystem Vendor/Device ID registers are not
3628 * directly writable in config space, so we use the shadow
3629 * copy in "Page Zero" of register space to restore them
3630 * both in one go ...
3631 */
3632 pci_config_put8(bgep->cfg_handle, PCI_CONF_CACHE_LINESZ,
3633 bgep->chipid.clsize);
3634 pci_config_put8(bgep->cfg_handle, PCI_CONF_LATENCY_TIMER,
3635 bgep->chipid.latency);
3636 bge_reg_put32(bgep, PCI_CONF_SUBVENID,
3637 (bgep->chipid.subdev << 16) | bgep->chipid.subven);
3638
3639 /*
3640 * The SEND INDEX registers should be reset to zero by the
3827 bgep->chipid.mbuf_lo_water_rdma);
3828 bge_reg_put32(bgep, MAC_RX_MBUF_LOWAT_REG,
3829 bgep->chipid.mbuf_lo_water_rmac);
3830 bge_reg_put32(bgep, MBUF_HIWAT_REG,
3831 bgep->chipid.mbuf_hi_water);
3832
3833 /*
3834 * Step 33: configure DMA resource watermarks
3835 */
3836 if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
3837 bge_reg_put32(bgep, DMAD_POOL_LOWAT_REG,
3838 bge_dmad_lo_water);
3839 bge_reg_put32(bgep, DMAD_POOL_HIWAT_REG,
3840 bge_dmad_hi_water);
3841 }
3842 bge_reg_put32(bgep, LOWAT_MAX_RECV_FRAMES_REG, bge_lowat_recv_frames);
3843
3844 /*
3845 * Steps 34-36: enable buffer manager & internal h/w queues
3846 */
3847 if (!bge_chip_enable_engine(bgep, BUFFER_MANAGER_MODE_REG,
3848 STATE_MACHINE_ATTN_ENABLE_BIT))
3849 retval = DDI_FAILURE;
3850 if (!bge_chip_enable_engine(bgep, FTQ_RESET_REG, 0))
3851 retval = DDI_FAILURE;
3852
3853 /*
3854 * Steps 37-39: initialise Receive Buffer (Producer) RCBs
3855 */
3856 if (DEVICE_5717_SERIES_CHIPSETS(bgep)) {
3857 buff_ring_t *brp = &bgep->buff[BGE_STD_BUFF_RING];
3858 bge_reg_put64(bgep, STD_RCV_BD_RING_RCB_REG,
3859 brp->desc.cookie.dmac_laddress);
3860 bge_reg_put32(bgep, STD_RCV_BD_RING_RCB_REG + 8,
3861 (brp->desc.nslots) << 16 | brp->buf[0].size << 2);
3862 bge_reg_put32(bgep, STD_RCV_BD_RING_RCB_REG + 0xc,
3863 NIC_MEM_SHADOW_BUFF_STD_5717);
3864 } else
3865 bge_reg_putrcb(bgep, STD_RCV_BD_RING_RCB_REG,
3866 &bgep->buff[BGE_STD_BUFF_RING].hw_rcb);
3867
3868 if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
3922 * Step 47: configure the MAC unicast address
3923 * Step 48: configure the random backoff seed
3924 * Step 96: set up multicast filters
3925 */
3926 #ifdef BGE_IPMI_ASF
3927 if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE)
3928 #else
3929 if (bge_chip_sync(bgep) == DDI_FAILURE)
3930 #endif
3931 retval = DDI_FAILURE;
3932
3933 /*
3934 * Step 49: configure the MTU
3935 */
3936 mtu = bgep->chipid.ethmax_size+ETHERFCSL+VLAN_TAGSZ;
3937 bge_reg_put32(bgep, MAC_RX_MTU_SIZE_REG, mtu);
3938
3939 /*
3940 * Step 50: configure the IPG et al
3941 */
3942 bge_reg_put32(bgep, MAC_TX_LENGTHS_REG, MAC_TX_LENGTHS_DEFAULT);
3943
3944 /*
3945 * Step 51: configure the default Rx Return Ring
3946 */
3947 bge_reg_put32(bgep, RCV_RULES_CONFIG_REG, RCV_RULES_CONFIG_DEFAULT);
3948
3949 /*
3950 * Steps 52-54: configure Receive List Placement,
3951 * and enable Receive List Placement Statistics
3952 */
3953 bge_reg_put32(bgep, RCV_LP_CONFIG_REG,
3954 RCV_LP_CONFIG(bgep->chipid.rx_rings));
3955 switch (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev)) {
3956 case MHCR_CHIP_ASIC_REV_5700:
3957 case MHCR_CHIP_ASIC_REV_5701:
3958 case MHCR_CHIP_ASIC_REV_5703:
3959 case MHCR_CHIP_ASIC_REV_5704:
3960 bge_reg_put32(bgep, RCV_LP_STATS_ENABLE_MASK_REG, ~0);
3961 break;
3962 case MHCR_CHIP_ASIC_REV_5705:
4077 */
4078 bge_mbx_put(bgep, INTERRUPT_MBOX_0_REG, 0);
4079
4080 /*
4081 * Steps 76-87: Gentlemen, start your engines ...
4082 *
4083 * Enable the DMA Completion Engine, the Write DMA Engine,
4084 * the Read DMA Engine, Receive Data Completion Engine,
4085 * the MBuf Cluster Free Engine, the Send Data Completion Engine,
4086 * the Send BD Completion Engine, the Receive BD Initiator Engine,
4087 * the Receive Data Initiator Engine, the Send Data Initiator Engine,
4088 * the Send BD Initiator Engine, and the Send BD Selector Engine.
4089 *
4090 * Beware exhaust fumes?
4091 */
4092 if (DEVICE_5704_SERIES_CHIPSETS(bgep))
4093 if (!bge_chip_enable_engine(bgep, DMA_COMPLETION_MODE_REG, 0))
4094 retval = DDI_FAILURE;
4095 dma_wrprio = (bge_dma_wrprio << DMA_PRIORITY_SHIFT) |
4096 ALL_DMA_ATTN_BITS;
4097 if ((MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4098 MHCR_CHIP_ASIC_REV_5755) ||
4099 DEVICE_5723_SERIES_CHIPSETS(bgep) ||
4100 DEVICE_5906_SERIES_CHIPSETS(bgep)) {
4101 dma_wrprio |= DMA_STATUS_TAG_FIX_CQ12384;
4102 }
4103 if (!bge_chip_enable_engine(bgep, WRITE_DMA_MODE_REG,
4104 dma_wrprio))
4105 retval = DDI_FAILURE;
4106 if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
4107 DEVICE_5717_SERIES_CHIPSETS(bgep))
4108 bge_dma_rdprio = 0;
4109 if (!bge_chip_enable_engine(bgep, READ_DMA_MODE_REG,
4110 (bge_dma_rdprio << DMA_PRIORITY_SHIFT) | ALL_DMA_ATTN_BITS))
4111 retval = DDI_FAILURE;
4112 if (!bge_chip_enable_engine(bgep, RCV_DATA_COMPLETION_MODE_REG,
4113 STATE_MACHINE_ATTN_ENABLE_BIT))
4114 retval = DDI_FAILURE;
4115 if (DEVICE_5704_SERIES_CHIPSETS(bgep))
4116 if (!bge_chip_enable_engine(bgep,
4117 MBUF_CLUSTER_FREE_MODE_REG, 0))
4118 retval = DDI_FAILURE;
4119 if (!bge_chip_enable_engine(bgep, SEND_DATA_COMPLETION_MODE_REG, 0))
4120 retval = DDI_FAILURE;
4121 if (!bge_chip_enable_engine(bgep, SEND_BD_COMPLETION_MODE_REG,
4122 STATE_MACHINE_ATTN_ENABLE_BIT))
4123 retval = DDI_FAILURE;
4124 if (!bge_chip_enable_engine(bgep, RCV_BD_INITIATOR_MODE_REG,
4125 RCV_BD_DISABLED_RING_ATTN))
4126 retval = DDI_FAILURE;
4127 if (!bge_chip_enable_engine(bgep, RCV_DATA_BD_INITIATOR_MODE_REG,
4128 RCV_DATA_BD_ILL_RING_ATTN))
4129 retval = DDI_FAILURE;
4130 if (!bge_chip_enable_engine(bgep, SEND_DATA_INITIATOR_MODE_REG, 0))
4131 retval = DDI_FAILURE;
4132 if (!bge_chip_enable_engine(bgep, SEND_BD_INITIATOR_MODE_REG,
4133 STATE_MACHINE_ATTN_ENABLE_BIT))
4134 retval = DDI_FAILURE;
4135 if (!bge_chip_enable_engine(bgep, SEND_BD_SELECTOR_MODE_REG,
4136 STATE_MACHINE_ATTN_ENABLE_BIT))
4137 retval = DDI_FAILURE;
4138
4139 /*
4140 * Step 88: download firmware -- doesn't apply
4141 * Steps 89-90: enable Transmit & Receive MAC Engines
4142 */
4143 if (!bge_chip_enable_engine(bgep, TRANSMIT_MAC_MODE_REG, 0))
4144 retval = DDI_FAILURE;
4145 #ifdef BGE_IPMI_ASF
4146 if (!bgep->asf_enabled) {
4147 if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG,
4148 RECEIVE_MODE_KEEP_VLAN_TAG))
4149 retval = DDI_FAILURE;
4150 } else {
4151 if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG, 0))
4152 retval = DDI_FAILURE;
4153 }
4154 #else
4155 if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG,
4156 RECEIVE_MODE_KEEP_VLAN_TAG))
4157 retval = DDI_FAILURE;
4158 #endif
4159
4160 /*
4161 * Step 91: disable auto-polling of PHY status
4162 */
4163 bge_reg_put32(bgep, MI_MODE_REG, MI_MODE_DEFAULT);
4226 MODE_INT_ON_FLOW_ATTN |
4227 MODE_INT_ON_DMA_ATTN |
4228 MODE_HOST_STACK_UP|
4229 MODE_INT_ON_MAC_ATTN);
4230 } else {
4231 #endif
4232 bge_reg_set32(bgep, MODE_CONTROL_REG,
4233 MODE_INT_ON_FLOW_ATTN |
4234 MODE_INT_ON_DMA_ATTN |
4235 MODE_INT_ON_MAC_ATTN);
4236 #ifdef BGE_IPMI_ASF
4237 }
4238 #endif
4239
4240 /*
4241 * Step 97: enable PCI interrupts!!!
4242 */
4243 if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
4244 bge_cfg_clr32(bgep, PCI_CONF_BGE_MHCR,
4245 bgep->chipid.mask_pci_int);
4246
4247 /*
4248 * All done!
4249 */
4250 bgep->bge_chip_state = BGE_CHIP_RUNNING;
4251 return (retval);
4252 }
4253
4254
4255 /*
4256 * ========== Hardware interrupt handler ==========
4257 */
4258
4259 #undef BGE_DBG
4260 #define BGE_DBG BGE_DBG_INT /* debug flag for this code */
4261
4262 /*
4263 * Sync the status block, then atomically clear the specified bits in
4264 * the <flags-and-tag> field of the status block, returning the
4265 * value of the <tag> and the <flags> before the bits were
4266 * cleared.
|
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * Copyright 2011, 2012 Nexenta Systems, Inc. All rights reserved.
28 */
29
30 #include "bge_impl.h"
31
32 #define PIO_ADDR(bgep, offset) ((void *)((caddr_t)(bgep)->io_regs+(offset)))
33
34 /*
35 * Future features ... ?
36 */
37 #define BGE_CFG_IO8 1 /* 8/16-bit cfg space BIS/BIC */
38 #define BGE_IND_IO32 1 /* indirect access code */
39 #define BGE_SEE_IO32 1 /* SEEPROM access code */
40 #define BGE_FLASH_IO32 1 /* FLASH access code */
41
42 /*
43 * BGE MSI tunable:
44 *
45 * By default MSI is enabled on all supported platforms but it is disabled
46 * for some Broadcom chips due to known MSI hardware issues. Currently MSI
47 * is enabled only for 5714C A2 and 5715C A2 Broadcom chips.
346 * cleared by reset, so we'll have to restore them later. This
347 * comes from the Broadcom document 570X-PG102-R ...
348 *
349 * Note: Broadcom document 570X-PG102-R seems to be in error
350 * here w.r.t. the offsets of the Subsystem Vendor ID and
351 * Subsystem (Device) ID registers, which are the opposite way
352 * round according to the PCI standard. For good measure, we
353 * save/restore both anyway.
354 */
355 handle = bgep->cfg_handle;
356
357 /*
358 * For some chipsets (e.g., BCM5718), if MHCR_ENABLE_ENDIAN_BYTE_SWAP
359 * has been set in PCI_CONF_COMM already, we need to write the
360 * byte-swapped value to it. So we just write zero first for simplicity.
361 */
362 cidp->device = pci_config_get16(handle, PCI_CONF_DEVID);
363 if (DEVICE_5717_SERIES_CHIPSETS(bgep))
364 pci_config_put32(handle, PCI_CONF_BGE_MHCR, 0);
365 mhcr = pci_config_get32(handle, PCI_CONF_BGE_MHCR);
366 cidp->asic_rev = (mhcr & MHCR_CHIP_REV_MASK) >> MHCR_CHIP_REV_SHIFT;
367 if (MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_PRODID) {
368 uint32_t reg;
369 switch (cidp->device) {
370 case DEVICE_ID_5717:
371 case DEVICE_ID_5718:
372 case DEVICE_ID_5719:
373 case DEVICE_ID_5720:
374 reg = PCI_CONF_GEN2_PRODID_ASICREV;
375 break;
376 case DEVICE_ID_57781:
377 case DEVICE_ID_57785:
378 case DEVICE_ID_57761:
379 case DEVICE_ID_57765:
380 case DEVICE_ID_57791:
381 case DEVICE_ID_57795:
382 case DEVICE_ID_57762:
383 case DEVICE_ID_57766:
384 case DEVICE_ID_57782:
385 case DEVICE_ID_57786:
386 reg = PCI_CONF_GEN15_PRODID_ASICREV;
387 break;
388 default:
389 reg = PCI_CONF_PRODID_ASICREV;
390 break;
391 }
392 cidp->asic_rev = pci_config_get32(handle, reg);
393 }
394 cidp->businfo = pci_config_get32(handle, PCI_CONF_BGE_PCISTATE);
395 cidp->command = pci_config_get16(handle, PCI_CONF_COMM);
396
397 cidp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
398 cidp->subven = pci_config_get16(handle, PCI_CONF_SUBVENID);
399 cidp->subdev = pci_config_get16(handle, PCI_CONF_SUBSYSID);
400 cidp->revision = pci_config_get8(handle, PCI_CONF_REVID);
401 cidp->clsize = pci_config_get8(handle, PCI_CONF_CACHE_LINESZ);
402 cidp->latency = pci_config_get8(handle, PCI_CONF_LATENCY_TIMER);
403
404 BGE_DEBUG(("bge_chip_cfg_init: %s bus is %s and %s; #INTA is %s",
405 cidp->businfo & PCISTATE_BUS_IS_PCI ? "PCI" : "PCI-X",
406 cidp->businfo & PCISTATE_BUS_IS_FAST ? "fast" : "slow",
407 cidp->businfo & PCISTATE_BUS_IS_32_BIT ? "narrow" : "wide",
408 cidp->businfo & PCISTATE_INTA_STATE ? "high" : "low"));
409 BGE_DEBUG(("bge_chip_cfg_init: vendor 0x%x device 0x%x revision 0x%x",
410 cidp->vendor, cidp->device, cidp->revision));
411 BGE_DEBUG(("bge_chip_cfg_init: subven 0x%x subdev 0x%x asic_rev 0x%x",
412 cidp->subven, cidp->subdev, cidp->asic_rev));
413 BGE_DEBUG(("bge_chip_cfg_init: clsize %d latency %d command 0x%x",
414 cidp->clsize, cidp->latency, cidp->command));
415
416 cidp->chip_type = 0;
417 if (MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5717 ||
418 MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5719 ||
419 MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5720)
420 cidp->chip_type |= CHIP_TYPE_5717_PLUS;
421
422 if (MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_57765 ||
423 MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_57766)
424 cidp->chip_type |= CHIP_TYPE_57765_CLASS;
425
426 if (cidp->chip_type & CHIP_TYPE_57765_CLASS ||
427 cidp->chip_type & CHIP_TYPE_5717_PLUS)
428 cidp->chip_type |= CHIP_TYPE_57765_PLUS;
429
430 /* Intentionally exclude ASIC_REV_5906 */
431 if (MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5755 ||
432 MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5787 ||
433 MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5784 ||
434 MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5761 ||
435 MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5785 ||
436 MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_57780 ||
437 cidp->chip_type & CHIP_TYPE_57765_PLUS)
438 cidp->chip_type |= CHIP_TYPE_5755_PLUS;
439
440 if (MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5780 ||
441 MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5714)
442 cidp->chip_type |= CHIP_TYPE_5780_CLASS;
443
444 if (MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5750 ||
445 MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5752 ||
446 MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5906 ||
447 cidp->chip_type & CHIP_TYPE_5755_PLUS ||
448 cidp->chip_type & CHIP_TYPE_5780_CLASS)
449 cidp->chip_type |= CHIP_TYPE_5750_PLUS;
450
451 if (MHCR_CHIP_ASIC_REV(cidp->asic_rev) == MHCR_CHIP_ASIC_REV_5705 ||
452 cidp->chip_type & CHIP_TYPE_5750_PLUS)
453 cidp->chip_type |= CHIP_TYPE_5705_PLUS;
454
455 /*
456 * Step 2 (also step 6): disable and clear interrupts.
457 * Steps 11-13: configure PIO endianness options, and enable
458 * indirect register access. We'll also select any other
459 * options controlled by the MHCR (e.g. tagged status, mask
460 * interrupt mode) at this stage ...
461 *
462 * Note: internally, the chip is 64-bit and BIG-endian, but
463 * since it talks to the host over a (LITTLE-endian) PCI bus,
464 * it normally swaps bytes around at the PCI interface.
465 * However, the PCI host bridge on SPARC systems normally
466 * swaps the byte lanes around too, since SPARCs are also
467 * BIG-endian. So it turns out that on SPARC, the right
468 * option is to tell the chip to swap (and the host bridge
469 * will swap back again), whereas on x86 we ask the chip
470 * NOT to swap, so the natural little-endianness of the
471 * PCI bus is assumed. Then the only thing that doesn't
472 * automatically work right is access to an 8-byte register
473 * by a little-endian host; but we don't want to set the
474 * MHCR_ENABLE_REGISTER_WORD_SWAP bit because then 4-byte
496 * likely in the interrupt handler:
497 *
498 * (1) acknowledge & disable interrupts
499 * (2) while (more to do)
500 * process packets
501 * (3) enable interrupts -- also clears pending
502 *
503 * If the chip received more packets and internally generated
504 * an interrupt between the check at (2) and the mbox write
505 * at (3), this interrupt would be lost :-(
506 *
507 * The best way to avoid this is to use TAGGED STATUS mode,
508 * where the chip includes a unique tag in each status block
509 * update, and the host, when re-enabling interrupts, passes
510 * the last tag it saw back to the chip; then the chip can
511 * see whether the host is truly up to date, and regenerate
512 * its interrupt if not.
513 */
514 mhcr = MHCR_ENABLE_INDIRECT_ACCESS |
515 MHCR_ENABLE_TAGGED_STATUS_MODE |
516 MHCR_ENABLE_PCI_STATE_WRITE |
517 MHCR_MASK_INTERRUPT_MODE |
518 MHCR_CLEAR_INTERRUPT_INTA;
519
520 if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
521 mhcr |= MHCR_MASK_PCI_INT_OUTPUT;
522
523 #ifdef _BIG_ENDIAN
524 mhcr |= MHCR_ENABLE_ENDIAN_WORD_SWAP | MHCR_ENABLE_ENDIAN_BYTE_SWAP;
525 #endif /* _BIG_ENDIAN */
526
527 if (DEVICE_5717_SERIES_CHIPSETS(bgep))
528 pci_config_put32(handle, PCI_CONF_BGE_MHCR, 0);
529 pci_config_put32(handle, PCI_CONF_BGE_MHCR, mhcr);
530
531 #ifdef BGE_IPMI_ASF
532 bgep->asf_wordswapped = B_FALSE;
533 #endif
534 /*
535 * Step 1 (also step 7): Enable PCI Memory Space accesses
536 * Disable Memory Write/Invalidate
1946 case DEVICE_ID_5700x:
1947 case DEVICE_ID_5701:
1948 /*
1949 * These devices support *only* SEEPROMs
1950 */
1951 nvtype = BGE_NVTYPE_SEEPROM;
1952 break;
1953
1954 case DEVICE_ID_5702:
1955 case DEVICE_ID_5702fe:
1956 case DEVICE_ID_5703C:
1957 case DEVICE_ID_5703S:
1958 case DEVICE_ID_5704C:
1959 case DEVICE_ID_5704S:
1960 case DEVICE_ID_5704:
1961 case DEVICE_ID_5705M:
1962 case DEVICE_ID_5705C:
1963 case DEVICE_ID_5705_2:
1964 case DEVICE_ID_5717:
1965 case DEVICE_ID_5718:
1966 case DEVICE_ID_5719:
1967 case DEVICE_ID_5720:
1968 case DEVICE_ID_5724:
1969 case DEVICE_ID_57760:
1970 case DEVICE_ID_57780:
1971 case DEVICE_ID_57788:
1972 case DEVICE_ID_57790:
1973 case DEVICE_ID_5780:
1974 case DEVICE_ID_5782:
1975 case DEVICE_ID_5784M:
1976 case DEVICE_ID_5785:
1977 case DEVICE_ID_5787:
1978 case DEVICE_ID_5787M:
1979 case DEVICE_ID_5788:
1980 case DEVICE_ID_5789:
1981 case DEVICE_ID_5751:
1982 case DEVICE_ID_5751M:
1983 case DEVICE_ID_5752:
1984 case DEVICE_ID_5752M:
1985 case DEVICE_ID_5754:
1986 case DEVICE_ID_5755:
1987 case DEVICE_ID_5755M:
2081 cidp->recv_slots = BGE_RECV_SLOTS_USED;
2082 cidp->bge_dma_rwctrl = bge_dma_rwctrl;
2083 cidp->pci_type = BGE_PCI_X;
2084 cidp->statistic_type = BGE_STAT_BLK;
2085 cidp->mbuf_lo_water_rdma = bge_mbuf_lo_water_rdma;
2086 cidp->mbuf_lo_water_rmac = bge_mbuf_lo_water_rmac;
2087 cidp->mbuf_hi_water = bge_mbuf_hi_water;
2088 cidp->rx_ticks_norm = bge_rx_ticks_norm;
2089 cidp->rx_count_norm = bge_rx_count_norm;
2090 cidp->tx_ticks_norm = bge_tx_ticks_norm;
2091 cidp->tx_count_norm = bge_tx_count_norm;
2092 cidp->mask_pci_int = MHCR_MASK_PCI_INT_OUTPUT;
2093
2094 if (cidp->rx_rings == 0 || cidp->rx_rings > BGE_RECV_RINGS_MAX)
2095 cidp->rx_rings = BGE_RECV_RINGS_DEFAULT;
2096 if (cidp->tx_rings == 0 || cidp->tx_rings > BGE_SEND_RINGS_MAX)
2097 cidp->tx_rings = BGE_SEND_RINGS_DEFAULT;
2098
2099 cidp->msi_enabled = B_FALSE;
2100
2101 if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) >
2102 MHCR_CHIP_ASIC_REV_PRODID ||
2103 MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
2104 MHCR_CHIP_ASIC_REV_5906 ||
2105 MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
2106 MHCR_CHIP_ASIC_REV_5700 ||
2107 MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
2108 MHCR_CHIP_ASIC_REV_5701 ||
2109 MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
2110 MHCR_CHIP_ASIC_REV_5750)
2111 /*
2112 * Just a plain reset; the "check" code breaks these chips
2113 */
2114 cidp->flags |= CHIP_FLAG_NO_CHECK_RESET;
2115
2116 switch (cidp->device) {
2117 case DEVICE_ID_5717:
2118 case DEVICE_ID_5718:
2119 case DEVICE_ID_5719:
2120 case DEVICE_ID_5720:
2121 case DEVICE_ID_5724:
2122 if (cidp->device == DEVICE_ID_5717)
2123 cidp->chip_label = 5717;
2124 else if (cidp->device == DEVICE_ID_5718)
2125 cidp->chip_label = 5718;
2126 else if (cidp->device == DEVICE_ID_5719)
2127 cidp->chip_label = 5719;
2128 else if (cidp->device == DEVICE_ID_5720)
2129 cidp->chip_label = 5720;
2130 else
2131 cidp->chip_label = 5724;
2132 cidp->msi_enabled = bge_enable_msi;
2133 #ifdef __sparc
2134 cidp->mask_pci_int = LE_32(MHCR_MASK_PCI_INT_OUTPUT);
2135 #endif
2136 cidp->bge_dma_rwctrl = LE_32(PDRWCR_VAR_5717);
2137 cidp->pci_type = BGE_PCI_E;
2138 cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2139 cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5717;
2140 cidp->mbuf_hi_water = MBUF_HIWAT_5717;
2141 cidp->mbuf_base = bge_mbuf_pool_base_5705;
2142 cidp->mbuf_length = bge_mbuf_pool_len_5705;
2143 cidp->recv_slots = BGE_RECV_SLOTS_5717;
2144 cidp->bge_mlcr_default = MLCR_DEFAULT_5717;
2145 cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2146 cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2147 cidp->flags |= CHIP_FLAG_NO_JUMBO;
2148 cidp->statistic_type = BGE_STAT_REG;
2149 dev_ok = B_TRUE;
2150 break;
2151
2152 case DEVICE_ID_5700:
2153 case DEVICE_ID_5700x:
2154 cidp->chip_label = 5700;
2155 cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2156 break;
2157
2158 case DEVICE_ID_5701:
2159 cidp->chip_label = 5701;
2160 dev_ok = B_TRUE;
2161 cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2162 break;
2163
3504 bge_chip_stop(bgep, B_FALSE);
3505 break;
3506 }
3507
3508 #ifdef BGE_IPMI_ASF
3509 if (bgep->asf_enabled) {
3510 #ifdef __sparc
3511 mhcr = MHCR_ENABLE_INDIRECT_ACCESS |
3512 MHCR_ENABLE_TAGGED_STATUS_MODE |
3513 MHCR_MASK_INTERRUPT_MODE |
3514 MHCR_CLEAR_INTERRUPT_INTA |
3515 MHCR_ENABLE_ENDIAN_WORD_SWAP |
3516 MHCR_ENABLE_ENDIAN_BYTE_SWAP;
3517
3518 if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
3519 mhcr |= MHCR_MASK_PCI_INT_OUTPUT;
3520
3521 if (DEVICE_5717_SERIES_CHIPSETS(bgep))
3522 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR,
3523 0);
3524 #else
3525 mhcr = MHCR_ENABLE_INDIRECT_ACCESS |
3526 MHCR_ENABLE_TAGGED_STATUS_MODE |
3527 MHCR_MASK_INTERRUPT_MODE |
3528 MHCR_MASK_PCI_INT_OUTPUT |
3529 MHCR_CLEAR_INTERRUPT_INTA;
3530 #endif
3531 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcr);
3532 bge_reg_put32(bgep, MEMORY_ARBITER_MODE_REG,
3533 bge_reg_get32(bgep, MEMORY_ARBITER_MODE_REG) |
3534 MEMORY_ARBITER_ENABLE);
3535 if (asf_mode == ASF_MODE_INIT) {
3536 bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
3537 } else if (asf_mode == ASF_MODE_SHUTDOWN) {
3538 bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);
3539 }
3540 }
3541 #endif
3542 /*
3543 * Adapted from Broadcom document 570X-PG102-R, pp 102-116.
3544 * Updated to reflect Broadcom document 570X-PG104-R, pp 146-159.
3545 *
3546 * Before resetting the Core clock, it is
3547 * also required to initialize the Memory Arbiter as specified in step 9
3548 * and the Misc Host Control Register as specified in step 13.
3549 * Step 4-5: reset Core clock & wait for completion
3550 * Steps 6-8: are done by bge_chip_cfg_init()
3551 * put the T3_MAGIC_NUMBER into the GENCOMM port before reset
3552 */
3553 if (!bge_chip_enable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0))
3554 retval = DDI_FAILURE;
3555
3556 mhcr = MHCR_ENABLE_INDIRECT_ACCESS |
3557 MHCR_ENABLE_TAGGED_STATUS_MODE |
3558 MHCR_ENABLE_PCI_STATE_WRITE |
3559 MHCR_MASK_INTERRUPT_MODE |
3560 MHCR_CLEAR_INTERRUPT_INTA;
3561
3562 if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
3563 mhcr |= MHCR_MASK_PCI_INT_OUTPUT;
3564
3565 #ifdef _BIG_ENDIAN
3566 mhcr |= MHCR_ENABLE_ENDIAN_WORD_SWAP | MHCR_ENABLE_ENDIAN_BYTE_SWAP;
3567 #endif /* _BIG_ENDIAN */
3568 if (DEVICE_5717_SERIES_CHIPSETS(bgep))
3569 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, 0);
3570 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcr);
3571 #ifdef BGE_IPMI_ASF
3572 if (bgep->asf_enabled)
3573 bgep->asf_wordswapped = B_FALSE;
3574 #endif
3575
3576 if (DEVICE_IS_5755_PLUS(bgep) ||
3577 MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
3578 MHCR_CHIP_ASIC_REV_5752)
3579 bge_reg_put32(bgep, GRC_FASTBOOT_PC, 0);
3580
3581 /*
3582 * NVRAM Corruption Workaround
3583 */
3584 for (tries = 0; tries < MAX_TRY_NVMEM_ACQUIRE; tries++)
3585 if (bge_nvmem_acquire(bgep) != EAGAIN)
3586 break;
3587 if (tries >= MAX_TRY_NVMEM_ACQUIRE)
3588 BGE_DEBUG(("%s: fail to acquire nvram lock",
3589 bgep->ifname));
3590
3591 #ifdef BGE_IPMI_ASF
3592 if (!bgep->asf_enabled) {
3593 #endif
3594 magic = (uint64_t)T3_MAGIC_NUMBER << 32;
3595 bge_nic_put64(bgep, NIC_MEM_GENCOMM, magic);
3596 #ifdef BGE_IPMI_ASF
3597 }
3598 #endif
3599
3600 if (!bge_chip_reset_engine(bgep, MISC_CONFIG_REG))
3620 * not be changed.
3621 */
3622 if (!bge_chip_enable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0))
3623 retval = DDI_FAILURE;
3624
3625 /*
3626 * Steps 10-11: configure PIO endianness options and
3627 * enable indirect register access -- already done
3628 * Steps 12-13: enable writing to the PCI state & clock
3629 * control registers -- not required; we aren't going to
3630 * use those features.
3631 * Steps 14-15: Configure DMA endianness options. See
3632 * the comments on the setting of the MHCR above.
3633 */
3634 #ifdef _BIG_ENDIAN
3635 modeflags = MODE_WORD_SWAP_FRAME | MODE_BYTE_SWAP_FRAME |
3636 MODE_WORD_SWAP_NONFRAME | MODE_BYTE_SWAP_NONFRAME;
3637 #else
3638 modeflags = MODE_WORD_SWAP_FRAME | MODE_BYTE_SWAP_FRAME;
3639 #endif /* _BIG_ENDIAN */
3640 if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
3641 MHCR_CHIP_ASIC_REV_5720)
3642 modeflags |=
3643 MODE_BYTE_SWAP_B2HRX_DATA | MODE_WORD_SWAP_B2HRX_DATA |
3644 MODE_B2HRX_ENABLE | MODE_HTX2B_ENABLE;
3645 #ifdef BGE_IPMI_ASF
3646 if (bgep->asf_enabled)
3647 modeflags |= MODE_HOST_STACK_UP;
3648 #endif
3649 bge_reg_put32(bgep, MODE_CONTROL_REG, modeflags);
3650
3651 #ifdef BGE_IPMI_ASF
3652 if (bgep->asf_enabled) {
3653 #ifdef __sparc
3654 bge_reg_put32(bgep, MEMORY_ARBITER_MODE_REG,
3655 MEMORY_ARBITER_ENABLE |
3656 bge_reg_get32(bgep, MEMORY_ARBITER_MODE_REG));
3657 #endif
3658
3659 #ifdef BGE_NETCONSOLE
3660 if (!bgep->asf_newhandshake) {
3661 if ((asf_mode == ASF_MODE_INIT) ||
3662 (asf_mode == ASF_MODE_POST_INIT)) {
3663 bge_asf_post_reset_old_mode(bgep,
3664 BGE_INIT_RESET);
3709 * However we take the opportunity to set the MLCR anyway, as
3710 * this register also controls the SEEPROM auto-access method
3711 * which we may want to use later ...
3712 *
3713 * The proper value here depends on the way the chip is wired
3714 * into the circuit board, as this register *also* controls which
3715 * of the "Miscellaneous I/O" pins are driven as outputs and the
3716 * values driven onto those pins!
3717 *
3718 * See also step 74 in the PRM ...
3719 */
3720 bge_reg_put32(bgep, MISC_LOCAL_CONTROL_REG,
3721 bgep->chipid.bge_mlcr_default);
3722 bge_reg_set32(bgep, SERIAL_EEPROM_ADDRESS_REG, SEEPROM_ACCESS_INIT);
3723
3724 /*
3725 * Step 20: clear the Ethernet MAC mode register
3726 */
3727 bge_reg_put32(bgep, ETHERNET_MAC_MODE_REG, 0);
3728
3729 if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
3730 MHCR_CHIP_ASIC_REV_5720) {
3731 uint32_t regval = bge_reg_get32(bgep, CPMU_CLCK_ORIDE_REG);
3732 bge_reg_put32(bgep, CPMU_CLCK_ORIDE_REG,
3733 regval & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
3734 }
3735
3736 /*
3737 * Step 21: restore cache-line-size, latency timer, and
3738 * subsystem ID registers to their original values (not
3739 * those read into the local structure <chipid>, 'cos
3740 * that was after they were cleared by the RESET).
3741 *
3742 * Note: the Subsystem Vendor/Device ID registers are not
3743 * directly writable in config space, so we use the shadow
3744 * copy in "Page Zero" of register space to restore them
3745 * both in one go ...
3746 */
3747 pci_config_put8(bgep->cfg_handle, PCI_CONF_CACHE_LINESZ,
3748 bgep->chipid.clsize);
3749 pci_config_put8(bgep->cfg_handle, PCI_CONF_LATENCY_TIMER,
3750 bgep->chipid.latency);
3751 bge_reg_put32(bgep, PCI_CONF_SUBVENID,
3752 (bgep->chipid.subdev << 16) | bgep->chipid.subven);
3753
3754 /*
3755 * The SEND INDEX registers should be reset to zero by the
3942 bgep->chipid.mbuf_lo_water_rdma);
3943 bge_reg_put32(bgep, MAC_RX_MBUF_LOWAT_REG,
3944 bgep->chipid.mbuf_lo_water_rmac);
3945 bge_reg_put32(bgep, MBUF_HIWAT_REG,
3946 bgep->chipid.mbuf_hi_water);
3947
3948 /*
3949 * Step 33: configure DMA resource watermarks
3950 */
3951 if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
3952 bge_reg_put32(bgep, DMAD_POOL_LOWAT_REG,
3953 bge_dmad_lo_water);
3954 bge_reg_put32(bgep, DMAD_POOL_HIWAT_REG,
3955 bge_dmad_hi_water);
3956 }
3957 bge_reg_put32(bgep, LOWAT_MAX_RECV_FRAMES_REG, bge_lowat_recv_frames);
3958
3959 /*
3960 * Steps 34-36: enable buffer manager & internal h/w queues
3961 */
3962
3963 regval = STATE_MACHINE_ATTN_ENABLE_BIT;
3964 if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
3965 MHCR_CHIP_ASIC_REV_5719)
3966 regval |= BUFF_MGR_NO_TX_UNDERRUN;
3967 if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
3968 MHCR_CHIP_ASIC_REV_5717 ||
3969 bgep->chipid.asic_rev == MHCR_CHIP_REV_5719_A0 ||
3970 bgep->chipid.asic_rev == MHCR_CHIP_REV_5720_A0)
3971 regval |= BUFF_MGR_MBUF_LOW_ATTN_ENABLE;
3972 if (!bge_chip_enable_engine(bgep, BUFFER_MANAGER_MODE_REG, regval))
3973 retval = DDI_FAILURE;
3974 if (!bge_chip_enable_engine(bgep, FTQ_RESET_REG, 0))
3975 retval = DDI_FAILURE;
3976
3977 /*
3978 * Steps 37-39: initialise Receive Buffer (Producer) RCBs
3979 */
3980 if (DEVICE_5717_SERIES_CHIPSETS(bgep)) {
3981 buff_ring_t *brp = &bgep->buff[BGE_STD_BUFF_RING];
3982 bge_reg_put64(bgep, STD_RCV_BD_RING_RCB_REG,
3983 brp->desc.cookie.dmac_laddress);
3984 bge_reg_put32(bgep, STD_RCV_BD_RING_RCB_REG + 8,
3985 (brp->desc.nslots) << 16 | brp->buf[0].size << 2);
3986 bge_reg_put32(bgep, STD_RCV_BD_RING_RCB_REG + 0xc,
3987 NIC_MEM_SHADOW_BUFF_STD_5717);
3988 } else
3989 bge_reg_putrcb(bgep, STD_RCV_BD_RING_RCB_REG,
3990 &bgep->buff[BGE_STD_BUFF_RING].hw_rcb);
3991
3992 if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
4046 * Step 47: configure the MAC unicast address
4047 * Step 48: configure the random backoff seed
4048 * Step 96: set up multicast filters
4049 */
4050 #ifdef BGE_IPMI_ASF
4051 if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE)
4052 #else
4053 if (bge_chip_sync(bgep) == DDI_FAILURE)
4054 #endif
4055 retval = DDI_FAILURE;
4056
4057 /*
4058 * Step 49: configure the MTU
4059 */
4060 mtu = bgep->chipid.ethmax_size+ETHERFCSL+VLAN_TAGSZ;
4061 bge_reg_put32(bgep, MAC_RX_MTU_SIZE_REG, mtu);
4062
4063 /*
4064 * Step 50: configure the IPG et al
4065 */
4066 regval = MAC_TX_LENGTHS_DEFAULT;
4067 if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev)
4068 == MHCR_CHIP_ASIC_REV_5720)
4069 regval |= bge_reg_get32(bgep, MAC_TX_LENGTHS_REG) &
4070 (MAC_TX_LENGTHS_JMB_FRM_LEN_MSK |
4071 MAC_TX_LENGTHS_CNT_DWN_VAL_MSK);
4072 bge_reg_put32(bgep, MAC_TX_LENGTHS_REG, regval);
4073
4074 /*
4075 * Step 51: configure the default Rx Return Ring
4076 */
4077 bge_reg_put32(bgep, RCV_RULES_CONFIG_REG, RCV_RULES_CONFIG_DEFAULT);
4078
4079 /*
4080 * Steps 52-54: configure Receive List Placement,
4081 * and enable Receive List Placement Statistics
4082 */
4083 bge_reg_put32(bgep, RCV_LP_CONFIG_REG,
4084 RCV_LP_CONFIG(bgep->chipid.rx_rings));
4085 switch (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev)) {
4086 case MHCR_CHIP_ASIC_REV_5700:
4087 case MHCR_CHIP_ASIC_REV_5701:
4088 case MHCR_CHIP_ASIC_REV_5703:
4089 case MHCR_CHIP_ASIC_REV_5704:
4090 bge_reg_put32(bgep, RCV_LP_STATS_ENABLE_MASK_REG, ~0);
4091 break;
4092 case MHCR_CHIP_ASIC_REV_5705:
4207 */
4208 bge_mbx_put(bgep, INTERRUPT_MBOX_0_REG, 0);
4209
4210 /*
4211 * Steps 76-87: Gentlemen, start your engines ...
4212 *
4213 * Enable the DMA Completion Engine, the Write DMA Engine,
4214 * the Read DMA Engine, Receive Data Completion Engine,
4215 * the MBuf Cluster Free Engine, the Send Data Completion Engine,
4216 * the Send BD Completion Engine, the Receive BD Initiator Engine,
4217 * the Receive Data Initiator Engine, the Send Data Initiator Engine,
4218 * the Send BD Initiator Engine, and the Send BD Selector Engine.
4219 *
4220 * Beware exhaust fumes?
4221 */
4222 if (DEVICE_5704_SERIES_CHIPSETS(bgep))
4223 if (!bge_chip_enable_engine(bgep, DMA_COMPLETION_MODE_REG, 0))
4224 retval = DDI_FAILURE;
4225 dma_wrprio = (bge_dma_wrprio << DMA_PRIORITY_SHIFT) |
4226 ALL_DMA_ATTN_BITS;
4227 if (DEVICE_IS_5755_PLUS(bgep))
4228 dma_wrprio |= DMA_STATUS_TAG_FIX_CQ12384;
4229 if (!bge_chip_enable_engine(bgep, WRITE_DMA_MODE_REG,
4230 dma_wrprio))
4231 retval = DDI_FAILURE;
4232 if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4233 MHCR_CHIP_ASIC_REV_5761 ||
4234 MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4235 MHCR_CHIP_ASIC_REV_5784 ||
4236 MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4237 MHCR_CHIP_ASIC_REV_5785 ||
4238 MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4239 MHCR_CHIP_ASIC_REV_57780 ||
4240 DEVICE_IS_57765_PLUS(bgep)) {
4241 regval = bge_reg_get32(bgep, READ_DMA_RESERVED_CONTROL_REG);
4242 if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4243 MHCR_CHIP_ASIC_REV_5719 ||
4244 MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4245 MHCR_CHIP_ASIC_REV_5720) {
4246 regval &= ~(RDMA_RSRVCTRL_TXMRGN_MASK |
4247 RDMA_RSRVCTRL_FIFO_LWM_MASK |
4248 RDMA_RSRVCTRL_FIFO_HWM_MASK);
4249 regval |= RDMA_RSRVCTRL_TXMRGN_320B |
4250 RDMA_RSRVCTRL_FIFO_LWM_1_5K |
4251 RDMA_RSRVCTRL_FIFO_HWM_1_5K;
4252 }
4253 bge_reg_put32(bgep, READ_DMA_RESERVED_CONTROL_REG,
4254 regval | RDMA_RSRVCTRL_FIFO_OFLW_FIX);
4255 }
4256 if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
4257 DEVICE_5717_SERIES_CHIPSETS(bgep))
4258 bge_dma_rdprio = 0;
4259 regval = bge_dma_rdprio << DMA_PRIORITY_SHIFT;
4260 if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4261 MHCR_CHIP_ASIC_REV_5720)
4262 regval |= bge_reg_get32(bgep, READ_DMA_MODE_REG) &
4263 DMA_H2BNC_VLAN_DET;
4264 if (!bge_chip_enable_engine(bgep, READ_DMA_MODE_REG,
4265 regval | ALL_DMA_ATTN_BITS))
4266 retval = DDI_FAILURE;
4267 if (!bge_chip_enable_engine(bgep, RCV_DATA_COMPLETION_MODE_REG,
4268 STATE_MACHINE_ATTN_ENABLE_BIT))
4269 retval = DDI_FAILURE;
4270 if (DEVICE_5704_SERIES_CHIPSETS(bgep))
4271 if (!bge_chip_enable_engine(bgep,
4272 MBUF_CLUSTER_FREE_MODE_REG, 0))
4273 retval = DDI_FAILURE;
4274 if (!bge_chip_enable_engine(bgep, SEND_DATA_COMPLETION_MODE_REG, 0))
4275 retval = DDI_FAILURE;
4276 if (!bge_chip_enable_engine(bgep, SEND_BD_COMPLETION_MODE_REG,
4277 STATE_MACHINE_ATTN_ENABLE_BIT))
4278 retval = DDI_FAILURE;
4279 if (!bge_chip_enable_engine(bgep, RCV_BD_INITIATOR_MODE_REG,
4280 RCV_BD_DISABLED_RING_ATTN))
4281 retval = DDI_FAILURE;
4282 if (!bge_chip_enable_engine(bgep, RCV_DATA_BD_INITIATOR_MODE_REG,
4283 RCV_DATA_BD_ILL_RING_ATTN))
4284 retval = DDI_FAILURE;
4285 if (!bge_chip_enable_engine(bgep, SEND_DATA_INITIATOR_MODE_REG, 0))
4286 retval = DDI_FAILURE;
4287 if (!bge_chip_enable_engine(bgep, SEND_BD_INITIATOR_MODE_REG,
4288 STATE_MACHINE_ATTN_ENABLE_BIT))
4289 retval = DDI_FAILURE;
4290 if (!bge_chip_enable_engine(bgep, SEND_BD_SELECTOR_MODE_REG,
4291 STATE_MACHINE_ATTN_ENABLE_BIT))
4292 retval = DDI_FAILURE;
4293
4294 /*
4295 * Step 88: download firmware -- doesn't apply
4296 * Steps 89-90: enable Transmit & Receive MAC Engines
4297 */
4298 if (DEVICE_IS_5755_PLUS(bgep) ||
4299 MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4300 MHCR_CHIP_ASIC_REV_5906) {
4301 regval = bge_reg_get32(bgep, TRANSMIT_MAC_MODE_REG);
4302 regval |= TRANSMIT_MODE_MBUF_LOCKUP_FIX;
4303 } else {
4304 regval = 0;
4305 }
4306 if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
4307 MHCR_CHIP_ASIC_REV_5720) {
4308 regval &= ~(TRANSMIT_MODE_HTX2B_JMB_FRM_LEN |
4309 TRANSMIT_MODE_HTX2B_CNT_DN_MODE);
4310 regval |= bge_reg_get32(bgep, TRANSMIT_MAC_MODE_REG) &
4311 (TRANSMIT_MODE_HTX2B_JMB_FRM_LEN |
4312 TRANSMIT_MODE_HTX2B_CNT_DN_MODE);
4313 }
4314 if (!bge_chip_enable_engine(bgep, TRANSMIT_MAC_MODE_REG, regval))
4315 retval = DDI_FAILURE;
4316 #ifdef BGE_IPMI_ASF
4317 if (!bgep->asf_enabled) {
4318 if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG,
4319 RECEIVE_MODE_KEEP_VLAN_TAG))
4320 retval = DDI_FAILURE;
4321 } else {
4322 if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG, 0))
4323 retval = DDI_FAILURE;
4324 }
4325 #else
4326 if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG,
4327 RECEIVE_MODE_KEEP_VLAN_TAG))
4328 retval = DDI_FAILURE;
4329 #endif
4330
4331 /*
4332 * Step 91: disable auto-polling of PHY status
4333 */
4334 bge_reg_put32(bgep, MI_MODE_REG, MI_MODE_DEFAULT);
4397 MODE_INT_ON_FLOW_ATTN |
4398 MODE_INT_ON_DMA_ATTN |
4399 MODE_HOST_STACK_UP|
4400 MODE_INT_ON_MAC_ATTN);
4401 } else {
4402 #endif
4403 bge_reg_set32(bgep, MODE_CONTROL_REG,
4404 MODE_INT_ON_FLOW_ATTN |
4405 MODE_INT_ON_DMA_ATTN |
4406 MODE_INT_ON_MAC_ATTN);
4407 #ifdef BGE_IPMI_ASF
4408 }
4409 #endif
4410
4411 /*
4412 * Step 97: enable PCI interrupts!!!
4413 */
4414 if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
4415 bge_cfg_clr32(bgep, PCI_CONF_BGE_MHCR,
4416 bgep->chipid.mask_pci_int);
4417 /*
4418 * All done!
4419 */
4420 bgep->bge_chip_state = BGE_CHIP_RUNNING;
4421 return (retval);
4422 }
4423
4424
4425 /*
4426 * ========== Hardware interrupt handler ==========
4427 */
4428
4429 #undef BGE_DBG
4430 #define BGE_DBG BGE_DBG_INT /* debug flag for this code */
4431
4432 /*
4433 * Sync the status block, then atomically clear the specified bits in
4434 * the <flags-and-tag> field of the status block, returning the value
4435 * of the <tag> and the <flags> before the bits were cleared.
|