/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright 2012 Nexenta Systems, Inc.  All rights reserved.
 */

#include "bge_impl.h"
#include <sys/sdt.h>
#include <sys/mac_provider.h>
#include <sys/mac.h>
#include <sys/mac_flow.h>

/*
 * This is the string displayed by modinfo, etc.
 */
static char bge_ident[] = "Broadcom Gb Ethernet";

/*
 * Property names
 */
static char debug_propname[] = "bge-debug-flags";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char localmac_boolname[] = "local-mac-address?";
static char localmac_propname[] = "local-mac-address";
static char macaddr_propname[] = "mac-address";
static char subdev_propname[] = "subsystem-id";
static char subven_propname[] = "subsystem-vendor-id";
static char rxrings_propname[] = "bge-rx-rings";
static char txrings_propname[] = "bge-tx-rings";
static char fm_cap[] = "fm-capable";
static char default_mtu[] = "default_mtu";

static int bge_add_intrs(bge_t *, int);
static void bge_rem_intrs(bge_t *);
static int bge_unicst_set(void *, const uint8_t *, int);

/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
        DMA_ATTR_V0,                    /* dma_attr version     */
        0x0000000000000000ull,          /* dma_attr_addr_lo     */
        0xFFFFFFFFFFFFFFFFull,          /* dma_attr_addr_hi     */
        0x00000000FFFFFFFFull,          /* dma_attr_count_max   */
        0x0000000000000001ull,          /* dma_attr_align       */
        0x00000FFF,                     /* dma_attr_burstsizes  */
        0x00000001,                     /* dma_attr_minxfer     */
        0x000000000000FFFFull,          /* dma_attr_maxxfer     */
        0xFFFFFFFFFFFFFFFFull,          /* dma_attr_seg         */
        1,                              /* dma_attr_sgllen      */
        0x00000001,                     /* dma_attr_granular    */
        DDI_DMA_FLAGERR                 /* dma_attr_flags */
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t bge_reg_accattr = {
        DDI_DEVICE_ATTR_V1,
        DDI_NEVERSWAP_ACC,
        DDI_STRICTORDER_ACC,
        DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_desc_accattr = {
        DDI_DEVICE_ATTR_V0,
        DDI_NEVERSWAP_ACC,
        DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_data_accattr = {
        DDI_DEVICE_ATTR_V0,
        DDI_NEVERSWAP_ACC,
        DDI_STRICTORDER_ACC
};

static int              bge_m_start(void *);
static void             bge_m_stop(void *);
static int              bge_m_promisc(void *, boolean_t);
static int              bge_m_multicst(void *, boolean_t, const uint8_t *);
static void             bge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t        bge_m_getcapab(void *, mac_capab_t, void *);
static int              bge_unicst_set(void *, const uint8_t *,
    int);
static int              bge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int              bge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, void *);
static void             bge_m_propinfo(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);
static int              bge_set_priv_prop(bge_t *, const char *, uint_t,
    const void *);
static int              bge_get_priv_prop(bge_t *, const char *, uint_t,
    void *);
static void             bge_priv_propinfo(const char *,
    mac_prop_info_handle_t);

#define BGE_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | \
    MC_GETPROP | MC_PROPINFO)

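/*
 * Note: the entries below are positional; the NULL slots correspond to the
 * mc_unicst, mc_reserved, mc_open and mc_close members of mac_callbacks_t.
 * Unicast addresses are instead managed through the ring/group callbacks
 * (see bge_addmac() below).
 */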
static mac_callbacks_t bge_m_callbacks = {
        BGE_M_CALLBACK_FLAGS,
        bge_m_stat,
        bge_m_start,
        bge_m_stop,
        bge_m_promisc,
        bge_m_multicst,
        NULL,
        bge_m_tx,
        NULL,
        bge_m_ioctl,
        bge_m_getcapab,
        NULL,
        NULL,
        bge_m_setprop,
        bge_m_getprop,
        bge_m_propinfo
};

char *bge_priv_prop[] = {
        "_adv_asym_pause_cap",
        "_adv_pause_cap",
        "_drain_max",
        "_msi_cnt",
        "_rx_intr_coalesce_blank_time",
        "_tx_intr_coalesce_blank_time",
        "_rx_intr_coalesce_pkt_cnt",
        "_tx_intr_coalesce_pkt_cnt",
        NULL
};

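/*
 * All-zero Ethernet address, used when a unicast address slot is cleared.
 */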
uint8_t zero_addr[6] = {0, 0, 0, 0, 0, 0};
/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the specified ring to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */

static void
bge_reinit_send_ring(send_ring_t *srp)
{
        bge_queue_t *txbuf_queue;
        bge_queue_item_t *txbuf_head;
        sw_txbuf_t *txbuf;
        sw_sbd_t *ssbdp;
        uint32_t slot;

        /*
         * Reinitialise control variables ...
         */
        srp->tx_flow = 0;
        srp->tx_next = 0;
        srp->txfill_next = 0;
        srp->tx_free = srp->desc.nslots;
        ASSERT(mutex_owned(srp->tc_lock));
        srp->tc_next = 0;
        srp->txpkt_next = 0;
        srp->tx_block = 0;
        srp->tx_nobd = 0;
        srp->tx_nobuf = 0;

        /*
         * Initialize the tx buffer push queue
         */
        mutex_enter(srp->freetxbuf_lock);
        mutex_enter(srp->txbuf_lock);
        txbuf_queue = &srp->freetxbuf_queue;
        txbuf_queue->head = NULL;
        txbuf_queue->count = 0;
        txbuf_queue->lock = srp->freetxbuf_lock;
        srp->txbuf_push_queue = txbuf_queue;

        /*
         * Initialize the tx buffer pop queue
         */
        txbuf_queue = &srp->txbuf_queue;
        txbuf_queue->head = NULL;
        txbuf_queue->count = 0;
        txbuf_queue->lock = srp->txbuf_lock;
        srp->txbuf_pop_queue = txbuf_queue;
        txbuf_head = srp->txbuf_head;
        txbuf = srp->txbuf;
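        /*
         * Thread every tx buffer onto the pop (free-buffer) queue so the
         * whole pool is available for transmission again.
         */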
        for (slot = 0; slot < srp->tx_buffers; ++slot) {
                txbuf_head->item = txbuf;
                txbuf_head->next = txbuf_queue->head;
                txbuf_queue->head = txbuf_head;
                txbuf_queue->count++;
                txbuf++;
                txbuf_head++;
        }
        mutex_exit(srp->txbuf_lock);
        mutex_exit(srp->freetxbuf_lock);

        /*
         * Zero and sync all the h/w Send Buffer Descriptors
         */
        DMA_ZERO(srp->desc);
        DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
        bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
        ssbdp = srp->sw_sbds;
        for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
                ssbdp->pbuf = NULL;
}

static void
bge_reinit_recv_ring(recv_ring_t *rrp)
{
        /*
         * Reinitialise control variables ...
         */
        rrp->rx_next = 0;
}

static void
bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
{
        bge_rbd_t *hw_rbd_p;
        sw_rbd_t *srbdp;
        uint32_t bufsize;
        uint32_t nslots;
        uint32_t slot;

        static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
                RBD_FLAG_STD_RING,
                RBD_FLAG_JUMBO_RING,
                RBD_FLAG_MINI_RING
        };

        /*
         * Zero, initialise and sync all the h/w Receive Buffer Descriptors
         * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
         * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
         * should be zeroed, and so don't need to be set up specifically
         * once the whole area has been cleared.
         */
        DMA_ZERO(brp->desc);

        hw_rbd_p = DMA_VPTR(brp->desc);
        nslots = brp->desc.nslots;
        ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
        bufsize = brp->buf[0].size;
        srbdp = brp->sw_rbds;
        for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
                hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
                hw_rbd_p->index = (uint16_t)slot;
                hw_rbd_p->len = (uint16_t)bufsize;
                hw_rbd_p->opaque = srbdp->pbuf.token;
                hw_rbd_p->flags |= ring_type_flag[ring];
        }

        DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);

        /*
         * Finally, reinitialise the ring control variables ...
         */
        brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
}

/*
 * Reinitialize all rings
 */
static void
bge_reinit_rings(bge_t *bgep)
{
        uint32_t ring;

        ASSERT(mutex_owned(bgep->genlock));

        /*
         * Send Rings ...
         */
        for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
                bge_reinit_send_ring(&bgep->send[ring]);

        /*
         * Receive Return Rings ...
         */
        for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
                bge_reinit_recv_ring(&bgep->recv[ring]);

        /*
         * Receive Producer Rings ...
         */
        for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
                bge_reinit_buff_ring(&bgep->buff[ring], ring);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef  BGE_DBG
#define BGE_DBG         BGE_DBG_NEMO    /* debug flag for this code     */

/*
 * These routines provide all the functionality required by the
 * corresponding GLD entry points, but don't update the GLD state
 * so they can be called internally without disturbing our record
 * of what GLD thinks we should be doing ...
 */

/*
 *      bge_reset() -- reset h/w & rings to initial state
 */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
        uint32_t        ring;
        int retval;

        BGE_TRACE(("bge_reset($%p)", (void *)bgep));

        ASSERT(mutex_owned(bgep->genlock));

        /*
         * Grab all the other mutexes in the world (this should
         * ensure no other threads are manipulating driver state)
         */
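        /*
         * Note that the locks are taken here in a fixed order (receive,
         * buffer, error, send tx, send tc) and dropped in the reverse
         * order below.
         */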
        for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
                mutex_enter(bgep->recv[ring].rx_lock);
        for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
                mutex_enter(bgep->buff[ring].rf_lock);
        rw_enter(bgep->errlock, RW_WRITER);
        for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
                mutex_enter(bgep->send[ring].tx_lock);
        for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
                mutex_enter(bgep->send[ring].tc_lock);

#ifdef BGE_IPMI_ASF
        retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
        retval = bge_chip_reset(bgep, B_TRUE);
#endif
        bge_reinit_rings(bgep);

        /*
         * Free the world ...
         */
        for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
                mutex_exit(bgep->send[ring].tc_lock);
        for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
                mutex_exit(bgep->send[ring].tx_lock);
        rw_exit(bgep->errlock);
        for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
                mutex_exit(bgep->buff[ring].rf_lock);
        for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
                mutex_exit(bgep->recv[ring].rx_lock);

        BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
        return (retval);
}

/*
 *      bge_stop() -- stop processing, don't reset h/w or rings
 */
static void
bge_stop(bge_t *bgep)
{
        BGE_TRACE(("bge_stop($%p)", (void *)bgep));

        ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
        if (bgep->asf_enabled) {
                bgep->asf_pseudostop = B_TRUE;
        } else {
#endif
                bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
        }
#endif

        BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}

/*
 *      bge_start() -- start transmitting/receiving
 */
static int
bge_start(bge_t *bgep, boolean_t reset_phys)
{
        int retval;

        BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));

        ASSERT(mutex_owned(bgep->genlock));

        /*
         * Start chip processing, including enabling interrupts
         */
        retval = bge_chip_start(bgep, reset_phys);

        BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
        return (retval);
}

/*
 * bge_restart - restart transmitting/receiving after error or suspend
 */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
        int retval = DDI_SUCCESS;
        ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
        if (bgep->asf_enabled) {
                if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
                        retval = DDI_FAILURE;
        } else
                if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
                        retval = DDI_FAILURE;
#else
        if (bge_reset(bgep) != DDI_SUCCESS)
                retval = DDI_FAILURE;
#endif
        if (bgep->bge_mac_state == BGE_MAC_STARTED) {
                if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
                        retval = DDI_FAILURE;
                bgep->watchdog = 0;
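                /* kick the tx drain softint to restart any stalled sends */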
                ddi_trigger_softintr(bgep->drain_id);
        }

        BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
        return (retval);
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef  BGE_DBG
#define BGE_DBG         BGE_DBG_NEMO    /* debug flag for this code     */

/*
 *      bge_m_stop() -- stop transmitting/receiving
 */
static void
bge_m_stop(void *arg)
{
        bge_t *bgep = arg;              /* private device info  */
        send_ring_t *srp;
        uint32_t ring;

        BGE_TRACE(("bge_m_stop($%p)", arg));

        /*
         * Just stop processing, then record new GLD state
         */
        mutex_enter(bgep->genlock);
        if (!(bgep->progress & PROGRESS_INTR)) {
                /* can happen during autorecovery */
                bgep->bge_chip_state = BGE_CHIP_STOPPED;
        } else
                bge_stop(bgep);

        bgep->link_update_timer = 0;
        bgep->link_state = LINK_STATE_UNKNOWN;
        mac_link_update(bgep->mh, bgep->link_state);

        /*
         * Free the possible tx buffers allocated in tx process.
         */
#ifdef BGE_IPMI_ASF
        if (!bgep->asf_pseudostop)
#endif
        {
                rw_enter(bgep->errlock, RW_WRITER);
                for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
                        srp = &bgep->send[ring];
                        mutex_enter(srp->tx_lock);
                        if (srp->tx_array > 1)
                                bge_free_txbuf_arrays(srp);
                        mutex_exit(srp->tx_lock);
                }
                rw_exit(bgep->errlock);
        }
        bgep->bge_mac_state = BGE_MAC_STOPPED;
        BGE_DEBUG(("bge_m_stop($%p) done", arg));
        if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
        mutex_exit(bgep->genlock);
}

/*
 *      bge_m_start() -- start transmitting/receiving
 */
static int
bge_m_start(void *arg)
{
        bge_t *bgep = arg;              /* private device info  */

        BGE_TRACE(("bge_m_start($%p)", arg));

        /*
         * Start processing and record new GLD state
         */
        mutex_enter(bgep->genlock);
        if (!(bgep->progress & PROGRESS_INTR)) {
                /* can happen during autorecovery */
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
#ifdef BGE_IPMI_ASF
        if (bgep->asf_enabled) {
                if ((bgep->asf_status == ASF_STAT_RUN) &&
                    (bgep->asf_pseudostop)) {
                        bgep->bge_mac_state = BGE_MAC_STARTED;
                        mutex_exit(bgep->genlock);
                        return (0);
                }
        }
        if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
        if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
                (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
                (void) bge_check_acc_handle(bgep, bgep->io_handle);
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
        if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
                (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
                (void) bge_check_acc_handle(bgep, bgep->io_handle);
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
        bgep->watchdog = 0;
        bgep->bge_mac_state = BGE_MAC_STARTED;
        BGE_DEBUG(("bge_m_start($%p) done", arg));

        if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
        if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
#ifdef BGE_IPMI_ASF
        if (bgep->asf_enabled) {
                if (bgep->asf_status != ASF_STAT_RUN) {
                        /* start ASF heart beat */
                        bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
                            (void *)bgep,
                            drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
                        bgep->asf_status = ASF_STAT_RUN;
                }
        }
#endif
        mutex_exit(bgep->genlock);

        return (0);
}

/*
 *      bge_unicst_set() -- set the physical network address
 */
static int
bge_unicst_set(void *arg, const uint8_t *macaddr, int slot)
{
        bge_t *bgep = arg;              /* private device info  */

        BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg,
            ether_sprintf((void *)macaddr)));
        /*
         * Remember the new current address in the driver state
         * Sync the chip's idea of the address too ...
         */
        mutex_enter(bgep->genlock);
        if (!(bgep->progress & PROGRESS_INTR)) {
                /* can happen during autorecovery */
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
        ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
#ifdef BGE_IPMI_ASF
        if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
        if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
                (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
                (void) bge_check_acc_handle(bgep, bgep->io_handle);
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
#ifdef BGE_IPMI_ASF
        if (bgep->asf_enabled) {
                /*
                 * The bge_chip_sync() call above rewrote the ethernet MAC
                 * address registers, which breaks the IPMI/ASF sideband.
                 * We have to reset the chip here to restore IPMI/ASF
                 * sideband operation.
                 */
                if (bgep->asf_status == ASF_STAT_RUN) {
                        /*
                         * We must stop the ASF heartbeat before
                         * bge_chip_stop(), otherwise some systems (e.g. the
                         * IBM HS20 blade server) may crash.
                         */
                        bge_asf_update_status(bgep);
                        bge_asf_stop_timer(bgep);
                        bgep->asf_status = ASF_STAT_STOP;

                        bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
                }
                bge_chip_stop(bgep, B_FALSE);

                if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
                        (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
                        (void) bge_check_acc_handle(bgep, bgep->io_handle);
                        ddi_fm_service_impact(bgep->devinfo,
                            DDI_SERVICE_DEGRADED);
                        mutex_exit(bgep->genlock);
                        return (EIO);
                }

                /*
                 * Start our ASF heartbeat counter as soon as possible.
                 */
                if (bgep->asf_status != ASF_STAT_RUN) {
                        /* start ASF heart beat */
                        bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
                            (void *)bgep,
                            drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
                        bgep->asf_status = ASF_STAT_RUN;
                }
        }
#endif
        BGE_DEBUG(("bge_m_unicst_set($%p) done", arg));
        if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
        if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
        mutex_exit(bgep->genlock);

        return (0);
}

extern void bge_wake_factotum(bge_t *);

static boolean_t
bge_param_locked(mac_prop_id_t pr_num)
{
        /*
         * All adv_* parameters are locked (read-only) while
         * the device is in any sort of loopback mode ...
         */
        switch (pr_num) {
                case MAC_PROP_ADV_1000FDX_CAP:
                case MAC_PROP_EN_1000FDX_CAP:
                case MAC_PROP_ADV_1000HDX_CAP:
                case MAC_PROP_EN_1000HDX_CAP:
                case MAC_PROP_ADV_100FDX_CAP:
                case MAC_PROP_EN_100FDX_CAP:
                case MAC_PROP_ADV_100HDX_CAP:
                case MAC_PROP_EN_100HDX_CAP:
                case MAC_PROP_ADV_10FDX_CAP:
                case MAC_PROP_EN_10FDX_CAP:
                case MAC_PROP_ADV_10HDX_CAP:
                case MAC_PROP_EN_10HDX_CAP:
                case MAC_PROP_AUTONEG:
                case MAC_PROP_FLOWCTRL:
                        return (B_TRUE);
        }
        return (B_FALSE);
}
/*
 * callback functions for set/get of properties
 */
static int
bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
        bge_t *bgep = barg;
        int err = 0;
        uint32_t cur_mtu, new_mtu;
        link_flowctrl_t fl;

        mutex_enter(bgep->genlock);
        if (bgep->param_loop_mode != BGE_LOOP_NONE &&
            bge_param_locked(pr_num)) {
                /*
                 * All adv_* parameters are locked (read-only)
                 * while the device is in any sort of loopback mode.
                 */
                mutex_exit(bgep->genlock);
                return (EBUSY);
        }
        if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
            ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
            (pr_num == MAC_PROP_EN_100HDX_CAP) ||
            (pr_num == MAC_PROP_EN_10FDX_CAP) ||
            (pr_num == MAC_PROP_EN_10HDX_CAP))) {
                /*
                 * these properties are read/write on copper,
                 * read-only and 0 on serdes
                 */
                mutex_exit(bgep->genlock);
                return (ENOTSUP);
        }
        if (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
            ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
            (pr_num == MAC_PROP_EN_1000HDX_CAP))) {
                mutex_exit(bgep->genlock);
                return (ENOTSUP);
        }

        switch (pr_num) {
                case MAC_PROP_EN_1000FDX_CAP:
                        bgep->param_en_1000fdx = *(uint8_t *)pr_val;
                        bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
                        goto reprogram;
                case MAC_PROP_EN_1000HDX_CAP:
                        bgep->param_en_1000hdx = *(uint8_t *)pr_val;
                        bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
                        goto reprogram;
                case MAC_PROP_EN_100FDX_CAP:
                        bgep->param_en_100fdx = *(uint8_t *)pr_val;
                        bgep->param_adv_100fdx = *(uint8_t *)pr_val;
                        goto reprogram;
                case MAC_PROP_EN_100HDX_CAP:
                        bgep->param_en_100hdx = *(uint8_t *)pr_val;
                        bgep->param_adv_100hdx = *(uint8_t *)pr_val;
                        goto reprogram;
                case MAC_PROP_EN_10FDX_CAP:
                        bgep->param_en_10fdx = *(uint8_t *)pr_val;
                        bgep->param_adv_10fdx = *(uint8_t *)pr_val;
                        goto reprogram;
                case MAC_PROP_EN_10HDX_CAP:
                        bgep->param_en_10hdx = *(uint8_t *)pr_val;
                        bgep->param_adv_10hdx = *(uint8_t *)pr_val;
reprogram:
                        if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
                                err = EINVAL;
                        break;
                case MAC_PROP_ADV_1000FDX_CAP:
                case MAC_PROP_ADV_1000HDX_CAP:
                case MAC_PROP_ADV_100FDX_CAP:
                case MAC_PROP_ADV_100HDX_CAP:
                case MAC_PROP_ADV_10FDX_CAP:
                case MAC_PROP_ADV_10HDX_CAP:
                case MAC_PROP_STATUS:
                case MAC_PROP_SPEED:
                case MAC_PROP_DUPLEX:
                        err = ENOTSUP; /* read-only prop. Can't set this */
                        break;
                case MAC_PROP_AUTONEG:
                        bgep->param_adv_autoneg = *(uint8_t *)pr_val;
                        if (bge_reprogram(bgep) == IOC_INVAL)
                                err = EINVAL;
                        break;
                case MAC_PROP_MTU:
                        cur_mtu = bgep->chipid.default_mtu;
                        bcopy(pr_val, &new_mtu, sizeof (new_mtu));

                        if (new_mtu == cur_mtu) {
                                err = 0;
                                break;
                        }
                        if (new_mtu < BGE_DEFAULT_MTU ||
                            new_mtu > BGE_MAXIMUM_MTU) {
                                err = EINVAL;
                                break;
                        }
                        if ((new_mtu > BGE_DEFAULT_MTU) &&
                            (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
                                err = EINVAL;
                                break;
                        }
                        if (bgep->bge_mac_state == BGE_MAC_STARTED) {
                                err = EBUSY;
                                break;
                        }
                        bgep->chipid.default_mtu = new_mtu;
                        if (bge_chip_id_init(bgep)) {
                                err = EINVAL;
                                break;
                        }
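                        /*
                         * The new MTU requires differently sized DMA
                         * buffers; flag the DMA state as stale, request a
                         * manual reset and let the factotum restart the
                         * chip with the new buffer sizes.
                         */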
                        bgep->bge_dma_error = B_TRUE;
                        bgep->manual_reset = B_TRUE;
                        bge_chip_stop(bgep, B_TRUE);
                        bge_wake_factotum(bgep);
                        err = 0;
                        break;
                case MAC_PROP_FLOWCTRL:
                        bcopy(pr_val, &fl, sizeof (fl));
                        switch (fl) {
                        default:
                                err = ENOTSUP;
                                break;
                        case LINK_FLOWCTRL_NONE:
                                bgep->param_adv_pause = 0;
                                bgep->param_adv_asym_pause = 0;

                                bgep->param_link_rx_pause = B_FALSE;
                                bgep->param_link_tx_pause = B_FALSE;
                                break;
                        case LINK_FLOWCTRL_RX:
                                bgep->param_adv_pause = 1;
                                bgep->param_adv_asym_pause = 1;

                                bgep->param_link_rx_pause = B_TRUE;
                                bgep->param_link_tx_pause = B_FALSE;
                                break;
                        case LINK_FLOWCTRL_TX:
                                bgep->param_adv_pause = 0;
                                bgep->param_adv_asym_pause = 1;

                                bgep->param_link_rx_pause = B_FALSE;
                                bgep->param_link_tx_pause = B_TRUE;
                                break;
                        case LINK_FLOWCTRL_BI:
                                bgep->param_adv_pause = 1;
                                bgep->param_adv_asym_pause = 0;

                                bgep->param_link_rx_pause = B_TRUE;
                                bgep->param_link_tx_pause = B_TRUE;
                                break;
                        }

                        if (err == 0) {
                                if (bge_reprogram(bgep) == IOC_INVAL)
                                        err = EINVAL;
                        }

                        break;
                case MAC_PROP_PRIVATE:
                        err = bge_set_priv_prop(bgep, pr_name, pr_valsize,
                            pr_val);
                        break;
                default:
                        err = ENOTSUP;
                        break;
        }
        mutex_exit(bgep->genlock);
        return (err);
}

/* ARGSUSED */
static int
bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val)
{
        bge_t *bgep = barg;
        int err = 0;

        switch (pr_num) {
                case MAC_PROP_DUPLEX:
                        ASSERT(pr_valsize >= sizeof (link_duplex_t));
                        bcopy(&bgep->param_link_duplex, pr_val,
                            sizeof (link_duplex_t));
                        break;
                case MAC_PROP_SPEED: {
                        uint64_t speed = bgep->param_link_speed * 1000000ull;

                        ASSERT(pr_valsize >= sizeof (speed));
                        bcopy(&speed, pr_val, sizeof (speed));
                        break;
                }
                case MAC_PROP_STATUS:
                        ASSERT(pr_valsize >= sizeof (link_state_t));
                        bcopy(&bgep->link_state, pr_val,
                            sizeof (link_state_t));
                        break;
                case MAC_PROP_AUTONEG:
                        *(uint8_t *)pr_val = bgep->param_adv_autoneg;
                        break;
                case MAC_PROP_FLOWCTRL: {
                        link_flowctrl_t fl;

                        ASSERT(pr_valsize >= sizeof (fl));

                        if (bgep->param_link_rx_pause &&
                            !bgep->param_link_tx_pause)
                                fl = LINK_FLOWCTRL_RX;

                        if (!bgep->param_link_rx_pause &&
                            !bgep->param_link_tx_pause)
                                fl = LINK_FLOWCTRL_NONE;

                        if (!bgep->param_link_rx_pause &&
                            bgep->param_link_tx_pause)
                                fl = LINK_FLOWCTRL_TX;

                        if (bgep->param_link_rx_pause &&
                            bgep->param_link_tx_pause)
                                fl = LINK_FLOWCTRL_BI;
                        bcopy(&fl, pr_val, sizeof (fl));
                        break;
                }
                case MAC_PROP_ADV_1000FDX_CAP:
                        *(uint8_t *)pr_val = bgep->param_adv_1000fdx;
                        break;
                case MAC_PROP_EN_1000FDX_CAP:
                        *(uint8_t *)pr_val = bgep->param_en_1000fdx;
                        break;
                case MAC_PROP_ADV_1000HDX_CAP:
                        *(uint8_t *)pr_val = bgep->param_adv_1000hdx;
                        break;
                case MAC_PROP_EN_1000HDX_CAP:
                        *(uint8_t *)pr_val = bgep->param_en_1000hdx;
                        break;
                case MAC_PROP_ADV_100FDX_CAP:
                        *(uint8_t *)pr_val = bgep->param_adv_100fdx;
                        break;
                case MAC_PROP_EN_100FDX_CAP:
                        *(uint8_t *)pr_val = bgep->param_en_100fdx;
                        break;
                case MAC_PROP_ADV_100HDX_CAP:
                        *(uint8_t *)pr_val = bgep->param_adv_100hdx;
                        break;
                case MAC_PROP_EN_100HDX_CAP:
                        *(uint8_t *)pr_val = bgep->param_en_100hdx;
                        break;
                case MAC_PROP_ADV_10FDX_CAP:
                        *(uint8_t *)pr_val = bgep->param_adv_10fdx;
                        break;
                case MAC_PROP_EN_10FDX_CAP:
                        *(uint8_t *)pr_val = bgep->param_en_10fdx;
                        break;
                case MAC_PROP_ADV_10HDX_CAP:
                        *(uint8_t *)pr_val = bgep->param_adv_10hdx;
                        break;
                case MAC_PROP_EN_10HDX_CAP:
                        *(uint8_t *)pr_val = bgep->param_en_10hdx;
                        break;
                case MAC_PROP_ADV_100T4_CAP:
                case MAC_PROP_EN_100T4_CAP:
                        *(uint8_t *)pr_val = 0;
                        break;
                case MAC_PROP_PRIVATE:
                        err = bge_get_priv_prop(bgep, pr_name,
                            pr_valsize, pr_val);
                        return (err);
                default:
                        return (ENOTSUP);
        }
        return (0);
}

static void
bge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    mac_prop_info_handle_t prh)
{
        bge_t *bgep = barg;
        int flags = bgep->chipid.flags;

        /*
         * By default permissions are read/write unless specified
         * otherwise by the driver.
         */

        switch (pr_num) {
        case MAC_PROP_DUPLEX:
        case MAC_PROP_SPEED:
        case MAC_PROP_STATUS:
        case MAC_PROP_ADV_1000FDX_CAP:
        case MAC_PROP_ADV_1000HDX_CAP:
        case MAC_PROP_ADV_100FDX_CAP:
        case MAC_PROP_ADV_100HDX_CAP:
        case MAC_PROP_ADV_10FDX_CAP:
        case MAC_PROP_ADV_10HDX_CAP:
        case MAC_PROP_ADV_100T4_CAP:
        case MAC_PROP_EN_100T4_CAP:
                mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
                break;

        case MAC_PROP_EN_1000FDX_CAP:
        case MAC_PROP_EN_1000HDX_CAP:
                if (DEVICE_5906_SERIES_CHIPSETS(bgep))
                        mac_prop_info_set_default_uint8(prh, 0);
                else
                        mac_prop_info_set_default_uint8(prh, 1);
                break;

        case MAC_PROP_EN_100FDX_CAP:
        case MAC_PROP_EN_100HDX_CAP:
        case MAC_PROP_EN_10FDX_CAP:
        case MAC_PROP_EN_10HDX_CAP:
                mac_prop_info_set_default_uint8(prh,
                    (flags & CHIP_FLAG_SERDES) ? 0 : 1);
                break;

        case MAC_PROP_AUTONEG:
                mac_prop_info_set_default_uint8(prh, 1);
                break;

        case MAC_PROP_FLOWCTRL:
                mac_prop_info_set_default_link_flowctrl(prh,
                    LINK_FLOWCTRL_BI);
                break;

        case MAC_PROP_MTU:
                mac_prop_info_set_range_uint32(prh, BGE_DEFAULT_MTU,
                    (flags & CHIP_FLAG_NO_JUMBO) ?
                    BGE_DEFAULT_MTU : BGE_MAXIMUM_MTU);
                break;

        case MAC_PROP_PRIVATE:
                bge_priv_propinfo(pr_name, prh);
                break;
        }

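        /*
         * Properties that are currently locked (loopback mode active, or
         * not applicable to SerDes/5906 devices) are reported read-only.
         */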
        mutex_enter(bgep->genlock);
        if ((bgep->param_loop_mode != BGE_LOOP_NONE &&
            bge_param_locked(pr_num)) ||
            ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
            ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
            (pr_num == MAC_PROP_EN_100HDX_CAP) ||
            (pr_num == MAC_PROP_EN_10FDX_CAP) ||
            (pr_num == MAC_PROP_EN_10HDX_CAP))) ||
            (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
            ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
            (pr_num == MAC_PROP_EN_1000HDX_CAP))))
                mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
        mutex_exit(bgep->genlock);
}

/* ARGSUSED */
static int
bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
        int err = 0;
        long result;

        if (strcmp(pr_name, "_adv_pause_cap") == 0) {
                (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
                if (result > 1 || result < 0) {
                        err = EINVAL;
                } else {
                        bgep->param_adv_pause = (uint32_t)result;
                        if (bge_reprogram(bgep) == IOC_INVAL)
                                err = EINVAL;
                }
                return (err);
        }
        if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
                (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
                if (result > 1 || result < 0) {
                        err = EINVAL;
                } else {
                        bgep->param_adv_asym_pause = (uint32_t)result;
                        if (bge_reprogram(bgep) == IOC_INVAL)
                                err = EINVAL;
                }
                return (err);
        }
        if (strcmp(pr_name, "_drain_max") == 0) {

                /*
                 * On the Tx side, a h/w doorbell register normally has to
                 * be updated for every packet transmitted.  The drain_max
                 * parameter is used to reduce that register traffic: it
                 * controls the maximum number of packets we will hold
                 * before updating the h/w to trigger transmission.  The bge
                 * chipset usually has at most 512 Tx descriptors, so the
                 * upper bound on drain_max is 512.
                 */
                if (pr_val == NULL) {
                        err = EINVAL;
                        return (err);
                }
                (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
                if (result > 512 || result < 1)
                        err = EINVAL;
                else {
                        bgep->param_drain_max = (uint32_t)result;
                        if (bge_reprogram(bgep) == IOC_INVAL)
                                err = EINVAL;
                }
                return (err);
        }
        if (strcmp(pr_name, "_msi_cnt") == 0) {

                if (pr_val == NULL) {
                        err = EINVAL;
                        return (err);
                }
                (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
                if (result > 7 || result < 0)
                        err = EINVAL;
                else {
                        bgep->param_msi_cnt = (uint32_t)result;
                        if (bge_reprogram(bgep) == IOC_INVAL)
                                err = EINVAL;
                }
                return (err);
        }
        if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0) {
                if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
                        return (EINVAL);
                if (result < 0)
                        err = EINVAL;
                else {
                        bgep->chipid.rx_ticks_norm = (uint32_t)result;
                        bge_chip_coalesce_update(bgep);
                }
                return (err);
        }

        if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) {
                if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
                        return (EINVAL);

                if (result < 0)
                        err = EINVAL;
                else {
                        bgep->chipid.rx_count_norm = (uint32_t)result;
                        bge_chip_coalesce_update(bgep);
                }
                return (err);
        }
        if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) {
                if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
                        return (EINVAL);
                if (result < 0)
                        err = EINVAL;
                else {
                        bgep->chipid.tx_ticks_norm = (uint32_t)result;
                        bge_chip_coalesce_update(bgep);
                }
                return (err);
        }

        if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) {
                if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
                        return (EINVAL);

                if (result < 0)
                        err = EINVAL;
                else {
                        bgep->chipid.tx_count_norm = (uint32_t)result;
                        bge_chip_coalesce_update(bgep);
                }
                return (err);
        }
        return (ENOTSUP);
}

static int
bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_valsize,
    void *pr_val)
{
        int value;

        if (strcmp(pr_name, "_adv_pause_cap") == 0)
                value = bge->param_adv_pause;
        else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0)
                value = bge->param_adv_asym_pause;
        else if (strcmp(pr_name, "_drain_max") == 0)
                value = bge->param_drain_max;
        else if (strcmp(pr_name, "_msi_cnt") == 0)
                value = bge->param_msi_cnt;
        else if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0)
                value = bge->chipid.rx_ticks_norm;
        else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0)
                value = bge->chipid.tx_ticks_norm;
        else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0)
                value = bge->chipid.rx_count_norm;
        else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0)
                value = bge->chipid.tx_count_norm;
        else
                return (ENOTSUP);

        (void) snprintf(pr_val, pr_valsize, "%d", value);
        return (0);
}

static void
bge_priv_propinfo(const char *pr_name, mac_prop_info_handle_t mph)
{
        char valstr[64];
        int value;

        if (strcmp(pr_name, "_adv_pause_cap") == 0)
                value = 1;
        else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0)
                value = 1;
        else if (strcmp(pr_name, "_drain_max") == 0)
                value = 64;
        else if (strcmp(pr_name, "_msi_cnt") == 0)
                value = 0;
        else if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0)
                value = bge_rx_ticks_norm;
        else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0)
                value = bge_tx_ticks_norm;
        else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0)
                value = bge_rx_count_norm;
        else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0)
                value = bge_tx_count_norm;
        else
                return;

        (void) snprintf(valstr, sizeof (valstr), "%d", value);
        mac_prop_info_set_default_str(mph, valstr);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 * See Broadcom document 570X-PG102-R page 125.
 */
static uint32_t
bge_hash_index(const uint8_t *mca)
{
        uint32_t hash;

        CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);

        return (hash);
}

/*
 *      bge_m_multicst() -- enable/disable a multicast address
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
        bge_t *bgep = arg;              /* private device info  */
        uint32_t hash;
        uint32_t index;
        uint32_t word;
        uint32_t bit;
        uint8_t *refp;

        BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
            (add) ? "add" : "remove", ether_sprintf((void *)mca)));

        /*
         * Precalculate all required masks, pointers etc ...
         */
        hash = bge_hash_index(mca);
        index = hash % BGE_HASH_TABLE_SIZE;
        word = index/32u;
        bit = 1 << (index % 32u);
        refp = &bgep->mcast_refs[index];

        BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
            hash, index, word, bit, *refp));

        /*
         * We must set the appropriate bit in the hash map (and the
         * corresponding h/w register) when the refcount goes from 0
         * to >0, and clear it when the last ref goes away (refcount
         * goes from >0 back to 0).  If we change the hash map, we
         * must also update the chip's hardware map registers.
         */
        mutex_enter(bgep->genlock);
        if (!(bgep->progress & PROGRESS_INTR)) {
                /* can happen during autorecovery */
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
        if (add) {
                if ((*refp)++ == 0) {
                        bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
                        if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
                        if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
                                (void) bge_check_acc_handle(bgep,
                                    bgep->cfg_handle);
                                (void) bge_check_acc_handle(bgep,
                                    bgep->io_handle);
                                ddi_fm_service_impact(bgep->devinfo,
                                    DDI_SERVICE_DEGRADED);
                                mutex_exit(bgep->genlock);
                                return (EIO);
                        }
                }
        } else {
                if (--(*refp) == 0) {
                        bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
                        if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
                        if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
                                (void) bge_check_acc_handle(bgep,
                                    bgep->cfg_handle);
                                (void) bge_check_acc_handle(bgep,
                                    bgep->io_handle);
                                ddi_fm_service_impact(bgep->devinfo,
                                    DDI_SERVICE_DEGRADED);
                                mutex_exit(bgep->genlock);
                                return (EIO);
                        }
                }
        }
        BGE_DEBUG(("bge_m_multicst($%p) done", arg));
        if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
        if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
        mutex_exit(bgep->genlock);

        return (0);
}

/*
 * bge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *      Program the hardware to enable/disable promiscuous and/or
 *      receive-all-multicast modes.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
        bge_t *bgep = arg;

        BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

        /*
         * Store MAC layer specified mode and pass to chip layer to update h/w
         */
        mutex_enter(bgep->genlock);
        if (!(bgep->progress & PROGRESS_INTR)) {
                /* can happen during autorecovery */
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
        bgep->promisc = on;
#ifdef BGE_IPMI_ASF
        if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
        if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
                (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
                (void) bge_check_acc_handle(bgep, bgep->io_handle);
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
        BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
        if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
        if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
                ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
        }
        mutex_exit(bgep->genlock);
        return (0);
}

/*
 * Find the slot for the specified unicast address
 */
int
bge_unicst_find(bge_t *bgep, const uint8_t *mac_addr)
{
        int slot;

        ASSERT(mutex_owned(bgep->genlock));

        for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
                if (bcmp(bgep->curr_addr[slot].addr, mac_addr, ETHERADDRL) == 0)
                        return (slot);
        }

        return (-1);
}

/*
 * Programs the classifier to start steering packets matching 'mac_addr' to the
 * specified ring 'arg'.
 */
static int
bge_addmac(void *arg, const uint8_t *mac_addr)
{
        recv_ring_t *rrp = (recv_ring_t *)arg;
        bge_t           *bgep = rrp->bgep;
        bge_recv_rule_t *rulep = bgep->recv_rules;
        bge_rule_info_t *rinfop = NULL;
        uint8_t         ring = (uint8_t)(rrp - bgep->recv) + 1;
        int             i;
        uint16_t        tmp16;
        uint32_t        tmp32;
        int             slot;
        int             err;

        mutex_enter(bgep->genlock);
        if (bgep->unicst_addr_avail == 0) {
                mutex_exit(bgep->genlock);
                return (ENOSPC);
        }

1453         /*
1454          * First add the unicast address to an available slot.
1455          */
1456         slot = bge_unicst_find(bgep, mac_addr);
1457         ASSERT(slot == -1);
1458 
1459         for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
1460                 if (!bgep->curr_addr[slot].set) {
1461                         bgep->curr_addr[slot].set = B_TRUE;
1462                         break;
1463                 }
1464         }
1465 
1466         ASSERT(slot < bgep->unicst_addr_total);
1467         bgep->unicst_addr_avail--;
1468         mutex_exit(bgep->genlock);
1469 
1470         if ((err = bge_unicst_set(bgep, mac_addr, slot)) != 0)
1471                 goto fail;
1472 
1473         /* A rule already exists for this ring; deny the request. */
1474         if (rrp->mac_addr_rule != NULL) {
1475                 err = ether_cmp(mac_addr, rrp->mac_addr_val) ? EEXIST : EBUSY;
1476                 goto fail;
1477         }
1478 
1479         /*
1480          * Allocate a bge_rule_info_t to keep track of which rule slots
1481          * are being used.
1482          */
1483         rinfop = kmem_zalloc(sizeof (bge_rule_info_t), KM_NOSLEEP);
1484         if (rinfop == NULL) {
1485                 err = ENOMEM;
1486                 goto fail;
1487         }
1488 
1489         /*
1490          * Look for the starting slot to place the rules.
1491          * The two slots we reserve must be contiguous.
1492          */
1493         for (i = 0; i + 1 < RECV_RULES_NUM_MAX; i++)
1494                 if ((rulep[i].control & RECV_RULE_CTL_ENABLE) == 0 &&
1495                     (rulep[i+1].control & RECV_RULE_CTL_ENABLE) == 0)
1496                         break;
1497 
1498         ASSERT(i + 1 < RECV_RULES_NUM_MAX);
1499 
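             /*
              * For illustration (hypothetical address): steering
              * 00:11:22:33:44:55 to this ring programs rule i with
              * mask_value 0x00112233 (the first four MAC bytes, in
              * network order regardless of host endianness, thanks to
              * ntohl) and rule i+1 with mask_value 0xffff4455 (the last
              * two bytes beneath the fixed 0xffff0000 upper halfword).
              */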
1500         bcopy(mac_addr, &tmp32, sizeof (tmp32));
1501         rulep[i].mask_value = ntohl(tmp32);
1502         rulep[i].control = RULE_DEST_MAC_1(ring) | RECV_RULE_CTL_AND;
1503         bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep[i].mask_value);
1504         bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep[i].control);
1505 
1506         bcopy(mac_addr + 4, &tmp16, sizeof (tmp16));
1507         rulep[i+1].mask_value = 0xffff0000 | ntohs(tmp16);
1508         rulep[i+1].control = RULE_DEST_MAC_2(ring);
1509         bge_reg_put32(bgep, RECV_RULE_MASK_REG(i+1), rulep[i+1].mask_value);
1510         bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i+1), rulep[i+1].control);
1511         rinfop->start = i;
1512         rinfop->count = 2;
1513 
1514         rrp->mac_addr_rule = rinfop;
1515         bcopy(mac_addr, rrp->mac_addr_val, ETHERADDRL);
1516 
1517         return (0);
1518 
1519 fail:
1520         /* Clear the address just set */
1521         (void) bge_unicst_set(bgep, zero_addr, slot);
1522         mutex_enter(bgep->genlock);
1523         bgep->curr_addr[slot].set = B_FALSE;
1524         bgep->unicst_addr_avail++;
1525         mutex_exit(bgep->genlock);
1526 
1527         return (err);
1528 }
1529 
1530 /*
1531  * Stop classifying packets matching the MAC address to the specified ring.
1532  */
1533 static int
1534 bge_remmac(void *arg, const uint8_t *mac_addr)
1535 {
1536         recv_ring_t     *rrp = (recv_ring_t *)arg;
1537         bge_t           *bgep = rrp->bgep;
1538         bge_recv_rule_t *rulep = bgep->recv_rules;
1539         bge_rule_info_t *rinfop = rrp->mac_addr_rule;
1540         int             start;
1541         int             slot;
1542         int             err;
1543 
1544         /*
1545          * Remove the MAC address from its slot.
1546          */
1547         mutex_enter(bgep->genlock);
1548         slot = bge_unicst_find(bgep, mac_addr);
1549         if (slot == -1) {
1550                 mutex_exit(bgep->genlock);
1551                 return (EINVAL);
1552         }
1553 
1554         ASSERT(bgep->curr_addr[slot].set);
1555         mutex_exit(bgep->genlock);
1556 
1557         if ((err = bge_unicst_set(bgep, zero_addr, slot)) != 0)
1558                 return (err);
1559 
1560         if (rinfop == NULL || ether_cmp(mac_addr, rrp->mac_addr_val) != 0)
1561                 return (EINVAL);
1562 
1563         start = rinfop->start;
1564         rulep[start].mask_value = 0;
1565         rulep[start].control = 0;
1566         bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
1567         bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);
1568         start++;
1569         rulep[start].mask_value = 0;
1570         rulep[start].control = 0;
1571         bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
1572         bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);
1573 
1574         kmem_free(rinfop, sizeof (bge_rule_info_t));
1575         rrp->mac_addr_rule = NULL;
1576         bzero(rrp->mac_addr_val, ETHERADDRL);
1577 
1578         mutex_enter(bgep->genlock);
1579         bgep->curr_addr[slot].set = B_FALSE;
1580         bgep->unicst_addr_avail++;
1581         mutex_exit(bgep->genlock);
1582 
1583         return (0);
1584 }
1585 
1586 static int
1587 bge_flag_intr_enable(mac_intr_handle_t ih)
1588 {
1589         recv_ring_t *rrp = (recv_ring_t *)ih;
1590         bge_t *bgep = rrp->bgep;
1591 
1592         mutex_enter(bgep->genlock);
1593         rrp->poll_flag = 0;
1594         mutex_exit(bgep->genlock);
1595 
1596         return (0);
1597 }
1598 
1599 static int
1600 bge_flag_intr_disable(mac_intr_handle_t ih)
1601 {
1602         recv_ring_t *rrp = (recv_ring_t *)ih;
1603         bge_t *bgep = rrp->bgep;
1604 
1605         mutex_enter(bgep->genlock);
1606         rrp->poll_flag = 1;
1607         mutex_exit(bgep->genlock);
1608 
1609         return (0);
1610 }
1611 
1612 static int
1613 bge_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
1614 {
1615         recv_ring_t *rx_ring;
1616 
1617         rx_ring = (recv_ring_t *)rh;
1618         mutex_enter(rx_ring->rx_lock);
1619         rx_ring->ring_gen_num = mr_gen_num;
1620         mutex_exit(rx_ring->rx_lock);
1621         return (0);
1622 }
1623 
1624 
1625 /*
1626  * Callback function for the MAC layer to register all rings
1627  * for the given ring group, identified by rg_index.
1628  */
1629 void
1630 bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
1631     const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
1632 {
1633         bge_t *bgep = arg;
1634         mac_intr_t *mintr;
1635 
1636         switch (rtype) {
1637         case MAC_RING_TYPE_RX: {
1638                 recv_ring_t *rx_ring;
1639                 ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
1640                     MAC_ADDRESS_REGS_MAX) && index == 0);
1641 
1642                 rx_ring = &bgep->recv[rg_index];
1643                 rx_ring->ring_handle = rh;
1644 
1645                 infop->mri_driver = (mac_ring_driver_t)rx_ring;
1646                 infop->mri_start = bge_ring_start;
1647                 infop->mri_stop = NULL;
1648                 infop->mri_poll = bge_poll_ring;
1649                 infop->mri_stat = bge_rx_ring_stat;
1650 
1651                 mintr = &infop->mri_intr;
1652                 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
1653                 mintr->mi_enable = bge_flag_intr_enable;
1654                 mintr->mi_disable = bge_flag_intr_disable;
1655 
1656                 break;
1657         }
1658         case MAC_RING_TYPE_TX:
1659         default:
1660                 ASSERT(0);
1661                 break;
1662         }
1663 }
1664 
1665 /*
1666  * Fill in the group info structure (infop) passed as argument,
1667  * for the ring group identified by rg_index.
1668  * Each group has a single ring in it, so we keep it simple
1669  * and use the same internal handle for both rings and groups.
1670  */
1671 void
1672 bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index,
1673         mac_group_info_t *infop, mac_group_handle_t gh)
1674 {
1675         bge_t *bgep = arg;
1676 
1677         switch (rtype) {
1678         case MAC_RING_TYPE_RX: {
1679                 recv_ring_t *rx_ring;
1680 
1681                 ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
1682                     MAC_ADDRESS_REGS_MAX));
1683                 rx_ring = &bgep->recv[rg_index];
1684                 rx_ring->ring_group_handle = gh;
1685 
1686                 infop->mgi_driver = (mac_group_driver_t)rx_ring;
1687                 infop->mgi_start = NULL;
1688                 infop->mgi_stop = NULL;
1689                 infop->mgi_addmac = bge_addmac;
1690                 infop->mgi_remmac = bge_remmac;
1691                 infop->mgi_count = 1;
1692                 break;
1693         }
1694         case MAC_RING_TYPE_TX:
1695         default:
1696                 ASSERT(0);
1697                 break;
1698         }
1699 }
1700 
1701 /*ARGSUSED*/
1702 static boolean_t
1703 bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1704 {
1705         bge_t *bgep = arg;
1706 
1707         switch (cap) {
1708         case MAC_CAPAB_HCKSUM: {
1709                 uint32_t *txflags = cap_data;
1710 
1711                 *txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
1712                 break;
1713         }
1714         case MAC_CAPAB_RINGS: {
1715                 mac_capab_rings_t *cap_rings = cap_data;
1716 
1717                 /* Temporarily disable multiple tx rings. */
1718                 if (cap_rings->mr_type != MAC_RING_TYPE_RX)
1719                         return (B_FALSE);
1720 
1721                 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
1722                 cap_rings->mr_rnum = cap_rings->mr_gnum =
1723                     MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX);
1724                 cap_rings->mr_rget = bge_fill_ring;
1725                 cap_rings->mr_gget = bge_fill_group;
1726                 break;
1727         }
1728         default:
1729                 return (B_FALSE);
1730         }
1731         return (B_TRUE);
1732 }
1733 
1734 /*
1735  * Loopback ioctl code
1736  */
1737 
1738 static lb_property_t loopmodes[] = {
1739         { normal,       "normal",       BGE_LOOP_NONE           },
1740         { external,     "1000Mbps",     BGE_LOOP_EXTERNAL_1000  },
1741         { external,     "100Mbps",      BGE_LOOP_EXTERNAL_100   },
1742         { external,     "10Mbps",       BGE_LOOP_EXTERNAL_10    },
1743         { internal,     "PHY",          BGE_LOOP_INTERNAL_PHY   },
1744         { internal,     "MAC",          BGE_LOOP_INTERNAL_MAC   }
1745 };
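     /* (This table is exported verbatim to userland via LB_GET_INFO below.) */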
1746 
1747 static enum ioc_reply
1748 bge_set_loop_mode(bge_t *bgep, uint32_t mode)
1749 {
1750         /*
1751          * If the mode isn't being changed, there's nothing to do ...
1752          */
1753         if (mode == bgep->param_loop_mode)
1754                 return (IOC_ACK);
1755 
1756         /*
1757          * Validate the requested mode; note that the change will
1758          * probably induce a link down/up cycle while the PHY
1759          * and/or MAC are reprogrammed ...
1760          */
1761         switch (mode) {
1762         default:
1763                 return (IOC_INVAL);
1764 
1765         case BGE_LOOP_NONE:
1766         case BGE_LOOP_EXTERNAL_1000:
1767         case BGE_LOOP_EXTERNAL_100:
1768         case BGE_LOOP_EXTERNAL_10:
1769         case BGE_LOOP_INTERNAL_PHY:
1770         case BGE_LOOP_INTERNAL_MAC:
1771                 break;
1772         }
1773 
1774         /*
1775          * All OK; tell the caller to reprogram
1776          * the PHY and/or MAC for the new mode ...
1777          */
1778         bgep->param_loop_mode = mode;
1779         return (IOC_RESTART_ACK);
1780 }
1781 
1782 static enum ioc_reply
1783 bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
1784 {
1785         lb_info_sz_t *lbsp;
1786         lb_property_t *lbpp;
1787         uint32_t *lbmp;
1788         int cmd;
1789 
1790         _NOTE(ARGUNUSED(wq))
1791 
1792         /*
1793          * Validate format of ioctl
1794          */
1795         if (mp->b_cont == NULL)
1796                 return (IOC_INVAL);
1797 
1798         cmd = iocp->ioc_cmd;
1799         switch (cmd) {
1800         default:
1801                 /* NOTREACHED */
1802                 bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
1803                 return (IOC_INVAL);
1804 
1805         case LB_GET_INFO_SIZE:
1806                 if (iocp->ioc_count != sizeof (lb_info_sz_t))
1807                         return (IOC_INVAL);
1808                 lbsp = (void *)mp->b_cont->b_rptr;
1809                 *lbsp = sizeof (loopmodes);
1810                 return (IOC_REPLY);
1811 
1812         case LB_GET_INFO:
1813                 if (iocp->ioc_count != sizeof (loopmodes))
1814                         return (IOC_INVAL);
1815                 lbpp = (void *)mp->b_cont->b_rptr;
1816                 bcopy(loopmodes, lbpp, sizeof (loopmodes));
1817                 return (IOC_REPLY);
1818 
1819         case LB_GET_MODE:
1820                 if (iocp->ioc_count != sizeof (uint32_t))
1821                         return (IOC_INVAL);
1822                 lbmp = (void *)mp->b_cont->b_rptr;
1823                 *lbmp = bgep->param_loop_mode;
1824                 return (IOC_REPLY);
1825 
1826         case LB_SET_MODE:
1827                 if (iocp->ioc_count != sizeof (uint32_t))
1828                         return (IOC_INVAL);
1829                 lbmp = (void *)mp->b_cont->b_rptr;
1830                 return (bge_set_loop_mode(bgep, *lbmp));
1831         }
1832 }
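
     /*
      * A minimal userland sketch of driving the loopback ioctls above,
      * assuming a hypothetical /dev/bge0 node and the standard STREAMS
      * I_STR mechanism (the driver then sees the data as mp->b_cont):
      *
      *     uint32_t mode;
      *     struct strioctl sioc;                  (<stropts.h>)
      *     int fd = open("/dev/bge0", O_RDWR);
      *
      *     sioc.ic_cmd = LB_GET_MODE;             (<sys/netlb.h>)
      *     sioc.ic_timout = 0;
      *     sioc.ic_len = sizeof (mode);
      *     sioc.ic_dp = (char *)&mode;
      *     if (ioctl(fd, I_STR, &sioc) == 0)
      *             mode holds one of the BGE_LOOP_* values
      */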
1833 
1834 /*
1835  * Specific bge IOCTLs; the gld module handles the generic ones.
1836  */
1837 static void
1838 bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1839 {
1840         bge_t *bgep = arg;
1841         struct iocblk *iocp;
1842         enum ioc_reply status;
1843         boolean_t need_privilege;
1844         int err;
1845         int cmd;
1846 
1847         /*
1848          * Validate the command before bothering with the mutex ...
1849          */
1850         iocp = (void *)mp->b_rptr;
1851         iocp->ioc_error = 0;
1852         need_privilege = B_TRUE;
1853         cmd = iocp->ioc_cmd;
1854         switch (cmd) {
1855         default:
1856                 miocnak(wq, mp, 0, EINVAL);
1857                 return;
1858 
1859         case BGE_MII_READ:
1860         case BGE_MII_WRITE:
1861         case BGE_SEE_READ:
1862         case BGE_SEE_WRITE:
1863         case BGE_FLASH_READ:
1864         case BGE_FLASH_WRITE:
1865         case BGE_DIAG:
1866         case BGE_PEEK:
1867         case BGE_POKE:
1868         case BGE_PHY_RESET:
1869         case BGE_SOFT_RESET:
1870         case BGE_HARD_RESET:
1871                 break;
1872 
1873         case LB_GET_INFO_SIZE:
1874         case LB_GET_INFO:
1875         case LB_GET_MODE:
1876                 need_privilege = B_FALSE;
1877                 /* FALLTHRU */
1878         case LB_SET_MODE:
1879                 break;
1880 
1881         }
1882 
1883         if (need_privilege) {
1884                 /*
1885                  * Check for specific net_config privilege on Solaris 10+.
1886                  */
1887                 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1888                 if (err != 0) {
1889                         miocnak(wq, mp, 0, err);
1890                         return;
1891                 }
1892         }
1893 
1894         mutex_enter(bgep->genlock);
1895         if (!(bgep->progress & PROGRESS_INTR)) {
1896                 /* can happen during autorecovery */
1897                 mutex_exit(bgep->genlock);
1898                 miocnak(wq, mp, 0, EIO);
1899                 return;
1900         }
1901 
1902         switch (cmd) {
1903         default:
1904                 _NOTE(NOTREACHED)
1905                 status = IOC_INVAL;
1906                 break;
1907 
1908         case BGE_MII_READ:
1909         case BGE_MII_WRITE:
1910         case BGE_SEE_READ:
1911         case BGE_SEE_WRITE:
1912         case BGE_FLASH_READ:
1913         case BGE_FLASH_WRITE:
1914         case BGE_DIAG:
1915         case BGE_PEEK:
1916         case BGE_POKE:
1917         case BGE_PHY_RESET:
1918         case BGE_SOFT_RESET:
1919         case BGE_HARD_RESET:
1920                 status = bge_chip_ioctl(bgep, wq, mp, iocp);
1921                 break;
1922 
1923         case LB_GET_INFO_SIZE:
1924         case LB_GET_INFO:
1925         case LB_GET_MODE:
1926         case LB_SET_MODE:
1927                 status = bge_loop_ioctl(bgep, wq, mp, iocp);
1928                 break;
1929 
1930         }
1931 
1932         /*
1933          * Do we need to reprogram the PHY and/or the MAC?
1934          * Do it now, while we still have the mutex.
1935          *
1936          * Note: update the PHY first, 'cos it controls the
1937          * speed/duplex parameters that the MAC code uses.
1938          */
1939         switch (status) {
1940         case IOC_RESTART_REPLY:
1941         case IOC_RESTART_ACK:
1942                 if (bge_reprogram(bgep) == IOC_INVAL)
1943                         status = IOC_INVAL;
1944                 break;
1945         }
1946 
1947         if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
1948                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
1949                 status = IOC_INVAL;
1950         }
1951         if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
1952                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
1953                 status = IOC_INVAL;
1954         }
1955         mutex_exit(bgep->genlock);
1956 
1957         /*
1958          * Finally, decide how to reply
1959          */
1960         switch (status) {
1961         default:
1962         case IOC_INVAL:
1963                 /*
1964                  * Error, reply with a NAK and EINVAL or the specified error
1965                  */
1966                 miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
1967                     EINVAL : iocp->ioc_error);
1968                 break;
1969 
1970         case IOC_DONE:
1971                 /*
1972                  * OK, reply already sent
1973                  */
1974                 break;
1975 
1976         case IOC_RESTART_ACK:
1977         case IOC_ACK:
1978                 /*
1979                  * OK, reply with an ACK
1980                  */
1981                 miocack(wq, mp, 0, 0);
1982                 break;
1983 
1984         case IOC_RESTART_REPLY:
1985         case IOC_REPLY:
1986                 /*
1987                  * OK, send prepared reply as ACK or NAK
1988                  */
1989                 mp->b_datap->db_type = iocp->ioc_error == 0 ?
1990                     M_IOCACK : M_IOCNAK;
1991                 qreply(wq, mp);
1992                 break;
1993         }
1994 }
1995 
1996 /*
1997  * ========== Per-instance setup/teardown code ==========
1998  */
1999 
2000 #undef  BGE_DBG
2001 #define BGE_DBG         BGE_DBG_INIT    /* debug flag for this code     */
2002 /*
2003  * Allocate an area of memory and a DMA handle for accessing it
2004  */
2005 static int
2006 bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
2007         uint_t dma_flags, dma_area_t *dma_p)
2008 {
2009         caddr_t va;
2010         int err;
2011 
2012         BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
2013             (void *)bgep, memsize, attr_p, dma_flags, dma_p));
2014 
2015         /*
2016          * Allocate handle
2017          */
2018         err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
2019             DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
2020         if (err != DDI_SUCCESS)
2021                 return (DDI_FAILURE);
2022 
2023         /*
2024          * Allocate memory
2025          */
2026         err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
2027             dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
2028             &dma_p->acc_hdl);
2029         if (err != DDI_SUCCESS)
2030                 return (DDI_FAILURE);
2031 
2032         /*
2033          * Bind the two together
2034          */
2035         dma_p->mem_va = va;
2036         err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
2037             va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
2038             &dma_p->cookie, &dma_p->ncookies);
2039 
2040         BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
2041             dma_p->alength, err, dma_p->ncookies));
2042 
2043         if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
2044                 return (DDI_FAILURE);
2045 
2046         dma_p->nslots = ~0U;
2047         dma_p->size = ~0U;
2048         dma_p->token = ~0U;
2049         dma_p->offset = 0;
2050         return (DDI_SUCCESS);
2051 }
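
     /*
      * Allocation is normally paired with bge_free_dma_mem() below;
      * a sketch of typical use (error handling elided), mirroring the
      * real callers in bge_alloc_bufs():
      *
      *     dma_area_t area;
      *
      *     if (bge_alloc_dma_mem(bgep, size, &bge_desc_accattr,
      *         DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &area) != DDI_SUCCESS)
      *             return (DDI_FAILURE);
      *     ... use DMA_VPTR(area) and area.cookie.dmac_laddress ...
      *     bge_free_dma_mem(&area);
      */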
2052 
2053 /*
2054  * Free one allocated area of DMAable memory
2055  */
2056 static void
2057 bge_free_dma_mem(dma_area_t *dma_p)
2058 {
2059         if (dma_p->dma_hdl != NULL) {
2060                 if (dma_p->ncookies) {
2061                         (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
2062                         dma_p->ncookies = 0;
2063                 }
2064                 ddi_dma_free_handle(&dma_p->dma_hdl);
2065                 dma_p->dma_hdl = NULL;
2066         }
2067 
2068         if (dma_p->acc_hdl != NULL) {
2069                 ddi_dma_mem_free(&dma_p->acc_hdl);
2070                 dma_p->acc_hdl = NULL;
2071         }
2072 }
2073 /*
2074  * Utility routine to carve a slice off a chunk of allocated memory,
2075  * updating the chunk descriptor accordingly.  The size of the slice
2076  * is given by the product of the <qty> and <size> parameters.
2077  */
2078 static void
2079 bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
2080         uint32_t qty, uint32_t size)
2081 {
2082         static uint32_t sequence = 0xbcd5704a;
2083         size_t totsize;
2084 
2085         totsize = qty*size;
2086         ASSERT(totsize <= chunk->alength);
2087 
2088         *slice = *chunk;
2089         slice->nslots = qty;
2090         slice->size = size;
2091         slice->alength = totsize;
2092         slice->token = ++sequence;
2093 
2094         chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
2095         chunk->alength -= totsize;
2096         chunk->offset += totsize;
2097         chunk->cookie.dmac_laddress += totsize;
2098         chunk->cookie.dmac_size -= totsize;
2099 }
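
     /*
      * For illustration (hypothetical numbers): slicing qty=4 slots of
      * size=512 bytes each from a 16 KB chunk yields a slice with
      * nslots=4, size=512 and alength=2048, while the chunk's alength
      * drops from 16384 to 14336 and its mem_va, offset and DMA cookie
      * address all advance by 2048.
      */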
2100 
2101 /*
2102  * Initialise the specified Receive Producer (Buffer) Ring, using
2103  * the information in the <dma_area> descriptors that it contains
2104  * to set up all the other fields. This routine should be called
2105  * only once for each ring.
2106  */
2107 static void
2108 bge_init_buff_ring(bge_t *bgep, uint64_t ring)
2109 {
2110         buff_ring_t *brp;
2111         bge_status_t *bsp;
2112         sw_rbd_t *srbdp;
2113         dma_area_t pbuf;
2114         uint32_t bufsize;
2115         uint32_t nslots;
2116         uint32_t slot;
2117         uint32_t split;
2118 
2119         static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = {
2120                 NIC_MEM_SHADOW_BUFF_STD,
2121                 NIC_MEM_SHADOW_BUFF_JUMBO,
2122                 NIC_MEM_SHADOW_BUFF_MINI
2123         };
2124         static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = {
2125                 RECV_STD_PROD_INDEX_REG,
2126                 RECV_JUMBO_PROD_INDEX_REG,
2127                 RECV_MINI_PROD_INDEX_REG
2128         };
2129         static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = {
2130                 STATUS_STD_BUFF_CONS_INDEX,
2131                 STATUS_JUMBO_BUFF_CONS_INDEX,
2132                 STATUS_MINI_BUFF_CONS_INDEX
2133         };
2134 
2135         BGE_TRACE(("bge_init_buff_ring($%p, %d)",
2136             (void *)bgep, ring));
2137 
2138         brp = &bgep->buff[ring];
2139         nslots = brp->desc.nslots;
2140         ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
2141         bufsize = brp->buf[0].size;
2142 
2143         /*
2144          * Set up the copy of the h/w RCB
2145          *
2146          * Note: unlike Send & Receive Return Rings (where the max_len
2147          * field holds the number of slots), in a Receive Buffer Ring
2148          * this field indicates the size of each buffer in the ring.
2149          */
2150         brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress;
2151         brp->hw_rcb.max_len = (uint16_t)bufsize;
2152         brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
2153         brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring];
2154 
2155         /*
2156          * Other one-off initialisation of per-ring data
2157          */
2158         brp->bgep = bgep;
2159         bsp = DMA_VPTR(bgep->status_block);
2160         brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]];
2161         brp->chip_mbx_reg = mailbox_regs[ring];
2162         mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER,
2163             DDI_INTR_PRI(bgep->intr_pri));
2164 
2165         /*
2166          * Allocate the array of s/w Receive Buffer Descriptors
2167          */
2168         srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP);
2169         brp->sw_rbds = srbdp;
2170 
2171         /*
2172          * Now initialise each array element once and for all
2173          */
2174         for (split = 0; split < BGE_SPLIT; ++split) {
2175                 pbuf = brp->buf[split];
2176                 for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot)
2177                         bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize);
2178                 ASSERT(pbuf.alength == 0);
2179         }
2180 }
2181 
2182 /*
2183  * Clean up initialisation done above before the memory is freed
2184  */
2185 static void
2186 bge_fini_buff_ring(bge_t *bgep, uint64_t ring)
2187 {
2188         buff_ring_t *brp;
2189         sw_rbd_t *srbdp;
2190 
2191         BGE_TRACE(("bge_fini_buff_ring($%p, %d)",
2192             (void *)bgep, ring));
2193 
2194         brp = &bgep->buff[ring];
2195         srbdp = brp->sw_rbds;
2196         kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp));
2197 
2198         mutex_destroy(brp->rf_lock);
2199 }
2200 
2201 /*
2202  * Initialise the specified Receive (Return) Ring, using the
2203  * information in the <dma_area> descriptors that it contains
2204  * to set up all the other fields. This routine should be called
2205  * only once for each ring.
2206  */
2207 static void
2208 bge_init_recv_ring(bge_t *bgep, uint64_t ring)
2209 {
2210         recv_ring_t *rrp;
2211         bge_status_t *bsp;
2212         uint32_t nslots;
2213 
2214         BGE_TRACE(("bge_init_recv_ring($%p, %d)",
2215             (void *)bgep, ring));
2216 
2217         /*
2218          * The chip architecture requires that receive return rings have
2219          * 512 or 1024 or 2048 elements per ring.  See 570X-PG108-R page 103.
2220          */
2221         rrp = &bgep->recv[ring];
2222         nslots = rrp->desc.nslots;
2223         ASSERT(nslots == 0 || nslots == 512 ||
2224             nslots == 1024 || nslots == 2048);
2225 
2226         /*
2227          * Set up the copy of the h/w RCB
2228          */
2229         rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress;
2230         rrp->hw_rcb.max_len = (uint16_t)nslots;
2231         rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
2232         rrp->hw_rcb.nic_ring_addr = 0;
2233 
2234         /*
2235          * Other one-off initialisation of per-ring data
2236          */
2237         rrp->bgep = bgep;
2238         bsp = DMA_VPTR(bgep->status_block);
2239         rrp->prod_index_p = RECV_INDEX_P(bsp, ring);
2240         rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring);
2241         mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER,
2242             DDI_INTR_PRI(bgep->intr_pri));
2243 }
2244 
2245 
2246 /*
2247  * Clean up initialisation done above before the memory is freed
2248  */
2249 static void
2250 bge_fini_recv_ring(bge_t *bgep, uint64_t ring)
2251 {
2252         recv_ring_t *rrp;
2253 
2254         BGE_TRACE(("bge_fini_recv_ring($%p, %d)",
2255             (void *)bgep, ring));
2256 
2257         rrp = &bgep->recv[ring];
2258         if (rrp->rx_softint)
2259                 ddi_remove_softintr(rrp->rx_softint);
2260         mutex_destroy(rrp->rx_lock);
2261 }
2262 
2263 /*
2264  * Initialise the specified Send Ring, using the information in the
2265  * <dma_area> descriptors that it contains to set up all the other
2266  * fields. This routine should be called only once for each ring.
2267  */
2268 static void
2269 bge_init_send_ring(bge_t *bgep, uint64_t ring)
2270 {
2271         send_ring_t *srp;
2272         bge_status_t *bsp;
2273         sw_sbd_t *ssbdp;
2274         dma_area_t desc;
2275         dma_area_t pbuf;
2276         uint32_t nslots;
2277         uint32_t slot;
2278         uint32_t split;
2279         sw_txbuf_t *txbuf;
2280 
2281         BGE_TRACE(("bge_init_send_ring($%p, %d)",
2282             (void *)bgep, ring));
2283 
2284         /*
2285          * The chip architecture requires that host-based send rings
2286          * have 512 elements per ring.  See 570X-PG102-R page 56.
2287          */
2288         srp = &bgep->send[ring];
2289         nslots = srp->desc.nslots;
2290         ASSERT(nslots == 0 || nslots == 512);
2291 
2292         /*
2293          * Set up the copy of the h/w RCB
2294          */
2295         srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress;
2296         srp->hw_rcb.max_len = (uint16_t)nslots;
2297         srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
2298         srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots);
2299 
2300         /*
2301          * Other one-off initialisation of per-ring data
2302          */
2303         srp->bgep = bgep;
2304         bsp = DMA_VPTR(bgep->status_block);
2305         srp->cons_index_p = SEND_INDEX_P(bsp, ring);
2306         srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring);
2307         mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
2308             DDI_INTR_PRI(bgep->intr_pri));
2309         mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER,
2310             DDI_INTR_PRI(bgep->intr_pri));
2311         mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER,
2312             DDI_INTR_PRI(bgep->intr_pri));
2313         mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
2314             DDI_INTR_PRI(bgep->intr_pri));
2315         if (nslots == 0)
2316                 return;
2317 
2318         /*
2319          * Allocate the array of s/w Send Buffer Descriptors
2320          */
2321         ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
2322         txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP);
2323         srp->txbuf_head =
2324             kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP);
2325         srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP);
2326         srp->sw_sbds = ssbdp;
2327         srp->txbuf = txbuf;
2328         srp->tx_buffers = BGE_SEND_BUF_NUM;
2329         srp->tx_buffers_low = srp->tx_buffers / 4;
2330         if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT)
2331                 srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO;
2332         else
2333                 srp->tx_array_max = BGE_SEND_BUF_ARRAY;
2334         srp->tx_array = 1;
2335 
2336         /*
2337          * Chunk tx desc area
2338          */
2339         desc = srp->desc;
2340         for (slot = 0; slot < nslots; ++ssbdp, ++slot) {
2341                 bge_slice_chunk(&ssbdp->desc, &desc, 1,
2342                     sizeof (bge_sbd_t));
2343         }
2344         ASSERT(desc.alength == 0);
2345 
2346         /*
2347          * Chunk tx buffer area
2348          */
2349         for (split = 0; split < BGE_SPLIT; ++split) {
2350                 pbuf = srp->buf[0][split];
2351                 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) {
2352                         bge_slice_chunk(&txbuf->buf, &pbuf, 1,
2353                             bgep->chipid.snd_buff_size);
2354                         txbuf++;
2355                 }
2356                 ASSERT(pbuf.alength == 0);
2357         }
2358 }
2359 
2360 /*
2361  * Clean up initialisation done above before the memory is freed
2362  */
2363 static void
2364 bge_fini_send_ring(bge_t *bgep, uint64_t ring)
2365 {
2366         send_ring_t *srp;
2367         uint32_t array;
2368         uint32_t split;
2369         uint32_t nslots;
2370 
2371         BGE_TRACE(("bge_fini_send_ring($%p, %d)",
2372             (void *)bgep, ring));
2373 
2374         srp = &bgep->send[ring];
2375         mutex_destroy(srp->tc_lock);
2376         mutex_destroy(srp->freetxbuf_lock);
2377         mutex_destroy(srp->txbuf_lock);
2378         mutex_destroy(srp->tx_lock);
2379         nslots = srp->desc.nslots;
2380         if (nslots == 0)
2381                 return;
2382 
2383         for (array = 1; array < srp->tx_array; ++array)
2384                 for (split = 0; split < BGE_SPLIT; ++split)
2385                         bge_free_dma_mem(&srp->buf[array][split]);
2386         kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds));
2387         kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head));
2388         kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf));
2389         kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp));
2390         srp->sw_sbds = NULL;
2391         srp->txbuf_head = NULL;
2392         srp->txbuf = NULL;
2393         srp->pktp = NULL;
2394 }
2395 
2396 /*
2397  * Initialise all transmit, receive, and buffer rings.
2398  */
2399 void
2400 bge_init_rings(bge_t *bgep)
2401 {
2402         uint32_t ring;
2403 
2404         BGE_TRACE(("bge_init_rings($%p)", (void *)bgep));
2405 
2406         /*
2407          * Perform one-off initialisation of each ring ...
2408          */
2409         for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
2410                 bge_init_send_ring(bgep, ring);
2411         for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
2412                 bge_init_recv_ring(bgep, ring);
2413         for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
2414                 bge_init_buff_ring(bgep, ring);
2415 }
2416 
2417 /*
2418  * Undo the work of bge_init_rings() above before the memory is freed
2419  */
2420 void
2421 bge_fini_rings(bge_t *bgep)
2422 {
2423         uint32_t ring;
2424 
2425         BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep));
2426 
2427         for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
2428                 bge_fini_buff_ring(bgep, ring);
2429         for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
2430                 bge_fini_recv_ring(bgep, ring);
2431         for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
2432                 bge_fini_send_ring(bgep, ring);
2433 }
2434 
2435 /*
2436  * Called from bge_m_stop() to free the tx buffers that were
2437  * allocated by the tx process.
2438  */
2439 void
2440 bge_free_txbuf_arrays(send_ring_t *srp)
2441 {
2442         uint32_t array;
2443         uint32_t split;
2444 
2445         ASSERT(mutex_owned(srp->tx_lock));
2446 
2447         /*
2448          * Free the extra tx buffer DMA area
2449          */
2450         for (array = 1; array < srp->tx_array; ++array)
2451                 for (split = 0; split < BGE_SPLIT; ++split)
2452                         bge_free_dma_mem(&srp->buf[array][split]);
2453 
2454         /*
2455          * Restore initial tx buffer numbers
2456          */
2457         srp->tx_array = 1;
2458         srp->tx_buffers = BGE_SEND_BUF_NUM;
2459         srp->tx_buffers_low = srp->tx_buffers / 4;
2460         srp->tx_flow = 0;
2461         bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
2462 }
2463 
2464 /*
2465  * Called from the tx process to allocate more tx buffers
2466  */
2467 bge_queue_item_t *
2468 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp)
2469 {
2470         bge_queue_t *txbuf_queue;
2471         bge_queue_item_t *txbuf_item_last;
2472         bge_queue_item_t *txbuf_item;
2473         bge_queue_item_t *txbuf_item_rtn;
2474         sw_txbuf_t *txbuf;
2475         dma_area_t area;
2476         size_t txbuffsize;
2477         uint32_t slot;
2478         uint32_t array;
2479         uint32_t split;
2480         uint32_t err;
2481 
2482         ASSERT(mutex_owned(srp->tx_lock));
2483 
2484         array = srp->tx_array;
2485         if (array >= srp->tx_array_max)
2486                 return (NULL);
2487 
2488         /*
2489          * Allocate memory & handles for TX buffers
2490          */
2491         txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
2492         ASSERT((txbuffsize % BGE_SPLIT) == 0);
2493         for (split = 0; split < BGE_SPLIT; ++split) {
2494                 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
2495                     &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
2496                     &srp->buf[array][split]);
2497                 if (err != DDI_SUCCESS) {
2498                         /* Free the chunks allocated so far (incl. the partial one) */
2499                         for (slot = 0; slot <= split; ++slot)
2500                                 bge_free_dma_mem(&srp->buf[array][slot]);
2501                         srp->tx_alloc_fail++;
2502                         return (NULL);
2503                 }
2504         }
2505 
2506         /*
2507          * Chunk tx buffer area
2508          */
2509         txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM;
2510         for (split = 0; split < BGE_SPLIT; ++split) {
2511                 area = srp->buf[array][split];
2512                 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) {
2513                         bge_slice_chunk(&txbuf->buf, &area, 1,
2514                             bgep->chipid.snd_buff_size);
2515                         txbuf++;
2516                 }
2517         }
2518 
2519         /*
2520          * Add above buffers to the tx buffer pop queue
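              *
              * (The first item of this array is handed back to the caller;
              * items 1 .. BGE_SEND_BUF_NUM-1, linked head-to-tail below,
              * are prepended to the pop queue, so its count grows by only
              * BGE_SEND_BUF_NUM - 1.)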
2521          */
2522         txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM;
2523         txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM;
2524         txbuf_item_last = NULL;
2525         for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) {
2526                 txbuf_item->item = txbuf;
2527                 txbuf_item->next = txbuf_item_last;
2528                 txbuf_item_last = txbuf_item;
2529                 txbuf++;
2530                 txbuf_item++;
2531         }
2532         txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM;
2533         txbuf_item_rtn = txbuf_item;
2534         txbuf_item++;
2535         txbuf_queue = srp->txbuf_pop_queue;
2536         mutex_enter(txbuf_queue->lock);
2537         txbuf_item->next = txbuf_queue->head;
2538         txbuf_queue->head = txbuf_item_last;
2539         txbuf_queue->count += BGE_SEND_BUF_NUM - 1;
2540         mutex_exit(txbuf_queue->lock);
2541 
2542         srp->tx_array++;
2543         srp->tx_buffers += BGE_SEND_BUF_NUM;
2544         srp->tx_buffers_low = srp->tx_buffers / 4;
2545 
2546         return (txbuf_item_rtn);
2547 }
2548 
2549 /*
2550  * This function allocates all the transmit and receive buffers
2551  * and descriptors, in four chunks.
2552  */
2553 int
2554 bge_alloc_bufs(bge_t *bgep)
2555 {
2556         dma_area_t area;
2557         size_t rxbuffsize;
2558         size_t txbuffsize;
2559         size_t rxbuffdescsize;
2560         size_t rxdescsize;
2561         size_t txdescsize;
2562         uint32_t ring;
2563         uint32_t rx_rings = bgep->chipid.rx_rings;
2564         uint32_t tx_rings = bgep->chipid.tx_rings;
2565         int split;
2566         int err;
2567 
2568         BGE_TRACE(("bge_alloc_bufs($%p)",
2569             (void *)bgep));
2570 
2571         rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size;
2572         rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size;
2573         rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE;
2574 
2575         txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
2576         txbuffsize *= tx_rings;
2577 
2578         rxdescsize = rx_rings*bgep->chipid.recv_slots;
2579         rxdescsize *= sizeof (bge_rbd_t);
2580 
2581         rxbuffdescsize = BGE_STD_SLOTS_USED;
2582         rxbuffdescsize += bgep->chipid.jumbo_slots;
2583         rxbuffdescsize += BGE_MINI_SLOTS_USED;
2584         rxbuffdescsize *= sizeof (bge_rbd_t);
2585 
2586         txdescsize = tx_rings*BGE_SEND_SLOTS_USED;
2587         txdescsize *= sizeof (bge_sbd_t);
2588         txdescsize += sizeof (bge_statistics_t);
2589         txdescsize += sizeof (bge_status_t);
2590         txdescsize += BGE_STATUS_PADDING;
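             /*
              * (This single area is carved up further below: per-ring send
              * descriptors first, then the statistics area, the status
              * block, and finally BGE_STATUS_PADDING bytes of slack.)
              */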
2591 
2592         /*
2593          * Enable PCI relaxed ordering only for RX/TX data buffers
2594          */
2595         if (bge_relaxed_ordering)
2596                 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2597 
2598         /*
2599          * Allocate memory & handles for RX buffers
2600          */
2601         ASSERT((rxbuffsize % BGE_SPLIT) == 0);
2602         for (split = 0; split < BGE_SPLIT; ++split) {
2603                 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT,
2604                     &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE,
2605                     &bgep->rx_buff[split]);
2606                 if (err != DDI_SUCCESS)
2607                         return (DDI_FAILURE);
2608         }
2609 
2610         /*
2611          * Allocate memory & handles for TX buffers
2612          */
2613         ASSERT((txbuffsize % BGE_SPLIT) == 0);
2614         for (split = 0; split < BGE_SPLIT; ++split) {
2615                 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
2616                     &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
2617                     &bgep->tx_buff[split]);
2618                 if (err != DDI_SUCCESS)
2619                         return (DDI_FAILURE);
2620         }
2621 
2622         dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING;
2623 
2624         /*
2625          * Allocate memory & handles for receive return rings
2626          */
2627         ASSERT((rxdescsize % rx_rings) == 0);
2628         for (split = 0; split < rx_rings; ++split) {
2629                 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings,
2630                     &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2631                     &bgep->rx_desc[split]);
2632                 if (err != DDI_SUCCESS)
2633                         return (DDI_FAILURE);
2634         }
2635 
2636         /*
2637          * Allocate memory & handles for buffer (producer) descriptor rings
2638          */
2639         err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr,
2640             DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]);
2641         if (err != DDI_SUCCESS)
2642                 return (DDI_FAILURE);
2643 
2644         /*
2645          * Allocate memory & handles for TX descriptor rings,
2646          * status block, and statistics area
2647          */
2648         err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr,
2649             DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc);
2650         if (err != DDI_SUCCESS)
2651                 return (DDI_FAILURE);
2652 
2653         /*
2654          * Now carve up each of the allocated areas ...
2655          */
2656         for (split = 0; split < BGE_SPLIT; ++split) {
2657                 area = bgep->rx_buff[split];
2658                 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split],
2659                     &area, BGE_STD_SLOTS_USED/BGE_SPLIT,
2660                     bgep->chipid.std_buf_size);
2661                 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split],
2662                     &area, bgep->chipid.jumbo_slots/BGE_SPLIT,
2663                     bgep->chipid.recv_jumbo_size);
2664                 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split],
2665                     &area, BGE_MINI_SLOTS_USED/BGE_SPLIT,
2666                     BGE_MINI_BUFF_SIZE);
2667         }
2668 
2669         for (split = 0; split < BGE_SPLIT; ++split) {
2670                 area = bgep->tx_buff[split];
2671                 for (ring = 0; ring < tx_rings; ++ring)
2672                         bge_slice_chunk(&bgep->send[ring].buf[0][split],
2673                             &area, BGE_SEND_BUF_NUM/BGE_SPLIT,
2674                             bgep->chipid.snd_buff_size);
2675                 for (; ring < BGE_SEND_RINGS_MAX; ++ring)
2676                         bge_slice_chunk(&bgep->send[ring].buf[0][split],
2677                             &area, 0, bgep->chipid.snd_buff_size);
2678         }
2679 
2680         for (ring = 0; ring < rx_rings; ++ring)
2681                 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring],
2682                     bgep->chipid.recv_slots, sizeof (bge_rbd_t));
2683 
2684         area = bgep->rx_desc[rx_rings];
2685         for (; ring < BGE_RECV_RINGS_MAX; ++ring)
2686                 bge_slice_chunk(&bgep->recv[ring].desc, &area,
2687                     0, sizeof (bge_rbd_t));
2688         bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area,
2689             BGE_STD_SLOTS_USED, sizeof (bge_rbd_t));
2690         bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area,
2691             bgep->chipid.jumbo_slots, sizeof (bge_rbd_t));
2692         bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area,
2693             BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t));
2694         ASSERT(area.alength == 0);
2695 
2696         area = bgep->tx_desc;
2697         for (ring = 0; ring < tx_rings; ++ring)
2698                 bge_slice_chunk(&bgep->send[ring].desc, &area,
2699                     BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t));
2700         for (; ring < BGE_SEND_RINGS_MAX; ++ring)
2701                 bge_slice_chunk(&bgep->send[ring].desc, &area,
2702                     0, sizeof (bge_sbd_t));
2703         bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t));
2704         bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t));
2705         ASSERT(area.alength == BGE_STATUS_PADDING);
2706         DMA_ZERO(bgep->status_block);
2707 
2708         return (DDI_SUCCESS);
2709 }
2710 
2711 /*
2712  * This routine frees the transmit and receive buffers and descriptors.
2713  * Make sure the chip is stopped before calling it!
2714  */
2715 void
2716 bge_free_bufs(bge_t *bgep)
2717 {
2718         int split;
2719 
2720         BGE_TRACE(("bge_free_bufs($%p)",
2721             (void *)bgep));
2722 
2723         bge_free_dma_mem(&bgep->tx_desc);
2724         for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split)
2725                 bge_free_dma_mem(&bgep->rx_desc[split]);
2726         for (split = 0; split < BGE_SPLIT; ++split)
2727                 bge_free_dma_mem(&bgep->tx_buff[split]);
2728         for (split = 0; split < BGE_SPLIT; ++split)
2729                 bge_free_dma_mem(&bgep->rx_buff[split]);
2730 }
2731 
2732 /*
2733  * Determine (initial) MAC address ("BIA") to use for this interface
2734  */
2735 
2736 static void
2737 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp)
2738 {
2739         struct ether_addr sysaddr;
2740         char propbuf[8];                /* "true" or "false", plus NUL  */
2741         uchar_t *bytes;
2742         int *ints;
2743         uint_t nelts;
2744         int err;
2745 
2746         BGE_TRACE(("bge_find_mac_address($%p)",
2747             (void *)bgep));
2748 
2749         BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)",
2750             cidp->hw_mac_addr,
2751             ether_sprintf((void *)cidp->vendor_addr.addr),
2752             cidp->vendor_addr.set ? "" : "not "));
2753 
2754         /*
2755          * The "vendor's factory-set address" may already have
2756          * been extracted from the chip, but if the property
2757          * "local-mac-address" is set we use that instead.  It
2758          * will normally be set by OBP, but it could also be
2759          * specified in a .conf file(!)
2760          *
2761          * There doesn't seem to be a way to define byte-array
2762          * properties in a .conf, so we check whether it looks
2763          * like an array of 6 ints instead.
2764          *
2765          * Then, we check whether it looks like an array of 6
2766          * bytes (which it should, if OBP set it).  If we can't
2767          * make sense of it either way, we'll ignore it.
2768          */
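             /*
              * A hypothetical .conf entry of that form might look like:
              *
              *     local-mac-address=0x0,0x14,0x4f,0x12,0x34,0x56;
              */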
2769         err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo,
2770             DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts);
2771         if (err == DDI_PROP_SUCCESS) {
2772                 if (nelts == ETHERADDRL) {
2773                         while (nelts--)
2774                                 cidp->vendor_addr.addr[nelts] = ints[nelts];
2775                         cidp->vendor_addr.set = B_TRUE;
2776                 }
2777                 ddi_prop_free(ints);
2778         }
2779 
2780         err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
2781             DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts);
2782         if (err == DDI_PROP_SUCCESS) {
2783                 if (nelts == ETHERADDRL) {
2784                         while (nelts--)
2785                                 cidp->vendor_addr.addr[nelts] = bytes[nelts];
2786                         cidp->vendor_addr.set = B_TRUE;
2787                 }
2788                 ddi_prop_free(bytes);
2789         }
2790 
2791         BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)",
2792             ether_sprintf((void *)cidp->vendor_addr.addr),
2793             cidp->vendor_addr.set ? "" : "not "));
2794 
2795         /*
2796          * Look up the OBP property "local-mac-address?".  Note that even
2797          * though its value is a string (which should be "true" or "false"),
2798          * it can't be decoded by ddi_prop_lookup_string(9F).  So, we zero
2799          * the buffer first and then fetch the property as an untyped array;
2800          * this may or may not include a final NUL, but since there will
2801          * always be one left at the end of the buffer we can now treat it
2802          * as a string anyway.
2803          */
2804         nelts = sizeof (propbuf);
2805         bzero(propbuf, nelts--);
2806         err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo,
2807             DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts);
2808 
2809         /*
2810          * Now, if the address still isn't set from the hardware (SEEPROM)
2811          * or the OBP or .conf property, OR if the user has foolishly set
2812          * 'local-mac-address? = false', use "the system address" instead
2813          * (but only if it's non-null i.e. has been set from the IDPROM).
2814          */
2815         if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0)
2816                 if (localetheraddr(NULL, &sysaddr) != 0) {
2817                         ethaddr_copy(&sysaddr, cidp->vendor_addr.addr);
2818                         cidp->vendor_addr.set = B_TRUE;
2819                 }
2820 
2821         BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)",
2822             ether_sprintf((void *)cidp->vendor_addr.addr),
2823             cidp->vendor_addr.set ? "" : "not "));
2824 
2825         /*
2826          * Finally(!), if there's a valid "mac-address" property (created
2827          * if we netbooted from this interface), we must use this instead
2828          * of any of the above to ensure that the NFS/install server doesn't
2829          * get confused by the address changing as Solaris takes over!
2830          */
2831         err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
2832             DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts);
2833         if (err == DDI_PROP_SUCCESS) {
2834                 if (nelts == ETHERADDRL) {
2835                         while (nelts--)
2836                                 cidp->vendor_addr.addr[nelts] = bytes[nelts];
2837                         cidp->vendor_addr.set = B_TRUE;
2838                 }
2839                 ddi_prop_free(bytes);
2840         }
2841 
2842         BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)",
2843             ether_sprintf((void *)cidp->vendor_addr.addr),
2844             cidp->vendor_addr.set ? "" : "not "));
2845 }
2846 
2847 
2848 /*ARGSUSED*/
2849 int
2850 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle)
2851 {
2852         ddi_fm_error_t de;
2853 
2854         ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
2855         ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
2856         return (de.fme_status);
2857 }
2858 
2859 /*ARGSUSED*/
2860 int
2861 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle)
2862 {
2863         ddi_fm_error_t de;
2864 
2865         ASSERT(bgep->progress & PROGRESS_BUFS);
2866         ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
2867         return (de.fme_status);
2868 }
2869 
2870 /*
2871  * The IO fault service error handling callback function
2872  */
2873 /*ARGSUSED*/
2874 static int
2875 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
2876 {
2877         /*
2878          * As the driver can always deal with an error in any DMA or
2879          * access handle, we can just return the fme_status value.
2880          */
2881         pci_ereport_post(dip, err, NULL);
2882         return (err->fme_status);
2883 }
2884 
2885 static void
2886 bge_fm_init(bge_t *bgep)
2887 {
2888         ddi_iblock_cookie_t iblk;
2889 
2890         /* Only register with IO Fault Services if we have some capability */
2891         if (bgep->fm_capabilities) {
2892                 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
2893                 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
2894 
2895                 /* Register capabilities with IO Fault Services */
2896                 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk);
2897 
2898                 /*
2899                  * Initialize pci ereport capabilities if ereport capable
2900                  */
2901                 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
2902                     DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
2903                         pci_ereport_setup(bgep->devinfo);
2904 
2905                 /*
2906                  * Register error callback if error callback capable
2907                  */
2908                 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
2909                         ddi_fm_handler_register(bgep->devinfo,
2910                             bge_fm_error_cb, (void*) bgep);
2911         } else {
2912                 /*
2913                  * These fields have to be reset to their non-FMA defaults
2914                  * if there are no FMA capabilities at runtime.
2915                  */
2916                 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
2917                 dma_attr.dma_attr_flags = 0;
2918         }
2919 }
2920 
2921 static void
2922 bge_fm_fini(bge_t *bgep)
2923 {
2924         /* Only unregister FMA capabilities if we registered some */
2925         if (bgep->fm_capabilities) {
2926 
2927                 /*
2928                  * Release any resources allocated by pci_ereport_setup()
2929                  */
2930                 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
2931                     DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
2932                         pci_ereport_teardown(bgep->devinfo);
2933 
2934                 /*
2935                  * Un-register error callback if error callback capable
2936                  */
2937                 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
2938                         ddi_fm_handler_unregister(bgep->devinfo);
2939 
2940                 /* Unregister from IO Fault Services */
2941                 ddi_fm_fini(bgep->devinfo);
2942         }
2943 }
2944 
2945 static void
2946 #ifdef BGE_IPMI_ASF
2947 bge_unattach(bge_t *bgep, uint_t asf_mode)
2948 #else
2949 bge_unattach(bge_t *bgep)
2950 #endif
2951 {
2952         BGE_TRACE(("bge_unattach($%p)",
2953                 (void *)bgep));
2954 
2955         /*
2956          * Flag that no more activity may be initiated
2957          */
2958         bgep->progress &= ~PROGRESS_READY;
2959 
2960         /*
2961          * Quiesce the PHY and MAC (leave them reset but still powered).
2962          * Clean up and free all BGE data structures.
2963          */
2964         if (bgep->periodic_id != NULL) {
2965                 ddi_periodic_delete(bgep->periodic_id);
2966                 bgep->periodic_id = NULL;
2967         }
2968         if (bgep->progress & PROGRESS_KSTATS)
2969                 bge_fini_kstats(bgep);
2970         if (bgep->progress & PROGRESS_PHY)
2971                 bge_phys_reset(bgep);
2972         if (bgep->progress & PROGRESS_HWINT) {
2973                 mutex_enter(bgep->genlock);
2974 #ifdef BGE_IPMI_ASF
2975                 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS)
2976 #else
2977                 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS)
2978 #endif
2979                         ddi_fm_service_impact(bgep->devinfo,
2980                             DDI_SERVICE_UNAFFECTED);
2981 #ifdef BGE_IPMI_ASF
2982                 if (bgep->asf_enabled) {
2983                         /*
2984                          * This register has been overlaid. We restore its
2985                          * initial value here.
2986                          */
2987                         bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR,
2988                             BGE_NIC_DATA_SIG);
2989                 }
2990 #endif
2991                 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
2992                         ddi_fm_service_impact(bgep->devinfo,
2993                             DDI_SERVICE_UNAFFECTED);
2994                 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
2995                         ddi_fm_service_impact(bgep->devinfo,
2996                             DDI_SERVICE_UNAFFECTED);
2997                 mutex_exit(bgep->genlock);
2998         }
2999         if (bgep->progress & PROGRESS_INTR) {
3000                 bge_intr_disable(bgep);
3001                 bge_fini_rings(bgep);
3002         }
3003         if (bgep->progress & PROGRESS_HWINT) {
3004                 bge_rem_intrs(bgep);
3005                 rw_destroy(bgep->errlock);
3006                 mutex_destroy(bgep->softintrlock);
3007                 mutex_destroy(bgep->genlock);
3008         }
3009         if (bgep->progress & PROGRESS_FACTOTUM)
3010                 ddi_remove_softintr(bgep->factotum_id);
3011         if (bgep->progress & PROGRESS_RESCHED)
3012                 ddi_remove_softintr(bgep->drain_id);
3013         if (bgep->progress & PROGRESS_BUFS)
3014                 bge_free_bufs(bgep);
3015         if (bgep->progress & PROGRESS_REGS)
3016                 ddi_regs_map_free(&bgep->io_handle);
3017         if (bgep->progress & PROGRESS_CFG)
3018                 pci_config_teardown(&bgep->cfg_handle);
3019 
3020         bge_fm_fini(bgep);
3021 
3022         ddi_remove_minor_node(bgep->devinfo, NULL);
3023         kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t));
3024         kmem_free(bgep, sizeof (*bgep));
3025 }
3026 
3027 static int
3028 bge_resume(dev_info_t *devinfo)
3029 {
3030         bge_t *bgep;                            /* Our private data     */
3031         chip_id_t *cidp;
3032         chip_id_t chipid;
3033 
3034         bgep = ddi_get_driver_private(devinfo);
3035         if (bgep == NULL)
3036                 return (DDI_FAILURE);
3037 
3038         /*
3039          * Refuse to resume if the data structures aren't consistent
3040          */
3041         if (bgep->devinfo != devinfo)
3042                 return (DDI_FAILURE);
3043 
3044 #ifdef BGE_IPMI_ASF
3045         /*
3046          * Power management is not currently supported by the BGE
3047          * driver. If you want to implement it, add the ASF/IPMI
3048          * related code here.
3049          */
3050 
3051 #endif
3052 
3053         /*
3054          * Read chip ID & set up config space command register(s)
3055          * Refuse to resume if the chip has changed its identity!
3056          */
3057         cidp = &bgep->chipid;
3058         mutex_enter(bgep->genlock);
3059         bge_chip_cfg_init(bgep, &chipid, B_FALSE);
3060         if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
3061                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3062                 mutex_exit(bgep->genlock);
3063                 return (DDI_FAILURE);
3064         }
3065         mutex_exit(bgep->genlock);
3066         if (chipid.vendor != cidp->vendor)
3067                 return (DDI_FAILURE);
3068         if (chipid.device != cidp->device)
3069                 return (DDI_FAILURE);
3070         if (chipid.revision != cidp->revision)
3071                 return (DDI_FAILURE);
3072         if (chipid.asic_rev != cidp->asic_rev)
3073                 return (DDI_FAILURE);
3074 
3075         /*
3076          * All OK, reinitialise h/w & kick off GLD scheduling
3077          */
3078         mutex_enter(bgep->genlock);
3079         if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) {
3080                 (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
3081                 (void) bge_check_acc_handle(bgep, bgep->io_handle);
3082                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3083                 mutex_exit(bgep->genlock);
3084                 return (DDI_FAILURE);
3085         }
3086         if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
3087                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3088                 mutex_exit(bgep->genlock);
3089                 return (DDI_FAILURE);
3090         }
3091         if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
3092                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3093                 mutex_exit(bgep->genlock);
3094                 return (DDI_FAILURE);
3095         }
3096         mutex_exit(bgep->genlock);
3097         return (DDI_SUCCESS);
3098 }
3099 
3100 /*
3101  * attach(9E) -- Attach a device to the system
3102  *
3103  * Called once for each board successfully probed.
3104  */
3105 static int
3106 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
3107 {
3108         bge_t *bgep;                            /* Our private data     */
3109         mac_register_t *macp;
3110         chip_id_t *cidp;
3111         caddr_t regs;
3112         int instance;
3113         int err;
3114         int intr_types;
3115 #ifdef BGE_IPMI_ASF
3116         uint32_t mhcrValue;
3117 #ifdef __sparc
3118         uint16_t value16;
3119 #endif
3120 #ifdef BGE_NETCONSOLE
3121         int retval;
3122 #endif
3123 #endif
3124 
3125         instance = ddi_get_instance(devinfo);
3126 
3127         BGE_GTRACE(("bge_attach($%p, %d) instance %d",
3128             (void *)devinfo, cmd, instance));
3129         BGE_BRKPT(NULL, "bge_attach");
3130 
3131         switch (cmd) {
3132         default:
3133                 return (DDI_FAILURE);
3134 
3135         case DDI_RESUME:
3136                 return (bge_resume(devinfo));
3137 
3138         case DDI_ATTACH:
3139                 break;
3140         }
3141 
3142         bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP);
3143         bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP);
3144         ddi_set_driver_private(devinfo, bgep);
3145         bgep->bge_guard = BGE_GUARD;
3146         bgep->devinfo = devinfo;
3147         bgep->param_drain_max = 64;
3148         bgep->param_msi_cnt = 0;
3149         bgep->param_loop_mode = 0;
3150 
3151         /*
3152          * Initialize more fields in BGE private data
3153          */
3154         bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3155             DDI_PROP_DONTPASS, debug_propname, bge_debug);
3156         (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d",
3157             BGE_DRIVER_NAME, instance);
3158 
3159         /*
3160          * Initialize for FMA support
3161          */
3162         bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3163             DDI_PROP_DONTPASS, fm_cap,
3164             DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
3165             DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
3166         BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities));
3167         bge_fm_init(bgep);
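             /*
              * (Illustrative note; the configuration mentioned here is an
              * assumption, not required by the code: if the property named
              * by fm_cap were set to 0, e.g. via a driver .conf file,
              * bgep->fm_capabilities would be 0 and bge_fm_init() above
              * would simply fall back to DDI_DEFAULT_ACC access attributes
              * and clear the DMA error flags, registering no callback.)
              */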
3168 
3169         /*
3170          * Look up the IOMMU's page size for DVMA mappings (must be
3171          * a power of 2) and convert to a mask.  This can be used to
3172          * determine whether a message buffer crosses a page boundary.
3173          * Note: in 2s complement binary notation, if X is a power of
3174          * 2, then -X has the representation "11...1100...00".
3175          */
3176         bgep->pagemask = dvma_pagesize(devinfo);
3177         ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask));
3178         bgep->pagemask = -bgep->pagemask;
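             /*
              * Worked example (hypothetical values): if dvma_pagesize()
              * returned 0x2000 (an 8KB IOMMU page), pagemask becomes
              * -0x2000 == 0xFFFFFFFFFFFFE000.  A message buffer then
              * crosses a page boundary iff
              * (start & pagemask) != ((start + length - 1) & pagemask),
              * i.e. its first and last bytes lie in different IOMMU pages.
              */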
3179 
3180         /*
3181          * Map config space registers
3182          * Read chip ID & set up config space command register(s)
3183          *
3184          * Note: this leaves the chip accessible by Memory Space
3185          * accesses, but with interrupts and Bus Mastering off.
3186          * This should ensure that nothing untoward will happen
3187          * if it has been left active by the (net-)bootloader.
3188          * We'll re-enable Bus Mastering once we've reset the chip,
3189          * and allow interrupts only when everything else is set up.
3190          */
3191         err = pci_config_setup(devinfo, &bgep->cfg_handle);
3192 #ifdef BGE_IPMI_ASF
3193 #ifdef __sparc
3194         /*
3195          * We need to determine the type of chipset in order to access
3196          * some configuration registers. (This information is used by
3197          * bge_ind_put32, bge_ind_get32 and bge_nic_read32.)
3198          */
3199         bgep->chipid.device = pci_config_get16(bgep->cfg_handle,
3200             PCI_CONF_DEVID);
3201         value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM);
3202         value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME);
3203         pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16);
3204         mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS |
3205             MHCR_ENABLE_TAGGED_STATUS_MODE |
3206             MHCR_MASK_INTERRUPT_MODE |
3207             MHCR_MASK_PCI_INT_OUTPUT |
3208             MHCR_CLEAR_INTERRUPT_INTA |
3209             MHCR_ENABLE_ENDIAN_WORD_SWAP |
3210             MHCR_ENABLE_ENDIAN_BYTE_SWAP;
3211         /*
3212          * For some chipsets (e.g., BCM5718), if MHCR_ENABLE_ENDIAN_BYTE_SWAP
3213          * has been set in PCI_CONF_COMM already, we need to write the
3214          * byte-swapped value to it. So we just write zero first for simplicity.
3215          */
3216         if (DEVICE_5717_SERIES_CHIPSETS(bgep))
3217                 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, 0);
3218 #else
3219         mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS |
3220             MHCR_ENABLE_TAGGED_STATUS_MODE |
3221             MHCR_MASK_INTERRUPT_MODE |
3222             MHCR_MASK_PCI_INT_OUTPUT |
3223             MHCR_CLEAR_INTERRUPT_INTA;
3224 #endif
3225         pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue);
3226         bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG,
3227             bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) |
3228             MEMORY_ARBITER_ENABLE);
3229         if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) {
3230                 bgep->asf_wordswapped = B_TRUE;
3231         } else {
3232                 bgep->asf_wordswapped = B_FALSE;
3233         }
3234         bge_asf_get_config(bgep);
3235 #endif
3236         if (err != DDI_SUCCESS) {
3237                 bge_problem(bgep, "pci_config_setup() failed");
3238                 goto attach_fail;
3239         }
3240         bgep->progress |= PROGRESS_CFG;
3241         cidp = &bgep->chipid;
3242         bzero(cidp, sizeof (*cidp));
3243         bge_chip_cfg_init(bgep, cidp, B_FALSE);
3244         if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
3245                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3246                 goto attach_fail;
3247         }
3248 
3249 #ifdef BGE_IPMI_ASF
3250         if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
3251             DEVICE_5714_SERIES_CHIPSETS(bgep)) {
3252                 bgep->asf_newhandshake = B_TRUE;
3253         } else {
3254                 bgep->asf_newhandshake = B_FALSE;
3255         }
3256 #endif
3257 
3258         /*
3259          * Update those parts of the chip ID derived from volatile
3260          * registers with the values seen by OBP (in case the chip
3261          * has been reset externally and therefore lost them).
3262          */
3263         cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3264             DDI_PROP_DONTPASS, subven_propname, cidp->subven);
3265         cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3266             DDI_PROP_DONTPASS, subdev_propname, cidp->subdev);
3267         cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3268             DDI_PROP_DONTPASS, clsize_propname, cidp->clsize);
3269         cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3270             DDI_PROP_DONTPASS, latency_propname, cidp->latency);
3271         cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3272             DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings);
3273         cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3274             DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings);
3275 
3276         cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3277             DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU);
3278         if ((cidp->default_mtu < BGE_DEFAULT_MTU) ||
3279             (cidp->default_mtu > BGE_MAXIMUM_MTU)) {
3280                 cidp->default_mtu = BGE_DEFAULT_MTU;
3281         }
3282 
3283         /*
3284          * Map operating registers
3285          */
3286         err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER,
3287             &regs, 0, 0, &bge_reg_accattr, &bgep->io_handle);
3288         if (err != DDI_SUCCESS) {
3289                 bge_problem(bgep, "ddi_regs_map_setup() failed");
3290                 goto attach_fail;
3291         }
3292         bgep->io_regs = regs;
3293         bgep->progress |= PROGRESS_REGS;
3294 
3295         /*
3296          * Characterise the device, so we know its requirements.
3297          * Then allocate the appropriate TX and RX descriptors & buffers.
3298          */
3299         if (bge_chip_id_init(bgep) == EIO) {
3300                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3301                 goto attach_fail;
3302         }
3303 
3304         err = bge_alloc_bufs(bgep);
3305         if (err != DDI_SUCCESS) {
3306                 bge_problem(bgep, "DMA buffer allocation failed");
3307                 goto attach_fail;
3308         }
3309         bgep->progress |= PROGRESS_BUFS;
3310 
3311         /*
3312          * Add the softint handlers:
3313          *
3314          * Both of these handlers are used to avoid restrictions on the
3315          * context and/or mutexes required for some operations.  In
3316          * particular, the hardware interrupt handler and its subfunctions
3317          * can detect a number of conditions that we don't want to handle
3318          * in that context or with that set of mutexes held.  So, these
3319          * softints are triggered instead:
3320          *
3321          * the <resched> softint is triggered if we have previously
3322          * had to refuse to send a packet because of resource shortage
3323          * (we've run out of transmit buffers), but the send completion
3324          * interrupt handler has now detected that more buffers have
3325          * become available.
3326          *
3327          * the <factotum> is triggered if the h/w interrupt handler
3328          * sees the <link state changed> or <error> bits in the status
3329          * block.  It's also triggered periodically to poll the link
3330          * state, just in case we aren't getting link status change
3331          * interrupts ...
3332          */
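             /*
              * (For reference, and as the standard DDI softint model rather
              * than anything shown in this file: softints registered with
              * ddi_add_softintr() below are expected to be raised from the
              * hardware interrupt path via ddi_trigger_softintr() on the
              * corresponding drain_id/factotum_id.)
              */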
3333         err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id,
3334             NULL, NULL, bge_send_drain, (caddr_t)bgep);
3335         if (err != DDI_SUCCESS) {
3336                 bge_problem(bgep, "ddi_add_softintr() failed");
3337                 goto attach_fail;
3338         }
3339         bgep->progress |= PROGRESS_RESCHED;
3340         err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id,
3341             NULL, NULL, bge_chip_factotum, (caddr_t)bgep);
3342         if (err != DDI_SUCCESS) {
3343                 bge_problem(bgep, "ddi_add_softintr() failed");
3344                 goto attach_fail;
3345         }
3346         bgep->progress |= PROGRESS_FACTOTUM;
3347 
3348         /* Get supported interrupt types */
3349         if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) {
3350                 bge_error(bgep, "ddi_intr_get_supported_types failed\n");
3351 
3352                 goto attach_fail;
3353         }
3354 
3355         BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x",
3356             bgep->ifname, intr_types));
3357 
3358         if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) {
3359                 if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
3360                         bge_error(bgep, "MSI registration failed, "
3361                             "trying FIXED interrupt type\n");
3362                 } else {
3363                         BGE_DEBUG(("%s: Using MSI interrupt type",
3364                             bgep->ifname));
3365                         bgep->intr_type = DDI_INTR_TYPE_MSI;
3366                         bgep->progress |= PROGRESS_HWINT;
3367                 }
3368         }
3369 
3370         if (!(bgep->progress & PROGRESS_HWINT) &&
3371             (intr_types & DDI_INTR_TYPE_FIXED)) {
3372                 if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
3373                         bge_error(bgep, "FIXED interrupt "
3374                             "registration failed\n");
3375                         goto attach_fail;
3376                 }
3377 
3378                 BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname));
3379 
3380                 bgep->intr_type = DDI_INTR_TYPE_FIXED;
3381                 bgep->progress |= PROGRESS_HWINT;
3382         }
3383 
3384         if (!(bgep->progress & PROGRESS_HWINT)) {
3385                 bge_error(bgep, "No interrupts registered\n");
3386                 goto attach_fail;
3387         }
3388 
3389         /*
3390          * Note that interrupts are not enabled yet, because the mutex
3391          * locks have not been initialized; initialize them now.
3392          */
3393         mutex_init(bgep->genlock, NULL, MUTEX_DRIVER,
3394             DDI_INTR_PRI(bgep->intr_pri));
3395         mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER,
3396             DDI_INTR_PRI(bgep->intr_pri));
3397         rw_init(bgep->errlock, NULL, RW_DRIVER,
3398             DDI_INTR_PRI(bgep->intr_pri));
3399 
3400         /*
3401          * Initialize rings.
3402          */
3403         bge_init_rings(bgep);
3404 
3405         /*
3406          * Now that mutex locks are initialized, enable interrupts.
3407          */
3408         bge_intr_enable(bgep);
3409         bgep->progress |= PROGRESS_INTR;
3410 
3411         /*
3412          * Initialise link state variables.
3413          * Stop, reset & reinitialise the chip.
3414          * Initialise the (internal) PHY.
3415          */
3416         bgep->link_state = LINK_STATE_UNKNOWN;
3417 
3418         mutex_enter(bgep->genlock);
3419 
3420         /*
3421          * Reset chip & rings to initial state; also reset address
3422          * filtering, promiscuity, loopback mode.
3423          */
3424 #ifdef BGE_IPMI_ASF
3425 #ifdef BGE_NETCONSOLE
3426         if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
3427 #else
3428         if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) {
3429 #endif
3430 #else
3431         if (bge_reset(bgep) != DDI_SUCCESS) {
3432 #endif
3433                 (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
3434                 (void) bge_check_acc_handle(bgep, bgep->io_handle);
3435                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3436                 mutex_exit(bgep->genlock);
3437                 goto attach_fail;
3438         }
3439 
3440 #ifdef BGE_IPMI_ASF
3441         if (bgep->asf_enabled) {
3442                 bgep->asf_status = ASF_STAT_RUN_INIT;
3443         }
3444 #endif
3445 
3446         bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash));
3447         bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs));
3448         bgep->promisc = B_FALSE;
3449         bgep->param_loop_mode = BGE_LOOP_NONE;
3450         if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
3451                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3452                 mutex_exit(bgep->genlock);
3453                 goto attach_fail;
3454         }
3455         if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
3456                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3457                 mutex_exit(bgep->genlock);
3458                 goto attach_fail;
3459         }
3460 
3461         mutex_exit(bgep->genlock);
3462 
3463         if (bge_phys_init(bgep) == EIO) {
3464                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3465                 goto attach_fail;
3466         }
3467         bgep->progress |= PROGRESS_PHY;
3468 
3469         /*
3470          * initialize NDD-tweakable parameters
3471          */
3472         if (bge_nd_init(bgep)) {
3473                 bge_problem(bgep, "bge_nd_init() failed");
3474                 goto attach_fail;
3475         }
3476         bgep->progress |= PROGRESS_NDD;
3477 
3478         /*
3479          * Create & initialise named kstats
3480          */
3481         bge_init_kstats(bgep, instance);
3482         bgep->progress |= PROGRESS_KSTATS;
3483 
3484         /*
3485          * Determine whether to override the chip's own MAC address
3486          */
3487         bge_find_mac_address(bgep, cidp);
3488 
3489         bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX;
3490         bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX;
3491 
3492         if ((macp = mac_alloc(MAC_VERSION)) == NULL)
3493                 goto attach_fail;
3494         macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
3495         macp->m_driver = bgep;
3496         macp->m_dip = devinfo;
3497         macp->m_src_addr = cidp->vendor_addr.addr;
3498         macp->m_callbacks = &bge_m_callbacks;
3499         macp->m_min_sdu = 0;
3500         macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header);
3501         macp->m_margin = VLAN_TAGSZ;
3502         macp->m_priv_props = bge_priv_prop;
3503         macp->m_v12n = MAC_VIRT_LEVEL1;
3504 
3505         /*
3506          * Finally, we're ready to register ourselves with the MAC layer
3507          * interface; if this succeeds, we're all ready to start()
3508          */
3509         err = mac_register(macp, &bgep->mh);
3510         mac_free(macp);
3511         if (err != 0)
3512                 goto attach_fail;
3513 
3514         mac_link_update(bgep->mh, LINK_STATE_UNKNOWN);
3515 
3516         /*
3517          * Register a periodic handler.
3518          * bge_chip_cyclic() is invoked in kernel context.
3519          */
3520         bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep,
3521             BGE_CYCLIC_PERIOD, DDI_IPL_0);
3522 
3523         bgep->progress |= PROGRESS_READY;
3524         ASSERT(bgep->bge_guard == BGE_GUARD);
3525 #ifdef BGE_IPMI_ASF
3526 #ifdef BGE_NETCONSOLE
3527         if (bgep->asf_enabled) {
3528                 mutex_enter(bgep->genlock);
3529                 retval = bge_chip_start(bgep, B_TRUE);
3530                 mutex_exit(bgep->genlock);
3531                 if (retval != DDI_SUCCESS)
3532                         goto attach_fail;
3533         }
3534 #endif
3535 #endif
3536 
3537         ddi_report_dev(devinfo);
3538 
3539         return (DDI_SUCCESS);
3540 
3541 attach_fail:
3542 #ifdef BGE_IPMI_ASF
3543         bge_unattach(bgep, ASF_MODE_SHUTDOWN);
3544 #else
3545         bge_unattach(bgep);
3546 #endif
3547         return (DDI_FAILURE);
3548 }
3549 
3550 /*
3551  *      bge_suspend() -- suspend transmit/receive for powerdown
3552  */
3553 static int
3554 bge_suspend(bge_t *bgep)
3555 {
3556         /*
3557          * Stop processing and idle (powerdown) the PHY ...
3558          */
3559         mutex_enter(bgep->genlock);
3560 #ifdef BGE_IPMI_ASF
3561         /*
3562          * Power management is not currently supported by the BGE
3563          * driver. If you want to implement it, add the ASF/IPMI
3564          * related code here.
3565          */
3566 #endif
3567         bge_stop(bgep);
3568         if (bge_phys_idle(bgep) != DDI_SUCCESS) {
3569                 (void) bge_check_acc_handle(bgep, bgep->io_handle);
3570                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
3571                 mutex_exit(bgep->genlock);
3572                 return (DDI_FAILURE);
3573         }
3574         if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
3575                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
3576                 mutex_exit(bgep->genlock);
3577                 return (DDI_FAILURE);
3578         }
3579         mutex_exit(bgep->genlock);
3580 
3581         return (DDI_SUCCESS);
3582 }
3583 
3584 /*
3585  * quiesce(9E) entry point.
3586  *
3587  * This function is called when the system is single-threaded at high
3588  * PIL with preemption disabled. Therefore, this function must not
3589  * block.
3590  *
3591  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
3592  * DDI_FAILURE indicates an error condition and should almost never happen.
3593  */
3594 #ifdef  __sparc
3595 #define bge_quiesce     ddi_quiesce_not_supported
3596 #else
3597 static int
3598 bge_quiesce(dev_info_t *devinfo)
3599 {
3600         bge_t *bgep = ddi_get_driver_private(devinfo);
3601 
3602         if (bgep == NULL)
3603                 return (DDI_FAILURE);
3604 
3605         if (bgep->intr_type == DDI_INTR_TYPE_FIXED) {
3606                 bge_reg_set32(bgep, PCI_CONF_BGE_MHCR,
3607                     MHCR_MASK_PCI_INT_OUTPUT);
3608         } else {
3609                 bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE);
3610         }
3611 
3612         /* Stop the chip */
3613         bge_chip_stop_nonblocking(bgep);
3614 
3615         return (DDI_SUCCESS);
3616 }
3617 #endif
3618 
3619 /*
3620  * detach(9E) -- Detach a device from the system
3621  */
3622 static int
3623 bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
3624 {
3625         bge_t *bgep;
3626 #ifdef BGE_IPMI_ASF
3627         uint_t asf_mode;
3628         asf_mode = ASF_MODE_NONE;
3629 #endif
3630 
3631         BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd));
3632 
3633         bgep = ddi_get_driver_private(devinfo);
3634 
3635         switch (cmd) {
3636         default:
3637                 return (DDI_FAILURE);
3638 
3639         case DDI_SUSPEND:
3640                 return (bge_suspend(bgep));
3641 
3642         case DDI_DETACH:
3643                 break;
3644         }
3645 
3646 #ifdef BGE_IPMI_ASF
3647         mutex_enter(bgep->genlock);
3648         if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) ||
3649             (bgep->asf_status == ASF_STAT_RUN_INIT))) {
3650 
3651                 bge_asf_update_status(bgep);
3652                 if (bgep->asf_status == ASF_STAT_RUN) {
3653                         bge_asf_stop_timer(bgep);
3654                 }
3655                 bgep->asf_status = ASF_STAT_STOP;
3656 
3657                 bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);
3658 
3659                 if (bgep->asf_pseudostop) {
3660                         bge_chip_stop(bgep, B_FALSE);
3661                         bgep->bge_mac_state = BGE_MAC_STOPPED;
3662                         bgep->asf_pseudostop = B_FALSE;
3663                 }
3664 
3665                 asf_mode = ASF_MODE_POST_SHUTDOWN;
3666 
3667                 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
3668                         ddi_fm_service_impact(bgep->devinfo,
3669                             DDI_SERVICE_UNAFFECTED);
3670                 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
3671                         ddi_fm_service_impact(bgep->devinfo,
3672                             DDI_SERVICE_UNAFFECTED);
3673         }
3674         mutex_exit(bgep->genlock);
3675 #endif
3676 
3677         /*
3678          * Unregister from the GLD subsystem.  This can fail, in
3679          * particular if there are DLPI style-2 streams still open -
3680          * in which case we just return failure without shutting
3681          * down chip operations.
3682          */
3683         if (mac_unregister(bgep->mh) != 0)
3684                 return (DDI_FAILURE);
3685 
3686         /*
3687          * All activity stopped, so we can clean up & exit
3688          */
3689 #ifdef BGE_IPMI_ASF
3690         bge_unattach(bgep, asf_mode);
3691 #else
3692         bge_unattach(bgep);
3693 #endif
3694         return (DDI_SUCCESS);
3695 }
3696 
3697 
3698 /*
3699  * ========== Module Loading Data & Entry Points ==========
3700  */
3701 
3702 #undef  BGE_DBG
3703 #define BGE_DBG         BGE_DBG_INIT    /* debug flag for this code     */
3704 
3705 DDI_DEFINE_STREAM_OPS(bge_dev_ops,
3706         nulldev,        /* identify */
3707         nulldev,        /* probe */
3708         bge_attach,     /* attach */
3709         bge_detach,     /* detach */
3710         nodev,          /* reset */
3711         NULL,           /* cb_ops */
3712         D_MP,           /* bus_ops */
3713         NULL,           /* power */
3714         bge_quiesce     /* quiesce */
3715 );
3716 
3717 static struct modldrv bge_modldrv = {
3718         &mod_driverops,             /* Type of module.  This one is a driver */
3719         bge_ident,              /* short description */
3720         &bge_dev_ops                /* driver specific ops */
3721 };
3722 
3723 static struct modlinkage modlinkage = {
3724         MODREV_1, (void *)&bge_modldrv, NULL
3725 };
3726 
3727 
3728 int
3729 _info(struct modinfo *modinfop)
3730 {
3731         return (mod_info(&modlinkage, modinfop));
3732 }
3733 
3734 int
3735 _init(void)
3736 {
3737         int status;
3738 
3739         mac_init_ops(&bge_dev_ops, "bge");
3740         status = mod_install(&modlinkage);
3741         if (status == DDI_SUCCESS)
3742                 mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL);
3743         else
3744                 mac_fini_ops(&bge_dev_ops);
3745         return (status);
3746 }
3747 
3748 int
3749 _fini(void)
3750 {
3751         int status;
3752 
3753         status = mod_remove(&modlinkage);
3754         if (status == DDI_SUCCESS) {
3755                 mac_fini_ops(&bge_dev_ops);
3756                 mutex_destroy(bge_log_mutex);
3757         }
3758         return (status);
3759 }
3760 
3761 
3762 /*
3763  * bge_add_intrs:
3764  *
3765  * Register FIXED or MSI interrupts.
3766  */
3767 static int
3768 bge_add_intrs(bge_t *bgep, int intr_type)
3769 {
3770         dev_info_t      *dip = bgep->devinfo;
3771         int             avail, actual, intr_size, count = 0;
3772         int             i, flag, ret;
3773 
3774         BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type));
3775 
3776         /* Get number of interrupts */
3777         ret = ddi_intr_get_nintrs(dip, intr_type, &count);
3778         if ((ret != DDI_SUCCESS) || (count == 0)) {
3779                 bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, "
3780                     "count: %d", ret, count);
3781 
3782                 return (DDI_FAILURE);
3783         }
3784 
3785         /* Get number of available interrupts */
3786         ret = ddi_intr_get_navail(dip, intr_type, &avail);
3787         if ((ret != DDI_SUCCESS) || (avail == 0)) {
3788                 bge_error(bgep, "ddi_intr_get_navail() failure, "
3789                     "ret: %d, avail: %d\n", ret, avail);
3790 
3791                 return (DDI_FAILURE);
3792         }
3793 
3794         if (avail < count) {
3795                 BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d",
3796                     bgep->ifname, count, avail));
3797         }
3798 
3799         /*
3800          * BGE hardware generates only a single MSI even though it claims
3801          * to support multiple MSIs, so hard-code the MSI count to 1.
3802          */
3803         if (intr_type == DDI_INTR_TYPE_MSI) {
3804                 count = 1;
3805                 flag = DDI_INTR_ALLOC_STRICT;
3806         } else {
3807                 flag = DDI_INTR_ALLOC_NORMAL;
3808         }
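             /*
              * Note on the flags above (general ddi_intr_alloc(9F)
              * behaviour): DDI_INTR_ALLOC_STRICT makes the allocation fail
              * unless exactly `count' vectors can be granted, while
              * DDI_INTR_ALLOC_NORMAL allows fewer than requested.
              */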
3809 
3810         /* Allocate an array of interrupt handles */
3811         intr_size = count * sizeof (ddi_intr_handle_t);
3812         bgep->htable = kmem_alloc(intr_size, KM_SLEEP);
3813 
3814         /* Call ddi_intr_alloc() */
3815         ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0,
3816             count, &actual, flag);
3817 
3818         if ((ret != DDI_SUCCESS) || (actual == 0)) {
3819                 bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret);
3820 
3821                 kmem_free(bgep->htable, intr_size);
3822                 return (DDI_FAILURE);
3823         }
3824 
3825         if (actual < count) {
3826                 BGE_DEBUG(("%s: Requested: %d, Received: %d",
3827                     bgep->ifname, count, actual));
3828         }
3829 
3830         bgep->intr_cnt = actual;
3831 
3832         /*
3833          * Get the priority of the first vector; assume the rest are the same.
3834          */
3835         if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) !=
3836             DDI_SUCCESS) {
3837                 bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret);
3838 
3839                 /* Free already allocated intr */
3840                 for (i = 0; i < actual; i++) {
3841                         (void) ddi_intr_free(bgep->htable[i]);
3842                 }
3843 
3844                 kmem_free(bgep->htable, intr_size);
3845                 return (DDI_FAILURE);
3846         }
3847 
3848         /* Call ddi_intr_add_handler() */
3849         for (i = 0; i < actual; i++) {
3850                 if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr,
3851                     (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
3852                         bge_error(bgep, "ddi_intr_add_handler() "
3853                             "failed %d\n", ret);
3854 
3855                         /* Remove handlers already added, then free all */
                             while (--i >= 0)
                                     (void) ddi_intr_remove_handler(
                                         bgep->htable[i]);
3856                         for (i = 0; i < actual; i++) {
3857                                 (void) ddi_intr_free(bgep->htable[i]);
3858                         }
3859 
3860                         kmem_free(bgep->htable, intr_size);
3861                         return (DDI_FAILURE);
3862                 }
3863         }
3864 
3865         if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap))
3866             != DDI_SUCCESS) {
3867                 bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret);
3868 
3869                 for (i = 0; i < actual; i++) {
3870                         (void) ddi_intr_remove_handler(bgep->htable[i]);
3871                         (void) ddi_intr_free(bgep->htable[i]);
3872                 }
3873 
3874                 kmem_free(bgep->htable, intr_size);
3875                 return (DDI_FAILURE);
3876         }
3877 
3878         return (DDI_SUCCESS);
3879 }
3880 
3881 /*
3882  * bge_rem_intrs:
3883  *
3884  * Unregister FIXED or MSI interrupts.
3885  */
3886 static void
3887 bge_rem_intrs(bge_t *bgep)
3888 {
3889         int     i;
3890 
3891         BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep));
3892 
3893         /* Call ddi_intr_remove_handler() */
3894         for (i = 0; i < bgep->intr_cnt; i++) {
3895                 (void) ddi_intr_remove_handler(bgep->htable[i]);
3896                 (void) ddi_intr_free(bgep->htable[i]);
3897         }
3898 
3899         kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t));
3900 }
3901 
3902 
3903 void
3904 bge_intr_enable(bge_t *bgep)
3905 {
3906         int i;
3907 
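             /*
              * DDI_INTR_FLAG_BLOCK (as reported by ddi_intr_get_cap() in
              * bge_add_intrs()) means the vectors can only be enabled and
              * disabled as a group, hence the block-enable path below.
              */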
3908         if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
3909                 /* Call ddi_intr_block_enable() for MSI interrupts */
3910                 (void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt);
3911         } else {
3912                 /* Call ddi_intr_enable for MSI or FIXED interrupts */
3913                 for (i = 0; i < bgep->intr_cnt; i++) {
3914                         (void) ddi_intr_enable(bgep->htable[i]);
3915                 }
3916         }
3917 }
3918 
3919 
3920 void
3921 bge_intr_disable(bge_t *bgep)
3922 {
3923         int i;
3924 
3925         if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
3926                 /* Call ddi_intr_block_disable() */
3927                 (void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt);
3928         } else {
3929                 for (i = 0; i < bgep->intr_cnt; i++) {
3930                         (void) ddi_intr_disable(bgep->htable[i]);
3931                 }
3932         }
3933 }
3934 
3935 int
3936 bge_reprogram(bge_t *bgep)
3937 {
3938         int status = 0;
3939 
3940         ASSERT(mutex_owned(bgep->genlock));
3941 
3942         if (bge_phys_update(bgep) != DDI_SUCCESS) {
3943                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
3944                 status = IOC_INVAL;
3945         }
3946 #ifdef BGE_IPMI_ASF
3947         if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
3948 #else
3949         if (bge_chip_sync(bgep) == DDI_FAILURE) {
3950 #endif
3951                 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
3952                 status = IOC_INVAL;
3953         }
3954         if (bgep->intr_type == DDI_INTR_TYPE_MSI)
3955                 bge_chip_msi_trig(bgep);
3956         return (status);
3957 }