/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * This file is part of the Chelsio T4 support code.
 *
 * Copyright (C) 2010-2013 Chelsio Communications.  All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/pci.h>
#include <sys/atomic.h>
#include <sys/types.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/open.h>
#include <sys/cred.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/queue.h>

#include "version.h"
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "firmware/t4_fw.h"
#include "firmware/t4_cfg.h"
#include "firmware/t5_fw.h"
#include "firmware/t5_cfg.h"
#include "firmware/t6_fw.h"
#include "firmware/t6_cfg.h"
#include "t4_l2t.h"

static int t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp);
static int t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp);
static int t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp,
    int *rp);
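
/*
 * Character device (cb_ops(9S)) entry points for the T4 nexus control node.
 * Only open, close, and ioctl are implemented; everything else is nodev.
 */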
struct cb_ops t4_cb_ops = {
        .cb_open =              t4_cb_open,
        .cb_close =             t4_cb_close,
        .cb_strategy =          nodev,
        .cb_print =             nodev,
        .cb_dump =              nodev,
        .cb_read =              nodev,
        .cb_write =             nodev,
        .cb_ioctl =             t4_cb_ioctl,
        .cb_devmap =            nodev,
        .cb_mmap =              nodev,
        .cb_segmap =            nodev,
        .cb_chpoll =            nochpoll,
        .cb_prop_op =           ddi_prop_op,
        .cb_flag =              D_MP,
        .cb_rev =               CB_REV,
        .cb_aread =             nodev,
        .cb_awrite =            nodev
};

static int t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
    void *arg, void *result);
static int t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **cdipp);
static int t4_bus_unconfig(dev_info_t *dip, uint_t flags,
    ddi_bus_config_op_t op, void *arg);
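
/*
 * Bus operations (bus_ops(9S)) of the nexus.  The child port nodes are
 * named, configured, and unconfigured through these.
 */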
struct bus_ops t4_bus_ops = {
        .busops_rev =           BUSO_REV,
        .bus_ctl =              t4_bus_ctl,
        .bus_prop_op =          ddi_bus_prop_op,
        .bus_config =           t4_bus_config,
        .bus_unconfig =         t4_bus_unconfig,
};

static int t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **rp);
static int t4_devo_probe(dev_info_t *dip);
static int t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int t4_devo_quiesce(dev_info_t *dip);
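
/*
 * Autoconfiguration (dev_ops(9S)) entry points: probe, attach, and detach,
 * plus getinfo and quiesce.
 */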
struct dev_ops t4_dev_ops = {
        .devo_rev =             DEVO_REV,
        .devo_getinfo =         t4_devo_getinfo,
        .devo_identify =        nulldev,
        .devo_probe =           t4_devo_probe,
        .devo_attach =          t4_devo_attach,
        .devo_detach =          t4_devo_detach,
        .devo_reset =           nodev,
        .devo_cb_ops =          &t4_cb_ops,
        .devo_bus_ops =         &t4_bus_ops,
        .devo_quiesce =         t4_devo_quiesce,
};

static struct modldrv modldrv = {
        .drv_modops =           &mod_driverops,
        .drv_linkinfo =         "Chelsio T4 nexus " DRV_VERSION,
        .drv_dev_ops =          &t4_dev_ops
};

static struct modlinkage modlinkage = {
        .ml_rev =               MODREV_1,
        .ml_linkage =           {&modldrv, NULL},
};

void *t4_list;

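/*
 * Interrupt and queue sizing for an adapter, filled in by
 * cfg_itype_and_nqueues() from the port count and the interrupt vectors
 * that can actually be allocated.
 */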
struct intrs_and_queues {
        int intr_type;          /* DDI_INTR_TYPE_* */
        int nirq;               /* Number of vectors */
        int intr_fwd;           /* Interrupts forwarded */
        int ntxq10g;            /* # of NIC txq's for each 10G port */
        int nrxq10g;            /* # of NIC rxq's for each 10G port */
        int ntxq1g;             /* # of NIC txq's for each 1G port */
        int nrxq1g;             /* # of NIC rxq's for each 1G port */
#ifdef TCP_OFFLOAD_ENABLE
        int nofldtxq10g;        /* # of TOE txq's for each 10G port */
        int nofldrxq10g;        /* # of TOE rxq's for each 10G port */
        int nofldtxq1g;         /* # of TOE txq's for each 1G port */
        int nofldrxq1g;         /* # of TOE rxq's for each 1G port */
#endif
};

struct fw_info fi[3];

static int cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss,
    mblk_t *m);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
int t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h);
static unsigned int getpf(struct adapter *sc);
static int prep_firmware(struct adapter *sc);
static int upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma);
static int partition_resources(struct adapter *sc);
static int adap__pre_init_tweaks(struct adapter *sc);
static int get_params__pre_init(struct adapter *sc);
static int get_params__post_init(struct adapter *sc);
static int set_params__post_init(struct adapter *);
static void setup_memwin(struct adapter *sc);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
uint32_t position_memwin(struct adapter *, int, uint32_t);
static int prop_lookup_int_array(struct adapter *sc, char *name, int *data,
    uint_t count);
static int init_driver_props(struct adapter *sc, struct driver_properties *p);
static int remove_extra_props(struct adapter *sc, int n10g, int n1g);
static int cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq);
static int add_child_node(struct adapter *sc, int idx);
static int remove_child_node(struct adapter *sc, int idx);
static kstat_t *setup_kstats(struct adapter *sc);
static kstat_t *setup_wc_kstats(struct adapter *);
static int update_wc_kstats(kstat_t *, int);
#ifdef TCP_OFFLOAD_ENABLE
static int toe_capability(struct port_info *pi, int enable);
static int activate_uld(struct adapter *sc, int id, struct uld_softc *usc);
static int deactivate_uld(struct uld_softc *usc);
#endif
static kmutex_t t4_adapter_list_lock;
static SLIST_HEAD(, adapter) t4_adapter_list;
#ifdef TCP_OFFLOAD_ENABLE
static kmutex_t t4_uld_list_lock;
static SLIST_HEAD(, uld_info) t4_uld_list;
#endif

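/*
 * Loadable module entry points; see _init(9E), _fini(9E), and _info(9E).
 */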
int
_init(void)
{
        int rc;

        rc = ddi_soft_state_init(&t4_list, sizeof (struct adapter), 0);
        if (rc != 0)
                return (rc);

        /*
         * The adapter list and its lock must be ready before the module is
         * installed and attach can run.
         */
        mutex_init(&t4_adapter_list_lock, NULL, MUTEX_DRIVER, NULL);
        SLIST_INIT(&t4_adapter_list);

#ifdef TCP_OFFLOAD_ENABLE
        mutex_init(&t4_uld_list_lock, NULL, MUTEX_DRIVER, NULL);
        SLIST_INIT(&t4_uld_list);
#endif

        rc = mod_install(&modlinkage);
        if (rc != 0)
                ddi_soft_state_fini(&t4_list);

        return (rc);
}

int
_fini(void)
{
        int rc;

        rc = mod_remove(&modlinkage);
        if (rc != 0)
                return (rc);

        ddi_soft_state_fini(&t4_list);
        return (0);
}

int
_info(struct modinfo *mi)
{
        return (mod_info(&modlinkage, mi));
}

/* ARGSUSED */
static int
t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **rp)
{
        struct adapter *sc;
        minor_t minor;

        minor = getminor((dev_t)arg);   /* same as instance# in our case */

        if (cmd == DDI_INFO_DEVT2DEVINFO) {
                sc = ddi_get_soft_state(t4_list, minor);
                if (sc == NULL)
                        return (DDI_FAILURE);

                ASSERT(sc->dev == (dev_t)arg);
                *rp = (void *)sc->dip;
        } else if (cmd == DDI_INFO_DEVT2INSTANCE)
                *rp = (void *)(unsigned long)minor;
        else
                ASSERT(0);

        return (DDI_SUCCESS);
}

static int
t4_devo_probe(dev_info_t *dip)
{
        int rc, id, *reg;
        uint_t n, pf;

        id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "device-id", 0xffff);
        if (id == 0xffff)
                return (DDI_PROBE_DONTCARE);

        rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "reg", &reg, &n);
        if (rc != DDI_SUCCESS)
                return (DDI_PROBE_DONTCARE);

        pf = PCI_REG_FUNC_G(reg[0]);
        ddi_prop_free(reg);

        /* Prevent driver attachment on any PF except 0 on the FPGA */
        if (id == 0xa000 && pf != 0)
                return (DDI_PROBE_FAILURE);

        return (DDI_PROBE_DONTCARE);
}

static int
t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
        struct adapter *sc = NULL;
        struct sge *s;
        int i, instance, rc = DDI_SUCCESS, rqidx, tqidx, q;
        int irq = 0, nxg, n100g, n40g, n25g, n10g, n1g;
#ifdef TCP_OFFLOAD_ENABLE
        int ofld_rqidx, ofld_tqidx;
#endif
        char name[16];
        struct driver_properties *prp;
        struct intrs_and_queues iaq;
        ddi_device_acc_attr_t da = {
                .devacc_attr_version = DDI_DEVICE_ATTR_V0,
                .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
                .devacc_attr_dataorder = DDI_UNORDERED_OK_ACC
        };
        ddi_device_acc_attr_t da1 = {
                .devacc_attr_version = DDI_DEVICE_ATTR_V0,
                .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
                .devacc_attr_dataorder = DDI_MERGING_OK_ACC
        };

        if (cmd != DDI_ATTACH)
                return (DDI_FAILURE);

        /*
         * Allocate space for soft state.
         */
        instance = ddi_get_instance(dip);
        rc = ddi_soft_state_zalloc(t4_list, instance);
        if (rc != DDI_SUCCESS) {
                cxgb_printf(dip, CE_WARN,
                    "failed to allocate soft state: %d", rc);
                return (DDI_FAILURE);
        }

        sc = ddi_get_soft_state(t4_list, instance);
        sc->dip = dip;
        sc->dev = makedevice(ddi_driver_major(dip), instance);
        mutex_init(&sc->lock, NULL, MUTEX_DRIVER, NULL);
        cv_init(&sc->cv, NULL, CV_DRIVER, NULL);
        mutex_init(&sc->sfl_lock, NULL, MUTEX_DRIVER, NULL);

        mutex_enter(&t4_adapter_list_lock);
        SLIST_INSERT_HEAD(&t4_adapter_list, sc, link);
        mutex_exit(&t4_adapter_list_lock);

        sc->pf = getpf(sc);
        if (sc->pf > 8) {
                rc = EINVAL;
                cxgb_printf(dip, CE_WARN,
                    "failed to determine PCI PF# of device");
                goto done;
        }
        sc->mbox = sc->pf;

        /* Initialize the driver properties */
        prp = &sc->props;
        (void) init_driver_props(sc, prp);

        /*
         * Enable access to the PCI config space.
         */
        rc = pci_config_setup(dip, &sc->pci_regh);
        if (rc != DDI_SUCCESS) {
                cxgb_printf(dip, CE_WARN,
                    "failed to enable PCI config space access: %d", rc);
                goto done;
        }

        /* TODO: Set max read request to 4K */

        /*
         * Enable MMIO access.
         */
        rc = ddi_regs_map_setup(dip, 1, &sc->regp, 0, 0, &da, &sc->regh);
        if (rc != DDI_SUCCESS) {
                cxgb_printf(dip, CE_WARN,
                    "failed to map device registers: %d", rc);
                goto done;
        }

        (void) memset(sc->chan_map, 0xff, sizeof (sc->chan_map));

        /*
         * Initialize cpl handler.
         */
        for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++) {
                sc->cpl_handler[i] = cpl_not_handled;
        }

        for (i = 0; i < ARRAY_SIZE(sc->fw_msg_handler); i++) {
                sc->fw_msg_handler[i] = fw_msg_not_handled;
        }

        /*
         * Prepare the adapter for operation.
         */
        rc = -t4_prep_adapter(sc, false);
        if (rc != 0) {
                cxgb_printf(dip, CE_WARN, "failed to prepare adapter: %d", rc);
                goto done;
        }

        /*
         * Enable BAR1 access.
         */
        sc->doorbells |= DOORBELL_KDB;
        rc = ddi_regs_map_setup(dip, 2, &sc->reg1p, 0, 0, &da1, &sc->reg1h);
        if (rc != DDI_SUCCESS) {
                cxgb_printf(dip, CE_WARN,
                    "failed to map BAR1 device registers: %d", rc);
                goto done;
        } else {
                if (is_t5(sc->params.chip)) {
                        sc->doorbells |= DOORBELL_UDB;
                        if (prp->wc) {
                                /*
                                 * Enable write combining on BAR2.  This is
                                 * the userspace doorbell BAR and is split
                                 * into 128B (UDBS_SEG_SIZE) doorbell regions,
                                 * each associated with an egress queue.  The
                                 * first 64B has the doorbell and the second
                                 * 64B can be used to submit a tx work request
                                 * with an implicit doorbell.
                                 */
                                sc->doorbells &= ~DOORBELL_UDB;
                                sc->doorbells |= (DOORBELL_WCWR |
                                    DOORBELL_UDBWC);
                                t4_write_reg(sc, A_SGE_STAT_CFG,
                                    V_STATSOURCE_T5(7) | V_STATMODE(0));
                        }
                }
        }

        /*
         * Do this really early.  Note that minor number = instance.
         */
        (void) snprintf(name, sizeof (name), "%s,%d", T4_NEXUS_NAME, instance);
        rc = ddi_create_minor_node(dip, name, S_IFCHR, instance,
            DDI_NT_NEXUS, 0);
        if (rc != DDI_SUCCESS) {
                cxgb_printf(dip, CE_WARN,
                    "failed to create device node: %d", rc);
                rc = DDI_SUCCESS; /* carry on */
        }

        /* Do this early. Memory window is required for loading config file. */
        setup_memwin(sc);

        /* Prepare the firmware for operation */
        rc = prep_firmware(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = adap__pre_init_tweaks(sc);
        if (rc != 0)
                goto done;

        rc = get_params__pre_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        t4_sge_init(sc);

        if (sc->flags & MASTER_PF) {
                /* get basic stuff going */
                rc = -t4_fw_initialize(sc, sc->mbox);
                if (rc != 0) {
                        cxgb_printf(sc->dip, CE_WARN,
                            "early init failed: %d.\n", rc);
                        goto done;
                }
        }

        rc = get_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = set_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /*
         * TODO: This is the place to call t4_set_filter_mode()
         */

        /* tweak some settings */
        t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
            V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
            V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
        t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

        /*
         * Work-around for bug 2619.
         * Set the DisableVlan field in the TP_RSS_CONFIG_VRT register so
         * that VLAN tag extraction is disabled.
         */
        t4_set_reg_field(sc, A_TP_RSS_CONFIG_VRT, F_DISABLEVLAN, F_DISABLEVLAN);

        /* Store filter mode */
        t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
            A_TP_VLAN_PRI_MAP);

        /*
         * First pass over all the ports - allocate VIs and initialize some
         * basic parameters like mac address, port type, etc.  We also figure
         * out whether a port is 10G or 1G and use that information when
         * calculating how many interrupts to attempt to allocate.
         */
        n100g = n40g = n25g = n10g = n1g = 0;
        for_each_port(sc, i) {
                struct port_info *pi;

                pi = kmem_zalloc(sizeof (*pi), KM_SLEEP);
                sc->port[i] = pi;

                /* These must be set before t4_port_init */
                pi->adapter = sc;
                /* LINTED: E_ASSIGN_NARROW_CONV */
                pi->port_id = i;
        }

        /* Allocate the vi and initialize parameters like mac addr */
        rc = -t4_port_init(sc, sc->mbox, sc->pf, 0);
        if (rc != 0) {
                cxgb_printf(dip, CE_WARN, "unable to initialize port: %d", rc);
                goto done;
        }

        for_each_port(sc, i) {
                struct port_info *pi = sc->port[i];

                mutex_init(&pi->lock, NULL, MUTEX_DRIVER, NULL);
                pi->mtu = ETHERMTU;

                if (is_100G_port(pi)) {
                        n100g++;
                        pi->tmr_idx = prp->tmr_idx_10g;
                        pi->pktc_idx = prp->pktc_idx_10g;
                } else if (is_40G_port(pi)) {
                        n40g++;
                        pi->tmr_idx = prp->tmr_idx_10g;
                        pi->pktc_idx = prp->pktc_idx_10g;
                } else if (is_25G_port(pi)) {
                        n25g++;
                        pi->tmr_idx = prp->tmr_idx_10g;
                        pi->pktc_idx = prp->pktc_idx_10g;
                } else if (is_10G_port(pi)) {
                        n10g++;
                        pi->tmr_idx = prp->tmr_idx_10g;
                        pi->pktc_idx = prp->pktc_idx_10g;
                } else {
                        n1g++;
                        pi->tmr_idx = prp->tmr_idx_1g;
                        pi->pktc_idx = prp->pktc_idx_1g;
                }

                pi->xact_addr_filt = -1;
                t4_mc_init(pi);

                setbit(&sc->registered_device_map, i);
        }

        nxg = n10g + n25g + n40g + n100g;
        (void) remove_extra_props(sc, nxg, n1g);

        if (sc->registered_device_map == 0) {
                cxgb_printf(dip, CE_WARN, "no usable ports");
                rc = DDI_FAILURE;
                goto done;
        }

        rc = cfg_itype_and_nqueues(sc, nxg, n1g, &iaq);
        if (rc != 0)
                goto done; /* error message displayed already */

        sc->intr_type = iaq.intr_type;
        sc->intr_count = iaq.nirq;

        if (sc->props.multi_rings && (sc->intr_type != DDI_INTR_TYPE_MSIX)) {
                sc->props.multi_rings = 0;
                cxgb_printf(dip, CE_WARN,
                    "Multiple rings disabled as interrupt type is not MSI-X");
        }

        if (sc->props.multi_rings && iaq.intr_fwd) {
                sc->props.multi_rings = 0;
                cxgb_printf(dip, CE_WARN,
                    "Multiple rings disabled as interrupts are forwarded");
        }

        if (!sc->props.multi_rings) {
                iaq.ntxq10g = 1;
                iaq.ntxq1g = 1;
        }
        s = &sc->sge;
        s->nrxq = nxg * iaq.nrxq10g + n1g * iaq.nrxq1g;
        s->ntxq = nxg * iaq.ntxq10g + n1g * iaq.ntxq1g;
        s->neq = s->ntxq + s->nrxq;    /* the fl in an rxq is an eq */
#ifdef TCP_OFFLOAD_ENABLE
        /* control queues, 1 per port + 1 mgmtq */
        s->neq += sc->params.nports + 1;
#endif
        s->niq = s->nrxq + 1;             /* 1 extra for firmware event queue */
        if (iaq.intr_fwd != 0)
                sc->flags |= INTR_FWD;
#ifdef TCP_OFFLOAD_ENABLE
        if (is_offload(sc) != 0) {
                s->nofldrxq = nxg * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
                s->nofldtxq = nxg * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
                s->neq += s->nofldtxq + s->nofldrxq;
                s->niq += s->nofldrxq;

                s->ofld_rxq = kmem_zalloc(s->nofldrxq *
                    sizeof (struct sge_ofld_rxq), KM_SLEEP);
                s->ofld_txq = kmem_zalloc(s->nofldtxq *
                    sizeof (struct sge_wrq), KM_SLEEP);
                s->ctrlq = kmem_zalloc(sc->params.nports *
                    sizeof (struct sge_wrq), KM_SLEEP);
        }
#endif
        s->rxq = kmem_zalloc(s->nrxq * sizeof (struct sge_rxq), KM_SLEEP);
        s->txq = kmem_zalloc(s->ntxq * sizeof (struct sge_txq), KM_SLEEP);
        s->iqmap = kmem_zalloc(s->niq * sizeof (struct sge_iq *), KM_SLEEP);
        s->eqmap = kmem_zalloc(s->neq * sizeof (struct sge_eq *), KM_SLEEP);

        sc->intr_handle = kmem_zalloc(sc->intr_count *
            sizeof (ddi_intr_handle_t), KM_SLEEP);

        /*
         * Second pass over the ports.  This time we know the number of rx and
         * tx queues that each port should get.
         */
        rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD_ENABLE
        ofld_rqidx = ofld_tqidx = 0;
#endif
        for_each_port(sc, i) {
                struct port_info *pi = sc->port[i];

                if (pi == NULL)
                        continue;

                t4_mc_cb_init(pi);
                /* LINTED: E_ASSIGN_NARROW_CONV */
                pi->first_rxq = rqidx;
                /* LINTED: E_ASSIGN_NARROW_CONV */
                pi->nrxq = is_10XG_port(pi) ? iaq.nrxq10g : iaq.nrxq1g;
                /* LINTED: E_ASSIGN_NARROW_CONV */
                pi->first_txq = tqidx;
                /* LINTED: E_ASSIGN_NARROW_CONV */
                pi->ntxq = is_10XG_port(pi) ? iaq.ntxq10g : iaq.ntxq1g;

                rqidx += pi->nrxq;
                tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD_ENABLE
                if (is_offload(sc) != 0) {
                        /* LINTED: E_ASSIGN_NARROW_CONV */
                        pi->first_ofld_rxq = ofld_rqidx;
                        pi->nofldrxq = max(1, pi->nrxq / 4);

                        /* LINTED: E_ASSIGN_NARROW_CONV */
                        pi->first_ofld_txq = ofld_tqidx;
                        pi->nofldtxq = max(1, pi->ntxq / 2);

                        ofld_rqidx += pi->nofldrxq;
                        ofld_tqidx += pi->nofldtxq;
                }
#endif

                /*
                 * Enable hw checksumming and LSO for all ports by default.
                 * They can be disabled using ndd (hw_csum and hw_lso).
                 */
                pi->features |= (CXGBE_HW_CSUM | CXGBE_HW_LSO);
        }

#ifdef TCP_OFFLOAD_ENABLE
        sc->l2t = t4_init_l2t(sc);
#endif

        /*
         * Setup Interrupts.
         */
        i = 0;
        rc = ddi_intr_alloc(dip, sc->intr_handle, sc->intr_type, 0,
            sc->intr_count, &i, DDI_INTR_ALLOC_STRICT);
        if (rc != DDI_SUCCESS) {
                cxgb_printf(dip, CE_WARN,
                    "failed to allocate %d interrupt(s) of type %d: %d, %d",
                    sc->intr_count, sc->intr_type, rc, i);
                goto done;
        }
        ASSERT(sc->intr_count == i); /* allocation was STRICT */
        (void) ddi_intr_get_cap(sc->intr_handle[0], &sc->intr_cap);
        (void) ddi_intr_get_pri(sc->intr_handle[0], &sc->intr_pri);
        if (sc->intr_count == 1) {
                ASSERT(sc->flags & INTR_FWD);
                (void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_all, sc,
                    &s->fwq);
        } else {
                /* Multiple interrupts.  The first one is always error intr */
                (void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_err, sc,
                    NULL);
                irq++;

                /* The second one is always the firmware event queue */
                (void) ddi_intr_add_handler(sc->intr_handle[1], t4_intr, sc,
                    &s->fwq);
                irq++;
                /*
                 * Note that if INTR_FWD is set then either the NIC rx
                 * queues or (exclusive or) the TOE rx queues will be taking
                 * direct interrupts.
                 *
                 * There is no need to check for is_offload(sc) as nofldrxq
                 * will be 0 if offload is disabled.
                 */
                for_each_port(sc, i) {
                        struct port_info *pi = sc->port[i];
                        struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD_ENABLE
                        struct sge_ofld_rxq *ofld_rxq;

                        /*
                         * Skip over the NIC queues if they aren't taking direct
                         * interrupts.
                         */
                        if ((sc->flags & INTR_FWD) &&
                            pi->nofldrxq > pi->nrxq)
                                goto ofld_queues;
#endif
                        rxq = &s->rxq[pi->first_rxq];
                        for (q = 0; q < pi->nrxq; q++, rxq++) {
                                (void) ddi_intr_add_handler(
                                    sc->intr_handle[irq], t4_intr, sc,
                                    &rxq->iq);
                                irq++;
                        }

#ifdef TCP_OFFLOAD_ENABLE
                        /*
                         * Skip over the offload queues if they aren't taking
                         * direct interrupts.
                         */
                        if (sc->flags & INTR_FWD)
                                continue;
ofld_queues:
                        ofld_rxq = &s->ofld_rxq[pi->first_ofld_rxq];
                        for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
                                (void) ddi_intr_add_handler(
                                    sc->intr_handle[irq], t4_intr, sc,
                                    &ofld_rxq->iq);
                                irq++;
                        }
#endif
                }
        }
        sc->flags |= INTR_ALLOCATED;

        ASSERT(rc == DDI_SUCCESS);
        ddi_report_dev(dip);

        /*
         * Hardware/Firmware/etc. Version/Revision IDs.
         */
        t4_dump_version_info(sc);

        if (n100g) {
                cxgb_printf(dip, CE_NOTE,
                    "%dx100G (%d rxq, %d txq total) %d %s.",
                    n100g, rqidx, tqidx, sc->intr_count,
                    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
                    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
                    "fixed interrupt");
        } else if (n40g) {
                cxgb_printf(dip, CE_NOTE,
                    "%dx40G (%d rxq, %d txq total) %d %s.",
                    n40g, rqidx, tqidx, sc->intr_count,
                    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
                    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
                    "fixed interrupt");
        } else if (n25g) {
                cxgb_printf(dip, CE_NOTE,
                    "%dx25G (%d rxq, %d txq total) %d %s.",
                    n25g, rqidx, tqidx, sc->intr_count,
                    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
                    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
                    "fixed interrupt");
        } else if (n10g && n1g) {
                cxgb_printf(dip, CE_NOTE,
                    "%dx10G %dx1G (%d rxq, %d txq total) %d %s.",
                    n10g, n1g, rqidx, tqidx, sc->intr_count,
                    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
                    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
                    "fixed interrupt");
        } else {
                cxgb_printf(dip, CE_NOTE,
                    "%dx%sG (%d rxq, %d txq per port) %d %s.",
                    n10g ? n10g : n1g,
                    n10g ? "10" : "1",
                    n10g ? iaq.nrxq10g : iaq.nrxq1g,
                    n10g ? iaq.ntxq10g : iaq.ntxq1g,
                    sc->intr_count,
                    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
                    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
                    "fixed interrupt");
        }

        sc->ksp = setup_kstats(sc);
        sc->ksp_stat = setup_wc_kstats(sc);
        sc->params.drv_memwin = MEMWIN_NIC;

done:
        if (rc != DDI_SUCCESS) {
                (void) t4_devo_detach(dip, DDI_DETACH);

                /* rc may have errno style errors or DDI errors */
                rc = DDI_FAILURE;
        }

        return (rc);
}

static int
t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
        int instance, i;
        struct adapter *sc;
        struct port_info *pi;
        struct sge *s;

        if (cmd != DDI_DETACH)
                return (DDI_FAILURE);

        instance = ddi_get_instance(dip);
        sc = ddi_get_soft_state(t4_list, instance);
        if (sc == NULL)
                return (DDI_SUCCESS);

        if (sc->flags & FULL_INIT_DONE) {
                t4_intr_disable(sc);
                for_each_port(sc, i) {
                        pi = sc->port[i];
                        if (pi != NULL && (pi->flags & PORT_INIT_DONE))
                                (void) port_full_uninit(pi);
                }
                (void) adapter_full_uninit(sc);
        }

        /* Safe to call no matter what */
        ddi_prop_remove_all(dip);
        ddi_remove_minor_node(dip, NULL);

        if (sc->ksp != NULL)
                kstat_delete(sc->ksp);
        if (sc->ksp_stat != NULL)
                kstat_delete(sc->ksp_stat);

        s = &sc->sge;
        if (s->rxq != NULL)
                kmem_free(s->rxq, s->nrxq * sizeof (struct sge_rxq));
#ifdef TCP_OFFLOAD_ENABLE
        if (s->ofld_txq != NULL)
                kmem_free(s->ofld_txq, s->nofldtxq * sizeof (struct sge_wrq));
        if (s->ofld_rxq != NULL)
                kmem_free(s->ofld_rxq,
                    s->nofldrxq * sizeof (struct sge_ofld_rxq));
        if (s->ctrlq != NULL)
                kmem_free(s->ctrlq,
                    sc->params.nports * sizeof (struct sge_wrq));
#endif
        if (s->txq != NULL)
                kmem_free(s->txq, s->ntxq * sizeof (struct sge_txq));
        if (s->iqmap != NULL)
                kmem_free(s->iqmap, s->niq * sizeof (struct sge_iq *));
        if (s->eqmap != NULL)
                kmem_free(s->eqmap, s->neq * sizeof (struct sge_eq *));

        if (s->rxbuf_cache != NULL)
                rxbuf_cache_destroy(s->rxbuf_cache);

        if (sc->flags & INTR_ALLOCATED) {
                for (i = 0; i < sc->intr_count; i++) {
                        (void) ddi_intr_remove_handler(sc->intr_handle[i]);
                        (void) ddi_intr_free(sc->intr_handle[i]);
                }
                sc->flags &= ~INTR_ALLOCATED;
        }

        if (sc->intr_handle != NULL) {
                kmem_free(sc->intr_handle,
                    sc->intr_count * sizeof (*sc->intr_handle));
        }

        for_each_port(sc, i) {
                pi = sc->port[i];
                if (pi != NULL) {
                        mutex_destroy(&pi->lock);
                        kmem_free(pi, sizeof (*pi));
                        clrbit(&sc->registered_device_map, i);
                }
        }

        if (sc->flags & FW_OK)
                (void) t4_fw_bye(sc, sc->mbox);

        if (sc->reg1h != NULL)
                ddi_regs_map_free(&sc->reg1h);

        if (sc->regh != NULL)
                ddi_regs_map_free(&sc->regh);

        if (sc->pci_regh != NULL)
                pci_config_teardown(&sc->pci_regh);

        mutex_enter(&t4_adapter_list_lock);
        /* Remove this adapter, not whatever happens to be at the head. */
        SLIST_REMOVE(&t4_adapter_list, sc, adapter, link);
        mutex_exit(&t4_adapter_list_lock);

        mutex_destroy(&sc->lock);
        cv_destroy(&sc->cv);
        mutex_destroy(&sc->sfl_lock);

#ifdef DEBUG
        bzero(sc, sizeof (*sc));
#endif
        ddi_soft_state_free(t4_list, instance);

        return (DDI_SUCCESS);
}

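/*
 * quiesce(9E): stop the SGE and all interrupts and reset the chip so the
 * device is silent across a fast reboot.
 */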
static int
t4_devo_quiesce(dev_info_t *dip)
{
        int instance;
        struct adapter *sc;

        instance = ddi_get_instance(dip);
        sc = ddi_get_soft_state(t4_list, instance);
        if (sc == NULL)
                return (DDI_SUCCESS);

        t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
        t4_intr_disable(sc);
        t4_write_reg(sc, A_PL_RST, F_PIORSTMODE | F_PIORST);

        return (DDI_SUCCESS);
}

static int
t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, void *arg,
    void *result)
{
        char s[4];
        struct port_info *pi;
        dev_info_t *child = (dev_info_t *)arg;

        switch (op) {
        case DDI_CTLOPS_REPORTDEV:
                pi = ddi_get_parent_data(rdip);
                pi->instance = ddi_get_instance(dip);
                pi->child_inst = ddi_get_instance(rdip);
                cmn_err(CE_CONT, "?%s%d is port %s on %s%d\n",
                    ddi_node_name(rdip), ddi_get_instance(rdip),
                    ddi_get_name_addr(rdip), ddi_driver_name(dip),
                    ddi_get_instance(dip));
                return (DDI_SUCCESS);

        case DDI_CTLOPS_INITCHILD:
                pi = ddi_get_parent_data(child);
                if (pi == NULL)
                        return (DDI_NOT_WELL_FORMED);
                (void) snprintf(s, sizeof (s), "%d", pi->port_id);
                ddi_set_name_addr(child, s);
                return (DDI_SUCCESS);

        case DDI_CTLOPS_UNINITCHILD:
                ddi_set_name_addr(child, NULL);
                return (DDI_SUCCESS);

        case DDI_CTLOPS_ATTACH:
        case DDI_CTLOPS_DETACH:
                return (DDI_SUCCESS);

        default:
                return (ddi_ctlops(dip, rdip, op, arg, result));
        }
}

static int
t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, void *arg,
    dev_info_t **cdipp)
{
        int instance, i;
        struct adapter *sc;

        instance = ddi_get_instance(dip);
        sc = ddi_get_soft_state(t4_list, instance);

        if (op == BUS_CONFIG_ONE) {
                char *c;

                /*
                 * arg is something like "cxgb@0" where 0 is the port_id
                 * hanging off this nexus.
                 */
                c = arg;
                while (*(c + 1))
                        c++;

                /* There should be exactly 1 digit after '@' */
                if (*(c - 1) != '@')
                        return (NDI_FAILURE);

                i = *c - '0';

                if (add_child_node(sc, i) != 0)
                        return (NDI_FAILURE);

                flags |= NDI_ONLINE_ATTACH;

        } else if (op == BUS_CONFIG_ALL || op == BUS_CONFIG_DRIVER) {
                /* Allocate and bind all child device nodes */
                for_each_port(sc, i)
                    (void) add_child_node(sc, i);
                flags |= NDI_ONLINE_ATTACH;
        }

        return (ndi_busop_bus_config(dip, flags, op, arg, cdipp, 0));
}

static int
t4_bus_unconfig(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
    void *arg)
{
        int instance, i, rc;
        struct adapter *sc;

        instance = ddi_get_instance(dip);
        sc = ddi_get_soft_state(t4_list, instance);

        if (op == BUS_UNCONFIG_ONE || op == BUS_UNCONFIG_ALL ||
            op == BUS_UNCONFIG_DRIVER)
                flags |= NDI_UNCONFIG;

        rc = ndi_busop_bus_unconfig(dip, flags, op, arg);
        if (rc != 0)
                return (rc);

        if (op == BUS_UNCONFIG_ONE) {
                char *c;

                c = arg;
                while (*(c + 1))
                        c++;

                if (*(c - 1) != '@')
                        return (NDI_SUCCESS);

                i = *c - '0';

                rc = remove_child_node(sc, i);

        } else if (op == BUS_UNCONFIG_ALL || op == BUS_UNCONFIG_DRIVER) {

                for_each_port(sc, i)
                    (void) remove_child_node(sc, i);
        }

        return (rc);
}

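/*
 * Open of the control node is exclusive: the compare-and-swap returns 0 to
 * the first opener and EBUSY while the node remains open.
 */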
/* ARGSUSED */
static int
t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
        struct adapter *sc;

        if (otyp != OTYP_CHR)
                return (EINVAL);

        sc = ddi_get_soft_state(t4_list, getminor(*devp));
        if (sc == NULL)
                return (ENXIO);

        return (atomic_cas_uint(&sc->open, 0, EBUSY));
}

/* ARGSUSED */
static int
t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
        struct adapter *sc;

        sc = ddi_get_soft_state(t4_list, getminor(dev));
        if (sc == NULL)
                return (EINVAL);

        (void) atomic_swap_uint(&sc->open, 0);
        return (0);
}

/* ARGSUSED */
static int
t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp, int *rp)
{
        int instance;
        struct adapter *sc;
        void *data = (void *)d;

        if (crgetuid(credp) != 0)
                return (EPERM);

        instance = getminor(dev);
        sc = ddi_get_soft_state(t4_list, instance);
        if (sc == NULL)
                return (EINVAL);

        return (t4_ioctl(sc, cmd, data, mode));
}

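/*
 * Extract the PCI function number from the first entry of this device's
 * "reg" property; 0xff indicates failure.
 */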
static unsigned int
getpf(struct adapter *sc)
{
        int rc, *data;
        uint_t n, pf;

        rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
            DDI_PROP_DONTPASS, "reg", &data, &n);
        if (rc != DDI_SUCCESS) {
                cxgb_printf(sc->dip, CE_WARN,
                    "failed to lookup \"reg\" property: %d", rc);
                return (0xff);
        }

        pf = PCI_REG_FUNC_G(data[0]);
        ddi_prop_free(data);

        return (pf);
}

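/*
 * Fill in the firmware information table for T4, T5, and T6 and return the
 * entry matching the given chip version, or NULL if it isn't supported.
 */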
static struct fw_info *
find_fw_info(int chip)
{
        u32 i;

        fi[0].chip = CHELSIO_T4;
        fi[0].fw_hdr.chip = FW_HDR_CHIP_T4;
        fi[0].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T4));
        fi[0].fw_hdr.intfver_nic = FW_INTFVER(T4, NIC);
        fi[0].fw_hdr.intfver_vnic = FW_INTFVER(T4, VNIC);
        fi[0].fw_hdr.intfver_ofld = FW_INTFVER(T4, OFLD);
        fi[0].fw_hdr.intfver_ri = FW_INTFVER(T4, RI);
        fi[0].fw_hdr.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU);
        fi[0].fw_hdr.intfver_iscsi = FW_INTFVER(T4, ISCSI);
        fi[0].fw_hdr.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU);
        fi[0].fw_hdr.intfver_fcoe = FW_INTFVER(T4, FCOE);

        fi[1].chip = CHELSIO_T5;
        fi[1].fw_hdr.chip = FW_HDR_CHIP_T5;
        fi[1].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T5));
        fi[1].fw_hdr.intfver_nic = FW_INTFVER(T5, NIC);
        fi[1].fw_hdr.intfver_vnic = FW_INTFVER(T5, VNIC);
        fi[1].fw_hdr.intfver_ofld = FW_INTFVER(T5, OFLD);
        fi[1].fw_hdr.intfver_ri = FW_INTFVER(T5, RI);
        fi[1].fw_hdr.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU);
        fi[1].fw_hdr.intfver_iscsi = FW_INTFVER(T5, ISCSI);
        fi[1].fw_hdr.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU);
        fi[1].fw_hdr.intfver_fcoe = FW_INTFVER(T5, FCOE);

        fi[2].chip = CHELSIO_T6;
        fi[2].fw_hdr.chip = FW_HDR_CHIP_T6;
        fi[2].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T6));
        fi[2].fw_hdr.intfver_nic = FW_INTFVER(T6, NIC);
        fi[2].fw_hdr.intfver_vnic = FW_INTFVER(T6, VNIC);
        fi[2].fw_hdr.intfver_ofld = FW_INTFVER(T6, OFLD);
        fi[2].fw_hdr.intfver_ri = FW_INTFVER(T6, RI);
        fi[2].fw_hdr.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU);
        fi[2].fw_hdr.intfver_iscsi = FW_INTFVER(T6, ISCSI);
        fi[2].fw_hdr.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU);
        fi[2].fw_hdr.intfver_fcoe = FW_INTFVER(T6, FCOE);

        for (i = 0; i < ARRAY_SIZE(fi); i++) {
                if (fi[i].chip == chip)
                        return (&fi[i]);
        }

        return (NULL);
}

/*
 * Install a compatible firmware (if required), establish contact with it,
 * become the master, and reset the device.
 */
static int
prep_firmware(struct adapter *sc)
{
        int rc;
        int fw_size;
        int reset = 1;
        enum dev_state state;
        unsigned char *fw_data;
        struct fw_info *fw_info;
        struct fw_hdr *card_fw;
        struct driver_properties *p = &sc->props;

        /* Contact firmware, request master */
        rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
        if (rc < 0) {
                rc = -rc;
                cxgb_printf(sc->dip, CE_WARN,
                    "failed to connect to the firmware: %d.", rc);
                return (rc);
        }

        if (rc == sc->mbox)
                sc->flags |= MASTER_PF;

        /* We may need FW version info for later reporting */
        t4_get_version_info(sc);

        fw_info = find_fw_info(CHELSIO_CHIP_VERSION(sc->params.chip));
        if (fw_info == NULL) {
                cxgb_printf(sc->dip, CE_WARN,
                    "unable to look up firmware information for chip %d.\n",
                    CHELSIO_CHIP_VERSION(sc->params.chip));
                return (EINVAL);
        }

        /*
         * Allocate memory to read the header of the firmware on the card.
         * A KM_SLEEP allocation cannot fail, so no NULL check is needed.
         */
        card_fw = kmem_zalloc(sizeof (*card_fw), KM_SLEEP);

        switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
        case CHELSIO_T4:
                fw_data = t4fw_data;
                fw_size = t4fw_size;
                break;
        case CHELSIO_T5:
                fw_data = t5fw_data;
                fw_size = t5fw_size;
                break;
        case CHELSIO_T6:
                fw_data = t6fw_data;
                fw_size = t6fw_size;
                break;
        default:
                cxgb_printf(sc->dip, CE_WARN, "adapter type not supported\n");
                kmem_free(card_fw, sizeof (*card_fw));
                return (EINVAL);
        }

        rc = -t4_prep_fw(sc, fw_info, fw_data, fw_size, card_fw,
            p->t4_fw_install, state, &reset);

        kmem_free(card_fw, sizeof (*card_fw));

        if (rc != 0) {
                cxgb_printf(sc->dip, CE_WARN,
                    "failed to install firmware: %d", rc);
                return (rc);
        } else {
                /* refresh */
                (void) t4_check_fw_version(sc);
        }

        /* Reset device */
        rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
        if (rc != 0) {
                cxgb_printf(sc->dip, CE_WARN,
                    "firmware reset failed: %d.", rc);
                if (rc != ETIMEDOUT && rc != EIO)
                        (void) t4_fw_bye(sc, sc->mbox);
                return (rc);
        }

        /* Partition adapter resources as specified in the config file. */
        if (sc->flags & MASTER_PF) {
                /* Handle default vs special T4 config file */
                rc = partition_resources(sc);
                if (rc != 0)
                        return (rc);    /* error message displayed already */
        }

        sc->flags |= FW_OK;
        return (0);
}

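/*
 * PCIe memory window base/aperture pairs; window 2 differs between T4 and
 * T5 (and later) chips.
 */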
static const struct memwin t4_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE, MEMWIN2_APERTURE }
};

static const struct memwin t5_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};

#define FW_PARAM_DEV(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))

/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address
 * of the start of the range is returned in addr.
 */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
        uint32_t em, addr_len, maddr, mlen;

        /* Memory can only be accessed in naturally aligned 4 byte units */
        if (off & 3 || len & 3 || len == 0)
                return (EINVAL);

        em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
        switch (mtype) {
        case MEM_EDC0:
                if (!(em & F_EDRAM0_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
                maddr = G_EDRAM0_BASE(addr_len) << 20;
                mlen = G_EDRAM0_SIZE(addr_len) << 20;
                break;
        case MEM_EDC1:
                if (!(em & F_EDRAM1_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
                maddr = G_EDRAM1_BASE(addr_len) << 20;
                mlen = G_EDRAM1_SIZE(addr_len) << 20;
                break;
        case MEM_MC:
                if (!(em & F_EXT_MEM_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
                maddr = G_EXT_MEM_BASE(addr_len) << 20;
                mlen = G_EXT_MEM_SIZE(addr_len) << 20;
                break;
        case MEM_MC1:
                if (is_t4(sc->params.chip) || !(em & F_EXT_MEM1_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
                maddr = G_EXT_MEM1_BASE(addr_len) << 20;
                mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
                break;
        default:
                return (EINVAL);
        }

        if (mlen > 0 && off < mlen && off + len <= mlen) {
                *addr = maddr + off;    /* global address */
                return (0);
        }

        return (EFAULT);
}

void
memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
{
        const struct memwin *mw;

        if (is_t4(sc->params.chip)) {
                mw = &t4_memwin[win];
        } else {
                mw = &t5_memwin[win];
        }

        if (base != NULL)
                *base = mw->base;
        if (aperture != NULL)
                *aperture = mw->aperture;
}

/*
 * Upload the configuration file to the card's memory.
 */
static int
upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma)
{
        int rc = 0, cflen;
        u_int i, n;
        uint32_t param, val, addr, mtype, maddr;
        uint32_t off, mw_base, mw_aperture;
        const uint32_t *cfdata;

        /* Figure out where the firmware wants us to upload it. */
        param = FW_PARAM_DEV(CF);
        rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
        if (rc != 0) {
                /* Firmwares without config file support will fail this way */
                cxgb_printf(sc->dip, CE_WARN,
                    "failed to query config file location: %d.\n", rc);
                return (rc);
        }
        *mt = mtype = G_FW_PARAMS_PARAM_Y(val);
        *ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;

        switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
        case CHELSIO_T4:
                cflen = t4cfg_size & ~3;
                /* LINTED: E_BAD_PTR_CAST_ALIGN */
                cfdata = (const uint32_t *)t4cfg_data;
                break;
        case CHELSIO_T5:
                cflen = t5cfg_size & ~3;
                /* LINTED: E_BAD_PTR_CAST_ALIGN */
                cfdata = (const uint32_t *)t5cfg_data;
                break;
        case CHELSIO_T6:
                cflen = t6cfg_size & ~3;
                /* LINTED: E_BAD_PTR_CAST_ALIGN */
                cfdata = (const uint32_t *)t6cfg_data;
                break;
        default:
                cxgb_printf(sc->dip, CE_WARN, "invalid adapter detected\n");
                return (EINVAL);
        }

        if (cflen > FLASH_CFG_MAX_SIZE) {
                cxgb_printf(sc->dip, CE_WARN,
                    "config file too long (%d, max allowed is %d).",
                    cflen, FLASH_CFG_MAX_SIZE);
                return (EFBIG);
        }

        rc = validate_mt_off_len(sc, mtype, maddr, cflen, &addr);
        if (rc != 0) {
                cxgb_printf(sc->dip, CE_WARN,
                    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
                    "Will try to use the config on the card, if any.\n",
                    __func__, mtype, maddr, cflen, rc);
                return (EFAULT);
        }

        memwin_info(sc, 2, &mw_base, &mw_aperture);
        while (cflen) {
                off = position_memwin(sc, 2, addr);
                n = min(cflen, mw_aperture - off);
                for (i = 0; i < n; i += 4)
                        t4_write_reg(sc, mw_base + off + i, *cfdata++);
                cflen -= n;
                addr += n;
        }

        return (rc);
}

1453 /*
1454  * Partition chip resources for use between various PFs, VFs, etc.  This is done
1455  * by uploading the firmware configuration file to the adapter and instructing
1456  * the firmware to process it.
1457  */
1458 static int
1459 partition_resources(struct adapter *sc)
1460 {
1461         int rc;
1462         struct fw_caps_config_cmd caps;
1463         uint32_t mtype, maddr, finicsum, cfcsum;
1464 
1465         rc = upload_config_file(sc, &mtype, &maddr);
1466         if (rc != 0) {
1467                 mtype = FW_MEMTYPE_CF_FLASH;
1468                 maddr = t4_flash_cfg_addr(sc);
1469         }
1470 
1471         bzero(&caps, sizeof (caps));
1472         caps.op_to_write = BE_32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1473             F_FW_CMD_REQUEST | F_FW_CMD_READ);
1474         caps.cfvalid_to_len16 = BE_32(F_FW_CAPS_CONFIG_CMD_CFVALID |
1475             V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
1476             V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
1477         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
1478         if (rc != 0) {
1479                 cxgb_printf(sc->dip, CE_WARN,
1480                     "failed to pre-process config file: %d.\n", rc);
1481                 return (rc);
1482         }
1483 
1484         finicsum = ntohl(caps.finicsum);
1485         cfcsum = ntohl(caps.cfcsum);
1486         if (finicsum != cfcsum) {
1487                 cxgb_printf(sc->dip, CE_WARN,
1488                     "WARNING: config file checksum mismatch: %08x %08x\n",
1489                     finicsum, cfcsum);
1490         }
1491         sc->cfcsum = cfcsum;
1492 
1493         /* TODO: Need to configure this correctly */
1494         caps.toecaps = htons(FW_CAPS_CONFIG_TOE);
1495         caps.iscsicaps = 0;
1496         caps.rdmacaps = 0;
1497         caps.fcoecaps = 0;
1498         /* TODO: Disable VNIC cap for now */
1499         caps.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
1500 
1501         caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1502             F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1503         caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
1504         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), NULL);
1505         if (rc != 0) {
1506                 cxgb_printf(sc->dip, CE_WARN,
1507                     "failed to process config file: %d.\n", rc);
1508                 return (rc);
1509         }
1510 
1511         return (0);
1512 }
1513 
1514 /*
1515  * Tweak configuration based on module parameters, etc.  Most of these have
1516  * defaults assigned to them by Firmware Configuration Files (if we're using
1517  * them) but need to be explicitly set if we're using hard-coded
1518  * initialization.  But even in the case of using Firmware Configuration
1519  * Files, we'd like to expose the ability to change these via module
1520  * parameters so these are essentially common tweaks/settings for
1521  * Configuration Files and hard-coded initialization ...
1522  */
1523 static int
1524 adap__pre_init_tweaks(struct adapter *sc)
1525 {
1526         int rx_dma_offset = 2; /* Offset of RX packets into DMA buffers */
1527 
1528         /*
1529          * Fix up various Host-Dependent Parameters like Page Size, Cache
1530          * Line Size, etc.  The firmware default is for a 4KB Page Size and
1531          * 64B Cache Line Size ...
1532          */
        (void) t4_fixup_host_params_compat(sc, PAGE_SIZE, CACHE_LINE,
            T5_LAST_REV);
1534 
        t4_set_reg_field(sc, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
            V_PKTSHIFT(rx_dma_offset));
1537 
        return (0);
}

1540 /*
1541  * Retrieve parameters that are needed (or nice to have) prior to calling
1542  * t4_sge_init and t4_fw_initialize.
1543  */
1544 static int
1545 get_params__pre_init(struct adapter *sc)
1546 {
1547         int rc;
1548         uint32_t param[2], val[2];
1549         struct fw_devlog_cmd cmd;
1550         struct devlog_params *dlog = &sc->params.devlog;
1551 
1552         /*
1553          * Grab the raw VPD parameters.
1554          */
1555         rc = -t4_get_raw_vpd_params(sc, &sc->params.vpd);
1556         if (rc != 0) {
1557                 cxgb_printf(sc->dip, CE_WARN,
1558                     "failed to query VPD parameters (pre_init): %d.\n", rc);
1559                 return (rc);
1560         }
1561 
1562         param[0] = FW_PARAM_DEV(PORTVEC);
1563         param[1] = FW_PARAM_DEV(CCLK);
1564         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
1565         if (rc != 0) {
1566                 cxgb_printf(sc->dip, CE_WARN,
1567                     "failed to query parameters (pre_init): %d.\n", rc);
1568                 return (rc);
1569         }
1570 
1571         sc->params.portvec = val[0];
1572         sc->params.nports = 0;
1573         while (val[0]) {
1574                 sc->params.nports++;
1575                 val[0] &= val[0] - 1;
1576         }
1577 
1578         sc->params.vpd.cclk = val[1];
1579 
1580         /* Read device log parameters. */
1581         bzero(&cmd, sizeof (cmd));
1582         cmd.op_to_write = htonl(V_FW_CMD_OP(FW_DEVLOG_CMD) |
1583             F_FW_CMD_REQUEST | F_FW_CMD_READ);
1584         cmd.retval_len16 = htonl(FW_LEN16(cmd));
1585         rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof (cmd), &cmd);
1586         if (rc != 0) {
1587                 cxgb_printf(sc->dip, CE_WARN,
1588                     "failed to get devlog parameters: %d.\n", rc);
1589                 bzero(dlog, sizeof (*dlog));
1590                 rc = 0; /* devlog isn't critical for device operation */
1591         } else {
1592                 val[0] = ntohl(cmd.memtype_devlog_memaddr16_devlog);
1593                 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
1594                 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
1595                 dlog->size = ntohl(cmd.memsize_devlog);
1596         }
1597 
1598         return (rc);
1599 }
1600 
1601 /*
1602  * Retrieve various parameters that are of interest to the driver.  The device
1603  * has been initialized by the firmware at this point.
1604  */
1605 static int
1606 get_params__post_init(struct adapter *sc)
1607 {
1608         int rc;
1609         uint32_t param[7], val[7];
1610         struct fw_caps_config_cmd caps;
1611 
1612         param[0] = FW_PARAM_PFVF(IQFLINT_START);
1613         param[1] = FW_PARAM_PFVF(EQ_START);
1614         param[2] = FW_PARAM_PFVF(FILTER_START);
1615         param[3] = FW_PARAM_PFVF(FILTER_END);
1616         param[4] = FW_PARAM_PFVF(L2T_START);
1617         param[5] = FW_PARAM_PFVF(L2T_END);
1618         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1619         if (rc != 0) {
1620                 cxgb_printf(sc->dip, CE_WARN,
1621                     "failed to query parameters (post_init): %d.\n", rc);
1622                 return (rc);
1623         }
1624 
1625         /* LINTED: E_ASSIGN_NARROW_CONV */
1626         sc->sge.iq_start = val[0];
1627         sc->sge.eq_start = val[1];
1628         sc->tids.ftid_base = val[2];
1629         sc->tids.nftids = val[3] - val[2] + 1;
1630         sc->vres.l2t.start = val[4];
1631         sc->vres.l2t.size = val[5] - val[4] + 1;
1632 
        /* get capabilities */
1634         bzero(&caps, sizeof (caps));
1635         caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1636             F_FW_CMD_REQUEST | F_FW_CMD_READ);
1637         caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
1638         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
1639         if (rc != 0) {
1640                 cxgb_printf(sc->dip, CE_WARN,
1641                     "failed to get card capabilities: %d.\n", rc);
1642                 return (rc);
1643         }
1644 
1645         if (caps.toecaps != 0) {
1646                 /* query offload-related parameters */
1647                 param[0] = FW_PARAM_DEV(NTID);
1648                 param[1] = FW_PARAM_PFVF(SERVER_START);
1649                 param[2] = FW_PARAM_PFVF(SERVER_END);
1650                 param[3] = FW_PARAM_PFVF(TDDP_START);
1651                 param[4] = FW_PARAM_PFVF(TDDP_END);
1652                 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
1653                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1654                 if (rc != 0) {
1655                         cxgb_printf(sc->dip, CE_WARN,
1656                             "failed to query TOE parameters: %d.\n", rc);
1657                         return (rc);
1658                 }
1659                 sc->tids.ntids = val[0];
1660                 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
1661                 sc->tids.stid_base = val[1];
1662                 sc->tids.nstids = val[2] - val[1] + 1;
1663                 sc->vres.ddp.start = val[3];
1664                 sc->vres.ddp.size = val[4] - val[3] + 1;
1665                 sc->params.ofldq_wr_cred = val[5];
1666                 sc->params.offload = 1;
1667         }
1668 
1669         /* These are finalized by FW initialization, load their values now */
1670         val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
1671         sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
1672         sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
1673         t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
1674 
1675         return (rc);
1676 }
1677 
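/*
 * Set firmware parameters that the driver wants adjusted after the device
 * has been initialized by the firmware.
 */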
1678 static int
1679 set_params__post_init(struct adapter *sc)
1680 {
1681         uint32_t param, val;
1682 
1683         /* ask for encapsulated CPLs */
1684         param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
1685         val = 1;
        (void) t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1687 
1688         return (0);
1689 }
1690 
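/*
 * Program the PCIe memory access windows in BAR0 so that adapter memory
 * (EDC, MC) can be read and written through them.  On T4 the window base is
 * an absolute bus address so BAR0 must be added in; on T5 and later only the
 * offset within the BAR is programmed.
 */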
1691 /* TODO: verify */
1692 static void
1693 setup_memwin(struct adapter *sc)
1694 {
1695         pci_regspec_t *data;
1696         int rc;
1697         uint_t n;
1698         uintptr_t bar0;
1699         uintptr_t mem_win0_base, mem_win1_base, mem_win2_base;
1700         uintptr_t mem_win2_aperture;
1701 
1702         rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
1703             DDI_PROP_DONTPASS, "assigned-addresses", (int **)&data, &n);
1704         if (rc != DDI_SUCCESS) {
1705                 cxgb_printf(sc->dip, CE_WARN,
1706                     "failed to lookup \"assigned-addresses\" property: %d", rc);
1707                 return;
1708         }
1709         n /= sizeof (*data);
1710 
1711         bar0 = ((uint64_t)data[0].pci_phys_mid << 32) | data[0].pci_phys_low;
1712         ddi_prop_free(data);
1713 
1714         if (is_t4(sc->params.chip)) {
1715                 mem_win0_base = bar0 + MEMWIN0_BASE;
1716                 mem_win1_base = bar0 + MEMWIN1_BASE;
1717                 mem_win2_base = bar0 + MEMWIN2_BASE;
1718                 mem_win2_aperture = MEMWIN2_APERTURE;
1719         } else {
1720                 /* For T5, only relative offset inside the PCIe BAR is passed */
1721                 mem_win0_base = MEMWIN0_BASE;
1722                 mem_win1_base = MEMWIN1_BASE;
1723                 mem_win2_base = MEMWIN2_BASE_T5;
1724                 mem_win2_aperture = MEMWIN2_APERTURE_T5;
1725         }
1726 
1727         t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
1728             mem_win0_base | V_BIR(0) |
1729             V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
1730 
1731         t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
1732             mem_win1_base | V_BIR(0) |
1733             V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
1734 
1735         t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
1736             mem_win2_base | V_BIR(0) |
1737             V_WINDOW(ilog2(mem_win2_aperture) - 10));
1738 
1739         /* flush */
        (void) t4_read_reg(sc,
            PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
1741 }
1742 
1743 /*
1744  * Positions the memory window such that it can be used to access the specified
1745  * address in the chip's address space.  The return value is the offset of addr
1746  * from the start of the window.
1747  */
1748 uint32_t
1749 position_memwin(struct adapter *sc, int n, uint32_t addr)
1750 {
1751         uint32_t start, pf;
1752         uint32_t reg;
1753 
1754         if (addr & 3) {
1755                 cxgb_printf(sc->dip, CE_WARN,
1756                     "addr (0x%x) is not at a 4B boundary.\n", addr);
1757                 return (EFAULT);
1758         }
1759 
1760         if (is_t4(sc->params.chip)) {
1761                 pf = 0;
1762                 start = addr & ~0xf;    /* start must be 16B aligned */
1763         } else {
1764                 pf = V_PFNUM(sc->pf);
1765                 start = addr & ~0x7f;   /* start must be 128B aligned */
1766         }
1767         reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1768 
1769         t4_write_reg(sc, reg, start | pf);
1770         (void) t4_read_reg(sc, reg);
1771 
1772         return (addr - start);
1773 }
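
/*
 * Typical usage (a sketch, not a verbatim excerpt): to read one 32-bit word
 * at chip address "addr" through memory window 2, assuming mw_base was
 * obtained from memwin_info():
 *
 *      off = position_memwin(sc, 2, addr);
 *      val = t4_read_reg(sc, mw_base + off);
 *
 * upload_config_file() uses the same pattern for its writes.
 */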
1776 /*
1777  * Reads the named property and fills up the "data" array (which has at least
1778  * "count" elements).  We first try and lookup the property for our dev_t and
1779  * then retry with DDI_DEV_T_ANY if it's not found.
1780  *
1781  * Returns non-zero if the property was found and "data" has been updated.
1782  */
1783 static int
1784 prop_lookup_int_array(struct adapter *sc, char *name, int *data, uint_t count)
1785 {
1786         dev_info_t *dip = sc->dip;
1787         dev_t dev = sc->dev;
1788         int rc, *d;
1789         uint_t i, n;
1790 
1791         rc = ddi_prop_lookup_int_array(dev, dip, DDI_PROP_DONTPASS,
1792             name, &d, &n);
1793         if (rc == DDI_PROP_SUCCESS)
1794                 goto found;
1795 
1796         if (rc != DDI_PROP_NOT_FOUND) {
1797                 cxgb_printf(dip, CE_WARN,
1798                     "failed to lookup property %s for minor %d: %d.",
1799                     name, getminor(dev), rc);
1800                 return (0);
1801         }
1802 
1803         rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1804             name, &d, &n);
1805         if (rc == DDI_PROP_SUCCESS)
1806                 goto found;
1807 
1808         if (rc != DDI_PROP_NOT_FOUND) {
1809                 cxgb_printf(dip, CE_WARN,
1810                     "failed to lookup property %s: %d.", name, rc);
1811                 return (0);
1812         }
1813 
1814         return (0);
1815 
1816 found:
1817         if (n > count) {
1818                 cxgb_printf(dip, CE_NOTE,
1819                     "property %s has too many elements (%d), ignoring extras",
1820                     name, n);
1821         }
1822 
1823         for (i = 0; i < n && i < count; i++)
1824                 data[i] = d[i];
1825         ddi_prop_free(d);
1826 
1827         return (1);
1828 }
1829 
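/*
 * Looks up the named integer property, first for our dev_t and then with
 * DDI_DEV_T_ANY, and returns "defval" if it isn't found at all.
 */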
1830 static int
1831 prop_lookup_int(struct adapter *sc, char *name, int defval)
1832 {
1833         int rc;
1834 
1835         rc = ddi_prop_get_int(sc->dev, sc->dip, DDI_PROP_DONTPASS, name, -1);
1836         if (rc != -1)
1837                 return (rc);
1838 
1839         return (ddi_prop_get_int(DDI_DEV_T_ANY, sc->dip, DDI_PROP_DONTPASS,
1840             name, defval));
1841 }
1842 
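/*
 * Establish the driver's tunables from driver.conf (where present) or
 * built-in defaults, and publish the values actually in use back onto the
 * nexus node as properties.
 */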
1843 static int
1844 init_driver_props(struct adapter *sc, struct driver_properties *p)
1845 {
1846         dev_t dev = sc->dev;
1847         dev_info_t *dip = sc->dip;
1848         int i, *data;
1849         uint_t tmr[SGE_NTIMERS] = {5, 10, 20, 50, 100, 200};
1850         uint_t cnt[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
1851 
1852         /*
1853          * Holdoff timer
1854          */
1855         data = &p->timer_val[0];
1856         for (i = 0; i < SGE_NTIMERS; i++)
1857                 data[i] = tmr[i];
1858         (void) prop_lookup_int_array(sc, "holdoff-timer-values", data,
1859             SGE_NTIMERS);
1860         for (i = 0; i < SGE_NTIMERS; i++) {
                int limit = 200;
1862                 if (data[i] > limit) {
1863                         cxgb_printf(dip, CE_WARN,
1864                             "holdoff timer %d is too high (%d), lowered to %d.",
1865                             i, data[i], limit);
1866                         data[i] = limit;
1867                 }
1868         }
1869         (void) ddi_prop_update_int_array(dev, dip, "holdoff-timer-values",
1870             data, SGE_NTIMERS);
1871 
1872         /*
1873          * Holdoff packet counter
1874          */
1875         data = &p->counter_val[0];
1876         for (i = 0; i < SGE_NCOUNTERS; i++)
1877                 data[i] = cnt[i];
1878         (void) prop_lookup_int_array(sc, "holdoff-pkt-counter-values", data,
1879             SGE_NCOUNTERS);
1880         for (i = 0; i < SGE_NCOUNTERS; i++) {
1881                 int limit = M_THRESHOLD_0;
1882                 if (data[i] > limit) {
1883                         cxgb_printf(dip, CE_WARN,
1884                             "holdoff pkt-counter %d is too high (%d), "
1885                             "lowered to %d.", i, data[i], limit);
1886                         data[i] = limit;
1887                 }
1888         }
1889         (void) ddi_prop_update_int_array(dev, dip, "holdoff-pkt-counter-values",
1890             data, SGE_NCOUNTERS);
1891 
1892         /*
         * Maximum # of tx and rx queues to use for each
         * 100G, 40G, 25G, 10G and 1G port.
1895          */
1896         p->max_ntxq_10g = prop_lookup_int(sc, "max-ntxq-10G-port", 8);
1897         (void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port",
1898             p->max_ntxq_10g);
1899 
1900         p->max_nrxq_10g = prop_lookup_int(sc, "max-nrxq-10G-port", 8);
1901         (void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port",
1902             p->max_nrxq_10g);
1903 
1904         p->max_ntxq_1g = prop_lookup_int(sc, "max-ntxq-1G-port", 2);
1905         (void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port",
1906             p->max_ntxq_1g);
1907 
1908         p->max_nrxq_1g = prop_lookup_int(sc, "max-nrxq-1G-port", 2);
1909         (void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port",
1910             p->max_nrxq_1g);
1911 
1912 #ifdef TCP_OFFLOAD_ENABLE
        p->max_nofldtxq_10g = prop_lookup_int(sc, "max-nofldtxq-10G-port", 8);
        (void) ddi_prop_update_int(dev, dip, "max-nofldtxq-10G-port",
            p->max_nofldtxq_10g);

        p->max_nofldrxq_10g = prop_lookup_int(sc, "max-nofldrxq-10G-port", 2);
        (void) ddi_prop_update_int(dev, dip, "max-nofldrxq-10G-port",
            p->max_nofldrxq_10g);

        p->max_nofldtxq_1g = prop_lookup_int(sc, "max-nofldtxq-1G-port", 2);
        (void) ddi_prop_update_int(dev, dip, "max-nofldtxq-1G-port",
            p->max_nofldtxq_1g);

        p->max_nofldrxq_1g = prop_lookup_int(sc, "max-nofldrxq-1G-port", 1);
        (void) ddi_prop_update_int(dev, dip, "max-nofldrxq-1G-port",
            p->max_nofldrxq_1g);
1928 #endif
1929 
1930         /*
1931          * Holdoff parameters for 10G and 1G ports.
1932          */
1933         p->tmr_idx_10g = prop_lookup_int(sc, "holdoff-timer-idx-10G", 0);
1934         (void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-10G",
1935             p->tmr_idx_10g);
1936 
1937         p->pktc_idx_10g = prop_lookup_int(sc, "holdoff-pktc-idx-10G", 2);
1938         (void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-10G",
1939             p->pktc_idx_10g);
1940 
1941         p->tmr_idx_1g = prop_lookup_int(sc, "holdoff-timer-idx-1G", 0);
1942         (void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-1G",
1943             p->tmr_idx_1g);
1944 
1945         p->pktc_idx_1g = prop_lookup_int(sc, "holdoff-pktc-idx-1G", 2);
1946         (void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-1G",
1947             p->pktc_idx_1g);
1948 
1949         /*
1950          * Size (number of entries) of each tx and rx queue.
1951          */
1952         i = prop_lookup_int(sc, "qsize-txq", TX_EQ_QSIZE);
1953         p->qsize_txq = max(i, 128);
1954         if (p->qsize_txq != i) {
1955                 cxgb_printf(dip, CE_WARN,
1956                     "using %d instead of %d as the tx queue size",
1957                     p->qsize_txq, i);
1958         }
1959         (void) ddi_prop_update_int(dev, dip, "qsize-txq", p->qsize_txq);
1960 
1961         i = prop_lookup_int(sc, "qsize-rxq", RX_IQ_QSIZE);
1962         p->qsize_rxq = max(i, 128);
1963         while (p->qsize_rxq & 7)
1964                 p->qsize_rxq--;
1965         if (p->qsize_rxq != i) {
1966                 cxgb_printf(dip, CE_WARN,
1967                     "using %d instead of %d as the rx queue size",
1968                     p->qsize_rxq, i);
1969         }
1970         (void) ddi_prop_update_int(dev, dip, "qsize-rxq", p->qsize_rxq);
1971 
1972         /*
1973          * Interrupt types allowed.
1974          * Bits 0, 1, 2 = INTx, MSI, MSI-X respectively.  See sys/ddi_intr.h
1975          */
1976         p->intr_types = prop_lookup_int(sc, "interrupt-types",
1977             DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED);
1978         (void) ddi_prop_update_int(dev, dip, "interrupt-types", p->intr_types);
1979 
1980         /*
1981          * Forwarded interrupt queues.  Create this property to force the driver
1982          * to use forwarded interrupt queues.
1983          */
1984         if (ddi_prop_exists(dev, dip, DDI_PROP_DONTPASS,
1985             "interrupt-forwarding") != 0 ||
1986             ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1987             "interrupt-forwarding") != 0) {
1988                 UNIMPLEMENTED();
1989                 (void) ddi_prop_create(dev, dip, DDI_PROP_CANSLEEP,
1990                     "interrupt-forwarding", NULL, 0);
1991         }
1992 
1993         /*
1994          * Write combining
1995          * 0 to disable, 1 to enable
1996          */
1997         p->wc = prop_lookup_int(sc, "write-combine", 1);
        cxgb_printf(dip, CE_NOTE, "write-combine: using %d", p->wc);
1999         if (p->wc != 0 && p->wc != 1) {
2000                 cxgb_printf(dip, CE_WARN,
2001                     "write-combine: using 1 instead of %d", p->wc);
2002                 p->wc = 1;
2003         }
2004         (void) ddi_prop_update_int(dev, dip, "write-combine", p->wc);
2005 
2006         p->t4_fw_install = prop_lookup_int(sc, "t4_fw_install", 1);
2007         if (p->t4_fw_install != 0 && p->t4_fw_install != 2)
2008                 p->t4_fw_install = 1;
2009         (void) ddi_prop_update_int(dev, dip, "t4_fw_install", p->t4_fw_install);
2010 
2011         /* Multiple Rings */
2012         p->multi_rings = prop_lookup_int(sc, "multi-rings", 1);
2013         if (p->multi_rings != 0 && p->multi_rings != 1) {
                cxgb_printf(dip, CE_NOTE,
                    "multi-rings: using value 1 instead of %d",
                    p->multi_rings);
2016                 p->multi_rings = 1;
2017         }
2018 
2019         (void) ddi_prop_update_int(dev, dip, "multi-rings", p->multi_rings);
2020 
2021         return (0);
2022 }
2023 
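/*
 * Remove the properties that don't apply to this adapter, e.g. the 10G
 * tunables on an adapter without any 10G ports.
 */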
2024 static int
2025 remove_extra_props(struct adapter *sc, int n10g, int n1g)
2026 {
2027         if (n10g == 0) {
2028                 (void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-10G-port");
2029                 (void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-10G-port");
2030                 (void) ddi_prop_remove(sc->dev, sc->dip,
2031                     "holdoff-timer-idx-10G");
2032                 (void) ddi_prop_remove(sc->dev, sc->dip,
2033                     "holdoff-pktc-idx-10G");
2034         }
2035 
2036         if (n1g == 0) {
2037                 (void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-1G-port");
2038                 (void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-1G-port");
2039                 (void) ddi_prop_remove(sc->dev, sc->dip,
2040                     "holdoff-timer-idx-1G");
2041                 (void) ddi_prop_remove(sc->dev, sc->dip, "holdoff-pktc-idx-1G");
2042         }
2043 
2044         return (0);
2045 }
2046 
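/*
 * Select an interrupt type (MSI-X, MSI, or fixed, in that order of
 * preference) and figure out how many vectors and rx queues of each kind
 * can be supported with it, downsizing the queue counts and falling back to
 * forwarded interrupts as the supply of vectors shrinks.
 */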
2047 static int
2048 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
2049     struct intrs_and_queues *iaq)
2050 {
2051         struct driver_properties *p = &sc->props;
2052         int rc, itype, itypes, navail, nc, nrxq10g, nrxq1g, n;
2053         int nofldrxq10g = 0, nofldrxq1g = 0;
2054 
2055         bzero(iaq, sizeof (*iaq));
2056         nc = ncpus;     /* our snapshot of the number of CPUs */
2057         iaq->ntxq10g = min(nc, p->max_ntxq_10g);
2058         iaq->ntxq1g = min(nc, p->max_ntxq_1g);
2059         iaq->nrxq10g = nrxq10g = min(nc, p->max_nrxq_10g);
2060         iaq->nrxq1g = nrxq1g = min(nc, p->max_nrxq_1g);
2061 #ifdef TCP_OFFLOAD_ENABLE
2062         iaq->nofldtxq10g = min(nc, p->max_nofldtxq_10g);
2063         iaq->nofldtxq1g = min(nc, p->max_nofldtxq_1g);
2064         iaq->nofldrxq10g = nofldrxq10g = min(nc, p->max_nofldrxq_10g);
2065         iaq->nofldrxq1g = nofldrxq1g = min(nc, p->max_nofldrxq_1g);
2066 #endif
2067 
2068         rc = ddi_intr_get_supported_types(sc->dip, &itypes);
2069         if (rc != DDI_SUCCESS) {
2070                 cxgb_printf(sc->dip, CE_WARN,
2071                     "failed to determine supported interrupt types: %d", rc);
2072                 return (rc);
2073         }
2074 
2075         for (itype = DDI_INTR_TYPE_MSIX; itype; itype >>= 1) {
2076                 ASSERT(itype == DDI_INTR_TYPE_MSIX ||
2077                     itype == DDI_INTR_TYPE_MSI ||
2078                     itype == DDI_INTR_TYPE_FIXED);
2079 
2080                 if ((itype & itypes & p->intr_types) == 0)
2081                         continue;       /* not supported or not allowed */
2082 
2083                 navail = 0;
2084                 rc = ddi_intr_get_navail(sc->dip, itype, &navail);
2085                 if (rc != DDI_SUCCESS || navail == 0) {
2086                         cxgb_printf(sc->dip, CE_WARN,
2087                             "failed to get # of interrupts for type %d: %d",
2088                             itype, rc);
2089                         continue;       /* carry on */
2090                 }
2091 
2092                 iaq->intr_type = itype;
2095 
2096                 /*
2097                  * Best option: an interrupt vector for errors, one for the
2098                  * firmware event queue, and one each for each rxq (NIC as well
2099                  * as offload).
2100                  */
2101                 iaq->nirq = T4_EXTRA_INTR;
2102                 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
2103                 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
2104 
2105                 if (iaq->nirq <= navail &&
2106                     (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
2107                         iaq->intr_fwd = 0;
2108                         goto allocate;
2109                 }
2110 
2111                 /*
2112                  * Second best option: an interrupt vector for errors, one for
2113                  * the firmware event queue, and one each for either NIC or
2114                  * offload rxq's.
2115                  */
2116                 iaq->nirq = T4_EXTRA_INTR;
2117                 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
2118                 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
2119                 if (iaq->nirq <= navail &&
2120                     (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
2121                         iaq->intr_fwd = 1;
2122                         goto allocate;
2123                 }
2124 
2125                 /*
2126                  * Next best option: an interrupt vector for errors, one for the
2127                  * firmware event queue, and at least one per port.  At this
2128                  * point we know we'll have to downsize nrxq or nofldrxq to fit
2129                  * what's available to us.
2130                  */
2131                 iaq->nirq = T4_EXTRA_INTR;
2132                 iaq->nirq += n10g + n1g;
2133                 if (iaq->nirq <= navail) {
2134                         int leftover = navail - iaq->nirq;
2135 
2136                         if (n10g > 0) {
2137                                 int target = max(nrxq10g, nofldrxq10g);
2138 
2139                                 n = 1;
2140                                 while (n < target && leftover >= n10g) {
2141                                         leftover -= n10g;
2142                                         iaq->nirq += n10g;
2143                                         n++;
2144                                 }
2145                                 iaq->nrxq10g = min(n, nrxq10g);
2146 #ifdef TCP_OFFLOAD_ENABLE
2147                                 iaq->nofldrxq10g = min(n, nofldrxq10g);
2148 #endif
2149                         }
2150 
2151                         if (n1g > 0) {
2152                                 int target = max(nrxq1g, nofldrxq1g);
2153 
2154                                 n = 1;
2155                                 while (n < target && leftover >= n1g) {
2156                                         leftover -= n1g;
2157                                         iaq->nirq += n1g;
2158                                         n++;
2159                                 }
2160                                 iaq->nrxq1g = min(n, nrxq1g);
2161 #ifdef TCP_OFFLOAD_ENABLE
2162                                 iaq->nofldrxq1g = min(n, nofldrxq1g);
2163 #endif
2164                         }
2165 
                        /*
                         * We have arrived at the minimum number of vectors
                         * required to enable a per-queue interrupt (either
                         * NIC or offload).  In the non-offload case each
                         * queue gets its own vector; in the offload case a
                         * vector is shared by an offload/NIC queue pair.
                         * Hence enable interrupt forwarding only for the
                         * offload case.
                         */
2173 #ifdef TCP_OFFLOAD_ENABLE
2174                         if (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq)) {
2175                                 iaq->intr_fwd = 1;
2176 #else
2177                         if (itype != DDI_INTR_TYPE_MSI) {
2178 #endif
2179                                 goto allocate;
2180                         }
2181                 }
2182 
2183                 /*
2184                  * Least desirable option: one interrupt vector for everything.
2185                  */
2186                 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2187 #ifdef TCP_OFFLOAD_ENABLE
2188                 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
2189 #endif
2190                 iaq->intr_fwd = 1;
2191 
2192 allocate:
2193                 return (0);
2194         }
2195 
2196         cxgb_printf(sc->dip, CE_WARN,
2197             "failed to find a usable interrupt type.  supported=%d, allowed=%d",
2198             itypes, p->intr_types);
2199         return (DDI_FAILURE);
2200 }
2201 
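/*
 * Allocate a child dev_info node for the given port and bind a driver to
 * it.  Idempotent: returns 0 if the node already exists.
 */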
2202 static int
2203 add_child_node(struct adapter *sc, int idx)
2204 {
2205         int rc;
2206         struct port_info *pi;
2207 
2208         if (idx < 0 || idx >= sc->params.nports)
2209                 return (EINVAL);
2210 
2211         pi = sc->port[idx];
2212         if (pi == NULL)
2213                 return (ENODEV);        /* t4_port_init failed earlier */
2214 
2215         PORT_LOCK(pi);
2216         if (pi->dip != NULL) {
2217                 rc = 0;         /* EEXIST really, but then bus_config fails */
2218                 goto done;
2219         }
2220 
2221         rc = ndi_devi_alloc(sc->dip, T4_PORT_NAME, DEVI_SID_NODEID, &pi->dip);
2222         if (rc != DDI_SUCCESS || pi->dip == NULL) {
2223                 rc = ENOMEM;
2224                 goto done;
2225         }
2226 
2227         (void) ddi_set_parent_data(pi->dip, pi);
2228         (void) ndi_devi_bind_driver(pi->dip, 0);
2229         rc = 0;
2230 done:
2231         PORT_UNLOCK(pi);
2232         return (rc);
2233 }
2234 
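/*
 * Free the given port's child dev_info node, if it has one.
 */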
2235 static int
2236 remove_child_node(struct adapter *sc, int idx)
2237 {
2238         int rc;
2239         struct port_info *pi;
2240 
2241         if (idx < 0 || idx >= sc->params.nports)
2242                 return (EINVAL);
2243 
2244         pi = sc->port[idx];
2245         if (pi == NULL)
2246                 return (ENODEV);
2247 
2248         PORT_LOCK(pi);
2249         if (pi->dip == NULL) {
2250                 rc = ENODEV;
2251                 goto done;
2252         }
2253 
2254         rc = ndi_devi_free(pi->dip);
2255         if (rc == 0)
2256                 pi->dip = NULL;
2257 done:
2258         PORT_UNLOCK(pi);
2259         return (rc);
2260 }
2261 
2262 #define KS_UINIT(x)     kstat_named_init(&kstatp->x, #x, KSTAT_DATA_ULONG)
2263 #define KS_CINIT(x)     kstat_named_init(&kstatp->x, #x, KSTAT_DATA_CHAR)
2264 #define KS_U_SET(x, y)  kstatp->x.value.ul = (y)
2265 #define KS_C_SET(x, ...)        \
                        (void) snprintf(kstatp->x.value.c, 16, __VA_ARGS__)
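
/*
 * For example, KS_CINIT(fw_vers) declares a character-valued kstat named
 * "fw_vers", and KS_C_SET(fw_vers, "%d.%d.%d.%d", ...) later fills in its
 * (at most 16 byte) value.
 */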
2267 
2268 /*
2269  * t4nex:X:config
2270  */
2271 struct t4_kstats {
2272         kstat_named_t chip_ver;
2273         kstat_named_t fw_vers;
2274         kstat_named_t tp_vers;
2275         kstat_named_t driver_version;
2276         kstat_named_t serial_number;
2277         kstat_named_t ec_level;
2278         kstat_named_t id;
2279         kstat_named_t bus_type;
2280         kstat_named_t bus_width;
2281         kstat_named_t bus_speed;
2282         kstat_named_t core_clock;
2283         kstat_named_t port_cnt;
2284         kstat_named_t port_type;
2285         kstat_named_t pci_vendor_id;
2286         kstat_named_t pci_device_id;
2287 };
2288 static kstat_t *
2289 setup_kstats(struct adapter *sc)
2290 {
2291         kstat_t *ksp;
2292         struct t4_kstats *kstatp;
2293         int ndata;
2294         struct pci_params *p = &sc->params.pci;
2295         struct vpd_params *v = &sc->params.vpd;
2296         uint16_t pci_vendor, pci_device;
2297 
2298         ndata = sizeof (struct t4_kstats) / sizeof (kstat_named_t);
2299 
2300         ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "config",
2301             "nexus", KSTAT_TYPE_NAMED, ndata, 0);
2302         if (ksp == NULL) {
2303                 cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
2304                 return (NULL);
2305         }
2306 
2307         kstatp = (struct t4_kstats *)ksp->ks_data;
2308 
2309         KS_UINIT(chip_ver);
2310         KS_CINIT(fw_vers);
2311         KS_CINIT(tp_vers);
2312         KS_CINIT(driver_version);
2313         KS_CINIT(serial_number);
2314         KS_CINIT(ec_level);
2315         KS_CINIT(id);
2316         KS_CINIT(bus_type);
2317         KS_CINIT(bus_width);
2318         KS_CINIT(bus_speed);
2319         KS_UINIT(core_clock);
2320         KS_UINIT(port_cnt);
2321         KS_CINIT(port_type);
2322         KS_CINIT(pci_vendor_id);
2323         KS_CINIT(pci_device_id);
2324 
2325         KS_U_SET(chip_ver, sc->params.chip);
2326         KS_C_SET(fw_vers, "%d.%d.%d.%d",
2327             G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2328             G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2329             G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2330             G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2331         KS_C_SET(tp_vers, "%d.%d.%d.%d",
2332             G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
2333             G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
2334             G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
2335             G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
2336         KS_C_SET(driver_version, DRV_VERSION);
2337         KS_C_SET(serial_number, "%s", v->sn);
2338         KS_C_SET(ec_level, "%s", v->ec);
2339         KS_C_SET(id, "%s", v->id);
2340         KS_C_SET(bus_type, "pci-express");
2341         KS_C_SET(bus_width, "x%d lanes", p->width);
2342         KS_C_SET(bus_speed, "%d", p->speed);
2343         KS_U_SET(core_clock, v->cclk);
2344         KS_U_SET(port_cnt, sc->params.nports);
2345 
2346         t4_os_pci_read_cfg2(sc, PCI_CONF_VENID, &pci_vendor);
2347         KS_C_SET(pci_vendor_id, "0x%x", pci_vendor);
2348 
2349         t4_os_pci_read_cfg2(sc, PCI_CONF_DEVID, &pci_device);
2350         KS_C_SET(pci_device_id, "0x%x", pci_device);
2351 
        KS_C_SET(port_type, "%s/%s/%s/%s",
            print_port_speed(sc->port[0]),
            print_port_speed(sc->port[1]),
            print_port_speed(sc->port[2]),
            print_port_speed(sc->port[3]));
2357 
2358         /* Do NOT set ksp->ks_update.  These kstats do not change. */
2359 
2360         /* Install the kstat */
2361         ksp->ks_private = (void *)sc;
2362         kstat_install(ksp);
2363 
2364         return (ksp);
2365 }
2366 
2367 /*
2368  * t4nex:X:stat
2369  */
2370 struct t4_wc_kstats {
2371         kstat_named_t write_coal_success;
2372         kstat_named_t write_coal_failure;
2373 };
2374 static kstat_t *
2375 setup_wc_kstats(struct adapter *sc)
2376 {
2377         kstat_t *ksp;
2378         struct t4_wc_kstats *kstatp;
2379         int ndata;
2380 
        ndata = sizeof (struct t4_wc_kstats) / sizeof (kstat_named_t);
2382         ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "stats",
2383             "nexus", KSTAT_TYPE_NAMED, ndata, 0);
2384         if (ksp == NULL) {
2385                 cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
2386                 return (NULL);
2387         }
2388 
2389         kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2390 
2391         KS_UINIT(write_coal_success);
2392         KS_UINIT(write_coal_failure);
2393 
2394         ksp->ks_update = update_wc_kstats;
2395         /* Install the kstat */
2396         ksp->ks_private = (void *)sc;
2397         kstat_install(ksp);
2398 
2399         return (ksp);
2400 }
2401 
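/*
 * kstat update callback for the write-combining stats.  T5 keeps the counts
 * in SGE registers; other chips simply report zero.
 */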
2402 static int
2403 update_wc_kstats(kstat_t *ksp, int rw)
2404 {
2405         struct t4_wc_kstats *kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2406         struct adapter *sc = ksp->ks_private;
2407         uint32_t wc_total, wc_success, wc_failure;
2408 
2409         if (rw == KSTAT_WRITE)
2410                 return (0);
2411 
2412         if (is_t5(sc->params.chip)) {
2413                 wc_total = t4_read_reg(sc, A_SGE_STAT_TOTAL);
2414                 wc_failure = t4_read_reg(sc, A_SGE_STAT_MATCH);
2415                 wc_success = wc_total - wc_failure;
2416         } else {
2417                 wc_success = 0;
2418                 wc_failure = 0;
2419         }
2420 
2421         KS_U_SET(write_coal_success, wc_success);
2422         KS_U_SET(write_coal_failure, wc_failure);
2423 
2424         return (0);
2425 }
2426 
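/*
 * Set up the adapter-wide queues and enable interrupts, leaving the adapter
 * fully initialized.
 */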
2427 int
2428 adapter_full_init(struct adapter *sc)
2429 {
2430         int i, rc = 0;
2431 
2432         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2433 
2434         rc = t4_setup_adapter_queues(sc);
2435         if (rc != 0)
2436                 goto done;
2437 
2438         if (sc->intr_cap & DDI_INTR_FLAG_BLOCK)
2439                 (void) ddi_intr_block_enable(sc->intr_handle, sc->intr_count);
2440         else {
2441                 for (i = 0; i < sc->intr_count; i++)
2442                         (void) ddi_intr_enable(sc->intr_handle[i]);
2443         }
2444         t4_intr_enable(sc);
2445         sc->flags |= FULL_INIT_DONE;
2446 
2447 #ifdef TCP_OFFLOAD_ENABLE
2448         /* TODO: wrong place to enable TOE capability */
2449         if (is_offload(sc) != 0) {
2450                 for_each_port(sc, i) {
2451                         struct port_info *pi = sc->port[i];
2452                         rc = toe_capability(pi, 1);
2453                         if (rc != 0) {
2454                                 cxgb_printf(pi->dip, CE_WARN,
2455                                     "Failed to activate toe capability: %d",
2456                                     rc);
2457                                 rc = 0;         /* not a fatal error */
2458                         }
2459                 }
2460         }
2461 #endif
2462 
2463 done:
2464         if (rc != 0)
2465                 (void) adapter_full_uninit(sc);
2466 
2467         return (rc);
2468 }
2469 
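/*
 * Undo adapter_full_init: disable interrupts and tear down the adapter-wide
 * queues.
 */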
2470 int
2471 adapter_full_uninit(struct adapter *sc)
2472 {
2473         int i, rc = 0;
2474 
2475         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2476 
2477         if (sc->intr_cap & DDI_INTR_FLAG_BLOCK)
2478                 (void) ddi_intr_block_disable(sc->intr_handle, sc->intr_count);
2479         else {
2480                 for (i = 0; i < sc->intr_count; i++)
2481                         (void) ddi_intr_disable(sc->intr_handle[i]);
2482         }
2483 
2484         rc = t4_teardown_adapter_queues(sc);
2485         if (rc != 0)
2486                 return (rc);
2487 
2488         sc->flags &= ~FULL_INIT_DONE;
2489 
2490         return (0);
2491 }
2492 
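/*
 * Allocate this port's tx/rx/fl queues and program its RSS table.
 */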
2493 int
2494 port_full_init(struct port_info *pi)
2495 {
2496         struct adapter *sc = pi->adapter;
2497         uint16_t *rss;
2498         struct sge_rxq *rxq;
2499         int rc, i;
2500 
2501         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2502         ASSERT((pi->flags & PORT_INIT_DONE) == 0);
2503 
2504         /*
2505          * Allocate tx/rx/fl queues for this port.
2506          */
2507         rc = t4_setup_port_queues(pi);
2508         if (rc != 0)
2509                 goto done;      /* error message displayed already */
2510 
2511         /*
2512          * Setup RSS for this port.
2513          */
2514         rss = kmem_zalloc(pi->nrxq * sizeof (*rss), KM_SLEEP);
2515         for_each_rxq(pi, i, rxq) {
2516                 rss[i] = rxq->iq.abs_id;
2517         }
2518         rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
2519             pi->rss_size, rss, pi->nrxq);
2520         kmem_free(rss, pi->nrxq * sizeof (*rss));
2521         if (rc != 0) {
2522                 cxgb_printf(pi->dip, CE_WARN, "rss_config failed: %d", rc);
2523                 goto done;
2524         }
2525 
2526         pi->flags |= PORT_INIT_DONE;
2527 done:
2528         if (rc != 0)
2529                 (void) port_full_uninit(pi);
2530 
2531         return (rc);
2532 }
2533 
2534 /*
2535  * Idempotent.
2536  */
2537 int
2538 port_full_uninit(struct port_info *pi)
2539 {
2541         ASSERT(pi->flags & PORT_INIT_DONE);
2542 
2543         (void) t4_teardown_port_queues(pi);
2544         pi->flags &= ~PORT_INIT_DONE;
2545 
2546         return (0);
2547 }
2548 
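/*
 * Re-arm all of the port's ingress queues after disable_port_queues.
 */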
2549 void
2550 enable_port_queues(struct port_info *pi)
2551 {
2552         struct adapter *sc = pi->adapter;
2553         int i;
2554         struct sge_iq *iq;
2555         struct sge_rxq *rxq;
2556 #ifdef TCP_OFFLOAD_ENABLE
2557         struct sge_ofld_rxq *ofld_rxq;
2558 #endif
2559 
2560         ASSERT(pi->flags & PORT_INIT_DONE);
2561 
2562         /*
2563          * TODO: whatever was queued up after we set iq->state to IQS_DISABLED
2564          * back in disable_port_queues will be processed now, after an unbounded
2565          * delay.  This can't be good.
2566          */
2567 
2568 #ifdef TCP_OFFLOAD_ENABLE
2569         for_each_ofld_rxq(pi, i, ofld_rxq) {
2570                 iq = &ofld_rxq->iq;
2571                 if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
2572                     IQS_DISABLED)
2573                         panic("%s: iq %p wasn't disabled", __func__,
2574                             (void *)iq);
2575                 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
2576                     V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
2577         }
2578 #endif
2579 
2580         for_each_rxq(pi, i, rxq) {
2581                 iq = &rxq->iq;
2582                 if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
2583                     IQS_DISABLED)
2584                         panic("%s: iq %p wasn't disabled", __func__,
                            (void *)iq);
2586                 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
2587                     V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
2588         }
2589 }
2590 
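/*
 * Quiesce the port's ingress queues: wait for each iq to go idle, then mark
 * its free list doomed so that it is no longer serviced.
 */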
2591 void
2592 disable_port_queues(struct port_info *pi)
2593 {
2594         int i;
2595         struct adapter *sc = pi->adapter;
2596         struct sge_rxq *rxq;
2597 #ifdef TCP_OFFLOAD_ENABLE
2598         struct sge_ofld_rxq *ofld_rxq;
2599 #endif
2600 
2601         ASSERT(pi->flags & PORT_INIT_DONE);
2602 
2603         /*
2604          * TODO: need proper implementation for all tx queues (ctrl, eth, ofld).
2605          */
2606 
2607 #ifdef TCP_OFFLOAD_ENABLE
2608         for_each_ofld_rxq(pi, i, ofld_rxq) {
2609                 while (atomic_cas_uint(&ofld_rxq->iq.state, IQS_IDLE,
2610                     IQS_DISABLED) != IQS_IDLE)
2611                         msleep(1);
2612         }
2613 #endif
2614 
2615         for_each_rxq(pi, i, rxq) {
2616                 while (atomic_cas_uint(&rxq->iq.state, IQS_IDLE,
2617                     IQS_DISABLED) != IQS_IDLE)
2618                         msleep(1);
2619         }
2620 
2621         mutex_enter(&sc->sfl_lock);
2622 #ifdef TCP_OFFLOAD_ENABLE
2623         for_each_ofld_rxq(pi, i, ofld_rxq)
2624             ofld_rxq->fl.flags |= FL_DOOMED;
2625 #endif
2626         for_each_rxq(pi, i, rxq)
2627             rxq->fl.flags |= FL_DOOMED;
2628         mutex_exit(&sc->sfl_lock);
2629         /* TODO: need to wait for all fl's to be removed from sc->sfl */
2630 }
2631 
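/*
 * Stop the SGE and disable interrupts in response to a fatal hardware error.
 */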
2632 void
2633 t4_fatal_err(struct adapter *sc)
2634 {
2635         t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
2636         t4_intr_disable(sc);
2637         cxgb_printf(sc->dip, CE_WARN,
2638             "encountered fatal error, adapter stopped.");
2639 }
2640 
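/*
 * Walk the PCI capability list and return the offset of the requested
 * capability, or 0 if the device doesn't implement it.
 */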
2641 int
2642 t4_os_find_pci_capability(struct adapter *sc, int cap)
2643 {
2644         uint16_t stat;
2645         uint8_t cap_ptr, cap_id;
2646 
2647         t4_os_pci_read_cfg2(sc, PCI_CONF_STAT, &stat);
2648         if ((stat & PCI_STAT_CAP) == 0)
2649                 return (0); /* does not implement capabilities */
2650 
2651         t4_os_pci_read_cfg1(sc, PCI_CONF_CAP_PTR, &cap_ptr);
2652         while (cap_ptr) {
2653                 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_ID, &cap_id);
2654                 if (cap_id == cap)
2655                         return (cap_ptr); /* found */
2656                 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_NEXT_PTR, &cap_ptr);
2657         }
2658 
2659         return (0); /* not found */
2660 }
2661 
2662 void
2663 t4_os_portmod_changed(const struct adapter *sc, int idx)
2664 {
2665         static const char *mod_str[] = {
2666                 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
2667         };
2668         const struct port_info *pi = sc->port[idx];
2669 
2670         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
2671                 cxgb_printf(pi->dip, CE_NOTE, "transceiver unplugged.");
2672         else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
                cxgb_printf(pi->dip, CE_NOTE,
                    "unknown transceiver inserted.");
        else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
                cxgb_printf(pi->dip, CE_NOTE,
                    "unsupported transceiver inserted.");
        else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str))
                cxgb_printf(pi->dip, CE_NOTE, "%s transceiver inserted.",
                    mod_str[pi->mod_type]);
2681         else
2682                 cxgb_printf(pi->dip, CE_NOTE, "transceiver (type %d) inserted.",
2683                     pi->mod_type);
2684 }
2685 
2686 /* ARGSUSED */
2687 static int
2688 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m)
2689 {
2690         if (m != NULL)
2691                 freemsg(m);
2692         return (0);
2693 }
2694 
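/*
 * Install the handler for the given CPL opcode, or restore the default
 * handler (which just frees the message) when h is NULL.  The swap is
 * atomic so a handler can be replaced while rx processing is active.
 */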
2695 int
2696 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
2697 {
        cpl_handler_t *loc, new;
2699 
2700         if (opcode >= ARRAY_SIZE(sc->cpl_handler))
2701                 return (EINVAL);
2702 
        new = h != NULL ? h : cpl_not_handled;
        loc = &sc->cpl_handler[opcode];
        (void) atomic_swap_ptr(loc, (void *)new);
2706 
2707         return (0);
2708 }
2709 
2710 static int
2711 fw_msg_not_handled(struct adapter *sc, const __be64 *data)
2712 {
2713         struct cpl_fw6_msg *cpl = container_of(data, struct cpl_fw6_msg, data);
2714 
2715         cxgb_printf(sc->dip, CE_WARN, "%s fw_msg type %d", __func__, cpl->type);
2716         return (0);
2717 }
2718 
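/*
 * Install the handler for the given firmware message type, or restore the
 * default handler (which just logs the message type) when h is NULL.
 */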
2719 int
2720 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
2721 {
2722         fw_msg_handler_t *loc, new;
2723 
2724         if (type >= ARRAY_SIZE(sc->fw_msg_handler))
2725                 return (EINVAL);
2726 
2727         /*
2728          * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
2729          * handler dispatch table.  Reject any attempt to install a handler for
2730          * this subtype.
2731          */
2732         if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
2733                 return (EINVAL);
2734 
2735         new = h ? h : fw_msg_not_handled;
2736         loc = &sc->fw_msg_handler[type];
        (void) atomic_swap_ptr(loc, (void *)new);
2738 
2739         return (0);
2740 }
2741 
2742 #ifdef TCP_OFFLOAD_ENABLE
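/*
 * Enable or disable the TOE capability on a port.  The TOM upper layer
 * driver is activated the first time any port enables the capability and
 * deactivated when the last port disables it.
 */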
2743 static int
2744 toe_capability(struct port_info *pi, int enable)
2745 {
2746         int rc;
2747         struct adapter *sc = pi->adapter;
2748 
2749         if (!is_offload(sc))
2750                 return (ENODEV);
2751 
2752         if (enable != 0) {
2753                 if (isset(&sc->offload_map, pi->port_id) != 0)
2754                         return (0);
2755 
2756                 if (sc->offload_map == 0) {
2757                         rc = activate_uld(sc, ULD_TOM, &sc->tom);
2758                         if (rc != 0)
2759                                 return (rc);
2760                 }
2761 
2762                 setbit(&sc->offload_map, pi->port_id);
2763         } else {
2764                 if (!isset(&sc->offload_map, pi->port_id))
2765                         return (0);
2766 
2767                 clrbit(&sc->offload_map, pi->port_id);
2768 
2769                 if (sc->offload_map == 0) {
2770                         rc = deactivate_uld(&sc->tom);
2771                         if (rc != 0) {
2772                                 setbit(&sc->offload_map, pi->port_id);
2773                                 return (rc);
2774                         }
2775                 }
2776         }
2777 
2778         return (0);
2779 }
2780 
2781 /*
2782  * Add an upper layer driver to the global list.
2783  */
2784 int
2785 t4_register_uld(struct uld_info *ui)
2786 {
2787         int rc = 0;
2788         struct uld_info *u;
2789 
2790         mutex_enter(&t4_uld_list_lock);
2791         SLIST_FOREACH(u, &t4_uld_list, link) {
2792                 if (u->uld_id == ui->uld_id) {
2793                         rc = EEXIST;
2794                         goto done;
2795                 }
2796         }
2797 
2798         SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
2799         ui->refcount = 0;
2800 done:
2801         mutex_exit(&t4_uld_list_lock);
2802         return (rc);
2803 }
2804 
2805 int
2806 t4_unregister_uld(struct uld_info *ui)
2807 {
2808         int rc = EINVAL;
2809         struct uld_info *u;
2810 
2811         mutex_enter(&t4_uld_list_lock);
2812 
2813         SLIST_FOREACH(u, &t4_uld_list, link) {
2814                 if (u == ui) {
2815                         if (ui->refcount > 0) {
2816                                 rc = EBUSY;
2817                                 goto done;
2818                         }
2819 
2820                         SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
2821                         rc = 0;
2822                         goto done;
2823                 }
2824         }
2825 done:
2826         mutex_exit(&t4_uld_list_lock);
2827         return (rc);
2828 }
2829 
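/*
 * Attach the upper layer driver with the given id to this adapter.
 */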
2830 static int
2831 activate_uld(struct adapter *sc, int id, struct uld_softc *usc)
2832 {
2833         int rc = EAGAIN;
2834         struct uld_info *ui;
2835 
2836         mutex_enter(&t4_uld_list_lock);
2837 
2838         SLIST_FOREACH(ui, &t4_uld_list, link) {
2839                 if (ui->uld_id == id) {
2840                         rc = ui->attach(sc, &usc->softc);
2841                         if (rc == 0) {
2842                                 ASSERT(usc->softc != NULL);
2843                                 ui->refcount++;
2844                                 usc->uld = ui;
2845                         }
2846                         goto done;
2847                 }
2848         }
2849 done:
2850         mutex_exit(&t4_uld_list_lock);
2851 
2852         return (rc);
2853 }
2854 
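/*
 * Detach the upper layer driver previously attached with activate_uld.
 */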
2855 static int
2856 deactivate_uld(struct uld_softc *usc)
2857 {
2858         int rc;
2859 
2860         mutex_enter(&t4_uld_list_lock);
2861 
2862         if (usc->uld == NULL || usc->softc == NULL) {
2863                 rc = EINVAL;
2864                 goto done;
2865         }
2866 
2867         rc = usc->uld->detach(usc->softc);
2868         if (rc == 0) {
2869                 ASSERT(usc->uld->refcount > 0);
2870                 usc->uld->refcount--;
2871                 usc->uld = NULL;
2872                 usc->softc = NULL;
2873         }
2874 done:
2875         mutex_exit(&t4_uld_list_lock);
2876 
2877         return (rc);
2878 }
2879 
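/*
 * Invoke "func" on every adapter on the global list, passing it the
 * instance number and "arg".
 */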
2880 void
2881 t4_iterate(void (*func)(int, void *), void *arg)
2882 {
2883         struct adapter *sc;
2884 
2885         mutex_enter(&t4_adapter_list_lock);
2886         SLIST_FOREACH(sc, &t4_adapter_list, link) {
2887                 /*
2888                  * func should not make any assumptions about what state sc is
2889                  * in - the only guarantee is that sc->sc_lock is a valid lock.
2890                  */
2891                 func(ddi_get_instance(sc->dip), arg);
2892         }
2893         mutex_exit(&t4_adapter_list_lock);
2894 }
2895 
2896 #endif