1 /*
   2 * CDDL HEADER START
   3 *
   4 * The contents of this file are subject to the terms of the
   5 * Common Development and Distribution License, v.1,  (the "License").
   6 * You may not use this file except in compliance with the License.
   7 *
   8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9 * or http://opensource.org/licenses/CDDL-1.0.
  10 * See the License for the specific language governing permissions
  11 * and limitations under the License.
  12 *
  13 * When distributing Covered Code, include this CDDL HEADER in each
  14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15 * If applicable, add the following below this CDDL HEADER, with the
  16 * fields enclosed by brackets "[]" replaced with your own identifying
  17 * information: Portions Copyright [yyyy] [name of copyright owner]
  18 *
  19 * CDDL HEADER END
  20 */
  21 
  22 /*
  23 * Copyright 2014-2017 Cavium, Inc. 
  24 * The contents of this file are subject to the terms of the Common Development 
  25 * and Distribution License, v.1,  (the "License").
  26 
  27 * You may not use this file except in compliance with the License.
  28 
  29 * You can obtain a copy of the License at available 
  30 * at http://opensource.org/licenses/CDDL-1.0
  31 
  32 * See the License for the specific language governing permissions and 
  33 * limitations under the License.
  34 */
  35 
  36 
  37 #include "qede.h"
  38 
  39 ddi_device_acc_attr_t qede_regs_acc_attr = {                                    
  40         DDI_DEVICE_ATTR_V1,     // devacc_attr_version;                         
  41         DDI_STRUCTURE_LE_ACC,   // devacc_attr_endian_flags;                            
  42         DDI_STRICTORDER_ACC,    // devacc_attr_dataorder;                               
  43         DDI_FLAGERR_ACC         // devacc_attr_access;
  44 };
  45 
  46 ddi_device_acc_attr_t qede_desc_acc_attr = {
  47         DDI_DEVICE_ATTR_V0,    // devacc_attr_version;          
  48         DDI_STRUCTURE_LE_ACC,  // devacc_attr_endian_flags;
  49         DDI_STRICTORDER_ACC    // devacc_attr_dataorder;                
  50 };
  51 
  52 /*
  53  * DMA access attributes for BUFFERS.
  54  */
  55 ddi_device_acc_attr_t qede_buf_acc_attr = 
  56 {                                       
  57         DDI_DEVICE_ATTR_V0,   // devacc_attr_version;                                           
  58         DDI_NEVERSWAP_ACC,    // devacc_attr_endian_flags;                              
  59         DDI_STRICTORDER_ACC   // devacc_attr_dataorder;                                         
  60 };                                                                                                                              
  61 
  62 
  63 ddi_dma_attr_t qede_desc_dma_attr = 
  64 {
  65         DMA_ATTR_V0,
  66         0x0000000000000000ull,
  67         0xFFFFFFFFFFFFFFFFull,
  68         0x00000000FFFFFFFFull,
  69         QEDE_PAGE_ALIGNMENT,
  70         0x00000FFF,
  71         0x00000001,
  72         0x00000000FFFFFFFFull,
  73         0xFFFFFFFFFFFFFFFFull,
  74         1,
  75         0x00000001,
  76         DDI_DMA_FLAGERR
  77 };
  78 
  79 ddi_dma_attr_t qede_gen_buf_dma_attr = 
  80 {
  81         DMA_ATTR_V0,
  82         0x0000000000000000ull,
  83         0xFFFFFFFFFFFFFFFFull,
  84         0x00000000FFFFFFFFull,
  85         QEDE_PAGE_ALIGNMENT,
  86         0x00000FFF,
  87         0x00000001,
  88         0x00000000FFFFFFFFull,
  89         0xFFFFFFFFFFFFFFFFull,
  90         1,
  91         0x00000001,
  92         DDI_DMA_FLAGERR
  93 };
  94 
  95 /*
  96  * DMA attributes for transmit.
  97  */
  98 ddi_dma_attr_t qede_tx_buf_dma_attr = 
  99 {
 100         DMA_ATTR_V0,
 101         0x0000000000000000ull,
 102         0xFFFFFFFFFFFFFFFFull,
 103         0x00000000FFFFFFFFull,
 104         1,
 105         0x00000FFF,
 106         0x00000001,
 107         0x00000000FFFFFFFFull,
 108         0xFFFFFFFFFFFFFFFFull,
 109         ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1,
 110         0x00000001,
 111         DDI_DMA_FLAGERR
 112 };
 113 
 114 
 115 ddi_dma_attr_t qede_dma_attr_desc = 
 116 {
 117         DMA_ATTR_V0,            /* dma_attr_version */
 118         0,                      /* dma_attr_addr_lo */
 119         0xffffffffffffffffull,  /* dma_attr_addr_hi */
 120         0x000fffffull,          /* dma_attr_count_max */
 121         4096,                   /* dma_attr_align */
 122         0x000fffffull,          /* dma_attr_burstsizes */
 123         4,                      /* dma_attr_minxfer */
 124         0xffffffffull,          /* dma_attr_maxxfer */
 125         0xffffffffull,          /* dma_attr_seg */
 126         1,                      /* dma_attr_sgllen */
 127         1,                      /* dma_attr_granular */
 128         DDI_DMA_FLAGERR         /* dma_attr_flags */
 129 };
 130 
 131 static ddi_dma_attr_t qede_dma_attr_txbuf = 
 132 {
 133         DMA_ATTR_V0,            /* dma_attr_version */
 134         0,                      /* dma_attr_addr_lo */
 135         0xffffffffffffffffull,  /* dma_attr_addr_hi */
 136         0x00000000FFFFFFFFull,  /* dma_attr_count_max */
 137         QEDE_PAGE_ALIGNMENT, /* dma_attr_align */
 138         0xfff8ull,              /* dma_attr_burstsizes */
 139         1,                      /* dma_attr_minxfer */
 140         0xffffffffull,          /* dma_attr_maxxfer */
 141         0xFFFFFFFFFFFFFFFFull,  /* maximum segment size */
 142         1,                      /* dma_attr_sgllen */
 143         1,                      /* dma_attr_granular */
 144         0                       /* dma_attr_flags */
 145 };
 146 
 147 ddi_dma_attr_t qede_dma_attr_rxbuf = 
 148 {
 149         DMA_ATTR_V0,            /* dma_attr_version */
 150         0,                      /* dma_attr_addr_lo */
 151         0xffffffffffffffffull,  /* dma_attr_addr_hi */
 152         0x00000000FFFFFFFFull,  /* dma counter max */
 153         QEDE_PAGE_ALIGNMENT,    /* dma_attr_align */
 154         0xfff8ull,              /* dma_attr_burstsizes */
 155         1,                      /* dma_attr_minxfer */
 156         0xffffffffull,          /* dma_attr_maxxfer */
 157         0xFFFFFFFFFFFFFFFFull,  /* maximum segment size */
 158         1,                      /* dma_attr_sgllen */
 159         1,                      /* dma_attr_granular */
 160         DDI_DMA_RELAXED_ORDERING        /* dma_attr_flags */
 161 };
 162 
 163 /* LINTED E_STATIC_UNUSED */
 164 static ddi_dma_attr_t qede_dma_attr_cmddesc = 
 165 {
 166         DMA_ATTR_V0,            /* dma_attr_version */
 167         0,                      /* dma_attr_addr_lo */
 168         0xffffffffffffffffull,  /* dma_attr_addr_hi */
 169         0xffffffffull,          /* dma_attr_count_max */
 170         1,                      /* dma_attr_align */
 171         0xfff8ull,              /* dma_attr_burstsizes */
 172         1,                      /* dma_attr_minxfer */
 173         0xffffffff,             /* dma_attr_maxxfer */
 174         0xffffffff,             /* dma_attr_seg */
 175         ETH_TX_MAX_BDS_PER_NON_LSO_PACKET,      /* dma_attr_sgllen */
 176         1,                      /* dma_attr_granular */
 177         0                       /* dma_attr_flags */
 178 };
 179 
 180 
 181 
 182 /*
 183  * Generic dma attribute for single sg
 184  */
 185 /* LINTED E_STATIC_UNUSED */
 186 static ddi_dma_attr_t qede_gen_dma_attr_desc = 
 187 {
 188         DMA_ATTR_V0,            /* dma_attr_version */
 189         0,                      /* dma_attr_addr_lo */
 190         0xffffffffffffffffull,  /* dma_attr_addr_hi */
 191         0x000fffffull,          /* dma_attr_count_max */
 192         4096,                   /* dma_attr_align */
 193         0x000fffffull,          /* dma_attr_burstsizes */
 194         4,                      /* dma_attr_minxfer */
 195         0xffffffffull,          /* dma_attr_maxxfer */
 196         0xffffffffull,          /* dma_attr_seg */
 197         1,                      /* dma_attr_sgllen */
 198         1,                      /* dma_attr_granular */
 199         DDI_DMA_FLAGERR         /* dma_attr_flags */
 200 };
 201 
 202 ddi_dma_attr_t qede_buf2k_dma_attr_txbuf = 
 203 {
 204         DMA_ATTR_V0,            /* dma_attr_version */
 205         0,                      /* dma_attr_addr_lo */
 206         0xffffffffffffffffull,  /* dma_attr_addr_hi */
 207         0x00000000FFFFFFFFull,  /* dma_attr_count_max */
 208         BUF_2K_ALIGNMENT,       /* dma_attr_align */
 209         0xfff8ull,              /* dma_attr_burstsizes */
 210         1,                      /* dma_attr_minxfer */
 211         0xffffffffull,          /* dma_attr_maxxfer */
 212         0xFFFFFFFFFFFFFFFFull,  /* maximum segment size */
 213         1,                      /* dma_attr_sgllen */
 214         0x00000001,             /* dma_attr_granular */
 215         0                       /* dma_attr_flags */
 216 };
 217 
 218 char * 
 219 qede_get_ddi_fail(int status)
 220 {
 221         switch (status) {
 222         case DDI_FAILURE:
 223                 return ("DDI_FAILURE");
 224         case DDI_NOT_WELL_FORMED:
 225                 return ("DDI_NOT_WELL_FORMED");
 226         case DDI_EAGAIN:
 227                 return ("DDI_EAGAIN");
 228         case DDI_EINVAL:
 229                 return ("DDI_EINVAL");
 230         case DDI_ENOTSUP:
 231                 return ("DDI_ENOTSUP");
 232         case DDI_EPENDING:
 233                 return ("DDI_EPENDING");
 234         case DDI_EALREADY:
 235                 return ("DDI_EALREADY");
 236         case DDI_ENOMEM:
 237                 return ("DDI_ENOMEM");
 238         case DDI_EBUSY:
 239                 return ("DDI_EBUSY");
 240         case DDI_ETRANSPORT:
 241                 return ("DDI_ETRANSPORT");
 242         case DDI_ECONTEXT:
 243                 return ("DDI_ECONTEXT");
 244         default:
 245                 return ("ERROR CODE NOT FOUND!");
 246         }
 247 }
 248 
 249 char *
 250 qede_get_ecore_fail(int status)
 251 {
 252         switch (status) {
 253         case ECORE_UNKNOWN_ERROR:
 254                 return ("ECORE_UNKNOWN_ERROR");
 255         case ECORE_NORESOURCES:
 256                 return ("ECORE_NORESOURCES");
 257         case ECORE_NODEV:
 258                 return ("ECORE_NODEV");
 259         case ECORE_ABORTED:
 260                 return ("ECORE_ABORTED");
 261         case ECORE_AGAIN:
 262                 return ("ECORE_AGAIN");
 263         case ECORE_NOTIMPL:
 264                 return ("ECORE_NOTIMPL");
 265         case ECORE_EXISTS:
 266                 return ("ECORE_EXISTS");
 267         case ECORE_IO:
 268                 return ("ECORE_IO");
 269         case ECORE_TIMEOUT:
 270                 return ("ECORE_TIMEOUT");
 271         case ECORE_INVAL:
 272                 return ("ECORE_INVAL");
 273         case ECORE_BUSY:
 274                 return ("ECORE_BUSY");
 275         case ECORE_NOMEM:
 276                 return ("ECORE_NOMEM");
 277         case ECORE_SUCCESS:
 278                 return ("ECORE_SUCCESS");
 279         case ECORE_PENDING:
 280                 return ("ECORE_PENDING");
 281         default:
 282                 return ("ECORE ERROR CODE NOT FOUND!");
 283         }
 284 }
 285 
 286 #define QEDE_CHIP_NUM(_p)\
 287  (((_p)->edev.chip_num) & 0xffff)
 288 
 289 char *
 290 qede_chip_name(qede_t *qede)
 291 {
 292     switch (QEDE_CHIP_NUM(qede)) {
 293         case 0x1634: 
 294                 return ("BCM57980E");
 295 
 296         case 0x1629: 
 297                 return ("BCM57980S");
 298 
 299         case 0x1630: 
 300                 return ("BCM57940_KR2");
 301 
 302         case 0x8070: 
 303                 return ("ARROWHEAD");
 304 
 305         case 0x8071: 
 306                 return ("ARROWHEAD");
 307 
 308         case 0x8072: 
 309                 return ("ARROWHEAD");        
 310 
 311         case 0x8073: 
 312                 return ("ARROWHEAD");        
 313 
 314         default:     
 315                 return ("UNKNOWN");
 316     }
 317 }
 318 
 319         
 320 
 321 
 322 static void
 323 qede_destroy_locks(qede_t *qede)
 324 {
 325         qede_fastpath_t *fp = &qede->fp_array[0];
 326         qede_rx_ring_t *rx_ring;
 327         qede_tx_ring_t *tx_ring;
 328         int i, j;
 329 
 330         mutex_destroy(&qede->drv_lock);
 331         mutex_destroy(&qede->watch_lock);
 332 
 333         for (i = 0; i < qede->num_fp; i++, fp++) {
 334                 mutex_destroy(&fp->fp_lock);
 335 
 336                 rx_ring = fp->rx_ring;
 337                 mutex_destroy(&rx_ring->rx_lock);
 338                 mutex_destroy(&rx_ring->rx_replen_lock);
 339 
 340                 for (j = 0; j < qede->num_tc; j++) {
 341                         tx_ring = fp->tx_ring[j];
 342                         mutex_destroy(&tx_ring->tx_lock);
 343                 }
 344         }
 345         mutex_destroy(&qede->gld_lock);
 346         mutex_destroy(&qede->kstat_lock);
 347 }
 348 
 349 static void
 350 qede_init_locks(qede_t *qede)
 351 {
 352         qede_intr_context_t *intr_ctx = &qede->intr_ctx;
 353         qede_fastpath_t *fp = &qede->fp_array[0];
 354         qede_rx_ring_t *rx_ring;
 355         qede_tx_ring_t *tx_ring;
 356         int i, tc;
 357 
 358         mutex_init(&qede->drv_lock, NULL,
 359             MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
 360         mutex_init(&qede->watch_lock, NULL,
 361             MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
 362 
 363         for (i = 0; i < qede->num_fp; i++, fp++) {
 364                 mutex_init(&fp->fp_lock, NULL,
 365                     MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
 366 
 367                 rx_ring = fp->rx_ring;
 368                 mutex_init(&rx_ring->rx_lock, NULL,
 369                     MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
 370                 mutex_init(&rx_ring->rx_replen_lock, NULL,
 371                     MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
 372 
 373                 for (tc = 0; tc < qede->num_tc; tc++) {
 374                         tx_ring = fp->tx_ring[tc];
 375                         mutex_init(&tx_ring->tx_lock, NULL,
 376                             MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
 377                 }
 378         }
 379 
 380         mutex_init(&qede->gld_lock, NULL,
 381             MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
 382         mutex_init(&qede->kstat_lock, NULL,
 383             MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
 384 }
 385 
 386 /* LINTED E_FUNC_ARG_UNUSED */
 387 static void qede_free_io_structs(qede_t *qede)
 388 {
 389 }
 390 
 391 static int
 392 qede_alloc_io_structs(qede_t *qede)
 393 {
 394         qede_fastpath_t *fp;
 395         qede_rx_ring_t *rx_ring;
 396         qede_tx_ring_t *tx_array, *tx_ring;
 397         int i, tc;
 398 
 399         /*
 400          * Put rx ring + tx_ring pointers paired
 401          * into the fp data structure array
 402          */
 403         for (i = 0; i < qede->num_fp; i++) {
 404                 fp = &qede->fp_array[i];
 405                 rx_ring = &qede->rx_array[i];
 406 
 407                 for (tc = 0; tc < qede->num_tc; tc++) {
 408                         tx_array = qede->tx_array[tc];
 409                         tx_ring = &tx_array[i];
 410                         fp->tx_ring[tc] = tx_ring;
 411                 }
 412 
 413                 fp->rx_ring = rx_ring;
 414                 rx_ring->group_index = 0;
 415         }
 416         
 417         return (DDI_SUCCESS);
 418 }
 419 
 420 static int
 421 qede_get_config_params(qede_t *qede)
 422 {
 423         struct ecore_dev *edev = &qede->edev;
 424 
 425         qede_cfg_init(qede);
 426 
 427         qede->num_tc = DEFAULT_TRFK_CLASS_COUNT;
 428         qede->num_hwfns = edev->num_hwfns;
 429         qede->rx_buf_count = qede->rx_ring_size;
 430         qede->rx_buf_size = DEFAULT_RX_BUF_SIZE;
 431         qede_print("!%s:%d: qede->num_fp = %d\n", __func__, qede->instance, 
 432                 qede->num_fp);
 433         qede_print("!%s:%d: qede->rx_ring_size = %d\n", __func__, 
 434                 qede->instance, qede->rx_ring_size);
 435         qede_print("!%s:%d: qede->rx_buf_count = %d\n", __func__, 
 436                 qede->instance, qede->rx_buf_count);
 437         qede_print("!%s:%d: qede->rx_buf_size = %d\n", __func__, 
 438                 qede->instance, qede->rx_buf_size);
 439         qede_print("!%s:%d: qede->rx_copy_threshold = %d\n", __func__, 
 440                 qede->instance, qede->rx_copy_threshold);
 441         qede_print("!%s:%d: qede->tx_ring_size = %d\n", __func__, 
 442                 qede->instance, qede->tx_ring_size);
 443         qede_print("!%s:%d: qede->tx_copy_threshold = %d\n", __func__, 
 444                 qede->instance, qede->tx_bcopy_threshold);
 445         qede_print("!%s:%d: qede->lso_enable = %d\n", __func__, 
 446                 qede->instance, qede->lso_enable);
 447         qede_print("!%s:%d: qede->lro_enable = %d\n", __func__, 
 448                 qede->instance, qede->lro_enable);
 449         qede_print("!%s:%d: qede->jumbo_enable = %d\n", __func__, 
 450                 qede->instance, qede->jumbo_enable);
 451         qede_print("!%s:%d: qede->log_enable = %d\n", __func__, 
 452                 qede->instance, qede->log_enable);
 453         qede_print("!%s:%d: qede->checksum = %d\n", __func__, 
 454                 qede->instance, qede->checksum);
 455         qede_print("!%s:%d: qede->debug_level = 0x%x\n", __func__, 
 456                 qede->instance, qede->ecore_debug_level);
 457         qede_print("!%s:%d: qede->num_hwfns = %d\n", __func__, 
 458                 qede->instance,qede->num_hwfns);
 459 
 460         //qede->tx_buf_size = qede->mtu + QEDE_MAX_ETHER_HDR;
 461         qede->tx_buf_size = BUF_2K_SIZE;
 462         return (DDI_SUCCESS);
 463 }
 464 
 465 void 
 466 qede_config_debug(qede_t *qede)
 467 {
 468 
 469         struct ecore_dev *edev = &qede->edev;
 470         u32 dp_level = 0;
 471         u8 dp_module = 0;
 472 
 473         dp_level = qede->ecore_debug_level;
 474         dp_module = qede->ecore_debug_module;
 475         ecore_init_dp(edev, dp_module, dp_level, NULL);
 476 }
 477 
 478 
 479 
 480 static int
 481 qede_set_operating_params(qede_t *qede)
 482 {
 483         int status = 0;
 484         qede_intr_context_t *intr_ctx = &qede->intr_ctx;
 485 
 486         /* Get qede.conf paramters from user */
 487         status = qede_get_config_params(qede);
 488         if (status != DDI_SUCCESS) {
 489                 return (DDI_FAILURE);
 490         }
 491         /* config debug level */
 492         qede_config_debug(qede);
 493 
 494 
 495         intr_ctx->intr_vect_to_request = 
 496                 qede->num_fp + qede->num_hwfns; 
 497         intr_ctx->intr_fp_vector_count = qede->num_fp - qede->num_hwfns;
 498 
 499         /* set max number of Unicast list */
 500         qede->ucst_total = QEDE_MAX_UCST_CNT;
 501         qede->ucst_avail = QEDE_MAX_UCST_CNT;
 502         bzero(&qede->ucst_mac[0], sizeof (qede_mac_addr_t) * qede->ucst_total);
 503         qede->params.multi_promisc_fl = B_FALSE;
 504         qede->params.promisc_fl = B_FALSE;
 505         qede->mc_cnt = 0;
 506         qede->rx_low_buffer_threshold = RX_LOW_BUFFER_THRESHOLD;
 507 
 508         return (status);
 509 }
 510 
 511 /* Resume the interface */
 512 static int
 513 qede_resume(qede_t *qede)
 514 {
 515         mutex_enter(&qede->drv_lock);
 516         qede->qede_state = QEDE_STATE_ATTACHED;
 517         mutex_exit(&qede->drv_lock);
 518         return (DDI_FAILURE);
 519 }
 520 
 521 /*
 522  * Write dword to doorbell from tx_path
 523  * Avoid use of qede_t * pointer
 524  */
 525 #pragma inline(qede_bar2_write32_tx_doorbell)
 526 void 
 527 qede_bar2_write32_tx_doorbell(qede_tx_ring_t *tx_ring, u32 val)
 528 {
 529         u64 addr = (u64)tx_ring->doorbell_addr;
 530         ddi_put32(tx_ring->doorbell_handle, (u32 *)addr, val);
 531 }
 532 
 533 static void
 534 qede_unconfig_pci(qede_t *qede)
 535 {
 536         if (qede->doorbell_handle != NULL) {
 537                 ddi_regs_map_free(&(qede->doorbell_handle));
 538                 qede->doorbell_handle = NULL;
 539         }
 540 
 541         if (qede->regs_handle != NULL) {
 542                 ddi_regs_map_free(&qede->regs_handle);
 543                 qede->regs_handle = NULL;
 544         }
 545         if (qede->pci_cfg_handle != NULL) {
 546                 pci_config_teardown(&qede->pci_cfg_handle);
 547                 qede->pci_cfg_handle = NULL;
 548         }
 549 }
 550 
 551 static int
 552 qede_config_pci(qede_t *qede)
 553 {
 554         int ret;
 555 
 556         ret = pci_config_setup(qede->dip, &qede->pci_cfg_handle);
 557         if (ret != DDI_SUCCESS) {
 558                 cmn_err(CE_NOTE, "%s:%d Failed to get PCI config handle\n", 
 559                         __func__, qede->instance);
 560                 return (DDI_FAILURE);
 561         }
 562 
 563         /* get register size */
 564         ret = ddi_dev_regsize(qede->dip, 1, &qede->regview_size);
 565         if (ret != DDI_SUCCESS) {
 566                 cmn_err(CE_WARN, "%s%d: failed to read reg size for bar0",
 567                         __func__, qede->instance);
 568                 goto err_exit;
 569         }
 570 
 571         /* get doorbell size */
 572         ret = ddi_dev_regsize(qede->dip, 3, &qede->doorbell_size);
 573         if (ret != DDI_SUCCESS) {
 574                 cmn_err(CE_WARN, "%s%d: failed to read doorbell size for bar2",
 575                         __func__, qede->instance);
 576                 goto err_exit;
 577         }
 578 
 579         /* map register space */
 580         ret = ddi_regs_map_setup(
 581         /* Pointer to the device's dev_info structure. */
 582             qede->dip,
 583         /*
 584          * Index number to the register address space  set.
 585          * A  value of 0 indicates PCI configuration space,
 586          * while a value of 1 indicates the real  start  of
 587          * device register sets.
 588          */
 589             1,
 590         /*
 591          * A platform-dependent value that, when  added  to
 592          * an  offset that is less than or equal to the len
 593          * parameter (see below), is used for the  dev_addr
 594          * argument   to   the  ddi_get,  ddi_mem_get,  and
 595          * ddi_io_get/put routines.
 596          */
 597             &qede->regview,
 598         /*
 599          * Offset into the register address space.
 600          */
 601             0,
 602         /* Length to be mapped. */
 603             qede->regview_size,
 604         /*
 605          * Pointer to a device access  attribute  structure
 606          * of this mapping.
 607          */
 608             &qede_regs_acc_attr,
 609         /* Pointer to a data access handle. */
 610             &qede->regs_handle);
 611 
 612         if (ret != DDI_SUCCESS) {
 613                 cmn_err(CE_WARN, "!qede(%d): failed to map registers, err %d",
 614                     qede->instance, ret);
 615                 goto err_exit;
 616         }
 617 
 618         qede->pci_bar0_base = (unsigned long)qede->regview;
 619 
 620         /* map doorbell space */
 621         ret = ddi_regs_map_setup(qede->dip,
 622             2,
 623             &qede->doorbell,
 624             0,
 625             qede->doorbell_size,
 626             &qede_regs_acc_attr,
 627             &qede->doorbell_handle);
 628         
 629         if (ret != DDI_SUCCESS) {
 630                 cmn_err(CE_WARN, "qede%d: failed to map doorbell, err %d",
 631                     qede->instance, ret);
 632                 goto err_exit;
 633         }
 634 
 635         qede->pci_bar2_base = (unsigned long)qede->doorbell;
 636 
 637         return (ret);
 638 err_exit:
 639         qede_unconfig_pci(qede);
 640         return (DDI_FAILURE);
 641 }
 642 
 643 static uint_t
 644 qede_sp_handler(caddr_t arg1, caddr_t arg2)
 645 {
 646         /*LINTED E_BAD_PTR_CAST_ALIGN*/
 647         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)arg1;
 648         /* LINTED E_BAD_PTR_CAST_ALIGN */
 649         qede_vector_info_t *vect_info = (qede_vector_info_t *)arg2;
 650         struct ecore_dev *edev = p_hwfn->p_dev;
 651         qede_t *qede = (qede_t *)edev;
 652 
 653         if ((arg1 == NULL) || (arg2 == NULL)) {
 654                 cmn_err(CE_WARN, "qede_sp_handler: invalid parameters");
 655                 /*
 656                  * MSIX intr should always
 657                  * return DDI_INTR_CLAIMED
 658                  */
 659                 return (DDI_INTR_CLAIMED);
 660         }
 661 
 662 
 663         vect_info->in_isr = B_TRUE;
 664 
 665         atomic_add_64((volatile uint64_t *)&qede->intrFired, 1);
 666         qede->intrSbCnt[vect_info->vect_index]++;
 667 
 668 
 669         ecore_int_sp_dpc((osal_int_ptr_t)p_hwfn);
 670 
 671         vect_info->in_isr = B_FALSE;
 672 
 673         return (DDI_INTR_CLAIMED);
 674 }
 675 
 676 void
 677 qede_enable_hw_intr(qede_fastpath_t *fp)
 678 {
 679         ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
 680         ddi_dma_sync(fp->sb_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
 681 }
 682 
 683 void
 684 qede_disable_hw_intr(qede_fastpath_t *fp)
 685 {
 686         ddi_dma_sync(fp->sb_dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL);
 687         ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
 688 }
 689 
 690 
 691 static uint_t
 692 qede_fp_handler(caddr_t arg1, caddr_t arg2)
 693 {
 694         /* LINTED E_BAD_PTR_CAST_ALIGN */ 
 695         qede_vector_info_t *vect_info = (qede_vector_info_t *)arg1;
 696         /* LINTED E_BAD_PTR_CAST_ALIGN */ 
 697         qede_t *qede = (qede_t *)arg2;
 698         qede_fastpath_t *fp;
 699         qede_rx_ring_t *rx_ring;
 700         mblk_t *mp;
 701         int work_done = 0;
 702 
 703         if ((vect_info == NULL) || (vect_info->fp == NULL)) {
 704                 cmn_err(CE_WARN, "qede_fp_handler: invalid parameters");
 705                 return (DDI_INTR_UNCLAIMED);
 706         }
 707 
 708         fp = (qede_fastpath_t *)vect_info->fp;
 709         rx_ring = fp->rx_ring;
 710 
 711         mutex_enter(&fp->fp_lock);
 712 
 713         atomic_add_64((volatile uint64_t *)&qede->intrFired, 1);
 714         qede->intrSbCnt[vect_info->vect_index]++;
 715 
 716         mutex_enter(&fp->qede->drv_lock);
 717         qede_disable_hw_intr(fp);
 718         mutex_exit(&fp->qede->drv_lock);
 719 
 720         mp = qede_process_fastpath(fp, QEDE_POLL_ALL,
 721             QEDE_MAX_RX_PKTS_PER_INTR, &work_done);
 722 
 723         if (mp)
 724 #ifndef NO_CROSSBOW
 725         {
 726                 mac_rx_ring(rx_ring->qede->mac_handle,
 727                     rx_ring->mac_ring_handle,
 728                     mp,
 729                     rx_ring->mr_gen_num);
 730         }
 731 #else
 732         {
 733                 mac_rx(qede->mac_handle, NULL, mp);
 734         }
 735 #endif
 736        else if (!mp && (work_done == 0)) {
 737                 qede->intrSbNoChangeCnt[vect_info->vect_index]++;
 738         }
 739 
 740 
 741         mutex_enter(&fp->qede->drv_lock);
 742         /*
 743          * The mac layer may disabled interrupts
 744          * in the context of the mac_rx_ring call
 745          * above while readying for poll process.
 746          * In this case we do not want to 
 747          * enable them here.
 748          */
 749         if (fp->disabled_by_poll == 0) {
 750                 qede_enable_hw_intr(fp);
 751         }
 752         mutex_exit(&fp->qede->drv_lock);
 753 
 754         mutex_exit(&fp->fp_lock);
 755 
 756         return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
 757 }
 758 
 759 static int
 760 qede_disable_intr(qede_t *qede, uint32_t index)
 761 {
 762         int status;
 763         qede_intr_context_t *intr_ctx = &qede->intr_ctx;
 764 
 765         status = ddi_intr_disable(intr_ctx->intr_hdl_array[index]);
 766         if (status != DDI_SUCCESS) {
 767                 cmn_err(CE_WARN, "qede:%s: Failed ddi_intr_enable with %s"
 768                     " for index %d\n",
 769                     __func__, qede_get_ddi_fail(status), index);
 770                 return (status);
 771         }                                                                         
 772         atomic_and_32(&intr_ctx->intr_state, ~(1 << index));
 773 
 774         return (status);
 775 }
 776 
 777 static int
 778 qede_enable_intr(qede_t *qede, int index)
 779 {
 780         int status = 0;
 781 
 782         qede_intr_context_t *intr_ctx = &qede->intr_ctx;
 783 
 784         status = ddi_intr_enable(intr_ctx->intr_hdl_array[index]);
 785         
 786         if (status != DDI_SUCCESS) {
 787                 cmn_err(CE_WARN, "qede:%s: Failed ddi_intr_enable with %s"
 788                     " for index %d\n",
 789                     __func__, qede_get_ddi_fail(status), index);
 790                 return (status);
 791         }
 792         
 793         atomic_or_32(&intr_ctx->intr_state, (1 << index));
 794         
 795         return (status);
 796 }
 797 
 798 static int
 799 qede_disable_all_fastpath_intrs(qede_t *qede)
 800 {
 801         int i, status;
 802 
 803         for (i = qede->num_hwfns; i <= qede->num_fp; i++) {
 804                 status = qede_disable_intr(qede, i);
 805                 if (status != DDI_SUCCESS) {
 806                         return (status);
 807                 }
 808         }
 809         return (DDI_SUCCESS);
 810 }
 811 
 812 static int
 813 qede_enable_all_fastpath_intrs(qede_t *qede)
 814 {
 815         int status = 0, i;
 816 
 817         for (i = qede->num_hwfns; i <= qede->num_fp; i++) {
 818                 status = qede_enable_intr(qede, i);
 819                 if (status != DDI_SUCCESS) {
 820                         return (status);
 821                 }
 822         }
 823         return (DDI_SUCCESS);
 824 }
 825 
 826 static int
 827 qede_disable_slowpath_intrs(qede_t *qede)
 828 {
 829         int i, status;
 830 
 831         for (i = 0; i < qede->num_hwfns; i++) {
 832                 status = qede_disable_intr(qede, i);
 833                 if (status != DDI_SUCCESS) {
 834                         return (status);
 835                 }
 836         }
 837         return (DDI_SUCCESS);
 838 }
 839 
 840 static int
 841 qede_enable_slowpath_intrs(qede_t *qede)
 842 {
 843         int i, status;
 844 
 845         for (i = 0; i < qede->num_hwfns; i++) {
 846                 status = qede_enable_intr(qede, i);
 847                 if (status != DDI_SUCCESS) {
 848                         return (status);
 849                 }
 850         }
 851         return (DDI_SUCCESS);
 852 }
 853 
 854 static int
 855 qede_prepare_edev(qede_t *qede)
 856 {
 857         struct ecore_dev *edev = &qede->edev;
 858         struct ecore_hw_prepare_params p_params;
 859 
 860         /*
 861          * Setup the bar0 and bar2 base address
 862          * in ecore_device
 863          */
 864         edev->regview = (void *)qede->regview;
 865         edev->doorbells = (void *)qede->doorbell;
 866 
 867         /* LINTED E_FUNC_RET_MAYBE_IGNORED2 */
 868         strcpy(edev->name, qede->name);
 869         ecore_init_struct(edev);
 870         
 871         p_params.personality = ECORE_PCI_ETH;
 872         p_params.drv_resc_alloc = 0;
 873         p_params.chk_reg_fifo = 1;
 874         p_params.initiate_pf_flr = 1; 
 875         //p_params->epoch = time(&epoch);
 876         p_params.allow_mdump = 1;
 877         p_params.b_relaxed_probe = 0;
 878         return (ecore_hw_prepare(edev, &p_params));
 879 }
 880 
 881 static int
 882 qede_config_edev(qede_t *qede)
 883 {
 884         int status, i;
 885         struct ecore_dev *edev = &qede->edev;
 886         struct ecore_pf_params *params;
 887 
 888         for (i = 0; i < qede->num_hwfns; i++) {
 889                 struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
 890                 params = &p_hwfn->pf_params; 
 891                 memset((void *)params, 0, sizeof (struct ecore_pf_params));
 892                 params->eth_pf_params.num_cons = 32;
 893         }
 894         status = ecore_resc_alloc(edev);
 895         if (status != ECORE_SUCCESS) {
 896                 cmn_err(CE_NOTE, "%s: Could not allocate ecore resources\n",
 897                  __func__);
 898                 return (DDI_ENOMEM);
 899         }
 900         ecore_resc_setup(edev);
 901         return (DDI_SUCCESS);
 902 }
 903 
 904 static void
 905 qede_unconfig_intrs(qede_t *qede)
 906 {
 907         qede_intr_context_t *intr_ctx = &qede->intr_ctx;
 908         qede_vector_info_t *vect_info;
 909         int i, status = 0;
 910 
 911         for (i = 0; i < intr_ctx->intr_vect_allocated; i++) {
 912                 vect_info = &intr_ctx->intr_vect_info[i];
 913                 if (intr_ctx->intr_vect_info[i].handler_added == B_TRUE) {
 914                         status = ddi_intr_remove_handler(
 915                                 intr_ctx->intr_hdl_array[i]);
 916                         if (status != DDI_SUCCESS) {
 917                                 cmn_err(CE_WARN, "qede:%s: Failed" 
 918                                         " ddi_intr_remove_handler with %s"
 919                                         " for index %d\n",
 920                                 __func__, qede_get_ddi_fail(
 921                                 status), i);
 922                         }
 923                 
 924                         (void) ddi_intr_free(intr_ctx->intr_hdl_array[i]);
 925 
 926                         vect_info->handler_added = B_FALSE;
 927                         intr_ctx->intr_hdl_array[i] = NULL;
 928                 }
 929         }
 930 }
 931 
 932 static int
 933 qede_config_intrs(qede_t *qede)
 934 {
 935         qede_intr_context_t *intr_ctx = &qede->intr_ctx;
 936         qede_vector_info_t *vect_info;
 937         struct ecore_dev *edev = &qede->edev;
 938         int i, status = DDI_FAILURE;
 939         ddi_intr_handler_t *handler;
 940         void *arg1, *arg2;
 941 
 942         /*
 943          * Set up the interrupt handler argument
 944          * for the slowpath
 945          */
 946         for (i = 0; i < intr_ctx->intr_vect_allocated; i++) {
 947                 vect_info = &intr_ctx->intr_vect_info[i];
 948                 /* Store the table index */
 949                 vect_info->vect_index = i;
 950                 vect_info->qede = qede;
 951                 /* 
 952                  * Store the interrupt handler's argument.
 953                  * This will be the a pointer to ecore_dev->hwfns
 954                  * for slowpath, a pointer to the fastpath
 955                  * structure for fastpath.
 956                  */
 957                 if (i < qede->num_hwfns) {
 958                         vect_info->fp = (void *)&edev->hwfns[i];
 959                         handler = qede_sp_handler; 
 960                         arg1 = (caddr_t)&qede->edev.hwfns[i];
 961                         arg2 = (caddr_t)vect_info;
 962                 } else {
 963                         /* 
 964                          * loop index includes hwfns
 965                          * so they need to be subtracked
 966                          * for fp_array
 967                          */
 968                         vect_info->fp =
 969                             (void *)&qede->fp_array[i - qede->num_hwfns];
 970                         handler = qede_fp_handler; 
 971                         arg1 = (caddr_t)vect_info;
 972                         arg2 = (caddr_t)qede;
 973                 }
 974 
 975                 status = ddi_intr_add_handler(
 976                     intr_ctx->intr_hdl_array[i],
 977                     handler,
 978                     arg1,
 979                     arg2);
 980                 if (status != DDI_SUCCESS) {
 981                         cmn_err(CE_WARN, "qede:%s: Failed "
 982                             " ddi_intr_add_handler with %s"
 983                             " for index %d\n",
 984                             __func__, qede_get_ddi_fail(
 985                             status), i);
 986                         qede_unconfig_intrs(qede);
 987                         return (DDI_FAILURE);
 988                 }
 989                 vect_info->handler_added = B_TRUE;
 990         }
 991                 
 992         return (status);
 993 }
 994 
 995 static void
 996 qede_free_intrs(qede_t *qede)
 997 {
 998         qede_intr_context_t *intr_ctx;
 999         int i, status;
1000 
1001         ASSERT(qede != NULL);
1002         intr_ctx = &qede->intr_ctx;
1003         ASSERT(intr_ctx != NULL);
1004 
1005         if (intr_ctx->intr_hdl_array) {
1006                 for (i = 0; i < intr_ctx->intr_vect_allocated; i++) {
1007                         if (intr_ctx->intr_hdl_array[i]) {
1008                                 status = 
1009                                     ddi_intr_free(intr_ctx->intr_hdl_array[i]);
1010                                 if (status != DDI_SUCCESS) {
1011                                         cmn_err(CE_NOTE, 
1012                                             "qede:%s: Failed ddi_intr_free"
1013                                             " with %s\n",
1014                                             __func__, 
1015                                             qede_get_ddi_fail(status));
1016                                 }
1017                         }
1018                 }
1019                 intr_ctx->intr_hdl_array = NULL;
1020         }
1021 
1022         if (intr_ctx->intr_hdl_array) {
1023                 kmem_free(intr_ctx->intr_hdl_array, 
1024                     intr_ctx->intr_hdl_array_size);
1025                 intr_ctx->intr_hdl_array = NULL;
1026         }
1027 
1028         if (intr_ctx->intr_vect_info) {
1029                 kmem_free(intr_ctx->intr_vect_info, 
1030                     intr_ctx->intr_vect_info_array_size);
1031                 intr_ctx->intr_vect_info = NULL;
1032         }
1033 }
1034 
1035 static int
1036 qede_alloc_intrs(qede_t *qede)
1037 {
1038         int status, type_supported, num_supported;
1039         int actual, num_available, num_to_request;
1040         dev_info_t *dip;
1041         qede_intr_context_t *intr_ctx = &qede->intr_ctx;
1042 
1043         dip = qede->dip;
1044 
1045         status = ddi_intr_get_supported_types(dip, &type_supported);
1046         if (status != DDI_SUCCESS) {
1047                 cmn_err(CE_WARN, 
1048                     "qede:%s: Failed ddi_intr_get_supported_types with %s\n",
1049                     __func__, qede_get_ddi_fail(status));
1050                 return (status);
1051         }
1052         intr_ctx->intr_types_available = type_supported;
1053 
1054         if (type_supported & DDI_INTR_TYPE_MSIX) {
1055                 intr_ctx->intr_type_in_use = DDI_INTR_TYPE_MSIX;
1056 
1057                 /* 
1058                  * get the total number of vectors 
1059                  * supported by the device 
1060                  */
1061                 status = ddi_intr_get_nintrs(qede->dip, 
1062                              DDI_INTR_TYPE_MSIX, &num_supported);
1063                 if (status != DDI_SUCCESS) {
1064                         cmn_err(CE_WARN, 
1065                             "qede:%s: Failed ddi_intr_get_nintrs with %s\n",
1066                             __func__, qede_get_ddi_fail(status));
1067                         return (status);
1068                 }
1069                 intr_ctx->intr_vect_supported = num_supported;
1070 
1071                 /* 
1072                  * get the total number of vectors 
1073                  * available for this instance 
1074                  */
1075                 status = ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSIX, 
1076                              &num_available);
1077                 if (status != DDI_SUCCESS) {
1078                         cmn_err(CE_WARN, 
1079                             "qede:%s: Failed ddi_intr_get_navail with %s\n",
1080                             __func__, qede_get_ddi_fail(status));
1081                         return (status);
1082                 }
1083 
1084                 if ((num_available < intr_ctx->intr_vect_to_request) && 
1085                         (num_available >= 2)) {
1086                         qede->num_fp = num_available - qede->num_hwfns;
1087                         cmn_err(CE_NOTE, 
1088                             "qede:%s: allocated %d interrupts"
1089                             " requested was %d\n",
1090                             __func__, num_available, 
1091                             intr_ctx->intr_vect_to_request);
1092                         intr_ctx->intr_vect_to_request = num_available;
1093                 } else if(num_available < 2) {
1094                         cmn_err(CE_WARN, 
1095                             "qede:%s: Failed ddi_intr_get_navail with %s\n",
1096                                 __func__, qede_get_ddi_fail(status));
1097                         return (DDI_FAILURE);
1098                 }
1099 
1100                 intr_ctx->intr_vect_available = num_available;
1101                 num_to_request = intr_ctx->intr_vect_to_request;
1102                 intr_ctx->intr_hdl_array_size = num_to_request *
1103                     sizeof (ddi_intr_handle_t);
1104                 intr_ctx->intr_vect_info_array_size = num_to_request *
1105                     sizeof (qede_vector_info_t);
1106 
1107                 /* Allocate an array big enough for maximum supported */
1108                 intr_ctx->intr_hdl_array = kmem_zalloc(
1109                     intr_ctx->intr_hdl_array_size, KM_SLEEP);
1110                 if (intr_ctx->intr_hdl_array == NULL) {
1111                         cmn_err(CE_WARN, 
1112                             "qede:%s: Failed to allocate"
1113                             " intr_ctx->intr_hdl_array\n",
1114                                 __func__);
1115                         return (status);
1116                 }
1117                 intr_ctx->intr_vect_info = kmem_zalloc(
1118                     intr_ctx->intr_vect_info_array_size, KM_SLEEP);
1119                 if (intr_ctx->intr_vect_info_array_size == NULL) {
1120                         cmn_err(CE_WARN, 
1121                             "qede:%s: Failed to allocate"
1122                             " intr_ctx->vect_info_array_size\n",
1123                                 __func__);
1124                         goto err_exit;
1125                 }
1126 
1127                 /* 
1128                  * Use strict allocation. It will fail if we do not get
1129                  * exactly what we want.  Later we can shift through with
1130                  * power of two like this:
1131                  *   for (i = intr_ctx->intr_requested; i > 0; i >>= 1)
1132                  * (Though we would need to account for the slowpath vector)
1133                  */
1134                 status = ddi_intr_alloc(qede->dip, 
1135                         intr_ctx->intr_hdl_array, 
1136                         DDI_INTR_TYPE_MSIX,
1137                         0, 
1138                         num_to_request,
1139                         &actual,
1140                         DDI_INTR_ALLOC_STRICT);
1141                 if (status != DDI_SUCCESS) {
1142                         cmn_err(CE_WARN, 
1143                             "qede:%s: Failed to allocate"
1144                             " %d interrupts with %s\n",
1145                             __func__, num_to_request, 
1146                             qede_get_ddi_fail(status));
1147                         cmn_err(CE_WARN, 
1148                             "qede:%s: Only %d interrupts available.\n",
1149                             __func__, actual);
1150                         goto err_exit;
1151                 }
1152                 intr_ctx->intr_vect_allocated = num_to_request;
1153 
1154                 status = ddi_intr_get_pri(intr_ctx->intr_hdl_array[0], 
1155                             &intr_ctx->intr_pri);
1156                 if (status != DDI_SUCCESS) {
1157                         cmn_err(CE_WARN, 
1158                             "qede:%s: Failed ddi_intr_get_pri with %s\n",
1159                             __func__, qede_get_ddi_fail(status));
1160                         goto err_exit;
1161                 }
1162 
1163                 status = ddi_intr_get_cap(intr_ctx->intr_hdl_array[0], 
1164                             &intr_ctx->intr_cap);
1165                 if (status != DDI_SUCCESS) {
1166                         cmn_err(CE_WARN, 
1167                             "qede:%s: Failed ddi_intr_get_cap with %s\n",
1168                                 __func__, qede_get_ddi_fail(status));
1169                         goto err_exit;
1170                 }
1171 
1172         } else {
1173                 /* For now we only support type MSIX */
1174                 cmn_err(CE_WARN, 
1175                     "qede:%s: Failed to allocate intr_ctx->intr_hdl_array\n",
1176                         __func__);
1177                 return (DDI_FAILURE);
1178         }
1179         
1180         intr_ctx->intr_mode = ECORE_INT_MODE_MSIX;   
1181         return (status);
1182 err_exit:
1183         qede_free_intrs(qede);
1184         return (status);
1185 }
1186 
1187 static void
1188 /* LINTED E_FUNC_ARG_UNUSED */
1189 qede_unconfig_fm(qede_t *qede)
1190 {
1191 }
1192 
1193 /* LINTED E_FUNC_ARG_UNUSED */
1194 static int
1195 qede_fm_err_cb(dev_info_t *dip, ddi_fm_error_t *err,
1196     const void *impl_data)
1197 {
1198         pci_ereport_post(dip, err, NULL);
1199         return (err->fme_status);
1200 }
1201 
1202 
1203 static int
1204 qede_config_fm(qede_t * qede)
1205 {
1206         ddi_iblock_cookie_t iblk;
1207 
1208         qede_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
1209         qede_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
1210         qede_buf_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
1211         qede_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
1212         qede_gen_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
1213         qede_tx_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
1214         qede_dma_attr_desc.dma_attr_flags = DDI_DMA_FLAGERR;
1215         qede_dma_attr_txbuf.dma_attr_flags = DDI_DMA_FLAGERR;
1216         qede_dma_attr_rxbuf.dma_attr_flags = DDI_DMA_FLAGERR;
1217         qede_dma_attr_cmddesc.dma_attr_flags = DDI_DMA_FLAGERR;
1218         qede_gen_dma_attr_desc.dma_attr_flags = DDI_DMA_FLAGERR;
1219         qede_buf2k_dma_attr_txbuf.dma_attr_flags = DDI_DMA_FLAGERR;
1220 
1221         ddi_fm_init(qede->dip, &qede->fm_cap, &iblk);
1222 
1223         if (DDI_FM_EREPORT_CAP(qede->fm_cap) ||
1224             DDI_FM_ERRCB_CAP(qede->fm_cap)) {
1225                 pci_ereport_setup(qede->dip);
1226         }
1227 
1228         if (DDI_FM_ERRCB_CAP(qede->fm_cap)) {
1229                 ddi_fm_handler_register(qede->dip,
1230                     qede_fm_err_cb, (void *)qede);
1231         }
1232         return (DDI_SUCCESS);
1233 
1234 }
1235 
1236 int
1237 qede_dma_mem_alloc(qede_t *qede,
1238     int size, uint_t dma_flags, caddr_t *address, ddi_dma_cookie_t *cookie,
1239     ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *handlep,
1240     ddi_dma_attr_t *dma_attr, ddi_device_acc_attr_t *dev_acc_attr)
1241 {
1242         int err;
1243         uint32_t ncookies;
1244         size_t ring_len;
1245 
1246         *dma_handle = NULL;
1247 
1248         if (size <= 0) {
1249                 return (DDI_ENOMEM);
1250         }
1251 
1252         err = ddi_dma_alloc_handle(qede->dip,
1253             dma_attr,
1254             DDI_DMA_DONTWAIT, NULL, dma_handle);
1255         if (err != DDI_SUCCESS) {
1256                 cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1257                     "ddi_dma_alloc_handle FAILED: %d", qede->instance, err);
1258                 *dma_handle = NULL;
1259                 return (DDI_ENOMEM);
1260         }
1261 
1262         err = ddi_dma_mem_alloc(*dma_handle,
1263             size, dev_acc_attr,
1264             dma_flags,
1265             DDI_DMA_DONTWAIT, NULL, address, &ring_len,
1266             handlep);
1267         if (err != DDI_SUCCESS) {
1268                 cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1269                     "ddi_dma_mem_alloc FAILED: %d, request size: %d",
1270                     qede->instance, err, size);
1271                 ddi_dma_free_handle(dma_handle);
1272                 *dma_handle = NULL;
1273                 *handlep = NULL;
1274                 return (DDI_ENOMEM);
1275         }
1276 
1277         if (ring_len < size) {
1278                 cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1279                     "could not allocate required: %d, request size: %d",
1280                     qede->instance, err, size);
1281                 ddi_dma_mem_free(handlep);
1282                 ddi_dma_free_handle(dma_handle);
1283                 *dma_handle = NULL;
1284                 *handlep = NULL;
1285                 return (DDI_FAILURE);
1286         }
1287 
1288         (void) memset(*address, 0, size);
1289 
1290         if (((err = ddi_dma_addr_bind_handle(*dma_handle,
1291             NULL, *address, ring_len,
1292             dma_flags,
1293             DDI_DMA_DONTWAIT, NULL,
1294             cookie, &ncookies)) != DDI_DMA_MAPPED) ||
1295             (ncookies != 1)) {
1296                 cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1297                     "ddi_dma_addr_bind_handle Failed: %d",
1298                     qede->instance, err);
1299                 ddi_dma_mem_free(handlep);
1300                 ddi_dma_free_handle(dma_handle);
1301                 *dma_handle = NULL;
1302                 *handlep = NULL;
1303                 return (DDI_FAILURE);
1304         }
1305 
1306         return (DDI_SUCCESS);
1307 }
1308 
1309 void
1310 qede_pci_free_consistent(ddi_dma_handle_t *dma_handle,
1311     ddi_acc_handle_t *acc_handle)
1312 {
1313         int err;
1314 
1315         if (*dma_handle != NULL) {
1316                 err = ddi_dma_unbind_handle(*dma_handle);
1317                 if (err != DDI_SUCCESS) {
1318                         cmn_err(CE_WARN, "!pci_free_consistent: "
1319                             "Error unbinding memory, err %d", err);
1320                         return;
1321                 }
1322         } else {
1323                 goto exit;
1324         }
1325         ddi_dma_mem_free(acc_handle);
1326         ddi_dma_free_handle(dma_handle);
1327 exit:
1328         *dma_handle = NULL;
1329         *acc_handle = NULL;
1330 }
1331 
1332 static int
1333 qede_vport_stop(qede_t *qede)
1334 {
1335         struct ecore_dev *edev = &qede->edev;
1336         struct ecore_hwfn *p_hwfn;
1337         int i, status = ECORE_BUSY; 
1338 
1339         for (i = 0; i < edev->num_hwfns; i++) {
1340                 p_hwfn = &edev->hwfns[i];
1341 
1342                 if (qede->vport_state[i] !=
1343                     QEDE_VPORT_STARTED) {
1344                         qede_info(qede, "vport %d not started", i);
1345                         continue;
1346                 }
1347 
1348                 status = ecore_sp_vport_stop(p_hwfn,
1349                         p_hwfn->hw_info.opaque_fid,
1350                         i); /* vport needs fix */
1351                 if (status != ECORE_SUCCESS) {
1352                         cmn_err(CE_WARN, "!qede_vport_stop: "
1353                             "FAILED for hwfn%d ", i);
1354                         return (DDI_FAILURE);
1355                 }
1356 
1357                 qede->vport_state[i] =
1358                     QEDE_VPORT_STOPPED;
1359         }
1360 
1361         return (status);
1362 }
1363 
1364 static uint8_t
1365 qede_get_active_rss_params(qede_t *qede, u8 hwfn_id)
1366 {
1367         struct ecore_rss_params rss_params;
1368         qede_fastpath_t *fp;
1369         int i;
1370         const uint64_t hash_key[] = 
1371         { 
1372                 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
1373                 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
1374                 0x255b0ec26d5a56daULL 
1375         };
1376         uint8_t enable_rss = 0;
1377 
1378         bzero(&rss_params, sizeof (rss_params));
1379         if (qede->num_fp > 1) {
1380                 qede_info(qede, "Configuring RSS parameters");
1381                 enable_rss = 1;
1382         } else {
1383                 qede_info(qede, "RSS configuration not needed");
1384                 enable_rss = 0;
1385                 goto exit;
1386         }
1387 
1388         rss_params.update_rss_config = 1;
1389         rss_params.rss_enable = 1;
1390         rss_params.update_rss_capabilities = 1;
1391         rss_params.update_rss_ind_table = 1;
1392         rss_params.update_rss_key = 1;
1393 
1394         rss_params.rss_caps = ECORE_RSS_IPV4 |
1395             ECORE_RSS_IPV6 |
1396             ECORE_RSS_IPV4_TCP |
1397             ECORE_RSS_IPV6_TCP |
1398             ECORE_RSS_IPV4_UDP |
1399             ECORE_RSS_IPV6_UDP;
1400 
1401         rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
1402 
1403         bcopy(&hash_key[0], &rss_params.rss_key[0], 
1404                 sizeof (rss_params.rss_key));
1405 
1406         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
1407                 fp = &qede->fp_array[i % qede->num_fp];
1408                 rss_params.rss_ind_table[i] = (void *)(fp->rx_ring->p_cid);
1409         }
1410 exit:
1411         bcopy(&rss_params, &qede->rss_params[hwfn_id], sizeof (rss_params));
1412         return (enable_rss);
1413 }
1414 
1415 static int
1416 qede_vport_update(qede_t *qede,
1417     enum qede_vport_state state)
1418 {
1419         struct ecore_dev *edev = &qede->edev;
1420         struct ecore_hwfn *p_hwfn;
1421         struct ecore_sp_vport_update_params *vport_params;
1422         struct ecore_sge_tpa_params tpa_params;
1423         int  status = DDI_SUCCESS;
1424         bool new_state;
1425         uint8_t i;
1426 
1427         /*
1428          * Update only does on and off.
1429          * For now we combine TX and RX
1430          * together.  Later we can split them
1431          * and set other params as well.
1432          */
1433         if (state == QEDE_VPORT_ON) {
1434             new_state = B_TRUE;
1435         } else if (state == QEDE_VPORT_OFF) {
1436             new_state = B_FALSE;
1437         } else {
1438                 cmn_err(CE_WARN, "qede_vport_update: "
1439                     "invalid, state = %d", state);
1440                 return (DDI_EINVAL);
1441         }
1442 
1443         for (i = 0; i < edev->num_hwfns; i++) {
1444                 p_hwfn = &edev->hwfns[i];
1445                 vport_params = &qede->vport_params[i];
1446 
1447                 vport_params->opaque_fid =
1448                     p_hwfn->hw_info.opaque_fid;
1449                 vport_params->vport_id =
1450                     i;
1451 
1452                 vport_params->update_vport_active_rx_flg =
1453                     1;
1454                 if (new_state == B_TRUE)
1455                         vport_params->vport_active_rx_flg = 1;
1456                 else
1457                         vport_params->vport_active_rx_flg = 0;
1458 
1459                 vport_params->update_vport_active_tx_flg =
1460                     1;
1461                 if (new_state == B_TRUE)
1462                         vport_params->vport_active_tx_flg = 1;
1463                 else
1464                         vport_params->vport_active_tx_flg = 0;
1465 
1466                 vport_params->update_inner_vlan_removal_flg =
1467                     0;
1468                 vport_params->inner_vlan_removal_flg =
1469                     0;
1470                 vport_params->update_default_vlan_enable_flg =
1471                     0;
1472                 vport_params->default_vlan_enable_flg =
1473                     0;
1474                 vport_params->update_default_vlan_flg =
1475                     1;
1476                 vport_params->default_vlan =
1477                     0;
1478                 vport_params->update_tx_switching_flg =
1479                     0;
1480                 vport_params->tx_switching_flg =
1481                     0;
1482                 vport_params->update_approx_mcast_flg =
1483                     0;
1484                 vport_params->update_anti_spoofing_en_flg =
1485                     0;
1486                 vport_params->anti_spoofing_en = 0;
1487                 vport_params->update_accept_any_vlan_flg =
1488                     1;
1489                 vport_params->accept_any_vlan = 1;
1490 
1491                 vport_params->accept_flags.update_rx_mode_config = 1;
1492                 vport_params->accept_flags.update_tx_mode_config = 1;
1493                 vport_params->accept_flags.rx_accept_filter =
1494                     ECORE_ACCEPT_BCAST |
1495                     ECORE_ACCEPT_UCAST_UNMATCHED |
1496                     ECORE_ACCEPT_MCAST_UNMATCHED;
1497                 vport_params->accept_flags.tx_accept_filter =
1498                     ECORE_ACCEPT_BCAST |
1499                     ECORE_ACCEPT_UCAST_UNMATCHED |
1500                     ECORE_ACCEPT_MCAST_UNMATCHED;
1501 
1502                 vport_params->sge_tpa_params = NULL;
1503 
1504                 if (qede->lro_enable &&
1505                     (new_state == B_TRUE)) {
1506                         qede_print("!%s(%d): enabling LRO ",
1507                                 __func__, qede->instance);
1508 
1509                         memset(&tpa_params, 0, 
1510                             sizeof (struct ecore_sge_tpa_params));
1511                         tpa_params.max_buffers_per_cqe = 5;
1512                         tpa_params.update_tpa_en_flg = 1;
1513                         tpa_params.tpa_ipv4_en_flg = 1;
1514                         tpa_params.tpa_ipv6_en_flg = 1;
1515                         tpa_params.tpa_ipv4_tunn_en_flg = 0;
1516                         tpa_params.tpa_ipv6_tunn_en_flg = 0;
1517                         tpa_params.update_tpa_param_flg = 1;
1518                         tpa_params.tpa_pkt_split_flg = 0;
1519                         tpa_params.tpa_hdr_data_split_flg = 0;
1520                         tpa_params.tpa_gro_consistent_flg = 0;
1521                         tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
1522                         tpa_params.tpa_max_size = 65535;
1523                         tpa_params.tpa_min_size_to_start = qede->mtu/2;
1524                         tpa_params.tpa_min_size_to_cont = qede->mtu/2;
1525                         vport_params->sge_tpa_params = &tpa_params;
1526                 }
1527 
1528                 /* 
1529                  * Get the rss_params to be configured
1530                  */
1531                 if (qede_get_active_rss_params(qede, i /* hwfn id */)) {
1532                         vport_params->rss_params = &qede->rss_params[i];
1533                 } else {
1534                         vport_params->rss_params = NULL;
1535                 }
1536 
1537                 status = ecore_sp_vport_update(p_hwfn,
1538                     vport_params,
1539                     ECORE_SPQ_MODE_EBLOCK,
1540                     NULL);
1541 
1542                 if (status != ECORE_SUCCESS) {
1543                         cmn_err(CE_WARN, "ecore_sp_vport_update: "
1544                             "FAILED for hwfn%d "
1545                             " with ", i);
1546                         return (DDI_FAILURE);
1547                 }
1548         }
1549         return (DDI_SUCCESS);
1550 }
1551 
1552 
1553 static int
1554 qede_vport_start(qede_t *qede)
1555 {
1556         struct ecore_dev *edev = &qede->edev;
1557         struct ecore_hwfn *p_hwfn;
1558         struct ecore_sp_vport_start_params params;
1559         uint8_t i;
1560         int  status = ECORE_BUSY;
1561 
1562         for (i = 0; i < edev->num_hwfns; i++) {
1563                 p_hwfn = &edev->hwfns[i];
1564                 if ((qede->vport_state[i] !=
1565                     QEDE_VPORT_UNKNOWN) &&
1566                     (qede->vport_state[i] !=
1567                     QEDE_VPORT_STOPPED)) {
1568                     continue;
1569                 }
1570 
1571                 params.tpa_mode = ECORE_TPA_MODE_NONE;
1572                 params.remove_inner_vlan = 0;
1573                 params.tx_switching = 0;
1574                 params.handle_ptp_pkts = 0; 
1575                 params.only_untagged = 0;
1576                 params.drop_ttl0 = 1;
1577                 params.max_buffers_per_cqe = 16; 
1578                 params.concrete_fid = p_hwfn->hw_info.concrete_fid;
1579                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1580                 params.vport_id = i;
1581                 params.mtu = qede->mtu;
1582                 status = ecore_sp_vport_start(p_hwfn, &params);
1583                 if (status != ECORE_SUCCESS) {
1584                         cmn_err(CE_WARN, "qede_vport_start: "
1585                             "FAILED for hwfn%d", i);
1586                         return (DDI_FAILURE);
1587                 }
1588 
1589                 ecore_hw_start_fastpath(p_hwfn);
1590                 qede->vport_state[i] = QEDE_VPORT_STARTED;
1591         }
1592         ecore_reset_vport_stats(edev);
1593         return (status);
1594 }
1595 
1596 void
1597 qede_update_rx_q_producer(qede_rx_ring_t *rx_ring)
1598 {
1599         u16 bd_prod = ecore_chain_get_prod_idx(&rx_ring->rx_bd_ring);
1600         u16 cqe_prod = ecore_chain_get_prod_idx(&rx_ring->rx_cqe_ring);
1601         /* LINTED E_FUNC_SET_NOT_USED */
1602         struct eth_rx_prod_data rx_prod_cmd = { 0 };
1603 
1604 
1605         rx_prod_cmd.bd_prod = HOST_TO_LE_32(bd_prod);
1606         rx_prod_cmd.cqe_prod = HOST_TO_LE_32(cqe_prod);
1607         UPDATE_RX_PROD(rx_ring, rx_prod_cmd);
1608 }
1609 
1610 static int
1611 qede_fastpath_stop_queues(qede_t *qede)
1612 {
1613         int i, j;
1614         int status = DDI_FAILURE;
1615         struct ecore_dev *edev;
1616         struct ecore_hwfn *p_hwfn;
1617         struct ecore_queue_cid *p_tx_cid, *p_rx_cid;
1618 
1619         qede_fastpath_t *fp;
1620         qede_rx_ring_t *rx_ring;
1621         qede_tx_ring_t *tx_ring;
1622 
1623         ASSERT(qede != NULL);
1624         /* ASSERT(qede->edev != NULL); */
1625 
1626         edev = &qede->edev;
1627 
1628         status = qede_vport_update(qede, QEDE_VPORT_OFF);
1629         if (status != DDI_SUCCESS) {
1630                 cmn_err(CE_WARN, "FAILED to "
1631                     "update vports");
1632                 return (DDI_FAILURE);
1633         }
1634 
1635         for (i = 0; i < qede->num_fp; i++) {
1636                 fp = &qede->fp_array[i];
1637                 rx_ring = fp->rx_ring;
1638                 p_hwfn = &edev->hwfns[fp->fp_hw_eng_index];
1639                 for (j = 0; j < qede->num_tc; j++) {
1640                         tx_ring = fp->tx_ring[j];
1641                         if (tx_ring->queue_started == B_TRUE) {
1642                                 p_tx_cid = tx_ring->p_cid; 
1643                                 status = ecore_eth_tx_queue_stop(p_hwfn,
1644                                         (void *)p_tx_cid);
1645                                 if (status != ECORE_SUCCESS) {
1646                                         cmn_err(CE_WARN, "FAILED to "
1647                                             "stop tx queue %d:%d", i, j);
1648                                         return (DDI_FAILURE);
1649                                 }
1650                                 tx_ring->queue_started = B_FALSE;
1651                         }
1652                 }
1653 
1654                 if (rx_ring->queue_started == B_TRUE) {
1655                         p_rx_cid = rx_ring->p_cid; 
1656                         status = ecore_eth_rx_queue_stop(p_hwfn, 
1657                             (void *)p_rx_cid, B_TRUE, B_FALSE);
1658                         if (status != ECORE_SUCCESS) {
1659                                 cmn_err(CE_WARN, "FAILED to "
1660                                     "stop rx queue %d "
1661                                     "with ecore status %s",
1662                                     i, qede_get_ecore_fail(status));
1663                                 return (DDI_FAILURE);
1664                         }
1665                         rx_ring->queue_started = B_FALSE;
1666                 }
1667         }
1668 
1669         status = qede_vport_stop(qede);
1670         if (status != DDI_SUCCESS) {
1671                 cmn_err(CE_WARN, "qede_vport_stop "
1672                     "FAILED to stop vports");
1673                 return (DDI_FAILURE);
1674         }
1675 
1676         ecore_hw_stop_fastpath(edev);
1677 
1678         return (DDI_SUCCESS);
1679 }
1680 
1681 static int
1682 qede_fastpath_start_queues(qede_t *qede)
1683 {
1684         int i, j;
1685         int status = DDI_FAILURE;
1686         struct ecore_dev *edev;
1687         struct ecore_hwfn *p_hwfn;
1688         struct ecore_queue_start_common_params params;
1689         struct ecore_txq_start_ret_params tx_ret_params;
1690         struct ecore_rxq_start_ret_params rx_ret_params;
1691         qede_fastpath_t *fp;
1692         qede_rx_ring_t *rx_ring;
1693         qede_tx_ring_t *tx_ring;
1694         dma_addr_t p_phys_table;
1695         u16 page_cnt;
1696 
1697         ASSERT(qede != NULL);
1698         /* ASSERT(qede->edev != NULL); */
1699         edev = &qede->edev;
1700 
1701         status = qede_vport_start(qede);
1702         if (status != DDI_SUCCESS) {
1703                 cmn_err(CE_WARN, "Failed to "
1704                     "start vports");
1705                 return (DDI_FAILURE);
1706         }
1707 
1708         for (i = 0; i < qede->num_fp; i++) {
1709                 fp = &qede->fp_array[i];
1710                 rx_ring = fp->rx_ring;
1711                 p_hwfn = &edev->hwfns[fp->fp_hw_eng_index];
1712                 
1713                 params.vport_id = fp->vport_id;
1714                 params.queue_id = fp->rx_queue_index;
1715                 params.stats_id = fp->stats_id;
1716                 params.p_sb = fp->sb_info;
1717                 params.sb_idx = RX_PI;
1718                 p_phys_table = ecore_chain_get_pbl_phys(&rx_ring->rx_cqe_ring);
1719                 page_cnt = ecore_chain_get_page_cnt(&rx_ring->rx_cqe_ring);
1720 
1721                 status = ecore_eth_rx_queue_start(p_hwfn,
1722                     p_hwfn->hw_info.opaque_fid, 
1723                     &params,
1724                     qede->rx_buf_size,
1725                     rx_ring->rx_bd_ring.p_phys_addr,
1726                     p_phys_table,
1727                     page_cnt,
1728                     &rx_ret_params);
1729                 
1730                 rx_ring->hw_rxq_prod_addr = rx_ret_params.p_prod;    
1731                 rx_ring->p_cid = rx_ret_params.p_handle;
1732                 if (status != DDI_SUCCESS) {
1733                         cmn_err(CE_WARN, "ecore_sp_eth_rx_queue_start "
1734                             "FAILED for rxq%d", i);
1735                         return (DDI_FAILURE);
1736                 }
1737                 rx_ring->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
1738 
1739                 OSAL_MSLEEP(20);
1740                 *rx_ring->hw_cons_ptr = 0;
1741 
1742                 qede_update_rx_q_producer(rx_ring);
1743                 rx_ring->queue_started = B_TRUE;
1744 
1745                 for (j = 0; j < qede->num_tc; j++) {
1746                         tx_ring = fp->tx_ring[j];
1747                         
1748                         params.vport_id = fp->vport_id;
1749                         params.queue_id = tx_ring->tx_queue_index;
1750                         params.stats_id = fp->stats_id;
1751                         params.p_sb = fp->sb_info;
1752                         params.sb_idx = TX_PI(j);
1753 
1754                         p_phys_table = ecore_chain_get_pbl_phys(
1755                             &tx_ring->tx_bd_ring);
1756                         page_cnt = ecore_chain_get_page_cnt(
1757                             &tx_ring->tx_bd_ring);
1758                         status = ecore_eth_tx_queue_start(p_hwfn,
1759                             p_hwfn->hw_info.opaque_fid,
1760                             &params, 
1761                             0, 
1762                             p_phys_table,
1763                             page_cnt, 
1764                             &tx_ret_params);
1765                         tx_ring->doorbell_addr = tx_ret_params.p_doorbell;
1766                         tx_ring->p_cid = tx_ret_params.p_handle;     
1767                         if (status != DDI_SUCCESS) {
1768                                 cmn_err(CE_WARN, "ecore_sp_eth_tx_queue_start "
1769                                     "FAILED for txq%d:%d", i,j);
1770                                 return (DDI_FAILURE);
1771                         }
1772                         tx_ring->hw_cons_ptr = 
1773                             &fp->sb_info->sb_virt->pi_array[TX_PI(j)];
1774                         /* LINTED E_CONSTANT_CONDITION */
1775                         SET_FIELD(tx_ring->tx_db.data.params,
1776                             ETH_DB_DATA_DEST, DB_DEST_XCM);
1777                         /* LINTED E_CONSTANT_CONDITION */
1778                         SET_FIELD(tx_ring->tx_db.data.params,
1779                             ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1780                         /* LINTED E_CONSTANT_CONDITION */
1781                         SET_FIELD(tx_ring->tx_db.data.params,
1782                             ETH_DB_DATA_AGG_VAL_SEL, DQ_XCM_ETH_TX_BD_PROD_CMD);
1783                         tx_ring->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
1784                         tx_ring->queue_started = B_TRUE;
1785                 }
1786         }
1787 
1788         status = qede_vport_update(qede, QEDE_VPORT_ON);
1789         if (status != DDI_SUCCESS) {
1790                 cmn_err(CE_WARN, "Failed to "
1791                     "update vports");
1792                 return (DDI_FAILURE);
1793         }
1794         return (status);
1795 }
1796 
1797 static void
1798 qede_free_mag_elem(qede_rx_ring_t *rx_ring, qede_rx_buffer_t *rx_buffer,
1799     struct eth_rx_bd *bd)
1800 {
1801         int i;
1802 
1803         if (bd != NULL) {
1804                 bzero(bd, sizeof (*bd));
1805         }
1806 
1807         if (rx_buffer->mp != NULL) {
1808                 freemsg(rx_buffer->mp);
1809                 rx_buffer->mp = NULL;
1810         }
1811 }
1812 
1813 static void
1814 qede_free_lro_rx_buffers(qede_rx_ring_t *rx_ring)
1815 {
1816         int i, j; 
1817         qede_lro_info_t *lro_info;
1818 
1819         for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1820                 lro_info = &rx_ring->lro_info[i];
1821                 if (lro_info->agg_state == QEDE_AGG_STATE_NONE) {
1822                         continue;
1823                 }
1824                 for (j = 0; j < QEDE_MAX_BD_PER_AGG; j++) {
1825                         if (lro_info->rx_buffer[j] == NULL) {
1826                                 break;
1827                         }
1828                         qede_recycle_copied_rx_buffer(
1829                             lro_info->rx_buffer[j]);
1830                         lro_info->rx_buffer[j] = NULL;
1831                 }
1832                 lro_info->agg_state = QEDE_AGG_STATE_NONE;
1833         }
1834 }
1835 
1836 static void
1837 qede_free_rx_buffers_legacy(qede_t *qede, qede_rx_buf_area_t *rx_buf_area)
1838 {
1839         int i, j;
1840         u32 ref_cnt, bufs_per_page;
1841         qede_rx_buffer_t *rx_buffer, *first_rx_buf_in_page = 0;
1842         qede_rx_ring_t *rx_ring = rx_buf_area->rx_ring;
1843         bool free_rx_buffer;
1844 
1845         bufs_per_page = rx_buf_area->bufs_per_page;
1846         
1847         rx_buffer = &rx_buf_area->rx_buf_pool[0];
1848 
1849         if (rx_buf_area) {
1850                 for (i = 0; i < rx_ring->rx_buf_count; i += bufs_per_page) {
1851                         free_rx_buffer = B_TRUE;
1852                         for (j = 0; j < bufs_per_page; j++) {
1853                                 if (!j) {
1854                                         first_rx_buf_in_page = rx_buffer;
1855                                 }
1856                                 if (rx_buffer->ref_cnt != 0) {
1857                                         ref_cnt = atomic_dec_32_nv(
1858                                             &rx_buffer->ref_cnt);
1859                                         if (ref_cnt == 0) {
1860                                                 /*
1861                                                  * Buffer is now 
1862                                                  * completely free 
1863                                                  */
1864                                                 if (rx_buffer->mp) {
1865                                                         freemsg(rx_buffer->mp);
1866                                                         rx_buffer->mp = NULL;
1867                                                 }
1868                                         } else {
1869                                                 /*
1870                                                  * Since Buffer still 
1871                                                  * held up in Stack,
1872                                                  * we cant free the whole page
1873                                                  */
1874                                                 free_rx_buffer = B_FALSE;
1875                                         }
1876                                 }
1877                                 rx_buffer++;
1878                         }
1879 
1880                         if (free_rx_buffer == B_TRUE) {
1881                                 qede_pci_free_consistent(
1882                                     &first_rx_buf_in_page->dma_info.dma_handle,
1883                                     &first_rx_buf_in_page->dma_info.acc_handle);
1884                         }
1885                 }
1886 
1887                 /* 
1888                  * If no more buffers are with the stack
1889                  *  then free the buf pools 
1890                  */
1891                 if (rx_buf_area->buf_upstream == 0) {
1892                         mutex_destroy(&rx_buf_area->active_buf_list.lock);
1893                         mutex_destroy(&rx_buf_area->passive_buf_list.lock);
1894 
1895                         kmem_free(rx_buf_area, sizeof (qede_rx_buf_area_t));
1896                         rx_buf_area = NULL;
1897                         if (atomic_cas_32(&qede->detach_unsafe, 2, 2)) {
1898                                 atomic_dec_32(&qede->detach_unsafe);
1899                         }
1900                         
1901                 }
1902         }
1903 }
1904 
1905 
1906 static void
1907 qede_free_rx_buffers(qede_t *qede, qede_rx_ring_t *rx_ring)
1908 {
1909         qede_free_lro_rx_buffers(rx_ring);
1910         qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
1911         qede_free_rx_buffers_legacy(qede, rx_buf_area);
1912 }
1913 
1914 static void
1915 qede_free_rx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
1916 {
1917         qede_rx_ring_t *rx_ring;
1918 
1919         ASSERT(qede != NULL);
1920         ASSERT(fp != NULL);
1921 
1922 
1923         rx_ring = fp->rx_ring;
1924         rx_ring->rx_buf_area->inactive = 1;
1925 
1926         qede_free_rx_buffers(qede, rx_ring);
1927 
1928 
1929         if (rx_ring->rx_bd_ring.p_virt_addr) {
1930                 ecore_chain_free(&qede->edev, &rx_ring->rx_bd_ring);
1931                 rx_ring->rx_bd_ring.p_virt_addr = NULL;
1932         }
1933 
1934         if (rx_ring->rx_cqe_ring.p_virt_addr) {
1935                 ecore_chain_free(&qede->edev, &rx_ring->rx_cqe_ring);
1936                 rx_ring->rx_cqe_ring.p_virt_addr = NULL;
1937                 if (rx_ring->rx_cqe_ring.pbl_sp.p_virt_table) {
1938                         rx_ring->rx_cqe_ring.pbl_sp.p_virt_table = NULL;
1939                 }
1940         }
1941         rx_ring->hw_cons_ptr = NULL;
1942         rx_ring->hw_rxq_prod_addr = NULL;
1943         rx_ring->sw_rx_cons = 0;
1944         rx_ring->sw_rx_prod = 0;
1945 
1946 }
1947 
1948 
1949 static int
1950 qede_init_bd(qede_t *qede, qede_rx_ring_t *rx_ring)
1951 {
1952         struct eth_rx_bd *bd = NULL;
1953         int ret = DDI_SUCCESS;
1954         int i;
1955         qede_rx_buffer_t *rx_buffer;
1956         qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
1957         qede_rx_buf_list_t *active_buf_list = &rx_buf_area->active_buf_list;
1958 
1959         for (i = 0; i < rx_ring->rx_buf_count; i++) {
1960                 rx_buffer = &rx_buf_area->rx_buf_pool[i];
1961                 active_buf_list->buf_list[i] = rx_buffer;
1962                 active_buf_list->num_entries++;
1963                 bd = ecore_chain_produce(&rx_ring->rx_bd_ring);
1964                 if (bd == NULL) {
1965                         qede_print_err("!%s(%d): invalid NULL bd in "
1966                             "rx_bd_ring", __func__, qede->instance);
1967                         ret = DDI_FAILURE;
1968                         goto err;
1969                 }
1970 
1971                 bd->addr.lo = HOST_TO_LE_32(U64_LO(
1972                                 rx_buffer->dma_info.phys_addr)); 
1973                 bd->addr.hi = HOST_TO_LE_32(U64_HI(
1974                                 rx_buffer->dma_info.phys_addr));
1975         
1976         }
1977         active_buf_list->tail = 0;
1978 err:
1979         return (ret);
1980 }
1981 
1982 
1983 qede_rx_buffer_t *
1984 qede_get_from_active_list(qede_rx_ring_t *rx_ring,
1985     uint32_t *num_entries)
1986 {
1987         qede_rx_buffer_t *rx_buffer;
1988         qede_rx_buf_list_t *active_buf_list =
1989             &rx_ring->rx_buf_area->active_buf_list;
1990         u16 head = active_buf_list->head;
1991 
1992         rx_buffer = active_buf_list->buf_list[head];
1993         active_buf_list->buf_list[head] = NULL;
1994         head = (head + 1) & RX_RING_MASK;
1995 
1996         if (rx_buffer) {
1997                 atomic_dec_32(&active_buf_list->num_entries);
1998                 atomic_inc_32(&rx_ring->rx_buf_area->buf_upstream);
1999                 atomic_inc_32(&rx_buffer->ref_cnt);
2000                 rx_buffer->buf_state = RX_BUF_STATE_WITH_OS;
2001 
2002                 if (rx_buffer->mp == NULL) {
2003                         rx_buffer->mp =
2004                             desballoc(rx_buffer->dma_info.virt_addr,
2005                             rx_ring->rx_buf_size, 0, &rx_buffer->recycle);
2006                 }
2007         }
2008 
2009         *num_entries = active_buf_list->num_entries;
2010         active_buf_list->head = head;
2011 
2012         return (rx_buffer);
2013 }
2014 
2015 qede_rx_buffer_t *
2016 qede_get_from_passive_list(qede_rx_ring_t *rx_ring)
2017 {
2018         qede_rx_buf_list_t *passive_buf_list =
2019             &rx_ring->rx_buf_area->passive_buf_list;
2020         qede_rx_buffer_t *rx_buffer;
2021         u32 head;
2022         
2023         mutex_enter(&passive_buf_list->lock);
2024         head = passive_buf_list->head;
2025         if (passive_buf_list->buf_list[head] == NULL) {
2026                 mutex_exit(&passive_buf_list->lock);
2027                 return (NULL);
2028         }
2029 
2030         rx_buffer = passive_buf_list->buf_list[head];
2031         passive_buf_list->buf_list[head] = NULL;
2032 
2033         passive_buf_list->head = (passive_buf_list->head + 1) & RX_RING_MASK;
2034         mutex_exit(&passive_buf_list->lock);
2035 
2036         atomic_dec_32(&passive_buf_list->num_entries);
2037 
2038         return (rx_buffer);
2039 }
2040 
2041 void
2042 qede_put_to_active_list(qede_rx_ring_t *rx_ring, qede_rx_buffer_t *rx_buffer)
2043 {
2044         qede_rx_buf_list_t *active_buf_list =
2045             &rx_ring->rx_buf_area->active_buf_list;
2046         u16 tail = active_buf_list->tail;
2047 
2048         active_buf_list->buf_list[tail] = rx_buffer;
2049         tail = (tail + 1) & RX_RING_MASK;
2050 
2051         active_buf_list->tail = tail;
2052         atomic_inc_32(&active_buf_list->num_entries);
2053 }
2054 
2055 void
2056 qede_replenish_rx_buffers(qede_rx_ring_t *rx_ring)
2057 {
2058         qede_rx_buffer_t *rx_buffer;
2059         int count = 0;
2060         struct eth_rx_bd *bd;
2061 
2062         /*
2063          * Only replenish when we have at least
2064          * 1/4th of the ring to do.  We don't want
2065          * to incur many lock contentions and
2066          * cycles for just a few buffers.
2067          * We don't bother with the passive area lock
2068          * here because we're just getting an
2069          * estimate.  Also, we only pull from
2070          * the passive list in this function.
2071          */
2072         
2073         /*
2074          * Use a replenish lock because we can do the
2075          * replenish operation at the end of
2076          * processing the rx_ring, but also when
2077          * we get buffers back from the upper
2078          * layers.
2079          */
2080         if (mutex_tryenter(&rx_ring->rx_replen_lock) == 0) {
2081                 qede_info(rx_ring->qede, "!%s(%d): Failed to take"
2082                         " replenish_lock",
2083                         __func__, rx_ring->qede->instance);
2084                 return;
2085         }
2086 
2087         rx_buffer = qede_get_from_passive_list(rx_ring);
2088 
2089         while (rx_buffer != NULL) {
2090                 bd = ecore_chain_produce(&rx_ring->rx_bd_ring);
2091                 if (bd == NULL) {
2092                         qede_info(rx_ring->qede, "!%s(%d): bd = null",
2093                                 __func__, rx_ring->qede->instance);
2094                         qede_put_to_passive_list(rx_ring, rx_buffer);
2095                         break;
2096                 }
2097 
2098                 bd->addr.lo = HOST_TO_LE_32(U64_LO(
2099                                 rx_buffer->dma_info.phys_addr));
2100                 bd->addr.hi = HOST_TO_LE_32(
2101                                 U64_HI(rx_buffer->dma_info.phys_addr));
2102 
2103                 /*
2104                  * Put the buffer in active list since it will be
2105                  * posted to fw now
2106                  */
2107                 qede_put_to_active_list(rx_ring, rx_buffer);
2108                 rx_buffer->buf_state = RX_BUF_STATE_WITH_FW;
2109                 count++;
2110                 rx_buffer = qede_get_from_passive_list(rx_ring);
2111         }
2112         mutex_exit(&rx_ring->rx_replen_lock);
2113 }
2114 
2115 /*
2116  * Put the rx_buffer to the passive_buf_list
2117  */
2118 int
2119 qede_put_to_passive_list(qede_rx_ring_t *rx_ring, qede_rx_buffer_t *rx_buffer)
2120 {
2121         qede_rx_buf_list_t *passive_buf_list =
2122             &rx_ring->rx_buf_area->passive_buf_list;
2123         qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
2124         int tail = 0;
2125 
2126         mutex_enter(&passive_buf_list->lock);
2127 
2128         tail = passive_buf_list->tail;
2129         passive_buf_list->tail = (passive_buf_list->tail + 1) & RX_RING_MASK;
2130 
2131         rx_buf_area->passive_buf_list.buf_list[tail] = rx_buffer;
2132         atomic_inc_32(&passive_buf_list->num_entries);
2133 
2134         if (passive_buf_list->num_entries > rx_ring->rx_buf_count) {
2135                 /* Sanity check */
2136                 qede_info(rx_ring->qede, "ERROR: num_entries (%d)"
2137                     " > max count (%d)",
2138                     passive_buf_list->num_entries,
2139                     rx_ring->rx_buf_count);
2140         }
2141         mutex_exit(&passive_buf_list->lock);
2142         return (passive_buf_list->num_entries);
2143 }
2144 
2145 void
2146 qede_recycle_rx_buffer(char *arg)
2147 {
2148         /* LINTED E_BAD_PTR_CAST_ALIGN */
2149         qede_rx_buffer_t *rx_buffer = (qede_rx_buffer_t *)arg;
2150         qede_rx_ring_t *rx_ring = rx_buffer->rx_ring;
2151         qede_rx_buf_area_t *rx_buf_area = rx_buffer->rx_buf_area;
2152         qede_t *qede = rx_ring->qede;
2153         u32 buf_upstream = 0, ref_cnt;
2154         u32 num_entries;
2155 
2156         if (rx_buffer->ref_cnt == 0) {
2157                 return;
2158         }
2159 
2160         /*
2161          * Since the data buffer associated with the mblk is free'ed
2162          * by upper layer, allocate it again to contain proper
2163          * free_func pointer
2164          */
2165         rx_buffer->mp = desballoc(rx_buffer->dma_info.virt_addr,
2166             rx_ring->rx_buf_size, 0, &rx_buffer->recycle);
2167 
2168         ref_cnt = atomic_dec_32_nv(&rx_buffer->ref_cnt);
2169         if (ref_cnt == 1) {
2170                 /* Put the buffer into passive_buf_list to be reused */
2171                 num_entries = qede_put_to_passive_list(rx_ring, rx_buffer);
2172                 if(num_entries >= 32) {
2173                         if(mutex_tryenter(&rx_ring->rx_lock) != 0) {
2174                                 qede_replenish_rx_buffers(rx_ring);
2175                                 qede_update_rx_q_producer(rx_ring);
2176                                 mutex_exit(&rx_ring->rx_lock);
2177                         }
2178                 }
2179         } else if (ref_cnt == 0) {
2180                 /* 
2181                  * This is a buffer from a previous load instance of
2182                  * rx_buf_area. Free the rx_buffer and if no more
2183                  * buffers are upstream from this rx_buf_area instance
2184                  * then free the rx_buf_area;
2185                  */
2186                 if (rx_buffer->mp != NULL) {
2187                         freemsg(rx_buffer->mp);
2188                         rx_buffer->mp = NULL;
2189                 }
2190                 mutex_enter(&qede->drv_lock);
2191 
2192                 buf_upstream = atomic_cas_32(&rx_buf_area->buf_upstream, 1, 1);
2193                 if (buf_upstream >= 1) {
2194                         atomic_dec_32(&rx_buf_area->buf_upstream);
2195                 }
2196                 if (rx_buf_area->inactive && (rx_buf_area->buf_upstream == 0)) {
2197                         qede_free_rx_buffers_legacy(qede, rx_buf_area);
2198                 }
2199 
2200                 mutex_exit(&qede->drv_lock);
2201         } else {
2202                 /* Sanity check */
2203                 qede_info(rx_ring->qede, "rx_buffer %p"
2204                     " ref_cnt %d is invalid",
2205                     rx_buffer, ref_cnt);
2206         }
2207 }
2208 
2209 void
2210 qede_recycle_copied_rx_buffer(qede_rx_buffer_t *rx_buffer)
2211 {
2212         qede_rx_ring_t *rx_ring = rx_buffer->rx_ring;
2213         qede_rx_buf_area_t *rx_buf_area = rx_buffer->rx_buf_area;
2214         qede_t *qede = rx_ring->qede;
2215         u32 buf_upstream = 0, ref_cnt;
2216 
2217         if (rx_buffer->ref_cnt == 0) {
2218                 /*
2219                  * Can happen if the buffer is being free'd
2220                  * in the stop routine
2221                  */
2222                 qede_info(qede, "!%s(%d): rx_buffer->ref_cnt = 0",
2223                     __func__, qede->instance);
2224                 return;
2225         }
2226 
2227         buf_upstream = atomic_cas_32(&rx_buf_area->buf_upstream, 1, 1);
2228         if (buf_upstream >= 1) {
2229                 atomic_dec_32(&rx_buf_area->buf_upstream);
2230         }
2231 
2232         /*
2233          * Since the data buffer associated with the mblk is free'ed
2234          * by upper layer, allocate it again to contain proper
2235          * free_func pointer
2236          * Though we could also be recycling a buffer that got copied,
2237          * so in that case the mp would still be intact.
2238          */
2239 
2240         ref_cnt = atomic_dec_32_nv(&rx_buffer->ref_cnt);
2241         if (ref_cnt == 1) {
2242                 qede_put_to_passive_list(rx_ring, rx_buffer);
2243                 /* Put the buffer into passive_buf_list to be reused */
2244         } else if (ref_cnt == 0) {
2245                 /* 
2246                  * This is a buffer from a previous load instance of
2247                  * rx_buf_area. Free the rx_buffer and if no more
2248                  * buffers are upstream from this rx_buf_area instance
2249                  * then free the rx_buf_area;
2250                  */
2251                 qede_info(rx_ring->qede, "Free up rx_buffer %p, index %d"
2252                     " ref_cnt %d from a previous driver iteration",
2253                     rx_buffer, rx_buffer->index, ref_cnt);
2254                 if (rx_buffer->mp != NULL) {
2255                         freemsg(rx_buffer->mp);
2256                         rx_buffer->mp = NULL;
2257                 }
2258 
2259                 if (rx_buf_area->inactive && (rx_buf_area->buf_upstream == 0)) {
2260                         mutex_enter(&qede->drv_lock);
2261                         qede_free_rx_buffers_legacy(qede, rx_buf_area);
2262                         mutex_exit(&qede->drv_lock);
2263                 }
2264         } else {
2265                 /* Sanity check */
2266                 qede_info(rx_ring->qede, "rx_buffer %p"
2267                     " ref_cnt %d is invalid",
2268                     rx_buffer, ref_cnt);
2269         }
2270 }
2271 
2272 
2273 static int
2274 qede_alloc_rx_buffers(qede_t *qede, qede_rx_ring_t *rx_ring)
2275 {
2276         int ret = DDI_SUCCESS, i, j;
2277         qede_rx_buffer_t *rx_buffer;
2278         qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
2279         u32 bufs_per_page, buf_size;
2280         int page_size = (int)ddi_ptob(qede->dip, 1);
2281         qede_dma_info_t *dma_info;
2282         ddi_dma_cookie_t temp_cookie;
2283         int allocated = 0;
2284         u64 dma_addr;
2285         u8 *vaddr;
2286         ddi_dma_handle_t dma_handle;
2287         ddi_acc_handle_t acc_handle;
2288 
2289         if (rx_ring->rx_buf_size > page_size) {
2290                 bufs_per_page = 1;
2291                 buf_size = rx_ring->rx_buf_size;
2292         } else {
2293                 bufs_per_page =
2294                     (page_size) / DEFAULT_RX_BUF_SIZE;
2295                 buf_size = page_size;
2296         }
2297 
2298         rx_buffer = &rx_buf_area->rx_buf_pool[0];
2299         rx_buf_area->bufs_per_page = bufs_per_page;
2300 
2301         mutex_init(&rx_buf_area->active_buf_list.lock, NULL,
2302             MUTEX_DRIVER, 0);
2303         mutex_init(&rx_buf_area->passive_buf_list.lock, NULL,
2304             MUTEX_DRIVER, 0);
2305 
2306         for (i = 0; i < rx_ring->rx_buf_count; i += bufs_per_page) {
2307                 dma_info = &rx_buffer->dma_info;
2308 
2309                 ret = qede_dma_mem_alloc(qede,
2310                         buf_size,
2311                         DDI_DMA_READ | DDI_DMA_STREAMING | DDI_DMA_CONSISTENT,
2312                         (caddr_t *)&dma_info->virt_addr,
2313                         &temp_cookie,
2314                         &dma_info->dma_handle,
2315                         &dma_info->acc_handle,
2316                         &qede_dma_attr_rxbuf,
2317                         &qede_buf_acc_attr); 
2318                 if (ret != DDI_SUCCESS) {
2319                         goto err;
2320                 }
2321 
2322                 allocated++;
2323                 vaddr = dma_info->virt_addr;
2324                 dma_addr = temp_cookie.dmac_laddress;
2325                 dma_handle = dma_info->dma_handle;
2326                 acc_handle = dma_info->acc_handle;
2327                 
2328                 for (j = 0; j < bufs_per_page; j++) {
2329                         dma_info = &rx_buffer->dma_info;
2330                         dma_info->virt_addr = vaddr;
2331                         dma_info->phys_addr = dma_addr;
2332                         dma_info->dma_handle = dma_handle;
2333                         dma_info->acc_handle = acc_handle;
2334                         dma_info->offset = j * rx_ring->rx_buf_size;
2335                         /* Populate the recycle func and arg for the buffer */
2336                         rx_buffer->recycle.free_func = qede_recycle_rx_buffer;
2337                         rx_buffer->recycle.free_arg = (caddr_t)rx_buffer;
2338 
2339                         rx_buffer->mp = desballoc(dma_info->virt_addr,
2340                                         rx_ring->rx_buf_size, 0,
2341                                         &rx_buffer->recycle);
2342                         if (rx_buffer->mp == NULL) {
2343                                 qede_warn(qede, "desballoc() failed, index %d",
2344                                      i);
2345                         }
2346                         rx_buffer->rx_ring = rx_ring;
2347                         rx_buffer->rx_buf_area = rx_buf_area;
2348                         rx_buffer->index = i + j;
2349                         rx_buffer->ref_cnt = 1;
2350                         rx_buffer++;
2351 
2352                         vaddr += rx_ring->rx_buf_size;
2353                         dma_addr += rx_ring->rx_buf_size;
2354                 }
2355                 rx_ring->sw_rx_prod++;
2356         }
2357 
2358         /*
2359          * Fill the rx_bd_ring with the allocated
2360          * buffers
2361          */
2362         ret = qede_init_bd(qede, rx_ring);
2363         if (ret != DDI_SUCCESS) {
2364                 goto err;
2365         }
2366 
2367         rx_buf_area->buf_upstream = 0;
2368 
2369         return (ret);
2370 err:
2371         qede_free_rx_buffers(qede, rx_ring);
2372         return (ret);
2373 }
2374 
2375 static int
2376 qede_alloc_rx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
2377 {
2378         qede_rx_ring_t *rx_ring;
2379         qede_rx_buf_area_t *rx_buf_area;
2380         size_t size;
2381 
2382         ASSERT(qede != NULL);
2383         ASSERT(fp != NULL);
2384 
2385         rx_ring = fp->rx_ring;
2386 
2387         atomic_inc_32(&qede->detach_unsafe);
2388         /*
2389          * Allocate rx_buf_area for the plumb instance
2390          */
2391         rx_buf_area = kmem_zalloc(sizeof (*rx_buf_area), KM_SLEEP);
2392         if (rx_buf_area == NULL) {
2393                 qede_info(qede, "!%s(%d): Cannot alloc rx_buf_area",
2394                         __func__, qede->instance);
2395                 return (DDI_FAILURE);
2396         }
2397 
2398         rx_buf_area->inactive = 0;
2399         rx_buf_area->rx_ring = rx_ring;
2400         rx_ring->rx_buf_area = rx_buf_area;
2401         /* Rx Buffer descriptor queue */
2402         if (ecore_chain_alloc(&qede->edev,
2403                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2404                         ECORE_CHAIN_MODE_NEXT_PTR,
2405                         ECORE_CHAIN_CNT_TYPE_U16,
2406                         qede->rx_ring_size,
2407                         sizeof (struct eth_rx_bd),
2408                         &rx_ring->rx_bd_ring,
2409                         NULL) != ECORE_SUCCESS) {
2410                 cmn_err(CE_WARN, "Failed to allocate "
2411                     "ecore cqe chain");
2412                 return (DDI_FAILURE);
2413         }
2414 
2415         /* Rx Completion Descriptor queue */
2416         if (ecore_chain_alloc(&qede->edev,
2417                         ECORE_CHAIN_USE_TO_CONSUME,
2418                         ECORE_CHAIN_MODE_PBL,
2419                         ECORE_CHAIN_CNT_TYPE_U16,
2420                         qede->rx_ring_size,
2421                         sizeof (union eth_rx_cqe),
2422                         &rx_ring->rx_cqe_ring,
2423                         NULL) != ECORE_SUCCESS) {
2424                 cmn_err(CE_WARN, "Failed to allocate "
2425                     "ecore bd chain");
2426                 return (DDI_FAILURE);
2427         }
2428 
2429         /* Rx Data buffers */
2430         if (qede_alloc_rx_buffers(qede, rx_ring) != DDI_SUCCESS) {
2431                 qede_print_err("!%s(%d): Failed to alloc rx buffers",
2432                     __func__, qede->instance);
2433                 return (DDI_FAILURE);
2434         }
2435         return (DDI_SUCCESS);
2436 }
2437 
2438 static void
2439 qede_free_tx_bd_ring(qede_t *qede, qede_fastpath_t *fp)
2440 {
2441         int i;
2442         qede_tx_ring_t *tx_ring;
2443         
2444         ASSERT(qede != NULL);
2445         ASSERT(fp != NULL);
2446 
2447         for (i = 0; i < qede->num_tc; i++) {
2448                 tx_ring = fp->tx_ring[i];
2449 
2450                 if (tx_ring->tx_bd_ring.p_virt_addr) {
2451                         ecore_chain_free(&qede->edev, &tx_ring->tx_bd_ring);
2452                         tx_ring->tx_bd_ring.p_virt_addr = NULL;
2453                 }
2454                 tx_ring->hw_cons_ptr = NULL;
2455                 tx_ring->sw_tx_cons = 0;
2456                 tx_ring->sw_tx_prod = 0;
2457 
2458         }
2459 }
2460 
2461 static u32
2462 qede_alloc_tx_bd_ring(qede_t *qede, qede_tx_ring_t *tx_ring)
2463 {
2464         u32 ret = 0;
2465 
2466         ret = ecore_chain_alloc(&qede->edev,
2467             ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2468             ECORE_CHAIN_MODE_PBL,
2469             ECORE_CHAIN_CNT_TYPE_U16,
2470             tx_ring->bd_ring_size,
2471             sizeof (union eth_tx_bd_types),
2472             &tx_ring->tx_bd_ring,
2473             NULL);
2474         if (ret) {
2475                 cmn_err(CE_WARN, "!%s(%d): Failed to alloc tx bd chain",
2476                     __func__, qede->instance);
2477                 goto error;
2478         }
2479 
2480 
2481 error:
2482         return (ret);
2483 }
2484 
2485 static void
2486 qede_free_tx_bcopy_buffers(qede_tx_ring_t *tx_ring)
2487 {
2488         qede_tx_bcopy_pkt_t *bcopy_pkt;
2489         int i;
2490 
2491         for (i = 0; i < tx_ring->tx_ring_size; i++) {
2492                 bcopy_pkt = &tx_ring->bcopy_list.bcopy_pool[i];
2493                 if(bcopy_pkt->dma_handle != NULL)
2494                         (void) ddi_dma_unbind_handle(bcopy_pkt->dma_handle);
2495                 if(bcopy_pkt->acc_handle != NULL) {
2496                         ddi_dma_mem_free(&bcopy_pkt->acc_handle);
2497                         bcopy_pkt->acc_handle = NULL;
2498                 }
2499                 if(bcopy_pkt->dma_handle != NULL) {
2500                         ddi_dma_free_handle(&bcopy_pkt->dma_handle);
2501                         bcopy_pkt->dma_handle = NULL;        
2502                 }
2503                 if (bcopy_pkt) {
2504                         if (bcopy_pkt->mp) {
2505                                 freemsg(bcopy_pkt->mp);
2506                         }
2507                 }
2508         }
2509 
2510         if (tx_ring->bcopy_list.bcopy_pool != NULL) {
2511                 kmem_free(tx_ring->bcopy_list.bcopy_pool,
2512                     tx_ring->bcopy_list.size);
2513                 tx_ring->bcopy_list.bcopy_pool = NULL;
2514         }
2515 
2516         mutex_destroy(&tx_ring->bcopy_list.lock);
2517 }
2518 
2519 static u32
2520 qede_alloc_tx_bcopy_buffers(qede_t *qede, qede_tx_ring_t *tx_ring)
2521 {
2522         u32 ret = DDI_SUCCESS;
2523         int page_size = (int)ddi_ptob(qede->dip, 1);
2524         size_t size;
2525         qede_tx_bcopy_pkt_t *bcopy_pkt, *bcopy_list;
2526         int i;
2527         qede_dma_info_t dma_info;
2528         ddi_dma_cookie_t temp_cookie;
2529 
2530         /*
2531          * If the tx_buffers size if less than the page size
2532          * then try to use multiple copy buffers inside the
2533          * same page. Otherwise use the whole page (or more)
2534          * for the copy buffers
2535          */
2536         if (qede->tx_buf_size > page_size) {
2537                 size = qede->tx_buf_size;
2538         } else {
2539                 size = page_size;
2540         }
2541 
2542         size = sizeof (qede_tx_bcopy_pkt_t) * qede->tx_ring_size;
2543         bcopy_list = kmem_zalloc(size, KM_SLEEP);
2544         if (bcopy_list == NULL) {
2545                 qede_warn(qede, "!%s(%d): Failed to allocate bcopy_list",
2546                     __func__, qede->instance);
2547                 ret = DDI_FAILURE;
2548                 goto exit;
2549         }
2550 
2551         tx_ring->bcopy_list.size = size;
2552         tx_ring->bcopy_list.bcopy_pool = bcopy_list;
2553         bcopy_pkt = bcopy_list;
2554 
2555         tx_ring->bcopy_list.head = 0;
2556         tx_ring->bcopy_list.tail = 0;
2557         mutex_init(&tx_ring->bcopy_list.lock, NULL, MUTEX_DRIVER, 0);
2558 
2559         for (i = 0; i < qede->tx_ring_size; i++) {
2560 
2561                 ret = qede_dma_mem_alloc(qede,
2562                                         qede->tx_buf_size,
2563                                         DDI_DMA_READ | DDI_DMA_STREAMING | DDI_DMA_CONSISTENT,
2564                                         (caddr_t *)&dma_info.virt_addr,
2565                                         &temp_cookie,
2566                                         &dma_info.dma_handle,
2567                                         &dma_info.acc_handle,
2568                                         &qede_dma_attr_txbuf,
2569                                         &qede_buf_acc_attr);
2570                 if(ret) {
2571                         ret = DDI_FAILURE;
2572                         goto exit;
2573                 }
2574                 
2575                                         
2576                 bcopy_pkt->virt_addr = dma_info.virt_addr;
2577                 bcopy_pkt->phys_addr = temp_cookie.dmac_laddress;
2578                 bcopy_pkt->dma_handle = dma_info.dma_handle;
2579                 bcopy_pkt->acc_handle = dma_info.acc_handle;
2580                 
2581                 tx_ring->bcopy_list.free_list[i] = bcopy_pkt;
2582                 bcopy_pkt++;
2583         }
2584 
2585 exit:
2586         return (ret);
2587 }
2588 
2589 static void
2590 qede_free_tx_dma_handles(qede_t *qede, qede_tx_ring_t *tx_ring)
2591 {
2592         qede_dma_handle_entry_t *dmah_entry;
2593         int i;
2594 
2595         for (i = 0; i < tx_ring->tx_ring_size; i++) {
2596                 dmah_entry = &tx_ring->dmah_list.dmah_pool[i];
2597                 if (dmah_entry) {
2598                         if (dmah_entry->dma_handle != NULL) {
2599                                 ddi_dma_free_handle(&dmah_entry->dma_handle);
2600                                 dmah_entry->dma_handle = NULL;
2601                         } else {
2602                                 qede_info(qede, "dmah_entry %p, handle is NULL",
2603                                      dmah_entry);
2604                         }
2605                 }
2606         }
2607 
2608         if (tx_ring->dmah_list.dmah_pool != NULL) {
2609                 kmem_free(tx_ring->dmah_list.dmah_pool,
2610                     tx_ring->dmah_list.size);
2611                 tx_ring->dmah_list.dmah_pool = NULL;
2612         }
2613 
2614         mutex_destroy(&tx_ring->dmah_list.lock);
2615 }
2616 
2617 static u32
2618 qede_alloc_tx_dma_handles(qede_t *qede, qede_tx_ring_t *tx_ring)
2619 {
2620         int i;
2621         size_t size;
2622         u32 ret = DDI_SUCCESS;
2623         qede_dma_handle_entry_t *dmah_entry, *dmah_list;
2624 
2625         size = sizeof (qede_dma_handle_entry_t) * qede->tx_ring_size;
2626         dmah_list = kmem_zalloc(size, KM_SLEEP);
2627         if (dmah_list == NULL) {
2628                 qede_warn(qede, "!%s(%d): Failed to allocated dmah_list",
2629                     __func__, qede->instance);
2630                 /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2631                 ret = DDI_FAILURE;
2632                 goto exit;
2633         }
2634 
2635         tx_ring->dmah_list.size = size;
2636         tx_ring->dmah_list.dmah_pool = dmah_list;
2637         dmah_entry = dmah_list;
2638 
2639         tx_ring->dmah_list.head = 0;
2640         tx_ring->dmah_list.tail = 0;
2641         mutex_init(&tx_ring->dmah_list.lock, NULL, MUTEX_DRIVER, 0);
2642 
2643         /*
2644          *
2645          */
2646         for (i = 0; i < qede->tx_ring_size; i++) {
2647                 ret = ddi_dma_alloc_handle(qede->dip,
2648                     &qede_tx_buf_dma_attr,
2649                     DDI_DMA_DONTWAIT,
2650                     NULL,
2651                     &dmah_entry->dma_handle);
2652                 if (ret != DDI_SUCCESS) {
2653                         qede_print_err("!%s(%d): dma alloc handle failed "
2654                             "for index %d",
2655                             __func__, qede->instance, i);
2656                         /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2657                         ret = DDI_FAILURE;
2658                         goto exit;
2659                 }
2660 
2661                 tx_ring->dmah_list.free_list[i] = dmah_entry;
2662                 dmah_entry++;
2663         }
2664 exit:
2665         return (ret);
2666 }
2667 
2668 static u32 
2669 qede_alloc_tx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
2670 {
2671         int i;
2672         qede_tx_ring_t *tx_ring;
2673         u32 ret = DDI_SUCCESS;
2674         size_t size;
2675         qede_tx_recycle_list_t *recycle_list;
2676 
2677         ASSERT(qede != NULL);
2678         ASSERT(fp != NULL);
2679 
2680         for (i = 0; i < qede->num_tc; i++) {
2681                 tx_ring = fp->tx_ring[i];
2682                 tx_ring->bd_ring_size = qede->tx_ring_size;
2683 
2684                 /*
2685                  * Allocate the buffer descriptor chain
2686                  */
2687                 ret = qede_alloc_tx_bd_ring(qede, tx_ring);
2688                 if (ret) {
2689                         cmn_err(CE_WARN, "!%s(%d): failed, %s",
2690                             __func__, qede->instance, qede_get_ddi_fail(ret));
2691                         return (ret);
2692                 }
2693 
2694                 /*
2695                  * Allocate copy mode buffers
2696                  */
2697                 ret = qede_alloc_tx_bcopy_buffers(qede, tx_ring);
2698                 if (ret) {
2699                         qede_print_err("!%s(%d): Failed to alloc tx copy "
2700                             "buffers", __func__, qede->instance);
2701                         /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2702                         ret = DDI_FAILURE;
2703                         goto exit;
2704                 }
2705 
2706                 /*
2707                  * Allocate dma handles for mapped mode
2708                  */
2709                 ret = qede_alloc_tx_dma_handles(qede, tx_ring);
2710                 if (ret) {
2711                         qede_print_err("!%s(%d): Failed to alloc tx dma "
2712                             "handles", __func__, qede->instance);
2713                         /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2714                         ret = DDI_FAILURE;
2715                         goto exit; 
2716                 }
2717 
2718                 /* Allocate tx_recycle list */
2719                 size = sizeof (qede_tx_recycle_list_t) * qede->tx_ring_size;
2720                 recycle_list = kmem_zalloc(size, KM_SLEEP);
2721                 if (recycle_list == NULL) {
2722                         qede_warn(qede, "!%s(%d): Failed to allocate"
2723                             " tx_recycle_list", __func__, qede->instance);
2724                         /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2725                         ret = DDI_FAILURE;
2726                         goto exit;
2727                 }
2728 
2729                 tx_ring->tx_recycle_list = recycle_list;
2730         }
2731 exit:
2732         return (ret);
2733 }
2734 
2735 static void
2736 /* LINTED E_FUNC_ARG_UNUSED */
2737 qede_free_sb_phys(qede_t *qede, qede_fastpath_t *fp)
2738 {
2739         qede_pci_free_consistent(&fp->sb_dma_handle, &fp->sb_acc_handle);
2740         fp->sb_virt = NULL;
2741         fp->sb_phys = 0;
2742 }
2743 
2744 static int
2745 qede_alloc_sb_phys(qede_t *qede, qede_fastpath_t *fp)
2746 {
2747         int status;
2748         int sb_id;
2749         struct ecore_dev *edev = &qede->edev;
2750         struct ecore_hwfn *p_hwfn;
2751         qede_vector_info_t *vect_info = fp->vect_info;
2752         ddi_dma_cookie_t sb_cookie;
2753 
2754         ASSERT(qede != NULL);
2755         ASSERT(fp != NULL);
2756 
2757         /*
2758          * In the case of multiple hardware engines,
2759          * interrupts are spread across all of them.
2760          * In the case of only one engine, all
2761          * interrupts are handled by that engine.
2762          * In the case of 2 engines, each has half
2763          * of the interrupts.
2764          */
2765         sb_id = vect_info->vect_index;
2766         p_hwfn = &edev->hwfns[sb_id % qede->num_hwfns];
2767 
2768         /* Allocate dma mem. for status_block */
2769         status = qede_dma_mem_alloc(qede,
2770             sizeof (struct status_block),
2771             (DDI_DMA_RDWR | DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
2772             (caddr_t *)&fp->sb_virt,
2773             &sb_cookie,
2774             &fp->sb_dma_handle,
2775             &fp->sb_acc_handle,
2776             &qede_desc_dma_attr,
2777             &qede_desc_acc_attr);
2778 
2779         if (status != DDI_SUCCESS) {
2780                 qede_info(qede, "Failed to allocate status_block dma mem");
2781                 return (status);
2782         }
2783 
2784         fp->sb_phys = sb_cookie.dmac_laddress;
2785 
2786 
2787         status = ecore_int_sb_init(p_hwfn, 
2788                         p_hwfn->p_main_ptt, 
2789                         fp->sb_info,
2790                         (void *)fp->sb_virt,
2791                         fp->sb_phys, 
2792                         fp->fp_index);
2793         if (status != ECORE_SUCCESS) {
2794                 cmn_err(CE_WARN, "Failed ecore_int_sb_init");
2795                 return (DDI_FAILURE);
2796         }
2797 
2798         return (status);
2799 }
2800 
2801 static void
2802 qede_free_tx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
2803 {
2804         qede_tx_ring_t *tx_ring;
2805         int i;
2806 
2807         for (i = 0; i < qede->num_tc; i++) {
2808                 tx_ring = fp->tx_ring[i];
2809                 qede_free_tx_dma_handles(qede, tx_ring);
2810                 qede_free_tx_bcopy_buffers(tx_ring);
2811                 qede_free_tx_bd_ring(qede, fp);
2812 
2813                 if (tx_ring->tx_recycle_list) {
2814                         kmem_free(tx_ring->tx_recycle_list,
2815                             sizeof (qede_tx_recycle_list_t)
2816                             * qede->tx_ring_size);
2817                 }
2818         }
2819 }
2820 
2821 static void
2822 qede_fastpath_free_phys_mem(qede_t *qede)
2823 {
2824         int  i;
2825         qede_fastpath_t *fp;
2826 
2827         for (i = 0; i < qede->num_fp; i++) {
2828                 fp = &qede->fp_array[i];
2829 
2830                 qede_free_rx_ring_phys(qede, fp);
2831                 qede_free_tx_ring_phys(qede, fp);
2832                 qede_free_sb_phys(qede, fp);
2833         }
2834 }
2835 
2836 /*
2837  * Save dma_handles associated with the fastpath elements
2838  * allocate by ecore for doing dma_sync in the fast_path
2839  */
2840 static int
2841 qede_save_fp_dma_handles(qede_t *qede, qede_fastpath_t *fp)
2842 {
2843         int ret, i;
2844         qede_rx_ring_t *rx_ring;
2845         qede_tx_ring_t *tx_ring;
2846 
2847         rx_ring = fp->rx_ring;
2848 
2849         /* Rx bd ring dma_handle */
2850         ret = qede_osal_find_dma_handle_for_block(qede,
2851             (void *)rx_ring->rx_bd_ring.p_phys_addr,
2852             &rx_ring->rx_bd_dmah); 
2853         if (ret != DDI_SUCCESS) {
2854                 qede_print_err("!%s(%d): Cannot find dma_handle for "
2855                     "rx_bd_ring, addr %p", __func__, qede->instance,
2856                     rx_ring->rx_bd_ring.p_phys_addr);
2857                 goto exit;
2858         }
2859 
2860         /* rx cqe ring dma_handle */
2861         ret = qede_osal_find_dma_handle_for_block(qede,
2862             (void *)rx_ring->rx_cqe_ring.p_phys_addr,
2863             &rx_ring->rx_cqe_dmah);
2864         if (ret != DDI_SUCCESS) {
2865                 qede_print_err("!%s(%d): Cannot find dma_handle for "
2866                     "rx_cqe_ring, addr %p", __func__, qede->instance,
2867                     rx_ring->rx_cqe_ring.p_phys_addr);
2868                 goto exit;
2869         }
2870         /* rx cqe ring pbl */
2871         ret = qede_osal_find_dma_handle_for_block(qede,
2872             (void *)rx_ring->rx_cqe_ring.pbl_sp.p_phys_table,
2873             &rx_ring->rx_cqe_pbl_dmah);
2874         if (ret) {
2875                 qede_print_err("!%s(%d): Cannot find dma_handle for "
2876                     "rx_cqe pbl, addr %p", __func__, qede->instance,
2877                     rx_ring->rx_cqe_ring.pbl_sp.p_phys_table);
2878                 goto exit;
2879         }
2880 
2881         /* tx_bd ring dma_handle(s) */
2882         for (i = 0; i < qede->num_tc; i++) {
2883                 tx_ring = fp->tx_ring[i];
2884 
2885                 ret = qede_osal_find_dma_handle_for_block(qede,
2886                     (void *)tx_ring->tx_bd_ring.p_phys_addr,
2887                     &tx_ring->tx_bd_dmah);
2888                 if (ret != DDI_SUCCESS) {
2889                         qede_print_err("!%s(%d): Cannot find dma_handle "
2890                             "for tx_bd_ring, addr %p", __func__,
2891                             qede->instance,
2892                             tx_ring->tx_bd_ring.p_phys_addr);
2893                         goto exit;
2894                 }
2895 
2896                 ret = qede_osal_find_dma_handle_for_block(qede,
2897                     (void *)tx_ring->tx_bd_ring.pbl_sp.p_phys_table,
2898                     &tx_ring->tx_pbl_dmah);
2899                 if (ret) {
2900                         qede_print_err("!%s(%d): Cannot find dma_handle for "
2901                             "tx_bd pbl, addr %p", __func__, qede->instance,
2902                             tx_ring->tx_bd_ring.pbl_sp.p_phys_table);
2903                         goto exit;
2904                 }
2905         }
2906 
2907 exit:
2908         return (ret);
2909 }
2910 
2911 int
2912 qede_fastpath_alloc_phys_mem(qede_t *qede)
2913 {
2914         int status = 0, i;
2915         qede_fastpath_t *fp;
2916 
2917         for (i = 0; i < qede->num_fp; i++) {
2918                 fp = &qede->fp_array[i];
2919 
2920                 status = qede_alloc_sb_phys(qede, fp);
2921                 if (status != DDI_SUCCESS) {
2922                         goto err;
2923                 }
2924 
2925                 status = qede_alloc_rx_ring_phys(qede, fp);
2926                 if (status != DDI_SUCCESS) {
2927                         goto err;
2928                 }
2929 
2930                 status = qede_alloc_tx_ring_phys(qede, fp);
2931                 if (status != DDI_SUCCESS) {
2932                         goto err;
2933                 }
2934                 status = qede_save_fp_dma_handles(qede, fp);
2935                 if (status != DDI_SUCCESS) {
2936                         goto err;
2937                 }
2938         }
2939         return (status);
2940 err:
2941         qede_fastpath_free_phys_mem(qede);
2942         return (status);
2943 }
2944 
2945 static int
2946 qede_fastpath_config(qede_t *qede)
2947 {
2948         int i, j;
2949         qede_fastpath_t *fp;
2950         qede_rx_ring_t *rx_ring;
2951         qede_tx_ring_t *tx_ring;
2952         qede_vector_info_t *vect_info;
2953         int num_fp, num_hwfns;
2954 
2955         ASSERT(qede != NULL);
2956 
2957         num_fp = qede->num_fp;
2958         num_hwfns = qede->num_hwfns;
2959 
2960         vect_info = &qede->intr_ctx.intr_vect_info[num_hwfns];
2961         fp = &qede->fp_array[0];
2962         tx_ring = &qede->tx_array[0][0];
2963 
2964         for (i = 0; i < num_fp; i++, fp++, vect_info++) {
2965                 fp->sb_info = &qede->sb_array[i];
2966                 fp->qede = qede;
2967                 fp->fp_index = i;
2968                 /* 
2969                  * With a single hwfn, all fp's hwfn index should be zero 
2970                  * for all fp entries. If there are two engines this 
2971                  * index should altenate between 0 and 1.
2972                  */
2973                 fp->fp_hw_eng_index = fp->fp_index % num_hwfns;
2974                 fp->vport_id = 0;
2975                 fp->stats_id = 0;
2976                 fp->rss_id = fp->fp_index;
2977                 fp->rx_queue_index = fp->fp_index; 
2978                 fp->vect_info = vect_info; 
2979                 /*
2980                  * After vport update, interrupts will be
2981                  * running, so we need to intialize our
2982                  * enable/disable gate as such.
2983                  */ 
2984                 fp->disabled_by_poll = 0;
2985 
2986                 /* rx_ring setup */
2987                 rx_ring = &qede->rx_array[i];
2988                 fp->rx_ring = rx_ring;
2989                 rx_ring->fp = fp;
2990                 rx_ring->rx_buf_count = qede->rx_buf_count;
2991                 rx_ring->rx_buf_size = qede->rx_buf_size;
2992                 rx_ring->qede = qede;
2993                 rx_ring->sw_rx_cons = 0;
2994                 rx_ring->rx_copy_threshold = qede->rx_copy_threshold;
2995                 rx_ring->rx_low_buffer_threshold =
2996                     qede->rx_low_buffer_threshold;
2997                 rx_ring->queue_started = B_FALSE;
2998 
2999                 /* tx_ring setup */
3000                 for (j = 0; j < qede->num_tc; j++) {
3001                         tx_ring = &qede->tx_array[j][i];
3002                         fp->tx_ring[j] = tx_ring;
3003                         tx_ring->qede = qede;
3004                         tx_ring->fp = fp;
3005                         tx_ring->fp_idx = i;
3006                         tx_ring->tx_queue_index = i * qede->num_fp + 
3007                             fp->fp_index;
3008                         tx_ring->tx_buf_size = qede->tx_buf_size;
3009                         tx_ring->tx_ring_size = qede->tx_ring_size;
3010                         tx_ring->queue_started = B_FALSE;
3011 #ifdef  DBLK_DMA_PREMAP
3012                         tx_ring->pm_handle = qede->pm_handle;
3013 #endif
3014 
3015                         tx_ring->doorbell_addr =
3016                             qede->doorbell;
3017                         tx_ring->doorbell_handle =
3018                             qede->doorbell_handle;
3019                 }
3020         }
3021 
3022         return (DDI_SUCCESS);
3023 }
3024 
3025 /*
3026  * op = 1, Initialize link
3027  * op = 0, Destroy link
3028  */
3029 int
3030 qede_configure_link(qede_t *qede, bool op) 
3031 {
3032         struct ecore_dev *edev = &qede->edev;
3033         struct ecore_hwfn *hwfn;
3034         struct ecore_ptt *ptt = NULL;
3035         int i, ret = DDI_SUCCESS;
3036 
3037         for_each_hwfn(edev, i) {
3038                 hwfn = &edev->hwfns[i];
3039                 qede_info(qede, "Configuring link for hwfn#%d", i);
3040 
3041                 ptt = ecore_ptt_acquire(hwfn);
3042                 if (ptt == NULL) {
3043                         qede_info(qede, "Cannot reserver ptt from ecore");
3044                         ret = DDI_FAILURE;
3045                         goto exit;
3046                 }
3047 
3048                 ret = ecore_mcp_set_link(hwfn, ptt, op);
3049 
3050                 ecore_ptt_release(hwfn, ptt);
3051                 if (ret) {
3052                         /* if link config fails, make sure ptt is released */
3053                         goto exit;
3054                 }
3055         }
3056 exit:
3057         return (ret);
3058 }
3059 
3060 /*
3061  * drv_lock must be held by the caller.
3062  */
3063 int
3064 qede_stop(qede_t *qede)
3065 {
3066         int status;
3067 
3068         ASSERT(mutex_owned(&qede->drv_lock));
3069         qede->qede_state = QEDE_STATE_STOPPING;
3070 
3071         mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
3072 
3073         qede_disable_all_fastpath_intrs(qede);
3074         status = qede_configure_link(qede, 0 /* Re-Set */);
3075         if (status) {
3076                 /* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3077                 cmn_err(CE_NOTE, "!%s(%d): Failed to reset link",
3078                     __func__, qede->instance);
3079                 return (status);
3080         }
3081         qede_clear_filters(qede);
3082         status = qede_fastpath_stop_queues(qede);
3083         if (status != DDI_SUCCESS) {
3084                 /* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3085                 cmn_err(CE_WARN, "qede_stop:"
3086                     " qede_fastpath_stop_queues FAILED "
3087                     " qede=%p\n",
3088                     qede);
3089                 return (status);
3090         }
3091 
3092         qede_fastpath_free_phys_mem(qede);
3093         
3094         qede->qede_state = QEDE_STATE_STOPPED;
3095         return (DDI_SUCCESS);
3096 }
3097 
3098 /*
3099  * drv_lock must be held by the caller.
3100  */
3101 int
3102 qede_start(qede_t *qede)
3103 {
3104         int status;
3105 
3106         ASSERT(mutex_owned(&qede->drv_lock));
3107 
3108         qede->qede_state = QEDE_STATE_STARTING;
3109 
3110         mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
3111 
3112         /* 
3113          * Configure the fastpath blocks with
3114          * the sb_info, rx_ring and tx_rings
3115          */
3116         if (qede_fastpath_config(qede) != DDI_SUCCESS) {
3117                 /* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3118                 qede_print_err("!%s(%d): qede_fastpath_config failed",
3119                     __func__, qede->instance);
3120                 return (DDI_FAILURE);
3121         }
3122 
3123         
3124         /*
3125          * Allocate the physical memory
3126          * for fastpath.   
3127          */
3128         status = qede_fastpath_alloc_phys_mem(qede);
3129         if (status) {
3130                 cmn_err(CE_NOTE, "fastpath_alloc_phys_mem "
3131                     " failed qede=%p\n", qede);
3132                 return (DDI_FAILURE);
3133         }
3134         
3135         status = qede_fastpath_start_queues(qede);
3136         if (status) {
3137                 cmn_err(CE_NOTE, "fp_start_queues "
3138                     " failed qede=%p\n", qede);
3139                 goto err_out1;
3140         }
3141 
3142         status = qede_configure_link(qede, 1 /* Set */);
3143         if (status) {
3144                 cmn_err(CE_NOTE, "!%s(%d): Failed to configure link",
3145                     __func__, qede->instance);
3146                 goto err_out1;
3147         }
3148 
3149         /*
3150          * Put interface in regular mode 
3151          */
3152         if (qede_set_filter_rx_mode(qede, 
3153                 QEDE_FILTER_RX_MODE_REGULAR) != DDI_SUCCESS) {
3154                 cmn_err(CE_NOTE, "!%s(%d): Failed to set filter mode",
3155                     __func__, qede->instance);
3156                 goto err_out1;
3157         }
3158 
3159         status = qede_enable_all_fastpath_intrs(qede);
3160         if (status) {
3161                 /* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3162                 cmn_err(CE_NOTE, "!%s(%d): Failed to enable intrs",
3163                     __func__, qede->instance);
3164                 goto err_out2;
3165         }
3166         qede->qede_state = QEDE_STATE_STARTED;
3167 
3168         return (status);
3169 
3170 err_out2:
3171         (void) qede_fastpath_stop_queues(qede);
3172 err_out1:
3173         qede_fastpath_free_phys_mem(qede);
3174         return (DDI_FAILURE);
3175 }
3176 
3177 static void
3178 qede_free_attach_resources(qede_t *qede)
3179 {
3180         struct ecore_dev *edev;
3181         int status;     
3182         
3183         edev = &qede->edev;
3184 
3185         if (qede->attach_resources & QEDE_ECORE_HW_INIT) {
3186                 if (ecore_hw_stop(edev) != ECORE_SUCCESS) {
3187                         cmn_err(CE_NOTE, "%s(%d): ecore_hw_stop: failed\n",
3188                             __func__, qede->instance);
3189                 }
3190                 qede->attach_resources &= ~QEDE_ECORE_HW_INIT;
3191         }
3192         
3193         if (qede->attach_resources & QEDE_SP_INTR_ENBL) {
3194                 status = qede_disable_slowpath_intrs(qede);
3195                 if (status != DDI_SUCCESS) {
3196                         qede_print("%s(%d): qede_disable_slowpath_intrs Failed",
3197                             __func__, qede->instance);
3198                 } 
3199                 qede->attach_resources &= ~QEDE_SP_INTR_ENBL;
3200         }
3201         if (qede->attach_resources & QEDE_KSTAT_INIT) {
3202                 qede_kstat_fini(qede);
3203                 qede->attach_resources &= ~QEDE_KSTAT_INIT;
3204         }
3205         
3206 
3207         if (qede->attach_resources & QEDE_GLD_INIT) {
3208                 status = mac_unregister(qede->mac_handle);
3209                 if (status != 0) {
3210                         qede_print("%s(%d): mac_unregister Failed",
3211                             __func__, qede->instance);
3212                 } 
3213                 qede->attach_resources &= ~QEDE_GLD_INIT;
3214         }
3215 
3216         if (qede->attach_resources & QEDE_EDEV_CONFIG) {
3217                 ecore_resc_free(edev);
3218                 qede->attach_resources &= ~QEDE_EDEV_CONFIG;
3219         }
3220 
3221         if (qede->attach_resources & QEDE_INTR_CONFIG) {
3222                 qede_unconfig_intrs(qede);
3223                 qede->attach_resources &= ~QEDE_INTR_CONFIG;
3224         }
3225 
3226         if (qede->attach_resources & QEDE_INTR_ALLOC) {
3227                 qede_free_intrs(qede);
3228                 qede->attach_resources &= ~QEDE_INTR_ALLOC;
3229         }
3230 
3231         if (qede->attach_resources & QEDE_INIT_LOCKS) {
3232                 qede_destroy_locks(qede);
3233                 qede->attach_resources &= ~QEDE_INIT_LOCKS;
3234         }
3235 
3236         if (qede->attach_resources & QEDE_IO_STRUCT_ALLOC) {
3237                 qede_free_io_structs(qede);
3238                 qede->attach_resources &= ~QEDE_IO_STRUCT_ALLOC;
3239         }
3240 #ifdef QEDE_LSR
3241         if (qede->attach_resources & QEDE_CALLBACK) {
3242 
3243 
3244                 status = ddi_cb_unregister(qede->callback_hdl);
3245                 if (status != DDI_SUCCESS) {
3246                 } 
3247                 qede->attach_resources &= ~QEDE_CALLBACK;
3248         }
3249 #endif
3250         if (qede->attach_resources & QEDE_ECORE_HW_PREP) {
3251                 ecore_hw_remove(edev);
3252                 qede->attach_resources &= ~QEDE_ECORE_HW_PREP;
3253         }
3254 
3255         if (qede->attach_resources & QEDE_PCI) {
3256                 qede_unconfig_pci(qede);
3257                 qede->attach_resources &= ~QEDE_PCI;
3258         }
3259 
3260         if (qede->attach_resources & QEDE_FM) {
3261                 qede_unconfig_fm(qede);
3262                 qede->attach_resources &= ~QEDE_FM;
3263         }
3264 
3265         /*
3266          * Check for possible mem. left behind by ecore
3267          */
3268         (void) qede_osal_cleanup(qede);
3269 
3270         if (qede->attach_resources & QEDE_STRUCT_ALLOC) {
3271                 ddi_set_driver_private(qede->dip, NULL);
3272                 qede->attach_resources &= ~QEDE_STRUCT_ALLOC;
3273                 kmem_free(qede, sizeof (qede_t));
3274         }
3275 }
3276 
3277 /*
3278  * drv_lock must be held by the caller.
3279  */
3280 static int
3281 qede_suspend(qede_t *qede)
3282 {
3283         // STUB
3284         ASSERT(mutex_owned(&qede->drv_lock));
3285         printf("in qede_suspend\n");
3286         return (DDI_FAILURE);
3287 }
3288 
3289 static int
3290 qede_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
3291 {
3292         qede_t *qede;
3293         struct ecore_dev *edev;
3294         int instance;
3295         uint32_t vendor_id;
3296         uint32_t device_id;
3297         struct ecore_hwfn *p_hwfn;
3298         struct ecore_ptt *p_ptt;
3299         struct ecore_mcp_link_params *link_params;
3300         struct ecore_hw_init_params hw_init_params;
3301         struct ecore_drv_load_params load_params;
3302         int *props;
3303         uint32_t num_props;
3304         int rc = 0;
3305 
3306         switch (cmd) {
3307         default:
3308                 return (DDI_FAILURE);
3309     
3310         case DDI_RESUME:
3311         {
3312                 qede = (qede_t * )ddi_get_driver_private(dip);
3313                 if (qede == NULL || qede->dip != dip) {
3314                         cmn_err(CE_NOTE, "qede:%s: Could not allocate"
3315                             " adapter structure\n", __func__);
3316                         return (DDI_FAILURE);
3317                 }
3318 
3319                 mutex_enter(&qede->drv_lock);
3320                 if (qede->qede_state != QEDE_STATE_SUSPENDED) {
3321                         mutex_exit(&qede->drv_lock);
3322                         return (DDI_FAILURE);
3323                 }
3324         
3325                 if (qede_resume(qede) != DDI_SUCCESS) {
3326                         cmn_err(CE_NOTE, "%s:%d resume operation failure\n",
3327                             __func__, qede->instance);
3328                         mutex_exit(&qede->drv_lock);
3329                         return (DDI_FAILURE);
3330                 }
3331 
3332                 qede->qede_state = QEDE_STATE_ATTACHED;
3333                 mutex_exit(&qede->drv_lock);
3334                 return (DDI_SUCCESS);
3335         }
3336         case DDI_ATTACH:
3337         {
3338                 instance = ddi_get_instance(dip);
3339 
3340                 /* Allocate main structure rounded up to cache line size */
3341                 if ((qede = kmem_zalloc(sizeof (qede_t), KM_SLEEP)) == NULL) {
3342                         cmn_err(CE_NOTE, "!%s(%d): Could not allocate adapter "
3343                             "structure\n", __func__, instance);
3344                         return (DDI_FAILURE);
3345                 }
3346 
3347                 qede->attach_resources |= QEDE_STRUCT_ALLOC;
3348                 ddi_set_driver_private(dip, qede);
3349                 qede->dip = dip;
3350                 qede->instance = instance;
3351                 snprintf(qede->name, sizeof (qede->name), "qede%d", instance);
3352                 edev = &qede->edev;
3353         
3354                 if (qede_config_fm(qede) != DDI_SUCCESS) {
3355                         goto exit_with_err;
3356                 }
3357                 qede->attach_resources |= QEDE_FM;
3358 
3359                 /* 
3360                  * Do PCI config setup and map the register 
3361                  * and doorbell space */
3362                 if (qede_config_pci(qede) != DDI_SUCCESS) {
3363                         goto exit_with_err;
3364                 }
3365                 qede->attach_resources |= QEDE_PCI;
3366 
3367                 /*
3368                  * Setup OSAL mem alloc related locks.
3369                  * Do not call any ecore functions without
3370                  * initializing these locks
3371                  */
3372                 mutex_init(&qede->mem_list.mem_list_lock, NULL,
3373                     MUTEX_DRIVER, 0);
3374                 mutex_init(&qede->phys_mem_list.lock, NULL,
3375                     MUTEX_DRIVER, 0);
3376                 QEDE_INIT_LIST_HEAD(&qede->mem_list.mem_list_head);
3377                 QEDE_INIT_LIST_HEAD(&qede->phys_mem_list.head);
3378                 QEDE_INIT_LIST_HEAD(&qede->mclist.head);
3379 
3380 
3381                 /*
3382                  * FIXME: this function calls ecore api, but
3383                  * dp_level and module are not yet set
3384                  */
3385                 if (qede_prepare_edev(qede) != ECORE_SUCCESS) {
3386                         // report fma
3387                         goto exit_with_err;
3388                 }
3389 
3390                 qede->num_hwfns = edev->num_hwfns;
3391                 qede->num_tc = 1;
3392                 memcpy(qede->ether_addr, edev->hwfns->hw_info.hw_mac_addr,
3393                     ETHERADDRL);
3394                 qede_info(qede, "Interface mac_addr : " MAC_STRING,
3395                     MACTOSTR(qede->ether_addr));
3396                 qede->attach_resources |= QEDE_ECORE_HW_PREP;
3397 
3398                 if (qede_set_operating_params(qede) != DDI_SUCCESS) {
3399                         goto exit_with_err;
3400                 }
3401                 qede->attach_resources |= QEDE_SET_PARAMS;
3402 #ifdef QEDE_LSR
3403                 if (ddi_cb_register(qede->dip,
3404                     qede->callback_flags,
3405                     qede_callback,
3406                     qede,
3407                     NULL,
3408                     &qede->callback_hdl)) {
3409                         goto exit_with_err;
3410                 }
3411                 qede->attach_resources |= QEDE_CALLBACK;
3412 #endif
3413                 qede_cfg_reset(qede);
3414 
3415                 if (qede_alloc_intrs(qede)) {
3416                         cmn_err(CE_NOTE, "%s: Could not allocate interrupts\n",
3417                             __func__);
3418                         goto exit_with_err;
3419                 }
3420         
3421                 qede->attach_resources |= QEDE_INTR_ALLOC;
3422 
3423                 if (qede_config_intrs(qede)) {
3424                         cmn_err(CE_NOTE, "%s: Could not allocate interrupts\n",
3425                             __func__);
3426                         goto exit_with_err;
3427                 }
3428                 qede->attach_resources |= QEDE_INTR_CONFIG;
3429 
3430                 if (qede_alloc_io_structs(qede) != DDI_SUCCESS) {
3431                         cmn_err(CE_NOTE, "%s: Could not allocate data"
3432                             " path structures\n", __func__);
3433                         goto exit_with_err;
3434                 }
3435 
3436                 qede->attach_resources |= QEDE_IO_STRUCT_ALLOC;
3437 
3438                 /* Lock init cannot fail */
3439                 qede_init_locks(qede);
3440                 qede->attach_resources |= QEDE_INIT_LOCKS;
3441 
3442 
3443                 if (qede_config_edev(qede)) {
3444                         cmn_err(CE_NOTE, "%s: Could not configure ecore \n",
3445                             __func__);
3446                         goto exit_with_err;
3447                 }
3448                 qede->attach_resources |= QEDE_EDEV_CONFIG;
3449 
3450                 if (qede_kstat_init(qede) == B_FALSE) {
3451                         cmn_err(CE_NOTE, "%s: Could not initialize kstat \n",
3452                             __func__);
3453                         goto exit_with_err;
3454 
3455                 }
3456                 qede->attach_resources |= QEDE_KSTAT_INIT;
3457 
3458                 if (qede_gld_init(qede) == B_FALSE) {
3459                         cmn_err(CE_NOTE, "%s: Failed call to qede_gld_init",
3460                             __func__);
3461                         goto exit_with_err;
3462                 }
3463 
3464                 qede->attach_resources |= QEDE_GLD_INIT;
3465 
3466                 if (qede_enable_slowpath_intrs(qede)) {
3467                         cmn_err(CE_NOTE, "%s: Could not enable interrupts\n",
3468                             __func__);
3469                         goto exit_with_err;
3470                 }
3471 
3472                 qede->attach_resources |= QEDE_SP_INTR_ENBL;
3473 
3474                 memset((void *)&hw_init_params, 0, 
3475                     sizeof (struct ecore_hw_init_params));
3476                 hw_init_params.p_drv_load_params = &load_params;
3477 
3478                 hw_init_params.p_tunn = NULL; 
3479                 hw_init_params.b_hw_start = true;
3480                 hw_init_params.int_mode = qede->intr_ctx.intr_mode;
3481                 hw_init_params.allow_npar_tx_switch = false;
3482                 hw_init_params.bin_fw_data = NULL;
3483                 load_params.is_crash_kernel = false;
3484                 load_params.mfw_timeout_val = 0; 
3485                 load_params.avoid_eng_reset = false;
3486                 load_params.override_force_load = 
3487                     ECORE_OVERRIDE_FORCE_LOAD_NONE;
3488 
3489                 if (ecore_hw_init(edev, &hw_init_params) != ECORE_SUCCESS) {
3490                         cmn_err(CE_NOTE,
3491                             "%s: Could not initialze ecore block\n",
3492                              __func__);
3493                         goto exit_with_err;
3494                 }
3495                 qede->attach_resources |= QEDE_ECORE_HW_INIT;
3496                 qede->qede_state = QEDE_STATE_ATTACHED;
3497 
3498                 qede->detach_unsafe = 0;
3499 
3500                 snprintf(qede->version,
3501                         sizeof (qede->version),
3502                         "%d.%d.%d",
3503                         MAJVERSION,
3504                         MINVERSION,
3505                         REVVERSION);
3506 
3507                 snprintf(qede->versionFW,
3508                         sizeof (qede->versionFW),
3509                         "%d.%d.%d.%d",
3510                         FW_MAJOR_VERSION,
3511                         FW_MINOR_VERSION,
3512                         FW_REVISION_VERSION,
3513                         FW_ENGINEERING_VERSION);
3514 
3515                 p_hwfn = &qede->edev.hwfns[0];
3516                 p_ptt = ecore_ptt_acquire(p_hwfn);
3517                 /*
3518                  * (test) : saving the default link_input params 
3519                  */
3520                 link_params = ecore_mcp_get_link_params(p_hwfn);
3521                 memset(&qede->link_input_params, 0, 
3522                     sizeof (qede_link_input_params_t));
3523                 memcpy(&qede->link_input_params.default_link_params, 
3524                     link_params,
3525                     sizeof (struct ecore_mcp_link_params));
3526 
3527                 p_hwfn = ECORE_LEADING_HWFN(edev);
3528                 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &qede->mfw_ver, NULL);
3529 
3530                 ecore_ptt_release(p_hwfn, p_ptt);
3531 
3532                 snprintf(qede->versionMFW,
3533                         sizeof (qede->versionMFW),
3534                         "%d.%d.%d.%d",
3535                         (qede->mfw_ver >> 24) & 0xFF,
3536                         (qede->mfw_ver >> 16) & 0xFF,
3537                         (qede->mfw_ver >> 8) & 0xFF,
3538                         qede->mfw_ver & 0xFF);   
3539 
3540                 snprintf(qede->chip_name,
3541                         sizeof (qede->chip_name),
3542                         "%s",
3543                         ECORE_IS_BB(edev) ? "BB" : "AH");
3544 
3545                 snprintf(qede->chipID,
3546                         sizeof (qede->chipID),
3547                         "0x%x",
3548                         qede->edev.chip_num);
3549 
3550                 *qede->bus_dev_func = 0;
3551                 vendor_id = 0;
3552                 device_id = 0;
3553 
3554 
3555                 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, qede->dip,
3556                                         0, "reg", &props, &num_props);
3557                 if((rc == DDI_PROP_SUCCESS) && (num_props > 0)) {
3558 
3559                 snprintf(qede->bus_dev_func,
3560                         sizeof (qede->bus_dev_func),
3561                         "%04x:%02x:%02x",
3562                         PCI_REG_BUS_G(props[0]),
3563                         PCI_REG_DEV_G(props[0]),
3564                         PCI_REG_FUNC_G(props[0]));
3565         
3566                 /* 
3567                  * This information is used 
3568                  * in the QEDE_FUNC_INFO ioctl 
3569                  */
3570                 qede->pci_func = (uint8_t) PCI_REG_FUNC_G(props[0]);
3571 
3572                 ddi_prop_free(props);
3573 
3574                 }
3575 
3576                 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, qede->dip,
3577                                         0, "vendor-id", &props, &num_props);
3578                 if((rc == DDI_PROP_SUCCESS) && (num_props > 0)) {
3579                         vendor_id = props[0];
3580                         ddi_prop_free(props);
3581                 }
3582                 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, qede->dip,
3583                                         0, "device-id", &props, &num_props);
3584                 if((rc == DDI_PROP_SUCCESS) && (num_props > 0)) {
3585                         device_id = props[0];
3586                         ddi_prop_free(props);
3587                 }
3588 
3589 
3590                 snprintf(qede->vendor_device,
3591                         sizeof (qede->vendor_device),
3592                         "%04x:%04x",
3593                         vendor_id,
3594                         device_id);
3595 
3596 
3597                 snprintf(qede->intrAlloc,
3598                         sizeof (qede->intrAlloc), "%d %s",
3599                         (qede->intr_ctx.intr_type_in_use == DDI_INTR_TYPE_FIXED)
3600                         ? 1 :
3601                         qede->intr_ctx.intr_vect_allocated,
3602                         (qede->intr_ctx.intr_type_in_use == DDI_INTR_TYPE_MSIX)
3603                         ? "MSIX" :
3604                         (qede->intr_ctx.intr_type_in_use == DDI_INTR_TYPE_MSI) 
3605                         ? "MSI"  : "Fixed");
3606 
3607                 qede_print("%s(%d): success, addr %p chip %s id %s intr %s\n",
3608                     __func__, qede->instance, qede, qede->chip_name, 
3609                     qede->vendor_device,qede->intrAlloc);
3610 
3611                 qede_print("%s(%d): version %s FW %s MFW %s\n",
3612                     __func__, qede->instance, qede->version,
3613                     qede->versionFW, qede->versionMFW);
3614 
3615                 return (DDI_SUCCESS);
3616         }
3617         }
3618 exit_with_err:
3619         cmn_err(CE_WARN, "%s:%d   failed %x\n", __func__, qede->instance, 
3620             qede->attach_resources);                 
3621         (void)qede_free_attach_resources(qede);
3622         return (DDI_FAILURE);
3623 }
3624 
3625 static int
3626 qede_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
3627 {
3628 
3629         qede_t *qede;
3630         int status;
3631         uint32_t count = 0;
3632 
3633         qede = (qede_t *)ddi_get_driver_private(dip);
3634         if ((qede == NULL) || (qede->dip != dip)) {
3635                 return (DDI_FAILURE);
3636         }
3637 
3638         switch (cmd) {
3639         default:
3640                 return (DDI_FAILURE);
3641         case DDI_SUSPEND:
3642                 mutex_enter(&qede->drv_lock);
3643                 status = qede_suspend(qede); 
3644                 if (status != DDI_SUCCESS) {
3645                         mutex_exit(&qede->drv_lock);
3646                         return (DDI_FAILURE);
3647                 }
3648 
3649                 qede->qede_state = QEDE_STATE_SUSPENDED;
3650                 mutex_exit(&qede->drv_lock);
3651                 return (DDI_SUCCESS);
3652 
3653         case DDI_DETACH:
3654                 mutex_enter(&qede->drv_lock);
3655                 if (qede->qede_state == QEDE_STATE_STARTED) {
3656                         qede->plumbed = 0;
3657                         status = qede_stop(qede);
3658                         if (status != DDI_SUCCESS) {
3659                                 qede->qede_state = QEDE_STATE_FAILED;
3660                                 mutex_exit(&qede->drv_lock);
3661                                 return (DDI_FAILURE);
3662                         }
3663                 }
3664                 mutex_exit(&qede->drv_lock);
3665                 if (qede->detach_unsafe) {
3666                         /*
3667                          * wait for rx buffers to be returned from
3668                          * upper layers
3669                          */
3670                         count = 0;
3671                         while ((qede->detach_unsafe) && (count < 100)) {
3672                                 qede_delay(100);
3673                                 count++;
3674                         }
3675                         if (qede->detach_unsafe) {
3676                                 qede_info(qede, "!%s(%d) : Buffers still with"
3677                                     " OS, failing detach\n",
3678                                     qede->name, qede->instance);
3679                                 return (DDI_FAILURE);
3680                         }
3681                 }
3682                 qede_free_attach_resources(qede);
3683                 return (DDI_SUCCESS);
3684         }
3685 }
3686 
3687 static int
3688 /* LINTED E_FUNC_ARG_UNUSED */
3689 qede_quiesce(dev_info_t *dip)
3690 {
3691         qede_t *qede = (qede_t *)ddi_get_driver_private(dip);
3692         struct ecore_dev *edev = &qede->edev;
3693         int status = DDI_SUCCESS;
3694         struct ecore_hwfn *p_hwfn;
3695         struct ecore_ptt *p_ptt = NULL;
3696 
3697         mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
3698         p_hwfn = ECORE_LEADING_HWFN(edev);
3699         p_ptt = ecore_ptt_acquire(p_hwfn);
3700         if (p_ptt) {
3701                 status = ecore_start_recovery_process(p_hwfn, p_ptt);
3702                 ecore_ptt_release(p_hwfn, p_ptt);
3703                 OSAL_MSLEEP(5000);
3704         }
3705         return (status);
3706 
3707 }
3708 
3709 
3710 DDI_DEFINE_STREAM_OPS(qede_dev_ops, nulldev, nulldev, qede_attach, qede_detach,
3711     nodev, NULL, D_MP, NULL, qede_quiesce);
3712 
3713 static struct modldrv qede_modldrv =
3714 {
3715     &mod_driverops,    /* drv_modops (must be mod_driverops for drivers) */
3716     QEDE_PRODUCT_INFO, /* drv_linkinfo (string displayed by modinfo) */
3717     &qede_dev_ops      /* drv_dev_ops */
3718 };
3719 
3720 
3721 static struct modlinkage qede_modlinkage =
3722 {
3723     MODREV_1,        /* ml_rev */
3724     (&qede_modldrv), /* ml_linkage */
3725     NULL           /* NULL termination */
3726 };
3727 
3728 int 
3729 _init(void)
3730 {
3731     int rc;
3732 
3733     qede_dev_ops.devo_cb_ops->cb_str = NULL;
3734     mac_init_ops(&qede_dev_ops, "qede");
3735 
3736     /* Install module information with O/S */
3737     if ((rc = mod_install(&qede_modlinkage)) != DDI_SUCCESS) {
3738         mac_fini_ops(&qede_dev_ops);
3739         cmn_err(CE_NOTE, "mod_install failed");
3740         return (rc);
3741     }
3742 
3743     return (rc);
3744 }
3745 
3746 
3747 int 
3748 _fini(void)
3749 {
3750     int rc;
3751 
3752     if ((rc = mod_remove(&qede_modlinkage)) == DDI_SUCCESS) {
3753         mac_fini_ops(&qede_dev_ops);
3754     }
3755 
3756     return (rc);
3757 }
3758 
3759 
3760 int
3761 _info(struct modinfo * modinfop)
3762 {
3763     return (mod_info(&qede_modlinkage, modinfop));
3764 }