/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1,  (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0.
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "qede.h"
#include <sys/pci.h>
#include <sys/pcie.h>

extern ddi_dma_attr_t qede_gen_buf_dma_attr;
extern struct ddi_device_acc_attr qede_desc_acc_attr;

/*
 * Find the dma_handle corresponding to the tx, rx data structures
 */
int
qede_osal_find_dma_handle_for_block(qede_t *qede, void *addr,
    ddi_dma_handle_t *dma_handle)
{
        qede_phys_mem_entry_t *entry;
        int ret = DDI_FAILURE;

        mutex_enter(&qede->phys_mem_list.lock);
        QEDE_LIST_FOR_EACH_ENTRY(entry,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            &qede->phys_mem_list.head,
            qede_phys_mem_entry_t,
            list_entry) {
                if (entry->paddr == addr) {
                        *dma_handle = entry->dma_handle;
                        ret = DDI_SUCCESS;
                        break;
                }
        }

        mutex_exit(&qede->phys_mem_list.lock);

        return (ret);
}

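/*
 * Sync a tracked DMA block, looked up by its physical address: for the
 * device (is_post == false, before the device reads the buffer) or for
 * the kernel (is_post == true, after the device has written it).
 */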
void
qede_osal_dma_sync(struct ecore_dev *edev, void *addr, u32 size, bool is_post)
{
        qede_t *qede = (qede_t *)edev;
        qede_phys_mem_entry_t *entry;
        ddi_dma_handle_t *dma_handle = NULL;
        uint_t type = (is_post == false) ? DDI_DMA_SYNC_FORDEV :
            DDI_DMA_SYNC_FORKERNEL;

        mutex_enter(&qede->phys_mem_list.lock);

        /* LINTED E_BAD_PTR_CAST_ALIGN */
        QEDE_LIST_FOR_EACH_ENTRY(entry, &qede->phys_mem_list.head,
            qede_phys_mem_entry_t, list_entry) {
                if (entry->paddr == addr) {
                        dma_handle = &entry->dma_handle;
                        break;
                }
        }

        if (dma_handle == NULL) {
                qede_print_err("!%s(%d): addr %p not found in list",
                    __func__, qede->instance, addr);
                mutex_exit(&qede->phys_mem_list.lock);
                return;
        }

        (void) ddi_dma_sync(*dma_handle,
            0 /* offset into the mem block */,
            size, type);

        mutex_exit(&qede->phys_mem_list.lock);
}

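/*
 * Zeroed kmem allocation on behalf of ecore. Each allocation is
 * recorded on qede->mem_list so that qede_osal_cleanup() can reclaim
 * anything ecore failed to free before unload.
 */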
void *
qede_osal_zalloc(struct ecore_dev *edev, int flags, size_t size)
{
        qede_t *qede = (qede_t *)edev;
        qede_mem_list_entry_t *new_entry;
        void *buf;

        if ((new_entry = kmem_zalloc(sizeof (qede_mem_list_entry_t), flags))
            == NULL) {
                qede_print_err("%s(%d): Failed to alloc new list entry",
                    __func__, qede->instance);
                return (NULL);
        }

        if ((buf = kmem_zalloc(size, flags)) == NULL) {
                qede_print_err("%s(%d): Failed to alloc mem, size %lu",
                    __func__, qede->instance, size);
                kmem_free(new_entry, sizeof (qede_mem_list_entry_t));
                return (NULL);
        }

        new_entry->size = size;
        new_entry->buf = buf;

        mutex_enter(&qede->mem_list.mem_list_lock);
        QEDE_LIST_ADD(&new_entry->mem_entry, &qede->mem_list.mem_list_head);
        mutex_exit(&qede->mem_list.mem_list_lock);

        return (buf);
}

void *
qede_osal_alloc(struct ecore_dev *edev, int flags, size_t size)
{
        qede_t *qede = (qede_t *)edev;
        qede_mem_list_entry_t *new_entry;
        void *buf;

        if ((new_entry = kmem_zalloc(sizeof (qede_mem_list_entry_t), flags))
            == NULL) {
                qede_print_err("%s(%d): Failed to alloc new list entry",
                    __func__, qede->instance);
                return (NULL);
        }

        if ((buf = kmem_alloc(size, flags)) == NULL) {
                qede_print_err("%s(%d): Failed to alloc %lu bytes",
                    __func__, qede->instance, size);
                kmem_free(new_entry, sizeof (qede_mem_list_entry_t));
                return (NULL);
        }

        new_entry->size = size;
        new_entry->buf = buf;

        mutex_enter(&qede->mem_list.mem_list_lock);
        QEDE_LIST_ADD(&new_entry->mem_entry, &qede->mem_list.mem_list_head);
        mutex_exit(&qede->mem_list.mem_list_lock);

        return (buf);
}

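/*
 * Free a buffer obtained from qede_osal_zalloc()/qede_osal_alloc(),
 * removing its tracking entry from the mem_list as well.
 */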
void
qede_osal_free(struct ecore_dev *edev, void *addr)
{
        qede_t *qede = (qede_t *)edev;
        qede_mem_list_entry_t *mem_entry;

        mutex_enter(&qede->mem_list.mem_list_lock);

        /* LINTED E_BAD_PTR_CAST_ALIGN */
        QEDE_LIST_FOR_EACH_ENTRY(mem_entry, &qede->mem_list.mem_list_head,
            qede_mem_list_entry_t, mem_entry) {
                if (mem_entry->buf == addr) {
                        QEDE_LIST_REMOVE(&mem_entry->mem_entry,
                            &qede->mem_list.mem_list_head);
                        kmem_free(addr, mem_entry->size);
                        kmem_free(mem_entry, sizeof (qede_mem_list_entry_t));
                        break;
                }
        }

        mutex_exit(&qede->mem_list.mem_list_lock);
}

/*
 * Allocate a physically contiguous, DMA-coherent buffer and return
 * both its kernel virtual address and its physical address (*paddr).
 * @VB: What are the alignment requirements here?
 */
void *
qede_osal_dma_alloc_coherent(struct ecore_dev *edev, dma_addr_t *paddr,
    size_t size)
{
        qede_t *qede = (qede_t *)edev;
        qede_phys_mem_entry_t *new_entry;
        ddi_dma_handle_t *dma_handle;
        ddi_acc_handle_t *dma_acc_handle;
        ddi_dma_cookie_t cookie;
        int ret;
        caddr_t pbuf;
        unsigned int count;

        memset(&cookie, 0, sizeof (cookie));

        if ((new_entry =
            kmem_zalloc(sizeof (qede_phys_mem_entry_t), KM_NOSLEEP)) == NULL) {
                qede_print_err("%s(%d): Failed to alloc new list entry",
                    __func__, qede->instance);
                return (NULL);
        }

        dma_handle = &new_entry->dma_handle;
        dma_acc_handle = &new_entry->dma_acc_handle;

        if ((ret =
            ddi_dma_alloc_handle(qede->dip, &qede_gen_buf_dma_attr,
            DDI_DMA_DONTWAIT, NULL, dma_handle)) != DDI_SUCCESS) {
                qede_print_err("%s(%d): Failed to alloc dma handle",
                    __func__, qede->instance);
                qede_stacktrace(qede);
                goto free;
        }

        if ((ret = ddi_dma_mem_alloc(*dma_handle, size, &qede_desc_acc_attr,
            DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, &pbuf, &size,
            dma_acc_handle)) != DDI_SUCCESS) {
                qede_print_err("%s(%d): Failed to alloc dma mem, %lu bytes",
                    __func__, qede->instance, size);
                qede_stacktrace(qede);
                goto free_hdl;
        }

        if ((ret = ddi_dma_addr_bind_handle(*dma_handle, NULL, pbuf, size,
            DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
            DDI_DMA_DONTWAIT, NULL, &cookie, &count)) != DDI_DMA_MAPPED) {
                qede_print("!%s(%d): failed to bind dma addr to handle,"
                    " ret %d",
                    __func__, qede->instance, ret);
                goto free_dma_mem;
        }

        if (count != 1) {
                qede_print("%s(%d): ncookies = %d for phys addr %p, "
                    "discard dma buffer",
                    __func__, qede->instance, count,
                    (void *)cookie.dmac_laddress);
                (void) ddi_dma_unbind_handle(*dma_handle);
                goto free_dma_mem;
        }

        new_entry->size = size;
        new_entry->virt_addr = pbuf;

        new_entry->paddr = (void *)cookie.dmac_laddress;

        *paddr = (dma_addr_t)new_entry->paddr;

        mutex_enter(&qede->phys_mem_list.lock);
        QEDE_LIST_ADD(&new_entry->list_entry, &qede->phys_mem_list.head);
        mutex_exit(&qede->phys_mem_list.lock);

        return (new_entry->virt_addr);

free_dma_mem:
        ddi_dma_mem_free(dma_acc_handle);
free_hdl:
        ddi_dma_free_handle(dma_handle);
free:
        kmem_free(new_entry, sizeof (qede_phys_mem_entry_t));
        return (NULL);
}

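/*
 * Release a DMA-coherent buffer allocated by
 * qede_osal_dma_alloc_coherent(), looked up by its virtual address.
 */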
void
qede_osal_dma_free_coherent(struct ecore_dev *edev, void *vaddr,
    dma_addr_t paddr, size_t size)
{
        qede_t *qede = (qede_t *)edev;
        qede_phys_mem_entry_t *entry;

        mutex_enter(&qede->phys_mem_list.lock);

        /* LINTED E_BAD_PTR_CAST_ALIGN */
        QEDE_LIST_FOR_EACH_ENTRY(entry, &qede->phys_mem_list.head,
            qede_phys_mem_entry_t, list_entry) {
                if (entry->virt_addr == vaddr) {
                        QEDE_LIST_REMOVE(&entry->list_entry,
                            &qede->phys_mem_list.head);
                        (void) ddi_dma_unbind_handle(entry->dma_handle);
                        ddi_dma_mem_free(&entry->dma_acc_handle);
                        ddi_dma_free_handle(&entry->dma_handle);
                        kmem_free(entry, sizeof (qede_phys_mem_entry_t));
                        break;
                }
        }

        mutex_exit(&qede->phys_mem_list.lock);
}

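/*
 * Map the media type reported by the management firmware to the
 * closest GLD media constant.
 */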
static int
qede_get_port_type(uint32_t media_type)
{
        uint32_t port_type;

        switch (media_type) {
        case MEDIA_SFPP_10G_FIBER:
        case MEDIA_SFP_1G_FIBER:
        case MEDIA_XFP_FIBER:
        case MEDIA_KR:
                port_type = GLDM_FIBER;
                break;
        case MEDIA_DA_TWINAX:
                port_type = GLDM_BNC; /* Check? */
                break;
        case MEDIA_BASE_T:
                port_type = GLDM_TP;
                break;
        case MEDIA_NOT_PRESENT:
        case MEDIA_UNSPECIFIED:
        default:
                port_type = GLDM_UNKNOWN;
                break;
        }
        return (port_type);
}

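/*
 * Collect the current link configuration for a hardware function:
 * media/port type, link state, locally advertised and supported
 * speeds, pause settings, and the link partner's advertised
 * capabilities.
 */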
void
qede_get_link_info(struct ecore_hwfn *hwfn, struct qede_link_cfg *lnkCfg)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;
        uint32_t media_type;
        struct ecore_mcp_link_state lnk_state;
        struct ecore_mcp_link_params lnk_params;
        struct ecore_mcp_link_capabilities lnk_caps;

        ecore_mcp_get_media_type(edev, &media_type);
        lnkCfg->port = qede_get_port_type(media_type);

        memcpy(&lnk_state, ecore_mcp_get_link_state(hwfn),
            sizeof (lnk_state));
        memcpy(&lnk_params, ecore_mcp_get_link_params(hwfn),
            sizeof (lnk_params));
        memcpy(&lnk_caps, ecore_mcp_get_link_capabilities(hwfn),
            sizeof (lnk_caps));

        if (lnk_state.link_up) {
                lnkCfg->link_up = B_TRUE;
                lnkCfg->speed = lnk_state.speed;
                lnkCfg->duplex = DUPLEX_FULL;
        }

        if (lnk_params.speed.autoneg) {
                lnkCfg->supp_capab.autoneg = B_TRUE;
                lnkCfg->adv_capab.autoneg = B_TRUE;
        }
        if (lnk_params.speed.autoneg ||
            (lnk_params.pause.forced_rx && lnk_params.pause.forced_tx)) {
                lnkCfg->supp_capab.asym_pause = B_TRUE;
                lnkCfg->adv_capab.asym_pause = B_TRUE;
        }
        if (lnk_params.speed.autoneg ||
            lnk_params.pause.forced_rx || lnk_params.pause.forced_tx) {
                lnkCfg->supp_capab.pause = B_TRUE;
                lnkCfg->adv_capab.pause = B_TRUE;
        }

        if (lnk_params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
                lnkCfg->adv_capab.param_10000fdx = B_TRUE;
        }
        if (lnk_params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
                lnkCfg->adv_capab.param_25000fdx = B_TRUE;
        }
        if (lnk_params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
                lnkCfg->adv_capab.param_40000fdx = B_TRUE;
        }
        if (lnk_params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) {
                lnkCfg->adv_capab.param_50000fdx = B_TRUE;
        }
        if (lnk_params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
                lnkCfg->adv_capab.param_100000fdx = B_TRUE;
        }
        if (lnk_params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
                lnkCfg->adv_capab.param_1000fdx = B_TRUE;
                lnkCfg->adv_capab.param_1000hdx = B_TRUE;
        }

        lnkCfg->autoneg = lnk_params.speed.autoneg;

        if (lnk_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
                lnkCfg->supp_capab.param_10000fdx = B_TRUE;
        }
        if (lnk_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
                lnkCfg->supp_capab.param_25000fdx = B_TRUE;
        }
        if (lnk_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
                lnkCfg->supp_capab.param_40000fdx = B_TRUE;
        }
        if (lnk_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) {
                lnkCfg->supp_capab.param_50000fdx = B_TRUE;
        }
        if (lnk_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
                lnkCfg->supp_capab.param_100000fdx = B_TRUE;
        }
        if (lnk_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
                lnkCfg->supp_capab.param_1000fdx = B_TRUE;
                lnkCfg->supp_capab.param_1000hdx = B_TRUE;
        }

        if (lnk_params.pause.autoneg) {
                lnkCfg->pause_cfg |= QEDE_LINK_PAUSE_AUTONEG_ENABLE;
        }
        if (lnk_params.pause.forced_rx) {
                lnkCfg->pause_cfg |= QEDE_LINK_PAUSE_RX_ENABLE;
        }
        if (lnk_params.pause.forced_tx) {
                lnkCfg->pause_cfg |= QEDE_LINK_PAUSE_TX_ENABLE;
        }

        if (lnk_state.partner_adv_speed &
            ECORE_LINK_PARTNER_SPEED_1G_HD) {
                lnkCfg->rem_capab.param_1000hdx = B_TRUE;
        }
        if (lnk_state.partner_adv_speed &
            ECORE_LINK_PARTNER_SPEED_1G_FD) {
                lnkCfg->rem_capab.param_1000fdx = B_TRUE;
        }
        if (lnk_state.partner_adv_speed &
            ECORE_LINK_PARTNER_SPEED_10G) {
                lnkCfg->rem_capab.param_10000fdx = B_TRUE;
        }
        if (lnk_state.partner_adv_speed &
            ECORE_LINK_PARTNER_SPEED_40G) {
                lnkCfg->rem_capab.param_40000fdx = B_TRUE;
        }
        if (lnk_state.partner_adv_speed &
            ECORE_LINK_PARTNER_SPEED_50G) {
                lnkCfg->rem_capab.param_50000fdx = B_TRUE;
        }
        if (lnk_state.partner_adv_speed &
            ECORE_LINK_PARTNER_SPEED_100G) {
                lnkCfg->rem_capab.param_100000fdx = B_TRUE;
        }

        if (lnk_state.an_complete) {
                lnkCfg->rem_capab.autoneg = B_TRUE;
        }

        if (lnk_state.partner_adv_pause) {
                lnkCfg->rem_capab.pause = B_TRUE;
        }
        if (lnk_state.partner_adv_pause ==
            ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE ||
            lnk_state.partner_adv_pause == ECORE_LINK_PARTNER_BOTH_PAUSE) {
                lnkCfg->rem_capab.asym_pause = B_TRUE;
        }
}

void
qede_osal_link_update(struct ecore_hwfn *hwfn)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;
        struct qede_link_cfg link_cfg;

        memset(&link_cfg, 0, sizeof (struct qede_link_cfg));
        qede_get_link_info(hwfn, &link_cfg);

        if (link_cfg.duplex == DUPLEX_FULL) {
                qede->props.link_duplex = DUPLEX_FULL;
        } else {
                qede->props.link_duplex = DUPLEX_HALF;
        }

        if (!link_cfg.link_up) {
                qede_print("!%s(%d): Link marked down",
                    __func__, qede->instance);
                qede->params.link_state = 0;
                qede->props.link_duplex = B_FALSE;
                qede->props.link_speed = 0;
                qede->props.tx_pause = B_FALSE;
                qede->props.rx_pause = B_FALSE;
                qede->props.uptime = 0;
                mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
        } else {
                qede_print("!%s(%d): Link marked up",
                    __func__, qede->instance);
                qede->params.link_state = 1;
                qede->props.link_speed = link_cfg.speed;
                qede->props.link_duplex = link_cfg.duplex;
                qede->props.tx_pause = (link_cfg.pause_cfg &
                    QEDE_LINK_PAUSE_TX_ENABLE) ? B_TRUE : B_FALSE;
                qede->props.rx_pause = (link_cfg.pause_cfg &
                    QEDE_LINK_PAUSE_RX_ENABLE) ? B_TRUE : B_FALSE;
                qede->props.uptime = ddi_get_time();
                mac_link_update(qede->mac_handle, LINK_STATE_UP);
        }
}

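/*
 * Round n up to the nearest power of two; e.g. log2_align(5) == 8,
 * log2_align(8) == 8 and log2_align(0) == 0.
 */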
unsigned long
log2_align(unsigned long n)
{
        unsigned long ret = n ? 1 : 0;
        unsigned long _n = n >> 1;

        while (_n) {
                _n >>= 1;
                ret <<= 1;
        }

        if (ret < n) {
                ret <<= 1;
        }

        return (ret);
}

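/*
 * Integer base-2 logarithm, i.e. the index of the highest set bit;
 * e.g. LOG2(16) == 4. Returns 0 for inputs of 0 and 1.
 */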
u32
LOG2(u32 v)
{
        u32 r = 0;

        while (v >>= 1) {
                r++;
        }
        return (r);
}

/*
 * Stub: extended PCI capability lookup is not implemented; a return
 * value of 0 means "not found".
 */
int
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_pci_find_ext_capab(struct ecore_dev *edev, u16 pcie_id)
{
        int offset = 0;

        return (offset);
}

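/*
 * Register access routines: BAR0 (regs_handle) holds the device
 * registers and BAR2 (doorbell_handle) the doorbells. In each case the
 * caller's offset is added to the mapped base address of that BAR.
 */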
void
qede_osal_pci_write32(struct ecore_hwfn *hwfn, u32 offset, u32 val)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;
        u64 addr = qede->pci_bar0_base;

        addr += offset;

        ddi_put32(qede->regs_handle, (u32 *)addr, val);
}

void
qede_osal_pci_write16(struct ecore_hwfn *hwfn, u32 offset, u16 val)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;
        u64 addr = qede->pci_bar0_base;

        addr += offset;

        ddi_put16(qede->regs_handle, (u16 *)addr, val);
}

u32
qede_osal_pci_read32(struct ecore_hwfn *hwfn, u32 offset)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;
        u32 val = 0;
        u64 addr = qede->pci_bar0_base;

        addr += offset;

        val = ddi_get32(qede->regs_handle, (u32 *)addr);

        return (val);
}

void
qede_osal_pci_bar2_write32(struct ecore_hwfn *hwfn, u32 offset, u32 val)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;
        u64 addr = qede->pci_bar2_base;

        addr += offset;
        ddi_put32(qede->doorbell_handle, (u32 *)addr, val);
}

u32
qede_osal_direct_reg_read32(struct ecore_hwfn *hwfn, void *addr)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;

        return (ddi_get32(qede->regs_handle, (u32 *)addr));
}

void
qede_osal_direct_reg_write32(struct ecore_hwfn *hwfn, void *addr, u32 value)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;

        ddi_put32(qede->regs_handle, (u32 *)addr, value);
}

u32 *
qede_osal_reg_addr(struct ecore_hwfn *hwfn, u32 addr)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;

        return ((u32 *)(qede->pci_bar0_base + addr));
}

void
qede_osal_pci_read_config_byte(struct ecore_dev *edev, u32 addr, u8 *val)
{
        qede_t *qede = (qede_t *)edev;

        *val = pci_config_get8(qede->pci_cfg_handle, (off_t)addr);
}

void
qede_osal_pci_read_config_word(struct ecore_dev *edev, u32 addr, u16 *val)
{
        qede_t *qede = (qede_t *)edev;

        *val = pci_config_get16(qede->pci_cfg_handle, (off_t)addr);
}

void
qede_osal_pci_read_config_dword(struct ecore_dev *edev, u32 addr, u32 *val)
{
        qede_t *qede = (qede_t *)edev;

        *val = pci_config_get32(qede->pci_cfg_handle, (off_t)addr);
}

#ifdef VERBOSE_DEBUG
void
qede_print(char *format, ...)
{
        va_list ap;

        va_start(ap, format);
        vcmn_err(CE_NOTE, format, ap);
        va_end(ap);
}

void
qede_print_err(char *format, ...)
{
        va_list ap;

        va_start(ap, format);
        vcmn_err(CE_WARN, format, ap);
        va_end(ap);
}
#else
/*ARGSUSED*/
void
qede_print(char *format, ...)
{
}

/*ARGSUSED*/
void
qede_print_err(char *format, ...)
{
}
#endif

/*
 * Check whether any mem/dma entries were left behind after ecore
 * unloaded; if any are found, free them.
 */
u32
qede_osal_cleanup(qede_t *qede)
{
        qede_mem_list_entry_t *entry = NULL;
        qede_mem_list_entry_t *temp = NULL;
        qede_phys_mem_entry_t *entry_phys;
        qede_phys_mem_entry_t *temp_phys;

        /*
         * Check for misplaced mem. blocks (if any)
         */
        mutex_enter(&qede->mem_list.mem_list_lock);

        if (!QEDE_LIST_EMPTY(&qede->mem_list.mem_list_head)) {
                /*
                 * Something went wrong either in ecore
                 * or the osal mem management routines
                 * and the mem entry was not freed
                 */
                qede_print_err("!%s(%d): Mem entries left behind",
                    __func__, qede->instance);

                QEDE_LIST_FOR_EACH_ENTRY_SAFE(entry,
                    temp,
                    /* LINTED E_BAD_PTR_CAST_ALIGN */
                    &qede->mem_list.mem_list_head,
                    mem_entry,
                    qede_mem_list_entry_t) {
                        qede_print("!%s(%d): Cleaning-up entry %p",
                            __func__, qede->instance, entry);
                        QEDE_LIST_REMOVE(&entry->mem_entry,
                            &qede->mem_list.mem_list_head);
                        if (entry->buf) {
                                kmem_free(entry->buf, entry->size);
                                kmem_free(entry,
                                    sizeof (qede_mem_list_entry_t));
                        }
                }
        }

        mutex_exit(&qede->mem_list.mem_list_lock);

        /*
         * Check for misplaced dma blocks (if any)
         */
        mutex_enter(&qede->phys_mem_list.lock);

        if (!QEDE_LIST_EMPTY(&qede->phys_mem_list.head)) {
                qede_print("!%s(%d): Dma entries left behind",
                    __func__, qede->instance);

                QEDE_LIST_FOR_EACH_ENTRY_SAFE(entry_phys,
                    temp_phys,
                    /* LINTED E_BAD_PTR_CAST_ALIGN */
                    &qede->phys_mem_list.head,
                    list_entry,
                    qede_phys_mem_entry_t) {
                        qede_print("!%s(%d): Cleaning-up entry %p",
                            __func__, qede->instance, entry_phys);
                        QEDE_LIST_REMOVE(&entry_phys->list_entry,
                            &qede->phys_mem_list.head);

                        if (entry_phys->virt_addr) {
                                (void) ddi_dma_unbind_handle(
                                    entry_phys->dma_handle);
                                ddi_dma_mem_free(&entry_phys->dma_acc_handle);
                                ddi_dma_free_handle(&entry_phys->dma_handle);
                                kmem_free(entry_phys,
                                    sizeof (qede_phys_mem_entry_t));
                        }
                }
        }

        mutex_exit(&qede->phys_mem_list.lock);

        return (0);
}

void
qede_osal_recovery_handler(struct ecore_hwfn *hwfn)
{
        struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
        qede_t *qede = (qede_t *)(void *)edev;

        cmn_err(CE_WARN, "!%s(%d): Not implemented!",
            __func__, qede->instance);
}

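/*
 * The remaining OSAL entry points are thin wrappers or placeholders
 * that satisfy the ecore interface but perform little or no work here.
 */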
enum _ecore_status_t
qede_osal_iov_vf_acquire(struct ecore_hwfn *p_hwfn, int vf_id)
{
        return (ECORE_SUCCESS);
}

void
qede_osal_pci_write_config_word(struct ecore_dev *dev, u32 addr, u16 pcie_id)
{
        qede_t *qede = (qede_t *)dev;
        ddi_acc_handle_t pci_cfg_handle = qede->pci_cfg_handle;

        pci_config_put16(pci_cfg_handle, (off_t)addr, pcie_id);
}

void *
qede_osal_valloc(struct ecore_dev *dev, u32 size)
{
        void *ptr = NULL;

        return (ptr);
}

void
qede_osal_vfree(struct ecore_dev *dev, void *mem)
{
}

int
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_pci_find_capability(struct ecore_dev *dev, u16 pcie_id)
{
        return (1);
}

void
qede_osal_poll_mode_dpc(struct ecore_hwfn *p_hwfn)
{
}

int
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_bitmap_weight(unsigned long *bitmap, uint32_t nbits)
{
        uint32_t count = 0;

        return (count);
}

void
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_mfw_tlv_req(struct ecore_hwfn *p_hwfn)
{
}

u32
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_crc32(u32 crc, u8 *buf, u64 length)
{
        return (1);
}

void
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_hw_info_change(struct ecore_hwfn *p_hwfn, int change)
{
}

void
/* LINTED E_FUNC_ARG_UNUSED */
OSAL_CRC8_POPULATE(u8 *cdu_crc8_table, u8 polynomial)
{
}

u8
/* LINTED E_FUNC_ARG_UNUSED */
OSAL_CRC8(u8 *cdu_crc8_table, u8 *data_to_crc, int data_to_crc_len,
    u8 init_value)
{
        return (0);
}

void
/* LINTED E_FUNC_ARG_UNUSED */
OSAL_DPC_SYNC(struct ecore_hwfn *p_hwfn)
{
        /* Do nothing right now. */
}