/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
 * Copyright 2012 Alexey Zaytsev <alexey.zaytsev@gmail.com>
 * Copyright (c) 2016 by Delphix. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 */

/* Based on the NetBSD virtio driver by Minoura Makoto. */
/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/autoconf.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/avintr.h>
#include <sys/spl.h>
#include <sys/promif.h>
#include <sys/list.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/sysmacros.h>
#include <sys/pci.h>

#include "virtiovar.h"
#include "virtioreg.h"

#define NDEVNAMES       (sizeof (virtio_device_name) / sizeof (char *))
#define MINSEG_INDIRECT 2       /* use indirect if nsegs >= this value */
#define VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1)) & \
            ~(VIRTIO_PAGE_SIZE-1))

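/*
 * Merge new status bits into the device status register.  A non-zero
 * status is ORed with the bits the device already has set; writing zero
 * resets the device.
 */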
void
virtio_set_status(struct virtio_softc *sc, unsigned int status)
{
        int old = 0;

        if (status != 0) {
                old = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
                    VIRTIO_CONFIG_DEVICE_STATUS));
        }

        ddi_put8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
            VIRTIO_CONFIG_DEVICE_STATUS), status | old);
}

/*
 * Negotiate features, save the result in sc->sc_features.
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
        uint32_t host_features;
        uint32_t features;

        host_features = ddi_get32(sc->sc_ioh,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));

        dev_debug(sc->sc_dev, CE_NOTE, "host features: %x, guest features: %x",
            host_features, guest_features);

        features = host_features & guest_features;
        ddi_put32(sc->sc_ioh,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
            features);

        sc->sc_features = features;

        return (host_features);
}

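/*
 * Format the generic feature bits into a human-readable string in buf;
 * returns the number of bytes written.
 */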
size_t
virtio_show_features(uint32_t features, char *buf, size_t len)
{
        char *orig_buf = buf;
        char *bufend = buf + len;

        /* LINTED E_PTRDIFF_OVERFLOW */
        buf += snprintf(buf, bufend - buf, "Generic ( ");
        if (features & VIRTIO_F_RING_INDIRECT_DESC)
                /* LINTED E_PTRDIFF_OVERFLOW */
                buf += snprintf(buf, bufend - buf, "INDIRECT_DESC ");

        /* LINTED E_PTRDIFF_OVERFLOW */
        buf += snprintf(buf, bufend - buf, ") ");

        /* LINTED E_PTRDIFF_OVERFLOW */
        return (buf - orig_buf);
}

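/* Check whether a feature bit was negotiated. */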
boolean_t
virtio_has_feature(struct virtio_softc *sc, uint32_t feature)
{
        return ((sc->sc_features & feature) != 0);
}

/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, unsigned int index)
{
        ASSERT(sc->sc_config_offset);
        return ddi_get8(sc->sc_ioh,
            (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, unsigned int index)
{
        ASSERT(sc->sc_config_offset);
        return ddi_get16(sc->sc_ioh,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, unsigned int index)
{
        ASSERT(sc->sc_config_offset);
        return ddi_get32(sc->sc_ioh,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
}

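/*
 * 64-bit fields are accessed as two 32-bit reads, high word first; the
 * combined read is not atomic with respect to the device.
 */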
uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, unsigned int index)
{
        uint64_t r;

        ASSERT(sc->sc_config_offset);
        r = ddi_get32(sc->sc_ioh,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
            index + sizeof (uint32_t)));

        r <<= 32;

        r += ddi_get32(sc->sc_ioh,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
        return (r);
}

void
virtio_write_device_config_1(struct virtio_softc *sc, unsigned int index,
    uint8_t value)
{
        ASSERT(sc->sc_config_offset);
        ddi_put8(sc->sc_ioh,
            (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc, unsigned int index,
    uint16_t value)
{
        ASSERT(sc->sc_config_offset);
        ddi_put16(sc->sc_ioh,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc, unsigned int index,
    uint32_t value)
{
        ASSERT(sc->sc_config_offset);
        ddi_put32(sc->sc_ioh,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc, unsigned int index,
    uint64_t value)
{
        ASSERT(sc->sc_config_offset);
        ddi_put32(sc->sc_ioh,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
            value & 0xFFFFFFFF);
        ddi_put32(sc->sc_ioh,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
            index + sizeof (uint32_t)), value >> 32);
}

/*
 * Start/stop vq interrupts.  No guarantee: this is only a hint to the host.
 */
void
virtio_stop_vq_intr(struct virtqueue *vq)
{
        vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

void
virtio_start_vq_intr(struct virtqueue *vq)
{
        vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
}

static ddi_dma_attr_t virtio_vq_dma_attr = {
        DMA_ATTR_V0,            /* Version number */
        0,                      /* low address */
        0x00000FFFFFFFFFFF,     /* high address. Has to fit into 32 bits */
                                /* after page-shifting */
        0xFFFFFFFF,             /* counter register max */
        VIRTIO_PAGE_SIZE,       /* page alignment required */
        0x3F,                   /* burst sizes: 1 - 32 */
        0x1,                    /* minimum transfer size */
        0xFFFFFFFF,             /* max transfer size */
        0xFFFFFFFF,             /* address register max */
        1,                      /* no scatter-gather */
        1,                      /* device operates on bytes */
        0,                      /* attr flag: set to 0 */
};

static ddi_dma_attr_t virtio_vq_indirect_dma_attr = {
        DMA_ATTR_V0,            /* Version number */
        0,                      /* low address */
        0xFFFFFFFFFFFFFFFF,     /* high address */
        0xFFFFFFFF,             /* counter register max */
        1,                      /* No specific alignment */
        0x3F,                   /* burst sizes: 1 - 32 */
        0x1,                    /* minimum transfer size */
        0xFFFFFFFF,             /* max transfer size */
        0xFFFFFFFF,             /* address register max */
        1,                      /* no scatter-gather */
        1,                      /* device operates on bytes */
        0,                      /* attr flag: set to 0 */
};

/* Same for direct and indirect descriptors. */
static ddi_device_acc_attr_t virtio_vq_devattr = {
        DDI_DEVICE_ATTR_V0,
        DDI_NEVERSWAP_ACC,
        DDI_STORECACHING_OK_ACC,
        DDI_DEFAULT_ACC
};

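/* Tear down the DMA resources backing an entry's indirect descriptor table. */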
static void
virtio_free_indirect(struct vq_entry *entry)
{
        (void) ddi_dma_unbind_handle(entry->qe_indirect_dma_handle);
        ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
        ddi_dma_free_handle(&entry->qe_indirect_dma_handle);

        entry->qe_indirect_descs = NULL;
}

static int
virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
{
        int allocsize, num;
        size_t len;
        unsigned int ncookies;
        int ret;

        num = entry->qe_queue->vq_indirect_num;
        ASSERT(num > 1);

        allocsize = sizeof (struct vring_desc) * num;

        ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
            DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
        if (ret != DDI_SUCCESS) {
                dev_err(sc->sc_dev, CE_WARN,
                    "Failed to allocate dma handle for indirect descriptors, "
                    "entry %d, vq %d", entry->qe_index,
                    entry->qe_queue->vq_index);
                goto out_alloc_handle;
        }

        ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle, allocsize,
            &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
            (caddr_t *)&entry->qe_indirect_descs, &len,
            &entry->qe_indirect_dma_acch);
        if (ret != DDI_SUCCESS) {
                dev_err(sc->sc_dev, CE_WARN,
                    "Failed to allocate dma memory for indirect descriptors, "
                    "entry %d, vq %d", entry->qe_index,
                    entry->qe_queue->vq_index);
                goto out_alloc;
        }

        (void) memset(entry->qe_indirect_descs, 0xff, allocsize);

        ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
            (caddr_t)entry->qe_indirect_descs, len,
            DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
            &entry->qe_indirect_dma_cookie, &ncookies);
        if (ret != DDI_DMA_MAPPED) {
                dev_err(sc->sc_dev, CE_WARN,
                    "Failed to bind dma memory for indirect descriptors, "
                    "entry %d, vq %d", entry->qe_index,
                    entry->qe_queue->vq_index);
                goto out_bind;
        }

        /* We asked for a single segment */
        ASSERT(ncookies == 1);

        return (0);

out_bind:
        ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
out_alloc:
        ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
out_alloc_handle:

        return (ret);
}

/*
 * Initialize the vq structure.
 */
static int
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
        int ret;
        uint16_t i;
        int vq_size = vq->vq_num;
        int indirect_num = vq->vq_indirect_num;

        /* free slot management */
        list_create(&vq->vq_freelist, sizeof (struct vq_entry),
            offsetof(struct vq_entry, qe_list));

        for (i = 0; i < vq_size; i++) {
                struct vq_entry *entry = &vq->vq_entries[i];
                list_insert_tail(&vq->vq_freelist, entry);
                entry->qe_index = i;
                entry->qe_desc = &vq->vq_descs[i];
                entry->qe_queue = vq;

                if (indirect_num) {
                        ret = virtio_alloc_indirect(sc, entry);
                        if (ret)
                                goto out_indirect;
                }
        }

        mutex_init(&vq->vq_freelist_lock, "virtio-freelist", MUTEX_DRIVER,
            DDI_INTR_PRI(sc->sc_intr_prio));
        mutex_init(&vq->vq_avail_lock, "virtio-avail", MUTEX_DRIVER,
            DDI_INTR_PRI(sc->sc_intr_prio));
        mutex_init(&vq->vq_used_lock, "virtio-used", MUTEX_DRIVER,
            DDI_INTR_PRI(sc->sc_intr_prio));

        return (0);

out_indirect:
        for (i = 0; i < vq_size; i++) {
                struct vq_entry *entry = &vq->vq_entries[i];
                if (entry->qe_indirect_descs)
                        virtio_free_indirect(entry);
        }

        return (ret);
}

/*
 * Allocate/free a vq.
 */
struct virtqueue *
virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
    unsigned int indirect_num, const char *name)
{
        int vq_size, allocsize1, allocsize2, allocsize = 0;
        int ret;
        unsigned int ncookies;
        size_t len;
        struct virtqueue *vq;

        ddi_put16(sc->sc_ioh,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
        vq_size = ddi_get16(sc->sc_ioh,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
        if (vq_size == 0) {
                dev_err(sc->sc_dev, CE_WARN,
                    "virtqueue does not exist, index %d for %s", index, name);
                goto out;
        }

        vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);

        /* size 0 => use native vq size, good for receive queues. */
        if (size)
                vq_size = MIN(vq_size, size);

        /* allocsize1: descriptor table + avail ring + pad */
        allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
            sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
        /* allocsize2: used ring + pad */
        allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
            sizeof (struct vring_used_elem) * vq_size);

        allocsize = allocsize1 + allocsize2;

        ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
            DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
        if (ret != DDI_SUCCESS) {
                dev_err(sc->sc_dev, CE_WARN,
                    "Failed to allocate dma handle for vq %d", index);
                goto out_alloc_handle;
        }

        ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
            &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
            (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
        if (ret != DDI_SUCCESS) {
                dev_err(sc->sc_dev, CE_WARN,
                    "Failed to allocate dma memory for vq %d", index);
                goto out_alloc;
        }

        ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
            (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
            DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
        if (ret != DDI_DMA_MAPPED) {
                dev_err(sc->sc_dev, CE_WARN,
                    "Failed to bind dma memory for vq %d", index);
                goto out_bind;
        }

        /* We asked for a single segment */
        ASSERT(ncookies == 1);
        /* and page-aligned buffers. */
        ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);

        (void) memset(vq->vq_vaddr, 0, allocsize);

        /* Make sure all zeros hit the buffer before we point the host to it */
        membar_producer();

        /* set the vq address */
        ddi_put32(sc->sc_ioh,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
            (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));

        /* remember addresses and offsets for later use */
        vq->vq_owner = sc;
        vq->vq_num = vq_size;
        vq->vq_index = index;
        vq->vq_descs = vq->vq_vaddr;
        vq->vq_availoffset = sizeof (struct vring_desc) * vq_size;
        vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
        vq->vq_usedoffset = allocsize1;
        vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);

        ASSERT(indirect_num == 0 ||
            virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
        vq->vq_indirect_num = indirect_num;

        /* free slot management */
        vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
            KM_SLEEP);

        ret = virtio_init_vq(sc, vq);
        if (ret)
                goto out_init;

        dev_debug(sc->sc_dev, CE_NOTE,
            "Allocated %d entries for vq %d:%s (%d indirect descs)",
            vq_size, index, name, indirect_num * vq_size);

        return (vq);

out_init:
        kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
        (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
out_bind:
        ddi_dma_mem_free(&vq->vq_dma_acch);
out_alloc:
        ddi_dma_free_handle(&vq->vq_dma_handle);
out_alloc_handle:
        kmem_free(vq, sizeof (struct virtqueue));
out:
        return (NULL);
}

void
virtio_free_vq(struct virtqueue *vq)
{
        struct virtio_softc *sc = vq->vq_owner;
        int i;

        /* tell device that there's no virtqueue any longer */
        ddi_put16(sc->sc_ioh,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
            vq->vq_index);
        ddi_put32(sc->sc_ioh,
            /* LINTED E_BAD_PTR_CAST_ALIGN */
            (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);

        /* Free the indirect descriptors, if any. */
        for (i = 0; i < vq->vq_num; i++) {
                struct vq_entry *entry = &vq->vq_entries[i];
                if (entry->qe_indirect_descs)
                        virtio_free_indirect(entry);
        }

        kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);

        (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
        ddi_dma_mem_free(&vq->vq_dma_acch);
        ddi_dma_free_handle(&vq->vq_dma_handle);

        mutex_destroy(&vq->vq_used_lock);
        mutex_destroy(&vq->vq_avail_lock);
        mutex_destroy(&vq->vq_freelist_lock);

        kmem_free(vq, sizeof (struct virtqueue));
}

/*
 * Free descriptor management.
 */
struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
        struct vq_entry *qe;

        mutex_enter(&vq->vq_freelist_lock);
        if (list_is_empty(&vq->vq_freelist)) {
                mutex_exit(&vq->vq_freelist_lock);
                return (NULL);
        }
        qe = list_remove_head(&vq->vq_freelist);

        ASSERT(vq->vq_used_entries >= 0);
        vq->vq_used_entries++;

        mutex_exit(&vq->vq_freelist_lock);

        qe->qe_next = NULL;
        qe->qe_indirect_next = 0;
        (void) memset(qe->qe_desc, 0, sizeof (struct vring_desc));

        return (qe);
}

void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
        mutex_enter(&vq->vq_freelist_lock);

        list_insert_head(&vq->vq_freelist, qe);
        vq->vq_used_entries--;
        ASSERT(vq->vq_used_entries >= 0);
        mutex_exit(&vq->vq_freelist_lock);
}

/*
 * We (intentionally) don't have a global vq mutex, so you are
 * responsible for external locking to avoid allocating/freeing any
 * entries before using the returned value. Have fun.
 */
uint_t
vq_num_used(struct virtqueue *vq)
{
        /* vq->vq_freelist_lock would not help here. */
        return (vq->vq_used_entries);
}

static inline void
virtio_ve_set_desc(struct vring_desc *desc, uint64_t paddr, uint32_t len,
    boolean_t write)
{
        desc->addr = paddr;
        desc->len = len;
        desc->next = 0;
        desc->flags = 0;

        /*
         * 'write' is from the driver's point of view; VRING_DESC_F_WRITE
         * marks buffers the device writes into.
         */
        if (!write)
                desc->flags = VRING_DESC_F_WRITE;
}

void
virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
    boolean_t write)
{
        virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
}

unsigned int
virtio_ve_indirect_available(struct vq_entry *qe)
{
        return (qe->qe_queue->vq_indirect_num - qe->qe_indirect_next);
}

void
virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
    boolean_t write)
{
        struct vring_desc *indirect_desc;

        ASSERT(qe->qe_queue->vq_indirect_num);
        ASSERT(qe->qe_indirect_next < qe->qe_queue->vq_indirect_num);

        indirect_desc = &qe->qe_indirect_descs[qe->qe_indirect_next];
        virtio_ve_set_desc(indirect_desc, paddr, len, write);
        qe->qe_indirect_next++;
}

void
virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
    ddi_dma_cookie_t dma_cookie, unsigned int ncookies, boolean_t write)
{
        int i;

        for (i = 0; i < ncookies; i++) {
                virtio_ve_add_indirect_buf(qe, dma_cookie.dmac_laddress,
                    dma_cookie.dmac_size, write);
                ddi_dma_nextcookie(dma_handle, &dma_cookie);
        }
}

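/*
 * Publish the new avail index to the device and notify it, unless the
 * device has suppressed notifications (VRING_USED_F_NO_NOTIFY).
 */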
void
virtio_sync_vq(struct virtqueue *vq)
{
        struct virtio_softc *vsc = vq->vq_owner;

        /* Make sure the avail ring update hit the buffer */
        membar_producer();

        vq->vq_avail->idx = vq->vq_avail_idx;

        /* Make sure the avail idx update hits the buffer */
        membar_producer();

        /* Make sure we see the flags update */
        membar_consumer();

        if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
                ddi_put16(vsc->sc_ioh,
                    /* LINTED E_BAD_PTR_CAST_ALIGN */
                    (uint16_t *)(vsc->sc_io_addr +
                    VIRTIO_CONFIG_QUEUE_NOTIFY),
                    vq->vq_index);
        }
}

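/*
 * Link a chain of entries (and any indirect descriptors) together and
 * post its head on the avail ring, optionally notifying the device.
 */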
void
virtio_push_chain(struct vq_entry *qe, boolean_t sync)
{
        struct virtqueue *vq = qe->qe_queue;
        struct vq_entry *head = qe;
        struct vring_desc *desc;
        int idx;

        ASSERT(qe);

        /*
         * Bind the descs together, paddr and len should be already
         * set with virtio_ve_set
         */
        do {
                /* Bind the indirect descriptors */
                if (qe->qe_indirect_next > 1) {
                        uint16_t i = 0;

                        /*
                         * Set the pointer/flags to the
                         * first indirect descriptor
                         */
                        virtio_ve_set_desc(qe->qe_desc,
                            qe->qe_indirect_dma_cookie.dmac_laddress,
                            sizeof (struct vring_desc) * qe->qe_indirect_next,
                            B_FALSE);
                        qe->qe_desc->flags |= VRING_DESC_F_INDIRECT;

                        /* For all but the last one, add the next index/flag */
                        do {
                                desc = &qe->qe_indirect_descs[i];
                                i++;

                                desc->flags |= VRING_DESC_F_NEXT;
                                desc->next = i;
                        } while (i < qe->qe_indirect_next - 1);
                }

                if (qe->qe_next) {
                        qe->qe_desc->flags |= VRING_DESC_F_NEXT;
                        qe->qe_desc->next = qe->qe_next->qe_index;
                }

                qe = qe->qe_next;
        } while (qe);

        mutex_enter(&vq->vq_avail_lock);
        idx = vq->vq_avail_idx;
        vq->vq_avail_idx++;

        /* Make sure the bits hit the descriptor(s) */
        membar_producer();
        vq->vq_avail->ring[idx % vq->vq_num] = head->qe_index;

        /* Notify the device, if needed. */
        if (sync)
                virtio_sync_vq(vq);

        mutex_exit(&vq->vq_avail_lock);
}

/*
 * Get a chain of descriptors from the used ring, if one is available.
 */
struct vq_entry *
virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
{
        struct vq_entry *head;
        int slot;
        int usedidx;

        mutex_enter(&vq->vq_used_lock);

        /* No used entries? Bye. */
        if (vq->vq_used_idx == vq->vq_used->idx) {
                mutex_exit(&vq->vq_used_lock);
                return (NULL);
        }

        usedidx = vq->vq_used_idx;
        vq->vq_used_idx++;
        mutex_exit(&vq->vq_used_lock);

        usedidx %= vq->vq_num;

        /* Make sure we do the next step _after_ checking the idx. */
        membar_consumer();

        slot = vq->vq_used->ring[usedidx].id;
        *len = vq->vq_used->ring[usedidx].len;

        head = &vq->vq_entries[slot];

        return (head);
}

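/* Return all entries in a chain to the free list. */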
void
virtio_free_chain(struct vq_entry *qe)
{
        struct vq_entry *tmp;
        struct virtqueue *vq = qe->qe_queue;

        ASSERT(qe);

        do {
                ASSERT(qe->qe_queue == vq);
                tmp = qe->qe_next;
                vq_free_entry(vq, qe);
                qe = tmp;
        } while (tmp != NULL);
}

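/* Chain two entries together; 'second' will follow 'first'. */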
void
virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
{
        first->qe_next = second;
}

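/*
 * Allocate and register MSI/MSI-X handlers: one per virtqueue handler in
 * vq_handlers[], plus an optional configuration-change handler.
 */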
static int
virtio_register_msi(struct virtio_softc *sc,
    struct virtio_int_handler *config_handler,
    struct virtio_int_handler vq_handlers[], int intr_types)
{
        int count, actual;
        int int_type;
        int i;
        int handler_count;
        int ret;

        /* If both MSI and MSI-X are reported, prefer MSI-X. */
        int_type = DDI_INTR_TYPE_MSI;
        if (intr_types & DDI_INTR_TYPE_MSIX)
                int_type = DDI_INTR_TYPE_MSIX;

        /* Walk the handler table to get the number of handlers. */
        for (handler_count = 0;
            vq_handlers && vq_handlers[handler_count].vh_func;
            handler_count++)
                ;

        /* +1 if there is a config change handler. */
        if (config_handler != NULL)
                handler_count++;

        /* Number of MSIs supported by the device. */
        ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
        if (ret != DDI_SUCCESS) {
                dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
                return (ret);
        }

        /*
         * Those who try to register more handlers than the device
         * supports shall suffer.
         */
        ASSERT(handler_count <= count);

        sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t) *
            handler_count, KM_SLEEP);

        ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
            handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
        if (ret != DDI_SUCCESS) {
                dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
                goto out_msi_alloc;
        }

        if (actual != handler_count) {
                dev_err(sc->sc_dev, CE_WARN,
                    "Not enough MSI available: need %d, available %d",
                    handler_count, actual);
                goto out_msi_available;
        }

        sc->sc_intr_num = handler_count;
        sc->sc_intr_config = B_FALSE;
        if (config_handler != NULL) {
                sc->sc_intr_config = B_TRUE;
        }

        /* Assume they all have the same priority. */
        ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
        if (ret != DDI_SUCCESS) {
                dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
                goto out_msi_prio;
        }

        /* Add the vq handlers */
        for (i = 0; vq_handlers[i].vh_func; i++) {
                ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
                    vq_handlers[i].vh_func, sc, vq_handlers[i].vh_priv);
                if (ret != DDI_SUCCESS) {
                        dev_err(sc->sc_dev, CE_WARN,
                            "ddi_intr_add_handler failed");
                        /* Remove the handlers that succeeded. */
                        while (--i >= 0) {
                                (void) ddi_intr_remove_handler(
                                    sc->sc_intr_htable[i]);
                        }
                        goto out_add_handlers;
                }
        }

        /* Don't forget the config handler */
        if (config_handler != NULL) {
                ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
                    config_handler->vh_func, sc, config_handler->vh_priv);
                if (ret != DDI_SUCCESS) {
                        dev_err(sc->sc_dev, CE_WARN,
                            "ddi_intr_add_handler failed");
                        /* Remove the handlers that succeeded. */
                        while (--i >= 0) {
                                (void) ddi_intr_remove_handler(
                                    sc->sc_intr_htable[i]);
                        }
                        goto out_add_handlers;
                }
        }

        ret = ddi_intr_get_cap(sc->sc_intr_htable[0], &sc->sc_intr_cap);
        if (ret == DDI_SUCCESS) {
                sc->sc_int_type = int_type;
                return (DDI_SUCCESS);
        }

out_add_handlers:
out_msi_prio:
out_msi_available:
        for (i = 0; i < actual; i++)
                (void) ddi_intr_free(sc->sc_intr_htable[i]);
out_msi_alloc:
        kmem_free(sc->sc_intr_htable,
            sizeof (ddi_intr_handle_t) * handler_count);

        return (ret);
}

struct virtio_handler_container {
        int nhandlers;
        struct virtio_int_handler config_handler;
        struct virtio_int_handler vq_handlers[];
};

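/*
 * Shared INTx dispatch: reading the ISR status register also acknowledges
 * the interrupt.  Call the config handler on a config change, then every
 * vq handler.
 */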
uint_t
virtio_intx_dispatch(caddr_t arg1, caddr_t arg2)
{
        struct virtio_softc *sc = (void *)arg1;
        struct virtio_handler_container *vhc = (void *)arg2;
        uint8_t isr_status;
        int i;

        isr_status = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
            VIRTIO_CONFIG_ISR_STATUS));

        if (!isr_status)
                return (DDI_INTR_UNCLAIMED);

        if ((isr_status & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
            vhc->config_handler.vh_func) {
                vhc->config_handler.vh_func((void *)sc,
                    vhc->config_handler.vh_priv);
        }

        /* Notify all handlers */
        for (i = 0; i < vhc->nhandlers; i++) {
                vhc->vq_handlers[i].vh_func((void *)sc,
                    vhc->vq_handlers[i].vh_priv);
        }

        return (DDI_INTR_CLAIMED);
}

/*
 * config_handler and vq_handlers may be allocated on the caller's stack,
 * so copy them into heap memory to make sure we don't lose them.
 */
static int
virtio_register_intx(struct virtio_softc *sc,
    struct virtio_int_handler *config_handler,
    struct virtio_int_handler vq_handlers[])
{
        int vq_handler_count;
        int actual;
        struct virtio_handler_container *vhc;
        size_t vhc_sz;
        int ret = DDI_FAILURE;

        /* Walk the handler table to get the number of handlers. */
        for (vq_handler_count = 0;
            vq_handlers && vq_handlers[vq_handler_count].vh_func;
            vq_handler_count++)
                ;

        vhc_sz = sizeof (struct virtio_handler_container) +
            sizeof (struct virtio_int_handler) * vq_handler_count;
        vhc = kmem_zalloc(vhc_sz, KM_SLEEP);

        vhc->nhandlers = vq_handler_count;
        (void) memcpy(vhc->vq_handlers, vq_handlers,
            sizeof (struct virtio_int_handler) * vq_handler_count);

        if (config_handler != NULL) {
                (void) memcpy(&vhc->config_handler, config_handler,
                    sizeof (struct virtio_int_handler));
        }

        /* Just a single entry for a single interrupt. */
        sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);

        ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
            DDI_INTR_TYPE_FIXED, 0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
        if (ret != DDI_SUCCESS) {
                dev_err(sc->sc_dev, CE_WARN,
                    "Failed to allocate a fixed interrupt: %d", ret);
                goto out_int_alloc;
        }

        ASSERT(actual == 1);
        sc->sc_intr_num = 1;

        ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
        if (ret != DDI_SUCCESS) {
                dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
                goto out_prio;
        }

        ret = ddi_intr_add_handler(sc->sc_intr_htable[0],
            virtio_intx_dispatch, sc, vhc);
        if (ret != DDI_SUCCESS) {
                dev_err(sc->sc_dev, CE_WARN, "ddi_intr_add_handler failed");
                goto out_add_handlers;
        }

        sc->sc_int_type = DDI_INTR_TYPE_FIXED;

        return (DDI_SUCCESS);

out_add_handlers:
out_prio:
        (void) ddi_intr_free(sc->sc_intr_htable[0]);
out_int_alloc:
        kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
        kmem_free(vhc, vhc_sz);
        return (ret);
}

/*
 * We find out whether we support MSI during this call, and the register
 * layout depends on that.  Don't access the device-specific bits in
 * BAR 0 before calling it!
 */
int
virtio_register_ints(struct virtio_softc *sc,
    struct virtio_int_handler *config_handler,
    struct virtio_int_handler vq_handlers[])
{
        int ret;
        int intr_types;

        /* Default offset until MSI-X is enabled, if ever. */
        sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX;

        /* Determine which types of interrupts are supported */
        ret = ddi_intr_get_supported_types(sc->sc_dev, &intr_types);
        if (ret != DDI_SUCCESS) {
                dev_err(sc->sc_dev, CE_WARN, "Can't get supported int types");
                goto out_inttype;
        }

        /* If we have MSI, use it. */
        if (intr_types & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
                ret = virtio_register_msi(sc, config_handler,
                    vq_handlers, intr_types);
                if (!ret)
                        return (0);
        }

        /* Fall back to old-fashioned interrupts. */
        if (intr_types & DDI_INTR_TYPE_FIXED) {
                dev_debug(sc->sc_dev, CE_WARN,
                    "Using legacy interrupts");

                return (virtio_register_intx(sc, config_handler, vq_handlers));
        }

        dev_err(sc->sc_dev, CE_WARN,
            "MSI failed and fixed interrupts not supported. Giving up.");
        ret = DDI_FAILURE;

out_inttype:
        return (ret);
}

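/*
 * Enable the previously allocated MSI/MSI-X vectors and bind each vector
 * to its virtqueue (and to configuration changes, if registered).
 */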
static int
virtio_enable_msi(struct virtio_softc *sc)
{
        int ret, i;
        int vq_handler_count = sc->sc_intr_num;

        /* Number of handlers, not counting the config one. */
        if (sc->sc_intr_config)
                vq_handler_count--;

        /* Enable the interrupts. Either the whole block, or one by one. */
        if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
                ret = ddi_intr_block_enable(sc->sc_intr_htable,
                    sc->sc_intr_num);
                if (ret != DDI_SUCCESS) {
                        dev_err(sc->sc_dev, CE_WARN,
                            "Failed to enable MSI, falling back to INTx");
                        goto out_enable;
                }
        } else {
                for (i = 0; i < sc->sc_intr_num; i++) {
                        ret = ddi_intr_enable(sc->sc_intr_htable[i]);
                        if (ret != DDI_SUCCESS) {
                                dev_err(sc->sc_dev, CE_WARN,
                                    "Failed to enable MSI %d, "
                                    "falling back to INTx", i);

                                while (--i >= 0) {
                                        (void) ddi_intr_disable(
                                            sc->sc_intr_htable[i]);
                                }
                                goto out_enable;
                        }
                }
        }

        /* Bind the allocated MSI to the queues and config */
        for (i = 0; i < vq_handler_count; i++) {
                int check;

                ddi_put16(sc->sc_ioh,
                    /* LINTED E_BAD_PTR_CAST_ALIGN */
                    (uint16_t *)(sc->sc_io_addr +
                    VIRTIO_CONFIG_QUEUE_SELECT), i);

                ddi_put16(sc->sc_ioh,
                    /* LINTED E_BAD_PTR_CAST_ALIGN */
                    (uint16_t *)(sc->sc_io_addr +
                    VIRTIO_CONFIG_QUEUE_VECTOR), i);

                check = ddi_get16(sc->sc_ioh,
                    /* LINTED E_BAD_PTR_CAST_ALIGN */
                    (uint16_t *)(sc->sc_io_addr +
                    VIRTIO_CONFIG_QUEUE_VECTOR));
                if (check != i) {
                        dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
                            "for VQ %d, MSI %d. Check = %x", i, i, check);
                        ret = ENODEV;
                        goto out_bind;
                }
        }

        if (sc->sc_intr_config) {
                int check;

                ddi_put16(sc->sc_ioh,
                    /* LINTED E_BAD_PTR_CAST_ALIGN */
                    (uint16_t *)(sc->sc_io_addr +
                    VIRTIO_CONFIG_CONFIG_VECTOR), i);

                check = ddi_get16(sc->sc_ioh,
                    /* LINTED E_BAD_PTR_CAST_ALIGN */
                    (uint16_t *)(sc->sc_io_addr +
                    VIRTIO_CONFIG_CONFIG_VECTOR));
                if (check != i) {
                        dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
                            "for Config updates, MSI %d", i);
                        ret = ENODEV;
                        goto out_bind;
                }
        }

        /* Configuration offset depends on whether MSI-X is used. */
        if (sc->sc_int_type == DDI_INTR_TYPE_MSIX)
                sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSIX;
        else
                ASSERT(sc->sc_int_type == DDI_INTR_TYPE_MSI);

        return (DDI_SUCCESS);

out_bind:
        /* Unbind the vqs */
        for (i = 0; i < vq_handler_count - 1; i++) {
                ddi_put16(sc->sc_ioh,
                    /* LINTED E_BAD_PTR_CAST_ALIGN */
                    (uint16_t *)(sc->sc_io_addr +
                    VIRTIO_CONFIG_QUEUE_SELECT), i);

                ddi_put16(sc->sc_ioh,
                    /* LINTED E_BAD_PTR_CAST_ALIGN */
                    (uint16_t *)(sc->sc_io_addr +
                    VIRTIO_CONFIG_QUEUE_VECTOR),
                    VIRTIO_MSI_NO_VECTOR);
        }
        /* And the config */
        /* LINTED E_BAD_PTR_CAST_ALIGN */
        ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
            VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);

        /* Disable the interrupts. Either the whole block, or one by one. */
        if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
                ret = ddi_intr_block_disable(sc->sc_intr_htable,
                    sc->sc_intr_num);
                if (ret != DDI_SUCCESS) {
                        dev_err(sc->sc_dev, CE_WARN,
                            "Failed to disable MSIs, won't be able to "
                            "reuse next time");
                }
        } else {
                for (i = 0; i < sc->sc_intr_num; i++) {
                        ret = ddi_intr_disable(sc->sc_intr_htable[i]);
                        if (ret != DDI_SUCCESS) {
                                dev_err(sc->sc_dev, CE_WARN,
                                    "Failed to disable interrupt %d, "
                                    "won't be able to reuse", i);
                        }
                }
        }

        ret = DDI_FAILURE;

out_enable:
        return (ret);
}

static int
virtio_enable_intx(struct virtio_softc *sc)
{
        int ret;

        ret = ddi_intr_enable(sc->sc_intr_htable[0]);
        if (ret != DDI_SUCCESS) {
                dev_err(sc->sc_dev, CE_WARN,
                    "Failed to enable interrupt: %d", ret);
        }

        return (ret);
}

/*
 * We can't enable/disable individual handlers in the INTx case, so do
 * the whole bunch even in the MSI case.
 */
int
virtio_enable_ints(struct virtio_softc *sc)
{
        ASSERT(sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX);

        /* See if we are using MSI. */
        if (sc->sc_int_type == DDI_INTR_TYPE_MSIX ||
            sc->sc_int_type == DDI_INTR_TYPE_MSI)
                return (virtio_enable_msi(sc));

        ASSERT(sc->sc_int_type == DDI_INTR_TYPE_FIXED);
        return (virtio_enable_intx(sc));
}

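/*
 * Undo virtio_register_ints()/virtio_enable_ints(): unbind the vectors,
 * then disable, remove, and free all interrupts.
 */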
void
virtio_release_ints(struct virtio_softc *sc)
{
        int i;
        int ret;

        /* We were running with MSI, unbind them. */
        if (sc->sc_int_type == DDI_INTR_TYPE_MSIX ||
            sc->sc_int_type == DDI_INTR_TYPE_MSI) {
                /* Unbind all vqs */
                for (i = 0; i < sc->sc_nvqs; i++) {
                        ddi_put16(sc->sc_ioh,
                            /* LINTED E_BAD_PTR_CAST_ALIGN */
                            (uint16_t *)(sc->sc_io_addr +
                            VIRTIO_CONFIG_QUEUE_SELECT), i);

                        ddi_put16(sc->sc_ioh,
                            /* LINTED E_BAD_PTR_CAST_ALIGN */
                            (uint16_t *)(sc->sc_io_addr +
                            VIRTIO_CONFIG_QUEUE_VECTOR),
                            VIRTIO_MSI_NO_VECTOR);
                }
                /* And the config */
                /* LINTED E_BAD_PTR_CAST_ALIGN */
                ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
                    VIRTIO_CONFIG_CONFIG_VECTOR),
                    VIRTIO_MSI_NO_VECTOR);
        }

        /* Disable the interrupts. Either the whole block, or one by one. */
        if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
                ret = ddi_intr_block_disable(sc->sc_intr_htable,
                    sc->sc_intr_num);
                if (ret != DDI_SUCCESS) {
                        dev_err(sc->sc_dev, CE_WARN,
                            "Failed to disable MSIs, won't be able to "
                            "reuse next time");
                }
        } else {
                for (i = 0; i < sc->sc_intr_num; i++) {
                        ret = ddi_intr_disable(sc->sc_intr_htable[i]);
                        if (ret != DDI_SUCCESS) {
                                dev_err(sc->sc_dev, CE_WARN,
                                    "Failed to disable interrupt %d, "
                                    "won't be able to reuse", i);
                        }
                }
        }

        for (i = 0; i < sc->sc_intr_num; i++) {
                (void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
        }

        for (i = 0; i < sc->sc_intr_num; i++)
                (void) ddi_intr_free(sc->sc_intr_htable[i]);

        kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) *
            sc->sc_intr_num);

        /* After disabling interrupts, the config offset is non-MSI-X. */
        sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX;
}

/*
 * Module linkage information for the kernel.
 */
static struct modlmisc modlmisc = {
        &mod_miscops,       /* Type of module */
        "VirtIO common library module",
};

static struct modlinkage modlinkage = {
        MODREV_1,
        {
                (void *)&modlmisc,
                NULL
        }
};

int
_init(void)
{
        return (mod_install(&modlinkage));
}

int
_fini(void)
{
        return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
        return (mod_info(&modlinkage, modinfop));
}