Code review changes
3644 Add virtio-net support into illumos
4945 Additional vioif fixes
Contributions by: Dan Kimmel <dan.kimmel@delphix.com>
Contributions by: Hans Rosenfeld <hans.rosenfeld@nexenta.com>
Contributions by: Alexey Zaytsev <alexey.zaytsev@gmail.com>
Contributions by: Dmitry Yusupov <Dmitry.Yusupov@nexenta.com>
Reviewed by: Igor Kozhukhov <ikozhukhov@gmail.com>
    
--- old/usr/src/uts/common/io/virtio/virtio.c
+++ new/usr/src/uts/common/io/virtio/virtio.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  
  
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23      - * Copyright 2012 Nexenta Systems, Inc.
       23 + * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
  24   24   * Copyright 2012 Alexey Zaytsev <alexey.zaytsev@gmail.com>
  25   25   */
  26   26  
  27   27  /* Based on the NetBSD virtio driver by Minoura Makoto. */
  28   28  /*
  29   29   * Copyright (c) 2010 Minoura Makoto.
  30   30   * All rights reserved.
  31   31   *
  32   32   * Redistribution and use in source and binary forms, with or without
  33   33   * modification, are permitted provided that the following conditions
  34   34   * are met:
  35   35   * 1. Redistributions of source code must retain the above copyright
  36   36   *    notice, this list of conditions and the following disclaimer.
  37   37   * 2. Redistributions in binary form must reproduce the above copyright
  38   38   *    notice, this list of conditions and the following disclaimer in the
  39   39   *    documentation and/or other materials provided with the distribution.
  40   40   *
  41   41   * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  42   42   * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  43   43   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  44   44   * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  45   45   * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  46   46   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  47   47   * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  48   48   * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  49   49   * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  50   50   * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  51   51   *
  52   52   */
  53   53  
  54   54  #include <sys/conf.h>
  55   55  #include <sys/kmem.h>
  56   56  #include <sys/debug.h>
  57   57  #include <sys/modctl.h>
  58   58  #include <sys/autoconf.h>
  59   59  #include <sys/ddi_impldefs.h>
  60   60  #include <sys/ddi.h>
  61   61  #include <sys/sunddi.h>
  62   62  #include <sys/sunndi.h>
  63   63  #include <sys/avintr.h>
  
  
  64   64  #include <sys/spl.h>
  65   65  #include <sys/promif.h>
  66   66  #include <sys/list.h>
  67   67  #include <sys/bootconf.h>
  68   68  #include <sys/bootsvcs.h>
  69   69  #include <sys/sysmacros.h>
  70   70  #include <sys/pci.h>
  71   71  
  72   72  #include "virtiovar.h"
  73   73  #include "virtioreg.h"
       74 +
  74   75  #define NDEVNAMES       (sizeof (virtio_device_name) / sizeof (char *))
  75   76  #define MINSEG_INDIRECT 2       /* use indirect if nsegs >= this value */
  76   77  #define VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1)) & \
  77   78              ~(VIRTIO_PAGE_SIZE-1))
  78   79  
  79   80  void
  80   81  virtio_set_status(struct virtio_softc *sc, unsigned int status)
  81   82  {
  82   83          int old = 0;
  83   84  
  84      -        if (status != 0)
  85      -                old = ddi_get8(sc->sc_ioh,
  86      -                    (uint8_t *)(sc->sc_io_addr +
       85 +        if (status != 0) {
       86 +                old = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
  87   87                      VIRTIO_CONFIG_DEVICE_STATUS));
       88 +        }
  88   89  
  89      -        ddi_put8(sc->sc_ioh,
  90      -            (uint8_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_STATUS),
  91      -            status | old);
       90 +        ddi_put8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
       91 +            VIRTIO_CONFIG_DEVICE_STATUS), status | old);
  92   92  }
  93   93  
  94   94  /*
  95   95   * Negotiate features, save the result in sc->sc_features
  96   96   */
  97   97  uint32_t
  98   98  virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
  99   99  {
 100  100          uint32_t host_features;
 101  101          uint32_t features;
 102  102  
 103  103          host_features = ddi_get32(sc->sc_ioh,
 104  104              /* LINTED E_BAD_PTR_CAST_ALIGN */
 105  105              (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));
 106  106  
 107      -        dev_debug(sc->sc_dev, CE_NOTE,
 108      -            "host features: %x, guest features: %x",
      107 +        dev_debug(sc->sc_dev, CE_NOTE, "host features: %x, guest features: %x",
 109  108              host_features, guest_features);
 110  109  
 111  110          features = host_features & guest_features;
 112  111          ddi_put32(sc->sc_ioh,
 113  112              /* LINTED E_BAD_PTR_CAST_ALIGN */
 114  113              (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
 115  114              features);
 116  115  
 117  116          sc->sc_features = features;
 118  117  
 119  118          return (host_features);
 120  119  }
 121  120  
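For context on how this fits together: a leaf driver calls
virtio_negotiate_features() once during attach and later tests
individual bits with virtio_has_feature() (defined below). A minimal
sketch, assuming a driver that wants indirect descriptors (the feature
mask shown is illustrative, not part of this change):

        uint32_t host_features;

        /*
         * Offer what the driver can handle; the helper stores the
         * host/guest intersection in sc->sc_features and returns the
         * raw host feature bits.
         */
        host_features = virtio_negotiate_features(sc,
            VIRTIO_F_RING_INDIRECT_DESC | VIRTIO_F_NOTIFY_ON_EMPTY);

        if (virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC)) {
                /* safe to ask virtio_alloc_vq() for indirect descs */
        }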
 122  121  size_t
 123      -virtio_show_features(uint32_t features,
 124      -    char *buf, size_t len)
      122 +virtio_show_features(uint32_t features, char *buf, size_t len)
 125  123  {
 126  124          char *orig_buf = buf;
 127  125          char *bufend = buf + len;
 128  126  
 129  127          /* LINTED E_PTRDIFF_OVERFLOW */
 130  128          buf += snprintf(buf, bufend - buf, "Generic ( ");
 131  129          if (features & VIRTIO_F_RING_INDIRECT_DESC)
 132  130                  /* LINTED E_PTRDIFF_OVERFLOW */
 133  131                  buf += snprintf(buf, bufend - buf, "INDIRECT_DESC ");
 134  132  
 135  133          /* LINTED E_PTRDIFF_OVERFLOW */
 136  134          buf += snprintf(buf, bufend - buf, ") ");
 137  135  
 138  136          /* LINTED E_PTRDIFF_OVERFLOW */
 139  137          return (buf - orig_buf);
 140  138  }
 141  139  
 142  140  boolean_t
 143  141  virtio_has_feature(struct virtio_softc *sc, uint32_t feature)
 144  142  {
 145  143          return (sc->sc_features & feature);
 146  144  }
 147  145  
 148  146  /*
 149  147   * Device configuration registers.
 150  148   */
 151  149  uint8_t
 152  150  virtio_read_device_config_1(struct virtio_softc *sc, unsigned int index)
 153  151  {
 154  152          ASSERT(sc->sc_config_offset);
 155  153          return ddi_get8(sc->sc_ioh,
 156  154              (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 157  155  }
 158  156  
 159  157  uint16_t
 160  158  virtio_read_device_config_2(struct virtio_softc *sc, unsigned int index)
 161  159  {
 162  160          ASSERT(sc->sc_config_offset);
 163  161          return ddi_get16(sc->sc_ioh,
 164  162              /* LINTED E_BAD_PTR_CAST_ALIGN */
 165  163              (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 166  164  }
 167  165  
 168  166  uint32_t
 169  167  virtio_read_device_config_4(struct virtio_softc *sc, unsigned int index)
 170  168  {
 171  169          ASSERT(sc->sc_config_offset);
 172  170          return ddi_get32(sc->sc_ioh,
 173  171              /* LINTED E_BAD_PTR_CAST_ALIGN */
 174  172              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 175  173  }
 176  174  
 177  175  uint64_t
 178  176  virtio_read_device_config_8(struct virtio_softc *sc, unsigned int index)
 179  177  {
 180  178          uint64_t r;
 181  179  
 182  180          ASSERT(sc->sc_config_offset);
 183  181          r = ddi_get32(sc->sc_ioh,
 184  182              /* LINTED E_BAD_PTR_CAST_ALIGN */
 185  183              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
 186  184              index + sizeof (uint32_t)));
  
  
 187  185  
 188  186          r <<= 32;
 189  187  
 190  188          r += ddi_get32(sc->sc_ioh,
 191  189              /* LINTED E_BAD_PTR_CAST_ALIGN */
 192  190              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 193  191          return (r);
 194  192  }
 195  193  
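Worth calling out in virtio_read_device_config_8() above: the legacy
virtio header only gives us 32-bit access, so the 64-bit value is
composed from two reads, high word first. In effect (read32 is
shorthand here, not a real DDI call):

        hi = read32(config + index + 4);        /* bits 63..32 */
        lo = read32(config + index);            /* bits 31..0  */
        value = (hi << 32) + lo;

The composite read is not atomic: a host updating the field between the
two accesses can be observed torn, and callers have to tolerate that.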
 196  194  void
 197      -virtio_write_device_config_1(struct virtio_softc *sc,
 198      -    unsigned int index, uint8_t value)
      195 +virtio_write_device_config_1(struct virtio_softc *sc, unsigned int index,
      196 +    uint8_t value)
 199  197  {
 200  198          ASSERT(sc->sc_config_offset);
 201  199          ddi_put8(sc->sc_ioh,
 202  200              (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 203  201  }
 204  202  
 205  203  void
 206      -virtio_write_device_config_2(struct virtio_softc *sc,
 207      -    unsigned int index, uint16_t value)
      204 +virtio_write_device_config_2(struct virtio_softc *sc, unsigned int index,
      205 +    uint16_t value)
 208  206  {
 209  207          ASSERT(sc->sc_config_offset);
 210  208          ddi_put16(sc->sc_ioh,
 211  209              /* LINTED E_BAD_PTR_CAST_ALIGN */
 212  210              (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 213  211  }
 214  212  
 215  213  void
 216      -virtio_write_device_config_4(struct virtio_softc *sc,
 217      -    unsigned int index, uint32_t value)
      214 +virtio_write_device_config_4(struct virtio_softc *sc, unsigned int index,
      215 +    uint32_t value)
 218  216  {
 219  217          ASSERT(sc->sc_config_offset);
 220  218          ddi_put32(sc->sc_ioh,
 221  219              /* LINTED E_BAD_PTR_CAST_ALIGN */
 222  220              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 223  221  }
 224  222  
 225  223  void
 226      -virtio_write_device_config_8(struct virtio_softc *sc,
 227      -    unsigned int index, uint64_t value)
      224 +virtio_write_device_config_8(struct virtio_softc *sc, unsigned int index,
      225 +    uint64_t value)
 228  226  {
 229  227          ASSERT(sc->sc_config_offset);
 230  228          ddi_put32(sc->sc_ioh,
 231  229              /* LINTED E_BAD_PTR_CAST_ALIGN */
 232  230              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
 233  231              value & 0xFFFFFFFF);
 234  232          ddi_put32(sc->sc_ioh,
 235  233              /* LINTED E_BAD_PTR_CAST_ALIGN */
 236  234              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
 237  235              index + sizeof (uint32_t)), value >> 32);
 238  236  }
 239  237  
 240  238  /*
 241  239   * Start/stop vq interrupt.  No guarantee.
 242  240   */
 243  241  void
 244  242  virtio_stop_vq_intr(struct virtqueue *vq)
 245  243  {
  
  
 246  244          vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 247  245  }
 248  246  
 249  247  void
 250  248  virtio_start_vq_intr(struct virtqueue *vq)
 251  249  {
 252  250          vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
 253  251  }
 254  252  
 255  253  static ddi_dma_attr_t virtio_vq_dma_attr = {
 256      -        DMA_ATTR_V0,    /* Version number */
 257      -        0,              /* low address */
 258      -        /*
 259      -         * high address. Has to fit into 32 bits
 260      -         * after page-shifting
 261      -         */
 262      -        0x00000FFFFFFFFFFF,
 263      -        0xFFFFFFFF,     /* counter register max */
 264      -        VIRTIO_PAGE_SIZE, /* page alignment required */
 265      -        0x3F,           /* burst sizes: 1 - 32 */
 266      -        0x1,            /* minimum transfer size */
 267      -        0xFFFFFFFF,     /* max transfer size */
 268      -        0xFFFFFFFF,     /* address register max */
 269      -        1,              /* no scatter-gather */
 270      -        1,              /* device operates on bytes */
 271      -        0,              /* attr flag: set to 0 */
      254 +        DMA_ATTR_V0,            /* Version number */
      255 +        0,                      /* low address */
      256 +        0x00000FFFFFFFFFFF,     /* high address. Has to fit into 32 bits */
      257 +                                /* after page-shifting */
      258 +        0xFFFFFFFF,             /* counter register max */
      259 +        VIRTIO_PAGE_SIZE,       /* page alignment required */
      260 +        0x3F,                   /* burst sizes: 1 - 32 */
      261 +        0x1,                    /* minimum transfer size */
      262 +        0xFFFFFFFF,             /* max transfer size */
      263 +        0xFFFFFFFF,             /* address register max */
      264 +        1,                      /* no scatter-gather */
      265 +        1,                      /* device operates on bytes */
      266 +        0,                      /* attr flag: set to 0 */
 272  267  };
 273  268  
 274  269  static ddi_dma_attr_t virtio_vq_indirect_dma_attr = {
 275      -        DMA_ATTR_V0,    /* Version number */
 276      -        0,              /* low address */
 277      -        0xFFFFFFFFFFFFFFFF, /* high address */
 278      -        0xFFFFFFFF,     /* counter register max */
 279      -        1,              /* No specific alignment */
 280      -        0x3F,           /* burst sizes: 1 - 32 */
 281      -        0x1,            /* minimum transfer size */
 282      -        0xFFFFFFFF,     /* max transfer size */
 283      -        0xFFFFFFFF,     /* address register max */
 284      -        1,              /* no scatter-gather */
 285      -        1,              /* device operates on bytes */
 286      -        0,              /* attr flag: set to 0 */
      270 +        DMA_ATTR_V0,            /* Version number */
      271 +        0,                      /* low address */
      272 +        0xFFFFFFFFFFFFFFFF,     /* high address */
      273 +        0xFFFFFFFF,             /* counter register max */
      274 +        1,                      /* No specific alignment */
      275 +        0x3F,                   /* burst sizes: 1 - 32 */
      276 +        0x1,                    /* minimum transfer size */
      277 +        0xFFFFFFFF,             /* max transfer size */
      278 +        0xFFFFFFFF,             /* address register max */
      279 +        1,                      /* no scatter-gather */
      280 +        1,                      /* device operates on bytes */
      281 +        0,                      /* attr flag: set to 0 */
 287  282  };
 288  283  
 289  284  /* Same for direct and indirect descriptors. */
 290  285  static ddi_device_acc_attr_t virtio_vq_devattr = {
 291  286          DDI_DEVICE_ATTR_V0,
 292  287          DDI_NEVERSWAP_ACC,
 293  288          DDI_STORECACHING_OK_ACC,
 294  289          DDI_DEFAULT_ACC
 295  290  };
 296  291  
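A quick justification for the odd-looking high-address limit in
virtio_vq_dma_attr above: the legacy QUEUE_ADDRESS register takes a
32-bit page frame number, so the ring must sit below

        0x00000FFFFFFFFFFF >> 12 == 0xFFFFFFFF (the largest expressible PFN)

hence "has to fit into 32 bits after page-shifting". Indirect
descriptor tables, by contrast, are referenced through full 64-bit
descriptor addresses, which is why virtio_vq_indirect_dma_attr carries
no such restriction.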
 297  292  static void
 298  293  virtio_free_indirect(struct vq_entry *entry)
 299  294  {
 300  295  
 301  296          (void) ddi_dma_unbind_handle(entry->qe_indirect_dma_handle);
 302  297          ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
 303  298          ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
 304  299  
 305  300          entry->qe_indirect_descs = NULL;
 306  301  }
 307  302  
 308  303  
 309  304  static int
 310  305  virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
 311  306  {
 312  307          int allocsize, num;
 313  308          size_t len;
 314  309          unsigned int ncookies;
 315  310          int ret;
  
  
 316  311  
 317  312          num = entry->qe_queue->vq_indirect_num;
 318  313          ASSERT(num > 1);
 319  314  
 320  315          allocsize = sizeof (struct vring_desc) * num;
 321  316  
 322  317          ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
 323  318              DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
 324  319          if (ret != DDI_SUCCESS) {
 325  320                  dev_err(sc->sc_dev, CE_WARN,
 326      -                    "Failed to allocate dma handle for indirect descriptors,"
 327      -                    " entry %d, vq %d", entry->qe_index,
      321 +                    "Failed to allocate dma handle for indirect descriptors, "
      322 +                    "entry %d, vq %d", entry->qe_index,
 328  323                      entry->qe_queue->vq_index);
 329  324                  goto out_alloc_handle;
 330  325          }
 331  326  
 332      -        ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle,
 333      -            allocsize, &virtio_vq_devattr,
 334      -            DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
      327 +        ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle, allocsize,
      328 +            &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 335  329              (caddr_t *)&entry->qe_indirect_descs, &len,
 336  330              &entry->qe_indirect_dma_acch);
 337  331          if (ret != DDI_SUCCESS) {
 338  332                  dev_err(sc->sc_dev, CE_WARN,
 339      -                    "Failed to alocate dma memory for indirect descriptors,"
 340      -                    " entry %d, vq %d,", entry->qe_index,
      333 +                    "Failed to allocate dma memory for indirect descriptors, "
      334 +                    "entry %d, vq %d,", entry->qe_index,
 341  335                      entry->qe_queue->vq_index);
 342  336                  goto out_alloc;
 343  337          }
 344  338  
 345  339          (void) memset(entry->qe_indirect_descs, 0xff, allocsize);
 346  340  
 347  341          ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
 348  342              (caddr_t)entry->qe_indirect_descs, len,
 349      -            DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
 350      -            DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_cookie, &ncookies);
      343 +            DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
      344 +            &entry->qe_indirect_dma_cookie, &ncookies);
 351  345          if (ret != DDI_DMA_MAPPED) {
 352  346                  dev_err(sc->sc_dev, CE_WARN,
 353      -                    "Failed to bind dma memory for indirect descriptors,"
      347 +                    "Failed to bind dma memory for indirect descriptors, "
 354  348                      "entry %d, vq %d", entry->qe_index,
 355  349                      entry->qe_queue->vq_index);
 356  350                  goto out_bind;
 357  351          }
 358  352  
 359  353          /* We asked for a single segment */
 360  354          ASSERT(ncookies == 1);
 361  355  
 362  356          return (0);
 363  357  
 364  358  out_bind:
 365  359          ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
 366  360  out_alloc:
 367  361          ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
 368  362  out_alloc_handle:
 369  363  
 370  364          return (ret);
 371  365  }
 372  366  
 373  367  /*
 374  368   * Initialize the vq structure.
 375  369   */
 376  370  static int
 377  371  virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
 378  372  {
 379  373          int ret;
 380  374          uint16_t i;
 381  375          int vq_size = vq->vq_num;
 382  376          int indirect_num = vq->vq_indirect_num;
 383  377  
 384  378          /* free slot management */
 385  379          list_create(&vq->vq_freelist, sizeof (struct vq_entry),
 386  380              offsetof(struct vq_entry, qe_list));
 387  381  
 388  382          for (i = 0; i < vq_size; i++) {
 389  383                  struct vq_entry *entry = &vq->vq_entries[i];
 390  384                  list_insert_tail(&vq->vq_freelist, entry);
 391  385                  entry->qe_index = i;
  
  
 392  386                  entry->qe_desc = &vq->vq_descs[i];
 393  387                  entry->qe_queue = vq;
 394  388  
 395  389                  if (indirect_num) {
 396  390                          ret = virtio_alloc_indirect(sc, entry);
 397  391                          if (ret)
 398  392                                  goto out_indirect;
 399  393                  }
 400  394          }
 401  395  
 402      -        mutex_init(&vq->vq_freelist_lock, "virtio-freelist",
 403      -            MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
 404      -        mutex_init(&vq->vq_avail_lock, "virtio-avail",
 405      -            MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
 406      -        mutex_init(&vq->vq_used_lock, "virtio-used",
 407      -            MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
      396 +        mutex_init(&vq->vq_freelist_lock, "virtio-freelist", MUTEX_DRIVER,
      397 +            DDI_INTR_PRI(sc->sc_intr_prio));
      398 +        mutex_init(&vq->vq_avail_lock, "virtio-avail", MUTEX_DRIVER,
      399 +            DDI_INTR_PRI(sc->sc_intr_prio));
      400 +        mutex_init(&vq->vq_used_lock, "virtio-used", MUTEX_DRIVER,
      401 +            DDI_INTR_PRI(sc->sc_intr_prio));
 408  402  
 409  403          return (0);
 410  404  
 411  405  out_indirect:
 412  406          for (i = 0; i < vq_size; i++) {
 413  407                  struct vq_entry *entry = &vq->vq_entries[i];
 414  408                  if (entry->qe_indirect_descs)
 415  409                          virtio_free_indirect(entry);
 416  410          }
 417  411  
 418  412          return (ret);
 419  413  }
 420  414  
 421      -
 422      -
 423  415  /*
 424  416   * Allocate/free a vq.
 425  417   */
 426  418  struct virtqueue *
 427      -virtio_alloc_vq(struct virtio_softc *sc,
 428      -    unsigned int index,
 429      -    unsigned int size,
 430      -    unsigned int indirect_num,
 431      -    const char *name)
      419 +virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
      420 +    unsigned int indirect_num, const char *name)
 432  421  {
 433  422          int vq_size, allocsize1, allocsize2, allocsize = 0;
 434  423          int ret;
 435  424          unsigned int ncookies;
 436  425          size_t len;
 437  426          struct virtqueue *vq;
 438  427  
 439      -
 440  428          ddi_put16(sc->sc_ioh,
 441  429              /* LINTED E_BAD_PTR_CAST_ALIGN */
 442  430              (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
 443  431          vq_size = ddi_get16(sc->sc_ioh,
 444  432              /* LINTED E_BAD_PTR_CAST_ALIGN */
 445  433              (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
 446  434          if (vq_size == 0) {
 447  435                  dev_err(sc->sc_dev, CE_WARN,
  448  436                      "virtqueue does not exist, index %d for %s\n", index, name);
 449  437                  goto out;
 450  438          }
 451  439  
 452  440          vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);
 453  441  
 454  442          /* size 0 => use native vq size, good for receive queues. */
 455  443          if (size)
 456  444                  vq_size = MIN(vq_size, size);
 457  445  
 458  446          /* allocsize1: descriptor table + avail ring + pad */
 459  447          allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
 460      -            sizeof (struct vring_avail) +
 461      -            sizeof (uint16_t) * vq_size);
      448 +            sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
 462  449          /* allocsize2: used ring + pad */
 463      -        allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used)
 464      -            + sizeof (struct vring_used_elem) * vq_size);
      450 +        allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
      451 +            sizeof (struct vring_used_elem) * vq_size);
 465  452  
 466  453          allocsize = allocsize1 + allocsize2;
 467  454  
 468  455          ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
 469  456              DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
 470  457          if (ret != DDI_SUCCESS) {
 471  458                  dev_err(sc->sc_dev, CE_WARN,
 472  459                      "Failed to allocate dma handle for vq %d", index);
 473  460                  goto out_alloc_handle;
 474  461          }
 475  462  
 476  463          ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
 477  464              &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 478  465              (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
 479  466          if (ret != DDI_SUCCESS) {
 480  467                  dev_err(sc->sc_dev, CE_WARN,
 481      -                    "Failed to alocate dma memory for vq %d", index);
      468 +                    "Failed to allocate dma memory for vq %d", index);
 482  469                  goto out_alloc;
 483  470          }
 484  471  
 485      -
 486  472          ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
 487      -            (caddr_t)vq->vq_vaddr, len,
 488      -            DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
      473 +            (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
 489  474              DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
 490  475          if (ret != DDI_DMA_MAPPED) {
 491  476                  dev_err(sc->sc_dev, CE_WARN,
 492  477                      "Failed to bind dma memory for vq %d", index);
 493  478                  goto out_bind;
 494  479          }
 495  480  
 496  481          /* We asked for a single segment */
 497  482          ASSERT(ncookies == 1);
  498  483          /* and page-aligned buffers. */
 499  484          ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);
 500  485  
 501  486          (void) memset(vq->vq_vaddr, 0, allocsize);
 502  487  
 503  488          /* Make sure all zeros hit the buffer before we point the host to it */
 504  489          membar_producer();
 505  490  
 506  491          /* set the vq address */
 507  492          ddi_put32(sc->sc_ioh,
 508  493              /* LINTED E_BAD_PTR_CAST_ALIGN */
 509  494              (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
 510  495              (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));
 511  496  
 512  497          /* remember addresses and offsets for later use */
 513  498          vq->vq_owner = sc;
 514  499          vq->vq_num = vq_size;
 515  500          vq->vq_index = index;
 516  501          vq->vq_descs = vq->vq_vaddr;
 517  502          vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
 518  503          vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
 519  504          vq->vq_usedoffset = allocsize1;
 520  505          vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);
 521  506  
 522  507          ASSERT(indirect_num == 0 ||
 523  508              virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
 524  509          vq->vq_indirect_num = indirect_num;
  
  
 525  510  
 526  511          /* free slot management */
 527  512          vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
 528  513              KM_SLEEP);
 529  514  
 530  515          ret = virtio_init_vq(sc, vq);
 531  516          if (ret)
 532  517                  goto out_init;
 533  518  
 534  519          dev_debug(sc->sc_dev, CE_NOTE,
 535      -            "Allocated %d entries for vq %d:%s (%d incdirect descs)",
      520 +            "Allocated %d entries for vq %d:%s (%d indirect descs)",
 536  521              vq_size, index, name, indirect_num * vq_size);
 537  522  
 538  523          return (vq);
 539  524  
 540  525  out_init:
 541  526          kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
 542  527          (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
 543  528  out_bind:
 544  529          ddi_dma_mem_free(&vq->vq_dma_acch);
 545  530  out_alloc:
 546  531          ddi_dma_free_handle(&vq->vq_dma_handle);
 547  532  out_alloc_handle:
 548  533          kmem_free(vq, sizeof (struct virtqueue));
 549  534  out:
 550  535          return (NULL);
 551  536  }
 552  537  
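As a sanity check of the size math above, take a 256-entry queue with
the standard legacy ring layout (16-byte descriptors, 4-byte avail/used
headers, 2-byte avail ring entries, 8-byte used ring elements, and a
4096-byte VIRTIO_PAGE_SIZE):

        allocsize1 = ALIGN(16*256 + 4 + 2*256) = ALIGN(4612) = 8192
        allocsize2 = ALIGN(4 + 8*256)          = ALIGN(2052) = 4096
        allocsize  = 12288 bytes, i.e. three pages

Since allocsize1 is page-aligned, the used ring always begins on a page
boundary of its own, as the legacy layout requires.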
 553      -
 554  538  void
 555  539  virtio_free_vq(struct virtqueue *vq)
 556  540  {
 557  541          struct virtio_softc *sc = vq->vq_owner;
 558  542          int i;
 559  543  
 560  544          /* tell device that there's no virtqueue any longer */
 561  545          ddi_put16(sc->sc_ioh,
 562  546              /* LINTED E_BAD_PTR_CAST_ALIGN */
 563  547              (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
 564  548              vq->vq_index);
 565  549          ddi_put32(sc->sc_ioh,
 566  550              /* LINTED E_BAD_PTR_CAST_ALIGN */
 567  551              (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);
 568  552  
 569  553          /* Free the indirect descriptors, if any. */
 570  554          for (i = 0; i < vq->vq_num; i++) {
 571  555                  struct vq_entry *entry = &vq->vq_entries[i];
 572  556                  if (entry->qe_indirect_descs)
 573  557                          virtio_free_indirect(entry);
 574  558          }
 575  559  
 576  560          kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);
 577  561  
 578  562          (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
 579  563          ddi_dma_mem_free(&vq->vq_dma_acch);
 580  564          ddi_dma_free_handle(&vq->vq_dma_handle);
 581  565  
 582  566          mutex_destroy(&vq->vq_used_lock);
 583  567          mutex_destroy(&vq->vq_avail_lock);
 584  568          mutex_destroy(&vq->vq_freelist_lock);
 585  569  
 586  570          kmem_free(vq, sizeof (struct virtqueue));
 587  571  }
 588  572  
 589  573  /*
 590  574   * Free descriptor management.
 591  575   */
 592  576  struct vq_entry *
 593  577  vq_alloc_entry(struct virtqueue *vq)
 594  578  {
 595  579          struct vq_entry *qe;
 596  580  
 597  581          mutex_enter(&vq->vq_freelist_lock);
 598  582          if (list_is_empty(&vq->vq_freelist)) {
 599  583                  mutex_exit(&vq->vq_freelist_lock);
 600  584                  return (NULL);
 601  585          }
 602  586          qe = list_remove_head(&vq->vq_freelist);
 603  587  
 604  588          ASSERT(vq->vq_used_entries >= 0);
 605  589          vq->vq_used_entries++;
 606  590  
 607  591          mutex_exit(&vq->vq_freelist_lock);
 608  592  
 609  593          qe->qe_next = NULL;
 610  594          qe->qe_indirect_next = 0;
 611  595          (void) memset(qe->qe_desc, 0, sizeof (struct vring_desc));
 612  596  
 613  597          return (qe);
 614  598  }
 615  599  
 616  600  void
 617  601  vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
 618  602  {
 619  603          mutex_enter(&vq->vq_freelist_lock);
 620  604  
 621  605          list_insert_head(&vq->vq_freelist, qe);
 622  606          vq->vq_used_entries--;
 623  607          ASSERT(vq->vq_used_entries >= 0);
 624  608          mutex_exit(&vq->vq_freelist_lock);
 625  609  }
 626  610  
 627  611  /*
 628  612   * We (intentionally) don't have a global vq mutex, so you are
  629  613   * responsible for external locking to avoid allocating/freeing any
 630  614   * entries before using the returned value. Have fun.
 631  615   */
 632  616  uint_t
 633  617  vq_num_used(struct virtqueue *vq)
 634  618  {
 635  619          /* vq->vq_freelist_lock would not help here. */
 636  620          return (vq->vq_used_entries);
 637  621  }
 638  622  
 639  623  static inline void
 640  624  virtio_ve_set_desc(struct vring_desc *desc, uint64_t paddr, uint32_t len,
  
  
 641  625      boolean_t write)
 642  626  {
 643  627          desc->addr = paddr;
 644  628          desc->len = len;
 645  629          desc->next = 0;
 646  630          desc->flags = 0;
 647  631  
 648  632          /* 'write' - from the driver's point of view */
 649  633          if (!write)
 650  634                  desc->flags = VRING_DESC_F_WRITE;
 651      -
 652      -
 653  635  }
 654  636  
 655  637  void
 656  638  virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 657  639      boolean_t write)
 658  640  {
 659  641          virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
 660  642  }
 661  643  
      644 +unsigned int
      645 +virtio_ve_indirect_available(struct vq_entry *qe)
      646 +{
      647 +        return (qe->qe_queue->vq_indirect_num - (qe->qe_indirect_next - 1));
      648 +}
      649 +
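The new virtio_ve_indirect_available() helper above lets a caller check
how many indirect slots remain in an entry before trying to map another
buffer. A hypothetical transmit path might use it like this:

        if (virtio_ve_indirect_available(qe) < ncookies) {
                /* not enough slots to map the buffer; bounce-copy it */
        }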
 662  650  void
 663  651  virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 664  652      boolean_t write)
 665  653  {
 666  654          struct vring_desc *indirect_desc;
 667  655  
 668  656          ASSERT(qe->qe_queue->vq_indirect_num);
 669  657          ASSERT(qe->qe_indirect_next < qe->qe_queue->vq_indirect_num);
 670  658  
 671  659          indirect_desc = &qe->qe_indirect_descs[qe->qe_indirect_next];
 672  660          virtio_ve_set_desc(indirect_desc, paddr, len, write);
 673  661          qe->qe_indirect_next++;
 674  662  }
 675  663  
 676  664  void
 677  665  virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
 678  666      ddi_dma_cookie_t dma_cookie, unsigned int ncookies, boolean_t write)
 679  667  {
 680  668          int i;
 681  669  
 682  670          for (i = 0; i < ncookies; i++) {
 683  671                  virtio_ve_add_indirect_buf(qe, dma_cookie.dmac_laddress,
 684  672                      dma_cookie.dmac_size, write);
 685  673                  ddi_dma_nextcookie(dma_handle, &dma_cookie);
 686  674          }
 687  675  }
 688  676  
 689  677  void
 690  678  virtio_sync_vq(struct virtqueue *vq)
 691  679  {
 692  680          struct virtio_softc *vsc = vq->vq_owner;
 693  681  
 694  682          /* Make sure the avail ring update hit the buffer */
  
  
 695  683          membar_producer();
 696  684  
 697  685          vq->vq_avail->idx = vq->vq_avail_idx;
 698  686  
 699  687          /* Make sure the avail idx update hits the buffer */
 700  688          membar_producer();
 701  689  
 702  690          /* Make sure we see the flags update */
 703  691          membar_consumer();
 704  692  
 705      -        if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
      693 +        if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
 706  694                  ddi_put16(vsc->sc_ioh,
 707  695                      /* LINTED E_BAD_PTR_CAST_ALIGN */
 708  696                      (uint16_t *)(vsc->sc_io_addr +
 709  697                      VIRTIO_CONFIG_QUEUE_NOTIFY),
 710  698                      vq->vq_index);
      699 +        }
 711  700  }
 712  701  
 713  702  void
 714  703  virtio_push_chain(struct vq_entry *qe, boolean_t sync)
 715  704  {
 716  705          struct virtqueue *vq = qe->qe_queue;
 717  706          struct vq_entry *head = qe;
 718  707          struct vring_desc *desc;
 719  708          int idx;
 720  709  
 721  710          ASSERT(qe);
 722  711  
 723  712          /*
 724  713           * Bind the descs together, paddr and len should be already
 725  714           * set with virtio_ve_set
 726  715           */
 727  716          do {
 728  717                  /* Bind the indirect descriptors */
 729  718                  if (qe->qe_indirect_next > 1) {
 730  719                          uint16_t i = 0;
 731  720  
 732  721                          /*
 733  722                           * Set the pointer/flags to the
 734  723                           * first indirect descriptor
 735  724                           */
 736  725                          virtio_ve_set_desc(qe->qe_desc,
 737  726                              qe->qe_indirect_dma_cookie.dmac_laddress,
 738  727                              sizeof (struct vring_desc) * qe->qe_indirect_next,
 739  728                              B_FALSE);
 740  729                          qe->qe_desc->flags |= VRING_DESC_F_INDIRECT;
 741  730  
 742  731                          /* For all but the last one, add the next index/flag */
 743  732                          do {
 744  733                                  desc = &qe->qe_indirect_descs[i];
 745  734                                  i++;
 746  735  
 747  736                                  desc->flags |= VRING_DESC_F_NEXT;
 748  737                                  desc->next = i;
 749  738                          } while (i < qe->qe_indirect_next - 1);
 750  739  
 751  740                  }
 752  741  
 753  742                  if (qe->qe_next) {
 754  743                          qe->qe_desc->flags |= VRING_DESC_F_NEXT;
 755  744                          qe->qe_desc->next = qe->qe_next->qe_index;
 756  745                  }
 757  746  
 758  747                  qe = qe->qe_next;
 759  748          } while (qe);
 760  749  
 761  750          mutex_enter(&vq->vq_avail_lock);
 762  751          idx = vq->vq_avail_idx;
 763  752          vq->vq_avail_idx++;
 764  753  
 765  754          /* Make sure the bits hit the descriptor(s) */
  
  
 766  755          membar_producer();
 767  756          vq->vq_avail->ring[idx % vq->vq_num] = head->qe_index;
 768  757  
 769  758          /* Notify the device, if needed. */
 770  759          if (sync)
 771  760                  virtio_sync_vq(vq);
 772  761  
 773  762          mutex_exit(&vq->vq_avail_lock);
 774  763  }
 775  764  
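Putting the producer half of this API together, a transmit path built
on these helpers looks roughly like the following sketch (error
handling elided; the header/payload handle and cookie names are
illustrative):

        struct vq_entry *qe;

        qe = vq_alloc_entry(vq);
        if (qe == NULL)
                return (B_FALSE);       /* ring full, retry later */

        /* write == B_TRUE: the device will read these buffers */
        virtio_ve_add_indirect_buf(qe, hdr_cookie.dmac_laddress,
            hdr_len, B_TRUE);
        virtio_ve_add_cookie(qe, payload_handle, payload_cookie,
            ncookies, B_TRUE);

        /* publish the chain on the avail ring and notify the host */
        virtio_push_chain(qe, B_TRUE);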
 776      -/* Get a chain of descriptors from the used ring, if one is available. */
      765 +/*
      766 + * Get a chain of descriptors from the used ring, if one is available.
      767 + */
 777  768  struct vq_entry *
 778  769  virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
 779  770  {
 780  771          struct vq_entry *head;
 781  772          int slot;
 782  773          int usedidx;
 783  774  
 784  775          mutex_enter(&vq->vq_used_lock);
 785  776  
 786  777          /* No used entries? Bye. */
 787  778          if (vq->vq_used_idx == vq->vq_used->idx) {
 788  779                  mutex_exit(&vq->vq_used_lock);
 789  780                  return (NULL);
 790  781          }
 791  782  
 792  783          usedidx = vq->vq_used_idx;
 793  784          vq->vq_used_idx++;
 794  785          mutex_exit(&vq->vq_used_lock);
 795  786  
 796  787          usedidx %= vq->vq_num;
 797  788  
 798  789          /* Make sure we do the next step _after_ checking the idx. */
 799  790          membar_consumer();
 800  791  
 801  792          slot = vq->vq_used->ring[usedidx].id;
 802  793          *len = vq->vq_used->ring[usedidx].len;
 803  794  
 804  795          head = &vq->vq_entries[slot];
 805  796  
 806  797          return (head);
 807  798  }
 808  799  
 809  800  void
 810  801  virtio_free_chain(struct vq_entry *qe)
 811  802  {
  
  
 812  803          struct vq_entry *tmp;
 813  804          struct virtqueue *vq = qe->qe_queue;
 814  805  
 815  806          ASSERT(qe);
 816  807  
 817  808          do {
 818  809                  ASSERT(qe->qe_queue == vq);
 819  810                  tmp = qe->qe_next;
 820  811                  vq_free_entry(vq, qe);
 821  812                  qe = tmp;
 822      -        } while (tmp);
      813 +        } while (tmp != NULL);
 823  814  }
 824  815  
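And the consumer half, typically run from the vq interrupt handler:

        struct vq_entry *qe;
        uint32_t len;

        /* drain everything the device has marked used */
        while ((qe = virtio_pull_chain(vq, &len)) != NULL) {
                /* ... process; len is the byte count the device
                 * wrote into the chain ... */
                virtio_free_chain(qe);
        }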
 825  816  void
 826  817  virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
 827  818  {
 828  819          first->qe_next = second;
 829  820  }
 830  821  
 831  822  static int
 832  823  virtio_register_msi(struct virtio_softc *sc,
 833  824      struct virtio_int_handler *config_handler,
 834      -    struct virtio_int_handler vq_handlers[],
 835      -    int intr_types)
      825 +    struct virtio_int_handler vq_handlers[], int intr_types)
 836  826  {
 837  827          int count, actual;
 838  828          int int_type;
 839  829          int i;
 840  830          int handler_count;
 841  831          int ret;
 842  832  
 843  833          /* If both MSI and MSI-x are reported, prefer MSI-x. */
 844  834          int_type = DDI_INTR_TYPE_MSI;
 845  835          if (intr_types & DDI_INTR_TYPE_MSIX)
 846  836                  int_type = DDI_INTR_TYPE_MSIX;
 847  837  
 848  838          /* Walk the handler table to get the number of handlers. */
 849  839          for (handler_count = 0;
 850  840              vq_handlers && vq_handlers[handler_count].vh_func;
 851  841              handler_count++)
 852  842                  ;
 853  843  
 854  844          /* +1 if there is a config change handler. */
 855      -        if (config_handler)
      845 +        if (config_handler != NULL)
 856  846                  handler_count++;
 857  847  
 858  848          /* Number of MSIs supported by the device. */
 859  849          ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
 860  850          if (ret != DDI_SUCCESS) {
 861  851                  dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
 862  852                  return (ret);
 863  853          }
 864  854  
 865  855          /*
  866  856           * Those who try to register more handlers than the device
 867  857           * supports shall suffer.
 868  858           */
 869  859          ASSERT(handler_count <= count);
 870  860  
 871      -        sc->sc_intr_htable = kmem_zalloc(
 872      -            sizeof (ddi_intr_handle_t) * handler_count, KM_SLEEP);
      861 +        sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t) *
      862 +            handler_count, KM_SLEEP);
 873  863  
 874  864          ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
 875  865              handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
 876  866          if (ret != DDI_SUCCESS) {
 877  867                  dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
 878  868                  goto out_msi_alloc;
 879  869          }
 880  870  
 881  871          if (actual != handler_count) {
 882  872                  dev_err(sc->sc_dev, CE_WARN,
 883  873                      "Not enough MSI available: need %d, available %d",
 884  874                      handler_count, actual);
 885  875                  goto out_msi_available;
 886  876          }
 887  877  
 888  878          sc->sc_intr_num = handler_count;
 889  879          sc->sc_intr_config = B_FALSE;
 890      -        if (config_handler) {
      880 +        if (config_handler != NULL) {
 891  881                  sc->sc_intr_config = B_TRUE;
 892  882          }
 893  883  
 894  884          /* Assume they are all same priority */
 895  885          ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
 896  886          if (ret != DDI_SUCCESS) {
 897  887                  dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
 898  888                  goto out_msi_prio;
 899  889          }
 900  890  
 901  891          /* Add the vq handlers */
 902  892          for (i = 0; vq_handlers[i].vh_func; i++) {
 903  893                  ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 904      -                    vq_handlers[i].vh_func,
 905      -                    sc, vq_handlers[i].vh_priv);
      894 +                    vq_handlers[i].vh_func, sc, vq_handlers[i].vh_priv);
 906  895                  if (ret != DDI_SUCCESS) {
 907  896                          dev_err(sc->sc_dev, CE_WARN,
 908  897                              "ddi_intr_add_handler failed");
 909  898                          /* Remove the handlers that succeeded. */
 910  899                          while (--i >= 0) {
 911  900                                  (void) ddi_intr_remove_handler(
 912  901                                      sc->sc_intr_htable[i]);
 913  902                          }
 914  903                          goto out_add_handlers;
 915  904                  }
 916  905          }
 917  906  
 918  907          /* Don't forget the config handler */
 919      -        if (config_handler) {
      908 +        if (config_handler != NULL) {
 920  909                  ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 921      -                    config_handler->vh_func,
 922      -                    sc, config_handler->vh_priv);
      910 +                    config_handler->vh_func, sc, config_handler->vh_priv);
 923  911                  if (ret != DDI_SUCCESS) {
 924  912                          dev_err(sc->sc_dev, CE_WARN,
 925  913                              "ddi_intr_add_handler failed");
 926  914                          /* Remove the handlers that succeeded. */
 927  915                          while (--i >= 0) {
 928  916                                  (void) ddi_intr_remove_handler(
 929  917                                      sc->sc_intr_htable[i]);
 930  918                          }
 931  919                          goto out_add_handlers;
 932  920                  }
 933  921          }
 934  922  
 935  923          /* We know we are using MSI, so set the config offset. */
 936  924          sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
 937  925  
 938      -        ret = ddi_intr_get_cap(sc->sc_intr_htable[0],
 939      -            &sc->sc_intr_cap);
      926 +        ret = ddi_intr_get_cap(sc->sc_intr_htable[0], &sc->sc_intr_cap);
 940  927          /* Just in case. */
 941  928          if (ret != DDI_SUCCESS)
 942  929                  sc->sc_intr_cap = 0;
 943  930  
 944  931  out_add_handlers:
 945  932  out_msi_prio:
 946  933  out_msi_available:
 947  934          for (i = 0; i < actual; i++)
 948  935                  (void) ddi_intr_free(sc->sc_intr_htable[i]);
 949  936  out_msi_alloc:
 950  937          kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) * count);
 951  938  
 952  939          return (ret);
 953  940  }
 954  941  
 955  942  struct virtio_handler_container {
 956  943          int nhandlers;
 957  944          struct virtio_int_handler config_handler;
 958  945          struct virtio_int_handler vq_handlers[];
 959  946  };
 960  947  
 961  948  uint_t
 962  949  virtio_intx_dispatch(caddr_t arg1, caddr_t arg2)
 963  950  {
 964  951          struct virtio_softc *sc = (void *)arg1;
 965  952          struct virtio_handler_container *vhc = (void *)arg2;
 966  953          uint8_t isr_status;
 967  954          int i;
 968  955  
 969  956          isr_status = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
 970  957              VIRTIO_CONFIG_ISR_STATUS));
 971  958  
 972  959          if (!isr_status)
 973  960                  return (DDI_INTR_UNCLAIMED);
 974  961  
 975  962          if ((isr_status & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
 976  963              vhc->config_handler.vh_func) {
 977  964                  vhc->config_handler.vh_func((void *)sc,
 978  965                      vhc->config_handler.vh_priv);
 979  966          }
 980  967  
 981  968          /* Notify all handlers */
 982  969          for (i = 0; i < vhc->nhandlers; i++) {
 983  970                  vhc->vq_handlers[i].vh_func((void *)sc,
 984  971                      vhc->vq_handlers[i].vh_priv);
 985  972          }
 986  973  
 987  974          return (DDI_INTR_CLAIMED);
 988  975  }
 989  976  
 990  977  /*
 991  978   * config_handler and vq_handlers may be allocated on stack.
  992  979   * Take precautions not to lose them.
 993  980   */
 994  981  static int
 995  982  virtio_register_intx(struct virtio_softc *sc,
 996  983      struct virtio_int_handler *config_handler,
 997  984      struct virtio_int_handler vq_handlers[])
 998  985  {
 999  986          int vq_handler_count;
1000  987          int config_handler_count = 0;
  
  
1001  988          int actual;
1002  989          struct virtio_handler_container *vhc;
1003  990          int ret = DDI_FAILURE;
1004  991  
1005  992          /* Walk the handler table to get the number of handlers. */
1006  993          for (vq_handler_count = 0;
1007  994              vq_handlers && vq_handlers[vq_handler_count].vh_func;
1008  995              vq_handler_count++)
1009  996                  ;
1010  997  
1011      -        if (config_handler)
      998 +        if (config_handler != NULL)
1012  999                  config_handler_count = 1;
1013 1000  
1014 1001          vhc = kmem_zalloc(sizeof (struct virtio_handler_container) +
1015      -            sizeof (struct virtio_int_handler) * vq_handler_count,
1016      -            KM_SLEEP);
     1002 +            sizeof (struct virtio_int_handler) * vq_handler_count, KM_SLEEP);
1017 1003  
1018 1004          vhc->nhandlers = vq_handler_count;
1019 1005          (void) memcpy(vhc->vq_handlers, vq_handlers,
1020 1006              sizeof (struct virtio_int_handler) * vq_handler_count);
1021 1007  
1022      -        if (config_handler) {
     1008 +        if (config_handler != NULL) {
1023 1009                  (void) memcpy(&vhc->config_handler, config_handler,
1024 1010                      sizeof (struct virtio_int_handler));
1025 1011          }
1026 1012  
1027 1013          /* Just a single entry for a single interrupt. */
1028 1014          sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
1029 1015  
1030 1016          ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
1031      -            DDI_INTR_TYPE_FIXED, 0, 1, &actual,
1032      -            DDI_INTR_ALLOC_NORMAL);
     1017 +            DDI_INTR_TYPE_FIXED, 0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
1033 1018          if (ret != DDI_SUCCESS) {
1034 1019                  dev_err(sc->sc_dev, CE_WARN,
1035 1020                      "Failed to allocate a fixed interrupt: %d", ret);
1036 1021                  goto out_int_alloc;
1037 1022          }
1038 1023  
1039 1024          ASSERT(actual == 1);
1040 1025          sc->sc_intr_num = 1;
1041 1026  
1042 1027          ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
1043 1028          if (ret != DDI_SUCCESS) {
1044 1029                  dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
1045 1030                  goto out_prio;
1046 1031          }
1047 1032  
1048 1033          ret = ddi_intr_add_handler(sc->sc_intr_htable[0],
1049 1034              virtio_intx_dispatch, sc, vhc);
1050 1035          if (ret != DDI_SUCCESS) {
1051 1036                  dev_err(sc->sc_dev, CE_WARN, "ddi_intr_add_handler failed");
1052 1037                  goto out_add_handlers;
1053 1038          }
1054 1039  
1055 1040          /* We know we are not using MSI, so set the config offset. */
1056 1041          sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1057 1042  
1058 1043          return (DDI_SUCCESS);
1059 1044  
1060 1045  out_add_handlers:
1061 1046  out_prio:
1062 1047          (void) ddi_intr_free(sc->sc_intr_htable[0]);
1063 1048  out_int_alloc:
1064 1049          kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
1065 1050          kmem_free(vhc, sizeof (struct virtio_int_handler) *
1066 1051              (vq_handler_count + config_handler_count));
1067 1052          return (ret);
1068 1053  }
1069 1054  
1070 1055  /*
 1071 1056   * We find out whether MSI is supported during this call, and the
 1072 1057   * register layout depends on it (doh). Don't access the
 1073 1058   * device-specific bits in BAR 0 before calling it!
1074 1059   */
1075 1060  int
1076 1061  virtio_register_ints(struct virtio_softc *sc,
1077 1062      struct virtio_int_handler *config_handler,
1078 1063      struct virtio_int_handler vq_handlers[])
1079 1064  {
1080 1065          int ret;
1081 1066          int intr_types;
1082 1067  
1083 1068          /* Determine which types of interrupts are supported */
1084 1069          ret = ddi_intr_get_supported_types(sc->sc_dev, &intr_types);
1085 1070          if (ret != DDI_SUCCESS) {
1086 1071                  dev_err(sc->sc_dev, CE_WARN, "Can't get supported int types");
1087 1072                  goto out_inttype;
1088 1073          }
1089 1074  
1090 1075          /* If we have msi, let's use them. */
1091 1076          if (intr_types & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
1092 1077                  ret = virtio_register_msi(sc, config_handler,
1093 1078                      vq_handlers, intr_types);
1094 1079                  if (!ret)
1095 1080                          return (0);
1096 1081          }
1097 1082  
1098 1083          /* Fall back to old-fashioned interrupts. */
1099 1084          if (intr_types & DDI_INTR_TYPE_FIXED) {
1100 1085                  dev_debug(sc->sc_dev, CE_WARN,
1101 1086                      "Using legacy interrupts");
1102 1087  
1103 1088                  return (virtio_register_intx(sc, config_handler, vq_handlers));
  
  
1104 1089          }
1105 1090  
1106 1091          dev_err(sc->sc_dev, CE_WARN,
1107 1092              "MSI failed and fixed interrupts not supported. Giving up.");
1108 1093          ret = DDI_FAILURE;
1109 1094  
1110 1095  out_inttype:
1111 1096          return (ret);
1112 1097  }
1113 1098  
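For reference, the vq_handlers argument is a NULL-terminated array, as
the counting loops in both registration paths imply. A caller might set
things up as in this sketch (handler names and softc layout are
illustrative; the fields are vh_func and vh_priv):

        struct virtio_int_handler my_vq_handlers[] = {
                { my_rx_intr, sc },
                { my_tx_intr, sc },
                { NULL },               /* terminator */
        };
        struct virtio_int_handler my_config_handler = {
                my_config_intr, sc
        };

        ret = virtio_register_ints(&sc->sc_virtio, &my_config_handler,
            my_vq_handlers);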
1114      -
1115 1099  static int
1116 1100  virtio_enable_msi(struct virtio_softc *sc)
1117 1101  {
1118 1102          int ret, i;
1119 1103          int vq_handler_count = sc->sc_intr_num;
1120 1104  
 1121 1105          /* Number of handlers, not counting the config. */
1122 1106          if (sc->sc_intr_config)
1123 1107                  vq_handler_count--;
1124 1108  
 1125 1109          /* Enable the interrupts. Either the whole block, or one by one. */
1126 1110          if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1127 1111                  ret = ddi_intr_block_enable(sc->sc_intr_htable,
1128 1112                      sc->sc_intr_num);
1129 1113                  if (ret != DDI_SUCCESS) {
1130 1114                          dev_err(sc->sc_dev, CE_WARN,
1131 1115                              "Failed to enable MSI, falling back to INTx");
1132 1116                          goto out_enable;
1133 1117                  }
1134 1118          } else {
1135 1119                  for (i = 0; i < sc->sc_intr_num; i++) {
1136 1120                          ret = ddi_intr_enable(sc->sc_intr_htable[i]);
1137 1121                          if (ret != DDI_SUCCESS) {
1138 1122                                  dev_err(sc->sc_dev, CE_WARN,
1139 1123                                      "Failed to enable MSI %d, "
1140 1124                                      "falling back to INTx", i);
1141 1125  
1142 1126                                  while (--i >= 0) {
1143 1127                                          (void) ddi_intr_disable(
  
  
1144 1128                                              sc->sc_intr_htable[i]);
1145 1129                                  }
1146 1130                                  goto out_enable;
1147 1131                          }
1148 1132                  }
1149 1133          }
1150 1134  
1151 1135          /* Bind the allocated MSI to the queues and config */
1152 1136          for (i = 0; i < vq_handler_count; i++) {
1153 1137                  int check;
     1138 +
1154 1139                  ddi_put16(sc->sc_ioh,
1155 1140                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1156 1141                      (uint16_t *)(sc->sc_io_addr +
1157 1142                      VIRTIO_CONFIG_QUEUE_SELECT), i);
1158 1143  
1159 1144                  ddi_put16(sc->sc_ioh,
1160 1145                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1161 1146                      (uint16_t *)(sc->sc_io_addr +
1162 1147                      VIRTIO_CONFIG_QUEUE_VECTOR), i);
1163 1148  
1164 1149                  check = ddi_get16(sc->sc_ioh,
1165 1150                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1166 1151                      (uint16_t *)(sc->sc_io_addr +
1167 1152                      VIRTIO_CONFIG_QUEUE_VECTOR));
1168 1153                  if (check != i) {
1169      -                        dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler"
     1154 +                        dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1170 1155                              "for VQ %d, MSI %d. Check = %x", i, i, check);
1171 1156                          ret = ENODEV;
1172 1157                          goto out_bind;
1173 1158                  }
1174 1159          }
1175 1160  
1176 1161          if (sc->sc_intr_config) {
1177 1162                  int check;
     1163 +
1178 1164                  ddi_put16(sc->sc_ioh,
1179 1165                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1180 1166                      (uint16_t *)(sc->sc_io_addr +
1181 1167                      VIRTIO_CONFIG_CONFIG_VECTOR), i);
1182 1168  
1183 1169                  check = ddi_get16(sc->sc_ioh,
1184 1170                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1185 1171                      (uint16_t *)(sc->sc_io_addr +
1186 1172                      VIRTIO_CONFIG_CONFIG_VECTOR));
1187 1173                  if (check != i) {
1188 1174                          dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1189 1175                              "for Config updates, MSI %d", i);
1190 1176                          ret = ENODEV;
1191 1177                          goto out_bind;
1192 1178                  }
1193 1179          }
1194 1180  
1195 1181          return (DDI_SUCCESS);
1196 1182  
1197 1183  out_bind:
1198 1184          /* Unbind the vqs */
1199 1185          for (i = 0; i < vq_handler_count - 1; i++) {
1200 1186                  ddi_put16(sc->sc_ioh,
1201 1187                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1202 1188                      (uint16_t *)(sc->sc_io_addr +
1203 1189                      VIRTIO_CONFIG_QUEUE_SELECT), i);
1204 1190  
1205 1191                  ddi_put16(sc->sc_ioh,
1206 1192                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1207 1193                      (uint16_t *)(sc->sc_io_addr +
1208 1194                      VIRTIO_CONFIG_QUEUE_VECTOR),
1209 1195                      VIRTIO_MSI_NO_VECTOR);
1210 1196          }
1211 1197          /* And the config */
1212 1198          /* LINTED E_BAD_PTR_CAST_ALIGN */
1213 1199          ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1214 1200              VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);
1215 1201  
1216 1202          ret = DDI_FAILURE;
1217 1203  
1218 1204  out_enable:
1219 1205          return (ret);
1220 1206  }
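
The select/write/read-back sequence above is the virtio MSI binding idiom: the driver selects a queue, writes a vector number, and reads it back to confirm the device accepted it. The same idiom as a standalone helper, purely as a sketch (the helper name is hypothetical; the registers and LINTED annotations are taken from the code above):

        static int
        virtio_bind_vq_vector(struct virtio_softc *sc, uint16_t queue,
            uint16_t vector)
        {
                uint16_t check;

                /* Select the queue whose vector register we program. */
                ddi_put16(sc->sc_ioh,
                    /* LINTED E_BAD_PTR_CAST_ALIGN */
                    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
                    queue);

                /* Write the vector, then read it back to verify the bind. */
                ddi_put16(sc->sc_ioh,
                    /* LINTED E_BAD_PTR_CAST_ALIGN */
                    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_VECTOR),
                    vector);

                check = ddi_get16(sc->sc_ioh,
                    /* LINTED E_BAD_PTR_CAST_ALIGN */
                    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_VECTOR));

                return (check == vector ? 0 : ENODEV);
        }
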
1221 1207  
1222      -static int virtio_enable_intx(struct virtio_softc *sc)
     1208 +static int
     1209 +virtio_enable_intx(struct virtio_softc *sc)
1223 1210  {
1224 1211          int ret;
1225 1212  
1226 1213          ret = ddi_intr_enable(sc->sc_intr_htable[0]);
1227      -        if (ret != DDI_SUCCESS)
     1214 +        if (ret != DDI_SUCCESS) {
1228 1215                  dev_err(sc->sc_dev, CE_WARN,
1229 1216                      "Failed to enable interrupt: %d", ret);
     1217 +        }
     1218 +
1230 1219          return (ret);
1231 1220  }
1232 1221  
1233 1222  /*
1234 1223   * We can't enable/disable individual handlers in the INTx case, so
1235 1224   * enable/disable the whole bunch even in the MSI case.
1236 1225   */
1237 1226  int
1238 1227  virtio_enable_ints(struct virtio_softc *sc)
1239 1228  {
1240 1229  
1241 1230          /* See if we are using MSI. */
1242 1231          if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI)
1243 1232                  return (virtio_enable_msi(sc));
1244 1233  
1245 1234          ASSERT(sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI);
1246 1235  
1247 1236          return (virtio_enable_intx(sc));
1248 1237  }
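
Note that sc_config_offset does double duty here: it locates the device-specific config space and also records whether MSI is live. A plausible attach-path sequence, reusing the hypothetical handlers from the earlier sketch:

        /* Sketch: register first, then enable; unwind on failure. */
        if (virtio_register_ints(sc, &my_config_handler, my_vq_handlers) != 0)
                return (DDI_FAILURE);
        if (virtio_enable_ints(sc) != DDI_SUCCESS) {
                virtio_release_ints(sc);
                return (DDI_FAILURE);
        }
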
1249 1238  
1250 1239  void
1251 1240  virtio_release_ints(struct virtio_softc *sc)
1252 1241  {
1253 1242          int i;
1254 1243          int ret;
1255 1244  
1256 1245          /* We were running with MSI, unbind them. */
1257 1246          if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
1258 1247                  /* Unbind all vqs */
1259 1248                  for (i = 0; i < sc->sc_nvqs; i++) {
1260 1249                          ddi_put16(sc->sc_ioh,
1261 1250                              /* LINTED E_BAD_PTR_CAST_ALIGN */
1262 1251                              (uint16_t *)(sc->sc_io_addr +
1263 1252                              VIRTIO_CONFIG_QUEUE_SELECT), i);
1264 1253  
1265 1254                          ddi_put16(sc->sc_ioh,
1266 1255                              /* LINTED E_BAD_PTR_CAST_ALIGN */
1267 1256                              (uint16_t *)(sc->sc_io_addr +
1268 1257                              VIRTIO_CONFIG_QUEUE_VECTOR),
1269 1258                              VIRTIO_MSI_NO_VECTOR);
1270 1259                  }
1271 1260                  /* And the config */
1272 1261                  /* LINTED E_BAD_PTR_CAST_ALIGN */
1273 1262                  ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1274 1263              VIRTIO_CONFIG_CONFIG_VECTOR),
1275 1264                      VIRTIO_MSI_NO_VECTOR);
1276 1265  
1277 1266          }
1278 1267  
1279 1268          /* Disable the interrupts. Either the whole block, or one by one. */
1280 1269          if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1281 1270                  ret = ddi_intr_block_disable(sc->sc_intr_htable,
1282 1271                      sc->sc_intr_num);
1283 1272                  if (ret != DDI_SUCCESS) {
1284 1273                          dev_err(sc->sc_dev, CE_WARN,
1285      -                            "Failed to disable MSIs, won't be able to"
     1274 +                            "Failed to disable MSIs, won't be able to "
1286 1275                              "reuse next time");
1287 1276                  }
1288 1277          } else {
1289 1278                  for (i = 0; i < sc->sc_intr_num; i++) {
1290 1279                          ret = ddi_intr_disable(sc->sc_intr_htable[i]);
1291 1280                          if (ret != DDI_SUCCESS) {
1292 1281                                  dev_err(sc->sc_dev, CE_WARN,
1293 1282                                      "Failed to disable interrupt %d, "
1294 1283                                      "won't be able to reuse", i);
1295      -
1296 1284                          }
1297 1285                  }
1298 1286          }
1299 1287  
1300 1288  
1301 1289          for (i = 0; i < sc->sc_intr_num; i++) {
1302 1290                  (void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
1303 1291          }
1304 1292  
1305 1293          for (i = 0; i < sc->sc_intr_num; i++)
1306 1294                  (void) ddi_intr_free(sc->sc_intr_htable[i]);
1307 1295  
1308      -        kmem_free(sc->sc_intr_htable,
1309      -            sizeof (ddi_intr_handle_t) * sc->sc_intr_num);
     1296 +        kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) *
     1297 +            sc->sc_intr_num);
1310 1298  
1311      -
1312 1299          /* After disabling interrupts, the config offset is non-MSI. */
1313 1300          sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1314 1301  }
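
The release path undoes MSI setup in reverse: unbind the vectors from the device, disable the interrupts, remove the handlers, and free the handles. A consumer's detach path might therefore look like the sketch below; virtio_stop_vq_intr() is assumed to be the per-queue quiesce routine from earlier in this module, and my_rx_vq/my_tx_vq are hypothetical virtqueue pointers:

        /* Sketch: quiesce queue interrupts, then release the vectors. */
        virtio_stop_vq_intr(my_rx_vq);  /* assumed per-vq helper */
        virtio_stop_vq_intr(my_tx_vq);
        virtio_release_ints(sc);
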
1315 1302  
1316 1303  /*
1317 1304   * Module linkage information for the kernel.
1318 1305   */
1319 1306  static struct modlmisc modlmisc = {
1320      -        &mod_miscops, /* Type of module */
     1307 +        &mod_miscops,   /* Type of module */
1321 1308          "VirtIO common library module",
1322 1309  };
1323 1310  
1324 1311  static struct modlinkage modlinkage = {
1325 1312          MODREV_1,
1326 1313          {
1327 1314                  (void *)&modlmisc,
1328 1315                  NULL
1329 1316          }
1330 1317  };
1331 1318  
1332 1319  int
1333 1320  _init(void)
1334 1321  {
1335 1322          return (mod_install(&modlinkage));
1336 1323  }
1337 1324  
1338 1325  int
1339 1326  _fini(void)
1340 1327  {
1341 1328          return (mod_remove(&modlinkage));
1342 1329  }
1343 1330  
1344 1331  int
1345 1332  _info(struct modinfo *modinfop)
1346 1333  {
1347 1334          return (mod_info(&modlinkage, modinfop));
1348 1335  }
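
Because this is a misc module, consumers pick it up through a module dependency rather than by linking against it directly; a consumer driver's Makefile would typically add something like "-dy -N misc/virtio" to its LDFLAGS (an assumption about the build rules, which are not part of this review).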
  