MFV: illumos-gate@9f16e2df28efab26216cf68e3841c0a460c5bb73
9790 buffer freed to wrong cache in virtio_register_intx
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Yuri Pankov <yuripv@yuripv.net>
Reviewed by: Andy Fiddaman <omnios@citrus-it.net>
Approved by: Garrett D'Amore <garrett@damore.org>
Author: Hans Rosenfeld <hans.rosenfeld@joyent.com>
re #13879 make libsqlite a real shared lib (tweaks)
  Fix rebuild after pull (remove files left in the way)
  Make sqlite.h SQLITE_VERSION more predictable.
OS-5 Integrate virtio drivers
    integration cleanup (copyrights, cddl 1.0)
port of illumos-3644
    3644 Add virtio-net support into the Illumos
    Reviewed by: Alexey Zaytsev <alexey.zaytsev@gmail.com>
    Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
    Reviewed by: David Hoppner <0xffea@gmail.com>
port of illumos-1562
    1562 Integrate the virtio core module
    Reviewed by: Dmitry Yusupov <Dmitry.Yusupov@nexenta.com>
    Reviewed by: Gordon Ross <gordon.w.ross@gmail.com>
    Approved by: Garrett D'Amore <garrett@damore.org>
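
Note on 9790: virtio_register_intx() allocated its handler container with
one size (the container header plus the vq handler array) but freed it on
the error path with a different size (the handler array alone, plus one
extra entry when a config handler was present). kmem_free() must be given
the exact size that was passed to the allocator, or the buffer is returned
to the wrong kmem cache; the fix below records the size in vhc_sz and
reuses it. A minimal sketch of the rule, with hypothetical structure names:

    struct elem { int e; };                          /* hypothetical */
    struct container { int n; struct elem ce[]; };   /* hypothetical */

    size_t sz = sizeof (struct container) + nelems * sizeof (struct elem);
    struct container *c = kmem_zalloc(sz, KM_SLEEP);
    /* ... */
    kmem_free(c, sz);    /* must match the allocation size exactly */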
    
      
    
          --- old/usr/src/uts/common/io/virtio/virtio.c
          +++ new/usr/src/uts/common/io/virtio/virtio.c
   1    1  /*
   2      - * CDDL HEADER START
        2 + * This file and its contents are supplied under the terms of the
        3 + * Common Development and Distribution License ("CDDL"), version 1.0.
        4 + * You may only use this file in accordance with the terms of version
        5 + * 1.0 of the CDDL.
   3    6   *
   4      - * The contents of this file are subject to the terms of the
   5      - * Common Development and Distribution License (the "License").
   6      - * You may not use this file except in compliance with the License.
   7      - *
   8      - * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9      - * or http://www.opensolaris.org/os/licensing.
  10      - * See the License for the specific language governing permissions
  11      - * and limitations under the License.
  12      - *
  13      - * When distributing Covered Code, include this CDDL HEADER in each
  14      - * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15      - * If applicable, add the following below this CDDL HEADER, with the
  16      - * fields enclosed by brackets "[]" replaced with your own identifying
  17      - * information: Portions Copyright [yyyy] [name of copyright owner]
  18      - *
  19      - * CDDL HEADER END
        7 + * A full copy of the text of the CDDL should have accompanied this
        8 + * source.  A copy of the CDDL is also available via the Internet at
        9 + * http://www.illumos.org/license/CDDL.
  20   10   */
  21   11  
  22   12  /*
  23   13   * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
  24   14   * Copyright 2012 Alexey Zaytsev <alexey.zaytsev@gmail.com>
  25   15   * Copyright (c) 2016 by Delphix. All rights reserved.
       16 + * Copyright 2017 Joyent, Inc.
  26   17   */
  27   18  
  28   19  /* Based on the NetBSD virtio driver by Minoura Makoto. */
  29   20  /*
  30   21   * Copyright (c) 2010 Minoura Makoto.
  31   22   * All rights reserved.
  32   23   *
  33   24   * Redistribution and use in source and binary forms, with or without
  34   25   * modification, are permitted provided that the following conditions
  35   26   * are met:
  36   27   * 1. Redistributions of source code must retain the above copyright
  37   28   *    notice, this list of conditions and the following disclaimer.
  38   29   * 2. Redistributions in binary form must reproduce the above copyright
  39   30   *    notice, this list of conditions and the following disclaimer in the
  40   31   *    documentation and/or other materials provided with the distribution.
  41   32   *
  42   33   * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  43   34   * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  44   35   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  45   36   * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  46   37   * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  47   38   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  48   39   * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  49   40   * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  50   41   * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  51   42   * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  52   43   *
  53   44   */
  54   45  
  55   46  #include <sys/conf.h>
  56   47  #include <sys/kmem.h>
  57   48  #include <sys/debug.h>
  58   49  #include <sys/modctl.h>
  59   50  #include <sys/autoconf.h>
  60   51  #include <sys/ddi_impldefs.h>
  61   52  #include <sys/ddi.h>
  62   53  #include <sys/sunddi.h>
  63   54  #include <sys/sunndi.h>
  64   55  #include <sys/avintr.h>
  65   56  #include <sys/spl.h>
  66   57  #include <sys/promif.h>
  67   58  #include <sys/list.h>
  68   59  #include <sys/bootconf.h>
  69   60  #include <sys/bootsvcs.h>
  70   61  #include <sys/sysmacros.h>
  71   62  #include <sys/pci.h>
  72   63  
  73   64  #include "virtiovar.h"
  74   65  #include "virtioreg.h"
  75   66  
  76   67  #define NDEVNAMES       (sizeof (virtio_device_name) / sizeof (char *))
  77   68  #define MINSEG_INDIRECT 2       /* use indirect if nsegs >= this value */
  78   69  #define VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1)) & \
  79   70              ~(VIRTIO_PAGE_SIZE-1))
  80   71  
  81   72  void
  82   73  virtio_set_status(struct virtio_softc *sc, unsigned int status)
  83   74  {
  84   75          int old = 0;
  85   76  
  86   77          if (status != 0) {
  87   78                  old = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
  88   79                      VIRTIO_CONFIG_DEVICE_STATUS));
  89   80          }
  90   81  
  91   82          ddi_put8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
  92   83              VIRTIO_CONFIG_DEVICE_STATUS), status | old);
  93   84  }
  94   85  
  95   86  /*
  96   87   * Negotiate features, save the result in sc->sc_features
  97   88   */
  98   89  uint32_t
  99   90  virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
 100   91  {
 101   92          uint32_t host_features;
 102   93          uint32_t features;
 103   94  
 104   95          host_features = ddi_get32(sc->sc_ioh,
 105   96              /* LINTED E_BAD_PTR_CAST_ALIGN */
 106   97              (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));
 107   98  
 108   99          dev_debug(sc->sc_dev, CE_NOTE, "host features: %x, guest features: %x",
 109  100              host_features, guest_features);
 110  101  
 111  102          features = host_features & guest_features;
 112  103          ddi_put32(sc->sc_ioh,
 113  104              /* LINTED E_BAD_PTR_CAST_ALIGN */
 114  105              (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
 115  106              features);
 116  107  
 117  108          sc->sc_features = features;
 118  109  
 119  110          return (host_features);
 120  111  }
 121  112  
 122  113  size_t
 123  114  virtio_show_features(uint32_t features, char *buf, size_t len)
 124  115  {
 125  116          char *orig_buf = buf;
 126  117          char *bufend = buf + len;
 127  118  
 128  119          /* LINTED E_PTRDIFF_OVERFLOW */
 129  120          buf += snprintf(buf, bufend - buf, "Generic ( ");
 130  121          if (features & VIRTIO_F_RING_INDIRECT_DESC)
 131  122                  /* LINTED E_PTRDIFF_OVERFLOW */
 132  123                  buf += snprintf(buf, bufend - buf, "INDIRECT_DESC ");
 133  124  
 134  125          /* LINTED E_PTRDIFF_OVERFLOW */
 135  126          buf += snprintf(buf, bufend - buf, ") ");
 136  127  
 137  128          /* LINTED E_PTRDIFF_OVERFLOW */
 138  129          return (buf - orig_buf);
 139  130  }
 140  131  
 141  132  boolean_t
 142  133  virtio_has_feature(struct virtio_softc *sc, uint32_t feature)
 143  134  {
 144  135          return (sc->sc_features & feature);
 145  136  }
 146  137  
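A driver typically pairs the two calls above during attach: it offers the
feature bits it supports to virtio_negotiate_features(), then tests the
negotiated result with virtio_has_feature(). A hedged sketch (the choice
of feature bit is illustrative, not part of this change):

    /* Offer indirect descriptor support; keep only what the host accepts. */
    (void) virtio_negotiate_features(sc, VIRTIO_F_RING_INDIRECT_DESC);

    if (virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC)) {
            /* safe to call virtio_alloc_vq() with indirect_num > 0 */
    }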
 147  138  /*
 148  139   * Device configuration registers.
 149  140   */
 150  141  uint8_t
 151  142  virtio_read_device_config_1(struct virtio_softc *sc, unsigned int index)
 152  143  {
 153  144          ASSERT(sc->sc_config_offset);
 154  145          return ddi_get8(sc->sc_ioh,
 155  146              (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 156  147  }
 157  148  
 158  149  uint16_t
 159  150  virtio_read_device_config_2(struct virtio_softc *sc, unsigned int index)
 160  151  {
 161  152          ASSERT(sc->sc_config_offset);
 162  153          return ddi_get16(sc->sc_ioh,
 163  154              /* LINTED E_BAD_PTR_CAST_ALIGN */
 164  155              (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 165  156  }
 166  157  
 167  158  uint32_t
 168  159  virtio_read_device_config_4(struct virtio_softc *sc, unsigned int index)
 169  160  {
 170  161          ASSERT(sc->sc_config_offset);
 171  162          return ddi_get32(sc->sc_ioh,
 172  163              /* LINTED E_BAD_PTR_CAST_ALIGN */
 173  164              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 174  165  }
 175  166  
 176  167  uint64_t
 177  168  virtio_read_device_config_8(struct virtio_softc *sc, unsigned int index)
 178  169  {
 179  170          uint64_t r;
 180  171  
 181  172          ASSERT(sc->sc_config_offset);
 182  173          r = ddi_get32(sc->sc_ioh,
 183  174              /* LINTED E_BAD_PTR_CAST_ALIGN */
 184  175              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
 185  176              index + sizeof (uint32_t)));
 186  177  
 187  178          r <<= 32;
 188  179  
 189  180          r += ddi_get32(sc->sc_ioh,
 190  181              /* LINTED E_BAD_PTR_CAST_ALIGN */
 191  182              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 192  183          return (r);
 193  184  }
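
Note that the 8-byte read above is composed of two 32-bit reads (high word
first), so it is not atomic with respect to a device updating the field
between the two accesses. Usage sketch, assuming a hypothetical device
whose config space holds a 64-bit capacity field at offset 0:

    /* Offset 0 is an assumption for illustration only. */
    uint64_t capacity = virtio_read_device_config_8(sc, 0);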
 194  185  
 195  186  void
 196  187  virtio_write_device_config_1(struct virtio_softc *sc, unsigned int index,
 197  188      uint8_t value)
 198  189  {
 199  190          ASSERT(sc->sc_config_offset);
 200  191          ddi_put8(sc->sc_ioh,
 201  192              (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 202  193  }
 203  194  
 204  195  void
 205  196  virtio_write_device_config_2(struct virtio_softc *sc, unsigned int index,
 206  197      uint16_t value)
 207  198  {
 208  199          ASSERT(sc->sc_config_offset);
 209  200          ddi_put16(sc->sc_ioh,
 210  201              /* LINTED E_BAD_PTR_CAST_ALIGN */
 211  202              (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 212  203  }
 213  204  
 214  205  void
 215  206  virtio_write_device_config_4(struct virtio_softc *sc, unsigned int index,
 216  207      uint32_t value)
 217  208  {
 218  209          ASSERT(sc->sc_config_offset);
 219  210          ddi_put32(sc->sc_ioh,
 220  211              /* LINTED E_BAD_PTR_CAST_ALIGN */
 221  212              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 222  213  }
 223  214  
 224  215  void
 225  216  virtio_write_device_config_8(struct virtio_softc *sc, unsigned int index,
 226  217      uint64_t value)
 227  218  {
 228  219          ASSERT(sc->sc_config_offset);
 229  220          ddi_put32(sc->sc_ioh,
 230  221              /* LINTED E_BAD_PTR_CAST_ALIGN */
 231  222              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
 232  223              value & 0xFFFFFFFF);
 233  224          ddi_put32(sc->sc_ioh,
 234  225              /* LINTED E_BAD_PTR_CAST_ALIGN */
 235  226              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
 236  227              index + sizeof (uint32_t)), value >> 32);
 237  228  }
 238  229  
 239  230  /*
 240  231   * Start/stop vq interrupt.  No guarantee.
 241  232   */
 242  233  void
 243  234  virtio_stop_vq_intr(struct virtqueue *vq)
 244  235  {
 245  236          vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 246  237  }
 247  238  
 248  239  void
 249  240  virtio_start_vq_intr(struct virtqueue *vq)
 250  241  {
 251  242          vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
 252  243  }
 253  244  
 254  245  static ddi_dma_attr_t virtio_vq_dma_attr = {
 255  246          DMA_ATTR_V0,            /* Version number */
 256  247          0,                      /* low address */
 257  248          0x00000FFFFFFFFFFF,     /* high address. Has to fit into 32 bits */
 258  249                                  /* after page-shifting */
 259  250          0xFFFFFFFF,             /* counter register max */
 260  251          VIRTIO_PAGE_SIZE,       /* page alignment required */
 261  252          0x3F,                   /* burst sizes: 1 - 32 */
 262  253          0x1,                    /* minimum transfer size */
 263  254          0xFFFFFFFF,             /* max transfer size */
 264  255          0xFFFFFFFF,             /* address register max */
 265  256          1,                      /* no scatter-gather */
 266  257          1,                      /* device operates on bytes */
 267  258          0,                      /* attr flag: set to 0 */
 268  259  };
 269  260  
 270  261  static ddi_dma_attr_t virtio_vq_indirect_dma_attr = {
 271  262          DMA_ATTR_V0,            /* Version number */
 272  263          0,                      /* low address */
 273  264          0xFFFFFFFFFFFFFFFF,     /* high address */
 274  265          0xFFFFFFFF,             /* counter register max */
 275  266          1,                      /* No specific alignment */
 276  267          0x3F,                   /* burst sizes: 1 - 32 */
 277  268          0x1,                    /* minimum transfer size */
 278  269          0xFFFFFFFF,             /* max transfer size */
 279  270          0xFFFFFFFF,             /* address register max */
 280  271          1,                      /* no scatter-gather */
 281  272          1,                      /* device operates on bytes */
 282  273          0,                      /* attr flag: set to 0 */
 283  274  };
 284  275  
 285  276  /* Same for direct and indirect descriptors. */
 286  277  static ddi_device_acc_attr_t virtio_vq_devattr = {
 287  278          DDI_DEVICE_ATTR_V0,
 288  279          DDI_NEVERSWAP_ACC,
 289  280          DDI_STORECACHING_OK_ACC,
 290  281          DDI_DEFAULT_ACC
 291  282  };
 292  283  
 293  284  static void
 294  285  virtio_free_indirect(struct vq_entry *entry)
 295  286  {
 296  287  
 297  288          (void) ddi_dma_unbind_handle(entry->qe_indirect_dma_handle);
 298  289          ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
 299  290          ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
 300  291  
 301  292          entry->qe_indirect_descs = NULL;
 302  293  }
 303  294  
 304  295  
 305  296  static int
 306  297  virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
 307  298  {
 308  299          int allocsize, num;
 309  300          size_t len;
 310  301          unsigned int ncookies;
 311  302          int ret;
 312  303  
 313  304          num = entry->qe_queue->vq_indirect_num;
 314  305          ASSERT(num > 1);
 315  306  
 316  307          allocsize = sizeof (struct vring_desc) * num;
 317  308  
 318  309          ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
 319  310              DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
 320  311          if (ret != DDI_SUCCESS) {
 321  312                  dev_err(sc->sc_dev, CE_WARN,
 322  313                      "Failed to allocate dma handle for indirect descriptors, "
 323  314                      "entry %d, vq %d", entry->qe_index,
 324  315                      entry->qe_queue->vq_index);
 325  316                  goto out_alloc_handle;
 326  317          }
 327  318  
 328  319          ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle, allocsize,
 329  320              &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 330  321              (caddr_t *)&entry->qe_indirect_descs, &len,
 331  322              &entry->qe_indirect_dma_acch);
 332  323          if (ret != DDI_SUCCESS) {
 333  324                  dev_err(sc->sc_dev, CE_WARN,
 334  325                      "Failed to allocate dma memory for indirect descriptors, "
 335  326                      "entry %d, vq %d,", entry->qe_index,
 336  327                      entry->qe_queue->vq_index);
 337  328                  goto out_alloc;
 338  329          }
 339  330  
 340  331          (void) memset(entry->qe_indirect_descs, 0xff, allocsize);
 341  332  
 342  333          ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
 343  334              (caddr_t)entry->qe_indirect_descs, len,
 344  335              DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 345  336              &entry->qe_indirect_dma_cookie, &ncookies);
 346  337          if (ret != DDI_DMA_MAPPED) {
 347  338                  dev_err(sc->sc_dev, CE_WARN,
 348  339                      "Failed to bind dma memory for indirect descriptors, "
 349  340                      "entry %d, vq %d", entry->qe_index,
 350  341                      entry->qe_queue->vq_index);
 351  342                  goto out_bind;
 352  343          }
 353  344  
 354  345          /* We asked for a single segment */
 355  346          ASSERT(ncookies == 1);
 356  347  
 357  348          return (0);
 358  349  
 359  350  out_bind:
 360  351          ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
 361  352  out_alloc:
 362  353          ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
 363  354  out_alloc_handle:
 364  355  
 365  356          return (ret);
 366  357  }
 367  358  
 368  359  /*
 369  360   * Initialize the vq structure.
 370  361   */
 371  362  static int
 372  363  virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
 373  364  {
 374  365          int ret;
 375  366          uint16_t i;
 376  367          int vq_size = vq->vq_num;
 377  368          int indirect_num = vq->vq_indirect_num;
 378  369  
 379  370          /* free slot management */
 380  371          list_create(&vq->vq_freelist, sizeof (struct vq_entry),
 381  372              offsetof(struct vq_entry, qe_list));
 382  373  
 383  374          for (i = 0; i < vq_size; i++) {
 384  375                  struct vq_entry *entry = &vq->vq_entries[i];
 385  376                  list_insert_tail(&vq->vq_freelist, entry);
 386  377                  entry->qe_index = i;
 387  378                  entry->qe_desc = &vq->vq_descs[i];
 388  379                  entry->qe_queue = vq;
 389  380  
 390  381                  if (indirect_num) {
 391  382                          ret = virtio_alloc_indirect(sc, entry);
 392  383                          if (ret)
 393  384                                  goto out_indirect;
 394  385                  }
 395  386          }
 396  387  
 397  388          mutex_init(&vq->vq_freelist_lock, "virtio-freelist", MUTEX_DRIVER,
 398  389              DDI_INTR_PRI(sc->sc_intr_prio));
 399  390          mutex_init(&vq->vq_avail_lock, "virtio-avail", MUTEX_DRIVER,
 400  391              DDI_INTR_PRI(sc->sc_intr_prio));
 401  392          mutex_init(&vq->vq_used_lock, "virtio-used", MUTEX_DRIVER,
 402  393              DDI_INTR_PRI(sc->sc_intr_prio));
 403  394  
 404  395          return (0);
 405  396  
 406  397  out_indirect:
 407  398          for (i = 0; i < vq_size; i++) {
 408  399                  struct vq_entry *entry = &vq->vq_entries[i];
 409  400                  if (entry->qe_indirect_descs)
 410  401                          virtio_free_indirect(entry);
 411  402          }
 412  403  
 413  404          return (ret);
 414  405  }
 415  406  
 416  407  /*
 417  408   * Allocate/free a vq.
 418  409   */
 419  410  struct virtqueue *
 420  411  virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
 421  412      unsigned int indirect_num, const char *name)
 422  413  {
 423  414          int vq_size, allocsize1, allocsize2, allocsize = 0;
 424  415          int ret;
 425  416          unsigned int ncookies;
 426  417          size_t len;
 427  418          struct virtqueue *vq;
 428  419  
 429  420          ddi_put16(sc->sc_ioh,
 430  421              /* LINTED E_BAD_PTR_CAST_ALIGN */
 431  422              (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
 432  423          vq_size = ddi_get16(sc->sc_ioh,
 433  424              /* LINTED E_BAD_PTR_CAST_ALIGN */
 434  425              (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
 435  426          if (vq_size == 0) {
 436  427                  dev_err(sc->sc_dev, CE_WARN,
  437  428                      "virtqueue doesn't exist, index %d for %s\n", index, name);
 438  429                  goto out;
 439  430          }
 440  431  
 441  432          vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);
 442  433  
 443  434          /* size 0 => use native vq size, good for receive queues. */
 444  435          if (size)
 445  436                  vq_size = MIN(vq_size, size);
 446  437  
 447  438          /* allocsize1: descriptor table + avail ring + pad */
 448  439          allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
 449  440              sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
 450  441          /* allocsize2: used ring + pad */
 451  442          allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
 452  443              sizeof (struct vring_used_elem) * vq_size);
 453  444  
 454  445          allocsize = allocsize1 + allocsize2;
 455  446  
 456  447          ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
 457  448              DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
 458  449          if (ret != DDI_SUCCESS) {
 459  450                  dev_err(sc->sc_dev, CE_WARN,
 460  451                      "Failed to allocate dma handle for vq %d", index);
 461  452                  goto out_alloc_handle;
 462  453          }
 463  454  
 464  455          ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
 465  456              &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 466  457              (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
 467  458          if (ret != DDI_SUCCESS) {
 468  459                  dev_err(sc->sc_dev, CE_WARN,
 469  460                      "Failed to allocate dma memory for vq %d", index);
 470  461                  goto out_alloc;
 471  462          }
 472  463  
 473  464          ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
 474  465              (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
 475  466              DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
 476  467          if (ret != DDI_DMA_MAPPED) {
 477  468                  dev_err(sc->sc_dev, CE_WARN,
 478  469                      "Failed to bind dma memory for vq %d", index);
 479  470                  goto out_bind;
 480  471          }
 481  472  
 482  473          /* We asked for a single segment */
 483  474          ASSERT(ncookies == 1);
  484  475          /* and page-aligned buffers. */
 485  476          ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);
 486  477  
 487  478          (void) memset(vq->vq_vaddr, 0, allocsize);
 488  479  
 489  480          /* Make sure all zeros hit the buffer before we point the host to it */
 490  481          membar_producer();
 491  482  
 492  483          /* set the vq address */
 493  484          ddi_put32(sc->sc_ioh,
 494  485              /* LINTED E_BAD_PTR_CAST_ALIGN */
 495  486              (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
 496  487              (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));
 497  488  
 498  489          /* remember addresses and offsets for later use */
 499  490          vq->vq_owner = sc;
 500  491          vq->vq_num = vq_size;
 501  492          vq->vq_index = index;
 502  493          vq->vq_descs = vq->vq_vaddr;
 503  494          vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
 504  495          vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
 505  496          vq->vq_usedoffset = allocsize1;
 506  497          vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);
 507  498  
 508  499          ASSERT(indirect_num == 0 ||
 509  500              virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
 510  501          vq->vq_indirect_num = indirect_num;
 511  502  
 512  503          /* free slot management */
 513  504          vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
 514  505              KM_SLEEP);
 515  506  
 516  507          ret = virtio_init_vq(sc, vq);
 517  508          if (ret)
 518  509                  goto out_init;
 519  510  
 520  511          dev_debug(sc->sc_dev, CE_NOTE,
 521  512              "Allocated %d entries for vq %d:%s (%d indirect descs)",
 522  513              vq_size, index, name, indirect_num * vq_size);
 523  514  
 524  515          return (vq);
 525  516  
 526  517  out_init:
 527  518          kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
 528  519          (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
 529  520  out_bind:
 530  521          ddi_dma_mem_free(&vq->vq_dma_acch);
 531  522  out_alloc:
 532  523          ddi_dma_free_handle(&vq->vq_dma_handle);
 533  524  out_alloc_handle:
 534  525          kmem_free(vq, sizeof (struct virtqueue));
 535  526  out:
 536  527          return (NULL);
 537  528  }
 538  529  
 539  530  void
 540  531  virtio_free_vq(struct virtqueue *vq)
 541  532  {
 542  533          struct virtio_softc *sc = vq->vq_owner;
 543  534          int i;
 544  535  
  545  536          /* tell the device that there's no virtqueue any longer */
 546  537          ddi_put16(sc->sc_ioh,
 547  538              /* LINTED E_BAD_PTR_CAST_ALIGN */
 548  539              (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
 549  540              vq->vq_index);
 550  541          ddi_put32(sc->sc_ioh,
 551  542              /* LINTED E_BAD_PTR_CAST_ALIGN */
 552  543              (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);
 553  544  
 554  545          /* Free the indirect descriptors, if any. */
 555  546          for (i = 0; i < vq->vq_num; i++) {
 556  547                  struct vq_entry *entry = &vq->vq_entries[i];
 557  548                  if (entry->qe_indirect_descs)
 558  549                          virtio_free_indirect(entry);
 559  550          }
 560  551  
 561  552          kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);
 562  553  
 563  554          (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
 564  555          ddi_dma_mem_free(&vq->vq_dma_acch);
 565  556          ddi_dma_free_handle(&vq->vq_dma_handle);
 566  557  
 567  558          mutex_destroy(&vq->vq_used_lock);
 568  559          mutex_destroy(&vq->vq_avail_lock);
 569  560          mutex_destroy(&vq->vq_freelist_lock);
 570  561  
 571  562          kmem_free(vq, sizeof (struct virtqueue));
 572  563  }
 573  564  
 574  565  /*
 575  566   * Free descriptor management.
 576  567   */
 577  568  struct vq_entry *
 578  569  vq_alloc_entry(struct virtqueue *vq)
 579  570  {
 580  571          struct vq_entry *qe;
 581  572  
 582  573          mutex_enter(&vq->vq_freelist_lock);
 583  574          if (list_is_empty(&vq->vq_freelist)) {
 584  575                  mutex_exit(&vq->vq_freelist_lock);
 585  576                  return (NULL);
 586  577          }
 587  578          qe = list_remove_head(&vq->vq_freelist);
 588  579  
 589  580          ASSERT(vq->vq_used_entries >= 0);
 590  581          vq->vq_used_entries++;
 591  582  
 592  583          mutex_exit(&vq->vq_freelist_lock);
 593  584  
 594  585          qe->qe_next = NULL;
 595  586          qe->qe_indirect_next = 0;
 596  587          (void) memset(qe->qe_desc, 0, sizeof (struct vring_desc));
 597  588  
 598  589          return (qe);
 599  590  }
 600  591  
 601  592  void
 602  593  vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
 603  594  {
 604  595          mutex_enter(&vq->vq_freelist_lock);
 605  596  
 606  597          list_insert_head(&vq->vq_freelist, qe);
 607  598          vq->vq_used_entries--;
 608  599          ASSERT(vq->vq_used_entries >= 0);
 609  600          mutex_exit(&vq->vq_freelist_lock);
 610  601  }
 611  602  
 612  603  /*
 613  604   * We (intentionally) don't have a global vq mutex, so you are
  614  605   * responsible for external locking to avoid allocating/freeing any
 615  606   * entries before using the returned value. Have fun.
 616  607   */
 617  608  uint_t
 618  609  vq_num_used(struct virtqueue *vq)
 619  610  {
 620  611          /* vq->vq_freelist_lock would not help here. */
 621  612          return (vq->vq_used_entries);
 622  613  }
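
Put differently, vq_num_used() is only meaningful while the caller prevents
concurrent vq_alloc_entry()/vq_free_entry() calls. A sketch of the contract,
where the driver-private mutex is a hypothetical stand-in for whatever
external locking the caller provides:

    /* drv and drv_lock are hypothetical driver-private state, and the */
    /* same lock is assumed held around all entry alloc/free calls.    */
    mutex_enter(&drv->drv_lock);
    if (vq_num_used(vq) == 0) {
            /* no descriptors outstanding; safe to quiesce the queue */
    }
    mutex_exit(&drv->drv_lock);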
 623  614  
 624  615  static inline void
 625  616  virtio_ve_set_desc(struct vring_desc *desc, uint64_t paddr, uint32_t len,
 626  617      boolean_t write)
 627  618  {
 628  619          desc->addr = paddr;
 629  620          desc->len = len;
 630  621          desc->next = 0;
 631  622          desc->flags = 0;
 632  623  
 633  624          /* 'write' - from the driver's point of view */
 634  625          if (!write)
 635  626                  desc->flags = VRING_DESC_F_WRITE;
 636  627  }
 637  628  
 638  629  void
 639  630  virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 640  631      boolean_t write)
 641  632  {
 642  633          virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
 643  634  }
 644  635  
 645  636  unsigned int
 646  637  virtio_ve_indirect_available(struct vq_entry *qe)
 647  638  {
 648  639          return (qe->qe_queue->vq_indirect_num - qe->qe_indirect_next);
 649  640  }
 650  641  
 651  642  void
 652  643  virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 653  644      boolean_t write)
 654  645  {
 655  646          struct vring_desc *indirect_desc;
 656  647  
 657  648          ASSERT(qe->qe_queue->vq_indirect_num);
 658  649          ASSERT(qe->qe_indirect_next < qe->qe_queue->vq_indirect_num);
 659  650  
 660  651          indirect_desc = &qe->qe_indirect_descs[qe->qe_indirect_next];
 661  652          virtio_ve_set_desc(indirect_desc, paddr, len, write);
 662  653          qe->qe_indirect_next++;
 663  654  }
 664  655  
 665  656  void
 666  657  virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
 667  658      ddi_dma_cookie_t dma_cookie, unsigned int ncookies, boolean_t write)
 668  659  {
 669  660          int i;
 670  661  
 671  662          for (i = 0; i < ncookies; i++) {
 672  663                  virtio_ve_add_indirect_buf(qe, dma_cookie.dmac_laddress,
 673  664                      dma_cookie.dmac_size, write);
 674  665                  ddi_dma_nextcookie(dma_handle, &dma_cookie);
 675  666          }
 676  667  }
 677  668  
 678  669  void
 679  670  virtio_sync_vq(struct virtqueue *vq)
 680  671  {
 681  672          struct virtio_softc *vsc = vq->vq_owner;
 682  673  
 683  674          /* Make sure the avail ring update hit the buffer */
 684  675          membar_producer();
 685  676  
 686  677          vq->vq_avail->idx = vq->vq_avail_idx;
 687  678  
 688  679          /* Make sure the avail idx update hits the buffer */
 689  680          membar_producer();
 690  681  
 691  682          /* Make sure we see the flags update */
 692  683          membar_consumer();
 693  684  
 694  685          if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
 695  686                  ddi_put16(vsc->sc_ioh,
 696  687                      /* LINTED E_BAD_PTR_CAST_ALIGN */
 697  688                      (uint16_t *)(vsc->sc_io_addr +
 698  689                      VIRTIO_CONFIG_QUEUE_NOTIFY),
 699  690                      vq->vq_index);
 700  691          }
 701  692  }
 702  693  
 703  694  void
 704  695  virtio_push_chain(struct vq_entry *qe, boolean_t sync)
 705  696  {
 706  697          struct virtqueue *vq = qe->qe_queue;
 707  698          struct vq_entry *head = qe;
 708  699          struct vring_desc *desc;
 709  700          int idx;
 710  701  
 711  702          ASSERT(qe);
 712  703  
 713  704          /*
 714  705           * Bind the descs together, paddr and len should be already
 715  706           * set with virtio_ve_set
 716  707           */
 717  708          do {
 718  709                  /* Bind the indirect descriptors */
 719  710                  if (qe->qe_indirect_next > 1) {
 720  711                          uint16_t i = 0;
 721  712  
 722  713                          /*
 723  714                           * Set the pointer/flags to the
 724  715                           * first indirect descriptor
 725  716                           */
 726  717                          virtio_ve_set_desc(qe->qe_desc,
 727  718                              qe->qe_indirect_dma_cookie.dmac_laddress,
 728  719                              sizeof (struct vring_desc) * qe->qe_indirect_next,
 729  720                              B_FALSE);
 730  721                          qe->qe_desc->flags |= VRING_DESC_F_INDIRECT;
 731  722  
 732  723                          /* For all but the last one, add the next index/flag */
 733  724                          do {
 734  725                                  desc = &qe->qe_indirect_descs[i];
 735  726                                  i++;
 736  727  
 737  728                                  desc->flags |= VRING_DESC_F_NEXT;
 738  729                                  desc->next = i;
 739  730                          } while (i < qe->qe_indirect_next - 1);
 740  731  
 741  732                  }
 742  733  
 743  734                  if (qe->qe_next) {
 744  735                          qe->qe_desc->flags |= VRING_DESC_F_NEXT;
 745  736                          qe->qe_desc->next = qe->qe_next->qe_index;
 746  737                  }
 747  738  
 748  739                  qe = qe->qe_next;
 749  740          } while (qe);
 750  741  
 751  742          mutex_enter(&vq->vq_avail_lock);
 752  743          idx = vq->vq_avail_idx;
 753  744          vq->vq_avail_idx++;
 754  745  
 755  746          /* Make sure the bits hit the descriptor(s) */
 756  747          membar_producer();
 757  748          vq->vq_avail->ring[idx % vq->vq_num] = head->qe_index;
 758  749  
 759  750          /* Notify the device, if needed. */
 760  751          if (sync)
 761  752                  virtio_sync_vq(vq);
 762  753  
 763  754          mutex_exit(&vq->vq_avail_lock);
 764  755  }
 765  756  
 766  757  /*
 767  758   * Get a chain of descriptors from the used ring, if one is available.
 768  759   */
 769  760  struct vq_entry *
 770  761  virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
 771  762  {
 772  763          struct vq_entry *head;
 773  764          int slot;
 774  765          int usedidx;
 775  766  
 776  767          mutex_enter(&vq->vq_used_lock);
 777  768  
 778  769          /* No used entries? Bye. */
 779  770          if (vq->vq_used_idx == vq->vq_used->idx) {
 780  771                  mutex_exit(&vq->vq_used_lock);
 781  772                  return (NULL);
 782  773          }
 783  774  
 784  775          usedidx = vq->vq_used_idx;
 785  776          vq->vq_used_idx++;
 786  777          mutex_exit(&vq->vq_used_lock);
 787  778  
 788  779          usedidx %= vq->vq_num;
 789  780  
 790  781          /* Make sure we do the next step _after_ checking the idx. */
 791  782          membar_consumer();
 792  783  
 793  784          slot = vq->vq_used->ring[usedidx].id;
 794  785          *len = vq->vq_used->ring[usedidx].len;
 795  786  
 796  787          head = &vq->vq_entries[slot];
 797  788  
 798  789          return (head);
 799  790  }
 800  791  
 801  792  void
 802  793  virtio_free_chain(struct vq_entry *qe)
 803  794  {
 804  795          struct vq_entry *tmp;
 805  796          struct virtqueue *vq = qe->qe_queue;
 806  797  
 807  798          ASSERT(qe);
 808  799  
 809  800          do {
 810  801                  ASSERT(qe->qe_queue == vq);
 811  802                  tmp = qe->qe_next;
 812  803                  vq_free_entry(vq, qe);
 813  804                  qe = tmp;
 814  805          } while (tmp != NULL);
 815  806  }
 816  807  
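Taken together, these routines form the descriptor life cycle: allocate an
entry with vq_alloc_entry(), describe the buffer with virtio_ve_set(),
publish it with virtio_push_chain(), and on completion reclaim it with
virtio_pull_chain()/virtio_free_chain(). A sketch of a typical vq interrupt
handler, where process_buffer() is hypothetical:

    uint32_t len;
    struct vq_entry *qe;

    while ((qe = virtio_pull_chain(vq, &len)) != NULL) {
            process_buffer(qe, len);    /* hypothetical completion work */
            virtio_free_chain(qe);
    }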
 817  808  void
 818  809  virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
 819  810  {
 820  811          first->qe_next = second;
 821  812  }
 822  813  
 823  814  static int
 824  815  virtio_register_msi(struct virtio_softc *sc,
 825  816      struct virtio_int_handler *config_handler,
 826  817      struct virtio_int_handler vq_handlers[], int intr_types)
 827  818  {
 828  819          int count, actual;
 829  820          int int_type;
 830  821          int i;
 831  822          int handler_count;
 832  823          int ret;
 833  824  
  834  825          /* If both MSI and MSI-X are reported, prefer MSI-X. */
 835  826          int_type = DDI_INTR_TYPE_MSI;
 836  827          if (intr_types & DDI_INTR_TYPE_MSIX)
 837  828                  int_type = DDI_INTR_TYPE_MSIX;
 838  829  
 839  830          /* Walk the handler table to get the number of handlers. */
 840  831          for (handler_count = 0;
 841  832              vq_handlers && vq_handlers[handler_count].vh_func;
 842  833              handler_count++)
 843  834                  ;
 844  835  
 845  836          /* +1 if there is a config change handler. */
 846  837          if (config_handler != NULL)
 847  838                  handler_count++;
 848  839  
 849  840          /* Number of MSIs supported by the device. */
 850  841          ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
 851  842          if (ret != DDI_SUCCESS) {
 852  843                  dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
 853  844                  return (ret);
 854  845          }
 855  846  
 856  847          /*
  857  848           * Those who try to register more handlers than the device
 858  849           * supports shall suffer.
 859  850           */
 860  851          ASSERT(handler_count <= count);
 861  852  
 862  853          sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t) *
 863  854              handler_count, KM_SLEEP);
 864  855  
 865  856          ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
 866  857              handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
 867  858          if (ret != DDI_SUCCESS) {
 868  859                  dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
 869  860                  goto out_msi_alloc;
 870  861          }
 871  862  
 872  863          if (actual != handler_count) {
 873  864                  dev_err(sc->sc_dev, CE_WARN,
 874  865                      "Not enough MSI available: need %d, available %d",
 875  866                      handler_count, actual);
 876  867                  goto out_msi_available;
 877  868          }
 878  869  
 879  870          sc->sc_intr_num = handler_count;
 880  871          sc->sc_intr_config = B_FALSE;
 881  872          if (config_handler != NULL) {
 882  873                  sc->sc_intr_config = B_TRUE;
 883  874          }
 884  875  
  885  876          /* Assume they are all the same priority */
 886  877          ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
 887  878          if (ret != DDI_SUCCESS) {
 888  879                  dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
 889  880                  goto out_msi_prio;
 890  881          }
 891  882  
 892  883          /* Add the vq handlers */
 893  884          for (i = 0; vq_handlers[i].vh_func; i++) {
 894  885                  ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 895  886                      vq_handlers[i].vh_func, sc, vq_handlers[i].vh_priv);
 896  887                  if (ret != DDI_SUCCESS) {
 897  888                          dev_err(sc->sc_dev, CE_WARN,
 898  889                              "ddi_intr_add_handler failed");
 899  890                          /* Remove the handlers that succeeded. */
 900  891                          while (--i >= 0) {
 901  892                                  (void) ddi_intr_remove_handler(
 902  893                                      sc->sc_intr_htable[i]);
 903  894                          }
 904  895                          goto out_add_handlers;
 905  896                  }
 906  897          }
 907  898  
 908  899          /* Don't forget the config handler */
 909  900          if (config_handler != NULL) {
 910  901                  ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 911  902                      config_handler->vh_func, sc, config_handler->vh_priv);
 912  903                  if (ret != DDI_SUCCESS) {
 913  904                          dev_err(sc->sc_dev, CE_WARN,
 914  905                              "ddi_intr_add_handler failed");
 915  906                          /* Remove the handlers that succeeded. */
 916  907                          while (--i >= 0) {
 917  908                                  (void) ddi_intr_remove_handler(
 918  909                                      sc->sc_intr_htable[i]);
 919  910                          }
 920  911                          goto out_add_handlers;
 921  912                  }
 922  913          }
 923  914  
 924  915          ret = ddi_intr_get_cap(sc->sc_intr_htable[0], &sc->sc_intr_cap);
 925  916          if (ret == DDI_SUCCESS) {
 926  917                  sc->sc_int_type = int_type;
 927  918                  return (DDI_SUCCESS);
 928  919          }
 929  920  
 930  921  out_add_handlers:
 931  922  out_msi_prio:
 932  923  out_msi_available:
 933  924          for (i = 0; i < actual; i++)
 934  925                  (void) ddi_intr_free(sc->sc_intr_htable[i]);
 935  926  out_msi_alloc:
 936  927          kmem_free(sc->sc_intr_htable,
 937  928              sizeof (ddi_intr_handle_t) * handler_count);
 938  929  
 939  930          return (ret);
 940  931  }
 941  932  
 942  933  struct virtio_handler_container {
 943  934          int nhandlers;
 944  935          struct virtio_int_handler config_handler;
 945  936          struct virtio_int_handler vq_handlers[];
 946  937  };
 947  938  
 948  939  uint_t
 949  940  virtio_intx_dispatch(caddr_t arg1, caddr_t arg2)
 950  941  {
 951  942          struct virtio_softc *sc = (void *)arg1;
 952  943          struct virtio_handler_container *vhc = (void *)arg2;
 953  944          uint8_t isr_status;
 954  945          int i;
 955  946  
 956  947          isr_status = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
 957  948              VIRTIO_CONFIG_ISR_STATUS));
 958  949  
 959  950          if (!isr_status)
 960  951                  return (DDI_INTR_UNCLAIMED);
 961  952  
 962  953          if ((isr_status & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
 963  954              vhc->config_handler.vh_func) {
 964  955                  vhc->config_handler.vh_func((void *)sc,
 965  956                      vhc->config_handler.vh_priv);
 966  957          }
 967  958  
 968  959          /* Notify all handlers */
 969  960          for (i = 0; i < vhc->nhandlers; i++) {
 970  961                  vhc->vq_handlers[i].vh_func((void *)sc,
 971  962                      vhc->vq_handlers[i].vh_priv);
 972  963          }
 973  964  
 974  965          return (DDI_INTR_CLAIMED);
 975  966  }
 976  967  
  
  
 977  968  /*
 978  969   * config_handler and vq_handlers may be allocated on stack.
  979  970   * Take precautions not to lose them.
 980  971   */
 981  972  static int
 982  973  virtio_register_intx(struct virtio_softc *sc,
 983  974      struct virtio_int_handler *config_handler,
 984  975      struct virtio_int_handler vq_handlers[])
 985  976  {
 986  977          int vq_handler_count;
 987      -        int config_handler_count = 0;
 988  978          int actual;
 989  979          struct virtio_handler_container *vhc;
      980 +        size_t vhc_sz;
 990  981          int ret = DDI_FAILURE;
 991  982  
 992  983          /* Walk the handler table to get the number of handlers. */
 993  984          for (vq_handler_count = 0;
 994  985              vq_handlers && vq_handlers[vq_handler_count].vh_func;
 995  986              vq_handler_count++)
 996  987                  ;
 997  988  
 998      -        if (config_handler != NULL)
 999      -                config_handler_count = 1;
      989 +        vhc_sz = sizeof (struct virtio_handler_container) +
      990 +            sizeof (struct virtio_int_handler) * vq_handler_count;
      991 +        vhc = kmem_zalloc(vhc_sz, KM_SLEEP);
1000  992  
1001      -        vhc = kmem_zalloc(sizeof (struct virtio_handler_container) +
1002      -            sizeof (struct virtio_int_handler) * vq_handler_count, KM_SLEEP);
1003      -
1004  993          vhc->nhandlers = vq_handler_count;
1005  994          (void) memcpy(vhc->vq_handlers, vq_handlers,
1006  995              sizeof (struct virtio_int_handler) * vq_handler_count);
1007  996  
1008  997          if (config_handler != NULL) {
1009  998                  (void) memcpy(&vhc->config_handler, config_handler,
1010  999                      sizeof (struct virtio_int_handler));
1011 1000          }
1012 1001  
1013 1002          /* Just a single entry for a single interrupt. */
1014 1003          sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
1015 1004  
1016 1005          ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
1017 1006              DDI_INTR_TYPE_FIXED, 0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
1018 1007          if (ret != DDI_SUCCESS) {
1019 1008                  dev_err(sc->sc_dev, CE_WARN,
1020 1009                      "Failed to allocate a fixed interrupt: %d", ret);
1021 1010                  goto out_int_alloc;
1022 1011          }
1023 1012  
1024 1013          ASSERT(actual == 1);
1025 1014          sc->sc_intr_num = 1;
1026 1015  
1027 1016          ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
1028 1017          if (ret != DDI_SUCCESS) {
1029 1018                  dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
1030 1019                  goto out_prio;
1031 1020          }
1032 1021  
1033 1022          ret = ddi_intr_add_handler(sc->sc_intr_htable[0],
1034 1023              virtio_intx_dispatch, sc, vhc);
1035 1024          if (ret != DDI_SUCCESS) {
1036 1025                  dev_err(sc->sc_dev, CE_WARN, "ddi_intr_add_handler failed");
1037 1026                  goto out_add_handlers;
1038 1027          }
  
  
1039 1028  
1040 1029          sc->sc_int_type = DDI_INTR_TYPE_FIXED;
1041 1030  
1042 1031          return (DDI_SUCCESS);
1043 1032  
1044 1033  out_add_handlers:
1045 1034  out_prio:
1046 1035          (void) ddi_intr_free(sc->sc_intr_htable[0]);
1047 1036  out_int_alloc:
1048 1037          kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
1049      -        kmem_free(vhc, sizeof (struct virtio_int_handler) *
1050      -            (vq_handler_count + config_handler_count));
     1038 +        kmem_free(vhc, vhc_sz);
1051 1039          return (ret);
1052 1040  }
1053 1041  
1054 1042  /*
1055 1043   * We find out if we support MSI during this, and the register layout
 1056 1044   * depends on the MSI (doh). Don't access the device-specific bits in
1057 1045   * BAR 0 before calling it!
1058 1046   */
1059 1047  int
1060 1048  virtio_register_ints(struct virtio_softc *sc,
1061 1049      struct virtio_int_handler *config_handler,
1062 1050      struct virtio_int_handler vq_handlers[])
1063 1051  {
1064 1052          int ret;
1065 1053          int intr_types;
1066 1054  
1067 1055          /* Default offset until MSI-X is enabled, if ever. */
1068 1056          sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX;
1069 1057  
1070 1058          /* Determine which types of interrupts are supported */
1071 1059          ret = ddi_intr_get_supported_types(sc->sc_dev, &intr_types);
1072 1060          if (ret != DDI_SUCCESS) {
1073 1061                  dev_err(sc->sc_dev, CE_WARN, "Can't get supported int types");
1074 1062                  goto out_inttype;
1075 1063          }
1076 1064  
 1077 1065          /* If we have MSIs, let's use them. */
1078 1066          if (intr_types & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
1079 1067                  ret = virtio_register_msi(sc, config_handler,
1080 1068                      vq_handlers, intr_types);
1081 1069                  if (!ret)
1082 1070                          return (0);
1083 1071          }
1084 1072  
1085 1073          /* Fall back to old-fashioned interrupts. */
1086 1074          if (intr_types & DDI_INTR_TYPE_FIXED) {
1087 1075                  dev_debug(sc->sc_dev, CE_WARN,
1088 1076                      "Using legacy interrupts");
1089 1077  
1090 1078                  return (virtio_register_intx(sc, config_handler, vq_handlers));
1091 1079          }
1092 1080  
1093 1081          dev_err(sc->sc_dev, CE_WARN,
1094 1082              "MSI failed and fixed interrupts not supported. Giving up.");
1095 1083          ret = DDI_FAILURE;
1096 1084  
1097 1085  out_inttype:
1098 1086          return (ret);
1099 1087  }
1100 1088  
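The vq_handlers[] arrays passed here are NULL-terminated, and (per the
comment before virtio_register_intx()) may live on the caller's stack,
since both registration paths copy or register the entries rather than
keep the array pointer. An attach-time sketch with hypothetical handler
functions of the usual uint_t (*)(caddr_t, caddr_t) shape:

    static uint_t my_rx_intr(caddr_t, caddr_t);      /* hypothetical */
    static uint_t my_tx_intr(caddr_t, caddr_t);      /* hypothetical */

    struct virtio_int_handler vq_handlers[] = {
            { .vh_func = my_rx_intr, .vh_priv = (void *)sc },
            { .vh_func = my_tx_intr, .vh_priv = (void *)sc },
            { .vh_func = NULL },
    };

    if (virtio_register_ints(sc, NULL, vq_handlers) != DDI_SUCCESS)
            goto fail;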
1101 1089  static int
1102 1090  virtio_enable_msi(struct virtio_softc *sc)
1103 1091  {
1104 1092          int ret, i;
1105 1093          int vq_handler_count = sc->sc_intr_num;
1106 1094  
 1107 1095          /* Number of handlers, not counting the config. */
1108 1096          if (sc->sc_intr_config)
1109 1097                  vq_handler_count--;
1110 1098  
1111 1099          /* Enable the interrupts. Either the whole block, or one by one. */
1112 1100          if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1113 1101                  ret = ddi_intr_block_enable(sc->sc_intr_htable,
1114 1102                      sc->sc_intr_num);
1115 1103                  if (ret != DDI_SUCCESS) {
1116 1104                          dev_err(sc->sc_dev, CE_WARN,
1117 1105                              "Failed to enable MSI, falling back to INTx");
1118 1106                          goto out_enable;
1119 1107                  }
1120 1108          } else {
1121 1109                  for (i = 0; i < sc->sc_intr_num; i++) {
1122 1110                          ret = ddi_intr_enable(sc->sc_intr_htable[i]);
1123 1111                          if (ret != DDI_SUCCESS) {
1124 1112                                  dev_err(sc->sc_dev, CE_WARN,
1125 1113                                      "Failed to enable MSI %d, "
1126 1114                                      "falling back to INTx", i);
1127 1115  
1128 1116                                  while (--i >= 0) {
1129 1117                                          (void) ddi_intr_disable(
1130 1118                                              sc->sc_intr_htable[i]);
1131 1119                                  }
1132 1120                                  goto out_enable;
1133 1121                          }
1134 1122                  }
1135 1123          }
1136 1124  
1137 1125          /* Bind the allocated MSI to the queues and config */
1138 1126          for (i = 0; i < vq_handler_count; i++) {
1139 1127                  int check;
1140 1128  
1141 1129                  ddi_put16(sc->sc_ioh,
1142 1130                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1143 1131                      (uint16_t *)(sc->sc_io_addr +
1144 1132                      VIRTIO_CONFIG_QUEUE_SELECT), i);
1145 1133  
1146 1134                  ddi_put16(sc->sc_ioh,
1147 1135                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1148 1136                      (uint16_t *)(sc->sc_io_addr +
1149 1137                      VIRTIO_CONFIG_QUEUE_VECTOR), i);
1150 1138  
1151 1139                  check = ddi_get16(sc->sc_ioh,
1152 1140                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1153 1141                      (uint16_t *)(sc->sc_io_addr +
1154 1142                      VIRTIO_CONFIG_QUEUE_VECTOR));
1155 1143                  if (check != i) {
1156 1144                          dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1157 1145                              "for VQ %d, MSI %d. Check = %x", i, i, check);
1158 1146                          ret = ENODEV;
1159 1147                          goto out_bind;
1160 1148                  }
1161 1149          }
1162 1150  
1163 1151          if (sc->sc_intr_config) {
1164 1152                  int check;
1165 1153  
1166 1154                  ddi_put16(sc->sc_ioh,
1167 1155                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1168 1156                      (uint16_t *)(sc->sc_io_addr +
1169 1157                      VIRTIO_CONFIG_CONFIG_VECTOR), i);
1170 1158  
1171 1159                  check = ddi_get16(sc->sc_ioh,
1172 1160                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1173 1161                      (uint16_t *)(sc->sc_io_addr +
1174 1162                      VIRTIO_CONFIG_CONFIG_VECTOR));
1175 1163                  if (check != i) {
1176 1164                          dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1177 1165                              "for Config updates, MSI %d", i);
1178 1166                          ret = ENODEV;
1179 1167                          goto out_bind;
1180 1168                  }
1181 1169          }
1182 1170  
1183 1171          /* Configuration offset depends on whether MSI-X is used. */
1184 1172          if (sc->sc_int_type == DDI_INTR_TYPE_MSIX)
1185 1173                  sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSIX;
1186 1174          else
1187 1175                  ASSERT(sc->sc_int_type == DDI_INTR_TYPE_MSI);
1188 1176  
1189 1177          return (DDI_SUCCESS);
1190 1178  
1191 1179  out_bind:
1192 1180          /* Unbind the vqs */
1193 1181          for (i = 0; i < vq_handler_count - 1; i++) {
1194 1182                  ddi_put16(sc->sc_ioh,
1195 1183                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1196 1184                      (uint16_t *)(sc->sc_io_addr +
1197 1185                      VIRTIO_CONFIG_QUEUE_SELECT), i);
1198 1186  
1199 1187                  ddi_put16(sc->sc_ioh,
1200 1188                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1201 1189                      (uint16_t *)(sc->sc_io_addr +
1202 1190                      VIRTIO_CONFIG_QUEUE_VECTOR),
1203 1191                      VIRTIO_MSI_NO_VECTOR);
1204 1192          }
1205 1193          /* And the config */
1206 1194          /* LINTED E_BAD_PTR_CAST_ALIGN */
1207 1195          ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1208 1196              VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);
1209 1197  
1210 1198          /* Disable the interrupts. Either the whole block, or one by one. */
1211 1199          if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1212 1200                  ret = ddi_intr_block_disable(sc->sc_intr_htable,
1213 1201                      sc->sc_intr_num);
1214 1202                  if (ret != DDI_SUCCESS) {
1215 1203                          dev_err(sc->sc_dev, CE_WARN,
1216 1204                              "Failed to disable MSIs, won't be able to "
1217 1205                              "reuse next time");
1218 1206                  }
1219 1207          } else {
1220 1208                  for (i = 0; i < sc->sc_intr_num; i++) {
1221 1209                          ret = ddi_intr_disable(sc->sc_intr_htable[i]);
1222 1210                          if (ret != DDI_SUCCESS) {
1223 1211                                  dev_err(sc->sc_dev, CE_WARN,
1224 1212                                      "Failed to disable interrupt %d, "
1225 1213                                      "won't be able to reuse", i);
1226 1214                          }
1227 1215                  }
1228 1216          }
1229 1217  
1230 1218          ret = DDI_FAILURE;
1231 1219  
1232 1220  out_enable:
1233 1221          return (ret);
1234 1222  }
1235 1223  
1236 1224  static int
1237 1225  virtio_enable_intx(struct virtio_softc *sc)
1238 1226  {
1239 1227          int ret;
1240 1228  
1241 1229          ret = ddi_intr_enable(sc->sc_intr_htable[0]);
1242 1230          if (ret != DDI_SUCCESS) {
1243 1231                  dev_err(sc->sc_dev, CE_WARN,
1244 1232                      "Failed to enable interrupt: %d", ret);
1245 1233          }
1246 1234  
1247 1235          return (ret);
1248 1236  }
1249 1237  
1250 1238  /*
1251 1239   * We can't enable/disable individual handlers in the INTx case so do
 1252 1240   * the whole bunch even in the MSI case.
1253 1241   */
1254 1242  int
1255 1243  virtio_enable_ints(struct virtio_softc *sc)
1256 1244  {
1257 1245  
1258 1246          ASSERT(sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX);
1259 1247  
1260 1248          /* See if we are using MSI. */
1261 1249          if (sc->sc_int_type == DDI_INTR_TYPE_MSIX ||
1262 1250              sc->sc_int_type == DDI_INTR_TYPE_MSI)
1263 1251                  return (virtio_enable_msi(sc));
1264 1252  
1265 1253          ASSERT(sc->sc_int_type == DDI_INTR_TYPE_FIXED);
1266 1254          return (virtio_enable_intx(sc));
1267 1255  }
1268 1256  
1269 1257  void
1270 1258  virtio_release_ints(struct virtio_softc *sc)
1271 1259  {
1272 1260          int i;
1273 1261          int ret;
1274 1262  
1275 1263          /* We were running with MSI, unbind them. */
1276 1264          if (sc->sc_int_type == DDI_INTR_TYPE_MSIX ||
1277 1265              sc->sc_int_type == DDI_INTR_TYPE_MSI) {
1278 1266                  /* Unbind all vqs */
1279 1267                  for (i = 0; i < sc->sc_nvqs; i++) {
1280 1268                          ddi_put16(sc->sc_ioh,
1281 1269                              /* LINTED E_BAD_PTR_CAST_ALIGN */
1282 1270                              (uint16_t *)(sc->sc_io_addr +
1283 1271                              VIRTIO_CONFIG_QUEUE_SELECT), i);
1284 1272  
1285 1273                          ddi_put16(sc->sc_ioh,
1286 1274                              /* LINTED E_BAD_PTR_CAST_ALIGN */
1287 1275                              (uint16_t *)(sc->sc_io_addr +
1288 1276                              VIRTIO_CONFIG_QUEUE_VECTOR),
1289 1277                              VIRTIO_MSI_NO_VECTOR);
1290 1278                  }
1291 1279                  /* And the config */
1292 1280                  /* LINTED E_BAD_PTR_CAST_ALIGN */
1293 1281                  ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1294 1282                      VIRTIO_CONFIG_CONFIG_VECTOR),
1295 1283                      VIRTIO_MSI_NO_VECTOR);
1296 1284  
1297 1285          }
1298 1286  
1299 1287          /* Disable the interrupts. Either the whole block, or one by one. */
1300 1288          if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1301 1289                  ret = ddi_intr_block_disable(sc->sc_intr_htable,
1302 1290                      sc->sc_intr_num);
1303 1291                  if (ret != DDI_SUCCESS) {
1304 1292                          dev_err(sc->sc_dev, CE_WARN,
1305 1293                              "Failed to disable MSIs, won't be able to "
1306 1294                              "reuse next time");
1307 1295                  }
1308 1296          } else {
1309 1297                  for (i = 0; i < sc->sc_intr_num; i++) {
1310 1298                          ret = ddi_intr_disable(sc->sc_intr_htable[i]);
1311 1299                          if (ret != DDI_SUCCESS) {
1312 1300                                  dev_err(sc->sc_dev, CE_WARN,
1313 1301                                      "Failed to disable interrupt %d, "
1314 1302                                      "won't be able to reuse", i);
1315 1303                          }
1316 1304                  }
1317 1305          }
1318 1306  
1319 1307  
1320 1308          for (i = 0; i < sc->sc_intr_num; i++) {
1321 1309                  (void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
1322 1310          }
1323 1311  
1324 1312          for (i = 0; i < sc->sc_intr_num; i++)
1325 1313                  (void) ddi_intr_free(sc->sc_intr_htable[i]);
1326 1314  
1327 1315          kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) *
1328 1316              sc->sc_intr_num);
1329 1317  
1330 1318          /* After disabling interrupts, the config offset is non-MSI-X. */
1331 1319          sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX;
1332 1320  }
1333 1321  
1334 1322  /*
1335 1323   * Module linkage information for the kernel.
1336 1324   */
1337 1325  static struct modlmisc modlmisc = {
1338 1326          &mod_miscops,   /* Type of module */
1339 1327          "VirtIO common library module",
1340 1328  };
1341 1329  
1342 1330  static struct modlinkage modlinkage = {
1343 1331          MODREV_1,
1344 1332          {
1345 1333                  (void *)&modlmisc,
1346 1334                  NULL
1347 1335          }
1348 1336  };
1349 1337  
1350 1338  int
1351 1339  _init(void)
1352 1340  {
1353 1341          return (mod_install(&modlinkage));
1354 1342  }
1355 1343  
1356 1344  int
1357 1345  _fini(void)
1358 1346  {
1359 1347          return (mod_remove(&modlinkage));
1360 1348  }
1361 1349  
1362 1350  int
1363 1351  _info(struct modinfo *modinfop)
1364 1352  {
1365 1353          return (mod_info(&modlinkage, modinfop));
1366 1354  }
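
For the module as a whole, the comments above imply an attach-time ordering
for client drivers: register interrupts first (the config register layout
depends on MSI-X), then negotiate features, allocate vqs (their mutexes need
sc_intr_prio), and only then enable interrupts and mark the device live.
A hedged end-to-end sketch; the queue index, sizes, queue name, and handler
tables are assumptions, and error handling is elided:

    if (virtio_register_ints(sc, &config_handler, vq_handlers) !=
        DDI_SUCCESS)
            goto fail;

    (void) virtio_negotiate_features(sc, VIRTIO_F_RING_INDIRECT_DESC);

    /* size 0 keeps the device's native queue size (see virtio_alloc_vq) */
    vq = virtio_alloc_vq(sc, 0, 0, 0, "my-vq");
    if (vq == NULL)
            goto fail;

    if (virtio_enable_ints(sc) != DDI_SUCCESS)
            goto fail;

    /* DRIVER_OK status bit, as defined in virtioreg.h */
    virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);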
  