Code review changes

          --- old/usr/src/uts/common/io/virtio/virtio.c
          +++ new/usr/src/uts/common/io/virtio/virtio.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23      - * Copyright 2012 Nexenta Systems, Inc.
       23 + * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
  24   24   * Copyright 2012 Alexey Zaytsev <alexey.zaytsev@gmail.com>
  25   25   */
  26   26  
  27   27  /* Based on the NetBSD virtio driver by Minoura Makoto. */
  28   28  /*
  29   29   * Copyright (c) 2010 Minoura Makoto.
  30   30   * All rights reserved.
  31   31   *
  32   32   * Redistribution and use in source and binary forms, with or without
  33   33   * modification, are permitted provided that the following conditions
  34   34   * are met:
  35   35   * 1. Redistributions of source code must retain the above copyright
  36   36   *    notice, this list of conditions and the following disclaimer.
  37   37   * 2. Redistributions in binary form must reproduce the above copyright
  38   38   *    notice, this list of conditions and the following disclaimer in the
  39   39   *    documentation and/or other materials provided with the distribution.
  40   40   *
  41   41   * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  42   42   * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  43   43   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  44   44   * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  45   45   * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  46   46   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  47   47   * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  48   48   * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  49   49   * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  50   50   * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  51   51   *
  52   52   */
  53   53  
  54   54  #include <sys/conf.h>
  55   55  #include <sys/kmem.h>
  56   56  #include <sys/debug.h>
  57   57  #include <sys/modctl.h>
  58   58  #include <sys/autoconf.h>
  59   59  #include <sys/ddi_impldefs.h>
  60   60  #include <sys/ddi.h>
  61   61  #include <sys/sunddi.h>
  62   62  #include <sys/sunndi.h>
  63   63  #include <sys/avintr.h>
  64   64  #include <sys/spl.h>
  65   65  #include <sys/promif.h>
  66   66  #include <sys/list.h>
  67   67  #include <sys/bootconf.h>
  68   68  #include <sys/bootsvcs.h>
  69   69  #include <sys/sysmacros.h>
  70   70  #include <sys/pci.h>
  71   71  
  72   72  #include "virtiovar.h"
  73   73  #include "virtioreg.h"
  74   74  
  75   75  #define NDEVNAMES       (sizeof (virtio_device_name) / sizeof (char *))
  76   76  #define MINSEG_INDIRECT 2       /* use indirect if nsegs >= this value */
  77   77  #define VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1)) & \
  78   78              ~(VIRTIO_PAGE_SIZE-1))
  79   79  
  80   80  void
  81   81  virtio_set_status(struct virtio_softc *sc, unsigned int status)
  82   82  {
  83   83          int old = 0;
  84   84  
  85   85          if (status != 0) {
  86   86                  old = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
  87   87                      VIRTIO_CONFIG_DEVICE_STATUS));
  88   88          }
  89   89  
  90   90          ddi_put8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
  91   91              VIRTIO_CONFIG_DEVICE_STATUS), status | old);
  92   92  }
  93   93  
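
A typical driver walks the status register through the virtio handshake with
this helper; because it ORs each new non-zero value into the current register
contents, the calls are cumulative. A minimal sketch, assuming the
VIRTIO_CONFIG_DEVICE_STATUS_* constants from virtioreg.h:

        virtio_set_status(sc, 0);       /* writes 0: full device reset */
        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
        /* ... negotiate features, allocate queues ... */
        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
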
  94   94  /*
  95   95   * Negotiate features, save the result in sc->sc_features
  96   96   */
  97   97  uint32_t
  98   98  virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
  99   99  {
 100  100          uint32_t host_features;
 101  101          uint32_t features;
 102  102  
 103  103          host_features = ddi_get32(sc->sc_ioh,
 104  104              /* LINTED E_BAD_PTR_CAST_ALIGN */
 105  105              (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));
 106  106  
 107  107          dev_debug(sc->sc_dev, CE_NOTE, "host features: %x, guest features: %x",
 108  108              host_features, guest_features);
 109  109  
 110  110          features = host_features & guest_features;
 111  111          ddi_put32(sc->sc_ioh,
 112  112              /* LINTED E_BAD_PTR_CAST_ALIGN */
 113  113              (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
 114  114              features);
 115  115  
 116  116          sc->sc_features = features;
 117  117  
 118  118          return (host_features);
 119  119  }
 120  120  
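
The helper returns the host's feature mask while storing the negotiated
intersection in sc->sc_features; callers then test individual bits with
virtio_has_feature(). A usage sketch (the device-specific bits are
placeholders):

        uint32_t host_features;
        char buf[128];

        host_features = virtio_negotiate_features(sc,
            VIRTIO_F_RING_INDIRECT_DESC /* | device-specific feature bits */);

        if (virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC)) {
                /* indirect_num > 0 may be passed to virtio_alloc_vq() */
        }

        (void) virtio_show_features(sc->sc_features, buf, sizeof (buf));
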
 121  121  size_t
 122  122  virtio_show_features(uint32_t features, char *buf, size_t len)
 123  123  {
 124  124          char *orig_buf = buf;
 125  125          char *bufend = buf + len;
 126  126  
 127  127          /* LINTED E_PTRDIFF_OVERFLOW */
 128  128          buf += snprintf(buf, bufend - buf, "Generic ( ");
 129  129          if (features & VIRTIO_F_RING_INDIRECT_DESC)
 130  130                  /* LINTED E_PTRDIFF_OVERFLOW */
 131  131                  buf += snprintf(buf, bufend - buf, "INDIRECT_DESC ");
 132  132  
 133  133          /* LINTED E_PTRDIFF_OVERFLOW */
 134  134          buf += snprintf(buf, bufend - buf, ") ");
 135  135  
 136  136          /* LINTED E_PTRDIFF_OVERFLOW */
 137  137          return (buf - orig_buf);
 138  138  }
 139  139  
 140  140  boolean_t
 141  141  virtio_has_feature(struct virtio_softc *sc, uint32_t feature)
 142  142  {
  143  143          return ((sc->sc_features & feature) != 0);
 144  144  }
 145  145  
 146  146  /*
 147  147   * Device configuration registers.
 148  148   */
 149  149  uint8_t
 150  150  virtio_read_device_config_1(struct virtio_softc *sc, unsigned int index)
 151  151  {
 152  152          ASSERT(sc->sc_config_offset);
  153  153          return (ddi_get8(sc->sc_ioh,
  154  154              (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index)));
 155  155  }
 156  156  
 157  157  uint16_t
 158  158  virtio_read_device_config_2(struct virtio_softc *sc, unsigned int index)
 159  159  {
 160  160          ASSERT(sc->sc_config_offset);
  161  161          return (ddi_get16(sc->sc_ioh,
  162  162              /* LINTED E_BAD_PTR_CAST_ALIGN */
  163  163              (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index)));
 164  164  }
 165  165  
 166  166  uint32_t
 167  167  virtio_read_device_config_4(struct virtio_softc *sc, unsigned int index)
 168  168  {
 169  169          ASSERT(sc->sc_config_offset);
  170  170          return (ddi_get32(sc->sc_ioh,
  171  171              /* LINTED E_BAD_PTR_CAST_ALIGN */
  172  172              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index)));
 173  173  }
 174  174  
 175  175  uint64_t
 176  176  virtio_read_device_config_8(struct virtio_softc *sc, unsigned int index)
 177  177  {
 178  178          uint64_t r;
 179  179  
 180  180          ASSERT(sc->sc_config_offset);
 181  181          r = ddi_get32(sc->sc_ioh,
 182  182              /* LINTED E_BAD_PTR_CAST_ALIGN */
 183  183              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
 184  184              index + sizeof (uint32_t)));
 185  185  
 186  186          r <<= 32;
 187  187  
 188  188          r += ddi_get32(sc->sc_ioh,
 189  189              /* LINTED E_BAD_PTR_CAST_ALIGN */
 190  190              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 191  191          return (r);
 192  192  }
 193  193  
 194  194  void
 195  195  virtio_write_device_config_1(struct virtio_softc *sc, unsigned int index,
 196  196      uint8_t value)
 197  197  {
 198  198          ASSERT(sc->sc_config_offset);
 199  199          ddi_put8(sc->sc_ioh,
 200  200              (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 201  201  }
 202  202  
 203  203  void
 204  204  virtio_write_device_config_2(struct virtio_softc *sc, unsigned int index,
 205  205      uint16_t value)
 206  206  {
 207  207          ASSERT(sc->sc_config_offset);
 208  208          ddi_put16(sc->sc_ioh,
 209  209              /* LINTED E_BAD_PTR_CAST_ALIGN */
 210  210              (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 211  211  }
 212  212  
 213  213  void
 214  214  virtio_write_device_config_4(struct virtio_softc *sc, unsigned int index,
 215  215      uint32_t value)
 216  216  {
 217  217          ASSERT(sc->sc_config_offset);
 218  218          ddi_put32(sc->sc_ioh,
 219  219              /* LINTED E_BAD_PTR_CAST_ALIGN */
 220  220              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 221  221  }
 222  222  
 223  223  void
 224  224  virtio_write_device_config_8(struct virtio_softc *sc, unsigned int index,
 225  225      uint64_t value)
 226  226  {
 227  227          ASSERT(sc->sc_config_offset);
 228  228          ddi_put32(sc->sc_ioh,
 229  229              /* LINTED E_BAD_PTR_CAST_ALIGN */
 230  230              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
 231  231              value & 0xFFFFFFFF);
 232  232          ddi_put32(sc->sc_ioh,
 233  233              /* LINTED E_BAD_PTR_CAST_ALIGN */
 234  234              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
 235  235              index + sizeof (uint32_t)), value >> 32);
 236  236  }
 237  237  
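
Note that the 8-byte accessors above are composed of two 32-bit accesses (low
word at index, high word at index + sizeof (uint32_t), matching the legacy
little-endian layout), so they are not atomic with respect to a device
updating the field concurrently. Reading a hypothetical 64-bit field at
device-specific offset 0 looks like:

        uint64_t capacity = virtio_read_device_config_8(sc, 0);
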
 238  238  /*
 239  239   * Start/stop vq interrupt.  No guarantee.
 240  240   */
 241  241  void
 242  242  virtio_stop_vq_intr(struct virtqueue *vq)
 243  243  {
 244  244          vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 245  245  }
 246  246  
 247  247  void
 248  248  virtio_start_vq_intr(struct virtqueue *vq)
 249  249  {
 250  250          vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
 251  251  }
 252  252  
 253  253  static ddi_dma_attr_t virtio_vq_dma_attr = {
 254  254          DMA_ATTR_V0,            /* Version number */
 255  255          0,                      /* low address */
 256  256          0x00000FFFFFFFFFFF,     /* high address. Has to fit into 32 bits */
 257  257                                  /* after page-shifting */
 258  258          0xFFFFFFFF,             /* counter register max */
 259  259          VIRTIO_PAGE_SIZE,       /* page alignment required */
 260  260          0x3F,                   /* burst sizes: 1 - 32 */
 261  261          0x1,                    /* minimum transfer size */
 262  262          0xFFFFFFFF,             /* max transfer size */
 263  263          0xFFFFFFFF,             /* address register max */
 264  264          1,                      /* no scatter-gather */
 265  265          1,                      /* device operates on bytes */
 266  266          0,                      /* attr flag: set to 0 */
 267  267  };
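
The odd-looking high-address limit follows from the register width:
VIRTIO_CONFIG_QUEUE_ADDRESS is a 32-bit register holding a page frame number,
so the highest usable physical address is (2^32 - 1) pages of VIRTIO_PAGE_SIZE
(the usual 4096) bytes, and 0x00000FFFFFFFFFFF >> 12 = 0xFFFFFFFF fits exactly.
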
 268  268  
 269  269  static ddi_dma_attr_t virtio_vq_indirect_dma_attr = {
 270  270          DMA_ATTR_V0,            /* Version number */
 271  271          0,                      /* low address */
 272  272          0xFFFFFFFFFFFFFFFF,     /* high address */
 273  273          0xFFFFFFFF,             /* counter register max */
 274  274          1,                      /* No specific alignment */
 275  275          0x3F,                   /* burst sizes: 1 - 32 */
 276  276          0x1,                    /* minimum transfer size */
 277  277          0xFFFFFFFF,             /* max transfer size */
 278  278          0xFFFFFFFF,             /* address register max */
 279  279          1,                      /* no scatter-gather */
 280  280          1,                      /* device operates on bytes */
 281  281          0,                      /* attr flag: set to 0 */
 282  282  };
 283  283  
 284  284  /* Same for direct and indirect descriptors. */
 285  285  static ddi_device_acc_attr_t virtio_vq_devattr = {
 286  286          DDI_DEVICE_ATTR_V0,
 287  287          DDI_NEVERSWAP_ACC,
 288  288          DDI_STORECACHING_OK_ACC,
 289  289          DDI_DEFAULT_ACC
 290  290  };
 291  291  
 292  292  static void
 293  293  virtio_free_indirect(struct vq_entry *entry)
 294  294  {
 295  295  
 296  296          (void) ddi_dma_unbind_handle(entry->qe_indirect_dma_handle);
 297  297          ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
 298  298          ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
 299  299  
 300  300          entry->qe_indirect_descs = NULL;
 301  301  }
 302  302  
 303  303  
 304  304  static int
 305  305  virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
 306  306  {
 307  307          int allocsize, num;
 308  308          size_t len;
 309  309          unsigned int ncookies;
 310  310          int ret;
 311  311  
 312  312          num = entry->qe_queue->vq_indirect_num;
 313  313          ASSERT(num > 1);
 314  314  
 315  315          allocsize = sizeof (struct vring_desc) * num;
 316  316  
 317  317          ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
 318  318              DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
 319  319          if (ret != DDI_SUCCESS) {
 320  320                  dev_err(sc->sc_dev, CE_WARN,
 321  321                      "Failed to allocate dma handle for indirect descriptors, "
 322  322                      "entry %d, vq %d", entry->qe_index,
 323  323                      entry->qe_queue->vq_index);
 324  324                  goto out_alloc_handle;
 325  325          }
 326  326  
 327  327          ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle, allocsize,
 328  328              &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 329  329              (caddr_t *)&entry->qe_indirect_descs, &len,
 330  330              &entry->qe_indirect_dma_acch);
 331  331          if (ret != DDI_SUCCESS) {
 332  332                  dev_err(sc->sc_dev, CE_WARN,
 333  333                      "Failed to allocate dma memory for indirect descriptors, "
  334  334                      "entry %d, vq %d", entry->qe_index,
 335  335                      entry->qe_queue->vq_index);
 336  336                  goto out_alloc;
 337  337          }
 338  338  
 339  339          (void) memset(entry->qe_indirect_descs, 0xff, allocsize);
 340  340  
 341  341          ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
 342  342              (caddr_t)entry->qe_indirect_descs, len,
 343  343              DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 344  344              &entry->qe_indirect_dma_cookie, &ncookies);
 345  345          if (ret != DDI_DMA_MAPPED) {
 346  346                  dev_err(sc->sc_dev, CE_WARN,
 347  347                      "Failed to bind dma memory for indirect descriptors, "
 348  348                      "entry %d, vq %d", entry->qe_index,
 349  349                      entry->qe_queue->vq_index);
 350  350                  goto out_bind;
 351  351          }
 352  352  
 353  353          /* We asked for a single segment */
 354  354          ASSERT(ncookies == 1);
 355  355  
 356  356          return (0);
 357  357  
 358  358  out_bind:
 359  359          ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
 360  360  out_alloc:
 361  361          ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
 362  362  out_alloc_handle:
 363  363  
 364  364          return (ret);
 365  365  }
 366  366  
 367  367  /*
 368  368   * Initialize the vq structure.
 369  369   */
 370  370  static int
 371  371  virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
 372  372  {
 373  373          int ret;
 374  374          uint16_t i;
 375  375          int vq_size = vq->vq_num;
 376  376          int indirect_num = vq->vq_indirect_num;
 377  377  
 378  378          /* free slot management */
 379  379          list_create(&vq->vq_freelist, sizeof (struct vq_entry),
 380  380              offsetof(struct vq_entry, qe_list));
 381  381  
 382  382          for (i = 0; i < vq_size; i++) {
 383  383                  struct vq_entry *entry = &vq->vq_entries[i];
 384  384                  list_insert_tail(&vq->vq_freelist, entry);
 385  385                  entry->qe_index = i;
 386  386                  entry->qe_desc = &vq->vq_descs[i];
 387  387                  entry->qe_queue = vq;
 388  388  
 389  389                  if (indirect_num) {
 390  390                          ret = virtio_alloc_indirect(sc, entry);
 391  391                          if (ret)
 392  392                                  goto out_indirect;
 393  393                  }
 394  394          }
 395  395  
 396  396          mutex_init(&vq->vq_freelist_lock, "virtio-freelist", MUTEX_DRIVER,
 397  397              DDI_INTR_PRI(sc->sc_intr_prio));
 398  398          mutex_init(&vq->vq_avail_lock, "virtio-avail", MUTEX_DRIVER,
 399  399              DDI_INTR_PRI(sc->sc_intr_prio));
 400  400          mutex_init(&vq->vq_used_lock, "virtio-used", MUTEX_DRIVER,
 401  401              DDI_INTR_PRI(sc->sc_intr_prio));
 402  402  
 403  403          return (0);
 404  404  
 405  405  out_indirect:
 406  406          for (i = 0; i < vq_size; i++) {
 407  407                  struct vq_entry *entry = &vq->vq_entries[i];
 408  408                  if (entry->qe_indirect_descs)
 409  409                          virtio_free_indirect(entry);
 410  410          }
 411  411  
 412  412          return (ret);
 413  413  }
 414  414  
 415  415  /*
 416  416   * Allocate/free a vq.
 417  417   */
 418  418  struct virtqueue *
 419  419  virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
 420  420      unsigned int indirect_num, const char *name)
 421  421  {
 422  422          int vq_size, allocsize1, allocsize2, allocsize = 0;
 423  423          int ret;
 424  424          unsigned int ncookies;
 425  425          size_t len;
 426  426          struct virtqueue *vq;
 427  427  
 428  428          ddi_put16(sc->sc_ioh,
 429  429              /* LINTED E_BAD_PTR_CAST_ALIGN */
 430  430              (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
 431  431          vq_size = ddi_get16(sc->sc_ioh,
 432  432              /* LINTED E_BAD_PTR_CAST_ALIGN */
 433  433              (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
 434  434          if (vq_size == 0) {
 435  435                  dev_err(sc->sc_dev, CE_WARN,
  436  436                      "virtqueue doesn't exist, index %d for %s", index, name);
 437  437                  goto out;
 438  438          }
 439  439  
 440  440          vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);
 441  441  
 442  442          /* size 0 => use native vq size, good for receive queues. */
 443  443          if (size)
 444  444                  vq_size = MIN(vq_size, size);
 445  445  
 446  446          /* allocsize1: descriptor table + avail ring + pad */
 447  447          allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
 448  448              sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
 449  449          /* allocsize2: used ring + pad */
 450  450          allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
 451  451              sizeof (struct vring_used_elem) * vq_size);
 452  452  
 453  453          allocsize = allocsize1 + allocsize2;
 454  454  
 455  455          ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
 456  456              DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
 457  457          if (ret != DDI_SUCCESS) {
 458  458                  dev_err(sc->sc_dev, CE_WARN,
 459  459                      "Failed to allocate dma handle for vq %d", index);
 460  460                  goto out_alloc_handle;
 461  461          }
 462  462  
 463  463          ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
 464  464              &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 465  465              (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
 466  466          if (ret != DDI_SUCCESS) {
 467  467                  dev_err(sc->sc_dev, CE_WARN,
 468  468                      "Failed to allocate dma memory for vq %d", index);
 469  469                  goto out_alloc;
 470  470          }
 471  471  
 472  472          ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
 473  473              (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
 474  474              DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
 475  475          if (ret != DDI_DMA_MAPPED) {
 476  476                  dev_err(sc->sc_dev, CE_WARN,
 477  477                      "Failed to bind dma memory for vq %d", index);
 478  478                  goto out_bind;
 479  479          }
 480  480  
 481  481          /* We asked for a single segment */
 482  482          ASSERT(ncookies == 1);
  483  483          /* and page-aligned buffers. */
 484  484          ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);
 485  485  
 486  486          (void) memset(vq->vq_vaddr, 0, allocsize);
 487  487  
 488  488          /* Make sure all zeros hit the buffer before we point the host to it */
 489  489          membar_producer();
 490  490  
 491  491          /* set the vq address */
 492  492          ddi_put32(sc->sc_ioh,
 493  493              /* LINTED E_BAD_PTR_CAST_ALIGN */
 494  494              (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
 495  495              (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));
 496  496  
 497  497          /* remember addresses and offsets for later use */
 498  498          vq->vq_owner = sc;
 499  499          vq->vq_num = vq_size;
 500  500          vq->vq_index = index;
 501  501          vq->vq_descs = vq->vq_vaddr;
 502  502          vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
 503  503          vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
 504  504          vq->vq_usedoffset = allocsize1;
 505  505          vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);
 506  506  
 507  507          ASSERT(indirect_num == 0 ||
 508  508              virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
 509  509          vq->vq_indirect_num = indirect_num;
 510  510  
 511  511          /* free slot management */
 512  512          vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
 513  513              KM_SLEEP);
 514  514  
 515  515          ret = virtio_init_vq(sc, vq);
 516  516          if (ret)
 517  517                  goto out_init;
 518  518  
 519  519          dev_debug(sc->sc_dev, CE_NOTE,
 520  520              "Allocated %d entries for vq %d:%s (%d indirect descs)",
 521  521              vq_size, index, name, indirect_num * vq_size);
 522  522  
 523  523          return (vq);
 524  524  
 525  525  out_init:
 526  526          kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
 527  527          (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
 528  528  out_bind:
 529  529          ddi_dma_mem_free(&vq->vq_dma_acch);
 530  530  out_alloc:
 531  531          ddi_dma_free_handle(&vq->vq_dma_handle);
 532  532  out_alloc_handle:
 533  533          kmem_free(vq, sizeof (struct virtqueue));
 534  534  out:
 535  535          return (NULL);
 536  536  }
 537  537  
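
Queue setup has two ordering dependencies visible above: interrupts must
already be registered (virtio_init_vq() initializes the vq mutexes with
sc->sc_intr_prio), and VIRTIO_F_RING_INDIRECT_DESC must have been negotiated
before asking for indirect descriptors. A sketch with a hypothetical receive
queue:

        struct virtqueue *vq;

        /* queue index 0, native ring size, 8 indirect descriptors per slot */
        vq = virtio_alloc_vq(sc, 0, 0, 8, "rx");
        if (vq == NULL)
                return (DDI_FAILURE);
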
 538  538  void
 539  539  virtio_free_vq(struct virtqueue *vq)
 540  540  {
 541  541          struct virtio_softc *sc = vq->vq_owner;
 542  542          int i;
 543  543  
 544  544          /* tell device that there's no virtqueue any longer */
 545  545          ddi_put16(sc->sc_ioh,
 546  546              /* LINTED E_BAD_PTR_CAST_ALIGN */
 547  547              (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
 548  548              vq->vq_index);
 549  549          ddi_put32(sc->sc_ioh,
 550  550              /* LINTED E_BAD_PTR_CAST_ALIGN */
 551  551              (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);
 552  552  
 553  553          /* Free the indirect descriptors, if any. */
 554  554          for (i = 0; i < vq->vq_num; i++) {
 555  555                  struct vq_entry *entry = &vq->vq_entries[i];
 556  556                  if (entry->qe_indirect_descs)
 557  557                          virtio_free_indirect(entry);
 558  558          }
 559  559  
 560  560          kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);
 561  561  
 562  562          (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
 563  563          ddi_dma_mem_free(&vq->vq_dma_acch);
 564  564          ddi_dma_free_handle(&vq->vq_dma_handle);
 565  565  
 566  566          mutex_destroy(&vq->vq_used_lock);
 567  567          mutex_destroy(&vq->vq_avail_lock);
 568  568          mutex_destroy(&vq->vq_freelist_lock);
 569  569  
 570  570          kmem_free(vq, sizeof (struct virtqueue));
 571  571  }
 572  572  
 573  573  /*
 574  574   * Free descriptor management.
 575  575   */
 576  576  struct vq_entry *
 577  577  vq_alloc_entry(struct virtqueue *vq)
 578  578  {
 579  579          struct vq_entry *qe;
 580  580  
 581  581          mutex_enter(&vq->vq_freelist_lock);
 582  582          if (list_is_empty(&vq->vq_freelist)) {
 583  583                  mutex_exit(&vq->vq_freelist_lock);
 584  584                  return (NULL);
 585  585          }
 586  586          qe = list_remove_head(&vq->vq_freelist);
 587  587  
 588  588          ASSERT(vq->vq_used_entries >= 0);
 589  589          vq->vq_used_entries++;
 590  590  
 591  591          mutex_exit(&vq->vq_freelist_lock);
 592  592  
 593  593          qe->qe_next = NULL;
 594  594          qe->qe_indirect_next = 0;
 595  595          (void) memset(qe->qe_desc, 0, sizeof (struct vring_desc));
 596  596  
 597  597          return (qe);
 598  598  }
 599  599  
 600  600  void
 601  601  vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
 602  602  {
 603  603          mutex_enter(&vq->vq_freelist_lock);
 604  604  
 605  605          list_insert_head(&vq->vq_freelist, qe);
 606  606          vq->vq_used_entries--;
 607  607          ASSERT(vq->vq_used_entries >= 0);
 608  608          mutex_exit(&vq->vq_freelist_lock);
 609  609  }
 610  610  
 611  611  /*
 612  612   * We (intentionally) don't have a global vq mutex, so you are
  613  613   * responsible for external locking to avoid allocating/freeing any
 614  614   * entries before using the returned value. Have fun.
 615  615   */
 616  616  uint_t
 617  617  vq_num_used(struct virtqueue *vq)
 618  618  {
 619  619          /* vq->vq_freelist_lock would not help here. */
 620  620          return (vq->vq_used_entries);
 621  621  }
 622  622  
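
Per the comment above, the count is advisory; a producer typically just tries
to allocate and treats NULL as a full ring rather than an error. Sketch:

        struct vq_entry *qe;

        if ((qe = vq_alloc_entry(vq)) == NULL) {
                /*
                 * Ring full; entries come back via virtio_pull_chain() and
                 * virtio_free_chain(), so retry from the completion path
                 * rather than spinning here.
                 */
        }
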
 623  623  static inline void
 624  624  virtio_ve_set_desc(struct vring_desc *desc, uint64_t paddr, uint32_t len,
 625  625      boolean_t write)
 626  626  {
 627  627          desc->addr = paddr;
 628  628          desc->len = len;
 629  629          desc->next = 0;
 630  630          desc->flags = 0;
 631  631  
 632  632          /* 'write' - from the driver's point of view */
 633  633          if (!write)
 634  634                  desc->flags = VRING_DESC_F_WRITE;
 635  635  }
 636  636  
 637  637  void
 638  638  virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 639  639      boolean_t write)
 640  640  {
 641  641          virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
 642  642  }
 643  643  
 644  644  unsigned int
 645  645  virtio_ve_indirect_available(struct vq_entry *qe)
 646  646  {
 647  647          return (qe->qe_queue->vq_indirect_num - (qe->qe_indirect_next - 1));
 648  648  }
 649  649  
 650  650  void
 651  651  virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 652  652      boolean_t write)
 653  653  {
 654  654          struct vring_desc *indirect_desc;
 655  655  
 656  656          ASSERT(qe->qe_queue->vq_indirect_num);
 657  657          ASSERT(qe->qe_indirect_next < qe->qe_queue->vq_indirect_num);
 658  658  
 659  659          indirect_desc = &qe->qe_indirect_descs[qe->qe_indirect_next];
 660  660          virtio_ve_set_desc(indirect_desc, paddr, len, write);
 661  661          qe->qe_indirect_next++;
 662  662  }
 663  663  
 664  664  void
 665  665  virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
 666  666      ddi_dma_cookie_t dma_cookie, unsigned int ncookies, boolean_t write)
 667  667  {
 668  668          int i;
 669  669  
 670  670          for (i = 0; i < ncookies; i++) {
 671  671                  virtio_ve_add_indirect_buf(qe, dma_cookie.dmac_laddress,
 672  672                      dma_cookie.dmac_size, write);
 673  673                  ddi_dma_nextcookie(dma_handle, &dma_cookie);
 674  674          }
 675  675  }
 676  676  
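
Each DMA cookie becomes one indirect descriptor, so this is the usual way to
post a buffer bound with ddi_dma_addr_bind_handle(). A sketch assuming dmah,
cookie and ncookies came from such a bind; note the write flag is from the
driver's point of view:

        /* ddi_dma_addr_bind_handle(dmah, ...) filled cookie and ncookies */
        virtio_ve_add_cookie(qe, dmah, cookie, ncookies,
            B_TRUE);    /* driver wrote the buffer; device will read it */
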
 677  677  void
 678  678  virtio_sync_vq(struct virtqueue *vq)
 679  679  {
 680  680          struct virtio_softc *vsc = vq->vq_owner;
 681  681  
 682  682          /* Make sure the avail ring update hit the buffer */
 683  683          membar_producer();
 684  684  
 685  685          vq->vq_avail->idx = vq->vq_avail_idx;
 686  686  
 687  687          /* Make sure the avail idx update hits the buffer */
 688  688          membar_producer();
 689  689  
 690  690          /* Make sure we see the flags update */
 691  691          membar_consumer();
 692  692  
 693  693          if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
 694  694                  ddi_put16(vsc->sc_ioh,
 695  695                      /* LINTED E_BAD_PTR_CAST_ALIGN */
 696  696                      (uint16_t *)(vsc->sc_io_addr +
 697  697                      VIRTIO_CONFIG_QUEUE_NOTIFY),
 698  698                      vq->vq_index);
 699  699          }
 700  700  }
 701  701  
 702  702  void
 703  703  virtio_push_chain(struct vq_entry *qe, boolean_t sync)
 704  704  {
 705  705          struct virtqueue *vq = qe->qe_queue;
 706  706          struct vq_entry *head = qe;
 707  707          struct vring_desc *desc;
 708  708          int idx;
 709  709  
 710  710          ASSERT(qe);
 711  711  
 712  712          /*
 713  713           * Bind the descs together, paddr and len should be already
 714  714           * set with virtio_ve_set
 715  715           */
 716  716          do {
 717  717                  /* Bind the indirect descriptors */
 718  718                  if (qe->qe_indirect_next > 1) {
 719  719                          uint16_t i = 0;
 720  720  
 721  721                          /*
 722  722                           * Set the pointer/flags to the
 723  723                           * first indirect descriptor
 724  724                           */
 725  725                          virtio_ve_set_desc(qe->qe_desc,
 726  726                              qe->qe_indirect_dma_cookie.dmac_laddress,
 727  727                              sizeof (struct vring_desc) * qe->qe_indirect_next,
 728  728                              B_FALSE);
 729  729                          qe->qe_desc->flags |= VRING_DESC_F_INDIRECT;
 730  730  
 731  731                          /* For all but the last one, add the next index/flag */
 732  732                          do {
 733  733                                  desc = &qe->qe_indirect_descs[i];
 734  734                                  i++;
 735  735  
 736  736                                  desc->flags |= VRING_DESC_F_NEXT;
 737  737                                  desc->next = i;
 738  738                          } while (i < qe->qe_indirect_next - 1);
 739  739  
 740  740                  }
 741  741  
 742  742                  if (qe->qe_next) {
 743  743                          qe->qe_desc->flags |= VRING_DESC_F_NEXT;
 744  744                          qe->qe_desc->next = qe->qe_next->qe_index;
 745  745                  }
 746  746  
 747  747                  qe = qe->qe_next;
 748  748          } while (qe);
 749  749  
 750  750          mutex_enter(&vq->vq_avail_lock);
 751  751          idx = vq->vq_avail_idx;
 752  752          vq->vq_avail_idx++;
 753  753  
 754  754          /* Make sure the bits hit the descriptor(s) */
 755  755          membar_producer();
 756  756          vq->vq_avail->ring[idx % vq->vq_num] = head->qe_index;
 757  757  
 758  758          /* Notify the device, if needed. */
 759  759          if (sync)
 760  760                  virtio_sync_vq(vq);
 761  761  
 762  762          mutex_exit(&vq->vq_avail_lock);
 763  763  }
 764  764  
 765  765  /*
 766  766   * Get a chain of descriptors from the used ring, if one is available.
 767  767   */
 768  768  struct vq_entry *
 769  769  virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
 770  770  {
 771  771          struct vq_entry *head;
 772  772          int slot;
 773  773          int usedidx;
 774  774  
 775  775          mutex_enter(&vq->vq_used_lock);
 776  776  
 777  777          /* No used entries? Bye. */
 778  778          if (vq->vq_used_idx == vq->vq_used->idx) {
 779  779                  mutex_exit(&vq->vq_used_lock);
 780  780                  return (NULL);
 781  781          }
 782  782  
 783  783          usedidx = vq->vq_used_idx;
 784  784          vq->vq_used_idx++;
 785  785          mutex_exit(&vq->vq_used_lock);
 786  786  
 787  787          usedidx %= vq->vq_num;
 788  788  
 789  789          /* Make sure we do the next step _after_ checking the idx. */
 790  790          membar_consumer();
 791  791  
 792  792          slot = vq->vq_used->ring[usedidx].id;
 793  793          *len = vq->vq_used->ring[usedidx].len;
 794  794  
 795  795          head = &vq->vq_entries[slot];
 796  796  
 797  797          return (head);
 798  798  }
 799  799  
 800  800  void
 801  801  virtio_free_chain(struct vq_entry *qe)
 802  802  {
 803  803          struct vq_entry *tmp;
 804  804          struct virtqueue *vq = qe->qe_queue;
 805  805  
 806  806          ASSERT(qe);
 807  807  
 808  808          do {
 809  809                  ASSERT(qe->qe_queue == vq);
 810  810                  tmp = qe->qe_next;
 811  811                  vq_free_entry(vq, qe);
 812  812                  qe = tmp;
 813  813          } while (tmp != NULL);
 814  814  }
 815  815  
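
Taken together, one request round-trip built on these helpers looks roughly
like the following sketch (buf_pa and buf_len are placeholders for a bound
payload):

        struct vq_entry *qe;
        uint32_t len;

        if ((qe = vq_alloc_entry(vq)) != NULL) {
                virtio_ve_set(qe, buf_pa, buf_len, B_TRUE);
                virtio_push_chain(qe, B_TRUE);  /* expose and notify */
        }

        /* later, typically from the queue's interrupt handler: */
        while ((qe = virtio_pull_chain(vq, &len)) != NULL)
                virtio_free_chain(qe);
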
 816  816  void
 817  817  virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
 818  818  {
 819  819          first->qe_next = second;
 820  820  }
 821  821  
 822  822  static int
 823  823  virtio_register_msi(struct virtio_softc *sc,
 824  824      struct virtio_int_handler *config_handler,
 825  825      struct virtio_int_handler vq_handlers[], int intr_types)
 826  826  {
 827  827          int count, actual;
 828  828          int int_type;
 829  829          int i;
 830  830          int handler_count;
 831  831          int ret;
 832  832  
 833  833          /* If both MSI and MSI-x are reported, prefer MSI-x. */
 834  834          int_type = DDI_INTR_TYPE_MSI;
 835  835          if (intr_types & DDI_INTR_TYPE_MSIX)
 836  836                  int_type = DDI_INTR_TYPE_MSIX;
 837  837  
 838  838          /* Walk the handler table to get the number of handlers. */
 839  839          for (handler_count = 0;
 840  840              vq_handlers && vq_handlers[handler_count].vh_func;
 841  841              handler_count++)
 842  842                  ;
 843  843  
 844  844          /* +1 if there is a config change handler. */
 845  845          if (config_handler != NULL)
 846  846                  handler_count++;
 847  847  
 848  848          /* Number of MSIs supported by the device. */
 849  849          ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
 850  850          if (ret != DDI_SUCCESS) {
 851  851                  dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
 852  852                  return (ret);
 853  853          }
 854  854  
 855  855          /*
  856  856           * Those who try to register more handlers than the device
 857  857           * supports shall suffer.
 858  858           */
 859  859          ASSERT(handler_count <= count);
 860  860  
 861  861          sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t) *
 862  862              handler_count, KM_SLEEP);
 863  863  
 864  864          ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
 865  865              handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
 866  866          if (ret != DDI_SUCCESS) {
 867  867                  dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
 868  868                  goto out_msi_alloc;
 869  869          }
 870  870  
 871  871          if (actual != handler_count) {
 872  872                  dev_err(sc->sc_dev, CE_WARN,
 873  873                      "Not enough MSI available: need %d, available %d",
 874  874                      handler_count, actual);
 875  875                  goto out_msi_available;
 876  876          }
 877  877  
 878  878          sc->sc_intr_num = handler_count;
 879  879          sc->sc_intr_config = B_FALSE;
 880  880          if (config_handler != NULL) {
 881  881                  sc->sc_intr_config = B_TRUE;
 882  882          }
 883  883  
  884  884          /* Assume they all have the same priority. */
 885  885          ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
 886  886          if (ret != DDI_SUCCESS) {
 887  887                  dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
 888  888                  goto out_msi_prio;
 889  889          }
 890  890  
 891  891          /* Add the vq handlers */
 892  892          for (i = 0; vq_handlers[i].vh_func; i++) {
 893  893                  ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 894  894                      vq_handlers[i].vh_func, sc, vq_handlers[i].vh_priv);
 895  895                  if (ret != DDI_SUCCESS) {
 896  896                          dev_err(sc->sc_dev, CE_WARN,
 897  897                              "ddi_intr_add_handler failed");
 898  898                          /* Remove the handlers that succeeded. */
 899  899                          while (--i >= 0) {
 900  900                                  (void) ddi_intr_remove_handler(
 901  901                                      sc->sc_intr_htable[i]);
 902  902                          }
 903  903                          goto out_add_handlers;
 904  904                  }
 905  905          }
 906  906  
 907  907          /* Don't forget the config handler */
 908  908          if (config_handler != NULL) {
 909  909                  ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 910  910                      config_handler->vh_func, sc, config_handler->vh_priv);
 911  911                  if (ret != DDI_SUCCESS) {
 912  912                          dev_err(sc->sc_dev, CE_WARN,
 913  913                              "ddi_intr_add_handler failed");
 914  914                          /* Remove the handlers that succeeded. */
 915  915                          while (--i >= 0) {
 916  916                                  (void) ddi_intr_remove_handler(
 917  917                                      sc->sc_intr_htable[i]);
 918  918                          }
 919  919                          goto out_add_handlers;
 920  920                  }
 921  921          }
 922  922  
 923  923          /* We know we are using MSI, so set the config offset. */
 924  924          sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
 925  925  
 926  926          ret = ddi_intr_get_cap(sc->sc_intr_htable[0], &sc->sc_intr_cap);
 927  927          /* Just in case. */
 928  928          if (ret != DDI_SUCCESS)
 929  929                  sc->sc_intr_cap = 0;
  930  930  
                        return (0);

 931  931  out_add_handlers:
 932  932  out_msi_prio:
 933  933  out_msi_available:
 934  934          for (i = 0; i < actual; i++)
 935  935                  (void) ddi_intr_free(sc->sc_intr_htable[i]);
 936  936  out_msi_alloc:
  937  937          kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) * handler_count);
 938  938  
 939  939          return (ret);
 940  940  }
 941  941  
 942  942  struct virtio_handler_container {
 943  943          int nhandlers;
 944  944          struct virtio_int_handler config_handler;
 945  945          struct virtio_int_handler vq_handlers[];
 946  946  };
 947  947  
 948  948  uint_t
 949  949  virtio_intx_dispatch(caddr_t arg1, caddr_t arg2)
 950  950  {
 951  951          struct virtio_softc *sc = (void *)arg1;
 952  952          struct virtio_handler_container *vhc = (void *)arg2;
 953  953          uint8_t isr_status;
 954  954          int i;
 955  955  
 956  956          isr_status = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
 957  957              VIRTIO_CONFIG_ISR_STATUS));
 958  958  
 959  959          if (!isr_status)
 960  960                  return (DDI_INTR_UNCLAIMED);
 961  961  
 962  962          if ((isr_status & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
 963  963              vhc->config_handler.vh_func) {
 964  964                  vhc->config_handler.vh_func((void *)sc,
 965  965                      vhc->config_handler.vh_priv);
 966  966          }
 967  967  
 968  968          /* Notify all handlers */
 969  969          for (i = 0; i < vhc->nhandlers; i++) {
 970  970                  vhc->vq_handlers[i].vh_func((void *)sc,
 971  971                      vhc->vq_handlers[i].vh_priv);
 972  972          }
 973  973  
 974  974          return (DDI_INTR_CLAIMED);
 975  975  }
 976  976  
 977  977  /*
 978  978   * config_handler and vq_handlers may be allocated on stack.
  979  979   * Take precautions not to lose them.
 980  980   */
 981  981  static int
 982  982  virtio_register_intx(struct virtio_softc *sc,
 983  983      struct virtio_int_handler *config_handler,
 984  984      struct virtio_int_handler vq_handlers[])
 985  985  {
 986  986          int vq_handler_count;
 987  987          int config_handler_count = 0;
 988  988          int actual;
 989  989          struct virtio_handler_container *vhc;
 990  990          int ret = DDI_FAILURE;
 991  991  
 992  992          /* Walk the handler table to get the number of handlers. */
 993  993          for (vq_handler_count = 0;
 994  994              vq_handlers && vq_handlers[vq_handler_count].vh_func;
 995  995              vq_handler_count++)
 996  996                  ;
 997  997  
 998  998          if (config_handler != NULL)
 999  999                  config_handler_count = 1;
1000 1000  
1001 1001          vhc = kmem_zalloc(sizeof (struct virtio_handler_container) +
1002 1002              sizeof (struct virtio_int_handler) * vq_handler_count, KM_SLEEP);
1003 1003  
1004 1004          vhc->nhandlers = vq_handler_count;
1005 1005          (void) memcpy(vhc->vq_handlers, vq_handlers,
1006 1006              sizeof (struct virtio_int_handler) * vq_handler_count);
1007 1007  
1008 1008          if (config_handler != NULL) {
1009 1009                  (void) memcpy(&vhc->config_handler, config_handler,
1010 1010                      sizeof (struct virtio_int_handler));
1011 1011          }
1012 1012  
1013 1013          /* Just a single entry for a single interrupt. */
1014 1014          sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
1015 1015  
1016 1016          ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
1017 1017              DDI_INTR_TYPE_FIXED, 0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
1018 1018          if (ret != DDI_SUCCESS) {
1019 1019                  dev_err(sc->sc_dev, CE_WARN,
1020 1020                      "Failed to allocate a fixed interrupt: %d", ret);
1021 1021                  goto out_int_alloc;
1022 1022          }
1023 1023  
1024 1024          ASSERT(actual == 1);
1025 1025          sc->sc_intr_num = 1;
1026 1026  
1027 1027          ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
1028 1028          if (ret != DDI_SUCCESS) {
1029 1029                  dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
1030 1030                  goto out_prio;
1031 1031          }
1032 1032  
1033 1033          ret = ddi_intr_add_handler(sc->sc_intr_htable[0],
1034 1034              virtio_intx_dispatch, sc, vhc);
1035 1035          if (ret != DDI_SUCCESS) {
1036 1036                  dev_err(sc->sc_dev, CE_WARN, "ddi_intr_add_handler failed");
1037 1037                  goto out_add_handlers;
1038 1038          }
1039 1039  
1040 1040          /* We know we are not using MSI, so set the config offset. */
1041 1041          sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1042 1042  
1043 1043          return (DDI_SUCCESS);
1044 1044  
1045 1045  out_add_handlers:
1046 1046  out_prio:
1047 1047          (void) ddi_intr_free(sc->sc_intr_htable[0]);
1048 1048  out_int_alloc:
1049 1049          kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
 1050 1050          kmem_free(vhc, sizeof (struct virtio_handler_container) +
 1051 1051              sizeof (struct virtio_int_handler) * vq_handler_count);
1052 1052          return (ret);
1053 1053  }
1054 1054  
1055 1055  /*
1056 1056   * We find out if we support MSI during this, and the register layout
 1057 1057   * depends on the MSI (doh). Don't access the device-specific bits in
1058 1058   * BAR 0 before calling it!
1059 1059   */
1060 1060  int
1061 1061  virtio_register_ints(struct virtio_softc *sc,
1062 1062      struct virtio_int_handler *config_handler,
1063 1063      struct virtio_int_handler vq_handlers[])
1064 1064  {
1065 1065          int ret;
1066 1066          int intr_types;
1067 1067  
1068 1068          /* Determine which types of interrupts are supported */
1069 1069          ret = ddi_intr_get_supported_types(sc->sc_dev, &intr_types);
1070 1070          if (ret != DDI_SUCCESS) {
1071 1071                  dev_err(sc->sc_dev, CE_WARN, "Can't get supported int types");
1072 1072                  goto out_inttype;
1073 1073          }
1074 1074  
1075 1075          /* If we have msi, let's use them. */
1076 1076          if (intr_types & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
1077 1077                  ret = virtio_register_msi(sc, config_handler,
1078 1078                      vq_handlers, intr_types);
1079 1079                  if (!ret)
1080 1080                          return (0);
1081 1081          }
1082 1082  
1083 1083          /* Fall back to old-fashioned interrupts. */
1084 1084          if (intr_types & DDI_INTR_TYPE_FIXED) {
1085 1085                  dev_debug(sc->sc_dev, CE_WARN,
1086 1086                      "Using legacy interrupts");
1087 1087  
1088 1088                  return (virtio_register_intx(sc, config_handler, vq_handlers));
1089 1089          }
1090 1090  
1091 1091          dev_err(sc->sc_dev, CE_WARN,
1092 1092              "MSI failed and fixed interrupts not supported. Giving up.");
1093 1093          ret = DDI_FAILURE;
1094 1094  
1095 1095  out_inttype:
1096 1096          return (ret);
1097 1097  }
1098 1098  
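
Handler tables are NULL-terminated arrays of struct virtio_int_handler
(vh_func/vh_priv, as used above). A hedged attach-time sketch, with my_rx_intr
standing in for a real handler:

        struct virtio_int_handler vq_handlers[] = {
                { .vh_func = my_rx_intr, .vh_priv = NULL },     /* vq 0 */
                { .vh_func = NULL },                            /* terminator */
        };

        if (virtio_register_ints(sc, NULL, vq_handlers) != DDI_SUCCESS)
                return (DDI_FAILURE);
        /* ... allocate the vqs, then unmask: */
        ret = virtio_enable_ints(sc);
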
1099 1099  static int
1100 1100  virtio_enable_msi(struct virtio_softc *sc)
1101 1101  {
1102 1102          int ret, i;
1103 1103          int vq_handler_count = sc->sc_intr_num;
1104 1104  
 1105 1105          /* Number of handlers, not counting the config. */
1106 1106          if (sc->sc_intr_config)
1107 1107                  vq_handler_count--;
1108 1108  
 1109 1109          /* Enable the interrupts. Either the whole block, or one by one. */
1110 1110          if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1111 1111                  ret = ddi_intr_block_enable(sc->sc_intr_htable,
1112 1112                      sc->sc_intr_num);
1113 1113                  if (ret != DDI_SUCCESS) {
1114 1114                          dev_err(sc->sc_dev, CE_WARN,
1115 1115                              "Failed to enable MSI, falling back to INTx");
1116 1116                          goto out_enable;
1117 1117                  }
1118 1118          } else {
1119 1119                  for (i = 0; i < sc->sc_intr_num; i++) {
1120 1120                          ret = ddi_intr_enable(sc->sc_intr_htable[i]);
1121 1121                          if (ret != DDI_SUCCESS) {
1122 1122                                  dev_err(sc->sc_dev, CE_WARN,
1123 1123                                      "Failed to enable MSI %d, "
1124 1124                                      "falling back to INTx", i);
1125 1125  
1126 1126                                  while (--i >= 0) {
1127 1127                                          (void) ddi_intr_disable(
1128 1128                                              sc->sc_intr_htable[i]);
1129 1129                                  }
1130 1130                                  goto out_enable;
1131 1131                          }
1132 1132                  }
1133 1133          }
1134 1134  
1135 1135          /* Bind the allocated MSI to the queues and config */
1136 1136          for (i = 0; i < vq_handler_count; i++) {
1137 1137                  int check;
1138 1138  
1139 1139                  ddi_put16(sc->sc_ioh,
1140 1140                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1141 1141                      (uint16_t *)(sc->sc_io_addr +
1142 1142                      VIRTIO_CONFIG_QUEUE_SELECT), i);
1143 1143  
1144 1144                  ddi_put16(sc->sc_ioh,
1145 1145                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1146 1146                      (uint16_t *)(sc->sc_io_addr +
1147 1147                      VIRTIO_CONFIG_QUEUE_VECTOR), i);
1148 1148  
1149 1149                  check = ddi_get16(sc->sc_ioh,
1150 1150                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1151 1151                      (uint16_t *)(sc->sc_io_addr +
1152 1152                      VIRTIO_CONFIG_QUEUE_VECTOR));
1153 1153                  if (check != i) {
1154 1154                          dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1155 1155                              "for VQ %d, MSI %d. Check = %x", i, i, check);
1156 1156                          ret = ENODEV;
1157 1157                          goto out_bind;
1158 1158                  }
1159 1159          }
1160 1160  
1161 1161          if (sc->sc_intr_config) {
1162 1162                  int check;
1163 1163  
1164 1164                  ddi_put16(sc->sc_ioh,
1165 1165                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1166 1166                      (uint16_t *)(sc->sc_io_addr +
1167 1167                      VIRTIO_CONFIG_CONFIG_VECTOR), i);
1168 1168  
1169 1169                  check = ddi_get16(sc->sc_ioh,
1170 1170                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1171 1171                      (uint16_t *)(sc->sc_io_addr +
1172 1172                      VIRTIO_CONFIG_CONFIG_VECTOR));
1173 1173                  if (check != i) {
1174 1174                          dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1175 1175                              "for Config updates, MSI %d", i);
1176 1176                          ret = ENODEV;
1177 1177                          goto out_bind;
1178 1178                  }
1179 1179          }
1180 1180  
1181 1181          return (DDI_SUCCESS);
1182 1182  
1183 1183  out_bind:
1184 1184          /* Unbind the vqs */
 1185 1185          for (i = 0; i < vq_handler_count; i++) {
1186 1186                  ddi_put16(sc->sc_ioh,
1187 1187                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1188 1188                      (uint16_t *)(sc->sc_io_addr +
1189 1189                      VIRTIO_CONFIG_QUEUE_SELECT), i);
1190 1190  
1191 1191                  ddi_put16(sc->sc_ioh,
1192 1192                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1193 1193                      (uint16_t *)(sc->sc_io_addr +
1194 1194                      VIRTIO_CONFIG_QUEUE_VECTOR),
1195 1195                      VIRTIO_MSI_NO_VECTOR);
1196 1196          }
1197 1197          /* And the config */
1198 1198          /* LINTED E_BAD_PTR_CAST_ALIGN */
1199 1199          ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1200 1200              VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);
1201 1201  
1202 1202          ret = DDI_FAILURE;
1203 1203  
1204 1204  out_enable:
1205 1205          return (ret);
1206 1206  }
1207 1207  
1208 1208  static int
1209 1209  virtio_enable_intx(struct virtio_softc *sc)
1210 1210  {
1211 1211          int ret;
1212 1212  
1213 1213          ret = ddi_intr_enable(sc->sc_intr_htable[0]);
1214 1214          if (ret != DDI_SUCCESS) {
1215 1215                  dev_err(sc->sc_dev, CE_WARN,
1216 1216                      "Failed to enable interrupt: %d", ret);
1217 1217          }
1218 1218  
1219 1219          return (ret);
1220 1220  }
1221 1221  
1222 1222  /*
1223 1223   * We can't enable/disable individual handlers in the INTx case so do
1224 1224   * the whole bunch even in the msi case.
1225 1225   */
1226 1226  int
1227 1227  virtio_enable_ints(struct virtio_softc *sc)
1228 1228  {
1229 1229  
1230 1230          /* See if we are using MSI. */
1231 1231          if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI)
1232 1232                  return (virtio_enable_msi(sc));
1233 1233  
1234 1234          ASSERT(sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI);
1235 1235  
1236 1236          return (virtio_enable_intx(sc));
1237 1237  }
1238 1238  
1239 1239  void
1240 1240  virtio_release_ints(struct virtio_softc *sc)
1241 1241  {
1242 1242          int i;
1243 1243          int ret;
1244 1244  
1245 1245          /* We were running with MSI, unbind them. */
1246 1246          if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
1247 1247                  /* Unbind all vqs */
1248 1248                  for (i = 0; i < sc->sc_nvqs; i++) {
1249 1249                          ddi_put16(sc->sc_ioh,
1250 1250                              /* LINTED E_BAD_PTR_CAST_ALIGN */
1251 1251                              (uint16_t *)(sc->sc_io_addr +
1252 1252                              VIRTIO_CONFIG_QUEUE_SELECT), i);
1253 1253  
1254 1254                          ddi_put16(sc->sc_ioh,
1255 1255                              /* LINTED E_BAD_PTR_CAST_ALIGN */
1256 1256                              (uint16_t *)(sc->sc_io_addr +
1257 1257                              VIRTIO_CONFIG_QUEUE_VECTOR),
1258 1258                              VIRTIO_MSI_NO_VECTOR);
1259 1259                  }
1260 1260                  /* And the config */
1261 1261                  /* LINTED E_BAD_PTR_CAST_ALIGN */
1262 1262                  ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1263 1263                      VIRTIO_CONFIG_CONFIG_VECTOR),
1264 1264                      VIRTIO_MSI_NO_VECTOR);
1265 1265  
1266 1266          }
1267 1267  
 1268 1268          /* Disable the interrupts. Either the whole block, or one by one. */
1269 1269          if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1270 1270                  ret = ddi_intr_block_disable(sc->sc_intr_htable,
1271 1271                      sc->sc_intr_num);
1272 1272                  if (ret != DDI_SUCCESS) {
1273 1273                          dev_err(sc->sc_dev, CE_WARN,
1274 1274                              "Failed to disable MSIs, won't be able to "
1275 1275                              "reuse next time");
1276 1276                  }
1277 1277          } else {
1278 1278                  for (i = 0; i < sc->sc_intr_num; i++) {
1279 1279                          ret = ddi_intr_disable(sc->sc_intr_htable[i]);
1280 1280                          if (ret != DDI_SUCCESS) {
1281 1281                                  dev_err(sc->sc_dev, CE_WARN,
1282 1282                                      "Failed to disable interrupt %d, "
1283 1283                                      "won't be able to reuse", i);
1284 1284                          }
1285 1285                  }
1286 1286          }
1287 1287  
1288 1288  
1289 1289          for (i = 0; i < sc->sc_intr_num; i++) {
1290 1290                  (void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
1291 1291          }
1292 1292  
1293 1293          for (i = 0; i < sc->sc_intr_num; i++)
1294 1294                  (void) ddi_intr_free(sc->sc_intr_htable[i]);
1295 1295  
1296 1296          kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) *
1297 1297              sc->sc_intr_num);
1298 1298  
1299 1299          /* After disabling interrupts, the config offset is non-MSI. */
1300 1300          sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1301 1301  }
1302 1302  
1303 1303  /*
1304 1304   * Module linkage information for the kernel.
1305 1305   */
1306 1306  static struct modlmisc modlmisc = {
1307 1307          &mod_miscops,   /* Type of module */
1308 1308          "VirtIO common library module",
1309 1309  };
1310 1310  
1311 1311  static struct modlinkage modlinkage = {
1312 1312          MODREV_1,
1313 1313          {
1314 1314                  (void *)&modlmisc,
1315 1315                  NULL
1316 1316          }
1317 1317  };
1318 1318  
1319 1319  int
1320 1320  _init(void)
1321 1321  {
1322 1322          return (mod_install(&modlinkage));
1323 1323  }
1324 1324  
1325 1325  int
1326 1326  _fini(void)
1327 1327  {
1328 1328          return (mod_remove(&modlinkage));
1329 1329  }
1330 1330  
1331 1331  int
1332 1332  _info(struct modinfo *modinfop)
1333 1333  {
1334 1334          return (mod_info(&modlinkage, modinfop));
1335 1335  }
  