Print this page
    
MFV: illumos-gate@d48defc5a82c2bc955d3c8c9a5a3c7ccea87d5d4
9793 vioblk attach doesn't always fail when it should
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Andy Fiddaman <omnios@citrus-it.net>
Approved by: Garrett D'Amore <garrett@damore.org>
Author: Hans Rosenfeld <hans.rosenfeld@joyent.com>
NEX-4424 kstat module needs cleanup
Reviewed by: Richard Elling <Richard.Elling@RichardElling.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
NEX-4420 format(1M) should be able to use device inquiry properties
Reviewed by: Dan McDonald <danmcd@omniti.com>
NEX-4419 blkdev and blkdev drivers should provide inquiry properties
Reviewed by: Garrett D'Amore <garrett@damore.org>
Reviewed by: Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
re #13879 make libsqlite a real shared lib (tweaks)
  Fix rebuild after pull (remove files left in the way)
  Make sqlite.h SQLITE_VERSION more predictable.
OS-5 Integrate virtio drivers
    integration cleanup (copyrights, cddl 1.0)
port of illumos-1147
    1147 integrate the virtio-block driver
    Reviewed by: Dmitry Yusupov <Dmitry.Yusupov@nexenta.com>
    Reviewed by: Gordon Ross <gordon.w.ross@gmail.com>
    Approved by: Garrett D'Amore <garrett@damore.org>
    
      
        | Split | 
	Close | 
      
      | Expand all | 
      | Collapse all | 
    
    
          --- old/usr/src/uts/common/io/vioblk/vioblk.c
          +++ new/usr/src/uts/common/io/vioblk/vioblk.c
   1    1  /*
   2      - * CDDL HEADER START
        2 + * This file and its contents are supplied under the terms of the
        3 + * Common Development and Distribution License ("CDDL"), version 1.0.
        4 + * You may only use this file in accordance with the terms of version
        5 + * 1.0 of the CDDL.
   3    6   *
   4      - * The contents of this file are subject to the terms of the
   5      - * Common Development and Distribution License (the "License").
   6      - * You may not use this file except in compliance with the License.
   7      - *
   8      - * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9      - * or http://www.opensolaris.org/os/licensing.
  10      - * See the License for the specific language governing permissions
  11      - * and limitations under the License.
  12      - *
  13      - * When distributing Covered Code, include this CDDL HEADER in each
  14      - * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15      - * If applicable, add the following below this CDDL HEADER, with the
  16      - * fields enclosed by brackets "[]" replaced with your own identifying
  17      - * information: Portions Copyright [yyyy] [name of copyright owner]
  18      - *
  19      - * CDDL HEADER END
        7 + * A full copy of the text of the CDDL should have accompanied this
        8 + * source.  A copy of the CDDL is also available via the Internet at
        9 + * http://www.illumos.org/license/CDDL.
  20   10   */
  21   11  
  22   12  /*
  23   13   * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
  24   14   * Copyright (c) 2012, Alexey Zaytsev <alexey.zaytsev@gmail.com>
       15 + * Copyright 2017, Joyent Inc.
  25   16   */
  26   17  
       18 +/*
       19 + * VirtIO block device driver
       20 + */
  27   21  
  28   22  #include <sys/modctl.h>
  29   23  #include <sys/blkdev.h>
  30   24  #include <sys/types.h>
  31   25  #include <sys/errno.h>
  32   26  #include <sys/param.h>
  33   27  #include <sys/stropts.h>
  34   28  #include <sys/stream.h>
  35   29  #include <sys/strsubr.h>
  36   30  #include <sys/kmem.h>
  37   31  #include <sys/conf.h>
  38   32  #include <sys/devops.h>
  39   33  #include <sys/ksynch.h>
  40   34  #include <sys/stat.h>
  41   35  #include <sys/modctl.h>
  42   36  #include <sys/debug.h>
  43   37  #include <sys/pci.h>
  44   38  #include <sys/sysmacros.h>
  45   39  #include "virtiovar.h"
  46   40  #include "virtioreg.h"
  47   41  
/* Feature bits (virtio block device; negotiated in vioblk_dev_features()) */
#define VIRTIO_BLK_F_BARRIER    (1<<0)  /* device supports request barriers */
#define VIRTIO_BLK_F_SIZE_MAX   (1<<1)  /* max segment size in config space */
#define VIRTIO_BLK_F_SEG_MAX    (1<<2)  /* max segment count in config space */
#define VIRTIO_BLK_F_GEOMETRY   (1<<4)  /* legacy C/H/S geometry in config */
#define VIRTIO_BLK_F_RO         (1<<5)  /* device is read-only */
#define VIRTIO_BLK_F_BLK_SIZE   (1<<6)  /* logical block size in config */
#define VIRTIO_BLK_F_SCSI       (1<<7)  /* supports SCSI passthrough cmds */
#define VIRTIO_BLK_F_FLUSH      (1<<9)  /* supports cache flush command */
#define VIRTIO_BLK_F_TOPOLOGY   (1<<10) /* topology information in config */

/*
 * Configuration registers: byte offsets into the device-specific
 * configuration space.
 */
#define VIRTIO_BLK_CONFIG_CAPACITY      0 /* 64bit */
#define VIRTIO_BLK_CONFIG_SIZE_MAX      8 /* 32bit */
#define VIRTIO_BLK_CONFIG_SEG_MAX       12 /* 32bit */
#define VIRTIO_BLK_CONFIG_GEOMETRY_C    16 /* 16bit */
#define VIRTIO_BLK_CONFIG_GEOMETRY_H    18 /* 8bit */
#define VIRTIO_BLK_CONFIG_GEOMETRY_S    19 /* 8bit */
#define VIRTIO_BLK_CONFIG_BLK_SIZE      20 /* 32bit */
#define VIRTIO_BLK_CONFIG_TOPO_PBEXP    24 /* 8bit */
#define VIRTIO_BLK_CONFIG_TOPO_ALIGN    25 /* 8bit */
#define VIRTIO_BLK_CONFIG_TOPO_MIN_SZ   26 /* 16bit */
#define VIRTIO_BLK_CONFIG_TOPO_OPT_SZ   28 /* 32bit */

/* Command types, written into vioblk_req_hdr.type */
#define VIRTIO_BLK_T_IN                 0
#define VIRTIO_BLK_T_OUT                1
#define VIRTIO_BLK_T_SCSI_CMD           2
#define VIRTIO_BLK_T_SCSI_CMD_OUT       3
#define VIRTIO_BLK_T_FLUSH              4
#define VIRTIO_BLK_T_FLUSH_OUT          5
#define VIRTIO_BLK_T_GET_ID             8
#define VIRTIO_BLK_T_BARRIER            0x80000000

#define VIRTIO_BLK_ID_BYTES     20 /* devid */

/* Statuses written back by the device into vioblk_req.status */
#define VIRTIO_BLK_S_OK         0
#define VIRTIO_BLK_S_IOERR      1
#define VIRTIO_BLK_S_UNSUPP     2

/* Defaults, presumably applied at attach (not visible here) — TODO confirm */
#define DEF_MAXINDIRECT         (128)
#define DEF_MAXSECTOR           (4096)

/*
 * Sentinel stored in req->xfer after completion so that a double
 * completion of the same descriptor can be detected (see
 * vioblk_int_handler()).
 */
#define VIOBLK_POISON           0xdead0001dead0001

/*
 * Static Variables.
 */
static char vioblk_ident[] = "VirtIO block driver"; /* modldrv description */
  98   92  
/*
 * Request header structure.  This is DMA-mapped and read directly by
 * the device, laid out per the virtio block request format.
 */
struct vioblk_req_hdr {
	uint32_t		type;	/* VIRTIO_BLK_T_* */
	uint32_t		ioprio;	/* request priority; always 0 here */
	uint64_t		sector;	/* starting sector (from x_blkno) */
};
 105   99  
/*
 * Per-descriptor request state.  One exists for each virtqueue entry
 * (indexed by qe_index); the header and status byte are DMA-mapped
 * through dmah/dmac so the device can read/write them.
 */
struct vioblk_req {
	struct vioblk_req_hdr	hdr;	/* header read by the device */
	uint8_t			status;	/* VIRTIO_BLK_S_*, written by device */
	uint8_t			unused[3];	/* padding */
	unsigned int		ndmac;	/* number of DMA cookies */
	ddi_dma_handle_t	dmah;	/* handle for hdr + status mapping */
	ddi_dma_handle_t	bd_dmah;	/* handle for blkdev payload */
	ddi_dma_cookie_t	dmac;	/* first cookie of hdr mapping */
	bd_xfer_t		*xfer;	/* owning blkdev transfer, or POISON */
};
 116  110  
/* Named kstats exported by the driver (see ks_data in the softc). */
struct vioblk_stats {
	struct kstat_named	sts_rw_outofmemory;	/* vq entry alloc failed */
	struct kstat_named	sts_rw_badoffset;	/* xfer past end of dev */
	struct kstat_named	sts_rw_queuemax;
	struct kstat_named	sts_rw_cookiesmax;	/* largest cookie chain */
	struct kstat_named	sts_rw_cacheflush;	/* flush requests done */
	struct kstat_named	sts_intr_queuemax;	/* most reqs per intr */
	struct kstat_named	sts_intr_total;		/* interrupt count */
	struct kstat_named	sts_io_errors;		/* VIRTIO_BLK_S_IOERR */
	struct kstat_named	sts_unsupp_errors;	/* VIRTIO_BLK_S_UNSUPP */
	struct kstat_named	sts_nxio_errors;	/* unknown status byte */
};
 129  123  
/*
 * Local (plain-integer) statistics updated on the I/O paths; mirrors
 * a subset of vioblk_stats.
 */
struct vioblk_lstats {
	uint64_t		rw_cacheflush;	/* completed cache flushes */
	uint64_t		intr_total;	/* interrupts handled */
	unsigned int		rw_cookiesmax;	/* largest chain submitted */
	unsigned int		intr_queuemax;	/* most completions per intr */
	unsigned int		io_errors;	/* VIRTIO_BLK_S_IOERR count */
	unsigned int		unsupp_errors;	/* VIRTIO_BLK_S_UNSUPP count */
	unsigned int		nxio_errors;	/* unknown status count */
};
 139  133  
/* Per-instance soft state. */
struct vioblk_softc {
	dev_info_t		*sc_dev; /* mirrors virtio_softc->sc_dev */
	struct virtio_softc	sc_virtio;	/* common virtio layer state */
	struct virtqueue	*sc_vq;		/* the single request queue */
	bd_handle_t		bd_h;		/* blkdev framework handle */
	struct vioblk_req	*sc_reqs;	/* one per virtqueue entry */
	struct vioblk_stats	*ks_data;	/* exported kstat data */
	kstat_t			*sc_intrstat;	/* kstat handle */
	uint64_t		sc_capacity;	/* raw capacity — TODO confirm units vs sc_nblks */
	uint64_t		sc_nblks;	/* number of logical blocks */
	struct vioblk_lstats	sc_stats;	/* local statistics */
	short			sc_blkflags;
	boolean_t		sc_in_poll_mode; /* vq interrupts disabled */
	boolean_t		sc_readonly;	/* reported via bd_media_t */
	int			sc_blk_size;	/* logical block size */
	int			sc_pblk_size;	/* physical block size */
	int			sc_seg_max;
	int			sc_seg_size_max;
	kmutex_t		lock_devid;	/* protects devid handshake */
	kcondvar_t		cv_devid;	/* signalled when GET_ID completes */
	char			devid[VIRTIO_BLK_ID_BYTES + 1];	/* NUL-terminated */
};
 162  156  
static int vioblk_get_id(struct vioblk_softc *sc);

/* blkdev entry points */
static int vioblk_read(void *arg, bd_xfer_t *xfer);
static int vioblk_write(void *arg, bd_xfer_t *xfer);
static int vioblk_flush(void *arg, bd_xfer_t *xfer);
static void vioblk_driveinfo(void *arg, bd_drive_t *drive);
static int vioblk_mediainfo(void *arg, bd_media_t *media);
static int vioblk_devid_init(void *, dev_info_t *, ddi_devid_t *);
/* Virtqueue interrupt handler; also called directly when polling. */
uint_t vioblk_int_handler(caddr_t arg1, caddr_t arg2);
 172  166  
/*
 * Ops vector registered with the blkdev framework (registration
 * happens at attach, not visible in this chunk).
 */
static bd_ops_t vioblk_ops = {
	BD_OPS_VERSION_0,
	vioblk_driveinfo,	/* drive info */
	vioblk_mediainfo,	/* media info */
	vioblk_devid_init,	/* devid init */
	vioblk_flush,		/* sync/flush cache */
	vioblk_read,		/* read */
	vioblk_write,		/* write */
};
 182  176  
/* autoconfiguration entry points */
static int vioblk_quiesce(dev_info_t *);
static int vioblk_attach(dev_info_t *, ddi_attach_cmd_t);
static int vioblk_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * dev_ops: no cb_ops of our own — device access is provided through
 * the blkdev framework (vioblk_ops above).
 */
static struct dev_ops vioblk_dev_ops = {
	DEVO_REV,
	0,
	ddi_no_info,
	nulldev,	/* identify */
	nulldev,	/* probe */
	vioblk_attach,	/* attach */
	vioblk_detach,	/* detach */
	nodev,		/* reset */
	NULL,		/* cb_ops */
	NULL,		/* bus_ops */
	NULL,		/* power */
	vioblk_quiesce	/* quiesce */
};
 201  195  
 202  196  
 203  197  
/*
 * Standard Module linkage initialization for a Streams driver
 * NOTE(review): the comment above says "Streams", but this is a
 * blkdev-based block driver — the boilerplate comment looks stale.
 */
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	vioblk_ident,    /* short description */
	&vioblk_dev_ops /* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	{
		(void *)&modldrv,
		NULL,
	},
};
 220  214  
/* Register-access attributes for mapping the device's config space. */
ddi_device_acc_attr_t vioblk_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,	/* virtio is always native byte order */
	DDI_STORECACHING_OK_ACC,
	DDI_DEFAULT_ACC
};
 227  221  
/*
 * DMA attr for the header/status blocks.  sgllen is 1: each request's
 * header+status must bind to a single cookie, since vioblk_rw() uses
 * only req->dmac for both.
 */
static ddi_dma_attr_t vioblk_req_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version     */
	0,				/* dma_attr_addr_lo     */
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi     */
	0x00000000FFFFFFFFull,		/* dma_attr_count_max   */
	1,				/* dma_attr_align       */
	1,				/* dma_attr_burstsizes  */
	1,				/* dma_attr_minxfer     */
	0xFFFFFFFFull,			/* dma_attr_maxxfer     */
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg         */
	1,				/* dma_attr_sgllen      */
	1,				/* dma_attr_granular    */
	0,				/* dma_attr_flags       */
};
 243  237  
/*
 * DMA attr for the data blocks.  maxxfer and sgllen are filled in at
 * attach time from the negotiated device limits.
 */
static ddi_dma_attr_t vioblk_bd_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version     */
	0,				/* dma_attr_addr_lo     */
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi     */
	0x00000000FFFFFFFFull,		/* dma_attr_count_max   */
	1,				/* dma_attr_align       */
	1,				/* dma_attr_burstsizes  */
	1,				/* dma_attr_minxfer     */
	0,				/* dma_attr_maxxfer, set in attach */
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg         */
	0,				/* dma_attr_sgllen, set in attach */
	1,				/* dma_attr_granular    */
	0,				/* dma_attr_flags       */
};
 259  253  
/*
 * Build and submit one virtio block request.
 *
 * Allocates a virtqueue entry, fills in the request header (type,
 * priority 0, starting sector), then chains the buffers in the order
 * the device expects: header, optional payload cookies, and the
 * one-byte status the device writes back.  The chain is pushed to the
 * device; completion is reported asynchronously via vioblk_int_handler().
 *
 * Returns 0 (DDI_SUCCESS) on successful submission, EINVAL if the
 * transfer extends past the end of the device, ENOMEM if no queue
 * entry is free.  NOTE(review): callers only test for non-zero, so
 * mixing DDI_SUCCESS with errno values works, but is worth unifying.
 */
static int
vioblk_rw(struct vioblk_softc *sc, bd_xfer_t *xfer, int type,
    uint32_t len)
{
	struct vioblk_req *req;
	struct vq_entry *ve_hdr;
	int total_cookies, write;

	/* Only "out" request types transfer data to the device. */
	write = (type == VIRTIO_BLK_T_OUT ||
	    type == VIRTIO_BLK_T_FLUSH_OUT) ? 1 : 0;
	/* Header and status always contribute two cookies. */
	total_cookies = 2;

	/* Reject transfers that run past the end of the device. */
	if ((xfer->x_blkno + xfer->x_nblks) > sc->sc_nblks) {
		sc->ks_data->sts_rw_badoffset.value.ui64++;
		return (EINVAL);
	}

	/* allocate top entry */
	ve_hdr = vq_alloc_entry(sc->sc_vq);
	if (!ve_hdr) {
		/* Queue is full; caller may retry later. */
		sc->ks_data->sts_rw_outofmemory.value.ui64++;
		return (ENOMEM);
	}

	/* getting request (one per queue entry, indexed by qe_index) */
	req = &sc->sc_reqs[ve_hdr->qe_index];
	req->hdr.type = type;
	req->hdr.ioprio = 0;
	req->hdr.sector = xfer->x_blkno;
	req->xfer = xfer;

	/* Header */
	virtio_ve_add_indirect_buf(ve_hdr, req->dmac.dmac_laddress,
	    sizeof (struct vioblk_req_hdr), B_TRUE);

	/* Payload */
	if (len > 0) {
		virtio_ve_add_cookie(ve_hdr, xfer->x_dmah, xfer->x_dmac,
		    xfer->x_ndmac, write ? B_TRUE : B_FALSE);
		total_cookies += xfer->x_ndmac;
	}

	/* Status byte, written by the device; follows the header in DMA. */
	virtio_ve_add_indirect_buf(ve_hdr,
	    req->dmac.dmac_laddress + sizeof (struct vioblk_req_hdr),
	    sizeof (uint8_t), B_FALSE);

	/* sending the whole chain to the device */
	virtio_push_chain(ve_hdr, B_TRUE);

	/* Track the largest cookie chain ever submitted (kstat). */
	if (sc->sc_stats.rw_cookiesmax < total_cookies)
		sc->sc_stats.rw_cookiesmax = total_cookies;

	return (DDI_SUCCESS);
}
 315  309  
 316  310  /*
 317  311   * Now in polling mode. Interrupts are off, so we
 318  312   * 1) poll for the already queued requests to complete.
 319  313   * 2) push our request.
 320  314   * 3) wait for our request to complete.
 321  315   */
 322  316  static int
 323  317  vioblk_rw_poll(struct vioblk_softc *sc, bd_xfer_t *xfer,
 324  318      int type, uint32_t len)
 325  319  {
 326  320          clock_t tmout;
 327  321          int ret;
 328  322  
 329  323          ASSERT(xfer->x_flags & BD_XFER_POLL);
 330  324  
 331  325          /* Prevent a hard hang. */
 332  326          tmout = drv_usectohz(30000000);
 333  327  
 334  328          /* Poll for an empty queue */
 335  329          while (vq_num_used(sc->sc_vq)) {
 336  330                  /* Check if any pending requests completed. */
 337  331                  ret = vioblk_int_handler((caddr_t)&sc->sc_virtio, NULL);
 338  332                  if (ret != DDI_INTR_CLAIMED) {
 339  333                          drv_usecwait(10);
 340  334                          tmout -= 10;
 341  335                          return (ETIMEDOUT);
 342  336                  }
 343  337          }
 344  338  
 345  339          ret = vioblk_rw(sc, xfer, type, len);
 346  340          if (ret)
 347  341                  return (ret);
 348  342  
 349  343          tmout = drv_usectohz(30000000);
 350  344          /* Poll for an empty queue again. */
 351  345          while (vq_num_used(sc->sc_vq)) {
 352  346                  /* Check if any pending requests completed. */
 353  347                  ret = vioblk_int_handler((caddr_t)&sc->sc_virtio, NULL);
 354  348                  if (ret != DDI_INTR_CLAIMED) {
 355  349                          drv_usecwait(10);
 356  350                          tmout -= 10;
 357  351                          return (ETIMEDOUT);
 358  352                  }
 359  353          }
 360  354  
 361  355          return (DDI_SUCCESS);
 362  356  }
 363  357  
 364  358  static int
 365  359  vioblk_read(void *arg, bd_xfer_t *xfer)
 366  360  {
 367  361          int ret;
 368  362          struct vioblk_softc *sc = (void *)arg;
 369  363  
 370  364          if (xfer->x_flags & BD_XFER_POLL) {
 371  365                  if (!sc->sc_in_poll_mode) {
 372  366                          virtio_stop_vq_intr(sc->sc_vq);
 373  367                          sc->sc_in_poll_mode = 1;
 374  368                  }
 375  369  
 376  370                  ret = vioblk_rw_poll(sc, xfer, VIRTIO_BLK_T_IN,
 377  371                      xfer->x_nblks * DEV_BSIZE);
 378  372          } else {
 379  373                  if (sc->sc_in_poll_mode) {
 380  374                          virtio_start_vq_intr(sc->sc_vq);
 381  375                          sc->sc_in_poll_mode = 0;
 382  376                  }
 383  377  
 384  378                  ret = vioblk_rw(sc, xfer, VIRTIO_BLK_T_IN,
 385  379                      xfer->x_nblks * DEV_BSIZE);
 386  380          }
 387  381  
 388  382          return (ret);
 389  383  }
 390  384  
 391  385  static int
 392  386  vioblk_write(void *arg, bd_xfer_t *xfer)
 393  387  {
 394  388          int ret;
 395  389          struct vioblk_softc *sc = (void *)arg;
 396  390  
 397  391          if (xfer->x_flags & BD_XFER_POLL) {
 398  392                  if (!sc->sc_in_poll_mode) {
 399  393                          virtio_stop_vq_intr(sc->sc_vq);
 400  394                          sc->sc_in_poll_mode = 1;
 401  395                  }
 402  396  
 403  397                  ret = vioblk_rw_poll(sc, xfer, VIRTIO_BLK_T_OUT,
 404  398                      xfer->x_nblks * DEV_BSIZE);
 405  399          } else {
 406  400                  if (sc->sc_in_poll_mode) {
 407  401                          virtio_start_vq_intr(sc->sc_vq);
 408  402                          sc->sc_in_poll_mode = 0;
 409  403                  }
 410  404  
 411  405                  ret = vioblk_rw(sc, xfer, VIRTIO_BLK_T_OUT,
 412  406                      xfer->x_nblks * DEV_BSIZE);
 413  407          }
 414  408          return (ret);
 415  409  }
 416  410  
 417  411  static int
 418  412  vioblk_flush(void *arg, bd_xfer_t *xfer)
 419  413  {
 420  414          int ret;
 421  415          struct vioblk_softc *sc = (void *)arg;
 422  416  
 423  417          ASSERT((xfer->x_flags & BD_XFER_POLL) == 0);
 424  418  
 425  419          ret = vioblk_rw(sc, xfer, VIRTIO_BLK_T_FLUSH_OUT,
 426  420              xfer->x_nblks * DEV_BSIZE);
 427  421  
 428  422          if (!ret)
 429  423                  sc->sc_stats.rw_cacheflush++;
 430  424  
 431  425          return (ret);
 432  426  }
 433  427  
 434  428  
 435  429  static void
 436  430  vioblk_driveinfo(void *arg, bd_drive_t *drive)
 437  431  {
 438  432          struct vioblk_softc *sc = (void *)arg;
 439  433  
 440  434          drive->d_qsize = sc->sc_vq->vq_num;
 441  435          drive->d_removable = B_FALSE;
 442  436          drive->d_hotpluggable = B_TRUE;
 443  437          drive->d_target = 0;
 444  438          drive->d_lun = 0;
 445  439  
 446  440          drive->d_vendor = "Virtio";
 447  441          drive->d_vendor_len = strlen(drive->d_vendor);
 448  442  
 449  443          drive->d_product = "Block Device";
 450  444          drive->d_product_len = strlen(drive->d_product);
 451  445  
 452  446          (void) vioblk_get_id(sc);
 453  447          drive->d_serial = sc->devid;
 454  448          drive->d_serial_len = strlen(drive->d_serial);
 455  449  
 456  450          drive->d_revision = "0000";
 457  451          drive->d_revision_len = strlen(drive->d_revision);
 458  452  }
 459  453  
 460  454  static int
 461  455  vioblk_mediainfo(void *arg, bd_media_t *media)
 462  456  {
 463  457          struct vioblk_softc *sc = (void *)arg;
 464  458  
 465  459          media->m_nblks = sc->sc_nblks;
 466  460          media->m_blksize = sc->sc_blk_size;
 467  461          media->m_readonly = sc->sc_readonly;
 468  462          media->m_pblksize = sc->sc_pblk_size;
 469  463          return (0);
 470  464  }
 471  465  
/*
 * Fetch the 20-byte device id ("serial number") from the host into
 * sc->devid.  DMA-maps the devid buffer for read, submits a
 * VIRTIO_BLK_T_GET_ID request through the normal request path, and
 * waits up to 3 seconds on cv_devid for vioblk_int_handler() to signal
 * completion.
 *
 * Returns 0 on success, DDI_FAILURE on timeout or bind failure,
 * otherwise the error from handle allocation or request submission.
 */
static int
vioblk_get_id(struct vioblk_softc *sc)
{
	clock_t deadline;
	int ret;
	bd_xfer_t xfer;

	/* Absolute deadline for cv_timedwait: now + 3 seconds. */
	deadline = ddi_get_lbolt() + (clock_t)drv_usectohz(3 * 1000000);
	(void) memset(&xfer, 0, sizeof (bd_xfer_t));
	xfer.x_nblks = 1;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &vioblk_bd_dma_attr,
	    DDI_DMA_SLEEP, NULL, &xfer.x_dmah);
	if (ret != DDI_SUCCESS)
		goto out_alloc;

	/* Bind sc->devid so the device can DMA the id straight into it. */
	ret = ddi_dma_addr_bind_handle(xfer.x_dmah, NULL, (caddr_t)&sc->devid,
	    VIRTIO_BLK_ID_BYTES, DDI_DMA_READ | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &xfer.x_dmac, &xfer.x_ndmac);
	if (ret != DDI_DMA_MAPPED) {
		ret = DDI_FAILURE;
		goto out_map;
	}

	/* Hold the lock across submit+wait so the wakeup cannot be lost. */
	mutex_enter(&sc->lock_devid);

	ret = vioblk_rw(sc, &xfer, VIRTIO_BLK_T_GET_ID,
	    VIRTIO_BLK_ID_BYTES);
	if (ret) {
		mutex_exit(&sc->lock_devid);
		goto out_rw;
	}

	/* wait for reply */
	ret = cv_timedwait(&sc->cv_devid, &sc->lock_devid, deadline);
	mutex_exit(&sc->lock_devid);

	(void) ddi_dma_unbind_handle(xfer.x_dmah);
	ddi_dma_free_handle(&xfer.x_dmah);

	/* timeout: cv_timedwait returns a negative value on expiry */
	if (ret < 0) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Cannot get devid from the device");
		return (DDI_FAILURE);
	}

	return (0);

out_rw:
	(void) ddi_dma_unbind_handle(xfer.x_dmah);
out_map:
	ddi_dma_free_handle(&xfer.x_dmah);
out_alloc:
	return (ret);
}
 528  522  
/*
 * blkdev devid-init entry point: fetch the device id from the host and
 * register it with the DDI as an ATA-serial-style devid.
 *
 * Returns 0 on success or the failure code from vioblk_get_id() /
 * ddi_devid_init().
 */
static int
vioblk_devid_init(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
{
	struct vioblk_softc *sc = (void *)arg;
	int ret;

	ret = vioblk_get_id(sc);
	if (ret != DDI_SUCCESS)
		return (ret);

	ret = ddi_devid_init(devinfo, DEVID_ATA_SERIAL,
	    VIRTIO_BLK_ID_BYTES, sc->devid, devid);
	if (ret != DDI_SUCCESS) {
		dev_err(devinfo, CE_WARN, "Cannot build devid from the device");
		return (ret);
	}

	/* Debug dump of all 20 id bytes. */
	dev_debug(sc->sc_dev, CE_NOTE,
	    "devid %x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x",
	    sc->devid[0], sc->devid[1], sc->devid[2], sc->devid[3],
	    sc->devid[4], sc->devid[5], sc->devid[6], sc->devid[7],
	    sc->devid[8], sc->devid[9], sc->devid[10], sc->devid[11],
	    sc->devid[12], sc->devid[13], sc->devid[14], sc->devid[15],
	    sc->devid[16], sc->devid[17], sc->devid[18], sc->devid[19]);

	return (0);
}
 556  550  
 557  551  static void
 558  552  vioblk_show_features(struct vioblk_softc *sc, const char *prefix,
 559  553      uint32_t features)
 560  554  {
 561  555          char buf[512];
 562  556          char *bufp = buf;
 563  557          char *bufend = buf + sizeof (buf);
 564  558  
 565  559          /* LINTED E_PTRDIFF_OVERFLOW */
 566  560          bufp += snprintf(bufp, bufend - bufp, prefix);
 567  561  
 568  562          /* LINTED E_PTRDIFF_OVERFLOW */
 569  563          bufp += virtio_show_features(features, bufp, bufend - bufp);
 570  564  
 571  565  
 572  566          /* LINTED E_PTRDIFF_OVERFLOW */
 573  567          bufp += snprintf(bufp, bufend - bufp, "Vioblk ( ");
 574  568  
 575  569          if (features & VIRTIO_BLK_F_BARRIER)
 576  570                  /* LINTED E_PTRDIFF_OVERFLOW */
 577  571                  bufp += snprintf(bufp, bufend - bufp, "BARRIER ");
 578  572          if (features & VIRTIO_BLK_F_SIZE_MAX)
 579  573                  /* LINTED E_PTRDIFF_OVERFLOW */
 580  574                  bufp += snprintf(bufp, bufend - bufp, "SIZE_MAX ");
 581  575          if (features & VIRTIO_BLK_F_SEG_MAX)
 582  576                  /* LINTED E_PTRDIFF_OVERFLOW */
 583  577                  bufp += snprintf(bufp, bufend - bufp, "SEG_MAX ");
 584  578          if (features & VIRTIO_BLK_F_GEOMETRY)
 585  579                  /* LINTED E_PTRDIFF_OVERFLOW */
 586  580                  bufp += snprintf(bufp, bufend - bufp, "GEOMETRY ");
 587  581          if (features & VIRTIO_BLK_F_RO)
 588  582                  /* LINTED E_PTRDIFF_OVERFLOW */
 589  583                  bufp += snprintf(bufp, bufend - bufp, "RO ");
 590  584          if (features & VIRTIO_BLK_F_BLK_SIZE)
 591  585                  /* LINTED E_PTRDIFF_OVERFLOW */
 592  586                  bufp += snprintf(bufp, bufend - bufp, "BLK_SIZE ");
 593  587          if (features & VIRTIO_BLK_F_SCSI)
 594  588                  /* LINTED E_PTRDIFF_OVERFLOW */
 595  589                  bufp += snprintf(bufp, bufend - bufp, "SCSI ");
 596  590          if (features & VIRTIO_BLK_F_FLUSH)
 597  591                  /* LINTED E_PTRDIFF_OVERFLOW */
 598  592                  bufp += snprintf(bufp, bufend - bufp, "FLUSH ");
 599  593          if (features & VIRTIO_BLK_F_TOPOLOGY)
 600  594                  /* LINTED E_PTRDIFF_OVERFLOW */
 601  595                  bufp += snprintf(bufp, bufend - bufp, "TOPOLOGY ");
 602  596  
 603  597          /* LINTED E_PTRDIFF_OVERFLOW */
 604  598          bufp += snprintf(bufp, bufend - bufp, ")");
 605  599          *bufp = '\0';
 606  600  
 607  601          dev_debug(sc->sc_dev, CE_NOTE, "%s", buf);
 608  602  }
 609  603  
/*
 * Negotiate the feature set with the host.  Indirect descriptors are
 * mandatory for this driver — vioblk_rw() builds every request with
 * virtio_ve_add_indirect_buf() — so attach must fail without them.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
vioblk_dev_features(struct vioblk_softc *sc)
{
	uint32_t host_features;

	host_features = virtio_negotiate_features(&sc->sc_virtio,
	    VIRTIO_BLK_F_RO |
	    VIRTIO_BLK_F_GEOMETRY |
	    VIRTIO_BLK_F_BLK_SIZE |
	    VIRTIO_BLK_F_FLUSH |
	    VIRTIO_BLK_F_TOPOLOGY |
	    VIRTIO_BLK_F_SEG_MAX |
	    VIRTIO_BLK_F_SIZE_MAX |
	    VIRTIO_F_RING_INDIRECT_DESC);

	vioblk_show_features(sc, "Host features: ", host_features);
	vioblk_show_features(sc, "Negotiated features: ",
	    sc->sc_virtio.sc_features);

	if (!(sc->sc_virtio.sc_features & VIRTIO_F_RING_INDIRECT_DESC)) {
		dev_err(sc->sc_dev, CE_NOTE,
		    "Host does not support RING_INDIRECT_DESC, bye.");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
 637  631  
/*
 * Virtqueue interrupt handler; also called directly from
 * vioblk_rw_poll() when running with interrupts disabled.
 *
 * Pulls every completed chain off the queue, maps the virtio status
 * byte onto an errno, and completes the blkdev transfer — except for
 * GET_ID requests, which instead wake the waiter in vioblk_get_id().
 * Always returns DDI_INTR_CLAIMED.
 */
/* ARGSUSED */
uint_t
vioblk_int_handler(caddr_t arg1, caddr_t arg2)
{
	struct virtio_softc *vsc = (void *)arg1;
	struct vioblk_softc *sc = container_of(vsc,
	    struct vioblk_softc, sc_virtio);
	struct vq_entry *ve;
	uint32_t len;
	int i = 0, error;

	while ((ve = virtio_pull_chain(sc->sc_vq, &len))) {
		struct vioblk_req *req = &sc->sc_reqs[ve->qe_index];
		bd_xfer_t *xfer = req->xfer;
		uint8_t status = req->status;
		uint32_t type = req->hdr.type;

		/*
		 * A poisoned xfer pointer means this descriptor was
		 * already completed once — bail out loudly.
		 */
		if (req->xfer == (void *)VIOBLK_POISON) {
			dev_err(sc->sc_dev, CE_WARN, "Poisoned descriptor!");
			virtio_free_chain(ve);
			return (DDI_INTR_CLAIMED);
		}

		/* Mark the request complete to catch double completion. */
		req->xfer = (void *) VIOBLK_POISON;

		/* Note: blkdev tears down the payload mapping for us. */
		virtio_free_chain(ve);

		/* returning payload back to blkdev */
		switch (status) {
			case VIRTIO_BLK_S_OK:
				error = 0;
				break;
			case VIRTIO_BLK_S_IOERR:
				error = EIO;
				sc->sc_stats.io_errors++;
				break;
			case VIRTIO_BLK_S_UNSUPP:
				sc->sc_stats.unsupp_errors++;
				error = ENOTTY;
				break;
			default:
				/* Unknown status byte from the device. */
				sc->sc_stats.nxio_errors++;
				error = ENXIO;
				break;
		}

		if (type == VIRTIO_BLK_T_GET_ID) {
			/* notify devid_init */
			mutex_enter(&sc->lock_devid);
			cv_broadcast(&sc->cv_devid);
			mutex_exit(&sc->lock_devid);
		} else
			bd_xfer_done(xfer, error);

		i++;
	}

	/* update stats: track most completions handled in one pass */
	if (sc->sc_stats.intr_queuemax < i)
		sc->sc_stats.intr_queuemax = i;
	sc->sc_stats.intr_total++;

	return (DDI_INTR_CLAIMED);
}
 703  697  
/* ARGSUSED */
/*
 * Configuration-change interrupt handler.  Config-space changes are
 * not acted upon here; the interrupt is simply claimed.
 */
uint_t
vioblk_config_handler(caddr_t arg1, caddr_t arg2)
{
	return (DDI_INTR_CLAIMED);
}
 710  704  
 711  705  static int
 712  706  vioblk_register_ints(struct vioblk_softc *sc)
 713  707  {
 714  708          int ret;
 715  709  
 716  710          struct virtio_int_handler vioblk_conf_h = {
 717  711                  vioblk_config_handler
 718  712          };
 719  713  
 720  714          struct virtio_int_handler vioblk_vq_h[] = {
 721  715                  { vioblk_int_handler },
 722  716                  { NULL },
 723  717          };
 724  718  
 725  719          ret = virtio_register_ints(&sc->sc_virtio,
 726  720              &vioblk_conf_h, vioblk_vq_h);
 727  721  
 728  722          return (ret);
 729  723  }
 730  724  
 731  725  static void
 732  726  vioblk_free_reqs(struct vioblk_softc *sc)
 733  727  {
 734  728          int i, qsize;
 735  729  
 736  730          qsize = sc->sc_vq->vq_num;
 737  731  
 738  732          for (i = 0; i < qsize; i++) {
 739  733                  struct vioblk_req *req = &sc->sc_reqs[i];
 740  734  
 741  735                  if (req->ndmac)
 742  736                          (void) ddi_dma_unbind_handle(req->dmah);
 743  737  
 744  738                  if (req->dmah)
 745  739                          ddi_dma_free_handle(&req->dmah);
 746  740          }
 747  741  
 748  742          kmem_free(sc->sc_reqs, sizeof (struct vioblk_req) * qsize);
 749  743  }
 750  744  
 751  745  static int
 752  746  vioblk_alloc_reqs(struct vioblk_softc *sc)
 753  747  {
 754  748          int i, qsize;
 755  749          int ret;
 756  750  
 757  751          qsize = sc->sc_vq->vq_num;
 758  752  
 759  753          sc->sc_reqs = kmem_zalloc(sizeof (struct vioblk_req) * qsize, KM_SLEEP);
 760  754  
 761  755          for (i = 0; i < qsize; i++) {
 762  756                  struct vioblk_req *req = &sc->sc_reqs[i];
 763  757  
 764  758                  ret = ddi_dma_alloc_handle(sc->sc_dev, &vioblk_req_dma_attr,
 765  759                      DDI_DMA_SLEEP, NULL, &req->dmah);
 766  760                  if (ret != DDI_SUCCESS) {
 767  761  
 768  762                          dev_err(sc->sc_dev, CE_WARN,
 769  763                              "Can't allocate dma handle for req "
 770  764                              "buffer %d", i);
 771  765                          goto exit;
 772  766                  }
 773  767  
 774  768                  ret = ddi_dma_addr_bind_handle(req->dmah, NULL,
 775  769                      (caddr_t)&req->hdr,
 776  770                      sizeof (struct vioblk_req_hdr) + sizeof (uint8_t),
 777  771                      DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
 778  772                      NULL, &req->dmac, &req->ndmac);
 779  773                  if (ret != DDI_DMA_MAPPED) {
 780  774                          dev_err(sc->sc_dev, CE_WARN,
 781  775                              "Can't bind req buffer %d", i);
 782  776                          goto exit;
 783  777                  }
 784  778          }
 785  779  
 786  780          return (0);
 787  781  
 788  782  exit:
 789  783          vioblk_free_reqs(sc);
 790  784          return (ENOMEM);
 791  785  }
 792  786  
 793  787  
 794  788  static int
 795  789  vioblk_ksupdate(kstat_t *ksp, int rw)
 796  790  {
 797  791          struct vioblk_softc *sc = ksp->ks_private;
 798  792  
 799  793          if (rw == KSTAT_WRITE)
 800  794                  return (EACCES);
 801  795  
 802  796          sc->ks_data->sts_rw_cookiesmax.value.ui32 = sc->sc_stats.rw_cookiesmax;
 803  797          sc->ks_data->sts_intr_queuemax.value.ui32 = sc->sc_stats.intr_queuemax;
 804  798          sc->ks_data->sts_unsupp_errors.value.ui32 = sc->sc_stats.unsupp_errors;
 805  799          sc->ks_data->sts_nxio_errors.value.ui32 = sc->sc_stats.nxio_errors;
 806  800          sc->ks_data->sts_io_errors.value.ui32 = sc->sc_stats.io_errors;
 807  801          sc->ks_data->sts_rw_cacheflush.value.ui64 = sc->sc_stats.rw_cacheflush;
 808  802          sc->ks_data->sts_intr_total.value.ui64 = sc->sc_stats.intr_total;
 809  803  
 810  804  
 811  805          return (0);
 812  806  }
 813  807  
/*
 * attach(9E) entry point.  Sets up soft state, kstats, the BAR0
 * register mapping, interrupts, device feature negotiation, the I/O
 * virtqueue and its request array, then hands the device to blkdev.
 * On any failure the partially constructed state is unwound in reverse
 * order via the goto ladder at the bottom and DDI_FAILURE is returned
 * (illumos 9793: attach must never return success on an error path).
 */
static int
vioblk_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	int ret = DDI_SUCCESS;
	int instance;
	struct vioblk_softc *sc;
	struct virtio_softc *vsc;
	struct vioblk_stats *ks_data;

	instance = ddi_get_instance(devinfo);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
	case DDI_PM_RESUME:
		dev_err(devinfo, CE_WARN, "resume not supported yet");
		return (DDI_FAILURE);

	default:
		dev_err(devinfo, CE_WARN, "cmd 0x%x not recognized", cmd);
		return (DDI_FAILURE);
	}

	sc = kmem_zalloc(sizeof (struct vioblk_softc), KM_SLEEP);
	ddi_set_driver_private(devinfo, sc);

	vsc = &sc->sc_virtio;

	/* Duplicate for faster access / less typing */
	sc->sc_dev = devinfo;
	vsc->sc_dev = devinfo;

	cv_init(&sc->cv_devid, NULL, CV_DRIVER, NULL);
	mutex_init(&sc->lock_devid, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Initialize interrupt kstat.  This should not normally fail, since
	 * we don't use a persistent stat.  We do it this way to avoid having
	 * to test for it at run time on the hot path.
	 */
	/*
	 * NOTE(review): the comment above says no persistent stat is used,
	 * yet KSTAT_FLAG_PERSISTENT is passed below — confirm which is
	 * intended.
	 */
	sc->sc_intrstat = kstat_create("vioblk", instance,
	    "intrs", "controller", KSTAT_TYPE_NAMED,
	    sizeof (struct vioblk_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_PERSISTENT);
	if (sc->sc_intrstat == NULL) {
		dev_err(devinfo, CE_WARN, "kstat_create failed");
		goto exit_intrstat;
	}
	ks_data = (struct vioblk_stats *)sc->sc_intrstat->ks_data;
	kstat_named_init(&ks_data->sts_rw_outofmemory,
	    "total_rw_outofmemory", KSTAT_DATA_UINT64);
	kstat_named_init(&ks_data->sts_rw_badoffset,
	    "total_rw_badoffset", KSTAT_DATA_UINT64);
	kstat_named_init(&ks_data->sts_intr_total,
	    "total_intr", KSTAT_DATA_UINT64);
	kstat_named_init(&ks_data->sts_io_errors,
	    "total_io_errors", KSTAT_DATA_UINT32);
	kstat_named_init(&ks_data->sts_unsupp_errors,
	    "total_unsupp_errors", KSTAT_DATA_UINT32);
	kstat_named_init(&ks_data->sts_nxio_errors,
	    "total_nxio_errors", KSTAT_DATA_UINT32);
	kstat_named_init(&ks_data->sts_rw_cacheflush,
	    "total_rw_cacheflush", KSTAT_DATA_UINT64);
	kstat_named_init(&ks_data->sts_rw_cookiesmax,
	    "max_rw_cookies", KSTAT_DATA_UINT32);
	kstat_named_init(&ks_data->sts_intr_queuemax,
	    "max_intr_queue", KSTAT_DATA_UINT32);
	sc->ks_data = ks_data;
	sc->sc_intrstat->ks_private = sc;
	sc->sc_intrstat->ks_update = vioblk_ksupdate;
	kstat_install(sc->sc_intrstat);

	/* map BAR0 */
	ret = ddi_regs_map_setup(devinfo, 1,
	    (caddr_t *)&sc->sc_virtio.sc_io_addr,
	    0, 0, &vioblk_attr, &sc->sc_virtio.sc_ioh);
	if (ret != DDI_SUCCESS) {
		dev_err(devinfo, CE_WARN, "unable to map bar0: [%d]", ret);
		goto exit_map;
	}

	/* Standard virtio handshake: reset, then ACK and DRIVER status. */
	virtio_device_reset(&sc->sc_virtio);
	virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	if (vioblk_register_ints(sc)) {
		dev_err(devinfo, CE_WARN, "Unable to add interrupt");
		goto exit_int;
	}

	ret = vioblk_dev_features(sc);
	if (ret)
		goto exit_features;

	if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_RO)
		sc->sc_readonly = B_TRUE;
	else
		sc->sc_readonly = B_FALSE;

	sc->sc_capacity = virtio_read_device_config_8(&sc->sc_virtio,
	    VIRTIO_BLK_CONFIG_CAPACITY);
	sc->sc_nblks = sc->sc_capacity;

	/* Logical block size: device-supplied if offered, else DEV_BSIZE. */
	sc->sc_blk_size = DEV_BSIZE;
	if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_BLK_SIZE) {
		sc->sc_blk_size = virtio_read_device_config_4(&sc->sc_virtio,
		    VIRTIO_BLK_CONFIG_BLK_SIZE);
	}

	/* Physical block size from topology (power-of-two exponent). */
	sc->sc_pblk_size = sc->sc_blk_size;
	if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_TOPOLOGY) {
		sc->sc_pblk_size <<= virtio_read_device_config_1(&sc->sc_virtio,
		    VIRTIO_BLK_CONFIG_TOPO_PBEXP);
	}

	/* Flushing is not supported. */
	if (!(sc->sc_virtio.sc_features & VIRTIO_BLK_F_FLUSH)) {
		vioblk_ops.o_sync_cache = NULL;
	}

	sc->sc_seg_max = DEF_MAXINDIRECT;
	/* The max number of segments (cookies) in a request */
	if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_SEG_MAX) {
		sc->sc_seg_max = virtio_read_device_config_4(&sc->sc_virtio,
		    VIRTIO_BLK_CONFIG_SEG_MAX);

		/* That's what Linux does. */
		if (!sc->sc_seg_max)
			sc->sc_seg_max = 1;

		/*
		 * SEG_MAX corresponds to the number of _data_
		 * blocks in a request
		 */
		sc->sc_seg_max += 2;
	}
	/* 2 descriptors taken for header/status */
	vioblk_bd_dma_attr.dma_attr_sgllen = sc->sc_seg_max - 2;


	/* The maximum size for a cookie in a request. */
	sc->sc_seg_size_max = DEF_MAXSECTOR;
	if (sc->sc_virtio.sc_features & VIRTIO_BLK_F_SIZE_MAX) {
		sc->sc_seg_size_max = virtio_read_device_config_4(
		    &sc->sc_virtio, VIRTIO_BLK_CONFIG_SIZE_MAX);
	}

	/* The maximum request size */
	vioblk_bd_dma_attr.dma_attr_maxxfer =
	    vioblk_bd_dma_attr.dma_attr_sgllen * sc->sc_seg_size_max;

	dev_debug(devinfo, CE_NOTE,
	    "nblks=%" PRIu64 " blksize=%d (%d) num_seg=%d, "
	    "seg_size=%d, maxxfer=%" PRIu64,
	    sc->sc_nblks, sc->sc_blk_size, sc->sc_pblk_size,
	    vioblk_bd_dma_attr.dma_attr_sgllen,
	    sc->sc_seg_size_max,
	    vioblk_bd_dma_attr.dma_attr_maxxfer);


	sc->sc_vq = virtio_alloc_vq(&sc->sc_virtio, 0, 0,
	    sc->sc_seg_max, "I/O request");
	if (sc->sc_vq == NULL) {
		goto exit_alloc1;
	}

	ret = vioblk_alloc_reqs(sc);
	if (ret) {
		goto exit_alloc2;
	}

	sc->bd_h = bd_alloc_handle(sc, &vioblk_ops, &vioblk_bd_dma_attr,
	    KM_SLEEP);


	/* Device is fully set up; enable interrupts and go live. */
	virtio_set_status(&sc->sc_virtio,
	    VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
	virtio_start_vq_intr(sc->sc_vq);

	ret = virtio_enable_ints(&sc->sc_virtio);
	if (ret)
		goto exit_enable_ints;

	ret = bd_attach_handle(devinfo, sc->bd_h);
	if (ret != DDI_SUCCESS) {
		dev_err(devinfo, CE_WARN, "Failed to attach blkdev");
		goto exit_attach_bd;
	}

	return (DDI_SUCCESS);

	/* Error unwind: each label undoes the steps completed before it. */
exit_attach_bd:
	/*
	 * There is no virtio_disable_ints(), it's done in virtio_release_ints.
	 * If they ever get split, don't forget to add a call here.
	 */
exit_enable_ints:
	virtio_stop_vq_intr(sc->sc_vq);
	bd_free_handle(sc->bd_h);
	vioblk_free_reqs(sc);
exit_alloc2:
	virtio_free_vq(sc->sc_vq);
exit_alloc1:
exit_features:
	virtio_release_ints(&sc->sc_virtio);
exit_int:
	virtio_set_status(&sc->sc_virtio, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
	ddi_regs_map_free(&sc->sc_virtio.sc_ioh);
exit_map:
	kstat_delete(sc->sc_intrstat);
exit_intrstat:
	mutex_destroy(&sc->lock_devid);
	cv_destroy(&sc->cv_devid);
	kmem_free(sc, sizeof (struct vioblk_softc));
	return (DDI_FAILURE);
}
1035 1026  
1036 1027  static int
1037 1028  vioblk_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1038 1029  {
1039 1030          struct vioblk_softc *sc = ddi_get_driver_private(devinfo);
1040 1031  
1041 1032          switch (cmd) {
1042 1033          case DDI_DETACH:
1043 1034                  break;
1044 1035  
1045 1036          case DDI_PM_SUSPEND:
1046 1037                  cmn_err(CE_WARN, "suspend not supported yet");
1047 1038                  return (DDI_FAILURE);
1048 1039  
1049 1040          default:
1050 1041                  cmn_err(CE_WARN, "cmd 0x%x unrecognized", cmd);
1051 1042                  return (DDI_FAILURE);
1052 1043          }
1053 1044  
1054 1045          (void) bd_detach_handle(sc->bd_h);
1055 1046          virtio_stop_vq_intr(sc->sc_vq);
1056 1047          virtio_release_ints(&sc->sc_virtio);
1057 1048          vioblk_free_reqs(sc);
1058 1049          virtio_free_vq(sc->sc_vq);
1059 1050          virtio_device_reset(&sc->sc_virtio);
1060 1051          ddi_regs_map_free(&sc->sc_virtio.sc_ioh);
1061 1052          kstat_delete(sc->sc_intrstat);
1062 1053          kmem_free(sc, sizeof (struct vioblk_softc));
1063 1054  
1064 1055          return (DDI_SUCCESS);
1065 1056  }
1066 1057  
/*
 * quiesce(9E) entry point for fast reboot: stop virtqueue interrupts
 * and reset the device.
 */
static int
vioblk_quiesce(dev_info_t *devinfo)
{
	struct vioblk_softc *sc = ddi_get_driver_private(devinfo);

	virtio_stop_vq_intr(sc->sc_vq);
	virtio_device_reset(&sc->sc_virtio);

	return (DDI_SUCCESS);
}
1077 1068  
1078 1069  int
1079 1070  _init(void)
1080 1071  {
1081 1072          int rv;
1082 1073  
1083 1074          bd_mod_init(&vioblk_dev_ops);
1084 1075  
1085 1076          if ((rv = mod_install(&modlinkage)) != 0) {
1086 1077                  bd_mod_fini(&vioblk_dev_ops);
1087 1078          }
1088 1079  
1089 1080          return (rv);
1090 1081  }
1091 1082  
1092 1083  int
1093 1084  _fini(void)
1094 1085  {
1095 1086          int rv;
1096 1087  
1097 1088          if ((rv = mod_remove(&modlinkage)) == 0) {
1098 1089                  bd_mod_fini(&vioblk_dev_ops);
1099 1090          }
1100 1091  
1101 1092          return (rv);
1102 1093  }
1103 1094  
/*
 * _info(9E): report module information from the common modlinkage.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
  
    | 
      ↓ open down ↓ | 
    65 lines elided | 
    
      ↑ open up ↑ | 
  
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX