MFV: illumos-gate@54b146cf23443d91aef04e2d2a59b7434add3030
7096 vioif should not log to the console on boot, or ever
Reviewed by: Alexander Pyhalov <apyhalov@gmail.com>
Reviewed by: Andy Stormont <astormont@racktopsystems.com>
Reviewed by: Igor Kozhukhov <igor@dilos.org>
Reviewed by: Toomas Soome <tsoome@me.com>
Approved by: Dan McDonald <danmcd@joyent.com>
Author: Joshua M. Clulow <jmc@joyent.com>
OS-76 vioif kernel heap corruption, NULL pointer dereference and mtu problem
port of illumos-3644
    3644 Add virtio-net support into the Illumos
    Reviewed by: Alexey Zaytsev <alexey.zaytsev@gmail.com>
    Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
    Reviewed by: David Hoppner <0xffea@gmail.com>

--- old/usr/src/uts/common/io/vioif/vioif.c
+++ new/usr/src/uts/common/io/vioif/vioif.c
[ ... 2 lines elided ... ]
   3    3   * Common Development and Distribution License ("CDDL"), version 1.0.
   4    4   * You may only use this file in accordance with the terms of version
   5    5   * 1.0 of the CDDL.
   6    6   *
   7    7   * A full copy of the text of the CDDL should have accompanied this
   8    8   * source.  A copy of the CDDL is also available via the Internet at
   9    9   * http://www.illumos.org/license/CDDL.
  10   10   */
  11   11  
  12   12  /*
  13      - * Copyright 2013 Nexenta Inc.  All rights reserved.
       13 + * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
  14   14   * Copyright (c) 2014, 2016 by Delphix. All rights reserved.
       15 + * Copyright 2015 Joyent, Inc.
  15   16   */
  16   17  
  17   18  /* Based on the NetBSD virtio driver by Minoura Makoto. */
  18   19  /*
  19   20   * Copyright (c) 2010 Minoura Makoto.
  20   21   * All rights reserved.
  21   22   *
  22   23   * Redistribution and use in source and binary forms, with or without
  23   24   * modification, are permitted provided that the following conditions
  24   25   * are met:
[ ... 252 lines elided ... ]
 277  278          struct virtqueue        *sc_tx_vq;
 278  279          struct virtqueue        *sc_ctrl_vq;
 279  280  
 280  281          unsigned int            sc_tx_stopped:1;
 281  282  
 282  283          /* Feature bits. */
 283  284          unsigned int            sc_rx_csum:1;
 284  285          unsigned int            sc_tx_csum:1;
 285  286          unsigned int            sc_tx_tso4:1;
 286  287  
      288 +        /*
      289 +         * For debugging, it is useful to know whether the MAC address we
      290 +         * are using came from the host (via VIRTIO_NET_CONFIG_MAC) or
      291 +         * was otherwise generated or set from within the guest.
      292 +         */
      293 +        unsigned int            sc_mac_from_host:1;
      294 +
 287  295          int                     sc_mtu;
 288  296          uint8_t                 sc_mac[ETHERADDRL];
 289  297          /*
 290  298           * For rx buffers, we keep a pointer array, because the buffers
 291  299           * can be loaned upstream, and we have to repopulate the array with
 292  300           * new members.
 293  301           */
 294  302          struct vioif_rx_buf     **sc_rxbufs;
 295  303  
 296  304          /*
[ ... 7 lines elided ... ]
 304  312          /*
 305  313           * We "loan" rx buffers upstream and reuse them after they are
 306  314           * freed. This lets us avoid allocations in the hot path.
 307  315           */
 308  316          kmem_cache_t            *sc_rxbuf_cache;
 309  317          ulong_t                 sc_rxloan;
 310  318  
 311  319          /* Copying small packets turns out to be faster than mapping them. */
 312  320          unsigned long           sc_rxcopy_thresh;
 313  321          unsigned long           sc_txcopy_thresh;
 314      -        /* Some statistic coming here */
      322 +
      323 +        /*
      324 +         * Statistics visible through mac:
      325 +         */
 315  326          uint64_t                sc_ipackets;
 316  327          uint64_t                sc_opackets;
 317  328          uint64_t                sc_rbytes;
 318  329          uint64_t                sc_obytes;
 319  330          uint64_t                sc_brdcstxmt;
 320  331          uint64_t                sc_brdcstrcv;
 321  332          uint64_t                sc_multixmt;
 322  333          uint64_t                sc_multircv;
 323  334          uint64_t                sc_norecvbuf;
 324  335          uint64_t                sc_notxbuf;
 325  336          uint64_t                sc_ierrors;
 326  337          uint64_t                sc_oerrors;
      338 +
      339 +        /*
      340 +         * Internal debugging statistics:
      341 +         */
      342 +        uint64_t                sc_rxfail_dma_handle;
      343 +        uint64_t                sc_rxfail_dma_buffer;
      344 +        uint64_t                sc_rxfail_dma_bind;
      345 +        uint64_t                sc_rxfail_chain_undersize;
      346 +        uint64_t                sc_rxfail_no_descriptors;
      347 +        uint64_t                sc_txfail_dma_handle;
      348 +        uint64_t                sc_txfail_dma_bind;
      349 +        uint64_t                sc_txfail_indirect_limit;
 327  350  };
 328  351  
 329  352  #define ETHER_HEADER_LEN                sizeof (struct ether_header)
 330  353  
 331  354  /* MTU + the ethernet header. */
 332  355  #define MAX_PAYLOAD     65535
 333  356  #define MAX_MTU         (MAX_PAYLOAD - ETHER_HEADER_LEN)
 334  357  #define DEFAULT_MTU     ETHERMTU
 335  358  
 336  359  /*
[ ... 129 lines elided ... ]
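
The sc_rxfail_*/sc_txfail_* counters added above replace the dev_err() console messages that the hunks below delete: failures are now tallied silently in the softc, where a debugger can find them. They are deliberately not published anywhere. For comparison, a driver that did want such counters visible to kstat(1M) could export them as named kstats; a minimal sketch with hypothetical names, not part of this change:

    #include <sys/kstat.h>

    /*
     * Hypothetical sketch only -- vioif keeps its debug counters private.
     * A ks_update routine (not shown) would copy the live softc values
     * into ks_data before each read.
     */
    static kstat_t *
    vioif_debug_kstat_create(struct vioif_softc *sc, int instance)
    {
            kstat_t *ksp = kstat_create("vioif", instance, "debug", "net",
                KSTAT_TYPE_NAMED, 2, 0);
            kstat_named_t *knp;

            if (ksp == NULL)
                    return (NULL);

            knp = KSTAT_NAMED_PTR(ksp);
            kstat_named_init(&knp[0], "rxfail_dma_handle", KSTAT_DATA_UINT64);
            kstat_named_init(&knp[1], "txfail_dma_handle", KSTAT_DATA_UINT64);
            ksp->ks_private = sc;
            kstat_install(ksp);

            return (ksp);
    }
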
 466  489  static int
 467  490  vioif_rx_construct(void *buffer, void *user_arg, int kmflags)
 468  491  {
 469  492          _NOTE(ARGUNUSED(kmflags));
 470  493          struct vioif_softc *sc = user_arg;
 471  494          struct vioif_rx_buf *buf = buffer;
 472  495          size_t len;
 473  496  
 474  497          if (ddi_dma_alloc_handle(sc->sc_dev, &vioif_mapped_buf_dma_attr,
 475  498              DDI_DMA_SLEEP, NULL, &buf->rb_mapping.vbm_dmah)) {
 476      -                dev_err(sc->sc_dev, CE_WARN,
 477      -                    "Can't allocate dma handle for rx buffer");
      499 +                sc->sc_rxfail_dma_handle++;
 478  500                  goto exit_handle;
 479  501          }
 480  502  
 481  503          if (ddi_dma_mem_alloc(buf->rb_mapping.vbm_dmah,
 482  504              VIOIF_RX_SIZE + sizeof (struct virtio_net_hdr),
 483  505              &vioif_bufattr, DDI_DMA_STREAMING, DDI_DMA_SLEEP,
 484  506              NULL, &buf->rb_mapping.vbm_buf, &len, &buf->rb_mapping.vbm_acch)) {
 485      -                dev_err(sc->sc_dev, CE_WARN,
 486      -                    "Can't allocate rx buffer");
      507 +                sc->sc_rxfail_dma_buffer++;
 487  508                  goto exit_alloc;
 488  509          }
 489  510          ASSERT(len >= VIOIF_RX_SIZE);
 490  511  
 491  512          if (ddi_dma_addr_bind_handle(buf->rb_mapping.vbm_dmah, NULL,
 492  513              buf->rb_mapping.vbm_buf, len, DDI_DMA_READ | DDI_DMA_STREAMING,
 493  514              DDI_DMA_SLEEP, NULL, &buf->rb_mapping.vbm_dmac,
 494  515              &buf->rb_mapping.vbm_ncookies)) {
 495      -                dev_err(sc->sc_dev, CE_WARN, "Can't bind tx buffer");
 496      -
      516 +                sc->sc_rxfail_dma_bind++;
 497  517                  goto exit_bind;
 498  518          }
 499  519  
 500  520          ASSERT(buf->rb_mapping.vbm_ncookies <= VIOIF_INDIRECT_MAX);
 501  521  
 502  522          buf->rb_sc = sc;
 503  523          buf->rb_frtn.free_arg = (void *) buf;
 504  524          buf->rb_frtn.free_func = vioif_rx_free;
 505  525  
 506  526          return (0);
[ ... 203 lines elided ... ]
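
The exit_* labels and the matching kmem cache destructor are elided above; by DDI convention they release the DMA resources in the reverse order of allocation. A minimal sketch of what that teardown presumably looks like (names follow the constructor; the elided source is authoritative):

    static void
    vioif_rx_destruct(void *buffer, void *user_arg)
    {
            _NOTE(ARGUNUSED(user_arg));
            struct vioif_rx_buf *buf = buffer;

            /* Unwind in reverse: unbind, free the memory, free the handle. */
            (void) ddi_dma_unbind_handle(buf->rb_mapping.vbm_dmah);
            ddi_dma_mem_free(&buf->rb_mapping.vbm_acch);
            ddi_dma_free_handle(&buf->rb_mapping.vbm_dmah);
    }
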
 710  730  
 711  731  static uint_t
 712  732  vioif_add_rx(struct vioif_softc *sc, int kmflag)
 713  733  {
 714  734          uint_t num_added = 0;
 715  735          struct vq_entry *ve;
 716  736  
 717  737          while ((ve = vq_alloc_entry(sc->sc_rx_vq)) != NULL) {
 718  738                  struct vioif_rx_buf *buf = sc->sc_rxbufs[ve->qe_index];
 719  739  
 720      -                if (!buf) {
      740 +                if (buf == NULL) {
 721  741                          /* First run, allocate the buffer. */
 722  742                          buf = kmem_cache_alloc(sc->sc_rxbuf_cache, kmflag);
 723  743                          sc->sc_rxbufs[ve->qe_index] = buf;
 724  744                  }
 725  745  
 726  746                  /* Still nothing? Bye. */
 727      -                if (!buf) {
 728      -                        dev_err(sc->sc_dev, CE_WARN,
 729      -                            "Can't allocate rx buffer");
      747 +                if (buf == NULL) {
 730  748                          sc->sc_norecvbuf++;
 731  749                          vq_free_entry(sc->sc_rx_vq, ve);
 732  750                          break;
 733  751                  }
 734  752  
 735  753                  ASSERT(buf->rb_mapping.vbm_ncookies >= 1);
 736  754  
 737  755                  /*
 738  756                   * For an unknown reason, the virtio_net_hdr must be placed
 739  757                   * as a separate virtio queue entry.
[ ... 53 lines elided ... ]
 793  811          mblk_t *mphead = NULL, *lastmp = NULL, *mp;
 794  812          uint32_t len;
 795  813          uint_t num_processed = 0;
 796  814  
 797  815          while ((ve = virtio_pull_chain(sc->sc_rx_vq, &len))) {
 798  816  
 799  817                  buf = sc->sc_rxbufs[ve->qe_index];
 800  818                  ASSERT(buf);
 801  819  
 802  820                  if (len < sizeof (struct virtio_net_hdr)) {
 803      -                        dev_err(sc->sc_dev, CE_WARN, "RX: Cnain too small: %u",
 804      -                            len - (uint32_t)sizeof (struct virtio_net_hdr));
      821 +                        sc->sc_rxfail_chain_undersize++;
 805  822                          sc->sc_ierrors++;
 806  823                          virtio_free_chain(ve);
 807  824                          continue;
 808  825                  }
 809  826  
 810  827                  len -= sizeof (struct virtio_net_hdr);
 811  828                  /*
 812  829                   * We copy small packets that happen to fit into a single
 813  830                   * cookie and reuse the buffers. For bigger ones, we loan
 814  831                   * the buffers upstream.
 815  832                   */
 816  833                  if (len < sc->sc_rxcopy_thresh) {
 817  834                          mp = allocb(len, 0);
 818      -                        if (!mp) {
      835 +                        if (mp == NULL) {
 819  836                                  sc->sc_norecvbuf++;
 820  837                                  sc->sc_ierrors++;
 821  838  
 822  839                                  virtio_free_chain(ve);
 823  840                                  break;
 824  841                          }
 825  842  
 826  843                          bcopy((char *)buf->rb_mapping.vbm_buf +
 827  844                              sizeof (struct virtio_net_hdr), mp->b_rptr, len);
 828  845                          mp->b_wptr = mp->b_rptr + len;
 829  846  
 830  847                  } else {
 831  848                          mp = desballoc((unsigned char *)
 832  849                              buf->rb_mapping.vbm_buf +
 833  850                              sizeof (struct virtio_net_hdr) +
 834  851                              VIOIF_IP_ALIGN, len, 0, &buf->rb_frtn);
 835      -                        if (!mp) {
      852 +                        if (mp == NULL) {
 836  853                                  sc->sc_norecvbuf++;
 837  854                                  sc->sc_ierrors++;
 838  855  
 839  856                                  virtio_free_chain(ve);
 840  857                                  break;
 841  858                          }
 842  859                          mp->b_wptr = mp->b_rptr + len;
 843  860  
 844  861                          atomic_inc_ulong(&sc->sc_rxloan);
 845  862                          /*
[ ... 45 lines elided ... ]
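
The desballoc() call above hands the rx buffer to the network stack and bumps sc_rxloan; the rb_frtn free routine installed by vioif_rx_construct() is what eventually balances the books. A sketch of what that callback has to do (the actual vioif_rx_free() body is elided from this diff):

    static void
    vioif_rx_free(caddr_t free_arg)
    {
            struct vioif_rx_buf *buf = (void *)free_arg;
            struct vioif_softc *sc = buf->rb_sc;

            /* The loaned buffer returns to the cache for reuse ... */
            kmem_cache_free(sc->sc_rxbuf_cache, buf);
            /* ... and the outstanding-loan count drops. */
            atomic_dec_ulong(&sc->sc_rxloan);
    }
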
 891  908          uint_t num_reclaimed = 0;
 892  909  
 893  910          while ((ve = virtio_pull_chain(sc->sc_tx_vq, &len))) {
 894  911                  /* We don't chain descriptors for tx, so don't expect any. */
 895  912                  ASSERT(!ve->qe_next);
 896  913  
 897  914                  buf = &sc->sc_txbufs[ve->qe_index];
 898  915                  mp = buf->tb_mp;
 899  916                  buf->tb_mp = NULL;
 900  917  
 901      -                if (mp) {
      918 +                if (mp != NULL) {
 902  919                          for (int i = 0; i < buf->tb_external_num; i++)
 903  920                                  (void) ddi_dma_unbind_handle(
 904  921                                      buf->tb_external_mapping[i].vbm_dmah);
 905  922                  }
 906  923  
 907  924                  virtio_free_chain(ve);
 908  925  
 909  926                  /* External mapping used, mp was not freed in vioif_send() */
 910      -                if (mp)
      927 +                if (mp != NULL)
 911  928                          freemsg(mp);
 912  929                  num_reclaimed++;
 913  930          }
 914  931  
 915  932          if (sc->sc_tx_stopped && num_reclaimed > 0) {
 916  933                  sc->sc_tx_stopped = 0;
 917  934                  mac_tx_update(sc->sc_mac_handle);
 918  935          }
 919  936  
 920  937          return (num_reclaimed);
[ ... 23 lines elided ... ]
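
The sc_tx_stopped/mac_tx_update() pair above is one half of the GLDv3 flow-control contract; the other half lives in the (largely elided) mc_tx entry point, which hands the unsent chain back to MAC when vioif_send() runs out of descriptors. A sketch consistent with the "return (mp)" tail visible further down, with the body otherwise assumed:

    mblk_t *
    vioif_tx(void *arg, mblk_t *mp)
    {
            struct vioif_softc *sc = arg;
            mblk_t *nmp;

            while (mp != NULL) {
                    nmp = mp->b_next;
                    mp->b_next = NULL;

                    if (!vioif_send(sc, mp)) {
                            /*
                             * Out of descriptors: requeue the rest and
                             * stop.  MAC retries after mac_tx_update().
                             */
                            sc->sc_tx_stopped = 1;
                            mp->b_next = nmp;
                            break;
                    }
                    mp = nmp;
            }

            return (mp);
    }
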
 944  961  vioif_tx_lazy_handle_alloc(struct vioif_softc *sc, struct vioif_tx_buf *buf,
 945  962      int i)
 946  963  {
 947  964          int ret = DDI_SUCCESS;
 948  965  
 949  966          if (!buf->tb_external_mapping[i].vbm_dmah) {
 950  967                  ret = ddi_dma_alloc_handle(sc->sc_dev,
 951  968                      &vioif_mapped_buf_dma_attr, DDI_DMA_SLEEP, NULL,
 952  969                      &buf->tb_external_mapping[i].vbm_dmah);
 953  970                  if (ret != DDI_SUCCESS) {
 954      -                        dev_err(sc->sc_dev, CE_WARN,
 955      -                            "Can't allocate dma handle for external tx buffer");
      971 +                        sc->sc_txfail_dma_handle++;
 956  972                  }
 957  973          }
 958  974  
 959  975          return (ret);
 960  976  }
 961  977  
 962  978  static inline int
 963  979  vioif_tx_external(struct vioif_softc *sc, struct vq_entry *ve, mblk_t *mp,
 964  980      size_t msg_size)
 965  981  {
[ ... 33 lines elided ... ]
 999 1015                          sc->sc_oerrors++;
1000 1016                          goto exit_lazy_alloc;
1001 1017                  }
1002 1018                  ret = ddi_dma_addr_bind_handle(
1003 1019                      buf->tb_external_mapping[i].vbm_dmah, NULL,
1004 1020                      (caddr_t)nmp->b_rptr, len,
1005 1021                      DDI_DMA_WRITE | DDI_DMA_STREAMING,
1006 1022                      DDI_DMA_SLEEP, NULL, &dmac, &ncookies);
1007 1023  
1008 1024                  if (ret != DDI_SUCCESS) {
     1025 +                        sc->sc_txfail_dma_bind++;
1009 1026                          sc->sc_oerrors++;
1010      -                        dev_err(sc->sc_dev, CE_NOTE,
1011      -                            "TX: Failed to bind external handle");
1012 1027                          goto exit_bind;
1013 1028                  }
1014 1029  
1015 1030                  /* Check if we still fit into the indirect table. */
1016 1031                  if (virtio_ve_indirect_available(ve) < ncookies) {
1017      -                        dev_err(sc->sc_dev, CE_NOTE,
1018      -                            "TX: Indirect descriptor table limit reached."
1019      -                            " It took %d fragments.", i);
     1032 +                        sc->sc_txfail_indirect_limit++;
1020 1033                          sc->sc_notxbuf++;
1021 1034                          sc->sc_oerrors++;
1022 1035  
1023 1036                          ret = DDI_FAILURE;
1024 1037                          goto exit_limit;
1025 1038                  }
1026 1039  
1027 1040                  virtio_ve_add_cookie(ve, buf->tb_external_mapping[i].vbm_dmah,
1028 1041                      dmac, ncookies, B_TRUE);
1029 1042  
[ ... 38 lines elided ... ]
1068 1081          for (nmp = mp; nmp; nmp = nmp->b_cont)
1069 1082                  msg_size += MBLKL(nmp);
1070 1083  
1071 1084          if (sc->sc_tx_tso4) {
1072 1085                  mac_lso_get(mp, &lso_mss, &lso_flags);
1073 1086                  lso_required = (lso_flags & HW_LSO);
1074 1087          }
1075 1088  
1076 1089          ve = vq_alloc_entry(sc->sc_tx_vq);
1077 1090  
1078      -        if (!ve) {
     1091 +        if (ve == NULL) {
1079 1092                  sc->sc_notxbuf++;
1080 1093                  /* Out of free descriptors - try later. */
1081 1094                  return (B_FALSE);
1082 1095          }
1083 1096          buf = &sc->sc_txbufs[ve->qe_index];
1084 1097  
1085 1098          /* Use the inline buffer of the first entry for the virtio_net_hdr. */
1086 1099          (void) memset(buf->tb_inline_mapping.vbm_buf, 0,
1087 1100              sizeof (struct virtio_net_hdr));
1088 1101  
[ ... 31 lines elided ... ]
1120 1133                  net_header->gso_size = (uint16_t)lso_mss;
1121 1134          }
1122 1135  
1123 1136          virtio_ve_add_indirect_buf(ve,
1124 1137              buf->tb_inline_mapping.vbm_dmac.dmac_laddress,
1125 1138              sizeof (struct virtio_net_hdr), B_TRUE);
1126 1139  
 1127 1140          /* meanwhile, update the statistics */
1128 1141          if (mp->b_rptr[0] & 0x1) {
1129 1142                  if (bcmp(mp->b_rptr, vioif_broadcast, ETHERADDRL) != 0)
1130      -                                sc->sc_multixmt++;
1131      -                        else
1132      -                                sc->sc_brdcstxmt++;
     1143 +                        sc->sc_multixmt++;
     1144 +                else
     1145 +                        sc->sc_brdcstxmt++;
1133 1146          }
1134 1147  
1135 1148          /*
1136 1149           * We copy small packets into the inline buffer. The bigger ones
1137 1150           * get mapped using the mapped buffer.
1138 1151           */
1139 1152          if (msg_size < sc->sc_txcopy_thresh) {
1140 1153                  vioif_tx_inline(sc, ve, mp, msg_size);
1141 1154          } else {
 1142 1155          /* statistics get updated by vioif_tx_external on failure */
[ ... 43 lines elided ... ]
1186 1199          return (mp);
1187 1200  }
1188 1201  
1189 1202  int
1190 1203  vioif_start(void *arg)
1191 1204  {
1192 1205          struct vioif_softc *sc = arg;
1193 1206          struct vq_entry *ve;
1194 1207          uint32_t len;
1195 1208  
1196      -        mac_link_update(sc->sc_mac_handle,
1197      -            vioif_link_state(sc));
     1209 +        mac_link_update(sc->sc_mac_handle, vioif_link_state(sc));
1198 1210  
1199 1211          virtio_start_vq_intr(sc->sc_rx_vq);
1200 1212  
1201 1213          /*
1202 1214           * Don't start interrupts on sc_tx_vq. We use VIRTIO_F_NOTIFY_ON_EMPTY,
1203 1215           * so the device will send a transmit interrupt when the queue is empty
1204 1216           * and we can reclaim it in one sweep.
1205 1217           */
1206 1218  
1207 1219          /*
[ ... 195 lines elided ... ]
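
Because VIRTIO_F_NOTIFY_ON_EMPTY defers the transmit interrupt until the queue drains, the tx handler can reclaim everything in one pass. A sketch of that handler, mirroring the vioif_rx_handler() signature shown later in this diff and recovering the softc from the embedded sc_virtio with the container_of idiom; vioif_reclaim_used_tx() is the presumed name of the reclaim loop shown above (the elided source is authoritative):

    #include <sys/sysmacros.h>      /* offsetof */

    /* ARGSUSED */
    uint_t
    vioif_tx_handler(caddr_t arg1, caddr_t arg2)
    {
            struct virtio_softc *vsc = (void *)arg1;
            struct vioif_softc *sc = (struct vioif_softc *)((char *)vsc -
                offsetof(struct vioif_softc, sc_virtio));

            /* One sweep reclaims every completed tx descriptor. */
            (void) vioif_reclaim_used_tx(sc);

            return (DDI_INTR_CLAIMED);
    }
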
1403 1415          int value;
1404 1416  
1405 1417          switch (pr_num) {
1406 1418          case MAC_PROP_MTU:
1407 1419                  mac_prop_info_set_range_uint32(prh, ETHERMIN, MAX_MTU);
1408 1420                  break;
1409 1421  
1410 1422          case MAC_PROP_PRIVATE:
1411 1423                  bzero(valstr, sizeof (valstr));
1412 1424                  if (strcmp(pr_name, vioif_txcopy_thresh) == 0) {
 1414 1425                          value = sc->sc_txcopy_thresh;
 1415      -                } else  if (strcmp(pr_name,
 1416      -                    vioif_rxcopy_thresh) == 0) {
     1426 +                } else if (strcmp(pr_name, vioif_rxcopy_thresh) == 0) {
1417 1427                          value = sc->sc_rxcopy_thresh;
1418 1428                  } else {
1419 1429                          return;
1420 1430                  }
1421 1431                  (void) snprintf(valstr, sizeof (valstr), "%d", value);
1422 1432                  break;
1423 1433  
1424 1434          default:
1425 1435                  break;
1426 1436          }
[ ... 55 lines elided ... ]
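
The getprop path above has a setprop counterpart that is elided from this diff; private MAC properties arrive as strings, so the setter has to parse and bound them. A minimal sketch under stated assumptions: the function name and the VIOIF_COPY_THRESH_MAX bound are hypothetical, while vioif_txcopy_thresh and vioif_rxcopy_thresh are the same prop-name strings tested above:

    #define VIOIF_COPY_THRESH_MAX   640     /* hypothetical bound */

    static int
    vioif_setprop_private(struct vioif_softc *sc, const char *pr_name,
        const void *pr_val)
    {
            long result;

            if (pr_val == NULL)
                    return (EINVAL);

            /* Private property values are strings; parse and range-check. */
            (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
            if (result < 0 || result > VIOIF_COPY_THRESH_MAX)
                    return (EINVAL);

            if (strcmp(pr_name, vioif_txcopy_thresh) == 0)
                    sc->sc_txcopy_thresh = result;
            else if (strcmp(pr_name, vioif_rxcopy_thresh) == 0)
                    sc->sc_rxcopy_thresh = result;
            else
                    return (ENOTSUP);

            return (0);
    }
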
1482 1492          char buf[512];
1483 1493          char *bufp = buf;
1484 1494          char *bufend = buf + sizeof (buf);
1485 1495  
1486 1496          /* LINTED E_PTRDIFF_OVERFLOW */
1487 1497          bufp += snprintf(bufp, bufend - bufp, prefix);
1488 1498          /* LINTED E_PTRDIFF_OVERFLOW */
1489 1499          bufp += virtio_show_features(features, bufp, bufend - bufp);
1490 1500          *bufp = '\0';
1491 1501  
1492      -
1493 1502          /* Using '!' to only CE_NOTE this to the system log. */
1494 1503          dev_err(sc->sc_dev, CE_NOTE, "!%s Vioif (%b)", buf, features,
1495 1504              VIRTIO_NET_FEATURE_BITS);
1496 1505  }
1497 1506  
1498 1507  /*
1499 1508   * Find out which features are supported by the device and
1500 1509   * choose which ones we wish to use.
1501 1510   */
1502 1511  static int
[ ... 8 lines elided ... ]
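
The '!' prefix used in the dev_err() above follows the cmn_err(9F) convention that this whole commit leans on: a leading '!' sends the message only to the system log, '^' sends it only to the console, and no prefix sends it to both. For example:

    /* System log (e.g. /var/adm/messages) only -- no console noise. */
    dev_err(sc->sc_dev, CE_NOTE, "!Csum enabled.");

    /* Console only. */
    dev_err(sc->sc_dev, CE_NOTE, "^console-only message");

    /* Both console and system log -- reserved for real problems. */
    dev_err(sc->sc_dev, CE_WARN, "Host does not support RING_INDIRECT_DESC.");
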
1511 1520              VIRTIO_NET_F_MAC |
1512 1521              VIRTIO_NET_F_STATUS |
1513 1522              VIRTIO_F_RING_INDIRECT_DESC |
1514 1523              VIRTIO_F_NOTIFY_ON_EMPTY);
1515 1524  
1516 1525          vioif_show_features(sc, "Host features: ", host_features);
1517 1526          vioif_show_features(sc, "Negotiated features: ",
1518 1527              sc->sc_virtio.sc_features);
1519 1528  
1520 1529          if (!(sc->sc_virtio.sc_features & VIRTIO_F_RING_INDIRECT_DESC)) {
1521      -                dev_err(sc->sc_dev, CE_NOTE,
1522      -                    "Host does not support RING_INDIRECT_DESC, bye.");
     1530 +                dev_err(sc->sc_dev, CE_WARN,
     1531 +                    "Host does not support RING_INDIRECT_DESC. Cannot attach.");
1523 1532                  return (DDI_FAILURE);
1524 1533          }
1525 1534  
1526 1535          return (DDI_SUCCESS);
1527 1536  }
1528 1537  
1529 1538  static int
1530 1539  vioif_has_feature(struct vioif_softc *sc, uint32_t feature)
1531 1540  {
1532 1541          return (virtio_has_feature(&sc->sc_virtio, feature));
[ ... 1 line elided ... ]
1534 1543  
1535 1544  static void
1536 1545  vioif_set_mac(struct vioif_softc *sc)
1537 1546  {
1538 1547          int i;
1539 1548  
1540 1549          for (i = 0; i < ETHERADDRL; i++) {
1541 1550                  virtio_write_device_config_1(&sc->sc_virtio,
1542 1551                      VIRTIO_NET_CONFIG_MAC + i, sc->sc_mac[i]);
1543 1552          }
     1553 +        sc->sc_mac_from_host = 0;
1544 1554  }
1545 1555  
1546 1556  /* Get the mac address out of the hardware, or make up one. */
1547 1557  static void
1548 1558  vioif_get_mac(struct vioif_softc *sc)
1549 1559  {
1550 1560          int i;
1551 1561          if (sc->sc_virtio.sc_features & VIRTIO_NET_F_MAC) {
1552 1562                  for (i = 0; i < ETHERADDRL; i++) {
1553 1563                          sc->sc_mac[i] = virtio_read_device_config_1(
1554 1564                              &sc->sc_virtio,
1555 1565                              VIRTIO_NET_CONFIG_MAC + i);
1556 1566                  }
1557      -                dev_err(sc->sc_dev, CE_NOTE, "Got MAC address from host: %s",
1558      -                    ether_sprintf((struct ether_addr *)sc->sc_mac));
     1567 +                sc->sc_mac_from_host = 1;
1559 1568          } else {
1560 1569                  /* Get a few random bytes */
1561 1570                  (void) random_get_pseudo_bytes(sc->sc_mac, ETHERADDRL);
1562 1571                  /* Make sure it's a unicast MAC */
1563 1572                  sc->sc_mac[0] &= ~1;
1564 1573                  /* Set the "locally administered" bit */
1565 1574                  sc->sc_mac[1] |= 2;
1566 1575  
1567 1576                  vioif_set_mac(sc);
1568 1577  
1569 1578                  dev_err(sc->sc_dev, CE_NOTE,
1570      -                    "Generated a random MAC address: %s",
     1579 +                    "!Generated a random MAC address: %s",
1571 1580                      ether_sprintf((struct ether_addr *)sc->sc_mac));
1572 1581          }
1573 1582  }
1574 1583  
1575 1584  /*
1576 1585   * Virtqueue interrupt handlers
1577 1586   */
1578 1587  /* ARGSUSED */
1579 1588  uint_t
1580 1589  vioif_rx_handler(caddr_t arg1, caddr_t arg2)
[ ... 52 lines elided ... ]
1633 1642  vioif_check_features(struct vioif_softc *sc)
1634 1643  {
1635 1644          if (vioif_has_feature(sc, VIRTIO_NET_F_CSUM)) {
1636 1645                  /* The GSO/GRO featured depend on CSUM, check them here. */
1637 1646                  sc->sc_tx_csum = 1;
1638 1647                  sc->sc_rx_csum = 1;
1639 1648  
1640 1649                  if (!vioif_has_feature(sc, VIRTIO_NET_F_GUEST_CSUM)) {
1641 1650                          sc->sc_rx_csum = 0;
1642 1651                  }
1643      -                cmn_err(CE_NOTE, "Csum enabled.");
     1652 +                dev_err(sc->sc_dev, CE_NOTE, "!Csum enabled.");
1644 1653  
1645 1654                  if (vioif_has_feature(sc, VIRTIO_NET_F_HOST_TSO4)) {
1646 1655  
1647 1656                          sc->sc_tx_tso4 = 1;
1648 1657                          /*
1649 1658                           * We don't seem to have a way to ask the system
1650 1659                           * not to send us LSO packets with Explicit
1651 1660                           * Congestion Notification bit set, so we require
1652 1661                           * the device to support it in order to do
1653 1662                           * LSO.
1654 1663                           */
1655 1664                          if (!vioif_has_feature(sc, VIRTIO_NET_F_HOST_ECN)) {
1656 1665                                  dev_err(sc->sc_dev, CE_NOTE,
1657      -                                    "TSO4 supported, but not ECN. "
     1666 +                                    "!TSO4 supported, but not ECN. "
1658 1667                                      "Not using LSO.");
1659 1668                                  sc->sc_tx_tso4 = 0;
1660 1669                          } else {
1661      -                                cmn_err(CE_NOTE, "LSO enabled");
     1670 +                                dev_err(sc->sc_dev, CE_NOTE, "!LSO enabled");
1662 1671                          }
1663 1672                  }
1664 1673          }
1665 1674  }
1666 1675  
1667 1676  static int
1668 1677  vioif_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
1669 1678  {
1670 1679          int ret, instance;
1671 1680          struct vioif_softc *sc;
[ ... 103 lines elided ... ]
1775 1784  
1776 1785          sc->sc_rxloan = 0;
1777 1786  
 1778 1787          /* set some reasonably small default values */
1779 1788          sc->sc_rxcopy_thresh = 300;
1780 1789          sc->sc_txcopy_thresh = 300;
1781 1790          sc->sc_mtu = ETHERMTU;
1782 1791  
1783 1792          vioif_check_features(sc);
1784 1793  
1785      -        if (vioif_alloc_mems(sc))
     1794 +        if (vioif_alloc_mems(sc) != 0)
1786 1795                  goto exit_alloc_mems;
1787 1796  
1788 1797          if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
1789 1798                  dev_err(devinfo, CE_WARN, "Failed to allocate a mac_register");
1790 1799                  goto exit_macalloc;
1791 1800          }
1792 1801  
1793 1802          macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
1794 1803          macp->m_driver = sc;
1795 1804          macp->m_dip = devinfo;
[ ... 67 lines elided ... ]
1863 1872                  break;
1864 1873  
1865 1874          case DDI_PM_SUSPEND:
1866 1875                  /* We do not support suspend/resume for vioif. */
1867 1876                  return (DDI_FAILURE);
1868 1877  
1869 1878          default:
1870 1879                  return (DDI_FAILURE);
1871 1880          }
1872 1881  
1873      -        if (sc->sc_rxloan) {
     1882 +        if (sc->sc_rxloan > 0) {
1874 1883                  dev_err(devinfo, CE_WARN, "!Some rx buffers are still upstream,"
1875 1884                      " not detaching.");
1876 1885                  return (DDI_FAILURE);
1877 1886          }
1878 1887  
1879 1888          virtio_stop_vq_intr(sc->sc_rx_vq);
1880 1889          virtio_stop_vq_intr(sc->sc_tx_vq);
1881 1890  
1882 1891          virtio_release_ints(&sc->sc_virtio);
1883 1892  
[ ... 70 lines elided ... ]