/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2018, Joyent, Inc.
 */

/*
 * This plugin implements the SDC VXLAN Protocol (SVP).
 *
 * This plugin is designed to work with a broader distributed system that
 * maintains a database of mappings, provides a means of looking up data, and
 * provides a stream of updates. While it is named after VXLAN, there isn't
 * anything specific to VXLAN baked into the protocol at this time, other than
 * that it requires both an IP address and a port; however, if there's a good
 * reason to support others here, we can modify that.
 *
 * -----------
 * Terminology
 * -----------
 *
 * Throughout this module we refer to a few different kinds of addresses:
 *
 *    VL3
 *
 *      A VL3 address, or virtual layer 3, refers to the layer three addresses
 *      that are used by entities on an overlay network. As far as we're
 *      concerned, that means the IP address of an interface on an overlay
 *      network.
 *
 *    VL2
 *
 *      A VL2 address, or virtual layer 2, refers to the link-layer addresses
 *      that are used by entities on an overlay network. As far as we're
 *      concerned, that means the MAC address of an interface on an overlay
 *      network.
 *
 *    UL3
 *
 *      A UL3, or underlay layer 3, refers to the layer three (IP) address on
 *      the underlay network.
 *
 * The svp plugin provides lookups from VL3->VL2, i.e. the equivalent of an ARP
 * or NDP query, and then also provides VL2->UL3 lookups.
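 *
 * To make those two steps concrete, consider a host on an overlay network
 * that wants to reach 10.0.0.5 (all addresses here are purely illustrative):
 *
 *    VL3 lookup:  10.0.0.5 (VL3 IP)         -> 02:08:20:ab:cd:ef (VL2 MAC)
 *    VL2 lookup:  02:08:20:ab:cd:ef (VL2)   -> 10.88.88.7:1296 (UL3 IP/port)
 *
 * The first lookup answers the ARP/NDP-style question; the second tells the
 * encapsulation layer where on the underlay to send the frame.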
 *
 * -------------------
 * Protocol Operations
 * -------------------
 *
 * The svp protocol is defined in lib/varpd/svp/common/libvarpd_svp_prot.h. It
 * defines the basic TCP protocol that we use to communicate to hosts. At this
 * time, it is not quite 100% implemented in both this plug-in and our primary
 * server, sdc-portolan (see https://github.com/joyent/sdc-portolan).
 *
 * At this time, we don't quite support everything that we need to, including
 * SVP_R_BULK_REQ and SVP_R_SHOOTDOWN.
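 *
 * For orientation, every message on the wire is a fixed-size request header
 * followed by an operation-specific payload. The sketch below should be read
 * alongside libvarpd_svp_prot.h, which has the authoritative definition; the
 * layout here is from memory and only meant to convey the shape:
 *
 *    typedef struct svp_req {
 *            uint16_t  svp_ver;      protocol version
 *            uint16_t  svp_op;       operation, e.g. an SVP_R_* value
 *            uint32_t  svp_size;     size of the payload that follows
 *            uint32_t  svp_id;       id used to match replies to requests
 *            uint32_t  svp_crc32;    crc of the message
 *    } svp_req_t;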
 *
 * ---------------------------------
 * General Design and Considerations
 * ---------------------------------
 *
 * Every instance of the svp plugin requires the hostname and port of a server
 * to contact. We have co-opted port 1296 (the year of the oldest extant
 * portolan) as our default port.
 *
 * Each of the different instances of the plugin has a corresponding remote
 * backend. The remote backend represents the [ host, port ] tuple. Different
 * instances that share the same host and port tuple will use the same
 * backend.
 *
 * The backend is actually in charge of performing lookups, resolving and
 * updating the set of remote hosts based on the DNS resolution we've been
 * provided, and taking care of things like shootdowns.
 *
 * The whole plugin itself maintains an event loop and a number of threads to
 * service that event loop. On top of that event loop, we have a simple timer
 * backend that ticks at one-second intervals and performs various callbacks,
 * such as idle query timers, DNS resolution, connection backoff, etc. Each of
 * the remote hosts that we obtain is wrapped up in an svp_conn_t, which manages
 * the connection state, reconnecting, etc.
 *
 * All in all, the general structure looks like this:
 *
 *  +----------------------------+
 *  | Plugin Instance            |
 *  | svp_t                      |
 *  |                            |
 *  | varpd_provider_handle_t * -+-> varpd handle
 *  | uint64_t               ----+-> varpd ID
 *  | char *                 ----+-> remote host
 *  | uint16_t               ----+-> remote port
 *  | svp_remote_t *   ---+------+-> remote backend
 *  +---------------------+------+
 *                        |
 *                        v
 *   +----------------------+                   +----------------+
 *   | Remote backend       |------------------>| Remote Backend |---> ...
 *   | svp_remote_t         |                   | svp_remote_t   |
 *   |                      |                   +----------------+
 *   | svp_remote_state_t --+-> state flags
 *   | svp_degrade_state_t -+-> degraded reason
 *   | struct addrinfo *  --+-> resolved hosts
 *   | uint_t            ---+-> active hosts
 *   | uint_t            ---+-> DNS generation
 *   | uint_t            ---+-> Reference count
 *   | uint_t            ---+-> active conns
 *   | uint_t            ---+-> degraded conns
 *   | list_t        ---+---+-> connection list
 *   +------------------+---+
 *                      |
 *                      +------------------------------+-----------------+
 *                      |                              |                 |
 *                      v                              v                 v
 *   +-------------------+                       +----------------+
 *   | SVP Connection    |                       | SVP Connection |     ...
 *   | svp_conn_t        |                       | svp_conn_t     |
 *   |                   |                       +----------------+
 *   | svp_event_t   ----+-> event loop handle
 *   | svp_timer_t   ----+-> backoff timer
 *   | svp_timer_t   ----+-> query timer
 *   | int           ----+-> socket fd
 *   | uint_t        ----+-> generation
 *   | uint_t        ----+-> current backoff
 *   | svp_conn_flags_t -+-> connection flags
 *   | svp_conn_state_t -+-> connection state
 *   | svp_conn_error_t -+-> connection error
 *   | int            ---+-> last errno
 *   | hrtime_t       ---+-> activity timestamp
 *   | svp_conn_out_t ---+-> outgoing data state
 *   | svp_conn_in_t  ---+-> incoming data state
 *   | list_t      ---+--+-> active queries
 *   +----------------+--+
 *                    |
 *                    +----------------------------------+-----------------+
 *                    |                                  |                 |
 *                    v                                  v                 v
 *   +--------------------+                       +-------------+
 *   | SVP Query          |                       | SVP Query   |         ...
 *   | svp_query_t        |                       | svp_query_t |
 *   |                    |                       +-------------+
 *   | svp_query_f     ---+-> callback function
 *   | void *          ---+-> callback arg
 *   | svp_query_state_t -+-> state flags
 *   | svp_req_t       ---+-> svp prot. header
 *   | svp_query_data_t --+-> read data
 *   | svp_query_data_t --+-> write data
 *   | svp_status_t    ---+-> request status
 *   +--------------------+
 *
 * The svp_t is the instance that we associate with varpd. The instance itself
 * maintains properties and then, when it's started, associates with an
 * svp_remote_t, which is the remote backend. The remote backend itself
 * maintains the DNS state and spins up and down connections based on the
 * results from DNS. By default, we query DNS every 30 seconds. For more on the
 * connection life cycle, see the next section.
 *
 * By default, each connection maintains its own backoff timer and list of
 * queries it's servicing. Only one request is generally outstanding at a time
 * and requests are round-robined across the various connections.
 *
 * The query itself represents the svp request that's going on, keeps track of
 * its state, and is a place for data that's read and written to as part of
 * the request.
 *
 * Connections maintain a query timer such that if we have not received data on
 * a socket for a certain amount of time, we kill that socket and begin a
 * reconnection cycle with backoff.
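 *
 * As a rough sketch of the backoff behavior described above (the helper name
 * and constants here are illustrative, not the plugin's actual tunables):
 *
 *    static uint_t
 *    svp_backoff_next(uint_t nfailures)
 *    {
 *            uint_t ticks = 1U << nfailures;     doubles per failure
 *
 *            return (ticks > 30 ? 30 : ticks);   capped at ~30 seconds
 *    }
 *
 * That is, each consecutive connection failure roughly doubles the wait (in
 * one-second timer ticks) before the next attempt, up to a fixed cap.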
 *
 * ------------------------
 * Connection State Machine
 * ------------------------
 *
 * We have a connection pool that's built upon DNS records. DNS describes the
 * membership of the set of remote peers that make up our pool and we maintain
 * one connection to each of them.  In addition, we maintain an exponential
 * backoff for each peer and will attempt to reconnect immediately before
 * backing off. The following are the valid states that a connection can be in:
 *
 *      SVP_CS_ERROR            An OS error has occurred on this connection,
 *                              such as failure to create a socket or associate
 *                              the socket with an event port. We also
 *                              transition all connections to this state before
 *                              we destroy them.
 *
 *      SVP_CS_INITIAL          This is the initial state of a connection, all
 *                              that should exist is an unbound socket.
 *
 *      SVP_CS_CONNECTING       A call to connect has been made and we are
 *                              polling for it to complete.
 *
 *      SVP_CS_BACKOFF          A connect attempt has failed and we are
 *                              currently backing off, waiting to try again.
 *
 *      SVP_CS_ACTIVE           We have successfully connected to the remote
 *                              system.
 *
 *      SVP_CS_WINDDOWN         This connection is going to valhalla. In other
 *                              words, a previously active connection is no
 *                              longer valid in DNS, so we should curb our use
 *                              of it, and reap it as soon as we have other
 *                              active connections.
 *
 * The following diagram attempts to describe our state transition scheme, and
 * when we transition from one state to the next.
 *
 *                               |
 *                               * New remote IP from DNS resolution,
 *                               | not currently active in the system.
 *                               |
 *                               v                                Socket Error,
 *                       +----------------+                       still in DNS
 *  +----------------<---| SVP_CS_INITIAL |<----------------------*-----+
 *  |                    +----------------+                             |
 *  |                            System  |                              |
 *  | Connection . . . . .       success *               Successful     |
 *  | failed             .               |               connect()      |
 *  |               +----*---------+     |        +-----------*--+      |
 *  |               |              |     |        |              |      |
 *  |               V              ^     v        ^              V      ^
 *  |  +----------------+         +-------------------+     +---------------+
 *  +<-| SVP_CS_BACKOFF |         | SVP_CS_CONNECTING |     | SVP_CS_ACTIVE |
 *  |  +----------------+         +-------------------+     +---------------+
 *  |               V              ^  V                       V  V
 *  | Backoff wait  *              |  |                       |  * Removed
 *  v interval      +--------------+  +-----------------<-----+  | from DNS
 *  | finished                        |                          |
 *  |                                 V                          |
 *  |                                 |                          V
 *  |                                 |            +-----------------+
 *  +----------------+----------<-----+-------<----| SVP_CS_WINDDOWN |
 *                   |                             +-----------------+
 *                   * . . .   Fatal system, not
 *                   |         socket error or
 *                   V         quiesced after
 *           +--------------+  removal from DNS
 *           | SVP_CS_ERROR |
 *           +--------------+
 *                   |
 *                   * . . . Removed from DNS
 *                   v
 *            +------------+
 *            | Connection |
 *            | Destroyed  |
 *            +------------+
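 *
 * As a sketch of how the connect(2) path drives the transitions above (the
 * svp_conn_t member names here are hypothetical; the states are the real
 * ones):
 *
 *    if (connect(scp->sc_socket, addr, addrlen) == 0) {
 *            scp->sc_cstate = SVP_CS_ACTIVE;         immediate success
 *    } else if (errno == EINPROGRESS) {
 *            scp->sc_cstate = SVP_CS_CONNECTING;     poll for completion
 *    } else if (errno == ECONNREFUSED || errno == ETIMEDOUT) {
 *            scp->sc_cstate = SVP_CS_BACKOFF;        transient, retry later
 *    } else {
 *            scp->sc_cstate = SVP_CS_ERROR;          fatal system error
 *    }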
 *
 * --------------------------
 * Connection Event Injection
 * --------------------------
 *
 * For each connection that exists in the system, we have a timer in place that
 * is in charge of performing timeout activity. It fires once every thirty
 * seconds or so for a given connection and checks to ensure that we have had
 * activity for the most recent query on the connection. If not, it terminates
 * the connection. This is important because if we have sent all our data and
 * are waiting for the remote end to reply, without enabling something like TCP
 * keep-alive, we will not be notified of anything that has happened to the
 * remote connection, for example a panic. In addition, this also protects
 * against a server that is up, but a portolan that is not making forward
 * progress.
 *
 * When a timeout occurs, we first try to disassociate any active events, which
 * by definition must exist. Once that's done, we inject a port source user
 * event. Now, there is a small gotcha. Let's assume for a moment that we have
 * a pathological portolan. That means that it knows to inject activity right
 * at the timeout window. In that case, the event may be disassociated before
 * we could get to it. If that's the case, we must _not_ inject the user event
 * and instead, we'll let the pending event take care of it. We know that the
 * pending event hasn't hit the main part of the loop yet, otherwise, it would
 * have released the lock protecting our state and associated the event.
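 *
 * In code, that check-then-inject dance looks roughly like the following;
 * the connection member names are hypothetical, while port_dissociate(3C)
 * and port_send(3C) are the real event port primitives:
 *
 *    mutex_enter(&scp->sc_lock);
 *    if (port_dissociate(efd, PORT_SOURCE_FD,
 *        (uintptr_t)scp->sc_socket) == 0) {
 *            (void) port_send(efd, 0, scp);      no event in flight; inject
 *    }                                           a user event ourselves
 *    mutex_exit(&scp->sc_lock);
 *
 * If port_dissociate() fails, an event is already pending and it will notice
 * the timeout state for us once it acquires the lock.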
 *
 * ------------
 * Notes on DNS
 * ------------
 *
 * Unfortunately, doing host name resolution in a way that allows us to
 * leverage the system's resolvers and the system's caching requires us to
 * make blocking calls in libc via getaddrinfo(3SOCKET). If we can't reach a
 * given server, that will tie up a thread for quite some time. To work around
 * that fact, we're going to create a fixed number of threads and we'll use
 * them to service our DNS requests. While this isn't ideal, until we have a
 * sane means of integrating DNS resolution into an event loop with, say,
 * portfs, it's not going to be a fun day no matter what we do.
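 *
 * A minimal sketch of one of those worker threads, assuming a hypothetical
 * queue of outstanding requests (svp_dns_queue_pop() and the request
 * structure are illustrative; getaddrinfo(3SOCKET) is the real call):
 *
 *    static void *
 *    svp_dns_worker(void *arg)
 *    {
 *            for (;;) {
 *                    svp_dns_req_t *req = svp_dns_queue_pop();   blocks
 *                    struct addrinfo hints, *res = NULL;
 *
 *                    bzero(&hints, sizeof (hints));
 *                    hints.ai_socktype = SOCK_STREAM;
 *                    req->sdr_err = getaddrinfo(req->sdr_host,
 *                        req->sdr_port, &hints, &res);
 *                    req->sdr_cb(req, res);      hand the results back
 *            }
 *    }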
 *
 * ------
 * Timers
 * ------
 *
 * We maintain a single timer based on CLOCK_REALTIME. It's designed to fire
 * every second. We'd rather use CLOCK_HIGHRES just to alleviate ourselves
 * from timer drift; however, as zones may not actually have CLOCK_HIGHRES
 * access, we stick with CLOCK_REALTIME. The timer itself is just a simple
 * avl tree sorted by expiration time, which is stored as a tick in the
 * future; a tick is just one second.
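 *
 * The avl comparison function is then just an ordering on that expiration
 * tick; a sketch (the member name is hypothetical, and the real tree must
 * also break ties between timers expiring on the same tick):
 *
 *    static int
 *    svp_timer_comparator(const void *l, const void *r)
 *    {
 *            const svp_timer_t *lt = l, *rt = r;
 *
 *            if (lt->st_expire > rt->st_expire)
 *                    return (1);
 *            if (lt->st_expire < rt->st_expire)
 *                    return (-1);
 *            return (0);
 *    }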
 *
 * ----------
 * Shootdowns
 * ----------
 *
 * As part of the protocol, we need to be able to handle shootdowns that inform
 * us that some of the information in the system is out of date. This
 * information needs to be processed promptly; however, the information is
 * hopefully going to be relatively infrequent relative to the normal flow of
 * information.
 *
 * The shootdown information needs to be handled on a per-backend basis. The
 * general design is that we'll have a single query for this which can fire on
 * a 5-10s period; we randomize the latter part to give us a bit more load
 * spreading. If we complete because there's no work to do, then we wait the
 * normal period. If we complete, but there's still work to do, we'll go again
 * after a second.
 *
 * A shootdown has a few different parts. We first receive a list of items to
 * shoot down. After performing all of those, we need to acknowledge them. When
 * that's been done successfully, we can move onto the next part. From a
 * protocol perspective, we make an SVP_R_LOG_REQ, we get a reply, and then
 * after processing the entries, send an SVP_R_LOG_RM. Only once that's been
 * acked do we continue.
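 *
 * As a wire-level exchange, one round of that looks like the following (only
 * SVP_R_LOG_REQ and SVP_R_LOG_RM are named above; the reply legs are
 * described generically here):
 *
 *      varpd                                        portolan
 *        |------------ SVP_R_LOG_REQ ------------------>|
 *        |<----------- reply with log entries ----------|
 *        |     ... process each invalidation ...        |
 *        |------------ SVP_R_LOG_RM ------------------->|
 *        |<----------- removal acknowledged ------------|
 *        |     ... only now fetch more entries ...      |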
 *
 * However, one of the challenges that we have is that these invalidations are
 * just that, an invalidation. For a virtual layer two request, that's fine,
 * because the kernel supports that. However, for virtual layer three
 * invalidations, we have a bit more work to do. These protocols, ARP and NDP,
 * don't really support a notion of just an invalidation; instead, you have to
 * inject the new data in a gratuitous fashion.
 *
 * To that end, when we receive a VL3 invalidation, we instead turn it into a
 * VL3 request. We hold the general request as outstanding until we receive
 * all of the callbacks for the VL3 invalidations, at which point we go
 * through and do the log removal request.
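 *
 * A sketch of that hold/release pattern (the type and function names here
 * are hypothetical):
 *
 *    static void
 *    svp_vl3_shootdown_done(svp_sdlog_t *sdl)
 *    {
 *            mutex_enter(&sdl->sdl_lock);
 *            if (--sdl->sdl_nout == 0)           last VL3 callback fired,
 *                    svp_log_rm_send(sdl);       so ack the whole batch
 *            mutex_exit(&sdl->sdl_lock);
 *    }
 *
 * Each VL3 invalidation takes a hold (sdl_nout is incremented) when its
 * request is issued and releases it from its callback; the final release is
 * what sends the SVP_R_LOG_RM.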
 */

#include <umem.h>
#include <errno.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <libnvpair.h>
#include <strings.h>
#include <string.h>
#include <assert.h>
#include <unistd.h>

#include <libvarpd_provider.h>
#include "libvarpd_svp.h"

bunyan_logger_t *svp_bunyan;
static int svp_defport = 1296;
static int svp_defuport = 1339;
static umem_cache_t *svp_lookup_cache;

typedef enum svp_lookup_type {
	SVP_L_UNKNOWN	= 0x0,
	SVP_L_VL2	= 0x1,
	SVP_L_VL3	= 0x2,
	SVP_L_ROUTE	= 0x3
} svp_lookup_type_t;

typedef struct svp_lookup {
	int svl_type;
	union {
		struct svl_lookup_vl2 {
			varpd_query_handle_t	*svl_handle;
			overlay_target_point_t	*svl_point;
		} svl_vl2;
		struct svl_lookup_vl3 {
			varpd_arp_handle_t	*svl_vah;
			uint8_t			*svl_out;
		} svl_vl3;
		struct svl_lookup_route {
			varpd_query_handle_t	*svl_handle;
			overlay_target_point_t	*svl_point;
			overlay_target_route_t	*svl_route;
		} svl_route;
	} svl_u;
	svp_query_t				svl_query;
} svp_lookup_t;

static const char *varpd_svp_props[] = {
	"svp/host",
	"svp/port",
	"svp/underlay_ip",
	"svp/underlay_port",
	"svp/dcid",
	"svp/router_mac"
};
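
/*
 * These properties surface through dladm(8) like any other overlay property.
 * A hypothetical invocation, with illustrative names and values, might look
 * like:
 *
 *	dladm create-overlay -e vxlan -s svp -v 23 \
 *	    -p svp/host=portolan.example.com -p svp/port=1296 \
 *	    -p vxlan/listen_ip=10.88.88.7 svpoverlay0
 */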

static const uint8_t svp_bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

int
svp_comparator(const void *l, const void *r)
{
	const svp_t *ls = l;
	const svp_t *rs = r;

	if (ls->svp_vid > rs->svp_vid)
		return (1);
	if (ls->svp_vid < rs->svp_vid)
		return (-1);
	return (0);
}

static void
svp_vl2_lookup_cb(svp_t *svp, svp_status_t status, const struct in6_addr *uip,
    const uint16_t uport, void *arg)
{
	svp_lookup_t *svl = arg;
	overlay_target_point_t *otp;

	assert(svp != NULL);
	assert(arg != NULL);

	if (status != SVP_S_OK) {
		libvarpd_plugin_query_reply(svl->svl_u.svl_vl2.svl_handle,
		    VARPD_LOOKUP_DROP);
		umem_cache_free(svp_lookup_cache, svl);
		return;
	}

	otp = svl->svl_u.svl_vl2.svl_point;
	bcopy(uip, &otp->otp_ip, sizeof (struct in6_addr));
	otp->otp_port = uport;
	libvarpd_plugin_query_reply(svl->svl_u.svl_vl2.svl_handle,
	    VARPD_LOOKUP_OK);
	umem_cache_free(svp_lookup_cache, svl);
}

static void
svp_vl3_lookup_cb(svp_t *svp, svp_status_t status, const uint8_t *vl2mac,
    const struct in6_addr *uip, const uint16_t uport, void *arg)
{
	overlay_target_point_t point;
	svp_lookup_t *svl = arg;

	assert(svp != NULL);
	assert(svl != NULL);

	if (status != SVP_S_OK) {
		libvarpd_plugin_arp_reply(svl->svl_u.svl_vl3.svl_vah,
		    VARPD_LOOKUP_DROP);
		umem_cache_free(svp_lookup_cache, svl);
		return;
	}

	/* Inject the L2 mapping before the L3 */
	bcopy(uip, &point.otp_ip, sizeof (struct in6_addr));
	point.otp_port = uport;
	libvarpd_inject_varp(svp->svp_hdl, vl2mac, &point);

	bcopy(vl2mac, svl->svl_u.svl_vl3.svl_out, ETHERADDRL);
	libvarpd_plugin_arp_reply(svl->svl_u.svl_vl3.svl_vah,
	    VARPD_LOOKUP_OK);
	umem_cache_free(svp_lookup_cache, svl);
}

static void
svp_vl2_invalidate_cb(svp_t *svp, const uint8_t *vl2mac)
{
	libvarpd_inject_varp(svp->svp_hdl, vl2mac, NULL);
}

static void
svp_vl3_inject_cb(svp_t *svp, const uint16_t vlan, const struct in6_addr *vl3ip,
    const uint8_t *vl2mac, const uint8_t *targmac)
{
	struct in_addr v4;

	/*
	 * At the moment we don't support any IPv6 related log entries; this
	 * will change soon as we develop a bit more of the IPv6 related
	 * infrastructure so we can properly test the injection.
	 */
	if (IN6_IS_ADDR_V4MAPPED(vl3ip) == 0) {
		return;
	} else {
		IN6_V4MAPPED_TO_INADDR(vl3ip, &v4);
		if (targmac == NULL)
			targmac = svp_bcast;
		libvarpd_inject_arp(svp->svp_hdl, vlan, vl2mac, &v4, targmac);
	}
}

/* ARGSUSED */
static void
svp_shootdown_cb(svp_t *svp, const uint8_t *vl2mac, const struct in6_addr *uip,
    const uint16_t uport)
{
	/*
	 * We should probably do a conditional invalidation here.
	 */
	libvarpd_inject_varp(svp->svp_hdl, vl2mac, NULL);
}

static void
svp_route_lookup_cb(svp_t *svp, svp_status_t status, uint32_t dcid,
    uint32_t vnetid, uint16_t vlan, uint8_t *srcmac, uint8_t *dstmac,
    uint16_t ul3_port, uint8_t *ul3_addr, uint8_t srcpfx, uint8_t dstpfx,
    void *arg)
{
	svp_lookup_t *svl = arg;
	overlay_target_point_t *otp;
	overlay_target_route_t *otr;

	if (status != SVP_S_OK) {
		libvarpd_plugin_query_reply(svl->svl_u.svl_route.svl_handle,
		    VARPD_LOOKUP_DROP);
		umem_cache_free(svp_lookup_cache, svl);
		return;
	}

	otp = svl->svl_u.svl_route.svl_point;
	bcopy(dstmac, otp->otp_mac, ETHERADDRL);
	bcopy(ul3_addr, &otp->otp_ip, sizeof (struct in6_addr));
	otp->otp_port = ul3_port;

	otr = svl->svl_u.svl_route.svl_route;
	otr->otr_vnet = vnetid;
	otr->otr_vlan = vlan;
	bcopy(srcmac, otr->otr_srcmac, ETHERADDRL);
	otr->otr_dcid = dcid;
	otr->otr_src_prefixlen = srcpfx;
	otr->otr_dst_prefixlen = dstpfx;

	libvarpd_plugin_query_reply(svl->svl_u.svl_route.svl_handle,
	    VARPD_LOOKUP_OK);
	umem_cache_free(svp_lookup_cache, svl);
}

static svp_cb_t svp_defops = {
	svp_vl2_lookup_cb,
	svp_vl3_lookup_cb,
	svp_vl2_invalidate_cb,
	svp_vl3_inject_cb,
	svp_shootdown_cb,
	svp_route_lookup_cb,
};

static boolean_t
varpd_svp_valid_dest(overlay_plugin_dest_t dest)
{
	if (dest != (OVERLAY_PLUGIN_D_IP | OVERLAY_PLUGIN_D_PORT))
		return (B_FALSE);

	return (B_TRUE);
}

static int
varpd_svp_create(varpd_provider_handle_t *hdl, void **outp,
    overlay_plugin_dest_t dest)
{
	int ret;
	svp_t *svp;

	if (varpd_svp_valid_dest(dest) == B_FALSE)
		return (ENOTSUP);

	svp = umem_zalloc(sizeof (svp_t), UMEM_DEFAULT);
	if (svp == NULL)
		return (ENOMEM);

	if ((ret = mutex_init(&svp->svp_lock, USYNC_THREAD | LOCK_ERRORCHECK,
	    NULL)) != 0) {
		umem_free(svp, sizeof (svp_t));
		return (ret);
	}

	svp->svp_port = svp_defport;
	svp->svp_uport = svp_defuport;
	svp->svp_cb = svp_defops;
	svp->svp_hdl = hdl;
	svp->svp_vid = libvarpd_plugin_vnetid(svp->svp_hdl);
	*outp = svp;
	return (0);
}

static int
varpd_svp_start(void *arg)
{
	int ret;
	svp_remote_t *srp;
	svp_t *svp = arg;

	mutex_enter(&svp->svp_lock);
	if (svp->svp_host == NULL || svp->svp_port == 0 ||
	    svp->svp_huip == B_FALSE || svp->svp_uport == 0) {
		mutex_exit(&svp->svp_lock);
		return (EAGAIN);
	}
	mutex_exit(&svp->svp_lock);

	if ((ret = svp_remote_find(svp->svp_host, svp->svp_port, &svp->svp_uip,
	    &srp)) != 0)
		return (ret);

	if ((ret = svp_remote_attach(srp, svp)) != 0) {
		svp_remote_release(srp);
		return (ret);
	}

	return (0);
}

static void
varpd_svp_stop(void *arg)
{
	svp_t *svp = arg;

	svp_remote_detach(svp);
}

static void
varpd_svp_destroy(void *arg)
{
	svp_t *svp = arg;

	if (svp->svp_host != NULL)
		umem_free(svp->svp_host, strlen(svp->svp_host) + 1);

	if (mutex_destroy(&svp->svp_lock) != 0)
		libvarpd_panic("failed to destroy svp_t`svp_lock");

	umem_free(svp, sizeof (svp_t));
}

static void
varpd_svp_lookup_l3(svp_t *svp, varpd_query_handle_t *vqh,
    const overlay_targ_lookup_t *otl, overlay_target_point_t *otp,
    overlay_target_route_t *otr)
{
	svp_lookup_t *slp;
	uint32_t type;
	const struct in6_addr *src = &otl->otl_addru.otlu_l3.otl3_srcip,
	    *dst = &otl->otl_addru.otlu_l3.otl3_dstip;

	/*
	 * otl is an L3 request, so we have src/dst IPs for the inner packet.
	 * We also have the vlan.
	 *
	 * Assume the kernel's overlay module is caching well, so we are going
	 * to query directly (i.e. no caching up here of actual destinations).
	 *
	 * We use our existing remote server (svp_remote), but with the new
	 * message SVP_R_ROUTE_REQ.
	 */

	/* XXX KEBE SAYS DO SOME otl verification too... */
	if (IN6_IS_ADDR_V4MAPPED(src)) {
		if (!IN6_IS_ADDR_V4MAPPED(dst)) {
			libvarpd_plugin_query_reply(vqh, VARPD_LOOKUP_DROP);
			return;
		}
		type = SVP_VL3_IP;
	} else {
		if (IN6_IS_ADDR_V4MAPPED(dst)) {
			libvarpd_plugin_query_reply(vqh, VARPD_LOOKUP_DROP);
			return;
		}
		type = SVP_VL3_IPV6;
	}

	slp = umem_cache_alloc(svp_lookup_cache, UMEM_DEFAULT);
	if (slp == NULL) {
		libvarpd_plugin_query_reply(vqh, VARPD_LOOKUP_DROP);
		return;
	}

	slp->svl_type = SVP_L_ROUTE;
	slp->svl_u.svl_route.svl_handle = vqh;
	slp->svl_u.svl_route.svl_point = otp;
	slp->svl_u.svl_route.svl_route = otr;

	svp_remote_route_lookup(svp, &slp->svl_query, src, dst,
	    otl->otl_vnetid, (uint16_t)otl->otl_vlan, slp);
}

static void
varpd_svp_lookup(void *arg, varpd_query_handle_t *vqh,
    const overlay_targ_lookup_t *otl, overlay_target_point_t *otp,
    overlay_target_route_t *otr)
{
	svp_lookup_t *slp;
	svp_t *svp = arg;

	/*
	 * Shuffle off L3 lookups to their own codepath.
	 */
	if (otl->otl_l3req) {
		varpd_svp_lookup_l3(svp, vqh, otl, otp, otr);
		return;
	}

	/*
	 * At this point, the traditional overlay_target_point_t is all that
	 * needs filling in.  Zero out the otr for safety.
	 */
	bzero(otr, sizeof (*otr));

	/*
	 * Check if this is something that we need to proxy, e.g. ARP or NDP.
	 */
	if (otl->otl_addru.otlu_l2.otl2_sap == ETHERTYPE_ARP) {
		libvarpd_plugin_proxy_arp(svp->svp_hdl, vqh, otl);
		return;
	}

	if (otl->otl_addru.otlu_l2.otl2_dstaddr[0] == 0x33 &&
	    otl->otl_addru.otlu_l2.otl2_dstaddr[1] == 0x33) {
		if (otl->otl_addru.otlu_l2.otl2_sap == ETHERTYPE_IPV6) {
			libvarpd_plugin_proxy_ndp(svp->svp_hdl, vqh, otl);
		} else {
			libvarpd_plugin_query_reply(vqh, VARPD_LOOKUP_DROP);
		}
		return;
	}

	/*
	 * Watch out for various multicast and broadcast addresses. We've
	 * already taken care of the IPv6 range above. Now we just need to
	 * handle broadcast and multicast: if the destination is the broadcast
	 * address, or the multicast bit (the lowest bit of the first octet of
	 * the MAC) is set, then we drop it now.
	 */
	if (bcmp(otl->otl_addru.otlu_l2.otl2_dstaddr, svp_bcast,
	    ETHERADDRL) == 0 ||
	    (otl->otl_addru.otlu_l2.otl2_dstaddr[0] & 0x01) == 0x01) {
		libvarpd_plugin_query_reply(vqh, VARPD_LOOKUP_DROP);
		return;
	}

	/*
	 * If we have a failure to allocate memory for this, that's not good.
	 * However, telling the kernel to just drop this packet is much better
	 * than the alternative at this moment. At least we'll try again and we
	 * may have something more available to us in a little bit.
	 */
	slp = umem_cache_alloc(svp_lookup_cache, UMEM_DEFAULT);
	if (slp == NULL) {
		libvarpd_plugin_query_reply(vqh, VARPD_LOOKUP_DROP);
		return;
	}

	slp->svl_type = SVP_L_VL2;
	slp->svl_u.svl_vl2.svl_handle = vqh;
	slp->svl_u.svl_vl2.svl_point = otp;

	svp_remote_vl2_lookup(svp, &slp->svl_query,
	    otl->otl_addru.otlu_l2.otl2_dstaddr, slp);
}

/* ARGSUSED */
static int
varpd_svp_nprops(void *arg, uint_t *nprops)
{
	*nprops = sizeof (varpd_svp_props) / sizeof (char *);
	return (0);
}

/* ARGSUSED */
static int
varpd_svp_propinfo(void *arg, uint_t propid, varpd_prop_handle_t *vph)
{
	switch (propid) {
	case 0:
		/* svp/host */
		libvarpd_prop_set_name(vph, varpd_svp_props[0]);
		libvarpd_prop_set_prot(vph, OVERLAY_PROP_PERM_RRW);
		libvarpd_prop_set_type(vph, OVERLAY_PROP_T_STRING);
		libvarpd_prop_set_nodefault(vph);
		break;
	case 1:
		/* svp/port */
		libvarpd_prop_set_name(vph, varpd_svp_props[1]);
		libvarpd_prop_set_prot(vph, OVERLAY_PROP_PERM_RRW);
		libvarpd_prop_set_type(vph, OVERLAY_PROP_T_UINT);
		(void) libvarpd_prop_set_default(vph, &svp_defport,
		    sizeof (svp_defport));
		libvarpd_prop_set_range_uint32(vph, 1, UINT16_MAX);
		break;
	case 2:
		/* svp/underlay_ip */
		libvarpd_prop_set_name(vph, varpd_svp_props[2]);
		libvarpd_prop_set_prot(vph, OVERLAY_PROP_PERM_RRW);
		libvarpd_prop_set_type(vph, OVERLAY_PROP_T_IP);
		libvarpd_prop_set_nodefault(vph);
		break;
	case 3:
		/* svp/underlay_port */
		libvarpd_prop_set_name(vph, varpd_svp_props[3]);
		libvarpd_prop_set_prot(vph, OVERLAY_PROP_PERM_RRW);
		libvarpd_prop_set_type(vph, OVERLAY_PROP_T_UINT);
		(void) libvarpd_prop_set_default(vph, &svp_defuport,
		    sizeof (svp_defuport));
		libvarpd_prop_set_range_uint32(vph, 1, UINT16_MAX);
		break;
	case 4:
		/* svp/dcid */
		libvarpd_prop_set_name(vph, varpd_svp_props[4]);
		libvarpd_prop_set_prot(vph, OVERLAY_PROP_PERM_RRW);
		libvarpd_prop_set_type(vph, OVERLAY_PROP_T_UINT);
		libvarpd_prop_set_nodefault(vph);
		/* XXX KEBE ASKS should I just set high to UINT32_MAX? */
		libvarpd_prop_set_range_uint32(vph, 1, UINT32_MAX - 1);
		break;
	case 5:
		/* svp/router_mac */
		libvarpd_prop_set_name(vph, varpd_svp_props[5]);
		libvarpd_prop_set_prot(vph, OVERLAY_PROP_PERM_RRW);
		libvarpd_prop_set_type(vph, OVERLAY_PROP_T_ETHER);
		libvarpd_prop_set_nodefault(vph);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static int
varpd_svp_getprop(void *arg, const char *pname, void *buf, uint32_t *sizep)
{
	svp_t *svp = arg;

	/* svp/host */
	if (strcmp(pname, varpd_svp_props[0]) == 0) {
		size_t len;

		mutex_enter(&svp->svp_lock);
		if (svp->svp_host == NULL) {
			*sizep = 0;
		} else {
			len = strlen(svp->svp_host) + 1;
			if (*sizep < len) {
				mutex_exit(&svp->svp_lock);
				return (EOVERFLOW);
			}
			*sizep = len;
			(void) strlcpy(buf, svp->svp_host, *sizep);
		}
		mutex_exit(&svp->svp_lock);
		return (0);
	}

	/* svp/port */
	if (strcmp(pname, varpd_svp_props[1]) == 0) {
		uint64_t val;

		if (*sizep < sizeof (uint64_t))
			return (EOVERFLOW);

		mutex_enter(&svp->svp_lock);
		if (svp->svp_port == 0) {
			*sizep = 0;
		} else {
			val = svp->svp_port;
			bcopy(&val, buf, sizeof (uint64_t));
			*sizep = sizeof (uint64_t);
		}
		mutex_exit(&svp->svp_lock);
		return (0);
	}

	/* svp/underlay_ip */
	if (strcmp(pname, varpd_svp_props[2]) == 0) {
		if (*sizep < sizeof (struct in6_addr))
			return (EOVERFLOW);
		mutex_enter(&svp->svp_lock);
		if (svp->svp_huip == B_FALSE) {
			*sizep = 0;
		} else {
			bcopy(&svp->svp_uip, buf, sizeof (struct in6_addr));
			*sizep = sizeof (struct in6_addr);
		}
		mutex_exit(&svp->svp_lock);
		return (0);
	}

	/* svp/underlay_port */
	if (strcmp(pname, varpd_svp_props[3]) == 0) {
		uint64_t val;

		if (*sizep < sizeof (uint64_t))
			return (EOVERFLOW);

		mutex_enter(&svp->svp_lock);
		if (svp->svp_uport == 0) {
			*sizep = 0;
		} else {
			val = svp->svp_uport;
			bcopy(&val, buf, sizeof (uint64_t));
			*sizep = sizeof (uint64_t);
		}

		mutex_exit(&svp->svp_lock);
		return (0);
	}

	/* svp/dcid */
	if (strcmp(pname, varpd_svp_props[4]) == 0) {
		uint64_t val;

		if (*sizep < sizeof (uint64_t))
			return (EOVERFLOW);

		mutex_enter(&svp->svp_lock);
		if (svp->svp_dcid == 0) {
			*sizep = 0;
		} else {
			val = svp->svp_dcid;
			bcopy(&val, buf, sizeof (uint64_t));
			*sizep = sizeof (uint64_t);
		}

		mutex_exit(&svp->svp_lock);
		return (0);
	}

	/* svp/router_mac */
	if (strcmp(pname, varpd_svp_props[5]) == 0) {
		if (*sizep < ETHERADDRL)
			return (EOVERFLOW);
		mutex_enter(&svp->svp_lock);

		if (ether_is_zero(&svp->svp_router_mac)) {
			*sizep = 0;
		} else {
			bcopy(&svp->svp_router_mac, buf, ETHERADDRL);
			*sizep = ETHERADDRL;
		}

		mutex_exit(&svp->svp_lock);
		return (0);
	}
	return (EINVAL);
}

static int
varpd_svp_setprop(void *arg, const char *pname, const void *buf,
    const uint32_t size)
{
	svp_t *svp = arg;

	/* svp/host */
	if (strcmp(pname, varpd_svp_props[0]) == 0) {
		char *dup;

		dup = umem_alloc(size, UMEM_DEFAULT);
		if (dup == NULL)
			return (ENOMEM);
		(void) strlcpy(dup, buf, size);
		mutex_enter(&svp->svp_lock);
		if (svp->svp_host != NULL)
			umem_free(svp->svp_host, strlen(svp->svp_host) + 1);
		svp->svp_host = dup;
		mutex_exit(&svp->svp_lock);
		return (0);
	}

	/* svp/port */
	if (strcmp(pname, varpd_svp_props[1]) == 0) {
		const uint64_t *valp = buf;

		if (size < sizeof (uint64_t))
			return (EOVERFLOW);

		if (*valp == 0 || *valp > UINT16_MAX)
			return (EINVAL);

		mutex_enter(&svp->svp_lock);
		svp->svp_port = (uint16_t)*valp;
		mutex_exit(&svp->svp_lock);
		return (0);
	}

	/* svp/underlay_ip */
	if (strcmp(pname, varpd_svp_props[2]) == 0) {
		const struct in6_addr *ipv6 = buf;

		if (size < sizeof (struct in6_addr))
			return (EOVERFLOW);

		if (IN6_IS_ADDR_V4COMPAT(ipv6))
			return (EINVAL);

		if (IN6_IS_ADDR_MULTICAST(ipv6))
			return (EINVAL);

		if (IN6_IS_ADDR_6TO4(ipv6))
			return (EINVAL);

		if (IN6_IS_ADDR_V4MAPPED(ipv6)) {
			ipaddr_t v4;
			IN6_V4MAPPED_TO_IPADDR(ipv6, v4);
			if (IN_MULTICAST(v4))
				return (EINVAL);
		}

		mutex_enter(&svp->svp_lock);
		bcopy(buf, &svp->svp_uip, sizeof (struct in6_addr));
		svp->svp_huip = B_TRUE;
		mutex_exit(&svp->svp_lock);
		return (0);
	}

	/* svp/underlay_port */
	if (strcmp(pname, varpd_svp_props[3]) == 0) {
		const uint64_t *valp = buf;

		if (size < sizeof (uint64_t))
			return (EOVERFLOW);

		if (*valp == 0 || *valp > UINT16_MAX)
			return (EINVAL);

		mutex_enter(&svp->svp_lock);
		svp->svp_uport = (uint16_t)*valp;
		mutex_exit(&svp->svp_lock);

		return (0);
	}

	/* svp/dcid */
	if (strcmp(pname, varpd_svp_props[4]) == 0) {
		const uint64_t *valp = buf;

		if (size < sizeof (uint64_t))
			return (EOVERFLOW);

		/* XXX KEBE ASKS, use UINT32_MAX instead? */
		if (*valp == 0 || *valp > UINT32_MAX - 1)
			return (EINVAL);

		mutex_enter(&svp->svp_lock);
		svp->svp_dcid = (uint32_t)*valp;
		mutex_exit(&svp->svp_lock);

		return (0);
	}

	/* svp/router_mac */
	if (strcmp(pname, varpd_svp_props[5]) == 0) {
		if (size < ETHERADDRL)
			return (EOVERFLOW);
		mutex_enter(&svp->svp_lock);
		bcopy(buf, &svp->svp_router_mac, ETHERADDRL);
		mutex_exit(&svp->svp_lock);
		return (0);
	}

	return (EINVAL);
}

static int
varpd_svp_save(void *arg, nvlist_t *nvp)
{
	int ret;
	svp_t *svp = arg;

	mutex_enter(&svp->svp_lock);
	/* svp/host */
	if (svp->svp_host != NULL) {
		if ((ret = nvlist_add_string(nvp, varpd_svp_props[0],
		    svp->svp_host)) != 0) {
			mutex_exit(&svp->svp_lock);
			return (ret);
		}
	}

	/* svp/port */
	if (svp->svp_port != 0) {
		if ((ret = nvlist_add_uint16(nvp, varpd_svp_props[1],
		    svp->svp_port)) != 0) {
			mutex_exit(&svp->svp_lock);
			return (ret);
		}
	}

	/* svp/underlay_ip */
	if (svp->svp_huip == B_TRUE) {
		char buf[INET6_ADDRSTRLEN];

		if (inet_ntop(AF_INET6, &svp->svp_uip, buf, sizeof (buf)) ==
		    NULL)
			libvarpd_panic("unexpected inet_ntop failure: %d",
			    errno);

		if ((ret = nvlist_add_string(nvp, varpd_svp_props[2],
		    buf)) != 0) {
			mutex_exit(&svp->svp_lock);
			return (ret);
		}
	}

	/* svp/underlay_port */
	if (svp->svp_uport != 0) {
		if ((ret = nvlist_add_uint16(nvp, varpd_svp_props[3],
		    svp->svp_uport)) != 0) {
			mutex_exit(&svp->svp_lock);
			return (ret);
		}
	}

	/* svp/dcid */
	if (svp->svp_dcid != 0) {
		if ((ret = nvlist_add_uint32(nvp, varpd_svp_props[4],
		    svp->svp_dcid)) != 0) {
			mutex_exit(&svp->svp_lock);
			return (ret);
		}
	}

	/* svp/router_mac */
	if (!ether_is_zero(&svp->svp_router_mac)) {
		char buf[ETHERADDRSTRL];

		/* XXX KEBE SAYS See underlay_ip... */
		if (ether_ntoa_r(&svp->svp_router_mac, buf) == NULL) {
			libvarpd_panic("unexpected ether_ntoa_r failure: %d",
			    errno);
		}

		if ((ret = nvlist_add_string(nvp, varpd_svp_props[5],
		    buf)) != 0) {
			mutex_exit(&svp->svp_lock);
			return (ret);
		}
	}

	mutex_exit(&svp->svp_lock);
	return (0);
}

static int
varpd_svp_restore(nvlist_t *nvp, varpd_provider_handle_t *hdl,
    overlay_plugin_dest_t dest, void **outp)
{
	int ret;
	svp_t *svp;
	char *ipstr, *hstr, *etherstr;

	if (varpd_svp_valid_dest(dest) == B_FALSE)
		return (ENOTSUP);

	if ((ret = varpd_svp_create(hdl, (void **)&svp, dest)) != 0)
		return (ret);

	/* svp/host */
	if ((ret = nvlist_lookup_string(nvp, varpd_svp_props[0],
	    &hstr)) != 0) {
		if (ret != ENOENT) {
			varpd_svp_destroy(svp);
			return (ret);
		}
		svp->svp_host = NULL;
	} else {
		size_t blen = strlen(hstr) + 1;
		svp->svp_host = umem_alloc(blen, UMEM_DEFAULT);
		if (svp->svp_host == NULL) {
			varpd_svp_destroy(svp);
			return (ENOMEM);
		}
		(void) strlcpy(svp->svp_host, hstr, blen);
	}

	/* svp/port */
	if ((ret = nvlist_lookup_uint16(nvp, varpd_svp_props[1],
	    &svp->svp_port)) != 0) {
		if (ret != ENOENT) {
			varpd_svp_destroy(svp);
			return (ret);
		}
		svp->svp_port = 0;
	}

	/* svp/underlay_ip */
	if ((ret = nvlist_lookup_string(nvp, varpd_svp_props[2],
	    &ipstr)) != 0) {
		if (ret != ENOENT) {
			varpd_svp_destroy(svp);
			return (ret);
		}
		svp->svp_huip = B_FALSE;
	} else {
		ret = inet_pton(AF_INET6, ipstr, &svp->svp_uip);
		if (ret == -1) {
			assert(errno == EAFNOSUPPORT);
			libvarpd_panic("unexpected inet_pton failure: %d",
			    errno);
		}

		if (ret == 0) {
			varpd_svp_destroy(svp);
			return (EINVAL);
		}
		svp->svp_huip = B_TRUE;
	}

	/* svp/underlay_port */
	if ((ret = nvlist_lookup_uint16(nvp, varpd_svp_props[3],
	    &svp->svp_uport)) != 0) {
		if (ret != ENOENT) {
			varpd_svp_destroy(svp);
			return (ret);
		}
		svp->svp_uport = 0;
	}

	/* svp/dcid */
	if ((ret = nvlist_lookup_uint32(nvp, varpd_svp_props[4],
	    &svp->svp_dcid)) != 0) {
		if (ret != ENOENT) {
			varpd_svp_destroy(svp);
			return (ret);
		}
		svp->svp_dcid = 0;
	}

	/* svp/router_mac */
	if ((ret = nvlist_lookup_string(nvp, varpd_svp_props[5],
	    &etherstr)) != 0) {
		if (ret != ENOENT) {
			varpd_svp_destroy(svp);
			return (ret);
		}
		bzero(&svp->svp_router_mac, ETHERADDRL);
	} else if (ether_aton_r(etherstr, &svp->svp_router_mac) == NULL) {
		libvarpd_panic("unexpected ether_aton_r failure: %d", errno);
	}

	svp->svp_hdl = hdl;
	*outp = svp;
	return (0);
}

static void
varpd_svp_arp(void *arg, varpd_arp_handle_t *vah, int type,
    const struct sockaddr *sock, uint8_t *out)
{
	svp_t *svp = arg;
	svp_lookup_t *svl;

	if (type != VARPD_QTYPE_ETHERNET) {
		libvarpd_plugin_arp_reply(vah, VARPD_LOOKUP_DROP);
		return;
	}

	svl = umem_cache_alloc(svp_lookup_cache, UMEM_DEFAULT);
	if (svl == NULL) {
		libvarpd_plugin_arp_reply(vah, VARPD_LOOKUP_DROP);
		return;
	}

	svl->svl_type = SVP_L_VL3;
	svl->svl_u.svl_vl3.svl_vah = vah;
	svl->svl_u.svl_vl3.svl_out = out;
	svp_remote_vl3_lookup(svp, &svl->svl_query, sock, svl);
}

static const varpd_plugin_ops_t varpd_svp_ops = {
	0,
	varpd_svp_create,
	varpd_svp_start,
	varpd_svp_stop,
	varpd_svp_destroy,
	NULL,
	varpd_svp_lookup,
	varpd_svp_nprops,
	varpd_svp_propinfo,
	varpd_svp_getprop,
	varpd_svp_setprop,
	varpd_svp_save,
	varpd_svp_restore,
	varpd_svp_arp,
	NULL
};

static int
svp_bunyan_init(void)
{
	int ret;

	if ((ret = bunyan_init("svp", &svp_bunyan)) != 0)
		return (ret);
	ret = bunyan_stream_add(svp_bunyan, "stderr", BUNYAN_L_INFO,
	    bunyan_stream_fd, (void *)STDERR_FILENO);
	if (ret != 0)
		bunyan_fini(svp_bunyan);
	return (ret);
}

static void
svp_bunyan_fini(void)
{
	if (svp_bunyan != NULL)
		bunyan_fini(svp_bunyan);
}

#pragma init(varpd_svp_init)
static void
varpd_svp_init(void)
{
	int err;
	varpd_plugin_register_t *vpr;

	if (svp_bunyan_init() != 0)
		return;

	if ((err = svp_host_init()) != 0) {
		(void) bunyan_error(svp_bunyan, "failed to init host subsystem",
		    BUNYAN_T_INT32, "error", err,
		    BUNYAN_T_END);
		svp_bunyan_fini();
		return;
	}

	svp_lookup_cache = umem_cache_create("svp_lookup",
	    sizeof (svp_lookup_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	if (svp_lookup_cache == NULL) {
		(void) bunyan_error(svp_bunyan,
		    "failed to create svp_lookup cache",
		    BUNYAN_T_INT32, "error", errno,
		    BUNYAN_T_END);
		svp_bunyan_fini();
		return;
	}

	if ((err = svp_event_init()) != 0) {
		(void) bunyan_error(svp_bunyan,
		    "failed to init event subsystem",
		    BUNYAN_T_INT32, "error", err,
		    BUNYAN_T_END);
		svp_bunyan_fini();
		umem_cache_destroy(svp_lookup_cache);
		return;
	}

	if ((err = svp_timer_init()) != 0) {
		(void) bunyan_error(svp_bunyan,
		    "failed to init timer subsystem",
		    BUNYAN_T_INT32, "error", err,
		    BUNYAN_T_END);
		svp_event_fini();
		umem_cache_destroy(svp_lookup_cache);
		svp_bunyan_fini();
		return;
	}

	if ((err = svp_remote_init()) != 0) {
		(void) bunyan_error(svp_bunyan,
		    "failed to init remote subsystem",
		    BUNYAN_T_INT32, "error", err,
		    BUNYAN_T_END);
		svp_event_fini();
		umem_cache_destroy(svp_lookup_cache);
		svp_bunyan_fini();
		return;
	}

	vpr = libvarpd_plugin_alloc(VARPD_CURRENT_VERSION, &err);
	if (vpr == NULL) {
		(void) bunyan_error(svp_bunyan,
		    "failed to alloc varpd plugin",
		    BUNYAN_T_INT32, "error", err,
		    BUNYAN_T_END);
		svp_remote_fini();
		svp_event_fini();
		umem_cache_destroy(svp_lookup_cache);
		svp_bunyan_fini();
		return;
	}

	vpr->vpr_mode = OVERLAY_TARGET_DYNAMIC;
	vpr->vpr_name = "svp";
	vpr->vpr_ops = &varpd_svp_ops;

	if ((err = libvarpd_plugin_register(vpr)) != 0) {
		(void) bunyan_error(svp_bunyan,
		    "failed to register varpd plugin",
		    BUNYAN_T_INT32, "error", err,
		    BUNYAN_T_END);
		svp_remote_fini();
		svp_event_fini();
		umem_cache_destroy(svp_lookup_cache);
		svp_bunyan_fini();
	}
	libvarpd_plugin_free(vpr);
}