11083 support NFS server in zone
Portions contributed by: Dan Kruchinin <dan.kruchinin@nexenta.com>
Portions contributed by: Stepan Zastupov <stepan.zastupov@gmail.com>
Portions contributed by: Joyce McIntosh <joyce.mcintosh@nexenta.com>
Portions contributed by: Mike Zeller <mike@mikezeller.net>
Portions contributed by: Dan McDonald <danmcd@joyent.com>
Portions contributed by: Gordon Ross <gordon.w.ross@gmail.com>
Portions contributed by: Vitaliy Gusev <gusev.vitaliy@gmail.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Reviewed by: Rob Gittins <rob.gittins@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Jason King <jbk@joyent.com>
Reviewed by: C Fraire <cfraire@me.com>
Change-Id: I22f289d357503f9b48a0bc2482cc4328a6d43d16
    
      
          --- old/usr/src/uts/common/fs/nfs/nfs4_dispatch.c
          +++ new/usr/src/uts/common/fs/nfs/nfs4_dispatch.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  24   24   * Use is subject to license terms.
  25   25   */
  26   26  
       27 +/*
       28 + * Copyright 2018 Nexenta Systems, Inc.
       29 + */
       30 +
  27   31  #include <sys/systm.h>
  28   32  #include <sys/sdt.h>
  29   33  #include <rpc/types.h>
  30   34  #include <rpc/auth.h>
  31   35  #include <rpc/auth_unix.h>
  32   36  #include <rpc/auth_des.h>
  33   37  #include <rpc/svc.h>
  34   38  #include <rpc/xdr.h>
  35   39  #include <nfs/nfs4.h>
  36   40  #include <nfs/nfs_dispatch.h>
  37   41  #include <nfs/nfs4_drc.h>
  38   42  
  39   43  #define NFS4_MAX_MINOR_VERSION  0
  40   44  
  41   45  /*
  42      - * This is the duplicate request cache for NFSv4
  43      - */
  44      -rfs4_drc_t *nfs4_drc = NULL;
  45      -
  46      -/*
  47   46   * The default size of the duplicate request cache
  48   47   */
  49   48  uint32_t nfs4_drc_max = 8 * 1024;
  50   49  
  51   50  /*
  52   51   * The number of buckets we'd like to hash the
  53   52   * replies into.. do not change this on the fly.
  54   53   */
  55   54  uint32_t nfs4_drc_hash = 541;
  56   55  
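
The two tunables above set the cache geometry: nfs4_drc_max bounds how many
replies a DRC will hold and nfs4_drc_hash (a prime) fixes the number of hash
buckets. Replies are bucketed by RPC transaction ID, exactly as rfs4_find_dr()
does further down; a minimal sketch of that mapping, assuming req and drc are
in scope as they are there:

    /* Illustrative only: how an RPC XID selects a DRC bucket. */
    uint32_t xid = req->rq_xprt->xp_xid;          /* transaction ID   */
    uint32_t bktdex = xid % drc->dr_hash;         /* 0 .. dr_hash - 1 */
    list_t *bucket = &drc->dr_buckets[bktdex];    /* bucket to search */
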
  57   56  static void rfs4_resource_err(struct svc_req *req, COMPOUND4args *argsp);
  58   57  
  59   58  /*
  60   59   * Initialize a duplicate request cache.
  61   60   */
  62   61  rfs4_drc_t *
  63   62  rfs4_init_drc(uint32_t drc_size, uint32_t drc_hash_size)
  64   63  {
  65   64          rfs4_drc_t *drc;
  66   65          uint32_t   bki;
  67   66  
  68   67          ASSERT(drc_size);
  69   68          ASSERT(drc_hash_size);
  70   69  
  71   70          drc = kmem_alloc(sizeof (rfs4_drc_t), KM_SLEEP);
  72   71  
  73   72          drc->max_size = drc_size;
  74   73          drc->in_use = 0;
  75   74  
  76   75          mutex_init(&drc->lock, NULL, MUTEX_DEFAULT, NULL);
  77   76  
  78   77          drc->dr_hash = drc_hash_size;
  79   78  
  80   79          drc->dr_buckets = kmem_alloc(sizeof (list_t)*drc_hash_size, KM_SLEEP);
  81   80  
  82   81          for (bki = 0; bki < drc_hash_size; bki++) {
  83   82                  list_create(&drc->dr_buckets[bki], sizeof (rfs4_dupreq_t),
  84   83                      offsetof(rfs4_dupreq_t, dr_bkt_next));
  85   84          }
  86   85  
  
  87   86          list_create(&(drc->dr_cache), sizeof (rfs4_dupreq_t),
  88   87              offsetof(rfs4_dupreq_t, dr_next));
  89   88  
  90   89          return (drc);
  91   90  }
  92   91  
  93   92  /*
  94   93   * Destroy a duplicate request cache.
  95   94   */
  96   95  void
  97      -rfs4_fini_drc(rfs4_drc_t *drc)
       96 +rfs4_fini_drc(void)
  98   97  {
       98 +        nfs4_srv_t *nsrv4 = nfs4_get_srv();
       99 +        rfs4_drc_t *drc = nsrv4->nfs4_drc;
  99  100          rfs4_dupreq_t *drp, *drp_next;
 100  101  
 101      -        ASSERT(drc);
 102      -
 103  102          /* iterate over the dr_cache and free the enties */
 104  103          for (drp = list_head(&(drc->dr_cache)); drp != NULL; drp = drp_next) {
 105  104  
 106  105                  if (drp->dr_state == NFS4_DUP_REPLAY)
 107  106                          rfs4_compound_free(&(drp->dr_res));
 108  107  
 109  108                  if (drp->dr_addr.buf != NULL)
 110  109                          kmem_free(drp->dr_addr.buf, drp->dr_addr.maxlen);
 111  110  
 112  111                  drp_next = list_next(&(drc->dr_cache), drp);
 113  112  
 114  113                  kmem_free(drp, sizeof (rfs4_dupreq_t));
 115  114          }
 116  115  
 117  116          mutex_destroy(&drc->lock);
 118  117          kmem_free(drc->dr_buckets,
 119  118              sizeof (list_t)*drc->dr_hash);
 120  119          kmem_free(drc, sizeof (rfs4_drc_t));
 121  120  }
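
With the file-global nfs4_drc pointer gone, each zone's NFSv4 server instance
owns its own duplicate request cache and reaches it through nfs4_get_srv(), as
the hunk above (and rfs4_dispatch() below) shows. A minimal sketch of that
access pattern, assuming nfs4_srv_t is the per-zone server state carrying the
nfs4_drc field:

    /* Illustrative only: per-zone DRC lookup replacing the old global. */
    nfs4_srv_t *nsrv4 = nfs4_get_srv();  /* this zone's NFSv4 server state */
    rfs4_drc_t *drc = nsrv4->nfs4_drc;   /* the DRC owned by that instance */

    /* drc is then used exactly as the old global nfs4_drc was. */
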
 122  121  
 123  122  /*
 124  123   * rfs4_dr_chstate:
 125  124   *
 126  125   * Change the state of a rfs4_dupreq. If it's not in transition
 127  126   * to the FREE state, return. If we are moving to the FREE state
 128  127   * then we need to clean up the compound results and move the entry
 129  128   * to the end of the list.
 130  129   */
 131  130  void
 132  131  rfs4_dr_chstate(rfs4_dupreq_t *drp, int new_state)
 133  132  {
 134  133          rfs4_drc_t *drc;
 135  134  
 136  135          ASSERT(drp);
 137  136          ASSERT(drp->drc);
 138  137          ASSERT(drp->dr_bkt);
 139  138          ASSERT(MUTEX_HELD(&drp->drc->lock));
 140  139  
 141  140          drp->dr_state = new_state;
 142  141  
 143  142          if (new_state != NFS4_DUP_FREE)
 144  143                  return;
 145  144  
 146  145          drc = drp->drc;
 147  146  
 148  147          /*
 149  148           * Remove entry from the bucket and
 150  149           * dr_cache list, free compound results.
 151  150           */
 152  151          list_remove(drp->dr_bkt, drp);
 153  152          list_remove(&(drc->dr_cache), drp);
 154  153          rfs4_compound_free(&(drp->dr_res));
 155  154  }
 156  155  
 157  156  /*
 158  157   * rfs4_alloc_dr:
 159  158   *
 160  159   * Malloc a new one if we have not reached our maximum cache
 161  160   * limit, otherwise pick an entry off the tail -- Use if it
 162  161   * is marked as NFS4_DUP_FREE, or is an entry in the
 163  162   * NFS4_DUP_REPLAY state.
 164  163   */
 165  164  rfs4_dupreq_t *
 166  165  rfs4_alloc_dr(rfs4_drc_t *drc)
 167  166  {
 168  167          rfs4_dupreq_t *drp_tail, *drp = NULL;
 169  168  
 170  169          ASSERT(drc);
 171  170          ASSERT(MUTEX_HELD(&drc->lock));
 172  171  
 173  172          /*
 174  173           * Have we hit the cache limit yet ?
 175  174           */
 176  175          if (drc->in_use < drc->max_size) {
 177  176                  /*
 178  177                   * nope, so let's malloc a new one
 179  178                   */
 180  179                  drp = kmem_zalloc(sizeof (rfs4_dupreq_t), KM_SLEEP);
 181  180                  drp->drc = drc;
 182  181                  drc->in_use++;
 183  182                  DTRACE_PROBE1(nfss__i__drc_new, rfs4_dupreq_t *, drp);
 184  183                  return (drp);
 185  184          }
 186  185  
 187  186          /*
 188  187           * Cache is all allocated now traverse the list
 189  188           * backwards to find one we can reuse.
 190  189           */
 191  190          for (drp_tail = list_tail(&drc->dr_cache); drp_tail != NULL;
 192  191              drp_tail = list_prev(&drc->dr_cache, drp_tail)) {
 193  192  
 194  193                  switch (drp_tail->dr_state) {
 195  194  
 196  195                  case NFS4_DUP_FREE:
 197  196                          list_remove(&(drc->dr_cache), drp_tail);
 198  197                          DTRACE_PROBE1(nfss__i__drc_freeclaim,
 199  198                              rfs4_dupreq_t *, drp_tail);
 200  199                          return (drp_tail);
 201  200                          /* NOTREACHED */
 202  201  
 203  202                  case NFS4_DUP_REPLAY:
 204  203                          /* grab it. */
 205  204                          rfs4_dr_chstate(drp_tail, NFS4_DUP_FREE);
 206  205                          DTRACE_PROBE1(nfss__i__drc_replayclaim,
 207  206                              rfs4_dupreq_t *, drp_tail);
 208  207                          return (drp_tail);
 209  208                          /* NOTREACHED */
 210  209                  }
 211  210          }
 212  211          DTRACE_PROBE1(nfss__i__drc_full, rfs4_drc_t *, drc);
 213  212          return (NULL);
 214  213  }
 215  214  
 216  215  /*
 217  216   * rfs4_find_dr:
 218  217   *
 219  218   * Search for an entry in the duplicate request cache by
 220  219   * calculating the hash index based on the XID, and examining
 221  220   * the entries in the hash bucket. If we find a match, return.
 222  221   * Once we have searched the bucket we call rfs4_alloc_dr() to
 223  222   * allocate a new entry, or reuse one that is available.
 224  223   */
 225  224  int
 226  225  rfs4_find_dr(struct svc_req *req, rfs4_drc_t *drc, rfs4_dupreq_t **dup)
 227  226  {
 228  227  
 229  228          uint32_t        the_xid;
 230  229          list_t          *dr_bkt;
 231  230          rfs4_dupreq_t   *drp;
 232  231          int             bktdex;
 233  232  
 234  233          /*
 235  234           * Get the XID, calculate the bucket and search to
 236  235           * see if we need to replay from the cache.
 237  236           */
 238  237          the_xid = req->rq_xprt->xp_xid;
 239  238          bktdex = the_xid % drc->dr_hash;
 240  239  
 241  240          dr_bkt = (list_t *)
 242  241              &(drc->dr_buckets[(the_xid % drc->dr_hash)]);
 243  242  
 244  243          DTRACE_PROBE3(nfss__i__drc_bktdex,
 245  244              int, bktdex,
 246  245              uint32_t, the_xid,
 247  246              list_t *, dr_bkt);
 248  247  
 249  248          *dup = NULL;
 250  249  
 251  250          mutex_enter(&drc->lock);
 252  251          /*
 253  252           * Search the bucket for a matching xid and address.
 254  253           */
 255  254          for (drp = list_head(dr_bkt); drp != NULL;
 256  255              drp = list_next(dr_bkt, drp)) {
 257  256  
 258  257                  if (drp->dr_xid == the_xid &&
 259  258                      drp->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
 260  259                      bcmp((caddr_t)drp->dr_addr.buf,
 261  260                      (caddr_t)req->rq_xprt->xp_rtaddr.buf,
 262  261                      drp->dr_addr.len) == 0) {
 263  262  
 264  263                          /*
 265  264                           * Found a match so REPLAY the Reply
 266  265                           */
 267  266                          if (drp->dr_state == NFS4_DUP_REPLAY) {
 268  267                                  rfs4_dr_chstate(drp, NFS4_DUP_INUSE);
 269  268                                  mutex_exit(&drc->lock);
 270  269                                  *dup = drp;
 271  270                                  DTRACE_PROBE1(nfss__i__drc_replay,
 272  271                                      rfs4_dupreq_t *, drp);
 273  272                                  return (NFS4_DUP_REPLAY);
 274  273                          }
 275  274  
 276  275                          /*
 277  276                           * This entry must be in transition, so return
 278  277                           * the 'pending' status.
 279  278                           */
 280  279                          mutex_exit(&drc->lock);
 281  280                          return (NFS4_DUP_PENDING);
 282  281                  }
 283  282          }
 284  283  
 285  284          drp = rfs4_alloc_dr(drc);
 286  285          mutex_exit(&drc->lock);
 287  286  
 288  287          /*
 289  288           * The DRC is full and all entries are in use. Upper function
 290  289           * should error out this request and force the client to
 291  290           * retransmit -- effectively this is a resource issue. NFSD
 292  291           * threads tied up with native File System, or the cache size
 293  292           * is too small for the server load.
 294  293           */
 295  294          if (drp == NULL)
 296  295                  return (NFS4_DUP_ERROR);
 297  296  
 298  297          /*
 299  298           * Init the state to NEW.
 300  299           */
 301  300          drp->dr_state = NFS4_DUP_NEW;
 302  301  
 303  302          /*
 304  303           * If needed, resize the address buffer
 305  304           */
 306  305          if (drp->dr_addr.maxlen < req->rq_xprt->xp_rtaddr.len) {
 307  306                  if (drp->dr_addr.buf != NULL)
 308  307                          kmem_free(drp->dr_addr.buf, drp->dr_addr.maxlen);
 309  308                  drp->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
 310  309                  drp->dr_addr.buf = kmem_alloc(drp->dr_addr.maxlen, KM_NOSLEEP);
 311  310                  if (drp->dr_addr.buf == NULL) {
 312  311                          /*
 313  312                           * If the malloc fails, mark the entry
 314  313                           * as free and put on the tail.
 315  314                           */
 316  315                          drp->dr_addr.maxlen = 0;
 317  316                          drp->dr_state = NFS4_DUP_FREE;
 318  317                          mutex_enter(&drc->lock);
 319  318                          list_insert_tail(&(drc->dr_cache), drp);
 320  319                          mutex_exit(&drc->lock);
 321  320                          return (NFS4_DUP_ERROR);
 322  321                  }
 323  322          }
 324  323  
 325  324  
 326  325          /*
 327  326           * Copy the address.
 328  327           */
 329  328          drp->dr_addr.len = req->rq_xprt->xp_rtaddr.len;
 330  329  
 331  330          bcopy((caddr_t)req->rq_xprt->xp_rtaddr.buf,
 332  331              (caddr_t)drp->dr_addr.buf,
 333  332              drp->dr_addr.len);
 334  333  
 335  334          drp->dr_xid = the_xid;
 336  335          drp->dr_bkt = dr_bkt;
 337  336  
 338  337          /*
 339  338           * Insert at the head of the bucket and
 340  339           * the drc lists..
 341  340           */
 342  341          mutex_enter(&drc->lock);
 343  342          list_insert_head(&drc->dr_cache, drp);
 344  343          list_insert_head(dr_bkt, drp);
 345  344          mutex_exit(&drc->lock);
 346  345  
 347  346          *dup = drp;
 348  347  
  
 349  348          return (NFS4_DUP_NEW);
 350  349  }
 351  350  
 352  351  /*
 353  352   *
 354  353   * This function handles the duplicate request cache,
 355  354   * NULL_PROC and COMPOUND procedure calls for NFSv4;
 356  355   *
 357  356   * Passed into this function are:-
 358  357   *
 359      - *      disp    A pointer to our dispatch table entry
 360      - *      req     The request to process
 361      - *      xprt    The server transport handle
 362      - *      ap      A pointer to the arguments
      358 + *      disp    A pointer to our dispatch table entry
      359 + *      req     The request to process
      360 + *      xprt    The server transport handle
      361 + *      ap      A pointer to the arguments
 363  362   *
 364  363   *
 365  364   * When appropriate this function is responsible for inserting
 366  365   * the reply into the duplicate cache or replaying an existing
 367  366   * cached reply.
 368  367   *
 369      - * dr_stat      reflects the state of the duplicate request that
 370      - *              has been inserted into or retrieved from the cache
      368 + * dr_stat      reflects the state of the duplicate request that
      369 + *              has been inserted into or retrieved from the cache
 371  370   *
 372  371   * drp          is the duplicate request entry
 373  372   *
 374  373   */
 375  374  int
 376      -rfs4_dispatch(struct rpcdisp *disp, struct svc_req *req,
 377      -                SVCXPRT *xprt, char *ap)
      375 +rfs4_dispatch(struct rpcdisp *disp, struct svc_req *req, SVCXPRT *xprt,
      376 +    char *ap)
 378  377  {
 379  378  
 380  379          COMPOUND4res     res_buf;
 381  380          COMPOUND4res    *rbp;
 382  381          COMPOUND4args   *cap;
 383  382          cred_t          *cr = NULL;
 384  383          int              error = 0;
 385  384          int              dis_flags = 0;
 386  385          int              dr_stat = NFS4_NOT_DUP;
 387  386          rfs4_dupreq_t   *drp = NULL;
 388  387          int              rv;
      388 +        nfs4_srv_t *nsrv4 = nfs4_get_srv();
      389 +        rfs4_drc_t *nfs4_drc = nsrv4->nfs4_drc;
 389  390  
 390  391          ASSERT(disp);
 391  392  
 392  393          /*
 393  394           * Short circuit the RPC_NULL proc.
 394  395           */
 395  396          if (disp->dis_proc == rpc_null) {
 396  397                  DTRACE_NFSV4_1(null__start, struct svc_req *, req);
 397  398                  if (!svc_sendreply(xprt, xdr_void, NULL)) {
 398  399                          DTRACE_NFSV4_1(null__done, struct svc_req *, req);
 399  400                          svcerr_systemerr(xprt);
 400  401                          return (1);
 401  402                  }
 402  403                  DTRACE_NFSV4_1(null__done, struct svc_req *, req);
 403  404                  return (0);
 404  405          }
 405  406  
 406  407          /* Only NFSv4 Compounds from this point onward */
 407  408  
 408  409          rbp = &res_buf;
 409  410          cap = (COMPOUND4args *)ap;
 410  411  
 411  412          /*
 412  413           * Figure out the disposition of the whole COMPOUND
 413  414           * and record it's IDEMPOTENTCY.
 414  415           */
 415  416          rfs4_compound_flagproc(cap, &dis_flags);
 416  417  
 417  418          /*
 418  419           * If NON-IDEMPOTENT then we need to figure out if this
 419  420           * request can be replied from the duplicate cache.
 420  421           *
 421  422           * If this is a new request then we need to insert the
 422  423           * reply into the duplicate cache.
 423  424           */
 424  425          if (!(dis_flags & RPC_IDEMPOTENT)) {
 425  426                  /* look for a replay from the cache or allocate */
 426  427                  dr_stat = rfs4_find_dr(req, nfs4_drc, &drp);
 427  428  
 428  429                  switch (dr_stat) {
 429  430  
 430  431                  case NFS4_DUP_ERROR:
 431  432                          rfs4_resource_err(req, cap);
 432  433                          return (1);
 433  434                          /* NOTREACHED */
 434  435  
 435  436                  case NFS4_DUP_PENDING:
 436  437                          /*
 437  438                           * reply has previously been inserted into the
 438  439                           * duplicate cache, however the reply has
 439  440                           * not yet been sent via svc_sendreply()
 440  441                           */
 441  442                          return (1);
 442  443                          /* NOTREACHED */
 443  444  
 444  445                  case NFS4_DUP_NEW:
 445  446                          curthread->t_flag |= T_DONTPEND;
 446  447                          /* NON-IDEMPOTENT proc call */
 447  448                          rfs4_compound(cap, rbp, NULL, req, cr, &rv);
 448  449                          curthread->t_flag &= ~T_DONTPEND;
 449  450  
 450  451                          if (rv)         /* short ckt sendreply on error */
 451  452                                  return (rv);
 452  453  
 453  454                          /*
 454  455                           * dr_res must be initialized before calling
 455  456                           * rfs4_dr_chstate (it frees the reply).
 456  457                           */
 457  458                          drp->dr_res = res_buf;
 458  459                          if (curthread->t_flag & T_WOULDBLOCK) {
 459  460                                  curthread->t_flag &= ~T_WOULDBLOCK;
 460  461                                  /*
 461  462                                   * mark this entry as FREE and plop
 462  463                                   * on the end of the cache list
 463  464                                   */
 464  465                                  mutex_enter(&drp->drc->lock);
 465  466                                  rfs4_dr_chstate(drp, NFS4_DUP_FREE);
 466  467                                  list_insert_tail(&(drp->drc->dr_cache), drp);
 467  468                                  mutex_exit(&drp->drc->lock);
 468  469                                  return (1);
 469  470                          }
 470  471                          break;
 471  472  
 472  473                  case NFS4_DUP_REPLAY:
 473  474                          /* replay from the cache */
 474  475                          rbp = &(drp->dr_res);
 475  476                          break;
 476  477                  }
 477  478          } else {
 478  479                  curthread->t_flag |= T_DONTPEND;
 479  480                  /* IDEMPOTENT proc call */
 480  481                  rfs4_compound(cap, rbp, NULL, req, cr, &rv);
 481  482                  curthread->t_flag &= ~T_DONTPEND;
 482  483  
 483  484                  if (rv)         /* short ckt sendreply on error */
 484  485                          return (rv);
 485  486  
 486  487                  if (curthread->t_flag & T_WOULDBLOCK) {
 487  488                          curthread->t_flag &= ~T_WOULDBLOCK;
 488  489                          return (1);
 489  490                  }
 490  491          }
 491  492  
 492  493          /*
 493  494           * Send out the replayed reply or the 'real' one.
 494  495           */
 495  496          if (!svc_sendreply(xprt,  xdr_COMPOUND4res_srv, (char *)rbp)) {
 496  497                  DTRACE_PROBE2(nfss__e__dispatch_sendfail,
 497  498                      struct svc_req *, xprt,
 498  499                      char *, rbp);
 499  500                  svcerr_systemerr(xprt);
 500  501                  error++;
 501  502          }
 502  503  
 503  504          /*
 504  505           * If this reply was just inserted into the duplicate cache
 505  506           * or it was replayed from the dup cache; (re)mark it as
 506  507           * available for replay
 507  508           *
 508  509           * At first glance, this 'if' statement seems a little strange;
 509  510           * testing for NFS4_DUP_REPLAY, and then calling...
 510  511           *
 511  512           *      rfs4_dr_chatate(NFS4_DUP_REPLAY)
 512  513           *
 513  514           * ... but notice that we are checking dr_stat, and not the
 514  515           * state of the entry itself, the entry will be NFS4_DUP_INUSE,
 515  516           * we do that so that we know not to prematurely reap it whilst
 516  517           * we resent it to the client.
 517  518           *
 518  519           */
 519  520          if (dr_stat == NFS4_DUP_NEW || dr_stat == NFS4_DUP_REPLAY) {
 520  521                  mutex_enter(&drp->drc->lock);
 521  522                  rfs4_dr_chstate(drp, NFS4_DUP_REPLAY);
 522  523                  mutex_exit(&drp->drc->lock);
 523  524          } else if (dr_stat == NFS4_NOT_DUP) {
 524  525                  rfs4_compound_free(rbp);
 525  526          }
 526  527  
 527  528          return (error);
 528  529  }
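
For a non-idempotent COMPOUND, rfs4_dispatch() walks a cache entry through a
small state machine; the transitions are summarized here as an illustrative
comment using the state names from this file:

    /*
     * DRC entry lifecycle for a non-idempotent request (summary only):
     *
     *   rfs4_find_dr() returns NFS4_DUP_NEW     - fresh entry; run the
     *                                             compound and cache the reply
     *                          NFS4_DUP_REPLAY  - cached reply found; it is
     *                                             marked NFS4_DUP_INUSE while
     *                                             being resent so it cannot
     *                                             be reclaimed
     *                          NFS4_DUP_PENDING - another thread still owns
     *                                             the entry; drop the request
     *                          NFS4_DUP_ERROR   - cache full; send
     *                                             NFS4ERR_RESOURCE and force
     *                                             a retransmit
     *
     *   After svc_sendreply() the entry is (re)marked NFS4_DUP_REPLAY; if the
     *   compound would have blocked (T_WOULDBLOCK), the entry is instead
     *   marked NFS4_DUP_FREE and parked at the tail of the cache list.
     */
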
 529  530  
 530  531  bool_t
 531  532  rfs4_minorvers_mismatch(struct svc_req *req, SVCXPRT *xprt, void *args)
 532  533  {
 533  534          COMPOUND4args *argsp;
 534  535          COMPOUND4res res_buf, *resp;
 535  536  
 536  537          if (req->rq_vers != 4)
  
 537  538                  return (FALSE);
 538  539  
 539  540          argsp = (COMPOUND4args *)args;
 540  541  
 541  542          if (argsp->minorversion <= NFS4_MAX_MINOR_VERSION)
 542  543                  return (FALSE);
 543  544  
 544  545          resp = &res_buf;
 545  546  
 546  547          /*
 547      -         * Form a reply tag by copying over the reqeuest tag.
      548 +         * Form a reply tag by copying over the request tag.
 548  549           */
 549      -        resp->tag.utf8string_val =
 550      -            kmem_alloc(argsp->tag.utf8string_len, KM_SLEEP);
 551  550          resp->tag.utf8string_len = argsp->tag.utf8string_len;
 552      -        bcopy(argsp->tag.utf8string_val, resp->tag.utf8string_val,
 553      -            resp->tag.utf8string_len);
      551 +        if (argsp->tag.utf8string_len != 0) {
      552 +                resp->tag.utf8string_val =
      553 +                    kmem_alloc(argsp->tag.utf8string_len, KM_SLEEP);
      554 +                bcopy(argsp->tag.utf8string_val, resp->tag.utf8string_val,
      555 +                    resp->tag.utf8string_len);
      556 +        } else {
      557 +                resp->tag.utf8string_val = NULL;
      558 +        }
 554  559          resp->array_len = 0;
 555  560          resp->array = NULL;
 556  561          resp->status = NFS4ERR_MINOR_VERS_MISMATCH;
 557  562          if (!svc_sendreply(xprt,  xdr_COMPOUND4res_srv, (char *)resp)) {
 558  563                  DTRACE_PROBE2(nfss__e__minorvers_mismatch,
 559  564                      SVCXPRT *, xprt, char *, resp);
 560  565                  svcerr_systemerr(xprt);
 561  566          }
 562  567          rfs4_compound_free(resp);
 563  568          return (TRUE);
 564  569  }
 565  570  
 566  571  void
 567  572  rfs4_resource_err(struct svc_req *req, COMPOUND4args *argsp)
  
 568  573  {
 569  574          COMPOUND4res res_buf, *rbp;
 570  575          nfs_resop4 *resop;
 571  576          PUTFH4res *resp;
 572  577  
 573  578          rbp = &res_buf;
 574  579  
 575  580          /*
 576  581           * Form a reply tag by copying over the request tag.
 577  582           */
 578      -        rbp->tag.utf8string_val =
 579      -            kmem_alloc(argsp->tag.utf8string_len, KM_SLEEP);
 580  583          rbp->tag.utf8string_len = argsp->tag.utf8string_len;
 581      -        bcopy(argsp->tag.utf8string_val, rbp->tag.utf8string_val,
 582      -            rbp->tag.utf8string_len);
      584 +        if (argsp->tag.utf8string_len != 0) {
      585 +                rbp->tag.utf8string_val =
      586 +                    kmem_alloc(argsp->tag.utf8string_len, KM_SLEEP);
      587 +                bcopy(argsp->tag.utf8string_val, rbp->tag.utf8string_val,
      588 +                    rbp->tag.utf8string_len);
      589 +        } else {
      590 +                rbp->tag.utf8string_val = NULL;
      591 +        }
 583  592  
 584  593          rbp->array_len = 1;
 585  594          rbp->array = kmem_zalloc(rbp->array_len * sizeof (nfs_resop4),
 586  595              KM_SLEEP);
 587  596          resop = &rbp->array[0];
 588  597          resop->resop = argsp->array[0].argop;   /* copy first op over */
 589  598  
 590  599          /* Any op will do, just need to access status field */
 591  600          resp = &resop->nfs_resop4_u.opputfh;
 592  601  
 593  602          /*
 594  603           * NFS4ERR_RESOURCE is allowed for all ops, except OP_ILLEGAL.
 595  604           * Note that all op numbers in the compound array were already
 596  605           * validated by the XDR decoder (xdr_COMPOUND4args_srv()).
 597  606           */
 598  607          resp->status = (resop->resop == OP_ILLEGAL ?
 599  608              NFS4ERR_OP_ILLEGAL : NFS4ERR_RESOURCE);
 600  609  
 601  610          /* compound status is same as first op status */
 602  611          rbp->status = resp->status;
 603  612  
 604  613          if (!svc_sendreply(req->rq_xprt, xdr_COMPOUND4res_srv, (char *)rbp)) {
 605  614                  DTRACE_PROBE2(nfss__rsrc_err__sendfail,
 606  615                      struct svc_req *, req->rq_xprt, char *, rbp);
 607  616                  svcerr_systemerr(req->rq_xprt);
 608  617          }
 609  618  
 610  619          UTF8STRING_FREE(rbp->tag);
 611  620          kmem_free(rbp->array, rbp->array_len * sizeof (nfs_resop4));
 612  621  }
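
The same guarded tag copy now appears in both rfs4_minorvers_mismatch() and
rfs4_resource_err(): the reply tag is only allocated and copied when the
request tag is non-empty, so a zero-length kmem_alloc() is never attempted.
A sketch of how the pattern could be factored into one helper (the helper is
hypothetical and not part of this change, which keeps the logic open-coded):

    /* Hypothetical helper mirroring the guarded copies above. */
    static void
    rfs4_copy_reply_tag(COMPOUND4res *resp, COMPOUND4args *argsp)
    {
            resp->tag.utf8string_len = argsp->tag.utf8string_len;
            if (argsp->tag.utf8string_len != 0) {
                    resp->tag.utf8string_val =
                        kmem_alloc(argsp->tag.utf8string_len, KM_SLEEP);
                    bcopy(argsp->tag.utf8string_val, resp->tag.utf8string_val,
                        resp->tag.utf8string_len);
            } else {
                    resp->tag.utf8string_val = NULL;
            }
    }
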
  