pbchk
cleanup port_free_event_local() semantics

          --- old/usr/src/uts/common/fs/portfs/port_vnops.c
          +++ new/usr/src/uts/common/fs/portfs/port_vnops.c
(17 lines elided)
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  24   24   * Use is subject to license terms.
  25   25   */
  26   26  
  27   27  /*
  28      - * Copyright (c) 2014, Joyent, Inc.  All rights reserved.
       28 + * Copyright 2020 Joyent, Inc.
  29   29   */
  30   30  
  31   31  #include <sys/types.h>
  32   32  #include <sys/vnode.h>
  33   33  #include <sys/vfs_opreg.h>
  34   34  #include <sys/kmem.h>
  35   35  #include <fs/fs_subr.h>
  36   36  #include <sys/proc.h>
  37   37  #include <sys/kstat.h>
  38   38  #include <sys/port_impl.h>
(69 lines elided)
 108  108   * Free all kernel events structures which are still in the event queue.
 109  109   */
 110  110  static void
 111  111  port_close_events(port_queue_t *portq)
 112  112  {
 113  113          port_kevent_t   *pkevp;
 114  114          int             events;         /* ignore events */
 115  115  
 116  116          mutex_enter(&portq->portq_mutex);
 117  117          while (pkevp = list_head(&portq->portq_list)) {
      118 +                port_t *pp = pkevp->portkev_port;
      119 +
 118  120                  portq->portq_nent--;
 119  121                  list_remove(&portq->portq_list, pkevp);
 120  122                  if (pkevp->portkev_callback) {
 121  123                          (void) (*pkevp->portkev_callback)(pkevp->portkev_arg,
 122  124                              &events, pkevp->portkev_pid, PORT_CALLBACK_CLOSE,
 123  125                              pkevp);
 124  126                  }
 125      -                mutex_exit(&portq->portq_mutex);
 126      -                port_free_event_local(pkevp, 0);
      127 +                /*
      128 +                 * Don't drop the portq_mutex, but instead perform the
      129 +                 * decrement of port_curr in advance of calling
      130 +                 * port_free_event_local().  We do need to reacquire
      131 +                 * portq_mutex so we can properly wait for any
      132 +                 * pollwakeup()-signalled threads to finish up.
      133 +                 */
      134 +                if (--pp->port_curr < pp->port_max_events)
      135 +                        cv_signal(&pp->port_cv);
      136 +                port_free_event_local(pkevp, B_FALSE);
 127  137                  mutex_enter(&portq->portq_mutex);
 128  138          }
 129  139  
 130  140          /*
 131  141           * Wait for any thread in pollwakeup(), accessing this port to
 132  142           * finish.
 133  143           */
 134  144          while (portq->portq_flags & PORTQ_POLLWK_PEND) {
 135  145                  cv_wait(&portq->portq_closecv, &portq->portq_mutex);
 136  146          }
(200 lines elided)
 337  347          return (0);
 338  348  }
 339  349  
 340  350  /*
 341  351   * Destroy the port.
 342  352   */
 343  353  /* ARGSUSED */
 344  354  static void
 345  355  port_inactive(struct vnode *vp, cred_t *cr, caller_context_t *ct)
 346  356  {
 347      -        port_t  *pp = VTOEP(vp);
 348      -        extern  port_kstat_t port_kstat;
      357 +        port_t  *pp = VTOEP(vp);
      358 +        extern  port_kstat_t port_kstat;
 349  359  
 350  360          mutex_enter(&port_control.pc_mutex);
 351  361          port_control.pc_nents--;
 352  362          curproc->p_portcnt--;
 353  363          port_kstat.pks_ports.value.ui32--;
 354  364          mutex_exit(&port_control.pc_mutex);
 355  365          vn_free(vp);
 356  366          mutex_destroy(&pp->port_mutex);
 357  367          mutex_destroy(&pp->port_queue.portq_mutex);
 358  368          mutex_destroy(&pp->port_queue.portq_source_mutex);
(18 lines elided)
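
A note on the port_close_events() hunk above: the new loop keeps portq_mutex held across the close callback, does the port_curr accounting itself, and only then hands the event to port_free_event_local(pkevp, B_FALSE). Judging from the new comment and the trailing mutex_enter(), the revised contract appears to be that port_free_event_local() is now entered with portq_mutex held and drops it before returning; that reading is an assumption here, since the function itself is not part of this webrev. The userland sketch below (pthreads; names such as evq_t, evq_drain() and evq_free_event_locked() are invented for illustration and are not portfs code) mirrors that drain pattern under those assumptions:

#include <pthread.h>
#include <stdlib.h>

typedef struct ev {
	struct ev	*ev_next;
	void		(*ev_cb)(void *);	/* "close" callback, may be NULL */
	void		*ev_arg;
} ev_t;

typedef struct evq {
	pthread_mutex_t	evq_lock;
	pthread_cond_t	evq_cv;		/* producers wait here when full */
	ev_t		*evq_head;	/* singly-linked pending list */
	unsigned int	evq_curr;	/* events currently allocated */
	unsigned int	evq_max;	/* cap that producers block on */
} evq_t;

/*
 * Analogue of the assumed port_free_event_local() contract: entered
 * with evq_lock held, drops it, then frees the event.  The caller has
 * already done the evq_curr accounting.
 */
static void
evq_free_event_locked(evq_t *eq, ev_t *ev)
{
	(void) pthread_mutex_unlock(&eq->evq_lock);
	free(ev);
}

/* Analogue of the reworked port_close_events() drain loop. */
void
evq_drain(evq_t *eq)
{
	ev_t *ev;

	(void) pthread_mutex_lock(&eq->evq_lock);
	while ((ev = eq->evq_head) != NULL) {
		eq->evq_head = ev->ev_next;
		if (ev->ev_cb != NULL)
			ev->ev_cb(ev->ev_arg);
		/*
		 * Account for the event while evq_lock is still held,
		 * before it is freed, so a blocked producer never sees
		 * the counter and the list out of step.
		 */
		if (--eq->evq_curr < eq->evq_max)
			(void) pthread_cond_signal(&eq->evq_cv);
		evq_free_event_locked(eq, ev);	/* drops evq_lock */
		(void) pthread_mutex_lock(&eq->evq_lock);
	}
	(void) pthread_mutex_unlock(&eq->evq_lock);
}

In the sketch, the counter decrement and the wakeup happen under the same lock that protects the list, which is the property the hunk's new comment calls out by doing the port_curr decrement before the free rather than dropping portq_mutex first.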