XXXXX convert NLM's single-count semaphore to a mutex

          --- old/usr/src/uts/common/klm/nlm_impl.h
          +++ new/usr/src/uts/common/klm/nlm_impl.h
[ 380 lines elided ]
 381  381   * can not share one handle between several threads. It's assumed
 382  382   * that NLM uses only one NSM handle per zone, thus all RPC operations
 383  383   * on NSM's handle are serialized using nlm_nsm->sem semaphore.
 384  384   *
 385  385   * nlm_nsm also contains refcnt field used for reference counting.
 386  386   * It's used because there exists a possibility of simultaneous
 387  387   * execution of NLM shutdown operation and host monitor/unmonitor
 388  388   * operations.
 389  389   *
 390  390   * struct nlm_nsm:
 391      - *  ns_sem: a semaphore for serialization network operations to statd
       391 + *  ns_lock: a mutex for serializing network operations to statd
 392  392   *  ns_knc: a kneconfig describing transport that is used for communication
 393  393   *  ns_addr: an address of local statd we're talking to
 394  394   *  ns_handle: an RPC handle used for talking to local statd using the status
 395  395   *      monitor protocol (SM_PROG)
 396  396   *  ns_addr_handle: an RPC handle used for talking to local statd using the
 397  397   *      address registration protocol (NSM_ADDR_PROGRAM)
 398  398   */
 399  399  struct nlm_nsm {
 400      -        ksema_t                 ns_sem;
      400 +        kmutex_t                ns_lock;
 401  401          struct knetconfig       ns_knc;          /* (c) */
 402  402          struct netbuf           ns_addr;         /* (c) */
 403  403          CLIENT                  *ns_handle;      /* (c) */
 404  404          CLIENT                  *ns_addr_handle; /* (c) */
 405  405  };
 406  406  
 407  407  /*
 408  408   * Could use flock.h flk_nlm_status_t instead, but
 409  409   * prefer our own enum with initial zero...
 410  410   */
[ 235 lines elided ]
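
For review context, a minimal caller-side sketch of what the conversion implies, assuming the usual one-for-one replacement of the sema_p()/sema_v() bracket around statd RPCs with mutex_enter()/mutex_exit(). The wrapper nlm_nsm_example_call() and the stub sm_example_rpc() are hypothetical names, not taken from this change:

        extern enum clnt_stat sm_example_rpc(void *, void *, CLIENT *); /* placeholder stub */

        static enum clnt_stat
        nlm_nsm_example_call(struct nlm_nsm *nsm, void *argp, void *resp)
        {
        	enum clnt_stat stat;

        	/*
        	 * The shared per-zone ns_handle cannot be used by several
        	 * threads at once, so the RPC to the local statd is
        	 * serialized on ns_lock, exactly where the old code
        	 * held ns_sem.
        	 */
        	mutex_enter(&nsm->ns_lock);
        	stat = sm_example_rpc(argp, resp, nsm->ns_handle);
        	mutex_exit(&nsm->ns_lock);

        	return (stat);
        }

The visible behavioral difference is small: a kernel mutex records an owner, so the lock must be released by the thread that acquired it and callers can assert MUTEX_HELD(&nsm->ns_lock), neither of which a single-count semaphore provides.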
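On the lifecycle side, the swap would be equally mechanical; a sketch under the assumption that the init and teardown paths mirror the usual pattern (both function names below are placeholders):

        /*
         * Hypothetical sketch: the single-count semaphore
         *	sema_init(&nsm->ns_sem, 1, NULL, SEMA_DEFAULT, NULL);
         *	sema_destroy(&nsm->ns_sem);
         * is replaced by an adaptive mutex of the default type.
         */
        static void
        nlm_nsm_example_init(struct nlm_nsm *nsm)
        {
        	mutex_init(&nsm->ns_lock, NULL, MUTEX_DEFAULT, NULL);
        }

        static void
        nlm_nsm_example_fini(struct nlm_nsm *nsm)
        {
        	mutex_destroy(&nsm->ns_lock);
        }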