3354 kernel crash in rpcsec_gss after using gsscred
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Carlos Neira <cneirabustos@gmail.com>
Approved by: Robert Mustacchi <rm@joyent.com>
NEX-14051 Be careful with RPC groups
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
8085 Handle RPC groups better
Reviewed by: "Joshua M. Clulow" <josh@sysmgr.org>
Reviewed by: Paul Dagnelie <pcd@delphix.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Approved by: Dan McDonald <danmcd@omniti.com>
NEX-2320 svc_queueclose() could wake up too many threads
Reviewed by: Rick Mesta <rick.mesta@nexenta.com>
Reviewed by: Gordon Ross <gordon.ross@nexenta.com>
Reviewed by: Dan Fields <dan.fields@nexenta.com>
Reviewed by: Josef Sipek <josef.sipek@nexenta.com>
Reviewed by: Tony Nguyen <tony.nguyen@nexenta.com>
re #12783 rb4338 Flow control is needed in rpcmod when the NFS server is unable to keep up with the network
re #13613 rb4516 Tunables need volatile keyword
re #12714 rb4347 Corruption of the `xprt-ready' queue in svc_xprt_qdelete()

*** 18,28 ****
--- 18,30 ----
   *
   * CDDL HEADER END
   */
  
  /*
+  * Copyright 2012 Marcel Telka <marcel@telka.sk>
   * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
+  * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
   */
  
  /*
   * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
   * Use is subject to license terms.
*** 217,227 ****
  /*
   * Default stack size for service threads.
   */
  #define	DEFAULT_SVC_RUN_STKSIZE		(0)	/* default kernel stack */
  
! int svc_default_stksize = DEFAULT_SVC_RUN_STKSIZE;
  
  /*
   * Default polling timeout for service threads.
   * Multiplied by hz when used.
   */
--- 219,229 ----
  /*
   * Default stack size for service threads.
   */
  #define	DEFAULT_SVC_RUN_STKSIZE		(0)	/* default kernel stack */
  
! volatile int svc_default_stksize = DEFAULT_SVC_RUN_STKSIZE;
  
  /*
   * Default polling timeout for service threads.
   * Multiplied by hz when used.
   */
*** 246,256 ****
  /*
   * Maximum number of requests from the same transport (in `drain' mode).
   */
  #define	DEFAULT_SVC_MAX_SAME_XPRT	(8)
  
! int svc_default_max_same_xprt = DEFAULT_SVC_MAX_SAME_XPRT;
  
  
  /*
   * Default `Redline' of non-detached threads.
   * Total number of detached and reserved threads in an RPC server
--- 248,258 ----
  /*
   * Maximum number of requests from the same transport (in `drain' mode).
   */
  #define	DEFAULT_SVC_MAX_SAME_XPRT	(8)
  
! volatile int svc_default_max_same_xprt = DEFAULT_SVC_MAX_SAME_XPRT;
  
  
  /*
   * Default `Redline' of non-detached threads.
   * Total number of detached and reserved threads in an RPC server
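
The two `volatile' hunks above exist so the compiler cannot assume the tunable never changes and fold the default into callers; a value set in /etc/system or patched live with mdb -kw must actually be re-read at each use. A minimal sketch of the pattern follows; the names my_tunable and my_batch_limit are hypothetical, not part of svc.c.

/*
 * Sketch only; my_tunable and my_batch_limit are hypothetical names.
 */
#define	MY_TUNABLE_DEFAULT	(8)

/*
 * Without `volatile' the compiler may constant-fold MY_TUNABLE_DEFAULT
 * into every caller, so a runtime patch would be invisible.
 */
volatile int my_tunable = MY_TUNABLE_DEFAULT;

int
my_batch_limit(void)
{
	/* Re-read on every call, so a live patch takes effect. */
	return (my_tunable);
}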
*** 294,324 ****
   */
  static caddr_t rqcred_head;
  static kmutex_t rqcred_lock;
  
  /*
-  * Pointers to transport specific `rele' routines in rpcmod (set from rpcmod).
-  */
- void (*rpc_rele)(queue_t *, mblk_t *, bool_t) = NULL;
- void (*mir_rele)(queue_t *, mblk_t *, bool_t) = NULL;
- 
- /* ARGSUSED */
- void
- rpc_rdma_rele(queue_t *q, mblk_t *mp, bool_t enable)
- {
- }
- void (*rdma_rele)(queue_t *, mblk_t *, bool_t) = rpc_rdma_rele;
- 
- 
- /*
-  * This macro picks which `rele' routine to use, based on the transport type.
-  */
- #define	RELE_PROC(xprt) \
- 	((xprt)->xp_type == T_RDMA ? rdma_rele : \
- 	(((xprt)->xp_type == T_CLTS) ? rpc_rele : mir_rele))
- 
- /*
   * If true, then keep quiet about version mismatch.
   * This macro is for broadcast RPC only. We have no broadcast RPC in
   * kernel now but one may define a flag in the transport structure
   * and redefine this macro.
   */
--- 296,305 ----
*** 2385,2395 ****
  		mutex_enter(&xprt->xp_req_lock);
  		enable = xprt->xp_enable;
  		if (enable)
  			xprt->xp_enable = FALSE;
  		mutex_exit(&xprt->xp_req_lock);
! 		(*RELE_PROC(xprt)) (clone_xprt->xp_wq, NULL, enable);
  	}
  	/* NOTREACHED */
  }
  
  /*
--- 2366,2376 ----
  		mutex_enter(&xprt->xp_req_lock);
  		enable = xprt->xp_enable;
  		if (enable)
  			xprt->xp_enable = FALSE;
  		mutex_exit(&xprt->xp_req_lock);
! 		SVC_RELE(clone_xprt, NULL, enable);
  	}
  	/* NOTREACHED */
  }
  
  /*
*** 2410,2420 ****
  	pool = xprt->xp_pool;
  	while ((mp = xprt->xp_req_head) != NULL) {
  		/* remove the request from the list */
  		xprt->xp_req_head = mp->b_next;
  		mp->b_next = (mblk_t *)0;
! 		(*RELE_PROC(xprt)) (xprt->xp_wq, mp, FALSE);
  	}
  
  	mutex_enter(&pool->p_req_lock);
  	pool->p_reqs -= xprt->xp_reqs;
  	pool->p_size -= xprt->xp_size;
--- 2391,2401 ----
  	pool = xprt->xp_pool;
  	while ((mp = xprt->xp_req_head) != NULL) {
  		/* remove the request from the list */
  		xprt->xp_req_head = mp->b_next;
  		mp->b_next = (mblk_t *)0;
! 		SVC_RELE(xprt, mp, FALSE);
  	}
  
  	mutex_enter(&pool->p_req_lock);
  	pool->p_reqs -= xprt->xp_reqs;
  	pool->p_size -= xprt->xp_size;
*** 2727,2737 ****
  	mutex_enter(&xprt->xp_req_lock);
  	enable = xprt->xp_enable;
  	if (enable)
  		xprt->xp_enable = FALSE;
  	mutex_exit(&xprt->xp_req_lock);
! 	(*RELE_PROC(xprt)) (clone_xprt->xp_wq, NULL, enable);
  
  	/* Mark the clone (thread) as detached */
  	clone_xprt->xp_reserved = FALSE;
  	clone_xprt->xp_detached = TRUE;
  
--- 2708,2718 ----
  	mutex_enter(&xprt->xp_req_lock);
  	enable = xprt->xp_enable;
  	if (enable)
  		xprt->xp_enable = FALSE;
  	mutex_exit(&xprt->xp_req_lock);
! 	SVC_RELE(clone_xprt, NULL, enable);
  
  	/* Mark the clone (thread) as detached */
  	clone_xprt->xp_reserved = FALSE;
  	clone_xprt->xp_detached = TRUE;
  
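
Taken together, the remaining hunks retire the RELE_PROC() macro, which selected a `rele' routine by testing xp_type at every call site, in favour of the SVC_RELE() entry point, so each transport carries its own release routine. A rough sketch of that ops-vector style of dispatch follows; the struct and member names here (my_xprt_ops, xo_rele, MY_RELE) are illustrative, not the real definitions from rpc/svc.h.

/*
 * Sketch of dispatch through an ops vector, with hypothetical names;
 * see the SVC_RELE definition in rpc/svc.h for the real thing.
 */
#include <sys/types.h>
#include <sys/stream.h>
#include <rpc/types.h>

typedef struct my_xprt_ops {
	/* set once by the transport (CLTS, COTS, RDMA) at attach time */
	void	(*xo_rele)(queue_t *, mblk_t *, bool_t);
} my_xprt_ops_t;

typedef struct my_xprt {
	my_xprt_ops_t	*xp_ops;
	queue_t		*xp_wq;
} my_xprt_t;

/* Call sites no longer test xp_type; the vector does the selection. */
#define	MY_RELE(xprt, mp, enable) \
	((xprt)->xp_ops->xo_rele((xprt)->xp_wq, mp, enable))

The advantage over the type-switch macro is that adding a transport type means registering one function pointer rather than editing every release call site.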