 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2012 Milan Jurik. All rights reserved.
 * Copyright 2012 Marcel Telka <marcel@telka.sk>
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
 */
/* Copyright (c) 1990 Mentat Inc. */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

/*
 * Kernel RPC filtering module
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/stream.h>
#include <sys/stropts.h>
#include <sys/strsubr.h>
#include <sys/tihdr.h>
#include <sys/timod.h>
#include <sys/tiuser.h>
#include <sys/debug.h>
#include <sys/signal.h>

/* ... */

}

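/*
 * The rmm_* routines are thin trampolines: q_ptr holds a struct
 * temp_slot whose ops vector (xo_wput, xo_wsrv, xo_close, ...) is
 * installed for the stream at open time, so each STREAMS entry point
 * simply forwards to whichever implementation is active on this
 * stream.
 */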
void
rmm_wput(queue_t *q, mblk_t *mp)
{
	(*((struct temp_slot *)q->q_ptr)->ops->xo_wput)(q, mp);
}

void
rmm_wsrv(queue_t *q)
{
	(*((struct temp_slot *)q->q_ptr)->ops->xo_wsrv)(q);
}

int
rmm_close(queue_t *q, int flag, cred_t *crp)
{
	return ((*((struct temp_slot *)q->q_ptr)->ops->xo_close)(q, flag, crp));
}

/*
 * rpcmodopen - the open routine; called when the module is pushed
 * onto the stream.
 */
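/*
 * Illustrative user-level sketch (hypothetical, not part of this
 * module): rpcmod is typically pushed onto an open transport stream
 * before the descriptor is handed to kRPC, e.g.
 *
 *	int fd = open("/dev/tcp", O_RDWR);
 *	if (ioctl(fd, I_PUSH, "rpcmod") == -1)	// reaches rpcmodopen()
 *		err(1, "I_PUSH rpcmod");
 */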
/*ARGSUSED*/
int
rpcmodopen(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *crp)
{
	struct rpcm *rmp;

	TRACE_0(TR_FAC_KRPC, TR_RPCMODOPEN_START, "rpcmodopen_start:");

	/*
	 * Only sufficiently privileged users can use this module, and it
	 * is assumed that they will use it properly and NOT send bulk
	 * data from downstream.
	 */
	if (secpolicy_rpcmod_open(crp) != 0)
		return (EPERM);

	/*
	 * Allocate the slot data structure.
	 */
	rmp = kmem_zalloc(sizeof (*rmp), KM_SLEEP);

	mutex_init(&rmp->rm_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rmp->rm_cwait, NULL, CV_DEFAULT, NULL);
	rmp->rm_zoneid = rpc_zoneid();
	/*
	 * The slot type will be set by the kRPC client and server ioctls.
	 */
	rmp->rm_type = 0;

/* ... */

	rmp = (struct rpcm *)q->q_ptr;
	ASSERT(rmp != NULL);

	/*
	 * Get messages that may be queued and send them downstream.
	 */
	while ((mp = getq(q)) != NULL) {
		/*
		 * Optimize the service procedure for the server side by
		 * avoiding a call to canputnext().
		 */
		if (rmp->rm_type == RPC_SERVER || canputnext(q)) {
			putnext(q, mp);
			continue;
		}
		(void) putbq(q, mp);
		return;
	}
}

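/*
 * Reference-counting protocol: kRPC takes a hold on the slot for each
 * request it has outstanding (rpcmod_hold) and drops it when the
 * request is done (rpcmod_release). The close path sets RM_CLOSING and
 * waits on rm_cwait, which is why the final release broadcasts on the
 * condition variable.
 */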
void
rpcmod_hold(queue_t *q)
{
	struct rpcm *rmp = (struct rpcm *)q->q_ptr;

	mutex_enter(&rmp->rm_lock);
	rmp->rm_ref++;
	mutex_exit(&rmp->rm_lock);
}

void
rpcmod_release(queue_t *q, mblk_t *bp,
    /* LINTED E_FUNC_ARG_UNUSED */
    bool_t enable __unused)
{
	struct rpcm *rmp;

	/*
	 * For now, just free the message.
	 */
	if (bp)
		freemsg(bp);
	rmp = (struct rpcm *)q->q_ptr;

	mutex_enter(&rmp->rm_lock);
	rmp->rm_ref--;

	if (rmp->rm_ref == 0 && (rmp->rm_state & RM_CLOSING)) {
		cv_broadcast(&rmp->rm_cwait);
	}

	mutex_exit(&rmp->rm_lock);
}

/* ... */

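/*
 * A stream is quiesced once kRPC holds no references to it and its
 * read service procedure is not running.
 */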
#define	MIR_SVC_QUIESCED(mir)	\
	(mir->mir_ref_cnt == 0 && mir->mir_inrservice == 0)

#define	MIR_CLEAR_INRSRV(mir_ptr)	{	\
	(mir_ptr)->mir_inrservice = 0;	\
	if ((mir_ptr)->mir_type == RPC_SERVER &&	\
	    (mir_ptr)->mir_closing)	\
		cv_signal(&(mir_ptr)->mir_condvar);	\
}

/*
 * Don't block the service procedure (and mir_close) if
 * we are in the process of closing.
 */
#define	MIR_WCANPUTNEXT(mir_ptr, write_q)	\
	(canputnext(write_q) || ((mir_ptr)->mir_svc_no_more_msgs == 1))
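
/*
 * mir_svc_no_more_msgs is set once close processing begins (see
 * mir_svc_start_close() below), so queued replies can still drain
 * even when the transport is flow-controlled.
 */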

static int	mir_clnt_dup_request(queue_t *q, mblk_t *mp);
static void	mir_rput_proto(queue_t *q, mblk_t *mp);
static int	mir_svc_policy_notify(queue_t *q, int event);
static void	mir_svc_start(queue_t *wq);
static void	mir_svc_idle_start(queue_t *, mir_t *);
static void	mir_svc_idle_stop(queue_t *, mir_t *);
static void	mir_svc_start_close(queue_t *, mir_t *);
static void	mir_clnt_idle_do_stop(queue_t *);
static void	mir_clnt_idle_stop(queue_t *, mir_t *);
static void	mir_clnt_idle_start(queue_t *, mir_t *);
static void	mir_wput(queue_t *q, mblk_t *mp);
static void	mir_wput_other(queue_t *q, mblk_t *mp);
static void	mir_wsrv(queue_t *q);
static void	mir_disconnect(queue_t *, mir_t *ir);
static int	mir_check_len(queue_t *, mblk_t *);
static void	mir_timer(void *);

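/*
 * Hooks used directly by kRPC; mir_open() below fills these in (if not
 * already set) so that kRPC can restart the write side and cancel the
 * client idle timer without calling into this module directly.
 */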
extern void	(*mir_start)(queue_t *);
extern void	(*clnt_stop_idle)(queue_t *);

volatile clock_t clnt_idle_timeout = MIR_CLNT_IDLE_TIMEOUT;
volatile clock_t svc_idle_timeout = MIR_SVC_IDLE_TIMEOUT;

/*
 * Timeout for subsequent notifications of an idle connection. This is
 * typically used to clean up after a wedged orderly release.
 */
clock_t	svc_ordrel_timeout = MIR_SVC_ORDREL_TIMEOUT;	/* milliseconds */

extern uint_t	*clnt_max_msg_sizep;
extern uint_t	*svc_max_msg_sizep;
uint_t	clnt_max_msg_size = RPC_MAXDATASIZE;
uint_t	svc_max_msg_size = RPC_MAXDATASIZE;
uint_t	mir_krpc_cell_null;
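
/*
 * The timeout and message-size variables above are tunables. On a
 * running system they could, for example, be set at boot time from
 * /etc/system (illustrative values, not recommendations):
 *
 *	set rpcmod:clnt_idle_timeout=600000
 *	set rpcmod:svc_max_msg_size=1048576
 */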

static void
mir_timer_stop(mir_t *mir)
{
	timeout_id_t	tid;

	ASSERT(MUTEX_HELD(&mir->mir_mutex));

/* ... */

	    mir->mir_ordrel_pending ? "ordrel" : "normal");
	/*
	 * Normal condition, start the idle timer. If an orderly
	 * release has been sent, set the timeout to wait for the
	 * client to close its side of the connection. Otherwise,
	 * use the normal idle timeout.
	 */
	mir_timer_start(q, mir, mir->mir_ordrel_pending ?
	    svc_ordrel_timeout : mir->mir_idle_timeout);
	}
}

/* ARGSUSED */
static int
mir_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	mir_t	*mir;

	RPCLOG(32, "rpcmod: mir_open of q 0x%p\n", (void *)q);
	/* Set variables used directly by kRPC. */
	if (!mir_start)
		mir_start = mir_svc_start;
	if (!clnt_stop_idle)
		clnt_stop_idle = mir_clnt_idle_do_stop;
	if (!clnt_max_msg_sizep)
		clnt_max_msg_sizep = &clnt_max_msg_size;
	if (!svc_max_msg_sizep)
		svc_max_msg_sizep = &svc_max_msg_size;

	/* Allocate a zeroed-out mir structure for this stream. */
	mir = kmem_zalloc(sizeof (mir_t), KM_SLEEP);

	/*
	 * We set hold inbound here so that incoming messages will
	 * be held on the read-side queue until the stream is completely
	 * initialized with an RPC_CLIENT or RPC_SERVER ioctl. During
	 * the ioctl processing, the flag is cleared and any messages that
	 * arrived between the open and the ioctl are delivered to kRPC.
	 *
	 * Early data should never arrive on a client stream since
	 * ...
	 */

/* ... */

	/*
	 * ...
	 * Do not accept any more messages.
	 */
	mir->mir_svc_no_more_msgs = 1;

	/*
	 * The next two statements make the read service procedure
	 * free everything stuck in the stream's read queue. It's not
	 * strictly necessary, because enabling the write queue will
	 * have the same effect, but why not speed the process along?
	 */
	mir->mir_hold_inbound = 0;
	qenable(RD(wq));

	/*
	 * Meanwhile, force the write service procedure to send the
	 * responses downstream, regardless of flow control.
	 */
	qenable(wq);
}

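/*
 * Reference counting on the connection-oriented side mirrors
 * rpcmod_hold()/rpcmod_release() above: kRPC takes a hold per
 * outstanding request via mir_svc_hold() and drops it via
 * mir_svc_release(), which also starts idle processing once the last
 * request completes.
 */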
void
mir_svc_hold(queue_t *wq)
{
	mir_t *mir = (mir_t *)wq->q_ptr;

	mutex_enter(&mir->mir_mutex);
	mir->mir_ref_cnt++;
	mutex_exit(&mir->mir_mutex);
}

/*
 * This routine is called directly by kRPC after a request is completed,
 * whether a reply was sent or the request was dropped.
 */
void
mir_svc_release(queue_t *wq, mblk_t *mp, bool_t enable)
{
	mir_t	*mir = (mir_t *)wq->q_ptr;
	mblk_t	*cmp = NULL;

	ASSERT((wq->q_flag & QREADR) == 0);
	if (mp)
		freemsg(mp);

	if (enable)
		qenable(RD(wq));

	mutex_enter(&mir->mir_mutex);

	/*
	 * Start idle processing if this is the last reference.
	 */
	if ((mir->mir_ref_cnt == 1) && (mir->mir_inrservice == 0)) {
		cmp = mir->mir_svc_pend_mp;
		mir->mir_svc_pend_mp = NULL;