1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
22 /* All Rights Reserved */
23
24
25 /*
26 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
27 * Copyright 2017 Joyent, Inc.
28 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
29 */
30
31 #include <sys/types.h>
32 #include <sys/sysmacros.h>
33 #include <sys/param.h>
34 #include <sys/errno.h>
35 #include <sys/signal.h>
36 #include <sys/stat.h>
37 #include <sys/proc.h>
38 #include <sys/cred.h>
39 #include <sys/user.h>
40 #include <sys/vnode.h>
41 #include <sys/file.h>
42 #include <sys/stream.h>
43 #include <sys/strsubr.h>
44 #include <sys/stropts.h>
45 #include <sys/tihdr.h>
46 #include <sys/var.h>
47 #include <sys/poll.h>
48 #include <sys/termio.h>
49 #include <sys/ttold.h>
50 #include <sys/systm.h>
51 #include <sys/uio.h>
52 #include <sys/cmn_err.h>
53 #include <sys/sad.h>
54 #include <sys/netstack.h>
55 #include <sys/priocntl.h>
56 #include <sys/jioctl.h>
57 #include <sys/procset.h>
58 #include <sys/session.h>
59 #include <sys/kmem.h>
60 #include <sys/filio.h>
61 #include <sys/vtrace.h>
62 #include <sys/debug.h>
63 #include <sys/strredir.h>
64 #include <sys/fs/fifonode.h>
65 #include <sys/fs/snode.h>
66 #include <sys/strlog.h>
67 #include <sys/strsun.h>
68 #include <sys/project.h>
69 #include <sys/kbio.h>
70 #include <sys/msio.h>
71 #include <sys/tty.h>
72 #include <sys/ptyvar.h>
73 #include <sys/vuid_event.h>
74 #include <sys/modctl.h>
75 #include <sys/sunddi.h>
76 #include <sys/sunldi_impl.h>
77 #include <sys/autoconf.h>
78 #include <sys/policy.h>
79 #include <sys/dld.h>
80 #include <sys/zone.h>
81 #include <c2/audit.h>
82
/*
 * This define helps improve the readability of streams code while
 * still maintaining a very old streams performance enhancement.  The
 * performance enhancement basically involved having all callers
 * of straccess() perform the first check that straccess() would do
 * locally before actually calling straccess(), thereby reducing
 * the number of unnecessary calls to straccess().
 */
91 #define i_straccess(x, y) ((stp->sd_sidp == NULL) ? 0 : \
92 (stp->sd_vnode->v_type == VFIFO) ? 0 : \
93 straccess((x), (y)))
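/*
 * Typical use (illustrative only; see e.g. strread() below): callers
 * write
 *
 *	mutex_enter(&stp->sd_lock);
 *	if ((error = i_straccess(stp, JCREAD)) != 0) {
 *		mutex_exit(&stp->sd_lock);
 *		return (error);
 *	}
 *
 * so streams that have no session (or are FIFOs) never pay for a call
 * to straccess() at all.
 */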
94
/*
 * What is mblk_pull_len?
 *
 * If a streams message consists of many short mblks,
 * a performance degradation occurs from copyout overhead.
 * To decrease the per-mblk overhead, messages that are
 * likely to consist of many small mblks are pulled up into
 * one contiguous chunk of memory.
 *
 * To avoid the processing overhead of examining every
 * mblk, a quick heuristic is used: if the first mblk in
 * the message is shorter than mblk_pull_len, it is likely
 * that the rest of the mblks will be short too.
 *
 * This heuristic was decided upon after performance tests
 * indicated that anything more complex slowed down the main
 * code path.
 */
113 #define MBLK_PULL_LEN 64
114 uint32_t mblk_pull_len = MBLK_PULL_LEN;
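/*
 * Illustrative sketch (an assumed consumer pattern, not code taken from
 * this file): a reader applies the heuristic roughly as
 *
 *	if (MBLKL(bp) < mblk_pull_len)
 *		(void) pullupmsg(bp, -1);
 *
 * so only messages whose leading mblk is short pay the pullup cost.
 */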
115
116 /*
117 * The sgttyb_handling flag controls the handling of the old BSD
118 * TIOCGETP, TIOCSETP, and TIOCSETN ioctls as follows:
119 *
120 * 0 - Emit no warnings at all and retain old, broken behavior.
121 * 1 - Emit no warnings and silently handle new semantics.
122 * 2 - Send cmn_err(CE_NOTE) when either TIOCSETP or TIOCSETN is used
123 * (once per system invocation). Handle with new semantics.
124 * 3 - Send SIGSYS when any TIOCGETP, TIOCSETP, or TIOCSETN call is
125 * made (so that offenders drop core and are easy to debug).
126 *
127 * The "new semantics" are that TIOCGETP returns B38400 for
128 * sg_[io]speed if the corresponding value is over B38400, and that
129 * TIOCSET[PN] accept B38400 in these cases to mean "retain current
130 * bit rate."
131 */
132 int sgttyb_handling = 1;
133 static boolean_t sgttyb_complaint;
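/*
 * Illustrative note (an assumption about deployment, not something
 * enforced by this file): as a global kernel variable, sgttyb_handling
 * can normally be overridden at boot time via /etc/system, e.g.
 *
 *	set sgttyb_handling = 2
 *
 * to get the warning behavior described above.
 */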
134
135 /* don't push drcompat module by default on Style-2 streams */
136 static int push_drcompat = 0;
137
138 /*
139 * id value used to distinguish between different ioctl messages
140 */
141 static uint32_t ioc_id;
142
143 static void putback(struct stdata *, queue_t *, mblk_t *, int);
144 static void strcleanall(struct vnode *);
145 static int strwsrv(queue_t *);
146 static int strdocmd(struct stdata *, struct strcmd *, cred_t *);
147
148 /*
149 * qinit and module_info structures for stream head read and write queues
150 */
151 struct module_info strm_info = { 0, "strrhead", 0, INFPSZ, STRHIGH, STRLOW };
152 struct module_info stwm_info = { 0, "strwhead", 0, 0, 0, 0 };
153 struct qinit strdata = { strrput, NULL, NULL, NULL, NULL, &strm_info };
154 struct qinit stwdata = { NULL, strwsrv, NULL, NULL, NULL, &stwm_info };
155 struct module_info fiform_info = { 0, "fifostrrhead", 0, PIPE_BUF, FIFOHIWAT,
156 FIFOLOWAT };
157 struct module_info fifowm_info = { 0, "fifostrwhead", 0, 0, 0, 0 };
158 struct qinit fifo_strdata = { strrput, NULL, NULL, NULL, NULL, &fiform_info };
159 struct qinit fifo_stwdata = { NULL, strwsrv, NULL, NULL, NULL, &fifowm_info };
160
161 extern kmutex_t strresources; /* protects global resources */
162 extern kmutex_t muxifier; /* single-threads multiplexor creation */
163
164 static boolean_t msghasdata(mblk_t *bp);
165 #define msgnodata(bp) (!msghasdata(bp))
166
167 /*
168 * Stream head locking notes:
169 * There are four monitors associated with the stream head:
170 * 1. v_stream monitor: in stropen() and strclose() v_lock
171 * is held while the association of vnode and stream
172 * head is established or tested for.
173 * 2. open/close/push/pop monitor: sd_lock is held while each
174 * thread bids for exclusive access to this monitor
175 * for opening or closing a stream. In addition, this
176 * monitor is entered during pushes and pops. This
177 * guarantees that during plumbing operations there
178 * is only one thread trying to change the plumbing.
179 * Any other threads present in the stream are only
180 * using the plumbing.
 * 3. read/write monitor: in the case of read, a thread holds
 * sd_lock while trying to get data from the stream
 * head queue. If there is none to fulfill a read
 * request, it sets RSLEEP and calls cv_wait_sig() down
 * in strwaitq() to await the arrival of new data.
 * When new data arrives in strrput(), sd_lock is acquired
 * before testing for RSLEEP and calling cv_broadcast().
 * The behavior of strwrite(), strwsrv(), and WSLEEP
 * mirrors this (a condensed sketch follows this comment).
 * 4. ioctl monitor: sd_lock is acquired to ensure that only one
 * thread is doing an ioctl at a time.
192 */
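/*
 * A condensed sketch of the read-side handshake described in (3) above
 * (simplified; the real code paths are strread()/strwaitq() and
 * strrput()):
 *
 *	mutex_enter(&stp->sd_lock);
 *	while (q->q_first == NULL) {
 *		stp->sd_flag |= RSLEEP;
 *		if (!cv_wait_sig(&q->q_wait, &stp->sd_lock))
 *			break;		(a pending signal interrupted us)
 *	}
 *	mutex_exit(&stp->sd_lock);
 *
 * and strrput(), holding sd_lock, clears RSLEEP and cv_broadcast()s
 * &q->q_wait when new data is queued.
 */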
193
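/*
 * Push the module "name" onto the stream identified by qp/stp and call
 * its open routine via qattach().  If the resulting push count equals
 * "anchor", record a STREAMS anchor at this position, owned by
 * "anchor_zoneid".  Returns 0 on success or an errno value.
 */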
194 static int
195 push_mod(queue_t *qp, dev_t *devp, struct stdata *stp, const char *name,
196 int anchor, cred_t *crp, uint_t anchor_zoneid)
197 {
198 int error;
199 fmodsw_impl_t *fp;
200
201 if (stp->sd_flag & (STRHUP|STRDERR|STWRERR)) {
202 error = (stp->sd_flag & STRHUP) ? ENXIO : EIO;
203 return (error);
204 }
205 if (stp->sd_pushcnt >= nstrpush) {
206 return (EINVAL);
207 }
208
209 if ((fp = fmodsw_find(name, FMODSW_HOLD | FMODSW_LOAD)) == NULL) {
210 stp->sd_flag |= STREOPENFAIL;
211 return (EINVAL);
212 }
213
214 /*
215 * push new module and call its open routine via qattach
216 */
217 if ((error = qattach(qp, devp, 0, crp, fp, B_FALSE)) != 0)
218 return (error);
219
220 /*
221 * Check to see if caller wants a STREAMS anchor
222 * put at this place in the stream, and add if so.
223 */
224 mutex_enter(&stp->sd_lock);
225 if (anchor == stp->sd_pushcnt) {
226 stp->sd_anchor = stp->sd_pushcnt;
227 stp->sd_anchorzone = anchor_zoneid;
228 }
229 mutex_exit(&stp->sd_lock);
230
231 return (0);
232 }
233
234 /*
235 * Open a stream device.
236 */
237 int
238 stropen(vnode_t *vp, dev_t *devp, int flag, cred_t *crp)
239 {
240 struct stdata *stp;
241 queue_t *qp;
242 int s;
243 dev_t dummydev, savedev;
244 struct autopush *ap;
245 struct dlautopush dlap;
246 int error = 0;
247 ssize_t rmin, rmax;
248 int cloneopen;
249 queue_t *brq;
250 major_t major;
251 str_stack_t *ss;
252 zoneid_t zoneid;
253 uint_t anchor;
254
255 /*
256 * If the stream already exists, wait for any open in progress
257 * to complete, then call the open function of each module and
258 * driver in the stream. Otherwise create the stream.
259 */
260 TRACE_1(TR_FAC_STREAMS_FR, TR_STROPEN, "stropen:%p", vp);
261 retry:
262 mutex_enter(&vp->v_lock);
263 if ((stp = vp->v_stream) != NULL) {
264
/*
 * A stream for this device already exists, possibly still
 * being set up by another open in progress; wait below for
 * that open to complete.
 */
269 mutex_exit(&vp->v_lock);
270
271 if (STRMATED(stp)) {
272 struct stdata *strmatep = stp->sd_mate;
273
274 STRLOCKMATES(stp);
275 if (strmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
276 if (flag & (FNDELAY|FNONBLOCK)) {
277 error = EAGAIN;
278 mutex_exit(&strmatep->sd_lock);
279 goto ckreturn;
280 }
281 mutex_exit(&stp->sd_lock);
282 if (!cv_wait_sig(&strmatep->sd_monitor,
283 &strmatep->sd_lock)) {
284 error = EINTR;
285 mutex_exit(&strmatep->sd_lock);
286 mutex_enter(&stp->sd_lock);
287 goto ckreturn;
288 }
289 mutex_exit(&strmatep->sd_lock);
290 goto retry;
291 }
292 if (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
293 if (flag & (FNDELAY|FNONBLOCK)) {
294 error = EAGAIN;
295 mutex_exit(&strmatep->sd_lock);
296 goto ckreturn;
297 }
298 mutex_exit(&strmatep->sd_lock);
299 if (!cv_wait_sig(&stp->sd_monitor,
300 &stp->sd_lock)) {
301 error = EINTR;
302 goto ckreturn;
303 }
304 mutex_exit(&stp->sd_lock);
305 goto retry;
306 }
307
308 if (stp->sd_flag & (STRDERR|STWRERR)) {
309 error = EIO;
310 mutex_exit(&strmatep->sd_lock);
311 goto ckreturn;
312 }
313
314 stp->sd_flag |= STWOPEN;
315 STRUNLOCKMATES(stp);
316 } else {
317 mutex_enter(&stp->sd_lock);
318 if (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
319 if (flag & (FNDELAY|FNONBLOCK)) {
320 error = EAGAIN;
321 goto ckreturn;
322 }
323 if (!cv_wait_sig(&stp->sd_monitor,
324 &stp->sd_lock)) {
325 error = EINTR;
326 goto ckreturn;
327 }
328 mutex_exit(&stp->sd_lock);
329 goto retry; /* could be clone! */
330 }
331
332 if (stp->sd_flag & (STRDERR|STWRERR)) {
333 error = EIO;
334 goto ckreturn;
335 }
336
337 stp->sd_flag |= STWOPEN;
338 mutex_exit(&stp->sd_lock);
339 }
340
341 /*
342 * Open all modules and devices down stream to notify
343 * that another user is streaming. For modules, set the
344 * last argument to MODOPEN and do not pass any open flags.
345 * Ignore dummydev since this is not the first open.
346 */
347 claimstr(stp->sd_wrq);
348 qp = stp->sd_wrq;
349 while (_SAMESTR(qp)) {
350 qp = qp->q_next;
351 if ((error = qreopen(_RD(qp), devp, flag, crp)) != 0)
352 break;
353 }
354 releasestr(stp->sd_wrq);
355 mutex_enter(&stp->sd_lock);
356 stp->sd_flag &= ~(STRHUP|STWOPEN|STRDERR|STWRERR);
357 stp->sd_rerror = 0;
358 stp->sd_werror = 0;
359 ckreturn:
360 cv_broadcast(&stp->sd_monitor);
361 mutex_exit(&stp->sd_lock);
362 return (error);
363 }
364
365 /*
366 * This vnode isn't streaming. SPECFS already
367 * checked for multiple vnodes pointing to the
368 * same stream, so create a stream to the driver.
369 */
370 qp = allocq();
371 stp = shalloc(qp);
372
373 /*
374 * Initialize stream head. shalloc() has given us
375 * exclusive access, and we have the vnode locked;
376 * we can do whatever we want with stp.
377 */
378 stp->sd_flag = STWOPEN;
379 stp->sd_siglist = NULL;
380 stp->sd_pollist.ph_list = NULL;
381 stp->sd_sigflags = 0;
382 stp->sd_mark = NULL;
383 stp->sd_closetime = STRTIMOUT;
384 stp->sd_sidp = NULL;
385 stp->sd_pgidp = NULL;
386 stp->sd_vnode = vp;
387 stp->sd_rerror = 0;
388 stp->sd_werror = 0;
389 stp->sd_wroff = 0;
390 stp->sd_tail = 0;
391 stp->sd_iocblk = NULL;
392 stp->sd_cmdblk = NULL;
393 stp->sd_pushcnt = 0;
394 stp->sd_qn_minpsz = 0;
395 stp->sd_qn_maxpsz = INFPSZ - 1; /* used to check for initialization */
396 stp->sd_maxblk = INFPSZ;
397 qp->q_ptr = _WR(qp)->q_ptr = stp;
398 STREAM(qp) = STREAM(_WR(qp)) = stp;
399 vp->v_stream = stp;
400 mutex_exit(&vp->v_lock);
401 if (vp->v_type == VFIFO) {
402 stp->sd_flag |= OLDNDELAY;
/*
 * This means that, for both pipes and fifos,
 * strwrite will send SIGPIPE if the other
 * end is closed.  For putmsg it depends on
 * whether or not it is an XPG4_2 application.
 */
410 stp->sd_wput_opt = SW_SIGPIPE;
411
412 /* setq might sleep in kmem_alloc - avoid holding locks. */
413 setq(qp, &fifo_strdata, &fifo_stwdata, NULL, QMTSAFE,
414 SQ_CI|SQ_CO, B_FALSE);
415
416 set_qend(qp);
417 stp->sd_strtab = fifo_getinfo();
418 _WR(qp)->q_nfsrv = _WR(qp);
419 qp->q_nfsrv = qp;
420 /*
421 * Wake up others that are waiting for stream to be created.
422 */
423 mutex_enter(&stp->sd_lock);
/*
 * Nothing has been pushed on the stream yet, so the
 * optimized stream head packet sizes are simply those
 * of the read queue.
 */
429 stp->sd_qn_minpsz = qp->q_minpsz;
430 stp->sd_qn_maxpsz = qp->q_maxpsz;
431 stp->sd_flag &= ~STWOPEN;
432 goto fifo_opendone;
433 }
434 /* setq might sleep in kmem_alloc - avoid holding locks. */
435 setq(qp, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_FALSE);
436
437 set_qend(qp);
438
439 /*
440 * Open driver and create stream to it (via qattach).
441 */
442 savedev = *devp;
443 cloneopen = (getmajor(*devp) == clone_major);
444 if ((error = qattach(qp, devp, flag, crp, NULL, B_FALSE)) != 0) {
445 mutex_enter(&vp->v_lock);
446 vp->v_stream = NULL;
447 mutex_exit(&vp->v_lock);
448 mutex_enter(&stp->sd_lock);
449 cv_broadcast(&stp->sd_monitor);
450 mutex_exit(&stp->sd_lock);
451 freeq(_RD(qp));
452 shfree(stp);
453 return (error);
454 }
455 /*
456 * Set sd_strtab after open in order to handle clonable drivers
457 */
458 stp->sd_strtab = STREAMSTAB(getmajor(*devp));
459
/*
 * Historical note: dummydev used to be set prior to the initial
 * open (via qattach above), which made the value seen
 * inconsistent between an I_PUSH and an autopush of a module.
 */
465 dummydev = *devp;
466
467 /*
468 * For clone open of old style (Q not associated) network driver,
469 * push DRMODNAME module to handle DL_ATTACH/DL_DETACH
470 */
471 brq = _RD(_WR(qp)->q_next);
472 major = getmajor(*devp);
473 if (push_drcompat && cloneopen && NETWORK_DRV(major) &&
474 ((brq->q_flag & _QASSOCIATED) == 0)) {
475 if (push_mod(qp, &dummydev, stp, DRMODNAME, 0, crp, 0) != 0)
476 cmn_err(CE_WARN, "cannot push " DRMODNAME
477 " streams module");
478 }
479
480 if (!NETWORK_DRV(major)) {
481 savedev = *devp;
482 } else {
483 /*
484 * For network devices, process differently based on the
485 * return value from dld_autopush():
486 *
487 * 0: the passed-in device points to a GLDv3 datalink with
488 * per-link autopush configuration; use that configuration
489 * and ignore any per-driver autopush configuration.
490 *
491 * 1: the passed-in device points to a physical GLDv3
492 * datalink without per-link autopush configuration. The
493 * passed in device was changed to refer to the actual
494 * physical device (if it's not already); we use that new
495 * device to look up any per-driver autopush configuration.
496 *
497 * -1: neither of the above cases applied; use the initial
498 * device to look up any per-driver autopush configuration.
499 */
500 switch (dld_autopush(&savedev, &dlap)) {
501 case 0:
502 zoneid = crgetzoneid(crp);
503 for (s = 0; s < dlap.dap_npush; s++) {
504 error = push_mod(qp, &dummydev, stp,
505 dlap.dap_aplist[s], dlap.dap_anchor, crp,
506 zoneid);
507 if (error != 0)
508 break;
509 }
510 goto opendone;
511 case 1:
512 break;
513 case -1:
514 savedev = *devp;
515 break;
516 }
517 }
518 /*
519 * Find the autopush configuration based on "savedev". Start with the
520 * global zone. If not found check in the local zone.
521 */
522 zoneid = GLOBAL_ZONEID;
523 retryap:
524 ss = netstack_find_by_stackid(zoneid_to_netstackid(zoneid))->
525 netstack_str;
526 if ((ap = sad_ap_find_by_dev(savedev, ss)) == NULL) {
527 netstack_rele(ss->ss_netstack);
528 if (zoneid == GLOBAL_ZONEID) {
529 /*
530 * None found. Also look in the zone's autopush table.
531 */
532 zoneid = crgetzoneid(crp);
533 if (zoneid != GLOBAL_ZONEID)
534 goto retryap;
535 }
536 goto opendone;
537 }
538 anchor = ap->ap_anchor;
539 zoneid = crgetzoneid(crp);
540 for (s = 0; s < ap->ap_npush; s++) {
541 error = push_mod(qp, &dummydev, stp, ap->ap_list[s],
542 anchor, crp, zoneid);
543 if (error != 0)
544 break;
545 }
546 sad_ap_rele(ap, ss);
547 netstack_rele(ss->ss_netstack);
548
549 opendone:
550
551 /*
552 * let specfs know that open failed part way through
553 */
554 if (error) {
555 mutex_enter(&stp->sd_lock);
556 stp->sd_flag |= STREOPENFAIL;
557 mutex_exit(&stp->sd_lock);
558 }
559
560 /*
561 * Wake up others that are waiting for stream to be created.
562 */
563 mutex_enter(&stp->sd_lock);
564 stp->sd_flag &= ~STWOPEN;
565
/*
 * As a performance optimization we cache, in the stream head,
 * the q_minpsz and q_maxpsz values of the module immediately
 * below the stream head.
 */
571 mutex_enter(QLOCK(stp->sd_wrq->q_next));
572 rmin = stp->sd_wrq->q_next->q_minpsz;
573 rmax = stp->sd_wrq->q_next->q_maxpsz;
574 mutex_exit(QLOCK(stp->sd_wrq->q_next));
575
/* do this processing here as a performance optimization */
577 if (strmsgsz != 0) {
578 if (rmax == INFPSZ)
579 rmax = strmsgsz;
580 else
581 rmax = MIN(strmsgsz, rmax);
582 }
583
584 mutex_enter(QLOCK(stp->sd_wrq));
585 stp->sd_qn_minpsz = rmin;
586 stp->sd_qn_maxpsz = rmax;
587 mutex_exit(QLOCK(stp->sd_wrq));
588
589 fifo_opendone:
590 cv_broadcast(&stp->sd_monitor);
591 mutex_exit(&stp->sd_lock);
592 return (error);
593 }
594
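/*
 * qinit structures used by strclose() to turn the stream head of a
 * half-closed pipe into a "dead end": messages arriving from the
 * surviving end are handed to strsink() (the read-side put procedure),
 * while the write side is left with no procedures at all.
 */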
595 static int strsink(queue_t *, mblk_t *);
596 static struct qinit deadrend = {
597 strsink, NULL, NULL, NULL, NULL, &strm_info, NULL
598 };
599 static struct qinit deadwend = {
600 NULL, NULL, NULL, NULL, NULL, &stwm_info, NULL
601 };
602
603 /*
604 * Close a stream.
605 * This is called from closef() on the last close of an open stream.
606 * Strclean() will already have removed the siglist and pollist
607 * information, so all that remains is to remove all multiplexor links
608 * for the stream, pop all the modules (and the driver), and free the
609 * stream structure.
610 */
611
612 int
613 strclose(struct vnode *vp, int flag, cred_t *crp)
614 {
615 struct stdata *stp;
616 queue_t *qp;
617 int rval;
618 int freestp = 1;
619 queue_t *rmq;
620
621 TRACE_1(TR_FAC_STREAMS_FR,
622 TR_STRCLOSE, "strclose:%p", vp);
623 ASSERT(vp->v_stream);
624
625 stp = vp->v_stream;
626 ASSERT(!(stp->sd_flag & STPLEX));
627 qp = stp->sd_wrq;
628
629 /*
630 * Needed so that strpoll will return non-zero for this fd.
631 * Note that with POLLNOERR STRHUP does still cause POLLHUP.
632 */
633 mutex_enter(&stp->sd_lock);
634 stp->sd_flag |= STRHUP;
635 mutex_exit(&stp->sd_lock);
636
/*
 * If the registered process or process group did not have an
 * open instance of this stream, then strclean would not have been
 * called.  Thus, at close time, any remaining siglist entries
 * are removed here.
 */
643 if (stp->sd_siglist != NULL)
644 strcleanall(vp);
645
646 ASSERT(stp->sd_siglist == NULL);
647 ASSERT(stp->sd_sigflags == 0);
648
649 if (STRMATED(stp)) {
650 struct stdata *strmatep = stp->sd_mate;
651 int waited = 1;
652
653 STRLOCKMATES(stp);
654 while (waited) {
655 waited = 0;
656 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
657 mutex_exit(&strmatep->sd_lock);
658 cv_wait(&stp->sd_monitor, &stp->sd_lock);
659 mutex_exit(&stp->sd_lock);
660 STRLOCKMATES(stp);
661 waited = 1;
662 }
663 while (strmatep->sd_flag &
664 (STWOPEN|STRCLOSE|STRPLUMB)) {
665 mutex_exit(&stp->sd_lock);
666 cv_wait(&strmatep->sd_monitor,
667 &strmatep->sd_lock);
668 mutex_exit(&strmatep->sd_lock);
669 STRLOCKMATES(stp);
670 waited = 1;
671 }
672 }
673 stp->sd_flag |= STRCLOSE;
674 STRUNLOCKMATES(stp);
675 } else {
676 mutex_enter(&stp->sd_lock);
677 stp->sd_flag |= STRCLOSE;
678 mutex_exit(&stp->sd_lock);
679 }
680
681 ASSERT(qp->q_first == NULL); /* No more delayed write */
682
683 /* Check if an I_LINK was ever done on this stream */
684 if (stp->sd_flag & STRHASLINKS) {
685 netstack_t *ns;
686 str_stack_t *ss;
687
688 ns = netstack_find_by_cred(crp);
689 ASSERT(ns != NULL);
690 ss = ns->netstack_str;
691 ASSERT(ss != NULL);
692
693 (void) munlinkall(stp, LINKCLOSE|LINKNORMAL, crp, &rval, ss);
694 netstack_rele(ss->ss_netstack);
695 }
696
697 while (_SAMESTR(qp)) {
698 /*
699 * Holding sd_lock prevents q_next from changing in
700 * this stream.
701 */
702 mutex_enter(&stp->sd_lock);
703 if (!(flag & (FNDELAY|FNONBLOCK)) && (stp->sd_closetime > 0)) {
704
705 /*
706 * sleep until awakened by strwsrv() or timeout
707 */
708 for (;;) {
709 mutex_enter(QLOCK(qp->q_next));
710 if (!(qp->q_next->q_mblkcnt)) {
711 mutex_exit(QLOCK(qp->q_next));
712 break;
713 }
714 stp->sd_flag |= WSLEEP;
715
716 /* ensure strwsrv gets enabled */
717 qp->q_next->q_flag |= QWANTW;
718 mutex_exit(QLOCK(qp->q_next));
719 /* get out if we timed out or recv'd a signal */
720 if (str_cv_wait(&qp->q_wait, &stp->sd_lock,
721 stp->sd_closetime, 0) <= 0) {
722 break;
723 }
724 }
725 stp->sd_flag &= ~WSLEEP;
726 }
727 mutex_exit(&stp->sd_lock);
728
729 rmq = qp->q_next;
730 if (rmq->q_flag & QISDRV) {
731 ASSERT(!_SAMESTR(rmq));
732 wait_sq_svc(_RD(qp)->q_syncq);
733 }
734
735 qdetach(_RD(rmq), 1, flag, crp, B_FALSE);
736 }
737
/*
 * Since we call pollwakeup in close() now, the poll list should
 * be empty in most cases. The only exception is layered devices
 * (e.g. the console drivers with redirection modules pushed on top
 * of them). We have to do this after calling qdetach() because
 * the redirection module won't have torn down the console
 * redirection until after qdetach() has been invoked.
 */
746 if (stp->sd_pollist.ph_list != NULL) {
747 pollwakeup(&stp->sd_pollist, POLLERR);
748 pollhead_clean(&stp->sd_pollist);
749 }
750 ASSERT(stp->sd_pollist.ph_list == NULL);
751 ASSERT(stp->sd_sidp == NULL);
752 ASSERT(stp->sd_pgidp == NULL);
753
754 /* Prevent qenable from re-enabling the stream head queue */
755 disable_svc(_RD(qp));
756
757 /*
758 * Wait until service procedure of each queue is
759 * run, if QINSERVICE is set.
760 */
761 wait_svc(_RD(qp));
762
763 /*
764 * Now, flush both queues.
765 */
766 flushq(_RD(qp), FLUSHALL);
767 flushq(qp, FLUSHALL);
768
769 /*
770 * If the write queue of the stream head is pointing to a
771 * read queue, we have a twisted stream. If the read queue
772 * is alive, convert the stream head queues into a dead end.
773 * If the read queue is dead, free the dead pair.
774 */
775 if (qp->q_next && !_SAMESTR(qp)) {
776 if (qp->q_next->q_qinfo == &deadrend) { /* half-closed pipe */
777 flushq(qp->q_next, FLUSHALL); /* ensure no message */
778 shfree(qp->q_next->q_stream);
779 freeq(qp->q_next);
780 freeq(_RD(qp));
781 } else if (qp->q_next == _RD(qp)) { /* fifo */
782 freeq(_RD(qp));
783 } else { /* pipe */
784 freestp = 0;
785 /*
786 * The q_info pointers are never accessed when
787 * SQLOCK is held.
788 */
789 ASSERT(qp->q_syncq == _RD(qp)->q_syncq);
790 mutex_enter(SQLOCK(qp->q_syncq));
791 qp->q_qinfo = &deadwend;
792 _RD(qp)->q_qinfo = &deadrend;
793 mutex_exit(SQLOCK(qp->q_syncq));
794 }
795 } else {
796 freeq(_RD(qp)); /* free stream head queue pair */
797 }
798
799 mutex_enter(&vp->v_lock);
800 if (stp->sd_iocblk) {
801 if (stp->sd_iocblk != (mblk_t *)-1) {
802 freemsg(stp->sd_iocblk);
803 }
804 stp->sd_iocblk = NULL;
805 }
806 stp->sd_vnode = NULL;
807 vp->v_stream = NULL;
808 mutex_exit(&vp->v_lock);
809 mutex_enter(&stp->sd_lock);
810 freemsg(stp->sd_cmdblk);
811 stp->sd_cmdblk = NULL;
812 stp->sd_flag &= ~STRCLOSE;
813 cv_broadcast(&stp->sd_monitor);
814 mutex_exit(&stp->sd_lock);
815
816 if (freestp)
817 shfree(stp);
818 return (0);
819 }
820
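/*
 * Put procedure for the dead-end read queue installed by strclose()
 * (see deadrend above).  It disposes of whatever the surviving end of a
 * half-closed pipe sends: M_FLUSH is reflected back with MSGNOLOOP set,
 * M_IOCTL is nak'ed, M_COPYIN/M_COPYOUT are answered with a failing
 * M_IOCDATA, and everything else is freed.
 */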
821 static int
822 strsink(queue_t *q, mblk_t *bp)
823 {
824 struct copyresp *resp;
825
826 switch (bp->b_datap->db_type) {
827 case M_FLUSH:
828 if ((*bp->b_rptr & FLUSHW) && !(bp->b_flag & MSGNOLOOP)) {
829 *bp->b_rptr &= ~FLUSHR;
830 bp->b_flag |= MSGNOLOOP;
831 /*
832 * Protect against the driver passing up
833 * messages after it has done a qprocsoff.
834 */
835 if (_OTHERQ(q)->q_next == NULL)
836 freemsg(bp);
837 else
838 qreply(q, bp);
839 } else {
840 freemsg(bp);
841 }
842 break;
843
844 case M_COPYIN:
845 case M_COPYOUT:
846 if (bp->b_cont) {
847 freemsg(bp->b_cont);
848 bp->b_cont = NULL;
849 }
850 bp->b_datap->db_type = M_IOCDATA;
851 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp);
852 resp = (struct copyresp *)bp->b_rptr;
853 resp->cp_rval = (caddr_t)1; /* failure */
854 /*
855 * Protect against the driver passing up
856 * messages after it has done a qprocsoff.
857 */
858 if (_OTHERQ(q)->q_next == NULL)
859 freemsg(bp);
860 else
861 qreply(q, bp);
862 break;
863
864 case M_IOCTL:
865 if (bp->b_cont) {
866 freemsg(bp->b_cont);
867 bp->b_cont = NULL;
868 }
869 bp->b_datap->db_type = M_IOCNAK;
870 /*
871 * Protect against the driver passing up
872 * messages after it has done a qprocsoff.
873 */
874 if (_OTHERQ(q)->q_next == NULL)
875 freemsg(bp);
876 else
877 qreply(q, bp);
878 break;
879
880 default:
881 freemsg(bp);
882 break;
883 }
884
885 return (0);
886 }
887
888 /*
889 * Clean up after a process when it closes a stream. This is called
890 * from closef for all closes, whereas strclose is called only for the
891 * last close on a stream. The siglist is scanned for entries for the
892 * current process, and these are removed.
893 */
894 void
895 strclean(struct vnode *vp)
896 {
897 strsig_t *ssp, *pssp, *tssp;
898 stdata_t *stp;
899 int update = 0;
900
901 TRACE_1(TR_FAC_STREAMS_FR,
902 TR_STRCLEAN, "strclean:%p", vp);
903 stp = vp->v_stream;
904 pssp = NULL;
905 mutex_enter(&stp->sd_lock);
906 ssp = stp->sd_siglist;
907 while (ssp) {
908 if (ssp->ss_pidp == curproc->p_pidp) {
909 tssp = ssp->ss_next;
910 if (pssp)
911 pssp->ss_next = tssp;
912 else
913 stp->sd_siglist = tssp;
914 mutex_enter(&pidlock);
915 PID_RELE(ssp->ss_pidp);
916 mutex_exit(&pidlock);
917 kmem_free(ssp, sizeof (strsig_t));
918 update = 1;
919 ssp = tssp;
920 } else {
921 pssp = ssp;
922 ssp = ssp->ss_next;
923 }
924 }
925 if (update) {
926 stp->sd_sigflags = 0;
927 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
928 stp->sd_sigflags |= ssp->ss_events;
929 }
930 mutex_exit(&stp->sd_lock);
931 }
932
/*
 * Used on the last close to remove any remaining items on the siglist.
 * These could be present on the siglist due to I_ESETSIG calls that
 * use process groups or processes that do not have an open file descriptor
 * for this stream (such entries would not be removed by strclean).
 */
939 static void
940 strcleanall(struct vnode *vp)
941 {
942 strsig_t *ssp, *nssp;
943 stdata_t *stp;
944
945 stp = vp->v_stream;
946 mutex_enter(&stp->sd_lock);
947 ssp = stp->sd_siglist;
948 stp->sd_siglist = NULL;
949 while (ssp) {
950 nssp = ssp->ss_next;
951 mutex_enter(&pidlock);
952 PID_RELE(ssp->ss_pidp);
953 mutex_exit(&pidlock);
954 kmem_free(ssp, sizeof (strsig_t));
955 ssp = nssp;
956 }
957 stp->sd_sigflags = 0;
958 mutex_exit(&stp->sd_lock);
959 }
960
/*
 * Retrieve the next message from the logical stream head read queue
 * using either rwnext (if this is a sync stream) or getq_noenab.
 * It is the caller's responsibility to call qbackenable after
 * it is finished with the message.  The caller should not call
 * qbackenable until after any putback calls, to avoid spurious backenabling.
 */
968 mblk_t *
969 strget(struct stdata *stp, queue_t *q, struct uio *uiop, int first,
970 int *errorp)
971 {
972 mblk_t *bp;
973 int error;
974 ssize_t rbytes = 0;
975
976 /* Holding sd_lock prevents the read queue from changing */
977 ASSERT(MUTEX_HELD(&stp->sd_lock));
978
979 if (uiop != NULL && stp->sd_struiordq != NULL &&
980 q->q_first == NULL &&
981 (!first || (stp->sd_wakeq & RSLEEP))) {
/*
 * Stream supports rwnext() for the read side.
 * If this is the first time we're called (by e.g. strread),
 * only do the downcall if there is a deferred wakeup
 * (registered in sd_wakeq).
 */
988 struiod_t uiod;
989
990 if (first)
991 stp->sd_wakeq &= ~RSLEEP;
992
993 (void) uiodup(uiop, &uiod.d_uio, uiod.d_iov,
994 sizeof (uiod.d_iov) / sizeof (*uiod.d_iov));
995 uiod.d_mp = 0;
996 /*
997 * Mark that a thread is in rwnext on the read side
998 * to prevent strrput from nacking ioctls immediately.
999 * When the last concurrent rwnext returns
1000 * the ioctls are nack'ed.
1001 */
1002 ASSERT(MUTEX_HELD(&stp->sd_lock));
1003 stp->sd_struiodnak++;
1004 /*
1005 * Note: rwnext will drop sd_lock.
1006 */
1007 error = rwnext(q, &uiod);
1008 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));
1009 mutex_enter(&stp->sd_lock);
1010 stp->sd_struiodnak--;
1011 while (stp->sd_struiodnak == 0 &&
1012 ((bp = stp->sd_struionak) != NULL)) {
1013 stp->sd_struionak = bp->b_next;
1014 bp->b_next = NULL;
1015 bp->b_datap->db_type = M_IOCNAK;
1016 /*
1017 * Protect against the driver passing up
1018 * messages after it has done a qprocsoff.
1019 */
1020 if (_OTHERQ(q)->q_next == NULL)
1021 freemsg(bp);
1022 else {
1023 mutex_exit(&stp->sd_lock);
1024 qreply(q, bp);
1025 mutex_enter(&stp->sd_lock);
1026 }
1027 }
1028 ASSERT(MUTEX_HELD(&stp->sd_lock));
1029 if (error == 0 || error == EWOULDBLOCK) {
1030 if ((bp = uiod.d_mp) != NULL) {
1031 *errorp = 0;
1032 ASSERT(MUTEX_HELD(&stp->sd_lock));
1033 return (bp);
1034 }
1035 error = 0;
1036 } else if (error == EINVAL) {
1037 /*
1038 * The stream plumbing must have
1039 * changed while we were away, so
1040 * just turn off rwnext()s.
1041 */
1042 error = 0;
1043 } else if (error == EBUSY) {
/*
 * The module might have data in transit via putnext.
 * Fall back on waiting + getq.
 */
1048 error = 0;
1049 } else {
1050 *errorp = error;
1051 ASSERT(MUTEX_HELD(&stp->sd_lock));
1052 return (NULL);
1053 }
1054 /*
1055 * Try a getq in case a rwnext() generated mblk
1056 * has bubbled up via strrput().
1057 */
1058 }
1059 *errorp = 0;
1060 ASSERT(MUTEX_HELD(&stp->sd_lock));
1061
/*
 * If we have a valid uio, try to use it as a guide for how
 * many bytes to retrieve from the queue via getq_noenab().
 * Doing this can avoid unnecessary counting of overlong
 * messages in putback(). We currently only do this for sockets
 * and only if there is no sd_rputdatafunc hook.
 *
 * The sd_rputdatafunc hook transforms the entire message
 * before any bytes in it can be given to a client. So, rbytes
 * must be 0 if there is a hook.
 */
1073 if ((uiop != NULL) && (stp->sd_vnode->v_type == VSOCK) &&
1074 (stp->sd_rputdatafunc == NULL))
1075 rbytes = uiop->uio_resid;
1076
1077 return (getq_noenab(q, rbytes));
1078 }
1079
1080 /*
1081 * Copy out the message pointed to by `bp' into the uio pointed to by `uiop'.
1082 * If the message does not fit in the uio the remainder of it is returned;
1083 * otherwise NULL is returned. Any embedded zero-length mblk_t's are
1084 * consumed, even if uio_resid reaches zero. On error, `*errorp' is set to
1085 * the error code, the message is consumed, and NULL is returned.
1086 */
1087 static mblk_t *
1088 struiocopyout(mblk_t *bp, struct uio *uiop, int *errorp)
1089 {
1090 int error;
1091 ptrdiff_t n;
1092 mblk_t *nbp;
1093
1094 ASSERT(bp->b_wptr >= bp->b_rptr);
1095
1096 do {
1097 if ((n = MIN(uiop->uio_resid, MBLKL(bp))) != 0) {
1098 ASSERT(n > 0);
1099
1100 error = uiomove(bp->b_rptr, n, UIO_READ, uiop);
1101 if (error != 0) {
1102 freemsg(bp);
1103 *errorp = error;
1104 return (NULL);
1105 }
1106 }
1107
1108 bp->b_rptr += n;
1109 while (bp != NULL && (bp->b_rptr >= bp->b_wptr)) {
1110 nbp = bp;
1111 bp = bp->b_cont;
1112 freeb(nbp);
1113 }
1114 } while (bp != NULL && uiop->uio_resid > 0);
1115
1116 *errorp = 0;
1117 return (bp);
1118 }
1119
1120 /*
1121 * Read a stream according to the mode flags in sd_flag:
1122 *
1123 * (default mode) - Byte stream, msg boundaries are ignored
1124 * RD_MSGDIS (msg discard) - Read on msg boundaries and throw away
1125 * any data remaining in msg
1126 * RD_MSGNODIS (msg non-discard) - Read on msg boundaries and put back
1127 * any remaining data on head of read queue
1128 *
 * Consume readable messages on the front of the queue until the
 * read request (uiop->uio_resid) is satisfied, the readable
 * messages are exhausted, or a message
1132 * boundary is reached in a message mode. If no data was read and
1133 * the stream was not opened with the NDELAY flag, block until data arrives.
1134 * Otherwise return the data read and update the count.
1135 *
1136 * In default mode a 0 length message signifies end-of-file and terminates
1137 * a read in progress. The 0 length message is removed from the queue
1138 * only if it is the only message read (no data is read).
1139 *
1140 * An attempt to read an M_PROTO or M_PCPROTO message results in an
1141 * EBADMSG error return, unless either RD_PROTDAT or RD_PROTDIS are set.
1142 * If RD_PROTDAT is set, M_PROTO and M_PCPROTO messages are read as data.
 * If RD_PROTDIS is set, the M_PROTO and M_PCPROTO parts of the message
 * are unlinked from any M_DATA blocks in the message, the protos are
 * thrown away, and the data is read.
1146 */
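/*
 * For reference (the user-level view; not used directly by this file):
 * these read modes correspond to the I_SRDOPT ioctl, e.g.
 *
 *	ioctl(fd, I_SRDOPT, RMSGN);		message non-discard mode
 *	ioctl(fd, I_SRDOPT, RNORM | RPROTDAT);	byte stream, protos as data
 *
 * which map onto RD_MSGNODIS, RD_PROTDAT, etc. above.
 */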
1147 /* ARGSUSED */
1148 int
1149 strread(struct vnode *vp, struct uio *uiop, cred_t *crp)
1150 {
1151 struct stdata *stp;
1152 mblk_t *bp, *nbp;
1153 queue_t *q;
1154 int error = 0;
1155 uint_t old_sd_flag;
1156 int first;
1157 char rflg;
1158 uint_t mark; /* Contains MSG*MARK and _LASTMARK */
1159 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */
1160 short delim;
1161 unsigned char pri = 0;
1162 char waitflag;
1163 unsigned char type;
1164
1165 TRACE_1(TR_FAC_STREAMS_FR,
1166 TR_STRREAD_ENTER, "strread:%p", vp);
1167 ASSERT(vp->v_stream);
1168 stp = vp->v_stream;
1169
1170 mutex_enter(&stp->sd_lock);
1171
1172 if ((error = i_straccess(stp, JCREAD)) != 0) {
1173 mutex_exit(&stp->sd_lock);
1174 return (error);
1175 }
1176
1177 if (stp->sd_flag & (STRDERR|STPLEX)) {
1178 error = strgeterr(stp, STRDERR|STPLEX, 0);
1179 if (error != 0) {
1180 mutex_exit(&stp->sd_lock);
1181 return (error);
1182 }
1183 }
1184
1185 /*
1186 * Loop terminates when uiop->uio_resid == 0.
1187 */
1188 rflg = 0;
1189 waitflag = READWAIT;
1190 q = _RD(stp->sd_wrq);
1191 for (;;) {
1192 ASSERT(MUTEX_HELD(&stp->sd_lock));
1193 old_sd_flag = stp->sd_flag;
1194 mark = 0;
1195 delim = 0;
1196 first = 1;
1197 while ((bp = strget(stp, q, uiop, first, &error)) == NULL) {
1198 int done = 0;
1199
1200 ASSERT(MUTEX_HELD(&stp->sd_lock));
1201
1202 if (error != 0)
1203 goto oops;
1204
1205 if (stp->sd_flag & (STRHUP|STREOF)) {
1206 goto oops;
1207 }
1208 if (rflg && !(stp->sd_flag & STRDELIM)) {
1209 goto oops;
1210 }
1211 /*
1212 * If a read(fd,buf,0) has been done, there is no
1213 * need to sleep. We always have zero bytes to
1214 * return.
1215 */
1216 if (uiop->uio_resid == 0) {
1217 goto oops;
1218 }
1219
1220 qbackenable(q, 0);
1221
1222 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_WAIT,
1223 "strread calls strwaitq:%p, %p, %p",
1224 vp, uiop, crp);
1225 if ((error = strwaitq(stp, waitflag, uiop->uio_resid,
1226 uiop->uio_fmode, -1, &done)) != 0 || done) {
1227 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_DONE,
1228 "strread error or done:%p, %p, %p",
1229 vp, uiop, crp);
1230 if ((uiop->uio_fmode & FNDELAY) &&
1231 (stp->sd_flag & OLDNDELAY) &&
1232 (error == EAGAIN))
1233 error = 0;
1234 goto oops;
1235 }
1236 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_AWAKE,
1237 "strread awakes:%p, %p, %p", vp, uiop, crp);
1238 if ((error = i_straccess(stp, JCREAD)) != 0) {
1239 goto oops;
1240 }
1241 first = 0;
1242 }
1243
1244 ASSERT(MUTEX_HELD(&stp->sd_lock));
1245 ASSERT(bp);
1246 pri = bp->b_band;
1247 /*
1248 * Extract any mark information. If the message is not
1249 * completely consumed this information will be put in the mblk
1250 * that is putback.
1251 * If MSGMARKNEXT is set and the message is completely consumed
1252 * the STRATMARK flag will be set below. Likewise, if
1253 * MSGNOTMARKNEXT is set and the message is
1254 * completely consumed STRNOTATMARK will be set.
1255 *
1256 * For some unknown reason strread only breaks the read at the
1257 * last mark.
1258 */
1259 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT);
1260 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) !=
1261 (MSGMARKNEXT|MSGNOTMARKNEXT));
1262 if (mark != 0 && bp == stp->sd_mark) {
1263 if (rflg) {
1264 putback(stp, q, bp, pri);
1265 goto oops;
1266 }
1267 mark |= _LASTMARK;
1268 stp->sd_mark = NULL;
1269 }
1270 if ((stp->sd_flag & STRDELIM) && (bp->b_flag & MSGDELIM))
1271 delim = 1;
1272 mutex_exit(&stp->sd_lock);
1273
1274 if (STREAM_NEEDSERVICE(stp))
1275 stream_runservice(stp);
1276
1277 type = bp->b_datap->db_type;
1278
1279 switch (type) {
1280
1281 case M_DATA:
1282 ismdata:
1283 if (msgnodata(bp)) {
1284 if (mark || delim) {
1285 freemsg(bp);
1286 } else if (rflg) {
1287
/*
 * If we have already read data, put the
 * zero-length message back on the queue;
 * otherwise free the msg and return 0.
 */
1293 bp->b_band = pri;
1294 mutex_enter(&stp->sd_lock);
1295 putback(stp, q, bp, pri);
1296 mutex_exit(&stp->sd_lock);
1297 } else {
1298 freemsg(bp);
1299 }
1300 error = 0;
1301 goto oops1;
1302 }
1303
1304 rflg = 1;
1305 waitflag |= NOINTR;
1306 bp = struiocopyout(bp, uiop, &error);
1307 if (error != 0)
1308 goto oops1;
1309
1310 mutex_enter(&stp->sd_lock);
1311 if (bp) {
1312 /*
1313 * Have remaining data in message.
1314 * Free msg if in discard mode.
1315 */
1316 if (stp->sd_read_opt & RD_MSGDIS) {
1317 freemsg(bp);
1318 } else {
1319 bp->b_band = pri;
1320 if ((mark & _LASTMARK) &&
1321 (stp->sd_mark == NULL))
1322 stp->sd_mark = bp;
1323 bp->b_flag |= mark & ~_LASTMARK;
1324 if (delim)
1325 bp->b_flag |= MSGDELIM;
1326 if (msgnodata(bp))
1327 freemsg(bp);
1328 else
1329 putback(stp, q, bp, pri);
1330 }
1331 } else {
1332 /*
1333 * Consumed the complete message.
1334 * Move the MSG*MARKNEXT information
1335 * to the stream head just in case
1336 * the read queue becomes empty.
1337 *
1338 * If the stream head was at the mark
1339 * (STRATMARK) before we dropped sd_lock above
1340 * and some data was consumed then we have
 * moved past the mark and thus STRATMARK is
 * cleared.  However, if a message arrived in
 * strrput during the copyout above, causing
 * STRATMARK to be set, we cannot clear that
 * flag.
1346 */
1347 if (mark &
1348 (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) {
1349 if (mark & MSGMARKNEXT) {
1350 stp->sd_flag &= ~STRNOTATMARK;
1351 stp->sd_flag |= STRATMARK;
1352 } else if (mark & MSGNOTMARKNEXT) {
1353 stp->sd_flag &= ~STRATMARK;
1354 stp->sd_flag |= STRNOTATMARK;
1355 } else {
1356 stp->sd_flag &=
1357 ~(STRATMARK|STRNOTATMARK);
1358 }
1359 } else if (rflg && (old_sd_flag & STRATMARK)) {
1360 stp->sd_flag &= ~STRATMARK;
1361 }
1362 }
1363
1364 /*
1365 * Check for signal messages at the front of the read
1366 * queue and generate the signal(s) if appropriate.
1367 * The only signal that can be on queue is M_SIG at
1368 * this point.
1369 */
1370 while ((((bp = q->q_first)) != NULL) &&
1371 (bp->b_datap->db_type == M_SIG)) {
1372 bp = getq_noenab(q, 0);
/*
 * sd_lock is held, so the content of the
 * read queue cannot change.
 */
1377 ASSERT(bp != NULL && DB_TYPE(bp) == M_SIG);
1378 strsignal_nolock(stp, *bp->b_rptr, bp->b_band);
1379 mutex_exit(&stp->sd_lock);
1380 freemsg(bp);
1381 if (STREAM_NEEDSERVICE(stp))
1382 stream_runservice(stp);
1383 mutex_enter(&stp->sd_lock);
1384 }
1385
1386 if ((uiop->uio_resid == 0) || (mark & _LASTMARK) ||
1387 delim ||
1388 (stp->sd_read_opt & (RD_MSGDIS|RD_MSGNODIS))) {
1389 goto oops;
1390 }
1391 continue;
1392
1393 case M_SIG:
1394 strsignal(stp, *bp->b_rptr, (int32_t)bp->b_band);
1395 freemsg(bp);
1396 mutex_enter(&stp->sd_lock);
1397 continue;
1398
1399 case M_PROTO:
1400 case M_PCPROTO:
1401 /*
1402 * Only data messages are readable.
1403 * Any others generate an error, unless
1404 * RD_PROTDIS or RD_PROTDAT is set.
1405 */
1406 if (stp->sd_read_opt & RD_PROTDAT) {
1407 for (nbp = bp; nbp; nbp = nbp->b_next) {
1408 if ((nbp->b_datap->db_type ==
1409 M_PROTO) ||
1410 (nbp->b_datap->db_type ==
1411 M_PCPROTO)) {
1412 nbp->b_datap->db_type = M_DATA;
1413 } else {
1414 break;
1415 }
1416 }
1417 /*
1418 * clear stream head hi pri flag based on
1419 * first message
1420 */
1421 if (type == M_PCPROTO) {
1422 mutex_enter(&stp->sd_lock);
1423 stp->sd_flag &= ~STRPRI;
1424 mutex_exit(&stp->sd_lock);
1425 }
1426 goto ismdata;
1427 } else if (stp->sd_read_opt & RD_PROTDIS) {
1428 /*
1429 * discard non-data messages
1430 */
1431 while (bp &&
1432 ((bp->b_datap->db_type == M_PROTO) ||
1433 (bp->b_datap->db_type == M_PCPROTO))) {
1434 nbp = unlinkb(bp);
1435 freeb(bp);
1436 bp = nbp;
1437 }
1438 /*
1439 * clear stream head hi pri flag based on
1440 * first message
1441 */
1442 if (type == M_PCPROTO) {
1443 mutex_enter(&stp->sd_lock);
1444 stp->sd_flag &= ~STRPRI;
1445 mutex_exit(&stp->sd_lock);
1446 }
1447 if (bp) {
1448 bp->b_band = pri;
1449 goto ismdata;
1450 } else {
1451 break;
1452 }
1453 }
1454 /* FALLTHRU */
1455 case M_PASSFP:
1456 if ((bp->b_datap->db_type == M_PASSFP) &&
1457 (stp->sd_read_opt & RD_PROTDIS)) {
1458 freemsg(bp);
1459 break;
1460 }
1461 mutex_enter(&stp->sd_lock);
1462 putback(stp, q, bp, pri);
1463 mutex_exit(&stp->sd_lock);
1464 if (rflg == 0)
1465 error = EBADMSG;
1466 goto oops1;
1467
1468 default:
1469 /*
1470 * Garbage on stream head read queue.
1471 */
1472 cmn_err(CE_WARN, "bad %x found at stream head\n",
1473 bp->b_datap->db_type);
1474 freemsg(bp);
1475 goto oops1;
1476 }
1477 mutex_enter(&stp->sd_lock);
1478 }
1479 oops:
1480 mutex_exit(&stp->sd_lock);
1481 oops1:
1482 qbackenable(q, pri);
1483 return (error);
1484 #undef _LASTMARK
1485 }
1486
1487 /*
1488 * Default processing of M_PROTO/M_PCPROTO messages.
1489 * Determine which wakeups and signals are needed.
1490 * This can be replaced by a user-specified procedure for kernel users
1491 * of STREAMS.
1492 */
1493 /* ARGSUSED */
1494 mblk_t *
1495 strrput_proto(vnode_t *vp, mblk_t *mp,
1496 strwakeup_t *wakeups, strsigset_t *firstmsgsigs,
1497 strsigset_t *allmsgsigs, strpollset_t *pollwakeups)
1498 {
1499 *wakeups = RSLEEP;
1500 *allmsgsigs = 0;
1501
1502 switch (mp->b_datap->db_type) {
1503 case M_PROTO:
1504 if (mp->b_band == 0) {
1505 *firstmsgsigs = S_INPUT | S_RDNORM;
1506 *pollwakeups = POLLIN | POLLRDNORM;
1507 } else {
1508 *firstmsgsigs = S_INPUT | S_RDBAND;
1509 *pollwakeups = POLLIN | POLLRDBAND;
1510 }
1511 break;
1512 case M_PCPROTO:
1513 *firstmsgsigs = S_HIPRI;
1514 *pollwakeups = POLLPRI;
1515 break;
1516 }
1517 return (mp);
1518 }
1519
1520 /*
1521 * Default processing of everything but M_DATA, M_PROTO, M_PCPROTO and
1522 * M_PASSFP messages.
1523 * Determine which wakeups and signals are needed.
1524 * This can be replaced by a user-specified procedure for kernel users
1525 * of STREAMS.
1526 */
1527 /* ARGSUSED */
1528 mblk_t *
1529 strrput_misc(vnode_t *vp, mblk_t *mp,
1530 strwakeup_t *wakeups, strsigset_t *firstmsgsigs,
1531 strsigset_t *allmsgsigs, strpollset_t *pollwakeups)
1532 {
1533 *wakeups = 0;
1534 *firstmsgsigs = 0;
1535 *allmsgsigs = 0;
1536 *pollwakeups = 0;
1537 return (mp);
1538 }
1539
1540 /*
1541 * Stream read put procedure. Called from downstream driver/module
1542 * with messages for the stream head. Data, protocol, and in-stream
1543 * signal messages are placed on the queue, others are handled directly.
1544 */
1545 int
1546 strrput(queue_t *q, mblk_t *bp)
1547 {
1548 struct stdata *stp;
1549 ulong_t rput_opt;
1550 strwakeup_t wakeups;
1551 strsigset_t firstmsgsigs; /* Signals if first message on queue */
1552 strsigset_t allmsgsigs; /* Signals for all messages */
1553 strsigset_t signals; /* Signals events to generate */
1554 strpollset_t pollwakeups;
1555 mblk_t *nextbp;
1556 uchar_t band = 0;
1557 int hipri_sig;
1558
1559 stp = (struct stdata *)q->q_ptr;
1560 /*
1561 * Use rput_opt for optimized access to the SR_ flags except
1562 * SR_POLLIN. That flag has to be checked under sd_lock since it
1563 * is modified by strpoll().
1564 */
1565 rput_opt = stp->sd_rput_opt;
1566
1567 ASSERT(qclaimed(q));
1568 TRACE_2(TR_FAC_STREAMS_FR, TR_STRRPUT_ENTER,
1569 "strrput called with message type:q %p bp %p", q, bp);
1570
1571 /*
1572 * Perform initial processing and pass to the parameterized functions.
1573 */
1574 ASSERT(bp->b_next == NULL);
1575
1576 switch (bp->b_datap->db_type) {
1577 case M_DATA:
/*
 * sockfs is the only consumer of STREOF.  When it is set,
 * the receiver is no longer interested in receiving any more
 * data, so the mblk is freed to prevent unnecessary message
 * queueing at the stream head.
 */
1584 if (stp->sd_flag == STREOF) {
1585 freemsg(bp);
1586 return (0);
1587 }
1588 if ((rput_opt & SR_IGN_ZEROLEN) &&
1589 bp->b_rptr == bp->b_wptr && msgnodata(bp)) {
/*
 * Ignore zero-length M_DATA messages.  These might be
 * generated by some transports.
 * Even though they are ignored, zero-length M_DATA
 * messages should still affect the atmark tracking and
 * should wake up a thread sleeping in strwaitmark.
 */
1597 mutex_enter(&stp->sd_lock);
1598 if (bp->b_flag & MSGMARKNEXT) {
1599 /*
1600 * Record the position of the mark either
1601 * in q_last or in STRATMARK.
1602 */
1603 if (q->q_last != NULL) {
1604 q->q_last->b_flag &= ~MSGNOTMARKNEXT;
1605 q->q_last->b_flag |= MSGMARKNEXT;
1606 } else {
1607 stp->sd_flag &= ~STRNOTATMARK;
1608 stp->sd_flag |= STRATMARK;
1609 }
1610 } else if (bp->b_flag & MSGNOTMARKNEXT) {
1611 /*
1612 * Record that this is not the position of
1613 * the mark either in q_last or in
1614 * STRNOTATMARK.
1615 */
1616 if (q->q_last != NULL) {
1617 q->q_last->b_flag &= ~MSGMARKNEXT;
1618 q->q_last->b_flag |= MSGNOTMARKNEXT;
1619 } else {
1620 stp->sd_flag &= ~STRATMARK;
1621 stp->sd_flag |= STRNOTATMARK;
1622 }
1623 }
1624 if (stp->sd_flag & RSLEEP) {
1625 stp->sd_flag &= ~RSLEEP;
1626 cv_broadcast(&q->q_wait);
1627 }
1628 mutex_exit(&stp->sd_lock);
1629 freemsg(bp);
1630 return (0);
1631 }
1632 wakeups = RSLEEP;
1633 if (bp->b_band == 0) {
1634 firstmsgsigs = S_INPUT | S_RDNORM;
1635 pollwakeups = POLLIN | POLLRDNORM;
1636 } else {
1637 firstmsgsigs = S_INPUT | S_RDBAND;
1638 pollwakeups = POLLIN | POLLRDBAND;
1639 }
1640 if (rput_opt & SR_SIGALLDATA)
1641 allmsgsigs = firstmsgsigs;
1642 else
1643 allmsgsigs = 0;
1644
1645 mutex_enter(&stp->sd_lock);
1646 if ((rput_opt & SR_CONSOL_DATA) &&
1647 (q->q_last != NULL) &&
1648 (bp->b_flag & (MSGMARK|MSGDELIM)) == 0) {
1649 /*
1650 * Consolidate an M_DATA message onto an M_DATA,
1651 * M_PROTO, or M_PCPROTO by merging it with q_last.
1652 * The consolidation does not take place if
1653 * the old message is marked with either of the
1654 * marks or the delim flag or if the new
1655 * message is marked with MSGMARK. The MSGMARK
1656 * check is needed to handle the odd semantics of
1657 * MSGMARK where essentially the whole message
1658 * is to be treated as marked.
1659 * Carry any MSGMARKNEXT and MSGNOTMARKNEXT from the
1660 * new message to the front of the b_cont chain.
1661 */
1662 mblk_t *lbp = q->q_last;
1663 unsigned char db_type = lbp->b_datap->db_type;
1664
1665 if ((db_type == M_DATA || db_type == M_PROTO ||
1666 db_type == M_PCPROTO) &&
1667 !(lbp->b_flag & (MSGDELIM|MSGMARK|MSGMARKNEXT))) {
1668 rmvq_noenab(q, lbp);
1669 /*
1670 * The first message in the b_cont list
1671 * tracks MSGMARKNEXT and MSGNOTMARKNEXT.
1672 * We need to handle the case where we
1673 * are appending:
1674 *
1675 * 1) a MSGMARKNEXT to a MSGNOTMARKNEXT.
1676 * 2) a MSGMARKNEXT to a plain message.
1677 * 3) a MSGNOTMARKNEXT to a plain message
1678 * 4) a MSGNOTMARKNEXT to a MSGNOTMARKNEXT
1679 * message.
1680 *
1681 * Thus we never append a MSGMARKNEXT or
1682 * MSGNOTMARKNEXT to a MSGMARKNEXT message.
1683 */
1684 if (bp->b_flag & MSGMARKNEXT) {
1685 lbp->b_flag |= MSGMARKNEXT;
1686 lbp->b_flag &= ~MSGNOTMARKNEXT;
1687 bp->b_flag &= ~MSGMARKNEXT;
1688 } else if (bp->b_flag & MSGNOTMARKNEXT) {
1689 lbp->b_flag |= MSGNOTMARKNEXT;
1690 bp->b_flag &= ~MSGNOTMARKNEXT;
1691 }
1692
1693 linkb(lbp, bp);
1694 bp = lbp;
1695 /*
1696 * The new message logically isn't the first
1697 * even though the q_first check below thinks
1698 * it is. Clear the firstmsgsigs to make it
1699 * not appear to be first.
1700 */
1701 firstmsgsigs = 0;
1702 }
1703 }
1704 break;
1705
1706 case M_PASSFP:
1707 wakeups = RSLEEP;
1708 allmsgsigs = 0;
1709 if (bp->b_band == 0) {
1710 firstmsgsigs = S_INPUT | S_RDNORM;
1711 pollwakeups = POLLIN | POLLRDNORM;
1712 } else {
1713 firstmsgsigs = S_INPUT | S_RDBAND;
1714 pollwakeups = POLLIN | POLLRDBAND;
1715 }
1716 mutex_enter(&stp->sd_lock);
1717 break;
1718
1719 case M_PROTO:
1720 case M_PCPROTO:
1721 ASSERT(stp->sd_rprotofunc != NULL);
1722 bp = (stp->sd_rprotofunc)(stp->sd_vnode, bp,
1723 &wakeups, &firstmsgsigs, &allmsgsigs, &pollwakeups);
1724 #define ALLSIG (S_INPUT|S_HIPRI|S_OUTPUT|S_MSG|S_ERROR|S_HANGUP|S_RDNORM|\
1725 S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG)
1726 #define ALLPOLL (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLWRNORM|POLLRDBAND|\
1727 POLLWRBAND)
1728
1729 ASSERT((wakeups & ~(RSLEEP|WSLEEP)) == 0);
1730 ASSERT((firstmsgsigs & ~ALLSIG) == 0);
1731 ASSERT((allmsgsigs & ~ALLSIG) == 0);
1732 ASSERT((pollwakeups & ~ALLPOLL) == 0);
1733
1734 mutex_enter(&stp->sd_lock);
1735 break;
1736
1737 default:
1738 ASSERT(stp->sd_rmiscfunc != NULL);
1739 bp = (stp->sd_rmiscfunc)(stp->sd_vnode, bp,
1740 &wakeups, &firstmsgsigs, &allmsgsigs, &pollwakeups);
1741 ASSERT((wakeups & ~(RSLEEP|WSLEEP)) == 0);
1742 ASSERT((firstmsgsigs & ~ALLSIG) == 0);
1743 ASSERT((allmsgsigs & ~ALLSIG) == 0);
1744 ASSERT((pollwakeups & ~ALLPOLL) == 0);
1745 #undef ALLSIG
1746 #undef ALLPOLL
1747 mutex_enter(&stp->sd_lock);
1748 break;
1749 }
1750 ASSERT(MUTEX_HELD(&stp->sd_lock));
1751
1752 /* By default generate superset of signals */
1753 signals = (firstmsgsigs | allmsgsigs);
1754
1755 /*
1756 * The proto and misc functions can return multiple messages
1757 * as a b_next chain. Such messages are processed separately.
1758 */
1759 one_more:
1760 hipri_sig = 0;
1761 if (bp == NULL) {
1762 nextbp = NULL;
1763 } else {
1764 nextbp = bp->b_next;
1765 bp->b_next = NULL;
1766
1767 switch (bp->b_datap->db_type) {
1768 case M_PCPROTO:
1769 /*
1770 * Only one priority protocol message is allowed at the
1771 * stream head at a time.
1772 */
1773 if (stp->sd_flag & STRPRI) {
1774 TRACE_0(TR_FAC_STREAMS_FR, TR_STRRPUT_PROTERR,
1775 "M_PCPROTO already at head");
1776 freemsg(bp);
1777 mutex_exit(&stp->sd_lock);
1778 goto done;
1779 }
1780 stp->sd_flag |= STRPRI;
1781 hipri_sig = 1;
1782 /* FALLTHRU */
1783 case M_DATA:
1784 case M_PROTO:
1785 case M_PASSFP:
1786 band = bp->b_band;
1787 /*
1788 * Marking doesn't work well when messages
1789 * are marked in more than one band. We only
1790 * remember the last message received, even if
1791 * it is placed on the queue ahead of other
1792 * marked messages.
1793 */
1794 if (bp->b_flag & MSGMARK)
1795 stp->sd_mark = bp;
1796 (void) putq(q, bp);
1797
1798 /*
1799 * If message is a PCPROTO message, always use
1800 * firstmsgsigs to determine if a signal should be
1801 * sent as strrput is the only place to send
1802 * signals for PCPROTO. Other messages are based on
1803 * the STRGETINPROG flag. The flag determines if
1804 * strrput or (k)strgetmsg will be responsible for
1805 * sending the signals, in the firstmsgsigs case.
1806 */
1807 if ((hipri_sig == 1) ||
1808 (((stp->sd_flag & STRGETINPROG) == 0) &&
1809 (q->q_first == bp)))
1810 signals = (firstmsgsigs | allmsgsigs);
1811 else
1812 signals = allmsgsigs;
1813 break;
1814
1815 default:
1816 mutex_exit(&stp->sd_lock);
1817 (void) strrput_nondata(q, bp);
1818 mutex_enter(&stp->sd_lock);
1819 break;
1820 }
1821 }
1822 ASSERT(MUTEX_HELD(&stp->sd_lock));
1823 /*
1824 * Wake sleeping read/getmsg and cancel deferred wakeup
1825 */
1826 if (wakeups & RSLEEP)
1827 stp->sd_wakeq &= ~RSLEEP;
1828
1829 wakeups &= stp->sd_flag;
1830 if (wakeups & RSLEEP) {
1831 stp->sd_flag &= ~RSLEEP;
1832 cv_broadcast(&q->q_wait);
1833 }
1834 if (wakeups & WSLEEP) {
1835 stp->sd_flag &= ~WSLEEP;
1836 cv_broadcast(&_WR(q)->q_wait);
1837 }
1838
1839 if (pollwakeups != 0) {
1840 if (pollwakeups == (POLLIN | POLLRDNORM)) {
1841 /*
1842 * Can't use rput_opt since it was not
1843 * read when sd_lock was held and SR_POLLIN is changed
1844 * by strpoll() under sd_lock.
1845 */
1846 if (!(stp->sd_rput_opt & SR_POLLIN))
1847 goto no_pollwake;
1848 stp->sd_rput_opt &= ~SR_POLLIN;
1849 }
1850 mutex_exit(&stp->sd_lock);
1851 pollwakeup(&stp->sd_pollist, pollwakeups);
1852 mutex_enter(&stp->sd_lock);
1853 }
1854 no_pollwake:
1855
1856 /*
1857 * strsendsig can handle multiple signals with a
1858 * single call.
1859 */
1860 if (stp->sd_sigflags & signals)
1861 strsendsig(stp->sd_siglist, signals, band, 0);
1862 mutex_exit(&stp->sd_lock);
1863
1864
1865 done:
1866 if (nextbp == NULL)
1867 return (0);
1868
/*
 * Any signals were handled the first time.
 * Wakeups and pollwakeups are redone to avoid any race
 * conditions - not all of the messages are queued until the
 * last message has been processed by strrput.
 */
1875 bp = nextbp;
1876 signals = firstmsgsigs = allmsgsigs = 0;
1877 mutex_enter(&stp->sd_lock);
1878 goto one_more;
1879 }
1880
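/*
 * Log (via strlog) the receipt of a duplicate M_IOCACK or M_IOCNAK at
 * the stream head, naming every module on the stream plus the driver so
 * that the misbehaving component can be identified.
 */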
1881 static void
1882 log_dupioc(queue_t *rq, mblk_t *bp)
1883 {
1884 queue_t *wq, *qp;
1885 char *modnames, *mnp, *dname;
1886 size_t maxmodstr;
1887 boolean_t islast;
1888
/*
 * Allocate a buffer large enough to hold the names of nstrpush modules
 * and one driver, with spaces between and a NUL terminator.  If we can't
 * get memory, then we'll just log the driver name.
 */
1894 maxmodstr = nstrpush * (FMNAMESZ + 1);
1895 mnp = modnames = kmem_alloc(maxmodstr, KM_NOSLEEP);
1896
1897 /* march down write side to print log message down to the driver */
1898 wq = WR(rq);
1899
1900 /* make sure q_next doesn't shift around while we're grabbing data */
1901 claimstr(wq);
1902 qp = wq->q_next;
1903 do {
1904 dname = Q2NAME(qp);
1905 islast = !SAMESTR(qp) || qp->q_next == NULL;
1906 if (modnames == NULL) {
1907 /*
1908 * If we don't have memory, then get the driver name in
1909 * the log where we can see it. Note that memory
1910 * pressure is a possible cause of these sorts of bugs.
1911 */
1912 if (islast) {
1913 modnames = dname;
1914 maxmodstr = 0;
1915 }
1916 } else {
1917 mnp += snprintf(mnp, FMNAMESZ + 1, "%s", dname);
1918 if (!islast)
1919 *mnp++ = ' ';
1920 }
1921 qp = qp->q_next;
1922 } while (!islast);
1923 releasestr(wq);
1924 /* Cannot happen unless stream head is corrupt. */
1925 ASSERT(modnames != NULL);
1926 (void) strlog(rq->q_qinfo->qi_minfo->mi_idnum, 0, 1,
1927 SL_CONSOLE|SL_TRACE|SL_ERROR,
1928 "Warning: stream %p received duplicate %X M_IOC%s; module list: %s",
1929 rq->q_ptr, ((struct iocblk *)bp->b_rptr)->ioc_cmd,
1930 (DB_TYPE(bp) == M_IOCACK ? "ACK" : "NAK"), modnames);
1931 if (maxmodstr != 0)
1932 kmem_free(modnames, maxmodstr);
1933 }
1934
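/*
 * Handle the message types that strrput() does not queue for readers:
 * M_ERROR, M_HANGUP, M_UNHANGUP, M_SIG and, judging from the locals
 * declared below, M_SETOPTS and the ioctl/copy reply messages.  Called
 * from strrput() with sd_lock dropped.
 */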
1935 int
1936 strrput_nondata(queue_t *q, mblk_t *bp)
1937 {
1938 struct stdata *stp;
1939 struct iocblk *iocbp;
1940 struct stroptions *sop;
1941 struct copyreq *reqp;
1942 struct copyresp *resp;
1943 unsigned char bpri;
1944 unsigned char flushed_already = 0;
1945
1946 stp = (struct stdata *)q->q_ptr;
1947
1948 ASSERT(!(stp->sd_flag & STPLEX));
1949 ASSERT(qclaimed(q));
1950
1951 switch (bp->b_datap->db_type) {
1952 case M_ERROR:
/*
 * An error has occurred downstream; the error code(s) are in
 * the first bytes of the message.
 */
1957 if ((bp->b_wptr - bp->b_rptr) == 2) { /* New flavor */
1958 unsigned char rw = 0;
1959
1960 mutex_enter(&stp->sd_lock);
1961 if (*bp->b_rptr != NOERROR) { /* read error */
1962 if (*bp->b_rptr != 0) {
1963 if (stp->sd_flag & STRDERR)
1964 flushed_already |= FLUSHR;
1965 stp->sd_flag |= STRDERR;
1966 rw |= FLUSHR;
1967 } else {
1968 stp->sd_flag &= ~STRDERR;
1969 }
1970 stp->sd_rerror = *bp->b_rptr;
1971 }
1972 bp->b_rptr++;
1973 if (*bp->b_rptr != NOERROR) { /* write error */
1974 if (*bp->b_rptr != 0) {
1975 if (stp->sd_flag & STWRERR)
1976 flushed_already |= FLUSHW;
1977 stp->sd_flag |= STWRERR;
1978 rw |= FLUSHW;
1979 } else {
1980 stp->sd_flag &= ~STWRERR;
1981 }
1982 stp->sd_werror = *bp->b_rptr;
1983 }
1984 if (rw) {
1985 TRACE_2(TR_FAC_STREAMS_FR, TR_STRRPUT_WAKE,
1986 "strrput cv_broadcast:q %p, bp %p",
1987 q, bp);
1988 cv_broadcast(&q->q_wait); /* readers */
1989 cv_broadcast(&_WR(q)->q_wait); /* writers */
1990 cv_broadcast(&stp->sd_monitor); /* ioctllers */
1991
1992 mutex_exit(&stp->sd_lock);
1993 pollwakeup(&stp->sd_pollist, POLLERR);
1994 mutex_enter(&stp->sd_lock);
1995
1996 if (stp->sd_sigflags & S_ERROR)
1997 strsendsig(stp->sd_siglist, S_ERROR, 0,
1998 ((rw & FLUSHR) ? stp->sd_rerror :
1999 stp->sd_werror));
2000 mutex_exit(&stp->sd_lock);
2001 /*
2002 * Send the M_FLUSH only
2003 * for the first M_ERROR
2004 * message on the stream
2005 */
2006 if (flushed_already == rw) {
2007 freemsg(bp);
2008 return (0);
2009 }
2010
2011 bp->b_datap->db_type = M_FLUSH;
2012 *bp->b_rptr = rw;
2013 bp->b_wptr = bp->b_rptr + 1;
2014 /*
2015 * Protect against the driver
2016 * passing up messages after
2017 * it has done a qprocsoff
2018 */
2019 if (_OTHERQ(q)->q_next == NULL)
2020 freemsg(bp);
2021 else
2022 qreply(q, bp);
2023 return (0);
2024 } else
2025 mutex_exit(&stp->sd_lock);
2026 } else if (*bp->b_rptr != 0) { /* Old flavor */
2027 if (stp->sd_flag & (STRDERR|STWRERR))
2028 flushed_already = FLUSHRW;
2029 mutex_enter(&stp->sd_lock);
2030 stp->sd_flag |= (STRDERR|STWRERR);
2031 stp->sd_rerror = *bp->b_rptr;
2032 stp->sd_werror = *bp->b_rptr;
2033 TRACE_2(TR_FAC_STREAMS_FR,
2034 TR_STRRPUT_WAKE2,
2035 "strrput wakeup #2:q %p, bp %p", q, bp);
2036 cv_broadcast(&q->q_wait); /* the readers */
2037 cv_broadcast(&_WR(q)->q_wait); /* the writers */
2038 cv_broadcast(&stp->sd_monitor); /* ioctllers */
2039
2040 mutex_exit(&stp->sd_lock);
2041 pollwakeup(&stp->sd_pollist, POLLERR);
2042 mutex_enter(&stp->sd_lock);
2043
2044 if (stp->sd_sigflags & S_ERROR)
2045 strsendsig(stp->sd_siglist, S_ERROR, 0,
2046 (stp->sd_werror ? stp->sd_werror :
2047 stp->sd_rerror));
2048 mutex_exit(&stp->sd_lock);
2049
2050 /*
2051 * Send the M_FLUSH only
2052 * for the first M_ERROR
2053 * message on the stream
2054 */
2055 if (flushed_already != FLUSHRW) {
2056 bp->b_datap->db_type = M_FLUSH;
2057 *bp->b_rptr = FLUSHRW;
2058 /*
2059 * Protect against the driver passing up
2060 * messages after it has done a
2061 * qprocsoff.
2062 */
2063 if (_OTHERQ(q)->q_next == NULL)
2064 freemsg(bp);
2065 else
2066 qreply(q, bp);
2067 return (0);
2068 }
2069 }
2070 freemsg(bp);
2071 return (0);
2072
2073 case M_HANGUP:
2074
2075 freemsg(bp);
2076 mutex_enter(&stp->sd_lock);
2077 stp->sd_werror = ENXIO;
2078 stp->sd_flag |= STRHUP;
2079 stp->sd_flag &= ~(WSLEEP|RSLEEP);
2080
2081 /*
2082 * send signal if controlling tty
2083 */
2084
2085 if (stp->sd_sidp) {
2086 prsignal(stp->sd_sidp, SIGHUP);
2087 if (stp->sd_sidp != stp->sd_pgidp)
2088 pgsignal(stp->sd_pgidp, SIGTSTP);
2089 }
2090
2091 /*
2092 * wake up read, write, and exception pollers and
2093 * reset wakeup mechanism.
2094 */
2095 cv_broadcast(&q->q_wait); /* the readers */
2096 cv_broadcast(&_WR(q)->q_wait); /* the writers */
2097 cv_broadcast(&stp->sd_monitor); /* the ioctllers */
2098 strhup(stp);
2099 mutex_exit(&stp->sd_lock);
2100 return (0);
2101
2102 case M_UNHANGUP:
2103 freemsg(bp);
2104 mutex_enter(&stp->sd_lock);
2105 stp->sd_werror = 0;
2106 stp->sd_flag &= ~STRHUP;
2107 mutex_exit(&stp->sd_lock);
2108 return (0);
2109
2110 case M_SIG:
2111 /*
2112 * Someone downstream wants to post a signal. The
2113 * signal to post is contained in the first byte of the
2114 * message. If the message would go on the front of
2115 * the queue, send a signal to the process group
2116 * (if not SIGPOLL) or to the siglist processes
2117 * (SIGPOLL). If something is already on the queue,
2118 * OR if we are delivering a delayed suspend (*sigh*
2119 * another "tty" hack) and there's no one sleeping already,
2120 * just enqueue the message.
2121 */
2122 mutex_enter(&stp->sd_lock);
2123 if (q->q_first || (*bp->b_rptr == SIGTSTP &&
2124 !(stp->sd_flag & RSLEEP))) {
2125 (void) putq(q, bp);
2126 mutex_exit(&stp->sd_lock);
2127 return (0);
2128 }
2129 mutex_exit(&stp->sd_lock);
2130 /* FALLTHRU */
2131
2132 case M_PCSIG:
2133 /*
2134 * Don't enqueue, just post the signal.
2135 */
2136 strsignal(stp, *bp->b_rptr, 0L);
2137 freemsg(bp);
2138 return (0);
2139
2140 case M_CMD:
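		/*
		 * A cmdblk_t arriving from downstream: if a thread is
		 * waiting for it (STRCMDWAIT is set), hand it over and wake
		 * the waiter; otherwise drop it.
		 */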
2141 if (MBLKL(bp) != sizeof (cmdblk_t)) {
2142 freemsg(bp);
2143 return (0);
2144 }
2145
2146 mutex_enter(&stp->sd_lock);
2147 if (stp->sd_flag & STRCMDWAIT) {
2148 ASSERT(stp->sd_cmdblk == NULL);
2149 stp->sd_cmdblk = bp;
2150 cv_broadcast(&stp->sd_monitor);
2151 mutex_exit(&stp->sd_lock);
2152 } else {
2153 mutex_exit(&stp->sd_lock);
2154 freemsg(bp);
2155 }
2156 return (0);
2157
2158 case M_FLUSH:
2159 /*
2160 * Flush queues. The indication of which queues to flush
2161 * is in the first byte of the message. If the read queue
2162 * is specified, then flush it. If FLUSHBAND is set, just
2163 * flush the band specified by the second byte of the message.
2164 *
2165	 * If a module has issued an M_SETOPTS to not flush high-
2166	 * priority messages off of the stream head, then pass this
2167	 * flag into the flushq code to preserve such messages.
2168 */
2169
2170 if (*bp->b_rptr & FLUSHR) {
2171 mutex_enter(&stp->sd_lock);
2172 if (*bp->b_rptr & FLUSHBAND) {
2173 ASSERT((bp->b_wptr - bp->b_rptr) >= 2);
2174 flushband(q, *(bp->b_rptr + 1), FLUSHALL);
2175 } else
2176 flushq_common(q, FLUSHALL,
2177 stp->sd_read_opt & RFLUSHPCPROT);
2178 if ((q->q_first == NULL) ||
2179 (q->q_first->b_datap->db_type < QPCTL))
2180 stp->sd_flag &= ~STRPRI;
2181 else {
2182 ASSERT(stp->sd_flag & STRPRI);
2183 }
2184 mutex_exit(&stp->sd_lock);
2185 }
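		/*
		 * Reflect a write-side flush back downstream: clear FLUSHR
		 * (the read side was handled above) and set MSGNOLOOP so the
		 * reflected message cannot loop back up here again.
		 */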
2186 if ((*bp->b_rptr & FLUSHW) && !(bp->b_flag & MSGNOLOOP)) {
2187 *bp->b_rptr &= ~FLUSHR;
2188 bp->b_flag |= MSGNOLOOP;
2189 /*
2190 * Protect against the driver passing up
2191 * messages after it has done a qprocsoff.
2192 */
2193 if (_OTHERQ(q)->q_next == NULL)
2194 freemsg(bp);
2195 else
2196 qreply(q, bp);
2197 return (0);
2198 }
2199 freemsg(bp);
2200 return (0);
2201
2202 case M_IOCACK:
2203 case M_IOCNAK:
2204 iocbp = (struct iocblk *)bp->b_rptr;
2205 /*
2206 * If not waiting for ACK or NAK then just free msg.
2207 * If incorrect id sequence number then just free msg.
2208 * If already have ACK or NAK for user then this is a
2209 * duplicate, display a warning and free the msg.
2210 */
2211 mutex_enter(&stp->sd_lock);
2212 if ((stp->sd_flag & IOCWAIT) == 0 || stp->sd_iocblk ||
2213 (stp->sd_iocid != iocbp->ioc_id)) {
2214 /*
2215			 * If the ACK/NAK is a dup, display a message.
2216			 * A dup is when sd_iocid == ioc_id, and
2217			 * sd_iocblk == <valid ptr> or -1 (the former
2218			 * is when an ioctl has been put on the stream
2219			 * head, but has not yet been consumed, the
2220			 * latter is when it has been consumed).
2221 */
2222 if ((stp->sd_iocid == iocbp->ioc_id) &&
2223 (stp->sd_iocblk != NULL)) {
2224 log_dupioc(q, bp);
2225 }
2226 freemsg(bp);
2227 mutex_exit(&stp->sd_lock);
2228 return (0);
2229 }
2230
2231 /*
2232 * Assign ACK or NAK to user and wake up.
2233 */
2234 stp->sd_iocblk = bp;
2235 cv_broadcast(&stp->sd_monitor);
2236 mutex_exit(&stp->sd_lock);
2237 return (0);
2238
2239 case M_COPYIN:
2240 case M_COPYOUT:
2241 reqp = (struct copyreq *)bp->b_rptr;
2242
2243 /*
2244 * If not waiting for ACK or NAK then just fail request.
2245 * If already have ACK, NAK, or copy request, then just
2246 * fail request.
2247 * If incorrect id sequence number then just fail request.
2248 */
2249 mutex_enter(&stp->sd_lock);
2250 if ((stp->sd_flag & IOCWAIT) == 0 || stp->sd_iocblk ||
2251 (stp->sd_iocid != reqp->cq_id)) {
2252 if (bp->b_cont) {
2253 freemsg(bp->b_cont);
2254 bp->b_cont = NULL;
2255 }
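			/*
			 * Turn the stray copy request into a failed M_IOCDATA
			 * response so that the sender can abort its ioctl.
			 */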
2256 bp->b_datap->db_type = M_IOCDATA;
2257 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp);
2258 resp = (struct copyresp *)bp->b_rptr;
2259 resp->cp_rval = (caddr_t)1; /* failure */
2260 mutex_exit(&stp->sd_lock);
2261 putnext(stp->sd_wrq, bp);
2262 return (0);
2263 }
2264
2265 /*
2266 * Assign copy request to user and wake up.
2267 */
2268 stp->sd_iocblk = bp;
2269 cv_broadcast(&stp->sd_monitor);
2270 mutex_exit(&stp->sd_lock);
2271 return (0);
2272
2273 case M_SETOPTS:
2274 /*
2275 * Set stream head options (read option, write offset,
2276 * min/max packet size, and/or high/low water marks for
2277 * the read side only).
2278 */
2279
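		/*
		 * bpri records the band (if any) whose water marks are
		 * changed below, so that qbackenable() at the end can
		 * re-enable that band.
		 */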
2280 bpri = 0;
2281 sop = (struct stroptions *)bp->b_rptr;
2282 mutex_enter(&stp->sd_lock);
2283 if (sop->so_flags & SO_READOPT) {
2284 switch (sop->so_readopt & RMODEMASK) {
2285 case RNORM:
2286 stp->sd_read_opt &= ~(RD_MSGDIS | RD_MSGNODIS);
2287 break;
2288
2289 case RMSGD:
2290 stp->sd_read_opt =
2291 ((stp->sd_read_opt & ~RD_MSGNODIS) |
2292 RD_MSGDIS);
2293 break;
2294
2295 case RMSGN:
2296 stp->sd_read_opt =
2297 ((stp->sd_read_opt & ~RD_MSGDIS) |
2298 RD_MSGNODIS);
2299 break;
2300 }
2301 switch (sop->so_readopt & RPROTMASK) {
2302 case RPROTNORM:
2303 stp->sd_read_opt &= ~(RD_PROTDAT | RD_PROTDIS);
2304 break;
2305
2306 case RPROTDAT:
2307 stp->sd_read_opt =
2308 ((stp->sd_read_opt & ~RD_PROTDIS) |
2309 RD_PROTDAT);
2310 break;
2311
2312 case RPROTDIS:
2313 stp->sd_read_opt =
2314 ((stp->sd_read_opt & ~RD_PROTDAT) |
2315 RD_PROTDIS);
2316 break;
2317 }
2318 switch (sop->so_readopt & RFLUSHMASK) {
2319 case RFLUSHPCPROT:
2320 /*
2321 * This sets the stream head to NOT flush
2322 * M_PCPROTO messages.
2323 */
2324 stp->sd_read_opt |= RFLUSHPCPROT;
2325 break;
2326 }
2327 }
2328 if (sop->so_flags & SO_ERROPT) {
2329 switch (sop->so_erropt & RERRMASK) {
2330 case RERRNORM:
2331 stp->sd_flag &= ~STRDERRNONPERSIST;
2332 break;
2333 case RERRNONPERSIST:
2334 stp->sd_flag |= STRDERRNONPERSIST;
2335 break;
2336 }
2337 switch (sop->so_erropt & WERRMASK) {
2338 case WERRNORM:
2339 stp->sd_flag &= ~STWRERRNONPERSIST;
2340 break;
2341 case WERRNONPERSIST:
2342 stp->sd_flag |= STWRERRNONPERSIST;
2343 break;
2344 }
2345 }
2346 if (sop->so_flags & SO_COPYOPT) {
2347 if (sop->so_copyopt & ZCVMSAFE) {
2348 stp->sd_copyflag |= STZCVMSAFE;
2349 stp->sd_copyflag &= ~STZCVMUNSAFE;
2350 } else if (sop->so_copyopt & ZCVMUNSAFE) {
2351 stp->sd_copyflag |= STZCVMUNSAFE;
2352 stp->sd_copyflag &= ~STZCVMSAFE;
2353 }
2354
2355 if (sop->so_copyopt & COPYCACHED) {
2356 stp->sd_copyflag |= STRCOPYCACHED;
2357 }
2358 }
2359 if (sop->so_flags & SO_WROFF)
2360 stp->sd_wroff = sop->so_wroff;
2361 if (sop->so_flags & SO_TAIL)
2362 stp->sd_tail = sop->so_tail;
2363 if (sop->so_flags & SO_MINPSZ)
2364 q->q_minpsz = sop->so_minpsz;
2365 if (sop->so_flags & SO_MAXPSZ)
2366 q->q_maxpsz = sop->so_maxpsz;
2367 if (sop->so_flags & SO_MAXBLK)
2368 stp->sd_maxblk = sop->so_maxblk;
2369 if (sop->so_flags & SO_HIWAT) {
2370 if (sop->so_flags & SO_BAND) {
2371 if (strqset(q, QHIWAT,
2372 sop->so_band, sop->so_hiwat)) {
2373 cmn_err(CE_WARN, "strrput: could not "
2374 "allocate qband\n");
2375 } else {
2376 bpri = sop->so_band;
2377 }
2378 } else {
2379 q->q_hiwat = sop->so_hiwat;
2380 }
2381 }
2382 if (sop->so_flags & SO_LOWAT) {
2383 if (sop->so_flags & SO_BAND) {
2384 if (strqset(q, QLOWAT,
2385 sop->so_band, sop->so_lowat)) {
2386 cmn_err(CE_WARN, "strrput: could not "
2387 "allocate qband\n");
2388 } else {
2389 bpri = sop->so_band;
2390 }
2391 } else {
2392 q->q_lowat = sop->so_lowat;
2393 }
2394 }
2395 if (sop->so_flags & SO_MREADON)
2396 stp->sd_flag |= SNDMREAD;
2397 if (sop->so_flags & SO_MREADOFF)
2398 stp->sd_flag &= ~SNDMREAD;
2399 if (sop->so_flags & SO_NDELON)
2400 stp->sd_flag |= OLDNDELAY;
2401 if (sop->so_flags & SO_NDELOFF)
2402 stp->sd_flag &= ~OLDNDELAY;
2403 if (sop->so_flags & SO_ISTTY)
2404 stp->sd_flag |= STRISTTY;
2405 if (sop->so_flags & SO_ISNTTY)
2406 stp->sd_flag &= ~STRISTTY;
2407 if (sop->so_flags & SO_TOSTOP)
2408 stp->sd_flag |= STRTOSTOP;
2409 if (sop->so_flags & SO_TONSTOP)
2410 stp->sd_flag &= ~STRTOSTOP;
2411 if (sop->so_flags & SO_DELIM)
2412 stp->sd_flag |= STRDELIM;
2413 if (sop->so_flags & SO_NODELIM)
2414 stp->sd_flag &= ~STRDELIM;
2415
2416 mutex_exit(&stp->sd_lock);
2417 freemsg(bp);
2418
2419 /* Check backenable in case the water marks changed */
2420 qbackenable(q, bpri);
2421 return (0);
2422
2423 /*
2424 * The following set of cases deal with situations where two stream
2425 * heads are connected to each other (twisted streams). These messages
2426 * have no meaning at the stream head.
2427 */
2428 case M_BREAK:
2429 case M_CTL:
2430 case M_DELAY:
2431 case M_START:
2432 case M_STOP:
2433 case M_IOCDATA:
2434 case M_STARTI:
2435 case M_STOPI:
2436 freemsg(bp);
2437 return (0);
2438
2439 case M_IOCTL:
2440 /*
2441		 * Always NAK this condition
2442		 * (it makes no sense here).
2443		 * If there are one or more threads in the read-side
2444		 * rwnext, we have to defer the NAK until those threads
2445		 * return (in strget).
2446 */
2447 mutex_enter(&stp->sd_lock);
2448 if (stp->sd_struiodnak != 0) {
2449 /*
2450			 * Defer the NAK to the stream head. Queue it at
2451			 * the end of the list.
2452 */
2453 mblk_t *mp = stp->sd_struionak;
2454
2455 while (mp && mp->b_next)
2456 mp = mp->b_next;
2457 if (mp)
2458 mp->b_next = bp;
2459 else
2460 stp->sd_struionak = bp;
2461 bp->b_next = NULL;
2462 mutex_exit(&stp->sd_lock);
2463 return (0);
2464 }
2465 mutex_exit(&stp->sd_lock);
2466
2467 bp->b_datap->db_type = M_IOCNAK;
2468 /*
2469 * Protect against the driver passing up
2470 * messages after it has done a qprocsoff.
2471 */
2472 if (_OTHERQ(q)->q_next == NULL)
2473 freemsg(bp);
2474 else
2475 qreply(q, bp);
2476 return (0);
2477
2478 default:
2479 #ifdef DEBUG
2480 cmn_err(CE_WARN,
2481 "bad message type %x received at stream head\n",
2482 bp->b_datap->db_type);
2483 #endif
2484 freemsg(bp);
2485 return (0);
2486 }
2487
2488 /* NOTREACHED */
2489 }
2490
2491 /*
2492 * Check if the stream pointed to by `stp' can be written to, and return an
2493 * error code if not. If `eiohup' is set, then return EIO if STRHUP is set.
2494 * If `sigpipeok' is set and the SW_SIGPIPE option is enabled on the stream,
2495 * then always return EPIPE and send a SIGPIPE to the invoking thread.
2496 */
2497 static int
2498 strwriteable(struct stdata *stp, boolean_t eiohup, boolean_t sigpipeok)
2499 {
2500 int error;
2501
2502 ASSERT(MUTEX_HELD(&stp->sd_lock));
2503
2504 /*
2505 * For modem support, POSIX states that on writes, EIO should
2506 * be returned if the stream has been hung up.
2507 */
2508 if (eiohup && (stp->sd_flag & (STPLEX|STRHUP)) == STRHUP)
2509 error = EIO;
2510 else
2511 error = strgeterr(stp, STRHUP|STPLEX|STWRERR, 0);
2512
2513 if (error != 0) {
2514 if (!(stp->sd_flag & STPLEX) &&
2515 (stp->sd_wput_opt & SW_SIGPIPE) && sigpipeok) {
2516 tsignal(curthread, SIGPIPE);
2517 error = EPIPE;
2518 }
2519 }
2520
2521 return (error);
2522 }
2523
2524 /*
2525 * Copyin and send data down a stream.
2526 * The caller will allocate and copyin any control part that precedes the
2527 * message and pass that in as mctl.
2528 *
2529 * Caller should *not* hold sd_lock.
2530 * When EWOULDBLOCK is returned the caller has to redo the canputnext
2531 * under sd_lock in order to avoid missing a backenabling wakeup.
2532 *
2533 * Use iosize = -1 to not send any M_DATA. iosize = 0 sends zero-length M_DATA.
2534 *
2535 * Set MSG_IGNFLOW in flags to ignore flow control for hipri messages.
2536 * For sync streams we can only ignore flow control by reverting to using
2537 * putnext.
2538 *
2539 * If sd_maxblk is less than *iosize this routine might return without
2540 * transferring all of *iosize. In all cases, on return *iosize will contain
2541 * the amount of data that was transferred.
2542 */
2543 static int
2544 strput(struct stdata *stp, mblk_t *mctl, struct uio *uiop, ssize_t *iosize,
2545 int b_flag, int pri, int flags)
2546 {
2547 struiod_t uiod;
2548 mblk_t *mp;
2549 queue_t *wqp = stp->sd_wrq;
2550 int error = 0;
2551 ssize_t count = *iosize;
2552
2553 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));
2554
2555 if (uiop != NULL && count >= 0)
2556 flags |= stp->sd_struiowrq ? STRUIO_POSTPONE : 0;
2557
2558 if (!(flags & STRUIO_POSTPONE)) {
2559 /*
2560 * Use regular canputnext, strmakedata, putnext sequence.
2561 */
2562 if (pri == 0) {
2563 if (!canputnext(wqp) && !(flags & MSG_IGNFLOW)) {
2564 freemsg(mctl);
2565 return (EWOULDBLOCK);
2566 }
2567 } else {
2568 if (!(flags & MSG_IGNFLOW) && !bcanputnext(wqp, pri)) {
2569 freemsg(mctl);
2570 return (EWOULDBLOCK);
2571 }
2572 }
2573
2574 if ((error = strmakedata(iosize, uiop, stp, flags,
2575 &mp)) != 0) {
2576 freemsg(mctl);
2577 /*
2578			 * Need to change the return code to ENOMEM
2579			 * so that it is not confused with the
2580			 * flow-control error, EAGAIN.
2581 */
2582
2583 if (error == EAGAIN)
2584 return (ENOMEM);
2585 else
2586 return (error);
2587 }
2588 if (mctl != NULL) {
2589 if (mctl->b_cont == NULL)
2590 mctl->b_cont = mp;
2591 else if (mp != NULL)
2592 linkb(mctl, mp);
2593 mp = mctl;
2594 } else if (mp == NULL)
2595 return (0);
2596
2597 mp->b_flag |= b_flag;
2598 mp->b_band = (uchar_t)pri;
2599
2600 if (flags & MSG_IGNFLOW) {
2601 /*
2602 * XXX Hack: Don't get stuck running service
2603 * procedures. This is needed for sockfs when
2604 * sending the unbind message out of the rput
2605 * procedure - we don't want a put procedure
2606 * to run service procedures.
2607 */
2608 putnext(wqp, mp);
2609 } else {
2610 stream_willservice(stp);
2611 putnext(wqp, mp);
2612 stream_runservice(stp);
2613 }
2614 return (0);
2615 }
2616 /*
2617 * Stream supports rwnext() for the write side.
2618 */
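	/*
	 * Hand the assembled message to rwnext() so the downstream
	 * synchronous-stream module can copy in the uio data directly.
	 * If rwnext() could not take it (plumbing changed, perimeter busy,
	 * or a page fault), fall back to copying the data in here with
	 * struioget() and sending it with putnext().
	 */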
2619 if ((error = strmakedata(iosize, uiop, stp, flags, &mp)) != 0) {
2620 freemsg(mctl);
2621 /*
2622 * map EAGAIN to ENOMEM since EAGAIN means "flow controlled".
2623 */
2624 return (error == EAGAIN ? ENOMEM : error);
2625 }
2626 if (mctl != NULL) {
2627 if (mctl->b_cont == NULL)
2628 mctl->b_cont = mp;
2629 else if (mp != NULL)
2630 linkb(mctl, mp);
2631 mp = mctl;
2632 } else if (mp == NULL) {
2633 return (0);
2634 }
2635
2636 mp->b_flag |= b_flag;
2637 mp->b_band = (uchar_t)pri;
2638
2639 (void) uiodup(uiop, &uiod.d_uio, uiod.d_iov,
2640 sizeof (uiod.d_iov) / sizeof (*uiod.d_iov));
2641 uiod.d_uio.uio_offset = 0;
2642 uiod.d_mp = mp;
2643 error = rwnext(wqp, &uiod);
2644 if (! uiod.d_mp) {
2645 uioskip(uiop, *iosize);
2646 return (error);
2647 }
2648 ASSERT(mp == uiod.d_mp);
2649 if (error == EINVAL) {
2650 /*
2651 * The stream plumbing must have changed while
2652 * we were away, so just turn off rwnext()s.
2653 */
2654 error = 0;
2655 } else if (error == EBUSY || error == EWOULDBLOCK) {
2656 /*
2657 * Couldn't enter a perimeter or took a page fault,
2658 * so fall-back to putnext().
2659 */
2660 error = 0;
2661 } else {
2662 freemsg(mp);
2663 return (error);
2664 }
2665 /* Have to check canput before consuming data from the uio */
2666 if (pri == 0) {
2667 if (!canputnext(wqp) && !(flags & MSG_IGNFLOW)) {
2668 freemsg(mp);
2669 return (EWOULDBLOCK);
2670 }
2671 } else {
2672 if (!bcanputnext(wqp, pri) && !(flags & MSG_IGNFLOW)) {
2673 freemsg(mp);
2674 return (EWOULDBLOCK);
2675 }
2676 }
2677 ASSERT(mp == uiod.d_mp);
2678 /* Copyin data from the uio */
2679 if ((error = struioget(wqp, mp, &uiod, 0)) != 0) {
2680 freemsg(mp);
2681 return (error);
2682 }
2683 uioskip(uiop, *iosize);
2684 if (flags & MSG_IGNFLOW) {
2685 /*
2686 * XXX Hack: Don't get stuck running service procedures.
2687 * This is needed for sockfs when sending the unbind message
2688 * out of the rput procedure - we don't want a put procedure
2689 * to run service procedures.
2690 */
2691 putnext(wqp, mp);
2692 } else {
2693 stream_willservice(stp);
2694 putnext(wqp, mp);
2695 stream_runservice(stp);
2696 }
2697 return (0);
2698 }
2699
2700 /*
2701 * Write attempts to break the write request into messages conforming
2702 * with the minimum and maximum packet sizes set downstream.
2703 *
2704 * Write will not block if the downstream queue is full and
2705 * O_NDELAY is set; otherwise it will block waiting for the queue to get room.
2706 *
2707 * A write of zero bytes gets packaged into a zero length message and sent
2708 * downstream like any other message.
2709 *
2710 * If buffers of the requested sizes are not available, the write will
2711 * sleep until the buffers become available.
2712 *
2713 * Write (if specified) will supply a write offset in a message if it
2714 * makes sense. This can be specified by downstream modules as part of
2715 * a M_SETOPTS message. Write will not supply the write offset if it
2716 * cannot supply any data in a buffer. In other words, write will never
2717 * send down an empty packet due to a write offset.
2718 */
2719 /* ARGSUSED2 */
2720 int
2721 strwrite(struct vnode *vp, struct uio *uiop, cred_t *crp)
2722 {
2723 return (strwrite_common(vp, uiop, crp, 0));
2724 }
2725
2726 /* ARGSUSED2 */
2727 int
2728 strwrite_common(struct vnode *vp, struct uio *uiop, cred_t *crp, int wflag)
2729 {
2730 struct stdata *stp;
2731 struct queue *wqp;
2732 ssize_t rmin, rmax;
2733 ssize_t iosize;
2734 int waitflag;
2735 int tempmode;
2736 int error = 0;
2737 int b_flag;
2738
2739 ASSERT(vp->v_stream);
2740 stp = vp->v_stream;
2741
2742 mutex_enter(&stp->sd_lock);
2743
2744 if ((error = i_straccess(stp, JCWRITE)) != 0) {
2745 mutex_exit(&stp->sd_lock);
2746 return (error);
2747 }
2748
2749 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) {
2750 error = strwriteable(stp, B_TRUE, B_TRUE);
2751 if (error != 0) {
2752 mutex_exit(&stp->sd_lock);
2753 return (error);
2754 }
2755 }
2756
2757 mutex_exit(&stp->sd_lock);
2758
2759 wqp = stp->sd_wrq;
2760
2761	/* get these values from where they are cached in the stream head */
2762 rmin = stp->sd_qn_minpsz;
2763 rmax = stp->sd_qn_maxpsz;
2764
2765 /*
2766 * Check the min/max packet size constraints. If min packet size
2767 * is non-zero, the write cannot be split into multiple messages
2768 * and still guarantee the size constraints.
2769 */
2770 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_IN, "strwrite in:q %p", wqp);
2771
2772 ASSERT((rmax >= 0) || (rmax == INFPSZ));
2773 if (rmax == 0) {
2774 return (0);
2775 }
2776 if (rmin > 0) {
2777 if (uiop->uio_resid < rmin) {
2778 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT,
2779 "strwrite out:q %p out %d error %d",
2780 wqp, 0, ERANGE);
2781 return (ERANGE);
2782 }
2783 if ((rmax != INFPSZ) && (uiop->uio_resid > rmax)) {
2784 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT,
2785 "strwrite out:q %p out %d error %d",
2786 wqp, 1, ERANGE);
2787 return (ERANGE);
2788 }
2789 }
2790
2791 /*
2792 * Do until count satisfied or error.
2793 */
2794 waitflag = WRITEWAIT | wflag;
2795 if (stp->sd_flag & OLDNDELAY)
2796 tempmode = uiop->uio_fmode & ~FNDELAY;
2797 else
2798 tempmode = uiop->uio_fmode;
2799
2800 if (rmax == INFPSZ)
2801 rmax = uiop->uio_resid;
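	/*
	 * With INFPSZ there is no downstream size limit, so the whole
	 * request goes down as a single message (strput may still transfer
	 * less than iosize if sd_maxblk is set).
	 */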
2802
2803 /*
2804 * Note that tempmode does not get used in strput/strmakedata
2805 * but only in strwaitq. The other routines use uio_fmode
2806 * unmodified.
2807 */
2808
2809 /* LINTED: constant in conditional context */
2810 while (1) { /* breaks when uio_resid reaches zero */
2811 /*
2812 * Determine the size of the next message to be
2813 * packaged. May have to break write into several
2814 * messages based on max packet size.
2815 */
2816 iosize = MIN(uiop->uio_resid, rmax);
2817
2818 /*
2819 * Put block downstream when flow control allows it.
2820 */
2821 if ((stp->sd_flag & STRDELIM) && (uiop->uio_resid == iosize))
2822 b_flag = MSGDELIM;
2823 else
2824 b_flag = 0;
2825
2826 for (;;) {
2827 int done = 0;
2828
2829 error = strput(stp, NULL, uiop, &iosize, b_flag, 0, 0);
2830 if (error == 0)
2831 break;
2832 if (error != EWOULDBLOCK)
2833 goto out;
2834
2835 mutex_enter(&stp->sd_lock);
2836 /*
2837 * Check for a missed wakeup.
2838 * Needed since strput did not hold sd_lock across
2839 * the canputnext.
2840 */
2841 if (canputnext(wqp)) {
2842 /* Try again */
2843 mutex_exit(&stp->sd_lock);
2844 continue;
2845 }
2846 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_WAIT,
2847 "strwrite wait:q %p wait", wqp);
2848 if ((error = strwaitq(stp, waitflag, (ssize_t)0,
2849 tempmode, -1, &done)) != 0 || done) {
2850 mutex_exit(&stp->sd_lock);
2851 if ((vp->v_type == VFIFO) &&
2852 (uiop->uio_fmode & FNDELAY) &&
2853 (error == EAGAIN))
2854 error = 0;
2855 goto out;
2856 }
2857 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_WAKE,
2858 "strwrite wake:q %p awakes", wqp);
2859 if ((error = i_straccess(stp, JCWRITE)) != 0) {
2860 mutex_exit(&stp->sd_lock);
2861 goto out;
2862 }
2863 mutex_exit(&stp->sd_lock);
2864 }
2865 waitflag |= NOINTR;
2866 TRACE_2(TR_FAC_STREAMS_FR, TR_STRWRITE_RESID,
2867 "strwrite resid:q %p uiop %p", wqp, uiop);
2868 if (uiop->uio_resid) {
2869 /* Recheck for errors - needed for sockets */
2870 if ((stp->sd_wput_opt & SW_RECHECK_ERR) &&
2871 (stp->sd_flag & (STWRERR|STRHUP|STPLEX))) {
2872 mutex_enter(&stp->sd_lock);
2873 error = strwriteable(stp, B_FALSE, B_TRUE);
2874 mutex_exit(&stp->sd_lock);
2875 if (error != 0)
2876 return (error);
2877 }
2878 continue;
2879 }
2880 break;
2881 }
2882 out:
2883 /*
2884 * For historical reasons, applications expect EAGAIN when a data
2885 * mblk_t cannot be allocated, so change ENOMEM back to EAGAIN.
2886 */
2887 if (error == ENOMEM)
2888 error = EAGAIN;
2889 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT,
2890 "strwrite out:q %p out %d error %d", wqp, 2, error);
2891 return (error);
2892 }
2893
2894 /*
2895 * Stream head write service routine.
2896 * Its job is to wake up any sleeping writers when a queue
2897 * downstream needs data (part of the flow control in putq and getq).
2898 * It also must wake anyone sleeping on a poll().
2899 * For a stream head right below a mux module, it must also invoke the put
2900 * procedure of the next downstream module.
2901 */
2902 int
2903 strwsrv(queue_t *q)
2904 {
2905 struct stdata *stp;
2906 queue_t *tq;
2907 qband_t *qbp;
2908 int i;
2909 qband_t *myqbp;
2910 int isevent;
2911 unsigned char qbf[NBAND]; /* band flushing backenable flags */
2912
2913 TRACE_1(TR_FAC_STREAMS_FR,
2914 TR_STRWSRV, "strwsrv:q %p", q);
2915 stp = (struct stdata *)q->q_ptr;
2916 ASSERT(qclaimed(q));
2917 mutex_enter(&stp->sd_lock);
2918 ASSERT(!(stp->sd_flag & STPLEX));
2919
2920 if (stp->sd_flag & WSLEEP) {
2921 stp->sd_flag &= ~WSLEEP;
2922 cv_broadcast(&q->q_wait);
2923 }
2924 mutex_exit(&stp->sd_lock);
2925
2926 /* The other end of a stream pipe went away. */
2927 if ((tq = q->q_next) == NULL) {
2928 return (0);
2929 }
2930
2931 /* Find the next module forward that has a service procedure */
2932 claimstr(q);
2933 tq = q->q_nfsrv;
2934 ASSERT(tq != NULL);
2935
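	/*
	 * QBACK on our write queue means a downstream queue back-enabled
	 * us; only then do we wake POLLWRNORM pollers and S_WRNORM signal
	 * requesters, and only if the target queue is no longer full.
	 */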
2936 if ((q->q_flag & QBACK)) {
2937 if ((tq->q_flag & QFULL)) {
2938 mutex_enter(QLOCK(tq));
2939 if (!(tq->q_flag & QFULL)) {
2940 mutex_exit(QLOCK(tq));
2941 goto wakeup;
2942 }
2943 /*
2944 * The queue must have become full again. Set QWANTW
2945 * again so strwsrv will be back enabled when
2946 * the queue becomes non-full next time.
2947 */
2948 tq->q_flag |= QWANTW;
2949 mutex_exit(QLOCK(tq));
2950 } else {
2951 wakeup:
2952 pollwakeup(&stp->sd_pollist, POLLWRNORM);
2953 mutex_enter(&stp->sd_lock);
2954 if (stp->sd_sigflags & S_WRNORM)
2955 strsendsig(stp->sd_siglist, S_WRNORM, 0, 0);
2956 mutex_exit(&stp->sd_lock);
2957 }
2958 }
2959
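	/*
	 * Scan the bands: while holding QLOCK, note in qbf[] each band that
	 * was back-enabled (QB_BACK set and no longer full), then issue the
	 * pollwakeups and signals after QLOCK has been dropped.
	 */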
2960 isevent = 0;
2961 i = 1;
2962 bzero((caddr_t)qbf, NBAND);
2963 mutex_enter(QLOCK(tq));
2964 if ((myqbp = q->q_bandp) != NULL)
2965 for (qbp = tq->q_bandp; qbp && myqbp; qbp = qbp->qb_next) {
2966 ASSERT(myqbp);
2967 if ((myqbp->qb_flag & QB_BACK)) {
2968 if (qbp->qb_flag & QB_FULL) {
2969 /*
2970 * The band must have become full again.
2971 * Set QB_WANTW again so strwsrv will
2972 * be back enabled when the band becomes
2973 * non-full next time.
2974 */
2975 qbp->qb_flag |= QB_WANTW;
2976 } else {
2977 isevent = 1;
2978 qbf[i] = 1;
2979 }
2980 }
2981 myqbp = myqbp->qb_next;
2982 i++;
2983 }
2984 mutex_exit(QLOCK(tq));
2985
2986 if (isevent) {
2987 for (i = tq->q_nband; i; i--) {
2988 if (qbf[i]) {
2989 pollwakeup(&stp->sd_pollist, POLLWRBAND);
2990 mutex_enter(&stp->sd_lock);
2991 if (stp->sd_sigflags & S_WRBAND)
2992 strsendsig(stp->sd_siglist, S_WRBAND,
2993 (uchar_t)i, 0);
2994 mutex_exit(&stp->sd_lock);
2995 }
2996 }
2997 }
2998
2999 releasestr(q);
3000 return (0);
3001 }
3002
3003 /*
3004 * Special case of strcopyin/strcopyout for copying
3005 * struct strioctl that can deal with both data
3006 * models.
3007 */
3008
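/*
 * Under _LP64 a 32-bit caller passes a struct strioctl32 whose ic_dp is a
 * caddr32_t; it is widened to a 64-bit char pointer on copyin and narrowed
 * back (with an ASSERT that no bits are lost) on copyout.
 */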
3009 #ifdef _LP64
3010
3011 static int
3012 strcopyin_strioctl(void *from, void *to, int flag, int copyflag)
3013 {
3014 struct strioctl32 strioc32;
3015 struct strioctl *striocp;
3016
3017 if (copyflag & U_TO_K) {
3018 ASSERT((copyflag & K_TO_K) == 0);
3019
3020 if ((flag & FMODELS) == DATAMODEL_ILP32) {
3021 if (copyin(from, &strioc32, sizeof (strioc32)))
3022 return (EFAULT);
3023
3024 striocp = (struct strioctl *)to;
3025 striocp->ic_cmd = strioc32.ic_cmd;
3026 striocp->ic_timout = strioc32.ic_timout;
3027 striocp->ic_len = strioc32.ic_len;
3028 striocp->ic_dp = (char *)(uintptr_t)strioc32.ic_dp;
3029
3030 } else { /* NATIVE data model */
3031 if (copyin(from, to, sizeof (struct strioctl))) {
3032 return (EFAULT);
3033 } else {
3034 return (0);
3035 }
3036 }
3037 } else {
3038 ASSERT(copyflag & K_TO_K);
3039 bcopy(from, to, sizeof (struct strioctl));
3040 }
3041 return (0);
3042 }
3043
3044 static int
3045 strcopyout_strioctl(void *from, void *to, int flag, int copyflag)
3046 {
3047 struct strioctl32 strioc32;
3048 struct strioctl *striocp;
3049
3050 if (copyflag & U_TO_K) {
3051 ASSERT((copyflag & K_TO_K) == 0);
3052
3053 if ((flag & FMODELS) == DATAMODEL_ILP32) {
3054 striocp = (struct strioctl *)from;
3055 strioc32.ic_cmd = striocp->ic_cmd;
3056 strioc32.ic_timout = striocp->ic_timout;
3057 strioc32.ic_len = striocp->ic_len;
3058 strioc32.ic_dp = (caddr32_t)(uintptr_t)striocp->ic_dp;
3059 ASSERT((char *)(uintptr_t)strioc32.ic_dp ==
3060 striocp->ic_dp);
3061
3062 if (copyout(&strioc32, to, sizeof (strioc32)))
3063 return (EFAULT);
3064
3065 } else { /* NATIVE data model */
3066 if (copyout(from, to, sizeof (struct strioctl))) {
3067 return (EFAULT);
3068 } else {
3069 return (0);
3070 }
3071 }
3072 } else {
3073 ASSERT(copyflag & K_TO_K);
3074 bcopy(from, to, sizeof (struct strioctl));
3075 }
3076 return (0);
3077 }
3078
3079 #else /* ! _LP64 */
3080
3081 /* ARGSUSED2 */
3082 static int
3083 strcopyin_strioctl(void *from, void *to, int flag, int copyflag)
3084 {
3085 return (strcopyin(from, to, sizeof (struct strioctl), copyflag));
3086 }
3087
3088 /* ARGSUSED2 */
3089 static int
3090 strcopyout_strioctl(void *from, void *to, int flag, int copyflag)
3091 {
3092 return (strcopyout(from, to, sizeof (struct strioctl), copyflag));
3093 }
3094
3095 #endif /* _LP64 */
3096
3097 /*
3098 * Determine type of job control semantics expected by user. The
3099 * possibilities are:
3100 * JCREAD - Behaves like read() on fd; send SIGTTIN
3101 * JCWRITE - Behaves like write() on fd; send SIGTTOU if TOSTOP set
3102 * JCSETP - Sets a value in the stream; send SIGTTOU, ignore TOSTOP
3103 * JCGETP - Gets a value in the stream; no signals.
3104 * See straccess in strsubr.c for usage of these values.
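 * For example, TIOCSPGRP and I_SETSIG classify as JCSETP (SIGTTOU is sent
 * even when TOSTOP is clear), while an ioctl not listed below, such as
 * TIOCGPGRP, falls through to JCGETP and generates no signals.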
3105 *
3106 * This routine also returns -1 for I_STR as a special case; the
3107 * caller must call again with the real ioctl number for
3108 * classification.
3109 */
3110 static int
3111 job_control_type(int cmd)
3112 {
3113 switch (cmd) {
3114 case I_STR:
3115 return (-1);
3116
3117 case I_RECVFD:
3118 case I_E_RECVFD:
3119 return (JCREAD);
3120
3121 case I_FDINSERT:
3122 case I_SENDFD:
3123 return (JCWRITE);
3124
3125 case TCSETA:
3126 case TCSETAW:
3127 case TCSETAF:
3128 case TCSBRK:
3129 case TCXONC:
3130 case TCFLSH:
3131 case TCDSET: /* Obsolete */
3132 case TIOCSWINSZ:
3133 case TCSETS:
3134 case TCSETSW:
3135 case TCSETSF:
3136 case TIOCSETD:
3137 case TIOCHPCL:
3138 case TIOCSETP:
3139 case TIOCSETN:
3140 case TIOCEXCL:
3141 case TIOCNXCL:
3142 case TIOCFLUSH:
3143 case TIOCSETC:
3144 case TIOCLBIS:
3145 case TIOCLBIC:
3146 case TIOCLSET:
3147 case TIOCSBRK:
3148 case TIOCCBRK:
3149 case TIOCSDTR:
3150 case TIOCCDTR:
3151 case TIOCSLTC:
3152 case TIOCSTOP:
3153 case TIOCSTART:
3154 case TIOCSTI:
3155 case TIOCSPGRP:
3156 case TIOCMSET:
3157 case TIOCMBIS:
3158 case TIOCMBIC:
3159 case TIOCREMOTE:
3160 case TIOCSIGNAL:
3161 case LDSETT:
3162 case LDSMAP: /* Obsolete */
3163 case DIOCSETP:
3164 case I_FLUSH:
3165 case I_SRDOPT:
3166 case I_SETSIG:
3167 case I_SWROPT:
3168 case I_FLUSHBAND:
3169 case I_SETCLTIME:
3170 case I_SERROPT:
3171 case I_ESETSIG:
3172 case FIONBIO:
3173 case FIOASYNC:
3174 case FIOSETOWN:
3175 case JBOOT: /* Obsolete */
3176 case JTERM: /* Obsolete */
3177 case JTIMOM: /* Obsolete */
3178 case JZOMBOOT: /* Obsolete */
3179 case JAGENT: /* Obsolete */
3180 case JTRUN: /* Obsolete */
3181 case JXTPROTO: /* Obsolete */
3182 return (JCSETP);
3183 }
3184
3185 return (JCGETP);
3186 }
3187
3188 /*
3189 * ioctl for streams
3190 */
3191 int
3192 strioctl(struct vnode *vp, int cmd, intptr_t arg, int flag, int copyflag,
3193 cred_t *crp, int *rvalp)
3194 {
3195 struct stdata *stp;
3196 struct strcmd *scp;
3197 struct strioctl strioc;
3198 struct uio uio;
3199 struct iovec iov;
3200 int access;
3201 mblk_t *mp;
3202 int error = 0;
3203 int done = 0;
3204 ssize_t rmin, rmax;
3205 queue_t *wrq;
3206 queue_t *rdq;
3207 boolean_t kioctl = B_FALSE;
3208 uint32_t auditing = AU_AUDITING();
3209
3210 if (flag & FKIOCTL) {
3211 copyflag = K_TO_K;
3212 kioctl = B_TRUE;
3213 }
3214 ASSERT(vp->v_stream);
3215 ASSERT(copyflag == U_TO_K || copyflag == K_TO_K);
3216 stp = vp->v_stream;
3217
3218 TRACE_3(TR_FAC_STREAMS_FR, TR_IOCTL_ENTER,
3219 "strioctl:stp %p cmd %X arg %lX", stp, cmd, arg);
3220
3221 /*
3222 * If the copy is kernel to kernel, make sure that the FNATIVE
3223 * flag is set. After this it would be a serious error to have
3224 * no model flag.
3225 */
3226 if (copyflag == K_TO_K)
3227 flag = (flag & ~FMODELS) | FNATIVE;
3228
3229 ASSERT((flag & FMODELS) != 0);
3230
3231 wrq = stp->sd_wrq;
3232 rdq = _RD(wrq);
3233
3234 access = job_control_type(cmd);
3235
3236 /* We should never see these here, should be handled by iwscn */
3237 if (cmd == SRIOCSREDIR || cmd == SRIOCISREDIR)
3238 return (EINVAL);
3239
3240 mutex_enter(&stp->sd_lock);
3241 if ((access != -1) && ((error = i_straccess(stp, access)) != 0)) {
3242 mutex_exit(&stp->sd_lock);
3243 return (error);
3244 }
3245 mutex_exit(&stp->sd_lock);
3246
3247 /*
3248 * Check for sgttyb-related ioctls first, and complain as
3249 * necessary.
3250 */
3251 switch (cmd) {
3252 case TIOCGETP:
3253 case TIOCSETP:
3254 case TIOCSETN:
3255 if (sgttyb_handling >= 2 && !sgttyb_complaint) {
3256 sgttyb_complaint = B_TRUE;
3257 cmn_err(CE_NOTE,
3258 "application used obsolete TIOC[GS]ET");
3259 }
3260 if (sgttyb_handling >= 3) {
3261 tsignal(curthread, SIGSYS);
3262 return (EIO);
3263 }
3264 break;
3265 }
3266
3267 mutex_enter(&stp->sd_lock);
3268
3269 switch (cmd) {
3270 case I_RECVFD:
3271 case I_E_RECVFD:
3272 case I_PEEK:
3273 case I_NREAD:
3274 case FIONREAD:
3275 case FIORDCHK:
3276 case I_ATMARK:
3277 case FIONBIO:
3278 case FIOASYNC:
3279 if (stp->sd_flag & (STRDERR|STPLEX)) {
3280 error = strgeterr(stp, STRDERR|STPLEX, 0);
3281 if (error != 0) {
3282 mutex_exit(&stp->sd_lock);
3283 return (error);
3284 }
3285 }
3286 break;
3287
3288 default:
3289 if (stp->sd_flag & (STRDERR|STWRERR|STPLEX)) {
3290 error = strgeterr(stp, STRDERR|STWRERR|STPLEX, 0);
3291 if (error != 0) {
3292 mutex_exit(&stp->sd_lock);
3293 return (error);
3294 }
3295 }
3296 }
3297
3298 mutex_exit(&stp->sd_lock);
3299
3300 switch (cmd) {
3301 default:
3302 /*
3303 * The stream head has hardcoded knowledge of a
3304 * miscellaneous collection of terminal-, keyboard- and
3305 * mouse-related ioctls, enumerated below. This hardcoded
3306 * knowledge allows the stream head to automatically
3307 * convert transparent ioctl requests made by userland
3308 * programs into I_STR ioctls which many old STREAMS
3309 * modules and drivers require.
3310 *
3311 * No new ioctls should ever be added to this list.
3312 * Instead, the STREAMS module or driver should be written
3313 * to either handle transparent ioctls or require any
3314 * userland programs to use I_STR ioctls (by returning
3315 * EINVAL to any transparent ioctl requests).
3316 *
3317 * More importantly, removing ioctls from this list should
3318 * be done with the utmost care, since our STREAMS modules
3319 * and drivers *count* on the stream head performing this
3320 * conversion, and thus may panic while processing
3321		 * a transparent ioctl request for one of these ioctls (keep
3322 * in mind that third party modules and drivers may have
3323 * similar problems).
3324 */
3325 if (((cmd & IOCTYPE) == LDIOC) ||
3326 ((cmd & IOCTYPE) == tIOC) ||
3327 ((cmd & IOCTYPE) == TIOC) ||
3328 ((cmd & IOCTYPE) == KIOC) ||
3329 ((cmd & IOCTYPE) == MSIOC) ||
3330 ((cmd & IOCTYPE) == VUIOC)) {
3331 /*
3332 * The ioctl is a tty ioctl - set up strioc buffer
3333 * and call strdoioctl() to do the work.
3334 */
3335 if (stp->sd_flag & STRHUP)
3336 return (ENXIO);
3337 strioc.ic_cmd = cmd;
3338 strioc.ic_timout = INFTIM;
3339
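			/*
			 * For each recognized tty ioctl, the cases below fill
			 * in ic_len/ic_dp with the argument size and address
			 * known to the stream head and hand the request to
			 * strdoioctl(), which does the copyin/copyout on the
			 * module's behalf.
			 */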
3340 switch (cmd) {
3341
3342 case TCXONC:
3343 case TCSBRK:
3344 case TCFLSH:
3345 case TCDSET:
3346 {
3347 int native_arg = (int)arg;
3348 strioc.ic_len = sizeof (int);
3349 strioc.ic_dp = (char *)&native_arg;
3350 return (strdoioctl(stp, &strioc, flag,
3351 K_TO_K, crp, rvalp));
3352 }
3353
3354 case TCSETA:
3355 case TCSETAW:
3356 case TCSETAF:
3357 strioc.ic_len = sizeof (struct termio);
3358 strioc.ic_dp = (char *)arg;
3359 return (strdoioctl(stp, &strioc, flag,
3360 copyflag, crp, rvalp));
3361
3362 case TCSETS:
3363 case TCSETSW:
3364 case TCSETSF:
3365 strioc.ic_len = sizeof (struct termios);
3366 strioc.ic_dp = (char *)arg;
3367 return (strdoioctl(stp, &strioc, flag,
3368 copyflag, crp, rvalp));
3369
3370 case LDSETT:
3371 strioc.ic_len = sizeof (struct termcb);
3372 strioc.ic_dp = (char *)arg;
3373 return (strdoioctl(stp, &strioc, flag,
3374 copyflag, crp, rvalp));
3375
3376 case TIOCSETP:
3377 strioc.ic_len = sizeof (struct sgttyb);
3378 strioc.ic_dp = (char *)arg;
3379 return (strdoioctl(stp, &strioc, flag,
3380 copyflag, crp, rvalp));
3381
3382 case TIOCSTI:
3383 if ((flag & FREAD) == 0 &&
3384 secpolicy_sti(crp) != 0) {
3385 return (EPERM);
3386 }
3387 mutex_enter(&stp->sd_lock);
3388 mutex_enter(&curproc->p_splock);
3389 if (stp->sd_sidp != curproc->p_sessp->s_sidp &&
3390 secpolicy_sti(crp) != 0) {
3391 mutex_exit(&curproc->p_splock);
3392 mutex_exit(&stp->sd_lock);
3393 return (EACCES);
3394 }
3395 mutex_exit(&curproc->p_splock);
3396 mutex_exit(&stp->sd_lock);
3397
3398 strioc.ic_len = sizeof (char);
3399 strioc.ic_dp = (char *)arg;
3400 return (strdoioctl(stp, &strioc, flag,
3401 copyflag, crp, rvalp));
3402
3403 case TIOCSWINSZ:
3404 strioc.ic_len = sizeof (struct winsize);
3405 strioc.ic_dp = (char *)arg;
3406 return (strdoioctl(stp, &strioc, flag,
3407 copyflag, crp, rvalp));
3408
3409 case TIOCSSIZE:
3410 strioc.ic_len = sizeof (struct ttysize);
3411 strioc.ic_dp = (char *)arg;
3412 return (strdoioctl(stp, &strioc, flag,
3413 copyflag, crp, rvalp));
3414
3415 case TIOCSSOFTCAR:
3416 case KIOCTRANS:
3417 case KIOCTRANSABLE:
3418 case KIOCCMD:
3419 case KIOCSDIRECT:
3420 case KIOCSCOMPAT:
3421 case KIOCSKABORTEN:
3422 case KIOCSRPTDELAY:
3423 case KIOCSRPTRATE:
3424 case VUIDSFORMAT:
3425 case TIOCSPPS:
3426 strioc.ic_len = sizeof (int);
3427 strioc.ic_dp = (char *)arg;
3428 return (strdoioctl(stp, &strioc, flag,
3429 copyflag, crp, rvalp));
3430
3431 case KIOCSETKEY:
3432 case KIOCGETKEY:
3433 strioc.ic_len = sizeof (struct kiockey);
3434 strioc.ic_dp = (char *)arg;
3435 return (strdoioctl(stp, &strioc, flag,
3436 copyflag, crp, rvalp));
3437
3438 case KIOCSKEY:
3439 case KIOCGKEY:
3440 strioc.ic_len = sizeof (struct kiockeymap);
3441 strioc.ic_dp = (char *)arg;
3442 return (strdoioctl(stp, &strioc, flag,
3443 copyflag, crp, rvalp));
3444
3445 case KIOCSLED:
3446 /* arg is a pointer to char */
3447 strioc.ic_len = sizeof (char);
3448 strioc.ic_dp = (char *)arg;
3449 return (strdoioctl(stp, &strioc, flag,
3450 copyflag, crp, rvalp));
3451
3452 case MSIOSETPARMS:
3453 strioc.ic_len = sizeof (Ms_parms);
3454 strioc.ic_dp = (char *)arg;
3455 return (strdoioctl(stp, &strioc, flag,
3456 copyflag, crp, rvalp));
3457
3458 case VUIDSADDR:
3459 case VUIDGADDR:
3460 strioc.ic_len = sizeof (struct vuid_addr_probe);
3461 strioc.ic_dp = (char *)arg;
3462 return (strdoioctl(stp, &strioc, flag,
3463 copyflag, crp, rvalp));
3464
3465 /*
3466 * These M_IOCTL's don't require any data to be sent
3467 * downstream, and the driver will allocate and link
3468 * on its own mblk_t upon M_IOCACK -- thus we set
3469 * ic_len to zero and set ic_dp to arg so we know
3470 * where to copyout to later.
3471 */
3472 case TIOCGSOFTCAR:
3473 case TIOCGWINSZ:
3474 case TIOCGSIZE:
3475 case KIOCGTRANS:
3476 case KIOCGTRANSABLE:
3477 case KIOCTYPE:
3478 case KIOCGDIRECT:
3479 case KIOCGCOMPAT:
3480 case KIOCLAYOUT:
3481 case KIOCGLED:
3482 case MSIOGETPARMS:
3483 case MSIOBUTTONS:
3484 case VUIDGFORMAT:
3485 case TIOCGPPS:
3486 case TIOCGPPSEV:
3487 case TCGETA:
3488 case TCGETS:
3489 case LDGETT:
3490 case TIOCGETP:
3491 case KIOCGRPTDELAY:
3492 case KIOCGRPTRATE:
3493 strioc.ic_len = 0;
3494 strioc.ic_dp = (char *)arg;
3495 return (strdoioctl(stp, &strioc, flag,
3496 copyflag, crp, rvalp));
3497 }
3498 }
3499
3500 /*
3501 * Unknown cmd - send it down as a transparent ioctl.
3502 */
3503 strioc.ic_cmd = cmd;
3504 strioc.ic_timout = INFTIM;
3505 strioc.ic_len = TRANSPARENT;
3506 strioc.ic_dp = (char *)&arg;
3507
3508 return (strdoioctl(stp, &strioc, flag, copyflag, crp, rvalp));
3509
3510 case I_STR:
3511 /*
3512 * Stream ioctl. Read in an strioctl buffer from the user
3513 * along with any data specified and send it downstream.
3514		 * Strdoioctl allows only one ioctl message at
3515		 * a time, and waits for the acknowledgement.
3516 */
3517
3518 if (stp->sd_flag & STRHUP)
3519 return (ENXIO);
3520
3521 error = strcopyin_strioctl((void *)arg, &strioc, flag,
3522 copyflag);
3523 if (error != 0)
3524 return (error);
3525
3526 if ((strioc.ic_len < 0) || (strioc.ic_timout < -1))
3527 return (EINVAL);
3528
3529 access = job_control_type(strioc.ic_cmd);
3530 mutex_enter(&stp->sd_lock);
3531 if ((access != -1) &&
3532 ((error = i_straccess(stp, access)) != 0)) {
3533 mutex_exit(&stp->sd_lock);
3534 return (error);
3535 }
3536 mutex_exit(&stp->sd_lock);
3537
3538 /*
3539 * The I_STR facility provides a trap door for malicious
3540 * code to send down bogus streamio(7I) ioctl commands to
3541 * unsuspecting STREAMS modules and drivers which expect to
3542 * only get these messages from the stream head.
3543 * Explicitly prohibit any streamio ioctls which can be
3544 * passed downstream by the stream head. Note that we do
3545 * not block all streamio ioctls because the ioctl
3546 * numberspace is not well managed and thus it's possible
3547 * that a module or driver's ioctl numbers may accidentally
3548 * collide with them.
3549 */
3550 switch (strioc.ic_cmd) {
3551 case I_LINK:
3552 case I_PLINK:
3553 case I_UNLINK:
3554 case I_PUNLINK:
3555 case _I_GETPEERCRED:
3556 case _I_PLINK_LH:
3557 return (EINVAL);
3558 }
3559
3560 error = strdoioctl(stp, &strioc, flag, copyflag, crp, rvalp);
3561 if (error == 0) {
3562 error = strcopyout_strioctl(&strioc, (void *)arg,
3563 flag, copyflag);
3564 }
3565 return (error);
3566
3567 case _I_CMD:
3568 /*
3569 * Like I_STR, but without using M_IOC* messages and without
3570 * copyins/copyouts beyond the passed-in argument.
3571 */
3572 if (stp->sd_flag & STRHUP)
3573 return (ENXIO);
3574
3575 if ((scp = kmem_alloc(sizeof (strcmd_t), KM_NOSLEEP)) == NULL)
3576 return (ENOMEM);
3577
3578 if (copyin((void *)arg, scp, sizeof (strcmd_t))) {
3579 kmem_free(scp, sizeof (strcmd_t));
3580 return (EFAULT);
3581 }
3582
3583 access = job_control_type(scp->sc_cmd);
3584 mutex_enter(&stp->sd_lock);
3585 if (access != -1 && (error = i_straccess(stp, access)) != 0) {
3586 mutex_exit(&stp->sd_lock);
3587 kmem_free(scp, sizeof (strcmd_t));
3588 return (error);
3589 }
3590 mutex_exit(&stp->sd_lock);
3591
3592 *rvalp = 0;
3593 if ((error = strdocmd(stp, scp, crp)) == 0) {
3594 if (copyout(scp, (void *)arg, sizeof (strcmd_t)))
3595 error = EFAULT;
3596 }
3597 kmem_free(scp, sizeof (strcmd_t));
3598 return (error);
3599
3600 case I_NREAD:
3601 /*
3602 * Return number of bytes of data in first message
3603 * in queue in "arg" and return the number of messages
3604 * in queue in return value.
3605 */
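	/*
	 * For example, a caller might do:
	 *	int len;
	 *	int msgs = ioctl(fd, I_NREAD, &len);
	 * after which msgs holds the number of queued messages and len the
	 * number of data bytes in the first of them.
	 */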
3606 {
3607 size_t size;
3608 int retval;
3609 int count = 0;
3610
3611 mutex_enter(QLOCK(rdq));
3612
3613 size = msgdsize(rdq->q_first);
3614 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next)
3615 count++;
3616
3617 mutex_exit(QLOCK(rdq));
3618 if (stp->sd_struiordq) {
3619 infod_t infod;
3620
3621 infod.d_cmd = INFOD_COUNT;
3622 infod.d_count = 0;
3623 if (count == 0) {
3624 infod.d_cmd |= INFOD_FIRSTBYTES;
3625 infod.d_bytes = 0;
3626 }
3627 infod.d_res = 0;
3628 (void) infonext(rdq, &infod);
3629 count += infod.d_count;
3630 if (infod.d_res & INFOD_FIRSTBYTES)
3631 size = infod.d_bytes;
3632 }
3633
3634 /*
3635 * Drop down from size_t to the "int" required by the
3636 * interface. Cap at INT_MAX.
3637 */
3638 retval = MIN(size, INT_MAX);
3639 error = strcopyout(&retval, (void *)arg, sizeof (retval),
3640 copyflag);
3641 if (!error)
3642 *rvalp = count;
3643 return (error);
3644 }
3645
3646 case FIONREAD:
3647 /*
3648 * Return number of bytes of data in all data messages
3649 * in queue in "arg".
3650 */
3651 {
3652 size_t size = 0;
3653 int retval;
3654
3655 mutex_enter(QLOCK(rdq));
3656 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next)
3657 size += msgdsize(mp);
3658 mutex_exit(QLOCK(rdq));
3659
3660 if (stp->sd_struiordq) {
3661 infod_t infod;
3662
3663 infod.d_cmd = INFOD_BYTES;
3664 infod.d_res = 0;
3665 infod.d_bytes = 0;
3666 (void) infonext(rdq, &infod);
3667 size += infod.d_bytes;
3668 }
3669
3670 /*
3671 * Drop down from size_t to the "int" required by the
3672 * interface. Cap at INT_MAX.
3673 */
3674 retval = MIN(size, INT_MAX);
3675 error = strcopyout(&retval, (void *)arg, sizeof (retval),
3676 copyflag);
3677
3678 *rvalp = 0;
3679 return (error);
3680 }
3681 case FIORDCHK:
3682 /*
3683	 * FIORDCHK does not use the arg value (as FIONREAD does);
3684	 * instead the count is returned as the return value. As
3685	 * with I_NREAD, the value may not be accurate but it is
3686	 * safe. The real thing to do is to add the msgdsizes of
3687	 * all data messages up to the first non-data message.
3688 */
3689 {
3690 size_t size = 0;
3691
3692 mutex_enter(QLOCK(rdq));
3693 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next)
3694 size += msgdsize(mp);
3695 mutex_exit(QLOCK(rdq));
3696
3697 if (stp->sd_struiordq) {
3698 infod_t infod;
3699
3700 infod.d_cmd = INFOD_BYTES;
3701 infod.d_res = 0;
3702 infod.d_bytes = 0;
3703 (void) infonext(rdq, &infod);
3704 size += infod.d_bytes;
3705 }
3706
3707 /*
3708 * Since ioctl returns an int, and memory sizes under
3709 * LP64 may not fit, we return INT_MAX if the count was
3710 * actually greater.
3711 */
3712 *rvalp = MIN(size, INT_MAX);
3713 return (0);
3714 }
3715
3716 case I_FIND:
3717 /*
3718 * Get module name.
3719 */
3720 {
3721 char mname[FMNAMESZ + 1];
3722 queue_t *q;
3723
3724 error = (copyflag & U_TO_K ? copyinstr : copystr)((void *)arg,
3725 mname, FMNAMESZ + 1, NULL);
3726 if (error)
3727 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT);
3728
3729 /*
3730 * Return EINVAL if we're handed a bogus module name.
3731 */
3732 if (fmodsw_find(mname, FMODSW_LOAD) == NULL) {
3733 TRACE_0(TR_FAC_STREAMS_FR,
3734 TR_I_CANT_FIND, "couldn't I_FIND");
3735 return (EINVAL);
3736 }
3737
3738 *rvalp = 0;
3739
3740 /* Look downstream to see if module is there. */
3741 claimstr(stp->sd_wrq);
3742 for (q = stp->sd_wrq->q_next; q; q = q->q_next) {
3743 if (q->q_flag & QREADR) {
3744 q = NULL;
3745 break;
3746 }
3747 if (strcmp(mname, Q2NAME(q)) == 0)
3748 break;
3749 }
3750 releasestr(stp->sd_wrq);
3751
3752 *rvalp = (q ? 1 : 0);
3753 return (error);
3754 }
3755
3756 case I_PUSH:
3757 case __I_PUSH_NOCTTY:
3758 /*
3759 * Push a module.
3760 * For the case __I_PUSH_NOCTTY push a module but
3761 * do not allocate controlling tty. See bugid 4025044
3762 */
3763
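	/*
	 * Overall flow: copy in the module name, look it up in fmodsw,
	 * serialize plumbing with strstartplumb(), enforce the nstrpush
	 * limit, attach the module via qattach() (optionally allocating a
	 * controlling tty), and refresh the cached q_minpsz/q_maxpsz values
	 * in the stream head.
	 */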
3764 {
3765 char mname[FMNAMESZ + 1];
3766 fmodsw_impl_t *fp;
3767 dev_t dummydev;
3768
3769 if (stp->sd_flag & STRHUP)
3770 return (ENXIO);
3771
3772 /*
3773 * Get module name and look up in fmodsw.
3774 */
3775 error = (copyflag & U_TO_K ? copyinstr : copystr)((void *)arg,
3776 mname, FMNAMESZ + 1, NULL);
3777 if (error)
3778 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT);
3779
3780 if ((fp = fmodsw_find(mname, FMODSW_HOLD | FMODSW_LOAD)) ==
3781 NULL)
3782 return (EINVAL);
3783
3784 TRACE_2(TR_FAC_STREAMS_FR, TR_I_PUSH,
3785 "I_PUSH:fp %p stp %p", fp, stp);
3786
3787 /*
3788 * If the module is flagged as single-instance, then check
3789 * to see if the module is already pushed. If it is, return
3790 * as if the push was successful.
3791 */
3792 if (fp->f_qflag & _QSINGLE_INSTANCE) {
3793 queue_t *q;
3794
3795 claimstr(stp->sd_wrq);
3796 for (q = stp->sd_wrq->q_next; q; q = q->q_next) {
3797 if (q->q_flag & QREADR) {
3798 q = NULL;
3799 break;
3800 }
3801 if (strcmp(mname, Q2NAME(q)) == 0)
3802 break;
3803 }
3804 releasestr(stp->sd_wrq);
3805 if (q != NULL) {
3806 fmodsw_rele(fp);
3807 return (0);
3808 }
3809 }
3810
3811 if (error = strstartplumb(stp, flag, cmd)) {
3812 fmodsw_rele(fp);
3813 return (error);
3814 }
3815
3816 /*
3817 * See if any more modules can be pushed on this stream.
3818 * Note that this check must be done after strstartplumb()
3819 * since otherwise multiple threads issuing I_PUSHes on
3820 * the same stream will be able to exceed nstrpush.
3821 */
3822 mutex_enter(&stp->sd_lock);
3823 if (stp->sd_pushcnt >= nstrpush) {
3824 fmodsw_rele(fp);
3825 strendplumb(stp);
3826 mutex_exit(&stp->sd_lock);
3827 return (EINVAL);
3828 }
3829 mutex_exit(&stp->sd_lock);
3830
3831 /*
3832 * Push new module and call its open routine
3833 * via qattach(). Modules don't change device
3834 * numbers, so just ignore dummydev here.
3835 */
3836 dummydev = vp->v_rdev;
3837 if ((error = qattach(rdq, &dummydev, 0, crp, fp,
3838 B_FALSE)) == 0) {
3839 if (vp->v_type == VCHR && /* sorry, no pipes allowed */
3840 (cmd == I_PUSH) && (stp->sd_flag & STRISTTY)) {
3841 /*
3842 * try to allocate it as a controlling terminal
3843 */
3844 (void) strctty(stp);
3845 }
3846 }
3847
3848 mutex_enter(&stp->sd_lock);
3849
3850 /*
3851 * As a performance concern we are caching the values of
3852 * q_minpsz and q_maxpsz of the module below the stream
3853 * head in the stream head.
3854 */
3855 mutex_enter(QLOCK(stp->sd_wrq->q_next));
3856 rmin = stp->sd_wrq->q_next->q_minpsz;
3857 rmax = stp->sd_wrq->q_next->q_maxpsz;
3858 mutex_exit(QLOCK(stp->sd_wrq->q_next));
3859
3860 /* Do this processing here as a performance concern */
3861 if (strmsgsz != 0) {
3862 if (rmax == INFPSZ)
3863 rmax = strmsgsz;
3864 else {
3865 if (vp->v_type == VFIFO)
3866 rmax = MIN(PIPE_BUF, rmax);
3867 else rmax = MIN(strmsgsz, rmax);
3868 }
3869 }
3870
3871 mutex_enter(QLOCK(wrq));
3872 stp->sd_qn_minpsz = rmin;
3873 stp->sd_qn_maxpsz = rmax;
3874 mutex_exit(QLOCK(wrq));
3875
3876 strendplumb(stp);
3877 mutex_exit(&stp->sd_lock);
3878 return (error);
3879 }
3880
3881 case I_POP:
3882 {
3883 queue_t *q;
3884
3885 if (stp->sd_flag & STRHUP)
3886 return (ENXIO);
3887 if (!wrq->q_next) /* for broken pipes */
3888 return (EINVAL);
3889
3890 if (error = strstartplumb(stp, flag, cmd))
3891 return (error);
3892
3893 /*
3894 * If there is an anchor on this stream and popping
3895 * the current module would attempt to pop through the
3896 * anchor, then disallow the pop unless we have sufficient
3897 * privileges; take the cheapest (non-locking) check
3898 * first.
3899 */
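		/*
		 * sd_anchor records the push count at which the anchor was
		 * placed, so the pop is refused only when the topmost module
		 * sits exactly at the anchor (sd_pushcnt == sd_anchor).
		 */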
3900 if (secpolicy_ip_config(crp, B_TRUE) != 0 ||
3901 (stp->sd_anchorzone != crgetzoneid(crp))) {
3902 mutex_enter(&stp->sd_lock);
3903 /*
3904 * Anchors only apply if there's at least one
3905 * module on the stream (sd_pushcnt > 0).
3906 */
3907 if (stp->sd_pushcnt > 0 &&
3908 stp->sd_pushcnt == stp->sd_anchor &&
3909 stp->sd_vnode->v_type != VFIFO) {
3910 strendplumb(stp);
3911 mutex_exit(&stp->sd_lock);
3912 if (stp->sd_anchorzone != crgetzoneid(crp))
3913 return (EINVAL);
3914 /* Audit and report error */
3915 return (secpolicy_ip_config(crp, B_FALSE));
3916 }
3917 mutex_exit(&stp->sd_lock);
3918 }
3919
3920 q = wrq->q_next;
3921 TRACE_2(TR_FAC_STREAMS_FR, TR_I_POP,
3922 "I_POP:%p from %p", q, stp);
3923 if (q->q_next == NULL || (q->q_flag & (QREADR|QISDRV))) {
3924 error = EINVAL;
3925 } else {
3926 qdetach(_RD(q), 1, flag, crp, B_FALSE);
3927 error = 0;
3928 }
3929 mutex_enter(&stp->sd_lock);
3930
3931 /*
3932 * As a performance concern we are caching the values of
3933 * q_minpsz and q_maxpsz of the module below the stream
3934 * head in the stream head.
3935 */
3936 mutex_enter(QLOCK(wrq->q_next));
3937 rmin = wrq->q_next->q_minpsz;
3938 rmax = wrq->q_next->q_maxpsz;
3939 mutex_exit(QLOCK(wrq->q_next));
3940
3941 /* Do this processing here as a performance concern */
3942 if (strmsgsz != 0) {
3943 if (rmax == INFPSZ)
3944 rmax = strmsgsz;
3945 else {
3946 if (vp->v_type == VFIFO)
3947 rmax = MIN(PIPE_BUF, rmax);
3948 else rmax = MIN(strmsgsz, rmax);
3949 }
3950 }
3951
3952 mutex_enter(QLOCK(wrq));
3953 stp->sd_qn_minpsz = rmin;
3954 stp->sd_qn_maxpsz = rmax;
3955 mutex_exit(QLOCK(wrq));
3956
3957 /* If we popped through the anchor, then reset the anchor. */
3958 if (stp->sd_pushcnt < stp->sd_anchor) {
3959 stp->sd_anchor = 0;
3960 stp->sd_anchorzone = 0;
3961 }
3962 strendplumb(stp);
3963 mutex_exit(&stp->sd_lock);
3964 return (error);
3965 }
3966
3967 case _I_MUXID2FD:
3968 {
3969 /*
3970		 * Create an fd for an I_PLINK'ed lower stream with a given
3971		 * muxid. With the fd, an application can send down ioctls,
3972 * like I_LIST, to the previously I_PLINK'ed stream. Note
3973 * that after getting the fd, the application has to do an
3974 * I_PUNLINK on the muxid before it can do any operation
3975 * on the lower stream. This is required by spec1170.
3976 *
3977 * The fd used to do this ioctl should point to the same
3978 * controlling device used to do the I_PLINK. If it uses
3979 * a different stream or an invalid muxid, I_MUXID2FD will
3980 * fail. The error code is set to EINVAL.
3981 *
3982		 * The intended use of this interface is the following:
3983		 * an application I_PLINK'ed a stream and then exited. The fd
3984		 * to the lower stream is gone. Another application that
3985		 * wants to get an fd to the lower stream uses I_MUXID2FD.
3986 */
3987 int muxid = (int)arg;
3988 int fd;
3989 linkinfo_t *linkp;
3990 struct file *fp;
3991 netstack_t *ns;
3992 str_stack_t *ss;
3993
3994 /*
3995 * Do not allow the wildcard muxid. This ioctl is not
3996 * intended to find arbitrary link.
3997 */
3998 if (muxid == 0) {
3999 return (EINVAL);
4000 }
4001
4002 ns = netstack_find_by_cred(crp);
4003 ASSERT(ns != NULL);
4004 ss = ns->netstack_str;
4005 ASSERT(ss != NULL);
4006
4007 mutex_enter(&muxifier);
4008 linkp = findlinks(vp->v_stream, muxid, LINKPERSIST, ss);
4009 if (linkp == NULL) {
4010 mutex_exit(&muxifier);
4011 netstack_rele(ss->ss_netstack);
4012 return (EINVAL);
4013 }
4014
4015 if ((fd = ufalloc(0)) == -1) {
4016 mutex_exit(&muxifier);
4017 netstack_rele(ss->ss_netstack);
4018 return (EMFILE);
4019 }
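		/*
		 * Take an extra hold on the lower stream's file pointer and
		 * install it in the newly allocated fd slot.
		 */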
4020 fp = linkp->li_fpdown;
4021 mutex_enter(&fp->f_tlock);
4022 fp->f_count++;
4023 mutex_exit(&fp->f_tlock);
4024 mutex_exit(&muxifier);
4025 setf(fd, fp);
4026 *rvalp = fd;
4027 netstack_rele(ss->ss_netstack);
4028 return (0);
4029 }
4030
4031 case _I_INSERT:
4032 {
4033 /*
4034		 * To insert a module at a given position in a stream.
4035		 * In the first release, only allow a privileged user
4036 * to use this ioctl. Furthermore, the insert is only allowed
4037 * below an anchor if the zoneid is the same as the zoneid
4038 * which created the anchor.
4039 *
4040 * Note that we do not plan to support this ioctl
4041 * on pipes in the first release. We want to learn more
4042 * about the implications of these ioctls before extending
4043 * their support. And we do not think these features are
4044 * valuable for pipes.
4045 */
4046 STRUCT_DECL(strmodconf, strmodinsert);
4047 char mod_name[FMNAMESZ + 1];
4048 fmodsw_impl_t *fp;
4049 dev_t dummydev;
4050 queue_t *tmp_wrq;
4051 int pos;
4052 boolean_t is_insert;
4053
4054 STRUCT_INIT(strmodinsert, flag);
4055 if (stp->sd_flag & STRHUP)
4056 return (ENXIO);
4057 if (STRMATED(stp))
4058 return (EINVAL);
4059 if ((error = secpolicy_net_config(crp, B_FALSE)) != 0)
4060 return (error);
4061 if (stp->sd_anchor != 0 &&
4062 stp->sd_anchorzone != crgetzoneid(crp))
4063 return (EINVAL);
4064
4065 error = strcopyin((void *)arg, STRUCT_BUF(strmodinsert),
4066 STRUCT_SIZE(strmodinsert), copyflag);
4067 if (error)
4068 return (error);
4069
4070 /*
4071 * Get module name and look up in fmodsw.
4072 */
4073 error = (copyflag & U_TO_K ? copyinstr :
4074 copystr)(STRUCT_FGETP(strmodinsert, mod_name),
4075 mod_name, FMNAMESZ + 1, NULL);
4076 if (error)
4077 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT);
4078
4079 if ((fp = fmodsw_find(mod_name, FMODSW_HOLD | FMODSW_LOAD)) ==
4080 NULL)
4081 return (EINVAL);
4082
4083 if (error = strstartplumb(stp, flag, cmd)) {
4084 fmodsw_rele(fp);
4085 return (error);
4086 }
4087
4088 /*
4089 * Is this _I_INSERT just like an I_PUSH? We need to know
4090 * this because we do some optimizations if this is a
4091 * module being pushed.
4092 */
4093 pos = STRUCT_FGET(strmodinsert, pos);
4094 is_insert = (pos != 0);
4095
4096 /*
4097 * Make sure pos is valid. Even though it is not an I_PUSH,
4098 * we impose the same limit on the number of modules in a
4099 * stream.
4100 */
4101 mutex_enter(&stp->sd_lock);
4102 if (stp->sd_pushcnt >= nstrpush || pos < 0 ||
4103 pos > stp->sd_pushcnt) {
4104 fmodsw_rele(fp);
4105 strendplumb(stp);
4106 mutex_exit(&stp->sd_lock);
4107 return (EINVAL);
4108 }
4109 if (stp->sd_anchor != 0) {
4110 /*
4111 * Is this insert below the anchor?
4112 * Pushcnt hasn't been increased yet, hence
4113 * we test for greater than here, and for greater than or
4114 * equal after qattach.
4115 */
4116 if (pos > (stp->sd_pushcnt - stp->sd_anchor) &&
4117 stp->sd_anchorzone != crgetzoneid(crp)) {
4118 fmodsw_rele(fp);
4119 strendplumb(stp);
4120 mutex_exit(&stp->sd_lock);
4121 return (EPERM);
4122 }
4123 }
4124
4125 mutex_exit(&stp->sd_lock);
4126
4127 /*
4128 * First find the correct position at which this module is
4129 * to be inserted. We don't need to call claimstr()
4130 * as the stream should not be changing at this point.
4131 *
4132 * Insert new module and call its open routine
4133 * via qattach(). Modules don't change device
4134 * numbers, so just ignore dummydev here.
4135 */
4136 for (tmp_wrq = stp->sd_wrq; pos > 0;
4137 tmp_wrq = tmp_wrq->q_next, pos--) {
4138 ASSERT(SAMESTR(tmp_wrq));
4139 }
4140 dummydev = vp->v_rdev;
4141 if ((error = qattach(_RD(tmp_wrq), &dummydev, 0, crp,
4142 fp, is_insert)) != 0) {
4143 mutex_enter(&stp->sd_lock);
4144 strendplumb(stp);
4145 mutex_exit(&stp->sd_lock);
4146 return (error);
4147 }
4148
4149 mutex_enter(&stp->sd_lock);
4150
4151 /*
4152 * As a performance optimization we cache the values of
4153 * q_minpsz and q_maxpsz of the module below the stream
4154 * head in the stream head.
4155 */
4156 if (!is_insert) {
4157 mutex_enter(QLOCK(stp->sd_wrq->q_next));
4158 rmin = stp->sd_wrq->q_next->q_minpsz;
4159 rmax = stp->sd_wrq->q_next->q_maxpsz;
4160 mutex_exit(QLOCK(stp->sd_wrq->q_next));
4161
4162 /* Do this processing here as a performance optimization */
4163 if (strmsgsz != 0) {
4164 if (rmax == INFPSZ) {
4165 rmax = strmsgsz;
4166 } else {
4167 rmax = MIN(strmsgsz, rmax);
4168 }
4169 }
4170
4171 mutex_enter(QLOCK(wrq));
4172 stp->sd_qn_minpsz = rmin;
4173 stp->sd_qn_maxpsz = rmax;
4174 mutex_exit(QLOCK(wrq));
4175 }
4176
4177 /*
4178 * Need to update the anchor value if this module is
4179 * inserted below the anchor point.
4180 */
4181 if (stp->sd_anchor != 0) {
4182 pos = STRUCT_FGET(strmodinsert, pos);
4183 if (pos >= (stp->sd_pushcnt - stp->sd_anchor))
4184 stp->sd_anchor++;
4185 }
4186
4187 strendplumb(stp);
4188 mutex_exit(&stp->sd_lock);
4189 return (0);
4190 }
4191
4192 case _I_REMOVE:
4193 {
4194 /*
4195 * Remove a module with a given name from a stream. The
4196 * caller of this ioctl needs to provide both the name and
4197 * the position of the module to be removed. This eliminates
4198 * the ambiguity of removal if a module is inserted/pushed
4199 * multiple times in a stream. In the first release, only
4200 * a privileged user is allowed to use this ioctl.
4201 * Furthermore, the remove is only allowed
4202 * below an anchor if the zoneid is the same as the zoneid
4203 * that created the anchor.
4204 *
4205 * Note that we do not plan to support this ioctl
4206 * on pipes in the first release. We want to learn more
4207 * about the implications of these ioctls before extending
4208 * their support, and we do not think these features are
4209 * valuable for pipes.
4210 *
4211 * Also note that _I_REMOVE cannot be used to remove a
4212 * driver or the stream head.
4213 */
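/*
 * A matching user-level sketch for removal; both the name and the
 * position must identify the module, and the values here are
 * examples only:
 *
 *	struct strmodconf mc;
 *	mc.pos = 1;
 *	mc.mod_name = "bufmod";
 *	(void) ioctl(fd, _I_REMOVE, &mc);
 */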
4214 STRUCT_DECL(strmodconf, strmodremove);
4215 queue_t *q;
4216 int pos;
4217 char mod_name[FMNAMESZ + 1];
4218 boolean_t is_remove;
4219
4220 STRUCT_INIT(strmodremove, flag);
4221 if (stp->sd_flag & STRHUP)
4222 return (ENXIO);
4223 if (STRMATED(stp))
4224 return (EINVAL);
4225 if ((error = secpolicy_net_config(crp, B_FALSE)) != 0)
4226 return (error);
4227 if (stp->sd_anchor != 0 &&
4228 stp->sd_anchorzone != crgetzoneid(crp))
4229 return (EINVAL);
4230
4231 error = strcopyin((void *)arg, STRUCT_BUF(strmodremove),
4232 STRUCT_SIZE(strmodremove), copyflag);
4233 if (error)
4234 return (error);
4235
4236 error = (copyflag & U_TO_K ? copyinstr :
4237 copystr)(STRUCT_FGETP(strmodremove, mod_name),
4238 mod_name, FMNAMESZ + 1, NULL);
4239 if (error)
4240 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT);
4241
4242 if ((error = strstartplumb(stp, flag, cmd)) != 0)
4243 return (error);
4244
4245 /*
4246 * Match the name of the given module to the name of the module
4247 * at the given position.
4248 */
4249 pos = STRUCT_FGET(strmodremove, pos);
4250
4251 is_remove = (pos != 0);
4252 for (q = stp->sd_wrq->q_next; SAMESTR(q) && pos > 0;
4253 q = q->q_next, pos--)
4254 ;
4255 if (pos > 0 || !SAMESTR(q) ||
4256 strcmp(Q2NAME(q), mod_name) != 0) {
4257 mutex_enter(&stp->sd_lock);
4258 strendplumb(stp);
4259 mutex_exit(&stp->sd_lock);
4260 return (EINVAL);
4261 }
4262
4263 /*
4264 * If the position is at or below an anchor, then the zoneid
4265 * must match the zoneid that created the anchor.
4266 */
4267 if (stp->sd_anchor != 0) {
4268 pos = STRUCT_FGET(strmodremove, pos);
4269 if (pos >= (stp->sd_pushcnt - stp->sd_anchor) &&
4270 stp->sd_anchorzone != crgetzoneid(crp)) {
4271 mutex_enter(&stp->sd_lock);
4272 strendplumb(stp);
4273 mutex_exit(&stp->sd_lock);
4274 return (EPERM);
4275 }
4276 }
4277
4278
4279 ASSERT(!(q->q_flag & QREADR));
4280 qdetach(_RD(q), 1, flag, crp, is_remove);
4281
4282 mutex_enter(&stp->sd_lock);
4283
4284 /*
4285 * As a performance optimization we cache the values of
4286 * q_minpsz and q_maxpsz of the module below the stream
4287 * head in the stream head.
4288 */
4289 if (!is_remove) {
4290 mutex_enter(QLOCK(wrq->q_next));
4291 rmin = wrq->q_next->q_minpsz;
4292 rmax = wrq->q_next->q_maxpsz;
4293 mutex_exit(QLOCK(wrq->q_next));
4294
4295 /* Do this processing here as a performance optimization */
4296 if (strmsgsz != 0) {
4297 if (rmax == INFPSZ)
4298 rmax = strmsgsz;
4299 else {
4300 if (vp->v_type == VFIFO)
4301 rmax = MIN(PIPE_BUF, rmax);
4302 else rmax = MIN(strmsgsz, rmax);
4303 }
4304 }
4305
4306 mutex_enter(QLOCK(wrq));
4307 stp->sd_qn_minpsz = rmin;
4308 stp->sd_qn_maxpsz = rmax;
4309 mutex_exit(QLOCK(wrq));
4310 }
4311
4312 /*
4313 * Need to update the anchor value if this module is removed
4314 * at or below the anchor point. If the removed module is at
4315 * the anchor point, remove the anchor for this stream if
4316 * there is no module above the anchor point. Otherwise, if
4317 * the removed module is below the anchor point, decrement the
4318 * anchor point by 1.
4319 */
4320 if (stp->sd_anchor != 0) {
4321 pos = STRUCT_FGET(strmodremove, pos);
4322 if (pos == stp->sd_pushcnt - stp->sd_anchor + 1)
4323 stp->sd_anchor = 0;
4324 else if (pos > (stp->sd_pushcnt - stp->sd_anchor + 1))
4325 stp->sd_anchor--;
4326 }
4327
4328 strendplumb(stp);
4329 mutex_exit(&stp->sd_lock);
4330 return (0);
4331 }
4332
4333 case I_ANCHOR:
4334 /*
4335 * Set the anchor position on the stream to reside at
4336 * the top module (in other words, the top module
4337 * cannot be popped). Anchors with a FIFO make no
4338 * obvious sense, so they're not allowed.
4339 */
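/*
 * Illustrative application-side use, assuming fd refers to a stream
 * whose current top module should be protected:
 *
 *	(void) ioctl(fd, I_ANCHOR, 0);
 *
 * A later I_POP that would remove the anchored module then fails.
 */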
4340 mutex_enter(&stp->sd_lock);
4341
4342 if (stp->sd_vnode->v_type == VFIFO) {
4343 mutex_exit(&stp->sd_lock);
4344 return (EINVAL);
4345 }
4346 /* Only allow the same zoneid to update the anchor */
4347 if (stp->sd_anchor != 0 &&
4348 stp->sd_anchorzone != crgetzoneid(crp)) {
4349 mutex_exit(&stp->sd_lock);
4350 return (EINVAL);
4351 }
4352 stp->sd_anchor = stp->sd_pushcnt;
4353 stp->sd_anchorzone = crgetzoneid(crp);
4354 mutex_exit(&stp->sd_lock);
4355 return (0);
4356
4357 case I_LOOK:
4358 /*
4359 * Get name of first module downstream.
4360 * If no module, return an error.
4361 */
4362 claimstr(wrq);
4363 if (_SAMESTR(wrq) && wrq->q_next->q_next != NULL) {
4364 char *name = Q2NAME(wrq->q_next);
4365
4366 error = strcopyout(name, (void *)arg, strlen(name) + 1,
4367 copyflag);
4368 releasestr(wrq);
4369 return (error);
4370 }
4371 releasestr(wrq);
4372 return (EINVAL);
4373
4374 case I_LINK:
4375 case I_PLINK:
4376 /*
4377 * Link a multiplexor.
4378 */
4379 return (mlink(vp, cmd, (int)arg, crp, rvalp, 0));
4380
4381 case _I_PLINK_LH:
4382 /*
4383 * Link a multiplexor: Call must originate from kernel.
4384 */
4385 if (kioctl)
4386 return (ldi_mlink_lh(vp, cmd, arg, crp, rvalp));
4387
4388 return (EINVAL);
4389 case I_UNLINK:
4390 case I_PUNLINK:
4391 /*
4392 * Unlink a multiplexor.
4393 * If arg is -1, unlink all links for which this is the
4394 * controlling stream. Otherwise, arg is an index number
4395 * for a link to be removed.
4396 */
4397 {
4398 struct linkinfo *linkp;
4399 int native_arg = (int)arg;
4400 int type;
4401 netstack_t *ns;
4402 str_stack_t *ss;
4403
4404 TRACE_1(TR_FAC_STREAMS_FR,
4405 TR_I_UNLINK, "I_UNLINK/I_PUNLINK:%p", stp);
4406 if (vp->v_type == VFIFO) {
4407 return (EINVAL);
4408 }
4409 if (cmd == I_UNLINK)
4410 type = LINKNORMAL;
4411 else /* I_PUNLINK */
4412 type = LINKPERSIST;
4413 if (native_arg == 0) {
4414 return (EINVAL);
4415 }
4416 ns = netstack_find_by_cred(crp);
4417 ASSERT(ns != NULL);
4418 ss = ns->netstack_str;
4419 ASSERT(ss != NULL);
4420
4421 if (native_arg == MUXID_ALL)
4422 error = munlinkall(stp, type, crp, rvalp, ss);
4423 else {
4424 mutex_enter(&muxifier);
4425 if (!(linkp = findlinks(stp, (int)arg, type, ss))) {
4426 /* invalid user supplied index number */
4427 mutex_exit(&muxifier);
4428 netstack_rele(ss->ss_netstack);
4429 return (EINVAL);
4430 }
4431 /* munlink drops the muxifier lock */
4432 error = munlink(stp, linkp, type, crp, rvalp, ss);
4433 }
4434 netstack_rele(ss->ss_netstack);
4435 return (error);
4436 }
4437
4438 case I_FLUSH:
4439 /*
4440 * Send a flush message downstream. The flush message can
4441 * indicate:
4442 * FLUSHR - flush read queue
4443 * FLUSHW - flush write queue
4444 * FLUSHRW - flush read and write queues
4445 */
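/*
 * Typical caller-side sketch, flushing both queues:
 *
 *	(void) ioctl(fd, I_FLUSH, FLUSHRW);
 */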
4446 if (stp->sd_flag & STRHUP)
4447 return (ENXIO);
4448 if (arg & ~FLUSHRW)
4449 return (EINVAL);
4450
4451 for (;;) {
4452 if (putnextctl1(stp->sd_wrq, M_FLUSH, (int)arg)) {
4453 break;
4454 }
4455 if (error = strwaitbuf(1, BPRI_HI)) {
4456 return (error);
4457 }
4458 }
4459
4460 /*
4461 * Send down an unsupported ioctl and wait for the nack
4462 * in order to allow the M_FLUSH to propagate back
4463 * up to the stream head.
4464 * Replaces if (qready()) runqueues();
4465 */
4466 strioc.ic_cmd = -1; /* The unsupported ioctl */
4467 strioc.ic_timout = 0;
4468 strioc.ic_len = 0;
4469 strioc.ic_dp = NULL;
4470 (void) strdoioctl(stp, &strioc, flag, K_TO_K, crp, rvalp);
4471 *rvalp = 0;
4472 return (0);
4473
4474 case I_FLUSHBAND:
4475 {
4476 struct bandinfo binfo;
4477
4478 error = strcopyin((void *)arg, &binfo, sizeof (binfo),
4479 copyflag);
4480 if (error)
4481 return (error);
4482 if (stp->sd_flag & STRHUP)
4483 return (ENXIO);
4484 if (binfo.bi_flag & ~FLUSHRW)
4485 return (EINVAL);
4486 while (!(mp = allocb(2, BPRI_HI))) {
4487 if (error = strwaitbuf(2, BPRI_HI))
4488 return (error);
4489 }
4490 mp->b_datap->db_type = M_FLUSH;
4491 *mp->b_wptr++ = binfo.bi_flag | FLUSHBAND;
4492 *mp->b_wptr++ = binfo.bi_pri;
4493 putnext(stp->sd_wrq, mp);
4494 /*
4495 * Send down an unsupported ioctl and wait for the nack
4496 * in order to allow the M_FLUSH to propagate back
4497 * up to the stream head.
4498 * Replaces if (qready()) runqueues();
4499 */
4500 strioc.ic_cmd = -1; /* The unsupported ioctl */
4501 strioc.ic_timout = 0;
4502 strioc.ic_len = 0;
4503 strioc.ic_dp = NULL;
4504 (void) strdoioctl(stp, &strioc, flag, K_TO_K, crp, rvalp);
4505 *rvalp = 0;
4506 return (0);
4507 }
4508
4509 case I_SRDOPT:
4510 /*
4511 * Set read options
4512 *
4513 * RNORM - default stream mode
4514 * RMSGN - message no discard
4515 * RMSGD - message discard
4516 * RPROTNORM - fail read with EBADMSG for M_[PC]PROTOs
4517 * RPROTDAT - convert M_[PC]PROTOs to M_DATAs
4518 * RPROTDIS - discard M_[PC]PROTOs and retain M_DATAs
4519 */
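/*
 * For example, an application that wants message-nondiscard mode
 * with M_[PC]PROTO contents delivered as data could issue (sketch):
 *
 *	(void) ioctl(fd, I_SRDOPT, RMSGN | RPROTDAT);
 */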
4520 if (arg & ~(RMODEMASK | RPROTMASK))
4521 return (EINVAL);
4522
4523 if ((arg & (RMSGD|RMSGN)) == (RMSGD|RMSGN))
4524 return (EINVAL);
4525
4526 mutex_enter(&stp->sd_lock);
4527 switch (arg & RMODEMASK) {
4528 case RNORM:
4529 stp->sd_read_opt &= ~(RD_MSGDIS | RD_MSGNODIS);
4530 break;
4531 case RMSGD:
4532 stp->sd_read_opt = (stp->sd_read_opt & ~RD_MSGNODIS) |
4533 RD_MSGDIS;
4534 break;
4535 case RMSGN:
4536 stp->sd_read_opt = (stp->sd_read_opt & ~RD_MSGDIS) |
4537 RD_MSGNODIS;
4538 break;
4539 }
4540
4541 switch (arg & RPROTMASK) {
4542 case RPROTNORM:
4543 stp->sd_read_opt &= ~(RD_PROTDAT | RD_PROTDIS);
4544 break;
4545
4546 case RPROTDAT:
4547 stp->sd_read_opt = ((stp->sd_read_opt & ~RD_PROTDIS) |
4548 RD_PROTDAT);
4549 break;
4550
4551 case RPROTDIS:
4552 stp->sd_read_opt = ((stp->sd_read_opt & ~RD_PROTDAT) |
4553 RD_PROTDIS);
4554 break;
4555 }
4556 mutex_exit(&stp->sd_lock);
4557 return (0);
4558
4559 case I_GRDOPT:
4560 /*
4561 * Get the read options and return the value
4562 * to the spot pointed to by arg.
4563 */
4564 {
4565 int rdopt;
4566
4567 rdopt = ((stp->sd_read_opt & RD_MSGDIS) ? RMSGD :
4568 ((stp->sd_read_opt & RD_MSGNODIS) ? RMSGN : RNORM));
4569 rdopt |= ((stp->sd_read_opt & RD_PROTDAT) ? RPROTDAT :
4570 ((stp->sd_read_opt & RD_PROTDIS) ? RPROTDIS : RPROTNORM));
4571
4572 return (strcopyout(&rdopt, (void *)arg, sizeof (int),
4573 copyflag));
4574 }
4575
4576 case I_SERROPT:
4577 /*
4578 * Set error options
4579 *
4580 * RERRNORM - persistent read errors
4581 * RERRNONPERSIST - non-persistent read errors
4582 * WERRNORM - persistent write errors
4583 * WERRNONPERSIST - non-persistent write errors
4584 */
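/*
 * Sketch of a caller selecting non-persistent read errors while
 * keeping write errors persistent:
 *
 *	(void) ioctl(fd, I_SERROPT, RERRNONPERSIST | WERRNORM);
 */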
4585 if (arg & ~(RERRMASK | WERRMASK))
4586 return (EINVAL);
4587
4588 mutex_enter(&stp->sd_lock);
4589 switch (arg & RERRMASK) {
4590 case RERRNORM:
4591 stp->sd_flag &= ~STRDERRNONPERSIST;
4592 break;
4593 case RERRNONPERSIST:
4594 stp->sd_flag |= STRDERRNONPERSIST;
4595 break;
4596 }
4597 switch (arg & WERRMASK) {
4598 case WERRNORM:
4599 stp->sd_flag &= ~STWRERRNONPERSIST;
4600 break;
4601 case WERRNONPERSIST:
4602 stp->sd_flag |= STWRERRNONPERSIST;
4603 break;
4604 }
4605 mutex_exit(&stp->sd_lock);
4606 return (0);
4607
4608 case I_GERROPT:
4609 /*
4610 * Get the error options and return the value
4611 * to the spot pointed to by arg.
4612 */
4613 {
4614 int erropt = 0;
4615
4616 erropt |= (stp->sd_flag & STRDERRNONPERSIST) ? RERRNONPERSIST :
4617 RERRNORM;
4618 erropt |= (stp->sd_flag & STWRERRNONPERSIST) ? WERRNONPERSIST :
4619 WERRNORM;
4620 return (strcopyout(&erropt, (void *)arg, sizeof (int),
4621 copyflag));
4622 }
4623
4624 case I_SETSIG:
4625 /*
4626 * Register the calling proc to receive the SIGPOLL
4627 * signal based on the events given in arg. If
4628 * arg is zero, remove the proc from the register list.
4629 */
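/*
 * For example, to be signaled when ordinary or banded data arrives
 * (sketch only; the caller must also install a SIGPOLL handler):
 *
 *	(void) ioctl(fd, I_SETSIG, S_RDNORM | S_RDBAND);
 */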
4630 {
4631 strsig_t *ssp, *pssp;
4632 struct pid *pidp;
4633
4634 pssp = NULL;
4635 pidp = curproc->p_pidp;
4636 /*
4637 * Hold sd_lock to prevent traversal of sd_siglist while
4638 * it is modified.
4639 */
4640 mutex_enter(&stp->sd_lock);
4641 for (ssp = stp->sd_siglist; ssp && (ssp->ss_pidp != pidp);
4642 pssp = ssp, ssp = ssp->ss_next)
4643 ;
4644
4645 if (arg) {
4646 if (arg & ~(S_INPUT|S_HIPRI|S_MSG|S_HANGUP|S_ERROR|
4647 S_RDNORM|S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG)) {
4648 mutex_exit(&stp->sd_lock);
4649 return (EINVAL);
4650 }
4651 if ((arg & S_BANDURG) && !(arg & S_RDBAND)) {
4652 mutex_exit(&stp->sd_lock);
4653 return (EINVAL);
4654 }
4655
4656 /*
4657 * If proc not already registered, add it
4658 * to list.
4659 */
4660 if (!ssp) {
4661 ssp = kmem_alloc(sizeof (strsig_t), KM_SLEEP);
4662 ssp->ss_pidp = pidp;
4663 ssp->ss_pid = pidp->pid_id;
4664 ssp->ss_next = NULL;
4665 if (pssp)
4666 pssp->ss_next = ssp;
4667 else
4668 stp->sd_siglist = ssp;
4669 mutex_enter(&pidlock);
4670 PID_HOLD(pidp);
4671 mutex_exit(&pidlock);
4672 }
4673
4674 /*
4675 * Set events.
4676 */
4677 ssp->ss_events = (int)arg;
4678 } else {
4679 /*
4680 * Remove proc from register list.
4681 */
4682 if (ssp) {
4683 mutex_enter(&pidlock);
4684 PID_RELE(pidp);
4685 mutex_exit(&pidlock);
4686 if (pssp)
4687 pssp->ss_next = ssp->ss_next;
4688 else
4689 stp->sd_siglist = ssp->ss_next;
4690 kmem_free(ssp, sizeof (strsig_t));
4691 } else {
4692 mutex_exit(&stp->sd_lock);
4693 return (EINVAL);
4694 }
4695 }
4696
4697 /*
4698 * Recalculate OR of sig events.
4699 */
4700 stp->sd_sigflags = 0;
4701 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
4702 stp->sd_sigflags |= ssp->ss_events;
4703 mutex_exit(&stp->sd_lock);
4704 return (0);
4705 }
4706
4707 case I_GETSIG:
4708 /*
4709 * Return (in arg) the current registration of events
4710 * for which the calling proc is to be signaled.
4711 */
4712 {
4713 struct strsig *ssp;
4714 struct pid *pidp;
4715
4716 pidp = curproc->p_pidp;
4717 mutex_enter(&stp->sd_lock);
4718 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
4719 if (ssp->ss_pidp == pidp) {
4720 error = strcopyout(&ssp->ss_events, (void *)arg,
4721 sizeof (int), copyflag);
4722 mutex_exit(&stp->sd_lock);
4723 return (error);
4724 }
4725 mutex_exit(&stp->sd_lock);
4726 return (EINVAL);
4727 }
4728
4729 case I_ESETSIG:
4730 /*
4731 * Register the ss_pid to receive the SIGPOLL
4732 * signal based on the events in the ss_events arg. If
4733 * ss_events is zero, remove the proc from the register list.
4734 */
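/*
 * Extended form, sketched for a caller registering another process;
 * the target pid is a hypothetical example:
 *
 *	struct strsigset ss;
 *	ss.ss_pid = target_pid;
 *	ss.ss_events = S_INPUT | S_HANGUP;
 *	(void) ioctl(fd, I_ESETSIG, &ss);
 */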
4735 {
4736 struct strsig *ssp, *pssp;
4737 struct proc *proc;
4738 struct pid *pidp;
4739 pid_t pid;
4740 struct strsigset ss;
4741
4742 error = strcopyin((void *)arg, &ss, sizeof (ss), copyflag);
4743 if (error)
4744 return (error);
4745
4746 pid = ss.ss_pid;
4747
4748 if (ss.ss_events != 0) {
4749 /*
4750 * Permissions check by sending signal 0.
4751 * Note that when kill fails it does a set_errno
4752 * causing the system call to fail.
4753 */
4754 error = kill(pid, 0);
4755 if (error) {
4756 return (error);
4757 }
4758 }
4759 mutex_enter(&pidlock);
4760 if (pid == 0)
4761 proc = curproc;
4762 else if (pid < 0)
4763 proc = pgfind(-pid);
4764 else
4765 proc = prfind(pid);
4766 if (proc == NULL) {
4767 mutex_exit(&pidlock);
4768 return (ESRCH);
4769 }
4770 if (pid < 0)
4771 pidp = proc->p_pgidp;
4772 else
4773 pidp = proc->p_pidp;
4774 ASSERT(pidp);
4775 /*
4776 * Get a hold on the pid structure while referencing it.
4777 * There is a separate PID_HOLD should it be inserted
4778 * in the list below.
4779 */
4780 PID_HOLD(pidp);
4781 mutex_exit(&pidlock);
4782
4783 pssp = NULL;
4784 /*
4785 * Hold sd_lock to prevent traversal of sd_siglist while
4786 * it is modified.
4787 */
4788 mutex_enter(&stp->sd_lock);
4789 for (ssp = stp->sd_siglist; ssp && (ssp->ss_pid != pid);
4790 pssp = ssp, ssp = ssp->ss_next)
4791 ;
4792
4793 if (ss.ss_events) {
4794 if (ss.ss_events &
4795 ~(S_INPUT|S_HIPRI|S_MSG|S_HANGUP|S_ERROR|
4796 S_RDNORM|S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG)) {
4797 mutex_exit(&stp->sd_lock);
4798 mutex_enter(&pidlock);
4799 PID_RELE(pidp);
4800 mutex_exit(&pidlock);
4801 return (EINVAL);
4802 }
4803 if ((ss.ss_events & S_BANDURG) &&
4804 !(ss.ss_events & S_RDBAND)) {
4805 mutex_exit(&stp->sd_lock);
4806 mutex_enter(&pidlock);
4807 PID_RELE(pidp);
4808 mutex_exit(&pidlock);
4809 return (EINVAL);
4810 }
4811
4812 /*
4813 * If proc not already registered, add it
4814 * to list.
4815 */
4816 if (!ssp) {
4817 ssp = kmem_alloc(sizeof (strsig_t), KM_SLEEP);
4818 ssp->ss_pidp = pidp;
4819 ssp->ss_pid = pid;
4820 ssp->ss_next = NULL;
4821 if (pssp)
4822 pssp->ss_next = ssp;
4823 else
4824 stp->sd_siglist = ssp;
4825 mutex_enter(&pidlock);
4826 PID_HOLD(pidp);
4827 mutex_exit(&pidlock);
4828 }
4829
4830 /*
4831 * Set events.
4832 */
4833 ssp->ss_events = ss.ss_events;
4834 } else {
4835 /*
4836 * Remove proc from register list.
4837 */
4838 if (ssp) {
4839 mutex_enter(&pidlock);
4840 PID_RELE(pidp);
4841 mutex_exit(&pidlock);
4842 if (pssp)
4843 pssp->ss_next = ssp->ss_next;
4844 else
4845 stp->sd_siglist = ssp->ss_next;
4846 kmem_free(ssp, sizeof (strsig_t));
4847 } else {
4848 mutex_exit(&stp->sd_lock);
4849 mutex_enter(&pidlock);
4850 PID_RELE(pidp);
4851 mutex_exit(&pidlock);
4852 return (EINVAL);
4853 }
4854 }
4855
4856 /*
4857 * Recalculate OR of sig events.
4858 */
4859 stp->sd_sigflags = 0;
4860 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
4861 stp->sd_sigflags |= ssp->ss_events;
4862 mutex_exit(&stp->sd_lock);
4863 mutex_enter(&pidlock);
4864 PID_RELE(pidp);
4865 mutex_exit(&pidlock);
4866 return (0);
4867 }
4868
4869 case I_EGETSIG:
4870 /*
4871 * Return (in arg) the current registration of events
4872 * for which the calling proc is to be signaled.
4873 */
4874 {
4875 struct strsig *ssp;
4876 struct proc *proc;
4877 pid_t pid;
4878 struct pid *pidp;
4879 struct strsigset ss;
4880
4881 error = strcopyin((void *)arg, &ss, sizeof (ss), copyflag);
4882 if (error)
4883 return (error);
4884
4885 pid = ss.ss_pid;
4886 mutex_enter(&pidlock);
4887 if (pid == 0)
4888 proc = curproc;
4889 else if (pid < 0)
4890 proc = pgfind(-pid);
4891 else
4892 proc = prfind(pid);
4893 if (proc == NULL) {
4894 mutex_exit(&pidlock);
4895 return (ESRCH);
4896 }
4897 if (pid < 0)
4898 pidp = proc->p_pgidp;
4899 else
4900 pidp = proc->p_pidp;
4901
4902 /* Prevent the pidp from being reassigned */
4903 PID_HOLD(pidp);
4904 mutex_exit(&pidlock);
4905
4906 mutex_enter(&stp->sd_lock);
4907 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
4908 if (ssp->ss_pid == pid) {
4909 ss.ss_pid = ssp->ss_pid;
4910 ss.ss_events = ssp->ss_events;
4911 error = strcopyout(&ss, (void *)arg,
4912 sizeof (struct strsigset), copyflag);
4913 mutex_exit(&stp->sd_lock);
4914 mutex_enter(&pidlock);
4915 PID_RELE(pidp);
4916 mutex_exit(&pidlock);
4917 return (error);
4918 }
4919 mutex_exit(&stp->sd_lock);
4920 mutex_enter(&pidlock);
4921 PID_RELE(pidp);
4922 mutex_exit(&pidlock);
4923 return (EINVAL);
4924 }
4925
4926 case I_PEEK:
4927 {
4928 STRUCT_DECL(strpeek, strpeek);
4929 size_t n;
4930 mblk_t *fmp, *tmp_mp = NULL;
4931
4932 STRUCT_INIT(strpeek, flag);
4933
4934 error = strcopyin((void *)arg, STRUCT_BUF(strpeek),
4935 STRUCT_SIZE(strpeek), copyflag);
4936 if (error)
4937 return (error);
4938
4939 mutex_enter(QLOCK(rdq));
4940 /*
4941 * Skip the invalid messages
4942 */
4943 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next)
4944 if (mp->b_datap->db_type != M_SIG)
4945 break;
4946
4947 /*
4948 * If the user has requested to peek at a high priority message
4949 * and the first message is not high priority, return 0.
4950 */
4951 if (mp != NULL) {
4952 if ((STRUCT_FGET(strpeek, flags) & RS_HIPRI) &&
4953 queclass(mp) == QNORM) {
4954 *rvalp = 0;
4955 mutex_exit(QLOCK(rdq));
4956 return (0);
4957 }
4958 } else if (stp->sd_struiordq == NULL ||
4959 (STRUCT_FGET(strpeek, flags) & RS_HIPRI)) {
4960 /*
4961 * There are no mblks to look at at the stream head and either
4962 * 1) this isn't a synch stream, or
4963 * 2) this is a synch stream but the caller wants high
4964 * priority messages, which are not supported by
4965 * the synch stream (it only supports QNORM).
4966 */
4967 *rvalp = 0;
4968 mutex_exit(QLOCK(rdq));
4969 return (0);
4970 }
4971
4972 fmp = mp;
4973
4974 if (mp && mp->b_datap->db_type == M_PASSFP) {
4975 mutex_exit(QLOCK(rdq));
4976 return (EBADMSG);
4977 }
4978
4979 ASSERT(mp == NULL || mp->b_datap->db_type == M_PCPROTO ||
4980 mp->b_datap->db_type == M_PROTO ||
4981 mp->b_datap->db_type == M_DATA);
4982
4983 if (mp && mp->b_datap->db_type == M_PCPROTO) {
4984 STRUCT_FSET(strpeek, flags, RS_HIPRI);
4985 } else {
4986 STRUCT_FSET(strpeek, flags, 0);
4987 }
4988
4989
4990 if (mp && ((tmp_mp = dupmsg(mp)) == NULL)) {
4991 mutex_exit(QLOCK(rdq));
4992 return (ENOSR);
4993 }
4994 mutex_exit(QLOCK(rdq));
4995
4996 /*
4997 * set mp = tmp_mp, so that I_PEEK processing can continue.
4998 * tmp_mp is used to free the dup'd message.
4999 */
5000 mp = tmp_mp;
5001
5002 uio.uio_fmode = 0;
5003 uio.uio_extflg = UIO_COPY_CACHED;
5004 uio.uio_segflg = (copyflag == U_TO_K) ? UIO_USERSPACE :
5005 UIO_SYSSPACE;
5006 uio.uio_limit = 0;
5007 /*
5008 * First process PROTO blocks, if any.
5009 * If the user doesn't want the ctl info (indicated by
5010 * maxlen <= 0), set len to -1/0 and skip the control blocks.
5011 */
5012 if (STRUCT_FGET(strpeek, ctlbuf.maxlen) < 0)
5013 STRUCT_FSET(strpeek, ctlbuf.len, -1);
5014 else if (STRUCT_FGET(strpeek, ctlbuf.maxlen) == 0)
5015 STRUCT_FSET(strpeek, ctlbuf.len, 0);
5016 else {
5017 int ctl_part = 0;
5018
5019 iov.iov_base = STRUCT_FGETP(strpeek, ctlbuf.buf);
5020 iov.iov_len = STRUCT_FGET(strpeek, ctlbuf.maxlen);
5021 uio.uio_iov = &iov;
5022 uio.uio_resid = iov.iov_len;
5023 uio.uio_loffset = 0;
5024 uio.uio_iovcnt = 1;
5025 while (mp && mp->b_datap->db_type != M_DATA &&
5026 uio.uio_resid >= 0) {
5027 ASSERT(STRUCT_FGET(strpeek, flags) == 0 ?
5028 mp->b_datap->db_type == M_PROTO :
5029 mp->b_datap->db_type == M_PCPROTO);
5030
5031 if ((n = MIN(uio.uio_resid,
5032 mp->b_wptr - mp->b_rptr)) != 0 &&
5033 (error = uiomove((char *)mp->b_rptr, n,
5034 UIO_READ, &uio)) != 0) {
5035 freemsg(tmp_mp);
5036 return (error);
5037 }
5038 ctl_part = 1;
5039 mp = mp->b_cont;
5040 }
5041 /* No ctl message */
5042 if (ctl_part == 0)
5043 STRUCT_FSET(strpeek, ctlbuf.len, -1);
5044 else
5045 STRUCT_FSET(strpeek, ctlbuf.len,
5046 STRUCT_FGET(strpeek, ctlbuf.maxlen) -
5047 uio.uio_resid);
5048 }
5049
5050 /*
5051 * Now process DATA blocks, if any.
5052 * If the user doesn't want the data (indicated by
5053 * maxlen <= 0), set len to -1/0 and skip the data blocks.
5054 */
5055 if (STRUCT_FGET(strpeek, databuf.maxlen) < 0)
5056 STRUCT_FSET(strpeek, databuf.len, -1);
5057 else if (STRUCT_FGET(strpeek, databuf.maxlen) == 0)
5058 STRUCT_FSET(strpeek, databuf.len, 0);
5059 else {
5060 int data_part = 0;
5061
5062 iov.iov_base = STRUCT_FGETP(strpeek, databuf.buf);
5063 iov.iov_len = STRUCT_FGET(strpeek, databuf.maxlen);
5064 uio.uio_iov = &iov;
5065 uio.uio_resid = iov.iov_len;
5066 uio.uio_loffset = 0;
5067 uio.uio_iovcnt = 1;
5068 while (mp && uio.uio_resid) {
5069 if (mp->b_datap->db_type == M_DATA) {
5070 if ((n = MIN(uio.uio_resid,
5071 mp->b_wptr - mp->b_rptr)) != 0 &&
5072 (error = uiomove((char *)mp->b_rptr,
5073 n, UIO_READ, &uio)) != 0) {
5074 freemsg(tmp_mp);
5075 return (error);
5076 }
5077 data_part = 1;
5078 }
5079 ASSERT(data_part == 0 ||
5080 mp->b_datap->db_type == M_DATA);
5081 mp = mp->b_cont;
5082 }
5083 /* No data message */
5084 if (data_part == 0)
5085 STRUCT_FSET(strpeek, databuf.len, -1);
5086 else
5087 STRUCT_FSET(strpeek, databuf.len,
5088 STRUCT_FGET(strpeek, databuf.maxlen) -
5089 uio.uio_resid);
5090 }
5091 freemsg(tmp_mp);
5092
5093 /*
5094 * It is a synch stream and the user wants to get
5095 * data (maxlen > 0).
5096 * The uio setup is done by the code that processes DATA
5097 * blocks above.
5098 */
5099 if ((fmp == NULL) && STRUCT_FGET(strpeek, databuf.maxlen) > 0) {
5100 infod_t infod;
5101
5102 infod.d_cmd = INFOD_COPYOUT;
5103 infod.d_res = 0;
5104 infod.d_uiop = &uio;
5105 error = infonext(rdq, &infod);
5106 if (error == EINVAL || error == EBUSY)
5107 error = 0;
5108 if (error)
5109 return (error);
5110 STRUCT_FSET(strpeek, databuf.len, STRUCT_FGET(strpeek,
5111 databuf.maxlen) - uio.uio_resid);
5112 if (STRUCT_FGET(strpeek, databuf.len) == 0) {
5113 /*
5114 * No data found by the infonext().
5115 */
5116 STRUCT_FSET(strpeek, databuf.len, -1);
5117 }
5118 }
5119 error = strcopyout(STRUCT_BUF(strpeek), (void *)arg,
5120 STRUCT_SIZE(strpeek), copyflag);
5121 if (error) {
5122 return (error);
5123 }
5124 /*
5125 * If no message was retrieved, set the return code to 0;
5126 * otherwise, set it to 1.
5127 */
5128 if (STRUCT_FGET(strpeek, ctlbuf.len) == -1 &&
5129 STRUCT_FGET(strpeek, databuf.len) == -1)
5130 *rvalp = 0;
5131 else
5132 *rvalp = 1;
5133 return (0);
5134 }
5135
5136 case I_FDINSERT:
5137 {
5138 STRUCT_DECL(strfdinsert, strfdinsert);
5139 struct file *resftp;
5140 struct stdata *resstp;
5141 t_uscalar_t ival;
5142 ssize_t msgsize;
5143 struct strbuf mctl;
5144
5145 STRUCT_INIT(strfdinsert, flag);
5146 if (stp->sd_flag & STRHUP)
5147 return (ENXIO);
5148 /*
5149 * STRDERR, STWRERR and STPLEX tested above.
5150 */
5151 error = strcopyin((void *)arg, STRUCT_BUF(strfdinsert),
5152 STRUCT_SIZE(strfdinsert), copyflag);
5153 if (error)
5154 return (error);
5155
5156 if (STRUCT_FGET(strfdinsert, offset) < 0 ||
5157 (STRUCT_FGET(strfdinsert, offset) %
5158 sizeof (t_uscalar_t)) != 0)
5159 return (EINVAL);
5160 if ((resftp = getf(STRUCT_FGET(strfdinsert, fildes))) != NULL) {
5161 if ((resstp = resftp->f_vnode->v_stream) == NULL) {
5162 releasef(STRUCT_FGET(strfdinsert, fildes));
5163 return (EINVAL);
5164 }
5165 } else
5166 return (EINVAL);
5167
5168 mutex_enter(&resstp->sd_lock);
5169 if (resstp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
5170 error = strgeterr(resstp,
5171 STRDERR|STWRERR|STRHUP|STPLEX, 0);
5172 if (error != 0) {
5173 mutex_exit(&resstp->sd_lock);
5174 releasef(STRUCT_FGET(strfdinsert, fildes));
5175 return (error);
5176 }
5177 }
5178 mutex_exit(&resstp->sd_lock);
5179
5180 #ifdef _ILP32
5181 {
5182 queue_t *q;
5183 queue_t *mate = NULL;
5184
5185 /* get read queue of stream terminus */
5186 claimstr(resstp->sd_wrq);
5187 for (q = resstp->sd_wrq->q_next; q->q_next != NULL;
5188 q = q->q_next)
5189 if (!STRMATED(resstp) && STREAM(q) != resstp &&
5190 mate == NULL) {
5191 ASSERT(q->q_qinfo->qi_srvp);
5192 ASSERT(_OTHERQ(q)->q_qinfo->qi_srvp);
5193 claimstr(q);
5194 mate = q;
5195 }
5196 q = _RD(q);
5197 if (mate)
5198 releasestr(mate);
5199 releasestr(resstp->sd_wrq);
5200 ival = (t_uscalar_t)q;
5201 }
5202 #else
5203 ival = (t_uscalar_t)getminor(resftp->f_vnode->v_rdev);
5204 #endif /* _ILP32 */
5205
5206 if (STRUCT_FGET(strfdinsert, ctlbuf.len) <
5207 STRUCT_FGET(strfdinsert, offset) + sizeof (t_uscalar_t)) {
5208 releasef(STRUCT_FGET(strfdinsert, fildes));
5209 return (EINVAL);
5210 }
5211
5212 /*
5213 * Check for legal flag value.
5214 */
5215 if (STRUCT_FGET(strfdinsert, flags) & ~RS_HIPRI) {
5216 releasef(STRUCT_FGET(strfdinsert, fildes));
5217 return (EINVAL);
5218 }
5219
5220 /* get these values from those cached in the stream head */
5221 mutex_enter(QLOCK(stp->sd_wrq));
5222 rmin = stp->sd_qn_minpsz;
5223 rmax = stp->sd_qn_maxpsz;
5224 mutex_exit(QLOCK(stp->sd_wrq));
5225
5226 /*
5227 * Make sure ctl and data sizes together fall within
5228 * the limits of the max and min receive packet sizes
5229 * and do not exceed system limit. A negative data
5230 * length means that no data part is to be sent.
5231 */
5232 ASSERT((rmax >= 0) || (rmax == INFPSZ));
5233 if (rmax == 0) {
5234 releasef(STRUCT_FGET(strfdinsert, fildes));
5235 return (ERANGE);
5236 }
5237 if ((msgsize = STRUCT_FGET(strfdinsert, databuf.len)) < 0)
5238 msgsize = 0;
5239 if ((msgsize < rmin) ||
5240 ((msgsize > rmax) && (rmax != INFPSZ)) ||
5241 (STRUCT_FGET(strfdinsert, ctlbuf.len) > strctlsz)) {
5242 releasef(STRUCT_FGET(strfdinsert, fildes));
5243 return (ERANGE);
5244 }
5245
5246 mutex_enter(&stp->sd_lock);
5247 while (!(STRUCT_FGET(strfdinsert, flags) & RS_HIPRI) &&
5248 !canputnext(stp->sd_wrq)) {
5249 if ((error = strwaitq(stp, WRITEWAIT, (ssize_t)0,
5250 flag, -1, &done)) != 0 || done) {
5251 mutex_exit(&stp->sd_lock);
5252 releasef(STRUCT_FGET(strfdinsert, fildes));
5253 return (error);
5254 }
5255 if ((error = i_straccess(stp, access)) != 0) {
5256 mutex_exit(&stp->sd_lock);
5257 releasef(
5258 STRUCT_FGET(strfdinsert, fildes));
5259 return (error);
5260 }
5261 }
5262 mutex_exit(&stp->sd_lock);
5263
5264 /*
5265 * Copy strfdinsert.ctlbuf into native form of
5266 * ctlbuf to pass down into strmakemsg().
5267 */
5268 mctl.maxlen = STRUCT_FGET(strfdinsert, ctlbuf.maxlen);
5269 mctl.len = STRUCT_FGET(strfdinsert, ctlbuf.len);
5270 mctl.buf = STRUCT_FGETP(strfdinsert, ctlbuf.buf);
5271
5272 iov.iov_base = STRUCT_FGETP(strfdinsert, databuf.buf);
5273 iov.iov_len = STRUCT_FGET(strfdinsert, databuf.len);
5274 uio.uio_iov = &iov;
5275 uio.uio_iovcnt = 1;
5276 uio.uio_loffset = 0;
5277 uio.uio_segflg = (copyflag == U_TO_K) ? UIO_USERSPACE :
5278 UIO_SYSSPACE;
5279 uio.uio_fmode = 0;
5280 uio.uio_extflg = UIO_COPY_CACHED;
5281 uio.uio_resid = iov.iov_len;
5282 if ((error = strmakemsg(&mctl,
5283 &msgsize, &uio, stp,
5284 STRUCT_FGET(strfdinsert, flags), &mp)) != 0 || !mp) {
5285 STRUCT_FSET(strfdinsert, databuf.len, msgsize);
5286 releasef(STRUCT_FGET(strfdinsert, fildes));
5287 return (error);
5288 }
5289
5290 STRUCT_FSET(strfdinsert, databuf.len, msgsize);
5291
5292 /*
5293 * Place the possibly reencoded queue pointer 'offset' bytes
5294 * from the start of the control portion of the message.
5295 */
5296 *((t_uscalar_t *)(mp->b_rptr +
5297 STRUCT_FGET(strfdinsert, offset))) = ival;
5298
5299 /*
5300 * Put message downstream.
5301 */
5302 stream_willservice(stp);
5303 putnext(stp->sd_wrq, mp);
5304 stream_runservice(stp);
5305 releasef(STRUCT_FGET(strfdinsert, fildes));
5306 return (error);
5307 }
5308
5309 case I_SENDFD:
5310 {
5311 struct file *fp;
5312
5313 if ((fp = getf((int)arg)) == NULL)
5314 return (EBADF);
5315 error = do_sendfp(stp, fp, crp);
5316 if (auditing) {
5317 audit_fdsend((int)arg, fp, error);
5318 }
5319 releasef((int)arg);
5320 return (error);
5321 }
5322
5323 case I_RECVFD:
5324 case I_E_RECVFD:
5325 {
5326 struct k_strrecvfd *srf;
5327 int i, fd;
5328
5329 mutex_enter(&stp->sd_lock);
5330 while (!(mp = getq(rdq))) {
5331 if (stp->sd_flag & (STRHUP|STREOF)) {
5332 mutex_exit(&stp->sd_lock);
5333 return (ENXIO);
5334 }
5335 if ((error = strwaitq(stp, GETWAIT, (ssize_t)0,
5336 flag, -1, &done)) != 0 || done) {
5337 mutex_exit(&stp->sd_lock);
5338 return (error);
5339 }
5340 if ((error = i_straccess(stp, access)) != 0) {
5341 mutex_exit(&stp->sd_lock);
5342 return (error);
5343 }
5344 }
5345 if (mp->b_datap->db_type != M_PASSFP) {
5346 putback(stp, rdq, mp, mp->b_band);
5347 mutex_exit(&stp->sd_lock);
5348 return (EBADMSG);
5349 }
5350 mutex_exit(&stp->sd_lock);
5351
5352 srf = (struct k_strrecvfd *)mp->b_rptr;
5353 if ((fd = ufalloc(0)) == -1) {
5354 mutex_enter(&stp->sd_lock);
5355 putback(stp, rdq, mp, mp->b_band);
5356 mutex_exit(&stp->sd_lock);
5357 return (EMFILE);
5358 }
5359 if (cmd == I_RECVFD) {
5360 struct o_strrecvfd ostrfd;
5361
5362 /* check to see if uid/gid values are too large. */
5363
5364 if (srf->uid > (o_uid_t)USHRT_MAX ||
5365 srf->gid > (o_gid_t)USHRT_MAX) {
5366 mutex_enter(&stp->sd_lock);
5367 putback(stp, rdq, mp, mp->b_band);
5368 mutex_exit(&stp->sd_lock);
5369 setf(fd, NULL); /* release fd entry */
5370 return (EOVERFLOW);
5371 }
5372
5373 ostrfd.fd = fd;
5374 ostrfd.uid = (o_uid_t)srf->uid;
5375 ostrfd.gid = (o_gid_t)srf->gid;
5376
5377 /* Null the filler bits */
5378 for (i = 0; i < 8; i++)
5379 ostrfd.fill[i] = 0;
5380
5381 error = strcopyout(&ostrfd, (void *)arg,
5382 sizeof (struct o_strrecvfd), copyflag);
5383 } else { /* I_E_RECVFD */
5384 struct strrecvfd strfd;
5385
5386 strfd.fd = fd;
5387 strfd.uid = srf->uid;
5388 strfd.gid = srf->gid;
5389
5390 /* null the filler bits */
5391 for (i = 0; i < 8; i++)
5392 strfd.fill[i] = 0;
5393
5394 error = strcopyout(&strfd, (void *)arg,
5395 sizeof (struct strrecvfd), copyflag);
5396 }
5397
5398 if (error) {
5399 setf(fd, NULL); /* release fd entry */
5400 mutex_enter(&stp->sd_lock);
5401 putback(stp, rdq, mp, mp->b_band);
5402 mutex_exit(&stp->sd_lock);
5403 return (error);
5404 }
5405 if (auditing) {
5406 audit_fdrecv(fd, srf->fp);
5407 }
5408
5409 /*
5410 * Always increment f_count since the freemsg() below will
5411 * always call free_passfp() which performs a closef().
5412 */
5413 mutex_enter(&srf->fp->f_tlock);
5414 srf->fp->f_count++;
5415 mutex_exit(&srf->fp->f_tlock);
5416 setf(fd, srf->fp);
5417 freemsg(mp);
5418 return (0);
5419 }
5420
5421 case I_SWROPT:
5422 /*
5423 * Set/clear the write options. arg is a bit
5424 * mask with any of the following bits set...
5425 * SNDZERO - send a zero-length message
5426 * SNDPIPE - send SIGPIPE to the process if
5427 * sd_werror is set and the process is
5428 * doing a write or putmsg.
5429 * The new stream head write options should reflect
5430 * what is in arg.
5431 */
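/*
 * Caller-side sketch enabling both options:
 *
 *	(void) ioctl(fd, I_SWROPT, SNDZERO | SNDPIPE);
 */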
5432 if (arg & ~(SNDZERO|SNDPIPE))
5433 return (EINVAL);
5434
5435 mutex_enter(&stp->sd_lock);
5436 stp->sd_wput_opt &= ~(SW_SIGPIPE|SW_SNDZERO);
5437 if (arg & SNDZERO)
5438 stp->sd_wput_opt |= SW_SNDZERO;
5439 if (arg & SNDPIPE)
5440 stp->sd_wput_opt |= SW_SIGPIPE;
5441 mutex_exit(&stp->sd_lock);
5442 return (0);
5443
5444 case I_GWROPT:
5445 {
5446 int wropt = 0;
5447
5448 if (stp->sd_wput_opt & SW_SNDZERO)
5449 wropt |= SNDZERO;
5450 if (stp->sd_wput_opt & SW_SIGPIPE)
5451 wropt |= SNDPIPE;
5452 return (strcopyout(&wropt, (void *)arg, sizeof (wropt),
5453 copyflag));
5454 }
5455
5456 case I_LIST:
5457 /*
5458 * Return all the modules found on this stream,
5459 * up to the driver. If the argument is NULL, return the
5460 * number of modules (including the driver). If the argument
5461 * is not NULL, copy the names into the structure
5462 * provided.
5463 */
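/*
 * Sketch of the two-step use from an application: get the count,
 * then fetch the names (error handling omitted):
 *
 *	int n = ioctl(fd, I_LIST, NULL);
 *	struct str_mlist *ml = calloc(n, sizeof (*ml));
 *	struct str_list sl = { n, ml };
 *	(void) ioctl(fd, I_LIST, &sl);
 */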
5464
5465 {
5466 queue_t *q;
5467 char *qname;
5468 int i, nmods;
5469 struct str_mlist *mlist;
5470 STRUCT_DECL(str_list, strlist);
5471
5472 if (arg == NULL) { /* Return number of modules plus driver */
5473 if (stp->sd_vnode->v_type == VFIFO)
5474 *rvalp = stp->sd_pushcnt;
5475 else
5476 *rvalp = stp->sd_pushcnt + 1;
5477 return (0);
5478 }
5479
5480 STRUCT_INIT(strlist, flag);
5481
5482 error = strcopyin((void *)arg, STRUCT_BUF(strlist),
5483 STRUCT_SIZE(strlist), copyflag);
5484 if (error != 0)
5485 return (error);
5486
5487 mlist = STRUCT_FGETP(strlist, sl_modlist);
5488 nmods = STRUCT_FGET(strlist, sl_nmods);
5489 if (nmods <= 0)
5490 return (EINVAL);
5491
5492 claimstr(stp->sd_wrq);
5493 q = stp->sd_wrq;
5494 for (i = 0; i < nmods && _SAMESTR(q); i++, q = q->q_next) {
5495 qname = Q2NAME(q->q_next);
5496 error = strcopyout(qname, &mlist[i], strlen(qname) + 1,
5497 copyflag);
5498 if (error != 0) {
5499 releasestr(stp->sd_wrq);
5500 return (error);
5501 }
5502 }
5503 releasestr(stp->sd_wrq);
5504 return (strcopyout(&i, (void *)arg, sizeof (int), copyflag));
5505 }
5506
5507 case I_CKBAND:
5508 {
5509 queue_t *q;
5510 qband_t *qbp;
5511
5512 if ((arg < 0) || (arg >= NBAND))
5513 return (EINVAL);
5514 q = _RD(stp->sd_wrq);
5515 mutex_enter(QLOCK(q));
5516 if (arg > (int)q->q_nband) {
5517 *rvalp = 0;
5518 } else {
5519 if (arg == 0) {
5520 if (q->q_first)
5521 *rvalp = 1;
5522 else
5523 *rvalp = 0;
5524 } else {
5525 qbp = q->q_bandp;
5526 while (--arg > 0)
5527 qbp = qbp->qb_next;
5528 if (qbp->qb_first)
5529 *rvalp = 1;
5530 else
5531 *rvalp = 0;
5532 }
5533 }
5534 mutex_exit(QLOCK(q));
5535 return (0);
5536 }
5537
5538 case I_GETBAND:
5539 {
5540 int intpri;
5541 queue_t *q;
5542
5543 q = _RD(stp->sd_wrq);
5544 mutex_enter(QLOCK(q));
5545 mp = q->q_first;
5546 if (!mp) {
5547 mutex_exit(QLOCK(q));
5548 return (ENODATA);
5549 }
5550 intpri = (int)mp->b_band;
5551 error = strcopyout(&intpri, (void *)arg, sizeof (int),
5552 copyflag);
5553 mutex_exit(QLOCK(q));
5554 return (error);
5555 }
5556
5557 case I_ATMARK:
5558 {
5559 queue_t *q;
5560
5561 if (arg & ~(ANYMARK|LASTMARK))
5562 return (EINVAL);
5563 q = _RD(stp->sd_wrq);
5564 mutex_enter(&stp->sd_lock);
5565 if ((stp->sd_flag & STRATMARK) && (arg == ANYMARK)) {
5566 *rvalp = 1;
5567 } else {
5568 mutex_enter(QLOCK(q));
5569 mp = q->q_first;
5570
5571 if (mp == NULL)
5572 *rvalp = 0;
5573 else if ((arg == ANYMARK) && (mp->b_flag & MSGMARK))
5574 *rvalp = 1;
5575 else if ((arg == LASTMARK) && (mp == stp->sd_mark))
5576 *rvalp = 1;
5577 else
5578 *rvalp = 0;
5579 mutex_exit(QLOCK(q));
5580 }
5581 mutex_exit(&stp->sd_lock);
5582 return (0);
5583 }
5584
5585 case I_CANPUT:
5586 {
5587 char band;
5588
5589 if ((arg < 0) || (arg >= NBAND))
5590 return (EINVAL);
5591 band = (char)arg;
5592 *rvalp = bcanputnext(stp->sd_wrq, band);
5593 return (0);
5594 }
5595
5596 case I_SETCLTIME:
5597 {
5598 int closetime;
5599
5600 error = strcopyin((void *)arg, &closetime, sizeof (int),
5601 copyflag);
5602 if (error)
5603 return (error);
5604 if (closetime < 0)
5605 return (EINVAL);
5606
5607 stp->sd_closetime = closetime;
5608 return (0);
5609 }
5610
5611 case I_GETCLTIME:
5612 {
5613 int closetime;
5614
5615 closetime = stp->sd_closetime;
5616 return (strcopyout(&closetime, (void *)arg, sizeof (int),
5617 copyflag));
5618 }
5619
5620 case TIOCGSID:
5621 {
5622 pid_t sid;
5623
5624 mutex_enter(&stp->sd_lock);
5625 if (stp->sd_sidp == NULL) {
5626 mutex_exit(&stp->sd_lock);
5627 return (ENOTTY);
5628 }
5629 sid = stp->sd_sidp->pid_id;
5630 mutex_exit(&stp->sd_lock);
5631 return (strcopyout(&sid, (void *)arg, sizeof (pid_t),
5632 copyflag));
5633 }
5634
5635 case TIOCSPGRP:
5636 {
5637 pid_t pgrp;
5638 proc_t *q;
5639 pid_t sid, fg_pgid, bg_pgid;
5640
5641 if (error = strcopyin((void *)arg, &pgrp, sizeof (pid_t),
5642 copyflag))
5643 return (error);
5644 mutex_enter(&stp->sd_lock);
5645 mutex_enter(&pidlock);
5646 if (stp->sd_sidp != ttoproc(curthread)->p_sessp->s_sidp) {
5647 mutex_exit(&pidlock);
5648 mutex_exit(&stp->sd_lock);
5649 return (ENOTTY);
5650 }
5651 if (pgrp == stp->sd_pgidp->pid_id) {
5652 mutex_exit(&pidlock);
5653 mutex_exit(&stp->sd_lock);
5654 return (0);
5655 }
5656 if (pgrp <= 0 || pgrp >= maxpid) {
5657 mutex_exit(&pidlock);
5658 mutex_exit(&stp->sd_lock);
5659 return (EINVAL);
5660 }
5661 if ((q = pgfind(pgrp)) == NULL ||
5662 q->p_sessp != ttoproc(curthread)->p_sessp) {
5663 mutex_exit(&pidlock);
5664 mutex_exit(&stp->sd_lock);
5665 return (EPERM);
5666 }
5667 sid = stp->sd_sidp->pid_id;
5668 fg_pgid = q->p_pgrp;
5669 bg_pgid = stp->sd_pgidp->pid_id;
5670 CL_SET_PROCESS_GROUP(curthread, sid, bg_pgid, fg_pgid);
5671 PID_RELE(stp->sd_pgidp);
5672 ctty_clear_sighuped();
5673 stp->sd_pgidp = q->p_pgidp;
5674 PID_HOLD(stp->sd_pgidp);
5675 mutex_exit(&pidlock);
5676 mutex_exit(&stp->sd_lock);
5677 return (0);
5678 }
5679
5680 case TIOCGPGRP:
5681 {
5682 pid_t pgrp;
5683
5684 mutex_enter(&stp->sd_lock);
5685 if (stp->sd_sidp == NULL) {
5686 mutex_exit(&stp->sd_lock);
5687 return (ENOTTY);
5688 }
5689 pgrp = stp->sd_pgidp->pid_id;
5690 mutex_exit(&stp->sd_lock);
5691 return (strcopyout(&pgrp, (void *)arg, sizeof (pid_t),
5692 copyflag));
5693 }
5694
5695 case TIOCSCTTY:
5696 {
5697 return (strctty(stp));
5698 }
5699
5700 case TIOCNOTTY:
5701 {
5702 /* freectty() always assumes curproc. */
5703 if (freectty(B_FALSE) != 0)
5704 return (0);
5705 return (ENOTTY);
5706 }
5707
5708 case FIONBIO:
5709 case FIOASYNC:
5710 return (0); /* handled by the upper layer */
5711 }
5712 }
5713
5714 /*
5715 * Custom free routine used for M_PASSFP messages.
5716 */
5717 static void
5718 free_passfp(struct k_strrecvfd *srf)
5719 {
5720 (void) closef(srf->fp);
5721 kmem_free(srf, sizeof (struct k_strrecvfd) + sizeof (frtn_t));
5722 }
5723
5724 /* ARGSUSED */
5725 int
5726 do_sendfp(struct stdata *stp, struct file *fp, struct cred *cr)
5727 {
5728 queue_t *qp, *nextqp;
5729 struct k_strrecvfd *srf;
5730 mblk_t *mp;
5731 frtn_t *frtnp;
5732 size_t bufsize;
5733 queue_t *mate = NULL;
5734 syncq_t *sq = NULL;
5735 int retval = 0;
5736
5737 if (stp->sd_flag & STRHUP)
5738 return (ENXIO);
5739
5740 claimstr(stp->sd_wrq);
5741
5742 /* Fastpath, we have a pipe, and we are already mated, use it. */
5743 if (STRMATED(stp)) {
5744 qp = _RD(stp->sd_mate->sd_wrq);
5745 claimstr(qp);
5746 mate = qp;
5747 } else { /* Not already mated. */
5748
5749 /*
5750 * Walk the stream to the end of this one.
5751 * This assumes that the claimstr() will prevent
5752 * the plumbing between the stream head and the
5753 * driver from changing.
5754 */
5755 qp = stp->sd_wrq;
5756
5757 /*
5758 * Loop until we reach the end of this stream.
5759 * On completion, qp points to the write queue
5760 * at the end of the stream, or the read queue
5761 * at the stream head if this is a fifo.
5762 */
5763 while (((qp = qp->q_next) != NULL) && _SAMESTR(qp))
5764 ;
5765
5766 /*
5767 * Just in case we get a q_next which is NULL, but
5768 * not at the end of the stream. This is actually
5769 * broken, so we set an assert to catch it in
5770 * debug, and set an error and return if not debug.
5771 */
5772 ASSERT(qp);
5773 if (qp == NULL) {
5774 releasestr(stp->sd_wrq);
5775 return (EINVAL);
5776 }
5777
5778 /*
5779 * Enter the syncq for the driver, so (hopefully)
5780 * the queue values will not change on us.
5781 * XXXX - This will only prevent the race IFF only
5782 * the write side modifies the q_next member, and
5783 * the put procedure is protected by at least
5784 * MT_PERQ.
5785 */
5786 if ((sq = qp->q_syncq) != NULL)
5787 entersq(sq, SQ_PUT);
5788
5789 /* Now get the q_next value from this qp. */
5790 nextqp = qp->q_next;
5791
5792 /*
5793 * If nextqp exists and the other stream is different
5794 * from this one claim the stream, set the mate, and
5795 * get the read queue at the stream head of the other
5796 * stream. Assumes that nextqp was at least valid when
5797 * we got it. Hopefully the entersq of the driver
5798 * will prevent it from changing on us.
5799 */
5800 if ((nextqp != NULL) && (STREAM(nextqp) != stp)) {
5801 ASSERT(qp->q_qinfo->qi_srvp);
5802 ASSERT(_OTHERQ(qp)->q_qinfo->qi_srvp);
5803 ASSERT(_OTHERQ(qp->q_next)->q_qinfo->qi_srvp);
5804 claimstr(nextqp);
5805
5806 /* Make sure we still have a q_next */
5807 if (nextqp != qp->q_next) {
5808 releasestr(stp->sd_wrq);
5809 releasestr(nextqp);
5810 return (EINVAL);
5811 }
5812
5813 qp = _RD(STREAM(nextqp)->sd_wrq);
5814 mate = qp;
5815 }
5816 /* If we entered the syncq above, leave it. */
5817 if (sq != NULL)
5818 leavesq(sq, SQ_PUT);
5819 } /* STRMATED(STP) */
5820
5821 /* XXX prevents substitution of the ops vector */
5822 if (qp->q_qinfo != &strdata && qp->q_qinfo != &fifo_strdata) {
5823 retval = EINVAL;
5824 goto out;
5825 }
5826
5827 if (qp->q_flag & QFULL) {
5828 retval = EAGAIN;
5829 goto out;
5830 }
5831
5832 /*
5833 * Since M_PASSFP messages include a file descriptor, we use
5834 * esballoc() and specify a custom free routine (free_passfp()) that
5835 * will close the descriptor as part of freeing the message. For
5836 * convenience, we stash the frtn_t right after the data block.
5837 */
5838 bufsize = sizeof (struct k_strrecvfd) + sizeof (frtn_t);
5839 srf = kmem_alloc(bufsize, KM_NOSLEEP);
5840 if (srf == NULL) {
5841 retval = EAGAIN;
5842 goto out;
5843 }
5844
5845 frtnp = (frtn_t *)(srf + 1);
5846 frtnp->free_arg = (caddr_t)srf;
5847 frtnp->free_func = free_passfp;
5848
5849 mp = esballoc((uchar_t *)srf, bufsize, BPRI_MED, frtnp);
5850 if (mp == NULL) {
5851 kmem_free(srf, bufsize);
5852 retval = EAGAIN;
5853 goto out;
5854 }
5855 mp->b_wptr += sizeof (struct k_strrecvfd);
5856 mp->b_datap->db_type = M_PASSFP;
5857
5858 srf->fp = fp;
5859 srf->uid = crgetuid(curthread->t_cred);
5860 srf->gid = crgetgid(curthread->t_cred);
5861 mutex_enter(&fp->f_tlock);
5862 fp->f_count++;
5863 mutex_exit(&fp->f_tlock);
5864
5865 put(qp, mp);
5866 out:
5867 releasestr(stp->sd_wrq);
5868 if (mate)
5869 releasestr(mate);
5870 return (retval);
5871 }
5872
5873 /*
5874 * Send an ioctl message downstream and wait for acknowledgement.
5875 * flags may be set to either U_TO_K or K_TO_K and a combination
5876 * of STR_NOERROR or STR_NOSIG
5877 * STR_NOSIG: Signals are essentially ignored or held and have
5878 * no effect for the duration of the call.
5879 * STR_NOERROR: Ignores stream head read, write and hup errors.
5880 * Additionally, if an existing ioctl times out, it is assumed
5881 * lost and this ioctl will continue as if the previous ioctl had
5882 * finished. ETIME may be returned if this ioctl times out (i.e.
5883 * ic_timout is not INFTIM). Non-stream head errors may be returned if
5884 * the ioc_error indicates that the driver/module had problems,
5885 * an EFAULT was found when accessing user data, a lack of
5886 * resources, etc.
5887 */
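/*
 * In-kernel callers set up a struct strioctl and hand it to this
 * routine; a minimal sketch, mirroring the M_FLUSH follow-up ioctl
 * issued earlier in this file:
 *
 *	struct strioctl strioc;
 *	strioc.ic_cmd = -1;		(an unsupported command)
 *	strioc.ic_timout = 0;		(use the STRTIMOUT default)
 *	strioc.ic_len = 0;
 *	strioc.ic_dp = NULL;
 *	(void) strdoioctl(stp, &strioc, flag, K_TO_K, crp, rvalp);
 */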
5888 int
5889 strdoioctl(
5890 struct stdata *stp,
5891 struct strioctl *strioc,
5892 int fflags, /* file flags with model info */
5893 int flag,
5894 cred_t *crp,
5895 int *rvalp)
5896 {
5897 mblk_t *bp;
5898 struct iocblk *iocbp;
5899 struct copyreq *reqp;
5900 struct copyresp *resp;
5901 int id;
5902 int transparent = 0;
5903 int error = 0;
5904 int len = 0;
5905 caddr_t taddr;
5906 int copyflag = (flag & (U_TO_K | K_TO_K));
5907 int sigflag = (flag & STR_NOSIG);
5908 int errs;
5909 uint_t waitflags;
5910 boolean_t set_iocwaitne = B_FALSE;
5911
5912 ASSERT(copyflag == U_TO_K || copyflag == K_TO_K);
5913 ASSERT((fflags & FMODELS) != 0);
5914
5915 TRACE_2(TR_FAC_STREAMS_FR,
5916 TR_STRDOIOCTL,
5917 "strdoioctl:stp %p strioc %p", stp, strioc);
5918 if (strioc->ic_len == TRANSPARENT) { /* send arg in M_DATA block */
5919 transparent = 1;
5920 strioc->ic_len = sizeof (intptr_t);
5921 }
5922
5923 if (strioc->ic_len < 0 || (strmsgsz > 0 && strioc->ic_len > strmsgsz))
5924 return (EINVAL);
5925
5926 if ((bp = allocb_cred_wait(sizeof (union ioctypes), sigflag, &error,
5927 crp, curproc->p_pid)) == NULL)
5928 return (error);
5929
5930 bzero(bp->b_wptr, sizeof (union ioctypes));
5931
5932 iocbp = (struct iocblk *)bp->b_wptr;
5933 iocbp->ioc_count = strioc->ic_len;
5934 iocbp->ioc_cmd = strioc->ic_cmd;
5935 iocbp->ioc_flag = (fflags & FMODELS);
5936
5937 crhold(crp);
5938 iocbp->ioc_cr = crp;
5939 DB_TYPE(bp) = M_IOCTL;
5940 bp->b_wptr += sizeof (struct iocblk);
5941
5942 if (flag & STR_NOERROR)
5943 errs = STPLEX;
5944 else
5945 errs = STRHUP|STRDERR|STWRERR|STPLEX;
5946
5947 /*
5948 * If there is data to copy into ioctl block, do so.
5949 */
5950 if (iocbp->ioc_count > 0) {
5951 if (transparent)
5952 /*
5953 * Note: STR_NOERROR does not have an effect
5954 * in putiocd()
5955 */
5956 id = K_TO_K | sigflag;
5957 else
5958 id = flag;
5959 if ((error = putiocd(bp, strioc->ic_dp, id, crp)) != 0) {
5960 freemsg(bp);
5961 crfree(crp);
5962 return (error);
5963 }
5964
5965 /*
5966 * We could have slept copying in user pages.
5967 * Recheck the stream head state (the other end
5968 * of a pipe could have gone away).
5969 */
5970 if (stp->sd_flag & errs) {
5971 mutex_enter(&stp->sd_lock);
5972 error = strgeterr(stp, errs, 0);
5973 mutex_exit(&stp->sd_lock);
5974 if (error != 0) {
5975 freemsg(bp);
5976 crfree(crp);
5977 return (error);
5978 }
5979 }
5980 }
5981 if (transparent)
5982 iocbp->ioc_count = TRANSPARENT;
5983
5984 /*
5985 * Block for up to STRTIMOUT milliseconds if there is an outstanding
5986 * ioctl for this stream already running. All processes
5987 * sleeping here will be awakened as a result of an ACK
5988 * or NAK being received for the outstanding ioctl, or
5989 * as a result of the timer expiring on the outstanding
5990 * ioctl (a failure), or as a result of any waiting
5991 * process's timer expiring (also a failure).
5992 */
5993
5994 error = 0;
5995 mutex_enter(&stp->sd_lock);
5996 while ((stp->sd_flag & IOCWAIT) ||
5997 (!set_iocwaitne && (stp->sd_flag & IOCWAITNE))) {
5998 clock_t cv_rval;
5999
6000 TRACE_0(TR_FAC_STREAMS_FR,
6001 TR_STRDOIOCTL_WAIT,
6002 "strdoioctl sleeps - IOCWAIT");
6003 cv_rval = str_cv_wait(&stp->sd_iocmonitor, &stp->sd_lock,
6004 STRTIMOUT, sigflag);
6005 if (cv_rval <= 0) {
6006 if (cv_rval == 0) {
6007 error = EINTR;
6008 } else {
6009 if (flag & STR_NOERROR) {
6010 /*
6011 * Terminating current ioctl in
6012 * progress -- assume it got lost and
6013 * wake up the other thread so that the
6014 * operation completes.
6015 */
6016 if (!(stp->sd_flag & IOCWAITNE)) {
6017 set_iocwaitne = B_TRUE;
6018 stp->sd_flag |= IOCWAITNE;
6019 cv_broadcast(&stp->sd_monitor);
6020 }
6021 /*
6022 * Otherwise, there's a running
6023 * STR_NOERROR -- we have no choice
6024 * here but to wait forever (or until
6025 * interrupted).
6026 */
6027 } else {
6028 /*
6029 * pending ioctl has caused
6030 * us to time out
6031 */
6032 error = ETIME;
6033 }
6034 }
6035 } else if ((stp->sd_flag & errs)) {
6036 error = strgeterr(stp, errs, 0);
6037 }
6038 if (error) {
6039 mutex_exit(&stp->sd_lock);
6040 freemsg(bp);
6041 crfree(crp);
6042 return (error);
6043 }
6044 }
6045
6046 /*
6047 * Have control of ioctl mechanism.
6048 * Send down ioctl packet and wait for response.
6049 */
6050 if (stp->sd_iocblk != (mblk_t *)-1) {
6051 freemsg(stp->sd_iocblk);
6052 }
6053 stp->sd_iocblk = NULL;
6054
6055 /*
6056 * If this is marked with 'noerror' (internal; mostly
6057 * I_{P,}{UN,}LINK), then make sure nobody else is able to get
6058 * in here by setting IOCWAITNE.
6059 */
6060 waitflags = IOCWAIT;
6061 if (flag & STR_NOERROR)
6062 waitflags |= IOCWAITNE;
6063
6064 stp->sd_flag |= waitflags;
6065
6066 /*
6067 * Assign sequence number.
6068 */
6069 iocbp->ioc_id = stp->sd_iocid = getiocseqno();
6070
6071 mutex_exit(&stp->sd_lock);
6072
6073 TRACE_1(TR_FAC_STREAMS_FR,
6074 TR_STRDOIOCTL_PUT, "strdoioctl put: stp %p", stp);
6075 stream_willservice(stp);
6076 putnext(stp->sd_wrq, bp);
6077 stream_runservice(stp);
6078
6079 /*
6080 * Timed wait for acknowledgment. The wait time is limited by the
6081 * timeout value, which must be a positive integer (number of
6082 * milliseconds) to wait, or 0 (use default value of STRTIMOUT
6083 * milliseconds), or -1 (wait forever). This will be awakened
6084 * either by an ACK/NAK message arriving, the timer expiring, or
6085 * the timer expiring on another ioctl waiting for control of the
6086 * mechanism.
6087 */
6088 waitioc:
6089 mutex_enter(&stp->sd_lock);
6090
6091
6092 /*
6093 * If the reply has already arrived, don't sleep. If awakened from
6094 * the sleep, fail only if the reply has not arrived by then.
6095 * Otherwise, process the reply.
6096 */
6097 while (!stp->sd_iocblk) {
6098 clock_t cv_rval;
6099
6100 if (stp->sd_flag & errs) {
6101 error = strgeterr(stp, errs, 0);
6102 if (error != 0) {
6103 stp->sd_flag &= ~waitflags;
6104 cv_broadcast(&stp->sd_iocmonitor);
6105 mutex_exit(&stp->sd_lock);
6106 crfree(crp);
6107 return (error);
6108 }
6109 }
6110
6111 TRACE_0(TR_FAC_STREAMS_FR,
6112 TR_STRDOIOCTL_WAIT2,
6113 "strdoioctl sleeps awaiting reply");
6114 ASSERT(error == 0);
6115
6116 cv_rval = str_cv_wait(&stp->sd_monitor, &stp->sd_lock,
6117 (strioc->ic_timout ?
6118 strioc->ic_timout * 1000 : STRTIMOUT), sigflag);
6119
6120 /*
6121 * There are four possible cases here: interrupt, timeout,
6122 * wakeup by IOCWAITNE (above), or wakeup by strrput_nondata (a
6123 * valid M_IOCTL reply).
6124 *
6125 * If we've been awakened by a STR_NOERROR ioctl on some other
6126 * thread, then sd_iocblk will still be NULL, and IOCWAITNE
6127 * will be set. Pretend as if we just timed out. Note that
6128 * this other thread waited at least STRTIMOUT before trying to
6129 * awaken our thread, so this is indistinguishable (even for
6130 * INFTIM) from the case where we failed with ETIME waiting on
6131 * IOCWAIT in the prior loop.
6132 */
6133 if (cv_rval > 0 && !(flag & STR_NOERROR) &&
6134 stp->sd_iocblk == NULL && (stp->sd_flag & IOCWAITNE)) {
6135 cv_rval = -1;
6136 }
6137
6138 /*
6139 * Note: STR_NOERROR does not protect
6140 * us here; use ic_timout < 0.
6141 */
6142 if (cv_rval <= 0) {
6143 if (cv_rval == 0) {
6144 error = EINTR;
6145 } else {
6146 error = ETIME;
6147 }
6148 /*
6149 * A message could have come in after we were scheduled
6150 * but before we were actually run.
6151 */
6152 bp = stp->sd_iocblk;
6153 stp->sd_iocblk = NULL;
6154 if (bp != NULL) {
6155 if ((bp->b_datap->db_type == M_COPYIN) ||
6156 (bp->b_datap->db_type == M_COPYOUT)) {
6157 mutex_exit(&stp->sd_lock);
6158 if (bp->b_cont) {
6159 freemsg(bp->b_cont);
6160 bp->b_cont = NULL;
6161 }
6162 bp->b_datap->db_type = M_IOCDATA;
6163 bp->b_wptr = bp->b_rptr +
6164 sizeof (struct copyresp);
6165 resp = (struct copyresp *)bp->b_rptr;
6166 resp->cp_rval =
6167 (caddr_t)1; /* failure */
6168 stream_willservice(stp);
6169 putnext(stp->sd_wrq, bp);
6170 stream_runservice(stp);
6171 mutex_enter(&stp->sd_lock);
6172 } else {
6173 freemsg(bp);
6174 }
6175 }
6176 stp->sd_flag &= ~waitflags;
6177 cv_broadcast(&stp->sd_iocmonitor);
6178 mutex_exit(&stp->sd_lock);
6179 crfree(crp);
6180 return (error);
6181 }
6182 }
6183 bp = stp->sd_iocblk;
6184 /*
6185 * Note: it is strictly impossible to get here with sd_iocblk set to
6186 * -1. This is because the initial loop above doesn't allow any new
6187 * ioctls into the fray until all others have passed this point.
6188 */
6189 ASSERT(bp != NULL && bp != (mblk_t *)-1);
6190 TRACE_1(TR_FAC_STREAMS_FR,
6191 TR_STRDOIOCTL_ACK, "strdoioctl got reply: bp %p", bp);
6192 if ((bp->b_datap->db_type == M_IOCACK) ||
6193 (bp->b_datap->db_type == M_IOCNAK)) {
6194 /* for detection of duplicate ioctl replies */
6195 stp->sd_iocblk = (mblk_t *)-1;
6196 stp->sd_flag &= ~waitflags;
6197 cv_broadcast(&stp->sd_iocmonitor);
6198 mutex_exit(&stp->sd_lock);
6199 } else {
6200 /*
6201 * flags not cleared here because we're still doing
6202 * copy in/out for ioctl.
6203 */
6204 stp->sd_iocblk = NULL;
6205 mutex_exit(&stp->sd_lock);
6206 }
6207
6208
6209 /*
6210 * Have received acknowledgment.
6211 */
6212
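	/*
	 * The reply is one of four message types: M_IOCACK (success) and
	 * M_IOCNAK (failure) terminate the ioctl, while M_COPYIN and
	 * M_COPYOUT request another transfer of user data and, once the
	 * M_IOCDATA response has been sent downstream, loop back to
	 * waitioc to await the next reply.
	 */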
6213 switch (bp->b_datap->db_type) {
6214 case M_IOCACK:
6215 /*
6216 * Positive ack.
6217 */
6218 iocbp = (struct iocblk *)bp->b_rptr;
6219
6220 /*
6221 * Set error if indicated.
6222 */
6223 if (iocbp->ioc_error) {
6224 error = iocbp->ioc_error;
6225 break;
6226 }
6227
6228 /*
6229 * Set return value.
6230 */
6231 *rvalp = iocbp->ioc_rval;
6232
6233 /*
6234 * Data may have been returned in ACK message (ioc_count > 0).
6235 * If so, copy it out to the user's buffer.
6236 */
6237 if (iocbp->ioc_count && !transparent) {
6238 if (error = getiocd(bp, strioc->ic_dp, copyflag))
6239 break;
6240 }
6241 if (!transparent) {
6242 if (len) /* an M_COPYOUT was used with I_STR */
6243 strioc->ic_len = len;
6244 else
6245 strioc->ic_len = (int)iocbp->ioc_count;
6246 }
6247 break;
6248
6249 case M_IOCNAK:
6250 /*
6251 * Negative ack.
6252 *
6253 * The only thing to do is set error as specified
6254 * in neg ack packet.
6255 */
6256 iocbp = (struct iocblk *)bp->b_rptr;
6257
6258 error = (iocbp->ioc_error ? iocbp->ioc_error : EINVAL);
6259 break;
6260
6261 case M_COPYIN:
6262 /*
6263 * Driver or module has requested user ioctl data.
6264 */
6265 reqp = (struct copyreq *)bp->b_rptr;
6266
6267 /*
6268 * M_COPYIN should *never* have a message attached, though
6269 * it's harmless if it does -- thus, panic on a DEBUG
6270 * kernel and just free it on a non-DEBUG build.
6271 */
6272 ASSERT(bp->b_cont == NULL);
6273 if (bp->b_cont != NULL) {
6274 freemsg(bp->b_cont);
6275 bp->b_cont = NULL;
6276 }
6277
6278 error = putiocd(bp, reqp->cq_addr, flag, crp);
6279 if (error && bp->b_cont) {
6280 freemsg(bp->b_cont);
6281 bp->b_cont = NULL;
6282 }
6283
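		/*
		 * Reuse the request mblk as the M_IOCDATA response;
		 * cp_rval carries the copyin status back downstream.
		 */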
6284 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp);
6285 bp->b_datap->db_type = M_IOCDATA;
6286
6287 mblk_setcred(bp, crp, curproc->p_pid);
6288 resp = (struct copyresp *)bp->b_rptr;
6289 resp->cp_rval = (caddr_t)(uintptr_t)error;
6290 resp->cp_flag = (fflags & FMODELS);
6291
6292 stream_willservice(stp);
6293 putnext(stp->sd_wrq, bp);
6294 stream_runservice(stp);
6295
6296 if (error) {
6297 mutex_enter(&stp->sd_lock);
6298 stp->sd_flag &= ~waitflags;
6299 cv_broadcast(&stp->sd_iocmonitor);
6300 mutex_exit(&stp->sd_lock);
6301 crfree(crp);
6302 return (error);
6303 }
6304
6305 goto waitioc;
6306
6307 case M_COPYOUT:
6308 /*
6309 * Driver or module has ioctl data for a user.
6310 */
6311 reqp = (struct copyreq *)bp->b_rptr;
6312 ASSERT(bp->b_cont != NULL);
6313
6314 /*
6315		 * Always (transparent or non-transparent) use
6316		 * the address specified in the request.
6317 */
6318 taddr = reqp->cq_addr;
6319 if (!transparent)
6320 len = (int)reqp->cq_size;
6321
6322 /* copyout data to the provided address */
6323 error = getiocd(bp, taddr, copyflag);
6324
6325 freemsg(bp->b_cont);
6326 bp->b_cont = NULL;
6327
6328 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp);
6329 bp->b_datap->db_type = M_IOCDATA;
6330
6331 mblk_setcred(bp, crp, curproc->p_pid);
6332 resp = (struct copyresp *)bp->b_rptr;
6333 resp->cp_rval = (caddr_t)(uintptr_t)error;
6334 resp->cp_flag = (fflags & FMODELS);
6335
6336 stream_willservice(stp);
6337 putnext(stp->sd_wrq, bp);
6338 stream_runservice(stp);
6339
6340 if (error) {
6341 mutex_enter(&stp->sd_lock);
6342 stp->sd_flag &= ~waitflags;
6343 cv_broadcast(&stp->sd_iocmonitor);
6344 mutex_exit(&stp->sd_lock);
6345 crfree(crp);
6346 return (error);
6347 }
6348 goto waitioc;
6349
6350 default:
6351 ASSERT(0);
6352 mutex_enter(&stp->sd_lock);
6353 stp->sd_flag &= ~waitflags;
6354 cv_broadcast(&stp->sd_iocmonitor);
6355 mutex_exit(&stp->sd_lock);
6356 break;
6357 }
6358
6359 freemsg(bp);
6360 crfree(crp);
6361 return (error);
6362 }
6363
6364 /*
6365 * Send an M_CMD message downstream and wait for a reply.  This is a ptools
6366 * special used to retrieve information from modules/drivers on a stream
6367 * without being subjected to flow control or interfering with pending
6368 * messages on the stream (e.g. an ioctl in flight).
6369 */
6370 int
6371 strdocmd(struct stdata *stp, struct strcmd *scp, cred_t *crp)
6372 {
6373 mblk_t *mp;
6374 struct cmdblk *cmdp;
6375 int error = 0;
6376 int errs = STRHUP|STRDERR|STWRERR|STPLEX;
6377 clock_t rval, timeout = STRTIMOUT;
6378
6379 if (scp->sc_len < 0 || scp->sc_len > sizeof (scp->sc_buf) ||
6380 scp->sc_timeout < -1)
6381 return (EINVAL);
6382
6383 if (scp->sc_timeout > 0)
6384 timeout = scp->sc_timeout * MILLISEC;
6385
6386 if ((mp = allocb_cred(sizeof (struct cmdblk), crp,
6387 curproc->p_pid)) == NULL)
6388 return (ENOMEM);
6389
6390 crhold(crp);
6391
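	/*
	 * Build the M_CMD request: the first mblk carries the cmdblk
	 * header; an optional b_cont mblk (allocated below) carries up
	 * to sizeof (sc_buf) bytes of payload copied from the caller.
	 */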
6392 cmdp = (struct cmdblk *)mp->b_wptr;
6393 cmdp->cb_cr = crp;
6394 cmdp->cb_cmd = scp->sc_cmd;
6395 cmdp->cb_len = scp->sc_len;
6396 cmdp->cb_error = 0;
6397 mp->b_wptr += sizeof (struct cmdblk);
6398
6399 DB_TYPE(mp) = M_CMD;
6400 DB_CPID(mp) = curproc->p_pid;
6401
6402 /*
6403 * Copy in the payload.
6404 */
6405 if (cmdp->cb_len > 0) {
6406 mp->b_cont = allocb_cred(sizeof (scp->sc_buf), crp,
6407 curproc->p_pid);
6408 if (mp->b_cont == NULL) {
6409 error = ENOMEM;
6410 goto out;
6411 }
6412
6413 /* cb_len comes from sc_len, which has already been checked */
6414 ASSERT(cmdp->cb_len <= sizeof (scp->sc_buf));
6415 (void) bcopy(scp->sc_buf, mp->b_cont->b_wptr, cmdp->cb_len);
6416 mp->b_cont->b_wptr += cmdp->cb_len;
6417 DB_CPID(mp->b_cont) = curproc->p_pid;
6418 }
6419
6420 /*
6421 * Since this mechanism is strictly for ptools, and since only one
6422 * process can be grabbed at a time, we simply fail if there's
6423 * currently an operation pending.
6424 */
6425 mutex_enter(&stp->sd_lock);
6426 if (stp->sd_flag & STRCMDWAIT) {
6427 mutex_exit(&stp->sd_lock);
6428 error = EBUSY;
6429 goto out;
6430 }
6431 stp->sd_flag |= STRCMDWAIT;
6432 ASSERT(stp->sd_cmdblk == NULL);
6433 mutex_exit(&stp->sd_lock);
6434
6435 putnext(stp->sd_wrq, mp);
6436 mp = NULL;
6437
6438 /*
6439 * Timed wait for acknowledgment. If the reply has already arrived,
6440 * don't sleep. If awakened from the sleep, fail only if the reply
6441 * has not arrived by then. Otherwise, process the reply.
6442 */
6443 mutex_enter(&stp->sd_lock);
6444 while (stp->sd_cmdblk == NULL) {
6445 if (stp->sd_flag & errs) {
6446 if ((error = strgeterr(stp, errs, 0)) != 0)
6447 goto waitout;
6448 }
6449
6450 rval = str_cv_wait(&stp->sd_monitor, &stp->sd_lock, timeout, 0);
6451 if (stp->sd_cmdblk != NULL)
6452 break;
6453
6454 if (rval <= 0) {
6455 error = (rval == 0) ? EINTR : ETIME;
6456 goto waitout;
6457 }
6458 }
6459
6460 /*
6461 * We received a reply.
6462 */
6463 mp = stp->sd_cmdblk;
6464 stp->sd_cmdblk = NULL;
6465 ASSERT(mp != NULL && DB_TYPE(mp) == M_CMD);
6466 ASSERT(stp->sd_flag & STRCMDWAIT);
6467 stp->sd_flag &= ~STRCMDWAIT;
6468 mutex_exit(&stp->sd_lock);
6469
6470 cmdp = (struct cmdblk *)mp->b_rptr;
6471 if ((error = cmdp->cb_error) != 0)
6472 goto out;
6473
6474 /*
6475 * Data may have been returned in the reply (cb_len > 0).
6476 * If so, copy it out to the user's buffer.
6477 */
6478 if (cmdp->cb_len > 0) {
6479 if (mp->b_cont == NULL || MBLKL(mp->b_cont) < cmdp->cb_len) {
6480 error = EPROTO;
6481 goto out;
6482 }
6483
6484 cmdp->cb_len = MIN(cmdp->cb_len, sizeof (scp->sc_buf));
6485 (void) bcopy(mp->b_cont->b_rptr, scp->sc_buf, cmdp->cb_len);
6486 }
6487 scp->sc_len = cmdp->cb_len;
6488 out:
6489 freemsg(mp);
6490 crfree(crp);
6491 return (error);
6492 waitout:
6493 ASSERT(stp->sd_cmdblk == NULL);
6494 stp->sd_flag &= ~STRCMDWAIT;
6495 mutex_exit(&stp->sd_lock);
6496 crfree(crp);
6497 return (error);
6498 }
6499
6500 /*
6501 * For the SunOS keyboard driver.
6502 * Return the next available "ioctl" sequence number.
6503 * Exported, so that streams modules can send "ioctl" messages
6504 * downstream from their open routine.
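 *
 * An illustrative sketch (not from this file; MYMOD_CMD is hypothetical)
 * of a module's open routine tagging an M_IOCTL mblk (mp) that it has
 * already built, so the eventual M_IOCACK/M_IOCNAK can be matched back
 * to the request:
 *
 *	iocp = (struct iocblk *)mp->b_rptr;
 *	iocp->ioc_cmd = MYMOD_CMD;
 *	iocp->ioc_id = getiocseqno();
 *	putnext(WR(q), mp);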
6505 */
6506 int
6507 getiocseqno(void)
6508 {
6509 int i;
6510
6511 mutex_enter(&strresources);
6512 i = ++ioc_id;
6513 mutex_exit(&strresources);
6514 return (i);
6515 }
6516
6517 /*
6518 * Get the next message from the read queue. If the message is
6519 * priority, STRPRI will have been set by strrput(). This flag
6520 * should be reset only when the entire message at the front of the
6521 * queue has been consumed.
6522 *
6523 * NOTE: strgetmsg and kstrgetmsg have much of the logic in common.
6524 */
6525 int
6526 strgetmsg(
6527 struct vnode *vp,
6528 struct strbuf *mctl,
6529 struct strbuf *mdata,
6530 unsigned char *prip,
6531 int *flagsp,
6532 int fmode,
6533 rval_t *rvp)
6534 {
6535 struct stdata *stp;
6536 mblk_t *bp, *nbp;
6537 mblk_t *savemp = NULL;
6538 mblk_t *savemptail = NULL;
6539 uint_t old_sd_flag;
6540 int flg;
6541 int more = 0;
6542 int error = 0;
6543 char first = 1;
6544 uint_t mark; /* Contains MSG*MARK and _LASTMARK */
6545 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */
6546 unsigned char pri = 0;
6547 queue_t *q;
6548 int pr = 0; /* Partial read successful */
6549 struct uio uios;
6550 struct uio *uiop = &uios;
6551 struct iovec iovs;
6552 unsigned char type;
6553
6554 TRACE_1(TR_FAC_STREAMS_FR, TR_STRGETMSG_ENTER,
6555 "strgetmsg:%p", vp);
6556
6557 ASSERT(vp->v_stream);
6558 stp = vp->v_stream;
6559 rvp->r_val1 = 0;
6560
6561 mutex_enter(&stp->sd_lock);
6562
6563 if ((error = i_straccess(stp, JCREAD)) != 0) {
6564 mutex_exit(&stp->sd_lock);
6565 return (error);
6566 }
6567
6568 if (stp->sd_flag & (STRDERR|STPLEX)) {
6569 error = strgeterr(stp, STRDERR|STPLEX, 0);
6570 if (error != 0) {
6571 mutex_exit(&stp->sd_lock);
6572 return (error);
6573 }
6574 }
6575 mutex_exit(&stp->sd_lock);
6576
6577 switch (*flagsp) {
6578 case MSG_HIPRI:
6579 if (*prip != 0)
6580 return (EINVAL);
6581 break;
6582
6583 case MSG_ANY:
6584 case MSG_BAND:
6585 break;
6586
6587 default:
6588 return (EINVAL);
6589 }
6590 /*
6591 * Setup uio and iov for data part
6592 */
6593 iovs.iov_base = mdata->buf;
6594 iovs.iov_len = mdata->maxlen;
6595 uios.uio_iov = &iovs;
6596 uios.uio_iovcnt = 1;
6597 uios.uio_loffset = 0;
6598 uios.uio_segflg = UIO_USERSPACE;
6599 uios.uio_fmode = 0;
6600 uios.uio_extflg = UIO_COPY_CACHED;
6601 uios.uio_resid = mdata->maxlen;
6602 uios.uio_offset = 0;
6603
6604 q = _RD(stp->sd_wrq);
6605 mutex_enter(&stp->sd_lock);
6606 old_sd_flag = stp->sd_flag;
6607 mark = 0;
6608 for (;;) {
6609 int done = 0;
6610 mblk_t *q_first = q->q_first;
6611
6612 /*
6613 * Get the next message of appropriate priority
6614 * from the stream head. If the caller is interested
6615 * in band or hipri messages, then they should already
6616 * be enqueued at the stream head. On the other hand
6617 * if the caller wants normal (band 0) messages, they
6618 * might be deferred in a synchronous stream and they
6619 * will need to be pulled up.
6620 *
6621 * After we have dequeued a message, we might find that
6622 * it was a deferred M_SIG that was enqueued at the
6623 * stream head. It must now be posted as part of the
6624 * read by calling strsignal_nolock().
6625 *
6626 * Also note that strrput does not enqueue an M_PCSIG,
6627 * and there cannot be more than one hipri message,
6628 * so there was no need to have the M_PCSIG case.
6629 *
6630 * At some time it might be nice to try and wrap the
6631 * functionality of kstrgetmsg() and strgetmsg() into
6632		 * a common routine so as to reduce the amount of replicated
6633 * code (since they are extremely similar).
6634 */
6635 if (!(*flagsp & (MSG_HIPRI|MSG_BAND))) {
6636 /* Asking for normal, band0 data */
6637 bp = strget(stp, q, uiop, first, &error);
6638 ASSERT(MUTEX_HELD(&stp->sd_lock));
6639 if (bp != NULL) {
6640 if (DB_TYPE(bp) == M_SIG) {
6641 strsignal_nolock(stp, *bp->b_rptr,
6642 bp->b_band);
6643 freemsg(bp);
6644 continue;
6645 } else {
6646 break;
6647 }
6648 }
6649 if (error != 0)
6650 goto getmout;
6651
6652 /*
6653 * We can't depend on the value of STRPRI here because
6654 * the stream head may be in transit. Therefore, we
6655 * must look at the type of the first message to
6656		 * determine if a high priority message is waiting.
6657 */
6658 } else if ((*flagsp & MSG_HIPRI) && q_first != NULL &&
6659 DB_TYPE(q_first) >= QPCTL &&
6660 (bp = getq_noenab(q, 0)) != NULL) {
6661 /* Asked for HIPRI and got one */
6662 ASSERT(DB_TYPE(bp) >= QPCTL);
6663 break;
6664 } else if ((*flagsp & MSG_BAND) && q_first != NULL &&
6665 ((q_first->b_band >= *prip) || DB_TYPE(q_first) >= QPCTL) &&
6666 (bp = getq_noenab(q, 0)) != NULL) {
6667 /*
6668 * Asked for at least band "prip" and got either at
6669 * least that band or a hipri message.
6670 */
6671 ASSERT(bp->b_band >= *prip || DB_TYPE(bp) >= QPCTL);
6672 if (DB_TYPE(bp) == M_SIG) {
6673 strsignal_nolock(stp, *bp->b_rptr, bp->b_band);
6674 freemsg(bp);
6675 continue;
6676 } else {
6677 break;
6678 }
6679 }
6680
6681 /* No data. Time to sleep? */
6682 qbackenable(q, 0);
6683
6684 /*
6685 * If STRHUP or STREOF, return 0 length control and data.
6686 * If resid is 0, then a read(fd,buf,0) was done. Do not
6687 * sleep to satisfy this request because by default we have
6688 * zero bytes to return.
6689 */
6690 if ((stp->sd_flag & (STRHUP|STREOF)) || (mctl->maxlen == 0 &&
6691 mdata->maxlen == 0)) {
6692 mctl->len = mdata->len = 0;
6693 *flagsp = 0;
6694 mutex_exit(&stp->sd_lock);
6695 return (0);
6696 }
6697 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_WAIT,
6698 "strgetmsg calls strwaitq:%p, %p",
6699 vp, uiop);
6700 if (((error = strwaitq(stp, GETWAIT, (ssize_t)0, fmode, -1,
6701 &done)) != 0) || done) {
6702 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_DONE,
6703 "strgetmsg error or done:%p, %p",
6704 vp, uiop);
6705 mutex_exit(&stp->sd_lock);
6706 return (error);
6707 }
6708 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_AWAKE,
6709 "strgetmsg awakes:%p, %p", vp, uiop);
6710 if ((error = i_straccess(stp, JCREAD)) != 0) {
6711 mutex_exit(&stp->sd_lock);
6712 return (error);
6713 }
6714 first = 0;
6715 }
6716 ASSERT(bp != NULL);
6717 /*
6718 * Extract any mark information. If the message is not completely
6719 * consumed this information will be put in the mblk
6720 * that is putback.
6721 * If MSGMARKNEXT is set and the message is completely consumed
6722 * the STRATMARK flag will be set below. Likewise, if
6723 * MSGNOTMARKNEXT is set and the message is
6724 * completely consumed STRNOTATMARK will be set.
6725 */
6726 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT);
6727 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) !=
6728 (MSGMARKNEXT|MSGNOTMARKNEXT));
6729 if (mark != 0 && bp == stp->sd_mark) {
6730 mark |= _LASTMARK;
6731 stp->sd_mark = NULL;
6732 }
6733 /*
6734 * keep track of the original message type and priority
6735 */
6736 pri = bp->b_band;
6737 type = bp->b_datap->db_type;
6738 if (type == M_PASSFP) {
6739 if ((mark & _LASTMARK) && (stp->sd_mark == NULL))
6740 stp->sd_mark = bp;
6741 bp->b_flag |= mark & ~_LASTMARK;
6742 putback(stp, q, bp, pri);
6743 qbackenable(q, pri);
6744 mutex_exit(&stp->sd_lock);
6745 return (EBADMSG);
6746 }
6747 ASSERT(type != M_SIG);
6748
6749 /*
6750 * Set this flag so strrput will not generate signals. Need to
6751	 * make sure this flag is cleared before leaving this routine,
6752 * else signals will stop being sent.
6753 */
6754 stp->sd_flag |= STRGETINPROG;
6755 mutex_exit(&stp->sd_lock);
6756
6757 if (STREAM_NEEDSERVICE(stp))
6758 stream_runservice(stp);
6759
6760 /*
6761 * Set HIPRI flag if message is priority.
6762 */
6763 if (type >= QPCTL)
6764 flg = MSG_HIPRI;
6765 else
6766 flg = MSG_BAND;
6767
6768 /*
6769 * First process PROTO or PCPROTO blocks, if any.
6770 */
6771 if (mctl->maxlen >= 0 && type != M_DATA) {
6772 size_t n, bcnt;
6773 char *ubuf;
6774
6775 bcnt = mctl->maxlen;
6776 ubuf = mctl->buf;
6777 while (bp != NULL && bp->b_datap->db_type != M_DATA) {
6778 if ((n = MIN(bcnt, bp->b_wptr - bp->b_rptr)) != 0 &&
6779 copyout(bp->b_rptr, ubuf, n)) {
6780 error = EFAULT;
6781 mutex_enter(&stp->sd_lock);
6782 /*
6783 * clear stream head pri flag based on
6784 * first message type
6785 */
6786 if (type >= QPCTL) {
6787 ASSERT(type == M_PCPROTO);
6788 stp->sd_flag &= ~STRPRI;
6789 }
6790 more = 0;
6791 freemsg(bp);
6792 goto getmout;
6793 }
6794 ubuf += n;
6795 bp->b_rptr += n;
6796 if (bp->b_rptr >= bp->b_wptr) {
6797 nbp = bp;
6798 bp = bp->b_cont;
6799 freeb(nbp);
6800 }
6801 ASSERT(n <= bcnt);
6802 bcnt -= n;
6803 if (bcnt == 0)
6804 break;
6805 }
6806 mctl->len = mctl->maxlen - bcnt;
6807 } else
6808 mctl->len = -1;
6809
6810 if (bp && bp->b_datap->db_type != M_DATA) {
6811 /*
6812 * More PROTO blocks in msg.
6813 */
6814 more |= MORECTL;
6815 savemp = bp;
6816 while (bp && bp->b_datap->db_type != M_DATA) {
6817 savemptail = bp;
6818 bp = bp->b_cont;
6819 }
6820 savemptail->b_cont = NULL;
6821 }
6822
6823 /*
6824 * Now process DATA blocks, if any.
6825 */
6826 if (mdata->maxlen >= 0 && bp) {
6827 /*
6828 * struiocopyout will consume a potential zero-length
6829 * M_DATA even if uio_resid is zero.
6830 */
6831 size_t oldresid = uiop->uio_resid;
6832
6833 bp = struiocopyout(bp, uiop, &error);
6834 if (error != 0) {
6835 mutex_enter(&stp->sd_lock);
6836 /*
6837 * clear stream head hi pri flag based on
6838 * first message
6839 */
6840 if (type >= QPCTL) {
6841 ASSERT(type == M_PCPROTO);
6842 stp->sd_flag &= ~STRPRI;
6843 }
6844 more = 0;
6845 freemsg(savemp);
6846 goto getmout;
6847 }
6848 /*
6849 * (pr == 1) indicates a partial read.
6850 */
6851 if (oldresid > uiop->uio_resid)
6852 pr = 1;
6853 mdata->len = mdata->maxlen - uiop->uio_resid;
6854 } else
6855 mdata->len = -1;
6856
6857 if (bp) { /* more data blocks in msg */
6858 more |= MOREDATA;
6859 if (savemp)
6860 savemptail->b_cont = bp;
6861 else
6862 savemp = bp;
6863 }
6864
6865 mutex_enter(&stp->sd_lock);
6866 if (savemp) {
6867 if (pr && (savemp->b_datap->db_type == M_DATA) &&
6868 msgnodata(savemp)) {
6869 /*
6870 * Avoid queuing a zero-length tail part of
6871 * a message. pr=1 indicates that we read some of
6872 * the message.
6873 */
6874 freemsg(savemp);
6875 more &= ~MOREDATA;
6876 /*
6877 * clear stream head hi pri flag based on
6878 * first message
6879 */
6880 if (type >= QPCTL) {
6881 ASSERT(type == M_PCPROTO);
6882 stp->sd_flag &= ~STRPRI;
6883 }
6884 } else {
6885 savemp->b_band = pri;
6886 /*
6887 * If the first message was HIPRI and the one we're
6888 * putting back isn't, then clear STRPRI, otherwise
6889 * set STRPRI again. Note that we must set STRPRI
6890 * again since the flush logic in strrput_nondata()
6891 * may have cleared it while we had sd_lock dropped.
6892 */
6893 if (type >= QPCTL) {
6894 ASSERT(type == M_PCPROTO);
6895 if (queclass(savemp) < QPCTL)
6896 stp->sd_flag &= ~STRPRI;
6897 else
6898 stp->sd_flag |= STRPRI;
6899 } else if (queclass(savemp) >= QPCTL) {
6900 /*
6901 * The first message was not a HIPRI message,
6902 * but the one we are about to putback is.
6903			 * For simplicity, we do not allow HIPRI
6904 * messages to be embedded in the message
6905 * body, so just force it to same type as
6906 * first message.
6907 */
6908 ASSERT(type == M_DATA || type == M_PROTO);
6909 ASSERT(savemp->b_datap->db_type == M_PCPROTO);
6910 savemp->b_datap->db_type = type;
6911 }
6912 if (mark != 0) {
6913 savemp->b_flag |= mark & ~_LASTMARK;
6914 if ((mark & _LASTMARK) &&
6915 (stp->sd_mark == NULL)) {
6916 /*
6917 * If another marked message arrived
6918				 * while sd_lock was not held, sd_mark
6919 * would be non-NULL.
6920 */
6921 stp->sd_mark = savemp;
6922 }
6923 }
6924 putback(stp, q, savemp, pri);
6925 }
6926 } else {
6927 /*
6928 * The complete message was consumed.
6929 *
6930 * If another M_PCPROTO arrived while sd_lock was not held
6931 * it would have been discarded since STRPRI was still set.
6932 *
6933 * Move the MSG*MARKNEXT information
6934 * to the stream head just in case
6935 * the read queue becomes empty.
6936 * clear stream head hi pri flag based on
6937 * first message
6938 *
6939 * If the stream head was at the mark
6940 * (STRATMARK) before we dropped sd_lock above
6941 * and some data was consumed then we have
6942 * moved past the mark thus STRATMARK is
6943 * cleared. However, if a message arrived in
6944 * strrput during the copyout above causing
6945		 * STRATMARK to be set, we cannot clear that
6946 * flag.
6947 */
6948 if (type >= QPCTL) {
6949 ASSERT(type == M_PCPROTO);
6950 stp->sd_flag &= ~STRPRI;
6951 }
6952 if (mark & (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) {
6953 if (mark & MSGMARKNEXT) {
6954 stp->sd_flag &= ~STRNOTATMARK;
6955 stp->sd_flag |= STRATMARK;
6956 } else if (mark & MSGNOTMARKNEXT) {
6957 stp->sd_flag &= ~STRATMARK;
6958 stp->sd_flag |= STRNOTATMARK;
6959 } else {
6960 stp->sd_flag &= ~(STRATMARK|STRNOTATMARK);
6961 }
6962 } else if (pr && (old_sd_flag & STRATMARK)) {
6963 stp->sd_flag &= ~STRATMARK;
6964 }
6965 }
6966
6967 *flagsp = flg;
6968 *prip = pri;
6969
6970 /*
6971 * Getmsg cleanup processing - if the state of the queue has changed
6972 * some signals may need to be sent and/or poll awakened.
6973 */
6974 getmout:
6975 qbackenable(q, pri);
6976
6977 /*
6978 * We dropped the stream head lock above. Send all M_SIG messages
6979 * before processing stream head for SIGPOLL messages.
6980 */
6981 ASSERT(MUTEX_HELD(&stp->sd_lock));
6982 while ((bp = q->q_first) != NULL &&
6983 (bp->b_datap->db_type == M_SIG)) {
6984 /*
6985 * sd_lock is held so the content of the read queue can not
6986 * change.
6987 */
6988 bp = getq(q);
6989 ASSERT(bp != NULL && bp->b_datap->db_type == M_SIG);
6990
6991 strsignal_nolock(stp, *bp->b_rptr, bp->b_band);
6992 mutex_exit(&stp->sd_lock);
6993 freemsg(bp);
6994 if (STREAM_NEEDSERVICE(stp))
6995 stream_runservice(stp);
6996 mutex_enter(&stp->sd_lock);
6997 }
6998
6999 /*
7000	 * The stream head cannot change while we make the determination
7001 * whether or not to send a signal. Drop the flag to allow strrput
7002 * to send firstmsgsigs again.
7003 */
7004 stp->sd_flag &= ~STRGETINPROG;
7005
7006 /*
7007	 * If the type of message at the front of the queue changed
7008	 * due to this receive, the appropriate signals and pollwakeup
7009	 * events are generated. The types of change are:
7010 * Processed a hipri message, q_first is not hipri.
7011 * Processed a band X message, and q_first is band Y.
7012 * The generated signals and pollwakeups are identical to what
7013 * strrput() generates should the message that is now on q_first
7014 * arrive to an empty read queue.
7015 *
7016 * Note: only strrput will send a signal for a hipri message.
7017 */
7018 if ((bp = q->q_first) != NULL && !(stp->sd_flag & STRPRI)) {
7019 strsigset_t signals = 0;
7020 strpollset_t pollwakeups = 0;
7021
7022 if (flg & MSG_HIPRI) {
7023 /*
7024 * Removed a hipri message. Regular data at
7025 * the front of the queue.
7026 */
7027 if (bp->b_band == 0) {
7028 signals = S_INPUT | S_RDNORM;
7029 pollwakeups = POLLIN | POLLRDNORM;
7030 } else {
7031 signals = S_INPUT | S_RDBAND;
7032 pollwakeups = POLLIN | POLLRDBAND;
7033 }
7034 } else if (pri != bp->b_band) {
7035 /*
7036 * The band is different for the new q_first.
7037 */
7038 if (bp->b_band == 0) {
7039 signals = S_RDNORM;
7040 pollwakeups = POLLIN | POLLRDNORM;
7041 } else {
7042 signals = S_RDBAND;
7043 pollwakeups = POLLIN | POLLRDBAND;
7044 }
7045 }
7046
7047 if (pollwakeups != 0) {
7048 if (pollwakeups == (POLLIN | POLLRDNORM)) {
7049 if (!(stp->sd_rput_opt & SR_POLLIN))
7050 goto no_pollwake;
7051 stp->sd_rput_opt &= ~SR_POLLIN;
7052 }
7053 mutex_exit(&stp->sd_lock);
7054 pollwakeup(&stp->sd_pollist, pollwakeups);
7055 mutex_enter(&stp->sd_lock);
7056 }
7057 no_pollwake:
7058
7059 if (stp->sd_sigflags & signals)
7060 strsendsig(stp->sd_siglist, signals, bp->b_band, 0);
7061 }
7062 mutex_exit(&stp->sd_lock);
7063
7064 rvp->r_val1 = more;
7065 return (error);
7066 #undef _LASTMARK
7067 }
7068
7069 /*
7070 * Get the next message from the read queue. If the message is
7071 * priority, STRPRI will have been set by strrput(). This flag
7072 * should be reset only when the entire message at the front of the
7073 * queue has been consumed.
7074 *
7075 * If uiop is NULL all data is returned in mctlp.
7076 * Note that a NULL uiop implies that FNDELAY and FNONBLOCK are
7077 * assumed to be clear.
7078 * The timeout parameter is in milliseconds; -1 for infinity.
7079 * This routine handles the consolidation private flags:
7080 * MSG_IGNERROR Ignore any stream head error except STPLEX.
7081 * MSG_DELAYERROR Defer the error check until the queue is empty.
7082 * MSG_HOLDSIG Hold signals while waiting for data.
7083 * MSG_IPEEK Only peek at messages.
7084 * MSG_DISCARDTAIL Discard the tail M_DATA part of the message
7085 * that doesn't fit.
7086 * MSG_NOMARK If the message is marked leave it on the queue.
7087 *
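 * An illustrative (hypothetical) invocation that retrieves a complete
 * message with no uio, so the control and data parts are returned
 * linked together in *mctlp, waiting as long as necessary:
 *
 *	flags = MSG_ANY;
 *	pri = 0;
 *	error = kstrgetmsg(vp, &mp, NULL, &pri, &flags, -1, &rval);
 *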
7088 * NOTE: strgetmsg and kstrgetmsg have much of the logic in common.
7089 */
7090 int
7091 kstrgetmsg(
7092 struct vnode *vp,
7093 mblk_t **mctlp,
7094 struct uio *uiop,
7095 unsigned char *prip,
7096 int *flagsp,
7097 clock_t timout,
7098 rval_t *rvp)
7099 {
7100 struct stdata *stp;
7101 mblk_t *bp, *nbp;
7102 mblk_t *savemp = NULL;
7103 mblk_t *savemptail = NULL;
7104 int flags;
7105 uint_t old_sd_flag;
7106 int flg;
7107 int more = 0;
7108 int error = 0;
7109 char first = 1;
7110 uint_t mark; /* Contains MSG*MARK and _LASTMARK */
7111 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */
7112 unsigned char pri = 0;
7113 queue_t *q;
7114 int pr = 0; /* Partial read successful */
7115 unsigned char type;
7116
7117 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_ENTER,
7118 "kstrgetmsg:%p", vp);
7119
7120 ASSERT(vp->v_stream);
7121 stp = vp->v_stream;
7122 rvp->r_val1 = 0;
7123
7124 mutex_enter(&stp->sd_lock);
7125
7126 if ((error = i_straccess(stp, JCREAD)) != 0) {
7127 mutex_exit(&stp->sd_lock);
7128 return (error);
7129 }
7130
7131 flags = *flagsp;
7132 if (stp->sd_flag & (STRDERR|STPLEX)) {
7133 if ((stp->sd_flag & STPLEX) ||
7134 (flags & (MSG_IGNERROR|MSG_DELAYERROR)) == 0) {
7135 error = strgeterr(stp, STRDERR|STPLEX,
7136 (flags & MSG_IPEEK));
7137 if (error != 0) {
7138 mutex_exit(&stp->sd_lock);
7139 return (error);
7140 }
7141 }
7142 }
7143 mutex_exit(&stp->sd_lock);
7144
7145 switch (flags & (MSG_HIPRI|MSG_ANY|MSG_BAND)) {
7146 case MSG_HIPRI:
7147 if (*prip != 0)
7148 return (EINVAL);
7149 break;
7150
7151 case MSG_ANY:
7152 case MSG_BAND:
7153 break;
7154
7155 default:
7156 return (EINVAL);
7157 }
7158
7159 retry:
7160 q = _RD(stp->sd_wrq);
7161 mutex_enter(&stp->sd_lock);
7162 old_sd_flag = stp->sd_flag;
7163 mark = 0;
7164 for (;;) {
7165 int done = 0;
7166 int waitflag;
7167 int fmode;
7168 mblk_t *q_first = q->q_first;
7169
7170 /*
7171 * This section of the code operates just like the code
7172 * in strgetmsg(). There is a comment there about what
7173 * is going on here.
7174 */
7175 if (!(flags & (MSG_HIPRI|MSG_BAND))) {
7176 /* Asking for normal, band0 data */
7177 bp = strget(stp, q, uiop, first, &error);
7178 ASSERT(MUTEX_HELD(&stp->sd_lock));
7179 if (bp != NULL) {
7180 if (DB_TYPE(bp) == M_SIG) {
7181 strsignal_nolock(stp, *bp->b_rptr,
7182 bp->b_band);
7183 freemsg(bp);
7184 continue;
7185 } else {
7186 break;
7187 }
7188 }
7189 if (error != 0) {
7190 goto getmout;
7191 }
7192 /*
7193 * We can't depend on the value of STRPRI here because
7194 * the stream head may be in transit. Therefore, we
7195 * must look at the type of the first message to
7196			 * determine if a high priority message is waiting.
7197 */
7198 } else if ((flags & MSG_HIPRI) && q_first != NULL &&
7199 DB_TYPE(q_first) >= QPCTL &&
7200 (bp = getq_noenab(q, 0)) != NULL) {
7201 ASSERT(DB_TYPE(bp) >= QPCTL);
7202 break;
7203 } else if ((flags & MSG_BAND) && q_first != NULL &&
7204 ((q_first->b_band >= *prip) || DB_TYPE(q_first) >= QPCTL) &&
7205 (bp = getq_noenab(q, 0)) != NULL) {
7206 /*
7207 * Asked for at least band "prip" and got either at
7208 * least that band or a hipri message.
7209 */
7210 ASSERT(bp->b_band >= *prip || DB_TYPE(bp) >= QPCTL);
7211 if (DB_TYPE(bp) == M_SIG) {
7212 strsignal_nolock(stp, *bp->b_rptr, bp->b_band);
7213 freemsg(bp);
7214 continue;
7215 } else {
7216 break;
7217 }
7218 }
7219
7220 /* No data. Time to sleep? */
7221 qbackenable(q, 0);
7222
7223 /*
7224 * Delayed error notification?
7225 */
7226 if ((stp->sd_flag & (STRDERR|STPLEX)) &&
7227 (flags & (MSG_IGNERROR|MSG_DELAYERROR)) == MSG_DELAYERROR) {
7228 error = strgeterr(stp, STRDERR|STPLEX,
7229 (flags & MSG_IPEEK));
7230 if (error != 0) {
7231 mutex_exit(&stp->sd_lock);
7232 return (error);
7233 }
7234 }
7235
7236 /*
7237 * If STRHUP or STREOF, return 0 length control and data.
7238 * If a read(fd,buf,0) has been done, do not sleep, just
7239 * return.
7240 *
7241 * If mctlp == NULL and uiop == NULL, then the code will
7242 * do the strwaitq. This is an understood way of saying
7243 * sleep "polling" until a message is received.
7244 */
7245 if ((stp->sd_flag & (STRHUP|STREOF)) ||
7246 (uiop != NULL && uiop->uio_resid == 0)) {
7247 if (mctlp != NULL)
7248 *mctlp = NULL;
7249 *flagsp = 0;
7250 mutex_exit(&stp->sd_lock);
7251 return (0);
7252 }
7253
7254 waitflag = GETWAIT;
7255 if (flags &
7256 (MSG_HOLDSIG|MSG_IGNERROR|MSG_IPEEK|MSG_DELAYERROR)) {
7257 if (flags & MSG_HOLDSIG)
7258 waitflag |= STR_NOSIG;
7259 if (flags & MSG_IGNERROR)
7260 waitflag |= STR_NOERROR;
7261 if (flags & MSG_IPEEK)
7262 waitflag |= STR_PEEK;
7263 if (flags & MSG_DELAYERROR)
7264 waitflag |= STR_DELAYERR;
7265 }
7266 if (uiop != NULL)
7267 fmode = uiop->uio_fmode;
7268 else
7269 fmode = 0;
7270
7271 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_WAIT,
7272 "kstrgetmsg calls strwaitq:%p, %p",
7273 vp, uiop);
7274 if (((error = strwaitq(stp, waitflag, (ssize_t)0,
7275 fmode, timout, &done))) != 0 || done) {
7276 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_DONE,
7277 "kstrgetmsg error or done:%p, %p",
7278 vp, uiop);
7279 mutex_exit(&stp->sd_lock);
7280 return (error);
7281 }
7282 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_AWAKE,
7283 "kstrgetmsg awakes:%p, %p", vp, uiop);
7284 if ((error = i_straccess(stp, JCREAD)) != 0) {
7285 mutex_exit(&stp->sd_lock);
7286 return (error);
7287 }
7288 first = 0;
7289 }
7290 ASSERT(bp != NULL);
7291 /*
7292 * Extract any mark information. If the message is not completely
7293 * consumed this information will be put in the mblk
7294 * that is putback.
7295 * If MSGMARKNEXT is set and the message is completely consumed
7296 * the STRATMARK flag will be set below. Likewise, if
7297 * MSGNOTMARKNEXT is set and the message is
7298 * completely consumed STRNOTATMARK will be set.
7299 */
7300 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT);
7301 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) !=
7302 (MSGMARKNEXT|MSGNOTMARKNEXT));
7303 pri = bp->b_band;
7304 if (mark != 0) {
7305 /*
7306		 * If the caller doesn't want the mark, return.
7307 * Used to implement MSG_WAITALL in sockets.
7308 */
7309 if (flags & MSG_NOMARK) {
7310 putback(stp, q, bp, pri);
7311 qbackenable(q, pri);
7312 mutex_exit(&stp->sd_lock);
7313 return (EWOULDBLOCK);
7314 }
7315 if (bp == stp->sd_mark) {
7316 mark |= _LASTMARK;
7317 stp->sd_mark = NULL;
7318 }
7319 }
7320
7321 /*
7322 * keep track of the first message type
7323 */
7324 type = bp->b_datap->db_type;
7325
7326 if (bp->b_datap->db_type == M_PASSFP) {
7327 if ((mark & _LASTMARK) && (stp->sd_mark == NULL))
7328 stp->sd_mark = bp;
7329 bp->b_flag |= mark & ~_LASTMARK;
7330 putback(stp, q, bp, pri);
7331 qbackenable(q, pri);
7332 mutex_exit(&stp->sd_lock);
7333 return (EBADMSG);
7334 }
7335 ASSERT(type != M_SIG);
7336
7337 if (flags & MSG_IPEEK) {
7338 /*
7339 * Clear any struioflag - we do the uiomove over again
7340 * when peeking since it simplifies the code.
7341 *
7342 * Dup the message and put the original back on the queue.
7343 * If dupmsg() fails, try again with copymsg() to see if
7344 * there is indeed a shortage of memory. dupmsg() may fail
7345 * if db_ref in any of the messages reaches its limit.
7346 */
7347
7348 if ((nbp = dupmsg(bp)) == NULL && (nbp = copymsg(bp)) == NULL) {
7349 /*
7350 * Restore the state of the stream head since we
7351 * need to drop sd_lock (strwaitbuf is sleeping).
7352 */
7353 size_t size = msgdsize(bp);
7354
7355 if ((mark & _LASTMARK) && (stp->sd_mark == NULL))
7356 stp->sd_mark = bp;
7357 bp->b_flag |= mark & ~_LASTMARK;
7358 putback(stp, q, bp, pri);
7359 mutex_exit(&stp->sd_lock);
7360 error = strwaitbuf(size, BPRI_HI);
7361 if (error) {
7362 /*
7363 * There is no net change to the queue thus
7364 * no need to qbackenable.
7365 */
7366 return (error);
7367 }
7368 goto retry;
7369 }
7370
7371 if ((mark & _LASTMARK) && (stp->sd_mark == NULL))
7372 stp->sd_mark = bp;
7373 bp->b_flag |= mark & ~_LASTMARK;
7374 putback(stp, q, bp, pri);
7375 bp = nbp;
7376 }
7377
7378 /*
7379 * Set this flag so strrput will not generate signals. Need to
7380	 * make sure this flag is cleared before leaving this routine,
7381 * else signals will stop being sent.
7382 */
7383 stp->sd_flag |= STRGETINPROG;
7384 mutex_exit(&stp->sd_lock);
7385
7386 if ((stp->sd_rputdatafunc != NULL) && (DB_TYPE(bp) == M_DATA)) {
7387 mblk_t *tmp, *prevmp;
7388
7389 /*
7390 * Put first non-data mblk back to stream head and
7391 * cut the mblk chain so sd_rputdatafunc only sees
7392 * M_DATA mblks. We can skip the first mblk since it
7393 * is M_DATA according to the condition above.
7394 */
7395 for (prevmp = bp, tmp = bp->b_cont; tmp != NULL;
7396 prevmp = tmp, tmp = tmp->b_cont) {
7397 if (DB_TYPE(tmp) != M_DATA) {
7398 prevmp->b_cont = NULL;
7399 mutex_enter(&stp->sd_lock);
7400 putback(stp, q, tmp, tmp->b_band);
7401 mutex_exit(&stp->sd_lock);
7402 break;
7403 }
7404 }
7405
7406 bp = (stp->sd_rputdatafunc)(stp->sd_vnode, bp,
7407 NULL, NULL, NULL, NULL);
7408
7409 if (bp == NULL)
7410 goto retry;
7411 }
7412
7413 if (STREAM_NEEDSERVICE(stp))
7414 stream_runservice(stp);
7415
7416 /*
7417 * Set HIPRI flag if message is priority.
7418 */
7419 if (type >= QPCTL)
7420 flg = MSG_HIPRI;
7421 else
7422 flg = MSG_BAND;
7423
7424 /*
7425 * First process PROTO or PCPROTO blocks, if any.
7426 */
7427 if (mctlp != NULL && type != M_DATA) {
7428 mblk_t *nbp;
7429
7430 *mctlp = bp;
7431 while (bp->b_cont && bp->b_cont->b_datap->db_type != M_DATA)
7432 bp = bp->b_cont;
7433 nbp = bp->b_cont;
7434 bp->b_cont = NULL;
7435 bp = nbp;
7436 }
7437
7438 if (bp && bp->b_datap->db_type != M_DATA) {
7439 /*
7440 * More PROTO blocks in msg. Will only happen if mctlp is NULL.
7441 */
7442 more |= MORECTL;
7443 savemp = bp;
7444 while (bp && bp->b_datap->db_type != M_DATA) {
7445 savemptail = bp;
7446 bp = bp->b_cont;
7447 }
7448 savemptail->b_cont = NULL;
7449 }
7450
7451 /*
7452 * Now process DATA blocks, if any.
7453 */
7454 if (uiop == NULL) {
7455 /* Append data to tail of mctlp */
7456
7457 if (mctlp != NULL) {
7458 mblk_t **mpp = mctlp;
7459
7460 while (*mpp != NULL)
7461 mpp = &((*mpp)->b_cont);
7462 *mpp = bp;
7463 bp = NULL;
7464 }
7465 } else if (uiop->uio_resid >= 0 && bp) {
7466 size_t oldresid = uiop->uio_resid;
7467
7468 /*
7469 * If a streams message is likely to consist
7470 * of many small mblks, it is pulled up into
7471 * one continuous chunk of memory.
7472 * The size of the first mblk may be bogus because
7473 * successive read() calls on the socket reduce
7474 * the size of this mblk until it is exhausted
7475 * and then the code walks on to the next. Thus
7476 * the size of the mblk may not be the original size
7477 * that was passed up, it's simply a remainder
7478 * and hence can be very small without any
7479 * implication that the packet is badly fragmented.
7480 * So the size of the possible second mblk is
7481 * used to spot a badly fragmented packet.
7482		 * See the longer comment by the mblk_pull_len
7483		 * declaration at the top of this file.
7484 */
7485
7486 if (bp->b_cont != NULL && MBLKL(bp->b_cont) < mblk_pull_len) {
7487 (void) pullupmsg(bp, -1);
7488 }
7489
7490 bp = struiocopyout(bp, uiop, &error);
7491 if (error != 0) {
7492 if (mctlp != NULL) {
7493 freemsg(*mctlp);
7494 *mctlp = NULL;
7495 } else
7496 freemsg(savemp);
7497 mutex_enter(&stp->sd_lock);
7498 /*
7499 * clear stream head hi pri flag based on
7500 * first message
7501 */
7502 if (!(flags & MSG_IPEEK) && (type >= QPCTL)) {
7503 ASSERT(type == M_PCPROTO);
7504 stp->sd_flag &= ~STRPRI;
7505 }
7506 more = 0;
7507 goto getmout;
7508 }
7509 /*
7510 * (pr == 1) indicates a partial read.
7511 */
7512 if (oldresid > uiop->uio_resid)
7513 pr = 1;
7514 }
7515
7516 if (bp) { /* more data blocks in msg */
7517 more |= MOREDATA;
7518 if (savemp)
7519 savemptail->b_cont = bp;
7520 else
7521 savemp = bp;
7522 }
7523
7524 mutex_enter(&stp->sd_lock);
7525 if (savemp) {
7526 if (flags & (MSG_IPEEK|MSG_DISCARDTAIL)) {
7527 /*
7528 * When MSG_DISCARDTAIL is set or
7529			 * when peeking, discard any tail. When peeking this
7530 * is the tail of the dup that was copied out - the
7531 * message has already been putback on the queue.
7532 * Return MOREDATA to the caller even though the data
7533 * is discarded. This is used by sockets (to
7534 * set MSG_TRUNC).
7535 */
7536 freemsg(savemp);
7537 if (!(flags & MSG_IPEEK) && (type >= QPCTL)) {
7538 ASSERT(type == M_PCPROTO);
7539 stp->sd_flag &= ~STRPRI;
7540 }
7541 } else if (pr && (savemp->b_datap->db_type == M_DATA) &&
7542 msgnodata(savemp)) {
7543 /*
7544 * Avoid queuing a zero-length tail part of
7545 * a message. pr=1 indicates that we read some of
7546 * the message.
7547 */
7548 freemsg(savemp);
7549 more &= ~MOREDATA;
7550 if (type >= QPCTL) {
7551 ASSERT(type == M_PCPROTO);
7552 stp->sd_flag &= ~STRPRI;
7553 }
7554 } else {
7555 savemp->b_band = pri;
7556 /*
7557 * If the first message was HIPRI and the one we're
7558 * putting back isn't, then clear STRPRI, otherwise
7559 * set STRPRI again. Note that we must set STRPRI
7560 * again since the flush logic in strrput_nondata()
7561 * may have cleared it while we had sd_lock dropped.
7562 */
7563
7564 if (type >= QPCTL) {
7565 ASSERT(type == M_PCPROTO);
7566 if (queclass(savemp) < QPCTL)
7567 stp->sd_flag &= ~STRPRI;
7568 else
7569 stp->sd_flag |= STRPRI;
7570 } else if (queclass(savemp) >= QPCTL) {
7571 /*
7572 * The first message was not a HIPRI message,
7573 * but the one we are about to putback is.
7574				 * For simplicity, we do not allow HIPRI
7575 * messages to be embedded in the message
7576 * body, so just force it to same type as
7577 * first message.
7578 */
7579 ASSERT(type == M_DATA || type == M_PROTO);
7580 ASSERT(savemp->b_datap->db_type == M_PCPROTO);
7581 savemp->b_datap->db_type = type;
7582 }
7583 if (mark != 0) {
7584 if ((mark & _LASTMARK) &&
7585 (stp->sd_mark == NULL)) {
7586 /*
7587 * If another marked message arrived
7588					 * while sd_lock was not held, sd_mark
7589 * would be non-NULL.
7590 */
7591 stp->sd_mark = savemp;
7592 }
7593 savemp->b_flag |= mark & ~_LASTMARK;
7594 }
7595 putback(stp, q, savemp, pri);
7596 }
7597 } else if (!(flags & MSG_IPEEK)) {
7598 /*
7599 * The complete message was consumed.
7600 *
7601 * If another M_PCPROTO arrived while sd_lock was not held
7602 * it would have been discarded since STRPRI was still set.
7603 *
7604 * Move the MSG*MARKNEXT information
7605 * to the stream head just in case
7606 * the read queue becomes empty.
7607 * clear stream head hi pri flag based on
7608 * first message
7609 *
7610 * If the stream head was at the mark
7611 * (STRATMARK) before we dropped sd_lock above
7612 * and some data was consumed then we have
7613 * moved past the mark thus STRATMARK is
7614 * cleared. However, if a message arrived in
7615 * strrput during the copyout above causing
7616		 * STRATMARK to be set, we cannot clear that
7617 * flag.
7618 * XXX A "perimeter" would help by single-threading strrput,
7619 * strread, strgetmsg and kstrgetmsg.
7620 */
7621 if (type >= QPCTL) {
7622 ASSERT(type == M_PCPROTO);
7623 stp->sd_flag &= ~STRPRI;
7624 }
7625 if (mark & (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) {
7626 if (mark & MSGMARKNEXT) {
7627 stp->sd_flag &= ~STRNOTATMARK;
7628 stp->sd_flag |= STRATMARK;
7629 } else if (mark & MSGNOTMARKNEXT) {
7630 stp->sd_flag &= ~STRATMARK;
7631 stp->sd_flag |= STRNOTATMARK;
7632 } else {
7633 stp->sd_flag &= ~(STRATMARK|STRNOTATMARK);
7634 }
7635 } else if (pr && (old_sd_flag & STRATMARK)) {
7636 stp->sd_flag &= ~STRATMARK;
7637 }
7638 }
7639
7640 *flagsp = flg;
7641 *prip = pri;
7642
7643 /*
7644 * Getmsg cleanup processing - if the state of the queue has changed
7645 * some signals may need to be sent and/or poll awakened.
7646 */
7647 getmout:
7648 qbackenable(q, pri);
7649
7650 /*
7651 * We dropped the stream head lock above. Send all M_SIG messages
7652 * before processing stream head for SIGPOLL messages.
7653 */
7654 ASSERT(MUTEX_HELD(&stp->sd_lock));
7655 while ((bp = q->q_first) != NULL &&
7656 (bp->b_datap->db_type == M_SIG)) {
7657 /*
7658 * sd_lock is held so the content of the read queue can not
7659 * change.
7660 */
7661 bp = getq(q);
7662 ASSERT(bp != NULL && bp->b_datap->db_type == M_SIG);
7663
7664 strsignal_nolock(stp, *bp->b_rptr, bp->b_band);
7665 mutex_exit(&stp->sd_lock);
7666 freemsg(bp);
7667 if (STREAM_NEEDSERVICE(stp))
7668 stream_runservice(stp);
7669 mutex_enter(&stp->sd_lock);
7670 }
7671
7672 /*
7673	 * The stream head cannot change while we make the determination
7674 * whether or not to send a signal. Drop the flag to allow strrput
7675 * to send firstmsgsigs again.
7676 */
7677 stp->sd_flag &= ~STRGETINPROG;
7678
7679 /*
7680	 * If the type of message at the front of the queue changed
7681	 * due to this receive, the appropriate signals and pollwakeup
7682	 * events are generated. The types of change are:
7683 * Processed a hipri message, q_first is not hipri.
7684 * Processed a band X message, and q_first is band Y.
7685 * The generated signals and pollwakeups are identical to what
7686 * strrput() generates should the message that is now on q_first
7687 * arrive to an empty read queue.
7688 *
7689 * Note: only strrput will send a signal for a hipri message.
7690 */
7691 if ((bp = q->q_first) != NULL && !(stp->sd_flag & STRPRI)) {
7692 strsigset_t signals = 0;
7693 strpollset_t pollwakeups = 0;
7694
7695 if (flg & MSG_HIPRI) {
7696 /*
7697 * Removed a hipri message. Regular data at
7698 * the front of the queue.
7699 */
7700 if (bp->b_band == 0) {
7701 signals = S_INPUT | S_RDNORM;
7702 pollwakeups = POLLIN | POLLRDNORM;
7703 } else {
7704 signals = S_INPUT | S_RDBAND;
7705 pollwakeups = POLLIN | POLLRDBAND;
7706 }
7707 } else if (pri != bp->b_band) {
7708 /*
7709 * The band is different for the new q_first.
7710 */
7711 if (bp->b_band == 0) {
7712 signals = S_RDNORM;
7713 pollwakeups = POLLIN | POLLRDNORM;
7714 } else {
7715 signals = S_RDBAND;
7716 pollwakeups = POLLIN | POLLRDBAND;
7717 }
7718 }
7719
7720 if (pollwakeups != 0) {
7721 if (pollwakeups == (POLLIN | POLLRDNORM)) {
7722 if (!(stp->sd_rput_opt & SR_POLLIN))
7723 goto no_pollwake;
7724 stp->sd_rput_opt &= ~SR_POLLIN;
7725 }
7726 mutex_exit(&stp->sd_lock);
7727 pollwakeup(&stp->sd_pollist, pollwakeups);
7728 mutex_enter(&stp->sd_lock);
7729 }
7730 no_pollwake:
7731
7732 if (stp->sd_sigflags & signals)
7733 strsendsig(stp->sd_siglist, signals, bp->b_band, 0);
7734 }
7735 mutex_exit(&stp->sd_lock);
7736
7737 rvp->r_val1 = more;
7738 return (error);
7739 #undef _LASTMARK
7740 }
7741
7742 /*
7743 * Put a message downstream.
7744 *
7745 * NOTE: strputmsg and kstrputmsg have much of the logic in common.
7746 */
7747 int
7748 strputmsg(
7749 struct vnode *vp,
7750 struct strbuf *mctl,
7751 struct strbuf *mdata,
7752 unsigned char pri,
7753 int flag,
7754 int fmode)
7755 {
7756 struct stdata *stp;
7757 queue_t *wqp;
7758 mblk_t *mp;
7759 ssize_t msgsize;
7760 ssize_t rmin, rmax;
7761 int error;
7762 struct uio uios;
7763 struct uio *uiop = &uios;
7764 struct iovec iovs;
7765 int xpg4 = 0;
7766
7767 ASSERT(vp->v_stream);
7768 stp = vp->v_stream;
7769 wqp = stp->sd_wrq;
7770
7771 /*
7772 * If it is an XPG4 application, we need to send
7773 * SIGPIPE below
7774 */
7775
7776 xpg4 = (flag & MSG_XPG4) ? 1 : 0;
7777 flag &= ~MSG_XPG4;
7778
7779 if (AU_AUDITING())
7780 audit_strputmsg(vp, mctl, mdata, pri, flag, fmode);
7781
7782 mutex_enter(&stp->sd_lock);
7783
7784 if ((error = i_straccess(stp, JCWRITE)) != 0) {
7785 mutex_exit(&stp->sd_lock);
7786 return (error);
7787 }
7788
7789 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) {
7790 error = strwriteable(stp, B_FALSE, xpg4);
7791 if (error != 0) {
7792 mutex_exit(&stp->sd_lock);
7793 return (error);
7794 }
7795 }
7796
7797 mutex_exit(&stp->sd_lock);
7798
7799 /*
7800 * Check for legal flag value.
7801 */
7802 switch (flag) {
7803 case MSG_HIPRI:
7804 if ((mctl->len < 0) || (pri != 0))
7805 return (EINVAL);
7806 break;
7807 case MSG_BAND:
7808 break;
7809
7810 default:
7811 return (EINVAL);
7812 }
7813
7814 TRACE_1(TR_FAC_STREAMS_FR, TR_STRPUTMSG_IN,
7815 "strputmsg in:stp %p", stp);
7816
7817 /* get these values from those cached in the stream head */
7818 rmin = stp->sd_qn_minpsz;
7819 rmax = stp->sd_qn_maxpsz;
7820
7821 /*
7822 * Make sure ctl and data sizes together fall within the
7823 * limits of the max and min receive packet sizes and do
7824 * not exceed system limit.
7825 */
7826 ASSERT((rmax >= 0) || (rmax == INFPSZ));
7827 if (rmax == 0) {
7828 return (ERANGE);
7829 }
7830 /*
7831 * Use the MAXIMUM of sd_maxblk and q_maxpsz.
7832 * Needed to prevent partial failures in the strmakedata loop.
7833 */
7834 if (stp->sd_maxblk != INFPSZ && rmax != INFPSZ && rmax < stp->sd_maxblk)
7835 rmax = stp->sd_maxblk;
7836
7837 if ((msgsize = mdata->len) < 0) {
7838 msgsize = 0;
7839 rmin = 0; /* no range check for NULL data part */
7840 }
7841 if ((msgsize < rmin) ||
7842 ((msgsize > rmax) && (rmax != INFPSZ)) ||
7843 (mctl->len > strctlsz)) {
7844 return (ERANGE);
7845 }
7846
7847 /*
7848 * Setup uio and iov for data part
7849 */
7850 iovs.iov_base = mdata->buf;
7851 iovs.iov_len = msgsize;
7852 uios.uio_iov = &iovs;
7853 uios.uio_iovcnt = 1;
7854 uios.uio_loffset = 0;
7855 uios.uio_segflg = UIO_USERSPACE;
7856 uios.uio_fmode = fmode;
7857 uios.uio_extflg = UIO_COPY_DEFAULT;
7858 uios.uio_resid = msgsize;
7859 uios.uio_offset = 0;
7860
7861 /* Ignore flow control in strput for HIPRI */
7862 if (flag & MSG_HIPRI)
7863 flag |= MSG_IGNFLOW;
7864
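	/*
	 * Build the control part and attempt to send the message; if
	 * strput fails with EWOULDBLOCK (flow controlled), wait for
	 * room on the stream and retry.
	 */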
7865 for (;;) {
7866 int done = 0;
7867
7868 /*
7869 * strput will always free the ctl mblk - even when strput
7870 * fails.
7871 */
7872 if ((error = strmakectl(mctl, flag, fmode, &mp)) != 0) {
7873 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT,
7874 "strputmsg out:stp %p out %d error %d",
7875 stp, 1, error);
7876 return (error);
7877 }
7878 /*
7879 * Verify that the whole message can be transferred by
7880 * strput.
7881 */
7882 ASSERT(stp->sd_maxblk == INFPSZ ||
7883 stp->sd_maxblk >= mdata->len);
7884
7885 msgsize = mdata->len;
7886 error = strput(stp, mp, uiop, &msgsize, 0, pri, flag);
7887 mdata->len = msgsize;
7888
7889 if (error == 0)
7890 break;
7891
7892 if (error != EWOULDBLOCK)
7893 goto out;
7894
7895 mutex_enter(&stp->sd_lock);
7896 /*
7897 * Check for a missed wakeup.
7898 * Needed since strput did not hold sd_lock across
7899 * the canputnext.
7900 */
7901 if (bcanputnext(wqp, pri)) {
7902 /* Try again */
7903 mutex_exit(&stp->sd_lock);
7904 continue;
7905 }
7906 TRACE_2(TR_FAC_STREAMS_FR, TR_STRPUTMSG_WAIT,
7907 "strputmsg wait:stp %p waits pri %d", stp, pri);
7908 if (((error = strwaitq(stp, WRITEWAIT, (ssize_t)0, fmode, -1,
7909 &done)) != 0) || done) {
7910 mutex_exit(&stp->sd_lock);
7911 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT,
7912 "strputmsg out:q %p out %d error %d",
7913 stp, 0, error);
7914 return (error);
7915 }
7916 TRACE_1(TR_FAC_STREAMS_FR, TR_STRPUTMSG_WAKE,
7917 "strputmsg wake:stp %p wakes", stp);
7918 if ((error = i_straccess(stp, JCWRITE)) != 0) {
7919 mutex_exit(&stp->sd_lock);
7920 return (error);
7921 }
7922 mutex_exit(&stp->sd_lock);
7923 }
7924 out:
7925 /*
7926 * For historic reasons, applications expect EAGAIN
7927	 * when a data mblk could not be allocated, so change
7928	 * ENOMEM back to EAGAIN.
7929 */
7930 if (error == ENOMEM)
7931 error = EAGAIN;
7932 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT,
7933 "strputmsg out:stp %p out %d error %d", stp, 2, error);
7934 return (error);
7935 }
7936
7937 /*
7938 * Put a message downstream.
7939 * Can send only an M_PROTO/M_PCPROTO by passing in a NULL uiop.
7940 * The fmode flag (NDELAY, NONBLOCK) is the OR of the flags in the uio
7941 * and the fmode parameter.
7942 *
7943 * This routine handles the consolidation private flags:
7944 * MSG_IGNERROR Ignore any stream head error except STPLEX.
7945 * MSG_HOLDSIG Hold signals while waiting for data.
7946 * MSG_IGNFLOW Don't check streams flow control.
7947 *
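 * An illustrative (hypothetical) use sending a pre-built M_PROTO mblk
 * with no data part while bypassing flow control, as sockfs does for
 * TPI control messages:
 *
 *	error = kstrputmsg(vp, mp, NULL, 0, 0, MSG_BAND | MSG_IGNFLOW, 0);
 *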
7948 * NOTE: strputmsg and kstrputmsg have much of the logic in common.
7949 */
7950 int
7951 kstrputmsg(
7952 struct vnode *vp,
7953 mblk_t *mctl,
7954 struct uio *uiop,
7955 ssize_t msgsize,
7956 unsigned char pri,
7957 int flag,
7958 int fmode)
7959 {
7960 struct stdata *stp;
7961 queue_t *wqp;
7962 ssize_t rmin, rmax;
7963 int error;
7964
7965 ASSERT(vp->v_stream);
7966 stp = vp->v_stream;
7967 wqp = stp->sd_wrq;
7968 if (AU_AUDITING())
7969 audit_strputmsg(vp, NULL, NULL, pri, flag, fmode);
7970 if (mctl == NULL)
7971 return (EINVAL);
7972
7973 mutex_enter(&stp->sd_lock);
7974
7975 if ((error = i_straccess(stp, JCWRITE)) != 0) {
7976 mutex_exit(&stp->sd_lock);
7977 freemsg(mctl);
7978 return (error);
7979 }
7980
7981 if ((stp->sd_flag & STPLEX) || !(flag & MSG_IGNERROR)) {
7982 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) {
7983 error = strwriteable(stp, B_FALSE, B_TRUE);
7984 if (error != 0) {
7985 mutex_exit(&stp->sd_lock);
7986 freemsg(mctl);
7987 return (error);
7988 }
7989 }
7990 }
7991
7992 mutex_exit(&stp->sd_lock);
7993
7994 /*
7995 * Check for legal flag value.
7996 */
7997 switch (flag & (MSG_HIPRI|MSG_BAND|MSG_ANY)) {
7998 case MSG_HIPRI:
7999 if (pri != 0) {
8000 freemsg(mctl);
8001 return (EINVAL);
8002 }
8003 break;
8004 case MSG_BAND:
8005 break;
8006 default:
8007 freemsg(mctl);
8008 return (EINVAL);
8009 }
8010
8011 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_IN,
8012 "kstrputmsg in:stp %p", stp);
8013
8014 /* get these values from those cached in the stream head */
8015 rmin = stp->sd_qn_minpsz;
8016 rmax = stp->sd_qn_maxpsz;
8017
8018 /*
8019 * Make sure ctl and data sizes together fall within the
8020 * limits of the max and min receive packet sizes and do
8021 * not exceed system limit.
8022 */
8023 ASSERT((rmax >= 0) || (rmax == INFPSZ));
8024 if (rmax == 0) {
8025 freemsg(mctl);
8026 return (ERANGE);
8027 }
8028 /*
8029 * Use the MAXIMUM of sd_maxblk and q_maxpsz.
8030 * Needed to prevent partial failures in the strmakedata loop.
8031 */
8032 if (stp->sd_maxblk != INFPSZ && rmax != INFPSZ && rmax < stp->sd_maxblk)
8033 rmax = stp->sd_maxblk;
8034
8035 if (uiop == NULL) {
8036 msgsize = -1;
8037 rmin = -1; /* no range check for NULL data part */
8038 } else {
8039 /* Use uio flags as well as the fmode parameter flags */
8040 fmode |= uiop->uio_fmode;
8041
8042 if ((msgsize < rmin) ||
8043 ((msgsize > rmax) && (rmax != INFPSZ))) {
8044 freemsg(mctl);
8045 return (ERANGE);
8046 }
8047 }
8048
8049 /* Ignore flow control in strput for HIPRI */
8050 if (flag & MSG_HIPRI)
8051 flag |= MSG_IGNFLOW;
8052
8053 for (;;) {
8054 int done = 0;
8055 int waitflag;
8056 mblk_t *mp;
8057
8058 /*
8059 * strput will always free the ctl mblk - even when strput
8060 * fails. If MSG_IGNFLOW is set then any error returned
8061 * will cause us to break the loop, so we don't need a copy
8062 * of the message. If MSG_IGNFLOW is not set, then we can
8063 * get hit by flow control and be forced to try again. In
8064 * this case we need to have a copy of the message. We
8065 * do this using copymsg since the message may get modified
8066 * by something below us.
8067 *
8068 * We've observed that many TPI providers do not check db_ref
8069 * on the control messages but blindly reuse them for the
8070 * T_OK_ACK/T_ERROR_ACK. Thus using copymsg is more
8071 * friendly to such providers than using dupmsg. Also, note
8072 * that sockfs uses MSG_IGNFLOW for all TPI control messages.
8073 * Only data messages are subject to flow control, hence
8074 * subject to this copymsg.
8075 */
8076 if (flag & MSG_IGNFLOW) {
8077 mp = mctl;
8078 mctl = NULL;
8079 } else {
8080 do {
8081 /*
8082 * If a message has a free pointer, the message
8083 * must be dupmsg to maintain this pointer.
8084 * Code using this facility must be sure
8085 * that modules below will not change the
8086 * contents of the dblk without checking db_ref
8087 * first. If db_ref is > 1, then the module
8088 * needs to do a copymsg first. Otherwise,
8089 * the contents of the dblk may become
8090				 * inconsistent because the freemsg/freeb below
8091 * may end up calling atomic_add_32_nv.
8092 * The atomic_add_32_nv in freeb (accessing
8093 * all of db_ref, db_type, db_flags, and
8094 * db_struioflag) does not prevent other threads
8095 * from concurrently trying to modify e.g.
8096 * db_type.
8097 */
8098 if (mctl->b_datap->db_frtnp != NULL)
8099 mp = dupmsg(mctl);
8100 else
8101 mp = copymsg(mctl);
8102
8103 if (mp != NULL)
8104 break;
8105
8106 error = strwaitbuf(msgdsize(mctl), BPRI_MED);
8107 if (error) {
8108 freemsg(mctl);
8109 return (error);
8110 }
8111 } while (mp == NULL);
8112 }
8113 /*
8114 * Verify that all of msgsize can be transferred by
8115 * strput.
8116 */
8117 ASSERT(stp->sd_maxblk == INFPSZ || stp->sd_maxblk >= msgsize);
8118 error = strput(stp, mp, uiop, &msgsize, 0, pri, flag);
8119 if (error == 0)
8120 break;
8121
8122 if (error != EWOULDBLOCK)
8123 goto out;
8124
8125 /*
8126		 * If MSG_IGNFLOW is set we should have broken out of the
8127		 * loop above.
8128 */
8129 ASSERT(!(flag & MSG_IGNFLOW));
8130 mutex_enter(&stp->sd_lock);
8131 /*
8132 * Check for a missed wakeup.
8133 * Needed since strput did not hold sd_lock across
8134 * the canputnext.
8135 */
8136 if (bcanputnext(wqp, pri)) {
8137 /* Try again */
8138 mutex_exit(&stp->sd_lock);
8139 continue;
8140 }
8141 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_WAIT,
8142 "kstrputmsg wait:stp %p waits pri %d", stp, pri);
8143
8144 waitflag = WRITEWAIT;
8145 if (flag & (MSG_HOLDSIG|MSG_IGNERROR)) {
8146 if (flag & MSG_HOLDSIG)
8147 waitflag |= STR_NOSIG;
8148 if (flag & MSG_IGNERROR)
8149 waitflag |= STR_NOERROR;
8150 }
8151 if (((error = strwaitq(stp, waitflag,
8152 (ssize_t)0, fmode, -1, &done)) != 0) || done) {
8153 mutex_exit(&stp->sd_lock);
8154 TRACE_3(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_OUT,
8155 "kstrputmsg out:stp %p out %d error %d",
8156 stp, 0, error);
8157 freemsg(mctl);
8158 return (error);
8159 }
8160 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_WAKE,
8161 "kstrputmsg wake:stp %p wakes", stp);
8162 if ((error = i_straccess(stp, JCWRITE)) != 0) {
8163 mutex_exit(&stp->sd_lock);
8164 freemsg(mctl);
8165 return (error);
8166 }
8167 mutex_exit(&stp->sd_lock);
8168 }
8169 out:
8170 freemsg(mctl);
8171 /*
8172 * For historic reasons, applications expect EAGAIN
8173	 * when a data mblk could not be allocated, so change
8174	 * ENOMEM back to EAGAIN.
8175 */
8176 if (error == ENOMEM)
8177 error = EAGAIN;
8178 TRACE_3(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_OUT,
8179 "kstrputmsg out:stp %p out %d error %d", stp, 2, error);
8180 return (error);
8181 }
8182
8183 /*
8184 * Determines whether the necessary conditions are set on a stream
8185 * for it to be readable, writeable, or have exceptions.
8186 *
8187 * strpoll handles the consolidation private events:
8188 * POLLNOERR Do not return POLLERR even if there are stream
8189 * head errors.
8190 * Used by sockfs.
8191 * POLLRDDATA Do not return POLLIN unless at least one message on
8192 * the queue contains one or more M_DATA mblks. Thus
8193 * when this flag is set a queue with only
8194 * M_PROTO/M_PCPROTO mblks does not return POLLIN.
8195 * Used by sockfs to ignore T_EXDATA_IND messages.
8196 *
8197 * Note: POLLRDDATA assumes that synch streams only return messages with
8198 * an M_DATA attached (i.e. not messages consisting of only
8199 * an M_PROTO/M_PCPROTO part).
8200 */
8201 int
8202 strpoll(struct stdata *stp, short events_arg, int anyyet, short *reventsp,
8203 struct pollhead **phpp)
8204 {
8205 int events = (ushort_t)events_arg;
8206 int retevents = 0;
8207 mblk_t *mp;
8208 qband_t *qbp;
8209 long sd_flags = stp->sd_flag;
8210 int headlocked = 0;
8211
8212 /*
8213 * For performance, a single 'if' tests for most possible edge
8214 * conditions in one shot
8215 */
8216 if (sd_flags & (STPLEX | STRDERR | STWRERR)) {
8217 if (sd_flags & STPLEX) {
8218 *reventsp = POLLNVAL;
8219 return (EINVAL);
8220 }
8221 if (((events & (POLLIN | POLLRDNORM | POLLRDBAND | POLLPRI)) &&
8222 (sd_flags & STRDERR)) ||
8223 ((events & (POLLOUT | POLLWRNORM | POLLWRBAND)) &&
8224 (sd_flags & STWRERR))) {
8225 if (!(events & POLLNOERR)) {
8226 *reventsp = POLLERR;
8227 return (0);
8228 }
8229 }
8230 }
8231 if (sd_flags & STRHUP) {
8232 retevents |= POLLHUP;
8233 } else if (events & (POLLWRNORM | POLLWRBAND)) {
8234 queue_t *tq;
8235 queue_t *qp = stp->sd_wrq;
8236
8237 claimstr(qp);
8238 /* Find next module forward that has a service procedure */
8239 tq = qp->q_next->q_nfsrv;
8240 ASSERT(tq != NULL);
8241
8242 if (polllock(&stp->sd_pollist, QLOCK(tq)) != 0) {
8243 releasestr(qp);
8244 *reventsp = POLLNVAL;
8245 return (0);
8246 }
8247 if (events & POLLWRNORM) {
8248 queue_t *sqp;
8249
8250 if (tq->q_flag & QFULL)
8251 /* ensure backq svc procedure runs */
8252 tq->q_flag |= QWANTW;
8253 else if ((sqp = stp->sd_struiowrq) != NULL) {
8254 /* Check sync stream barrier write q */
8255 mutex_exit(QLOCK(tq));
8256 if (polllock(&stp->sd_pollist,
8257 QLOCK(sqp)) != 0) {
8258 releasestr(qp);
8259 *reventsp = POLLNVAL;
8260 return (0);
8261 }
8262 if (sqp->q_flag & QFULL)
8263 /* ensure pollwakeup() is done */
8264 sqp->q_flag |= QWANTWSYNC;
8265 else
8266 retevents |= POLLOUT;
8267				/* Any band write events still to check? */
8268 if (! (events & POLLWRBAND)) {
8269 mutex_exit(QLOCK(sqp));
8270 releasestr(qp);
8271 goto chkrd;
8272 }
8273 mutex_exit(QLOCK(sqp));
8274 if (polllock(&stp->sd_pollist,
8275 QLOCK(tq)) != 0) {
8276 releasestr(qp);
8277 *reventsp = POLLNVAL;
8278 return (0);
8279 }
8280 } else
8281 retevents |= POLLOUT;
8282 }
8283 if (events & POLLWRBAND) {
8284 qbp = tq->q_bandp;
8285 if (qbp) {
8286 while (qbp) {
8287 if (qbp->qb_flag & QB_FULL)
8288 qbp->qb_flag |= QB_WANTW;
8289 else
8290 retevents |= POLLWRBAND;
8291 qbp = qbp->qb_next;
8292 }
8293 } else {
8294 retevents |= POLLWRBAND;
8295 }
8296 }
8297 mutex_exit(QLOCK(tq));
8298 releasestr(qp);
8299 }
8300 chkrd:
8301 if (sd_flags & STRPRI) {
8302 retevents |= (events & POLLPRI);
8303 } else if (events & (POLLRDNORM | POLLRDBAND | POLLIN)) {
8304 queue_t *qp = _RD(stp->sd_wrq);
8305 int normevents = (events & (POLLIN | POLLRDNORM));
8306
8307 /*
8308 * Note: Need to do polllock() here since ps_lock may be
8309 * held. See bug 4191544.
8310 */
8311 if (polllock(&stp->sd_pollist, &stp->sd_lock) != 0) {
8312 *reventsp = POLLNVAL;
8313 return (0);
8314 }
8315 headlocked = 1;
8316 mp = qp->q_first;
8317 while (mp) {
8318 /*
8319 * For POLLRDDATA we scan b_cont and b_next until we
8320 * find an M_DATA.
8321 */
8322 if ((events & POLLRDDATA) &&
8323 mp->b_datap->db_type != M_DATA) {
8324 mblk_t *nmp = mp->b_cont;
8325
8326 while (nmp != NULL &&
8327 nmp->b_datap->db_type != M_DATA)
8328 nmp = nmp->b_cont;
8329 if (nmp == NULL) {
8330 mp = mp->b_next;
8331 continue;
8332 }
8333 }
8334 if (mp->b_band == 0)
8335 retevents |= normevents;
8336 else
8337 retevents |= (events & (POLLIN | POLLRDBAND));
8338 break;
8339 }
8340 if (!(retevents & normevents) && (stp->sd_wakeq & RSLEEP)) {
8341 /*
8342 * Sync stream barrier read queue has data.
8343 */
8344 retevents |= normevents;
8345 }
8346		/* Treat EOF as normal data */
8347 if (sd_flags & STREOF)
8348 retevents |= normevents;
8349 }
8350
8351 /*
8352 * Pass back a pollhead if no events are pending or if edge-triggering
8353 * has been configured on this resource.
8354 */
8355 if ((retevents == 0 && !anyyet) || (events & POLLET)) {
8356 *phpp = &stp->sd_pollist;
8357 if (headlocked == 0) {
8358 if (polllock(&stp->sd_pollist, &stp->sd_lock) != 0) {
8359 *reventsp = POLLNVAL;
8360 return (0);
8361 }
8362 headlocked = 1;
8363 }
8364 stp->sd_rput_opt |= SR_POLLIN;
8365 }
8366
8367 *reventsp = (short)retevents;
8368 if (headlocked)
8369 mutex_exit(&stp->sd_lock);
8370 return (0);
8371 }
8372
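/*
 * Illustrative sketch (not part of the build): how a consolidation-
 * private consumer might use the POLLRDDATA and POLLNOERR events
 * documented above.  The wrapper name is hypothetical; strpoll()'s
 * signature is as declared in this file.
 */
#if 0
static int
example_poll_data_only(struct stdata *stp, short *revp,
    struct pollhead **phpp)
{
	/*
	 * POLLRDDATA: report POLLIN only if a queued message contains
	 * at least one M_DATA mblk, so messages made up purely of
	 * M_PROTO/M_PCPROTO do not wake the caller.
	 * POLLNOERR: suppress POLLERR even if stream head errors are set.
	 */
	return (strpoll(stp, POLLIN | POLLRDDATA | POLLNOERR, 0,
	    revp, phpp));
}
#endif
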
8373 /*
8374 * The purpose of putback() is to ensure that sleeping polls/reads
8375 * are awakened when no new messages are arriving at the
8376 * stream head and a message is placed back on the read queue.
8377 *
8378 * sd_lock must be held when messages are placed back on the stream
8379 * head.  (getq() holds sd_lock when it removes messages from
8380 * the queue.)
8381 */
8382
8383 static void
8384 putback(struct stdata *stp, queue_t *q, mblk_t *bp, int band)
8385 {
8386 mblk_t *qfirst;
8387 ASSERT(MUTEX_HELD(&stp->sd_lock));
8388
8389 /*
8390 * As a result of lock-step ordering around q_lock and sd_lock,
8391 * it's possible for function calls like putnext() and
8392 * canputnext() to get an inaccurate picture of how much
8393 * data is really being processed at the stream head.
8394 * We only consolidate with existing messages on the queue
8395 * if the length of the message we want to put back is smaller
8396 * than the queue hiwater mark.
8397 */
8398 if ((stp->sd_rput_opt & SR_CONSOL_DATA) &&
8399 (DB_TYPE(bp) == M_DATA) && ((qfirst = q->q_first) != NULL) &&
8400 (DB_TYPE(qfirst) == M_DATA) &&
8401 ((qfirst->b_flag & (MSGMARK|MSGDELIM)) == 0) &&
8402 ((bp->b_flag & (MSGMARK|MSGDELIM|MSGMARKNEXT)) == 0) &&
8403 (mp_cont_len(bp, NULL) < q->q_hiwat)) {
8404 /*
8405 * We use the same logic as defined in strrput()
8406 * but in reverse as we are putting back onto the
8407 * queue and want to retain byte ordering.
8408 * Consolidate M_DATA messages with M_DATA ONLY.
8409 * strrput() allows the consolidation of M_DATA onto
8410 * M_PROTO | M_PCPROTO but not the other way round.
8411 *
8412 * The consolidation does not take place if the message
8413 * we are returning to the queue is marked with either
8414 * of the marks or the delim flag or if q_first
8415 * is marked with MSGMARK. The MSGMARK check is needed to
8416 * handle the odd semantics of MSGMARK where essentially
8417 * the whole message is to be treated as marked.
8418 * Carry any MSGMARKNEXT and MSGNOTMARKNEXT from q_first
8419 * to the front of the b_cont chain.
8420 */
8421 rmvq_noenab(q, qfirst);
8422
8423 /*
8424 * The first message in the b_cont list
8425 * tracks MSGMARKNEXT and MSGNOTMARKNEXT.
8426 * We need to handle the case where we
8427 * are appending:
8428 *
8429 * 1) a MSGMARKNEXT to a MSGNOTMARKNEXT.
8430 * 2) a MSGMARKNEXT to a plain message.
8431 * 3) a MSGNOTMARKNEXT to a plain message
8432 * 4) a MSGNOTMARKNEXT to a MSGNOTMARKNEXT
8433 * message.
8434 *
8435 * Thus we never append a MSGMARKNEXT or
8436 * MSGNOTMARKNEXT to a MSGMARKNEXT message.
8437 */
8438 if (qfirst->b_flag & MSGMARKNEXT) {
8439 bp->b_flag |= MSGMARKNEXT;
8440 bp->b_flag &= ~MSGNOTMARKNEXT;
8441 qfirst->b_flag &= ~MSGMARKNEXT;
8442 } else if (qfirst->b_flag & MSGNOTMARKNEXT) {
8443 bp->b_flag |= MSGNOTMARKNEXT;
8444 qfirst->b_flag &= ~MSGNOTMARKNEXT;
8445 }
8446
8447 linkb(bp, qfirst);
8448 }
8449 (void) putbq(q, bp);
8450
8451 /*
8452	 * A message may have arrived while sd_lock was dropped in the
8453	 * calling routine.  If that is the case and STR*ATMARK info was
8454	 * received, we need to move it from the stream head to q_last
8455	 * so that SIOCATMARK can return the proper value.
8456 */
8457 if (stp->sd_flag & (STRATMARK | STRNOTATMARK)) {
8458 unsigned short *flagp = &q->q_last->b_flag;
8459 uint_t b_flag = (uint_t)*flagp;
8460
8461 if (stp->sd_flag & STRATMARK) {
8462 b_flag &= ~MSGNOTMARKNEXT;
8463 b_flag |= MSGMARKNEXT;
8464 stp->sd_flag &= ~STRATMARK;
8465 } else {
8466 b_flag &= ~MSGMARKNEXT;
8467 b_flag |= MSGNOTMARKNEXT;
8468 stp->sd_flag &= ~STRNOTATMARK;
8469 }
8470 *flagp = (unsigned short) b_flag;
8471 }
8472
8473 #ifdef DEBUG
8474 /*
8475 * Make sure that the flags are not messed up.
8476 */
8477 {
8478 mblk_t *mp;
8479 mp = q->q_last;
8480 while (mp != NULL) {
8481 ASSERT((mp->b_flag & (MSGMARKNEXT|MSGNOTMARKNEXT)) !=
8482 (MSGMARKNEXT|MSGNOTMARKNEXT));
8483 mp = mp->b_cont;
8484 }
8485 }
8486 #endif
8487 if (q->q_first == bp) {
8488 short pollevents;
8489
8490 if (stp->sd_flag & RSLEEP) {
8491 stp->sd_flag &= ~RSLEEP;
8492 cv_broadcast(&q->q_wait);
8493 }
8494 if (stp->sd_flag & STRPRI) {
8495 pollevents = POLLPRI;
8496 } else {
8497 if (band == 0) {
8498 if (!(stp->sd_rput_opt & SR_POLLIN))
8499 return;
8500 stp->sd_rput_opt &= ~SR_POLLIN;
8501 pollevents = POLLIN | POLLRDNORM;
8502 } else {
8503 pollevents = POLLIN | POLLRDBAND;
8504 }
8505 }
8506 mutex_exit(&stp->sd_lock);
8507 pollwakeup(&stp->sd_pollist, pollevents);
8508 mutex_enter(&stp->sd_lock);
8509 }
8510 }
8511
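/*
 * Illustrative sketch (not part of the build): the consolidation test
 * at the top of putback() restated as a hypothetical predicate, purely
 * as a readability aid.  putback() itself performs the test inline as
 * shown above.
 */
#if 0
static boolean_t
example_putback_can_consol(struct stdata *stp, queue_t *q, mblk_t *bp)
{
	mblk_t *qfirst = q->q_first;

	/* Only consolidate unmarked, undelimited M_DATA below hiwater. */
	return (((stp->sd_rput_opt & SR_CONSOL_DATA) &&
	    DB_TYPE(bp) == M_DATA &&
	    qfirst != NULL && DB_TYPE(qfirst) == M_DATA &&
	    (qfirst->b_flag & (MSGMARK|MSGDELIM)) == 0 &&
	    (bp->b_flag & (MSGMARK|MSGDELIM|MSGMARKNEXT)) == 0 &&
	    mp_cont_len(bp, NULL) < q->q_hiwat) ? B_TRUE : B_FALSE);
}
#endif
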
8512 /*
8513 * Return a held vnode for the stream head attached to the
8514 * given queue.
8515 * It is the responsibility of the calling routine to ensure
8516 * that the queue does not go away (e.g. by being popped).
8517 */
8518 vnode_t *
8519 strq2vp(queue_t *qp)
8520 {
8521 vnode_t *vp;
8522 vp = STREAM(qp)->sd_vnode;
8523 ASSERT(vp != NULL);
8524 VN_HOLD(vp);
8525 return (vp);
8526 }
8527
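/*
 * Illustrative sketch (not part of the build): strq2vp() returns the
 * vnode with an extra hold, so the caller must drop that hold with
 * VN_RELE() when done.  The caller below is hypothetical.
 */
#if 0
static void
example_use_stream_vnode(queue_t *qp)
{
	vnode_t *vp = strq2vp(qp);	/* vnode is returned held */

	/* ... use vp; the hold keeps it from going away ... */

	VN_RELE(vp);			/* drop the hold taken by strq2vp() */
}
#endif
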
8528 /*
8529 * Return the stream head write queue for the given vp.
8530 * It is the responsibility of the calling routine to ensure
8531 * that neither the stream nor the vnode closes.
8532 */
8533 queue_t *
8534 strvp2wq(vnode_t *vp)
8535 {
8536 ASSERT(vp->v_stream != NULL);
8537 return (vp->v_stream->sd_wrq);
8538 }
8539
8540 /*
8541 * Issue a pollwakeup on the stream head.
8542 * It is the responsibility of the calling routine to ensure
8543 * that neither the stream nor the vnode closes.
8544 */
8545 void
8546 strpollwakeup(vnode_t *vp, short event)
8547 {
8548 ASSERT(vp->v_stream);
8549 pollwakeup(&vp->v_stream->sd_pollist, event);
8550 }
8551
8552 /*
8553 * Mate the stream heads of two vnodes together. If the two vnodes are the
8554 * same, we just make the write-side point at the read-side -- otherwise,
8555 * we do a full mate. Only works on vnodes associated with streams that are
8556 * still being built and thus have only a stream head.
8557 */
8558 void
8559 strmate(vnode_t *vp1, vnode_t *vp2)
8560 {
8561 queue_t *wrq1 = strvp2wq(vp1);
8562 queue_t *wrq2 = strvp2wq(vp2);
8563
8564 /*
8565 * Verify that there are no modules on the stream yet. We also
8566 * rely on the stream head always having a service procedure to
8567 * avoid tweaking q_nfsrv.
8568 */
8569 ASSERT(wrq1->q_next == NULL && wrq2->q_next == NULL);
8570 ASSERT(wrq1->q_qinfo->qi_srvp != NULL);
8571 ASSERT(wrq2->q_qinfo->qi_srvp != NULL);
8572
8573 /*
8574 * If the queues are the same, just twist; otherwise do a full mate.
8575 */
8576 if (wrq1 == wrq2) {
8577 wrq1->q_next = _RD(wrq1);
8578 } else {
8579 wrq1->q_next = _RD(wrq2);
8580 wrq2->q_next = _RD(wrq1);
8581 STREAM(wrq1)->sd_mate = STREAM(wrq2);
8582 STREAM(wrq1)->sd_flag |= STRMATE;
8583 STREAM(wrq2)->sd_mate = STREAM(wrq1);
8584 STREAM(wrq2)->sd_flag |= STRMATE;
8585 }
8586 }
8587
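/*
 * Illustrative sketch (not part of the build): the two strmate() cases
 * described above, shown from a hypothetical caller.  The vnodes are
 * assumed to be freshly created stream heads with no modules pushed.
 */
#if 0
static void
example_mate(vnode_t *vp1, vnode_t *vp2)
{
	strmate(vp1, vp2);

	if (vp1 != vp2) {
		/* Full mate: each stream head records the other as mate. */
		ASSERT(vp1->v_stream->sd_mate == vp2->v_stream);
		ASSERT(vp2->v_stream->sd_mate == vp1->v_stream);
		ASSERT(vp1->v_stream->sd_flag & STRMATE);
	} else {
		/* Same vnode: the write side is simply twisted back. */
		ASSERT(strvp2wq(vp1)->q_next == _RD(strvp2wq(vp1)));
	}
}
#endif
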
8588 /*
8589 * XXX will go away when the console is correctly fixed.
8590 * Clean up the console PIDs left over from previous I_SETSIG calls;
8591 * called only from cnopen(), which never calls strclean().
8592 */
8593 void
8594 str_cn_clean(struct vnode *vp)
8595 {
8596 strsig_t *ssp, *pssp, *tssp;
8597 struct stdata *stp;
8598 struct pid *pidp;
8599 int update = 0;
8600
8601 ASSERT(vp->v_stream);
8602 stp = vp->v_stream;
8603 pssp = NULL;
8604 mutex_enter(&stp->sd_lock);
8605 ssp = stp->sd_siglist;
8606 while (ssp) {
8607 mutex_enter(&pidlock);
8608 pidp = ssp->ss_pidp;
8609 /*
8610 * Get rid of PID if the proc is gone.
8611 */
8612 if (pidp->pid_prinactive) {
8613 tssp = ssp->ss_next;
8614 if (pssp)
8615 pssp->ss_next = tssp;
8616 else
8617 stp->sd_siglist = tssp;
8618 ASSERT(pidp->pid_ref <= 1);
8619 PID_RELE(ssp->ss_pidp);
8620 mutex_exit(&pidlock);
8621 kmem_free(ssp, sizeof (strsig_t));
8622 update = 1;
8623 ssp = tssp;
8624 continue;
8625 } else
8626 mutex_exit(&pidlock);
8627 pssp = ssp;
8628 ssp = ssp->ss_next;
8629 }
8630 if (update) {
8631 stp->sd_sigflags = 0;
8632 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
8633 stp->sd_sigflags |= ssp->ss_events;
8634 }
8635 mutex_exit(&stp->sd_lock);
8636 }
8637
8638 /*
8639 * Return B_TRUE if there is data in the message, B_FALSE otherwise.
8640 */
8641 static boolean_t
8642 msghasdata(mblk_t *bp)
8643 {
8644 for (; bp; bp = bp->b_cont)
8645 if (bp->b_datap->db_type == M_DATA) {
8646 ASSERT(bp->b_wptr >= bp->b_rptr);
8647 if (bp->b_wptr > bp->b_rptr)
8648 return (B_TRUE);
8649 }
8650 return (B_FALSE);
8651 }
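
/*
 * Illustrative sketch (not part of the build): msghasdata() ignores
 * zero-length M_DATA mblks, so only a message carrying actual payload
 * bytes counts as having data.  The example below is hypothetical and
 * only demonstrates the intended semantics.
 */
#if 0
static void
example_msghasdata(void)
{
	mblk_t *mp = allocb(16, BPRI_MED);

	if (mp == NULL)
		return;

	/* Freshly allocated: b_rptr == b_wptr, so no data yet. */
	ASSERT(msghasdata(mp) == B_FALSE);

	/* Advance b_wptr past one payload byte; now there is data. */
	*mp->b_wptr++ = 0x42;
	ASSERT(msghasdata(mp) == B_TRUE);

	freeb(mp);
}
#endif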